@geekmidas/cli 0.48.0 → 0.49.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. package/dist/{dokploy-api-DvzIDxTj.mjs → dokploy-api-94KzmTVf.mjs} +4 -4
  2. package/dist/dokploy-api-94KzmTVf.mjs.map +1 -0
  3. package/dist/dokploy-api-CItuaWTq.mjs +3 -0
  4. package/dist/dokploy-api-DBNE8MDt.cjs +3 -0
  5. package/dist/{dokploy-api-BDLu0qWi.cjs → dokploy-api-YD8WCQfW.cjs} +4 -4
  6. package/dist/dokploy-api-YD8WCQfW.cjs.map +1 -0
  7. package/dist/index.cjs +2392 -1888
  8. package/dist/index.cjs.map +1 -1
  9. package/dist/index.mjs +2389 -1885
  10. package/dist/index.mjs.map +1 -1
  11. package/package.json +6 -4
  12. package/src/build/__tests__/handler-templates.spec.ts +947 -0
  13. package/src/deploy/__tests__/__fixtures__/entry-apps/async-entry.ts +24 -0
  14. package/src/deploy/__tests__/__fixtures__/entry-apps/nested-config-entry.ts +24 -0
  15. package/src/deploy/__tests__/__fixtures__/entry-apps/no-env-entry.ts +12 -0
  16. package/src/deploy/__tests__/__fixtures__/entry-apps/simple-entry.ts +14 -0
  17. package/src/deploy/__tests__/__fixtures__/entry-apps/throwing-entry.ts +16 -0
  18. package/src/deploy/__tests__/__fixtures__/env-parsers/non-function-export.ts +10 -0
  19. package/src/deploy/__tests__/__fixtures__/env-parsers/parseable-env-parser.ts +18 -0
  20. package/src/deploy/__tests__/__fixtures__/env-parsers/throwing-env-parser.ts +18 -0
  21. package/src/deploy/__tests__/__fixtures__/env-parsers/valid-env-parser.ts +16 -0
  22. package/src/deploy/__tests__/dns-verification.spec.ts +229 -0
  23. package/src/deploy/__tests__/dokploy-api.spec.ts +2 -3
  24. package/src/deploy/__tests__/domain.spec.ts +7 -3
  25. package/src/deploy/__tests__/env-resolver.spec.ts +469 -0
  26. package/src/deploy/__tests__/index.spec.ts +12 -12
  27. package/src/deploy/__tests__/secrets.spec.ts +4 -1
  28. package/src/deploy/__tests__/sniffer.spec.ts +326 -1
  29. package/src/deploy/__tests__/state.spec.ts +844 -0
  30. package/src/deploy/dns/hostinger-api.ts +4 -1
  31. package/src/deploy/dns/index.ts +113 -1
  32. package/src/deploy/docker.ts +1 -2
  33. package/src/deploy/dokploy-api.ts +18 -9
  34. package/src/deploy/domain.ts +5 -4
  35. package/src/deploy/env-resolver.ts +278 -0
  36. package/src/deploy/index.ts +525 -119
  37. package/src/deploy/secrets.ts +7 -2
  38. package/src/deploy/sniffer-envkit-patch.ts +43 -0
  39. package/src/deploy/sniffer-hooks.ts +52 -0
  40. package/src/deploy/sniffer-loader.ts +23 -0
  41. package/src/deploy/sniffer-worker.ts +74 -0
  42. package/src/deploy/sniffer.ts +136 -14
  43. package/src/deploy/state.ts +162 -1
  44. package/src/init/versions.ts +3 -3
  45. package/tsconfig.tsbuildinfo +1 -1
  46. package/dist/dokploy-api-BDLu0qWi.cjs.map +0 -1
  47. package/dist/dokploy-api-BN3V57z1.mjs +0 -3
  48. package/dist/dokploy-api-BdCKjFDA.cjs +0 -3
  49. package/dist/dokploy-api-DvzIDxTj.mjs.map +0 -1
package/dist/index.mjs CHANGED
@@ -3,7 +3,7 @@ import { __require, getAppBuildOrder, getDependencyEnvVars, getDeployTargetError
3
3
  import { getAppNameFromCwd, loadAppConfig, loadConfig, loadWorkspaceConfig, parseModuleConfig } from "./config-C3LSBNSl.mjs";
4
4
  import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, OpenApiTsGenerator, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-C3C-BzIZ.mjs";
5
5
  import { getKeyPath, maskPassword, readStageSecrets, secretsExist, setCustomSecret, toEmbeddableSecrets, writeStageSecrets } from "./storage-Dhst7BhI.mjs";
6
- import { DokployApi } from "./dokploy-api-DvzIDxTj.mjs";
6
+ import { DokployApi } from "./dokploy-api-94KzmTVf.mjs";
7
7
  import { encryptSecrets } from "./encryption-BC4MAODn.mjs";
8
8
  import { generateReactQueryCommand } from "./openapi-react-query-ZoP9DPbY.mjs";
9
9
  import { createRequire } from "node:module";
@@ -23,13 +23,14 @@ import { Cron } from "@geekmidas/constructs/crons";
23
23
  import { Function } from "@geekmidas/constructs/functions";
24
24
  import { Subscriber } from "@geekmidas/constructs/subscribers";
25
25
  import { createHash, randomBytes } from "node:crypto";
26
+ import { Client } from "pg";
26
27
  import { lookup } from "node:dns/promises";
27
- import { pathToFileURL } from "node:url";
28
+ import { fileURLToPath, pathToFileURL } from "node:url";
28
29
  import prompts from "prompts";
29
30
 
30
31
  //#region package.json
31
32
  var name = "@geekmidas/cli";
32
- var version = "0.47.0";
33
+ var version = "0.49.0";
33
34
  var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
34
35
  var private$1 = false;
35
36
  var type = "module";
@@ -85,12 +86,14 @@ var dependencies = {
85
86
  "hono": "~4.8.0",
86
87
  "lodash.kebabcase": "^4.1.1",
87
88
  "openapi-typescript": "^7.4.2",
89
+ "pg": "~8.17.1",
88
90
  "prompts": "~2.4.2"
89
91
  };
90
92
  var devDependencies = {
91
93
  "@geekmidas/testkit": "workspace:*",
92
94
  "@types/lodash.kebabcase": "^4.1.9",
93
95
  "@types/node": "~24.9.1",
96
+ "@types/pg": "~8.16.0",
94
97
  "@types/prompts": "~2.4.9",
95
98
  "typescript": "^5.8.2",
96
99
  "vitest": "^3.2.4",
@@ -253,7 +256,7 @@ const logger$11 = console;
253
256
  * Validate Dokploy token by making a test API call
254
257
  */
255
258
  async function validateDokployToken(endpoint, token) {
256
- const { DokployApi: DokployApi$1 } = await import("./dokploy-api-BN3V57z1.mjs");
259
+ const { DokployApi: DokployApi$1 } = await import("./dokploy-api-CItuaWTq.mjs");
257
260
  const api = new DokployApi$1({
258
261
  baseUrl: endpoint,
259
262
  token
@@ -2274,2027 +2277,2143 @@ function getAppOutputPath(workspace, _appName, app) {
2274
2277
  }
2275
2278
 
2276
2279
  //#endregion
2277
- //#region src/docker/compose.ts
2278
- /** Default Docker images for services */
2279
- const DEFAULT_SERVICE_IMAGES = {
2280
- postgres: "postgres",
2281
- redis: "redis",
2282
- rabbitmq: "rabbitmq"
2283
- };
2284
- /** Default Docker image versions for services */
2285
- const DEFAULT_SERVICE_VERSIONS = {
2286
- postgres: "16-alpine",
2287
- redis: "7-alpine",
2288
- rabbitmq: "3-management-alpine"
2289
- };
2290
- /** Get the default full image reference for a service */
2291
- function getDefaultImage(serviceName) {
2292
- return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
2280
+ //#region src/deploy/state.ts
2281
+ /**
2282
+ * Get the state file path for a stage
2283
+ */
2284
+ function getStateFilePath(workspaceRoot, stage) {
2285
+ return join(workspaceRoot, ".gkm", `deploy-${stage}.json`);
2293
2286
  }
2294
- /** Normalize services config to a consistent format - returns Map of service name to full image reference */
2295
- function normalizeServices(services) {
2296
- const result = /* @__PURE__ */ new Map();
2297
- if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
2298
- else for (const [name$1, config$1] of Object.entries(services)) {
2299
- const serviceName = name$1;
2300
- if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
2301
- else if (config$1 && typeof config$1 === "object") {
2302
- const serviceConfig = config$1;
2303
- if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
2304
- else {
2305
- const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
2306
- result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
2307
- }
2308
- }
2287
+ /**
2288
+ * Read the deploy state for a stage
2289
+ * Returns null if state file doesn't exist
2290
+ */
2291
+ async function readStageState(workspaceRoot, stage) {
2292
+ const filePath = getStateFilePath(workspaceRoot, stage);
2293
+ try {
2294
+ const content = await readFile(filePath, "utf-8");
2295
+ return JSON.parse(content);
2296
+ } catch (error) {
2297
+ if (error.code === "ENOENT") return null;
2298
+ console.warn(`Warning: Could not read deploy state: ${error}`);
2299
+ return null;
2309
2300
  }
2310
- return result;
2311
2301
  }
2312
2302
  /**
2313
- * Generate docker-compose.yml for production deployment
2303
+ * Write the deploy state for a stage
2314
2304
  */
2315
- function generateDockerCompose(options) {
2316
- const { imageName, registry, port, healthCheckPath, services } = options;
2317
- const serviceMap = normalizeServices(services);
2318
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2319
- let yaml = `version: '3.8'
2320
-
2321
- services:
2322
- api:
2323
- build:
2324
- context: ../..
2325
- dockerfile: .gkm/docker/Dockerfile
2326
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2327
- container_name: ${imageName}
2328
- restart: unless-stopped
2329
- ports:
2330
- - "\${PORT:-${port}}:${port}"
2331
- environment:
2332
- - NODE_ENV=production
2333
- `;
2334
- if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2335
- `;
2336
- if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2337
- `;
2338
- if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
2339
- `;
2340
- yaml += ` healthcheck:
2341
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2342
- interval: 30s
2343
- timeout: 3s
2344
- retries: 3
2345
- `;
2346
- if (serviceMap.size > 0) {
2347
- yaml += ` depends_on:
2348
- `;
2349
- for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
2350
- condition: service_healthy
2351
- `;
2352
- }
2353
- yaml += ` networks:
2354
- - app-network
2355
- `;
2356
- const postgresImage = serviceMap.get("postgres");
2357
- if (postgresImage) yaml += `
2358
- postgres:
2359
- image: ${postgresImage}
2360
- container_name: postgres
2361
- restart: unless-stopped
2362
- environment:
2363
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2364
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2365
- POSTGRES_DB: \${POSTGRES_DB:-app}
2366
- volumes:
2367
- - postgres_data:/var/lib/postgresql/data
2368
- healthcheck:
2369
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2370
- interval: 5s
2371
- timeout: 5s
2372
- retries: 5
2373
- networks:
2374
- - app-network
2375
- `;
2376
- const redisImage = serviceMap.get("redis");
2377
- if (redisImage) yaml += `
2378
- redis:
2379
- image: ${redisImage}
2380
- container_name: redis
2381
- restart: unless-stopped
2382
- volumes:
2383
- - redis_data:/data
2384
- healthcheck:
2385
- test: ["CMD", "redis-cli", "ping"]
2386
- interval: 5s
2387
- timeout: 5s
2388
- retries: 5
2389
- networks:
2390
- - app-network
2391
- `;
2392
- const rabbitmqImage = serviceMap.get("rabbitmq");
2393
- if (rabbitmqImage) yaml += `
2394
- rabbitmq:
2395
- image: ${rabbitmqImage}
2396
- container_name: rabbitmq
2397
- restart: unless-stopped
2398
- environment:
2399
- RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
2400
- RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
2401
- ports:
2402
- - "15672:15672" # Management UI
2403
- volumes:
2404
- - rabbitmq_data:/var/lib/rabbitmq
2405
- healthcheck:
2406
- test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
2407
- interval: 10s
2408
- timeout: 5s
2409
- retries: 5
2410
- networks:
2411
- - app-network
2412
- `;
2413
- yaml += `
2414
- volumes:
2415
- `;
2416
- if (serviceMap.has("postgres")) yaml += ` postgres_data:
2417
- `;
2418
- if (serviceMap.has("redis")) yaml += ` redis_data:
2419
- `;
2420
- if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
2421
- `;
2422
- yaml += `
2423
- networks:
2424
- app-network:
2425
- driver: bridge
2426
- `;
2427
- return yaml;
2305
+ async function writeStageState(workspaceRoot, stage, state) {
2306
+ const filePath = getStateFilePath(workspaceRoot, stage);
2307
+ const dir = join(workspaceRoot, ".gkm");
2308
+ await mkdir(dir, { recursive: true });
2309
+ state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
2310
+ await writeFile(filePath, JSON.stringify(state, null, 2));
2428
2311
  }
2429
2312
  /**
2430
- * Generate a minimal docker-compose.yml for API only
2313
+ * Create a new empty state for a stage
2431
2314
  */
2432
- function generateMinimalDockerCompose(options) {
2433
- const { imageName, registry, port, healthCheckPath } = options;
2434
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2435
- return `version: '3.8'
2436
-
2437
- services:
2438
- api:
2439
- build:
2440
- context: ../..
2441
- dockerfile: .gkm/docker/Dockerfile
2442
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2443
- container_name: ${imageName}
2444
- restart: unless-stopped
2445
- ports:
2446
- - "\${PORT:-${port}}:${port}"
2447
- environment:
2448
- - NODE_ENV=production
2449
- healthcheck:
2450
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2451
- interval: 30s
2452
- timeout: 3s
2453
- retries: 3
2454
- networks:
2455
- - app-network
2456
-
2457
- networks:
2458
- app-network:
2459
- driver: bridge
2460
- `;
2315
+ function createEmptyState(stage, environmentId) {
2316
+ return {
2317
+ provider: "dokploy",
2318
+ stage,
2319
+ environmentId,
2320
+ applications: {},
2321
+ services: {},
2322
+ lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
2323
+ };
2461
2324
  }
2462
2325
  /**
2463
- * Generate docker-compose.yml for a workspace with all apps as services.
2464
- * Apps can communicate with each other via service names.
2465
- * @internal Exported for testing
2326
+ * Get application ID from state
2466
2327
  */
2467
- function generateWorkspaceCompose(workspace, options = {}) {
2468
- const { registry } = options;
2469
- const apps = Object.entries(workspace.apps);
2470
- const services = workspace.services;
2471
- const hasPostgres = services.db !== void 0 && services.db !== false;
2472
- const hasRedis = services.cache !== void 0 && services.cache !== false;
2473
- const hasMail = services.mail !== void 0 && services.mail !== false;
2474
- const postgresImage = getInfraServiceImage("postgres", services.db);
2475
- const redisImage = getInfraServiceImage("redis", services.cache);
2476
- let yaml = `# Docker Compose for ${workspace.name} workspace
2477
- # Generated by gkm - do not edit manually
2478
-
2479
- services:
2480
- `;
2481
- for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
2482
- registry,
2483
- hasPostgres,
2484
- hasRedis
2485
- });
2486
- if (hasPostgres) yaml += `
2487
- postgres:
2488
- image: ${postgresImage}
2489
- container_name: ${workspace.name}-postgres
2490
- restart: unless-stopped
2491
- environment:
2492
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2493
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2494
- POSTGRES_DB: \${POSTGRES_DB:-app}
2495
- volumes:
2496
- - postgres_data:/var/lib/postgresql/data
2497
- healthcheck:
2498
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2499
- interval: 5s
2500
- timeout: 5s
2501
- retries: 5
2502
- networks:
2503
- - workspace-network
2504
- `;
2505
- if (hasRedis) yaml += `
2506
- redis:
2507
- image: ${redisImage}
2508
- container_name: ${workspace.name}-redis
2509
- restart: unless-stopped
2510
- volumes:
2511
- - redis_data:/data
2512
- healthcheck:
2513
- test: ["CMD", "redis-cli", "ping"]
2514
- interval: 5s
2515
- timeout: 5s
2516
- retries: 5
2517
- networks:
2518
- - workspace-network
2519
- `;
2520
- if (hasMail) yaml += `
2521
- mailpit:
2522
- image: axllent/mailpit:latest
2523
- container_name: ${workspace.name}-mailpit
2524
- restart: unless-stopped
2525
- ports:
2526
- - "8025:8025" # Web UI
2527
- - "1025:1025" # SMTP
2528
- networks:
2529
- - workspace-network
2530
- `;
2531
- yaml += `
2532
- volumes:
2533
- `;
2534
- if (hasPostgres) yaml += ` postgres_data:
2535
- `;
2536
- if (hasRedis) yaml += ` redis_data:
2537
- `;
2538
- yaml += `
2539
- networks:
2540
- workspace-network:
2541
- driver: bridge
2542
- `;
2543
- return yaml;
2328
+ function getApplicationId(state, appName) {
2329
+ return state?.applications[appName];
2544
2330
  }
2545
2331
  /**
2546
- * Get infrastructure service image with version.
2332
+ * Set application ID in state (mutates state)
2547
2333
  */
2548
- function getInfraServiceImage(serviceName, config$1) {
2549
- const defaults = {
2550
- postgres: "postgres:16-alpine",
2551
- redis: "redis:7-alpine"
2552
- };
2553
- if (!config$1 || config$1 === true) return defaults[serviceName];
2554
- if (typeof config$1 === "object") {
2555
- if (config$1.image) return config$1.image;
2556
- if (config$1.version) {
2557
- const baseImage = serviceName === "postgres" ? "postgres" : "redis";
2558
- return `${baseImage}:${config$1.version}`;
2559
- }
2560
- }
2561
- return defaults[serviceName];
2334
+ function setApplicationId(state, appName, applicationId) {
2335
+ state.applications[appName] = applicationId;
2562
2336
  }
2563
2337
  /**
2564
- * Generate a service definition for an app.
2338
+ * Get postgres ID from state
2565
2339
  */
2566
- function generateAppService(appName, app, allApps, options) {
2567
- const { registry, hasPostgres, hasRedis } = options;
2568
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2569
- const healthCheckPath = app.type === "frontend" ? "/" : "/health";
2570
- const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
2571
- let yaml = `
2572
- ${appName}:
2573
- build:
2574
- context: .
2575
- dockerfile: .gkm/docker/Dockerfile.${appName}
2576
- image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
2577
- container_name: ${appName}
2578
- restart: unless-stopped
2579
- ports:
2580
- - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
2581
- environment:
2582
- - NODE_ENV=production
2583
- - PORT=${app.port}
2584
- `;
2585
- for (const dep of app.dependencies) {
2586
- const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
2587
- if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
2588
- `;
2589
- }
2590
- if (app.type === "backend") {
2591
- if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2592
- `;
2593
- if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2594
- `;
2595
- }
2596
- yaml += ` healthcheck:
2597
- test: ${healthCheckCmd}
2598
- interval: 30s
2599
- timeout: 3s
2600
- retries: 3
2601
- `;
2602
- const dependencies$1 = [...app.dependencies];
2603
- if (app.type === "backend") {
2604
- if (hasPostgres) dependencies$1.push("postgres");
2605
- if (hasRedis) dependencies$1.push("redis");
2606
- }
2607
- if (dependencies$1.length > 0) {
2608
- yaml += ` depends_on:
2609
- `;
2610
- for (const dep of dependencies$1) yaml += ` ${dep}:
2611
- condition: service_healthy
2612
- `;
2613
- }
2614
- yaml += ` networks:
2615
- - workspace-network
2616
- `;
2617
- return yaml;
2340
+ function getPostgresId(state) {
2341
+ return state?.services.postgresId;
2618
2342
  }
2619
-
2620
- //#endregion
2621
- //#region src/docker/templates.ts
2622
- const LOCKFILES = [
2623
- ["pnpm-lock.yaml", "pnpm"],
2624
- ["bun.lockb", "bun"],
2625
- ["yarn.lock", "yarn"],
2626
- ["package-lock.json", "npm"]
2627
- ];
2628
2343
  /**
2629
- * Detect package manager from lockfiles
2630
- * Walks up the directory tree to find lockfile (for monorepos)
2344
+ * Set postgres ID in state (mutates state)
2631
2345
  */
2632
- function detectPackageManager$1(cwd = process.cwd()) {
2633
- let dir = cwd;
2634
- const root = parse(dir).root;
2635
- while (dir !== root) {
2636
- for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
2637
- dir = dirname(dir);
2638
- }
2639
- for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
2640
- return "pnpm";
2346
+ function setPostgresId(state, postgresId) {
2347
+ state.services.postgresId = postgresId;
2641
2348
  }
2642
2349
  /**
2643
- * Find the lockfile path by walking up the directory tree
2644
- * Returns the full path to the lockfile, or null if not found
2350
+ * Get redis ID from state
2645
2351
  */
2646
- function findLockfilePath(cwd = process.cwd()) {
2647
- let dir = cwd;
2648
- const root = parse(dir).root;
2649
- while (dir !== root) {
2650
- for (const [lockfile] of LOCKFILES) {
2651
- const lockfilePath = join(dir, lockfile);
2652
- if (existsSync(lockfilePath)) return lockfilePath;
2653
- }
2654
- dir = dirname(dir);
2655
- }
2656
- for (const [lockfile] of LOCKFILES) {
2657
- const lockfilePath = join(root, lockfile);
2658
- if (existsSync(lockfilePath)) return lockfilePath;
2659
- }
2660
- return null;
2352
+ function getRedisId(state) {
2353
+ return state?.services.redisId;
2661
2354
  }
2662
2355
  /**
2663
- * Check if we're in a monorepo (lockfile is in a parent directory)
2356
+ * Set redis ID in state (mutates state)
2664
2357
  */
2665
- function isMonorepo(cwd = process.cwd()) {
2666
- const lockfilePath = findLockfilePath(cwd);
2667
- if (!lockfilePath) return false;
2668
- const lockfileDir = dirname(lockfilePath);
2669
- return lockfileDir !== cwd;
2358
+ function setRedisId(state, redisId) {
2359
+ state.services.redisId = redisId;
2670
2360
  }
2671
2361
  /**
2672
- * Check if turbo.json exists (walks up directory tree)
2362
+ * Set app credentials in state (mutates state)
2673
2363
  */
2674
- function hasTurboConfig(cwd = process.cwd()) {
2675
- let dir = cwd;
2676
- const root = parse(dir).root;
2677
- while (dir !== root) {
2678
- if (existsSync(join(dir, "turbo.json"))) return true;
2679
- dir = dirname(dir);
2680
- }
2681
- return existsSync(join(root, "turbo.json"));
2364
+ function setAppCredentials(state, appName, credentials) {
2365
+ if (!state.appCredentials) state.appCredentials = {};
2366
+ state.appCredentials[appName] = credentials;
2682
2367
  }
2683
2368
  /**
2684
- * Get install command for turbo builds (without frozen lockfile)
2685
- * Turbo prune creates a subset that may not perfectly match the lockfile
2369
+ * Get all app credentials from state
2686
2370
  */
2687
- function getTurboInstallCmd(pm) {
2688
- const commands = {
2689
- pnpm: "pnpm install",
2690
- npm: "npm install",
2691
- yarn: "yarn install",
2692
- bun: "bun install"
2693
- };
2694
- return commands[pm];
2371
+ function getAllAppCredentials(state) {
2372
+ return state?.appCredentials ?? {};
2695
2373
  }
2696
2374
  /**
2697
- * Get package manager specific commands and paths
2375
+ * Get a generated secret for an app
2698
2376
  */
2699
- function getPmConfig(pm) {
2700
- const configs = {
2701
- pnpm: {
2702
- install: "corepack enable && corepack prepare pnpm@latest --activate",
2703
- lockfile: "pnpm-lock.yaml",
2704
- fetch: "pnpm fetch",
2705
- installCmd: "pnpm install --frozen-lockfile --offline",
2706
- cacheTarget: "/root/.local/share/pnpm/store",
2707
- cacheId: "pnpm",
2708
- run: "pnpm",
2709
- exec: "pnpm exec",
2710
- dlx: "pnpm dlx",
2711
- addGlobal: "pnpm add -g"
2712
- },
2713
- npm: {
2714
- install: "",
2715
- lockfile: "package-lock.json",
2716
- fetch: "",
2717
- installCmd: "npm ci",
2718
- cacheTarget: "/root/.npm",
2719
- cacheId: "npm",
2720
- run: "npm run",
2721
- exec: "npx",
2722
- dlx: "npx",
2723
- addGlobal: "npm install -g"
2724
- },
2725
- yarn: {
2726
- install: "corepack enable && corepack prepare yarn@stable --activate",
2727
- lockfile: "yarn.lock",
2728
- fetch: "",
2729
- installCmd: "yarn install --frozen-lockfile",
2730
- cacheTarget: "/root/.yarn/cache",
2731
- cacheId: "yarn",
2732
- run: "yarn",
2733
- exec: "yarn exec",
2734
- dlx: "yarn dlx",
2735
- addGlobal: "yarn global add"
2736
- },
2737
- bun: {
2738
- install: "npm install -g bun",
2739
- lockfile: "bun.lockb",
2740
- fetch: "",
2741
- installCmd: "bun install --frozen-lockfile",
2742
- cacheTarget: "/root/.bun/install/cache",
2743
- cacheId: "bun",
2744
- run: "bun run",
2745
- exec: "bunx",
2746
- dlx: "bunx",
2747
- addGlobal: "bun add -g"
2748
- }
2377
+ function getGeneratedSecret(state, appName, secretName) {
2378
+ return state?.generatedSecrets?.[appName]?.[secretName];
2379
+ }
2380
+ /**
2381
+ * Set a generated secret for an app (mutates state)
2382
+ */
2383
+ function setGeneratedSecret(state, appName, secretName, value) {
2384
+ if (!state.generatedSecrets) state.generatedSecrets = {};
2385
+ if (!state.generatedSecrets[appName]) state.generatedSecrets[appName] = {};
2386
+ state.generatedSecrets[appName][secretName] = value;
2387
+ }
2388
+ /**
2389
+ * Set DNS verification record for a hostname (mutates state)
2390
+ */
2391
+ function setDnsVerification(state, hostname, serverIp) {
2392
+ if (!state.dnsVerified) state.dnsVerified = {};
2393
+ state.dnsVerified[hostname] = {
2394
+ serverIp,
2395
+ verifiedAt: (/* @__PURE__ */ new Date()).toISOString()
2749
2396
  };
2750
- return configs[pm];
2751
2397
  }
2752
2398
  /**
2753
- * Generate a multi-stage Dockerfile for building from source
2754
- * Optimized for build speed with:
2755
- * - BuildKit cache mounts for package manager store
2756
- * - pnpm fetch for better layer caching (when using pnpm)
2757
- * - Optional turbo prune for monorepos
2399
+ * Check if a hostname is already verified with the given IP
2758
2400
  */
2759
- function generateMultiStageDockerfile(options) {
2760
- const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
2761
- if (turbo) return generateTurboDockerfile({
2762
- ...options,
2763
- turboPackage: turboPackage ?? "api"
2764
- });
2765
- const pm = getPmConfig(packageManager);
2766
- const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
2767
- const hasFetch = packageManager === "pnpm";
2768
- const depsStage = hasFetch ? `# Copy lockfile first for better caching
2769
- COPY ${pm.lockfile} ./
2770
-
2771
- # Fetch dependencies (downloads to virtual store, cached separately)
2772
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2773
- ${pm.fetch}
2774
-
2775
- # Copy package.json after fetch
2776
- COPY package.json ./
2777
-
2778
- # Install from cache (fast - no network needed)
2779
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2780
- ${pm.installCmd}` : `# Copy package files
2781
- COPY package.json ${pm.lockfile} ./
2782
-
2783
- # Install dependencies with cache
2784
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2785
- ${pm.installCmd}`;
2786
- return `# syntax=docker/dockerfile:1
2787
- # Stage 1: Dependencies
2788
- FROM ${baseImage} AS deps
2789
-
2790
- WORKDIR /app
2791
- ${installPm}
2792
- ${depsStage}
2793
-
2794
- # Stage 2: Build
2795
- FROM deps AS builder
2796
-
2797
- WORKDIR /app
2798
-
2799
- # Copy source (deps already installed)
2800
- COPY . .
2801
-
2802
- # Debug: Show node_modules/.bin contents and build production server
2803
- RUN echo "=== node_modules/.bin contents ===" && \
2804
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2805
- echo "=== Checking for gkm ===" && \
2806
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2807
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2808
- echo "=== Running build ===" && \
2809
- ./node_modules/.bin/gkm build --provider server --production
2810
-
2811
- # Stage 3: Production
2812
- FROM ${baseImage} AS runner
2813
-
2814
- WORKDIR /app
2815
-
2816
- # Install tini for proper signal handling as PID 1
2817
- RUN apk add --no-cache tini
2818
-
2819
- # Create non-root user
2820
- RUN addgroup --system --gid 1001 nodejs && \\
2821
- adduser --system --uid 1001 hono
2822
-
2823
- # Copy bundled server
2824
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2825
-
2826
- # Environment
2827
- ENV NODE_ENV=production
2828
- ENV PORT=${port}
2829
-
2830
- # Health check
2831
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
2832
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
2833
-
2834
- # Switch to non-root user
2835
- USER hono
2401
+ function isDnsVerified(state, hostname, serverIp) {
2402
+ const record = state?.dnsVerified?.[hostname];
2403
+ return record?.serverIp === serverIp;
2404
+ }
2836
2405
 
2837
- EXPOSE ${port}
2406
+ //#endregion
2407
+ //#region src/deploy/dns/hostinger-api.ts
2408
+ /**
2409
+ * Hostinger DNS API client
2410
+ *
2411
+ * API Documentation: https://developers.hostinger.com/
2412
+ * Authentication: Bearer token from hpanel.hostinger.com/profile/api
2413
+ */
2414
+ const HOSTINGER_API_BASE = "https://developers.hostinger.com";
2415
+ /**
2416
+ * Hostinger API error
2417
+ */
2418
+ var HostingerApiError = class extends Error {
2419
+ constructor(message, status, statusText, errors) {
2420
+ super(message);
2421
+ this.status = status;
2422
+ this.statusText = statusText;
2423
+ this.errors = errors;
2424
+ this.name = "HostingerApiError";
2425
+ }
2426
+ };
2427
+ /**
2428
+ * Hostinger DNS API client
2429
+ *
2430
+ * @example
2431
+ * ```ts
2432
+ * const api = new HostingerApi(token);
2433
+ *
2434
+ * // Get all records for a domain
2435
+ * const records = await api.getRecords('traflabs.io');
2436
+ *
2437
+ * // Create/update records
2438
+ * await api.upsertRecords('traflabs.io', [
2439
+ * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
2440
+ * ]);
2441
+ * ```
2442
+ */
2443
+ var HostingerApi = class {
2444
+ token;
2445
+ constructor(token) {
2446
+ this.token = token;
2447
+ }
2448
+ /**
2449
+ * Make a request to the Hostinger API
2450
+ */
2451
+ async request(method, endpoint, body) {
2452
+ const url = `${HOSTINGER_API_BASE}${endpoint}`;
2453
+ const response = await fetch(url, {
2454
+ method,
2455
+ headers: {
2456
+ "Content-Type": "application/json",
2457
+ Authorization: `Bearer ${this.token}`
2458
+ },
2459
+ body: body ? JSON.stringify(body) : void 0
2460
+ });
2461
+ if (!response.ok) {
2462
+ let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
2463
+ let errors;
2464
+ try {
2465
+ const errorBody = await response.json();
2466
+ if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
2467
+ errors = errorBody.errors;
2468
+ } catch {}
2469
+ throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
2470
+ }
2471
+ const text = await response.text();
2472
+ if (!text || text.trim() === "") return void 0;
2473
+ return JSON.parse(text);
2474
+ }
2475
+ /**
2476
+ * Get all DNS records for a domain
2477
+ *
2478
+ * @param domain - Root domain (e.g., 'traflabs.io')
2479
+ */
2480
+ async getRecords(domain) {
2481
+ const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
2482
+ return response.data || [];
2483
+ }
2484
+ /**
2485
+ * Create or update DNS records
2486
+ *
2487
+ * @param domain - Root domain (e.g., 'traflabs.io')
2488
+ * @param records - Records to create/update
2489
+ * @param overwrite - If true, replaces all existing records. If false, merges with existing.
2490
+ */
2491
+ async upsertRecords(domain, records, overwrite = false) {
2492
+ await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
2493
+ overwrite,
2494
+ zone: records
2495
+ });
2496
+ }
2497
+ /**
2498
+ * Validate DNS records before applying
2499
+ *
2500
+ * @param domain - Root domain (e.g., 'traflabs.io')
2501
+ * @param records - Records to validate
2502
+ * @returns true if valid, throws if invalid
2503
+ */
2504
+ async validateRecords(domain, records) {
2505
+ await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
2506
+ overwrite: false,
2507
+ zone: records
2508
+ });
2509
+ return true;
2510
+ }
2511
+ /**
2512
+ * Delete specific DNS records
2513
+ *
2514
+ * @param domain - Root domain (e.g., 'traflabs.io')
2515
+ * @param filters - Filters to match records for deletion
2516
+ */
2517
+ async deleteRecords(domain, filters) {
2518
+ await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
2519
+ }
2520
+ /**
2521
+ * Check if a specific record exists
2522
+ *
2523
+ * @param domain - Root domain (e.g., 'traflabs.io')
2524
+ * @param name - Subdomain name (e.g., 'api.joemoer')
2525
+ * @param type - Record type (e.g., 'A')
2526
+ */
2527
+ async recordExists(domain, name$1, type$1 = "A") {
2528
+ const records = await this.getRecords(domain);
2529
+ return records.some((r) => r.name === name$1 && r.type === type$1);
2530
+ }
2531
+ /**
2532
+ * Create a single A record if it doesn't exist
2533
+ *
2534
+ * @param domain - Root domain (e.g., 'traflabs.io')
2535
+ * @param subdomain - Subdomain name (e.g., 'api.joemoer')
2536
+ * @param ip - IP address to point to
2537
+ * @param ttl - TTL in seconds (default: 300)
2538
+ * @returns true if created, false if already exists
2539
+ */
2540
+ async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
2541
+ const exists = await this.recordExists(domain, subdomain, "A");
2542
+ if (exists) return false;
2543
+ await this.upsertRecords(domain, [{
2544
+ name: subdomain,
2545
+ type: "A",
2546
+ ttl,
2547
+ records: [{ content: ip }]
2548
+ }]);
2549
+ return true;
2550
+ }
2551
+ };
2838
2552
 
2839
- # Use tini as entrypoint to handle PID 1 responsibilities
2840
- ENTRYPOINT ["/sbin/tini", "--"]
2841
- CMD ["node", "server.mjs"]
2842
- `;
2553
//#endregion
//#region src/deploy/dns/index.ts
// Console-backed logger used by all DNS orchestration output in this region.
const logger$6 = console;
2556
/**
 * Resolve a hostname to its IPv4 address.
 *
 * @param hostname - Hostname to resolve
 * @returns The resolved IPv4 address
 * @throws Error with a descriptive message when DNS resolution fails
 */
async function resolveHostnameToIp(hostname) {
  try {
    const { address } = await lookup(hostname, { family: 4 });
    return address;
  } catch (error) {
    const reason = error instanceof Error ? error.message : "Unknown error";
    throw new Error(`Failed to resolve IP for ${hostname}: ${reason}`);
  }
}
2567
/**
 * Extract the subdomain portion of a hostname relative to a root domain.
 *
 * @example
 * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
 * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
 * extractSubdomain('traflabs.io', 'traflabs.io') => '@'
 *
 * @param hostname - Fully-qualified hostname
 * @param rootDomain - Root domain the hostname must live under
 * @throws Error when hostname is neither the root domain nor a subdomain of it
 */
function extractSubdomain(hostname, rootDomain) {
  // Zone apex: the hostname IS the root domain.
  if (hostname === rootDomain) return "@";
  // Require a dot boundary so e.g. 'xtraflabs.io' does not pass for
  // root domain 'traflabs.io' (a bare endsWith check would accept it).
  if (!hostname.endsWith(`.${rootDomain}`)) {
    throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
  }
  // Strip the root domain plus its separating dot.
  return hostname.slice(0, -(rootDomain.length + 1));
}
2579
/**
 * Build the list of A records a deployment needs.
 *
 * @param appHostnames - Map of app name to hostname
 * @param rootDomain - Root domain the records live under
 * @param serverIp - IP address every record should point at
 * @returns One record descriptor per app hostname, in map order
 */
function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
  return Array.from(appHostnames, ([appName, hostname]) => ({
    hostname,
    subdomain: extractSubdomain(hostname, rootDomain),
    type: "A",
    value: serverIp,
    appName
  }));
}
2596
/**
 * Render DNS records as a box-drawn status table.
 *
 * Status column: "✗" on error, "✓ new" when created, "✓" when it
 * already existed, "?" otherwise.
 */
function printDnsRecordsTable(records, rootDomain) {
  logger$6.log(`\n 📋 DNS Records for ${rootDomain}:`);
  logger$6.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
  logger$6.log(" │ Subdomain │ Type │ Value │ Status │");
  logger$6.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
  for (const record of records) {
    let status = "?";
    if (record.error) status = "✗";
    else if (record.created) status = "✓ new";
    else if (record.existed) status = "✓";
    const subdomain = record.subdomain.padEnd(35);
    const type$1 = record.type.padEnd(4);
    const value = record.value.padEnd(15);
    logger$6.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
  }
  logger$6.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
}
2617
/**
 * Print DNS records as a plain list suitable for manual DNS setup.
 */
function printDnsRecordsSimple(records, rootDomain) {
  logger$6.log("\n 📋 Required DNS Records:");
  logger$6.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
  records.forEach((record) => {
    logger$6.log(` ${record.subdomain} → ${record.value} (A record)`);
  });
  logger$6.log("");
}
2626
/**
 * Prompt for input (reuse from deploy/index.ts pattern)
 *
 * Reads a secret from stdin in raw mode (no echo), resolving on Enter.
 *
 * @param message - Prompt text written to stdout
 * @returns The entered value
 * @throws Error when stdin is not an interactive TTY
 */
async function promptForToken(message) {
  const { stdin: stdin$1, stdout: stdout$1 } = await import("node:process");
  if (!stdin$1.isTTY) throw new Error("Interactive input required for Hostinger token.");
  stdout$1.write(message);
  return new Promise((resolve$1) => {
    let value = "";
    const onData = (char) => {
      const c = char.toString();
      if (c === "\n" || c === "\r") {
        // Enter: restore the terminal, detach, and resolve with the input.
        stdin$1.setRawMode(false);
        stdin$1.pause();
        stdin$1.removeListener("data", onData);
        stdout$1.write("\n");
        resolve$1(value);
      } else if (c === "") {
        // NOTE(review): this literal renders empty — presumably it was a
        // control character (Ctrl-C, "\u0003") lost in extraction; confirm
        // against the original source. Branch aborts the process.
        stdin$1.setRawMode(false);
        stdin$1.pause();
        stdout$1.write("\n");
        process.exit(1);
      } else if (c === "" || c === "\b") {
        // NOTE(review): first literal also renders empty — likely DEL
        // ("\u007f"). Backspace/DEL removes the last typed character.
        if (value.length > 0) value = value.slice(0, -1);
      } else value += c;
    };
    // Raw mode delivers keystrokes one at a time without echoing them.
    stdin$1.setRawMode(true);
    stdin$1.resume();
    stdin$1.on("data", onData);
  });
}
2657
/**
 * Dispatch DNS record creation to the configured provider.
 *
 * 'manual' marks every record as neither created nor pre-existing,
 * 'hostinger' creates them via the Hostinger API, and 'cloudflare' is
 * not implemented yet (records come back flagged with an error).
 */
async function createDnsRecords(records, dnsConfig) {
  const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
  switch (provider) {
    case "manual":
      return records.map((r) => ({
        ...r,
        created: false,
        existed: false
      }));
    case "hostinger":
      return createHostingerRecords(records, rootDomain, ttl);
    case "cloudflare":
      logger$6.log(" ⚠ Cloudflare DNS integration not yet implemented");
      return records.map((r) => ({
        ...r,
        error: "Cloudflare not implemented"
      }));
    default:
      // Unknown provider: pass the records through untouched.
      return records;
  }
}
2677
/**
 * Create A records at Hostinger, prompting for (and persisting) an API
 * token when none is stored.
 *
 * Records whose subdomain already has an A record are marked `existed`;
 * per-record failures are captured in `error` rather than thrown.
 */
async function createHostingerRecords(records, rootDomain, ttl) {
  let token = await getHostingerToken();
  if (!token) {
    logger$6.log("\n 📋 Hostinger API token not found.");
    logger$6.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
    try {
      token = await promptForToken(" Hostinger API Token: ");
      await storeHostingerToken(token);
      logger$6.log(" ✓ Token saved");
    } catch {
      // No TTY or prompt aborted — report every record as unactionable.
      logger$6.log(" ⚠ Could not get token, skipping DNS creation");
      return records.map((r) => ({
        ...r,
        error: "No API token"
      }));
    }
  }
  const api = new HostingerApi(token);
  let existingRecords = [];
  try {
    existingRecords = await api.getRecords(rootDomain);
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown error";
    logger$6.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
    return records.map((r) => ({
      ...r,
      error: message
    }));
  }
  const results = [];
  for (const record of records) {
    const alreadyThere = existingRecords.some(
      (r) => r.name === record.subdomain && r.type === "A"
    );
    if (alreadyThere) {
      results.push({ ...record, existed: true, created: false });
      continue;
    }
    try {
      await api.upsertRecords(rootDomain, [{
        name: record.subdomain,
        type: "A",
        ttl,
        records: [{ content: record.value }]
      }]);
      results.push({ ...record, created: true, existed: false });
    } catch (error) {
      results.push({
        ...record,
        error: error instanceof Error ? error.message : "Unknown error"
      });
    }
  }
  return results;
}
2844
2742
  /**
2845
- * Generate a Dockerfile optimized for Turbo monorepos
2846
- * Uses turbo prune to create minimal Docker context
2743
+ * Main DNS orchestration function for deployments
2847
2744
  */
2848
- function generateTurboDockerfile(options) {
2849
- const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
2850
- const pm = getPmConfig(packageManager);
2851
- const installPm = pm.install ? `RUN ${pm.install}` : "";
2852
- const turboInstallCmd = getTurboInstallCmd(packageManager);
2853
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
2854
- return `# syntax=docker/dockerfile:1
2855
- # Stage 1: Prune monorepo
2856
- FROM ${baseImage} AS pruner
2857
-
2858
- WORKDIR /app
2859
-
2860
- ${installPm}
2861
-
2862
- COPY . .
2863
-
2864
- # Prune to only include necessary packages
2865
- RUN ${turboCmd} prune ${turboPackage} --docker
2866
-
2867
- # Stage 2: Install dependencies
2868
- FROM ${baseImage} AS deps
2869
-
2870
- WORKDIR /app
2871
-
2872
- ${installPm}
2873
-
2874
- # Copy pruned lockfile and package.jsons
2875
- COPY --from=pruner /app/out/${pm.lockfile} ./
2876
- COPY --from=pruner /app/out/json/ ./
2877
-
2878
- # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
2879
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2880
- ${turboInstallCmd}
2881
-
2882
- # Stage 3: Build
2883
- FROM deps AS builder
2884
-
2885
- WORKDIR /app
2886
-
2887
- # Copy pruned source
2888
- COPY --from=pruner /app/out/full/ ./
2889
-
2890
- # Debug: Show node_modules/.bin contents and build production server
2891
- RUN echo "=== node_modules/.bin contents ===" && \
2892
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2893
- echo "=== Checking for gkm ===" && \
2894
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2895
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2896
- echo "=== Running build ===" && \
2897
- ./node_modules/.bin/gkm build --provider server --production
2898
-
2899
- # Stage 4: Production
2900
- FROM ${baseImage} AS runner
2901
-
2902
- WORKDIR /app
2903
-
2904
- RUN apk add --no-cache tini
2905
-
2906
- RUN addgroup --system --gid 1001 nodejs && \\
2907
- adduser --system --uid 1001 hono
2908
-
2909
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2910
-
2911
- ENV NODE_ENV=production
2912
- ENV PORT=${port}
2913
-
2914
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
2915
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
2916
-
2917
- USER hono
2745
+ async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
2746
+ if (!dnsConfig) return null;
2747
+ const { domain: rootDomain, autoCreate = true } = dnsConfig;
2748
+ logger$6.log("\n🌐 Setting up DNS records...");
2749
+ let serverIp;
2750
+ try {
2751
+ const endpointUrl = new URL(dokployEndpoint);
2752
+ serverIp = await resolveHostnameToIp(endpointUrl.hostname);
2753
+ logger$6.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
2754
+ } catch (error) {
2755
+ const message = error instanceof Error ? error.message : "Unknown error";
2756
+ logger$6.log(` ⚠ Failed to resolve server IP: ${message}`);
2757
+ return null;
2758
+ }
2759
+ const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
2760
+ if (requiredRecords.length === 0) {
2761
+ logger$6.log(" No DNS records needed");
2762
+ return {
2763
+ records: [],
2764
+ success: true,
2765
+ serverIp
2766
+ };
2767
+ }
2768
+ let finalRecords;
2769
+ if (autoCreate && dnsConfig.provider !== "manual") {
2770
+ logger$6.log(` Creating DNS records at ${dnsConfig.provider}...`);
2771
+ finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
2772
+ const created = finalRecords.filter((r) => r.created).length;
2773
+ const existed = finalRecords.filter((r) => r.existed).length;
2774
+ const failed = finalRecords.filter((r) => r.error).length;
2775
+ if (created > 0) logger$6.log(` ✓ Created ${created} DNS record(s)`);
2776
+ if (existed > 0) logger$6.log(` ✓ ${existed} record(s) already exist`);
2777
+ if (failed > 0) logger$6.log(` ⚠ ${failed} record(s) failed`);
2778
+ } else finalRecords = requiredRecords;
2779
+ printDnsRecordsTable(finalRecords, rootDomain);
2780
+ const hasFailures = finalRecords.some((r) => r.error);
2781
+ if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
2782
+ return {
2783
+ records: finalRecords,
2784
+ success: !hasFailures,
2785
+ serverIp
2786
+ };
2787
+ }
2788
/**
 * Verify DNS records resolve correctly after deployment.
 *
 * This function:
 * 1. Checks state for previously verified hostnames (skips if already verified with same IP)
 * 2. Attempts to resolve each hostname to an IP
 * 3. Compares resolved IP with expected server IP
 * 4. Updates state with verification results
 *
 * @param appHostnames - Map of app names to hostnames
 * @param serverIp - Expected IP address the hostnames should resolve to
 * @param state - Deploy state for caching verification results
 * @returns Array of verification results
 */
async function verifyDnsRecords(appHostnames, serverIp, state) {
  const results = [];
  logger$6.log("\n🔍 Verifying DNS records...");
  for (const [appName, hostname] of appHostnames) {
    // Cache hit: this hostname was already verified against the same IP.
    if (isDnsVerified(state, hostname, serverIp)) {
      logger$6.log(` ✓ ${hostname} (previously verified)`);
      results.push({
        hostname,
        appName,
        verified: true,
        expectedIp: serverIp,
        skipped: true
      });
      continue;
    }
    try {
      const resolvedIp = await resolveHostnameToIp(hostname);
      if (resolvedIp === serverIp) {
        // Persist the success so future deploys can skip this hostname.
        setDnsVerification(state, hostname, serverIp);
        logger$6.log(` ✓ ${hostname} → ${resolvedIp}`);
        results.push({
          hostname,
          appName,
          verified: true,
          resolvedIp,
          expectedIp: serverIp
        });
      } else {
        // Resolves, but to the wrong address (stale record or other host).
        logger$6.log(` ⚠ ${hostname} resolves to ${resolvedIp}, expected ${serverIp}`);
        results.push({
          hostname,
          appName,
          verified: false,
          resolvedIp,
          expectedIp: serverIp
        });
      }
    } catch (error) {
      // Resolution failed entirely — most likely DNS has not propagated yet.
      const message = error instanceof Error ? error.message : "Unknown error";
      logger$6.log(` ⚠ ${hostname} DNS not propagated (${message})`);
      results.push({
        hostname,
        appName,
        verified: false,
        expectedIp: serverIp,
        error: message
      });
    }
  }
  const verified = results.filter((r) => r.verified).length;
  const skipped = results.filter((r) => r.skipped).length;
  const pending = results.filter((r) => !r.verified).length;
  if (pending > 0) {
    logger$6.log(`\n ${verified} verified, ${pending} pending propagation`);
    logger$6.log(" DNS changes may take 5-30 minutes to propagate");
  } else if (skipped > 0) logger$6.log(` ${verified} verified (${skipped} from cache)`);
  return results;
}
2918
2860
 
2919
- EXPOSE ${port}
2861
//#endregion
//#region src/docker/compose.ts
/** Default Docker images for services */
const DEFAULT_SERVICE_IMAGES = {
  postgres: "postgres",
  redis: "redis",
  rabbitmq: "rabbitmq"
};
/** Default Docker image versions for services */
// Keys must stay in sync with DEFAULT_SERVICE_IMAGES above.
const DEFAULT_SERVICE_VERSIONS = {
  postgres: "16-alpine",
  redis: "7-alpine",
  rabbitmq: "3-management-alpine"
};
2875
/** Get the default full image reference ("name:tag") for a service */
function getDefaultImage(serviceName) {
  const image = DEFAULT_SERVICE_IMAGES[serviceName];
  const tag = DEFAULT_SERVICE_VERSIONS[serviceName];
  return `${image}:${tag}`;
}
2879
/**
 * Normalize services config to a consistent format.
 *
 * Accepts either an array of service names (defaults used for each) or
 * an object whose values are `true` or a per-service config with an
 * explicit `image` or `version`.
 *
 * @returns Map of service name to full image reference
 */
function normalizeServices(services) {
  const result = /* @__PURE__ */ new Map();
  if (Array.isArray(services)) {
    for (const serviceName of services) {
      result.set(serviceName, getDefaultImage(serviceName));
    }
    return result;
  }
  for (const [serviceName, config$1] of Object.entries(services)) {
    if (config$1 === true) {
      result.set(serviceName, getDefaultImage(serviceName));
    } else if (config$1 && typeof config$1 === "object") {
      if (config$1.image) {
        // Full image reference wins over any version override.
        result.set(serviceName, config$1.image);
      } else {
        const tag = config$1.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
        result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${tag}`);
      }
    }
    // false / other values: service is disabled and omitted from the map.
  }
  return result;
}
2897
/**
 * Generate docker-compose.yml for production deployment
 *
 * Emits an `api` service plus optional postgres/redis/rabbitmq services
 * (whichever are enabled in `services`), matching named volumes, and a
 * bridge network. `\${VAR:-default}` placeholders in the output are
 * resolved by docker compose at deploy time, not by this generator.
 */
function generateDockerCompose(options) {
  const { imageName, registry, port, healthCheckPath, services } = options;
  // Map of enabled service name -> full image reference.
  const serviceMap = normalizeServices(services);
  // Optional registry prefix, overridable via $REGISTRY at deploy time.
  const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
  let yaml = `version: '3.8'

services:
  api:
    build:
      context: ../..
      dockerfile: .gkm/docker/Dockerfile
    image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
    container_name: ${imageName}
    restart: unless-stopped
    ports:
      - "\${PORT:-${port}}:${port}"
    environment:
      - NODE_ENV=production
`;
  // Connection-string env vars are emitted only for enabled services.
  if (serviceMap.has("postgres")) yaml += `      - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
`;
  if (serviceMap.has("redis")) yaml += `      - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
`;
  if (serviceMap.has("rabbitmq")) yaml += `      - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
`;
  yaml += `    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
      interval: 30s
      timeout: 3s
      retries: 3
`;
  // The api container waits for every enabled service to become healthy.
  if (serviceMap.size > 0) {
    yaml += `    depends_on:
`;
    for (const serviceName of serviceMap.keys()) yaml += `      ${serviceName}:
        condition: service_healthy
`;
  }
  yaml += `    networks:
      - app-network
`;
  const postgresImage = serviceMap.get("postgres");
  if (postgresImage) yaml += `
  postgres:
    image: ${postgresImage}
    container_name: postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: \${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: \${POSTGRES_DB:-app}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  const redisImage = serviceMap.get("redis");
  if (redisImage) yaml += `
  redis:
    image: ${redisImage}
    container_name: redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  const rabbitmqImage = serviceMap.get("rabbitmq");
  if (rabbitmqImage) yaml += `
  rabbitmq:
    image: ${rabbitmqImage}
    container_name: rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
      RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
    ports:
      - "15672:15672" # Management UI
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  // Named volumes only for the services that were emitted above.
  yaml += `
volumes:
`;
  if (serviceMap.has("postgres")) yaml += `  postgres_data:
`;
  if (serviceMap.has("redis")) yaml += `  redis_data:
`;
  if (serviceMap.has("rabbitmq")) yaml += `  rabbitmq_data:
`;
  yaml += `
networks:
  app-network:
    driver: bridge
`;
  return yaml;
}
2964
3014
  /**
2965
- * Generate .dockerignore file
3015
 * Generate a minimal docker-compose.yml for API only
 *
 * Single `api` service with a wget-based healthcheck on its own bridge
 * network; no backing services or named volumes are emitted.
 */
function generateMinimalDockerCompose(options) {
  const { imageName, registry, port, healthCheckPath } = options;
  // Optional registry prefix, overridable via $REGISTRY at deploy time.
  const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
  return `version: '3.8'

services:
  api:
    build:
      context: ../..
      dockerfile: .gkm/docker/Dockerfile
    image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
    container_name: ${imageName}
    restart: unless-stopped
    ports:
      - "\${PORT:-${port}}:${port}"
    environment:
      - NODE_ENV=production
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
      interval: 30s
      timeout: 3s
      retries: 3
    networks:
      - app-network

networks:
  app-network:
    driver: bridge
`;
}
3014
3047
  /**
3015
- * Generate docker-entrypoint.sh for custom startup logic
3048
 * Generate docker-compose.yml for a workspace with all apps as services.
 * Apps can communicate with each other via service names.
 * @internal Exported for testing
 */
function generateWorkspaceCompose(workspace, options = {}) {
  const { registry } = options;
  const apps = Object.entries(workspace.apps);
  const services = workspace.services;
  // A backing service is enabled unless it is absent or explicitly `false`.
  const hasPostgres = services.db !== void 0 && services.db !== false;
  const hasRedis = services.cache !== void 0 && services.cache !== false;
  const hasMail = services.mail !== void 0 && services.mail !== false;
  const postgresImage = getInfraServiceImage("postgres", services.db);
  const redisImage = getInfraServiceImage("redis", services.cache);
  let yaml = `# Docker Compose for ${workspace.name} workspace
# Generated by gkm - do not edit manually

services:
`;
  // One service block per app; apps reach each other by service name.
  for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
    registry,
    hasPostgres,
    hasRedis
  });
  if (hasPostgres) yaml += `
  postgres:
    image: ${postgresImage}
    container_name: ${workspace.name}-postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: \${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: \${POSTGRES_DB:-app}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - workspace-network
`;
  if (hasRedis) yaml += `
  redis:
    image: ${redisImage}
    container_name: ${workspace.name}-redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - workspace-network
`;
  if (hasMail) yaml += `
  mailpit:
    image: axllent/mailpit:latest
    container_name: ${workspace.name}-mailpit
    restart: unless-stopped
    ports:
      - "8025:8025" # Web UI
      - "1025:1025" # SMTP
    networks:
      - workspace-network
`;
  // Named volumes only for the services emitted above (mailpit is stateless).
  yaml += `
volumes:
`;
  if (hasPostgres) yaml += `  postgres_data:
`;
  if (hasRedis) yaml += `  redis_data:
`;
  yaml += `
networks:
  workspace-network:
    driver: bridge
`;
  return yaml;
}
3032
3130
  /**
3033
- * Resolve Docker configuration from GkmConfig with defaults
3131
+ * Get infrastructure service image with version.
3034
3132
  */
3035
- function resolveDockerConfig$1(config$1) {
3036
- const docker = config$1.docker ?? {};
3037
- let defaultImageName = "api";
3038
- try {
3039
- const pkg$1 = __require(`${process.cwd()}/package.json`);
3040
- if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3041
- } catch {}
3042
- return {
3043
- registry: docker.registry ?? "",
3044
- imageName: docker.imageName ?? defaultImageName,
3045
- baseImage: docker.baseImage ?? "node:22-alpine",
3046
- port: docker.port ?? 3e3,
3047
- compose: docker.compose
3133
// Resolve an infrastructure service's image reference. Resolution order:
// explicit `image` > explicit `version` on the default base image >
// built-in default tag.
function getInfraServiceImage(serviceName, config$1) {
  const defaults = {
    postgres: "postgres:16-alpine",
    redis: "redis:7-alpine"
  };
  if (config$1 && config$1 !== true && typeof config$1 === "object") {
    if (config$1.image) return config$1.image;
    if (config$1.version) {
      const base = serviceName === "postgres" ? "postgres" : "redis";
      return `${base}:${config$1.version}`;
    }
  }
  return defaults[serviceName];
}
3050
3148
  /**
3051
- * Generate a Dockerfile for Next.js frontend apps using standalone output.
3052
- * Uses turbo prune for monorepo optimization.
3053
- * @internal Exported for testing
3149
+ * Generate a service definition for an app.
3054
3150
  */
3055
- function generateNextjsDockerfile(options) {
3056
- const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3057
- const pm = getPmConfig(packageManager);
3058
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3059
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3060
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3061
- const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3062
- const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3063
- return `# syntax=docker/dockerfile:1
3064
- # Next.js standalone Dockerfile with turbo prune optimization
3065
-
3066
- # Stage 1: Prune monorepo
3067
- FROM ${baseImage} AS pruner
3068
-
3069
- WORKDIR /app
3070
-
3071
- ${installPm}
3072
-
3073
- COPY . .
3074
-
3075
- # Prune to only include necessary packages
3076
- RUN ${turboCmd} prune ${turboPackage} --docker
3077
-
3078
- # Stage 2: Install dependencies
3079
- FROM ${baseImage} AS deps
3080
-
3081
- WORKDIR /app
3082
-
3083
- ${installPm}
3084
-
3085
- # Copy pruned lockfile and package.jsons
3086
- COPY --from=pruner /app/out/${pm.lockfile} ./
3087
- COPY --from=pruner /app/out/json/ ./
3088
-
3089
- # Install dependencies
3090
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3091
- ${turboInstallCmd}
3092
-
3093
- # Stage 3: Build
3094
- FROM deps AS builder
3095
-
3096
- WORKDIR /app
3097
-
3098
- # Build-time args for public API URLs (populated by gkm deploy)
3099
- # These get baked into the Next.js build as public environment variables
3100
- ${publicUrlArgDeclarations}
3101
-
3102
- # Convert ARGs to ENVs for Next.js build
3103
- ${publicUrlEnvDeclarations}
3104
-
3105
- # Copy pruned source
3106
- COPY --from=pruner /app/out/full/ ./
3107
-
3108
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3109
- # Using wildcard to make it optional for single-app projects
3110
- COPY --from=pruner /app/tsconfig.* ./
3111
-
3112
- # Ensure public directory exists (may be empty for scaffolded projects)
3113
- RUN mkdir -p ${appPath}/public
3114
-
3115
- # Set Next.js to produce standalone output
3116
- ENV NEXT_TELEMETRY_DISABLED=1
3117
-
3118
- # Build the application
3119
- RUN ${turboCmd} run build --filter=${turboPackage}
3120
-
3121
- # Stage 4: Production
3122
- FROM ${baseImage} AS runner
3123
-
3124
- WORKDIR /app
3125
-
3126
- # Install tini for proper signal handling
3127
- RUN apk add --no-cache tini
3128
-
3129
- # Create non-root user
3130
- RUN addgroup --system --gid 1001 nodejs && \\
3131
- adduser --system --uid 1001 nextjs
3132
-
3133
- # Set environment
3134
- ENV NODE_ENV=production
3135
- ENV NEXT_TELEMETRY_DISABLED=1
3136
- ENV PORT=${port}
3137
- ENV HOSTNAME="0.0.0.0"
3138
-
3139
- # Copy static files and standalone output
3140
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3141
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3142
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3143
-
3144
- USER nextjs
3145
-
3146
- EXPOSE ${port}
3147
-
3148
- ENTRYPOINT ["/sbin/tini", "--"]
3149
- CMD ["node", "${appPath}/server.js"]
3151
+ function generateAppService(appName, app, allApps, options) {
3152
+ const { registry, hasPostgres, hasRedis } = options;
3153
+ const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
3154
+ const healthCheckPath = app.type === "frontend" ? "/" : "/health";
3155
+ const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
3156
+ let yaml = `
3157
+ ${appName}:
3158
+ build:
3159
+ context: .
3160
+ dockerfile: .gkm/docker/Dockerfile.${appName}
3161
+ image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
3162
+ container_name: ${appName}
3163
+ restart: unless-stopped
3164
+ ports:
3165
+ - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
3166
+ environment:
3167
+ - NODE_ENV=production
3168
+ - PORT=${app.port}
3150
3169
  `;
3151
- }
3152
- /**
3153
- * Generate a Dockerfile for backend apps in a workspace.
3154
- * Uses turbo prune for monorepo optimization.
3155
- * @internal Exported for testing
3156
- */
3157
- function generateBackendDockerfile(options) {
3158
- const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3159
- const pm = getPmConfig(packageManager);
3160
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3161
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3162
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3163
- return `# syntax=docker/dockerfile:1
3164
- # Backend Dockerfile with turbo prune optimization
3165
-
3166
- # Stage 1: Prune monorepo
3167
- FROM ${baseImage} AS pruner
3168
-
3169
- WORKDIR /app
3170
-
3171
- ${installPm}
3172
-
3173
- COPY . .
3174
-
3175
- # Prune to only include necessary packages
3176
- RUN ${turboCmd} prune ${turboPackage} --docker
3177
-
3178
- # Stage 2: Install dependencies
3179
- FROM ${baseImage} AS deps
3180
-
3181
- WORKDIR /app
3182
-
3183
- ${installPm}
3184
-
3185
- # Copy pruned lockfile and package.jsons
3186
- COPY --from=pruner /app/out/${pm.lockfile} ./
3187
- COPY --from=pruner /app/out/json/ ./
3188
-
3189
- # Install dependencies
3190
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3191
- ${turboInstallCmd}
3192
-
3193
- # Stage 3: Build
3194
- FROM deps AS builder
3195
-
3196
- WORKDIR /app
3197
-
3198
- # Build-time args for encrypted secrets
3199
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3200
- ARG GKM_CREDENTIALS_IV=""
3201
-
3202
- # Copy pruned source
3203
- COPY --from=pruner /app/out/full/ ./
3204
-
3205
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3206
- # Using wildcard to make it optional for single-app projects
3207
- COPY --from=pruner /app/gkm.config.* ./
3208
- COPY --from=pruner /app/tsconfig.* ./
3209
-
3210
- # Write encrypted credentials for gkm build to embed
3211
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3212
- mkdir -p ${appPath}/.gkm && \
3213
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3214
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3215
- fi
3216
-
3217
- # Build production server using gkm
3218
- RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3219
-
3220
- # Stage 4: Production
3221
- FROM ${baseImage} AS runner
3222
-
3223
- WORKDIR /app
3224
-
3225
- RUN apk add --no-cache tini
3226
-
3227
- RUN addgroup --system --gid 1001 nodejs && \\
3228
- adduser --system --uid 1001 hono
3229
-
3230
- # Copy bundled server
3231
- COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3232
-
3233
- ENV NODE_ENV=production
3234
- ENV PORT=${port}
3235
-
3236
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3237
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3238
-
3239
- USER hono
3240
-
3241
- EXPOSE ${port}
3242
-
3243
- ENTRYPOINT ["/sbin/tini", "--"]
3244
- CMD ["node", "server.mjs"]
3170
+ for (const dep of app.dependencies) {
3171
+ const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
3172
+ if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
3173
+ `;
3174
+ }
3175
+ if (app.type === "backend") {
3176
+ if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
3245
3177
  `;
3246
- }
3247
- /**
3248
- * Generate a Dockerfile for apps with a custom entry point.
3249
- * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3250
- * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3251
- * @internal Exported for testing
3252
- */
3253
- function generateEntryDockerfile(options) {
3254
- const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3255
- const pm = getPmConfig(packageManager);
3256
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3257
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3258
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3259
- return `# syntax=docker/dockerfile:1
3260
- # Entry-based Dockerfile with turbo prune + tsdown bundling
3261
-
3262
- # Stage 1: Prune monorepo
3263
- FROM ${baseImage} AS pruner
3264
-
3265
- WORKDIR /app
3266
-
3267
- ${installPm}
3268
-
3269
- COPY . .
3270
-
3271
- # Prune to only include necessary packages
3272
- RUN ${turboCmd} prune ${turboPackage} --docker
3273
-
3274
- # Stage 2: Install dependencies
3275
- FROM ${baseImage} AS deps
3276
-
3277
- WORKDIR /app
3278
-
3279
- ${installPm}
3280
-
3281
- # Copy pruned lockfile and package.jsons
3282
- COPY --from=pruner /app/out/${pm.lockfile} ./
3283
- COPY --from=pruner /app/out/json/ ./
3284
-
3285
- # Install dependencies
3286
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3287
- ${turboInstallCmd}
3288
-
3289
- # Stage 3: Build with tsdown
3290
- FROM deps AS builder
3291
-
3292
- WORKDIR /app
3293
-
3294
- # Build-time args for encrypted secrets
3295
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3296
- ARG GKM_CREDENTIALS_IV=""
3297
-
3298
- # Copy pruned source
3299
- COPY --from=pruner /app/out/full/ ./
3300
-
3301
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3302
- # Using wildcard to make it optional for single-app projects
3303
- COPY --from=pruner /app/tsconfig.* ./
3304
-
3305
- # Write encrypted credentials for tsdown to embed via define
3306
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3307
- mkdir -p ${appPath}/.gkm && \
3308
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3309
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3310
- fi
3311
-
3312
- # Bundle entry point with esbuild (outputs to dist/index.mjs)
3313
- # Creates a fully standalone bundle with all dependencies included
3314
- # Use define to embed credentials if present
3315
- RUN cd ${appPath} && \
3316
- if [ -f .gkm/credentials.enc ]; then \
3317
- CREDS=$(cat .gkm/credentials.enc) && \
3318
- IV=$(cat .gkm/credentials.iv) && \
3319
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3320
- --outfile=dist/index.mjs --packages=bundle \
3321
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3322
- --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3323
- --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3324
- else \
3325
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3326
- --outfile=dist/index.mjs --packages=bundle \
3327
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3328
- fi
3329
-
3330
- # Stage 4: Production
3331
- FROM ${baseImage} AS runner
3332
-
3333
- WORKDIR /app
3334
-
3335
- RUN apk add --no-cache tini
3336
-
3337
- RUN addgroup --system --gid 1001 nodejs && \\
3338
- adduser --system --uid 1001 app
3339
-
3340
- # Copy bundled output only (no node_modules needed - fully bundled)
3341
- COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3342
-
3343
- ENV NODE_ENV=production
3344
- ENV PORT=${port}
3345
-
3346
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3347
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3348
-
3349
- USER app
3350
-
3351
- EXPOSE ${port}
3352
-
3353
- ENTRYPOINT ["/sbin/tini", "--"]
3354
- CMD ["node", "index.mjs"]
3178
+ if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
3179
+ `;
3180
+ }
3181
+ yaml += ` healthcheck:
3182
+ test: ${healthCheckCmd}
3183
+ interval: 30s
3184
+ timeout: 3s
3185
+ retries: 3
3186
+ `;
3187
+ const dependencies$1 = [...app.dependencies];
3188
+ if (app.type === "backend") {
3189
+ if (hasPostgres) dependencies$1.push("postgres");
3190
+ if (hasRedis) dependencies$1.push("redis");
3191
+ }
3192
+ if (dependencies$1.length > 0) {
3193
+ yaml += ` depends_on:
3194
+ `;
3195
+ for (const dep of dependencies$1) yaml += ` ${dep}:
3196
+ condition: service_healthy
3355
3197
  `;
3198
+ }
3199
+ yaml += ` networks:
3200
+ - workspace-network
3201
+ `;
3202
+ return yaml;
3356
3203
  }
3357
3204
 
3358
3205
  //#endregion
3359
- //#region src/docker/index.ts
3360
- const logger$6 = console;
3206
+ //#region src/docker/templates.ts
3207
+ const LOCKFILES = [
3208
+ ["pnpm-lock.yaml", "pnpm"],
3209
+ ["bun.lockb", "bun"],
3210
+ ["yarn.lock", "yarn"],
3211
+ ["package-lock.json", "npm"]
3212
+ ];
3361
3213
  /**
3362
- * Docker command implementation
3363
- * Generates Dockerfile, docker-compose.yml, and related files
3364
- *
3365
- * Default: Multi-stage Dockerfile that builds from source inside Docker
3366
- * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3214
+ * Detect package manager from lockfiles
3215
+ * Walks up the directory tree to find lockfile (for monorepos)
3367
3216
  */
3368
- async function dockerCommand(options) {
3369
- const loadedConfig = await loadWorkspaceConfig();
3370
- if (loadedConfig.type === "workspace") {
3371
- logger$6.log("📦 Detected workspace configuration");
3372
- return workspaceDockerCommand(loadedConfig.workspace, options);
3373
- }
3374
- const config$1 = await loadConfig();
3375
- const dockerConfig = resolveDockerConfig$1(config$1);
3376
- const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
3377
- const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3378
- const useSlim = options.slim === true;
3379
- if (useSlim) {
3380
- const distDir = join(process.cwd(), ".gkm", "server", "dist");
3381
- const hasBuild = existsSync(join(distDir, "server.mjs"));
3382
- if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3217
+ function detectPackageManager$1(cwd = process.cwd()) {
3218
+ let dir = cwd;
3219
+ const root = parse(dir).root;
3220
+ while (dir !== root) {
3221
+ for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
3222
+ dir = dirname(dir);
3383
3223
  }
3384
- const dockerDir = join(process.cwd(), ".gkm", "docker");
3385
- await mkdir(dockerDir, { recursive: true });
3386
- const packageManager = detectPackageManager$1();
3387
- const inMonorepo = isMonorepo();
3388
- const hasTurbo = hasTurboConfig();
3389
- let useTurbo = options.turbo ?? false;
3390
- if (inMonorepo && !useSlim) if (hasTurbo) {
3391
- useTurbo = true;
3392
- logger$6.log(" Detected monorepo with turbo.json - using turbo prune");
3393
- } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3394
- let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3395
- if (useTurbo && !options.turboPackage) try {
3396
- const pkg$1 = __require(`${process.cwd()}/package.json`);
3397
- if (pkg$1.name) {
3398
- turboPackage = pkg$1.name;
3399
- logger$6.log(` Turbo package: ${turboPackage}`);
3400
- }
3401
- } catch {}
3402
- const templateOptions = {
3403
- imageName: dockerConfig.imageName,
3404
- baseImage: dockerConfig.baseImage,
3405
- port: dockerConfig.port,
3406
- healthCheckPath,
3407
- prebuilt: useSlim,
3408
- turbo: useTurbo,
3409
- turboPackage,
3410
- packageManager
3411
- };
3412
- const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3413
- const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3414
- const dockerfilePath = join(dockerDir, "Dockerfile");
3415
- await writeFile(dockerfilePath, dockerfile);
3416
- logger$6.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
3417
- const composeOptions = {
3418
- imageName: dockerConfig.imageName,
3419
- registry: options.registry ?? dockerConfig.registry,
3420
- port: dockerConfig.port,
3421
- healthCheckPath,
3422
- services: dockerConfig.compose?.services ?? {}
3423
- };
3424
- const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
3425
- const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
3426
- const composePath = join(dockerDir, "docker-compose.yml");
3427
- await writeFile(composePath, dockerCompose);
3428
- logger$6.log("Generated: .gkm/docker/docker-compose.yml");
3429
- const dockerignore = generateDockerignore();
3430
- const dockerignorePath = join(process.cwd(), ".dockerignore");
3431
- await writeFile(dockerignorePath, dockerignore);
3432
- logger$6.log("Generated: .dockerignore (project root)");
3433
- const entrypoint = generateDockerEntrypoint();
3434
- const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
3435
- await writeFile(entrypointPath, entrypoint);
3436
- logger$6.log("Generated: .gkm/docker/docker-entrypoint.sh");
3437
- const result = {
3438
- dockerfile: dockerfilePath,
3439
- dockerCompose: composePath,
3440
- dockerignore: dockerignorePath,
3441
- entrypoint: entrypointPath
3442
- };
3443
- if (options.build) await buildDockerImage(dockerConfig.imageName, options);
3444
- if (options.push) await pushDockerImage(dockerConfig.imageName, options);
3445
- return result;
3224
+ for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
3225
+ return "pnpm";
3446
3226
  }
3447
3227
  /**
3448
- * Ensure lockfile exists in the build context
3449
- * For monorepos, copies from workspace root if needed
3450
- * Returns cleanup function if file was copied
3228
+ * Find the lockfile path by walking up the directory tree
3229
+ * Returns the full path to the lockfile, or null if not found
3451
3230
  */
3452
- function ensureLockfile(cwd) {
3453
- const lockfilePath = findLockfilePath(cwd);
3454
- if (!lockfilePath) {
3455
- logger$6.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
3456
- return null;
3231
+ function findLockfilePath(cwd = process.cwd()) {
3232
+ let dir = cwd;
3233
+ const root = parse(dir).root;
3234
+ while (dir !== root) {
3235
+ for (const [lockfile] of LOCKFILES) {
3236
+ const lockfilePath = join(dir, lockfile);
3237
+ if (existsSync(lockfilePath)) return lockfilePath;
3238
+ }
3239
+ dir = dirname(dir);
3457
3240
  }
3458
- const lockfileName = basename(lockfilePath);
3459
- const localLockfile = join(cwd, lockfileName);
3460
- if (lockfilePath === localLockfile) return null;
3461
- logger$6.log(` Copying ${lockfileName} from monorepo root...`);
3462
- copyFileSync(lockfilePath, localLockfile);
3463
- return () => {
3464
- try {
3465
- unlinkSync(localLockfile);
3466
- } catch {}
3467
- };
3241
+ for (const [lockfile] of LOCKFILES) {
3242
+ const lockfilePath = join(root, lockfile);
3243
+ if (existsSync(lockfilePath)) return lockfilePath;
3244
+ }
3245
+ return null;
3468
3246
  }
3469
3247
  /**
3470
- * Build Docker image
3471
- * Uses BuildKit for cache mount support
3248
+ * Check if we're in a monorepo (lockfile is in a parent directory)
3472
3249
  */
3473
- async function buildDockerImage(imageName, options) {
3474
- const tag = options.tag ?? "latest";
3475
- const registry = options.registry;
3476
- const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
3477
- logger$6.log(`\n🐳 Building Docker image: ${fullImageName}`);
3478
- const cwd = process.cwd();
3479
- const cleanup = ensureLockfile(cwd);
3480
- try {
3481
- execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
3482
- cwd,
3483
- stdio: "inherit",
3484
- env: {
3485
- ...process.env,
3486
- DOCKER_BUILDKIT: "1"
3487
- }
3488
- });
3489
- logger$6.log(`✅ Docker image built: ${fullImageName}`);
3490
- } catch (error) {
3491
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3492
- } finally {
3493
- cleanup?.();
3494
- }
3250
+ function isMonorepo(cwd = process.cwd()) {
3251
+ const lockfilePath = findLockfilePath(cwd);
3252
+ if (!lockfilePath) return false;
3253
+ const lockfileDir = dirname(lockfilePath);
3254
+ return lockfileDir !== cwd;
3495
3255
  }
3496
3256
  /**
3497
- * Push Docker image to registry
3257
+ * Check if turbo.json exists (walks up directory tree)
3498
3258
  */
3499
- async function pushDockerImage(imageName, options) {
3500
- const tag = options.tag ?? "latest";
3501
- const registry = options.registry;
3502
- if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
3503
- const fullImageName = `${registry}/${imageName}:${tag}`;
3504
- logger$6.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
3505
- try {
3506
- execSync(`docker push ${fullImageName}`, {
3507
- cwd: process.cwd(),
3508
- stdio: "inherit"
3509
- });
3510
- logger$6.log(`✅ Docker image pushed: ${fullImageName}`);
3511
- } catch (error) {
3512
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3259
+ function hasTurboConfig(cwd = process.cwd()) {
3260
+ let dir = cwd;
3261
+ const root = parse(dir).root;
3262
+ while (dir !== root) {
3263
+ if (existsSync(join(dir, "turbo.json"))) return true;
3264
+ dir = dirname(dir);
3513
3265
  }
3266
+ return existsSync(join(root, "turbo.json"));
3514
3267
  }
3515
3268
  /**
3516
- * Get the package name from package.json in an app directory.
3269
+ * Get install command for turbo builds (without frozen lockfile)
3270
+ * Turbo prune creates a subset that may not perfectly match the lockfile
3517
3271
  */
3518
- function getAppPackageName(appPath) {
3519
- try {
3520
- const pkgPath = join(appPath, "package.json");
3521
- if (!existsSync(pkgPath)) return void 0;
3522
- const content = readFileSync(pkgPath, "utf-8");
3523
- const pkg$1 = JSON.parse(content);
3524
- return pkg$1.name;
3525
- } catch {
3526
- return void 0;
3527
- }
3272
+ function getTurboInstallCmd(pm) {
3273
+ const commands = {
3274
+ pnpm: "pnpm install",
3275
+ npm: "npm install",
3276
+ yarn: "yarn install",
3277
+ bun: "bun install"
3278
+ };
3279
+ return commands[pm];
3528
3280
  }
3529
3281
  /**
3530
- * Generate Dockerfiles for all apps in a workspace.
3531
- * @internal Exported for testing
3282
+ * Get package manager specific commands and paths
3532
3283
  */
3533
- async function workspaceDockerCommand(workspace, options) {
3534
- const results = [];
3535
- const apps = Object.entries(workspace.apps);
3536
- logger$6.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
3537
- const dockerDir = join(workspace.root, ".gkm", "docker");
3538
- await mkdir(dockerDir, { recursive: true });
3539
- const packageManager = detectPackageManager$1(workspace.root);
3540
- logger$6.log(` Package manager: ${packageManager}`);
3541
- for (const [appName, app] of apps) {
3542
- const appPath = app.path;
3543
- const fullAppPath = join(workspace.root, appPath);
3544
- const turboPackage = getAppPackageName(fullAppPath) ?? appName;
3545
- const imageName = appName;
3546
- const hasEntry = !!app.entry;
3547
- const buildType = hasEntry ? "entry" : app.type;
3548
- logger$6.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
3549
- let dockerfile;
3550
- if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
3551
- imageName,
3552
- baseImage: "node:22-alpine",
3553
- port: app.port,
3554
- appPath,
3555
- turboPackage,
3556
- packageManager
3557
- });
3558
- else if (app.entry) dockerfile = generateEntryDockerfile({
3559
- imageName,
3560
- baseImage: "node:22-alpine",
3561
- port: app.port,
3562
- appPath,
3563
- entry: app.entry,
3564
- turboPackage,
3565
- packageManager,
3566
- healthCheckPath: "/health"
3567
- });
3568
- else dockerfile = generateBackendDockerfile({
3569
- imageName,
3570
- baseImage: "node:22-alpine",
3571
- port: app.port,
3572
- appPath,
3573
- turboPackage,
3574
- packageManager,
3575
- healthCheckPath: "/health"
3576
- });
3577
- const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
3578
- await writeFile(dockerfilePath, dockerfile);
3579
- logger$6.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
3580
- results.push({
3581
- appName,
3582
- type: app.type,
3583
- dockerfile: dockerfilePath,
3584
- imageName
3585
- });
3586
- }
3587
- const dockerignore = generateDockerignore();
3588
- const dockerignorePath = join(workspace.root, ".dockerignore");
3589
- await writeFile(dockerignorePath, dockerignore);
3590
- logger$6.log(`\n Generated: .dockerignore (workspace root)`);
3591
- const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
3592
- const composePath = join(dockerDir, "docker-compose.yml");
3593
- await writeFile(composePath, dockerCompose);
3594
- logger$6.log(` Generated: .gkm/docker/docker-compose.yml`);
3595
- logger$6.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
3596
- logger$6.log("\n📋 Build commands:");
3597
- for (const result of results) {
3598
- const icon = result.type === "backend" ? "⚙️" : "🌐";
3599
- logger$6.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
3600
- }
3601
- logger$6.log("\n📋 Run all services:");
3602
- logger$6.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
3603
- return {
3604
- apps: results,
3605
- dockerCompose: composePath,
3606
- dockerignore: dockerignorePath
3284
+ function getPmConfig(pm) {
3285
+ const configs = {
3286
+ pnpm: {
3287
+ install: "corepack enable && corepack prepare pnpm@latest --activate",
3288
+ lockfile: "pnpm-lock.yaml",
3289
+ fetch: "pnpm fetch",
3290
+ installCmd: "pnpm install --frozen-lockfile --offline",
3291
+ cacheTarget: "/root/.local/share/pnpm/store",
3292
+ cacheId: "pnpm",
3293
+ run: "pnpm",
3294
+ exec: "pnpm exec",
3295
+ dlx: "pnpm dlx",
3296
+ addGlobal: "pnpm add -g"
3297
+ },
3298
+ npm: {
3299
+ install: "",
3300
+ lockfile: "package-lock.json",
3301
+ fetch: "",
3302
+ installCmd: "npm ci",
3303
+ cacheTarget: "/root/.npm",
3304
+ cacheId: "npm",
3305
+ run: "npm run",
3306
+ exec: "npx",
3307
+ dlx: "npx",
3308
+ addGlobal: "npm install -g"
3309
+ },
3310
+ yarn: {
3311
+ install: "corepack enable && corepack prepare yarn@stable --activate",
3312
+ lockfile: "yarn.lock",
3313
+ fetch: "",
3314
+ installCmd: "yarn install --frozen-lockfile",
3315
+ cacheTarget: "/root/.yarn/cache",
3316
+ cacheId: "yarn",
3317
+ run: "yarn",
3318
+ exec: "yarn exec",
3319
+ dlx: "yarn dlx",
3320
+ addGlobal: "yarn global add"
3321
+ },
3322
+ bun: {
3323
+ install: "npm install -g bun",
3324
+ lockfile: "bun.lockb",
3325
+ fetch: "",
3326
+ installCmd: "bun install --frozen-lockfile",
3327
+ cacheTarget: "/root/.bun/install/cache",
3328
+ cacheId: "bun",
3329
+ run: "bun run",
3330
+ exec: "bunx",
3331
+ dlx: "bunx",
3332
+ addGlobal: "bun add -g"
3333
+ }
3607
3334
  };
3335
+ return configs[pm];
3608
3336
  }
3337
+ /**
3338
+ * Generate a multi-stage Dockerfile for building from source
3339
+ * Optimized for build speed with:
3340
+ * - BuildKit cache mounts for package manager store
3341
+ * - pnpm fetch for better layer caching (when using pnpm)
3342
+ * - Optional turbo prune for monorepos
3343
+ */
3344
+ function generateMultiStageDockerfile(options) {
3345
+ const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
3346
+ if (turbo) return generateTurboDockerfile({
3347
+ ...options,
3348
+ turboPackage: turboPackage ?? "api"
3349
+ });
3350
+ const pm = getPmConfig(packageManager);
3351
+ const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
3352
+ const hasFetch = packageManager === "pnpm";
3353
+ const depsStage = hasFetch ? `# Copy lockfile first for better caching
3354
+ COPY ${pm.lockfile} ./
3355
+
3356
+ # Fetch dependencies (downloads to virtual store, cached separately)
3357
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3358
+ ${pm.fetch}
3359
+
3360
+ # Copy package.json after fetch
3361
+ COPY package.json ./
3362
+
3363
+ # Install from cache (fast - no network needed)
3364
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3365
+ ${pm.installCmd}` : `# Copy package files
3366
+ COPY package.json ${pm.lockfile} ./
3367
+
3368
+ # Install dependencies with cache
3369
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3370
+ ${pm.installCmd}`;
3371
+ return `# syntax=docker/dockerfile:1
3372
+ # Stage 1: Dependencies
3373
+ FROM ${baseImage} AS deps
3374
+
3375
+ WORKDIR /app
3376
+ ${installPm}
3377
+ ${depsStage}
3378
+
3379
+ # Stage 2: Build
3380
+ FROM deps AS builder
3381
+
3382
+ WORKDIR /app
3383
+
3384
+ # Copy source (deps already installed)
3385
+ COPY . .
3386
+
3387
+ # Debug: Show node_modules/.bin contents and build production server
3388
+ RUN echo "=== node_modules/.bin contents ===" && \
3389
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3390
+ echo "=== Checking for gkm ===" && \
3391
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3392
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3393
+ echo "=== Running build ===" && \
3394
+ ./node_modules/.bin/gkm build --provider server --production
3395
+
3396
+ # Stage 3: Production
3397
+ FROM ${baseImage} AS runner
3398
+
3399
+ WORKDIR /app
3400
+
3401
+ # Install tini for proper signal handling as PID 1
3402
+ RUN apk add --no-cache tini
3609
3403
 
3610
- //#endregion
3611
- //#region src/deploy/docker.ts
3612
- /**
3613
- * Get app name from package.json in the current working directory
3614
- * Used for Dokploy app/project naming
3615
- */
3616
- function getAppNameFromCwd$1() {
3617
- const packageJsonPath = join(process.cwd(), "package.json");
3618
- if (!existsSync(packageJsonPath)) return void 0;
3619
- try {
3620
- const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
3621
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3622
- } catch {}
3623
- return void 0;
3624
- }
3625
- /**
3626
- * Get app name from package.json adjacent to the lockfile (project root)
3627
- * Used for Docker image naming
3628
- */
3629
- function getAppNameFromPackageJson() {
3630
- const cwd = process.cwd();
3631
- const lockfilePath = findLockfilePath(cwd);
3632
- if (!lockfilePath) return void 0;
3633
- const projectRoot = dirname(lockfilePath);
3634
- const packageJsonPath = join(projectRoot, "package.json");
3635
- if (!existsSync(packageJsonPath)) return void 0;
3636
- try {
3637
- const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
3638
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3639
- } catch {}
3640
- return void 0;
3404
+ # Create non-root user
3405
+ RUN addgroup --system --gid 1001 nodejs && \\
3406
+ adduser --system --uid 1001 hono
3407
+
3408
+ # Copy bundled server
3409
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3410
+
3411
+ # Environment
3412
+ ENV NODE_ENV=production
3413
+ ENV PORT=${port}
3414
+
3415
+ # Health check
3416
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3417
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3418
+
3419
+ # Switch to non-root user
3420
+ USER hono
3421
+
3422
+ EXPOSE ${port}
3423
+
3424
+ # Use tini as entrypoint to handle PID 1 responsibilities
3425
+ ENTRYPOINT ["/sbin/tini", "--"]
3426
+ CMD ["node", "server.mjs"]
3427
+ `;
3641
3428
  }
3642
- const logger$5 = console;
3643
3429
  /**
3644
- * Get the full image reference
3430
+ * Generate a Dockerfile optimized for Turbo monorepos
3431
+ * Uses turbo prune to create minimal Docker context
3645
3432
  */
3646
- function getImageRef(registry, imageName, tag) {
3647
- if (registry) return `${registry}/${imageName}:${tag}`;
3648
- return `${imageName}:${tag}`;
3433
+ function generateTurboDockerfile(options) {
3434
+ const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
3435
+ const pm = getPmConfig(packageManager);
3436
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3437
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3438
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3439
+ return `# syntax=docker/dockerfile:1
3440
+ # Stage 1: Prune monorepo
3441
+ FROM ${baseImage} AS pruner
3442
+
3443
+ WORKDIR /app
3444
+
3445
+ ${installPm}
3446
+
3447
+ COPY . .
3448
+
3449
+ # Prune to only include necessary packages
3450
+ RUN ${turboCmd} prune ${turboPackage} --docker
3451
+
3452
+ # Stage 2: Install dependencies
3453
+ FROM ${baseImage} AS deps
3454
+
3455
+ WORKDIR /app
3456
+
3457
+ ${installPm}
3458
+
3459
+ # Copy pruned lockfile and package.jsons
3460
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3461
+ COPY --from=pruner /app/out/json/ ./
3462
+
3463
+ # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
3464
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3465
+ ${turboInstallCmd}
3466
+
3467
+ # Stage 3: Build
3468
+ FROM deps AS builder
3469
+
3470
+ WORKDIR /app
3471
+
3472
+ # Copy pruned source
3473
+ COPY --from=pruner /app/out/full/ ./
3474
+
3475
+ # Debug: Show node_modules/.bin contents and build production server
3476
+ RUN echo "=== node_modules/.bin contents ===" && \
3477
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3478
+ echo "=== Checking for gkm ===" && \
3479
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3480
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3481
+ echo "=== Running build ===" && \
3482
+ ./node_modules/.bin/gkm build --provider server --production
3483
+
3484
+ # Stage 4: Production
3485
+ FROM ${baseImage} AS runner
3486
+
3487
+ WORKDIR /app
3488
+
3489
+ RUN apk add --no-cache tini
3490
+
3491
+ RUN addgroup --system --gid 1001 nodejs && \\
3492
+ adduser --system --uid 1001 hono
3493
+
3494
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3495
+
3496
+ ENV NODE_ENV=production
3497
+ ENV PORT=${port}
3498
+
3499
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3500
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3501
+
3502
+ USER hono
3503
+
3504
+ EXPOSE ${port}
3505
+
3506
+ ENTRYPOINT ["/sbin/tini", "--"]
3507
+ CMD ["node", "server.mjs"]
3508
+ `;
3649
3509
  }
3650
3510
  /**
3651
- * Build Docker image
3652
- * @param imageRef - Full image reference (registry/name:tag)
3653
- * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
3654
- * @param buildArgs - Build arguments to pass to docker build
3511
+ * Generate a slim Dockerfile for pre-built bundles
3655
3512
  */
3656
- async function buildImage(imageRef, appName, buildArgs) {
3657
- logger$5.log(`\n🔨 Building Docker image: ${imageRef}`);
3658
- const cwd = process.cwd();
3659
- const lockfilePath = findLockfilePath(cwd);
3660
- const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
3661
- const inMonorepo = lockfileDir !== cwd;
3662
- if (appName || inMonorepo) logger$5.log(" Generating Dockerfile for monorepo (turbo prune)...");
3663
- else logger$5.log(" Generating Dockerfile...");
3664
- await dockerCommand({});
3665
- const dockerfileSuffix = appName ? `.${appName}` : "";
3666
- const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
3667
- const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
3668
- if (buildCwd !== cwd) logger$5.log(` Building from workspace root: ${buildCwd}`);
3669
- const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
3670
- try {
3671
- const cmd = [
3672
- "DOCKER_BUILDKIT=1 docker build",
3673
- "--platform linux/amd64",
3674
- `-f ${dockerfilePath}`,
3675
- `-t ${imageRef}`,
3676
- buildArgsString,
3677
- "."
3678
- ].filter(Boolean).join(" ");
3679
- execSync(cmd, {
3680
- cwd: buildCwd,
3681
- stdio: "inherit",
3682
- env: {
3683
- ...process.env,
3684
- DOCKER_BUILDKIT: "1"
3685
- }
3686
- });
3687
- logger$5.log(`✅ Image built: ${imageRef}`);
3688
- } catch (error) {
3689
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3690
- }
3513
+ function generateSlimDockerfile(options) {
3514
+ const { baseImage, port, healthCheckPath } = options;
3515
+ return `# Slim Dockerfile for pre-built production bundle
3516
+ FROM ${baseImage}
3517
+
3518
+ WORKDIR /app
3519
+
3520
+ # Install tini for proper signal handling as PID 1
3521
+ # Handles SIGTERM propagation and zombie process reaping
3522
+ RUN apk add --no-cache tini
3523
+
3524
+ # Create non-root user
3525
+ RUN addgroup --system --gid 1001 nodejs && \\
3526
+ adduser --system --uid 1001 hono
3527
+
3528
+ # Copy pre-built bundle
3529
+ COPY .gkm/server/dist/server.mjs ./
3530
+
3531
+ # Environment
3532
+ ENV NODE_ENV=production
3533
+ ENV PORT=${port}
3534
+
3535
+ # Health check
3536
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3537
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3538
+
3539
+ # Switch to non-root user
3540
+ USER hono
3541
+
3542
+ EXPOSE ${port}
3543
+
3544
+ # Use tini as entrypoint to handle PID 1 responsibilities
3545
+ ENTRYPOINT ["/sbin/tini", "--"]
3546
+ CMD ["node", "server.mjs"]
3547
+ `;
3691
3548
  }
3692
3549
  /**
3693
- * Push Docker image to registry
3550
+ * Generate .dockerignore file
3694
3551
  */
3695
- async function pushImage(imageRef) {
3696
- logger$5.log(`\n☁️ Pushing image: ${imageRef}`);
3697
- try {
3698
- execSync(`docker push ${imageRef}`, {
3699
- cwd: process.cwd(),
3700
- stdio: "inherit"
3701
- });
3702
- logger$5.log(`✅ Image pushed: ${imageRef}`);
3703
- } catch (error) {
3704
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3705
- }
3552
+ function generateDockerignore() {
3553
+ return `# Dependencies
3554
+ node_modules
3555
+ .pnpm-store
3556
+
3557
+ # Build output (except what we need)
3558
+ .gkm/aws*
3559
+ .gkm/server/*.ts
3560
+ !.gkm/server/dist
3561
+
3562
+ # IDE and editor
3563
+ .idea
3564
+ .vscode
3565
+ *.swp
3566
+ *.swo
3567
+
3568
+ # Git
3569
+ .git
3570
+ .gitignore
3571
+
3572
+ # Logs
3573
+ *.log
3574
+ npm-debug.log*
3575
+ pnpm-debug.log*
3576
+
3577
+ # Test files
3578
+ **/*.test.ts
3579
+ **/*.spec.ts
3580
+ **/__tests__
3581
+ coverage
3582
+
3583
+ # Documentation
3584
+ docs
3585
+ *.md
3586
+ !README.md
3587
+
3588
+ # Environment files (handle secrets separately)
3589
+ .env
3590
+ .env.*
3591
+ !.env.example
3592
+
3593
+ # Docker files (don't copy recursively)
3594
+ Dockerfile*
3595
+ docker-compose*
3596
+ .dockerignore
3597
+ `;
3706
3598
  }
3707
3599
  /**
3708
- * Deploy using Docker (build and optionally push image)
3600
+ * Generate docker-entrypoint.sh for custom startup logic
3709
3601
  */
3710
- async function deployDocker(options) {
3711
- const { stage, tag, skipPush, masterKey, config: config$1, buildArgs } = options;
3712
- const imageName = config$1.imageName;
3713
- const imageRef = getImageRef(config$1.registry, imageName, tag);
3714
- await buildImage(imageRef, config$1.appName, buildArgs);
3715
- if (!skipPush) if (!config$1.registry) logger$5.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
3716
- else await pushImage(imageRef);
3717
- logger$5.log("\n✅ Docker deployment ready!");
3718
- logger$5.log(`\n📋 Deployment details:`);
3719
- logger$5.log(` Image: ${imageRef}`);
3720
- logger$5.log(` Stage: ${stage}`);
3721
- if (masterKey) {
3722
- logger$5.log(`\n🔐 Deploy with this environment variable:`);
3723
- logger$5.log(` GKM_MASTER_KEY=${masterKey}`);
3724
- logger$5.log("\n Example docker run:");
3725
- logger$5.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
3726
- }
3727
- return {
3728
- imageRef,
3729
- masterKey
3730
- };
3602
+ function generateDockerEntrypoint() {
3603
+ return `#!/bin/sh
3604
+ set -e
3605
+
3606
+ # Run any custom startup scripts here
3607
+ # Example: wait for database
3608
+ # until nc -z $DB_HOST $DB_PORT; do
3609
+ # echo "Waiting for database..."
3610
+ # sleep 1
3611
+ # done
3612
+
3613
+ # Execute the main command
3614
+ exec "$@"
3615
+ `;
3731
3616
  }
3732
3617
  /**
3733
- * Resolve Docker deploy config from gkm config
3734
- * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
3735
- * - projectName: from root package.json, or 'app' (for Dokploy project)
3736
- * - appName: from cwd package.json, or projectName (for Dokploy app within project)
3618
+ * Resolve Docker configuration from GkmConfig with defaults
3737
3619
  */
3738
- function resolveDockerConfig(config$1) {
3739
- const projectName = getAppNameFromPackageJson() ?? "app";
3740
- const appName = getAppNameFromCwd$1() ?? projectName;
3741
- const imageName = config$1.docker?.imageName ?? appName;
3620
+ function resolveDockerConfig$1(config$1) {
3621
+ const docker = config$1.docker ?? {};
3622
+ let defaultImageName = "api";
3623
+ try {
3624
+ const pkg$1 = __require(`${process.cwd()}/package.json`);
3625
+ if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3626
+ } catch {}
3742
3627
  return {
3743
- registry: config$1.docker?.registry,
3744
- imageName,
3745
- projectName,
3746
- appName
3628
+ registry: docker.registry ?? "",
3629
+ imageName: docker.imageName ?? defaultImageName,
3630
+ baseImage: docker.baseImage ?? "node:22-alpine",
3631
+ port: docker.port ?? 3e3,
3632
+ compose: docker.compose
3747
3633
  };
3748
3634
  }
3749
-
3750
- //#endregion
3751
- //#region src/deploy/dokploy.ts
3752
- const logger$4 = console;
3753
3635
  /**
3754
- * Get the Dokploy API token from stored credentials or environment
3636
+ * Generate a Dockerfile for Next.js frontend apps using standalone output.
3637
+ * Uses turbo prune for monorepo optimization.
3638
+ * @internal Exported for testing
3755
3639
  */
3756
- async function getApiToken$1() {
3757
- const token = await getDokployToken();
3758
- if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
3759
- return token;
3640
+ function generateNextjsDockerfile(options) {
3641
+ const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3642
+ const pm = getPmConfig(packageManager);
3643
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3644
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3645
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3646
+ const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3647
+ const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3648
+ return `# syntax=docker/dockerfile:1
3649
+ # Next.js standalone Dockerfile with turbo prune optimization
3650
+
3651
+ # Stage 1: Prune monorepo
3652
+ FROM ${baseImage} AS pruner
3653
+
3654
+ WORKDIR /app
3655
+
3656
+ ${installPm}
3657
+
3658
+ COPY . .
3659
+
3660
+ # Prune to only include necessary packages
3661
+ RUN ${turboCmd} prune ${turboPackage} --docker
3662
+
3663
+ # Stage 2: Install dependencies
3664
+ FROM ${baseImage} AS deps
3665
+
3666
+ WORKDIR /app
3667
+
3668
+ ${installPm}
3669
+
3670
+ # Copy pruned lockfile and package.jsons
3671
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3672
+ COPY --from=pruner /app/out/json/ ./
3673
+
3674
+ # Install dependencies
3675
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3676
+ ${turboInstallCmd}
3677
+
3678
+ # Stage 3: Build
3679
+ FROM deps AS builder
3680
+
3681
+ WORKDIR /app
3682
+
3683
+ # Build-time args for public API URLs (populated by gkm deploy)
3684
+ # These get baked into the Next.js build as public environment variables
3685
+ ${publicUrlArgDeclarations}
3686
+
3687
+ # Convert ARGs to ENVs for Next.js build
3688
+ ${publicUrlEnvDeclarations}
3689
+
3690
+ # Copy pruned source
3691
+ COPY --from=pruner /app/out/full/ ./
3692
+
3693
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3694
+ # Using wildcard to make it optional for single-app projects
3695
+ COPY --from=pruner /app/tsconfig.* ./
3696
+
3697
+ # Ensure public directory exists (may be empty for scaffolded projects)
3698
+ RUN mkdir -p ${appPath}/public
3699
+
3700
+ # Set Next.js to produce standalone output
3701
+ ENV NEXT_TELEMETRY_DISABLED=1
3702
+
3703
+ # Build the application
3704
+ RUN ${turboCmd} run build --filter=${turboPackage}
3705
+
3706
+ # Stage 4: Production
3707
+ FROM ${baseImage} AS runner
3708
+
3709
+ WORKDIR /app
3710
+
3711
+ # Install tini for proper signal handling
3712
+ RUN apk add --no-cache tini
3713
+
3714
+ # Create non-root user
3715
+ RUN addgroup --system --gid 1001 nodejs && \\
3716
+ adduser --system --uid 1001 nextjs
3717
+
3718
+ # Set environment
3719
+ ENV NODE_ENV=production
3720
+ ENV NEXT_TELEMETRY_DISABLED=1
3721
+ ENV PORT=${port}
3722
+ ENV HOSTNAME="0.0.0.0"
3723
+
3724
+ # Copy static files and standalone output
3725
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3726
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3727
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3728
+
3729
+ USER nextjs
3730
+
3731
+ EXPOSE ${port}
3732
+
3733
+ ENTRYPOINT ["/sbin/tini", "--"]
3734
+ CMD ["node", "${appPath}/server.js"]
3735
+ `;
3760
3736
  }
3761
3737
  /**
3762
- * Create a Dokploy API client
3738
+ * Generate a Dockerfile for backend apps in a workspace.
3739
+ * Uses turbo prune for monorepo optimization.
3740
+ * @internal Exported for testing
3763
3741
  */
3764
- async function createApi$1(endpoint) {
3765
- const token = await getApiToken$1();
3766
- return new DokployApi({
3767
- baseUrl: endpoint,
3768
- token
3769
- });
3742
+ function generateBackendDockerfile(options) {
3743
+ const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3744
+ const pm = getPmConfig(packageManager);
3745
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3746
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3747
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3748
+ return `# syntax=docker/dockerfile:1
3749
+ # Backend Dockerfile with turbo prune optimization
3750
+
3751
+ # Stage 1: Prune monorepo
3752
+ FROM ${baseImage} AS pruner
3753
+
3754
+ WORKDIR /app
3755
+
3756
+ ${installPm}
3757
+
3758
+ COPY . .
3759
+
3760
+ # Prune to only include necessary packages
3761
+ RUN ${turboCmd} prune ${turboPackage} --docker
3762
+
3763
+ # Stage 2: Install dependencies
3764
+ FROM ${baseImage} AS deps
3765
+
3766
+ WORKDIR /app
3767
+
3768
+ ${installPm}
3769
+
3770
+ # Copy pruned lockfile and package.jsons
3771
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3772
+ COPY --from=pruner /app/out/json/ ./
3773
+
3774
+ # Install dependencies
3775
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3776
+ ${turboInstallCmd}
3777
+
3778
+ # Stage 3: Build
3779
+ FROM deps AS builder
3780
+
3781
+ WORKDIR /app
3782
+
3783
+ # Build-time args for encrypted secrets
3784
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3785
+ ARG GKM_CREDENTIALS_IV=""
3786
+
3787
+ # Copy pruned source
3788
+ COPY --from=pruner /app/out/full/ ./
3789
+
3790
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3791
+ # Using wildcard to make it optional for single-app projects
3792
+ COPY --from=pruner /app/gkm.config.* ./
3793
+ COPY --from=pruner /app/tsconfig.* ./
3794
+
3795
+ # Write encrypted credentials for gkm build to embed
3796
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3797
+ mkdir -p ${appPath}/.gkm && \
3798
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3799
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3800
+ fi
3801
+
3802
+ # Build production server using gkm
3803
+ RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3804
+
3805
+ # Stage 4: Production
3806
+ FROM ${baseImage} AS runner
3807
+
3808
+ WORKDIR /app
3809
+
3810
+ RUN apk add --no-cache tini
3811
+
3812
+ RUN addgroup --system --gid 1001 nodejs && \\
3813
+ adduser --system --uid 1001 hono
3814
+
3815
+ # Copy bundled server
3816
+ COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3817
+
3818
+ ENV NODE_ENV=production
3819
+ ENV PORT=${port}
3820
+
3821
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3822
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3823
+
3824
+ USER hono
3825
+
3826
+ EXPOSE ${port}
3827
+
3828
+ ENTRYPOINT ["/sbin/tini", "--"]
3829
+ CMD ["node", "server.mjs"]
3830
+ `;
3770
3831
  }
3771
3832
  /**
3772
- * Deploy to Dokploy
3833
+ * Generate a Dockerfile for apps with a custom entry point.
3834
+ * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3835
+ * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3836
+ * @internal Exported for testing
3773
3837
  */
3774
- async function deployDokploy(options) {
3775
- const { stage, imageRef, masterKey, config: config$1 } = options;
3776
- logger$4.log(`\n🎯 Deploying to Dokploy...`);
3777
- logger$4.log(` Endpoint: ${config$1.endpoint}`);
3778
- logger$4.log(` Application: ${config$1.applicationId}`);
3779
- const api = await createApi$1(config$1.endpoint);
3780
- logger$4.log(` Configuring Docker image: ${imageRef}`);
3781
- const registryOptions = {};
3782
- if (config$1.registryId) {
3783
- registryOptions.registryId = config$1.registryId;
3784
- logger$4.log(` Using Dokploy registry: ${config$1.registryId}`);
3785
- } else {
3786
- const storedRegistryId = await getDokployRegistryId();
3787
- if (storedRegistryId) {
3788
- registryOptions.registryId = storedRegistryId;
3789
- logger$4.log(` Using stored Dokploy registry: ${storedRegistryId}`);
3790
- } else if (config$1.registryCredentials) {
3791
- registryOptions.username = config$1.registryCredentials.username;
3792
- registryOptions.password = config$1.registryCredentials.password;
3793
- registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
3794
- logger$4.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
3795
- } else {
3796
- const username = process.env.DOCKER_REGISTRY_USERNAME;
3797
- const password = process.env.DOCKER_REGISTRY_PASSWORD;
3798
- const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
3799
- if (username && password && registryUrl) {
3800
- registryOptions.username = username;
3801
- registryOptions.password = password;
3802
- registryOptions.registryUrl = registryUrl;
3803
- logger$4.log(` Using registry credentials from environment`);
3804
- }
3805
- }
3806
- }
3807
- await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
3808
- logger$4.log(" ✓ Docker provider configured");
3809
- const envVars = {};
3810
- if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
3811
- if (Object.keys(envVars).length > 0) {
3812
- logger$4.log(" Updating environment variables...");
3813
- const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
3814
- await api.saveApplicationEnv(config$1.applicationId, envString);
3815
- logger$4.log(" ✓ Environment variables updated");
3816
- }
3817
- logger$4.log(" Triggering deployment...");
3818
- await api.deployApplication(config$1.applicationId);
3819
- logger$4.log(" ✓ Deployment triggered");
3820
- logger$4.log("\n✅ Dokploy deployment initiated!");
3821
- logger$4.log(`\n📋 Deployment details:`);
3822
- logger$4.log(` Image: ${imageRef}`);
3823
- logger$4.log(` Stage: ${stage}`);
3824
- logger$4.log(` Application ID: ${config$1.applicationId}`);
3825
- if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
3826
- const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
3827
- logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
3828
- return {
3829
- imageRef,
3830
- masterKey,
3831
- url: deploymentUrl
3832
- };
3838
+ function generateEntryDockerfile(options) {
3839
+ const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3840
+ const pm = getPmConfig(packageManager);
3841
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3842
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3843
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3844
+ return `# syntax=docker/dockerfile:1
3845
+ # Entry-based Dockerfile with turbo prune + tsdown bundling
3846
+
3847
+ # Stage 1: Prune monorepo
3848
+ FROM ${baseImage} AS pruner
3849
+
3850
+ WORKDIR /app
3851
+
3852
+ ${installPm}
3853
+
3854
+ COPY . .
3855
+
3856
+ # Prune to only include necessary packages
3857
+ RUN ${turboCmd} prune ${turboPackage} --docker
3858
+
3859
+ # Stage 2: Install dependencies
3860
+ FROM ${baseImage} AS deps
3861
+
3862
+ WORKDIR /app
3863
+
3864
+ ${installPm}
3865
+
3866
+ # Copy pruned lockfile and package.jsons
3867
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3868
+ COPY --from=pruner /app/out/json/ ./
3869
+
3870
+ # Install dependencies
3871
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3872
+ ${turboInstallCmd}
3873
+
3874
+ # Stage 3: Build with tsdown
3875
+ FROM deps AS builder
3876
+
3877
+ WORKDIR /app
3878
+
3879
+ # Build-time args for encrypted secrets
3880
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3881
+ ARG GKM_CREDENTIALS_IV=""
3882
+
3883
+ # Copy pruned source
3884
+ COPY --from=pruner /app/out/full/ ./
3885
+
3886
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3887
+ # Using wildcard to make it optional for single-app projects
3888
+ COPY --from=pruner /app/tsconfig.* ./
3889
+
3890
+ # Write encrypted credentials for tsdown to embed via define
3891
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3892
+ mkdir -p ${appPath}/.gkm && \
3893
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3894
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3895
+ fi
3896
+
3897
+ # Bundle entry point with esbuild (outputs to dist/index.mjs)
3898
+ # Creates a fully standalone bundle with all dependencies included
3899
+ # Use define to embed credentials if present
3900
+ RUN cd ${appPath} && \
3901
+ if [ -f .gkm/credentials.enc ]; then \
3902
+ CREDS=$(cat .gkm/credentials.enc) && \
3903
+ IV=$(cat .gkm/credentials.iv) && \
3904
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3905
+ --outfile=dist/index.mjs --packages=bundle \
3906
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3907
+ --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3908
+ --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3909
+ else \
3910
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3911
+ --outfile=dist/index.mjs --packages=bundle \
3912
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3913
+ fi
3914
+
3915
+ # Stage 4: Production
3916
+ FROM ${baseImage} AS runner
3917
+
3918
+ WORKDIR /app
3919
+
3920
+ RUN apk add --no-cache tini
3921
+
3922
+ RUN addgroup --system --gid 1001 nodejs && \\
3923
+ adduser --system --uid 1001 app
3924
+
3925
+ # Copy bundled output only (no node_modules needed - fully bundled)
3926
+ COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3927
+
3928
+ ENV NODE_ENV=production
3929
+ ENV PORT=${port}
3930
+
3931
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3932
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3933
+
3934
+ USER app
3935
+
3936
+ EXPOSE ${port}
3937
+
3938
+ ENTRYPOINT ["/sbin/tini", "--"]
3939
+ CMD ["node", "index.mjs"]
3940
+ `;
3833
3941
  }
3834
3942
 
3835
3943
  //#endregion
3836
- //#region src/deploy/state.ts
3837
- /**
3838
- * Get the state file path for a stage
3839
- */
3840
- function getStateFilePath(workspaceRoot, stage) {
3841
- return join(workspaceRoot, ".gkm", `deploy-${stage}.json`);
3842
- }
3944
+ //#region src/docker/index.ts
3945
+ const logger$5 = console;
3843
3946
  /**
3844
- * Read the deploy state for a stage
3845
- * Returns null if state file doesn't exist
3947
+ * Docker command implementation
3948
+ * Generates Dockerfile, docker-compose.yml, and related files
3949
+ *
3950
+ * Default: Multi-stage Dockerfile that builds from source inside Docker
3951
+ * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3846
3952
  */
3847
- async function readStageState(workspaceRoot, stage) {
3848
- const filePath = getStateFilePath(workspaceRoot, stage);
3849
- try {
3850
- const content = await readFile(filePath, "utf-8");
3851
- return JSON.parse(content);
3852
- } catch (error) {
3853
- if (error.code === "ENOENT") return null;
3854
- console.warn(`Warning: Could not read deploy state: ${error}`);
3855
- return null;
3953
+ async function dockerCommand(options) {
3954
+ const loadedConfig = await loadWorkspaceConfig();
3955
+ if (loadedConfig.type === "workspace") {
3956
+ logger$5.log("📦 Detected workspace configuration");
3957
+ return workspaceDockerCommand(loadedConfig.workspace, options);
3856
3958
  }
3857
- }
3858
- /**
3859
- * Write the deploy state for a stage
3860
- */
3861
- async function writeStageState(workspaceRoot, stage, state) {
3862
- const filePath = getStateFilePath(workspaceRoot, stage);
3863
- const dir = join(workspaceRoot, ".gkm");
3864
- await mkdir(dir, { recursive: true });
3865
- state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
3866
- await writeFile(filePath, JSON.stringify(state, null, 2));
3867
- }
3868
- /**
3869
- * Create a new empty state for a stage
3870
- */
3871
- function createEmptyState(stage, environmentId) {
3872
- return {
3873
- provider: "dokploy",
3874
- stage,
3875
- environmentId,
3876
- applications: {},
3877
- services: {},
3878
- lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
3959
+ const config$1 = await loadConfig();
3960
+ const dockerConfig = resolveDockerConfig$1(config$1);
3961
+ const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
3962
+ const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3963
+ const useSlim = options.slim === true;
3964
+ if (useSlim) {
3965
+ const distDir = join(process.cwd(), ".gkm", "server", "dist");
3966
+ const hasBuild = existsSync(join(distDir, "server.mjs"));
3967
+ if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3968
+ }
3969
+ const dockerDir = join(process.cwd(), ".gkm", "docker");
3970
+ await mkdir(dockerDir, { recursive: true });
3971
+ const packageManager = detectPackageManager$1();
3972
+ const inMonorepo = isMonorepo();
3973
+ const hasTurbo = hasTurboConfig();
3974
+ let useTurbo = options.turbo ?? false;
3975
+ if (inMonorepo && !useSlim) if (hasTurbo) {
3976
+ useTurbo = true;
3977
+ logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
3978
+ } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3979
+ let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3980
+ if (useTurbo && !options.turboPackage) try {
3981
+ const pkg$1 = __require(`${process.cwd()}/package.json`);
3982
+ if (pkg$1.name) {
3983
+ turboPackage = pkg$1.name;
3984
+ logger$5.log(` Turbo package: ${turboPackage}`);
3985
+ }
3986
+ } catch {}
3987
+ const templateOptions = {
3988
+ imageName: dockerConfig.imageName,
3989
+ baseImage: dockerConfig.baseImage,
3990
+ port: dockerConfig.port,
3991
+ healthCheckPath,
3992
+ prebuilt: useSlim,
3993
+ turbo: useTurbo,
3994
+ turboPackage,
3995
+ packageManager
3996
+ };
3997
+ const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3998
+ const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3999
+ const dockerfilePath = join(dockerDir, "Dockerfile");
4000
+ await writeFile(dockerfilePath, dockerfile);
4001
+ logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
4002
+ const composeOptions = {
4003
+ imageName: dockerConfig.imageName,
4004
+ registry: options.registry ?? dockerConfig.registry,
4005
+ port: dockerConfig.port,
4006
+ healthCheckPath,
4007
+ services: dockerConfig.compose?.services ?? {}
3879
4008
  };
4009
+ const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
4010
+ const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
4011
+ const composePath = join(dockerDir, "docker-compose.yml");
4012
+ await writeFile(composePath, dockerCompose);
4013
+ logger$5.log("Generated: .gkm/docker/docker-compose.yml");
4014
+ const dockerignore = generateDockerignore();
4015
+ const dockerignorePath = join(process.cwd(), ".dockerignore");
4016
+ await writeFile(dockerignorePath, dockerignore);
4017
+ logger$5.log("Generated: .dockerignore (project root)");
4018
+ const entrypoint = generateDockerEntrypoint();
4019
+ const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
4020
+ await writeFile(entrypointPath, entrypoint);
4021
+ logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
4022
+ const result = {
4023
+ dockerfile: dockerfilePath,
4024
+ dockerCompose: composePath,
4025
+ dockerignore: dockerignorePath,
4026
+ entrypoint: entrypointPath
4027
+ };
4028
+ if (options.build) await buildDockerImage(dockerConfig.imageName, options);
4029
+ if (options.push) await pushDockerImage(dockerConfig.imageName, options);
4030
+ return result;
3880
4031
  }
3881
4032
  /**
3882
- * Get application ID from state
3883
- */
3884
- function getApplicationId(state, appName) {
3885
- return state?.applications[appName];
3886
- }
3887
- /**
3888
- * Set application ID in state (mutates state)
3889
- */
3890
- function setApplicationId(state, appName, applicationId) {
3891
- state.applications[appName] = applicationId;
3892
- }
3893
- /**
3894
- * Get postgres ID from state
3895
- */
3896
- function getPostgresId(state) {
3897
- return state?.services.postgresId;
3898
- }
3899
- /**
3900
- * Set postgres ID in state (mutates state)
4033
+ * Ensure lockfile exists in the build context
4034
+ * For monorepos, copies from workspace root if needed
4035
+ * Returns cleanup function if file was copied
3901
4036
  */
3902
- function setPostgresId(state, postgresId) {
3903
- state.services.postgresId = postgresId;
4037
+ function ensureLockfile(cwd) {
4038
+ const lockfilePath = findLockfilePath(cwd);
4039
+ if (!lockfilePath) {
4040
+ logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
4041
+ return null;
4042
+ }
4043
+ const lockfileName = basename(lockfilePath);
4044
+ const localLockfile = join(cwd, lockfileName);
4045
+ if (lockfilePath === localLockfile) return null;
4046
+ logger$5.log(` Copying ${lockfileName} from monorepo root...`);
4047
+ copyFileSync(lockfilePath, localLockfile);
4048
+ return () => {
4049
+ try {
4050
+ unlinkSync(localLockfile);
4051
+ } catch {}
4052
+ };
3904
4053
  }
3905
4054
  /**
3906
- * Get redis ID from state
4055
+ * Build Docker image
4056
+ * Uses BuildKit for cache mount support
3907
4057
  */
3908
- function getRedisId(state) {
3909
- return state?.services.redisId;
4058
+ async function buildDockerImage(imageName, options) {
4059
+ const tag = options.tag ?? "latest";
4060
+ const registry = options.registry;
4061
+ const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
4062
+ logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
4063
+ const cwd = process.cwd();
4064
+ const cleanup = ensureLockfile(cwd);
4065
+ try {
4066
+ execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
4067
+ cwd,
4068
+ stdio: "inherit",
4069
+ env: {
4070
+ ...process.env,
4071
+ DOCKER_BUILDKIT: "1"
4072
+ }
4073
+ });
4074
+ logger$5.log(`✅ Docker image built: ${fullImageName}`);
4075
+ } catch (error) {
4076
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4077
+ } finally {
4078
+ cleanup?.();
4079
+ }
3910
4080
  }
3911
4081
  /**
3912
- * Set redis ID in state (mutates state)
4082
+ * Push Docker image to registry
3913
4083
  */
3914
- function setRedisId(state, redisId) {
3915
- state.services.redisId = redisId;
4084
+ async function pushDockerImage(imageName, options) {
4085
+ const tag = options.tag ?? "latest";
4086
+ const registry = options.registry;
4087
+ if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
4088
+ const fullImageName = `${registry}/${imageName}:${tag}`;
4089
+ logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
4090
+ try {
4091
+ execSync(`docker push ${fullImageName}`, {
4092
+ cwd: process.cwd(),
4093
+ stdio: "inherit"
4094
+ });
4095
+ logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
4096
+ } catch (error) {
4097
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4098
+ }
3916
4099
  }
3917
-
3918
- //#endregion
3919
- //#region src/deploy/dns/hostinger-api.ts
3920
- /**
3921
- * Hostinger DNS API client
3922
- *
3923
- * API Documentation: https://developers.hostinger.com/
3924
- * Authentication: Bearer token from hpanel.hostinger.com/profile/api
3925
- */
3926
- const HOSTINGER_API_BASE = "https://developers.hostinger.com";
3927
4100
  /**
3928
- * Hostinger API error
4101
+ * Get the package name from package.json in an app directory.
3929
4102
  */
3930
- var HostingerApiError = class extends Error {
3931
- constructor(message, status, statusText, errors) {
3932
- super(message);
3933
- this.status = status;
3934
- this.statusText = statusText;
3935
- this.errors = errors;
3936
- this.name = "HostingerApiError";
4103
+ function getAppPackageName(appPath) {
4104
+ try {
4105
+ const pkgPath = join(appPath, "package.json");
4106
+ if (!existsSync(pkgPath)) return void 0;
4107
+ const content = readFileSync(pkgPath, "utf-8");
4108
+ const pkg$1 = JSON.parse(content);
4109
+ return pkg$1.name;
4110
+ } catch {
4111
+ return void 0;
3937
4112
  }
3938
- };
4113
+ }
3939
4114
  /**
3940
- * Hostinger DNS API client
3941
- *
3942
- * @example
3943
- * ```ts
3944
- * const api = new HostingerApi(token);
3945
- *
3946
- * // Get all records for a domain
3947
- * const records = await api.getRecords('traflabs.io');
3948
- *
3949
- * // Create/update records
3950
- * await api.upsertRecords('traflabs.io', [
3951
- * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
3952
- * ]);
3953
- * ```
4115
+ * Generate Dockerfiles for all apps in a workspace.
4116
+ * @internal Exported for testing
3954
4117
  */
3955
- var HostingerApi = class {
3956
- token;
3957
- constructor(token) {
3958
- this.token = token;
3959
- }
3960
- /**
3961
- * Make a request to the Hostinger API
3962
- */
3963
- async request(method, endpoint, body) {
3964
- const url = `${HOSTINGER_API_BASE}${endpoint}`;
3965
- const response = await fetch(url, {
3966
- method,
3967
- headers: {
3968
- "Content-Type": "application/json",
3969
- Authorization: `Bearer ${this.token}`
3970
- },
3971
- body: body ? JSON.stringify(body) : void 0
4118
+ async function workspaceDockerCommand(workspace, options) {
4119
+ const results = [];
4120
+ const apps = Object.entries(workspace.apps);
4121
+ logger$5.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
4122
+ const dockerDir = join(workspace.root, ".gkm", "docker");
4123
+ await mkdir(dockerDir, { recursive: true });
4124
+ const packageManager = detectPackageManager$1(workspace.root);
4125
+ logger$5.log(` Package manager: ${packageManager}`);
4126
+ for (const [appName, app] of apps) {
4127
+ const appPath = app.path;
4128
+ const fullAppPath = join(workspace.root, appPath);
4129
+ const turboPackage = getAppPackageName(fullAppPath) ?? appName;
4130
+ const imageName = appName;
4131
+ const hasEntry = !!app.entry;
4132
+ const buildType = hasEntry ? "entry" : app.type;
4133
+ logger$5.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
4134
+ let dockerfile;
4135
+ if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
4136
+ imageName,
4137
+ baseImage: "node:22-alpine",
4138
+ port: app.port,
4139
+ appPath,
4140
+ turboPackage,
4141
+ packageManager
3972
4142
  });
3973
- if (!response.ok) {
3974
- let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
3975
- let errors;
3976
- try {
3977
- const errorBody = await response.json();
3978
- if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
3979
- errors = errorBody.errors;
3980
- } catch {}
3981
- throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
3982
- }
3983
- const text = await response.text();
3984
- if (!text || text.trim() === "") return void 0;
3985
- return JSON.parse(text);
3986
- }
3987
- /**
3988
- * Get all DNS records for a domain
3989
- *
3990
- * @param domain - Root domain (e.g., 'traflabs.io')
3991
- */
3992
- async getRecords(domain) {
3993
- const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
3994
- return response.data || [];
3995
- }
3996
- /**
3997
- * Create or update DNS records
3998
- *
3999
- * @param domain - Root domain (e.g., 'traflabs.io')
4000
- * @param records - Records to create/update
4001
- * @param overwrite - If true, replaces all existing records. If false, merges with existing.
4002
- */
4003
- async upsertRecords(domain, records, overwrite = false) {
4004
- await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
4005
- overwrite,
4006
- zone: records
4143
+ else if (app.entry) dockerfile = generateEntryDockerfile({
4144
+ imageName,
4145
+ baseImage: "node:22-alpine",
4146
+ port: app.port,
4147
+ appPath,
4148
+ entry: app.entry,
4149
+ turboPackage,
4150
+ packageManager,
4151
+ healthCheckPath: "/health"
4007
4152
  });
4008
- }
4009
- /**
4010
- * Validate DNS records before applying
4011
- *
4012
- * @param domain - Root domain (e.g., 'traflabs.io')
4013
- * @param records - Records to validate
4014
- * @returns true if valid, throws if invalid
4015
- */
4016
- async validateRecords(domain, records) {
4017
- await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
4018
- overwrite: false,
4019
- zone: records
4153
+ else dockerfile = generateBackendDockerfile({
4154
+ imageName,
4155
+ baseImage: "node:22-alpine",
4156
+ port: app.port,
4157
+ appPath,
4158
+ turboPackage,
4159
+ packageManager,
4160
+ healthCheckPath: "/health"
4161
+ });
4162
+ const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
4163
+ await writeFile(dockerfilePath, dockerfile);
4164
+ logger$5.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
4165
+ results.push({
4166
+ appName,
4167
+ type: app.type,
4168
+ dockerfile: dockerfilePath,
4169
+ imageName
4020
4170
  });
4021
- return true;
4022
- }
4023
- /**
4024
- * Delete specific DNS records
4025
- *
4026
- * @param domain - Root domain (e.g., 'traflabs.io')
4027
- * @param filters - Filters to match records for deletion
4028
- */
4029
- async deleteRecords(domain, filters) {
4030
- await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
4031
- }
4032
- /**
4033
- * Check if a specific record exists
4034
- *
4035
- * @param domain - Root domain (e.g., 'traflabs.io')
4036
- * @param name - Subdomain name (e.g., 'api.joemoer')
4037
- * @param type - Record type (e.g., 'A')
4038
- */
4039
- async recordExists(domain, name$1, type$1 = "A") {
4040
- const records = await this.getRecords(domain);
4041
- return records.some((r) => r.name === name$1 && r.type === type$1);
4042
4171
  }
4043
- /**
4044
- * Create a single A record if it doesn't exist
4045
- *
4046
- * @param domain - Root domain (e.g., 'traflabs.io')
4047
- * @param subdomain - Subdomain name (e.g., 'api.joemoer')
4048
- * @param ip - IP address to point to
4049
- * @param ttl - TTL in seconds (default: 300)
4050
- * @returns true if created, false if already exists
4051
- */
4052
- async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
4053
- const exists = await this.recordExists(domain, subdomain, "A");
4054
- if (exists) return false;
4055
- await this.upsertRecords(domain, [{
4056
- name: subdomain,
4057
- type: "A",
4058
- ttl,
4059
- records: [{ content: ip }]
4060
- }]);
4061
- return true;
4172
+ const dockerignore = generateDockerignore();
4173
+ const dockerignorePath = join(workspace.root, ".dockerignore");
4174
+ await writeFile(dockerignorePath, dockerignore);
4175
+ logger$5.log(`\n Generated: .dockerignore (workspace root)`);
4176
+ const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
4177
+ const composePath = join(dockerDir, "docker-compose.yml");
4178
+ await writeFile(composePath, dockerCompose);
4179
+ logger$5.log(` Generated: .gkm/docker/docker-compose.yml`);
4180
+ logger$5.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
4181
+ logger$5.log("\n📋 Build commands:");
4182
+ for (const result of results) {
4183
+ const icon = result.type === "backend" ? "⚙️" : "🌐";
4184
+ logger$5.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
4062
4185
  }
4063
- };
4186
+ logger$5.log("\n📋 Run all services:");
4187
+ logger$5.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
4188
+ return {
4189
+ apps: results,
4190
+ dockerCompose: composePath,
4191
+ dockerignore: dockerignorePath
4192
+ };
4193
+ }
4064
4194
 
4065
4195
  //#endregion
4066
- //#region src/deploy/dns/index.ts
4067
- const logger$3 = console;
4196
+ //#region src/deploy/docker.ts
4068
4197
  /**
4069
- * Resolve IP address from a hostname
4198
+ * Get app name from package.json in the current working directory
4199
+ * Used for Dokploy app/project naming
4070
4200
  */
4071
- async function resolveHostnameToIp(hostname) {
4201
+ function getAppNameFromCwd$1() {
4202
+ const packageJsonPath = join(process.cwd(), "package.json");
4203
+ if (!existsSync(packageJsonPath)) return void 0;
4072
4204
  try {
4073
- const addresses = await lookup(hostname, { family: 4 });
4074
- return addresses.address;
4075
- } catch (error) {
4076
- throw new Error(`Failed to resolve IP for ${hostname}: ${error instanceof Error ? error.message : "Unknown error"}`);
4077
- }
4205
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
4206
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4207
+ } catch {}
4208
+ return void 0;
4078
4209
  }
4079
4210
  /**
4080
- * Extract subdomain from full hostname relative to root domain
4081
- *
4082
- * @example
4083
- * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
4084
- * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
4211
+ * Get app name from package.json adjacent to the lockfile (project root)
4212
+ * Used for Docker image naming
4085
4213
  */
4086
- function extractSubdomain(hostname, rootDomain) {
4087
- if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
4088
- const subdomain = hostname.slice(0, -(rootDomain.length + 1));
4089
- return subdomain || "@";
4214
+ function getAppNameFromPackageJson() {
4215
+ const cwd = process.cwd();
4216
+ const lockfilePath = findLockfilePath(cwd);
4217
+ if (!lockfilePath) return void 0;
4218
+ const projectRoot = dirname(lockfilePath);
4219
+ const packageJsonPath = join(projectRoot, "package.json");
4220
+ if (!existsSync(packageJsonPath)) return void 0;
4221
+ try {
4222
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
4223
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4224
+ } catch {}
4225
+ return void 0;
4090
4226
  }
4227
+ const logger$4 = console;
4091
4228
  /**
4092
- * Generate required DNS records for a deployment
4229
+ * Get the full image reference
4093
4230
  */
4094
- function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
4095
- const records = [];
4096
- for (const [appName, hostname] of appHostnames) {
4097
- const subdomain = extractSubdomain(hostname, rootDomain);
4098
- records.push({
4099
- hostname,
4100
- subdomain,
4101
- type: "A",
4102
- value: serverIp,
4103
- appName
4231
+ function getImageRef(registry, imageName, tag) {
4232
+ if (registry) return `${registry}/${imageName}:${tag}`;
4233
+ return `${imageName}:${tag}`;
4234
+ }
4235
+ /**
4236
+ * Build Docker image
4237
+ * @param imageRef - Full image reference (registry/name:tag)
4238
+ * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
4239
+ * @param buildArgs - Build arguments to pass to docker build
4240
+ */
4241
+ async function buildImage(imageRef, appName, buildArgs) {
4242
+ logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
4243
+ const cwd = process.cwd();
4244
+ const lockfilePath = findLockfilePath(cwd);
4245
+ const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
4246
+ const inMonorepo = lockfileDir !== cwd;
4247
+ if (appName || inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
4248
+ else logger$4.log(" Generating Dockerfile...");
4249
+ await dockerCommand({});
4250
+ const dockerfileSuffix = appName ? `.${appName}` : "";
4251
+ const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
4252
+ const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
4253
+ if (buildCwd !== cwd) logger$4.log(` Building from workspace root: ${buildCwd}`);
4254
+ const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
4255
+ try {
4256
+ const cmd = [
4257
+ "DOCKER_BUILDKIT=1 docker build",
4258
+ "--platform linux/amd64",
4259
+ `-f ${dockerfilePath}`,
4260
+ `-t ${imageRef}`,
4261
+ buildArgsString,
4262
+ "."
4263
+ ].filter(Boolean).join(" ");
4264
+ execSync(cmd, {
4265
+ cwd: buildCwd,
4266
+ stdio: "inherit",
4267
+ env: {
4268
+ ...process.env,
4269
+ DOCKER_BUILDKIT: "1"
4270
+ }
4104
4271
  });
4272
+ logger$4.log(`✅ Image built: ${imageRef}`);
4273
+ } catch (error) {
4274
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4105
4275
  }
4106
- return records;
4107
4276
  }
4108
4277
  /**
4109
- * Print DNS records table
4278
+ * Push Docker image to registry
4110
4279
  */
4111
- function printDnsRecordsTable(records, rootDomain) {
4112
- logger$3.log("\n 📋 DNS Records for " + rootDomain + ":");
4113
- logger$3.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
4114
- logger$3.log(" │ Subdomain │ Type │ Value │ Status │");
4115
- logger$3.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
4116
- for (const record of records) {
4117
- const subdomain = record.subdomain.padEnd(35);
4118
- const type$1 = record.type.padEnd(4);
4119
- const value = record.value.padEnd(15);
4120
- let status;
4121
- if (record.error) status = "✗";
4122
- else if (record.created) status = "✓ new";
4123
- else if (record.existed) status = "✓";
4124
- else status = "?";
4125
- logger$3.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
4280
+ async function pushImage(imageRef) {
4281
+ logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
4282
+ try {
4283
+ execSync(`docker push ${imageRef}`, {
4284
+ cwd: process.cwd(),
4285
+ stdio: "inherit"
4286
+ });
4287
+ logger$4.log(`✅ Image pushed: ${imageRef}`);
4288
+ } catch (error) {
4289
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4126
4290
  }
4127
- logger$3.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
4128
4291
  }
4129
4292
  /**
4130
- * Print DNS records in a simple format for manual setup
4293
+ * Deploy using Docker (build and optionally push image)
4131
4294
  */
4132
- function printDnsRecordsSimple(records, rootDomain) {
4133
- logger$3.log("\n 📋 Required DNS Records:");
4134
- logger$3.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
4135
- for (const record of records) logger$3.log(` ${record.subdomain} → ${record.value} (A record)`);
4136
- logger$3.log("");
4295
+ async function deployDocker(options) {
4296
+ const { stage, tag, skipPush, masterKey, config: config$1, buildArgs } = options;
4297
+ const imageName = config$1.imageName;
4298
+ const imageRef = getImageRef(config$1.registry, imageName, tag);
4299
+ await buildImage(imageRef, config$1.appName, buildArgs);
4300
+ if (!skipPush) if (!config$1.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
4301
+ else await pushImage(imageRef);
4302
+ logger$4.log("\n✅ Docker deployment ready!");
4303
+ logger$4.log(`\n📋 Deployment details:`);
4304
+ logger$4.log(` Image: ${imageRef}`);
4305
+ logger$4.log(` Stage: ${stage}`);
4306
+ if (masterKey) {
4307
+ logger$4.log(`\n🔐 Deploy with this environment variable:`);
4308
+ logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
4309
+ logger$4.log("\n Example docker run:");
4310
+ logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
4311
+ }
4312
+ return {
4313
+ imageRef,
4314
+ masterKey
4315
+ };
4137
4316
  }
4138
4317
  /**
4139
- * Prompt for input (reuse from deploy/index.ts pattern)
4318
+ * Resolve Docker deploy config from gkm config
4319
+ * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
4320
+ * - projectName: from root package.json, or 'app' (for Dokploy project)
4321
+ * - appName: from cwd package.json, or projectName (for Dokploy app within project)
4140
4322
  */
4141
- async function promptForToken(message) {
4142
- const { stdin: stdin$1, stdout: stdout$1 } = await import("node:process");
4143
- if (!stdin$1.isTTY) throw new Error("Interactive input required for Hostinger token.");
4144
- stdout$1.write(message);
4145
- return new Promise((resolve$1) => {
4146
- let value = "";
4147
- const onData = (char) => {
4148
- const c = char.toString();
4149
- if (c === "\n" || c === "\r") {
4150
- stdin$1.setRawMode(false);
4151
- stdin$1.pause();
4152
- stdin$1.removeListener("data", onData);
4153
- stdout$1.write("\n");
4154
- resolve$1(value);
4155
- } else if (c === "") {
4156
- stdin$1.setRawMode(false);
4157
- stdin$1.pause();
4158
- stdout$1.write("\n");
4159
- process.exit(1);
4160
- } else if (c === "" || c === "\b") {
4161
- if (value.length > 0) value = value.slice(0, -1);
4162
- } else value += c;
4163
- };
4164
- stdin$1.setRawMode(true);
4165
- stdin$1.resume();
4166
- stdin$1.on("data", onData);
4167
- });
4323
+ function resolveDockerConfig(config$1) {
4324
+ const projectName = getAppNameFromPackageJson() ?? "app";
4325
+ const appName = getAppNameFromCwd$1() ?? projectName;
4326
+ const imageName = config$1.docker?.imageName ?? appName;
4327
+ return {
4328
+ registry: config$1.docker?.registry,
4329
+ imageName,
4330
+ projectName,
4331
+ appName
4332
+ };
4168
4333
  }
4334
+
4335
+ //#endregion
4336
+ //#region src/deploy/dokploy.ts
4337
+ const logger$3 = console;
4169
4338
  /**
4170
- * Create DNS records using the configured provider
4339
+ * Get the Dokploy API token from stored credentials or environment
4171
4340
  */
4172
- async function createDnsRecords(records, dnsConfig) {
4173
- const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
4174
- if (provider === "manual") return records.map((r) => ({
4175
- ...r,
4176
- created: false,
4177
- existed: false
4178
- }));
4179
- if (provider === "hostinger") return createHostingerRecords(records, rootDomain, ttl);
4180
- if (provider === "cloudflare") {
4181
- logger$3.log(" ⚠ Cloudflare DNS integration not yet implemented");
4182
- return records.map((r) => ({
4183
- ...r,
4184
- error: "Cloudflare not implemented"
4185
- }));
4186
- }
4187
- return records;
4341
+ async function getApiToken$1() {
4342
+ const token = await getDokployToken();
4343
+ if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
4344
+ return token;
4188
4345
  }
4189
4346
  /**
4190
- * Create DNS records at Hostinger
4347
+ * Create a Dokploy API client
4191
4348
  */
4192
- async function createHostingerRecords(records, rootDomain, ttl) {
4193
- let token = await getHostingerToken();
4194
- if (!token) {
4195
- logger$3.log("\n 📋 Hostinger API token not found.");
4196
- logger$3.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
4197
- try {
4198
- token = await promptForToken(" Hostinger API Token: ");
4199
- await storeHostingerToken(token);
4200
- logger$3.log(" ✓ Token saved");
4201
- } catch {
4202
- logger$3.log(" ⚠ Could not get token, skipping DNS creation");
4203
- return records.map((r) => ({
4204
- ...r,
4205
- error: "No API token"
4206
- }));
4207
- }
4208
- }
4209
- const api = new HostingerApi(token);
4210
- const results = [];
4211
- let existingRecords = [];
4212
- try {
4213
- existingRecords = await api.getRecords(rootDomain);
4214
- } catch (error) {
4215
- const message = error instanceof Error ? error.message : "Unknown error";
4216
- logger$3.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
4217
- return records.map((r) => ({
4218
- ...r,
4219
- error: message
4220
- }));
4221
- }
4222
- for (const record of records) {
4223
- const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
4224
- if (existing) {
4225
- results.push({
4226
- ...record,
4227
- existed: true,
4228
- created: false
4229
- });
4230
- continue;
4231
- }
4232
- try {
4233
- await api.upsertRecords(rootDomain, [{
4234
- name: record.subdomain,
4235
- type: "A",
4236
- ttl,
4237
- records: [{ content: record.value }]
4238
- }]);
4239
- results.push({
4240
- ...record,
4241
- created: true,
4242
- existed: false
4243
- });
4244
- } catch (error) {
4245
- const message = error instanceof Error ? error.message : "Unknown error";
4246
- results.push({
4247
- ...record,
4248
- error: message
4249
- });
4250
- }
4251
- }
4252
- return results;
4349
+ async function createApi$1(endpoint) {
4350
+ const token = await getApiToken$1();
4351
+ return new DokployApi({
4352
+ baseUrl: endpoint,
4353
+ token
4354
+ });
4253
4355
  }
4254
4356
  /**
4255
- * Main DNS orchestration function for deployments
4357
+ * Deploy to Dokploy
4256
4358
  */
4257
- async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
4258
- if (!dnsConfig) return null;
4259
- const { domain: rootDomain, autoCreate = true } = dnsConfig;
4260
- logger$3.log("\n🌐 Setting up DNS records...");
4261
- let serverIp;
4262
- try {
4263
- const endpointUrl = new URL(dokployEndpoint);
4264
- serverIp = await resolveHostnameToIp(endpointUrl.hostname);
4265
- logger$3.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
4266
- } catch (error) {
4267
- const message = error instanceof Error ? error.message : "Unknown error";
4268
- logger$3.log(` ⚠ Failed to resolve server IP: ${message}`);
4269
- return null;
4359
+ async function deployDokploy(options) {
4360
+ const { stage, imageRef, masterKey, config: config$1 } = options;
4361
+ logger$3.log(`\n🎯 Deploying to Dokploy...`);
4362
+ logger$3.log(` Endpoint: ${config$1.endpoint}`);
4363
+ logger$3.log(` Application: ${config$1.applicationId}`);
4364
+ const api = await createApi$1(config$1.endpoint);
4365
+ logger$3.log(` Configuring Docker image: ${imageRef}`);
4366
+ const registryOptions = {};
4367
+ if (config$1.registryId) {
4368
+ registryOptions.registryId = config$1.registryId;
4369
+ logger$3.log(` Using Dokploy registry: ${config$1.registryId}`);
4370
+ } else {
4371
+ const storedRegistryId = await getDokployRegistryId();
4372
+ if (storedRegistryId) {
4373
+ registryOptions.registryId = storedRegistryId;
4374
+ logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
4375
+ } else if (config$1.registryCredentials) {
4376
+ registryOptions.username = config$1.registryCredentials.username;
4377
+ registryOptions.password = config$1.registryCredentials.password;
4378
+ registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
4379
+ logger$3.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
4380
+ } else {
4381
+ const username = process.env.DOCKER_REGISTRY_USERNAME;
4382
+ const password = process.env.DOCKER_REGISTRY_PASSWORD;
4383
+ const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
4384
+ if (username && password && registryUrl) {
4385
+ registryOptions.username = username;
4386
+ registryOptions.password = password;
4387
+ registryOptions.registryUrl = registryUrl;
4388
+ logger$3.log(` Using registry credentials from environment`);
4389
+ }
4390
+ }
4270
4391
  }
4271
- const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
4272
- if (requiredRecords.length === 0) {
4273
- logger$3.log(" No DNS records needed");
4274
- return {
4275
- records: [],
4276
- success: true,
4277
- serverIp
4278
- };
4392
+ await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
4393
+ logger$3.log(" ✓ Docker provider configured");
4394
+ const envVars = {};
4395
+ if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
4396
+ if (Object.keys(envVars).length > 0) {
4397
+ logger$3.log(" Updating environment variables...");
4398
+ const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
4399
+ await api.saveApplicationEnv(config$1.applicationId, envString);
4400
+ logger$3.log(" ✓ Environment variables updated");
4279
4401
  }
4280
- let finalRecords;
4281
- if (autoCreate && dnsConfig.provider !== "manual") {
4282
- logger$3.log(` Creating DNS records at ${dnsConfig.provider}...`);
4283
- finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
4284
- const created = finalRecords.filter((r) => r.created).length;
4285
- const existed = finalRecords.filter((r) => r.existed).length;
4286
- const failed = finalRecords.filter((r) => r.error).length;
4287
- if (created > 0) logger$3.log(` Created ${created} DNS record(s)`);
4288
- if (existed > 0) logger$3.log(` ✓ ${existed} record(s) already exist`);
4289
- if (failed > 0) logger$3.log(` ⚠ ${failed} record(s) failed`);
4290
- } else finalRecords = requiredRecords;
4291
- printDnsRecordsTable(finalRecords, rootDomain);
4292
- const hasFailures = finalRecords.some((r) => r.error);
4293
- if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
4402
+ logger$3.log(" Triggering deployment...");
4403
+ await api.deployApplication(config$1.applicationId);
4404
+ logger$3.log(" ✓ Deployment triggered");
4405
+ logger$3.log("\n✅ Dokploy deployment initiated!");
4406
+ logger$3.log(`\n📋 Deployment details:`);
4407
+ logger$3.log(` Image: ${imageRef}`);
4408
+ logger$3.log(` Stage: ${stage}`);
4409
+ logger$3.log(` Application ID: ${config$1.applicationId}`);
4410
+ if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
4411
+ const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
4412
+ logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
4294
4413
  return {
4295
- records: finalRecords,
4296
- success: !hasFailures,
4297
- serverIp
4414
+ imageRef,
4415
+ masterKey,
4416
+ url: deploymentUrl
4298
4417
  };
4299
4418
  }
4300
4419
 
@@ -4373,6 +4492,107 @@ function getPublicUrlArgNames(app) {
4373
4492
  return app.dependencies.map((dep) => `NEXT_PUBLIC_${dep.toUpperCase()}_URL`);
4374
4493
  }
4375
4494
 
4495
+ //#endregion
4496
+ //#region src/deploy/env-resolver.ts
4497
+ /**
4498
+ * Generate a secure random secret (64 hex characters = 32 bytes)
4499
+ */
4500
+ function generateSecret() {
4501
+ return randomBytes(32).toString("hex");
4502
+ }
4503
+ /**
4504
+ * Get or generate a secret for an app.
4505
+ * If the secret already exists in state, returns it.
4506
+ * Otherwise generates a new one and stores it.
4507
+ */
4508
+ function getOrGenerateSecret(state, appName, secretName) {
4509
+ const existing = getGeneratedSecret(state, appName, secretName);
4510
+ if (existing) return existing;
4511
+ const generated = generateSecret();
4512
+ setGeneratedSecret(state, appName, secretName, generated);
4513
+ return generated;
4514
+ }
4515
+ /**
4516
+ * Build a DATABASE_URL for an app with per-app credentials
4517
+ */
4518
+ function buildDatabaseUrl(credentials, postgres) {
4519
+ const { dbUser, dbPassword } = credentials;
4520
+ const { host, port, database } = postgres;
4521
+ return `postgresql://${encodeURIComponent(dbUser)}:${encodeURIComponent(dbPassword)}@${host}:${port}/${database}`;
4522
+ }
4523
+ /**
4524
+ * Build a REDIS_URL
4525
+ */
4526
+ function buildRedisUrl(redis) {
4527
+ const { host, port, password } = redis;
4528
+ if (password) return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
4529
+ return `redis://${host}:${port}`;
4530
+ }
4531
+ /**
4532
+ * Resolve a single environment variable
4533
+ */
4534
+ function resolveEnvVar(varName, context) {
4535
+ switch (varName) {
4536
+ case "PORT": return String(context.app.port);
4537
+ case "NODE_ENV": return context.stage === "production" ? "production" : "development";
4538
+ case "DATABASE_URL":
4539
+ if (context.appCredentials && context.postgres) return buildDatabaseUrl(context.appCredentials, context.postgres);
4540
+ break;
4541
+ case "REDIS_URL":
4542
+ if (context.redis) return buildRedisUrl(context.redis);
4543
+ break;
4544
+ case "BETTER_AUTH_URL": return `https://${context.appHostname}`;
4545
+ case "BETTER_AUTH_SECRET": return getOrGenerateSecret(context.state, context.appName, "BETTER_AUTH_SECRET");
4546
+ case "BETTER_AUTH_TRUSTED_ORIGINS":
4547
+ if (context.frontendUrls.length > 0) return context.frontendUrls.join(",");
4548
+ break;
4549
+ case "GKM_MASTER_KEY":
4550
+ if (context.masterKey) return context.masterKey;
4551
+ break;
4552
+ }
4553
+ if (context.userSecrets) {
4554
+ if (context.userSecrets.custom[varName]) return context.userSecrets.custom[varName];
4555
+ if (varName in context.userSecrets.urls) return context.userSecrets.urls[varName];
4556
+ if (varName === "POSTGRES_PASSWORD" && context.userSecrets.services.postgres) return context.userSecrets.services.postgres.password;
4557
+ if (varName === "REDIS_PASSWORD" && context.userSecrets.services.redis) return context.userSecrets.services.redis.password;
4558
+ }
4559
+ return void 0;
4560
+ }
4561
+ /**
4562
+ * Resolve all environment variables for an app
4563
+ */
4564
+ function resolveEnvVars(requiredVars, context) {
4565
+ const resolved = {};
4566
+ const missing = [];
4567
+ for (const varName of requiredVars) {
4568
+ const value = resolveEnvVar(varName, context);
4569
+ if (value !== void 0) resolved[varName] = value;
4570
+ else missing.push(varName);
4571
+ }
4572
+ return {
4573
+ resolved,
4574
+ missing
4575
+ };
4576
+ }
4577
+ /**
4578
+ * Format missing variables error message
4579
+ */
4580
+ function formatMissingVarsError(appName, missing, stage) {
4581
+ const varList = missing.map((v) => ` - ${v}`).join("\n");
4582
+ return `Deployment failed: ${appName} is missing required environment variables:\n${varList}\n\nAdd them with:\n gkm secrets:set <VAR_NAME> <value> --stage ${stage}\n\nOr add them to the app's requiredEnv in gkm.config.ts to have them auto-resolved.`;
4583
+ }
4584
+ /**
4585
+ * Validate that all required environment variables can be resolved
4586
+ */
4587
+ function validateEnvVars(requiredVars, context) {
4588
+ const { resolved, missing } = resolveEnvVars(requiredVars, context);
4589
+ return {
4590
+ valid: missing.length === 0,
4591
+ missing,
4592
+ resolved
4593
+ };
4594
+ }
4595
+
4376
4596
  //#endregion
4377
4597
  //#region src/deploy/init.ts
4378
4598
  const logger$2 = console;
@@ -4645,14 +4865,17 @@ function generateSecretsReport(encryptedApps, sniffedApps) {
4645
4865
 
4646
4866
  //#endregion
4647
4867
  //#region src/deploy/sniffer.ts
4868
+ const __filename = fileURLToPath(import.meta.url);
4869
+ const __dirname = dirname(__filename);
4648
4870
  /**
4649
4871
  * Get required environment variables for an app.
4650
4872
  *
4651
- * Detection strategy:
4652
- * - Frontend apps: Returns empty (no server secrets)
4653
- * - Apps with `requiredEnv`: Uses explicit list from config
4654
- * - Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4655
- * - Apps with neither: Returns empty
4873
+ * Detection strategy (in order):
4874
+ * 1. Frontend apps: Returns empty (no server secrets)
4875
+ * 2. Apps with `requiredEnv`: Uses explicit list from config
4876
+ * 3. Entry apps: Imports entry file in subprocess to capture config.parse() calls
4877
+ * 4. Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4878
+ * 5. Apps with neither: Returns empty
4656
4879
  *
4657
4880
  * This function handles "fire and forget" async operations gracefully,
4658
4881
  * capturing errors and unhandled rejections without failing the build.
@@ -4673,6 +4896,14 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4673
4896
  appName,
4674
4897
  requiredEnvVars: [...app.requiredEnv]
4675
4898
  };
4899
+ if (app.entry) {
4900
+ const result = await sniffEntryFile(app.entry, app.path, workspacePath);
4901
+ if (logWarnings && result.error) console.warn(`[sniffer] ${appName}: Entry file threw error during sniffing (env vars still captured): ${result.error.message}`);
4902
+ return {
4903
+ appName,
4904
+ requiredEnvVars: result.envVars
4905
+ };
4906
+ }
4676
4907
  if (app.envParser) {
4677
4908
  const result = await sniffEnvParser(app.envParser, app.path, workspacePath);
4678
4909
  if (logWarnings) {
@@ -4690,6 +4921,80 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4690
4921
  };
4691
4922
  }
4692
4923
/**
 * Sniff an entry file by importing it in a subprocess.
 *
 * Entry apps call `config.parse()` at module load time. To capture which
 * env vars are accessed, we:
 * 1. Spawn a subprocess with a module loader hook
 * 2. The loader intercepts `@geekmidas/envkit` and replaces EnvironmentParser
 *    with SnifferEnvironmentParser
 * 3. Import the entry file (triggers config.parse())
 * 4. Capture and return the accessed env var names
 *
 * Each app is sniffed in its own subprocess, which keeps module caches
 * isolated between apps.
 *
 * @param entryPath - Relative path to the entry file (e.g., './src/index.ts')
 * @param appPath - The app's path relative to workspace (e.g., 'apps/auth')
 * @param workspacePath - Absolute path to workspace root
 * @returns EntrySniffResult with env vars and optional error
 */
async function sniffEntryFile(entryPath, appPath, workspacePath) {
  const absoluteEntry = resolve(workspacePath, appPath, entryPath);
  const loaderScript = resolve(__dirname, "sniffer-loader.ts");
  const workerScript = resolve(__dirname, "sniffer-worker.ts");
  return new Promise((settle) => {
    const child = spawn(
      "node",
      ["--import", loaderScript, workerScript, absoluteEntry],
      {
        cwd: resolve(workspacePath, appPath),
        stdio: ["ignore", "pipe", "pipe"],
        // tsx lets the subprocess execute the TypeScript worker/loader directly
        env: { ...process.env, NODE_OPTIONS: "--import tsx" },
      },
    );
    let outBuf = "";
    let errBuf = "";
    child.stdout.on("data", (chunk) => {
      outBuf += chunk.toString();
    });
    child.stderr.on("data", (chunk) => {
      errBuf += chunk.toString();
    });
    child.on("close", (exitCode) => {
      try {
        // The worker prints a JSON payload; grab the last {"envVars": ...}
        // object on stdout, ignoring any earlier log noise.
        const match = outBuf.match(/\{[^{}]*"envVars"[^{}]*\}[^{]*$/);
        if (match) {
          const payload = JSON.parse(match[0]);
          settle({
            envVars: payload.envVars || [],
            error: payload.error ? new Error(payload.error) : void 0,
          });
          return;
        }
      } catch {
        // malformed payload — fall through to the generic failure result
      }
      settle({
        envVars: [],
        error: new Error(
          `Failed to sniff entry file (exit code ${exitCode}): ${errBuf || outBuf || "No output"}`,
        ),
      });
    });
    child.on("error", (spawnError) => {
      settle({ envVars: [], error: spawnError });
    });
  });
}
4997
+ /**
4693
4998
  * Run the SnifferEnvironmentParser on an envParser module to detect
4694
4999
  * which environment variables it accesses.
4695
5000
  *
@@ -4799,10 +5104,130 @@ async function prompt(message, hidden = false) {
4799
5104
  }
4800
5105
  }
4801
5106
/**
 * Wait for Postgres to be ready to accept connections.
 *
 * Repeatedly attempts a full connect/disconnect cycle against the server
 * until one succeeds or the retry budget is exhausted. Used after enabling
 * the external port to ensure the database is accessible before creating
 * users.
 *
 * @param host - The Postgres server hostname
 * @param port - The external port (typically 5432)
 * @param user - Master database user (postgres)
 * @param password - Master database password
 * @param database - Database name to connect to
 * @param maxRetries - Maximum number of connection attempts (default: 30)
 * @param retryIntervalMs - Milliseconds between retries (default: 2000)
 * @throws Error if Postgres is not ready after maxRetries
 */
async function waitForPostgres(host, port, user, password, database, maxRetries = 30, retryIntervalMs = 2e3) {
  let attempt = 0;
  while (attempt < maxRetries) {
    try {
      const probe = new Client({ host, port, user, password, database });
      await probe.connect();
      await probe.end();
      return;
    } catch {
      attempt += 1;
      // Skip the log/sleep after the final failed attempt and fall through
      // to the timeout error below.
      if (attempt < maxRetries) {
        logger$1.log(` Waiting for Postgres... (${attempt}/${maxRetries})`);
        await new Promise((wake) => setTimeout(wake, retryIntervalMs));
      }
    }
  }
  throw new Error(`Postgres not ready after ${maxRetries} retries`);
}
5142
/**
 * Initialize Postgres with per-app users and schemas.
 *
 * This function implements the same user/schema isolation pattern used in local
 * dev mode (see docker/postgres/init.sh). It:
 *
 * 1. Temporarily enables the external Postgres port
 * 2. Connects using master credentials
 * 3. Creates each user with appropriate schema permissions
 * 4. Disables the external port again — even when a step above fails
 *
 * Schema assignment follows this pattern:
 * - `api` app: Uses `public` schema (shared tables, migrations run here)
 * - Other apps: Get their own schema with `search_path` configured
 *
 * @param api - The Dokploy API client
 * @param postgres - The provisioned Postgres service details
 * @param serverHostname - The Dokploy server hostname (for external connection)
 * @param users - Array of users to create with their schema configuration
 * @throws Re-raises any connection or SQL error after closing the external port
 *
 * @example
 * ```ts
 * await initializePostgresUsers(api, postgres, 'dokploy.example.com', [
 *   { name: 'api', password: 'xxx', usePublicSchema: true },
 *   { name: 'auth', password: 'yyy', usePublicSchema: false },
 * ]);
 * ```
 */
async function initializePostgresUsers(api, postgres, serverHostname, users) {
  // Quote SQL identifiers / string literals before interpolation. User names
  // come from app config and passwords may come from persisted state, so they
  // must never be spliced into SQL raw (a single quote would previously have
  // broken the statement or allowed injection).
  const quoteIdent = (name) => `"${String(name).replace(/"/g, '""')}"`;
  const quoteLiteral = (value) => `'${String(value).replace(/'/g, "''")}'`;
  logger$1.log("\n🔧 Initializing database users...");
  const externalPort = 5432;
  logger$1.log(` Enabling external port ${externalPort}...`);
  await api.savePostgresExternalPort(postgres.postgresId, externalPort);
  await api.deployPostgres(postgres.postgresId);
  logger$1.log(` Waiting for Postgres to be accessible at ${serverHostname}:${externalPort}...`);
  await waitForPostgres(serverHostname, externalPort, postgres.databaseUser, postgres.databasePassword, postgres.databaseName);
  const client = new Client({
    host: serverHostname,
    port: externalPort,
    user: postgres.databaseUser,
    password: postgres.databasePassword,
    database: postgres.databaseName
  });
  try {
    await client.connect();
    for (const user of users) {
      const schemaName = user.usePublicSchema ? "public" : user.name;
      const userIdent = quoteIdent(user.name);
      const passwordLiteral = quoteLiteral(user.password);
      const schemaIdent = quoteIdent(schemaName);
      logger$1.log(` Creating user "${user.name}" with schema "${schemaName}"...`);
      // Create-or-update so re-running a deploy rotates the password instead
      // of failing on duplicate_object. Tagged dollar-quoting ($gkm$) avoids
      // collisions with `$$` appearing in an escaped password.
      await client.query(`
        DO $gkm$ BEGIN
          CREATE USER ${userIdent} WITH PASSWORD ${passwordLiteral};
        EXCEPTION WHEN duplicate_object THEN
          ALTER USER ${userIdent} WITH PASSWORD ${passwordLiteral};
        END $gkm$;
      `);
      if (user.usePublicSchema) await client.query(`
        GRANT ALL ON SCHEMA public TO ${userIdent};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO ${userIdent};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO ${userIdent};
      `);
      else await client.query(`
        CREATE SCHEMA IF NOT EXISTS ${schemaIdent} AUTHORIZATION ${userIdent};
        ALTER USER ${userIdent} SET search_path TO ${schemaIdent};
        GRANT USAGE ON SCHEMA ${schemaIdent} TO ${userIdent};
        GRANT ALL ON ALL TABLES IN SCHEMA ${schemaIdent} TO ${userIdent};
        ALTER DEFAULT PRIVILEGES IN SCHEMA ${schemaIdent} GRANT ALL ON TABLES TO ${userIdent};
      `);
      logger$1.log(` ✓ User "${user.name}" configured`);
    }
  } finally {
    await client.end();
    // Always re-disable the external port, even when user setup failed above.
    // Previously this ran only on the success path, so an error left the
    // database publicly reachable.
    logger$1.log(" Disabling external port...");
    await api.savePostgresExternalPort(postgres.postgresId, null);
    await api.deployPostgres(postgres.postgresId);
  }
  logger$1.log(" ✓ Database users initialized");
}
5219
/**
 * Get the server hostname from the Dokploy endpoint URL.
 *
 * @param endpoint - Full endpoint URL (e.g. 'https://dokploy.example.com:3000/api')
 * @returns The bare hostname, without protocol, port, or path
 */
function getServerHostname(endpoint) {
  return new URL(endpoint).hostname;
}
5226
+ /**
4802
5227
  * Provision docker compose services in Dokploy
4803
5228
  * @internal Exported for testing
4804
5229
  */
4805
- async function provisionServices(api, projectId, environmentId, appName, services, existingServiceIds) {
5230
+ async function provisionServices(api, projectId, environmentId, projectName, services, existingServiceIds) {
4806
5231
  logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
4807
5232
  if (!services || !environmentId) {
4808
5233
  logger$1.log(" Skipping: no services or no environmentId");
@@ -4823,9 +5248,12 @@ async function provisionServices(api, projectId, environmentId, appName, service
4823
5248
  else logger$1.log(` ⚠ Cached ID invalid, will create new`);
4824
5249
  }
4825
5250
  if (!postgres) {
4826
- const { randomBytes: randomBytes$1 } = await import("node:crypto");
4827
- const databasePassword = randomBytes$1(16).toString("hex");
4828
- const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, { databasePassword });
5251
+ const databasePassword = randomBytes(16).toString("hex");
5252
+ const databaseName = projectName.replace(/-/g, "_");
5253
+ const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, {
5254
+ databaseName,
5255
+ databasePassword
5256
+ });
4829
5257
  postgres = result.postgres;
4830
5258
  created = result.created;
4831
5259
  if (created) {
@@ -4893,12 +5321,6 @@ async function provisionServices(api, projectId, environmentId, appName, service
4893
5321
  */
4894
5322
  async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
4895
5323
  logger$1.log("\n🔧 Checking Dokploy setup...");
4896
- const { readStageSecrets: readStageSecrets$1 } = await import("./storage-DNj_I11J.mjs");
4897
- const existingSecrets = await readStageSecrets$1(stage);
4898
- const existingUrls = {
4899
- DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
4900
- REDIS_URL: existingSecrets?.urls?.REDIS_URL
4901
- };
4902
5324
  let creds = await getDokployCredentials();
4903
5325
  if (!creds) {
4904
5326
  logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
@@ -5224,6 +5646,8 @@ async function workspaceDeployCommand(workspace, options) {
5224
5646
  postgres: services.db !== void 0 && services.db !== false,
5225
5647
  redis: services.cache !== void 0 && services.cache !== false
5226
5648
  };
5649
+ let provisionedPostgres = null;
5650
+ let provisionedRedis = null;
5227
5651
  if (dockerServices.postgres || dockerServices.redis) {
5228
5652
  logger$1.log("\n🔧 Provisioning infrastructure services...");
5229
5653
  const existingServiceIds = {
@@ -5232,17 +5656,64 @@ async function workspaceDeployCommand(workspace, options) {
5232
5656
  };
5233
5657
  const provisionResult = await provisionServices(api, project.projectId, environmentId, workspace.name, dockerServices, existingServiceIds);
5234
5658
  if (provisionResult?.serviceIds) {
5235
- if (provisionResult.serviceIds.postgresId) setPostgresId(state, provisionResult.serviceIds.postgresId);
5236
- if (provisionResult.serviceIds.redisId) setRedisId(state, provisionResult.serviceIds.redisId);
5659
+ if (provisionResult.serviceIds.postgresId) {
5660
+ setPostgresId(state, provisionResult.serviceIds.postgresId);
5661
+ provisionedPostgres = await api.getPostgres(provisionResult.serviceIds.postgresId);
5662
+ }
5663
+ if (provisionResult.serviceIds.redisId) {
5664
+ setRedisId(state, provisionResult.serviceIds.redisId);
5665
+ provisionedRedis = await api.getRedis(provisionResult.serviceIds.redisId);
5666
+ }
5237
5667
  }
5238
5668
  }
5239
5669
  const backendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "backend");
5240
5670
  const frontendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "frontend");
5671
+ const perAppDbCredentials = /* @__PURE__ */ new Map();
5672
+ if (provisionedPostgres && backendApps.length > 0) {
5673
+ const appsNeedingDb = backendApps.filter((appName) => {
5674
+ const requirements = sniffedApps.get(appName);
5675
+ return requirements?.requiredEnvVars.includes("DATABASE_URL");
5676
+ });
5677
+ if (appsNeedingDb.length > 0) {
5678
+ logger$1.log(`\n🔐 Setting up per-app database credentials...`);
5679
+ logger$1.log(` Apps needing DATABASE_URL: ${appsNeedingDb.join(", ")}`);
5680
+ const existingCredentials = getAllAppCredentials(state);
5681
+ const usersToCreate = [];
5682
+ for (const appName of appsNeedingDb) {
5683
+ let credentials = existingCredentials[appName];
5684
+ if (credentials) logger$1.log(` ${appName}: Using existing credentials from state`);
5685
+ else {
5686
+ const password = randomBytes(16).toString("hex");
5687
+ credentials = {
5688
+ dbUser: appName,
5689
+ dbPassword: password
5690
+ };
5691
+ setAppCredentials(state, appName, credentials);
5692
+ logger$1.log(` ${appName}: Generated new credentials`);
5693
+ }
5694
+ perAppDbCredentials.set(appName, credentials);
5695
+ usersToCreate.push({
5696
+ name: appName,
5697
+ password: credentials.dbPassword,
5698
+ usePublicSchema: appName === "api"
5699
+ });
5700
+ }
5701
+ const serverHostname = getServerHostname(creds.endpoint);
5702
+ await initializePostgresUsers(api, provisionedPostgres, serverHostname, usersToCreate);
5703
+ }
5704
+ }
5241
5705
  const publicUrls = {};
5242
5706
  const results = [];
5243
5707
  const dokployConfig = workspace.deploy.dokploy;
5244
5708
  const appHostnames = /* @__PURE__ */ new Map();
5245
5709
  const appDomainIds = /* @__PURE__ */ new Map();
5710
+ const frontendUrls = [];
5711
+ for (const appName of frontendApps) {
5712
+ const app = workspace.apps[appName];
5713
+ const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5714
+ const hostname = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5715
+ frontendUrls.push(`https://${hostname}`);
5716
+ }
5246
5717
  if (backendApps.length > 0) {
5247
5718
  logger$1.log("\n📦 PHASE 1: Deploying backend applications...");
5248
5719
  for (const appName of backendApps) {
@@ -5286,14 +5757,46 @@ async function workspaceDeployCommand(workspace, options) {
5286
5757
  },
5287
5758
  buildArgs
5288
5759
  });
5289
- const envVars = [`NODE_ENV=production`, `PORT=${app.port}`];
5290
- if (appSecrets && appSecrets.masterKey) envVars.push(`GKM_MASTER_KEY=${appSecrets.masterKey}`);
5760
+ const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5761
+ const envContext = {
5762
+ app,
5763
+ appName,
5764
+ stage,
5765
+ state,
5766
+ appCredentials: perAppDbCredentials.get(appName),
5767
+ postgres: provisionedPostgres ? {
5768
+ host: provisionedPostgres.appName,
5769
+ port: 5432,
5770
+ database: provisionedPostgres.databaseName
5771
+ } : void 0,
5772
+ redis: provisionedRedis ? {
5773
+ host: provisionedRedis.appName,
5774
+ port: 6379,
5775
+ password: provisionedRedis.databasePassword
5776
+ } : void 0,
5777
+ appHostname: backendHost,
5778
+ frontendUrls,
5779
+ userSecrets: stageSecrets ?? void 0,
5780
+ masterKey: appSecrets?.masterKey
5781
+ };
5782
+ const appRequirements = sniffedApps.get(appName);
5783
+ const requiredVars = appRequirements?.requiredEnvVars ?? [];
5784
+ const { valid, missing, resolved } = validateEnvVars(requiredVars, envContext);
5785
+ if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
5786
+ const envVars = Object.entries(resolved).map(([key, value]) => `${key}=${value}`);
5787
+ if (Object.keys(resolved).length > 0) logger$1.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
5291
5788
  await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
5292
5789
  await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
5293
5790
  logger$1.log(` Deploying to Dokploy...`);
5294
5791
  await api.deployApplication(application.applicationId);
5295
- const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5296
- try {
5792
+ const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5793
+ const existingDomain = existingDomains.find((d) => d.host === backendHost);
5794
+ if (existingDomain) {
5795
+ appHostnames.set(appName, backendHost);
5796
+ appDomainIds.set(appName, existingDomain.domainId);
5797
+ publicUrls[appName] = `https://${backendHost}`;
5798
+ logger$1.log(` ✓ Domain: https://${backendHost} (existing)`);
5799
+ } else try {
5297
5800
  const domain = await api.createDomain({
5298
5801
  host: backendHost,
5299
5802
  port: app.port,
@@ -5303,18 +5806,13 @@ async function workspaceDeployCommand(workspace, options) {
5303
5806
  });
5304
5807
  appHostnames.set(appName, backendHost);
5305
5808
  appDomainIds.set(appName, domain.domainId);
5306
- const publicUrl = `https://${backendHost}`;
5307
- publicUrls[appName] = publicUrl;
5308
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5809
+ publicUrls[appName] = `https://${backendHost}`;
5810
+ logger$1.log(` ✓ Domain: https://${backendHost} (created)`);
5309
5811
  } catch (domainError) {
5812
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5813
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5310
5814
  appHostnames.set(appName, backendHost);
5311
- try {
5312
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5313
- const matchingDomain = existingDomains.find((d) => d.host === backendHost);
5314
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5315
- } catch {}
5316
5815
  publicUrls[appName] = `https://${backendHost}`;
5317
- logger$1.log(` ℹ Domain already configured: https://${backendHost}`);
5318
5816
  }
5319
5817
  results.push({
5320
5818
  appName,
@@ -5383,7 +5881,14 @@ async function workspaceDeployCommand(workspace, options) {
5383
5881
  await api.deployApplication(application.applicationId);
5384
5882
  const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5385
5883
  const frontendHost = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5386
- try {
5884
+ const existingFrontendDomains = await api.getDomainsByApplicationId(application.applicationId);
5885
+ const existingFrontendDomain = existingFrontendDomains.find((d) => d.host === frontendHost);
5886
+ if (existingFrontendDomain) {
5887
+ appHostnames.set(appName, frontendHost);
5888
+ appDomainIds.set(appName, existingFrontendDomain.domainId);
5889
+ publicUrls[appName] = `https://${frontendHost}`;
5890
+ logger$1.log(` ✓ Domain: https://${frontendHost} (existing)`);
5891
+ } else try {
5387
5892
  const domain = await api.createDomain({
5388
5893
  host: frontendHost,
5389
5894
  port: app.port,
@@ -5393,18 +5898,13 @@ async function workspaceDeployCommand(workspace, options) {
5393
5898
  });
5394
5899
  appHostnames.set(appName, frontendHost);
5395
5900
  appDomainIds.set(appName, domain.domainId);
5396
- const publicUrl = `https://${frontendHost}`;
5397
- publicUrls[appName] = publicUrl;
5398
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5901
+ publicUrls[appName] = `https://${frontendHost}`;
5902
+ logger$1.log(` ✓ Domain: https://${frontendHost} (created)`);
5399
5903
  } catch (domainError) {
5904
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5905
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5400
5906
  appHostnames.set(appName, frontendHost);
5401
- try {
5402
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5403
- const matchingDomain = existingDomains.find((d) => d.host === frontendHost);
5404
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5405
- } catch {}
5406
5907
  publicUrls[appName] = `https://${frontendHost}`;
5407
- logger$1.log(` ℹ Domain already configured: https://${frontendHost}`);
5408
5908
  }
5409
5909
  results.push({
5410
5910
  appName,
@@ -5432,6 +5932,10 @@ async function workspaceDeployCommand(workspace, options) {
5432
5932
  const dnsConfig = workspace.deploy.dns;
5433
5933
  if (dnsConfig && appHostnames.size > 0) {
5434
5934
  const dnsResult = await orchestrateDns(appHostnames, dnsConfig, creds.endpoint);
5935
+ if (dnsResult?.serverIp && appHostnames.size > 0) {
5936
+ await verifyDnsRecords(appHostnames, dnsResult.serverIp, state);
5937
+ await writeStageState(workspace.root, stage, state);
5938
+ }
5435
5939
  if (dnsResult?.success && appHostnames.size > 0) {
5436
5940
  logger$1.log("\n🔒 Validating domains for SSL certificates...");
5437
5941
  for (const [appName, hostname] of appHostnames) try {
@@ -5732,10 +6236,10 @@ const GEEKMIDAS_VERSIONS = {
5732
6236
  "@geekmidas/cli": CLI_VERSION,
5733
6237
  "@geekmidas/client": "~0.5.0",
5734
6238
  "@geekmidas/cloud": "~0.2.0",
5735
- "@geekmidas/constructs": "~0.7.0",
6239
+ "@geekmidas/constructs": "~0.8.0",
5736
6240
  "@geekmidas/db": "~0.3.0",
5737
6241
  "@geekmidas/emailkit": "~0.2.0",
5738
- "@geekmidas/envkit": "~0.6.0",
6242
+ "@geekmidas/envkit": "~0.7.0",
5739
6243
  "@geekmidas/errors": "~0.1.0",
5740
6244
  "@geekmidas/events": "~0.2.0",
5741
6245
  "@geekmidas/logger": "~0.4.0",
@@ -5744,7 +6248,7 @@ const GEEKMIDAS_VERSIONS = {
5744
6248
  "@geekmidas/services": "~0.2.0",
5745
6249
  "@geekmidas/storage": "~0.1.0",
5746
6250
  "@geekmidas/studio": "~0.4.0",
5747
- "@geekmidas/telescope": "~0.5.0",
6251
+ "@geekmidas/telescope": "~0.6.0",
5748
6252
  "@geekmidas/testkit": "~0.6.0"
5749
6253
  };
5750
6254