@geekmidas/cli 0.47.0 → 0.49.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/dist/{dokploy-api-CMWlWq7-.mjs → dokploy-api-94KzmTVf.mjs} +7 -7
  2. package/dist/dokploy-api-94KzmTVf.mjs.map +1 -0
  3. package/dist/dokploy-api-CItuaWTq.mjs +3 -0
  4. package/dist/dokploy-api-DBNE8MDt.cjs +3 -0
  5. package/dist/{dokploy-api-BnX2OxyF.cjs → dokploy-api-YD8WCQfW.cjs} +7 -7
  6. package/dist/dokploy-api-YD8WCQfW.cjs.map +1 -0
  7. package/dist/index.cjs +2390 -1890
  8. package/dist/index.cjs.map +1 -1
  9. package/dist/index.mjs +2387 -1887
  10. package/dist/index.mjs.map +1 -1
  11. package/package.json +8 -6
  12. package/src/build/__tests__/handler-templates.spec.ts +947 -0
  13. package/src/deploy/__tests__/__fixtures__/entry-apps/async-entry.ts +24 -0
  14. package/src/deploy/__tests__/__fixtures__/entry-apps/nested-config-entry.ts +24 -0
  15. package/src/deploy/__tests__/__fixtures__/entry-apps/no-env-entry.ts +12 -0
  16. package/src/deploy/__tests__/__fixtures__/entry-apps/simple-entry.ts +14 -0
  17. package/src/deploy/__tests__/__fixtures__/entry-apps/throwing-entry.ts +16 -0
  18. package/src/deploy/__tests__/__fixtures__/env-parsers/non-function-export.ts +10 -0
  19. package/src/deploy/__tests__/__fixtures__/env-parsers/parseable-env-parser.ts +18 -0
  20. package/src/deploy/__tests__/__fixtures__/env-parsers/throwing-env-parser.ts +18 -0
  21. package/src/deploy/__tests__/__fixtures__/env-parsers/valid-env-parser.ts +16 -0
  22. package/src/deploy/__tests__/dns-verification.spec.ts +229 -0
  23. package/src/deploy/__tests__/dokploy-api.spec.ts +2 -3
  24. package/src/deploy/__tests__/domain.spec.ts +7 -3
  25. package/src/deploy/__tests__/env-resolver.spec.ts +469 -0
  26. package/src/deploy/__tests__/index.spec.ts +12 -12
  27. package/src/deploy/__tests__/secrets.spec.ts +4 -1
  28. package/src/deploy/__tests__/sniffer.spec.ts +326 -1
  29. package/src/deploy/__tests__/state.spec.ts +844 -0
  30. package/src/deploy/dns/hostinger-api.ts +9 -6
  31. package/src/deploy/dns/index.ts +115 -4
  32. package/src/deploy/docker.ts +1 -2
  33. package/src/deploy/dokploy-api.ts +20 -11
  34. package/src/deploy/domain.ts +5 -4
  35. package/src/deploy/env-resolver.ts +278 -0
  36. package/src/deploy/index.ts +534 -124
  37. package/src/deploy/secrets.ts +7 -2
  38. package/src/deploy/sniffer-envkit-patch.ts +43 -0
  39. package/src/deploy/sniffer-hooks.ts +52 -0
  40. package/src/deploy/sniffer-loader.ts +23 -0
  41. package/src/deploy/sniffer-worker.ts +74 -0
  42. package/src/deploy/sniffer.ts +136 -14
  43. package/src/deploy/state.ts +162 -1
  44. package/src/docker/templates.ts +10 -14
  45. package/src/init/versions.ts +3 -3
  46. package/tsconfig.tsbuildinfo +1 -1
  47. package/dist/dokploy-api-4a6h35VY.cjs +0 -3
  48. package/dist/dokploy-api-BnX2OxyF.cjs.map +0 -1
  49. package/dist/dokploy-api-CMWlWq7-.mjs.map +0 -1
  50. package/dist/dokploy-api-DQvi9iZa.mjs +0 -3
package/dist/index.mjs CHANGED
@@ -3,7 +3,7 @@ import { __require, getAppBuildOrder, getDependencyEnvVars, getDeployTargetError
3
3
  import { getAppNameFromCwd, loadAppConfig, loadConfig, loadWorkspaceConfig, parseModuleConfig } from "./config-C3LSBNSl.mjs";
4
4
  import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, OpenApiTsGenerator, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-C3C-BzIZ.mjs";
5
5
  import { getKeyPath, maskPassword, readStageSecrets, secretsExist, setCustomSecret, toEmbeddableSecrets, writeStageSecrets } from "./storage-Dhst7BhI.mjs";
6
- import { DokployApi } from "./dokploy-api-CMWlWq7-.mjs";
6
+ import { DokployApi } from "./dokploy-api-94KzmTVf.mjs";
7
7
  import { encryptSecrets } from "./encryption-BC4MAODn.mjs";
8
8
  import { generateReactQueryCommand } from "./openapi-react-query-ZoP9DPbY.mjs";
9
9
  import { createRequire } from "node:module";
@@ -23,13 +23,14 @@ import { Cron } from "@geekmidas/constructs/crons";
23
23
  import { Function } from "@geekmidas/constructs/functions";
24
24
  import { Subscriber } from "@geekmidas/constructs/subscribers";
25
25
  import { createHash, randomBytes } from "node:crypto";
26
+ import { Client } from "pg";
26
27
  import { lookup } from "node:dns/promises";
27
- import { pathToFileURL } from "node:url";
28
+ import { fileURLToPath, pathToFileURL } from "node:url";
28
29
  import prompts from "prompts";
29
30
 
30
31
  //#region package.json
31
32
  var name = "@geekmidas/cli";
32
- var version = "0.47.0";
33
+ var version = "0.49.0";
33
34
  var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
34
35
  var private$1 = false;
35
36
  var type = "module";
@@ -85,12 +86,14 @@ var dependencies = {
85
86
  "hono": "~4.8.0",
86
87
  "lodash.kebabcase": "^4.1.1",
87
88
  "openapi-typescript": "^7.4.2",
89
+ "pg": "~8.17.1",
88
90
  "prompts": "~2.4.2"
89
91
  };
90
92
  var devDependencies = {
91
93
  "@geekmidas/testkit": "workspace:*",
92
94
  "@types/lodash.kebabcase": "^4.1.9",
93
95
  "@types/node": "~24.9.1",
96
+ "@types/pg": "~8.16.0",
94
97
  "@types/prompts": "~2.4.9",
95
98
  "typescript": "^5.8.2",
96
99
  "vitest": "^3.2.4",
@@ -253,7 +256,7 @@ const logger$11 = console;
253
256
  * Validate Dokploy token by making a test API call
254
257
  */
255
258
  async function validateDokployToken(endpoint, token) {
256
- const { DokployApi: DokployApi$1 } = await import("./dokploy-api-DQvi9iZa.mjs");
259
+ const { DokployApi: DokployApi$1 } = await import("./dokploy-api-CItuaWTq.mjs");
257
260
  const api = new DokployApi$1({
258
261
  baseUrl: endpoint,
259
262
  token
@@ -2274,2032 +2277,2143 @@ function getAppOutputPath(workspace, _appName, app) {
2274
2277
  }
2275
2278
 
2276
2279
  //#endregion
2277
- //#region src/docker/compose.ts
2278
- /** Default Docker images for services */
2279
- const DEFAULT_SERVICE_IMAGES = {
2280
- postgres: "postgres",
2281
- redis: "redis",
2282
- rabbitmq: "rabbitmq"
2283
- };
2284
- /** Default Docker image versions for services */
2285
- const DEFAULT_SERVICE_VERSIONS = {
2286
- postgres: "16-alpine",
2287
- redis: "7-alpine",
2288
- rabbitmq: "3-management-alpine"
2289
- };
2290
- /** Get the default full image reference for a service */
2291
- function getDefaultImage(serviceName) {
2292
- return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
2280
+ //#region src/deploy/state.ts
2281
+ /**
2282
+ * Get the state file path for a stage
2283
+ */
2284
+ function getStateFilePath(workspaceRoot, stage) {
2285
+ return join(workspaceRoot, ".gkm", `deploy-${stage}.json`);
2293
2286
  }
2294
- /** Normalize services config to a consistent format - returns Map of service name to full image reference */
2295
- function normalizeServices(services) {
2296
- const result = /* @__PURE__ */ new Map();
2297
- if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
2298
- else for (const [name$1, config$1] of Object.entries(services)) {
2299
- const serviceName = name$1;
2300
- if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
2301
- else if (config$1 && typeof config$1 === "object") {
2302
- const serviceConfig = config$1;
2303
- if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
2304
- else {
2305
- const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
2306
- result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
2307
- }
2308
- }
2287
+ /**
2288
+ * Read the deploy state for a stage
2289
+ * Returns null if state file doesn't exist
2290
+ */
2291
+ async function readStageState(workspaceRoot, stage) {
2292
+ const filePath = getStateFilePath(workspaceRoot, stage);
2293
+ try {
2294
+ const content = await readFile(filePath, "utf-8");
2295
+ return JSON.parse(content);
2296
+ } catch (error) {
2297
+ if (error.code === "ENOENT") return null;
2298
+ console.warn(`Warning: Could not read deploy state: ${error}`);
2299
+ return null;
2309
2300
  }
2310
- return result;
2311
2301
  }
2312
2302
  /**
2313
- * Generate docker-compose.yml for production deployment
2303
+ * Write the deploy state for a stage
2314
2304
  */
2315
- function generateDockerCompose(options) {
2316
- const { imageName, registry, port, healthCheckPath, services } = options;
2317
- const serviceMap = normalizeServices(services);
2318
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2319
- let yaml = `version: '3.8'
2320
-
2321
- services:
2322
- api:
2323
- build:
2324
- context: ../..
2325
- dockerfile: .gkm/docker/Dockerfile
2326
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2327
- container_name: ${imageName}
2328
- restart: unless-stopped
2329
- ports:
2330
- - "\${PORT:-${port}}:${port}"
2331
- environment:
2332
- - NODE_ENV=production
2333
- `;
2334
- if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2335
- `;
2336
- if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2337
- `;
2338
- if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
2339
- `;
2340
- yaml += ` healthcheck:
2341
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2342
- interval: 30s
2343
- timeout: 3s
2344
- retries: 3
2345
- `;
2346
- if (serviceMap.size > 0) {
2347
- yaml += ` depends_on:
2348
- `;
2349
- for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
2350
- condition: service_healthy
2351
- `;
2352
- }
2353
- yaml += ` networks:
2354
- - app-network
2355
- `;
2356
- const postgresImage = serviceMap.get("postgres");
2357
- if (postgresImage) yaml += `
2358
- postgres:
2359
- image: ${postgresImage}
2360
- container_name: postgres
2361
- restart: unless-stopped
2362
- environment:
2363
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2364
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2365
- POSTGRES_DB: \${POSTGRES_DB:-app}
2366
- volumes:
2367
- - postgres_data:/var/lib/postgresql/data
2368
- healthcheck:
2369
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2370
- interval: 5s
2371
- timeout: 5s
2372
- retries: 5
2373
- networks:
2374
- - app-network
2375
- `;
2376
- const redisImage = serviceMap.get("redis");
2377
- if (redisImage) yaml += `
2378
- redis:
2379
- image: ${redisImage}
2380
- container_name: redis
2381
- restart: unless-stopped
2382
- volumes:
2383
- - redis_data:/data
2384
- healthcheck:
2385
- test: ["CMD", "redis-cli", "ping"]
2386
- interval: 5s
2387
- timeout: 5s
2388
- retries: 5
2389
- networks:
2390
- - app-network
2391
- `;
2392
- const rabbitmqImage = serviceMap.get("rabbitmq");
2393
- if (rabbitmqImage) yaml += `
2394
- rabbitmq:
2395
- image: ${rabbitmqImage}
2396
- container_name: rabbitmq
2397
- restart: unless-stopped
2398
- environment:
2399
- RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
2400
- RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
2401
- ports:
2402
- - "15672:15672" # Management UI
2403
- volumes:
2404
- - rabbitmq_data:/var/lib/rabbitmq
2405
- healthcheck:
2406
- test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
2407
- interval: 10s
2408
- timeout: 5s
2409
- retries: 5
2410
- networks:
2411
- - app-network
2412
- `;
2413
- yaml += `
2414
- volumes:
2415
- `;
2416
- if (serviceMap.has("postgres")) yaml += ` postgres_data:
2417
- `;
2418
- if (serviceMap.has("redis")) yaml += ` redis_data:
2419
- `;
2420
- if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
2421
- `;
2422
- yaml += `
2423
- networks:
2424
- app-network:
2425
- driver: bridge
2426
- `;
2427
- return yaml;
2305
+ async function writeStageState(workspaceRoot, stage, state) {
2306
+ const filePath = getStateFilePath(workspaceRoot, stage);
2307
+ const dir = join(workspaceRoot, ".gkm");
2308
+ await mkdir(dir, { recursive: true });
2309
+ state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
2310
+ await writeFile(filePath, JSON.stringify(state, null, 2));
2428
2311
  }
2429
2312
  /**
2430
- * Generate a minimal docker-compose.yml for API only
2313
+ * Create a new empty state for a stage
2431
2314
  */
2432
- function generateMinimalDockerCompose(options) {
2433
- const { imageName, registry, port, healthCheckPath } = options;
2434
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2435
- return `version: '3.8'
2436
-
2437
- services:
2438
- api:
2439
- build:
2440
- context: ../..
2441
- dockerfile: .gkm/docker/Dockerfile
2442
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2443
- container_name: ${imageName}
2444
- restart: unless-stopped
2445
- ports:
2446
- - "\${PORT:-${port}}:${port}"
2447
- environment:
2448
- - NODE_ENV=production
2449
- healthcheck:
2450
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2451
- interval: 30s
2452
- timeout: 3s
2453
- retries: 3
2454
- networks:
2455
- - app-network
2456
-
2457
- networks:
2458
- app-network:
2459
- driver: bridge
2460
- `;
2315
+ function createEmptyState(stage, environmentId) {
2316
+ return {
2317
+ provider: "dokploy",
2318
+ stage,
2319
+ environmentId,
2320
+ applications: {},
2321
+ services: {},
2322
+ lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
2323
+ };
2461
2324
  }
2462
2325
  /**
2463
- * Generate docker-compose.yml for a workspace with all apps as services.
2464
- * Apps can communicate with each other via service names.
2465
- * @internal Exported for testing
2326
+ * Get application ID from state
2466
2327
  */
2467
- function generateWorkspaceCompose(workspace, options = {}) {
2468
- const { registry } = options;
2469
- const apps = Object.entries(workspace.apps);
2470
- const services = workspace.services;
2471
- const hasPostgres = services.db !== void 0 && services.db !== false;
2472
- const hasRedis = services.cache !== void 0 && services.cache !== false;
2473
- const hasMail = services.mail !== void 0 && services.mail !== false;
2474
- const postgresImage = getInfraServiceImage("postgres", services.db);
2475
- const redisImage = getInfraServiceImage("redis", services.cache);
2476
- let yaml = `# Docker Compose for ${workspace.name} workspace
2477
- # Generated by gkm - do not edit manually
2478
-
2479
- services:
2480
- `;
2481
- for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
2482
- registry,
2483
- hasPostgres,
2484
- hasRedis
2485
- });
2486
- if (hasPostgres) yaml += `
2487
- postgres:
2488
- image: ${postgresImage}
2489
- container_name: ${workspace.name}-postgres
2490
- restart: unless-stopped
2491
- environment:
2492
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2493
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2494
- POSTGRES_DB: \${POSTGRES_DB:-app}
2495
- volumes:
2496
- - postgres_data:/var/lib/postgresql/data
2497
- healthcheck:
2498
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2499
- interval: 5s
2500
- timeout: 5s
2501
- retries: 5
2502
- networks:
2503
- - workspace-network
2504
- `;
2505
- if (hasRedis) yaml += `
2506
- redis:
2507
- image: ${redisImage}
2508
- container_name: ${workspace.name}-redis
2509
- restart: unless-stopped
2510
- volumes:
2511
- - redis_data:/data
2512
- healthcheck:
2513
- test: ["CMD", "redis-cli", "ping"]
2514
- interval: 5s
2515
- timeout: 5s
2516
- retries: 5
2517
- networks:
2518
- - workspace-network
2519
- `;
2520
- if (hasMail) yaml += `
2521
- mailpit:
2522
- image: axllent/mailpit:latest
2523
- container_name: ${workspace.name}-mailpit
2524
- restart: unless-stopped
2525
- ports:
2526
- - "8025:8025" # Web UI
2527
- - "1025:1025" # SMTP
2528
- networks:
2529
- - workspace-network
2530
- `;
2531
- yaml += `
2532
- volumes:
2533
- `;
2534
- if (hasPostgres) yaml += ` postgres_data:
2535
- `;
2536
- if (hasRedis) yaml += ` redis_data:
2537
- `;
2538
- yaml += `
2539
- networks:
2540
- workspace-network:
2541
- driver: bridge
2542
- `;
2543
- return yaml;
2328
+ function getApplicationId(state, appName) {
2329
+ return state?.applications[appName];
2544
2330
  }
2545
2331
  /**
2546
- * Get infrastructure service image with version.
2332
+ * Set application ID in state (mutates state)
2547
2333
  */
2548
- function getInfraServiceImage(serviceName, config$1) {
2549
- const defaults = {
2550
- postgres: "postgres:16-alpine",
2551
- redis: "redis:7-alpine"
2552
- };
2553
- if (!config$1 || config$1 === true) return defaults[serviceName];
2554
- if (typeof config$1 === "object") {
2555
- if (config$1.image) return config$1.image;
2556
- if (config$1.version) {
2557
- const baseImage = serviceName === "postgres" ? "postgres" : "redis";
2558
- return `${baseImage}:${config$1.version}`;
2559
- }
2560
- }
2561
- return defaults[serviceName];
2334
+ function setApplicationId(state, appName, applicationId) {
2335
+ state.applications[appName] = applicationId;
2562
2336
  }
2563
2337
  /**
2564
- * Generate a service definition for an app.
2338
+ * Get postgres ID from state
2565
2339
  */
2566
- function generateAppService(appName, app, allApps, options) {
2567
- const { registry, hasPostgres, hasRedis } = options;
2568
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2569
- const healthCheckPath = app.type === "frontend" ? "/" : "/health";
2570
- const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
2571
- let yaml = `
2572
- ${appName}:
2573
- build:
2574
- context: .
2575
- dockerfile: .gkm/docker/Dockerfile.${appName}
2576
- image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
2577
- container_name: ${appName}
2578
- restart: unless-stopped
2579
- ports:
2580
- - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
2581
- environment:
2582
- - NODE_ENV=production
2583
- - PORT=${app.port}
2584
- `;
2585
- for (const dep of app.dependencies) {
2586
- const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
2587
- if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
2588
- `;
2589
- }
2590
- if (app.type === "backend") {
2591
- if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2592
- `;
2593
- if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2594
- `;
2595
- }
2596
- yaml += ` healthcheck:
2597
- test: ${healthCheckCmd}
2598
- interval: 30s
2599
- timeout: 3s
2600
- retries: 3
2601
- `;
2602
- const dependencies$1 = [...app.dependencies];
2603
- if (app.type === "backend") {
2604
- if (hasPostgres) dependencies$1.push("postgres");
2605
- if (hasRedis) dependencies$1.push("redis");
2606
- }
2607
- if (dependencies$1.length > 0) {
2608
- yaml += ` depends_on:
2609
- `;
2610
- for (const dep of dependencies$1) yaml += ` ${dep}:
2611
- condition: service_healthy
2612
- `;
2613
- }
2614
- yaml += ` networks:
2615
- - workspace-network
2616
- `;
2617
- return yaml;
2340
+ function getPostgresId(state) {
2341
+ return state?.services.postgresId;
2618
2342
  }
2619
-
2620
- //#endregion
2621
- //#region src/docker/templates.ts
2622
- const LOCKFILES = [
2623
- ["pnpm-lock.yaml", "pnpm"],
2624
- ["bun.lockb", "bun"],
2625
- ["yarn.lock", "yarn"],
2626
- ["package-lock.json", "npm"]
2627
- ];
2628
2343
  /**
2629
- * Detect package manager from lockfiles
2630
- * Walks up the directory tree to find lockfile (for monorepos)
2344
+ * Set postgres ID in state (mutates state)
2631
2345
  */
2632
- function detectPackageManager$1(cwd = process.cwd()) {
2633
- let dir = cwd;
2634
- const root = parse(dir).root;
2635
- while (dir !== root) {
2636
- for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
2637
- dir = dirname(dir);
2638
- }
2639
- for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
2640
- return "pnpm";
2346
+ function setPostgresId(state, postgresId) {
2347
+ state.services.postgresId = postgresId;
2641
2348
  }
2642
2349
  /**
2643
- * Find the lockfile path by walking up the directory tree
2644
- * Returns the full path to the lockfile, or null if not found
2350
+ * Get redis ID from state
2645
2351
  */
2646
- function findLockfilePath(cwd = process.cwd()) {
2647
- let dir = cwd;
2648
- const root = parse(dir).root;
2649
- while (dir !== root) {
2650
- for (const [lockfile] of LOCKFILES) {
2651
- const lockfilePath = join(dir, lockfile);
2652
- if (existsSync(lockfilePath)) return lockfilePath;
2653
- }
2654
- dir = dirname(dir);
2655
- }
2656
- for (const [lockfile] of LOCKFILES) {
2657
- const lockfilePath = join(root, lockfile);
2658
- if (existsSync(lockfilePath)) return lockfilePath;
2659
- }
2660
- return null;
2352
+ function getRedisId(state) {
2353
+ return state?.services.redisId;
2661
2354
  }
2662
2355
  /**
2663
- * Check if we're in a monorepo (lockfile is in a parent directory)
2356
+ * Set redis ID in state (mutates state)
2664
2357
  */
2665
- function isMonorepo(cwd = process.cwd()) {
2666
- const lockfilePath = findLockfilePath(cwd);
2667
- if (!lockfilePath) return false;
2668
- const lockfileDir = dirname(lockfilePath);
2669
- return lockfileDir !== cwd;
2358
+ function setRedisId(state, redisId) {
2359
+ state.services.redisId = redisId;
2670
2360
  }
2671
2361
  /**
2672
- * Check if turbo.json exists (walks up directory tree)
2362
+ * Set app credentials in state (mutates state)
2673
2363
  */
2674
- function hasTurboConfig(cwd = process.cwd()) {
2675
- let dir = cwd;
2676
- const root = parse(dir).root;
2677
- while (dir !== root) {
2678
- if (existsSync(join(dir, "turbo.json"))) return true;
2679
- dir = dirname(dir);
2680
- }
2681
- return existsSync(join(root, "turbo.json"));
2364
+ function setAppCredentials(state, appName, credentials) {
2365
+ if (!state.appCredentials) state.appCredentials = {};
2366
+ state.appCredentials[appName] = credentials;
2682
2367
  }
2683
2368
  /**
2684
- * Get install command for turbo builds (without frozen lockfile)
2685
- * Turbo prune creates a subset that may not perfectly match the lockfile
2369
+ * Get all app credentials from state
2686
2370
  */
2687
- function getTurboInstallCmd(pm) {
2688
- const commands = {
2689
- pnpm: "pnpm install",
2690
- npm: "npm install",
2691
- yarn: "yarn install",
2692
- bun: "bun install"
2693
- };
2694
- return commands[pm];
2371
+ function getAllAppCredentials(state) {
2372
+ return state?.appCredentials ?? {};
2695
2373
  }
2696
2374
  /**
2697
- * Get package manager specific commands and paths
2375
+ * Get a generated secret for an app
2698
2376
  */
2699
- function getPmConfig(pm) {
2700
- const configs = {
2701
- pnpm: {
2702
- install: "corepack enable && corepack prepare pnpm@latest --activate",
2703
- lockfile: "pnpm-lock.yaml",
2704
- fetch: "pnpm fetch",
2705
- installCmd: "pnpm install --frozen-lockfile --offline",
2706
- cacheTarget: "/root/.local/share/pnpm/store",
2707
- cacheId: "pnpm",
2708
- run: "pnpm",
2709
- exec: "pnpm exec",
2710
- dlx: "pnpm dlx",
2711
- addGlobal: "pnpm add -g"
2712
- },
2713
- npm: {
2714
- install: "",
2715
- lockfile: "package-lock.json",
2716
- fetch: "",
2717
- installCmd: "npm ci",
2718
- cacheTarget: "/root/.npm",
2719
- cacheId: "npm",
2720
- run: "npm run",
2721
- exec: "npx",
2722
- dlx: "npx",
2723
- addGlobal: "npm install -g"
2724
- },
2725
- yarn: {
2726
- install: "corepack enable && corepack prepare yarn@stable --activate",
2727
- lockfile: "yarn.lock",
2728
- fetch: "",
2729
- installCmd: "yarn install --frozen-lockfile",
2730
- cacheTarget: "/root/.yarn/cache",
2731
- cacheId: "yarn",
2732
- run: "yarn",
2733
- exec: "yarn exec",
2734
- dlx: "yarn dlx",
2735
- addGlobal: "yarn global add"
2736
- },
2737
- bun: {
2738
- install: "npm install -g bun",
2739
- lockfile: "bun.lockb",
2740
- fetch: "",
2741
- installCmd: "bun install --frozen-lockfile",
2742
- cacheTarget: "/root/.bun/install/cache",
2743
- cacheId: "bun",
2744
- run: "bun run",
2745
- exec: "bunx",
2746
- dlx: "bunx",
2747
- addGlobal: "bun add -g"
2748
- }
2377
+ function getGeneratedSecret(state, appName, secretName) {
2378
+ return state?.generatedSecrets?.[appName]?.[secretName];
2379
+ }
2380
+ /**
2381
+ * Set a generated secret for an app (mutates state)
2382
+ */
2383
+ function setGeneratedSecret(state, appName, secretName, value) {
2384
+ if (!state.generatedSecrets) state.generatedSecrets = {};
2385
+ if (!state.generatedSecrets[appName]) state.generatedSecrets[appName] = {};
2386
+ state.generatedSecrets[appName][secretName] = value;
2387
+ }
2388
+ /**
2389
+ * Set DNS verification record for a hostname (mutates state)
2390
+ */
2391
+ function setDnsVerification(state, hostname, serverIp) {
2392
+ if (!state.dnsVerified) state.dnsVerified = {};
2393
+ state.dnsVerified[hostname] = {
2394
+ serverIp,
2395
+ verifiedAt: (/* @__PURE__ */ new Date()).toISOString()
2749
2396
  };
2750
- return configs[pm];
2751
2397
  }
2752
2398
  /**
2753
- * Generate a multi-stage Dockerfile for building from source
2754
- * Optimized for build speed with:
2755
- * - BuildKit cache mounts for package manager store
2756
- * - pnpm fetch for better layer caching (when using pnpm)
2757
- * - Optional turbo prune for monorepos
2399
+ * Check if a hostname is already verified with the given IP
2758
2400
  */
2759
- function generateMultiStageDockerfile(options) {
2760
- const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
2761
- if (turbo) return generateTurboDockerfile({
2762
- ...options,
2763
- turboPackage: turboPackage ?? "api"
2764
- });
2765
- const pm = getPmConfig(packageManager);
2766
- const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
2767
- const hasFetch = packageManager === "pnpm";
2768
- const depsStage = hasFetch ? `# Copy lockfile first for better caching
2769
- COPY ${pm.lockfile} ./
2770
-
2771
- # Fetch dependencies (downloads to virtual store, cached separately)
2772
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2773
- ${pm.fetch}
2774
-
2775
- # Copy package.json after fetch
2776
- COPY package.json ./
2777
-
2778
- # Install from cache (fast - no network needed)
2779
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2780
- ${pm.installCmd}` : `# Copy package files
2781
- COPY package.json ${pm.lockfile} ./
2782
-
2783
- # Install dependencies with cache
2784
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2785
- ${pm.installCmd}`;
2786
- return `# syntax=docker/dockerfile:1
2787
- # Stage 1: Dependencies
2788
- FROM ${baseImage} AS deps
2789
-
2790
- WORKDIR /app
2791
- ${installPm}
2792
- ${depsStage}
2793
-
2794
- # Stage 2: Build
2795
- FROM deps AS builder
2796
-
2797
- WORKDIR /app
2798
-
2799
- # Copy source (deps already installed)
2800
- COPY . .
2801
-
2802
- # Debug: Show node_modules/.bin contents and build production server
2803
- RUN echo "=== node_modules/.bin contents ===" && \
2804
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2805
- echo "=== Checking for gkm ===" && \
2806
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2807
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2808
- echo "=== Running build ===" && \
2809
- ./node_modules/.bin/gkm build --provider server --production
2810
-
2811
- # Stage 3: Production
2812
- FROM ${baseImage} AS runner
2813
-
2814
- WORKDIR /app
2815
-
2816
- # Install tini for proper signal handling as PID 1
2817
- RUN apk add --no-cache tini
2818
-
2819
- # Create non-root user
2820
- RUN addgroup --system --gid 1001 nodejs && \\
2821
- adduser --system --uid 1001 hono
2822
-
2823
- # Copy bundled server
2824
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2825
-
2826
- # Environment
2827
- ENV NODE_ENV=production
2828
- ENV PORT=${port}
2829
-
2830
- # Health check
2831
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
2832
- CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
2833
-
2834
- # Switch to non-root user
2835
- USER hono
2401
+ function isDnsVerified(state, hostname, serverIp) {
2402
+ const record = state?.dnsVerified?.[hostname];
2403
+ return record?.serverIp === serverIp;
2404
+ }
2836
2405
 
2837
- EXPOSE ${port}
2406
+ //#endregion
2407
+ //#region src/deploy/dns/hostinger-api.ts
2408
+ /**
2409
+ * Hostinger DNS API client
2410
+ *
2411
+ * API Documentation: https://developers.hostinger.com/
2412
+ * Authentication: Bearer token from hpanel.hostinger.com/profile/api
2413
+ */
2414
+ const HOSTINGER_API_BASE = "https://developers.hostinger.com";
2415
+ /**
2416
+ * Hostinger API error
2417
+ */
2418
+ var HostingerApiError = class extends Error {
2419
+ constructor(message, status, statusText, errors) {
2420
+ super(message);
2421
+ this.status = status;
2422
+ this.statusText = statusText;
2423
+ this.errors = errors;
2424
+ this.name = "HostingerApiError";
2425
+ }
2426
+ };
2427
+ /**
2428
+ * Hostinger DNS API client
2429
+ *
2430
+ * @example
2431
+ * ```ts
2432
+ * const api = new HostingerApi(token);
2433
+ *
2434
+ * // Get all records for a domain
2435
+ * const records = await api.getRecords('traflabs.io');
2436
+ *
2437
+ * // Create/update records
2438
+ * await api.upsertRecords('traflabs.io', [
2439
+ * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
2440
+ * ]);
2441
+ * ```
2442
+ */
2443
+ var HostingerApi = class {
2444
+ token;
2445
+ constructor(token) {
2446
+ this.token = token;
2447
+ }
2448
+ /**
2449
+ * Make a request to the Hostinger API
2450
+ */
2451
+ async request(method, endpoint, body) {
2452
+ const url = `${HOSTINGER_API_BASE}${endpoint}`;
2453
+ const response = await fetch(url, {
2454
+ method,
2455
+ headers: {
2456
+ "Content-Type": "application/json",
2457
+ Authorization: `Bearer ${this.token}`
2458
+ },
2459
+ body: body ? JSON.stringify(body) : void 0
2460
+ });
2461
+ if (!response.ok) {
2462
+ let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
2463
+ let errors;
2464
+ try {
2465
+ const errorBody = await response.json();
2466
+ if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
2467
+ errors = errorBody.errors;
2468
+ } catch {}
2469
+ throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
2470
+ }
2471
+ const text = await response.text();
2472
+ if (!text || text.trim() === "") return void 0;
2473
+ return JSON.parse(text);
2474
+ }
2475
+ /**
2476
+ * Get all DNS records for a domain
2477
+ *
2478
+ * @param domain - Root domain (e.g., 'traflabs.io')
2479
+ */
2480
+ async getRecords(domain) {
2481
+ const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
2482
+ return response.data || [];
2483
+ }
2484
+ /**
2485
+ * Create or update DNS records
2486
+ *
2487
+ * @param domain - Root domain (e.g., 'traflabs.io')
2488
+ * @param records - Records to create/update
2489
+ * @param overwrite - If true, replaces all existing records. If false, merges with existing.
2490
+ */
2491
+ async upsertRecords(domain, records, overwrite = false) {
2492
+ await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
2493
+ overwrite,
2494
+ zone: records
2495
+ });
2496
+ }
2497
+ /**
2498
+ * Validate DNS records before applying
2499
+ *
2500
+ * @param domain - Root domain (e.g., 'traflabs.io')
2501
+ * @param records - Records to validate
2502
+ * @returns true if valid, throws if invalid
2503
+ */
2504
+ async validateRecords(domain, records) {
2505
+ await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
2506
+ overwrite: false,
2507
+ zone: records
2508
+ });
2509
+ return true;
2510
+ }
2511
+ /**
2512
+ * Delete specific DNS records
2513
+ *
2514
+ * @param domain - Root domain (e.g., 'traflabs.io')
2515
+ * @param filters - Filters to match records for deletion
2516
+ */
2517
+ async deleteRecords(domain, filters) {
2518
+ await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
2519
+ }
2520
+ /**
2521
+ * Check if a specific record exists
2522
+ *
2523
+ * @param domain - Root domain (e.g., 'traflabs.io')
2524
+ * @param name - Subdomain name (e.g., 'api.joemoer')
2525
+ * @param type - Record type (e.g., 'A')
2526
+ */
2527
+ async recordExists(domain, name$1, type$1 = "A") {
2528
+ const records = await this.getRecords(domain);
2529
+ return records.some((r) => r.name === name$1 && r.type === type$1);
2530
+ }
2531
+ /**
2532
+ * Create a single A record if it doesn't exist
2533
+ *
2534
+ * @param domain - Root domain (e.g., 'traflabs.io')
2535
+ * @param subdomain - Subdomain name (e.g., 'api.joemoer')
2536
+ * @param ip - IP address to point to
2537
+ * @param ttl - TTL in seconds (default: 300)
2538
+ * @returns true if created, false if already exists
2539
+ */
2540
+ async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
2541
+ const exists = await this.recordExists(domain, subdomain, "A");
2542
+ if (exists) return false;
2543
+ await this.upsertRecords(domain, [{
2544
+ name: subdomain,
2545
+ type: "A",
2546
+ ttl,
2547
+ records: [{ content: ip }]
2548
+ }]);
2549
+ return true;
2550
+ }
2551
+ };
2838
2552
 
2839
- # Use tini as entrypoint to handle PID 1 responsibilities
2840
- ENTRYPOINT ["/sbin/tini", "--"]
2841
- CMD ["node", "server.mjs"]
2842
- `;
2553
+ //#endregion
2554
+ //#region src/deploy/dns/index.ts
2555
+ const logger$6 = console;
2556
+ /**
2557
+ * Resolve IP address from a hostname
2558
+ */
2559
+ async function resolveHostnameToIp(hostname) {
2560
+ try {
2561
+ const addresses = await lookup(hostname, { family: 4 });
2562
+ return addresses.address;
2563
+ } catch (error) {
2564
+ throw new Error(`Failed to resolve IP for ${hostname}: ${error instanceof Error ? error.message : "Unknown error"}`);
2565
+ }
2843
2566
  }
2844
2567
  /**
2845
- * Generate a Dockerfile optimized for Turbo monorepos
2846
- * Uses turbo prune to create minimal Docker context
2568
+ * Extract subdomain from full hostname relative to root domain
2569
+ *
2570
+ * @example
2571
+ * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
2572
+ * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
2847
2573
  */
2848
- function generateTurboDockerfile(options) {
2849
- const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
2850
- const pm = getPmConfig(packageManager);
2851
- const installPm = pm.install ? `RUN ${pm.install}` : "";
2852
- const turboInstallCmd = getTurboInstallCmd(packageManager);
2853
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
2854
- return `# syntax=docker/dockerfile:1
2855
- # Stage 1: Prune monorepo
2856
- FROM ${baseImage} AS pruner
2857
-
2858
- WORKDIR /app
2859
-
2860
- ${installPm}
2861
-
2862
- COPY . .
2863
-
2864
- # Prune to only include necessary packages
2865
- RUN ${turboCmd} prune ${turboPackage} --docker
2866
-
2867
- # Stage 2: Install dependencies
2868
- FROM ${baseImage} AS deps
2869
-
2870
- WORKDIR /app
2871
-
2872
- ${installPm}
2873
-
2874
- # Copy pruned lockfile and package.jsons
2875
- COPY --from=pruner /app/out/${pm.lockfile} ./
2876
- COPY --from=pruner /app/out/json/ ./
2877
-
2878
- # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
2879
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2880
- ${turboInstallCmd}
2881
-
2882
- # Stage 3: Build
2883
- FROM deps AS builder
2884
-
2885
- WORKDIR /app
2886
-
2887
- # Copy pruned source
2888
- COPY --from=pruner /app/out/full/ ./
2889
-
2890
- # Debug: Show node_modules/.bin contents and build production server
2891
- RUN echo "=== node_modules/.bin contents ===" && \
2892
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2893
- echo "=== Checking for gkm ===" && \
2894
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2895
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2896
- echo "=== Running build ===" && \
2897
- ./node_modules/.bin/gkm build --provider server --production
2898
-
2899
- # Stage 4: Production
2900
- FROM ${baseImage} AS runner
2901
-
2902
- WORKDIR /app
2903
-
2904
- RUN apk add --no-cache tini
2905
-
2906
- RUN addgroup --system --gid 1001 nodejs && \\
2907
- adduser --system --uid 1001 hono
2908
-
2909
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2910
-
2911
- ENV NODE_ENV=production
2912
- ENV PORT=${port}
2913
-
2914
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
2915
- CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
2916
-
2917
- USER hono
2918
-
2919
- EXPOSE ${port}
2920
-
2921
- ENTRYPOINT ["/sbin/tini", "--"]
2922
- CMD ["node", "server.mjs"]
2923
- `;
2574
+ function extractSubdomain(hostname, rootDomain) {
2575
+ if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
2576
+ const subdomain = hostname.slice(0, -(rootDomain.length + 1));
2577
+ return subdomain || "@";
2578
+ }
2579
+ /**
2580
+ * Generate required DNS records for a deployment
2581
+ */
2582
+ function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
2583
+ const records = [];
2584
+ for (const [appName, hostname] of appHostnames) {
2585
+ const subdomain = extractSubdomain(hostname, rootDomain);
2586
+ records.push({
2587
+ hostname,
2588
+ subdomain,
2589
+ type: "A",
2590
+ value: serverIp,
2591
+ appName
2592
+ });
2593
+ }
2594
+ return records;
2924
2595
  }
2925
2596
  /**
2926
- * Generate a slim Dockerfile for pre-built bundles
2597
+ * Print DNS records table
2927
2598
  */
2928
- function generateSlimDockerfile(options) {
2929
- const { baseImage, port, healthCheckPath } = options;
2930
- return `# Slim Dockerfile for pre-built production bundle
2931
- FROM ${baseImage}
2932
-
2933
- WORKDIR /app
2934
-
2935
- # Install tini for proper signal handling as PID 1
2936
- # Handles SIGTERM propagation and zombie process reaping
2937
- RUN apk add --no-cache tini
2938
-
2939
- # Create non-root user
2940
- RUN addgroup --system --gid 1001 nodejs && \\
2941
- adduser --system --uid 1001 hono
2942
-
2943
- # Copy pre-built bundle
2944
- COPY .gkm/server/dist/server.mjs ./
2945
-
2946
- # Environment
2947
- ENV NODE_ENV=production
2948
- ENV PORT=${port}
2949
-
2950
- # Health check
2951
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
2952
- CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
2953
-
2954
- # Switch to non-root user
2955
- USER hono
2956
-
2957
- EXPOSE ${port}
2958
-
2959
- # Use tini as entrypoint to handle PID 1 responsibilities
2960
- ENTRYPOINT ["/sbin/tini", "--"]
2961
- CMD ["node", "server.mjs"]
2962
- `;
2599
+ function printDnsRecordsTable(records, rootDomain) {
2600
+ logger$6.log(`\n 📋 DNS Records for ${rootDomain}:`);
2601
+ logger$6.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
2602
+ logger$6.log(" │ Subdomain │ Type │ Value │ Status │");
2603
+ logger$6.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
2604
+ for (const record of records) {
2605
+ const subdomain = record.subdomain.padEnd(35);
2606
+ const type$1 = record.type.padEnd(4);
2607
+ const value = record.value.padEnd(15);
2608
+ let status;
2609
+ if (record.error) status = "✗";
2610
+ else if (record.created) status = "✓ new";
2611
+ else if (record.existed) status = "✓";
2612
+ else status = "?";
2613
+ logger$6.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
2614
+ }
2615
+ logger$6.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
2963
2616
  }
2964
2617
  /**
2965
- * Generate .dockerignore file
2618
+ * Print DNS records in a simple format for manual setup
2966
2619
  */
2967
- function generateDockerignore() {
2968
- return `# Dependencies
2969
- node_modules
2970
- .pnpm-store
2971
-
2972
- # Build output (except what we need)
2973
- .gkm/aws*
2974
- .gkm/server/*.ts
2975
- !.gkm/server/dist
2976
-
2977
- # IDE and editor
2978
- .idea
2979
- .vscode
2980
- *.swp
2981
- *.swo
2982
-
2983
- # Git
2984
- .git
2985
- .gitignore
2986
-
2987
- # Logs
2988
- *.log
2989
- npm-debug.log*
2990
- pnpm-debug.log*
2991
-
2992
- # Test files
2993
- **/*.test.ts
2994
- **/*.spec.ts
2995
- **/__tests__
2996
- coverage
2997
-
2998
- # Documentation
2999
- docs
3000
- *.md
3001
- !README.md
3002
-
3003
- # Environment files (handle secrets separately)
3004
- .env
3005
- .env.*
3006
- !.env.example
3007
-
3008
- # Docker files (don't copy recursively)
3009
- Dockerfile*
3010
- docker-compose*
3011
- .dockerignore
3012
- `;
2620
+ function printDnsRecordsSimple(records, rootDomain) {
2621
+ logger$6.log("\n 📋 Required DNS Records:");
2622
+ logger$6.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
2623
+ for (const record of records) logger$6.log(` ${record.subdomain} → ${record.value} (A record)`);
2624
+ logger$6.log("");
3013
2625
  }
3014
2626
  /**
3015
- * Generate docker-entrypoint.sh for custom startup logic
2627
+ * Prompt for input (reuse from deploy/index.ts pattern)
3016
2628
  */
3017
- function generateDockerEntrypoint() {
3018
- return `#!/bin/sh
3019
- set -e
3020
-
3021
- # Run any custom startup scripts here
3022
- # Example: wait for database
3023
- # until nc -z $DB_HOST $DB_PORT; do
3024
- # echo "Waiting for database..."
3025
- # sleep 1
3026
- # done
3027
-
3028
- # Execute the main command
3029
- exec "$@"
3030
- `;
2629
+ async function promptForToken(message) {
2630
+ const { stdin: stdin$1, stdout: stdout$1 } = await import("node:process");
2631
+ if (!stdin$1.isTTY) throw new Error("Interactive input required for Hostinger token.");
2632
+ stdout$1.write(message);
2633
+ return new Promise((resolve$1) => {
2634
+ let value = "";
2635
+ const onData = (char) => {
2636
+ const c = char.toString();
2637
+ if (c === "\n" || c === "\r") {
2638
+ stdin$1.setRawMode(false);
2639
+ stdin$1.pause();
2640
+ stdin$1.removeListener("data", onData);
2641
+ stdout$1.write("\n");
2642
+ resolve$1(value);
2643
+ } else if (c === "") {
2644
+ stdin$1.setRawMode(false);
2645
+ stdin$1.pause();
2646
+ stdout$1.write("\n");
2647
+ process.exit(1);
2648
+ } else if (c === "" || c === "\b") {
2649
+ if (value.length > 0) value = value.slice(0, -1);
2650
+ } else value += c;
2651
+ };
2652
+ stdin$1.setRawMode(true);
2653
+ stdin$1.resume();
2654
+ stdin$1.on("data", onData);
2655
+ });
3031
2656
  }
3032
2657
  /**
3033
- * Resolve Docker configuration from GkmConfig with defaults
2658
+ * Create DNS records using the configured provider
3034
2659
  */
3035
- function resolveDockerConfig$1(config$1) {
3036
- const docker = config$1.docker ?? {};
3037
- let defaultImageName = "api";
2660
+ async function createDnsRecords(records, dnsConfig) {
2661
+ const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
2662
+ if (provider === "manual") return records.map((r) => ({
2663
+ ...r,
2664
+ created: false,
2665
+ existed: false
2666
+ }));
2667
+ if (provider === "hostinger") return createHostingerRecords(records, rootDomain, ttl);
2668
+ if (provider === "cloudflare") {
2669
+ logger$6.log(" ⚠ Cloudflare DNS integration not yet implemented");
2670
+ return records.map((r) => ({
2671
+ ...r,
2672
+ error: "Cloudflare not implemented"
2673
+ }));
2674
+ }
2675
+ return records;
2676
+ }
2677
+ /**
2678
+ * Create DNS records at Hostinger
2679
+ */
2680
+ async function createHostingerRecords(records, rootDomain, ttl) {
2681
+ let token = await getHostingerToken();
2682
+ if (!token) {
2683
+ logger$6.log("\n 📋 Hostinger API token not found.");
2684
+ logger$6.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
2685
+ try {
2686
+ token = await promptForToken(" Hostinger API Token: ");
2687
+ await storeHostingerToken(token);
2688
+ logger$6.log(" ✓ Token saved");
2689
+ } catch {
2690
+ logger$6.log(" ⚠ Could not get token, skipping DNS creation");
2691
+ return records.map((r) => ({
2692
+ ...r,
2693
+ error: "No API token"
2694
+ }));
2695
+ }
2696
+ }
2697
+ const api = new HostingerApi(token);
2698
+ const results = [];
2699
+ let existingRecords = [];
3038
2700
  try {
3039
- const pkg$1 = __require(`${process.cwd()}/package.json`);
3040
- if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3041
- } catch {}
2701
+ existingRecords = await api.getRecords(rootDomain);
2702
+ } catch (error) {
2703
+ const message = error instanceof Error ? error.message : "Unknown error";
2704
+ logger$6.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
2705
+ return records.map((r) => ({
2706
+ ...r,
2707
+ error: message
2708
+ }));
2709
+ }
2710
+ for (const record of records) {
2711
+ const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
2712
+ if (existing) {
2713
+ results.push({
2714
+ ...record,
2715
+ existed: true,
2716
+ created: false
2717
+ });
2718
+ continue;
2719
+ }
2720
+ try {
2721
+ await api.upsertRecords(rootDomain, [{
2722
+ name: record.subdomain,
2723
+ type: "A",
2724
+ ttl,
2725
+ records: [{ content: record.value }]
2726
+ }]);
2727
+ results.push({
2728
+ ...record,
2729
+ created: true,
2730
+ existed: false
2731
+ });
2732
+ } catch (error) {
2733
+ const message = error instanceof Error ? error.message : "Unknown error";
2734
+ results.push({
2735
+ ...record,
2736
+ error: message
2737
+ });
2738
+ }
2739
+ }
2740
+ return results;
2741
+ }
2742
+ /**
2743
+ * Main DNS orchestration function for deployments
2744
+ */
2745
+ async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
2746
+ if (!dnsConfig) return null;
2747
+ const { domain: rootDomain, autoCreate = true } = dnsConfig;
2748
+ logger$6.log("\n🌐 Setting up DNS records...");
2749
+ let serverIp;
2750
+ try {
2751
+ const endpointUrl = new URL(dokployEndpoint);
2752
+ serverIp = await resolveHostnameToIp(endpointUrl.hostname);
2753
+ logger$6.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
2754
+ } catch (error) {
2755
+ const message = error instanceof Error ? error.message : "Unknown error";
2756
+ logger$6.log(` ⚠ Failed to resolve server IP: ${message}`);
2757
+ return null;
2758
+ }
2759
+ const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
2760
+ if (requiredRecords.length === 0) {
2761
+ logger$6.log(" No DNS records needed");
2762
+ return {
2763
+ records: [],
2764
+ success: true,
2765
+ serverIp
2766
+ };
2767
+ }
2768
+ let finalRecords;
2769
+ if (autoCreate && dnsConfig.provider !== "manual") {
2770
+ logger$6.log(` Creating DNS records at ${dnsConfig.provider}...`);
2771
+ finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
2772
+ const created = finalRecords.filter((r) => r.created).length;
2773
+ const existed = finalRecords.filter((r) => r.existed).length;
2774
+ const failed = finalRecords.filter((r) => r.error).length;
2775
+ if (created > 0) logger$6.log(` ✓ Created ${created} DNS record(s)`);
2776
+ if (existed > 0) logger$6.log(` ✓ ${existed} record(s) already exist`);
2777
+ if (failed > 0) logger$6.log(` ⚠ ${failed} record(s) failed`);
2778
+ } else finalRecords = requiredRecords;
2779
+ printDnsRecordsTable(finalRecords, rootDomain);
2780
+ const hasFailures = finalRecords.some((r) => r.error);
2781
+ if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
3042
2782
  return {
3043
- registry: docker.registry ?? "",
3044
- imageName: docker.imageName ?? defaultImageName,
3045
- baseImage: docker.baseImage ?? "node:22-alpine",
3046
- port: docker.port ?? 3e3,
3047
- compose: docker.compose
2783
+ records: finalRecords,
2784
+ success: !hasFailures,
2785
+ serverIp
3048
2786
  };
3049
2787
  }
3050
2788
  /**
3051
- * Generate a Dockerfile for Next.js frontend apps using standalone output.
3052
- * Uses turbo prune for monorepo optimization.
3053
- * @internal Exported for testing
2789
+ * Verify DNS records resolve correctly after deployment.
2790
+ *
2791
+ * This function:
2792
+ * 1. Checks state for previously verified hostnames (skips if already verified with same IP)
2793
+ * 2. Attempts to resolve each hostname to an IP
2794
+ * 3. Compares resolved IP with expected server IP
2795
+ * 4. Updates state with verification results
2796
+ *
2797
+ * @param appHostnames - Map of app names to hostnames
2798
+ * @param serverIp - Expected IP address the hostnames should resolve to
2799
+ * @param state - Deploy state for caching verification results
2800
+ * @returns Array of verification results
3054
2801
  */
3055
- function generateNextjsDockerfile(options) {
3056
- const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3057
- const pm = getPmConfig(packageManager);
3058
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3059
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3060
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3061
- const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3062
- const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3063
- return `# syntax=docker/dockerfile:1
3064
- # Next.js standalone Dockerfile with turbo prune optimization
3065
-
3066
- # Stage 1: Prune monorepo
3067
- FROM ${baseImage} AS pruner
3068
-
3069
- WORKDIR /app
3070
-
3071
- ${installPm}
3072
-
3073
- COPY . .
3074
-
3075
- # Prune to only include necessary packages
3076
- RUN ${turboCmd} prune ${turboPackage} --docker
3077
-
3078
- # Stage 2: Install dependencies
3079
- FROM ${baseImage} AS deps
3080
-
3081
- WORKDIR /app
3082
-
3083
- ${installPm}
3084
-
3085
- # Copy pruned lockfile and package.jsons
3086
- COPY --from=pruner /app/out/${pm.lockfile} ./
3087
- COPY --from=pruner /app/out/json/ ./
3088
-
3089
- # Install dependencies
3090
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3091
- ${turboInstallCmd}
3092
-
3093
- # Stage 3: Build
3094
- FROM deps AS builder
3095
-
3096
- WORKDIR /app
3097
-
3098
- # Build-time args for public API URLs (populated by gkm deploy)
3099
- # These get baked into the Next.js build as public environment variables
3100
- ${publicUrlArgDeclarations}
3101
-
3102
- # Convert ARGs to ENVs for Next.js build
3103
- ${publicUrlEnvDeclarations}
3104
-
3105
- # Copy pruned source
3106
- COPY --from=pruner /app/out/full/ ./
3107
-
3108
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3109
- # Using wildcard to make it optional for single-app projects
3110
- COPY --from=pruner /app/tsconfig.* ./
3111
-
3112
- # Ensure public directory exists (may be empty for scaffolded projects)
3113
- RUN mkdir -p ${appPath}/public
3114
-
3115
- # Set Next.js to produce standalone output
3116
- ENV NEXT_TELEMETRY_DISABLED=1
3117
-
3118
- # Build the application
3119
- RUN ${turboCmd} run build --filter=${turboPackage}
3120
-
3121
- # Stage 4: Production
3122
- FROM ${baseImage} AS runner
3123
-
3124
- WORKDIR /app
3125
-
3126
- # Install tini for proper signal handling
3127
- RUN apk add --no-cache tini
3128
-
3129
- # Create non-root user
3130
- RUN addgroup --system --gid 1001 nodejs && \\
3131
- adduser --system --uid 1001 nextjs
3132
-
3133
- # Set environment
3134
- ENV NODE_ENV=production
3135
- ENV NEXT_TELEMETRY_DISABLED=1
3136
- ENV PORT=${port}
3137
- ENV HOSTNAME="0.0.0.0"
3138
-
3139
- # Copy static files and standalone output
3140
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3141
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3142
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3143
-
3144
- # Health check
3145
- HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \\
3146
- CMD wget -q --spider http://localhost:${port}/ || exit 1
3147
-
3148
- USER nextjs
2802
+ async function verifyDnsRecords(appHostnames, serverIp, state) {
2803
+ const results = [];
2804
+ logger$6.log("\n🔍 Verifying DNS records...");
2805
+ for (const [appName, hostname] of appHostnames) {
2806
+ if (isDnsVerified(state, hostname, serverIp)) {
2807
+ logger$6.log(` ✓ ${hostname} (previously verified)`);
2808
+ results.push({
2809
+ hostname,
2810
+ appName,
2811
+ verified: true,
2812
+ expectedIp: serverIp,
2813
+ skipped: true
2814
+ });
2815
+ continue;
2816
+ }
2817
+ try {
2818
+ const resolvedIp = await resolveHostnameToIp(hostname);
2819
+ if (resolvedIp === serverIp) {
2820
+ setDnsVerification(state, hostname, serverIp);
2821
+ logger$6.log(` ✓ ${hostname} → ${resolvedIp}`);
2822
+ results.push({
2823
+ hostname,
2824
+ appName,
2825
+ verified: true,
2826
+ resolvedIp,
2827
+ expectedIp: serverIp
2828
+ });
2829
+ } else {
2830
+ logger$6.log(` ⚠ ${hostname} resolves to ${resolvedIp}, expected ${serverIp}`);
2831
+ results.push({
2832
+ hostname,
2833
+ appName,
2834
+ verified: false,
2835
+ resolvedIp,
2836
+ expectedIp: serverIp
2837
+ });
2838
+ }
2839
+ } catch (error) {
2840
+ const message = error instanceof Error ? error.message : "Unknown error";
2841
+ logger$6.log(` ⚠ ${hostname} DNS not propagated (${message})`);
2842
+ results.push({
2843
+ hostname,
2844
+ appName,
2845
+ verified: false,
2846
+ expectedIp: serverIp,
2847
+ error: message
2848
+ });
2849
+ }
2850
+ }
2851
+ const verified = results.filter((r) => r.verified).length;
2852
+ const skipped = results.filter((r) => r.skipped).length;
2853
+ const pending = results.filter((r) => !r.verified).length;
2854
+ if (pending > 0) {
2855
+ logger$6.log(`\n ${verified} verified, ${pending} pending propagation`);
2856
+ logger$6.log(" DNS changes may take 5-30 minutes to propagate");
2857
+ } else if (skipped > 0) logger$6.log(` ${verified} verified (${skipped} from cache)`);
2858
+ return results;
2859
+ }
3149
2860
 
3150
- EXPOSE ${port}
2861
+ //#endregion
2862
+ //#region src/docker/compose.ts
2863
+ /** Default Docker images for services */
2864
+ const DEFAULT_SERVICE_IMAGES = {
2865
+ postgres: "postgres",
2866
+ redis: "redis",
2867
+ rabbitmq: "rabbitmq"
2868
+ };
2869
+ /** Default Docker image versions for services */
2870
+ const DEFAULT_SERVICE_VERSIONS = {
2871
+ postgres: "16-alpine",
2872
+ redis: "7-alpine",
2873
+ rabbitmq: "3-management-alpine"
2874
+ };
2875
+ /** Get the default full image reference for a service */
2876
+ function getDefaultImage(serviceName) {
2877
+ return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
2878
+ }
2879
+ /** Normalize services config to a consistent format - returns Map of service name to full image reference */
2880
+ function normalizeServices(services) {
2881
+ const result = /* @__PURE__ */ new Map();
2882
+ if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
2883
+ else for (const [name$1, config$1] of Object.entries(services)) {
2884
+ const serviceName = name$1;
2885
+ if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
2886
+ else if (config$1 && typeof config$1 === "object") {
2887
+ const serviceConfig = config$1;
2888
+ if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
2889
+ else {
2890
+ const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
2891
+ result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
2892
+ }
2893
+ }
2894
+ }
2895
+ return result;
2896
+ }
2897
+ /**
2898
+ * Generate docker-compose.yml for production deployment
2899
+ */
2900
+ function generateDockerCompose(options) {
2901
+ const { imageName, registry, port, healthCheckPath, services } = options;
2902
+ const serviceMap = normalizeServices(services);
2903
+ const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2904
+ let yaml = `version: '3.8'
3151
2905
 
3152
- ENTRYPOINT ["/sbin/tini", "--"]
3153
- CMD ["node", "${appPath}/server.js"]
2906
+ services:
2907
+ api:
2908
+ build:
2909
+ context: ../..
2910
+ dockerfile: .gkm/docker/Dockerfile
2911
+ image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2912
+ container_name: ${imageName}
2913
+ restart: unless-stopped
2914
+ ports:
2915
+ - "\${PORT:-${port}}:${port}"
2916
+ environment:
2917
+ - NODE_ENV=production
2918
+ `;
2919
+ if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2920
+ `;
2921
+ if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2922
+ `;
2923
+ if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
2924
+ `;
2925
+ yaml += ` healthcheck:
2926
+ test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2927
+ interval: 30s
2928
+ timeout: 3s
2929
+ retries: 3
2930
+ `;
2931
+ if (serviceMap.size > 0) {
2932
+ yaml += ` depends_on:
2933
+ `;
2934
+ for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
2935
+ condition: service_healthy
2936
+ `;
2937
+ }
2938
+ yaml += ` networks:
2939
+ - app-network
2940
+ `;
2941
+ const postgresImage = serviceMap.get("postgres");
2942
+ if (postgresImage) yaml += `
2943
+ postgres:
2944
+ image: ${postgresImage}
2945
+ container_name: postgres
2946
+ restart: unless-stopped
2947
+ environment:
2948
+ POSTGRES_USER: \${POSTGRES_USER:-postgres}
2949
+ POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2950
+ POSTGRES_DB: \${POSTGRES_DB:-app}
2951
+ volumes:
2952
+ - postgres_data:/var/lib/postgresql/data
2953
+ healthcheck:
2954
+ test: ["CMD-SHELL", "pg_isready -U postgres"]
2955
+ interval: 5s
2956
+ timeout: 5s
2957
+ retries: 5
2958
+ networks:
2959
+ - app-network
2960
+ `;
2961
+ const redisImage = serviceMap.get("redis");
2962
+ if (redisImage) yaml += `
2963
+ redis:
2964
+ image: ${redisImage}
2965
+ container_name: redis
2966
+ restart: unless-stopped
2967
+ volumes:
2968
+ - redis_data:/data
2969
+ healthcheck:
2970
+ test: ["CMD", "redis-cli", "ping"]
2971
+ interval: 5s
2972
+ timeout: 5s
2973
+ retries: 5
2974
+ networks:
2975
+ - app-network
2976
+ `;
2977
+ const rabbitmqImage = serviceMap.get("rabbitmq");
2978
+ if (rabbitmqImage) yaml += `
2979
+ rabbitmq:
2980
+ image: ${rabbitmqImage}
2981
+ container_name: rabbitmq
2982
+ restart: unless-stopped
2983
+ environment:
2984
+ RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
2985
+ RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
2986
+ ports:
2987
+ - "15672:15672" # Management UI
2988
+ volumes:
2989
+ - rabbitmq_data:/var/lib/rabbitmq
2990
+ healthcheck:
2991
+ test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
2992
+ interval: 10s
2993
+ timeout: 5s
2994
+ retries: 5
2995
+ networks:
2996
+ - app-network
2997
+ `;
2998
+ yaml += `
2999
+ volumes:
3000
+ `;
3001
+ if (serviceMap.has("postgres")) yaml += ` postgres_data:
3002
+ `;
3003
+ if (serviceMap.has("redis")) yaml += ` redis_data:
3004
+ `;
3005
+ if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
3006
+ `;
3007
+ yaml += `
3008
+ networks:
3009
+ app-network:
3010
+ driver: bridge
3154
3011
  `;
3012
+ return yaml;
3155
3013
  }
3156
3014
  /**
3157
- * Generate a Dockerfile for backend apps in a workspace.
3158
- * Uses turbo prune for monorepo optimization.
3159
- * @internal Exported for testing
3015
+ * Generate a minimal docker-compose.yml for API only
3160
3016
  */
3161
- function generateBackendDockerfile(options) {
3162
- const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3163
- const pm = getPmConfig(packageManager);
3164
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3165
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3166
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3167
- return `# syntax=docker/dockerfile:1
3168
- # Backend Dockerfile with turbo prune optimization
3169
-
3170
- # Stage 1: Prune monorepo
3171
- FROM ${baseImage} AS pruner
3172
-
3173
- WORKDIR /app
3174
-
3175
- ${installPm}
3176
-
3177
- COPY . .
3178
-
3179
- # Prune to only include necessary packages
3180
- RUN ${turboCmd} prune ${turboPackage} --docker
3181
-
3182
- # Stage 2: Install dependencies
3183
- FROM ${baseImage} AS deps
3184
-
3185
- WORKDIR /app
3186
-
3187
- ${installPm}
3188
-
3189
- # Copy pruned lockfile and package.jsons
3190
- COPY --from=pruner /app/out/${pm.lockfile} ./
3191
- COPY --from=pruner /app/out/json/ ./
3192
-
3193
- # Install dependencies
3194
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3195
- ${turboInstallCmd}
3196
-
3197
- # Stage 3: Build
3198
- FROM deps AS builder
3199
-
3200
- WORKDIR /app
3201
-
3202
- # Build-time args for encrypted secrets
3203
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3204
- ARG GKM_CREDENTIALS_IV=""
3205
-
3206
- # Copy pruned source
3207
- COPY --from=pruner /app/out/full/ ./
3208
-
3209
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3210
- # Using wildcard to make it optional for single-app projects
3211
- COPY --from=pruner /app/gkm.config.* ./
3212
- COPY --from=pruner /app/tsconfig.* ./
3213
-
3214
- # Write encrypted credentials for gkm build to embed
3215
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3216
- mkdir -p ${appPath}/.gkm && \
3217
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3218
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3219
- fi
3220
-
3221
- # Build production server using gkm
3222
- RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3223
-
3224
- # Stage 4: Production
3225
- FROM ${baseImage} AS runner
3226
-
3227
- WORKDIR /app
3228
-
3229
- RUN apk add --no-cache tini
3230
-
3231
- RUN addgroup --system --gid 1001 nodejs && \\
3232
- adduser --system --uid 1001 hono
3233
-
3234
- # Copy bundled server
3235
- COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3236
-
3237
- ENV NODE_ENV=production
3238
- ENV PORT=${port}
3239
-
3240
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
3241
- CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
3242
-
3243
- USER hono
3017
+ function generateMinimalDockerCompose(options) {
3018
+ const { imageName, registry, port, healthCheckPath } = options;
3019
+ const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
3020
+ return `version: '3.8'
3244
3021
 
3245
- EXPOSE ${port}
3022
+ services:
3023
+ api:
3024
+ build:
3025
+ context: ../..
3026
+ dockerfile: .gkm/docker/Dockerfile
3027
+ image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
3028
+ container_name: ${imageName}
3029
+ restart: unless-stopped
3030
+ ports:
3031
+ - "\${PORT:-${port}}:${port}"
3032
+ environment:
3033
+ - NODE_ENV=production
3034
+ healthcheck:
3035
+ test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
3036
+ interval: 30s
3037
+ timeout: 3s
3038
+ retries: 3
3039
+ networks:
3040
+ - app-network
3246
3041
 
3247
- ENTRYPOINT ["/sbin/tini", "--"]
3248
- CMD ["node", "server.mjs"]
3042
+ networks:
3043
+ app-network:
3044
+ driver: bridge
3249
3045
  `;
3250
3046
  }
3251
3047
  /**
3252
- * Generate a Dockerfile for apps with a custom entry point.
3253
- * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3254
- * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3048
+ * Generate docker-compose.yml for a workspace with all apps as services.
3049
+ * Apps can communicate with each other via service names.
3255
3050
  * @internal Exported for testing
3256
3051
  */
3257
- function generateEntryDockerfile(options) {
3258
- const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3259
- const pm = getPmConfig(packageManager);
3260
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3261
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3262
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3263
- return `# syntax=docker/dockerfile:1
3264
- # Entry-based Dockerfile with turbo prune + tsdown bundling
3265
-
3266
- # Stage 1: Prune monorepo
3267
- FROM ${baseImage} AS pruner
3268
-
3269
- WORKDIR /app
3270
-
3271
- ${installPm}
3272
-
3273
- COPY . .
3274
-
3275
- # Prune to only include necessary packages
3276
- RUN ${turboCmd} prune ${turboPackage} --docker
3277
-
3278
- # Stage 2: Install dependencies
3279
- FROM ${baseImage} AS deps
3280
-
3281
- WORKDIR /app
3282
-
3283
- ${installPm}
3284
-
3285
- # Copy pruned lockfile and package.jsons
3286
- COPY --from=pruner /app/out/${pm.lockfile} ./
3287
- COPY --from=pruner /app/out/json/ ./
3288
-
3289
- # Install dependencies
3290
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3291
- ${turboInstallCmd}
3292
-
3293
- # Stage 3: Build with tsdown
3294
- FROM deps AS builder
3295
-
3296
- WORKDIR /app
3297
-
3298
- # Build-time args for encrypted secrets
3299
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3300
- ARG GKM_CREDENTIALS_IV=""
3301
-
3302
- # Copy pruned source
3303
- COPY --from=pruner /app/out/full/ ./
3304
-
3305
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3306
- # Using wildcard to make it optional for single-app projects
3307
- COPY --from=pruner /app/tsconfig.* ./
3308
-
3309
- # Write encrypted credentials for tsdown to embed via define
3310
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3311
- mkdir -p ${appPath}/.gkm && \
3312
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3313
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3314
- fi
3315
-
3316
- # Bundle entry point with esbuild (outputs to dist/index.mjs)
3317
- # Creates a fully standalone bundle with all dependencies included
3318
- # Use define to embed credentials if present
3319
- RUN cd ${appPath} && \
3320
- if [ -f .gkm/credentials.enc ]; then \
3321
- CREDS=$(cat .gkm/credentials.enc) && \
3322
- IV=$(cat .gkm/credentials.iv) && \
3323
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3324
- --outfile=dist/index.mjs --packages=bundle \
3325
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3326
- --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3327
- --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3328
- else \
3329
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3330
- --outfile=dist/index.mjs --packages=bundle \
3331
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3332
- fi
3333
-
3334
- # Stage 4: Production
3335
- FROM ${baseImage} AS runner
3336
-
3337
- WORKDIR /app
3338
-
3339
- RUN apk add --no-cache tini
3340
-
3341
- RUN addgroup --system --gid 1001 nodejs && \\
3342
- adduser --system --uid 1001 app
3343
-
3344
- # Copy bundled output only (no node_modules needed - fully bundled)
3345
- COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3346
-
3347
- ENV NODE_ENV=production
3348
- ENV PORT=${port}
3349
-
3350
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
3351
- CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
3352
-
3353
- USER app
3354
-
3355
- EXPOSE ${port}
3052
+ function generateWorkspaceCompose(workspace, options = {}) {
3053
+ const { registry } = options;
3054
+ const apps = Object.entries(workspace.apps);
3055
+ const services = workspace.services;
3056
+ const hasPostgres = services.db !== void 0 && services.db !== false;
3057
+ const hasRedis = services.cache !== void 0 && services.cache !== false;
3058
+ const hasMail = services.mail !== void 0 && services.mail !== false;
3059
+ const postgresImage = getInfraServiceImage("postgres", services.db);
3060
+ const redisImage = getInfraServiceImage("redis", services.cache);
3061
+ let yaml = `# Docker Compose for ${workspace.name} workspace
3062
+ # Generated by gkm - do not edit manually
3356
3063
 
3357
- ENTRYPOINT ["/sbin/tini", "--"]
3358
- CMD ["node", "index.mjs"]
3064
+ services:
3065
+ `;
3066
+ for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
3067
+ registry,
3068
+ hasPostgres,
3069
+ hasRedis
3070
+ });
3071
+ if (hasPostgres) yaml += `
3072
+ postgres:
3073
+ image: ${postgresImage}
3074
+ container_name: ${workspace.name}-postgres
3075
+ restart: unless-stopped
3076
+ environment:
3077
+ POSTGRES_USER: \${POSTGRES_USER:-postgres}
3078
+ POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
3079
+ POSTGRES_DB: \${POSTGRES_DB:-app}
3080
+ volumes:
3081
+ - postgres_data:/var/lib/postgresql/data
3082
+ healthcheck:
3083
+ test: ["CMD-SHELL", "pg_isready -U postgres"]
3084
+ interval: 5s
3085
+ timeout: 5s
3086
+ retries: 5
3087
+ networks:
3088
+ - workspace-network
3089
+ `;
3090
+ if (hasRedis) yaml += `
3091
+ redis:
3092
+ image: ${redisImage}
3093
+ container_name: ${workspace.name}-redis
3094
+ restart: unless-stopped
3095
+ volumes:
3096
+ - redis_data:/data
3097
+ healthcheck:
3098
+ test: ["CMD", "redis-cli", "ping"]
3099
+ interval: 5s
3100
+ timeout: 5s
3101
+ retries: 5
3102
+ networks:
3103
+ - workspace-network
3104
+ `;
3105
+ if (hasMail) yaml += `
3106
+ mailpit:
3107
+ image: axllent/mailpit:latest
3108
+ container_name: ${workspace.name}-mailpit
3109
+ restart: unless-stopped
3110
+ ports:
3111
+ - "8025:8025" # Web UI
3112
+ - "1025:1025" # SMTP
3113
+ networks:
3114
+ - workspace-network
3115
+ `;
3116
+ yaml += `
3117
+ volumes:
3118
+ `;
3119
+ if (hasPostgres) yaml += ` postgres_data:
3120
+ `;
3121
+ if (hasRedis) yaml += ` redis_data:
3122
+ `;
3123
+ yaml += `
3124
+ networks:
3125
+ workspace-network:
3126
+ driver: bridge
3127
+ `;
3128
+ return yaml;
3129
+ }
3130
+ /**
3131
+ * Get infrastructure service image with version.
3132
+ */
3133
+ function getInfraServiceImage(serviceName, config$1) {
3134
+ const defaults = {
3135
+ postgres: "postgres:16-alpine",
3136
+ redis: "redis:7-alpine"
3137
+ };
3138
+ if (!config$1 || config$1 === true) return defaults[serviceName];
3139
+ if (typeof config$1 === "object") {
3140
+ if (config$1.image) return config$1.image;
3141
+ if (config$1.version) {
3142
+ const baseImage = serviceName === "postgres" ? "postgres" : "redis";
3143
+ return `${baseImage}:${config$1.version}`;
3144
+ }
3145
+ }
3146
+ return defaults[serviceName];
3147
+ }
3148
+ /**
3149
+ * Generate a service definition for an app.
3150
+ */
3151
+ function generateAppService(appName, app, allApps, options) {
3152
+ const { registry, hasPostgres, hasRedis } = options;
3153
+ const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
3154
+ const healthCheckPath = app.type === "frontend" ? "/" : "/health";
3155
+ const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
3156
+ let yaml = `
3157
+ ${appName}:
3158
+ build:
3159
+ context: .
3160
+ dockerfile: .gkm/docker/Dockerfile.${appName}
3161
+ image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
3162
+ container_name: ${appName}
3163
+ restart: unless-stopped
3164
+ ports:
3165
+ - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
3166
+ environment:
3167
+ - NODE_ENV=production
3168
+ - PORT=${app.port}
3169
+ `;
3170
+ for (const dep of app.dependencies) {
3171
+ const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
3172
+ if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
3173
+ `;
3174
+ }
3175
+ if (app.type === "backend") {
3176
+ if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
3177
+ `;
3178
+ if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
3179
+ `;
3180
+ }
3181
+ yaml += ` healthcheck:
3182
+ test: ${healthCheckCmd}
3183
+ interval: 30s
3184
+ timeout: 3s
3185
+ retries: 3
3186
+ `;
3187
+ const dependencies$1 = [...app.dependencies];
3188
+ if (app.type === "backend") {
3189
+ if (hasPostgres) dependencies$1.push("postgres");
3190
+ if (hasRedis) dependencies$1.push("redis");
3191
+ }
3192
+ if (dependencies$1.length > 0) {
3193
+ yaml += ` depends_on:
3194
+ `;
3195
+ for (const dep of dependencies$1) yaml += ` ${dep}:
3196
+ condition: service_healthy
3197
+ `;
3198
+ }
3199
+ yaml += ` networks:
3200
+ - workspace-network
3359
3201
  `;
3202
+ return yaml;
3360
3203
  }
3361
3204
 
3362
3205
  //#endregion
3363
- //#region src/docker/index.ts
3364
- const logger$6 = console;
3206
+ //#region src/docker/templates.ts
3207
+ const LOCKFILES = [
3208
+ ["pnpm-lock.yaml", "pnpm"],
3209
+ ["bun.lockb", "bun"],
3210
+ ["yarn.lock", "yarn"],
3211
+ ["package-lock.json", "npm"]
3212
+ ];
3365
3213
  /**
3366
- * Docker command implementation
3367
- * Generates Dockerfile, docker-compose.yml, and related files
3368
- *
3369
- * Default: Multi-stage Dockerfile that builds from source inside Docker
3370
- * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3214
+ * Detect package manager from lockfiles
3215
+ * Walks up the directory tree to find lockfile (for monorepos)
3371
3216
  */
3372
- async function dockerCommand(options) {
3373
- const loadedConfig = await loadWorkspaceConfig();
3374
- if (loadedConfig.type === "workspace") {
3375
- logger$6.log("📦 Detected workspace configuration");
3376
- return workspaceDockerCommand(loadedConfig.workspace, options);
3377
- }
3378
- const config$1 = await loadConfig();
3379
- const dockerConfig = resolveDockerConfig$1(config$1);
3380
- const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
3381
- const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3382
- const useSlim = options.slim === true;
3383
- if (useSlim) {
3384
- const distDir = join(process.cwd(), ".gkm", "server", "dist");
3385
- const hasBuild = existsSync(join(distDir, "server.mjs"));
3386
- if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3217
+ function detectPackageManager$1(cwd = process.cwd()) {
3218
+ let dir = cwd;
3219
+ const root = parse(dir).root;
3220
+ while (dir !== root) {
3221
+ for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
3222
+ dir = dirname(dir);
3387
3223
  }
3388
- const dockerDir = join(process.cwd(), ".gkm", "docker");
3389
- await mkdir(dockerDir, { recursive: true });
3390
- const packageManager = detectPackageManager$1();
3391
- const inMonorepo = isMonorepo();
3392
- const hasTurbo = hasTurboConfig();
3393
- let useTurbo = options.turbo ?? false;
3394
- if (inMonorepo && !useSlim) if (hasTurbo) {
3395
- useTurbo = true;
3396
- logger$6.log(" Detected monorepo with turbo.json - using turbo prune");
3397
- } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3398
- let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3399
- if (useTurbo && !options.turboPackage) try {
3400
- const pkg$1 = __require(`${process.cwd()}/package.json`);
3401
- if (pkg$1.name) {
3402
- turboPackage = pkg$1.name;
3403
- logger$6.log(` Turbo package: ${turboPackage}`);
3224
+ for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
3225
+ return "pnpm";
3226
+ }
3227
+ /**
3228
+ * Find the lockfile path by walking up the directory tree
3229
+ * Returns the full path to the lockfile, or null if not found
3230
+ */
3231
+ function findLockfilePath(cwd = process.cwd()) {
3232
+ let dir = cwd;
3233
+ const root = parse(dir).root;
3234
+ while (dir !== root) {
3235
+ for (const [lockfile] of LOCKFILES) {
3236
+ const lockfilePath = join(dir, lockfile);
3237
+ if (existsSync(lockfilePath)) return lockfilePath;
3404
3238
  }
3405
- } catch {}
3406
- const templateOptions = {
3407
- imageName: dockerConfig.imageName,
3408
- baseImage: dockerConfig.baseImage,
3409
- port: dockerConfig.port,
3410
- healthCheckPath,
3411
- prebuilt: useSlim,
3412
- turbo: useTurbo,
3413
- turboPackage,
3414
- packageManager
3415
- };
3416
- const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3417
- const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3418
- const dockerfilePath = join(dockerDir, "Dockerfile");
3419
- await writeFile(dockerfilePath, dockerfile);
3420
- logger$6.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
3421
- const composeOptions = {
3422
- imageName: dockerConfig.imageName,
3423
- registry: options.registry ?? dockerConfig.registry,
3424
- port: dockerConfig.port,
3425
- healthCheckPath,
3426
- services: dockerConfig.compose?.services ?? {}
3427
- };
3428
- const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
3429
- const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
3430
- const composePath = join(dockerDir, "docker-compose.yml");
3431
- await writeFile(composePath, dockerCompose);
3432
- logger$6.log("Generated: .gkm/docker/docker-compose.yml");
3433
- const dockerignore = generateDockerignore();
3434
- const dockerignorePath = join(process.cwd(), ".dockerignore");
3435
- await writeFile(dockerignorePath, dockerignore);
3436
- logger$6.log("Generated: .dockerignore (project root)");
3437
- const entrypoint = generateDockerEntrypoint();
3438
- const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
3439
- await writeFile(entrypointPath, entrypoint);
3440
- logger$6.log("Generated: .gkm/docker/docker-entrypoint.sh");
3441
- const result = {
3442
- dockerfile: dockerfilePath,
3443
- dockerCompose: composePath,
3444
- dockerignore: dockerignorePath,
3445
- entrypoint: entrypointPath
3446
- };
3447
- if (options.build) await buildDockerImage(dockerConfig.imageName, options);
3448
- if (options.push) await pushDockerImage(dockerConfig.imageName, options);
3449
- return result;
3239
+ dir = dirname(dir);
3240
+ }
3241
+ for (const [lockfile] of LOCKFILES) {
3242
+ const lockfilePath = join(root, lockfile);
3243
+ if (existsSync(lockfilePath)) return lockfilePath;
3244
+ }
3245
+ return null;
3450
3246
  }
3451
3247
  /**
3452
- * Ensure lockfile exists in the build context
3453
- * For monorepos, copies from workspace root if needed
3454
- * Returns cleanup function if file was copied
3248
+ * Check if we're in a monorepo (lockfile is in a parent directory)
3455
3249
  */
3456
- function ensureLockfile(cwd) {
3250
+ function isMonorepo(cwd = process.cwd()) {
3457
3251
  const lockfilePath = findLockfilePath(cwd);
3458
- if (!lockfilePath) {
3459
- logger$6.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
3460
- return null;
3461
- }
3462
- const lockfileName = basename(lockfilePath);
3463
- const localLockfile = join(cwd, lockfileName);
3464
- if (lockfilePath === localLockfile) return null;
3465
- logger$6.log(` Copying ${lockfileName} from monorepo root...`);
3466
- copyFileSync(lockfilePath, localLockfile);
3467
- return () => {
3468
- try {
3469
- unlinkSync(localLockfile);
3470
- } catch {}
3471
- };
3252
+ if (!lockfilePath) return false;
3253
+ const lockfileDir = dirname(lockfilePath);
3254
+ return lockfileDir !== cwd;
3472
3255
  }
3473
3256
  /**
3474
- * Build Docker image
3475
- * Uses BuildKit for cache mount support
3257
+ * Check if turbo.json exists (walks up directory tree)
3476
3258
  */
3477
- async function buildDockerImage(imageName, options) {
3478
- const tag = options.tag ?? "latest";
3479
- const registry = options.registry;
3480
- const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
3481
- logger$6.log(`\n🐳 Building Docker image: ${fullImageName}`);
3482
- const cwd = process.cwd();
3483
- const cleanup = ensureLockfile(cwd);
3484
- try {
3485
- execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
3486
- cwd,
3487
- stdio: "inherit",
3488
- env: {
3489
- ...process.env,
3490
- DOCKER_BUILDKIT: "1"
3491
- }
3492
- });
3493
- logger$6.log(`✅ Docker image built: ${fullImageName}`);
3494
- } catch (error) {
3495
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3496
- } finally {
3497
- cleanup?.();
3259
+ function hasTurboConfig(cwd = process.cwd()) {
3260
+ let dir = cwd;
3261
+ const root = parse(dir).root;
3262
+ while (dir !== root) {
3263
+ if (existsSync(join(dir, "turbo.json"))) return true;
3264
+ dir = dirname(dir);
3498
3265
  }
3266
+ return existsSync(join(root, "turbo.json"));
3499
3267
  }
3500
3268
  /**
3501
- * Push Docker image to registry
3269
+ * Get install command for turbo builds (without frozen lockfile)
3270
+ * Turbo prune creates a subset that may not perfectly match the lockfile
3502
3271
  */
3503
- async function pushDockerImage(imageName, options) {
3504
- const tag = options.tag ?? "latest";
3505
- const registry = options.registry;
3506
- if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
3507
- const fullImageName = `${registry}/${imageName}:${tag}`;
3508
- logger$6.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
3509
- try {
3510
- execSync(`docker push ${fullImageName}`, {
3511
- cwd: process.cwd(),
3512
- stdio: "inherit"
3513
- });
3514
- logger$6.log(`✅ Docker image pushed: ${fullImageName}`);
3515
- } catch (error) {
3516
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3517
- }
3272
+ function getTurboInstallCmd(pm) {
3273
+ const commands = {
3274
+ pnpm: "pnpm install",
3275
+ npm: "npm install",
3276
+ yarn: "yarn install",
3277
+ bun: "bun install"
3278
+ };
3279
+ return commands[pm];
3518
3280
  }
3519
3281
  /**
3520
- * Get the package name from package.json in an app directory.
3282
+ * Get package manager specific commands and paths
3521
3283
  */
3522
- function getAppPackageName(appPath) {
3523
- try {
3524
- const pkgPath = join(appPath, "package.json");
3525
- if (!existsSync(pkgPath)) return void 0;
3526
- const content = readFileSync(pkgPath, "utf-8");
3527
- const pkg$1 = JSON.parse(content);
3528
- return pkg$1.name;
3529
- } catch {
3530
- return void 0;
3531
- }
3284
+ function getPmConfig(pm) {
3285
+ const configs = {
3286
+ pnpm: {
3287
+ install: "corepack enable && corepack prepare pnpm@latest --activate",
3288
+ lockfile: "pnpm-lock.yaml",
3289
+ fetch: "pnpm fetch",
3290
+ installCmd: "pnpm install --frozen-lockfile --offline",
3291
+ cacheTarget: "/root/.local/share/pnpm/store",
3292
+ cacheId: "pnpm",
3293
+ run: "pnpm",
3294
+ exec: "pnpm exec",
3295
+ dlx: "pnpm dlx",
3296
+ addGlobal: "pnpm add -g"
3297
+ },
3298
+ npm: {
3299
+ install: "",
3300
+ lockfile: "package-lock.json",
3301
+ fetch: "",
3302
+ installCmd: "npm ci",
3303
+ cacheTarget: "/root/.npm",
3304
+ cacheId: "npm",
3305
+ run: "npm run",
3306
+ exec: "npx",
3307
+ dlx: "npx",
3308
+ addGlobal: "npm install -g"
3309
+ },
3310
+ yarn: {
3311
+ install: "corepack enable && corepack prepare yarn@stable --activate",
3312
+ lockfile: "yarn.lock",
3313
+ fetch: "",
3314
+ installCmd: "yarn install --frozen-lockfile",
3315
+ cacheTarget: "/root/.yarn/cache",
3316
+ cacheId: "yarn",
3317
+ run: "yarn",
3318
+ exec: "yarn exec",
3319
+ dlx: "yarn dlx",
3320
+ addGlobal: "yarn global add"
3321
+ },
3322
+ bun: {
3323
+ install: "npm install -g bun",
3324
+ lockfile: "bun.lockb",
3325
+ fetch: "",
3326
+ installCmd: "bun install --frozen-lockfile",
3327
+ cacheTarget: "/root/.bun/install/cache",
3328
+ cacheId: "bun",
3329
+ run: "bun run",
3330
+ exec: "bunx",
3331
+ dlx: "bunx",
3332
+ addGlobal: "bun add -g"
3333
+ }
3334
+ };
3335
+ return configs[pm];
3532
3336
  }
3533
3337
  /**
3534
- * Generate Dockerfiles for all apps in a workspace.
3535
- * @internal Exported for testing
3338
+ * Generate a multi-stage Dockerfile for building from source
3339
+ * Optimized for build speed with:
3340
+ * - BuildKit cache mounts for package manager store
3341
+ * - pnpm fetch for better layer caching (when using pnpm)
3342
+ * - Optional turbo prune for monorepos
3536
3343
  */
3537
- async function workspaceDockerCommand(workspace, options) {
3538
- const results = [];
3539
- const apps = Object.entries(workspace.apps);
3540
- logger$6.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
3541
- const dockerDir = join(workspace.root, ".gkm", "docker");
3542
- await mkdir(dockerDir, { recursive: true });
3543
- const packageManager = detectPackageManager$1(workspace.root);
3544
- logger$6.log(` Package manager: ${packageManager}`);
3545
- for (const [appName, app] of apps) {
3546
- const appPath = app.path;
3547
- const fullAppPath = join(workspace.root, appPath);
3548
- const turboPackage = getAppPackageName(fullAppPath) ?? appName;
3549
- const imageName = appName;
3550
- const hasEntry = !!app.entry;
3551
- const buildType = hasEntry ? "entry" : app.type;
3552
- logger$6.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
3553
- let dockerfile;
3554
- if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
3555
- imageName,
3556
- baseImage: "node:22-alpine",
3557
- port: app.port,
3558
- appPath,
3559
- turboPackage,
3560
- packageManager
3561
- });
3562
- else if (app.entry) dockerfile = generateEntryDockerfile({
3563
- imageName,
3564
- baseImage: "node:22-alpine",
3565
- port: app.port,
3566
- appPath,
3567
- entry: app.entry,
3568
- turboPackage,
3569
- packageManager,
3570
- healthCheckPath: "/health"
3571
- });
3572
- else dockerfile = generateBackendDockerfile({
3573
- imageName,
3574
- baseImage: "node:22-alpine",
3575
- port: app.port,
3576
- appPath,
3577
- turboPackage,
3578
- packageManager,
3579
- healthCheckPath: "/health"
3580
- });
3581
- const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
3582
- await writeFile(dockerfilePath, dockerfile);
3583
- logger$6.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
3584
- results.push({
3585
- appName,
3586
- type: app.type,
3587
- dockerfile: dockerfilePath,
3588
- imageName
3589
- });
3590
- }
3591
- const dockerignore = generateDockerignore();
3592
- const dockerignorePath = join(workspace.root, ".dockerignore");
3593
- await writeFile(dockerignorePath, dockerignore);
3594
- logger$6.log(`\n Generated: .dockerignore (workspace root)`);
3595
- const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
3596
- const composePath = join(dockerDir, "docker-compose.yml");
3597
- await writeFile(composePath, dockerCompose);
3598
- logger$6.log(` Generated: .gkm/docker/docker-compose.yml`);
3599
- logger$6.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
3600
- logger$6.log("\n📋 Build commands:");
3601
- for (const result of results) {
3602
- const icon = result.type === "backend" ? "⚙️" : "🌐";
3603
- logger$6.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
3604
- }
3605
- logger$6.log("\n📋 Run all services:");
3606
- logger$6.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
3607
- return {
3608
- apps: results,
3609
- dockerCompose: composePath,
3610
- dockerignore: dockerignorePath
3611
- };
3612
- }
3344
+ function generateMultiStageDockerfile(options) {
3345
+ const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
3346
+ if (turbo) return generateTurboDockerfile({
3347
+ ...options,
3348
+ turboPackage: turboPackage ?? "api"
3349
+ });
3350
+ const pm = getPmConfig(packageManager);
3351
+ const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
3352
+ const hasFetch = packageManager === "pnpm";
3353
+ const depsStage = hasFetch ? `# Copy lockfile first for better caching
3354
+ COPY ${pm.lockfile} ./
3355
+
3356
+ # Fetch dependencies (downloads to virtual store, cached separately)
3357
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3358
+ ${pm.fetch}
3359
+
3360
+ # Copy package.json after fetch
3361
+ COPY package.json ./
3362
+
3363
+ # Install from cache (fast - no network needed)
3364
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3365
+ ${pm.installCmd}` : `# Copy package files
3366
+ COPY package.json ${pm.lockfile} ./
3367
+
3368
+ # Install dependencies with cache
3369
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3370
+ ${pm.installCmd}`;
3371
+ return `# syntax=docker/dockerfile:1
3372
+ # Stage 1: Dependencies
3373
+ FROM ${baseImage} AS deps
3374
+
3375
+ WORKDIR /app
3376
+ ${installPm}
3377
+ ${depsStage}
3378
+
3379
+ # Stage 2: Build
3380
+ FROM deps AS builder
3381
+
3382
+ WORKDIR /app
3383
+
3384
+ # Copy source (deps already installed)
3385
+ COPY . .
3386
+
3387
+ # Debug: Show node_modules/.bin contents and build production server
3388
+ RUN echo "=== node_modules/.bin contents ===" && \
3389
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3390
+ echo "=== Checking for gkm ===" && \
3391
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3392
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3393
+ echo "=== Running build ===" && \
3394
+ ./node_modules/.bin/gkm build --provider server --production
3613
3395
 
3614
- //#endregion
3615
- //#region src/deploy/docker.ts
3616
- /**
3617
- * Get app name from package.json in the current working directory
3618
- * Used for Dokploy app/project naming
3619
- */
3620
- function getAppNameFromCwd$1() {
3621
- const packageJsonPath = join(process.cwd(), "package.json");
3622
- if (!existsSync(packageJsonPath)) return void 0;
3623
- try {
3624
- const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
3625
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3626
- } catch {}
3627
- return void 0;
3396
+ # Stage 3: Production
3397
+ FROM ${baseImage} AS runner
3398
+
3399
+ WORKDIR /app
3400
+
3401
+ # Install tini for proper signal handling as PID 1
3402
+ RUN apk add --no-cache tini
3403
+
3404
+ # Create non-root user
3405
+ RUN addgroup --system --gid 1001 nodejs && \\
3406
+ adduser --system --uid 1001 hono
3407
+
3408
+ # Copy bundled server
3409
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3410
+
3411
+ # Environment
3412
+ ENV NODE_ENV=production
3413
+ ENV PORT=${port}
3414
+
3415
+ # Health check
3416
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3417
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3418
+
3419
+ # Switch to non-root user
3420
+ USER hono
3421
+
3422
+ EXPOSE ${port}
3423
+
3424
+ # Use tini as entrypoint to handle PID 1 responsibilities
3425
+ ENTRYPOINT ["/sbin/tini", "--"]
3426
+ CMD ["node", "server.mjs"]
3427
+ `;
3628
3428
  }
3629
3429
  /**
3630
- * Get app name from package.json adjacent to the lockfile (project root)
3631
- * Used for Docker image naming
3430
+ * Generate a Dockerfile optimized for Turbo monorepos
3431
+ * Uses turbo prune to create minimal Docker context
3632
3432
  */
3633
- function getAppNameFromPackageJson() {
3634
- const cwd = process.cwd();
3635
- const lockfilePath = findLockfilePath(cwd);
3636
- if (!lockfilePath) return void 0;
3637
- const projectRoot = dirname(lockfilePath);
3638
- const packageJsonPath = join(projectRoot, "package.json");
3639
- if (!existsSync(packageJsonPath)) return void 0;
3640
- try {
3641
- const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
3642
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3643
- } catch {}
3644
- return void 0;
3433
+ function generateTurboDockerfile(options) {
3434
+ const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
3435
+ const pm = getPmConfig(packageManager);
3436
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3437
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3438
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3439
+ return `# syntax=docker/dockerfile:1
3440
+ # Stage 1: Prune monorepo
3441
+ FROM ${baseImage} AS pruner
3442
+
3443
+ WORKDIR /app
3444
+
3445
+ ${installPm}
3446
+
3447
+ COPY . .
3448
+
3449
+ # Prune to only include necessary packages
3450
+ RUN ${turboCmd} prune ${turboPackage} --docker
3451
+
3452
+ # Stage 2: Install dependencies
3453
+ FROM ${baseImage} AS deps
3454
+
3455
+ WORKDIR /app
3456
+
3457
+ ${installPm}
3458
+
3459
+ # Copy pruned lockfile and package.jsons
3460
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3461
+ COPY --from=pruner /app/out/json/ ./
3462
+
3463
+ # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
3464
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3465
+ ${turboInstallCmd}
3466
+
3467
+ # Stage 3: Build
3468
+ FROM deps AS builder
3469
+
3470
+ WORKDIR /app
3471
+
3472
+ # Copy pruned source
3473
+ COPY --from=pruner /app/out/full/ ./
3474
+
3475
+ # Debug: Show node_modules/.bin contents and build production server
3476
+ RUN echo "=== node_modules/.bin contents ===" && \
3477
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3478
+ echo "=== Checking for gkm ===" && \
3479
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3480
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3481
+ echo "=== Running build ===" && \
3482
+ ./node_modules/.bin/gkm build --provider server --production
3483
+
3484
+ # Stage 4: Production
3485
+ FROM ${baseImage} AS runner
3486
+
3487
+ WORKDIR /app
3488
+
3489
+ RUN apk add --no-cache tini
3490
+
3491
+ RUN addgroup --system --gid 1001 nodejs && \\
3492
+ adduser --system --uid 1001 hono
3493
+
3494
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3495
+
3496
+ ENV NODE_ENV=production
3497
+ ENV PORT=${port}
3498
+
3499
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3500
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3501
+
3502
+ USER hono
3503
+
3504
+ EXPOSE ${port}
3505
+
3506
+ ENTRYPOINT ["/sbin/tini", "--"]
3507
+ CMD ["node", "server.mjs"]
3508
+ `;
3645
3509
  }
3646
- const logger$5 = console;
3647
3510
  /**
3648
- * Get the full image reference
3511
+ * Generate a slim Dockerfile for pre-built bundles
3649
3512
  */
3650
- function getImageRef(registry, imageName, tag) {
3651
- if (registry) return `${registry}/${imageName}:${tag}`;
3652
- return `${imageName}:${tag}`;
3513
+ function generateSlimDockerfile(options) {
3514
+ const { baseImage, port, healthCheckPath } = options;
3515
+ return `# Slim Dockerfile for pre-built production bundle
3516
+ FROM ${baseImage}
3517
+
3518
+ WORKDIR /app
3519
+
3520
+ # Install tini for proper signal handling as PID 1
3521
+ # Handles SIGTERM propagation and zombie process reaping
3522
+ RUN apk add --no-cache tini
3523
+
3524
+ # Create non-root user
3525
+ RUN addgroup --system --gid 1001 nodejs && \\
3526
+ adduser --system --uid 1001 hono
3527
+
3528
+ # Copy pre-built bundle
3529
+ COPY .gkm/server/dist/server.mjs ./
3530
+
3531
+ # Environment
3532
+ ENV NODE_ENV=production
3533
+ ENV PORT=${port}
3534
+
3535
+ # Health check
3536
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3537
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3538
+
3539
+ # Switch to non-root user
3540
+ USER hono
3541
+
3542
+ EXPOSE ${port}
3543
+
3544
+ # Use tini as entrypoint to handle PID 1 responsibilities
3545
+ ENTRYPOINT ["/sbin/tini", "--"]
3546
+ CMD ["node", "server.mjs"]
3547
+ `;
3653
3548
  }
3654
3549
  /**
3655
- * Build Docker image
3656
- * @param imageRef - Full image reference (registry/name:tag)
3657
- * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
3658
- * @param buildArgs - Build arguments to pass to docker build
3550
+ * Generate .dockerignore file
3659
3551
  */
3660
- async function buildImage(imageRef, appName, buildArgs) {
3661
- logger$5.log(`\n🔨 Building Docker image: ${imageRef}`);
3662
- const cwd = process.cwd();
3663
- const lockfilePath = findLockfilePath(cwd);
3664
- const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
3665
- const inMonorepo = lockfileDir !== cwd;
3666
- if (appName || inMonorepo) logger$5.log(" Generating Dockerfile for monorepo (turbo prune)...");
3667
- else logger$5.log(" Generating Dockerfile...");
3668
- await dockerCommand({});
3669
- const dockerfileSuffix = appName ? `.${appName}` : "";
3670
- const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
3671
- const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
3672
- if (buildCwd !== cwd) logger$5.log(` Building from workspace root: ${buildCwd}`);
3673
- const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
3674
- try {
3675
- const cmd = [
3676
- "DOCKER_BUILDKIT=1 docker build",
3677
- "--platform linux/amd64",
3678
- `-f ${dockerfilePath}`,
3679
- `-t ${imageRef}`,
3680
- buildArgsString,
3681
- "."
3682
- ].filter(Boolean).join(" ");
3683
- execSync(cmd, {
3684
- cwd: buildCwd,
3685
- stdio: "inherit",
3686
- env: {
3687
- ...process.env,
3688
- DOCKER_BUILDKIT: "1"
3689
- }
3690
- });
3691
- logger$5.log(`✅ Image built: ${imageRef}`);
3692
- } catch (error) {
3693
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3694
- }
3552
+ function generateDockerignore() {
3553
+ return `# Dependencies
3554
+ node_modules
3555
+ .pnpm-store
3556
+
3557
+ # Build output (except what we need)
3558
+ .gkm/aws*
3559
+ .gkm/server/*.ts
3560
+ !.gkm/server/dist
3561
+
3562
+ # IDE and editor
3563
+ .idea
3564
+ .vscode
3565
+ *.swp
3566
+ *.swo
3567
+
3568
+ # Git
3569
+ .git
3570
+ .gitignore
3571
+
3572
+ # Logs
3573
+ *.log
3574
+ npm-debug.log*
3575
+ pnpm-debug.log*
3576
+
3577
+ # Test files
3578
+ **/*.test.ts
3579
+ **/*.spec.ts
3580
+ **/__tests__
3581
+ coverage
3582
+
3583
+ # Documentation
3584
+ docs
3585
+ *.md
3586
+ !README.md
3587
+
3588
+ # Environment files (handle secrets separately)
3589
+ .env
3590
+ .env.*
3591
+ !.env.example
3592
+
3593
+ # Docker files (don't copy recursively)
3594
+ Dockerfile*
3595
+ docker-compose*
3596
+ .dockerignore
3597
+ `;
3695
3598
  }
3696
3599
  /**
3697
- * Push Docker image to registry
3600
+ * Generate docker-entrypoint.sh for custom startup logic
3698
3601
  */
3699
- async function pushImage(imageRef) {
3700
- logger$5.log(`\n☁️ Pushing image: ${imageRef}`);
3701
- try {
3702
- execSync(`docker push ${imageRef}`, {
3703
- cwd: process.cwd(),
3704
- stdio: "inherit"
3705
- });
3706
- logger$5.log(`✅ Image pushed: ${imageRef}`);
3707
- } catch (error) {
3708
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3709
- }
3602
+ function generateDockerEntrypoint() {
3603
+ return `#!/bin/sh
3604
+ set -e
3605
+
3606
+ # Run any custom startup scripts here
3607
+ # Example: wait for database
3608
+ # until nc -z $DB_HOST $DB_PORT; do
3609
+ # echo "Waiting for database..."
3610
+ # sleep 1
3611
+ # done
3612
+
3613
+ # Execute the main command
3614
+ exec "$@"
3615
+ `;
3710
3616
  }
3711
3617
  /**
3712
- * Deploy using Docker (build and optionally push image)
3618
+ * Resolve Docker configuration from GkmConfig with defaults
3713
3619
  */
3714
- async function deployDocker(options) {
3715
- const { stage, tag, skipPush, masterKey, config: config$1, buildArgs } = options;
3716
- const imageName = config$1.imageName;
3717
- const imageRef = getImageRef(config$1.registry, imageName, tag);
3718
- await buildImage(imageRef, config$1.appName, buildArgs);
3719
- if (!skipPush) if (!config$1.registry) logger$5.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
3720
- else await pushImage(imageRef);
3721
- logger$5.log("\n✅ Docker deployment ready!");
3722
- logger$5.log(`\n📋 Deployment details:`);
3723
- logger$5.log(` Image: ${imageRef}`);
3724
- logger$5.log(` Stage: ${stage}`);
3725
- if (masterKey) {
3726
- logger$5.log(`\n🔐 Deploy with this environment variable:`);
3727
- logger$5.log(` GKM_MASTER_KEY=${masterKey}`);
3728
- logger$5.log("\n Example docker run:");
3729
- logger$5.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
3730
- }
3620
+ function resolveDockerConfig$1(config$1) {
3621
+ const docker = config$1.docker ?? {};
3622
+ let defaultImageName = "api";
3623
+ try {
3624
+ const pkg$1 = __require(`${process.cwd()}/package.json`);
3625
+ if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3626
+ } catch {}
3731
3627
  return {
3732
- imageRef,
3733
- masterKey
3628
+ registry: docker.registry ?? "",
3629
+ imageName: docker.imageName ?? defaultImageName,
3630
+ baseImage: docker.baseImage ?? "node:22-alpine",
3631
+ port: docker.port ?? 3e3,
3632
+ compose: docker.compose
3734
3633
  };
3735
3634
  }
3736
3635
  /**
3737
- * Resolve Docker deploy config from gkm config
3738
- * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
3739
- * - projectName: from root package.json, or 'app' (for Dokploy project)
3740
- * - appName: from cwd package.json, or projectName (for Dokploy app within project)
3636
+ * Generate a Dockerfile for Next.js frontend apps using standalone output.
3637
+ * Uses turbo prune for monorepo optimization.
3638
+ * @internal Exported for testing
3741
3639
  */
3742
- function resolveDockerConfig(config$1) {
3743
- const projectName = getAppNameFromPackageJson() ?? "app";
3744
- const appName = getAppNameFromCwd$1() ?? projectName;
3745
- const imageName = config$1.docker?.imageName ?? appName;
3746
- return {
3747
- registry: config$1.docker?.registry,
3748
- imageName,
3749
- projectName,
3750
- appName
3751
- };
3752
- }
3640
+ function generateNextjsDockerfile(options) {
3641
+ const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3642
+ const pm = getPmConfig(packageManager);
3643
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3644
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3645
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3646
+ const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3647
+ const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3648
+ return `# syntax=docker/dockerfile:1
3649
+ # Next.js standalone Dockerfile with turbo prune optimization
3753
3650
 
3754
- //#endregion
3755
- //#region src/deploy/dokploy.ts
3756
- const logger$4 = console;
3757
- /**
3758
- * Get the Dokploy API token from stored credentials or environment
3759
- */
3760
- async function getApiToken$1() {
3761
- const token = await getDokployToken();
3762
- if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
3763
- return token;
3651
+ # Stage 1: Prune monorepo
3652
+ FROM ${baseImage} AS pruner
3653
+
3654
+ WORKDIR /app
3655
+
3656
+ ${installPm}
3657
+
3658
+ COPY . .
3659
+
3660
+ # Prune to only include necessary packages
3661
+ RUN ${turboCmd} prune ${turboPackage} --docker
3662
+
3663
+ # Stage 2: Install dependencies
3664
+ FROM ${baseImage} AS deps
3665
+
3666
+ WORKDIR /app
3667
+
3668
+ ${installPm}
3669
+
3670
+ # Copy pruned lockfile and package.jsons
3671
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3672
+ COPY --from=pruner /app/out/json/ ./
3673
+
3674
+ # Install dependencies
3675
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3676
+ ${turboInstallCmd}
3677
+
3678
+ # Stage 3: Build
3679
+ FROM deps AS builder
3680
+
3681
+ WORKDIR /app
3682
+
3683
+ # Build-time args for public API URLs (populated by gkm deploy)
3684
+ # These get baked into the Next.js build as public environment variables
3685
+ ${publicUrlArgDeclarations}
3686
+
3687
+ # Convert ARGs to ENVs for Next.js build
3688
+ ${publicUrlEnvDeclarations}
3689
+
3690
+ # Copy pruned source
3691
+ COPY --from=pruner /app/out/full/ ./
3692
+
3693
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3694
+ # Using wildcard to make it optional for single-app projects
3695
+ COPY --from=pruner /app/tsconfig.* ./
3696
+
3697
+ # Ensure public directory exists (may be empty for scaffolded projects)
3698
+ RUN mkdir -p ${appPath}/public
3699
+
3700
+ # Set Next.js to produce standalone output
3701
+ ENV NEXT_TELEMETRY_DISABLED=1
3702
+
3703
+ # Build the application
3704
+ RUN ${turboCmd} run build --filter=${turboPackage}
3705
+
3706
+ # Stage 4: Production
3707
+ FROM ${baseImage} AS runner
3708
+
3709
+ WORKDIR /app
3710
+
3711
+ # Install tini for proper signal handling
3712
+ RUN apk add --no-cache tini
3713
+
3714
+ # Create non-root user
3715
+ RUN addgroup --system --gid 1001 nodejs && \\
3716
+ adduser --system --uid 1001 nextjs
3717
+
3718
+ # Set environment
3719
+ ENV NODE_ENV=production
3720
+ ENV NEXT_TELEMETRY_DISABLED=1
3721
+ ENV PORT=${port}
3722
+ ENV HOSTNAME="0.0.0.0"
3723
+
3724
+ # Copy static files and standalone output
3725
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3726
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3727
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3728
+
3729
+ USER nextjs
3730
+
3731
+ EXPOSE ${port}
3732
+
3733
+ ENTRYPOINT ["/sbin/tini", "--"]
3734
+ CMD ["node", "${appPath}/server.js"]
3735
+ `;
3764
3736
  }
3765
3737
  /**
3766
- * Create a Dokploy API client
3738
+ * Generate a Dockerfile for backend apps in a workspace.
3739
+ * Uses turbo prune for monorepo optimization.
3740
+ * @internal Exported for testing
3767
3741
  */
3768
- async function createApi$1(endpoint) {
3769
- const token = await getApiToken$1();
3770
- return new DokployApi({
3771
- baseUrl: endpoint,
3772
- token
3773
- });
3742
+ function generateBackendDockerfile(options) {
3743
+ const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3744
+ const pm = getPmConfig(packageManager);
3745
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3746
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3747
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3748
+ return `# syntax=docker/dockerfile:1
3749
+ # Backend Dockerfile with turbo prune optimization
3750
+
3751
+ # Stage 1: Prune monorepo
3752
+ FROM ${baseImage} AS pruner
3753
+
3754
+ WORKDIR /app
3755
+
3756
+ ${installPm}
3757
+
3758
+ COPY . .
3759
+
3760
+ # Prune to only include necessary packages
3761
+ RUN ${turboCmd} prune ${turboPackage} --docker
3762
+
3763
+ # Stage 2: Install dependencies
3764
+ FROM ${baseImage} AS deps
3765
+
3766
+ WORKDIR /app
3767
+
3768
+ ${installPm}
3769
+
3770
+ # Copy pruned lockfile and package.jsons
3771
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3772
+ COPY --from=pruner /app/out/json/ ./
3773
+
3774
+ # Install dependencies
3775
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3776
+ ${turboInstallCmd}
3777
+
3778
+ # Stage 3: Build
3779
+ FROM deps AS builder
3780
+
3781
+ WORKDIR /app
3782
+
3783
+ # Build-time args for encrypted secrets
3784
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3785
+ ARG GKM_CREDENTIALS_IV=""
3786
+
3787
+ # Copy pruned source
3788
+ COPY --from=pruner /app/out/full/ ./
3789
+
3790
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3791
+ # Using wildcard to make it optional for single-app projects
3792
+ COPY --from=pruner /app/gkm.config.* ./
3793
+ COPY --from=pruner /app/tsconfig.* ./
3794
+
3795
+ # Write encrypted credentials for gkm build to embed
3796
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3797
+ mkdir -p ${appPath}/.gkm && \
3798
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3799
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3800
+ fi
3801
+
3802
+ # Build production server using gkm
3803
+ RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3804
+
3805
+ # Stage 4: Production
3806
+ FROM ${baseImage} AS runner
3807
+
3808
+ WORKDIR /app
3809
+
3810
+ RUN apk add --no-cache tini
3811
+
3812
+ RUN addgroup --system --gid 1001 nodejs && \\
3813
+ adduser --system --uid 1001 hono
3814
+
3815
+ # Copy bundled server
3816
+ COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3817
+
3818
+ ENV NODE_ENV=production
3819
+ ENV PORT=${port}
3820
+
3821
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3822
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3823
+
3824
+ USER hono
3825
+
3826
+ EXPOSE ${port}
3827
+
3828
+ ENTRYPOINT ["/sbin/tini", "--"]
3829
+ CMD ["node", "server.mjs"]
3830
+ `;
3774
3831
  }
3775
3832
  /**
3776
- * Deploy to Dokploy
3833
+ * Generate a Dockerfile for apps with a custom entry point.
3834
+ * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3835
+ * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3836
+ * @internal Exported for testing
3777
3837
  */
3778
- async function deployDokploy(options) {
3779
- const { stage, imageRef, masterKey, config: config$1 } = options;
3780
- logger$4.log(`\n🎯 Deploying to Dokploy...`);
3781
- logger$4.log(` Endpoint: ${config$1.endpoint}`);
3782
- logger$4.log(` Application: ${config$1.applicationId}`);
3783
- const api = await createApi$1(config$1.endpoint);
3784
- logger$4.log(` Configuring Docker image: ${imageRef}`);
3785
- const registryOptions = {};
3786
- if (config$1.registryId) {
3787
- registryOptions.registryId = config$1.registryId;
3788
- logger$4.log(` Using Dokploy registry: ${config$1.registryId}`);
3789
- } else {
3790
- const storedRegistryId = await getDokployRegistryId();
3791
- if (storedRegistryId) {
3792
- registryOptions.registryId = storedRegistryId;
3793
- logger$4.log(` Using stored Dokploy registry: ${storedRegistryId}`);
3794
- } else if (config$1.registryCredentials) {
3795
- registryOptions.username = config$1.registryCredentials.username;
3796
- registryOptions.password = config$1.registryCredentials.password;
3797
- registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
3798
- logger$4.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
3799
- } else {
3800
- const username = process.env.DOCKER_REGISTRY_USERNAME;
3801
- const password = process.env.DOCKER_REGISTRY_PASSWORD;
3802
- const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
3803
- if (username && password && registryUrl) {
3804
- registryOptions.username = username;
3805
- registryOptions.password = password;
3806
- registryOptions.registryUrl = registryUrl;
3807
- logger$4.log(` Using registry credentials from environment`);
3808
- }
3809
- }
3810
- }
3811
- await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
3812
- logger$4.log(" ✓ Docker provider configured");
3813
- const envVars = {};
3814
- if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
3815
- if (Object.keys(envVars).length > 0) {
3816
- logger$4.log(" Updating environment variables...");
3817
- const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
3818
- await api.saveApplicationEnv(config$1.applicationId, envString);
3819
- logger$4.log(" ✓ Environment variables updated");
3820
- }
3821
- logger$4.log(" Triggering deployment...");
3822
- await api.deployApplication(config$1.applicationId);
3823
- logger$4.log(" ✓ Deployment triggered");
3824
- logger$4.log("\n✅ Dokploy deployment initiated!");
3825
- logger$4.log(`\n📋 Deployment details:`);
3826
- logger$4.log(` Image: ${imageRef}`);
3827
- logger$4.log(` Stage: ${stage}`);
3828
- logger$4.log(` Application ID: ${config$1.applicationId}`);
3829
- if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
3830
- const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
3831
- logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
3832
- return {
3833
- imageRef,
3834
- masterKey,
3835
- url: deploymentUrl
3836
- };
3838
+ function generateEntryDockerfile(options) {
3839
+ const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3840
+ const pm = getPmConfig(packageManager);
3841
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3842
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3843
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3844
+ return `# syntax=docker/dockerfile:1
3845
+ # Entry-based Dockerfile with turbo prune + tsdown bundling
3846
+
3847
+ # Stage 1: Prune monorepo
3848
+ FROM ${baseImage} AS pruner
3849
+
3850
+ WORKDIR /app
3851
+
3852
+ ${installPm}
3853
+
3854
+ COPY . .
3855
+
3856
+ # Prune to only include necessary packages
3857
+ RUN ${turboCmd} prune ${turboPackage} --docker
3858
+
3859
+ # Stage 2: Install dependencies
3860
+ FROM ${baseImage} AS deps
3861
+
3862
+ WORKDIR /app
3863
+
3864
+ ${installPm}
3865
+
3866
+ # Copy pruned lockfile and package.jsons
3867
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3868
+ COPY --from=pruner /app/out/json/ ./
3869
+
3870
+ # Install dependencies
3871
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3872
+ ${turboInstallCmd}
3873
+
3874
+ # Stage 3: Build with tsdown
3875
+ FROM deps AS builder
3876
+
3877
+ WORKDIR /app
3878
+
3879
+ # Build-time args for encrypted secrets
3880
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3881
+ ARG GKM_CREDENTIALS_IV=""
3882
+
3883
+ # Copy pruned source
3884
+ COPY --from=pruner /app/out/full/ ./
3885
+
3886
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3887
+ # Using wildcard to make it optional for single-app projects
3888
+ COPY --from=pruner /app/tsconfig.* ./
3889
+
3890
+ # Write encrypted credentials for tsdown to embed via define
3891
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3892
+ mkdir -p ${appPath}/.gkm && \
3893
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3894
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3895
+ fi
3896
+
3897
+ # Bundle entry point with esbuild (outputs to dist/index.mjs)
3898
+ # Creates a fully standalone bundle with all dependencies included
3899
+ # Use define to embed credentials if present
3900
+ RUN cd ${appPath} && \
3901
+ if [ -f .gkm/credentials.enc ]; then \
3902
+ CREDS=$(cat .gkm/credentials.enc) && \
3903
+ IV=$(cat .gkm/credentials.iv) && \
3904
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3905
+ --outfile=dist/index.mjs --packages=bundle \
3906
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3907
+ --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3908
+ --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3909
+ else \
3910
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3911
+ --outfile=dist/index.mjs --packages=bundle \
3912
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3913
+ fi
3914
+
3915
+ # Stage 4: Production
3916
+ FROM ${baseImage} AS runner
3917
+
3918
+ WORKDIR /app
3919
+
3920
+ RUN apk add --no-cache tini
3921
+
3922
+ RUN addgroup --system --gid 1001 nodejs && \\
3923
+ adduser --system --uid 1001 app
3924
+
3925
+ # Copy bundled output only (no node_modules needed - fully bundled)
3926
+ COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3927
+
3928
+ ENV NODE_ENV=production
3929
+ ENV PORT=${port}
3930
+
3931
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3932
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3933
+
3934
+ USER app
3935
+
3936
+ EXPOSE ${port}
3937
+
3938
+ ENTRYPOINT ["/sbin/tini", "--"]
3939
+ CMD ["node", "index.mjs"]
3940
+ `;
3837
3941
  }
3838
3942
 
3839
3943
  //#endregion
3840
- //#region src/deploy/state.ts
3841
- /**
3842
- * Get the state file path for a stage
3843
- */
3844
- function getStateFilePath(workspaceRoot, stage) {
3845
- return join(workspaceRoot, ".gkm", `deploy-${stage}.json`);
3846
- }
3944
+ //#region src/docker/index.ts
3945
+ const logger$5 = console;
3847
3946
  /**
3848
- * Read the deploy state for a stage
3849
- * Returns null if state file doesn't exist
3947
+ * Docker command implementation
3948
+ * Generates Dockerfile, docker-compose.yml, and related files
3949
+ *
3950
+ * Default: Multi-stage Dockerfile that builds from source inside Docker
3951
+ * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3850
3952
  */
3851
- async function readStageState(workspaceRoot, stage) {
3852
- const filePath = getStateFilePath(workspaceRoot, stage);
3853
- try {
3854
- const content = await readFile(filePath, "utf-8");
3855
- return JSON.parse(content);
3856
- } catch (error) {
3857
- if (error.code === "ENOENT") return null;
3858
- console.warn(`Warning: Could not read deploy state: ${error}`);
3859
- return null;
3953
+ async function dockerCommand(options) {
3954
+ const loadedConfig = await loadWorkspaceConfig();
3955
+ if (loadedConfig.type === "workspace") {
3956
+ logger$5.log("📦 Detected workspace configuration");
3957
+ return workspaceDockerCommand(loadedConfig.workspace, options);
3860
3958
  }
3861
- }
3862
- /**
3863
- * Write the deploy state for a stage
3864
- */
3865
- async function writeStageState(workspaceRoot, stage, state) {
3866
- const filePath = getStateFilePath(workspaceRoot, stage);
3867
- const dir = join(workspaceRoot, ".gkm");
3868
- await mkdir(dir, { recursive: true });
3869
- state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
3870
- await writeFile(filePath, JSON.stringify(state, null, 2));
3871
- }
3872
- /**
3873
- * Create a new empty state for a stage
3874
- */
3875
- function createEmptyState(stage, environmentId) {
3876
- return {
3877
- provider: "dokploy",
3878
- stage,
3879
- environmentId,
3880
- applications: {},
3881
- services: {},
3882
- lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
3959
+ const config$1 = await loadConfig();
3960
+ const dockerConfig = resolveDockerConfig$1(config$1);
3961
+ const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
3962
+ const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3963
+ const useSlim = options.slim === true;
3964
+ if (useSlim) {
3965
+ const distDir = join(process.cwd(), ".gkm", "server", "dist");
3966
+ const hasBuild = existsSync(join(distDir, "server.mjs"));
3967
+ if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3968
+ }
3969
+ const dockerDir = join(process.cwd(), ".gkm", "docker");
3970
+ await mkdir(dockerDir, { recursive: true });
3971
+ const packageManager = detectPackageManager$1();
3972
+ const inMonorepo = isMonorepo();
3973
+ const hasTurbo = hasTurboConfig();
3974
+ let useTurbo = options.turbo ?? false;
3975
+ if (inMonorepo && !useSlim) if (hasTurbo) {
3976
+ useTurbo = true;
3977
+ logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
3978
+ } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3979
+ let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3980
+ if (useTurbo && !options.turboPackage) try {
3981
+ const pkg$1 = __require(`${process.cwd()}/package.json`);
3982
+ if (pkg$1.name) {
3983
+ turboPackage = pkg$1.name;
3984
+ logger$5.log(` Turbo package: ${turboPackage}`);
3985
+ }
3986
+ } catch {}
3987
+ const templateOptions = {
3988
+ imageName: dockerConfig.imageName,
3989
+ baseImage: dockerConfig.baseImage,
3990
+ port: dockerConfig.port,
3991
+ healthCheckPath,
3992
+ prebuilt: useSlim,
3993
+ turbo: useTurbo,
3994
+ turboPackage,
3995
+ packageManager
3996
+ };
3997
+ const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3998
+ const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3999
+ const dockerfilePath = join(dockerDir, "Dockerfile");
4000
+ await writeFile(dockerfilePath, dockerfile);
4001
+ logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
4002
+ const composeOptions = {
4003
+ imageName: dockerConfig.imageName,
4004
+ registry: options.registry ?? dockerConfig.registry,
4005
+ port: dockerConfig.port,
4006
+ healthCheckPath,
4007
+ services: dockerConfig.compose?.services ?? {}
3883
4008
  };
4009
+ const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
4010
+ const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
4011
+ const composePath = join(dockerDir, "docker-compose.yml");
4012
+ await writeFile(composePath, dockerCompose);
4013
+ logger$5.log("Generated: .gkm/docker/docker-compose.yml");
4014
+ const dockerignore = generateDockerignore();
4015
+ const dockerignorePath = join(process.cwd(), ".dockerignore");
4016
+ await writeFile(dockerignorePath, dockerignore);
4017
+ logger$5.log("Generated: .dockerignore (project root)");
4018
+ const entrypoint = generateDockerEntrypoint();
4019
+ const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
4020
+ await writeFile(entrypointPath, entrypoint);
4021
+ logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
4022
+ const result = {
4023
+ dockerfile: dockerfilePath,
4024
+ dockerCompose: composePath,
4025
+ dockerignore: dockerignorePath,
4026
+ entrypoint: entrypointPath
4027
+ };
4028
+ if (options.build) await buildDockerImage(dockerConfig.imageName, options);
4029
+ if (options.push) await pushDockerImage(dockerConfig.imageName, options);
4030
+ return result;
3884
4031
  }
3885
4032
  /**
3886
- * Get application ID from state
3887
- */
3888
- function getApplicationId(state, appName) {
3889
- return state?.applications[appName];
3890
- }
3891
- /**
3892
- * Set application ID in state (mutates state)
3893
- */
3894
- function setApplicationId(state, appName, applicationId) {
3895
- state.applications[appName] = applicationId;
3896
- }
3897
- /**
3898
- * Get postgres ID from state
3899
- */
3900
- function getPostgresId(state) {
3901
- return state?.services.postgresId;
3902
- }
3903
- /**
3904
- * Set postgres ID in state (mutates state)
4033
+ * Ensure lockfile exists in the build context
4034
+ * For monorepos, copies from workspace root if needed
4035
+ * Returns cleanup function if file was copied
3905
4036
  */
3906
- function setPostgresId(state, postgresId) {
3907
- state.services.postgresId = postgresId;
4037
+ function ensureLockfile(cwd) {
4038
+ const lockfilePath = findLockfilePath(cwd);
4039
+ if (!lockfilePath) {
4040
+ logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
4041
+ return null;
4042
+ }
4043
+ const lockfileName = basename(lockfilePath);
4044
+ const localLockfile = join(cwd, lockfileName);
4045
+ if (lockfilePath === localLockfile) return null;
4046
+ logger$5.log(` Copying ${lockfileName} from monorepo root...`);
4047
+ copyFileSync(lockfilePath, localLockfile);
4048
+ return () => {
4049
+ try {
4050
+ unlinkSync(localLockfile);
4051
+ } catch {}
4052
+ };
3908
4053
  }
3909
4054
  /**
3910
- * Get redis ID from state
4055
+ * Build Docker image
4056
+ * Uses BuildKit for cache mount support
3911
4057
  */
3912
- function getRedisId(state) {
3913
- return state?.services.redisId;
4058
+ async function buildDockerImage(imageName, options) {
4059
+ const tag = options.tag ?? "latest";
4060
+ const registry = options.registry;
4061
+ const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
4062
+ logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
4063
+ const cwd = process.cwd();
4064
+ const cleanup = ensureLockfile(cwd);
4065
+ try {
4066
+ execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
4067
+ cwd,
4068
+ stdio: "inherit",
4069
+ env: {
4070
+ ...process.env,
4071
+ DOCKER_BUILDKIT: "1"
4072
+ }
4073
+ });
4074
+ logger$5.log(`✅ Docker image built: ${fullImageName}`);
4075
+ } catch (error) {
4076
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4077
+ } finally {
4078
+ cleanup?.();
4079
+ }
3914
4080
  }
3915
4081
  /**
3916
- * Set redis ID in state (mutates state)
4082
+ * Push Docker image to registry
3917
4083
  */
3918
- function setRedisId(state, redisId) {
3919
- state.services.redisId = redisId;
4084
+ async function pushDockerImage(imageName, options) {
4085
+ const tag = options.tag ?? "latest";
4086
+ const registry = options.registry;
4087
+ if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
4088
+ const fullImageName = `${registry}/${imageName}:${tag}`;
4089
+ logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
4090
+ try {
4091
+ execSync(`docker push ${fullImageName}`, {
4092
+ cwd: process.cwd(),
4093
+ stdio: "inherit"
4094
+ });
4095
+ logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
4096
+ } catch (error) {
4097
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4098
+ }
3920
4099
  }
3921
-
3922
- //#endregion
3923
- //#region src/deploy/dns/hostinger-api.ts
3924
- /**
3925
- * Hostinger DNS API client
3926
- *
3927
- * API Documentation: https://developers.hostinger.com/
3928
- * Authentication: Bearer token from hpanel.hostinger.com/profile/api
3929
- */
3930
- const HOSTINGER_API_BASE = "https://api.hostinger.com";
3931
4100
  /**
3932
- * Hostinger API error
4101
+ * Get the package name from package.json in an app directory.
3933
4102
  */
3934
- var HostingerApiError = class extends Error {
3935
- constructor(message, status, statusText, errors) {
3936
- super(message);
3937
- this.status = status;
3938
- this.statusText = statusText;
3939
- this.errors = errors;
3940
- this.name = "HostingerApiError";
4103
+ function getAppPackageName(appPath) {
4104
+ try {
4105
+ const pkgPath = join(appPath, "package.json");
4106
+ if (!existsSync(pkgPath)) return void 0;
4107
+ const content = readFileSync(pkgPath, "utf-8");
4108
+ const pkg$1 = JSON.parse(content);
4109
+ return pkg$1.name;
4110
+ } catch {
4111
+ return void 0;
3941
4112
  }
3942
- };
4113
+ }
3943
4114
  /**
3944
- * Hostinger DNS API client
3945
- *
3946
- * @example
3947
- * ```ts
3948
- * const api = new HostingerApi(token);
3949
- *
3950
- * // Get all records for a domain
3951
- * const records = await api.getRecords('traflabs.io');
3952
- *
3953
- * // Create/update records
3954
- * await api.upsertRecords('traflabs.io', [
3955
- * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
3956
- * ]);
3957
- * ```
4115
+ * Generate Dockerfiles for all apps in a workspace.
4116
+ * @internal Exported for testing
3958
4117
  */
3959
- var HostingerApi = class {
3960
- token;
3961
- constructor(token) {
3962
- this.token = token;
3963
- }
3964
- /**
3965
- * Make a request to the Hostinger API
3966
- */
3967
- async request(method, endpoint, body) {
3968
- const url = `${HOSTINGER_API_BASE}${endpoint}`;
3969
- const response = await fetch(url, {
3970
- method,
3971
- headers: {
3972
- "Content-Type": "application/json",
3973
- Authorization: `Bearer ${this.token}`
3974
- },
3975
- body: body ? JSON.stringify(body) : void 0
4118
+ async function workspaceDockerCommand(workspace, options) {
4119
+ const results = [];
4120
+ const apps = Object.entries(workspace.apps);
4121
+ logger$5.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
4122
+ const dockerDir = join(workspace.root, ".gkm", "docker");
4123
+ await mkdir(dockerDir, { recursive: true });
4124
+ const packageManager = detectPackageManager$1(workspace.root);
4125
+ logger$5.log(` Package manager: ${packageManager}`);
4126
+ for (const [appName, app] of apps) {
4127
+ const appPath = app.path;
4128
+ const fullAppPath = join(workspace.root, appPath);
4129
+ const turboPackage = getAppPackageName(fullAppPath) ?? appName;
4130
+ const imageName = appName;
4131
+ const hasEntry = !!app.entry;
4132
+ const buildType = hasEntry ? "entry" : app.type;
4133
+ logger$5.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
4134
+ let dockerfile;
4135
+ if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
4136
+ imageName,
4137
+ baseImage: "node:22-alpine",
4138
+ port: app.port,
4139
+ appPath,
4140
+ turboPackage,
4141
+ packageManager
3976
4142
  });
3977
- if (!response.ok) {
3978
- let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
3979
- let errors;
3980
- try {
3981
- const errorBody = await response.json();
3982
- if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
3983
- errors = errorBody.errors;
3984
- } catch {}
3985
- throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
3986
- }
3987
- const text = await response.text();
3988
- if (!text || text.trim() === "") return void 0;
3989
- return JSON.parse(text);
3990
- }
3991
- /**
3992
- * Get all DNS records for a domain
3993
- *
3994
- * @param domain - Root domain (e.g., 'traflabs.io')
3995
- */
3996
- async getRecords(domain) {
3997
- const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
3998
- return response.data || [];
3999
- }
4000
- /**
4001
- * Create or update DNS records
4002
- *
4003
- * @param domain - Root domain (e.g., 'traflabs.io')
4004
- * @param records - Records to create/update
4005
- * @param overwrite - If true, replaces all existing records. If false, merges with existing.
4006
- */
4007
- async upsertRecords(domain, records, overwrite = false) {
4008
- await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
4009
- overwrite,
4010
- zone: records
4143
+ else if (app.entry) dockerfile = generateEntryDockerfile({
4144
+ imageName,
4145
+ baseImage: "node:22-alpine",
4146
+ port: app.port,
4147
+ appPath,
4148
+ entry: app.entry,
4149
+ turboPackage,
4150
+ packageManager,
4151
+ healthCheckPath: "/health"
4011
4152
  });
4012
- }
4013
- /**
4014
- * Validate DNS records before applying
4015
- *
4016
- * @param domain - Root domain (e.g., 'traflabs.io')
4017
- * @param records - Records to validate
4018
- * @returns true if valid, throws if invalid
4019
- */
4020
- async validateRecords(domain, records) {
4021
- await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
4022
- overwrite: false,
4023
- zone: records
4153
+ else dockerfile = generateBackendDockerfile({
4154
+ imageName,
4155
+ baseImage: "node:22-alpine",
4156
+ port: app.port,
4157
+ appPath,
4158
+ turboPackage,
4159
+ packageManager,
4160
+ healthCheckPath: "/health"
4161
+ });
4162
+ const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
4163
+ await writeFile(dockerfilePath, dockerfile);
4164
+ logger$5.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
4165
+ results.push({
4166
+ appName,
4167
+ type: app.type,
4168
+ dockerfile: dockerfilePath,
4169
+ imageName
4024
4170
  });
4025
- return true;
4026
- }
4027
- /**
4028
- * Delete specific DNS records
4029
- *
4030
- * @param domain - Root domain (e.g., 'traflabs.io')
4031
- * @param filters - Filters to match records for deletion
4032
- */
4033
- async deleteRecords(domain, filters) {
4034
- await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
4035
- }
4036
- /**
4037
- * Check if a specific record exists
4038
- *
4039
- * @param domain - Root domain (e.g., 'traflabs.io')
4040
- * @param name - Subdomain name (e.g., 'api.joemoer')
4041
- * @param type - Record type (e.g., 'A')
4042
- */
4043
- async recordExists(domain, name$1, type$1 = "A") {
4044
- const records = await this.getRecords(domain);
4045
- return records.some((r) => r.name === name$1 && r.type === type$1);
4046
4171
  }
4047
- /**
4048
- * Create a single A record if it doesn't exist
4049
- *
4050
- * @param domain - Root domain (e.g., 'traflabs.io')
4051
- * @param subdomain - Subdomain name (e.g., 'api.joemoer')
4052
- * @param ip - IP address to point to
4053
- * @param ttl - TTL in seconds (default: 300)
4054
- * @returns true if created, false if already exists
4055
- */
4056
- async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
4057
- const exists = await this.recordExists(domain, subdomain, "A");
4058
- if (exists) return false;
4059
- await this.upsertRecords(domain, [{
4060
- name: subdomain,
4061
- type: "A",
4062
- ttl,
4063
- records: [ip]
4064
- }]);
4065
- return true;
4172
+ const dockerignore = generateDockerignore();
4173
+ const dockerignorePath = join(workspace.root, ".dockerignore");
4174
+ await writeFile(dockerignorePath, dockerignore);
4175
+ logger$5.log(`\n Generated: .dockerignore (workspace root)`);
4176
+ const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
4177
+ const composePath = join(dockerDir, "docker-compose.yml");
4178
+ await writeFile(composePath, dockerCompose);
4179
+ logger$5.log(` Generated: .gkm/docker/docker-compose.yml`);
4180
+ logger$5.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
4181
+ logger$5.log("\n📋 Build commands:");
4182
+ for (const result of results) {
4183
+ const icon = result.type === "backend" ? "⚙️" : "🌐";
4184
+ logger$5.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
4066
4185
  }
4067
- };
4186
+ logger$5.log("\n📋 Run all services:");
4187
+ logger$5.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
4188
+ return {
4189
+ apps: results,
4190
+ dockerCompose: composePath,
4191
+ dockerignore: dockerignorePath
4192
+ };
4193
+ }
4068
4194
 
4069
4195
  //#endregion
4070
- //#region src/deploy/dns/index.ts
4071
- const logger$3 = console;
4196
+ //#region src/deploy/docker.ts
4072
4197
  /**
4073
- * Resolve IP address from a hostname
4198
+ * Get app name from package.json in the current working directory
4199
+ * Used for Dokploy app/project naming
4074
4200
  */
4075
- async function resolveHostnameToIp(hostname) {
4201
+ function getAppNameFromCwd$1() {
4202
+ const packageJsonPath = join(process.cwd(), "package.json");
4203
+ if (!existsSync(packageJsonPath)) return void 0;
4076
4204
  try {
4077
- const addresses = await lookup(hostname, { family: 4 });
4078
- return addresses.address;
4079
- } catch (error) {
4080
- throw new Error(`Failed to resolve IP for ${hostname}: ${error instanceof Error ? error.message : "Unknown error"}`);
4081
- }
4205
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
4206
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4207
+ } catch {}
4208
+ return void 0;
4082
4209
  }
4083
4210
  /**
4084
- * Extract subdomain from full hostname relative to root domain
4085
- *
4086
- * @example
4087
- * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
4088
- * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
4211
+ * Get app name from package.json adjacent to the lockfile (project root)
4212
+ * Used for Docker image naming
4089
4213
  */
4090
- function extractSubdomain(hostname, rootDomain) {
4091
- if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
4092
- const subdomain = hostname.slice(0, -(rootDomain.length + 1));
4093
- return subdomain || "@";
4214
+ function getAppNameFromPackageJson() {
4215
+ const cwd = process.cwd();
4216
+ const lockfilePath = findLockfilePath(cwd);
4217
+ if (!lockfilePath) return void 0;
4218
+ const projectRoot = dirname(lockfilePath);
4219
+ const packageJsonPath = join(projectRoot, "package.json");
4220
+ if (!existsSync(packageJsonPath)) return void 0;
4221
+ try {
4222
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
4223
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4224
+ } catch {}
4225
+ return void 0;
4094
4226
  }
4227
+ const logger$4 = console;
4095
4228
  /**
4096
- * Generate required DNS records for a deployment
4229
+ * Get the full image reference
4097
4230
  */
4098
- function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
4099
- const records = [];
4100
- for (const [appName, hostname] of appHostnames) {
4101
- const subdomain = extractSubdomain(hostname, rootDomain);
4102
- records.push({
4103
- hostname,
4104
- subdomain,
4105
- type: "A",
4106
- value: serverIp,
4107
- appName
4231
+ function getImageRef(registry, imageName, tag) {
4232
+ if (registry) return `${registry}/${imageName}:${tag}`;
4233
+ return `${imageName}:${tag}`;
4234
+ }
4235
+ /**
4236
+ * Build Docker image
4237
+ * @param imageRef - Full image reference (registry/name:tag)
4238
+ * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
4239
+ * @param buildArgs - Build arguments to pass to docker build
4240
+ */
4241
+ async function buildImage(imageRef, appName, buildArgs) {
4242
+ logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
4243
+ const cwd = process.cwd();
4244
+ const lockfilePath = findLockfilePath(cwd);
4245
+ const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
4246
+ const inMonorepo = lockfileDir !== cwd;
4247
+ if (appName || inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
4248
+ else logger$4.log(" Generating Dockerfile...");
4249
+ await dockerCommand({});
4250
+ const dockerfileSuffix = appName ? `.${appName}` : "";
4251
+ const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
4252
+ const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
4253
+ if (buildCwd !== cwd) logger$4.log(` Building from workspace root: ${buildCwd}`);
4254
+ const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
4255
+ try {
4256
+ const cmd = [
4257
+ "DOCKER_BUILDKIT=1 docker build",
4258
+ "--platform linux/amd64",
4259
+ `-f ${dockerfilePath}`,
4260
+ `-t ${imageRef}`,
4261
+ buildArgsString,
4262
+ "."
4263
+ ].filter(Boolean).join(" ");
4264
+ execSync(cmd, {
4265
+ cwd: buildCwd,
4266
+ stdio: "inherit",
4267
+ env: {
4268
+ ...process.env,
4269
+ DOCKER_BUILDKIT: "1"
4270
+ }
4108
4271
  });
4272
+ logger$4.log(`✅ Image built: ${imageRef}`);
4273
+ } catch (error) {
4274
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4109
4275
  }
4110
- return records;
4111
4276
  }
4112
4277
  /**
4113
- * Print DNS records table
4278
+ * Push Docker image to registry
4114
4279
  */
4115
- function printDnsRecordsTable(records, rootDomain) {
4116
- logger$3.log("\n 📋 DNS Records for " + rootDomain + ":");
4117
- logger$3.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
4118
- logger$3.log(" │ Subdomain │ Type │ Value │ Status │");
4119
- logger$3.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
4120
- for (const record of records) {
4121
- const subdomain = record.subdomain.padEnd(35);
4122
- const type$1 = record.type.padEnd(4);
4123
- const value = record.value.padEnd(15);
4124
- let status;
4125
- if (record.error) status = "✗";
4126
- else if (record.created) status = "✓ new";
4127
- else if (record.existed) status = "✓";
4128
- else status = "?";
4129
- logger$3.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
4280
+ async function pushImage(imageRef) {
4281
+ logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
4282
+ try {
4283
+ execSync(`docker push ${imageRef}`, {
4284
+ cwd: process.cwd(),
4285
+ stdio: "inherit"
4286
+ });
4287
+ logger$4.log(`✅ Image pushed: ${imageRef}`);
4288
+ } catch (error) {
4289
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4130
4290
  }
4131
- logger$3.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
4132
4291
  }
4133
4292
  /**
4134
- * Print DNS records in a simple format for manual setup
4293
+ * Deploy using Docker (build and optionally push image)
4135
4294
  */
4136
- function printDnsRecordsSimple(records, rootDomain) {
4137
- logger$3.log("\n 📋 Required DNS Records:");
4138
- logger$3.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
4139
- for (const record of records) logger$3.log(` ${record.subdomain} → ${record.value} (A record)`);
4140
- logger$3.log("");
4295
+ async function deployDocker(options) {
4296
+ const { stage, tag, skipPush, masterKey, config: config$1, buildArgs } = options;
4297
+ const imageName = config$1.imageName;
4298
+ const imageRef = getImageRef(config$1.registry, imageName, tag);
4299
+ await buildImage(imageRef, config$1.appName, buildArgs);
4300
+ if (!skipPush) if (!config$1.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
4301
+ else await pushImage(imageRef);
4302
+ logger$4.log("\n✅ Docker deployment ready!");
4303
+ logger$4.log(`\n📋 Deployment details:`);
4304
+ logger$4.log(` Image: ${imageRef}`);
4305
+ logger$4.log(` Stage: ${stage}`);
4306
+ if (masterKey) {
4307
+ logger$4.log(`\n🔐 Deploy with this environment variable:`);
4308
+ logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
4309
+ logger$4.log("\n Example docker run:");
4310
+ logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
4311
+ }
4312
+ return {
4313
+ imageRef,
4314
+ masterKey
4315
+ };
4141
4316
  }
4142
4317
  /**
4143
- * Prompt for input (reuse from deploy/index.ts pattern)
4318
+ * Resolve Docker deploy config from gkm config
4319
+ * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
4320
+ * - projectName: from root package.json, or 'app' (for Dokploy project)
4321
+ * - appName: from cwd package.json, or projectName (for Dokploy app within project)
4144
4322
  */
4145
- async function promptForToken(message) {
4146
- const { stdin: stdin$1, stdout: stdout$1 } = await import("node:process");
4147
- const readline$1 = await import("node:readline/promises");
4148
- if (!stdin$1.isTTY) throw new Error("Interactive input required for Hostinger token.");
4149
- stdout$1.write(message);
4150
- return new Promise((resolve$1) => {
4151
- let value = "";
4152
- const onData = (char) => {
4153
- const c = char.toString();
4154
- if (c === "\n" || c === "\r") {
4155
- stdin$1.setRawMode(false);
4156
- stdin$1.pause();
4157
- stdin$1.removeListener("data", onData);
4158
- stdout$1.write("\n");
4159
- resolve$1(value);
4160
- } else if (c === "") {
4161
- stdin$1.setRawMode(false);
4162
- stdin$1.pause();
4163
- stdout$1.write("\n");
4164
- process.exit(1);
4165
- } else if (c === "" || c === "\b") {
4166
- if (value.length > 0) value = value.slice(0, -1);
4167
- } else value += c;
4168
- };
4169
- stdin$1.setRawMode(true);
4170
- stdin$1.resume();
4171
- stdin$1.on("data", onData);
4172
- });
4323
+ function resolveDockerConfig(config$1) {
4324
+ const projectName = getAppNameFromPackageJson() ?? "app";
4325
+ const appName = getAppNameFromCwd$1() ?? projectName;
4326
+ const imageName = config$1.docker?.imageName ?? appName;
4327
+ return {
4328
+ registry: config$1.docker?.registry,
4329
+ imageName,
4330
+ projectName,
4331
+ appName
4332
+ };
4173
4333
  }
4334
+
4335
+ //#endregion
4336
+ //#region src/deploy/dokploy.ts
4337
+ const logger$3 = console;
4174
4338
  /**
4175
- * Create DNS records using the configured provider
4339
+ * Get the Dokploy API token from stored credentials or environment
4176
4340
  */
4177
- async function createDnsRecords(records, dnsConfig) {
4178
- const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
4179
- if (provider === "manual") return records.map((r) => ({
4180
- ...r,
4181
- created: false,
4182
- existed: false
4183
- }));
4184
- if (provider === "hostinger") return createHostingerRecords(records, rootDomain, ttl);
4185
- if (provider === "cloudflare") {
4186
- logger$3.log(" ⚠ Cloudflare DNS integration not yet implemented");
4187
- return records.map((r) => ({
4188
- ...r,
4189
- error: "Cloudflare not implemented"
4190
- }));
4191
- }
4192
- return records;
4341
+ async function getApiToken$1() {
4342
+ const token = await getDokployToken();
4343
+ if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
4344
+ return token;
4193
4345
  }
4194
4346
  /**
4195
- * Create DNS records at Hostinger
4347
+ * Create a Dokploy API client
4196
4348
  */
4197
- async function createHostingerRecords(records, rootDomain, ttl) {
4198
- let token = await getHostingerToken();
4199
- if (!token) {
4200
- logger$3.log("\n 📋 Hostinger API token not found.");
4201
- logger$3.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
4202
- try {
4203
- token = await promptForToken(" Hostinger API Token: ");
4204
- await storeHostingerToken(token);
4205
- logger$3.log(" ✓ Token saved");
4206
- } catch {
4207
- logger$3.log(" ⚠ Could not get token, skipping DNS creation");
4208
- return records.map((r) => ({
4209
- ...r,
4210
- error: "No API token"
4211
- }));
4212
- }
4213
- }
4214
- const api = new HostingerApi(token);
4215
- const results = [];
4216
- let existingRecords = [];
4217
- try {
4218
- existingRecords = await api.getRecords(rootDomain);
4219
- } catch (error) {
4220
- const message = error instanceof Error ? error.message : "Unknown error";
4221
- logger$3.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
4222
- return records.map((r) => ({
4223
- ...r,
4224
- error: message
4225
- }));
4226
- }
4227
- for (const record of records) {
4228
- const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
4229
- if (existing) {
4230
- results.push({
4231
- ...record,
4232
- existed: true,
4233
- created: false
4234
- });
4235
- continue;
4236
- }
4237
- try {
4238
- await api.upsertRecords(rootDomain, [{
4239
- name: record.subdomain,
4240
- type: "A",
4241
- ttl,
4242
- records: [record.value]
4243
- }]);
4244
- results.push({
4245
- ...record,
4246
- created: true,
4247
- existed: false
4248
- });
4249
- } catch (error) {
4250
- const message = error instanceof Error ? error.message : "Unknown error";
4251
- results.push({
4252
- ...record,
4253
- error: message
4254
- });
4255
- }
4256
- }
4257
- return results;
4349
+ async function createApi$1(endpoint) {
4350
+ const token = await getApiToken$1();
4351
+ return new DokployApi({
4352
+ baseUrl: endpoint,
4353
+ token
4354
+ });
4258
4355
  }
4259
4356
  /**
4260
- * Main DNS orchestration function for deployments
4357
+ * Deploy to Dokploy
4261
4358
  */
4262
- async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
4263
- if (!dnsConfig) return null;
4264
- const { domain: rootDomain, autoCreate = true } = dnsConfig;
4265
- logger$3.log("\n🌐 Setting up DNS records...");
4266
- let serverIp;
4267
- try {
4268
- const endpointUrl = new URL(dokployEndpoint);
4269
- serverIp = await resolveHostnameToIp(endpointUrl.hostname);
4270
- logger$3.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
4271
- } catch (error) {
4272
- const message = error instanceof Error ? error.message : "Unknown error";
4273
- logger$3.log(` ⚠ Failed to resolve server IP: ${message}`);
4274
- return null;
4359
+ async function deployDokploy(options) {
4360
+ const { stage, imageRef, masterKey, config: config$1 } = options;
4361
+ logger$3.log(`\n🎯 Deploying to Dokploy...`);
4362
+ logger$3.log(` Endpoint: ${config$1.endpoint}`);
4363
+ logger$3.log(` Application: ${config$1.applicationId}`);
4364
+ const api = await createApi$1(config$1.endpoint);
4365
+ logger$3.log(` Configuring Docker image: ${imageRef}`);
4366
+ const registryOptions = {};
4367
+ if (config$1.registryId) {
4368
+ registryOptions.registryId = config$1.registryId;
4369
+ logger$3.log(` Using Dokploy registry: ${config$1.registryId}`);
4370
+ } else {
4371
+ const storedRegistryId = await getDokployRegistryId();
4372
+ if (storedRegistryId) {
4373
+ registryOptions.registryId = storedRegistryId;
4374
+ logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
4375
+ } else if (config$1.registryCredentials) {
4376
+ registryOptions.username = config$1.registryCredentials.username;
4377
+ registryOptions.password = config$1.registryCredentials.password;
4378
+ registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
4379
+ logger$3.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
4380
+ } else {
4381
+ const username = process.env.DOCKER_REGISTRY_USERNAME;
4382
+ const password = process.env.DOCKER_REGISTRY_PASSWORD;
4383
+ const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
4384
+ if (username && password && registryUrl) {
4385
+ registryOptions.username = username;
4386
+ registryOptions.password = password;
4387
+ registryOptions.registryUrl = registryUrl;
4388
+ logger$3.log(` Using registry credentials from environment`);
4389
+ }
4390
+ }
4275
4391
  }
4276
- const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
4277
- if (requiredRecords.length === 0) {
4278
- logger$3.log(" No DNS records needed");
4279
- return {
4280
- records: [],
4281
- success: true,
4282
- serverIp
4283
- };
4392
+ await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
4393
+ logger$3.log(" ✓ Docker provider configured");
4394
+ const envVars = {};
4395
+ if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
4396
+ if (Object.keys(envVars).length > 0) {
4397
+ logger$3.log(" Updating environment variables...");
4398
+ const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
4399
+ await api.saveApplicationEnv(config$1.applicationId, envString);
4400
+ logger$3.log(" ✓ Environment variables updated");
4284
4401
  }
4285
- let finalRecords;
4286
- if (autoCreate && dnsConfig.provider !== "manual") {
4287
- logger$3.log(` Creating DNS records at ${dnsConfig.provider}...`);
4288
- finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
4289
- const created = finalRecords.filter((r) => r.created).length;
4290
- const existed = finalRecords.filter((r) => r.existed).length;
4291
- const failed = finalRecords.filter((r) => r.error).length;
4292
- if (created > 0) logger$3.log(` Created ${created} DNS record(s)`);
4293
- if (existed > 0) logger$3.log(` ✓ ${existed} record(s) already exist`);
4294
- if (failed > 0) logger$3.log(` ⚠ ${failed} record(s) failed`);
4295
- } else finalRecords = requiredRecords;
4296
- printDnsRecordsTable(finalRecords, rootDomain);
4297
- const hasFailures = finalRecords.some((r) => r.error);
4298
- if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
4402
+ logger$3.log(" Triggering deployment...");
4403
+ await api.deployApplication(config$1.applicationId);
4404
+ logger$3.log(" ✓ Deployment triggered");
4405
+ logger$3.log("\n✅ Dokploy deployment initiated!");
4406
+ logger$3.log(`\n📋 Deployment details:`);
4407
+ logger$3.log(` Image: ${imageRef}`);
4408
+ logger$3.log(` Stage: ${stage}`);
4409
+ logger$3.log(` Application ID: ${config$1.applicationId}`);
4410
+ if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
4411
+ const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
4412
+ logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
4299
4413
  return {
4300
- records: finalRecords,
4301
- success: !hasFailures,
4302
- serverIp
4414
+ imageRef,
4415
+ masterKey,
4416
+ url: deploymentUrl
4303
4417
  };
4304
4418
  }
4305
4419
 
@@ -4378,6 +4492,107 @@ function getPublicUrlArgNames(app) {
4378
4492
  return app.dependencies.map((dep) => `NEXT_PUBLIC_${dep.toUpperCase()}_URL`);
4379
4493
  }
4380
4494
 
4495
+ //#endregion
4496
+ //#region src/deploy/env-resolver.ts
4497
+ /**
4498
+ * Generate a secure random secret (64 hex characters = 32 bytes)
4499
+ */
4500
+ function generateSecret() {
4501
+ return randomBytes(32).toString("hex");
4502
+ }
4503
+ /**
4504
+ * Get or generate a secret for an app.
4505
+ * If the secret already exists in state, returns it.
4506
+ * Otherwise generates a new one and stores it.
4507
+ */
4508
+ function getOrGenerateSecret(state, appName, secretName) {
4509
+ const existing = getGeneratedSecret(state, appName, secretName);
4510
+ if (existing) return existing;
4511
+ const generated = generateSecret();
4512
+ setGeneratedSecret(state, appName, secretName, generated);
4513
+ return generated;
4514
+ }
4515
+ /**
4516
+ * Build a DATABASE_URL for an app with per-app credentials
4517
+ */
4518
+ function buildDatabaseUrl(credentials, postgres) {
4519
+ const { dbUser, dbPassword } = credentials;
4520
+ const { host, port, database } = postgres;
4521
+ return `postgresql://${encodeURIComponent(dbUser)}:${encodeURIComponent(dbPassword)}@${host}:${port}/${database}`;
4522
+ }
4523
+ /**
4524
+ * Build a REDIS_URL
4525
+ */
4526
+ function buildRedisUrl(redis) {
4527
+ const { host, port, password } = redis;
4528
+ if (password) return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
4529
+ return `redis://${host}:${port}`;
4530
+ }
4531
+ /**
4532
+ * Resolve a single environment variable
4533
+ */
4534
+ function resolveEnvVar(varName, context) {
4535
+ switch (varName) {
4536
+ case "PORT": return String(context.app.port);
4537
+ case "NODE_ENV": return context.stage === "production" ? "production" : "development";
4538
+ case "DATABASE_URL":
4539
+ if (context.appCredentials && context.postgres) return buildDatabaseUrl(context.appCredentials, context.postgres);
4540
+ break;
4541
+ case "REDIS_URL":
4542
+ if (context.redis) return buildRedisUrl(context.redis);
4543
+ break;
4544
+ case "BETTER_AUTH_URL": return `https://${context.appHostname}`;
4545
+ case "BETTER_AUTH_SECRET": return getOrGenerateSecret(context.state, context.appName, "BETTER_AUTH_SECRET");
4546
+ case "BETTER_AUTH_TRUSTED_ORIGINS":
4547
+ if (context.frontendUrls.length > 0) return context.frontendUrls.join(",");
4548
+ break;
4549
+ case "GKM_MASTER_KEY":
4550
+ if (context.masterKey) return context.masterKey;
4551
+ break;
4552
+ }
4553
+ if (context.userSecrets) {
4554
+ if (context.userSecrets.custom[varName]) return context.userSecrets.custom[varName];
4555
+ if (varName in context.userSecrets.urls) return context.userSecrets.urls[varName];
4556
+ if (varName === "POSTGRES_PASSWORD" && context.userSecrets.services.postgres) return context.userSecrets.services.postgres.password;
4557
+ if (varName === "REDIS_PASSWORD" && context.userSecrets.services.redis) return context.userSecrets.services.redis.password;
4558
+ }
4559
+ return void 0;
4560
+ }
4561
+ /**
4562
+ * Resolve all environment variables for an app
4563
+ */
4564
+ function resolveEnvVars(requiredVars, context) {
4565
+ const resolved = {};
4566
+ const missing = [];
4567
+ for (const varName of requiredVars) {
4568
+ const value = resolveEnvVar(varName, context);
4569
+ if (value !== void 0) resolved[varName] = value;
4570
+ else missing.push(varName);
4571
+ }
4572
+ return {
4573
+ resolved,
4574
+ missing
4575
+ };
4576
+ }
4577
+ /**
4578
+ * Format missing variables error message
4579
+ */
4580
+ function formatMissingVarsError(appName, missing, stage) {
4581
+ const varList = missing.map((v) => ` - ${v}`).join("\n");
4582
+ return `Deployment failed: ${appName} is missing required environment variables:\n${varList}\n\nAdd them with:\n gkm secrets:set <VAR_NAME> <value> --stage ${stage}\n\nOr add them to the app's requiredEnv in gkm.config.ts to have them auto-resolved.`;
4583
+ }
4584
+ /**
4585
+ * Validate that all required environment variables can be resolved
4586
+ */
4587
+ function validateEnvVars(requiredVars, context) {
4588
+ const { resolved, missing } = resolveEnvVars(requiredVars, context);
4589
+ return {
4590
+ valid: missing.length === 0,
4591
+ missing,
4592
+ resolved
4593
+ };
4594
+ }
4595
+
4381
4596
  //#endregion
4382
4597
  //#region src/deploy/init.ts
4383
4598
  const logger$2 = console;
@@ -4650,14 +4865,17 @@ function generateSecretsReport(encryptedApps, sniffedApps) {
4650
4865
 
4651
4866
  //#endregion
4652
4867
  //#region src/deploy/sniffer.ts
4868
+ const __filename = fileURLToPath(import.meta.url);
4869
+ const __dirname = dirname(__filename);
4653
4870
  /**
4654
4871
  * Get required environment variables for an app.
4655
4872
  *
4656
- * Detection strategy:
4657
- * - Frontend apps: Returns empty (no server secrets)
4658
- * - Apps with `requiredEnv`: Uses explicit list from config
4659
- * - Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4660
- * - Apps with neither: Returns empty
4873
+ * Detection strategy (in order):
4874
+ * 1. Frontend apps: Returns empty (no server secrets)
4875
+ * 2. Apps with `requiredEnv`: Uses explicit list from config
4876
+ * 3. Entry apps: Imports entry file in subprocess to capture config.parse() calls
4877
+ * 4. Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4878
+ * 5. Apps with neither: Returns empty
4661
4879
  *
4662
4880
  * This function handles "fire and forget" async operations gracefully,
4663
4881
  * capturing errors and unhandled rejections without failing the build.
@@ -4678,6 +4896,14 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4678
4896
  appName,
4679
4897
  requiredEnvVars: [...app.requiredEnv]
4680
4898
  };
4899
+ if (app.entry) {
4900
+ const result = await sniffEntryFile(app.entry, app.path, workspacePath);
4901
+ if (logWarnings && result.error) console.warn(`[sniffer] ${appName}: Entry file threw error during sniffing (env vars still captured): ${result.error.message}`);
4902
+ return {
4903
+ appName,
4904
+ requiredEnvVars: result.envVars
4905
+ };
4906
+ }
4681
4907
  if (app.envParser) {
4682
4908
  const result = await sniffEnvParser(app.envParser, app.path, workspacePath);
4683
4909
  if (logWarnings) {
@@ -4695,6 +4921,80 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4695
4921
  };
4696
4922
  }
4697
4923
/**
 * Sniff an entry file by importing it in a subprocess.
 *
 * Entry apps call `config.parse()` at module load time. To capture which
 * env vars are accessed we:
 * 1. Spawn a subprocess with a module loader hook
 * 2. The loader intercepts `@geekmidas/envkit` and swaps EnvironmentParser
 *    for SnifferEnvironmentParser
 * 3. Import the entry file (which triggers config.parse())
 * 4. Parse the JSON the worker prints and return the accessed env var names
 *
 * Running each app in its own subprocess isolates module caches, so one
 * app's sniffing cannot pollute another's.
 *
 * @param entryPath - Relative path to the entry file (e.g., './src/index.ts')
 * @param appPath - The app's path relative to workspace (e.g., 'apps/auth')
 * @param workspacePath - Absolute path to workspace root
 * @returns EntrySniffResult with env vars and optional error
 */
async function sniffEntryFile(entryPath, appPath, workspacePath) {
  const fullEntryPath = resolve(workspacePath, appPath, entryPath);
  const loaderPath = resolve(__dirname, "sniffer-loader.ts");
  const workerPath = resolve(__dirname, "sniffer-worker.ts");
  return new Promise((settle) => {
    const child = spawn(
      "node",
      ["--import", loaderPath, workerPath, fullEntryPath],
      {
        cwd: resolve(workspacePath, appPath),
        stdio: ["ignore", "pipe", "pipe"],
        env: {
          ...process.env,
          // Ensure the subprocess can execute TypeScript sources directly.
          NODE_OPTIONS: "--import tsx"
        }
      }
    );
    const outChunks = [];
    const errChunks = [];
    child.stdout.on("data", (chunk) => {
      outChunks.push(chunk.toString());
    });
    child.stderr.on("data", (chunk) => {
      errChunks.push(chunk.toString());
    });
    child.on("close", (code) => {
      const stdoutText = outChunks.join("");
      const stderrText = errChunks.join("");
      try {
        // The worker prints a single flat JSON object containing "envVars"
        // as its last output; grab the trailing match from stdout.
        const jsonMatch = stdoutText.match(/\{[^{}]*"envVars"[^{}]*\}[^{]*$/);
        if (jsonMatch) {
          const parsed = JSON.parse(jsonMatch[0]);
          settle({
            envVars: parsed.envVars || [],
            error: parsed.error ? new Error(parsed.error) : void 0
          });
          return;
        }
      } catch {}
      // No parseable payload: report whatever output we collected.
      settle({
        envVars: [],
        error: new Error(`Failed to sniff entry file (exit code ${code}): ${stderrText || stdoutText || "No output"}`)
      });
    });
    child.on("error", (err) => {
      settle({
        envVars: [],
        error: err
      });
    });
  });
}
4997
+ /**
4698
4998
  * Run the SnifferEnvironmentParser on an envParser module to detect
4699
4999
  * which environment variables it accesses.
4700
5000
  *
@@ -4804,10 +5104,130 @@ async function prompt(message, hidden = false) {
4804
5104
  }
4805
5105
  }
4806
5106
/**
 * Wait for Postgres to be ready to accept connections.
 *
 * Polls the Postgres server until a connection succeeds or the retry budget
 * is exhausted. Used after enabling the external port to ensure the
 * database is accessible before creating users.
 *
 * @param host - The Postgres server hostname
 * @param port - The external port (typically 5432)
 * @param user - Master database user (postgres)
 * @param password - Master database password
 * @param database - Database name to connect to
 * @param maxRetries - Maximum number of connection attempts (default: 30)
 * @param retryIntervalMs - Milliseconds between retries (default: 2000)
 * @throws Error if Postgres is not ready after maxRetries attempts
 */
async function waitForPostgres(host, port, user, password, database, maxRetries = 30, retryIntervalMs = 2e3) {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    const client = new Client({
      host,
      port,
      user,
      password,
      database
    });
    try {
      await client.connect();
      return;
    } catch {
      if (attempt < maxRetries - 1) {
        logger$1.log(` Waiting for Postgres... (${attempt + 1}/${maxRetries})`);
        await new Promise((r) => setTimeout(r, retryIntervalMs));
      }
    } finally {
      // Always release the client: a failed connect() can leave a half-open
      // socket behind, and the original only called end() on success, so
      // every failed retry leaked its connection attempt.
      await client.end().catch(() => {});
    }
  }
  throw new Error(`Postgres not ready after ${maxRetries} retries`);
}
5142
/**
 * Escape a value for use inside a single-quoted SQL string literal.
 * Doubles embedded quotes so a `'` in a password cannot terminate the
 * literal early (breaking — or injecting into — the DO block below).
 */
function escapeSqlLiteral(value) {
  return String(value).replace(/'/g, "''");
}
/**
 * Escape a value for use inside a double-quoted SQL identifier.
 */
function escapeSqlIdentifier(value) {
  return String(value).replace(/"/g, '""');
}
/**
 * Initialize Postgres with per-app users and schemas.
 *
 * This function implements the same user/schema isolation pattern used in local
 * dev mode (see docker/postgres/init.sh). It:
 *
 * 1. Temporarily enables the external Postgres port
 * 2. Connects using master credentials
 * 3. Creates each user with appropriate schema permissions
 * 4. Disables the external port for security
 *
 * Schema assignment follows this pattern:
 * - `api` app: Uses `public` schema (shared tables, migrations run here)
 * - Other apps: Get their own schema with `search_path` configured
 *
 * User names and passwords are escaped before interpolation: the original
 * spliced them into the SQL verbatim, so a quote character in either value
 * produced malformed (or injectable) SQL.
 *
 * @param api - The Dokploy API client
 * @param postgres - The provisioned Postgres service details
 * @param serverHostname - The Dokploy server hostname (for external connection)
 * @param users - Array of users to create with their schema configuration
 *
 * @example
 * ```ts
 * await initializePostgresUsers(api, postgres, 'dokploy.example.com', [
 *   { name: 'api', password: 'xxx', usePublicSchema: true },
 *   { name: 'auth', password: 'yyy', usePublicSchema: false },
 * ]);
 * ```
 */
async function initializePostgresUsers(api, postgres, serverHostname, users) {
  logger$1.log("\n🔧 Initializing database users...");
  const externalPort = 5432;
  logger$1.log(` Enabling external port ${externalPort}...`);
  await api.savePostgresExternalPort(postgres.postgresId, externalPort);
  await api.deployPostgres(postgres.postgresId);
  logger$1.log(` Waiting for Postgres to be accessible at ${serverHostname}:${externalPort}...`);
  await waitForPostgres(serverHostname, externalPort, postgres.databaseUser, postgres.databasePassword, postgres.databaseName);
  const client = new Client({
    host: serverHostname,
    port: externalPort,
    user: postgres.databaseUser,
    password: postgres.databasePassword,
    database: postgres.databaseName
  });
  try {
    await client.connect();
    for (const user of users) {
      const schemaName = user.usePublicSchema ? "public" : user.name;
      const userIdent = escapeSqlIdentifier(user.name);
      const schemaIdent = escapeSqlIdentifier(schemaName);
      const passwordLit = escapeSqlLiteral(user.password);
      logger$1.log(` Creating user "${user.name}" with schema "${schemaName}"...`);
      // Create-or-update the role; duplicate_object means the user already
      // exists, in which case we just rotate the password.
      await client.query(`
 DO $$ BEGIN
 CREATE USER "${userIdent}" WITH PASSWORD '${passwordLit}';
 EXCEPTION WHEN duplicate_object THEN
 ALTER USER "${userIdent}" WITH PASSWORD '${passwordLit}';
 END $$;
 `);
      if (user.usePublicSchema) await client.query(`
 GRANT ALL ON SCHEMA public TO "${userIdent}";
 ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO "${userIdent}";
 ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO "${userIdent}";
 `);
      else await client.query(`
 CREATE SCHEMA IF NOT EXISTS "${schemaIdent}" AUTHORIZATION "${userIdent}";
 ALTER USER "${userIdent}" SET search_path TO "${schemaIdent}";
 GRANT USAGE ON SCHEMA "${schemaIdent}" TO "${userIdent}";
 GRANT ALL ON ALL TABLES IN SCHEMA "${schemaIdent}" TO "${userIdent}";
 ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaIdent}" GRANT ALL ON TABLES TO "${userIdent}";
 `);
      logger$1.log(` ✓ User "${user.name}" configured`);
    }
  } finally {
    // Release the master connection even if a query above failed.
    await client.end();
  }
  logger$1.log(" Disabling external port...");
  await api.savePostgresExternalPort(postgres.postgresId, null);
  await api.deployPostgres(postgres.postgresId);
  logger$1.log(" ✓ Database users initialized");
}
5219
/**
 * Extract the server hostname from the Dokploy endpoint URL.
 *
 * @param endpoint - Full Dokploy endpoint URL (e.g. 'https://dokploy.example.com/api')
 * @returns The hostname component of the URL
 */
function getServerHostname(endpoint) {
  return new URL(endpoint).hostname;
}
5226
+ /**
4807
5227
  * Provision docker compose services in Dokploy
4808
5228
  * @internal Exported for testing
4809
5229
  */
4810
- async function provisionServices(api, projectId, environmentId, appName, services, existingServiceIds) {
5230
+ async function provisionServices(api, projectId, environmentId, projectName, services, existingServiceIds) {
4811
5231
  logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
4812
5232
  if (!services || !environmentId) {
4813
5233
  logger$1.log(" Skipping: no services or no environmentId");
@@ -4828,9 +5248,12 @@ async function provisionServices(api, projectId, environmentId, appName, service
4828
5248
  else logger$1.log(` ⚠ Cached ID invalid, will create new`);
4829
5249
  }
4830
5250
  if (!postgres) {
4831
- const { randomBytes: randomBytes$1 } = await import("node:crypto");
4832
- const databasePassword = randomBytes$1(16).toString("hex");
4833
- const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, { databasePassword });
5251
+ const databasePassword = randomBytes(16).toString("hex");
5252
+ const databaseName = projectName.replace(/-/g, "_");
5253
+ const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, {
5254
+ databaseName,
5255
+ databasePassword
5256
+ });
4834
5257
  postgres = result.postgres;
4835
5258
  created = result.created;
4836
5259
  if (created) {
@@ -4898,12 +5321,6 @@ async function provisionServices(api, projectId, environmentId, appName, service
4898
5321
  */
4899
5322
  async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
4900
5323
  logger$1.log("\n🔧 Checking Dokploy setup...");
4901
- const { readStageSecrets: readStageSecrets$1 } = await import("./storage-DNj_I11J.mjs");
4902
- const existingSecrets = await readStageSecrets$1(stage);
4903
- const existingUrls = {
4904
- DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
4905
- REDIS_URL: existingSecrets?.urls?.REDIS_URL
4906
- };
4907
5324
  let creds = await getDokployCredentials();
4908
5325
  if (!creds) {
4909
5326
  logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
@@ -5229,6 +5646,8 @@ async function workspaceDeployCommand(workspace, options) {
5229
5646
  postgres: services.db !== void 0 && services.db !== false,
5230
5647
  redis: services.cache !== void 0 && services.cache !== false
5231
5648
  };
5649
+ let provisionedPostgres = null;
5650
+ let provisionedRedis = null;
5232
5651
  if (dockerServices.postgres || dockerServices.redis) {
5233
5652
  logger$1.log("\n🔧 Provisioning infrastructure services...");
5234
5653
  const existingServiceIds = {
@@ -5237,17 +5656,64 @@ async function workspaceDeployCommand(workspace, options) {
5237
5656
  };
5238
5657
  const provisionResult = await provisionServices(api, project.projectId, environmentId, workspace.name, dockerServices, existingServiceIds);
5239
5658
  if (provisionResult?.serviceIds) {
5240
- if (provisionResult.serviceIds.postgresId) setPostgresId(state, provisionResult.serviceIds.postgresId);
5241
- if (provisionResult.serviceIds.redisId) setRedisId(state, provisionResult.serviceIds.redisId);
5659
+ if (provisionResult.serviceIds.postgresId) {
5660
+ setPostgresId(state, provisionResult.serviceIds.postgresId);
5661
+ provisionedPostgres = await api.getPostgres(provisionResult.serviceIds.postgresId);
5662
+ }
5663
+ if (provisionResult.serviceIds.redisId) {
5664
+ setRedisId(state, provisionResult.serviceIds.redisId);
5665
+ provisionedRedis = await api.getRedis(provisionResult.serviceIds.redisId);
5666
+ }
5242
5667
  }
5243
5668
  }
5244
5669
  const backendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "backend");
5245
5670
  const frontendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "frontend");
5671
+ const perAppDbCredentials = /* @__PURE__ */ new Map();
5672
+ if (provisionedPostgres && backendApps.length > 0) {
5673
+ const appsNeedingDb = backendApps.filter((appName) => {
5674
+ const requirements = sniffedApps.get(appName);
5675
+ return requirements?.requiredEnvVars.includes("DATABASE_URL");
5676
+ });
5677
+ if (appsNeedingDb.length > 0) {
5678
+ logger$1.log(`\n🔐 Setting up per-app database credentials...`);
5679
+ logger$1.log(` Apps needing DATABASE_URL: ${appsNeedingDb.join(", ")}`);
5680
+ const existingCredentials = getAllAppCredentials(state);
5681
+ const usersToCreate = [];
5682
+ for (const appName of appsNeedingDb) {
5683
+ let credentials = existingCredentials[appName];
5684
+ if (credentials) logger$1.log(` ${appName}: Using existing credentials from state`);
5685
+ else {
5686
+ const password = randomBytes(16).toString("hex");
5687
+ credentials = {
5688
+ dbUser: appName,
5689
+ dbPassword: password
5690
+ };
5691
+ setAppCredentials(state, appName, credentials);
5692
+ logger$1.log(` ${appName}: Generated new credentials`);
5693
+ }
5694
+ perAppDbCredentials.set(appName, credentials);
5695
+ usersToCreate.push({
5696
+ name: appName,
5697
+ password: credentials.dbPassword,
5698
+ usePublicSchema: appName === "api"
5699
+ });
5700
+ }
5701
+ const serverHostname = getServerHostname(creds.endpoint);
5702
+ await initializePostgresUsers(api, provisionedPostgres, serverHostname, usersToCreate);
5703
+ }
5704
+ }
5246
5705
  const publicUrls = {};
5247
5706
  const results = [];
5248
5707
  const dokployConfig = workspace.deploy.dokploy;
5249
5708
  const appHostnames = /* @__PURE__ */ new Map();
5250
5709
  const appDomainIds = /* @__PURE__ */ new Map();
5710
+ const frontendUrls = [];
5711
+ for (const appName of frontendApps) {
5712
+ const app = workspace.apps[appName];
5713
+ const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5714
+ const hostname = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5715
+ frontendUrls.push(`https://${hostname}`);
5716
+ }
5251
5717
  if (backendApps.length > 0) {
5252
5718
  logger$1.log("\n📦 PHASE 1: Deploying backend applications...");
5253
5719
  for (const appName of backendApps) {
@@ -5291,14 +5757,46 @@ async function workspaceDeployCommand(workspace, options) {
5291
5757
  },
5292
5758
  buildArgs
5293
5759
  });
5294
- const envVars = [`NODE_ENV=production`, `PORT=${app.port}`];
5295
- if (appSecrets && appSecrets.masterKey) envVars.push(`GKM_MASTER_KEY=${appSecrets.masterKey}`);
5760
+ const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5761
+ const envContext = {
5762
+ app,
5763
+ appName,
5764
+ stage,
5765
+ state,
5766
+ appCredentials: perAppDbCredentials.get(appName),
5767
+ postgres: provisionedPostgres ? {
5768
+ host: provisionedPostgres.appName,
5769
+ port: 5432,
5770
+ database: provisionedPostgres.databaseName
5771
+ } : void 0,
5772
+ redis: provisionedRedis ? {
5773
+ host: provisionedRedis.appName,
5774
+ port: 6379,
5775
+ password: provisionedRedis.databasePassword
5776
+ } : void 0,
5777
+ appHostname: backendHost,
5778
+ frontendUrls,
5779
+ userSecrets: stageSecrets ?? void 0,
5780
+ masterKey: appSecrets?.masterKey
5781
+ };
5782
+ const appRequirements = sniffedApps.get(appName);
5783
+ const requiredVars = appRequirements?.requiredEnvVars ?? [];
5784
+ const { valid, missing, resolved } = validateEnvVars(requiredVars, envContext);
5785
+ if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
5786
+ const envVars = Object.entries(resolved).map(([key, value]) => `${key}=${value}`);
5787
+ if (Object.keys(resolved).length > 0) logger$1.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
5296
5788
  await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
5297
5789
  await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
5298
5790
  logger$1.log(` Deploying to Dokploy...`);
5299
5791
  await api.deployApplication(application.applicationId);
5300
- const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5301
- try {
5792
+ const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5793
+ const existingDomain = existingDomains.find((d) => d.host === backendHost);
5794
+ if (existingDomain) {
5795
+ appHostnames.set(appName, backendHost);
5796
+ appDomainIds.set(appName, existingDomain.domainId);
5797
+ publicUrls[appName] = `https://${backendHost}`;
5798
+ logger$1.log(` ✓ Domain: https://${backendHost} (existing)`);
5799
+ } else try {
5302
5800
  const domain = await api.createDomain({
5303
5801
  host: backendHost,
5304
5802
  port: app.port,
@@ -5308,18 +5806,13 @@ async function workspaceDeployCommand(workspace, options) {
5308
5806
  });
5309
5807
  appHostnames.set(appName, backendHost);
5310
5808
  appDomainIds.set(appName, domain.domainId);
5311
- const publicUrl = `https://${backendHost}`;
5312
- publicUrls[appName] = publicUrl;
5313
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5809
+ publicUrls[appName] = `https://${backendHost}`;
5810
+ logger$1.log(` ✓ Domain: https://${backendHost} (created)`);
5314
5811
  } catch (domainError) {
5812
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5813
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5315
5814
  appHostnames.set(appName, backendHost);
5316
- try {
5317
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5318
- const matchingDomain = existingDomains.find((d) => d.host === backendHost);
5319
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5320
- } catch {}
5321
5815
  publicUrls[appName] = `https://${backendHost}`;
5322
- logger$1.log(` ℹ Domain already configured: https://${backendHost}`);
5323
5816
  }
5324
5817
  results.push({
5325
5818
  appName,
@@ -5388,7 +5881,14 @@ async function workspaceDeployCommand(workspace, options) {
5388
5881
  await api.deployApplication(application.applicationId);
5389
5882
  const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5390
5883
  const frontendHost = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5391
- try {
5884
+ const existingFrontendDomains = await api.getDomainsByApplicationId(application.applicationId);
5885
+ const existingFrontendDomain = existingFrontendDomains.find((d) => d.host === frontendHost);
5886
+ if (existingFrontendDomain) {
5887
+ appHostnames.set(appName, frontendHost);
5888
+ appDomainIds.set(appName, existingFrontendDomain.domainId);
5889
+ publicUrls[appName] = `https://${frontendHost}`;
5890
+ logger$1.log(` ✓ Domain: https://${frontendHost} (existing)`);
5891
+ } else try {
5392
5892
  const domain = await api.createDomain({
5393
5893
  host: frontendHost,
5394
5894
  port: app.port,
@@ -5398,18 +5898,13 @@ async function workspaceDeployCommand(workspace, options) {
5398
5898
  });
5399
5899
  appHostnames.set(appName, frontendHost);
5400
5900
  appDomainIds.set(appName, domain.domainId);
5401
- const publicUrl = `https://${frontendHost}`;
5402
- publicUrls[appName] = publicUrl;
5403
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5901
+ publicUrls[appName] = `https://${frontendHost}`;
5902
+ logger$1.log(` ✓ Domain: https://${frontendHost} (created)`);
5404
5903
  } catch (domainError) {
5904
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5905
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5405
5906
  appHostnames.set(appName, frontendHost);
5406
- try {
5407
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5408
- const matchingDomain = existingDomains.find((d) => d.host === frontendHost);
5409
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5410
- } catch {}
5411
5907
  publicUrls[appName] = `https://${frontendHost}`;
5412
- logger$1.log(` ℹ Domain already configured: https://${frontendHost}`);
5413
5908
  }
5414
5909
  results.push({
5415
5910
  appName,
@@ -5437,14 +5932,19 @@ async function workspaceDeployCommand(workspace, options) {
5437
5932
  const dnsConfig = workspace.deploy.dns;
5438
5933
  if (dnsConfig && appHostnames.size > 0) {
5439
5934
  const dnsResult = await orchestrateDns(appHostnames, dnsConfig, creds.endpoint);
5440
- if (dnsResult?.success && appDomainIds.size > 0) {
5935
+ if (dnsResult?.serverIp && appHostnames.size > 0) {
5936
+ await verifyDnsRecords(appHostnames, dnsResult.serverIp, state);
5937
+ await writeStageState(workspace.root, stage, state);
5938
+ }
5939
+ if (dnsResult?.success && appHostnames.size > 0) {
5441
5940
  logger$1.log("\n🔒 Validating domains for SSL certificates...");
5442
- for (const [appName, domainId] of appDomainIds) try {
5443
- await api.validateDomain(domainId);
5444
- logger$1.log(` ✓ ${appName}: SSL validation triggered`);
5941
+ for (const [appName, hostname] of appHostnames) try {
5942
+ const result = await api.validateDomain(hostname);
5943
+ if (result.isValid) logger$1.log(` ✓ ${appName}: ${hostname} ${result.resolvedIp}`);
5944
+ else logger$1.log(` ⚠ ${appName}: ${hostname} not valid`);
5445
5945
  } catch (validationError) {
5446
5946
  const message = validationError instanceof Error ? validationError.message : "Unknown error";
5447
- logger$1.log(` ⚠ ${appName}: SSL validation failed - ${message}`);
5947
+ logger$1.log(` ⚠ ${appName}: validation failed - ${message}`);
5448
5948
  }
5449
5949
  }
5450
5950
  }
@@ -5736,10 +6236,10 @@ const GEEKMIDAS_VERSIONS = {
5736
6236
  "@geekmidas/cli": CLI_VERSION,
5737
6237
  "@geekmidas/client": "~0.5.0",
5738
6238
  "@geekmidas/cloud": "~0.2.0",
5739
- "@geekmidas/constructs": "~0.7.0",
6239
+ "@geekmidas/constructs": "~0.8.0",
5740
6240
  "@geekmidas/db": "~0.3.0",
5741
6241
  "@geekmidas/emailkit": "~0.2.0",
5742
- "@geekmidas/envkit": "~0.6.0",
6242
+ "@geekmidas/envkit": "~0.7.0",
5743
6243
  "@geekmidas/errors": "~0.1.0",
5744
6244
  "@geekmidas/events": "~0.2.0",
5745
6245
  "@geekmidas/logger": "~0.4.0",
@@ -5748,7 +6248,7 @@ const GEEKMIDAS_VERSIONS = {
5748
6248
  "@geekmidas/services": "~0.2.0",
5749
6249
  "@geekmidas/storage": "~0.1.0",
5750
6250
  "@geekmidas/studio": "~0.4.0",
5751
- "@geekmidas/telescope": "~0.5.0",
6251
+ "@geekmidas/telescope": "~0.6.0",
5752
6252
  "@geekmidas/testkit": "~0.6.0"
5753
6253
  };
5754
6254