@geekmidas/cli 0.48.0 → 0.49.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. package/dist/{dokploy-api-DvzIDxTj.mjs → dokploy-api-94KzmTVf.mjs} +4 -4
  2. package/dist/dokploy-api-94KzmTVf.mjs.map +1 -0
  3. package/dist/dokploy-api-CItuaWTq.mjs +3 -0
  4. package/dist/dokploy-api-DBNE8MDt.cjs +3 -0
  5. package/dist/{dokploy-api-BDLu0qWi.cjs → dokploy-api-YD8WCQfW.cjs} +4 -4
  6. package/dist/dokploy-api-YD8WCQfW.cjs.map +1 -0
  7. package/dist/index.cjs +2392 -1888
  8. package/dist/index.cjs.map +1 -1
  9. package/dist/index.mjs +2389 -1885
  10. package/dist/index.mjs.map +1 -1
  11. package/package.json +6 -4
  12. package/src/build/__tests__/handler-templates.spec.ts +947 -0
  13. package/src/deploy/__tests__/__fixtures__/entry-apps/async-entry.ts +24 -0
  14. package/src/deploy/__tests__/__fixtures__/entry-apps/nested-config-entry.ts +24 -0
  15. package/src/deploy/__tests__/__fixtures__/entry-apps/no-env-entry.ts +12 -0
  16. package/src/deploy/__tests__/__fixtures__/entry-apps/simple-entry.ts +14 -0
  17. package/src/deploy/__tests__/__fixtures__/entry-apps/throwing-entry.ts +16 -0
  18. package/src/deploy/__tests__/__fixtures__/env-parsers/non-function-export.ts +10 -0
  19. package/src/deploy/__tests__/__fixtures__/env-parsers/parseable-env-parser.ts +18 -0
  20. package/src/deploy/__tests__/__fixtures__/env-parsers/throwing-env-parser.ts +18 -0
  21. package/src/deploy/__tests__/__fixtures__/env-parsers/valid-env-parser.ts +16 -0
  22. package/src/deploy/__tests__/dns-verification.spec.ts +229 -0
  23. package/src/deploy/__tests__/dokploy-api.spec.ts +2 -3
  24. package/src/deploy/__tests__/domain.spec.ts +7 -3
  25. package/src/deploy/__tests__/env-resolver.spec.ts +469 -0
  26. package/src/deploy/__tests__/index.spec.ts +12 -12
  27. package/src/deploy/__tests__/secrets.spec.ts +4 -1
  28. package/src/deploy/__tests__/sniffer.spec.ts +326 -1
  29. package/src/deploy/__tests__/state.spec.ts +844 -0
  30. package/src/deploy/dns/hostinger-api.ts +4 -1
  31. package/src/deploy/dns/index.ts +113 -1
  32. package/src/deploy/docker.ts +1 -2
  33. package/src/deploy/dokploy-api.ts +18 -9
  34. package/src/deploy/domain.ts +5 -4
  35. package/src/deploy/env-resolver.ts +278 -0
  36. package/src/deploy/index.ts +525 -119
  37. package/src/deploy/secrets.ts +7 -2
  38. package/src/deploy/sniffer-envkit-patch.ts +43 -0
  39. package/src/deploy/sniffer-hooks.ts +52 -0
  40. package/src/deploy/sniffer-loader.ts +23 -0
  41. package/src/deploy/sniffer-worker.ts +74 -0
  42. package/src/deploy/sniffer.ts +136 -14
  43. package/src/deploy/state.ts +162 -1
  44. package/src/init/versions.ts +3 -3
  45. package/tsconfig.tsbuildinfo +1 -1
  46. package/dist/dokploy-api-BDLu0qWi.cjs.map +0 -1
  47. package/dist/dokploy-api-BN3V57z1.mjs +0 -3
  48. package/dist/dokploy-api-BdCKjFDA.cjs +0 -3
  49. package/dist/dokploy-api-DvzIDxTj.mjs.map +0 -1
package/dist/index.cjs CHANGED
@@ -4,7 +4,7 @@ const require_workspace = require('./workspace-CaVW6j2q.cjs');
4
4
  const require_config = require('./config-HYiM3iQJ.cjs');
5
5
  const require_openapi = require('./openapi-D7WwlpPF.cjs');
6
6
  const require_storage = require('./storage-BPRgh3DU.cjs');
7
- const require_dokploy_api = require('./dokploy-api-BDLu0qWi.cjs');
7
+ const require_dokploy_api = require('./dokploy-api-YD8WCQfW.cjs');
8
8
  const require_encryption = require('./encryption-DaCB_NmS.cjs');
9
9
  const require_openapi_react_query = require('./openapi-react-query-C_MxpBgF.cjs');
10
10
  const node_fs = require_chunk.__toESM(require("node:fs"));
@@ -23,6 +23,7 @@ const __geekmidas_constructs_crons = require_chunk.__toESM(require("@geekmidas/c
23
23
  const __geekmidas_constructs_functions = require_chunk.__toESM(require("@geekmidas/constructs/functions"));
24
24
  const __geekmidas_constructs_subscribers = require_chunk.__toESM(require("@geekmidas/constructs/subscribers"));
25
25
  const node_crypto = require_chunk.__toESM(require("node:crypto"));
26
+ const pg = require_chunk.__toESM(require("pg"));
26
27
  const node_dns_promises = require_chunk.__toESM(require("node:dns/promises"));
27
28
  const node_url = require_chunk.__toESM(require("node:url"));
28
29
  const prompts = require_chunk.__toESM(require("prompts"));
@@ -30,7 +31,7 @@ const node_module = require_chunk.__toESM(require("node:module"));
30
31
 
31
32
  //#region package.json
32
33
  var name = "@geekmidas/cli";
33
- var version = "0.47.0";
34
+ var version = "0.49.0";
34
35
  var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
35
36
  var private$1 = false;
36
37
  var type = "module";
@@ -86,12 +87,14 @@ var dependencies = {
86
87
  "hono": "~4.8.0",
87
88
  "lodash.kebabcase": "^4.1.1",
88
89
  "openapi-typescript": "^7.4.2",
90
+ "pg": "~8.17.1",
89
91
  "prompts": "~2.4.2"
90
92
  };
91
93
  var devDependencies = {
92
94
  "@geekmidas/testkit": "workspace:*",
93
95
  "@types/lodash.kebabcase": "^4.1.9",
94
96
  "@types/node": "~24.9.1",
97
+ "@types/pg": "~8.16.0",
95
98
  "@types/prompts": "~2.4.9",
96
99
  "typescript": "^5.8.2",
97
100
  "vitest": "^3.2.4",
@@ -254,7 +257,7 @@ const logger$11 = console;
254
257
  * Validate Dokploy token by making a test API call
255
258
  */
256
259
  async function validateDokployToken(endpoint, token) {
257
- const { DokployApi: DokployApi$1 } = await Promise.resolve().then(() => require("./dokploy-api-BdCKjFDA.cjs"));
260
+ const { DokployApi: DokployApi$1 } = await Promise.resolve().then(() => require("./dokploy-api-DBNE8MDt.cjs"));
258
261
  const api = new DokployApi$1({
259
262
  baseUrl: endpoint,
260
263
  token
@@ -1867,9 +1870,9 @@ var DevServer = class {
1867
1870
  }
1868
1871
  async createServerEntry() {
1869
1872
  const { writeFile: fsWriteFile } = await import("node:fs/promises");
1870
- const { relative: relative$6, dirname: dirname$8 } = await import("node:path");
1873
+ const { relative: relative$6, dirname: dirname$9 } = await import("node:path");
1871
1874
  const serverPath = (0, node_path.join)(this.appRoot, ".gkm", this.provider, "server.ts");
1872
- const relativeAppPath = relative$6(dirname$8(serverPath), (0, node_path.join)(dirname$8(serverPath), "app.js"));
1875
+ const relativeAppPath = relative$6(dirname$9(serverPath), (0, node_path.join)(dirname$9(serverPath), "app.js"));
1873
1876
  const credentialsInjection = this.secretsJsonPath ? `import { Credentials } from '@geekmidas/envkit/credentials';
1874
1877
  import { existsSync, readFileSync } from 'node:fs';
1875
1878
 
@@ -2275,2027 +2278,2143 @@ function getAppOutputPath(workspace, _appName, app) {
2275
2278
  }
2276
2279
 
2277
2280
  //#endregion
2278
- //#region src/docker/compose.ts
2279
- /** Default Docker images for services */
2280
- const DEFAULT_SERVICE_IMAGES = {
2281
- postgres: "postgres",
2282
- redis: "redis",
2283
- rabbitmq: "rabbitmq"
2284
- };
2285
- /** Default Docker image versions for services */
2286
- const DEFAULT_SERVICE_VERSIONS = {
2287
- postgres: "16-alpine",
2288
- redis: "7-alpine",
2289
- rabbitmq: "3-management-alpine"
2290
- };
2291
- /** Get the default full image reference for a service */
2292
- function getDefaultImage(serviceName) {
2293
- return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
2281
+ //#region src/deploy/state.ts
2282
+ /**
2283
+ * Get the state file path for a stage
2284
+ */
2285
+ function getStateFilePath(workspaceRoot, stage) {
2286
+ return (0, node_path.join)(workspaceRoot, ".gkm", `deploy-${stage}.json`);
2294
2287
  }
2295
- /** Normalize services config to a consistent format - returns Map of service name to full image reference */
2296
- function normalizeServices(services) {
2297
- const result = /* @__PURE__ */ new Map();
2298
- if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
2299
- else for (const [name$1, config] of Object.entries(services)) {
2300
- const serviceName = name$1;
2301
- if (config === true) result.set(serviceName, getDefaultImage(serviceName));
2302
- else if (config && typeof config === "object") {
2303
- const serviceConfig = config;
2304
- if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
2305
- else {
2306
- const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
2307
- result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
2308
- }
2309
- }
2288
+ /**
2289
+ * Read the deploy state for a stage
2290
+ * Returns null if state file doesn't exist
2291
+ */
2292
+ async function readStageState(workspaceRoot, stage) {
2293
+ const filePath = getStateFilePath(workspaceRoot, stage);
2294
+ try {
2295
+ const content = await (0, node_fs_promises.readFile)(filePath, "utf-8");
2296
+ return JSON.parse(content);
2297
+ } catch (error) {
2298
+ if (error.code === "ENOENT") return null;
2299
+ console.warn(`Warning: Could not read deploy state: ${error}`);
2300
+ return null;
2310
2301
  }
2311
- return result;
2312
2302
  }
2313
2303
  /**
2314
- * Generate docker-compose.yml for production deployment
2304
+ * Write the deploy state for a stage
2315
2305
  */
2316
- function generateDockerCompose(options) {
2317
- const { imageName, registry, port, healthCheckPath, services } = options;
2318
- const serviceMap = normalizeServices(services);
2319
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2320
- let yaml = `version: '3.8'
2321
-
2322
- services:
2323
- api:
2324
- build:
2325
- context: ../..
2326
- dockerfile: .gkm/docker/Dockerfile
2327
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2328
- container_name: ${imageName}
2329
- restart: unless-stopped
2330
- ports:
2331
- - "\${PORT:-${port}}:${port}"
2332
- environment:
2333
- - NODE_ENV=production
2334
- `;
2335
- if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2336
- `;
2337
- if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2338
- `;
2339
- if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
2340
- `;
2341
- yaml += ` healthcheck:
2342
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2343
- interval: 30s
2344
- timeout: 3s
2345
- retries: 3
2346
- `;
2347
- if (serviceMap.size > 0) {
2348
- yaml += ` depends_on:
2349
- `;
2350
- for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
2351
- condition: service_healthy
2352
- `;
2353
- }
2354
- yaml += ` networks:
2355
- - app-network
2356
- `;
2357
- const postgresImage = serviceMap.get("postgres");
2358
- if (postgresImage) yaml += `
2359
- postgres:
2360
- image: ${postgresImage}
2361
- container_name: postgres
2362
- restart: unless-stopped
2363
- environment:
2364
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2365
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2366
- POSTGRES_DB: \${POSTGRES_DB:-app}
2367
- volumes:
2368
- - postgres_data:/var/lib/postgresql/data
2369
- healthcheck:
2370
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2371
- interval: 5s
2372
- timeout: 5s
2373
- retries: 5
2374
- networks:
2375
- - app-network
2376
- `;
2377
- const redisImage = serviceMap.get("redis");
2378
- if (redisImage) yaml += `
2379
- redis:
2380
- image: ${redisImage}
2381
- container_name: redis
2382
- restart: unless-stopped
2383
- volumes:
2384
- - redis_data:/data
2385
- healthcheck:
2386
- test: ["CMD", "redis-cli", "ping"]
2387
- interval: 5s
2388
- timeout: 5s
2389
- retries: 5
2390
- networks:
2391
- - app-network
2392
- `;
2393
- const rabbitmqImage = serviceMap.get("rabbitmq");
2394
- if (rabbitmqImage) yaml += `
2395
- rabbitmq:
2396
- image: ${rabbitmqImage}
2397
- container_name: rabbitmq
2398
- restart: unless-stopped
2399
- environment:
2400
- RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
2401
- RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
2402
- ports:
2403
- - "15672:15672" # Management UI
2404
- volumes:
2405
- - rabbitmq_data:/var/lib/rabbitmq
2406
- healthcheck:
2407
- test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
2408
- interval: 10s
2409
- timeout: 5s
2410
- retries: 5
2411
- networks:
2412
- - app-network
2413
- `;
2414
- yaml += `
2415
- volumes:
2416
- `;
2417
- if (serviceMap.has("postgres")) yaml += ` postgres_data:
2418
- `;
2419
- if (serviceMap.has("redis")) yaml += ` redis_data:
2420
- `;
2421
- if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
2422
- `;
2423
- yaml += `
2424
- networks:
2425
- app-network:
2426
- driver: bridge
2427
- `;
2428
- return yaml;
2306
+ async function writeStageState(workspaceRoot, stage, state) {
2307
+ const filePath = getStateFilePath(workspaceRoot, stage);
2308
+ const dir = (0, node_path.join)(workspaceRoot, ".gkm");
2309
+ await (0, node_fs_promises.mkdir)(dir, { recursive: true });
2310
+ state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
2311
+ await (0, node_fs_promises.writeFile)(filePath, JSON.stringify(state, null, 2));
2429
2312
  }
2430
2313
  /**
2431
- * Generate a minimal docker-compose.yml for API only
2314
+ * Create a new empty state for a stage
2432
2315
  */
2433
- function generateMinimalDockerCompose(options) {
2434
- const { imageName, registry, port, healthCheckPath } = options;
2435
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2436
- return `version: '3.8'
2437
-
2438
- services:
2439
- api:
2440
- build:
2441
- context: ../..
2442
- dockerfile: .gkm/docker/Dockerfile
2443
- image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
2444
- container_name: ${imageName}
2445
- restart: unless-stopped
2446
- ports:
2447
- - "\${PORT:-${port}}:${port}"
2448
- environment:
2449
- - NODE_ENV=production
2450
- healthcheck:
2451
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
2452
- interval: 30s
2453
- timeout: 3s
2454
- retries: 3
2455
- networks:
2456
- - app-network
2457
-
2458
- networks:
2459
- app-network:
2460
- driver: bridge
2461
- `;
2316
+ function createEmptyState(stage, environmentId) {
2317
+ return {
2318
+ provider: "dokploy",
2319
+ stage,
2320
+ environmentId,
2321
+ applications: {},
2322
+ services: {},
2323
+ lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
2324
+ };
2462
2325
  }
2463
2326
  /**
2464
- * Generate docker-compose.yml for a workspace with all apps as services.
2465
- * Apps can communicate with each other via service names.
2466
- * @internal Exported for testing
2327
+ * Get application ID from state
2467
2328
  */
2468
- function generateWorkspaceCompose(workspace, options = {}) {
2469
- const { registry } = options;
2470
- const apps = Object.entries(workspace.apps);
2471
- const services = workspace.services;
2472
- const hasPostgres = services.db !== void 0 && services.db !== false;
2473
- const hasRedis = services.cache !== void 0 && services.cache !== false;
2474
- const hasMail = services.mail !== void 0 && services.mail !== false;
2475
- const postgresImage = getInfraServiceImage("postgres", services.db);
2476
- const redisImage = getInfraServiceImage("redis", services.cache);
2477
- let yaml = `# Docker Compose for ${workspace.name} workspace
2478
- # Generated by gkm - do not edit manually
2479
-
2480
- services:
2481
- `;
2482
- for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
2483
- registry,
2484
- hasPostgres,
2485
- hasRedis
2486
- });
2487
- if (hasPostgres) yaml += `
2488
- postgres:
2489
- image: ${postgresImage}
2490
- container_name: ${workspace.name}-postgres
2491
- restart: unless-stopped
2492
- environment:
2493
- POSTGRES_USER: \${POSTGRES_USER:-postgres}
2494
- POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
2495
- POSTGRES_DB: \${POSTGRES_DB:-app}
2496
- volumes:
2497
- - postgres_data:/var/lib/postgresql/data
2498
- healthcheck:
2499
- test: ["CMD-SHELL", "pg_isready -U postgres"]
2500
- interval: 5s
2501
- timeout: 5s
2502
- retries: 5
2503
- networks:
2504
- - workspace-network
2505
- `;
2506
- if (hasRedis) yaml += `
2507
- redis:
2508
- image: ${redisImage}
2509
- container_name: ${workspace.name}-redis
2510
- restart: unless-stopped
2511
- volumes:
2512
- - redis_data:/data
2513
- healthcheck:
2514
- test: ["CMD", "redis-cli", "ping"]
2515
- interval: 5s
2516
- timeout: 5s
2517
- retries: 5
2518
- networks:
2519
- - workspace-network
2520
- `;
2521
- if (hasMail) yaml += `
2522
- mailpit:
2523
- image: axllent/mailpit:latest
2524
- container_name: ${workspace.name}-mailpit
2525
- restart: unless-stopped
2526
- ports:
2527
- - "8025:8025" # Web UI
2528
- - "1025:1025" # SMTP
2529
- networks:
2530
- - workspace-network
2531
- `;
2532
- yaml += `
2533
- volumes:
2534
- `;
2535
- if (hasPostgres) yaml += ` postgres_data:
2536
- `;
2537
- if (hasRedis) yaml += ` redis_data:
2538
- `;
2539
- yaml += `
2540
- networks:
2541
- workspace-network:
2542
- driver: bridge
2543
- `;
2544
- return yaml;
2329
+ function getApplicationId(state, appName) {
2330
+ return state?.applications[appName];
2545
2331
  }
2546
2332
  /**
2547
- * Get infrastructure service image with version.
2333
+ * Set application ID in state (mutates state)
2548
2334
  */
2549
- function getInfraServiceImage(serviceName, config) {
2550
- const defaults = {
2551
- postgres: "postgres:16-alpine",
2552
- redis: "redis:7-alpine"
2553
- };
2554
- if (!config || config === true) return defaults[serviceName];
2555
- if (typeof config === "object") {
2556
- if (config.image) return config.image;
2557
- if (config.version) {
2558
- const baseImage = serviceName === "postgres" ? "postgres" : "redis";
2559
- return `${baseImage}:${config.version}`;
2560
- }
2561
- }
2562
- return defaults[serviceName];
2335
+ function setApplicationId(state, appName, applicationId) {
2336
+ state.applications[appName] = applicationId;
2563
2337
  }
2564
2338
  /**
2565
- * Generate a service definition for an app.
2339
+ * Get postgres ID from state
2566
2340
  */
2567
- function generateAppService(appName, app, allApps, options) {
2568
- const { registry, hasPostgres, hasRedis } = options;
2569
- const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
2570
- const healthCheckPath = app.type === "frontend" ? "/" : "/health";
2571
- const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
2572
- let yaml = `
2573
- ${appName}:
2574
- build:
2575
- context: .
2576
- dockerfile: .gkm/docker/Dockerfile.${appName}
2577
- image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
2578
- container_name: ${appName}
2579
- restart: unless-stopped
2580
- ports:
2581
- - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
2582
- environment:
2583
- - NODE_ENV=production
2584
- - PORT=${app.port}
2585
- `;
2586
- for (const dep of app.dependencies) {
2587
- const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
2588
- if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
2589
- `;
2590
- }
2591
- if (app.type === "backend") {
2592
- if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
2593
- `;
2594
- if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
2595
- `;
2596
- }
2597
- yaml += ` healthcheck:
2598
- test: ${healthCheckCmd}
2599
- interval: 30s
2600
- timeout: 3s
2601
- retries: 3
2602
- `;
2603
- const dependencies$1 = [...app.dependencies];
2604
- if (app.type === "backend") {
2605
- if (hasPostgres) dependencies$1.push("postgres");
2606
- if (hasRedis) dependencies$1.push("redis");
2607
- }
2608
- if (dependencies$1.length > 0) {
2609
- yaml += ` depends_on:
2610
- `;
2611
- for (const dep of dependencies$1) yaml += ` ${dep}:
2612
- condition: service_healthy
2613
- `;
2614
- }
2615
- yaml += ` networks:
2616
- - workspace-network
2617
- `;
2618
- return yaml;
2341
+ function getPostgresId(state) {
2342
+ return state?.services.postgresId;
2619
2343
  }
2620
-
2621
- //#endregion
2622
- //#region src/docker/templates.ts
2623
- const LOCKFILES = [
2624
- ["pnpm-lock.yaml", "pnpm"],
2625
- ["bun.lockb", "bun"],
2626
- ["yarn.lock", "yarn"],
2627
- ["package-lock.json", "npm"]
2628
- ];
2629
2344
  /**
2630
- * Detect package manager from lockfiles
2631
- * Walks up the directory tree to find lockfile (for monorepos)
2345
+ * Set postgres ID in state (mutates state)
2632
2346
  */
2633
- function detectPackageManager$1(cwd = process.cwd()) {
2634
- let dir = cwd;
2635
- const root = (0, node_path.parse)(dir).root;
2636
- while (dir !== root) {
2637
- for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(dir, lockfile))) return pm;
2638
- dir = (0, node_path.dirname)(dir);
2639
- }
2640
- for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(root, lockfile))) return pm;
2641
- return "pnpm";
2347
+ function setPostgresId(state, postgresId) {
2348
+ state.services.postgresId = postgresId;
2642
2349
  }
2643
2350
  /**
2644
- * Find the lockfile path by walking up the directory tree
2645
- * Returns the full path to the lockfile, or null if not found
2351
+ * Get redis ID from state
2646
2352
  */
2647
- function findLockfilePath(cwd = process.cwd()) {
2648
- let dir = cwd;
2649
- const root = (0, node_path.parse)(dir).root;
2650
- while (dir !== root) {
2651
- for (const [lockfile] of LOCKFILES) {
2652
- const lockfilePath = (0, node_path.join)(dir, lockfile);
2653
- if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
2654
- }
2655
- dir = (0, node_path.dirname)(dir);
2656
- }
2657
- for (const [lockfile] of LOCKFILES) {
2658
- const lockfilePath = (0, node_path.join)(root, lockfile);
2659
- if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
2660
- }
2661
- return null;
2353
+ function getRedisId(state) {
2354
+ return state?.services.redisId;
2662
2355
  }
2663
2356
  /**
2664
- * Check if we're in a monorepo (lockfile is in a parent directory)
2357
+ * Set redis ID in state (mutates state)
2665
2358
  */
2666
- function isMonorepo(cwd = process.cwd()) {
2667
- const lockfilePath = findLockfilePath(cwd);
2668
- if (!lockfilePath) return false;
2669
- const lockfileDir = (0, node_path.dirname)(lockfilePath);
2670
- return lockfileDir !== cwd;
2359
+ function setRedisId(state, redisId) {
2360
+ state.services.redisId = redisId;
2671
2361
  }
2672
2362
  /**
2673
- * Check if turbo.json exists (walks up directory tree)
2363
+ * Set app credentials in state (mutates state)
2674
2364
  */
2675
- function hasTurboConfig(cwd = process.cwd()) {
2676
- let dir = cwd;
2677
- const root = (0, node_path.parse)(dir).root;
2678
- while (dir !== root) {
2679
- if ((0, node_fs.existsSync)((0, node_path.join)(dir, "turbo.json"))) return true;
2680
- dir = (0, node_path.dirname)(dir);
2681
- }
2682
- return (0, node_fs.existsSync)((0, node_path.join)(root, "turbo.json"));
2365
+ function setAppCredentials(state, appName, credentials) {
2366
+ if (!state.appCredentials) state.appCredentials = {};
2367
+ state.appCredentials[appName] = credentials;
2683
2368
  }
2684
2369
  /**
2685
- * Get install command for turbo builds (without frozen lockfile)
2686
- * Turbo prune creates a subset that may not perfectly match the lockfile
2370
+ * Get all app credentials from state
2687
2371
  */
2688
- function getTurboInstallCmd(pm) {
2689
- const commands = {
2690
- pnpm: "pnpm install",
2691
- npm: "npm install",
2692
- yarn: "yarn install",
2693
- bun: "bun install"
2694
- };
2695
- return commands[pm];
2372
+ function getAllAppCredentials(state) {
2373
+ return state?.appCredentials ?? {};
2696
2374
  }
2697
2375
  /**
2698
- * Get package manager specific commands and paths
2376
+ * Get a generated secret for an app
2699
2377
  */
2700
- function getPmConfig(pm) {
2701
- const configs = {
2702
- pnpm: {
2703
- install: "corepack enable && corepack prepare pnpm@latest --activate",
2704
- lockfile: "pnpm-lock.yaml",
2705
- fetch: "pnpm fetch",
2706
- installCmd: "pnpm install --frozen-lockfile --offline",
2707
- cacheTarget: "/root/.local/share/pnpm/store",
2708
- cacheId: "pnpm",
2709
- run: "pnpm",
2710
- exec: "pnpm exec",
2711
- dlx: "pnpm dlx",
2712
- addGlobal: "pnpm add -g"
2713
- },
2714
- npm: {
2715
- install: "",
2716
- lockfile: "package-lock.json",
2717
- fetch: "",
2718
- installCmd: "npm ci",
2719
- cacheTarget: "/root/.npm",
2720
- cacheId: "npm",
2721
- run: "npm run",
2722
- exec: "npx",
2723
- dlx: "npx",
2724
- addGlobal: "npm install -g"
2725
- },
2726
- yarn: {
2727
- install: "corepack enable && corepack prepare yarn@stable --activate",
2728
- lockfile: "yarn.lock",
2729
- fetch: "",
2730
- installCmd: "yarn install --frozen-lockfile",
2731
- cacheTarget: "/root/.yarn/cache",
2732
- cacheId: "yarn",
2733
- run: "yarn",
2734
- exec: "yarn exec",
2735
- dlx: "yarn dlx",
2736
- addGlobal: "yarn global add"
2737
- },
2738
- bun: {
2739
- install: "npm install -g bun",
2740
- lockfile: "bun.lockb",
2741
- fetch: "",
2742
- installCmd: "bun install --frozen-lockfile",
2743
- cacheTarget: "/root/.bun/install/cache",
2744
- cacheId: "bun",
2745
- run: "bun run",
2746
- exec: "bunx",
2747
- dlx: "bunx",
2748
- addGlobal: "bun add -g"
2749
- }
2378
+ function getGeneratedSecret(state, appName, secretName) {
2379
+ return state?.generatedSecrets?.[appName]?.[secretName];
2380
+ }
2381
+ /**
2382
+ * Set a generated secret for an app (mutates state)
2383
+ */
2384
+ function setGeneratedSecret(state, appName, secretName, value) {
2385
+ if (!state.generatedSecrets) state.generatedSecrets = {};
2386
+ if (!state.generatedSecrets[appName]) state.generatedSecrets[appName] = {};
2387
+ state.generatedSecrets[appName][secretName] = value;
2388
+ }
2389
+ /**
2390
+ * Set DNS verification record for a hostname (mutates state)
2391
+ */
2392
+ function setDnsVerification(state, hostname, serverIp) {
2393
+ if (!state.dnsVerified) state.dnsVerified = {};
2394
+ state.dnsVerified[hostname] = {
2395
+ serverIp,
2396
+ verifiedAt: (/* @__PURE__ */ new Date()).toISOString()
2750
2397
  };
2751
- return configs[pm];
2752
2398
  }
2753
2399
  /**
2754
- * Generate a multi-stage Dockerfile for building from source
2755
- * Optimized for build speed with:
2756
- * - BuildKit cache mounts for package manager store
2757
- * - pnpm fetch for better layer caching (when using pnpm)
2758
- * - Optional turbo prune for monorepos
2400
+ * Check if a hostname is already verified with the given IP
2759
2401
  */
2760
- function generateMultiStageDockerfile(options) {
2761
- const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
2762
- if (turbo) return generateTurboDockerfile({
2763
- ...options,
2764
- turboPackage: turboPackage ?? "api"
2765
- });
2766
- const pm = getPmConfig(packageManager);
2767
- const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
2768
- const hasFetch = packageManager === "pnpm";
2769
- const depsStage = hasFetch ? `# Copy lockfile first for better caching
2770
- COPY ${pm.lockfile} ./
2771
-
2772
- # Fetch dependencies (downloads to virtual store, cached separately)
2773
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2774
- ${pm.fetch}
2775
-
2776
- # Copy package.json after fetch
2777
- COPY package.json ./
2778
-
2779
- # Install from cache (fast - no network needed)
2780
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2781
- ${pm.installCmd}` : `# Copy package files
2782
- COPY package.json ${pm.lockfile} ./
2783
-
2784
- # Install dependencies with cache
2785
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2786
- ${pm.installCmd}`;
2787
- return `# syntax=docker/dockerfile:1
2788
- # Stage 1: Dependencies
2789
- FROM ${baseImage} AS deps
2790
-
2791
- WORKDIR /app
2792
- ${installPm}
2793
- ${depsStage}
2794
-
2795
- # Stage 2: Build
2796
- FROM deps AS builder
2797
-
2798
- WORKDIR /app
2799
-
2800
- # Copy source (deps already installed)
2801
- COPY . .
2802
-
2803
- # Debug: Show node_modules/.bin contents and build production server
2804
- RUN echo "=== node_modules/.bin contents ===" && \
2805
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2806
- echo "=== Checking for gkm ===" && \
2807
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2808
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2809
- echo "=== Running build ===" && \
2810
- ./node_modules/.bin/gkm build --provider server --production
2811
-
2812
- # Stage 3: Production
2813
- FROM ${baseImage} AS runner
2814
-
2815
- WORKDIR /app
2816
-
2817
- # Install tini for proper signal handling as PID 1
2818
- RUN apk add --no-cache tini
2819
-
2820
- # Create non-root user
2821
- RUN addgroup --system --gid 1001 nodejs && \\
2822
- adduser --system --uid 1001 hono
2823
-
2824
- # Copy bundled server
2825
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2826
-
2827
- # Environment
2828
- ENV NODE_ENV=production
2829
- ENV PORT=${port}
2830
-
2831
- # Health check
2832
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
2833
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
2834
-
2835
- # Switch to non-root user
2836
- USER hono
2402
+ function isDnsVerified(state, hostname, serverIp) {
2403
+ const record = state?.dnsVerified?.[hostname];
2404
+ return record?.serverIp === serverIp;
2405
+ }
2837
2406
 
2838
- EXPOSE ${port}
2407
+ //#endregion
2408
+ //#region src/deploy/dns/hostinger-api.ts
2409
+ /**
2410
+ * Hostinger DNS API client
2411
+ *
2412
+ * API Documentation: https://developers.hostinger.com/
2413
+ * Authentication: Bearer token from hpanel.hostinger.com/profile/api
2414
+ */
2415
+ const HOSTINGER_API_BASE = "https://developers.hostinger.com";
2416
+ /**
2417
+ * Hostinger API error
2418
+ */
2419
+ var HostingerApiError = class extends Error {
2420
+ constructor(message, status, statusText, errors) {
2421
+ super(message);
2422
+ this.status = status;
2423
+ this.statusText = statusText;
2424
+ this.errors = errors;
2425
+ this.name = "HostingerApiError";
2426
+ }
2427
+ };
2428
/**
 * Hostinger DNS API client
 *
 * @example
 * ```ts
 * const api = new HostingerApi(token);
 *
 * // Get all records for a domain
 * const records = await api.getRecords('traflabs.io');
 *
 * // Create/update records
 * await api.upsertRecords('traflabs.io', [
 *   { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
 * ]);
 * ```
 */
var HostingerApi = class {
	// Bearer token issued at hpanel.hostinger.com/profile/api
	token;
	constructor(token) {
		this.token = token;
	}
	/**
	 * Make a request to the Hostinger API.
	 *
	 * @param method - HTTP verb.
	 * @param endpoint - Path appended to HOSTINGER_API_BASE.
	 * @param body - Optional JSON-serializable request body.
	 * @returns Parsed JSON body, or undefined when the response body is empty.
	 * @throws HostingerApiError on any non-2xx response.
	 */
	async request(method, endpoint, body) {
		const url = `${HOSTINGER_API_BASE}${endpoint}`;
		const response = await fetch(url, {
			method,
			headers: {
				"Content-Type": "application/json",
				Authorization: `Bearer ${this.token}`
			},
			body: body ? JSON.stringify(body) : void 0
		});
		if (!response.ok) {
			let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
			let errors;
			try {
				// Best effort: surface the API's own message when the error body is JSON.
				const errorBody = await response.json();
				if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
				errors = errorBody.errors;
			} catch {}
			throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
		}
		// Some endpoints return an empty body on success (e.g. PUT/DELETE).
		const text = await response.text();
		if (!text || text.trim() === "") return void 0;
		return JSON.parse(text);
	}
	/**
	 * Get all DNS records for a domain.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @returns The zone's records, or [] when no data is returned.
	 */
	async getRecords(domain) {
		const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
		// FIX: `request` resolves to undefined for empty response bodies, so an
		// unguarded `response.data` could throw a TypeError here.
		return response?.data || [];
	}
	/**
	 * Create or update DNS records.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @param records - Records to create/update
	 * @param overwrite - If true, replaces all existing records. If false, merges with existing.
	 */
	async upsertRecords(domain, records, overwrite = false) {
		await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
			overwrite,
			zone: records
		});
	}
	/**
	 * Validate DNS records before applying.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @param records - Records to validate
	 * @returns true if valid, throws if invalid
	 */
	async validateRecords(domain, records) {
		await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
			overwrite: false,
			zone: records
		});
		return true;
	}
	/**
	 * Delete specific DNS records.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @param filters - Filters to match records for deletion
	 */
	async deleteRecords(domain, filters) {
		await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
	}
	/**
	 * Check if a specific record exists.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @param name$1 - Subdomain name (e.g., 'api.joemoer')
	 * @param type$1 - Record type (e.g., 'A')
	 */
	async recordExists(domain, name$1, type$1 = "A") {
		const records = await this.getRecords(domain);
		return records.some((r) => r.name === name$1 && r.type === type$1);
	}
	/**
	 * Create a single A record if it doesn't exist.
	 *
	 * Note: check-then-create is not atomic; a concurrent writer could still
	 * race this call.
	 *
	 * @param domain - Root domain (e.g., 'traflabs.io')
	 * @param subdomain - Subdomain name (e.g., 'api.joemoer')
	 * @param ip - IP address to point to
	 * @param ttl - TTL in seconds (default: 300)
	 * @returns true if created, false if already exists
	 */
	async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
		const exists = await this.recordExists(domain, subdomain, "A");
		if (exists) return false;
		await this.upsertRecords(domain, [{
			name: subdomain,
			type: "A",
			ttl,
			records: [{ content: ip }]
		}]);
		return true;
	}
};
2839
2553
 
2840
- # Use tini as entrypoint to handle PID 1 responsibilities
2841
- ENTRYPOINT ["/sbin/tini", "--"]
2842
- CMD ["node", "server.mjs"]
2843
- `;
2554
+ //#endregion
2555
+ //#region src/deploy/dns/index.ts
2556
const logger$6 = console;
/**
 * Resolve the IPv4 address of a hostname via the system resolver.
 *
 * @throws Error with a descriptive message when resolution fails.
 */
async function resolveHostnameToIp(hostname) {
	try {
		const { lookup } = node_dns_promises;
		const { address } = await lookup(hostname, { family: 4 });
		return address;
	} catch (error) {
		const reason = error instanceof Error ? error.message : "Unknown error";
		throw new Error(`Failed to resolve IP for ${hostname}: ${reason}`);
	}
}
2568
/**
 * Extract subdomain from full hostname relative to root domain.
 * Returns "@" (apex) when the hostname equals the root domain.
 *
 * @example
 * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
 * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
 * @throws Error when hostname is neither the root domain nor under it.
 */
function extractSubdomain(hostname, rootDomain) {
	// FIX: require an exact match or a "." label boundary. A bare endsWith()
	// wrongly accepted lookalikes such as "eviltraflabs.io" for root
	// "traflabs.io" and then sliced out a garbage subdomain.
	if (hostname !== rootDomain && !hostname.endsWith(`.${rootDomain}`)) {
		throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
	}
	// Drop the root domain plus the joining dot; empty result means apex.
	const subdomain = hostname.slice(0, -(rootDomain.length + 1));
	return subdomain || "@";
}
2580
/**
 * Build the list of DNS A records needed so that every app hostname points
 * at the deployment server.
 */
function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
	return Array.from(appHostnames, ([appName, hostname]) => ({
		hostname,
		subdomain: extractSubdomain(hostname, rootDomain),
		type: "A",
		value: serverIp,
		appName
	}));
}
2597
/**
 * Render a fixed-width summary table of DNS record results via the logger.
 */
function printDnsRecordsTable(records, rootDomain) {
	// Status marker priority: error > newly created > pre-existing > unknown.
	const statusOf = (record) => {
		if (record.error) return "✗";
		if (record.created) return "✓ new";
		if (record.existed) return "✓";
		return "?";
	};
	logger$6.log(`\n 📋 DNS Records for ${rootDomain}:`);
	logger$6.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
	logger$6.log(" │ Subdomain │ Type │ Value │ Status │");
	logger$6.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
	for (const record of records) {
		const nameCol = record.subdomain.padEnd(35);
		const typeCol = record.type.padEnd(4);
		const valueCol = record.value.padEnd(15);
		logger$6.log(` │ ${nameCol} │ ${typeCol} │ ${valueCol} │ ${statusOf(record).padEnd(6)} │`);
	}
	logger$6.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
}
2618
/**
 * Print the required DNS records in a plain list for manual setup.
 */
function printDnsRecordsSimple(records, rootDomain) {
	logger$6.log("\n 📋 Required DNS Records:");
	logger$6.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
	records.forEach((record) => {
		logger$6.log(` ${record.subdomain} → ${record.value} (A record)`);
	});
	logger$6.log("");
}
2627
/**
 * Prompt the user for a secret on the controlling TTY, reading raw-mode
 * keystrokes until Enter (reuses the deploy/index.ts prompt pattern).
 *
 * @param message - Prompt text written to stdout before reading input.
 * @returns The characters typed before Enter was pressed.
 * @throws Error when stdin is not an interactive TTY.
 */
async function promptForToken(message) {
	const { stdin, stdout } = await import("node:process");
	if (!stdin.isTTY) throw new Error("Interactive input required for Hostinger token.");
	stdout.write(message);
	return new Promise((resolve$3) => {
		let value = "";
		const onData = (char) => {
			const c = char.toString();
			if (c === "\n" || c === "\r") {
				// Enter: restore the terminal, detach, and resolve with the input.
				stdin.setRawMode(false);
				stdin.pause();
				stdin.removeListener("data", onData);
				stdout.write("\n");
				resolve$3(value);
			} else if (c === "") {
				// NOTE(review): this comparison looks like it originally matched a
				// control character (likely Ctrl+C, "\u0003") that was lost in the
				// published bundle — an empty string can never arrive from a "data"
				// event. Confirm against the TypeScript source.
				stdin.setRawMode(false);
				stdin.pause();
				stdout.write("\n");
				process.exit(1);
			} else if (c === "" || c === "\b") {
				// NOTE(review): the first operand likely matched DEL ("\u007f");
				// together with "\b" this is the backspace handler. Confirm.
				if (value.length > 0) value = value.slice(0, -1);
			} else value += c;
		};
		// Raw mode delivers keystrokes unbuffered, one "data" event at a time.
		stdin.setRawMode(true);
		stdin.resume();
		stdin.on("data", onData);
	});
}
2658
/**
 * Dispatch DNS record creation to the configured provider.
 */
async function createDnsRecords(records, dnsConfig) {
	const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
	switch (provider) {
		case "manual":
			// Manual mode touches no API; the caller prints setup instructions.
			return records.map((record) => ({
				...record,
				created: false,
				existed: false
			}));
		case "hostinger":
			return createHostingerRecords(records, rootDomain, ttl);
		case "cloudflare":
			logger$6.log(" ⚠ Cloudflare DNS integration not yet implemented");
			return records.map((record) => ({
				...record,
				error: "Cloudflare not implemented"
			}));
		default:
			// Unknown provider: hand the records back untouched.
			return records;
	}
}
2678
/**
 * Create any missing A records at Hostinger, prompting for (and caching) an
 * API token on first use. Records that already exist are left untouched.
 */
async function createHostingerRecords(records, rootDomain, ttl) {
	const withError = (record, message) => ({
		...record,
		error: message
	});
	let token = await getHostingerToken();
	if (!token) {
		logger$6.log("\n 📋 Hostinger API token not found.");
		logger$6.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
		try {
			token = await promptForToken(" Hostinger API Token: ");
			await storeHostingerToken(token);
			logger$6.log(" ✓ Token saved");
		} catch {
			// No TTY or the prompt was aborted — report every record as failed.
			logger$6.log(" ⚠ Could not get token, skipping DNS creation");
			return records.map((record) => withError(record, "No API token"));
		}
	}
	const api = new HostingerApi(token);
	let existingRecords;
	try {
		existingRecords = await api.getRecords(rootDomain);
	} catch (error) {
		const message = error instanceof Error ? error.message : "Unknown error";
		logger$6.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
		return records.map((record) => withError(record, message));
	}
	const results = [];
	for (const record of records) {
		const alreadyPresent = existingRecords.some((r) => r.name === record.subdomain && r.type === "A");
		if (alreadyPresent) {
			results.push({
				...record,
				existed: true,
				created: false
			});
			continue;
		}
		try {
			await api.upsertRecords(rootDomain, [{
				name: record.subdomain,
				type: "A",
				ttl,
				records: [{ content: record.value }]
			}]);
			results.push({
				...record,
				created: true,
				existed: false
			});
		} catch (error) {
			results.push(withError(record, error instanceof Error ? error.message : "Unknown error"));
		}
	}
	return results;
}
2845
2743
  /**
2846
- * Generate a Dockerfile optimized for Turbo monorepos
2847
- * Uses turbo prune to create minimal Docker context
2744
+ * Main DNS orchestration function for deployments
2848
2745
  */
2849
- function generateTurboDockerfile(options) {
2850
- const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
2851
- const pm = getPmConfig(packageManager);
2852
- const installPm = pm.install ? `RUN ${pm.install}` : "";
2853
- const turboInstallCmd = getTurboInstallCmd(packageManager);
2854
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
2855
- return `# syntax=docker/dockerfile:1
2856
- # Stage 1: Prune monorepo
2857
- FROM ${baseImage} AS pruner
2858
-
2859
- WORKDIR /app
2860
-
2861
- ${installPm}
2862
-
2863
- COPY . .
2864
-
2865
- # Prune to only include necessary packages
2866
- RUN ${turboCmd} prune ${turboPackage} --docker
2867
-
2868
- # Stage 2: Install dependencies
2869
- FROM ${baseImage} AS deps
2870
-
2871
- WORKDIR /app
2872
-
2873
- ${installPm}
2874
-
2875
- # Copy pruned lockfile and package.jsons
2876
- COPY --from=pruner /app/out/${pm.lockfile} ./
2877
- COPY --from=pruner /app/out/json/ ./
2878
-
2879
- # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
2880
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
2881
- ${turboInstallCmd}
2882
-
2883
- # Stage 3: Build
2884
- FROM deps AS builder
2885
-
2886
- WORKDIR /app
2887
-
2888
- # Copy pruned source
2889
- COPY --from=pruner /app/out/full/ ./
2890
-
2891
- # Debug: Show node_modules/.bin contents and build production server
2892
- RUN echo "=== node_modules/.bin contents ===" && \
2893
- ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
2894
- echo "=== Checking for gkm ===" && \
2895
- which gkm 2>/dev/null || echo "gkm not in PATH" && \
2896
- ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
2897
- echo "=== Running build ===" && \
2898
- ./node_modules/.bin/gkm build --provider server --production
2899
-
2900
- # Stage 4: Production
2901
- FROM ${baseImage} AS runner
2902
-
2903
- WORKDIR /app
2904
-
2905
- RUN apk add --no-cache tini
2906
-
2907
- RUN addgroup --system --gid 1001 nodejs && \\
2908
- adduser --system --uid 1001 hono
2909
-
2910
- COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
2911
-
2912
- ENV NODE_ENV=production
2913
- ENV PORT=${port}
2914
-
2915
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
2916
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
2917
-
2918
- USER hono
2746
async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
	// DNS not configured — nothing to orchestrate.
	if (!dnsConfig) return null;
	const { domain: rootDomain, autoCreate = true } = dnsConfig;
	logger$6.log("\n🌐 Setting up DNS records...");
	// The Dokploy endpoint's hostname tells us which IP records must target.
	let serverIp;
	try {
		const { hostname } = new URL(dokployEndpoint);
		serverIp = await resolveHostnameToIp(hostname);
		logger$6.log(` Server IP: ${serverIp} (from ${hostname})`);
	} catch (error) {
		logger$6.log(` ⚠ Failed to resolve server IP: ${error instanceof Error ? error.message : "Unknown error"}`);
		return null;
	}
	const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
	if (requiredRecords.length === 0) {
		logger$6.log(" No DNS records needed");
		return {
			records: [],
			success: true,
			serverIp
		};
	}
	const shouldAutoCreate = autoCreate && dnsConfig.provider !== "manual";
	let finalRecords = requiredRecords;
	if (shouldAutoCreate) {
		logger$6.log(` Creating DNS records at ${dnsConfig.provider}...`);
		finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
		const created = finalRecords.filter((r) => r.created).length;
		const existed = finalRecords.filter((r) => r.existed).length;
		const failed = finalRecords.filter((r) => r.error).length;
		if (created > 0) logger$6.log(` ✓ Created ${created} DNS record(s)`);
		if (existed > 0) logger$6.log(` ✓ ${existed} record(s) already exist`);
		if (failed > 0) logger$6.log(` ⚠ ${failed} record(s) failed`);
	}
	printDnsRecordsTable(finalRecords, rootDomain);
	const hasFailures = finalRecords.some((r) => r.error);
	// Fall back to manual instructions for anything that was not created.
	if (dnsConfig.provider === "manual" || hasFailures) {
		printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
	}
	return {
		records: finalRecords,
		success: !hasFailures,
		serverIp
	};
}
2789
/**
 * Verify DNS records resolve correctly after deployment.
 *
 * Hostnames already recorded as verified in `state` for the same IP are
 * skipped; freshly verified hostnames are written back to `state`.
 *
 * @param appHostnames - Map of app names to hostnames
 * @param serverIp - Expected IP address the hostnames should resolve to
 * @param state - Deploy state for caching verification results
 * @returns Array of verification results, one per hostname
 */
async function verifyDnsRecords(appHostnames, serverIp, state) {
	logger$6.log("\n🔍 Verifying DNS records...");
	const results = [];
	for (const [appName, hostname] of appHostnames) {
		if (isDnsVerified(state, hostname, serverIp)) {
			logger$6.log(` ✓ ${hostname} (previously verified)`);
			results.push({ hostname, appName, verified: true, expectedIp: serverIp, skipped: true });
			continue;
		}
		let resolvedIp;
		let failure;
		try {
			resolvedIp = await resolveHostnameToIp(hostname);
		} catch (error) {
			failure = error instanceof Error ? error.message : "Unknown error";
		}
		if (failure !== undefined) {
			// Resolution failure usually means the record has not propagated yet.
			logger$6.log(` ⚠ ${hostname} DNS not propagated (${failure})`);
			results.push({ hostname, appName, verified: false, expectedIp: serverIp, error: failure });
		} else if (resolvedIp === serverIp) {
			setDnsVerification(state, hostname, serverIp);
			logger$6.log(` ✓ ${hostname} → ${resolvedIp}`);
			results.push({ hostname, appName, verified: true, resolvedIp, expectedIp: serverIp });
		} else {
			logger$6.log(` ⚠ ${hostname} resolves to ${resolvedIp}, expected ${serverIp}`);
			results.push({ hostname, appName, verified: false, resolvedIp, expectedIp: serverIp });
		}
	}
	const verified = results.filter((entry) => entry.verified).length;
	const skipped = results.filter((entry) => entry.skipped).length;
	const pending = results.length - verified;
	if (pending > 0) {
		logger$6.log(`\n ${verified} verified, ${pending} pending propagation`);
		logger$6.log(" DNS changes may take 5-30 minutes to propagate");
	} else if (skipped > 0) {
		logger$6.log(` ${verified} verified (${skipped} from cache)`);
	}
	return results;
}
2919
2861
 
2920
- EXPOSE ${port}
2862
+ //#endregion
2863
+ //#region src/docker/compose.ts
2864
/** Default Docker images for services */
const DEFAULT_SERVICE_IMAGES = {
	postgres: "postgres",
	redis: "redis",
	rabbitmq: "rabbitmq"
};
/** Default Docker image versions for services */
const DEFAULT_SERVICE_VERSIONS = {
	postgres: "16-alpine",
	redis: "7-alpine",
	rabbitmq: "3-management-alpine"
};
/** Get the default full image reference (image:tag) for a known service */
function getDefaultImage(serviceName) {
	const image = DEFAULT_SERVICE_IMAGES[serviceName];
	const tag = DEFAULT_SERVICE_VERSIONS[serviceName];
	return `${image}:${tag}`;
}
2880
/** Normalize services config to a Map of service name -> full image reference */
function normalizeServices(services) {
	const normalized = new Map();
	// Array form: every listed service gets its default image.
	if (Array.isArray(services)) {
		for (const serviceName of services) normalized.set(serviceName, getDefaultImage(serviceName));
		return normalized;
	}
	// Object form: `true` means default image, an object may pin image/version.
	for (const [serviceName, config] of Object.entries(services)) {
		if (config === true) {
			normalized.set(serviceName, getDefaultImage(serviceName));
		} else if (config && typeof config === "object") {
			if (config.image) {
				normalized.set(serviceName, config.image);
			} else {
				const tag = config.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
				normalized.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${tag}`);
			}
		}
		// Falsy configs (e.g. `false`) disable the service and are skipped.
	}
	return normalized;
}
2898
/**
 * Generate docker-compose.yml for production deployment.
 *
 * Emits an `api` service built from `.gkm/docker/Dockerfile`, plus optional
 * postgres/redis/rabbitmq services derived from the normalized `services`
 * config, all attached to a shared `app-network`.
 *
 * @param options - { imageName, registry, port, healthCheckPath, services }
 * @returns The compose file contents as a YAML string.
 */
function generateDockerCompose(options) {
	const { imageName, registry, port, healthCheckPath, services } = options;
	// Map of service name -> full image reference (see normalizeServices).
	const serviceMap = normalizeServices(services);
	// Optional registry prefix; `\${REGISTRY:-...}` is escaped so it stays a
	// compose-time variable rather than being substituted by this template.
	const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
	// NOTE(review): whitespace inside the template literals below appears
	// collapsed in this published-artifact view; the emitted YAML relies on
	// the original indentation — confirm against the TypeScript source
	// before editing these strings.
	let yaml = `version: '3.8'

services:
api:
build:
context: ../..
dockerfile: .gkm/docker/Dockerfile
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
container_name: ${imageName}
restart: unless-stopped
ports:
- "\${PORT:-${port}}:${port}"
environment:
- NODE_ENV=production
`;
	// Connection-string env vars are only injected for enabled services.
	if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
`;
	if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
`;
	if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
`;
	yaml += ` healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
interval: 30s
timeout: 3s
retries: 3
`;
	// The api container waits for every backing service to report healthy.
	if (serviceMap.size > 0) {
		yaml += ` depends_on:
`;
		for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
condition: service_healthy
`;
	}
	yaml += ` networks:
- app-network
`;
	// Backing service definitions, each joined to the shared network.
	const postgresImage = serviceMap.get("postgres");
	if (postgresImage) yaml += `
postgres:
image: ${postgresImage}
container_name: postgres
restart: unless-stopped
environment:
POSTGRES_USER: \${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
POSTGRES_DB: \${POSTGRES_DB:-app}
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
networks:
- app-network
`;
	const redisImage = serviceMap.get("redis");
	if (redisImage) yaml += `
redis:
image: ${redisImage}
container_name: redis
restart: unless-stopped
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 5s
retries: 5
networks:
- app-network
`;
	const rabbitmqImage = serviceMap.get("rabbitmq");
	if (rabbitmqImage) yaml += `
rabbitmq:
image: ${rabbitmqImage}
container_name: rabbitmq
restart: unless-stopped
environment:
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
ports:
- "15672:15672" # Management UI
volumes:
- rabbitmq_data:/var/lib/rabbitmq
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- app-network
`;
	// Named volumes exist only for the services that actually use them.
	yaml += `
volumes:
`;
	if (serviceMap.has("postgres")) yaml += ` postgres_data:
`;
	if (serviceMap.has("redis")) yaml += ` redis_data:
`;
	if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
`;
	yaml += `
networks:
app-network:
driver: bridge
`;
	return yaml;
}
2965
3015
  /**
2966
- * Generate .dockerignore file
3016
+ * Generate a minimal docker-compose.yml for API only
2967
3017
  */
2968
- function generateDockerignore() {
2969
- return `# Dependencies
2970
- node_modules
2971
- .pnpm-store
2972
-
2973
- # Build output (except what we need)
2974
- .gkm/aws*
2975
- .gkm/server/*.ts
2976
- !.gkm/server/dist
2977
-
2978
- # IDE and editor
2979
- .idea
2980
- .vscode
2981
- *.swp
2982
- *.swo
2983
-
2984
- # Git
2985
- .git
2986
- .gitignore
2987
-
2988
- # Logs
2989
- *.log
2990
- npm-debug.log*
2991
- pnpm-debug.log*
2992
-
2993
- # Test files
2994
- **/*.test.ts
2995
- **/*.spec.ts
2996
- **/__tests__
2997
- coverage
2998
-
2999
- # Documentation
3000
- docs
3001
- *.md
3002
- !README.md
3018
function generateMinimalDockerCompose(options) {
	// API-only variant: no backing services, volumes, or depends_on entries.
	const { imageName, registry, port, healthCheckPath } = options;
	// Optional registry prefix; `\${REGISTRY:-...}` stays a compose variable.
	const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
	// NOTE(review): whitespace inside this template literal appears collapsed
	// in this published-artifact view; confirm the emitted YAML indentation
	// against the TypeScript source before editing.
	return `version: '3.8'

services:
api:
build:
context: ../..
dockerfile: .gkm/docker/Dockerfile
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
container_name: ${imageName}
restart: unless-stopped
ports:
- "\${PORT:-${port}}:${port}"
environment:
- NODE_ENV=production
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
interval: 30s
timeout: 3s
retries: 3
networks:
- app-network

networks:
app-network:
driver: bridge
`;
}
3015
3048
  /**
3016
- * Generate docker-entrypoint.sh for custom startup logic
3049
+ * Generate docker-compose.yml for a workspace with all apps as services.
3050
+ * Apps can communicate with each other via service names.
3051
+ * @internal Exported for testing
3017
3052
  */
3018
- function generateDockerEntrypoint() {
3019
- return `#!/bin/sh
3020
- set -e
3021
-
3022
- # Run any custom startup scripts here
3023
- # Example: wait for database
3024
- # until nc -z $DB_HOST $DB_PORT; do
3025
- # echo "Waiting for database..."
3026
- # sleep 1
3027
- # done
3053
function generateWorkspaceCompose(workspace, options = {}) {
	const { registry } = options;
	const apps = Object.entries(workspace.apps);
	const services = workspace.services;
	// A service is enabled unless it is undefined or explicitly `false`.
	const hasPostgres = services.db !== void 0 && services.db !== false;
	const hasRedis = services.cache !== void 0 && services.cache !== false;
	const hasMail = services.mail !== void 0 && services.mail !== false;
	const postgresImage = getInfraServiceImage("postgres", services.db);
	const redisImage = getInfraServiceImage("redis", services.cache);
	// NOTE(review): whitespace inside the template literals below appears
	// collapsed in this published-artifact view; confirm the emitted YAML
	// indentation against the TypeScript source before editing.
	let yaml = `# Docker Compose for ${workspace.name} workspace
# Generated by gkm - do not edit manually

services:
`;
	// One service block per app; apps reach each other via their service names.
	for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
		registry,
		hasPostgres,
		hasRedis
	});
	if (hasPostgres) yaml += `
postgres:
image: ${postgresImage}
container_name: ${workspace.name}-postgres
restart: unless-stopped
environment:
POSTGRES_USER: \${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
POSTGRES_DB: \${POSTGRES_DB:-app}
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
networks:
- workspace-network
`;
	if (hasRedis) yaml += `
redis:
image: ${redisImage}
container_name: ${workspace.name}-redis
restart: unless-stopped
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 5s
retries: 5
networks:
- workspace-network
`;
	// Mailpit provides a local SMTP sink with a web UI for development.
	if (hasMail) yaml += `
mailpit:
image: axllent/mailpit:latest
container_name: ${workspace.name}-mailpit
restart: unless-stopped
ports:
- "8025:8025" # Web UI
- "1025:1025" # SMTP
networks:
- workspace-network
`;
	yaml += `
volumes:
`;
	if (hasPostgres) yaml += ` postgres_data:
`;
	if (hasRedis) yaml += ` redis_data:
`;
	yaml += `
networks:
workspace-network:
driver: bridge
`;
	return yaml;
}
3033
3131
  /**
3034
- * Resolve Docker configuration from GkmConfig with defaults
3132
+ * Get infrastructure service image with version.
3035
3133
  */
3036
- function resolveDockerConfig$1(config) {
3037
- const docker = config.docker ?? {};
3038
- let defaultImageName = "api";
3039
- try {
3040
- const pkg$1 = require(`${process.cwd()}/package.json`);
3041
- if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3042
- } catch {}
3043
- return {
3044
- registry: docker.registry ?? "",
3045
- imageName: docker.imageName ?? defaultImageName,
3046
- baseImage: docker.baseImage ?? "node:22-alpine",
3047
- port: docker.port ?? 3e3,
3048
- compose: docker.compose
3134
function getInfraServiceImage(serviceName, config) {
	// Built-in fallbacks for the two supported infra services.
	const fallback = serviceName === "postgres" ? "postgres:16-alpine" : "redis:7-alpine";
	if (config && typeof config === "object") {
		// A full image reference wins over a bare version tag.
		if (config.image) return config.image;
		if (config.version) {
			const repo = serviceName === "postgres" ? "postgres" : "redis";
			return `${repo}:${config.version}`;
		}
	}
	// `true`, undefined, or an object without image/version use the default.
	return fallback;
}
3051
3149
  /**
3052
- * Generate a Dockerfile for Next.js frontend apps using standalone output.
3053
- * Uses turbo prune for monorepo optimization.
3054
- * @internal Exported for testing
3150
+ * Generate a service definition for an app.
3055
3151
  */
3056
- function generateNextjsDockerfile(options) {
3057
- const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3058
- const pm = getPmConfig(packageManager);
3059
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3060
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3061
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3062
- const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3063
- const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3064
- return `# syntax=docker/dockerfile:1
3065
- # Next.js standalone Dockerfile with turbo prune optimization
3066
-
3067
- # Stage 1: Prune monorepo
3068
- FROM ${baseImage} AS pruner
3069
-
3070
- WORKDIR /app
3071
-
3072
- ${installPm}
3073
-
3074
- COPY . .
3075
-
3076
- # Prune to only include necessary packages
3077
- RUN ${turboCmd} prune ${turboPackage} --docker
3078
-
3079
- # Stage 2: Install dependencies
3080
- FROM ${baseImage} AS deps
3081
-
3082
- WORKDIR /app
3083
-
3084
- ${installPm}
3085
-
3086
- # Copy pruned lockfile and package.jsons
3087
- COPY --from=pruner /app/out/${pm.lockfile} ./
3088
- COPY --from=pruner /app/out/json/ ./
3089
-
3090
- # Install dependencies
3091
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3092
- ${turboInstallCmd}
3093
-
3094
- # Stage 3: Build
3095
- FROM deps AS builder
3096
-
3097
- WORKDIR /app
3098
-
3099
- # Build-time args for public API URLs (populated by gkm deploy)
3100
- # These get baked into the Next.js build as public environment variables
3101
- ${publicUrlArgDeclarations}
3102
-
3103
- # Convert ARGs to ENVs for Next.js build
3104
- ${publicUrlEnvDeclarations}
3105
-
3106
- # Copy pruned source
3107
- COPY --from=pruner /app/out/full/ ./
3108
-
3109
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3110
- # Using wildcard to make it optional for single-app projects
3111
- COPY --from=pruner /app/tsconfig.* ./
3112
-
3113
- # Ensure public directory exists (may be empty for scaffolded projects)
3114
- RUN mkdir -p ${appPath}/public
3115
-
3116
- # Set Next.js to produce standalone output
3117
- ENV NEXT_TELEMETRY_DISABLED=1
3118
-
3119
- # Build the application
3120
- RUN ${turboCmd} run build --filter=${turboPackage}
3121
-
3122
- # Stage 4: Production
3123
- FROM ${baseImage} AS runner
3124
-
3125
- WORKDIR /app
3126
-
3127
- # Install tini for proper signal handling
3128
- RUN apk add --no-cache tini
3129
-
3130
- # Create non-root user
3131
- RUN addgroup --system --gid 1001 nodejs && \\
3132
- adduser --system --uid 1001 nextjs
3133
-
3134
- # Set environment
3135
- ENV NODE_ENV=production
3136
- ENV NEXT_TELEMETRY_DISABLED=1
3137
- ENV PORT=${port}
3138
- ENV HOSTNAME="0.0.0.0"
3139
-
3140
- # Copy static files and standalone output
3141
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3142
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3143
- COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3144
-
3145
- USER nextjs
3146
-
3147
- EXPOSE ${port}
3148
-
3149
- ENTRYPOINT ["/sbin/tini", "--"]
3150
- CMD ["node", "${appPath}/server.js"]
3152
+ function generateAppService(appName, app, allApps, options) {
3153
+ const { registry, hasPostgres, hasRedis } = options;
3154
+ const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
3155
+ const healthCheckPath = app.type === "frontend" ? "/" : "/health";
3156
+ const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
3157
+ let yaml = `
3158
+ ${appName}:
3159
+ build:
3160
+ context: .
3161
+ dockerfile: .gkm/docker/Dockerfile.${appName}
3162
+ image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
3163
+ container_name: ${appName}
3164
+ restart: unless-stopped
3165
+ ports:
3166
+ - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
3167
+ environment:
3168
+ - NODE_ENV=production
3169
+ - PORT=${app.port}
3151
3170
  `;
3152
- }
3153
- /**
3154
- * Generate a Dockerfile for backend apps in a workspace.
3155
- * Uses turbo prune for monorepo optimization.
3156
- * @internal Exported for testing
3157
- */
3158
- function generateBackendDockerfile(options) {
3159
- const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3160
- const pm = getPmConfig(packageManager);
3161
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3162
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3163
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3164
- return `# syntax=docker/dockerfile:1
3165
- # Backend Dockerfile with turbo prune optimization
3166
-
3167
- # Stage 1: Prune monorepo
3168
- FROM ${baseImage} AS pruner
3169
-
3170
- WORKDIR /app
3171
-
3172
- ${installPm}
3173
-
3174
- COPY . .
3175
-
3176
- # Prune to only include necessary packages
3177
- RUN ${turboCmd} prune ${turboPackage} --docker
3178
-
3179
- # Stage 2: Install dependencies
3180
- FROM ${baseImage} AS deps
3181
-
3182
- WORKDIR /app
3183
-
3184
- ${installPm}
3185
-
3186
- # Copy pruned lockfile and package.jsons
3187
- COPY --from=pruner /app/out/${pm.lockfile} ./
3188
- COPY --from=pruner /app/out/json/ ./
3189
-
3190
- # Install dependencies
3191
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3192
- ${turboInstallCmd}
3193
-
3194
- # Stage 3: Build
3195
- FROM deps AS builder
3196
-
3197
- WORKDIR /app
3198
-
3199
- # Build-time args for encrypted secrets
3200
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3201
- ARG GKM_CREDENTIALS_IV=""
3202
-
3203
- # Copy pruned source
3204
- COPY --from=pruner /app/out/full/ ./
3205
-
3206
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3207
- # Using wildcard to make it optional for single-app projects
3208
- COPY --from=pruner /app/gkm.config.* ./
3209
- COPY --from=pruner /app/tsconfig.* ./
3210
-
3211
- # Write encrypted credentials for gkm build to embed
3212
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3213
- mkdir -p ${appPath}/.gkm && \
3214
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3215
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3216
- fi
3217
-
3218
- # Build production server using gkm
3219
- RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3220
-
3221
- # Stage 4: Production
3222
- FROM ${baseImage} AS runner
3223
-
3224
- WORKDIR /app
3225
-
3226
- RUN apk add --no-cache tini
3227
-
3228
- RUN addgroup --system --gid 1001 nodejs && \\
3229
- adduser --system --uid 1001 hono
3230
-
3231
- # Copy bundled server
3232
- COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3233
-
3234
- ENV NODE_ENV=production
3235
- ENV PORT=${port}
3236
-
3237
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3238
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3239
-
3240
- USER hono
3241
-
3242
- EXPOSE ${port}
3243
-
3244
- ENTRYPOINT ["/sbin/tini", "--"]
3245
- CMD ["node", "server.mjs"]
3171
+ for (const dep of app.dependencies) {
3172
+ const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
3173
+ if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
3174
+ `;
3175
+ }
3176
+ if (app.type === "backend") {
3177
+ if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
3246
3178
  `;
3247
- }
3248
- /**
3249
- * Generate a Dockerfile for apps with a custom entry point.
3250
- * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3251
- * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3252
- * @internal Exported for testing
3253
- */
3254
- function generateEntryDockerfile(options) {
3255
- const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3256
- const pm = getPmConfig(packageManager);
3257
- const installPm = pm.install ? `RUN ${pm.install}` : "";
3258
- const turboInstallCmd = getTurboInstallCmd(packageManager);
3259
- const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3260
- return `# syntax=docker/dockerfile:1
3261
- # Entry-based Dockerfile with turbo prune + tsdown bundling
3262
-
3263
- # Stage 1: Prune monorepo
3264
- FROM ${baseImage} AS pruner
3265
-
3266
- WORKDIR /app
3267
-
3268
- ${installPm}
3269
-
3270
- COPY . .
3271
-
3272
- # Prune to only include necessary packages
3273
- RUN ${turboCmd} prune ${turboPackage} --docker
3274
-
3275
- # Stage 2: Install dependencies
3276
- FROM ${baseImage} AS deps
3277
-
3278
- WORKDIR /app
3279
-
3280
- ${installPm}
3281
-
3282
- # Copy pruned lockfile and package.jsons
3283
- COPY --from=pruner /app/out/${pm.lockfile} ./
3284
- COPY --from=pruner /app/out/json/ ./
3285
-
3286
- # Install dependencies
3287
- RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3288
- ${turboInstallCmd}
3289
-
3290
- # Stage 3: Build with tsdown
3291
- FROM deps AS builder
3292
-
3293
- WORKDIR /app
3294
-
3295
- # Build-time args for encrypted secrets
3296
- ARG GKM_ENCRYPTED_CREDENTIALS=""
3297
- ARG GKM_CREDENTIALS_IV=""
3298
-
3299
- # Copy pruned source
3300
- COPY --from=pruner /app/out/full/ ./
3301
-
3302
- # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3303
- # Using wildcard to make it optional for single-app projects
3304
- COPY --from=pruner /app/tsconfig.* ./
3305
-
3306
- # Write encrypted credentials for tsdown to embed via define
3307
- RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3308
- mkdir -p ${appPath}/.gkm && \
3309
- echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3310
- echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3311
- fi
3312
-
3313
- # Bundle entry point with esbuild (outputs to dist/index.mjs)
3314
- # Creates a fully standalone bundle with all dependencies included
3315
- # Use define to embed credentials if present
3316
- RUN cd ${appPath} && \
3317
- if [ -f .gkm/credentials.enc ]; then \
3318
- CREDS=$(cat .gkm/credentials.enc) && \
3319
- IV=$(cat .gkm/credentials.iv) && \
3320
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3321
- --outfile=dist/index.mjs --packages=bundle \
3322
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3323
- --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3324
- --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3325
- else \
3326
- npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3327
- --outfile=dist/index.mjs --packages=bundle \
3328
- --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3329
- fi
3330
-
3331
- # Stage 4: Production
3332
- FROM ${baseImage} AS runner
3333
-
3334
- WORKDIR /app
3335
-
3336
- RUN apk add --no-cache tini
3337
-
3338
- RUN addgroup --system --gid 1001 nodejs && \\
3339
- adduser --system --uid 1001 app
3340
-
3341
- # Copy bundled output only (no node_modules needed - fully bundled)
3342
- COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3343
-
3344
- ENV NODE_ENV=production
3345
- ENV PORT=${port}
3346
-
3347
- HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3348
- CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3349
-
3350
- USER app
3351
-
3352
- EXPOSE ${port}
3353
-
3354
- ENTRYPOINT ["/sbin/tini", "--"]
3355
- CMD ["node", "index.mjs"]
3179
+ if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
3180
+ `;
3181
+ }
3182
+ yaml += ` healthcheck:
3183
+ test: ${healthCheckCmd}
3184
+ interval: 30s
3185
+ timeout: 3s
3186
+ retries: 3
3187
+ `;
3188
+ const dependencies$1 = [...app.dependencies];
3189
+ if (app.type === "backend") {
3190
+ if (hasPostgres) dependencies$1.push("postgres");
3191
+ if (hasRedis) dependencies$1.push("redis");
3192
+ }
3193
+ if (dependencies$1.length > 0) {
3194
+ yaml += ` depends_on:
3195
+ `;
3196
+ for (const dep of dependencies$1) yaml += ` ${dep}:
3197
+ condition: service_healthy
3198
+ `;
3199
+ }
3200
+ yaml += ` networks:
3201
+ - workspace-network
3356
3202
  `;
3203
+ return yaml;
3357
3204
  }
3358
3205
 
3359
3206
  //#endregion
3360
- //#region src/docker/index.ts
3361
- const logger$6 = console;
3207
+ //#region src/docker/templates.ts
3208
+ const LOCKFILES = [
3209
+ ["pnpm-lock.yaml", "pnpm"],
3210
+ ["bun.lockb", "bun"],
3211
+ ["yarn.lock", "yarn"],
3212
+ ["package-lock.json", "npm"]
3213
+ ];
3362
3214
  /**
3363
- * Docker command implementation
3364
- * Generates Dockerfile, docker-compose.yml, and related files
3365
- *
3366
- * Default: Multi-stage Dockerfile that builds from source inside Docker
3367
- * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3215
+ * Detect package manager from lockfiles
3216
+ * Walks up the directory tree to find lockfile (for monorepos)
3368
3217
  */
3369
- async function dockerCommand(options) {
3370
- const loadedConfig = await require_config.loadWorkspaceConfig();
3371
- if (loadedConfig.type === "workspace") {
3372
- logger$6.log("📦 Detected workspace configuration");
3373
- return workspaceDockerCommand(loadedConfig.workspace, options);
3374
- }
3375
- const config = await require_config.loadConfig();
3376
- const dockerConfig = resolveDockerConfig$1(config);
3377
- const serverConfig = typeof config.providers?.server === "object" ? config.providers.server : void 0;
3378
- const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3379
- const useSlim = options.slim === true;
3380
- if (useSlim) {
3381
- const distDir = (0, node_path.join)(process.cwd(), ".gkm", "server", "dist");
3382
- const hasBuild = (0, node_fs.existsSync)((0, node_path.join)(distDir, "server.mjs"));
3383
- if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3218
+ function detectPackageManager$1(cwd = process.cwd()) {
3219
+ let dir = cwd;
3220
+ const root = (0, node_path.parse)(dir).root;
3221
+ while (dir !== root) {
3222
+ for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(dir, lockfile))) return pm;
3223
+ dir = (0, node_path.dirname)(dir);
3384
3224
  }
3385
- const dockerDir = (0, node_path.join)(process.cwd(), ".gkm", "docker");
3386
- await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
3387
- const packageManager = detectPackageManager$1();
3388
- const inMonorepo = isMonorepo();
3389
- const hasTurbo = hasTurboConfig();
3390
- let useTurbo = options.turbo ?? false;
3391
- if (inMonorepo && !useSlim) if (hasTurbo) {
3392
- useTurbo = true;
3393
- logger$6.log(" Detected monorepo with turbo.json - using turbo prune");
3394
- } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3395
- let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3396
- if (useTurbo && !options.turboPackage) try {
3397
- const pkg$1 = require(`${process.cwd()}/package.json`);
3398
- if (pkg$1.name) {
3399
- turboPackage = pkg$1.name;
3400
- logger$6.log(` Turbo package: ${turboPackage}`);
3401
- }
3402
- } catch {}
3403
- const templateOptions = {
3404
- imageName: dockerConfig.imageName,
3405
- baseImage: dockerConfig.baseImage,
3406
- port: dockerConfig.port,
3407
- healthCheckPath,
3408
- prebuilt: useSlim,
3409
- turbo: useTurbo,
3410
- turboPackage,
3411
- packageManager
3412
- };
3413
- const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3414
- const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3415
- const dockerfilePath = (0, node_path.join)(dockerDir, "Dockerfile");
3416
- await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
3417
- logger$6.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
3418
- const composeOptions = {
3419
- imageName: dockerConfig.imageName,
3420
- registry: options.registry ?? dockerConfig.registry,
3421
- port: dockerConfig.port,
3422
- healthCheckPath,
3423
- services: dockerConfig.compose?.services ?? {}
3424
- };
3425
- const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
3426
- const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
3427
- const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
3428
- await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
3429
- logger$6.log("Generated: .gkm/docker/docker-compose.yml");
3430
- const dockerignore = generateDockerignore();
3431
- const dockerignorePath = (0, node_path.join)(process.cwd(), ".dockerignore");
3432
- await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
3433
- logger$6.log("Generated: .dockerignore (project root)");
3434
- const entrypoint = generateDockerEntrypoint();
3435
- const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
3436
- await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
3437
- logger$6.log("Generated: .gkm/docker/docker-entrypoint.sh");
3438
- const result = {
3439
- dockerfile: dockerfilePath,
3440
- dockerCompose: composePath,
3441
- dockerignore: dockerignorePath,
3442
- entrypoint: entrypointPath
3443
- };
3444
- if (options.build) await buildDockerImage(dockerConfig.imageName, options);
3445
- if (options.push) await pushDockerImage(dockerConfig.imageName, options);
3446
- return result;
3225
+ for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(root, lockfile))) return pm;
3226
+ return "pnpm";
3447
3227
  }
3448
3228
  /**
3449
- * Ensure lockfile exists in the build context
3450
- * For monorepos, copies from workspace root if needed
3451
- * Returns cleanup function if file was copied
3229
+ * Find the lockfile path by walking up the directory tree
3230
+ * Returns the full path to the lockfile, or null if not found
3452
3231
  */
3453
- function ensureLockfile(cwd) {
3454
- const lockfilePath = findLockfilePath(cwd);
3455
- if (!lockfilePath) {
3456
- logger$6.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
3457
- return null;
3458
- }
3459
- const lockfileName = (0, node_path.basename)(lockfilePath);
3460
- const localLockfile = (0, node_path.join)(cwd, lockfileName);
3461
- if (lockfilePath === localLockfile) return null;
3462
- logger$6.log(` Copying ${lockfileName} from monorepo root...`);
3463
- (0, node_fs.copyFileSync)(lockfilePath, localLockfile);
3464
- return () => {
3465
- try {
3466
- (0, node_fs.unlinkSync)(localLockfile);
3467
- } catch {}
3468
- };
3232
+ function findLockfilePath(cwd = process.cwd()) {
3233
+ let dir = cwd;
3234
+ const root = (0, node_path.parse)(dir).root;
3235
+ while (dir !== root) {
3236
+ for (const [lockfile] of LOCKFILES) {
3237
+ const lockfilePath = (0, node_path.join)(dir, lockfile);
3238
+ if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
3239
+ }
3240
+ dir = (0, node_path.dirname)(dir);
3241
+ }
3242
+ for (const [lockfile] of LOCKFILES) {
3243
+ const lockfilePath = (0, node_path.join)(root, lockfile);
3244
+ if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
3245
+ }
3246
+ return null;
3469
3247
  }
3470
3248
  /**
3471
- * Build Docker image
3472
- * Uses BuildKit for cache mount support
3249
+ * Check if we're in a monorepo (lockfile is in a parent directory)
3473
3250
  */
3474
- async function buildDockerImage(imageName, options) {
3475
- const tag = options.tag ?? "latest";
3476
- const registry = options.registry;
3477
- const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
3478
- logger$6.log(`\n🐳 Building Docker image: ${fullImageName}`);
3479
- const cwd = process.cwd();
3480
- const cleanup = ensureLockfile(cwd);
3481
- try {
3482
- (0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
3483
- cwd,
3484
- stdio: "inherit",
3485
- env: {
3486
- ...process.env,
3487
- DOCKER_BUILDKIT: "1"
3488
- }
3489
- });
3490
- logger$6.log(`✅ Docker image built: ${fullImageName}`);
3491
- } catch (error) {
3492
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3493
- } finally {
3494
- cleanup?.();
3495
- }
3251
+ function isMonorepo(cwd = process.cwd()) {
3252
+ const lockfilePath = findLockfilePath(cwd);
3253
+ if (!lockfilePath) return false;
3254
+ const lockfileDir = (0, node_path.dirname)(lockfilePath);
3255
+ return lockfileDir !== cwd;
3496
3256
  }
3497
3257
  /**
3498
- * Push Docker image to registry
3258
+ * Check if turbo.json exists (walks up directory tree)
3499
3259
  */
3500
- async function pushDockerImage(imageName, options) {
3501
- const tag = options.tag ?? "latest";
3502
- const registry = options.registry;
3503
- if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
3504
- const fullImageName = `${registry}/${imageName}:${tag}`;
3505
- logger$6.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
3506
- try {
3507
- (0, node_child_process.execSync)(`docker push ${fullImageName}`, {
3508
- cwd: process.cwd(),
3509
- stdio: "inherit"
3510
- });
3511
- logger$6.log(`✅ Docker image pushed: ${fullImageName}`);
3512
- } catch (error) {
3513
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3260
+ function hasTurboConfig(cwd = process.cwd()) {
3261
+ let dir = cwd;
3262
+ const root = (0, node_path.parse)(dir).root;
3263
+ while (dir !== root) {
3264
+ if ((0, node_fs.existsSync)((0, node_path.join)(dir, "turbo.json"))) return true;
3265
+ dir = (0, node_path.dirname)(dir);
3514
3266
  }
3267
+ return (0, node_fs.existsSync)((0, node_path.join)(root, "turbo.json"));
3515
3268
  }
3516
3269
  /**
3517
- * Get the package name from package.json in an app directory.
3270
+ * Get install command for turbo builds (without frozen lockfile)
3271
+ * Turbo prune creates a subset that may not perfectly match the lockfile
3518
3272
  */
3519
- function getAppPackageName(appPath) {
3520
- try {
3521
- const pkgPath = (0, node_path.join)(appPath, "package.json");
3522
- if (!(0, node_fs.existsSync)(pkgPath)) return void 0;
3523
- const content = (0, node_fs.readFileSync)(pkgPath, "utf-8");
3524
- const pkg$1 = JSON.parse(content);
3525
- return pkg$1.name;
3526
- } catch {
3527
- return void 0;
3528
- }
3273
+ function getTurboInstallCmd(pm) {
3274
+ const commands = {
3275
+ pnpm: "pnpm install",
3276
+ npm: "npm install",
3277
+ yarn: "yarn install",
3278
+ bun: "bun install"
3279
+ };
3280
+ return commands[pm];
3529
3281
  }
3530
3282
  /**
3531
- * Generate Dockerfiles for all apps in a workspace.
3532
- * @internal Exported for testing
3283
+ * Get package manager specific commands and paths
3533
3284
  */
3534
- async function workspaceDockerCommand(workspace, options) {
3535
- const results = [];
3536
- const apps = Object.entries(workspace.apps);
3537
- logger$6.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
3538
- const dockerDir = (0, node_path.join)(workspace.root, ".gkm", "docker");
3539
- await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
3540
- const packageManager = detectPackageManager$1(workspace.root);
3541
- logger$6.log(` Package manager: ${packageManager}`);
3542
- for (const [appName, app] of apps) {
3543
- const appPath = app.path;
3544
- const fullAppPath = (0, node_path.join)(workspace.root, appPath);
3545
- const turboPackage = getAppPackageName(fullAppPath) ?? appName;
3546
- const imageName = appName;
3547
- const hasEntry = !!app.entry;
3548
- const buildType = hasEntry ? "entry" : app.type;
3549
- logger$6.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
3550
- let dockerfile;
3551
- if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
3552
- imageName,
3553
- baseImage: "node:22-alpine",
3554
- port: app.port,
3555
- appPath,
3556
- turboPackage,
3557
- packageManager
3558
- });
3559
- else if (app.entry) dockerfile = generateEntryDockerfile({
3560
- imageName,
3561
- baseImage: "node:22-alpine",
3562
- port: app.port,
3563
- appPath,
3564
- entry: app.entry,
3565
- turboPackage,
3566
- packageManager,
3567
- healthCheckPath: "/health"
3568
- });
3569
- else dockerfile = generateBackendDockerfile({
3570
- imageName,
3571
- baseImage: "node:22-alpine",
3572
- port: app.port,
3573
- appPath,
3574
- turboPackage,
3575
- packageManager,
3576
- healthCheckPath: "/health"
3577
- });
3578
- const dockerfilePath = (0, node_path.join)(dockerDir, `Dockerfile.${appName}`);
3579
- await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
3580
- logger$6.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
3581
- results.push({
3582
- appName,
3583
- type: app.type,
3584
- dockerfile: dockerfilePath,
3585
- imageName
3586
- });
3587
- }
3588
- const dockerignore = generateDockerignore();
3589
- const dockerignorePath = (0, node_path.join)(workspace.root, ".dockerignore");
3590
- await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
3591
- logger$6.log(`\n Generated: .dockerignore (workspace root)`);
3592
- const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
3593
- const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
3594
- await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
3595
- logger$6.log(` Generated: .gkm/docker/docker-compose.yml`);
3596
- logger$6.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
3597
- logger$6.log("\n📋 Build commands:");
3598
- for (const result of results) {
3599
- const icon = result.type === "backend" ? "⚙️" : "🌐";
3600
- logger$6.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
3601
- }
3602
- logger$6.log("\n📋 Run all services:");
3603
- logger$6.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
3604
- return {
3605
- apps: results,
3606
- dockerCompose: composePath,
3607
- dockerignore: dockerignorePath
3285
+ function getPmConfig(pm) {
3286
+ const configs = {
3287
+ pnpm: {
3288
+ install: "corepack enable && corepack prepare pnpm@latest --activate",
3289
+ lockfile: "pnpm-lock.yaml",
3290
+ fetch: "pnpm fetch",
3291
+ installCmd: "pnpm install --frozen-lockfile --offline",
3292
+ cacheTarget: "/root/.local/share/pnpm/store",
3293
+ cacheId: "pnpm",
3294
+ run: "pnpm",
3295
+ exec: "pnpm exec",
3296
+ dlx: "pnpm dlx",
3297
+ addGlobal: "pnpm add -g"
3298
+ },
3299
+ npm: {
3300
+ install: "",
3301
+ lockfile: "package-lock.json",
3302
+ fetch: "",
3303
+ installCmd: "npm ci",
3304
+ cacheTarget: "/root/.npm",
3305
+ cacheId: "npm",
3306
+ run: "npm run",
3307
+ exec: "npx",
3308
+ dlx: "npx",
3309
+ addGlobal: "npm install -g"
3310
+ },
3311
+ yarn: {
3312
+ install: "corepack enable && corepack prepare yarn@stable --activate",
3313
+ lockfile: "yarn.lock",
3314
+ fetch: "",
3315
+ installCmd: "yarn install --frozen-lockfile",
3316
+ cacheTarget: "/root/.yarn/cache",
3317
+ cacheId: "yarn",
3318
+ run: "yarn",
3319
+ exec: "yarn exec",
3320
+ dlx: "yarn dlx",
3321
+ addGlobal: "yarn global add"
3322
+ },
3323
+ bun: {
3324
+ install: "npm install -g bun",
3325
+ lockfile: "bun.lockb",
3326
+ fetch: "",
3327
+ installCmd: "bun install --frozen-lockfile",
3328
+ cacheTarget: "/root/.bun/install/cache",
3329
+ cacheId: "bun",
3330
+ run: "bun run",
3331
+ exec: "bunx",
3332
+ dlx: "bunx",
3333
+ addGlobal: "bun add -g"
3334
+ }
3608
3335
  };
3336
+ return configs[pm];
3609
3337
  }
3338
+ /**
3339
+ * Generate a multi-stage Dockerfile for building from source
3340
+ * Optimized for build speed with:
3341
+ * - BuildKit cache mounts for package manager store
3342
+ * - pnpm fetch for better layer caching (when using pnpm)
3343
+ * - Optional turbo prune for monorepos
3344
+ */
3345
+ function generateMultiStageDockerfile(options) {
3346
+ const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
3347
+ if (turbo) return generateTurboDockerfile({
3348
+ ...options,
3349
+ turboPackage: turboPackage ?? "api"
3350
+ });
3351
+ const pm = getPmConfig(packageManager);
3352
+ const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
3353
+ const hasFetch = packageManager === "pnpm";
3354
+ const depsStage = hasFetch ? `# Copy lockfile first for better caching
3355
+ COPY ${pm.lockfile} ./
3356
+
3357
+ # Fetch dependencies (downloads to virtual store, cached separately)
3358
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3359
+ ${pm.fetch}
3360
+
3361
+ # Copy package.json after fetch
3362
+ COPY package.json ./
3363
+
3364
+ # Install from cache (fast - no network needed)
3365
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3366
+ ${pm.installCmd}` : `# Copy package files
3367
+ COPY package.json ${pm.lockfile} ./
3368
+
3369
+ # Install dependencies with cache
3370
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3371
+ ${pm.installCmd}`;
3372
+ return `# syntax=docker/dockerfile:1
3373
+ # Stage 1: Dependencies
3374
+ FROM ${baseImage} AS deps
3375
+
3376
+ WORKDIR /app
3377
+ ${installPm}
3378
+ ${depsStage}
3379
+
3380
+ # Stage 2: Build
3381
+ FROM deps AS builder
3382
+
3383
+ WORKDIR /app
3384
+
3385
+ # Copy source (deps already installed)
3386
+ COPY . .
3387
+
3388
+ # Debug: Show node_modules/.bin contents and build production server
3389
+ RUN echo "=== node_modules/.bin contents ===" && \
3390
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3391
+ echo "=== Checking for gkm ===" && \
3392
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3393
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3394
+ echo "=== Running build ===" && \
3395
+ ./node_modules/.bin/gkm build --provider server --production
3396
+
3397
+ # Stage 3: Production
3398
+ FROM ${baseImage} AS runner
3399
+
3400
+ WORKDIR /app
3610
3401
 
3611
- //#endregion
3612
- //#region src/deploy/docker.ts
3613
- /**
3614
- * Get app name from package.json in the current working directory
3615
- * Used for Dokploy app/project naming
3616
- */
3617
- function getAppNameFromCwd$1() {
3618
- const packageJsonPath = (0, node_path.join)(process.cwd(), "package.json");
3619
- if (!(0, node_fs.existsSync)(packageJsonPath)) return void 0;
3620
- try {
3621
- const pkg$1 = JSON.parse((0, node_fs.readFileSync)(packageJsonPath, "utf-8"));
3622
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3623
- } catch {}
3624
- return void 0;
3402
+ # Install tini for proper signal handling as PID 1
3403
+ RUN apk add --no-cache tini
3404
+
3405
+ # Create non-root user
3406
+ RUN addgroup --system --gid 1001 nodejs && \\
3407
+ adduser --system --uid 1001 hono
3408
+
3409
+ # Copy bundled server
3410
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3411
+
3412
+ # Environment
3413
+ ENV NODE_ENV=production
3414
+ ENV PORT=${port}
3415
+
3416
+ # Health check
3417
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3418
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3419
+
3420
+ # Switch to non-root user
3421
+ USER hono
3422
+
3423
+ EXPOSE ${port}
3424
+
3425
+ # Use tini as entrypoint to handle PID 1 responsibilities
3426
+ ENTRYPOINT ["/sbin/tini", "--"]
3427
+ CMD ["node", "server.mjs"]
3428
+ `;
3625
3429
  }
3626
3430
  /**
3627
- * Get app name from package.json adjacent to the lockfile (project root)
3628
- * Used for Docker image naming
3431
+ * Generate a Dockerfile optimized for Turbo monorepos
3432
+ * Uses turbo prune to create minimal Docker context
3629
3433
  */
3630
- function getAppNameFromPackageJson() {
3631
- const cwd = process.cwd();
3632
- const lockfilePath = findLockfilePath(cwd);
3633
- if (!lockfilePath) return void 0;
3634
- const projectRoot = (0, node_path.dirname)(lockfilePath);
3635
- const packageJsonPath = (0, node_path.join)(projectRoot, "package.json");
3636
- if (!(0, node_fs.existsSync)(packageJsonPath)) return void 0;
3637
- try {
3638
- const pkg$1 = JSON.parse((0, node_fs.readFileSync)(packageJsonPath, "utf-8"));
3639
- if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
3640
- } catch {}
3641
- return void 0;
3434
+ function generateTurboDockerfile(options) {
3435
+ const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
3436
+ const pm = getPmConfig(packageManager);
3437
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3438
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3439
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3440
+ return `# syntax=docker/dockerfile:1
3441
+ # Stage 1: Prune monorepo
3442
+ FROM ${baseImage} AS pruner
3443
+
3444
+ WORKDIR /app
3445
+
3446
+ ${installPm}
3447
+
3448
+ COPY . .
3449
+
3450
+ # Prune to only include necessary packages
3451
+ RUN ${turboCmd} prune ${turboPackage} --docker
3452
+
3453
+ # Stage 2: Install dependencies
3454
+ FROM ${baseImage} AS deps
3455
+
3456
+ WORKDIR /app
3457
+
3458
+ ${installPm}
3459
+
3460
+ # Copy pruned lockfile and package.jsons
3461
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3462
+ COPY --from=pruner /app/out/json/ ./
3463
+
3464
+ # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
3465
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3466
+ ${turboInstallCmd}
3467
+
3468
+ # Stage 3: Build
3469
+ FROM deps AS builder
3470
+
3471
+ WORKDIR /app
3472
+
3473
+ # Copy pruned source
3474
+ COPY --from=pruner /app/out/full/ ./
3475
+
3476
+ # Debug: Show node_modules/.bin contents and build production server
3477
+ RUN echo "=== node_modules/.bin contents ===" && \
3478
+ ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
3479
+ echo "=== Checking for gkm ===" && \
3480
+ which gkm 2>/dev/null || echo "gkm not in PATH" && \
3481
+ ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
3482
+ echo "=== Running build ===" && \
3483
+ ./node_modules/.bin/gkm build --provider server --production
3484
+
3485
+ # Stage 4: Production
3486
+ FROM ${baseImage} AS runner
3487
+
3488
+ WORKDIR /app
3489
+
3490
+ RUN apk add --no-cache tini
3491
+
3492
+ RUN addgroup --system --gid 1001 nodejs && \\
3493
+ adduser --system --uid 1001 hono
3494
+
3495
+ COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
3496
+
3497
+ ENV NODE_ENV=production
3498
+ ENV PORT=${port}
3499
+
3500
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3501
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3502
+
3503
+ USER hono
3504
+
3505
+ EXPOSE ${port}
3506
+
3507
+ ENTRYPOINT ["/sbin/tini", "--"]
3508
+ CMD ["node", "server.mjs"]
3509
+ `;
3642
3510
  }
3643
- const logger$5 = console;
3644
3511
  /**
3645
- * Get the full image reference
3512
+ * Generate a slim Dockerfile for pre-built bundles
3646
3513
  */
3647
- function getImageRef(registry, imageName, tag) {
3648
- if (registry) return `${registry}/${imageName}:${tag}`;
3649
- return `${imageName}:${tag}`;
3514
+ function generateSlimDockerfile(options) {
3515
+ const { baseImage, port, healthCheckPath } = options;
3516
+ return `# Slim Dockerfile for pre-built production bundle
3517
+ FROM ${baseImage}
3518
+
3519
+ WORKDIR /app
3520
+
3521
+ # Install tini for proper signal handling as PID 1
3522
+ # Handles SIGTERM propagation and zombie process reaping
3523
+ RUN apk add --no-cache tini
3524
+
3525
+ # Create non-root user
3526
+ RUN addgroup --system --gid 1001 nodejs && \\
3527
+ adduser --system --uid 1001 hono
3528
+
3529
+ # Copy pre-built bundle
3530
+ COPY .gkm/server/dist/server.mjs ./
3531
+
3532
+ # Environment
3533
+ ENV NODE_ENV=production
3534
+ ENV PORT=${port}
3535
+
3536
+ # Health check
3537
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3538
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3539
+
3540
+ # Switch to non-root user
3541
+ USER hono
3542
+
3543
+ EXPOSE ${port}
3544
+
3545
+ # Use tini as entrypoint to handle PID 1 responsibilities
3546
+ ENTRYPOINT ["/sbin/tini", "--"]
3547
+ CMD ["node", "server.mjs"]
3548
+ `;
3650
3549
  }
3651
3550
  /**
3652
- * Build Docker image
3653
- * @param imageRef - Full image reference (registry/name:tag)
3654
- * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
3655
- * @param buildArgs - Build arguments to pass to docker build
3551
+ * Generate .dockerignore file
3656
3552
  */
3657
- async function buildImage(imageRef, appName, buildArgs) {
3658
- logger$5.log(`\n🔨 Building Docker image: ${imageRef}`);
3659
- const cwd = process.cwd();
3660
- const lockfilePath = findLockfilePath(cwd);
3661
- const lockfileDir = lockfilePath ? (0, node_path.dirname)(lockfilePath) : cwd;
3662
- const inMonorepo = lockfileDir !== cwd;
3663
- if (appName || inMonorepo) logger$5.log(" Generating Dockerfile for monorepo (turbo prune)...");
3664
- else logger$5.log(" Generating Dockerfile...");
3665
- await dockerCommand({});
3666
- const dockerfileSuffix = appName ? `.${appName}` : "";
3667
- const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
3668
- const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
3669
- if (buildCwd !== cwd) logger$5.log(` Building from workspace root: ${buildCwd}`);
3670
- const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
3671
- try {
3672
- const cmd = [
3673
- "DOCKER_BUILDKIT=1 docker build",
3674
- "--platform linux/amd64",
3675
- `-f ${dockerfilePath}`,
3676
- `-t ${imageRef}`,
3677
- buildArgsString,
3678
- "."
3679
- ].filter(Boolean).join(" ");
3680
- (0, node_child_process.execSync)(cmd, {
3681
- cwd: buildCwd,
3682
- stdio: "inherit",
3683
- env: {
3684
- ...process.env,
3685
- DOCKER_BUILDKIT: "1"
3686
- }
3687
- });
3688
- logger$5.log(`✅ Image built: ${imageRef}`);
3689
- } catch (error) {
3690
- throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3691
- }
3553
+ function generateDockerignore() {
3554
+ return `# Dependencies
3555
+ node_modules
3556
+ .pnpm-store
3557
+
3558
+ # Build output (except what we need)
3559
+ .gkm/aws*
3560
+ .gkm/server/*.ts
3561
+ !.gkm/server/dist
3562
+
3563
+ # IDE and editor
3564
+ .idea
3565
+ .vscode
3566
+ *.swp
3567
+ *.swo
3568
+
3569
+ # Git
3570
+ .git
3571
+ .gitignore
3572
+
3573
+ # Logs
3574
+ *.log
3575
+ npm-debug.log*
3576
+ pnpm-debug.log*
3577
+
3578
+ # Test files
3579
+ **/*.test.ts
3580
+ **/*.spec.ts
3581
+ **/__tests__
3582
+ coverage
3583
+
3584
+ # Documentation
3585
+ docs
3586
+ *.md
3587
+ !README.md
3588
+
3589
+ # Environment files (handle secrets separately)
3590
+ .env
3591
+ .env.*
3592
+ !.env.example
3593
+
3594
+ # Docker files (don't copy recursively)
3595
+ Dockerfile*
3596
+ docker-compose*
3597
+ .dockerignore
3598
+ `;
3692
3599
  }
3693
3600
  /**
3694
- * Push Docker image to registry
3601
+ * Generate docker-entrypoint.sh for custom startup logic
3695
3602
  */
3696
- async function pushImage(imageRef) {
3697
- logger$5.log(`\n☁️ Pushing image: ${imageRef}`);
3698
- try {
3699
- (0, node_child_process.execSync)(`docker push ${imageRef}`, {
3700
- cwd: process.cwd(),
3701
- stdio: "inherit"
3702
- });
3703
- logger$5.log(`✅ Image pushed: ${imageRef}`);
3704
- } catch (error) {
3705
- throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3706
- }
3603
+ function generateDockerEntrypoint() {
3604
+ return `#!/bin/sh
3605
+ set -e
3606
+
3607
+ # Run any custom startup scripts here
3608
+ # Example: wait for database
3609
+ # until nc -z $DB_HOST $DB_PORT; do
3610
+ # echo "Waiting for database..."
3611
+ # sleep 1
3612
+ # done
3613
+
3614
+ # Execute the main command
3615
+ exec "$@"
3616
+ `;
3707
3617
  }
3708
3618
  /**
3709
- * Deploy using Docker (build and optionally push image)
3619
+ * Resolve Docker configuration from GkmConfig with defaults
3710
3620
  */
3711
- async function deployDocker(options) {
3712
- const { stage, tag, skipPush, masterKey, config, buildArgs } = options;
3713
- const imageName = config.imageName;
3714
- const imageRef = getImageRef(config.registry, imageName, tag);
3715
- await buildImage(imageRef, config.appName, buildArgs);
3716
- if (!skipPush) if (!config.registry) logger$5.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
3717
- else await pushImage(imageRef);
3718
- logger$5.log("\n✅ Docker deployment ready!");
3719
- logger$5.log(`\n📋 Deployment details:`);
3720
- logger$5.log(` Image: ${imageRef}`);
3721
- logger$5.log(` Stage: ${stage}`);
3722
- if (masterKey) {
3723
- logger$5.log(`\n🔐 Deploy with this environment variable:`);
3724
- logger$5.log(` GKM_MASTER_KEY=${masterKey}`);
3725
- logger$5.log("\n Example docker run:");
3726
- logger$5.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
3727
- }
3621
+ function resolveDockerConfig$1(config) {
3622
+ const docker = config.docker ?? {};
3623
+ let defaultImageName = "api";
3624
+ try {
3625
+ const pkg$1 = require(`${process.cwd()}/package.json`);
3626
+ if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
3627
+ } catch {}
3728
3628
  return {
3729
- imageRef,
3730
- masterKey
3629
+ registry: docker.registry ?? "",
3630
+ imageName: docker.imageName ?? defaultImageName,
3631
+ baseImage: docker.baseImage ?? "node:22-alpine",
3632
+ port: docker.port ?? 3e3,
3633
+ compose: docker.compose
3731
3634
  };
3732
3635
  }
3733
3636
  /**
3734
- * Resolve Docker deploy config from gkm config
3735
- * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
3736
- * - projectName: from root package.json, or 'app' (for Dokploy project)
3737
- * - appName: from cwd package.json, or projectName (for Dokploy app within project)
3637
+ * Generate a Dockerfile for Next.js frontend apps using standalone output.
3638
+ * Uses turbo prune for monorepo optimization.
3639
+ * @internal Exported for testing
3738
3640
  */
3739
- function resolveDockerConfig(config) {
3740
- const projectName = getAppNameFromPackageJson() ?? "app";
3741
- const appName = getAppNameFromCwd$1() ?? projectName;
3742
- const imageName = config.docker?.imageName ?? appName;
3743
- return {
3744
- registry: config.docker?.registry,
3745
- imageName,
3746
- projectName,
3747
- appName
3748
- };
3749
- }
3641
+ function generateNextjsDockerfile(options) {
3642
+ const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
3643
+ const pm = getPmConfig(packageManager);
3644
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3645
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3646
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3647
+ const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
3648
+ const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
3649
+ return `# syntax=docker/dockerfile:1
3650
+ # Next.js standalone Dockerfile with turbo prune optimization
3750
3651
 
3751
- //#endregion
3752
- //#region src/deploy/dokploy.ts
3753
- const logger$4 = console;
3754
- /**
3755
- * Get the Dokploy API token from stored credentials or environment
3756
- */
3757
- async function getApiToken$1() {
3758
- const token = await getDokployToken();
3759
- if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
3760
- return token;
3652
+ # Stage 1: Prune monorepo
3653
+ FROM ${baseImage} AS pruner
3654
+
3655
+ WORKDIR /app
3656
+
3657
+ ${installPm}
3658
+
3659
+ COPY . .
3660
+
3661
+ # Prune to only include necessary packages
3662
+ RUN ${turboCmd} prune ${turboPackage} --docker
3663
+
3664
+ # Stage 2: Install dependencies
3665
+ FROM ${baseImage} AS deps
3666
+
3667
+ WORKDIR /app
3668
+
3669
+ ${installPm}
3670
+
3671
+ # Copy pruned lockfile and package.jsons
3672
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3673
+ COPY --from=pruner /app/out/json/ ./
3674
+
3675
+ # Install dependencies
3676
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3677
+ ${turboInstallCmd}
3678
+
3679
+ # Stage 3: Build
3680
+ FROM deps AS builder
3681
+
3682
+ WORKDIR /app
3683
+
3684
+ # Build-time args for public API URLs (populated by gkm deploy)
3685
+ # These get baked into the Next.js build as public environment variables
3686
+ ${publicUrlArgDeclarations}
3687
+
3688
+ # Convert ARGs to ENVs for Next.js build
3689
+ ${publicUrlEnvDeclarations}
3690
+
3691
+ # Copy pruned source
3692
+ COPY --from=pruner /app/out/full/ ./
3693
+
3694
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3695
+ # Using wildcard to make it optional for single-app projects
3696
+ COPY --from=pruner /app/tsconfig.* ./
3697
+
3698
+ # Ensure public directory exists (may be empty for scaffolded projects)
3699
+ RUN mkdir -p ${appPath}/public
3700
+
3701
+ # Set Next.js to produce standalone output
3702
+ ENV NEXT_TELEMETRY_DISABLED=1
3703
+
3704
+ # Build the application
3705
+ RUN ${turboCmd} run build --filter=${turboPackage}
3706
+
3707
+ # Stage 4: Production
3708
+ FROM ${baseImage} AS runner
3709
+
3710
+ WORKDIR /app
3711
+
3712
+ # Install tini for proper signal handling
3713
+ RUN apk add --no-cache tini
3714
+
3715
+ # Create non-root user
3716
+ RUN addgroup --system --gid 1001 nodejs && \\
3717
+ adduser --system --uid 1001 nextjs
3718
+
3719
+ # Set environment
3720
+ ENV NODE_ENV=production
3721
+ ENV NEXT_TELEMETRY_DISABLED=1
3722
+ ENV PORT=${port}
3723
+ ENV HOSTNAME="0.0.0.0"
3724
+
3725
+ # Copy static files and standalone output
3726
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
3727
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
3728
+ COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
3729
+
3730
+ USER nextjs
3731
+
3732
+ EXPOSE ${port}
3733
+
3734
+ ENTRYPOINT ["/sbin/tini", "--"]
3735
+ CMD ["node", "${appPath}/server.js"]
3736
+ `;
3761
3737
  }
3762
3738
  /**
3763
- * Create a Dokploy API client
3739
+ * Generate a Dockerfile for backend apps in a workspace.
3740
+ * Uses turbo prune for monorepo optimization.
3741
+ * @internal Exported for testing
3764
3742
  */
3765
- async function createApi$1(endpoint) {
3766
- const token = await getApiToken$1();
3767
- return new require_dokploy_api.DokployApi({
3768
- baseUrl: endpoint,
3769
- token
3770
- });
3743
+ function generateBackendDockerfile(options) {
3744
+ const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3745
+ const pm = getPmConfig(packageManager);
3746
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3747
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3748
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3749
+ return `# syntax=docker/dockerfile:1
3750
+ # Backend Dockerfile with turbo prune optimization
3751
+
3752
+ # Stage 1: Prune monorepo
3753
+ FROM ${baseImage} AS pruner
3754
+
3755
+ WORKDIR /app
3756
+
3757
+ ${installPm}
3758
+
3759
+ COPY . .
3760
+
3761
+ # Prune to only include necessary packages
3762
+ RUN ${turboCmd} prune ${turboPackage} --docker
3763
+
3764
+ # Stage 2: Install dependencies
3765
+ FROM ${baseImage} AS deps
3766
+
3767
+ WORKDIR /app
3768
+
3769
+ ${installPm}
3770
+
3771
+ # Copy pruned lockfile and package.jsons
3772
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3773
+ COPY --from=pruner /app/out/json/ ./
3774
+
3775
+ # Install dependencies
3776
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3777
+ ${turboInstallCmd}
3778
+
3779
+ # Stage 3: Build
3780
+ FROM deps AS builder
3781
+
3782
+ WORKDIR /app
3783
+
3784
+ # Build-time args for encrypted secrets
3785
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3786
+ ARG GKM_CREDENTIALS_IV=""
3787
+
3788
+ # Copy pruned source
3789
+ COPY --from=pruner /app/out/full/ ./
3790
+
3791
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3792
+ # Using wildcard to make it optional for single-app projects
3793
+ COPY --from=pruner /app/gkm.config.* ./
3794
+ COPY --from=pruner /app/tsconfig.* ./
3795
+
3796
+ # Write encrypted credentials for gkm build to embed
3797
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3798
+ mkdir -p ${appPath}/.gkm && \
3799
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3800
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3801
+ fi
3802
+
3803
+ # Build production server using gkm
3804
+ RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3805
+
3806
+ # Stage 4: Production
3807
+ FROM ${baseImage} AS runner
3808
+
3809
+ WORKDIR /app
3810
+
3811
+ RUN apk add --no-cache tini
3812
+
3813
+ RUN addgroup --system --gid 1001 nodejs && \\
3814
+ adduser --system --uid 1001 hono
3815
+
3816
+ # Copy bundled server
3817
+ COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3818
+
3819
+ ENV NODE_ENV=production
3820
+ ENV PORT=${port}
3821
+
3822
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3823
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3824
+
3825
+ USER hono
3826
+
3827
+ EXPOSE ${port}
3828
+
3829
+ ENTRYPOINT ["/sbin/tini", "--"]
3830
+ CMD ["node", "server.mjs"]
3831
+ `;
3771
3832
  }
3772
3833
  /**
3773
- * Deploy to Dokploy
3834
+ * Generate a Dockerfile for apps with a custom entry point.
3835
+ * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
3836
+ * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
3837
+ * @internal Exported for testing
3774
3838
  */
3775
- async function deployDokploy(options) {
3776
- const { stage, imageRef, masterKey, config } = options;
3777
- logger$4.log(`\n🎯 Deploying to Dokploy...`);
3778
- logger$4.log(` Endpoint: ${config.endpoint}`);
3779
- logger$4.log(` Application: ${config.applicationId}`);
3780
- const api = await createApi$1(config.endpoint);
3781
- logger$4.log(` Configuring Docker image: ${imageRef}`);
3782
- const registryOptions = {};
3783
- if (config.registryId) {
3784
- registryOptions.registryId = config.registryId;
3785
- logger$4.log(` Using Dokploy registry: ${config.registryId}`);
3786
- } else {
3787
- const storedRegistryId = await getDokployRegistryId();
3788
- if (storedRegistryId) {
3789
- registryOptions.registryId = storedRegistryId;
3790
- logger$4.log(` Using stored Dokploy registry: ${storedRegistryId}`);
3791
- } else if (config.registryCredentials) {
3792
- registryOptions.username = config.registryCredentials.username;
3793
- registryOptions.password = config.registryCredentials.password;
3794
- registryOptions.registryUrl = config.registryCredentials.registryUrl;
3795
- logger$4.log(` Using registry credentials for: ${config.registryCredentials.registryUrl}`);
3796
- } else {
3797
- const username = process.env.DOCKER_REGISTRY_USERNAME;
3798
- const password = process.env.DOCKER_REGISTRY_PASSWORD;
3799
- const registryUrl = process.env.DOCKER_REGISTRY_URL || config.registry;
3800
- if (username && password && registryUrl) {
3801
- registryOptions.username = username;
3802
- registryOptions.password = password;
3803
- registryOptions.registryUrl = registryUrl;
3804
- logger$4.log(` Using registry credentials from environment`);
3805
- }
3806
- }
3807
- }
3808
- await api.saveDockerProvider(config.applicationId, imageRef, registryOptions);
3809
- logger$4.log(" ✓ Docker provider configured");
3810
- const envVars = {};
3811
- if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
3812
- if (Object.keys(envVars).length > 0) {
3813
- logger$4.log(" Updating environment variables...");
3814
- const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
3815
- await api.saveApplicationEnv(config.applicationId, envString);
3816
- logger$4.log(" ✓ Environment variables updated");
3817
- }
3818
- logger$4.log(" Triggering deployment...");
3819
- await api.deployApplication(config.applicationId);
3820
- logger$4.log(" ✓ Deployment triggered");
3821
- logger$4.log("\n✅ Dokploy deployment initiated!");
3822
- logger$4.log(`\n📋 Deployment details:`);
3823
- logger$4.log(` Image: ${imageRef}`);
3824
- logger$4.log(` Stage: ${stage}`);
3825
- logger$4.log(` Application ID: ${config.applicationId}`);
3826
- if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
3827
- const deploymentUrl = `${config.endpoint}/project/${config.projectId}`;
3828
- logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
3829
- return {
3830
- imageRef,
3831
- masterKey,
3832
- url: deploymentUrl
3833
- };
3839
+ function generateEntryDockerfile(options) {
3840
+ const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
3841
+ const pm = getPmConfig(packageManager);
3842
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
3843
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
3844
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3845
+ return `# syntax=docker/dockerfile:1
3846
+ # Entry-based Dockerfile with turbo prune + tsdown bundling
3847
+
3848
+ # Stage 1: Prune monorepo
3849
+ FROM ${baseImage} AS pruner
3850
+
3851
+ WORKDIR /app
3852
+
3853
+ ${installPm}
3854
+
3855
+ COPY . .
3856
+
3857
+ # Prune to only include necessary packages
3858
+ RUN ${turboCmd} prune ${turboPackage} --docker
3859
+
3860
+ # Stage 2: Install dependencies
3861
+ FROM ${baseImage} AS deps
3862
+
3863
+ WORKDIR /app
3864
+
3865
+ ${installPm}
3866
+
3867
+ # Copy pruned lockfile and package.jsons
3868
+ COPY --from=pruner /app/out/${pm.lockfile} ./
3869
+ COPY --from=pruner /app/out/json/ ./
3870
+
3871
+ # Install dependencies
3872
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3873
+ ${turboInstallCmd}
3874
+
3875
+ # Stage 3: Build with tsdown
3876
+ FROM deps AS builder
3877
+
3878
+ WORKDIR /app
3879
+
3880
+ # Build-time args for encrypted secrets
3881
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
3882
+ ARG GKM_CREDENTIALS_IV=""
3883
+
3884
+ # Copy pruned source
3885
+ COPY --from=pruner /app/out/full/ ./
3886
+
3887
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3888
+ # Using wildcard to make it optional for single-app projects
3889
+ COPY --from=pruner /app/tsconfig.* ./
3890
+
3891
+ # Write encrypted credentials for tsdown to embed via define
3892
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3893
+ mkdir -p ${appPath}/.gkm && \
3894
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3895
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3896
+ fi
3897
+
3898
+ # Bundle entry point with esbuild (outputs to dist/index.mjs)
3899
+ # Creates a fully standalone bundle with all dependencies included
3900
+ # Use define to embed credentials if present
3901
+ RUN cd ${appPath} && \
3902
+ if [ -f .gkm/credentials.enc ]; then \
3903
+ CREDS=$(cat .gkm/credentials.enc) && \
3904
+ IV=$(cat .gkm/credentials.iv) && \
3905
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3906
+ --outfile=dist/index.mjs --packages=bundle \
3907
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3908
+ --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3909
+ --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3910
+ else \
3911
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3912
+ --outfile=dist/index.mjs --packages=bundle \
3913
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3914
+ fi
3915
+
3916
+ # Stage 4: Production
3917
+ FROM ${baseImage} AS runner
3918
+
3919
+ WORKDIR /app
3920
+
3921
+ RUN apk add --no-cache tini
3922
+
3923
+ RUN addgroup --system --gid 1001 nodejs && \\
3924
+ adduser --system --uid 1001 app
3925
+
3926
+ # Copy bundled output only (no node_modules needed - fully bundled)
3927
+ COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3928
+
3929
+ ENV NODE_ENV=production
3930
+ ENV PORT=${port}
3931
+
3932
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3933
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3934
+
3935
+ USER app
3936
+
3937
+ EXPOSE ${port}
3938
+
3939
+ ENTRYPOINT ["/sbin/tini", "--"]
3940
+ CMD ["node", "index.mjs"]
3941
+ `;
3834
3942
  }
3835
3943
 
3836
3944
  //#endregion
3837
- //#region src/deploy/state.ts
3838
- /**
3839
- * Get the state file path for a stage
3840
- */
3841
- function getStateFilePath(workspaceRoot, stage) {
3842
- return (0, node_path.join)(workspaceRoot, ".gkm", `deploy-${stage}.json`);
3843
- }
3945
+ //#region src/docker/index.ts
3946
+ const logger$5 = console;
3844
3947
  /**
3845
- * Read the deploy state for a stage
3846
- * Returns null if state file doesn't exist
3948
+ * Docker command implementation
3949
+ * Generates Dockerfile, docker-compose.yml, and related files
3950
+ *
3951
+ * Default: Multi-stage Dockerfile that builds from source inside Docker
3952
+ * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
3847
3953
  */
3848
- async function readStageState(workspaceRoot, stage) {
3849
- const filePath = getStateFilePath(workspaceRoot, stage);
3850
- try {
3851
- const content = await (0, node_fs_promises.readFile)(filePath, "utf-8");
3852
- return JSON.parse(content);
3853
- } catch (error) {
3854
- if (error.code === "ENOENT") return null;
3855
- console.warn(`Warning: Could not read deploy state: ${error}`);
3856
- return null;
3954
+ async function dockerCommand(options) {
3955
+ const loadedConfig = await require_config.loadWorkspaceConfig();
3956
+ if (loadedConfig.type === "workspace") {
3957
+ logger$5.log("📦 Detected workspace configuration");
3958
+ return workspaceDockerCommand(loadedConfig.workspace, options);
3857
3959
  }
3858
- }
3859
- /**
3860
- * Write the deploy state for a stage
3861
- */
3862
- async function writeStageState(workspaceRoot, stage, state) {
3863
- const filePath = getStateFilePath(workspaceRoot, stage);
3864
- const dir = (0, node_path.join)(workspaceRoot, ".gkm");
3865
- await (0, node_fs_promises.mkdir)(dir, { recursive: true });
3866
- state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
3867
- await (0, node_fs_promises.writeFile)(filePath, JSON.stringify(state, null, 2));
3868
- }
3869
- /**
3870
- * Create a new empty state for a stage
3871
- */
3872
- function createEmptyState(stage, environmentId) {
3873
- return {
3874
- provider: "dokploy",
3875
- stage,
3876
- environmentId,
3877
- applications: {},
3878
- services: {},
3879
- lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
3960
+ const config = await require_config.loadConfig();
3961
+ const dockerConfig = resolveDockerConfig$1(config);
3962
+ const serverConfig = typeof config.providers?.server === "object" ? config.providers.server : void 0;
3963
+ const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
3964
+ const useSlim = options.slim === true;
3965
+ if (useSlim) {
3966
+ const distDir = (0, node_path.join)(process.cwd(), ".gkm", "server", "dist");
3967
+ const hasBuild = (0, node_fs.existsSync)((0, node_path.join)(distDir, "server.mjs"));
3968
+ if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
3969
+ }
3970
+ const dockerDir = (0, node_path.join)(process.cwd(), ".gkm", "docker");
3971
+ await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
3972
+ const packageManager = detectPackageManager$1();
3973
+ const inMonorepo = isMonorepo();
3974
+ const hasTurbo = hasTurboConfig();
3975
+ let useTurbo = options.turbo ?? false;
3976
+ if (inMonorepo && !useSlim) if (hasTurbo) {
3977
+ useTurbo = true;
3978
+ logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
3979
+ } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
3980
+ let turboPackage = options.turboPackage ?? dockerConfig.imageName;
3981
+ if (useTurbo && !options.turboPackage) try {
3982
+ const pkg$1 = require(`${process.cwd()}/package.json`);
3983
+ if (pkg$1.name) {
3984
+ turboPackage = pkg$1.name;
3985
+ logger$5.log(` Turbo package: ${turboPackage}`);
3986
+ }
3987
+ } catch {}
3988
+ const templateOptions = {
3989
+ imageName: dockerConfig.imageName,
3990
+ baseImage: dockerConfig.baseImage,
3991
+ port: dockerConfig.port,
3992
+ healthCheckPath,
3993
+ prebuilt: useSlim,
3994
+ turbo: useTurbo,
3995
+ turboPackage,
3996
+ packageManager
3997
+ };
3998
+ const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3999
+ const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
4000
+ const dockerfilePath = (0, node_path.join)(dockerDir, "Dockerfile");
4001
+ await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
4002
+ logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
4003
+ const composeOptions = {
4004
+ imageName: dockerConfig.imageName,
4005
+ registry: options.registry ?? dockerConfig.registry,
4006
+ port: dockerConfig.port,
4007
+ healthCheckPath,
4008
+ services: dockerConfig.compose?.services ?? {}
3880
4009
  };
4010
+ const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
4011
+ const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
4012
+ const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
4013
+ await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
4014
+ logger$5.log("Generated: .gkm/docker/docker-compose.yml");
4015
+ const dockerignore = generateDockerignore();
4016
+ const dockerignorePath = (0, node_path.join)(process.cwd(), ".dockerignore");
4017
+ await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
4018
+ logger$5.log("Generated: .dockerignore (project root)");
4019
+ const entrypoint = generateDockerEntrypoint();
4020
+ const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
4021
+ await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
4022
+ logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
4023
+ const result = {
4024
+ dockerfile: dockerfilePath,
4025
+ dockerCompose: composePath,
4026
+ dockerignore: dockerignorePath,
4027
+ entrypoint: entrypointPath
4028
+ };
4029
+ if (options.build) await buildDockerImage(dockerConfig.imageName, options);
4030
+ if (options.push) await pushDockerImage(dockerConfig.imageName, options);
4031
+ return result;
3881
4032
  }
3882
4033
  /**
3883
- * Get application ID from state
3884
- */
3885
- function getApplicationId(state, appName) {
3886
- return state?.applications[appName];
3887
- }
3888
- /**
3889
- * Set application ID in state (mutates state)
3890
- */
3891
- function setApplicationId(state, appName, applicationId) {
3892
- state.applications[appName] = applicationId;
3893
- }
3894
- /**
3895
- * Get postgres ID from state
3896
- */
3897
- function getPostgresId(state) {
3898
- return state?.services.postgresId;
3899
- }
3900
- /**
3901
- * Set postgres ID in state (mutates state)
4034
+ * Ensure lockfile exists in the build context
4035
+ * For monorepos, copies from workspace root if needed
4036
+ * Returns cleanup function if file was copied
3902
4037
  */
3903
- function setPostgresId(state, postgresId) {
3904
- state.services.postgresId = postgresId;
4038
+ function ensureLockfile(cwd) {
4039
+ const lockfilePath = findLockfilePath(cwd);
4040
+ if (!lockfilePath) {
4041
+ logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
4042
+ return null;
4043
+ }
4044
+ const lockfileName = (0, node_path.basename)(lockfilePath);
4045
+ const localLockfile = (0, node_path.join)(cwd, lockfileName);
4046
+ if (lockfilePath === localLockfile) return null;
4047
+ logger$5.log(` Copying ${lockfileName} from monorepo root...`);
4048
+ (0, node_fs.copyFileSync)(lockfilePath, localLockfile);
4049
+ return () => {
4050
+ try {
4051
+ (0, node_fs.unlinkSync)(localLockfile);
4052
+ } catch {}
4053
+ };
3905
4054
  }
3906
4055
  /**
3907
- * Get redis ID from state
4056
+ * Build Docker image
4057
+ * Uses BuildKit for cache mount support
3908
4058
  */
3909
- function getRedisId(state) {
3910
- return state?.services.redisId;
4059
+ async function buildDockerImage(imageName, options) {
4060
+ const tag = options.tag ?? "latest";
4061
+ const registry = options.registry;
4062
+ const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
4063
+ logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
4064
+ const cwd = process.cwd();
4065
+ const cleanup = ensureLockfile(cwd);
4066
+ try {
4067
+ (0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
4068
+ cwd,
4069
+ stdio: "inherit",
4070
+ env: {
4071
+ ...process.env,
4072
+ DOCKER_BUILDKIT: "1"
4073
+ }
4074
+ });
4075
+ logger$5.log(`✅ Docker image built: ${fullImageName}`);
4076
+ } catch (error) {
4077
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4078
+ } finally {
4079
+ cleanup?.();
4080
+ }
3911
4081
  }
3912
4082
  /**
3913
- * Set redis ID in state (mutates state)
4083
+ * Push Docker image to registry
3914
4084
  */
3915
- function setRedisId(state, redisId) {
3916
- state.services.redisId = redisId;
4085
+ async function pushDockerImage(imageName, options) {
4086
+ const tag = options.tag ?? "latest";
4087
+ const registry = options.registry;
4088
+ if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
4089
+ const fullImageName = `${registry}/${imageName}:${tag}`;
4090
+ logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
4091
+ try {
4092
+ (0, node_child_process.execSync)(`docker push ${fullImageName}`, {
4093
+ cwd: process.cwd(),
4094
+ stdio: "inherit"
4095
+ });
4096
+ logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
4097
+ } catch (error) {
4098
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4099
+ }
3917
4100
  }
3918
-
3919
- //#endregion
3920
- //#region src/deploy/dns/hostinger-api.ts
3921
- /**
3922
- * Hostinger DNS API client
3923
- *
3924
- * API Documentation: https://developers.hostinger.com/
3925
- * Authentication: Bearer token from hpanel.hostinger.com/profile/api
3926
- */
3927
- const HOSTINGER_API_BASE = "https://developers.hostinger.com";
3928
4101
  /**
3929
- * Hostinger API error
4102
+ * Get the package name from package.json in an app directory.
3930
4103
  */
3931
- var HostingerApiError = class extends Error {
3932
- constructor(message, status, statusText, errors) {
3933
- super(message);
3934
- this.status = status;
3935
- this.statusText = statusText;
3936
- this.errors = errors;
3937
- this.name = "HostingerApiError";
4104
+ function getAppPackageName(appPath) {
4105
+ try {
4106
+ const pkgPath = (0, node_path.join)(appPath, "package.json");
4107
+ if (!(0, node_fs.existsSync)(pkgPath)) return void 0;
4108
+ const content = (0, node_fs.readFileSync)(pkgPath, "utf-8");
4109
+ const pkg$1 = JSON.parse(content);
4110
+ return pkg$1.name;
4111
+ } catch {
4112
+ return void 0;
3938
4113
  }
3939
- };
4114
+ }
3940
4115
  /**
3941
- * Hostinger DNS API client
3942
- *
3943
- * @example
3944
- * ```ts
3945
- * const api = new HostingerApi(token);
3946
- *
3947
- * // Get all records for a domain
3948
- * const records = await api.getRecords('traflabs.io');
3949
- *
3950
- * // Create/update records
3951
- * await api.upsertRecords('traflabs.io', [
3952
- * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
3953
- * ]);
3954
- * ```
4116
+ * Generate Dockerfiles for all apps in a workspace.
4117
+ * @internal Exported for testing
3955
4118
  */
3956
- var HostingerApi = class {
3957
- token;
3958
- constructor(token) {
3959
- this.token = token;
3960
- }
3961
- /**
3962
- * Make a request to the Hostinger API
3963
- */
3964
- async request(method, endpoint, body) {
3965
- const url = `${HOSTINGER_API_BASE}${endpoint}`;
3966
- const response = await fetch(url, {
3967
- method,
3968
- headers: {
3969
- "Content-Type": "application/json",
3970
- Authorization: `Bearer ${this.token}`
3971
- },
3972
- body: body ? JSON.stringify(body) : void 0
4119
+ async function workspaceDockerCommand(workspace, options) {
4120
+ const results = [];
4121
+ const apps = Object.entries(workspace.apps);
4122
+ logger$5.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
4123
+ const dockerDir = (0, node_path.join)(workspace.root, ".gkm", "docker");
4124
+ await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
4125
+ const packageManager = detectPackageManager$1(workspace.root);
4126
+ logger$5.log(` Package manager: ${packageManager}`);
4127
+ for (const [appName, app] of apps) {
4128
+ const appPath = app.path;
4129
+ const fullAppPath = (0, node_path.join)(workspace.root, appPath);
4130
+ const turboPackage = getAppPackageName(fullAppPath) ?? appName;
4131
+ const imageName = appName;
4132
+ const hasEntry = !!app.entry;
4133
+ const buildType = hasEntry ? "entry" : app.type;
4134
+ logger$5.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
4135
+ let dockerfile;
4136
+ if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
4137
+ imageName,
4138
+ baseImage: "node:22-alpine",
4139
+ port: app.port,
4140
+ appPath,
4141
+ turboPackage,
4142
+ packageManager
3973
4143
  });
3974
- if (!response.ok) {
3975
- let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
3976
- let errors;
3977
- try {
3978
- const errorBody = await response.json();
3979
- if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
3980
- errors = errorBody.errors;
3981
- } catch {}
3982
- throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
3983
- }
3984
- const text = await response.text();
3985
- if (!text || text.trim() === "") return void 0;
3986
- return JSON.parse(text);
3987
- }
3988
- /**
3989
- * Get all DNS records for a domain
3990
- *
3991
- * @param domain - Root domain (e.g., 'traflabs.io')
3992
- */
3993
- async getRecords(domain) {
3994
- const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
3995
- return response.data || [];
3996
- }
3997
- /**
3998
- * Create or update DNS records
3999
- *
4000
- * @param domain - Root domain (e.g., 'traflabs.io')
4001
- * @param records - Records to create/update
4002
- * @param overwrite - If true, replaces all existing records. If false, merges with existing.
4003
- */
4004
- async upsertRecords(domain, records, overwrite = false) {
4005
- await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
4006
- overwrite,
4007
- zone: records
4144
+ else if (app.entry) dockerfile = generateEntryDockerfile({
4145
+ imageName,
4146
+ baseImage: "node:22-alpine",
4147
+ port: app.port,
4148
+ appPath,
4149
+ entry: app.entry,
4150
+ turboPackage,
4151
+ packageManager,
4152
+ healthCheckPath: "/health"
4008
4153
  });
4009
- }
4010
- /**
4011
- * Validate DNS records before applying
4012
- *
4013
- * @param domain - Root domain (e.g., 'traflabs.io')
4014
- * @param records - Records to validate
4015
- * @returns true if valid, throws if invalid
4016
- */
4017
- async validateRecords(domain, records) {
4018
- await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
4019
- overwrite: false,
4020
- zone: records
4154
+ else dockerfile = generateBackendDockerfile({
4155
+ imageName,
4156
+ baseImage: "node:22-alpine",
4157
+ port: app.port,
4158
+ appPath,
4159
+ turboPackage,
4160
+ packageManager,
4161
+ healthCheckPath: "/health"
4162
+ });
4163
+ const dockerfilePath = (0, node_path.join)(dockerDir, `Dockerfile.${appName}`);
4164
+ await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
4165
+ logger$5.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
4166
+ results.push({
4167
+ appName,
4168
+ type: app.type,
4169
+ dockerfile: dockerfilePath,
4170
+ imageName
4021
4171
  });
4022
- return true;
4023
- }
4024
- /**
4025
- * Delete specific DNS records
4026
- *
4027
- * @param domain - Root domain (e.g., 'traflabs.io')
4028
- * @param filters - Filters to match records for deletion
4029
- */
4030
- async deleteRecords(domain, filters) {
4031
- await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
4032
- }
4033
- /**
4034
- * Check if a specific record exists
4035
- *
4036
- * @param domain - Root domain (e.g., 'traflabs.io')
4037
- * @param name - Subdomain name (e.g., 'api.joemoer')
4038
- * @param type - Record type (e.g., 'A')
4039
- */
4040
- async recordExists(domain, name$1, type$1 = "A") {
4041
- const records = await this.getRecords(domain);
4042
- return records.some((r) => r.name === name$1 && r.type === type$1);
4043
4172
  }
4044
- /**
4045
- * Create a single A record if it doesn't exist
4046
- *
4047
- * @param domain - Root domain (e.g., 'traflabs.io')
4048
- * @param subdomain - Subdomain name (e.g., 'api.joemoer')
4049
- * @param ip - IP address to point to
4050
- * @param ttl - TTL in seconds (default: 300)
4051
- * @returns true if created, false if already exists
4052
- */
4053
- async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
4054
- const exists = await this.recordExists(domain, subdomain, "A");
4055
- if (exists) return false;
4056
- await this.upsertRecords(domain, [{
4057
- name: subdomain,
4058
- type: "A",
4059
- ttl,
4060
- records: [{ content: ip }]
4061
- }]);
4062
- return true;
4173
+ const dockerignore = generateDockerignore();
4174
+ const dockerignorePath = (0, node_path.join)(workspace.root, ".dockerignore");
4175
+ await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
4176
+ logger$5.log(`\n Generated: .dockerignore (workspace root)`);
4177
+ const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
4178
+ const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
4179
+ await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
4180
+ logger$5.log(` Generated: .gkm/docker/docker-compose.yml`);
4181
+ logger$5.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
4182
+ logger$5.log("\n📋 Build commands:");
4183
+ for (const result of results) {
4184
+ const icon = result.type === "backend" ? "⚙️" : "🌐";
4185
+ logger$5.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
4063
4186
  }
4064
- };
4187
+ logger$5.log("\n📋 Run all services:");
4188
+ logger$5.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
4189
+ return {
4190
+ apps: results,
4191
+ dockerCompose: composePath,
4192
+ dockerignore: dockerignorePath
4193
+ };
4194
+ }
4065
4195
 
4066
4196
  //#endregion
4067
- //#region src/deploy/dns/index.ts
4068
- const logger$3 = console;
4197
+ //#region src/deploy/docker.ts
4069
4198
  /**
4070
- * Resolve IP address from a hostname
4199
+ * Get app name from package.json in the current working directory
4200
+ * Used for Dokploy app/project naming
4071
4201
  */
4072
- async function resolveHostnameToIp(hostname) {
4202
+ function getAppNameFromCwd$1() {
4203
+ const packageJsonPath = (0, node_path.join)(process.cwd(), "package.json");
4204
+ if (!(0, node_fs.existsSync)(packageJsonPath)) return void 0;
4073
4205
  try {
4074
- const addresses = await (0, node_dns_promises.lookup)(hostname, { family: 4 });
4075
- return addresses.address;
4076
- } catch (error) {
4077
- throw new Error(`Failed to resolve IP for ${hostname}: ${error instanceof Error ? error.message : "Unknown error"}`);
4078
- }
4206
+ const pkg$1 = JSON.parse((0, node_fs.readFileSync)(packageJsonPath, "utf-8"));
4207
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4208
+ } catch {}
4209
+ return void 0;
4079
4210
  }
4080
4211
  /**
4081
- * Extract subdomain from full hostname relative to root domain
4082
- *
4083
- * @example
4084
- * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
4085
- * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
4212
+ * Get app name from package.json adjacent to the lockfile (project root)
4213
+ * Used for Docker image naming
4086
4214
  */
4087
- function extractSubdomain(hostname, rootDomain) {
4088
- if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
4089
- const subdomain = hostname.slice(0, -(rootDomain.length + 1));
4090
- return subdomain || "@";
4215
+ function getAppNameFromPackageJson() {
4216
+ const cwd = process.cwd();
4217
+ const lockfilePath = findLockfilePath(cwd);
4218
+ if (!lockfilePath) return void 0;
4219
+ const projectRoot = (0, node_path.dirname)(lockfilePath);
4220
+ const packageJsonPath = (0, node_path.join)(projectRoot, "package.json");
4221
+ if (!(0, node_fs.existsSync)(packageJsonPath)) return void 0;
4222
+ try {
4223
+ const pkg$1 = JSON.parse((0, node_fs.readFileSync)(packageJsonPath, "utf-8"));
4224
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
4225
+ } catch {}
4226
+ return void 0;
4091
4227
  }
4228
+ const logger$4 = console;
4092
4229
  /**
4093
- * Generate required DNS records for a deployment
4230
+ * Get the full image reference
4094
4231
  */
4095
- function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
4096
- const records = [];
4097
- for (const [appName, hostname] of appHostnames) {
4098
- const subdomain = extractSubdomain(hostname, rootDomain);
4099
- records.push({
4100
- hostname,
4101
- subdomain,
4102
- type: "A",
4103
- value: serverIp,
4104
- appName
4232
+ function getImageRef(registry, imageName, tag) {
4233
+ if (registry) return `${registry}/${imageName}:${tag}`;
4234
+ return `${imageName}:${tag}`;
4235
+ }
4236
+ /**
4237
+ * Build Docker image
4238
+ * @param imageRef - Full image reference (registry/name:tag)
4239
+ * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
4240
+ * @param buildArgs - Build arguments to pass to docker build
4241
+ */
4242
+ async function buildImage(imageRef, appName, buildArgs) {
4243
+ logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
4244
+ const cwd = process.cwd();
4245
+ const lockfilePath = findLockfilePath(cwd);
4246
+ const lockfileDir = lockfilePath ? (0, node_path.dirname)(lockfilePath) : cwd;
4247
+ const inMonorepo = lockfileDir !== cwd;
4248
+ if (appName || inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
4249
+ else logger$4.log(" Generating Dockerfile...");
4250
+ await dockerCommand({});
4251
+ const dockerfileSuffix = appName ? `.${appName}` : "";
4252
+ const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
4253
+ const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
4254
+ if (buildCwd !== cwd) logger$4.log(` Building from workspace root: ${buildCwd}`);
4255
+ const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
4256
+ try {
4257
+ const cmd = [
4258
+ "DOCKER_BUILDKIT=1 docker build",
4259
+ "--platform linux/amd64",
4260
+ `-f ${dockerfilePath}`,
4261
+ `-t ${imageRef}`,
4262
+ buildArgsString,
4263
+ "."
4264
+ ].filter(Boolean).join(" ");
4265
+ (0, node_child_process.execSync)(cmd, {
4266
+ cwd: buildCwd,
4267
+ stdio: "inherit",
4268
+ env: {
4269
+ ...process.env,
4270
+ DOCKER_BUILDKIT: "1"
4271
+ }
4105
4272
  });
4273
+ logger$4.log(`✅ Image built: ${imageRef}`);
4274
+ } catch (error) {
4275
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4106
4276
  }
4107
- return records;
4108
4277
  }
4109
4278
  /**
4110
- * Print DNS records table
4279
+ * Push Docker image to registry
4111
4280
  */
4112
- function printDnsRecordsTable(records, rootDomain) {
4113
- logger$3.log("\n 📋 DNS Records for " + rootDomain + ":");
4114
- logger$3.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
4115
- logger$3.log(" │ Subdomain │ Type │ Value │ Status │");
4116
- logger$3.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
4117
- for (const record of records) {
4118
- const subdomain = record.subdomain.padEnd(35);
4119
- const type$1 = record.type.padEnd(4);
4120
- const value = record.value.padEnd(15);
4121
- let status;
4122
- if (record.error) status = "✗";
4123
- else if (record.created) status = "✓ new";
4124
- else if (record.existed) status = "✓";
4125
- else status = "?";
4126
- logger$3.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
4281
+ async function pushImage(imageRef) {
4282
+ logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
4283
+ try {
4284
+ (0, node_child_process.execSync)(`docker push ${imageRef}`, {
4285
+ cwd: process.cwd(),
4286
+ stdio: "inherit"
4287
+ });
4288
+ logger$4.log(`✅ Image pushed: ${imageRef}`);
4289
+ } catch (error) {
4290
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
4127
4291
  }
4128
- logger$3.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
4129
4292
  }
4130
4293
  /**
4131
- * Print DNS records in a simple format for manual setup
4294
+ * Deploy using Docker (build and optionally push image)
4132
4295
  */
4133
- function printDnsRecordsSimple(records, rootDomain) {
4134
- logger$3.log("\n 📋 Required DNS Records:");
4135
- logger$3.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
4136
- for (const record of records) logger$3.log(` ${record.subdomain} → ${record.value} (A record)`);
4137
- logger$3.log("");
4296
+ async function deployDocker(options) {
4297
+ const { stage, tag, skipPush, masterKey, config, buildArgs } = options;
4298
+ const imageName = config.imageName;
4299
+ const imageRef = getImageRef(config.registry, imageName, tag);
4300
+ await buildImage(imageRef, config.appName, buildArgs);
4301
+ if (!skipPush) if (!config.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
4302
+ else await pushImage(imageRef);
4303
+ logger$4.log("\n✅ Docker deployment ready!");
4304
+ logger$4.log(`\n📋 Deployment details:`);
4305
+ logger$4.log(` Image: ${imageRef}`);
4306
+ logger$4.log(` Stage: ${stage}`);
4307
+ if (masterKey) {
4308
+ logger$4.log(`\n🔐 Deploy with this environment variable:`);
4309
+ logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
4310
+ logger$4.log("\n Example docker run:");
4311
+ logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
4312
+ }
4313
+ return {
4314
+ imageRef,
4315
+ masterKey
4316
+ };
4138
4317
  }
4139
4318
  /**
4140
- * Prompt for input (reuse from deploy/index.ts pattern)
4319
+ * Resolve Docker deploy config from gkm config
4320
+ * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
4321
+ * - projectName: from root package.json, or 'app' (for Dokploy project)
4322
+ * - appName: from cwd package.json, or projectName (for Dokploy app within project)
4141
4323
  */
4142
- async function promptForToken(message) {
4143
- const { stdin, stdout } = await import("node:process");
4144
- if (!stdin.isTTY) throw new Error("Interactive input required for Hostinger token.");
4145
- stdout.write(message);
4146
- return new Promise((resolve$3) => {
4147
- let value = "";
4148
- const onData = (char) => {
4149
- const c = char.toString();
4150
- if (c === "\n" || c === "\r") {
4151
- stdin.setRawMode(false);
4152
- stdin.pause();
4153
- stdin.removeListener("data", onData);
4154
- stdout.write("\n");
4155
- resolve$3(value);
4156
- } else if (c === "") {
4157
- stdin.setRawMode(false);
4158
- stdin.pause();
4159
- stdout.write("\n");
4160
- process.exit(1);
4161
- } else if (c === "" || c === "\b") {
4162
- if (value.length > 0) value = value.slice(0, -1);
4163
- } else value += c;
4164
- };
4165
- stdin.setRawMode(true);
4166
- stdin.resume();
4167
- stdin.on("data", onData);
4168
- });
4324
+ function resolveDockerConfig(config) {
4325
+ const projectName = getAppNameFromPackageJson() ?? "app";
4326
+ const appName = getAppNameFromCwd$1() ?? projectName;
4327
+ const imageName = config.docker?.imageName ?? appName;
4328
+ return {
4329
+ registry: config.docker?.registry,
4330
+ imageName,
4331
+ projectName,
4332
+ appName
4333
+ };
4169
4334
  }
4335
+
4336
+ //#endregion
4337
+ //#region src/deploy/dokploy.ts
4338
+ const logger$3 = console;
4170
4339
  /**
4171
- * Create DNS records using the configured provider
4340
+ * Get the Dokploy API token from stored credentials or environment
4172
4341
  */
4173
- async function createDnsRecords(records, dnsConfig) {
4174
- const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
4175
- if (provider === "manual") return records.map((r) => ({
4176
- ...r,
4177
- created: false,
4178
- existed: false
4179
- }));
4180
- if (provider === "hostinger") return createHostingerRecords(records, rootDomain, ttl);
4181
- if (provider === "cloudflare") {
4182
- logger$3.log(" ⚠ Cloudflare DNS integration not yet implemented");
4183
- return records.map((r) => ({
4184
- ...r,
4185
- error: "Cloudflare not implemented"
4186
- }));
4187
- }
4188
- return records;
4342
+ async function getApiToken$1() {
4343
+ const token = await getDokployToken();
4344
+ if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
4345
+ return token;
4189
4346
  }
4190
4347
  /**
4191
- * Create DNS records at Hostinger
4348
+ * Create a Dokploy API client
4192
4349
  */
4193
- async function createHostingerRecords(records, rootDomain, ttl) {
4194
- let token = await getHostingerToken();
4195
- if (!token) {
4196
- logger$3.log("\n 📋 Hostinger API token not found.");
4197
- logger$3.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
4198
- try {
4199
- token = await promptForToken(" Hostinger API Token: ");
4200
- await storeHostingerToken(token);
4201
- logger$3.log(" ✓ Token saved");
4202
- } catch {
4203
- logger$3.log(" ⚠ Could not get token, skipping DNS creation");
4204
- return records.map((r) => ({
4205
- ...r,
4206
- error: "No API token"
4207
- }));
4208
- }
4209
- }
4210
- const api = new HostingerApi(token);
4211
- const results = [];
4212
- let existingRecords = [];
4213
- try {
4214
- existingRecords = await api.getRecords(rootDomain);
4215
- } catch (error) {
4216
- const message = error instanceof Error ? error.message : "Unknown error";
4217
- logger$3.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
4218
- return records.map((r) => ({
4219
- ...r,
4220
- error: message
4221
- }));
4222
- }
4223
- for (const record of records) {
4224
- const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
4225
- if (existing) {
4226
- results.push({
4227
- ...record,
4228
- existed: true,
4229
- created: false
4230
- });
4231
- continue;
4232
- }
4233
- try {
4234
- await api.upsertRecords(rootDomain, [{
4235
- name: record.subdomain,
4236
- type: "A",
4237
- ttl,
4238
- records: [{ content: record.value }]
4239
- }]);
4240
- results.push({
4241
- ...record,
4242
- created: true,
4243
- existed: false
4244
- });
4245
- } catch (error) {
4246
- const message = error instanceof Error ? error.message : "Unknown error";
4247
- results.push({
4248
- ...record,
4249
- error: message
4250
- });
4251
- }
4252
- }
4253
- return results;
4350
+ async function createApi$1(endpoint) {
4351
+ const token = await getApiToken$1();
4352
+ return new require_dokploy_api.DokployApi({
4353
+ baseUrl: endpoint,
4354
+ token
4355
+ });
4254
4356
  }
4255
4357
  /**
4256
- * Main DNS orchestration function for deployments
4358
+ * Deploy to Dokploy
4257
4359
  */
4258
- async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
4259
- if (!dnsConfig) return null;
4260
- const { domain: rootDomain, autoCreate = true } = dnsConfig;
4261
- logger$3.log("\n🌐 Setting up DNS records...");
4262
- let serverIp;
4263
- try {
4264
- const endpointUrl = new URL(dokployEndpoint);
4265
- serverIp = await resolveHostnameToIp(endpointUrl.hostname);
4266
- logger$3.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
4267
- } catch (error) {
4268
- const message = error instanceof Error ? error.message : "Unknown error";
4269
- logger$3.log(` ⚠ Failed to resolve server IP: ${message}`);
4270
- return null;
4360
+ async function deployDokploy(options) {
4361
+ const { stage, imageRef, masterKey, config } = options;
4362
+ logger$3.log(`\n🎯 Deploying to Dokploy...`);
4363
+ logger$3.log(` Endpoint: ${config.endpoint}`);
4364
+ logger$3.log(` Application: ${config.applicationId}`);
4365
+ const api = await createApi$1(config.endpoint);
4366
+ logger$3.log(` Configuring Docker image: ${imageRef}`);
4367
+ const registryOptions = {};
4368
+ if (config.registryId) {
4369
+ registryOptions.registryId = config.registryId;
4370
+ logger$3.log(` Using Dokploy registry: ${config.registryId}`);
4371
+ } else {
4372
+ const storedRegistryId = await getDokployRegistryId();
4373
+ if (storedRegistryId) {
4374
+ registryOptions.registryId = storedRegistryId;
4375
+ logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
4376
+ } else if (config.registryCredentials) {
4377
+ registryOptions.username = config.registryCredentials.username;
4378
+ registryOptions.password = config.registryCredentials.password;
4379
+ registryOptions.registryUrl = config.registryCredentials.registryUrl;
4380
+ logger$3.log(` Using registry credentials for: ${config.registryCredentials.registryUrl}`);
4381
+ } else {
4382
+ const username = process.env.DOCKER_REGISTRY_USERNAME;
4383
+ const password = process.env.DOCKER_REGISTRY_PASSWORD;
4384
+ const registryUrl = process.env.DOCKER_REGISTRY_URL || config.registry;
4385
+ if (username && password && registryUrl) {
4386
+ registryOptions.username = username;
4387
+ registryOptions.password = password;
4388
+ registryOptions.registryUrl = registryUrl;
4389
+ logger$3.log(` Using registry credentials from environment`);
4390
+ }
4391
+ }
4271
4392
  }
4272
- const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
4273
- if (requiredRecords.length === 0) {
4274
- logger$3.log(" No DNS records needed");
4275
- return {
4276
- records: [],
4277
- success: true,
4278
- serverIp
4279
- };
4393
+ await api.saveDockerProvider(config.applicationId, imageRef, registryOptions);
4394
+ logger$3.log(" ✓ Docker provider configured");
4395
+ const envVars = {};
4396
+ if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
4397
+ if (Object.keys(envVars).length > 0) {
4398
+ logger$3.log(" Updating environment variables...");
4399
+ const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
4400
+ await api.saveApplicationEnv(config.applicationId, envString);
4401
+ logger$3.log(" ✓ Environment variables updated");
4280
4402
  }
4281
- let finalRecords;
4282
- if (autoCreate && dnsConfig.provider !== "manual") {
4283
- logger$3.log(` Creating DNS records at ${dnsConfig.provider}...`);
4284
- finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
4285
- const created = finalRecords.filter((r) => r.created).length;
4286
- const existed = finalRecords.filter((r) => r.existed).length;
4287
- const failed = finalRecords.filter((r) => r.error).length;
4288
- if (created > 0) logger$3.log(` Created ${created} DNS record(s)`);
4289
- if (existed > 0) logger$3.log(` ✓ ${existed} record(s) already exist`);
4290
- if (failed > 0) logger$3.log(` ⚠ ${failed} record(s) failed`);
4291
- } else finalRecords = requiredRecords;
4292
- printDnsRecordsTable(finalRecords, rootDomain);
4293
- const hasFailures = finalRecords.some((r) => r.error);
4294
- if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
4403
+ logger$3.log(" Triggering deployment...");
4404
+ await api.deployApplication(config.applicationId);
4405
+ logger$3.log(" ✓ Deployment triggered");
4406
+ logger$3.log("\n✅ Dokploy deployment initiated!");
4407
+ logger$3.log(`\n📋 Deployment details:`);
4408
+ logger$3.log(` Image: ${imageRef}`);
4409
+ logger$3.log(` Stage: ${stage}`);
4410
+ logger$3.log(` Application ID: ${config.applicationId}`);
4411
+ if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
4412
+ const deploymentUrl = `${config.endpoint}/project/${config.projectId}`;
4413
+ logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
4295
4414
  return {
4296
- records: finalRecords,
4297
- success: !hasFailures,
4298
- serverIp
4415
+ imageRef,
4416
+ masterKey,
4417
+ url: deploymentUrl
4299
4418
  };
4300
4419
  }
4301
4420
 
@@ -4374,6 +4493,107 @@ function getPublicUrlArgNames(app) {
4374
4493
  return app.dependencies.map((dep) => `NEXT_PUBLIC_${dep.toUpperCase()}_URL`);
4375
4494
  }
4376
4495
 
4496
+ //#endregion
4497
+ //#region src/deploy/env-resolver.ts
4498
+ /**
4499
+ * Generate a secure random secret (64 hex characters = 32 bytes)
4500
+ */
4501
+ function generateSecret() {
4502
+ return (0, node_crypto.randomBytes)(32).toString("hex");
4503
+ }
4504
+ /**
4505
+ * Get or generate a secret for an app.
4506
+ * If the secret already exists in state, returns it.
4507
+ * Otherwise generates a new one and stores it.
4508
+ */
4509
+ function getOrGenerateSecret(state, appName, secretName) {
4510
+ const existing = getGeneratedSecret(state, appName, secretName);
4511
+ if (existing) return existing;
4512
+ const generated = generateSecret();
4513
+ setGeneratedSecret(state, appName, secretName, generated);
4514
+ return generated;
4515
+ }
4516
+ /**
4517
+ * Build a DATABASE_URL for an app with per-app credentials
4518
+ */
4519
+ function buildDatabaseUrl(credentials, postgres) {
4520
+ const { dbUser, dbPassword } = credentials;
4521
+ const { host, port, database } = postgres;
4522
+ return `postgresql://${encodeURIComponent(dbUser)}:${encodeURIComponent(dbPassword)}@${host}:${port}/${database}`;
4523
+ }
4524
+ /**
4525
+ * Build a REDIS_URL
4526
+ */
4527
+ function buildRedisUrl(redis) {
4528
+ const { host, port, password } = redis;
4529
+ if (password) return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
4530
+ return `redis://${host}:${port}`;
4531
+ }
4532
+ /**
4533
+ * Resolve a single environment variable
4534
+ */
4535
+ function resolveEnvVar(varName, context) {
4536
+ switch (varName) {
4537
+ case "PORT": return String(context.app.port);
4538
+ case "NODE_ENV": return context.stage === "production" ? "production" : "development";
4539
+ case "DATABASE_URL":
4540
+ if (context.appCredentials && context.postgres) return buildDatabaseUrl(context.appCredentials, context.postgres);
4541
+ break;
4542
+ case "REDIS_URL":
4543
+ if (context.redis) return buildRedisUrl(context.redis);
4544
+ break;
4545
+ case "BETTER_AUTH_URL": return `https://${context.appHostname}`;
4546
+ case "BETTER_AUTH_SECRET": return getOrGenerateSecret(context.state, context.appName, "BETTER_AUTH_SECRET");
4547
+ case "BETTER_AUTH_TRUSTED_ORIGINS":
4548
+ if (context.frontendUrls.length > 0) return context.frontendUrls.join(",");
4549
+ break;
4550
+ case "GKM_MASTER_KEY":
4551
+ if (context.masterKey) return context.masterKey;
4552
+ break;
4553
+ }
4554
+ if (context.userSecrets) {
4555
+ if (context.userSecrets.custom[varName]) return context.userSecrets.custom[varName];
4556
+ if (varName in context.userSecrets.urls) return context.userSecrets.urls[varName];
4557
+ if (varName === "POSTGRES_PASSWORD" && context.userSecrets.services.postgres) return context.userSecrets.services.postgres.password;
4558
+ if (varName === "REDIS_PASSWORD" && context.userSecrets.services.redis) return context.userSecrets.services.redis.password;
4559
+ }
4560
+ return void 0;
4561
+ }
4562
+ /**
4563
+ * Resolve all environment variables for an app
4564
+ */
4565
+ function resolveEnvVars(requiredVars, context) {
4566
+ const resolved = {};
4567
+ const missing = [];
4568
+ for (const varName of requiredVars) {
4569
+ const value = resolveEnvVar(varName, context);
4570
+ if (value !== void 0) resolved[varName] = value;
4571
+ else missing.push(varName);
4572
+ }
4573
+ return {
4574
+ resolved,
4575
+ missing
4576
+ };
4577
+ }
4578
+ /**
4579
+ * Format missing variables error message
4580
+ */
4581
+ function formatMissingVarsError(appName, missing, stage) {
4582
+ const varList = missing.map((v) => ` - ${v}`).join("\n");
4583
+ return `Deployment failed: ${appName} is missing required environment variables:\n${varList}\n\nAdd them with:\n gkm secrets:set <VAR_NAME> <value> --stage ${stage}\n\nOr add them to the app's requiredEnv in gkm.config.ts to have them auto-resolved.`;
4584
+ }
4585
+ /**
4586
+ * Validate that all required environment variables can be resolved
4587
+ */
4588
+ function validateEnvVars(requiredVars, context) {
4589
+ const { resolved, missing } = resolveEnvVars(requiredVars, context);
4590
+ return {
4591
+ valid: missing.length === 0,
4592
+ missing,
4593
+ resolved
4594
+ };
4595
+ }
4596
+
4377
4597
  //#endregion
4378
4598
  //#region src/deploy/init.ts
4379
4599
  const logger$2 = console;
@@ -4646,14 +4866,17 @@ function generateSecretsReport(encryptedApps, sniffedApps) {
4646
4866
 
4647
4867
  //#endregion
4648
4868
  //#region src/deploy/sniffer.ts
4869
+ const __filename$1 = (0, node_url.fileURLToPath)(require("url").pathToFileURL(__filename).href);
4870
+ const __dirname$1 = (0, node_path.dirname)(__filename$1);
4649
4871
  /**
4650
4872
  * Get required environment variables for an app.
4651
4873
  *
4652
- * Detection strategy:
4653
- * - Frontend apps: Returns empty (no server secrets)
4654
- * - Apps with `requiredEnv`: Uses explicit list from config
4655
- * - Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4656
- * - Apps with neither: Returns empty
4874
+ * Detection strategy (in order):
4875
+ * 1. Frontend apps: Returns empty (no server secrets)
4876
+ * 2. Apps with `requiredEnv`: Uses explicit list from config
4877
+ * 3. Entry apps: Imports entry file in subprocess to capture config.parse() calls
4878
+ * 4. Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
4879
+ * 5. Apps with neither: Returns empty
4657
4880
  *
4658
4881
  * This function handles "fire and forget" async operations gracefully,
4659
4882
  * capturing errors and unhandled rejections without failing the build.
@@ -4674,6 +4897,14 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4674
4897
  appName,
4675
4898
  requiredEnvVars: [...app.requiredEnv]
4676
4899
  };
4900
+ if (app.entry) {
4901
+ const result = await sniffEntryFile(app.entry, app.path, workspacePath);
4902
+ if (logWarnings && result.error) console.warn(`[sniffer] ${appName}: Entry file threw error during sniffing (env vars still captured): ${result.error.message}`);
4903
+ return {
4904
+ appName,
4905
+ requiredEnvVars: result.envVars
4906
+ };
4907
+ }
4677
4908
  if (app.envParser) {
4678
4909
  const result = await sniffEnvParser(app.envParser, app.path, workspacePath);
4679
4910
  if (logWarnings) {
@@ -4691,6 +4922,80 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
4691
4922
  };
4692
4923
  }
4693
4924
  /**
4925
+ * Sniff an entry file by importing it in a subprocess.
4926
+ *
4927
+ * Entry apps call `config.parse()` at module load time. To capture which
4928
+ * env vars are accessed, we:
4929
+ * 1. Spawn a subprocess with a module loader hook
4930
+ * 2. The loader intercepts `@geekmidas/envkit` and replaces EnvironmentParser
4931
+ * with SnifferEnvironmentParser
4932
+ * 3. Import the entry file (triggers config.parse())
4933
+ * 4. Capture and return the accessed env var names
4934
+ *
4935
+ * This approach provides process isolation - each app is sniffed in its own
4936
+ * subprocess, preventing module cache pollution.
4937
+ *
4938
+ * @param entryPath - Relative path to the entry file (e.g., './src/index.ts')
4939
+ * @param appPath - The app's path relative to workspace (e.g., 'apps/auth')
4940
+ * @param workspacePath - Absolute path to workspace root
4941
+ * @returns EntrySniffResult with env vars and optional error
4942
+ */
4943
+ async function sniffEntryFile(entryPath, appPath, workspacePath) {
4944
+ const fullEntryPath = (0, node_path.resolve)(workspacePath, appPath, entryPath);
4945
+ const loaderPath = (0, node_path.resolve)(__dirname$1, "sniffer-loader.ts");
4946
+ const workerPath = (0, node_path.resolve)(__dirname$1, "sniffer-worker.ts");
4947
+ return new Promise((resolvePromise) => {
4948
+ const child = (0, node_child_process.spawn)("node", [
4949
+ "--import",
4950
+ loaderPath,
4951
+ workerPath,
4952
+ fullEntryPath
4953
+ ], {
4954
+ cwd: (0, node_path.resolve)(workspacePath, appPath),
4955
+ stdio: [
4956
+ "ignore",
4957
+ "pipe",
4958
+ "pipe"
4959
+ ],
4960
+ env: {
4961
+ ...process.env,
4962
+ NODE_OPTIONS: "--import tsx"
4963
+ }
4964
+ });
4965
+ let stdout = "";
4966
+ let stderr = "";
4967
+ child.stdout.on("data", (data) => {
4968
+ stdout += data.toString();
4969
+ });
4970
+ child.stderr.on("data", (data) => {
4971
+ stderr += data.toString();
4972
+ });
4973
+ child.on("close", (code) => {
4974
+ try {
4975
+ const jsonMatch = stdout.match(/\{[^{}]*"envVars"[^{}]*\}[^{]*$/);
4976
+ if (jsonMatch) {
4977
+ const result = JSON.parse(jsonMatch[0]);
4978
+ resolvePromise({
4979
+ envVars: result.envVars || [],
4980
+ error: result.error ? new Error(result.error) : void 0
4981
+ });
4982
+ return;
4983
+ }
4984
+ } catch {}
4985
+ resolvePromise({
4986
+ envVars: [],
4987
+ error: new Error(`Failed to sniff entry file (exit code ${code}): ${stderr || stdout || "No output"}`)
4988
+ });
4989
+ });
4990
+ child.on("error", (err) => {
4991
+ resolvePromise({
4992
+ envVars: [],
4993
+ error: err
4994
+ });
4995
+ });
4996
+ });
4997
+ }
4998
+ /**
4694
4999
  * Run the SnifferEnvironmentParser on an envParser module to detect
4695
5000
  * which environment variables it accesses.
4696
5001
  *
@@ -4800,10 +5105,130 @@ async function prompt(message, hidden = false) {
4800
5105
  }
4801
5106
  }
4802
5107
  /**
5108
+ * Wait for Postgres to be ready to accept connections.
5109
+ *
5110
+ * Polls the Postgres server until it accepts a connection or max retries reached.
5111
+ * Used after enabling the external port to ensure the database is accessible
5112
+ * before creating users.
5113
+ *
5114
+ * @param host - The Postgres server hostname
5115
+ * @param port - The external port (typically 5432)
5116
+ * @param user - Master database user (postgres)
5117
+ * @param password - Master database password
5118
+ * @param database - Database name to connect to
5119
+ * @param maxRetries - Maximum number of connection attempts (default: 30)
5120
+ * @param retryIntervalMs - Milliseconds between retries (default: 2000)
5121
+ * @throws Error if Postgres is not ready after maxRetries
5122
+ */
5123
+ async function waitForPostgres(host, port, user, password, database, maxRetries = 30, retryIntervalMs = 2e3) {
5124
+ for (let i = 0; i < maxRetries; i++) try {
5125
+ const client = new pg.Client({
5126
+ host,
5127
+ port,
5128
+ user,
5129
+ password,
5130
+ database
5131
+ });
5132
+ await client.connect();
5133
+ await client.end();
5134
+ return;
5135
+ } catch {
5136
+ if (i < maxRetries - 1) {
5137
+ logger$1.log(` Waiting for Postgres... (${i + 1}/${maxRetries})`);
5138
+ await new Promise((r) => setTimeout(r, retryIntervalMs));
5139
+ }
5140
+ }
5141
+ throw new Error(`Postgres not ready after ${maxRetries} retries`);
5142
+ }
5143
+ /**
5144
+ * Initialize Postgres with per-app users and schemas.
5145
+ *
5146
+ * This function implements the same user/schema isolation pattern used in local
5147
+ * dev mode (see docker/postgres/init.sh). It:
5148
+ *
5149
+ * 1. Temporarily enables the external Postgres port
5150
+ * 2. Connects using master credentials
5151
+ * 3. Creates each user with appropriate schema permissions
5152
+ * 4. Disables the external port for security
5153
+ *
5154
+ * Schema assignment follows this pattern:
5155
+ * - `api` app: Uses `public` schema (shared tables, migrations run here)
5156
+ * - Other apps: Get their own schema with `search_path` configured
5157
+ *
5158
+ * @param api - The Dokploy API client
5159
+ * @param postgres - The provisioned Postgres service details
5160
+ * @param serverHostname - The Dokploy server hostname (for external connection)
5161
+ * @param users - Array of users to create with their schema configuration
5162
+ *
5163
+ * @example
5164
+ * ```ts
5165
+ * await initializePostgresUsers(api, postgres, 'dokploy.example.com', [
5166
+ * { name: 'api', password: 'xxx', usePublicSchema: true },
5167
+ * { name: 'auth', password: 'yyy', usePublicSchema: false },
5168
+ * ]);
5169
+ * ```
5170
+ */
5171
+ async function initializePostgresUsers(api, postgres, serverHostname, users) {
5172
+ logger$1.log("\n🔧 Initializing database users...");
5173
+ const externalPort = 5432;
5174
+ logger$1.log(` Enabling external port ${externalPort}...`);
5175
+ await api.savePostgresExternalPort(postgres.postgresId, externalPort);
5176
+ await api.deployPostgres(postgres.postgresId);
5177
+ logger$1.log(` Waiting for Postgres to be accessible at ${serverHostname}:${externalPort}...`);
5178
+ await waitForPostgres(serverHostname, externalPort, postgres.databaseUser, postgres.databasePassword, postgres.databaseName);
5179
+ const client = new pg.Client({
5180
+ host: serverHostname,
5181
+ port: externalPort,
5182
+ user: postgres.databaseUser,
5183
+ password: postgres.databasePassword,
5184
+ database: postgres.databaseName
5185
+ });
5186
+ try {
5187
+ await client.connect();
5188
+ for (const user of users) {
5189
+ const schemaName = user.usePublicSchema ? "public" : user.name;
5190
+ logger$1.log(` Creating user "${user.name}" with schema "${schemaName}"...`);
5191
+ await client.query(`
5192
+ DO $$ BEGIN
5193
+ CREATE USER "${user.name}" WITH PASSWORD '${user.password}';
5194
+ EXCEPTION WHEN duplicate_object THEN
5195
+ ALTER USER "${user.name}" WITH PASSWORD '${user.password}';
5196
+ END $$;
5197
+ `);
5198
+ if (user.usePublicSchema) await client.query(`
5199
+ GRANT ALL ON SCHEMA public TO "${user.name}";
5200
+ ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO "${user.name}";
5201
+ ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO "${user.name}";
5202
+ `);
5203
+ else await client.query(`
5204
+ CREATE SCHEMA IF NOT EXISTS "${schemaName}" AUTHORIZATION "${user.name}";
5205
+ ALTER USER "${user.name}" SET search_path TO "${schemaName}";
5206
+ GRANT USAGE ON SCHEMA "${schemaName}" TO "${user.name}";
5207
+ GRANT ALL ON ALL TABLES IN SCHEMA "${schemaName}" TO "${user.name}";
5208
+ ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT ALL ON TABLES TO "${user.name}";
5209
+ `);
5210
+ logger$1.log(` ✓ User "${user.name}" configured`);
5211
+ }
5212
+ } finally {
5213
+ await client.end();
5214
+ }
5215
+ logger$1.log(" Disabling external port...");
5216
+ await api.savePostgresExternalPort(postgres.postgresId, null);
5217
+ await api.deployPostgres(postgres.postgresId);
5218
+ logger$1.log(" ✓ Database users initialized");
5219
+ }
5220
+ /**
5221
+ * Get the server hostname from the Dokploy endpoint URL
5222
+ */
5223
+ function getServerHostname(endpoint) {
5224
+ const url = new URL(endpoint);
5225
+ return url.hostname;
5226
+ }
5227
+ /**
4803
5228
  * Provision docker compose services in Dokploy
4804
5229
  * @internal Exported for testing
4805
5230
  */
4806
- async function provisionServices(api, projectId, environmentId, appName, services, existingServiceIds) {
5231
+ async function provisionServices(api, projectId, environmentId, projectName, services, existingServiceIds) {
4807
5232
  logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
4808
5233
  if (!services || !environmentId) {
4809
5234
  logger$1.log(" Skipping: no services or no environmentId");
@@ -4824,9 +5249,12 @@ async function provisionServices(api, projectId, environmentId, appName, service
4824
5249
  else logger$1.log(` ⚠ Cached ID invalid, will create new`);
4825
5250
  }
4826
5251
  if (!postgres) {
4827
- const { randomBytes: randomBytes$1 } = await import("node:crypto");
4828
- const databasePassword = randomBytes$1(16).toString("hex");
4829
- const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, { databasePassword });
5252
+ const databasePassword = (0, node_crypto.randomBytes)(16).toString("hex");
5253
+ const databaseName = projectName.replace(/-/g, "_");
5254
+ const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, {
5255
+ databaseName,
5256
+ databasePassword
5257
+ });
4830
5258
  postgres = result.postgres;
4831
5259
  created = result.created;
4832
5260
  if (created) {
@@ -4861,8 +5289,8 @@ async function provisionServices(api, projectId, environmentId, appName, service
4861
5289
  else logger$1.log(` ⚠ Cached ID invalid, will create new`);
4862
5290
  }
4863
5291
  if (!redis) {
4864
- const { randomBytes: randomBytes$1 } = await import("node:crypto");
4865
- const databasePassword = randomBytes$1(16).toString("hex");
5292
+ const { randomBytes: randomBytes$3 } = await import("node:crypto");
5293
+ const databasePassword = randomBytes$3(16).toString("hex");
4866
5294
  const result = await api.findOrCreateRedis(redisName, projectId, environmentId, { databasePassword });
4867
5295
  redis = result.redis;
4868
5296
  created = result.created;
@@ -4894,12 +5322,6 @@ async function provisionServices(api, projectId, environmentId, appName, service
4894
5322
  */
4895
5323
  async function ensureDokploySetup(config, dockerConfig, stage, services) {
4896
5324
  logger$1.log("\n🔧 Checking Dokploy setup...");
4897
- const { readStageSecrets: readStageSecrets$1 } = await Promise.resolve().then(() => require("./storage-fOR8dMu5.cjs"));
4898
- const existingSecrets = await readStageSecrets$1(stage);
4899
- const existingUrls = {
4900
- DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
4901
- REDIS_URL: existingSecrets?.urls?.REDIS_URL
4902
- };
4903
5325
  let creds = await getDokployCredentials();
4904
5326
  if (!creds) {
4905
5327
  logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
@@ -5225,6 +5647,8 @@ async function workspaceDeployCommand(workspace, options) {
5225
5647
  postgres: services.db !== void 0 && services.db !== false,
5226
5648
  redis: services.cache !== void 0 && services.cache !== false
5227
5649
  };
5650
+ let provisionedPostgres = null;
5651
+ let provisionedRedis = null;
5228
5652
  if (dockerServices.postgres || dockerServices.redis) {
5229
5653
  logger$1.log("\n🔧 Provisioning infrastructure services...");
5230
5654
  const existingServiceIds = {
@@ -5233,17 +5657,64 @@ async function workspaceDeployCommand(workspace, options) {
5233
5657
  };
5234
5658
  const provisionResult = await provisionServices(api, project.projectId, environmentId, workspace.name, dockerServices, existingServiceIds);
5235
5659
  if (provisionResult?.serviceIds) {
5236
- if (provisionResult.serviceIds.postgresId) setPostgresId(state, provisionResult.serviceIds.postgresId);
5237
- if (provisionResult.serviceIds.redisId) setRedisId(state, provisionResult.serviceIds.redisId);
5660
+ if (provisionResult.serviceIds.postgresId) {
5661
+ setPostgresId(state, provisionResult.serviceIds.postgresId);
5662
+ provisionedPostgres = await api.getPostgres(provisionResult.serviceIds.postgresId);
5663
+ }
5664
+ if (provisionResult.serviceIds.redisId) {
5665
+ setRedisId(state, provisionResult.serviceIds.redisId);
5666
+ provisionedRedis = await api.getRedis(provisionResult.serviceIds.redisId);
5667
+ }
5238
5668
  }
5239
5669
  }
5240
5670
  const backendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "backend");
5241
5671
  const frontendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "frontend");
5672
+ const perAppDbCredentials = /* @__PURE__ */ new Map();
5673
+ if (provisionedPostgres && backendApps.length > 0) {
5674
+ const appsNeedingDb = backendApps.filter((appName) => {
5675
+ const requirements = sniffedApps.get(appName);
5676
+ return requirements?.requiredEnvVars.includes("DATABASE_URL");
5677
+ });
5678
+ if (appsNeedingDb.length > 0) {
5679
+ logger$1.log(`\n🔐 Setting up per-app database credentials...`);
5680
+ logger$1.log(` Apps needing DATABASE_URL: ${appsNeedingDb.join(", ")}`);
5681
+ const existingCredentials = getAllAppCredentials(state);
5682
+ const usersToCreate = [];
5683
+ for (const appName of appsNeedingDb) {
5684
+ let credentials = existingCredentials[appName];
5685
+ if (credentials) logger$1.log(` ${appName}: Using existing credentials from state`);
5686
+ else {
5687
+ const password = (0, node_crypto.randomBytes)(16).toString("hex");
5688
+ credentials = {
5689
+ dbUser: appName,
5690
+ dbPassword: password
5691
+ };
5692
+ setAppCredentials(state, appName, credentials);
5693
+ logger$1.log(` ${appName}: Generated new credentials`);
5694
+ }
5695
+ perAppDbCredentials.set(appName, credentials);
5696
+ usersToCreate.push({
5697
+ name: appName,
5698
+ password: credentials.dbPassword,
5699
+ usePublicSchema: appName === "api"
5700
+ });
5701
+ }
5702
+ const serverHostname = getServerHostname(creds.endpoint);
5703
+ await initializePostgresUsers(api, provisionedPostgres, serverHostname, usersToCreate);
5704
+ }
5705
+ }
5242
5706
  const publicUrls = {};
5243
5707
  const results = [];
5244
5708
  const dokployConfig = workspace.deploy.dokploy;
5245
5709
  const appHostnames = /* @__PURE__ */ new Map();
5246
5710
  const appDomainIds = /* @__PURE__ */ new Map();
5711
+ const frontendUrls = [];
5712
+ for (const appName of frontendApps) {
5713
+ const app = workspace.apps[appName];
5714
+ const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5715
+ const hostname = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5716
+ frontendUrls.push(`https://${hostname}`);
5717
+ }
5247
5718
  if (backendApps.length > 0) {
5248
5719
  logger$1.log("\n📦 PHASE 1: Deploying backend applications...");
5249
5720
  for (const appName of backendApps) {
@@ -5287,14 +5758,46 @@ async function workspaceDeployCommand(workspace, options) {
5287
5758
  },
5288
5759
  buildArgs
5289
5760
  });
5290
- const envVars = [`NODE_ENV=production`, `PORT=${app.port}`];
5291
- if (appSecrets && appSecrets.masterKey) envVars.push(`GKM_MASTER_KEY=${appSecrets.masterKey}`);
5761
+ const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5762
+ const envContext = {
5763
+ app,
5764
+ appName,
5765
+ stage,
5766
+ state,
5767
+ appCredentials: perAppDbCredentials.get(appName),
5768
+ postgres: provisionedPostgres ? {
5769
+ host: provisionedPostgres.appName,
5770
+ port: 5432,
5771
+ database: provisionedPostgres.databaseName
5772
+ } : void 0,
5773
+ redis: provisionedRedis ? {
5774
+ host: provisionedRedis.appName,
5775
+ port: 6379,
5776
+ password: provisionedRedis.databasePassword
5777
+ } : void 0,
5778
+ appHostname: backendHost,
5779
+ frontendUrls,
5780
+ userSecrets: stageSecrets ?? void 0,
5781
+ masterKey: appSecrets?.masterKey
5782
+ };
5783
+ const appRequirements = sniffedApps.get(appName);
5784
+ const requiredVars = appRequirements?.requiredEnvVars ?? [];
5785
+ const { valid, missing, resolved } = validateEnvVars(requiredVars, envContext);
5786
+ if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
5787
+ const envVars = Object.entries(resolved).map(([key, value]) => `${key}=${value}`);
5788
+ if (Object.keys(resolved).length > 0) logger$1.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
5292
5789
  await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
5293
5790
  await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
5294
5791
  logger$1.log(` Deploying to Dokploy...`);
5295
5792
  await api.deployApplication(application.applicationId);
5296
- const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
5297
- try {
5793
+ const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5794
+ const existingDomain = existingDomains.find((d) => d.host === backendHost);
5795
+ if (existingDomain) {
5796
+ appHostnames.set(appName, backendHost);
5797
+ appDomainIds.set(appName, existingDomain.domainId);
5798
+ publicUrls[appName] = `https://${backendHost}`;
5799
+ logger$1.log(` ✓ Domain: https://${backendHost} (existing)`);
5800
+ } else try {
5298
5801
  const domain = await api.createDomain({
5299
5802
  host: backendHost,
5300
5803
  port: app.port,
@@ -5304,18 +5807,13 @@ async function workspaceDeployCommand(workspace, options) {
5304
5807
  });
5305
5808
  appHostnames.set(appName, backendHost);
5306
5809
  appDomainIds.set(appName, domain.domainId);
5307
- const publicUrl = `https://${backendHost}`;
5308
- publicUrls[appName] = publicUrl;
5309
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5810
+ publicUrls[appName] = `https://${backendHost}`;
5811
+ logger$1.log(` ✓ Domain: https://${backendHost} (created)`);
5310
5812
  } catch (domainError) {
5813
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5814
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5311
5815
  appHostnames.set(appName, backendHost);
5312
- try {
5313
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5314
- const matchingDomain = existingDomains.find((d) => d.host === backendHost);
5315
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5316
- } catch {}
5317
5816
  publicUrls[appName] = `https://${backendHost}`;
5318
- logger$1.log(` ℹ Domain already configured: https://${backendHost}`);
5319
5817
  }
5320
5818
  results.push({
5321
5819
  appName,
@@ -5384,7 +5882,14 @@ async function workspaceDeployCommand(workspace, options) {
5384
5882
  await api.deployApplication(application.applicationId);
5385
5883
  const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
5386
5884
  const frontendHost = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
5387
- try {
5885
+ const existingFrontendDomains = await api.getDomainsByApplicationId(application.applicationId);
5886
+ const existingFrontendDomain = existingFrontendDomains.find((d) => d.host === frontendHost);
5887
+ if (existingFrontendDomain) {
5888
+ appHostnames.set(appName, frontendHost);
5889
+ appDomainIds.set(appName, existingFrontendDomain.domainId);
5890
+ publicUrls[appName] = `https://${frontendHost}`;
5891
+ logger$1.log(` ✓ Domain: https://${frontendHost} (existing)`);
5892
+ } else try {
5388
5893
  const domain = await api.createDomain({
5389
5894
  host: frontendHost,
5390
5895
  port: app.port,
@@ -5394,18 +5899,13 @@ async function workspaceDeployCommand(workspace, options) {
5394
5899
  });
5395
5900
  appHostnames.set(appName, frontendHost);
5396
5901
  appDomainIds.set(appName, domain.domainId);
5397
- const publicUrl = `https://${frontendHost}`;
5398
- publicUrls[appName] = publicUrl;
5399
- logger$1.log(` ✓ Domain: ${publicUrl}`);
5902
+ publicUrls[appName] = `https://${frontendHost}`;
5903
+ logger$1.log(` ✓ Domain: https://${frontendHost} (created)`);
5400
5904
  } catch (domainError) {
5905
+ const message = domainError instanceof Error ? domainError.message : "Unknown error";
5906
+ logger$1.log(` ⚠ Domain creation failed: ${message}`);
5401
5907
  appHostnames.set(appName, frontendHost);
5402
- try {
5403
- const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
5404
- const matchingDomain = existingDomains.find((d) => d.host === frontendHost);
5405
- if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
5406
- } catch {}
5407
5908
  publicUrls[appName] = `https://${frontendHost}`;
5408
- logger$1.log(` ℹ Domain already configured: https://${frontendHost}`);
5409
5909
  }
5410
5910
  results.push({
5411
5911
  appName,
@@ -5433,6 +5933,10 @@ async function workspaceDeployCommand(workspace, options) {
5433
5933
  const dnsConfig = workspace.deploy.dns;
5434
5934
  if (dnsConfig && appHostnames.size > 0) {
5435
5935
  const dnsResult = await orchestrateDns(appHostnames, dnsConfig, creds.endpoint);
5936
+ if (dnsResult?.serverIp && appHostnames.size > 0) {
5937
+ await verifyDnsRecords(appHostnames, dnsResult.serverIp, state);
5938
+ await writeStageState(workspace.root, stage, state);
5939
+ }
5436
5940
  if (dnsResult?.success && appHostnames.size > 0) {
5437
5941
  logger$1.log("\n🔒 Validating domains for SSL certificates...");
5438
5942
  for (const [appName, hostname] of appHostnames) try {
@@ -5733,10 +6237,10 @@ const GEEKMIDAS_VERSIONS = {
5733
6237
  "@geekmidas/cli": CLI_VERSION,
5734
6238
  "@geekmidas/client": "~0.5.0",
5735
6239
  "@geekmidas/cloud": "~0.2.0",
5736
- "@geekmidas/constructs": "~0.7.0",
6240
+ "@geekmidas/constructs": "~0.8.0",
5737
6241
  "@geekmidas/db": "~0.3.0",
5738
6242
  "@geekmidas/emailkit": "~0.2.0",
5739
- "@geekmidas/envkit": "~0.6.0",
6243
+ "@geekmidas/envkit": "~0.7.0",
5740
6244
  "@geekmidas/errors": "~0.1.0",
5741
6245
  "@geekmidas/events": "~0.2.0",
5742
6246
  "@geekmidas/logger": "~0.4.0",
@@ -5745,7 +6249,7 @@ const GEEKMIDAS_VERSIONS = {
5745
6249
  "@geekmidas/services": "~0.2.0",
5746
6250
  "@geekmidas/storage": "~0.1.0",
5747
6251
  "@geekmidas/studio": "~0.4.0",
5748
- "@geekmidas/telescope": "~0.5.0",
6252
+ "@geekmidas/telescope": "~0.6.0",
5749
6253
  "@geekmidas/testkit": "~0.6.0"
5750
6254
  };
5751
6255