@geekmidas/cli 1.8.0 → 1.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/dist/{HostingerProvider-BiXdHjiq.cjs → HostingerProvider-5KYmwoK2.cjs} +1 -1
- package/dist/{HostingerProvider-BiXdHjiq.cjs.map → HostingerProvider-5KYmwoK2.cjs.map} +1 -1
- package/dist/{HostingerProvider-402UdK89.mjs → HostingerProvider-ANWchdiK.mjs} +1 -1
- package/dist/{HostingerProvider-402UdK89.mjs.map → HostingerProvider-ANWchdiK.mjs.map} +1 -1
- package/dist/{LocalStateProvider-CdspeSVL.cjs → LocalStateProvider-CLifRC0Y.cjs} +1 -1
- package/dist/{LocalStateProvider-CdspeSVL.cjs.map → LocalStateProvider-CLifRC0Y.cjs.map} +1 -1
- package/dist/{LocalStateProvider-BDm7ZqJo.mjs → LocalStateProvider-Dp0KkRcw.mjs} +1 -1
- package/dist/{LocalStateProvider-BDm7ZqJo.mjs.map → LocalStateProvider-Dp0KkRcw.mjs.map} +1 -1
- package/dist/{Route53Provider-DbBo7Uz5.mjs → Route53Provider-QoPgcXxn.mjs} +1 -1
- package/dist/{Route53Provider-DbBo7Uz5.mjs.map → Route53Provider-QoPgcXxn.mjs.map} +1 -1
- package/dist/{Route53Provider-kfJ77LmL.cjs → Route53Provider-owQQ4pn6.cjs} +1 -1
- package/dist/{Route53Provider-kfJ77LmL.cjs.map → Route53Provider-owQQ4pn6.cjs.map} +1 -1
- package/dist/{SSMStateProvider-DGrqYll0.cjs → SSMStateProvider-CT8tjl9o.cjs} +1 -1
- package/dist/{SSMStateProvider-DGrqYll0.cjs.map → SSMStateProvider-CT8tjl9o.cjs.map} +1 -1
- package/dist/{SSMStateProvider-DT0WV-E_.mjs → SSMStateProvider-CksOTB8M.mjs} +1 -1
- package/dist/{SSMStateProvider-DT0WV-E_.mjs.map → SSMStateProvider-CksOTB8M.mjs.map} +1 -1
- package/dist/{backup-provisioner-BIArpmTr.mjs → backup-provisioner-BEXoHTuC.mjs} +1 -1
- package/dist/{backup-provisioner-BIArpmTr.mjs.map → backup-provisioner-BEXoHTuC.mjs.map} +1 -1
- package/dist/{backup-provisioner-B5e-F6zX.cjs → backup-provisioner-C4noe75O.cjs} +1 -1
- package/dist/{backup-provisioner-B5e-F6zX.cjs.map → backup-provisioner-C4noe75O.cjs.map} +1 -1
- package/dist/{bundler-DgXsOSxc.mjs → bundler-DQYjKFPm.mjs} +1 -1
- package/dist/{bundler-DgXsOSxc.mjs.map → bundler-DQYjKFPm.mjs.map} +1 -1
- package/dist/{bundler-tHLLwYuU.cjs → bundler-NpfYPBUo.cjs} +1 -1
- package/dist/{bundler-tHLLwYuU.cjs.map → bundler-NpfYPBUo.cjs.map} +1 -1
- package/dist/config.d.mts +2 -2
- package/dist/fullstack-secrets-COWz084x.cjs +238 -0
- package/dist/fullstack-secrets-COWz084x.cjs.map +1 -0
- package/dist/fullstack-secrets-UZAFWuH4.mjs +202 -0
- package/dist/fullstack-secrets-UZAFWuH4.mjs.map +1 -0
- package/dist/{index-C-KxSGGK.d.mts → index-Bt2kX0-R.d.mts} +2 -2
- package/dist/{index-C-KxSGGK.d.mts.map → index-Bt2kX0-R.d.mts.map} +1 -1
- package/dist/index.cjs +1063 -730
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +1054 -721
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi-react-query-DaTMSPD5.mjs → openapi-react-query-C4UdILaI.mjs} +1 -1
- package/dist/{openapi-react-query-DaTMSPD5.mjs.map → openapi-react-query-C4UdILaI.mjs.map} +1 -1
- package/dist/{openapi-react-query-BeXvk-wa.cjs → openapi-react-query-DYbBq-WJ.cjs} +1 -1
- package/dist/{openapi-react-query-BeXvk-wa.cjs.map → openapi-react-query-DYbBq-WJ.cjs.map} +1 -1
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.d.mts +1 -1
- package/dist/reconcile-7yarEvmK.cjs +36 -0
- package/dist/reconcile-7yarEvmK.cjs.map +1 -0
- package/dist/reconcile-D2WCDQue.mjs +36 -0
- package/dist/reconcile-D2WCDQue.mjs.map +1 -0
- package/dist/sync-6FoT41G3.mjs +3 -0
- package/dist/sync-CbeKrnQV.mjs +76 -0
- package/dist/sync-CbeKrnQV.mjs.map +1 -0
- package/dist/sync-DdkKaHqP.cjs +93 -0
- package/dist/sync-DdkKaHqP.cjs.map +1 -0
- package/dist/sync-RsnjXYwG.cjs +4 -0
- package/dist/{types-CZg5iUgD.d.mts → types-wXMIMOyK.d.mts} +1 -1
- package/dist/{types-CZg5iUgD.d.mts.map → types-wXMIMOyK.d.mts.map} +1 -1
- package/dist/workspace/index.d.mts +2 -2
- package/package.json +5 -5
- package/src/dev/__tests__/index.spec.ts +49 -0
- package/src/dev/index.ts +85 -64
- package/src/generators/SubscriberGenerator.ts +1 -0
- package/src/index.ts +171 -0
- package/src/init/index.ts +4 -23
- package/src/init/utils.ts +103 -2
- package/src/init/versions.ts +4 -4
- package/src/secrets/__tests__/reconcile.spec.ts +123 -0
- package/src/secrets/index.ts +20 -1
- package/src/secrets/reconcile.ts +53 -0
- package/src/secrets/sync.ts +136 -0
- package/src/setup/fullstack-secrets.ts +123 -0
- package/src/setup/index.ts +212 -0
- package/src/test/__tests__/web.spec.ts +1 -1
- package/src/upgrade/__tests__/index.spec.ts +354 -0
- package/src/upgrade/index.ts +253 -0
package/dist/index.mjs
CHANGED
|
@@ -8,7 +8,9 @@ import { getKeyPath, maskPassword, readStageSecrets, secretsExist, setCustomSecr
|
|
|
8
8
|
import { DokployApi } from "./dokploy-api-2ldYoN3i.mjs";
|
|
9
9
|
import { encryptSecrets } from "./encryption-BOH5M-f-.mjs";
|
|
10
10
|
import { CachedStateProvider } from "./CachedStateProvider-BDq5WqSy.mjs";
|
|
11
|
-
import {
|
|
11
|
+
import { createStageSecrets, generateDbPassword, generateDbUrl, generateFullstackCustomSecrets, rotateServicePassword, writeDockerEnvFromSecrets } from "./fullstack-secrets-UZAFWuH4.mjs";
|
|
12
|
+
import { generateReactQueryCommand } from "./openapi-react-query-C4UdILaI.mjs";
|
|
13
|
+
import { isSSMConfigured, pullSecrets, pushSecrets } from "./sync-CbeKrnQV.mjs";
|
|
12
14
|
import { createRequire } from "node:module";
|
|
13
15
|
import { copyFileSync, existsSync, readFileSync, unlinkSync } from "node:fs";
|
|
14
16
|
import { basename, dirname, join, parse, relative, resolve } from "node:path";
|
|
@@ -33,7 +35,7 @@ import prompts from "prompts";
|
|
|
33
35
|
|
|
34
36
|
//#region package.json
|
|
35
37
|
var name = "@geekmidas/cli";
|
|
36
|
-
var version = "1.
|
|
38
|
+
var version = "1.9.0";
|
|
37
39
|
var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
|
|
38
40
|
var private$1 = false;
|
|
39
41
|
var type = "module";
|
|
@@ -131,7 +133,7 @@ var package_default = {
|
|
|
131
133
|
|
|
132
134
|
//#endregion
|
|
133
135
|
//#region src/auth/index.ts
|
|
134
|
-
const logger$
|
|
136
|
+
const logger$13 = console;
|
|
135
137
|
/**
|
|
136
138
|
* Validate Dokploy token by making a test API call
|
|
137
139
|
*/
|
|
@@ -199,36 +201,36 @@ async function prompt$1(message, hidden = false) {
|
|
|
199
201
|
async function loginCommand(options) {
|
|
200
202
|
const { service, token: providedToken, endpoint: providedEndpoint } = options;
|
|
201
203
|
if (service === "dokploy") {
|
|
202
|
-
logger$
|
|
204
|
+
logger$13.log("\n🔐 Logging in to Dokploy...\n");
|
|
203
205
|
let endpoint = providedEndpoint;
|
|
204
206
|
if (!endpoint) endpoint = await prompt$1("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
205
207
|
endpoint = endpoint.replace(/\/$/, "");
|
|
206
208
|
try {
|
|
207
209
|
new URL(endpoint);
|
|
208
210
|
} catch {
|
|
209
|
-
logger$
|
|
211
|
+
logger$13.error("Invalid URL format");
|
|
210
212
|
process.exit(1);
|
|
211
213
|
}
|
|
212
214
|
let token = providedToken;
|
|
213
215
|
if (!token) {
|
|
214
|
-
logger$
|
|
216
|
+
logger$13.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
|
|
215
217
|
token = await prompt$1("API Token: ", true);
|
|
216
218
|
}
|
|
217
219
|
if (!token) {
|
|
218
|
-
logger$
|
|
220
|
+
logger$13.error("Token is required");
|
|
219
221
|
process.exit(1);
|
|
220
222
|
}
|
|
221
|
-
logger$
|
|
223
|
+
logger$13.log("\nValidating credentials...");
|
|
222
224
|
const isValid = await validateDokployToken(endpoint, token);
|
|
223
225
|
if (!isValid) {
|
|
224
|
-
logger$
|
|
226
|
+
logger$13.error("\n✗ Invalid credentials. Please check your token and try again.");
|
|
225
227
|
process.exit(1);
|
|
226
228
|
}
|
|
227
229
|
await storeDokployCredentials(token, endpoint);
|
|
228
|
-
logger$
|
|
229
|
-
logger$
|
|
230
|
-
logger$
|
|
231
|
-
logger$
|
|
230
|
+
logger$13.log("\n✓ Successfully logged in to Dokploy!");
|
|
231
|
+
logger$13.log(` Endpoint: ${endpoint}`);
|
|
232
|
+
logger$13.log(` Credentials stored in: ${getCredentialsPath()}`);
|
|
233
|
+
logger$13.log("\nYou can now use deploy commands without setting DOKPLOY_API_TOKEN.");
|
|
232
234
|
}
|
|
233
235
|
}
|
|
234
236
|
/**
|
|
@@ -238,28 +240,28 @@ async function logoutCommand(options) {
|
|
|
238
240
|
const { service = "dokploy" } = options;
|
|
239
241
|
if (service === "all") {
|
|
240
242
|
const dokployRemoved = await removeDokployCredentials();
|
|
241
|
-
if (dokployRemoved) logger$
|
|
242
|
-
else logger$
|
|
243
|
+
if (dokployRemoved) logger$13.log("\n✓ Logged out from all services");
|
|
244
|
+
else logger$13.log("\nNo stored credentials found");
|
|
243
245
|
return;
|
|
244
246
|
}
|
|
245
247
|
if (service === "dokploy") {
|
|
246
248
|
const removed = await removeDokployCredentials();
|
|
247
|
-
if (removed) logger$
|
|
248
|
-
else logger$
|
|
249
|
+
if (removed) logger$13.log("\n✓ Logged out from Dokploy");
|
|
250
|
+
else logger$13.log("\nNo Dokploy credentials found");
|
|
249
251
|
}
|
|
250
252
|
}
|
|
251
253
|
/**
|
|
252
254
|
* Show current login status
|
|
253
255
|
*/
|
|
254
256
|
async function whoamiCommand() {
|
|
255
|
-
logger$
|
|
257
|
+
logger$13.log("\n📋 Current credentials:\n");
|
|
256
258
|
const dokploy = await getDokployCredentials();
|
|
257
259
|
if (dokploy) {
|
|
258
|
-
logger$
|
|
259
|
-
logger$
|
|
260
|
-
logger$
|
|
261
|
-
} else logger$
|
|
262
|
-
logger$
|
|
260
|
+
logger$13.log(" Dokploy:");
|
|
261
|
+
logger$13.log(` Endpoint: ${dokploy.endpoint}`);
|
|
262
|
+
logger$13.log(` Token: ${maskToken(dokploy.token)}`);
|
|
263
|
+
} else logger$13.log(" Dokploy: Not logged in");
|
|
264
|
+
logger$13.log(`\n Credentials file: ${getCredentialsPath()}`);
|
|
263
265
|
}
|
|
264
266
|
/**
|
|
265
267
|
* Mask a token for display
|
|
@@ -345,7 +347,7 @@ function isEnabled(config$1) {
|
|
|
345
347
|
var CronGenerator = class extends ConstructGenerator {
|
|
346
348
|
async build(context, constructs, outputDir, options) {
|
|
347
349
|
const provider = options?.provider || "aws-lambda";
|
|
348
|
-
const logger$
|
|
350
|
+
const logger$14 = console;
|
|
349
351
|
const cronInfos = [];
|
|
350
352
|
if (constructs.length === 0 || provider !== "aws-lambda") return cronInfos;
|
|
351
353
|
const cronsDir = join(outputDir, "crons");
|
|
@@ -360,7 +362,7 @@ var CronGenerator = class extends ConstructGenerator {
|
|
|
360
362
|
memorySize: construct.memorySize,
|
|
361
363
|
environment: await construct.getEnvironment()
|
|
362
364
|
});
|
|
363
|
-
logger$
|
|
365
|
+
logger$14.log(`Generated cron handler: ${key}`);
|
|
364
366
|
}
|
|
365
367
|
return cronInfos;
|
|
366
368
|
}
|
|
@@ -396,7 +398,7 @@ var FunctionGenerator = class extends ConstructGenerator {
|
|
|
396
398
|
}
|
|
397
399
|
async build(context, constructs, outputDir, options) {
|
|
398
400
|
const provider = options?.provider || "aws-lambda";
|
|
399
|
-
const logger$
|
|
401
|
+
const logger$14 = console;
|
|
400
402
|
const functionInfos = [];
|
|
401
403
|
if (constructs.length === 0 || provider !== "aws-lambda") return functionInfos;
|
|
402
404
|
const functionsDir = join(outputDir, "functions");
|
|
@@ -410,7 +412,7 @@ var FunctionGenerator = class extends ConstructGenerator {
|
|
|
410
412
|
memorySize: construct.memorySize,
|
|
411
413
|
environment: await construct.getEnvironment()
|
|
412
414
|
});
|
|
413
|
-
logger$
|
|
415
|
+
logger$14.log(`Generated function handler: ${key}`);
|
|
414
416
|
}
|
|
415
417
|
return functionInfos;
|
|
416
418
|
}
|
|
@@ -443,11 +445,11 @@ var SubscriberGenerator = class extends ConstructGenerator {
|
|
|
443
445
|
}
|
|
444
446
|
async build(context, constructs, outputDir, options) {
|
|
445
447
|
const provider = options?.provider || "aws-lambda";
|
|
446
|
-
const logger$
|
|
448
|
+
const logger$14 = console;
|
|
447
449
|
const subscriberInfos = [];
|
|
448
450
|
if (provider === "server") {
|
|
449
451
|
await this.generateServerSubscribersFile(outputDir, constructs);
|
|
450
|
-
logger$
|
|
452
|
+
logger$14.log(`Generated server subscribers file with ${constructs.length} subscribers (polling mode)`);
|
|
451
453
|
return subscriberInfos;
|
|
452
454
|
}
|
|
453
455
|
if (constructs.length === 0) return subscriberInfos;
|
|
@@ -464,7 +466,7 @@ var SubscriberGenerator = class extends ConstructGenerator {
|
|
|
464
466
|
memorySize: construct.memorySize,
|
|
465
467
|
environment: await construct.getEnvironment()
|
|
466
468
|
});
|
|
467
|
-
logger$
|
|
469
|
+
logger$14.log(`Generated subscriber handler: ${key}`);
|
|
468
470
|
}
|
|
469
471
|
return subscriberInfos;
|
|
470
472
|
}
|
|
@@ -519,6 +521,7 @@ export const handler = adapter.handler;
|
|
|
519
521
|
* - sqs://region/account-id/queue-name (SQS queue)
|
|
520
522
|
* - sns://region/account-id/topic-name (SNS topic)
|
|
521
523
|
* - rabbitmq://host:port/queue-name (RabbitMQ)
|
|
524
|
+
* - pgboss://user:pass@host:port/database (pg-boss / PostgreSQL)
|
|
522
525
|
* - basic://in-memory (In-memory for testing)
|
|
523
526
|
*/
|
|
524
527
|
import type { EnvironmentParser } from '@geekmidas/envkit';
|
|
@@ -630,7 +633,7 @@ export async function setupSubscribers(
|
|
|
630
633
|
|
|
631
634
|
//#endregion
|
|
632
635
|
//#region src/workspace/client-generator.ts
|
|
633
|
-
const logger$
|
|
636
|
+
const logger$12 = console;
|
|
634
637
|
/**
|
|
635
638
|
* Get frontend apps that depend on a backend app.
|
|
636
639
|
*/
|
|
@@ -659,7 +662,7 @@ function countEndpoints(content) {
|
|
|
659
662
|
* Called when the backend's .gkm/openapi.ts file changes.
|
|
660
663
|
*/
|
|
661
664
|
async function copyClientToFrontends(workspace, backendAppName, options = {}) {
|
|
662
|
-
const log = options.silent ? () => {} : logger$
|
|
665
|
+
const log = options.silent ? () => {} : logger$12.log.bind(logger$12);
|
|
663
666
|
const results = [];
|
|
664
667
|
const backendApp = workspace.apps[backendAppName];
|
|
665
668
|
if (!backendApp || backendApp.type !== "backend") return results;
|
|
@@ -722,7 +725,7 @@ async function copyAllClients(workspace, options = {}) {
|
|
|
722
725
|
|
|
723
726
|
//#endregion
|
|
724
727
|
//#region src/dev/index.ts
|
|
725
|
-
const logger$
|
|
728
|
+
const logger$11 = console;
|
|
726
729
|
/**
|
|
727
730
|
* Load environment files
|
|
728
731
|
* @internal Exported for testing
|
|
@@ -773,7 +776,7 @@ async function findAvailablePort(preferredPort, maxAttempts = 10) {
|
|
|
773
776
|
for (let i = 0; i < maxAttempts; i++) {
|
|
774
777
|
const port = preferredPort + i;
|
|
775
778
|
if (await isPortAvailable(port)) return port;
|
|
776
|
-
logger$
|
|
779
|
+
logger$11.log(`⚠️ Port ${port} is in use, trying ${port + 1}...`);
|
|
777
780
|
}
|
|
778
781
|
throw new Error(`Could not find an available port after trying ${maxAttempts} ports starting from ${preferredPort}`);
|
|
779
782
|
}
|
|
@@ -856,27 +859,27 @@ async function resolveServicePorts(workspaceRoot) {
|
|
|
856
859
|
const savedState = await loadPortState(workspaceRoot);
|
|
857
860
|
const dockerEnv = {};
|
|
858
861
|
const ports = {};
|
|
859
|
-
logger$
|
|
862
|
+
logger$11.log("\n🔌 Resolving service ports...");
|
|
860
863
|
for (const mapping of mappings) {
|
|
861
864
|
const containerPort = getContainerHostPort(workspaceRoot, mapping.service, mapping.containerPort);
|
|
862
865
|
if (containerPort !== null) {
|
|
863
866
|
ports[mapping.envVar] = containerPort;
|
|
864
867
|
dockerEnv[mapping.envVar] = String(containerPort);
|
|
865
|
-
logger$
|
|
868
|
+
logger$11.log(` 🔄 ${mapping.service}:${mapping.containerPort}: reusing existing container on port ${containerPort}`);
|
|
866
869
|
continue;
|
|
867
870
|
}
|
|
868
871
|
const savedPort = savedState[mapping.envVar];
|
|
869
872
|
if (savedPort && await isPortAvailable(savedPort)) {
|
|
870
873
|
ports[mapping.envVar] = savedPort;
|
|
871
874
|
dockerEnv[mapping.envVar] = String(savedPort);
|
|
872
|
-
logger$
|
|
875
|
+
logger$11.log(` 💾 ${mapping.service}:${mapping.containerPort}: using saved port ${savedPort}`);
|
|
873
876
|
continue;
|
|
874
877
|
}
|
|
875
878
|
const resolvedPort = await findAvailablePort(mapping.defaultPort);
|
|
876
879
|
ports[mapping.envVar] = resolvedPort;
|
|
877
880
|
dockerEnv[mapping.envVar] = String(resolvedPort);
|
|
878
|
-
if (resolvedPort !== mapping.defaultPort) logger$
|
|
879
|
-
else logger$
|
|
881
|
+
if (resolvedPort !== mapping.defaultPort) logger$11.log(` ⚡ ${mapping.service}:${mapping.containerPort}: port ${mapping.defaultPort} occupied, using port ${resolvedPort}`);
|
|
882
|
+
else logger$11.log(` ✅ ${mapping.service}:${mapping.containerPort}: using default port ${resolvedPort}`);
|
|
880
883
|
}
|
|
881
884
|
await savePortState(workspaceRoot, ports);
|
|
882
885
|
return {
|
|
@@ -1020,7 +1023,7 @@ function getProductionConfigFromGkm(config$1) {
|
|
|
1020
1023
|
async function devCommand(options) {
|
|
1021
1024
|
if (options.entry) return entryDevCommand(options);
|
|
1022
1025
|
const defaultEnv = loadEnvFiles(".env");
|
|
1023
|
-
if (defaultEnv.loaded.length > 0) logger$
|
|
1026
|
+
if (defaultEnv.loaded.length > 0) logger$11.log(`📦 Loaded env: ${defaultEnv.loaded.join(", ")}`);
|
|
1024
1027
|
const appName = getAppNameFromCwd();
|
|
1025
1028
|
let config$1;
|
|
1026
1029
|
let appRoot = process.cwd();
|
|
@@ -1034,9 +1037,9 @@ async function devCommand(options) {
|
|
|
1034
1037
|
secretsRoot = appConfig.workspaceRoot;
|
|
1035
1038
|
workspaceAppName = appConfig.appName;
|
|
1036
1039
|
workspaceAppPort = appConfig.app.port;
|
|
1037
|
-
logger$
|
|
1040
|
+
logger$11.log(`📦 Running app: ${appConfig.appName} on port ${workspaceAppPort}`);
|
|
1038
1041
|
if (appConfig.app.entry) {
|
|
1039
|
-
logger$
|
|
1042
|
+
logger$11.log(`📄 Using entry point: ${appConfig.app.entry}`);
|
|
1040
1043
|
return entryDevCommand({
|
|
1041
1044
|
...options,
|
|
1042
1045
|
entry: appConfig.app.entry,
|
|
@@ -1047,7 +1050,7 @@ async function devCommand(options) {
|
|
|
1047
1050
|
} catch {
|
|
1048
1051
|
const loadedConfig = await loadWorkspaceConfig();
|
|
1049
1052
|
if (loadedConfig.type === "workspace") {
|
|
1050
|
-
logger$
|
|
1053
|
+
logger$11.log("📦 Detected workspace configuration");
|
|
1051
1054
|
return workspaceDevCommand(loadedConfig.workspace, options);
|
|
1052
1055
|
}
|
|
1053
1056
|
config$1 = loadedConfig.raw;
|
|
@@ -1055,34 +1058,34 @@ async function devCommand(options) {
|
|
|
1055
1058
|
else {
|
|
1056
1059
|
const loadedConfig = await loadWorkspaceConfig();
|
|
1057
1060
|
if (loadedConfig.type === "workspace") {
|
|
1058
|
-
logger$
|
|
1061
|
+
logger$11.log("📦 Detected workspace configuration");
|
|
1059
1062
|
return workspaceDevCommand(loadedConfig.workspace, options);
|
|
1060
1063
|
}
|
|
1061
1064
|
config$1 = loadedConfig.raw;
|
|
1062
1065
|
}
|
|
1063
1066
|
if (config$1.env) {
|
|
1064
1067
|
const { loaded, missing } = loadEnvFiles(config$1.env, appRoot);
|
|
1065
|
-
if (loaded.length > 0) logger$
|
|
1066
|
-
if (missing.length > 0) logger$
|
|
1068
|
+
if (loaded.length > 0) logger$11.log(`📦 Loaded env: ${loaded.join(", ")}`);
|
|
1069
|
+
if (missing.length > 0) logger$11.warn(`⚠️ Missing env files: ${missing.join(", ")}`);
|
|
1067
1070
|
}
|
|
1068
1071
|
const resolved = resolveProviders(config$1, { provider: "server" });
|
|
1069
|
-
logger$
|
|
1070
|
-
logger$
|
|
1071
|
-
if (config$1.functions) logger$
|
|
1072
|
-
if (config$1.crons) logger$
|
|
1073
|
-
if (config$1.subscribers) logger$
|
|
1074
|
-
logger$
|
|
1072
|
+
logger$11.log("🚀 Starting development server...");
|
|
1073
|
+
logger$11.log(`Loading routes from: ${config$1.routes}`);
|
|
1074
|
+
if (config$1.functions) logger$11.log(`Loading functions from: ${config$1.functions}`);
|
|
1075
|
+
if (config$1.crons) logger$11.log(`Loading crons from: ${config$1.crons}`);
|
|
1076
|
+
if (config$1.subscribers) logger$11.log(`Loading subscribers from: ${config$1.subscribers}`);
|
|
1077
|
+
logger$11.log(`Using envParser: ${config$1.envParser}`);
|
|
1075
1078
|
const { path: envParserPath, importPattern: envParserImportPattern } = parseModuleConfig(config$1.envParser, "envParser");
|
|
1076
1079
|
const { path: loggerPath, importPattern: loggerImportPattern } = parseModuleConfig(config$1.logger, "logger");
|
|
1077
1080
|
const telescope = normalizeTelescopeConfig(config$1.telescope);
|
|
1078
|
-
if (telescope) logger$
|
|
1081
|
+
if (telescope) logger$11.log(`🔭 Telescope enabled at ${telescope.path}`);
|
|
1079
1082
|
const studio = normalizeStudioConfig(config$1.studio);
|
|
1080
|
-
if (studio) logger$
|
|
1083
|
+
if (studio) logger$11.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
1081
1084
|
const hooks = normalizeHooksConfig(config$1.hooks, appRoot);
|
|
1082
|
-
if (hooks) logger$
|
|
1085
|
+
if (hooks) logger$11.log(`🪝 Server hooks enabled from ${config$1.hooks?.server}`);
|
|
1083
1086
|
const openApiConfig = resolveOpenApiConfig(config$1);
|
|
1084
1087
|
const enableOpenApi = openApiConfig.enabled || resolved.enableOpenApi;
|
|
1085
|
-
if (enableOpenApi) logger$
|
|
1088
|
+
if (enableOpenApi) logger$11.log(`📄 OpenAPI output: ${OPENAPI_OUTPUT_PATH}`);
|
|
1086
1089
|
const buildContext = {
|
|
1087
1090
|
envParserPath,
|
|
1088
1091
|
envParserImportPattern,
|
|
@@ -1102,7 +1105,7 @@ async function devCommand(options) {
|
|
|
1102
1105
|
await mkdir(secretsDir, { recursive: true });
|
|
1103
1106
|
secretsJsonPath = join(secretsDir, "dev-secrets.json");
|
|
1104
1107
|
await writeFile(secretsJsonPath, JSON.stringify(appSecrets, null, 2));
|
|
1105
|
-
logger$
|
|
1108
|
+
logger$11.log(`🔐 Loaded ${Object.keys(appSecrets).length} secret(s)`);
|
|
1106
1109
|
}
|
|
1107
1110
|
const devServer = new DevServer(resolved.providers[0], options.port ?? workspaceAppPort ?? 3e3, options.portExplicit ?? false, enableOpenApi, telescope, studio, runtime, appRoot, secretsJsonPath);
|
|
1108
1111
|
await devServer.start();
|
|
@@ -1120,7 +1123,7 @@ async function devCommand(options) {
|
|
|
1120
1123
|
...hooksFile ? [hooksFile.endsWith(".ts") ? hooksFile : `${hooksFile}.ts`] : []
|
|
1121
1124
|
].flat().filter((p) => typeof p === "string");
|
|
1122
1125
|
const normalizedPatterns = watchPatterns.map((p) => p.startsWith("./") ? p.slice(2) : p);
|
|
1123
|
-
logger$
|
|
1126
|
+
logger$11.log(`👀 Watching for changes in: ${normalizedPatterns.join(", ")}`);
|
|
1124
1127
|
const resolvedFiles = await fg(normalizedPatterns, {
|
|
1125
1128
|
cwd: appRoot,
|
|
1126
1129
|
absolute: false,
|
|
@@ -1130,7 +1133,7 @@ async function devCommand(options) {
|
|
|
1130
1133
|
const parts = f.split("/");
|
|
1131
1134
|
return parts.slice(0, -1).join("/");
|
|
1132
1135
|
}))];
|
|
1133
|
-
logger$
|
|
1136
|
+
logger$11.log(`📁 Found ${resolvedFiles.length} files in ${dirsToWatch.length} directories`);
|
|
1134
1137
|
const watcher = chokidar.watch([...resolvedFiles, ...dirsToWatch], {
|
|
1135
1138
|
ignored: /(^|[/\\])\../,
|
|
1136
1139
|
persistent: true,
|
|
@@ -1138,27 +1141,27 @@ async function devCommand(options) {
|
|
|
1138
1141
|
cwd: appRoot
|
|
1139
1142
|
});
|
|
1140
1143
|
watcher.on("ready", () => {
|
|
1141
|
-
logger$
|
|
1144
|
+
logger$11.log("🔍 File watcher ready");
|
|
1142
1145
|
});
|
|
1143
1146
|
watcher.on("error", (error) => {
|
|
1144
|
-
logger$
|
|
1147
|
+
logger$11.error("❌ Watcher error:", error);
|
|
1145
1148
|
});
|
|
1146
1149
|
let rebuildTimeout = null;
|
|
1147
1150
|
watcher.on("change", async (path) => {
|
|
1148
|
-
logger$
|
|
1151
|
+
logger$11.log(`📝 File changed: ${path}`);
|
|
1149
1152
|
if (rebuildTimeout) clearTimeout(rebuildTimeout);
|
|
1150
1153
|
rebuildTimeout = setTimeout(async () => {
|
|
1151
1154
|
try {
|
|
1152
|
-
logger$
|
|
1155
|
+
logger$11.log("🔄 Rebuilding...");
|
|
1153
1156
|
await buildServer(config$1, buildContext, resolved.providers[0], enableOpenApi, appRoot, true);
|
|
1154
1157
|
if (enableOpenApi) await generateOpenApi(config$1, {
|
|
1155
1158
|
silent: true,
|
|
1156
1159
|
bustCache: true
|
|
1157
1160
|
});
|
|
1158
|
-
logger$
|
|
1161
|
+
logger$11.log("✅ Rebuild complete, restarting server...");
|
|
1159
1162
|
await devServer.restart();
|
|
1160
1163
|
} catch (error) {
|
|
1161
|
-
logger$
|
|
1164
|
+
logger$11.error("❌ Rebuild failed:", error.message);
|
|
1162
1165
|
}
|
|
1163
1166
|
}, 300);
|
|
1164
1167
|
});
|
|
@@ -1166,9 +1169,9 @@ async function devCommand(options) {
|
|
|
1166
1169
|
const shutdown = () => {
|
|
1167
1170
|
if (isShuttingDown) return;
|
|
1168
1171
|
isShuttingDown = true;
|
|
1169
|
-
logger$
|
|
1172
|
+
logger$11.log("\n🛑 Shutting down...");
|
|
1170
1173
|
Promise.all([watcher.close(), devServer.stop()]).catch((err) => {
|
|
1171
|
-
logger$
|
|
1174
|
+
logger$11.error("Error during shutdown:", err);
|
|
1172
1175
|
}).finally(() => {
|
|
1173
1176
|
process.exit(0);
|
|
1174
1177
|
});
|
|
@@ -1271,11 +1274,11 @@ async function loadDevSecrets(workspace) {
|
|
|
1271
1274
|
for (const stage of stages) if (secretsExist(stage, workspace.root)) {
|
|
1272
1275
|
const secrets = await readStageSecrets(stage, workspace.root);
|
|
1273
1276
|
if (secrets) {
|
|
1274
|
-
logger$
|
|
1277
|
+
logger$11.log(`🔐 Loading secrets from stage: ${stage}`);
|
|
1275
1278
|
return toEmbeddableSecrets(secrets);
|
|
1276
1279
|
}
|
|
1277
1280
|
}
|
|
1278
|
-
logger$
|
|
1281
|
+
logger$11.warn("⚠️ Secrets enabled but no dev/development secrets found. Run \"gkm setup\" to initialize your development environment");
|
|
1279
1282
|
return {};
|
|
1280
1283
|
}
|
|
1281
1284
|
/**
|
|
@@ -1290,7 +1293,7 @@ async function loadSecretsForApp(secretsRoot, appName) {
|
|
|
1290
1293
|
for (const stage of stages) if (secretsExist(stage, secretsRoot)) {
|
|
1291
1294
|
const stageSecrets = await readStageSecrets(stage, secretsRoot);
|
|
1292
1295
|
if (stageSecrets) {
|
|
1293
|
-
logger$
|
|
1296
|
+
logger$11.log(`🔐 Loading secrets from stage: ${stage}`);
|
|
1294
1297
|
secrets = toEmbeddableSecrets(stageSecrets);
|
|
1295
1298
|
break;
|
|
1296
1299
|
}
|
|
@@ -1315,11 +1318,11 @@ async function startWorkspaceServices(workspace, portEnv) {
|
|
|
1315
1318
|
if (services.cache) servicesToStart.push("redis");
|
|
1316
1319
|
if (services.mail) servicesToStart.push("mailpit");
|
|
1317
1320
|
if (servicesToStart.length === 0) return;
|
|
1318
|
-
logger$
|
|
1321
|
+
logger$11.log(`🐳 Starting services: ${servicesToStart.join(", ")}`);
|
|
1319
1322
|
try {
|
|
1320
1323
|
const composeFile = join(workspace.root, "docker-compose.yml");
|
|
1321
1324
|
if (!existsSync(composeFile)) {
|
|
1322
|
-
logger$
|
|
1325
|
+
logger$11.warn("⚠️ No docker-compose.yml found. Services will not be started.");
|
|
1323
1326
|
return;
|
|
1324
1327
|
}
|
|
1325
1328
|
execSync(`docker compose up -d ${servicesToStart.join(" ")}`, {
|
|
@@ -1330,9 +1333,9 @@ async function startWorkspaceServices(workspace, portEnv) {
|
|
|
1330
1333
|
...portEnv
|
|
1331
1334
|
}
|
|
1332
1335
|
});
|
|
1333
|
-
logger$
|
|
1336
|
+
logger$11.log("✅ Services started");
|
|
1334
1337
|
} catch (error) {
|
|
1335
|
-
logger$
|
|
1338
|
+
logger$11.error("❌ Failed to start services:", error.message);
|
|
1336
1339
|
throw error;
|
|
1337
1340
|
}
|
|
1338
1341
|
}
|
|
@@ -1349,41 +1352,41 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1349
1352
|
const appCount = Object.keys(workspace.apps).length;
|
|
1350
1353
|
const backendApps = Object.entries(workspace.apps).filter(([_, app]) => app.type === "backend");
|
|
1351
1354
|
const frontendApps = Object.entries(workspace.apps).filter(([_, app]) => app.type === "frontend");
|
|
1352
|
-
logger$
|
|
1353
|
-
logger$
|
|
1355
|
+
logger$11.log(`\n🚀 Starting workspace: ${workspace.name}`);
|
|
1356
|
+
logger$11.log(` ${backendApps.length} backend app(s), ${frontendApps.length} frontend app(s)`);
|
|
1354
1357
|
const conflicts = checkPortConflicts(workspace);
|
|
1355
1358
|
if (conflicts.length > 0) {
|
|
1356
|
-
for (const conflict of conflicts) logger$
|
|
1359
|
+
for (const conflict of conflicts) logger$11.error(`❌ Port conflict: Apps "${conflict.app1}" and "${conflict.app2}" both use port ${conflict.port}`);
|
|
1357
1360
|
throw new Error("Port conflicts detected. Please assign unique ports to each app.");
|
|
1358
1361
|
}
|
|
1359
1362
|
if (frontendApps.length > 0) {
|
|
1360
|
-
logger$
|
|
1363
|
+
logger$11.log("\n🔍 Validating frontend apps...");
|
|
1361
1364
|
const validationResults = await validateFrontendApps(workspace);
|
|
1362
1365
|
let hasErrors = false;
|
|
1363
1366
|
for (const result of validationResults) {
|
|
1364
1367
|
if (!result.valid) {
|
|
1365
1368
|
hasErrors = true;
|
|
1366
|
-
logger$
|
|
1367
|
-
for (const error of result.errors) logger$
|
|
1369
|
+
logger$11.error(`\n❌ Frontend app "${result.appName}" validation failed:`);
|
|
1370
|
+
for (const error of result.errors) logger$11.error(` • ${error}`);
|
|
1368
1371
|
}
|
|
1369
|
-
for (const warning of result.warnings) logger$
|
|
1372
|
+
for (const warning of result.warnings) logger$11.warn(` ⚠️ ${result.appName}: ${warning}`);
|
|
1370
1373
|
}
|
|
1371
1374
|
if (hasErrors) throw new Error("Frontend app validation failed. Fix the issues above and try again.");
|
|
1372
|
-
logger$
|
|
1375
|
+
logger$11.log("✅ Frontend apps validated");
|
|
1373
1376
|
}
|
|
1374
1377
|
if (frontendApps.length > 0 && backendApps.length > 0) {
|
|
1375
1378
|
const clientResults = await copyAllClients(workspace);
|
|
1376
1379
|
const copiedCount = clientResults.filter((r) => r.success).length;
|
|
1377
|
-
if (copiedCount > 0) logger$
|
|
1380
|
+
if (copiedCount > 0) logger$11.log(`\n📦 Copied ${copiedCount} API client(s)`);
|
|
1378
1381
|
}
|
|
1379
1382
|
const resolvedPorts = await resolveServicePorts(workspace.root);
|
|
1380
1383
|
await startWorkspaceServices(workspace, resolvedPorts.dockerEnv);
|
|
1381
1384
|
const secretsEnv = rewriteUrlsWithPorts(await loadDevSecrets(workspace), resolvedPorts);
|
|
1382
|
-
if (Object.keys(secretsEnv).length > 0) logger$
|
|
1385
|
+
if (Object.keys(secretsEnv).length > 0) logger$11.log(` Loaded ${Object.keys(secretsEnv).length} secret(s)`);
|
|
1383
1386
|
const dependencyEnv = generateAllDependencyEnvVars(workspace);
|
|
1384
1387
|
if (Object.keys(dependencyEnv).length > 0) {
|
|
1385
|
-
logger$
|
|
1386
|
-
for (const [key, value] of Object.entries(dependencyEnv)) logger$
|
|
1388
|
+
logger$11.log("📡 Dependency URLs:");
|
|
1389
|
+
for (const [key, value] of Object.entries(dependencyEnv)) logger$11.log(` ${key}=${value}`);
|
|
1387
1390
|
}
|
|
1388
1391
|
let turboFilter = [];
|
|
1389
1392
|
if (options.app) {
|
|
@@ -1392,18 +1395,18 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1392
1395
|
throw new Error(`App "${options.app}" not found. Available apps: ${appNames}`);
|
|
1393
1396
|
}
|
|
1394
1397
|
turboFilter = ["--filter", options.app];
|
|
1395
|
-
logger$
|
|
1398
|
+
logger$11.log(`\n🎯 Running single app: ${options.app}`);
|
|
1396
1399
|
} else if (options.filter) {
|
|
1397
1400
|
turboFilter = ["--filter", options.filter];
|
|
1398
|
-
logger$
|
|
1399
|
-
} else logger$
|
|
1401
|
+
logger$11.log(`\n🔍 Using filter: ${options.filter}`);
|
|
1402
|
+
} else logger$11.log(`\n🎯 Running all ${appCount} apps`);
|
|
1400
1403
|
const buildOrder = getAppBuildOrder(workspace);
|
|
1401
|
-
logger$
|
|
1404
|
+
logger$11.log("\n📋 Apps (in dependency order):");
|
|
1402
1405
|
for (const appName of buildOrder) {
|
|
1403
1406
|
const app = workspace.apps[appName];
|
|
1404
1407
|
if (!app) continue;
|
|
1405
1408
|
const deps = app.dependencies.length > 0 ? ` (depends on: ${app.dependencies.join(", ")})` : "";
|
|
1406
|
-
logger$
|
|
1409
|
+
logger$11.log(` ${app.type === "backend" ? "🔧" : "🌐"} ${appName} → http://localhost:${app.port}${deps}`);
|
|
1407
1410
|
}
|
|
1408
1411
|
const configFiles = [
|
|
1409
1412
|
"gkm.config.ts",
|
|
@@ -1425,7 +1428,7 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1425
1428
|
NODE_ENV: "development",
|
|
1426
1429
|
...configPath ? { GKM_CONFIG_PATH: configPath } : {}
|
|
1427
1430
|
};
|
|
1428
|
-
logger$
|
|
1431
|
+
logger$11.log("\n🏃 Starting turbo run dev...\n");
|
|
1429
1432
|
const turboProcess = spawn("pnpm", [
|
|
1430
1433
|
"turbo",
|
|
1431
1434
|
"run",
|
|
@@ -1447,7 +1450,7 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1447
1450
|
});
|
|
1448
1451
|
}
|
|
1449
1452
|
if (openApiPaths.length > 0) {
|
|
1450
|
-
logger$
|
|
1453
|
+
logger$11.log(`\n👀 Watching ${openApiPaths.length} backend OpenAPI spec(s) for changes`);
|
|
1451
1454
|
const pathToApp = new Map(openApiPaths.map((p) => [p.path, p.appName]));
|
|
1452
1455
|
openApiWatcher = chokidar.watch(openApiPaths.map((p) => p.path), {
|
|
1453
1456
|
persistent: true,
|
|
@@ -1460,13 +1463,13 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1460
1463
|
copyTimeout = setTimeout(async () => {
|
|
1461
1464
|
const backendAppName = pathToApp.get(changedPath);
|
|
1462
1465
|
if (!backendAppName) return;
|
|
1463
|
-
logger$
|
|
1466
|
+
logger$11.log(`\n🔄 OpenAPI spec changed for ${backendAppName}`);
|
|
1464
1467
|
try {
|
|
1465
1468
|
const results = await copyClientToFrontends(workspace, backendAppName, { silent: true });
|
|
1466
|
-
for (const result of results) if (result.success) logger$
|
|
1467
|
-
else if (result.error) logger$
|
|
1469
|
+
for (const result of results) if (result.success) logger$11.log(` 📦 Copied client to ${result.frontendApp} (${result.endpointCount} endpoints)`);
|
|
1470
|
+
else if (result.error) logger$11.error(` ❌ Failed to copy client to ${result.frontendApp}: ${result.error}`);
|
|
1468
1471
|
} catch (error) {
|
|
1469
|
-
logger$
|
|
1472
|
+
logger$11.error(` ❌ Failed to copy clients: ${error.message}`);
|
|
1470
1473
|
}
|
|
1471
1474
|
}, 200);
|
|
1472
1475
|
};
|
|
@@ -1478,7 +1481,7 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1478
1481
|
const shutdown = () => {
|
|
1479
1482
|
if (isShuttingDown) return;
|
|
1480
1483
|
isShuttingDown = true;
|
|
1481
|
-
logger$
|
|
1484
|
+
logger$11.log("\n🛑 Shutting down workspace...");
|
|
1482
1485
|
if (openApiWatcher) openApiWatcher.close().catch(() => {});
|
|
1483
1486
|
if (turboProcess.pid) try {
|
|
1484
1487
|
process.kill(-turboProcess.pid, "SIGTERM");
|
|
@@ -1493,7 +1496,7 @@ async function workspaceDevCommand(workspace, options) {
|
|
|
1493
1496
|
process.on("SIGTERM", shutdown);
|
|
1494
1497
|
return new Promise((resolve$1, reject) => {
|
|
1495
1498
|
turboProcess.on("error", (error) => {
|
|
1496
|
-
logger$
|
|
1499
|
+
logger$11.error("❌ Turbo error:", error);
|
|
1497
1500
|
reject(error);
|
|
1498
1501
|
});
|
|
1499
1502
|
turboProcess.on("exit", (code) => {
|
|
@@ -1606,7 +1609,7 @@ async function prepareEntryCredentials(options) {
|
|
|
1606
1609
|
secretsRoot = appInfo.workspaceRoot;
|
|
1607
1610
|
appName = appInfo.appName;
|
|
1608
1611
|
} catch (error) {
|
|
1609
|
-
logger$
|
|
1612
|
+
logger$11.log(`⚠️ Could not load workspace config: ${error.message}`);
|
|
1610
1613
|
secretsRoot = findSecretsRoot(cwd);
|
|
1611
1614
|
appName = getAppNameFromCwd(cwd) ?? void 0;
|
|
1612
1615
|
}
|
|
@@ -1636,11 +1639,11 @@ async function entryDevCommand(options) {
|
|
|
1636
1639
|
const entryPath = resolve(process.cwd(), entry);
|
|
1637
1640
|
if (!existsSync(entryPath)) throw new Error(`Entry file not found: ${entryPath}`);
|
|
1638
1641
|
const defaultEnv = loadEnvFiles(".env");
|
|
1639
|
-
if (defaultEnv.loaded.length > 0) logger$
|
|
1642
|
+
if (defaultEnv.loaded.length > 0) logger$11.log(`📦 Loaded env: ${defaultEnv.loaded.join(", ")}`);
|
|
1640
1643
|
const { credentials, resolvedPort, secretsJsonPath, appName } = await prepareEntryCredentials({ explicitPort: options.portExplicit ? options.port : void 0 });
|
|
1641
|
-
if (appName) logger$
|
|
1642
|
-
logger$
|
|
1643
|
-
if (Object.keys(credentials).length > 1) logger$
|
|
1644
|
+
if (appName) logger$11.log(`📦 App: ${appName} (port ${resolvedPort})`);
|
|
1645
|
+
logger$11.log(`🚀 Starting entry file: ${entry} on port ${resolvedPort}`);
|
|
1646
|
+
if (Object.keys(credentials).length > 1) logger$11.log(`🔐 Loaded ${Object.keys(credentials).length - 1} secret(s) + PORT`);
|
|
1644
1647
|
const wrapperDir = join(process.cwd(), ".gkm");
|
|
1645
1648
|
await mkdir(wrapperDir, { recursive: true });
|
|
1646
1649
|
const wrapperPath = join(wrapperDir, "entry-wrapper.ts");
|
|
@@ -1651,7 +1654,7 @@ async function entryDevCommand(options) {
|
|
|
1651
1654
|
const shutdown = () => {
|
|
1652
1655
|
if (isShuttingDown) return;
|
|
1653
1656
|
isShuttingDown = true;
|
|
1654
|
-
logger$
|
|
1657
|
+
logger$11.log("\n🛑 Shutting down...");
|
|
1655
1658
|
runner.stop();
|
|
1656
1659
|
process.exit(0);
|
|
1657
1660
|
};
|
|
@@ -1683,14 +1686,14 @@ var EntryRunner = class {
|
|
|
1683
1686
|
});
|
|
1684
1687
|
let restartTimeout = null;
|
|
1685
1688
|
this.watcher.on("change", (path) => {
|
|
1686
|
-
logger$
|
|
1689
|
+
logger$11.log(`📝 File changed: ${path}`);
|
|
1687
1690
|
if (restartTimeout) clearTimeout(restartTimeout);
|
|
1688
1691
|
restartTimeout = setTimeout(async () => {
|
|
1689
|
-
logger$
|
|
1692
|
+
logger$11.log("🔄 Restarting...");
|
|
1690
1693
|
await this.restart();
|
|
1691
1694
|
}, 300);
|
|
1692
1695
|
});
|
|
1693
|
-
logger$
|
|
1696
|
+
logger$11.log(`👀 Watching for changes in: ${watchDir}`);
|
|
1694
1697
|
}
|
|
1695
1698
|
}
|
|
1696
1699
|
async runProcess() {
|
|
@@ -1705,14 +1708,14 @@ var EntryRunner = class {
|
|
|
1705
1708
|
});
|
|
1706
1709
|
this.isRunning = true;
|
|
1707
1710
|
this.childProcess.on("error", (error) => {
|
|
1708
|
-
logger$
|
|
1711
|
+
logger$11.error("❌ Process error:", error);
|
|
1709
1712
|
});
|
|
1710
1713
|
this.childProcess.on("exit", (code) => {
|
|
1711
|
-
if (code !== null && code !== 0 && code !== 143) logger$
|
|
1714
|
+
if (code !== null && code !== 0 && code !== 143) logger$11.error(`❌ Process exited with code ${code}`);
|
|
1712
1715
|
this.isRunning = false;
|
|
1713
1716
|
});
|
|
1714
1717
|
await new Promise((resolve$1) => setTimeout(resolve$1, 500));
|
|
1715
|
-
if (this.isRunning) logger$
|
|
1718
|
+
if (this.isRunning) logger$11.log(`\n🎉 Running at http://localhost:${this.port}`);
|
|
1716
1719
|
}
|
|
1717
1720
|
async restart() {
|
|
1718
1721
|
this.stopProcess();
|
|
@@ -1738,6 +1741,66 @@ var EntryRunner = class {
|
|
|
1738
1741
|
}
|
|
1739
1742
|
}
|
|
1740
1743
|
};
|
|
1744
|
+
/**
|
|
1745
|
+
* Generate the content of the dev server entry file (server.ts).
|
|
1746
|
+
* Uses dynamic import for createApp so Credentials are populated
|
|
1747
|
+
* before any app modules evaluate.
|
|
1748
|
+
* @internal Exported for testing
|
|
1749
|
+
*/
|
|
1750
|
+
function generateServerEntryContent(options) {
|
|
1751
|
+
const { secretsJsonPath, runtime = "node", enableOpenApi = false, appImportPath = "./app.js" } = options;
|
|
1752
|
+
const credentialsInjection = secretsJsonPath ? `import { Credentials } from '@geekmidas/envkit/credentials';
|
|
1753
|
+
import { existsSync, readFileSync } from 'node:fs';
|
|
1754
|
+
|
|
1755
|
+
// Inject dev secrets into Credentials (must happen before app import)
|
|
1756
|
+
const secretsPath = '${secretsJsonPath}';
|
|
1757
|
+
if (existsSync(secretsPath)) {
|
|
1758
|
+
Object.assign(Credentials, JSON.parse(readFileSync(secretsPath, 'utf-8')));
|
|
1759
|
+
}
|
|
1760
|
+
|
|
1761
|
+
` : "";
|
|
1762
|
+
const serveCode = runtime === "bun" ? `Bun.serve({
|
|
1763
|
+
port,
|
|
1764
|
+
fetch: app.fetch,
|
|
1765
|
+
});` : `const { serve } = await import('@hono/node-server');
|
|
1766
|
+
const server = serve({
|
|
1767
|
+
fetch: app.fetch,
|
|
1768
|
+
port,
|
|
1769
|
+
});
|
|
1770
|
+
// Inject WebSocket support if available
|
|
1771
|
+
const injectWs = (app as any).__injectWebSocket;
|
|
1772
|
+
if (injectWs) {
|
|
1773
|
+
injectWs(server);
|
|
1774
|
+
console.log('🔌 Telescope real-time updates enabled');
|
|
1775
|
+
}`;
|
|
1776
|
+
return `#!/usr/bin/env node
|
|
1777
|
+
/**
|
|
1778
|
+
* Development server entry point
|
|
1779
|
+
* This file is auto-generated by 'gkm dev'
|
|
1780
|
+
*/
|
|
1781
|
+
${credentialsInjection}
|
|
1782
|
+
const port = process.argv.includes('--port')
|
|
1783
|
+
? Number.parseInt(process.argv[process.argv.indexOf('--port') + 1])
|
|
1784
|
+
: 3000;
|
|
1785
|
+
|
|
1786
|
+
// Dynamic import so Credentials are populated before env.ts evaluates
|
|
1787
|
+
const { createApp } = await import('${appImportPath}');
|
|
1788
|
+
|
|
1789
|
+
// createApp is async to support optional WebSocket setup
|
|
1790
|
+
const { app, start } = await createApp(undefined, ${enableOpenApi});
|
|
1791
|
+
|
|
1792
|
+
// Start the server
|
|
1793
|
+
start({
|
|
1794
|
+
port,
|
|
1795
|
+
serve: async (app, port) => {
|
|
1796
|
+
${serveCode}
|
|
1797
|
+
},
|
|
1798
|
+
}).catch((error) => {
|
|
1799
|
+
console.error('Failed to start server:', error);
|
|
1800
|
+
process.exit(1);
|
|
1801
|
+
});
|
|
1802
|
+
`;
|
|
1803
|
+
}
|
|
1741
1804
|
var DevServer = class {
|
|
1742
1805
|
serverProcess = null;
|
|
1743
1806
|
isRunning = false;
|
|
@@ -1762,11 +1825,11 @@ var DevServer = class {
|
|
|
1762
1825
|
this.actualPort = this.requestedPort;
|
|
1763
1826
|
} else {
|
|
1764
1827
|
this.actualPort = await findAvailablePort(this.requestedPort);
|
|
1765
|
-
if (this.actualPort !== this.requestedPort) logger$
|
|
1828
|
+
if (this.actualPort !== this.requestedPort) logger$11.log(`ℹ️ Port ${this.requestedPort} was in use, using port ${this.actualPort} instead`);
|
|
1766
1829
|
}
|
|
1767
1830
|
const serverEntryPath = join(this.appRoot, ".gkm", this.provider, "server.ts");
|
|
1768
1831
|
await this.createServerEntry();
|
|
1769
|
-
logger$
|
|
1832
|
+
logger$11.log(`\n✨ Starting server on port ${this.actualPort}...`);
|
|
1770
1833
|
this.serverProcess = spawn("npx", [
|
|
1771
1834
|
"tsx",
|
|
1772
1835
|
serverEntryPath,
|
|
@@ -1782,18 +1845,18 @@ var DevServer = class {
|
|
|
1782
1845
|
});
|
|
1783
1846
|
this.isRunning = true;
|
|
1784
1847
|
this.serverProcess.on("error", (error) => {
|
|
1785
|
-
logger$
|
|
1848
|
+
logger$11.error("❌ Server error:", error);
|
|
1786
1849
|
});
|
|
1787
1850
|
this.serverProcess.on("exit", (code, signal) => {
|
|
1788
|
-
if (code !== null && code !== 0 && signal !== "SIGTERM") logger$
|
|
1851
|
+
if (code !== null && code !== 0 && signal !== "SIGTERM") logger$11.error(`❌ Server exited with code ${code}`);
|
|
1789
1852
|
this.isRunning = false;
|
|
1790
1853
|
});
|
|
1791
1854
|
await new Promise((resolve$1) => setTimeout(resolve$1, 1e3));
|
|
1792
1855
|
if (this.isRunning) {
|
|
1793
|
-
logger$
|
|
1794
|
-
if (this.enableOpenApi) logger$
|
|
1795
|
-
if (this.telescope) logger$
|
|
1796
|
-
if (this.studio) logger$
|
|
1856
|
+
logger$11.log(`\n🎉 Server running at http://localhost:${this.actualPort}`);
|
|
1857
|
+
if (this.enableOpenApi) logger$11.log(`📚 API Docs available at http://localhost:${this.actualPort}/__docs`);
|
|
1858
|
+
if (this.telescope) logger$11.log(`🔭 Telescope available at http://localhost:${this.actualPort}${this.telescope.path}`);
|
|
1859
|
+
if (this.studio) logger$11.log(`🗄️ Studio available at http://localhost:${this.actualPort}${this.studio.path}`);
|
|
1797
1860
|
}
|
|
1798
1861
|
}
|
|
1799
1862
|
async stop() {
|
|
@@ -1831,58 +1894,12 @@ var DevServer = class {
|
|
|
1831
1894
|
}
|
|
1832
1895
|
async createServerEntry() {
|
|
1833
1896
|
const { writeFile: fsWriteFile } = await import("node:fs/promises");
|
|
1834
|
-
const { relative: relative$1, dirname: dirname$1 } = await import("node:path");
|
|
1835
1897
|
const serverPath = join(this.appRoot, ".gkm", this.provider, "server.ts");
|
|
1836
|
-
const
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
const secretsPath = '${this.secretsJsonPath}';
|
|
1842
|
-
if (existsSync(secretsPath)) {
|
|
1843
|
-
Object.assign(Credentials, JSON.parse(readFileSync(secretsPath, 'utf-8')));
|
|
1844
|
-
}
|
|
1845
|
-
|
|
1846
|
-
` : "";
|
|
1847
|
-
const serveCode = this.runtime === "bun" ? `Bun.serve({
|
|
1848
|
-
port,
|
|
1849
|
-
fetch: app.fetch,
|
|
1850
|
-
});` : `const { serve } = await import('@hono/node-server');
|
|
1851
|
-
const server = serve({
|
|
1852
|
-
fetch: app.fetch,
|
|
1853
|
-
port,
|
|
1854
|
-
});
|
|
1855
|
-
// Inject WebSocket support if available
|
|
1856
|
-
const injectWs = (app as any).__injectWebSocket;
|
|
1857
|
-
if (injectWs) {
|
|
1858
|
-
injectWs(server);
|
|
1859
|
-
console.log('🔌 Telescope real-time updates enabled');
|
|
1860
|
-
}`;
|
|
1861
|
-
const content = `#!/usr/bin/env node
|
|
1862
|
-
/**
|
|
1863
|
-
* Development server entry point
|
|
1864
|
-
* This file is auto-generated by 'gkm dev'
|
|
1865
|
-
*/
|
|
1866
|
-
${credentialsInjection}import { createApp } from './${relativeAppPath.startsWith(".") ? relativeAppPath : `./${relativeAppPath}`}';
|
|
1867
|
-
|
|
1868
|
-
const port = process.argv.includes('--port')
|
|
1869
|
-
? Number.parseInt(process.argv[process.argv.indexOf('--port') + 1])
|
|
1870
|
-
: 3000;
|
|
1871
|
-
|
|
1872
|
-
// createApp is async to support optional WebSocket setup
|
|
1873
|
-
const { app, start } = await createApp(undefined, ${this.enableOpenApi});
|
|
1874
|
-
|
|
1875
|
-
// Start the server
|
|
1876
|
-
start({
|
|
1877
|
-
port,
|
|
1878
|
-
serve: async (app, port) => {
|
|
1879
|
-
${serveCode}
|
|
1880
|
-
},
|
|
1881
|
-
}).catch((error) => {
|
|
1882
|
-
console.error('Failed to start server:', error);
|
|
1883
|
-
process.exit(1);
|
|
1884
|
-
});
|
|
1885
|
-
`;
|
|
1898
|
+
const content = generateServerEntryContent({
|
|
1899
|
+
secretsJsonPath: this.secretsJsonPath,
|
|
1900
|
+
runtime: this.runtime,
|
|
1901
|
+
enableOpenApi: this.enableOpenApi
|
|
1902
|
+
});
|
|
1886
1903
|
await fsWriteFile(serverPath, content);
|
|
1887
1904
|
}
|
|
1888
1905
|
};
|
|
@@ -1901,11 +1918,11 @@ async function execCommand(commandArgs, options = {}) {
|
|
|
1901
1918
|
const cwd = options.cwd ?? process.cwd();
|
|
1902
1919
|
if (commandArgs.length === 0) throw new Error("No command specified. Usage: gkm exec -- <command>");
|
|
1903
1920
|
const defaultEnv = loadEnvFiles(".env");
|
|
1904
|
-
if (defaultEnv.loaded.length > 0) logger$
|
|
1921
|
+
if (defaultEnv.loaded.length > 0) logger$11.log(`📦 Loaded env: ${defaultEnv.loaded.join(", ")}`);
|
|
1905
1922
|
const { credentials, secretsJsonPath, appName, secretsRoot } = await prepareEntryCredentials({ cwd });
|
|
1906
|
-
if (appName) logger$
|
|
1923
|
+
if (appName) logger$11.log(`📦 App: ${appName}`);
|
|
1907
1924
|
const secretCount = Object.keys(credentials).filter((k) => k !== "PORT").length;
|
|
1908
|
-
if (secretCount > 0) logger$
|
|
1925
|
+
if (secretCount > 0) logger$11.log(`🔐 Loaded ${secretCount} secret(s)`);
|
|
1909
1926
|
const composePath = join(secretsRoot, "docker-compose.yml");
|
|
1910
1927
|
const mappings = parseComposePortMappings(composePath);
|
|
1911
1928
|
if (mappings.length > 0) {
|
|
@@ -1917,7 +1934,7 @@ async function execCommand(commandArgs, options = {}) {
|
|
|
1917
1934
|
mappings
|
|
1918
1935
|
});
|
|
1919
1936
|
Object.assign(credentials, rewritten);
|
|
1920
|
-
logger$
|
|
1937
|
+
logger$11.log(`🔌 Applied ${Object.keys(ports).length} port mapping(s)`);
|
|
1921
1938
|
}
|
|
1922
1939
|
}
|
|
1923
1940
|
try {
|
|
@@ -1934,7 +1951,7 @@ async function execCommand(commandArgs, options = {}) {
|
|
|
1934
1951
|
const [cmd, ...rawArgs] = commandArgs;
|
|
1935
1952
|
if (!cmd) throw new Error("No command specified");
|
|
1936
1953
|
const args = rawArgs.map((arg) => arg.replace(/\$PORT\b/g, credentials.PORT ?? "3000"));
|
|
1937
|
-
logger$
|
|
1954
|
+
logger$11.log(`🚀 Running: ${[cmd, ...args].join(" ")}`);
|
|
1938
1955
|
const existingNodeOptions = process.env.NODE_OPTIONS ?? "";
|
|
1939
1956
|
const tsxImport = "--import=tsx";
|
|
1940
1957
|
const preloadImport = `--import=${preloadPath}`;
|
|
@@ -1955,7 +1972,7 @@ async function execCommand(commandArgs, options = {}) {
|
|
|
1955
1972
|
const exitCode = await new Promise((resolve$1) => {
|
|
1956
1973
|
child.on("close", (code) => resolve$1(code ?? 0));
|
|
1957
1974
|
child.on("error", (error) => {
|
|
1958
|
-
logger$
|
|
1975
|
+
logger$11.error(`Failed to run command: ${error.message}`);
|
|
1959
1976
|
resolve$1(1);
|
|
1960
1977
|
});
|
|
1961
1978
|
});
|
|
@@ -1964,7 +1981,7 @@ async function execCommand(commandArgs, options = {}) {
|
|
|
1964
1981
|
|
|
1965
1982
|
//#endregion
|
|
1966
1983
|
//#region src/build/manifests.ts
|
|
1967
|
-
const logger$
|
|
1984
|
+
const logger$10 = console;
|
|
1968
1985
|
async function generateAwsManifest(outputDir, routes, functions, crons, subscribers) {
|
|
1969
1986
|
const manifestDir = join(outputDir, "manifest");
|
|
1970
1987
|
await mkdir(manifestDir, { recursive: true });
|
|
@@ -1989,8 +2006,8 @@ export type RoutePath = Route['path'];
|
|
|
1989
2006
|
`;
|
|
1990
2007
|
const manifestPath = join(manifestDir, "aws.ts");
|
|
1991
2008
|
await writeFile(manifestPath, content);
|
|
1992
|
-
logger$
|
|
1993
|
-
logger$
|
|
2009
|
+
logger$10.log(`Generated AWS manifest with ${awsRoutes.length} routes, ${functions.length} functions, ${crons.length} crons, ${subscribers.length} subscribers`);
|
|
2010
|
+
logger$10.log(`Manifest: ${relative(process.cwd(), manifestPath)}`);
|
|
1994
2011
|
}
|
|
1995
2012
|
async function generateServerManifest(outputDir, appInfo, routes, subscribers) {
|
|
1996
2013
|
const manifestDir = join(outputDir, "manifest");
|
|
@@ -2021,13 +2038,13 @@ export type RoutePath = Route['path'];
|
|
|
2021
2038
|
`;
|
|
2022
2039
|
const manifestPath = join(manifestDir, "server.ts");
|
|
2023
2040
|
await writeFile(manifestPath, content);
|
|
2024
|
-
logger$
|
|
2025
|
-
logger$
|
|
2041
|
+
logger$10.log(`Generated server manifest with ${serverRoutes.length} routes, ${serverSubscribers.length} subscribers`);
|
|
2042
|
+
logger$10.log(`Manifest: ${relative(process.cwd(), manifestPath)}`);
|
|
2026
2043
|
}
|
|
2027
2044
|
|
|
2028
2045
|
//#endregion
|
|
2029
2046
|
//#region src/build/index.ts
|
|
2030
|
-
const logger$
|
|
2047
|
+
const logger$9 = console;
|
|
2031
2048
|
async function buildCommand(options) {
|
|
2032
2049
|
const loadedConfig = await loadWorkspaceConfig();
|
|
2033
2050
|
if (loadedConfig.type === "workspace") {
|
|
@@ -2035,7 +2052,7 @@ async function buildCommand(options) {
|
|
|
2035
2052
|
const workspaceRoot = resolve(loadedConfig.workspace.root);
|
|
2036
2053
|
const isAtWorkspaceRoot = cwd === workspaceRoot;
|
|
2037
2054
|
if (isAtWorkspaceRoot) {
|
|
2038
|
-
logger$
|
|
2055
|
+
logger$9.log("📦 Detected workspace configuration");
|
|
2039
2056
|
return workspaceBuildCommand(loadedConfig.workspace, options);
|
|
2040
2057
|
}
|
|
2041
2058
|
}
|
|
@@ -2043,21 +2060,21 @@ async function buildCommand(options) {
|
|
|
2043
2060
|
const resolved = resolveProviders(config$1, options);
|
|
2044
2061
|
const productionConfigFromGkm = getProductionConfigFromGkm(config$1);
|
|
2045
2062
|
const production = normalizeProductionConfig(options.production ?? false, productionConfigFromGkm);
|
|
2046
|
-
if (production) logger$
|
|
2047
|
-
logger$
|
|
2048
|
-
logger$
|
|
2049
|
-
if (config$1.functions) logger$
|
|
2050
|
-
if (config$1.crons) logger$
|
|
2051
|
-
if (config$1.subscribers) logger$
|
|
2052
|
-
logger$
|
|
2063
|
+
if (production) logger$9.log(`🏭 Building for PRODUCTION`);
|
|
2064
|
+
logger$9.log(`Building with providers: ${resolved.providers.join(", ")}`);
|
|
2065
|
+
logger$9.log(`Loading routes from: ${config$1.routes}`);
|
|
2066
|
+
if (config$1.functions) logger$9.log(`Loading functions from: ${config$1.functions}`);
|
|
2067
|
+
if (config$1.crons) logger$9.log(`Loading crons from: ${config$1.crons}`);
|
|
2068
|
+
if (config$1.subscribers) logger$9.log(`Loading subscribers from: ${config$1.subscribers}`);
|
|
2069
|
+
logger$9.log(`Using envParser: ${config$1.envParser}`);
|
|
2053
2070
|
const { path: envParserPath, importPattern: envParserImportPattern } = parseModuleConfig(config$1.envParser, "envParser");
|
|
2054
2071
|
const { path: loggerPath, importPattern: loggerImportPattern } = parseModuleConfig(config$1.logger, "logger");
|
|
2055
2072
|
const telescope = production ? void 0 : normalizeTelescopeConfig(config$1.telescope);
|
|
2056
|
-
if (telescope) logger$
|
|
2073
|
+
if (telescope) logger$9.log(`🔭 Telescope enabled at ${telescope.path}`);
|
|
2057
2074
|
const studio = production ? void 0 : normalizeStudioConfig(config$1.studio);
|
|
2058
|
-
if (studio) logger$
|
|
2075
|
+
if (studio) logger$9.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
2059
2076
|
const hooks = normalizeHooksConfig(config$1.hooks);
|
|
2060
|
-
if (hooks) logger$
|
|
2077
|
+
if (hooks) logger$9.log(`🪝 Server hooks enabled`);
|
|
2061
2078
|
const services = config$1.docker?.compose?.services;
|
|
2062
2079
|
const dockerServices = services ? Array.isArray(services) ? {
|
|
2063
2080
|
postgres: services.includes("postgres"),
|
|
@@ -2089,12 +2106,12 @@ async function buildCommand(options) {
|
|
|
2089
2106
|
config$1.crons ? cronGenerator.load(config$1.crons) : [],
|
|
2090
2107
|
config$1.subscribers ? subscriberGenerator.load(config$1.subscribers) : []
|
|
2091
2108
|
]);
|
|
2092
|
-
logger$
|
|
2093
|
-
logger$
|
|
2094
|
-
logger$
|
|
2095
|
-
logger$
|
|
2109
|
+
logger$9.log(`Found ${allEndpoints.length} endpoints`);
|
|
2110
|
+
logger$9.log(`Found ${allFunctions.length} functions`);
|
|
2111
|
+
logger$9.log(`Found ${allCrons.length} crons`);
|
|
2112
|
+
logger$9.log(`Found ${allSubscribers.length} subscribers`);
|
|
2096
2113
|
if (allEndpoints.length === 0 && allFunctions.length === 0 && allCrons.length === 0 && allSubscribers.length === 0) {
|
|
2097
|
-
logger$
|
|
2114
|
+
logger$9.log("No endpoints, functions, crons, or subscribers found to process");
|
|
2098
2115
|
return {};
|
|
2099
2116
|
}
|
|
2100
2117
|
const rootOutputDir = join(process.cwd(), ".gkm");
|
|
@@ -2109,7 +2126,7 @@ async function buildCommand(options) {
|
|
|
2109
2126
|
async function buildForProvider(provider, context, rootOutputDir, endpointGenerator, functionGenerator, cronGenerator, subscriberGenerator, endpoints, functions, crons, subscribers, enableOpenApi, skipBundle, stage) {
|
|
2110
2127
|
const outputDir = join(process.cwd(), ".gkm", provider);
|
|
2111
2128
|
await mkdir(outputDir, { recursive: true });
|
|
2112
|
-
logger$
|
|
2129
|
+
logger$9.log(`\nGenerating handlers for provider: ${provider}`);
|
|
2113
2130
|
const [routes, functionInfos, cronInfos, subscriberInfos] = await Promise.all([
|
|
2114
2131
|
endpointGenerator.build(context, endpoints, outputDir, {
|
|
2115
2132
|
provider,
|
|
@@ -2119,7 +2136,7 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
2119
2136
|
cronGenerator.build(context, crons, outputDir, { provider }),
|
|
2120
2137
|
subscriberGenerator.build(context, subscribers, outputDir, { provider })
|
|
2121
2138
|
]);
|
|
2122
|
-
logger$
|
|
2139
|
+
logger$9.log(`Generated ${routes.length} routes, ${functionInfos.length} functions, ${cronInfos.length} crons, ${subscriberInfos.length} subscribers for ${provider}`);
|
|
2123
2140
|
if (provider === "server") {
|
|
2124
2141
|
const routeMetadata = await Promise.all(endpoints.map(async ({ construct }) => ({
|
|
2125
2142
|
path: construct._path,
|
|
@@ -2134,8 +2151,8 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
2134
2151
|
await generateServerManifest(rootOutputDir, appInfo, routeMetadata, subscriberInfos);
|
|
2135
2152
|
let masterKey;
|
|
2136
2153
|
if (context.production?.bundle && !skipBundle) {
|
|
2137
|
-
logger$
|
|
2138
|
-
const { bundleServer } = await import("./bundler-
|
|
2154
|
+
logger$9.log(`\n📦 Bundling production server...`);
|
|
2155
|
+
const { bundleServer } = await import("./bundler-DQYjKFPm.mjs");
|
|
2139
2156
|
const allConstructs = [
|
|
2140
2157
|
...endpoints.map((e) => e.construct),
|
|
2141
2158
|
...functions.map((f) => f.construct),
|
|
@@ -2154,10 +2171,10 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
2154
2171
|
dockerServices
|
|
2155
2172
|
});
|
|
2156
2173
|
masterKey = bundleResult.masterKey;
|
|
2157
|
-
logger$
|
|
2174
|
+
logger$9.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
|
|
2158
2175
|
if (masterKey) {
|
|
2159
|
-
logger$
|
|
2160
|
-
logger$
|
|
2176
|
+
logger$9.log(`\n🔐 Secrets encrypted for deployment`);
|
|
2177
|
+
logger$9.log(` Deploy with: GKM_MASTER_KEY=${masterKey}`);
|
|
2161
2178
|
}
|
|
2162
2179
|
}
|
|
2163
2180
|
return { masterKey };
|
|
@@ -2194,17 +2211,17 @@ async function workspaceBuildCommand(workspace, options) {
|
|
|
2194
2211
|
const apps = Object.entries(workspace.apps);
|
|
2195
2212
|
const backendApps = apps.filter(([, app]) => app.type === "backend");
|
|
2196
2213
|
const frontendApps = apps.filter(([, app]) => app.type === "frontend");
|
|
2197
|
-
logger$
|
|
2198
|
-
logger$
|
|
2199
|
-
logger$
|
|
2200
|
-
if (options.production) logger$
|
|
2214
|
+
logger$9.log(`\n🏗️ Building workspace: ${workspace.name}`);
|
|
2215
|
+
logger$9.log(` Backend apps: ${backendApps.map(([name$1]) => name$1).join(", ") || "none"}`);
|
|
2216
|
+
logger$9.log(` Frontend apps: ${frontendApps.map(([name$1]) => name$1).join(", ") || "none"}`);
|
|
2217
|
+
if (options.production) logger$9.log(` 🏭 Production mode enabled`);
|
|
2201
2218
|
const buildOrder = getAppBuildOrder(workspace);
|
|
2202
|
-
logger$
|
|
2219
|
+
logger$9.log(` Build order: ${buildOrder.join(" → ")}`);
|
|
2203
2220
|
const pm = detectPackageManager$2();
|
|
2204
|
-
logger$
|
|
2221
|
+
logger$9.log(`\n📦 Using ${pm} with Turbo for parallel builds...\n`);
|
|
2205
2222
|
try {
|
|
2206
2223
|
const turboCommand = getTurboCommand(pm);
|
|
2207
|
-
logger$
|
|
2224
|
+
logger$9.log(`Running: ${turboCommand}`);
|
|
2208
2225
|
await new Promise((resolve$1, reject) => {
|
|
2209
2226
|
const child = spawn(turboCommand, {
|
|
2210
2227
|
shell: true,
|
|
@@ -2232,15 +2249,15 @@ async function workspaceBuildCommand(workspace, options) {
|
|
|
2232
2249
|
outputPath
|
|
2233
2250
|
});
|
|
2234
2251
|
}
|
|
2235
|
-
logger$
|
|
2236
|
-
logger$
|
|
2252
|
+
logger$9.log(`\n✅ Workspace build complete!`);
|
|
2253
|
+
logger$9.log(`\n📋 Build Summary:`);
|
|
2237
2254
|
for (const result of results) {
|
|
2238
2255
|
const icon = result.type === "backend" ? "⚙️" : "🌐";
|
|
2239
|
-
logger$
|
|
2256
|
+
logger$9.log(` ${icon} ${result.appName}: ${result.outputPath || "built"}`);
|
|
2240
2257
|
}
|
|
2241
2258
|
} catch (error) {
|
|
2242
2259
|
const errorMessage = error instanceof Error ? error.message : "Build failed";
|
|
2243
|
-
logger$
|
|
2260
|
+
logger$9.log(`\n❌ Build failed: ${errorMessage}`);
|
|
2244
2261
|
for (const [appName, app] of apps) results.push({
|
|
2245
2262
|
appName,
|
|
2246
2263
|
type: app.type,
|
|
@@ -2414,11 +2431,11 @@ async function createDnsProvider(options) {
|
|
|
2414
2431
|
if (isDnsProvider(config$1.provider)) return config$1.provider;
|
|
2415
2432
|
const provider = config$1.provider;
|
|
2416
2433
|
if (provider === "hostinger") {
|
|
2417
|
-
const { HostingerProvider } = await import("./HostingerProvider-
|
|
2434
|
+
const { HostingerProvider } = await import("./HostingerProvider-ANWchdiK.mjs");
|
|
2418
2435
|
return new HostingerProvider();
|
|
2419
2436
|
}
|
|
2420
2437
|
if (provider === "route53") {
|
|
2421
|
-
const { Route53Provider } = await import("./Route53Provider-
|
|
2438
|
+
const { Route53Provider } = await import("./Route53Provider-QoPgcXxn.mjs");
|
|
2422
2439
|
const route53Config = config$1;
|
|
2423
2440
|
return new Route53Provider({
|
|
2424
2441
|
region: route53Config.region,
|
|
@@ -2432,7 +2449,7 @@ async function createDnsProvider(options) {
|
|
|
2432
2449
|
|
|
2433
2450
|
//#endregion
|
|
2434
2451
|
//#region src/deploy/dns/index.ts
|
|
2435
|
-
const logger$
|
|
2452
|
+
const logger$8 = console;
|
|
2436
2453
|
/**
|
|
2437
2454
|
* Check if DNS config is legacy format (single domain with `domain` property)
|
|
2438
2455
|
*/
|
|
@@ -2469,7 +2486,7 @@ function groupHostnamesByDomain(appHostnames, dnsConfig) {
|
|
|
2469
2486
|
for (const [appName, hostname] of appHostnames) {
|
|
2470
2487
|
const rootDomain = findRootDomain(hostname, dnsConfig);
|
|
2471
2488
|
if (!rootDomain) {
|
|
2472
|
-
logger$
|
|
2489
|
+
logger$8.log(` ⚠ No DNS config found for hostname: ${hostname}`);
|
|
2473
2490
|
continue;
|
|
2474
2491
|
}
|
|
2475
2492
|
if (!grouped.has(rootDomain)) grouped.set(rootDomain, /* @__PURE__ */ new Map());
|
|
@@ -2521,10 +2538,10 @@ function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
|
|
|
2521
2538
|
* Print DNS records table
|
|
2522
2539
|
*/
|
|
2523
2540
|
function printDnsRecordsTable(records, rootDomain) {
|
|
2524
|
-
logger$
|
|
2525
|
-
logger$
|
|
2526
|
-
logger$
|
|
2527
|
-
logger$
|
|
2541
|
+
logger$8.log(`\n 📋 DNS Records for ${rootDomain}:`);
|
|
2542
|
+
logger$8.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
|
|
2543
|
+
logger$8.log(" │ Subdomain │ Type │ Value │ Status │");
|
|
2544
|
+
logger$8.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
|
|
2528
2545
|
for (const record of records) {
|
|
2529
2546
|
const subdomain = record.subdomain.padEnd(35);
|
|
2530
2547
|
const type$1 = record.type.padEnd(4);
|
|
@@ -2534,18 +2551,18 @@ function printDnsRecordsTable(records, rootDomain) {
|
|
|
2534
2551
|
else if (record.created) status = "✓ new";
|
|
2535
2552
|
else if (record.existed) status = "✓";
|
|
2536
2553
|
else status = "?";
|
|
2537
|
-
logger$
|
|
2554
|
+
logger$8.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
|
|
2538
2555
|
}
|
|
2539
|
-
logger$
|
|
2556
|
+
logger$8.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
|
|
2540
2557
|
}
|
|
2541
2558
|
/**
|
|
2542
2559
|
* Print DNS records in a simple format for manual setup
|
|
2543
2560
|
*/
|
|
2544
2561
|
function printDnsRecordsSimple(records, rootDomain) {
|
|
2545
|
-
logger$
|
|
2546
|
-
logger$
|
|
2547
|
-
for (const record of records) logger$
|
|
2548
|
-
logger$
|
|
2562
|
+
logger$8.log("\n 📋 Required DNS Records:");
|
|
2563
|
+
logger$8.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
|
|
2564
|
+
for (const record of records) logger$8.log(` ${record.subdomain} → ${record.value} (A record)`);
|
|
2565
|
+
logger$8.log("");
|
|
2549
2566
|
}
|
|
2550
2567
|
/**
|
|
2551
2568
|
* Create DNS records for a single domain using its configured provider
|
|
@@ -2557,7 +2574,7 @@ async function createDnsRecordsForDomain(records, rootDomain, providerConfig) {
|
|
|
2557
2574
|
provider = await createDnsProvider({ config: providerConfig });
|
|
2558
2575
|
} catch (error) {
|
|
2559
2576
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2560
|
-
logger$
|
|
2577
|
+
logger$8.log(` ⚠ Failed to create DNS provider for ${rootDomain}: ${message}`);
|
|
2561
2578
|
return records.map((r) => ({
|
|
2562
2579
|
...r,
|
|
2563
2580
|
error: message
|
|
@@ -2611,7 +2628,7 @@ async function createDnsRecordsForDomain(records, rootDomain, providerConfig) {
|
|
|
2611
2628
|
}
|
|
2612
2629
|
} catch (error) {
|
|
2613
2630
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2614
|
-
logger$
|
|
2631
|
+
logger$8.log(` ⚠ Failed to create DNS records for ${rootDomain}: ${message}`);
|
|
2615
2632
|
return records.map((r) => ({
|
|
2616
2633
|
hostname: r.hostname,
|
|
2617
2634
|
subdomain: r.subdomain,
|
|
@@ -2638,20 +2655,20 @@ async function createDnsRecordsForDomain(records, rootDomain, providerConfig) {
|
|
|
2638
2655
|
async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint, state) {
|
|
2639
2656
|
if (!dnsConfig) return null;
|
|
2640
2657
|
const normalizedConfig = normalizeDnsConfig(dnsConfig);
|
|
2641
|
-
logger$
|
|
2658
|
+
logger$8.log("\n🌐 Setting up DNS records...");
|
|
2642
2659
|
let serverIp;
|
|
2643
2660
|
try {
|
|
2644
2661
|
const endpointUrl = new URL(dokployEndpoint);
|
|
2645
2662
|
serverIp = await resolveHostnameToIp(endpointUrl.hostname);
|
|
2646
|
-
logger$
|
|
2663
|
+
logger$8.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
|
|
2647
2664
|
} catch (error) {
|
|
2648
2665
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2649
|
-
logger$
|
|
2666
|
+
logger$8.log(` ⚠ Failed to resolve server IP: ${message}`);
|
|
2650
2667
|
return null;
|
|
2651
2668
|
}
|
|
2652
2669
|
const groupedHostnames = groupHostnamesByDomain(appHostnames, normalizedConfig);
|
|
2653
2670
|
if (groupedHostnames.size === 0) {
|
|
2654
|
-
logger$
|
|
2671
|
+
logger$8.log(" No DNS records needed (no hostnames match configured domains)");
|
|
2655
2672
|
return {
|
|
2656
2673
|
records: [],
|
|
2657
2674
|
success: true,
|
|
@@ -2663,22 +2680,22 @@ async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint, state) {
|
|
|
2663
2680
|
for (const [rootDomain, domainHostnames] of groupedHostnames) {
|
|
2664
2681
|
const providerConfig = normalizedConfig[rootDomain];
|
|
2665
2682
|
if (!providerConfig) {
|
|
2666
|
-
logger$
|
|
2683
|
+
logger$8.log(` ⚠ No provider config for ${rootDomain}`);
|
|
2667
2684
|
continue;
|
|
2668
2685
|
}
|
|
2669
2686
|
const providerName = typeof providerConfig.provider === "string" ? providerConfig.provider : "custom";
|
|
2670
2687
|
const requiredRecords = generateRequiredRecords(domainHostnames, rootDomain, serverIp);
|
|
2671
2688
|
if (requiredRecords.length === 0) continue;
|
|
2672
|
-
logger$
|
|
2689
|
+
logger$8.log(` Creating DNS records for ${rootDomain} (${providerName})...`);
|
|
2673
2690
|
const domainRecords = await createDnsRecordsForDomain(requiredRecords, rootDomain, providerConfig);
|
|
2674
2691
|
allRecords.push(...domainRecords);
|
|
2675
2692
|
const created = domainRecords.filter((r) => r.created).length;
|
|
2676
2693
|
const existed = domainRecords.filter((r) => r.existed).length;
|
|
2677
2694
|
const failed = domainRecords.filter((r) => r.error).length;
|
|
2678
|
-
if (created > 0) logger$
|
|
2679
|
-
if (existed > 0) logger$
|
|
2695
|
+
if (created > 0) logger$8.log(` ✓ Created ${created} DNS record(s) for ${rootDomain}`);
|
|
2696
|
+
if (existed > 0) logger$8.log(` ✓ ${existed} record(s) already exist for ${rootDomain}`);
|
|
2680
2697
|
if (failed > 0) {
|
|
2681
|
-
logger$
|
|
2698
|
+
logger$8.log(` ⚠ ${failed} record(s) failed for ${rootDomain}`);
|
|
2682
2699
|
hasFailures = true;
|
|
2683
2700
|
}
|
|
2684
2701
|
if (state) {
|
|
@@ -2715,10 +2732,10 @@ async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint, state) {
|
|
|
2715
2732
|
*/
|
|
2716
2733
|
async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
2717
2734
|
const results = [];
|
|
2718
|
-
logger$
|
|
2735
|
+
logger$8.log("\n🔍 Verifying DNS records...");
|
|
2719
2736
|
for (const [appName, hostname] of appHostnames) {
|
|
2720
2737
|
if (isDnsVerified(state, hostname, serverIp)) {
|
|
2721
|
-
logger$
|
|
2738
|
+
logger$8.log(` ✓ ${hostname} (previously verified)`);
|
|
2722
2739
|
results.push({
|
|
2723
2740
|
hostname,
|
|
2724
2741
|
appName,
|
|
@@ -2732,7 +2749,7 @@ async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
|
2732
2749
|
const resolvedIp = await resolveHostnameToIp(hostname);
|
|
2733
2750
|
if (resolvedIp === serverIp) {
|
|
2734
2751
|
setDnsVerification(state, hostname, serverIp);
|
|
2735
|
-
logger$
|
|
2752
|
+
logger$8.log(` ✓ ${hostname} → ${resolvedIp}`);
|
|
2736
2753
|
results.push({
|
|
2737
2754
|
hostname,
|
|
2738
2755
|
appName,
|
|
@@ -2741,7 +2758,7 @@ async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
|
2741
2758
|
expectedIp: serverIp
|
|
2742
2759
|
});
|
|
2743
2760
|
} else {
|
|
2744
|
-
logger$
|
|
2761
|
+
logger$8.log(` ⚠ ${hostname} resolves to ${resolvedIp}, expected ${serverIp}`);
|
|
2745
2762
|
results.push({
|
|
2746
2763
|
hostname,
|
|
2747
2764
|
appName,
|
|
@@ -2752,7 +2769,7 @@ async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
|
2752
2769
|
}
|
|
2753
2770
|
} catch (error) {
|
|
2754
2771
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2755
|
-
logger$
|
|
2772
|
+
logger$8.log(` ⚠ ${hostname} DNS not propagated (${message})`);
|
|
2756
2773
|
results.push({
|
|
2757
2774
|
hostname,
|
|
2758
2775
|
appName,
|
|
@@ -2766,9 +2783,9 @@ async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
|
2766
2783
|
const skipped = results.filter((r) => r.skipped).length;
|
|
2767
2784
|
const pending = results.filter((r) => !r.verified).length;
|
|
2768
2785
|
if (pending > 0) {
|
|
2769
|
-
logger$
|
|
2770
|
-
logger$
|
|
2771
|
-
} else if (skipped > 0) logger$
|
|
2786
|
+
logger$8.log(`\n ${verified} verified, ${pending} pending propagation`);
|
|
2787
|
+
logger$8.log(" DNS changes may take 5-30 minutes to propagate");
|
|
2788
|
+
} else if (skipped > 0) logger$8.log(` ${verified} verified (${skipped} from cache)`);
|
|
2772
2789
|
return results;
|
|
2773
2790
|
}
|
|
2774
2791
|
|
|
@@ -3844,7 +3861,7 @@ CMD ["node", "index.mjs"]
|
|
|
3844
3861
|
|
|
3845
3862
|
//#endregion
|
|
3846
3863
|
//#region src/docker/index.ts
|
|
3847
|
-
const logger$
|
|
3864
|
+
const logger$7 = console;
|
|
3848
3865
|
/**
|
|
3849
3866
|
* Docker command implementation
|
|
3850
3867
|
* Generates Dockerfile, docker-compose.yml, and related files
|
|
@@ -3855,7 +3872,7 @@ const logger$5 = console;
|
|
|
3855
3872
|
async function dockerCommand(options) {
|
|
3856
3873
|
const loadedConfig = await loadWorkspaceConfig();
|
|
3857
3874
|
if (loadedConfig.type === "workspace") {
|
|
3858
|
-
logger$
|
|
3875
|
+
logger$7.log("📦 Detected workspace configuration");
|
|
3859
3876
|
return workspaceDockerCommand(loadedConfig.workspace, options);
|
|
3860
3877
|
}
|
|
3861
3878
|
const config$1 = await loadConfig();
|
|
@@ -3876,14 +3893,14 @@ async function dockerCommand(options) {
|
|
|
3876
3893
|
let useTurbo = options.turbo ?? false;
|
|
3877
3894
|
if (inMonorepo && !useSlim) if (hasTurbo) {
|
|
3878
3895
|
useTurbo = true;
|
|
3879
|
-
logger$
|
|
3896
|
+
logger$7.log(" Detected monorepo with turbo.json - using turbo prune");
|
|
3880
3897
|
} else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
|
|
3881
3898
|
let turboPackage = options.turboPackage ?? dockerConfig.imageName;
|
|
3882
3899
|
if (useTurbo && !options.turboPackage) try {
|
|
3883
3900
|
const pkg$1 = __require(`${process.cwd()}/package.json`);
|
|
3884
3901
|
if (pkg$1.name) {
|
|
3885
3902
|
turboPackage = pkg$1.name;
|
|
3886
|
-
logger$
|
|
3903
|
+
logger$7.log(` Turbo package: ${turboPackage}`);
|
|
3887
3904
|
}
|
|
3888
3905
|
} catch {}
|
|
3889
3906
|
const templateOptions = {
|
|
@@ -3900,7 +3917,7 @@ async function dockerCommand(options) {
|
|
|
3900
3917
|
const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
|
|
3901
3918
|
const dockerfilePath = join(dockerDir, "Dockerfile");
|
|
3902
3919
|
await writeFile(dockerfilePath, dockerfile);
|
|
3903
|
-
logger$
|
|
3920
|
+
logger$7.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
|
|
3904
3921
|
const composeOptions = {
|
|
3905
3922
|
imageName: dockerConfig.imageName,
|
|
3906
3923
|
registry: options.registry ?? dockerConfig.registry,
|
|
@@ -3912,15 +3929,15 @@ async function dockerCommand(options) {
|
|
|
3912
3929
|
const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
|
|
3913
3930
|
const composePath = join(dockerDir, "docker-compose.yml");
|
|
3914
3931
|
await writeFile(composePath, dockerCompose);
|
|
3915
|
-
logger$
|
|
3932
|
+
logger$7.log("Generated: .gkm/docker/docker-compose.yml");
|
|
3916
3933
|
const dockerignore = generateDockerignore();
|
|
3917
3934
|
const dockerignorePath = join(process.cwd(), ".dockerignore");
|
|
3918
3935
|
await writeFile(dockerignorePath, dockerignore);
|
|
3919
|
-
logger$
|
|
3936
|
+
logger$7.log("Generated: .dockerignore (project root)");
|
|
3920
3937
|
const entrypoint = generateDockerEntrypoint();
|
|
3921
3938
|
const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
|
|
3922
3939
|
await writeFile(entrypointPath, entrypoint);
|
|
3923
|
-
logger$
|
|
3940
|
+
logger$7.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
3924
3941
|
const result = {
|
|
3925
3942
|
dockerfile: dockerfilePath,
|
|
3926
3943
|
dockerCompose: composePath,
|
|
@@ -3939,13 +3956,13 @@ async function dockerCommand(options) {
|
|
|
3939
3956
|
function ensureLockfile(cwd) {
|
|
3940
3957
|
const lockfilePath = findLockfilePath(cwd);
|
|
3941
3958
|
if (!lockfilePath) {
|
|
3942
|
-
logger$
|
|
3959
|
+
logger$7.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
|
|
3943
3960
|
return null;
|
|
3944
3961
|
}
|
|
3945
3962
|
const lockfileName = basename(lockfilePath);
|
|
3946
3963
|
const localLockfile = join(cwd, lockfileName);
|
|
3947
3964
|
if (lockfilePath === localLockfile) return null;
|
|
3948
|
-
logger$
|
|
3965
|
+
logger$7.log(` Copying ${lockfileName} from monorepo root...`);
|
|
3949
3966
|
copyFileSync(lockfilePath, localLockfile);
|
|
3950
3967
|
return () => {
|
|
3951
3968
|
try {
|
|
@@ -3961,7 +3978,7 @@ async function buildDockerImage(imageName, options) {
|
|
|
3961
3978
|
const tag = options.tag ?? "latest";
|
|
3962
3979
|
const registry = options.registry;
|
|
3963
3980
|
const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
|
|
3964
|
-
logger$
|
|
3981
|
+
logger$7.log(`\n🐳 Building Docker image: ${fullImageName}`);
|
|
3965
3982
|
const cwd = process.cwd();
|
|
3966
3983
|
const cleanup = ensureLockfile(cwd);
|
|
3967
3984
|
try {
|
|
@@ -3973,7 +3990,7 @@ async function buildDockerImage(imageName, options) {
|
|
|
3973
3990
|
DOCKER_BUILDKIT: "1"
|
|
3974
3991
|
}
|
|
3975
3992
|
});
|
|
3976
|
-
logger$
|
|
3993
|
+
logger$7.log(`✅ Docker image built: ${fullImageName}`);
|
|
3977
3994
|
} catch (error) {
|
|
3978
3995
|
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
3979
3996
|
} finally {
|
|
@@ -3988,13 +4005,13 @@ async function pushDockerImage(imageName, options) {
|
|
|
3988
4005
|
const registry = options.registry;
|
|
3989
4006
|
if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
|
|
3990
4007
|
const fullImageName = `${registry}/${imageName}:${tag}`;
|
|
3991
|
-
logger$
|
|
4008
|
+
logger$7.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
|
|
3992
4009
|
try {
|
|
3993
4010
|
execSync(`docker push ${fullImageName}`, {
|
|
3994
4011
|
cwd: process.cwd(),
|
|
3995
4012
|
stdio: "inherit"
|
|
3996
4013
|
});
|
|
3997
|
-
logger$
|
|
4014
|
+
logger$7.log(`✅ Docker image pushed: ${fullImageName}`);
|
|
3998
4015
|
} catch (error) {
|
|
3999
4016
|
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
4000
4017
|
}
|
|
@@ -4020,11 +4037,11 @@ function getAppPackageName(appPath) {
|
|
|
4020
4037
|
async function workspaceDockerCommand(workspace, options) {
|
|
4021
4038
|
const results = [];
|
|
4022
4039
|
const apps = Object.entries(workspace.apps);
|
|
4023
|
-
logger$
|
|
4040
|
+
logger$7.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
|
|
4024
4041
|
const dockerDir = join(workspace.root, ".gkm", "docker");
|
|
4025
4042
|
await mkdir(dockerDir, { recursive: true });
|
|
4026
4043
|
const packageManager = detectPackageManager$1(workspace.root);
|
|
4027
|
-
logger$
|
|
4044
|
+
logger$7.log(` Package manager: ${packageManager}`);
|
|
4028
4045
|
for (const [appName, app] of apps) {
|
|
4029
4046
|
const appPath = app.path;
|
|
4030
4047
|
const fullAppPath = join(workspace.root, appPath);
|
|
@@ -4032,7 +4049,7 @@ async function workspaceDockerCommand(workspace, options) {
|
|
|
4032
4049
|
const imageName = appName;
|
|
4033
4050
|
const hasEntry = !!app.entry;
|
|
4034
4051
|
const buildType = hasEntry ? "entry" : app.type;
|
|
4035
|
-
logger$
|
|
4052
|
+
logger$7.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
|
|
4036
4053
|
let dockerfile;
|
|
4037
4054
|
if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
|
|
4038
4055
|
imageName,
|
|
@@ -4063,7 +4080,7 @@ async function workspaceDockerCommand(workspace, options) {
|
|
|
4063
4080
|
});
|
|
4064
4081
|
const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
|
|
4065
4082
|
await writeFile(dockerfilePath, dockerfile);
|
|
4066
|
-
logger$
|
|
4083
|
+
logger$7.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
|
|
4067
4084
|
results.push({
|
|
4068
4085
|
appName,
|
|
4069
4086
|
type: app.type,
|
|
@@ -4074,19 +4091,19 @@ async function workspaceDockerCommand(workspace, options) {
|
|
|
4074
4091
|
const dockerignore = generateDockerignore();
|
|
4075
4092
|
const dockerignorePath = join(workspace.root, ".dockerignore");
|
|
4076
4093
|
await writeFile(dockerignorePath, dockerignore);
|
|
4077
|
-
logger$
|
|
4094
|
+
logger$7.log(`\n Generated: .dockerignore (workspace root)`);
|
|
4078
4095
|
const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
|
|
4079
4096
|
const composePath = join(dockerDir, "docker-compose.yml");
|
|
4080
4097
|
await writeFile(composePath, dockerCompose);
|
|
4081
|
-
logger$
|
|
4082
|
-
logger$
|
|
4083
|
-
logger$
|
|
4098
|
+
logger$7.log(` Generated: .gkm/docker/docker-compose.yml`);
|
|
4099
|
+
logger$7.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
|
|
4100
|
+
logger$7.log("\n📋 Build commands:");
|
|
4084
4101
|
for (const result of results) {
|
|
4085
4102
|
const icon = result.type === "backend" ? "⚙️" : "🌐";
|
|
4086
|
-
logger$
|
|
4103
|
+
logger$7.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
|
|
4087
4104
|
}
|
|
4088
|
-
logger$
|
|
4089
|
-
logger$
|
|
4105
|
+
logger$7.log("\n📋 Run all services:");
|
|
4106
|
+
logger$7.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
|
|
4090
4107
|
return {
|
|
4091
4108
|
apps: results,
|
|
4092
4109
|
dockerCompose: composePath,
|
|
@@ -4126,7 +4143,7 @@ function getAppNameFromPackageJson() {
|
|
|
4126
4143
|
} catch {}
|
|
4127
4144
|
return void 0;
|
|
4128
4145
|
}
|
|
4129
|
-
const logger$
|
|
4146
|
+
const logger$6 = console;
|
|
4130
4147
|
/**
|
|
4131
4148
|
* Get the full image reference
|
|
4132
4149
|
*/
|
|
@@ -4141,18 +4158,18 @@ function getImageRef(registry, imageName, tag) {
|
|
|
4141
4158
|
* @param buildArgs - Build arguments to pass to docker build
|
|
4142
4159
|
*/
|
|
4143
4160
|
async function buildImage(imageRef, appName, buildArgs) {
|
|
4144
|
-
logger$
|
|
4161
|
+
logger$6.log(`\n🔨 Building Docker image: ${imageRef}`);
|
|
4145
4162
|
const cwd = process.cwd();
|
|
4146
4163
|
const lockfilePath = findLockfilePath(cwd);
|
|
4147
4164
|
const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
|
|
4148
4165
|
const inMonorepo = lockfileDir !== cwd;
|
|
4149
|
-
if (appName || inMonorepo) logger$
|
|
4150
|
-
else logger$
|
|
4166
|
+
if (appName || inMonorepo) logger$6.log(" Generating Dockerfile for monorepo (turbo prune)...");
|
|
4167
|
+
else logger$6.log(" Generating Dockerfile...");
|
|
4151
4168
|
await dockerCommand({});
|
|
4152
4169
|
const dockerfileSuffix = appName ? `.${appName}` : "";
|
|
4153
4170
|
const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
|
|
4154
4171
|
const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
|
|
4155
|
-
if (buildCwd !== cwd) logger$
|
|
4172
|
+
if (buildCwd !== cwd) logger$6.log(` Building from workspace root: ${buildCwd}`);
|
|
4156
4173
|
const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
|
|
4157
4174
|
try {
|
|
4158
4175
|
const cmd = [
|
|
@@ -4171,7 +4188,7 @@ async function buildImage(imageRef, appName, buildArgs) {
|
|
|
4171
4188
|
DOCKER_BUILDKIT: "1"
|
|
4172
4189
|
}
|
|
4173
4190
|
});
|
|
4174
|
-
logger$
|
|
4191
|
+
logger$6.log(`✅ Image built: ${imageRef}`);
|
|
4175
4192
|
} catch (error) {
|
|
4176
4193
|
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
4177
4194
|
}
|
|
@@ -4180,13 +4197,13 @@ async function buildImage(imageRef, appName, buildArgs) {
|
|
|
4180
4197
|
* Push Docker image to registry
|
|
4181
4198
|
*/
|
|
4182
4199
|
async function pushImage(imageRef) {
|
|
4183
|
-
logger$
|
|
4200
|
+
logger$6.log(`\n☁️ Pushing image: ${imageRef}`);
|
|
4184
4201
|
try {
|
|
4185
4202
|
execSync(`docker push ${imageRef}`, {
|
|
4186
4203
|
cwd: process.cwd(),
|
|
4187
4204
|
stdio: "inherit"
|
|
4188
4205
|
});
|
|
4189
|
-
logger$
|
|
4206
|
+
logger$6.log(`✅ Image pushed: ${imageRef}`);
|
|
4190
4207
|
} catch (error) {
|
|
4191
4208
|
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
4192
4209
|
}
|
|
@@ -4199,17 +4216,17 @@ async function deployDocker(options) {
|
|
|
4199
4216
|
const imageName = config$1.imageName;
|
|
4200
4217
|
const imageRef = getImageRef(config$1.registry, imageName, tag);
|
|
4201
4218
|
await buildImage(imageRef, config$1.appName, buildArgs);
|
|
4202
|
-
if (!skipPush) if (!config$1.registry) logger$
|
|
4219
|
+
if (!skipPush) if (!config$1.registry) logger$6.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
|
|
4203
4220
|
else await pushImage(imageRef);
|
|
4204
|
-
logger$
|
|
4205
|
-
logger$
|
|
4206
|
-
logger$
|
|
4207
|
-
logger$
|
|
4221
|
+
logger$6.log("\n✅ Docker deployment ready!");
|
|
4222
|
+
logger$6.log(`\n📋 Deployment details:`);
|
|
4223
|
+
logger$6.log(` Image: ${imageRef}`);
|
|
4224
|
+
logger$6.log(` Stage: ${stage}`);
|
|
4208
4225
|
if (masterKey) {
|
|
4209
|
-
logger$
|
|
4210
|
-
logger$
|
|
4211
|
-
logger$
|
|
4212
|
-
logger$
|
|
4226
|
+
logger$6.log(`\n🔐 Deploy with this environment variable:`);
|
|
4227
|
+
logger$6.log(` GKM_MASTER_KEY=${masterKey}`);
|
|
4228
|
+
logger$6.log("\n Example docker run:");
|
|
4229
|
+
logger$6.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
|
|
4213
4230
|
}
|
|
4214
4231
|
return {
|
|
4215
4232
|
imageRef,
|
|
@@ -4236,7 +4253,7 @@ function resolveDockerConfig(config$1) {
|
|
|
4236
4253
|
|
|
4237
4254
|
//#endregion
|
|
4238
4255
|
//#region src/deploy/dokploy.ts
|
|
4239
|
-
const logger$
|
|
4256
|
+
const logger$5 = console;
|
|
4240
4257
|
/**
|
|
4241
4258
|
* Get the Dokploy API token from stored credentials or environment
|
|
4242
4259
|
*/
|
|
@@ -4260,25 +4277,25 @@ async function createApi$1(endpoint) {
|
|
|
4260
4277
|
*/
|
|
4261
4278
|
async function deployDokploy(options) {
|
|
4262
4279
|
const { stage, imageRef, masterKey, config: config$1 } = options;
|
|
4263
|
-
logger$
|
|
4264
|
-
logger$
|
|
4265
|
-
logger$
|
|
4280
|
+
logger$5.log(`\n🎯 Deploying to Dokploy...`);
|
|
4281
|
+
logger$5.log(` Endpoint: ${config$1.endpoint}`);
|
|
4282
|
+
logger$5.log(` Application: ${config$1.applicationId}`);
|
|
4266
4283
|
const api = await createApi$1(config$1.endpoint);
|
|
4267
|
-
logger$
|
|
4284
|
+
logger$5.log(` Configuring Docker image: ${imageRef}`);
|
|
4268
4285
|
const registryOptions = {};
|
|
4269
4286
|
if (config$1.registryId) {
|
|
4270
4287
|
registryOptions.registryId = config$1.registryId;
|
|
4271
|
-
logger$
|
|
4288
|
+
logger$5.log(` Using Dokploy registry: ${config$1.registryId}`);
|
|
4272
4289
|
} else {
|
|
4273
4290
|
const storedRegistryId = await getDokployRegistryId();
|
|
4274
4291
|
if (storedRegistryId) {
|
|
4275
4292
|
registryOptions.registryId = storedRegistryId;
|
|
4276
|
-
logger$
|
|
4293
|
+
logger$5.log(` Using stored Dokploy registry: ${storedRegistryId}`);
|
|
4277
4294
|
} else if (config$1.registryCredentials) {
|
|
4278
4295
|
registryOptions.username = config$1.registryCredentials.username;
|
|
4279
4296
|
registryOptions.password = config$1.registryCredentials.password;
|
|
4280
4297
|
registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
|
|
4281
|
-
logger$
|
|
4298
|
+
logger$5.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
|
|
4282
4299
|
} else {
|
|
4283
4300
|
const username = process.env.DOCKER_REGISTRY_USERNAME;
|
|
4284
4301
|
const password = process.env.DOCKER_REGISTRY_PASSWORD;
|
|
@@ -4287,31 +4304,31 @@ async function deployDokploy(options) {
|
|
|
4287
4304
|
registryOptions.username = username;
|
|
4288
4305
|
registryOptions.password = password;
|
|
4289
4306
|
registryOptions.registryUrl = registryUrl;
|
|
4290
|
-
logger$
|
|
4307
|
+
logger$5.log(` Using registry credentials from environment`);
|
|
4291
4308
|
}
|
|
4292
4309
|
}
|
|
4293
4310
|
}
|
|
4294
4311
|
await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
|
|
4295
|
-
logger$
|
|
4312
|
+
logger$5.log(" ✓ Docker provider configured");
|
|
4296
4313
|
const envVars = {};
|
|
4297
4314
|
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
4298
4315
|
if (Object.keys(envVars).length > 0) {
|
|
4299
|
-
logger$
|
|
4316
|
+
logger$5.log(" Updating environment variables...");
|
|
4300
4317
|
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
4301
4318
|
await api.saveApplicationEnv(config$1.applicationId, envString);
|
|
4302
|
-
logger$
|
|
4319
|
+
logger$5.log(" ✓ Environment variables updated");
|
|
4303
4320
|
}
|
|
4304
|
-
logger$
|
|
4321
|
+
logger$5.log(" Triggering deployment...");
|
|
4305
4322
|
await api.deployApplication(config$1.applicationId);
|
|
4306
|
-
logger$
|
|
4307
|
-
logger$
|
|
4308
|
-
logger$
|
|
4309
|
-
logger$
|
|
4310
|
-
logger$
|
|
4311
|
-
logger$
|
|
4312
|
-
if (masterKey) logger$
|
|
4323
|
+
logger$5.log(" ✓ Deployment triggered");
|
|
4324
|
+
logger$5.log("\n✅ Dokploy deployment initiated!");
|
|
4325
|
+
logger$5.log(`\n📋 Deployment details:`);
|
|
4326
|
+
logger$5.log(` Image: ${imageRef}`);
|
|
4327
|
+
logger$5.log(` Stage: ${stage}`);
|
|
4328
|
+
logger$5.log(` Application ID: ${config$1.applicationId}`);
|
|
4329
|
+
if (masterKey) logger$5.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
4313
4330
|
const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
|
|
4314
|
-
logger$
|
|
4331
|
+
logger$5.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
4315
4332
|
return {
|
|
4316
4333
|
imageRef,
|
|
4317
4334
|
masterKey,
|
|
@@ -4477,7 +4494,7 @@ function validateEnvVars(requiredVars, context) {
|
|
|
4477
4494
|
|
|
4478
4495
|
//#endregion
|
|
4479
4496
|
//#region src/deploy/init.ts
|
|
4480
|
-
const logger$
|
|
4497
|
+
const logger$4 = console;
|
|
4481
4498
|
/**
|
|
4482
4499
|
* Get the Dokploy API token from stored credentials or environment
|
|
4483
4500
|
*/
|
|
@@ -4511,20 +4528,20 @@ async function createApi(endpoint) {
|
|
|
4511
4528
|
async function updateConfig(config$1, cwd = process.cwd()) {
|
|
4512
4529
|
const configPath = join(cwd, "gkm.config.ts");
|
|
4513
4530
|
if (!existsSync(configPath)) {
|
|
4514
|
-
logger$
|
|
4515
|
-
logger$
|
|
4516
|
-
logger$
|
|
4517
|
-
logger$
|
|
4518
|
-
logger$
|
|
4519
|
-
logger$
|
|
4520
|
-
logger$
|
|
4521
|
-
logger$
|
|
4531
|
+
logger$4.warn("\n gkm.config.ts not found. Add this configuration manually:\n");
|
|
4532
|
+
logger$4.log(` providers: {`);
|
|
4533
|
+
logger$4.log(` dokploy: {`);
|
|
4534
|
+
logger$4.log(` endpoint: '${config$1.endpoint}',`);
|
|
4535
|
+
logger$4.log(` projectId: '${config$1.projectId}',`);
|
|
4536
|
+
logger$4.log(` applicationId: '${config$1.applicationId}',`);
|
|
4537
|
+
logger$4.log(` },`);
|
|
4538
|
+
logger$4.log(` },`);
|
|
4522
4539
|
return;
|
|
4523
4540
|
}
|
|
4524
4541
|
const content = await readFile(configPath, "utf-8");
|
|
4525
4542
|
if (content.includes("dokploy:") && content.includes("applicationId:")) {
|
|
4526
|
-
logger$
|
|
4527
|
-
logger$
|
|
4543
|
+
logger$4.log("\n Dokploy config already exists in gkm.config.ts");
|
|
4544
|
+
logger$4.log(" Updating with new values...");
|
|
4528
4545
|
}
|
|
4529
4546
|
const registryLine = config$1.registryId ? `\n\t\t\tregistryId: '${config$1.registryId}',` : "";
|
|
4530
4547
|
const dokployConfigStr = `dokploy: {
|
|
@@ -4541,7 +4558,7 @@ async function updateConfig(config$1, cwd = process.cwd()) {
|
|
|
4541
4558
|
},
|
|
4542
4559
|
});`);
|
|
4543
4560
|
await writeFile(configPath, newContent);
|
|
4544
|
-
logger$
|
|
4561
|
+
logger$4.log("\n ✓ Updated gkm.config.ts with Dokploy configuration");
|
|
4545
4562
|
}
|
|
4546
4563
|
/**
|
|
4547
4564
|
* Initialize Dokploy deployment configuration
|
|
@@ -4550,24 +4567,24 @@ async function deployInitCommand(options) {
|
|
|
4550
4567
|
const { projectName, appName, projectId: existingProjectId, registryId } = options;
|
|
4551
4568
|
const endpoint = await getEndpoint(options.endpoint);
|
|
4552
4569
|
const api = await createApi(endpoint);
|
|
4553
|
-
logger$
|
|
4554
|
-
logger$
|
|
4570
|
+
logger$4.log(`\n🚀 Initializing Dokploy deployment...`);
|
|
4571
|
+
logger$4.log(` Endpoint: ${endpoint}`);
|
|
4555
4572
|
let projectId;
|
|
4556
4573
|
if (existingProjectId) {
|
|
4557
4574
|
projectId = existingProjectId;
|
|
4558
|
-
logger$
|
|
4575
|
+
logger$4.log(`\n📁 Using existing project: ${projectId}`);
|
|
4559
4576
|
} else {
|
|
4560
|
-
logger$
|
|
4577
|
+
logger$4.log(`\n📁 Looking for project: ${projectName}`);
|
|
4561
4578
|
const projects = await api.listProjects();
|
|
4562
4579
|
const existingProject = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
4563
4580
|
if (existingProject) {
|
|
4564
4581
|
projectId = existingProject.projectId;
|
|
4565
|
-
logger$
|
|
4582
|
+
logger$4.log(` Found existing project: ${projectId}`);
|
|
4566
4583
|
} else {
|
|
4567
|
-
logger$
|
|
4584
|
+
logger$4.log(` Creating new project...`);
|
|
4568
4585
|
const result = await api.createProject(projectName);
|
|
4569
4586
|
projectId = result.project.projectId;
|
|
4570
|
-
logger$
|
|
4587
|
+
logger$4.log(` ✓ Created project: ${projectId}`);
|
|
4571
4588
|
}
|
|
4572
4589
|
}
|
|
4573
4590
|
const project = await api.getProject(projectId);
|
|
@@ -4575,23 +4592,23 @@ async function deployInitCommand(options) {
|
|
|
4575
4592
|
const firstEnv = project.environments?.[0];
|
|
4576
4593
|
if (firstEnv) environmentId = firstEnv.environmentId;
|
|
4577
4594
|
else {
|
|
4578
|
-
logger$
|
|
4595
|
+
logger$4.log(` Creating production environment...`);
|
|
4579
4596
|
const env = await api.createEnvironment(projectId, "production");
|
|
4580
4597
|
environmentId = env.environmentId;
|
|
4581
4598
|
}
|
|
4582
|
-
logger$
|
|
4599
|
+
logger$4.log(`\n📦 Creating application: ${appName}`);
|
|
4583
4600
|
const application = await api.createApplication(appName, projectId, environmentId);
|
|
4584
|
-
logger$
|
|
4601
|
+
logger$4.log(` ✓ Created application: ${application.applicationId}`);
|
|
4585
4602
|
if (registryId) {
|
|
4586
|
-
logger$
|
|
4603
|
+
logger$4.log(`\n🔧 Configuring registry: ${registryId}`);
|
|
4587
4604
|
await api.updateApplication(application.applicationId, { registryId });
|
|
4588
|
-
logger$
|
|
4605
|
+
logger$4.log(` ✓ Registry configured`);
|
|
4589
4606
|
} else try {
|
|
4590
4607
|
const registries = await api.listRegistries();
|
|
4591
4608
|
if (registries.length > 0) {
|
|
4592
|
-
logger$
|
|
4593
|
-
for (const reg of registries) logger$
|
|
4594
|
-
logger$
|
|
4609
|
+
logger$4.log(`\n📋 Available registries:`);
|
|
4610
|
+
for (const reg of registries) logger$4.log(` - ${reg.registryName}: ${reg.registryUrl} (${reg.registryId})`);
|
|
4611
|
+
logger$4.log(`\n To use a registry, run with --registry-id <id>`);
|
|
4595
4612
|
}
|
|
4596
4613
|
} catch {}
|
|
4597
4614
|
const config$1 = {
|
|
@@ -4600,14 +4617,14 @@ async function deployInitCommand(options) {
|
|
|
4600
4617
|
applicationId: application.applicationId
|
|
4601
4618
|
};
|
|
4602
4619
|
await updateConfig(config$1);
|
|
4603
|
-
logger$
|
|
4604
|
-
logger$
|
|
4605
|
-
logger$
|
|
4606
|
-
logger$
|
|
4607
|
-
logger$
|
|
4608
|
-
logger$
|
|
4609
|
-
logger$
|
|
4610
|
-
logger$
|
|
4620
|
+
logger$4.log(`\n✅ Dokploy deployment initialized!`);
|
|
4621
|
+
logger$4.log(`\n📋 Configuration:`);
|
|
4622
|
+
logger$4.log(` Project ID: ${projectId}`);
|
|
4623
|
+
logger$4.log(` Application ID: ${application.applicationId}`);
|
|
4624
|
+
logger$4.log(`\n🔗 View in Dokploy: ${endpoint}/project/${projectId}`);
|
|
4625
|
+
logger$4.log(`\n📝 Next steps:`);
|
|
4626
|
+
logger$4.log(` 1. Initialize secrets: gkm secrets:init --stage production`);
|
|
4627
|
+
logger$4.log(` 2. Deploy: gkm deploy --provider dokploy --stage production`);
|
|
4611
4628
|
return config$1;
|
|
4612
4629
|
}
|
|
4613
4630
|
/**
|
|
@@ -4618,32 +4635,32 @@ async function deployListCommand(options) {
|
|
|
4618
4635
|
const api = await createApi(endpoint);
|
|
4619
4636
|
const { resource } = options;
|
|
4620
4637
|
if (resource === "projects") {
|
|
4621
|
-
logger$
|
|
4638
|
+
logger$4.log(`\n📁 Projects in ${endpoint}:`);
|
|
4622
4639
|
const projects = await api.listProjects();
|
|
4623
4640
|
if (projects.length === 0) {
|
|
4624
|
-
logger$
|
|
4641
|
+
logger$4.log(" No projects found");
|
|
4625
4642
|
return;
|
|
4626
4643
|
}
|
|
4627
4644
|
for (const project of projects) {
|
|
4628
|
-
logger$
|
|
4629
|
-
if (project.description) logger$
|
|
4645
|
+
logger$4.log(`\n ${project.name} (${project.projectId})`);
|
|
4646
|
+
if (project.description) logger$4.log(` ${project.description}`);
|
|
4630
4647
|
}
|
|
4631
4648
|
} else if (resource === "registries") {
|
|
4632
|
-
logger$
|
|
4649
|
+
logger$4.log(`\n🐳 Registries in ${endpoint}:`);
|
|
4633
4650
|
const registries = await api.listRegistries();
|
|
4634
4651
|
if (registries.length === 0) {
|
|
4635
|
-
logger$
|
|
4636
|
-
logger$
|
|
4652
|
+
logger$4.log(" No registries configured");
|
|
4653
|
+
logger$4.log(" Run \"gkm registry:setup\" to configure a registry");
|
|
4637
4654
|
return;
|
|
4638
4655
|
}
|
|
4639
4656
|
const storedRegistryId = await getDokployRegistryId();
|
|
4640
4657
|
for (const registry of registries) {
|
|
4641
4658
|
const isDefault = registry.registryId === storedRegistryId;
|
|
4642
4659
|
const marker = isDefault ? " (default)" : "";
|
|
4643
|
-
logger$
|
|
4644
|
-
logger$
|
|
4645
|
-
logger$
|
|
4646
|
-
if (registry.imagePrefix) logger$
|
|
4660
|
+
logger$4.log(`\n ${registry.registryName}${marker} (${registry.registryId})`);
|
|
4661
|
+
logger$4.log(` URL: ${registry.registryUrl}`);
|
|
4662
|
+
logger$4.log(` Username: ${registry.username}`);
|
|
4663
|
+
if (registry.imagePrefix) logger$4.log(` Prefix: ${registry.imagePrefix}`);
|
|
4647
4664
|
}
|
|
4648
4665
|
}
|
|
4649
4666
|
}
|
|
@@ -4666,19 +4683,19 @@ function isStateProvider(value) {
|
|
|
4666
4683
|
async function createStateProvider(options) {
|
|
4667
4684
|
const { config: config$1, workspaceRoot, workspaceName } = options;
|
|
4668
4685
|
if (!config$1) {
|
|
4669
|
-
const { LocalStateProvider } = await import("./LocalStateProvider-
|
|
4686
|
+
const { LocalStateProvider } = await import("./LocalStateProvider-Dp0KkRcw.mjs");
|
|
4670
4687
|
return new LocalStateProvider(workspaceRoot);
|
|
4671
4688
|
}
|
|
4672
4689
|
if (isStateProvider(config$1.provider)) return config$1.provider;
|
|
4673
4690
|
const provider = config$1.provider;
|
|
4674
4691
|
if (provider === "local") {
|
|
4675
|
-
const { LocalStateProvider } = await import("./LocalStateProvider-
|
|
4692
|
+
const { LocalStateProvider } = await import("./LocalStateProvider-Dp0KkRcw.mjs");
|
|
4676
4693
|
return new LocalStateProvider(workspaceRoot);
|
|
4677
4694
|
}
|
|
4678
4695
|
if (provider === "ssm") {
|
|
4679
4696
|
if (!workspaceName) throw new Error("Workspace name is required for SSM state provider. Set \"name\" in gkm.config.ts.");
|
|
4680
|
-
const { LocalStateProvider } = await import("./LocalStateProvider-
|
|
4681
|
-
const { SSMStateProvider } = await import("./SSMStateProvider-
|
|
4697
|
+
const { LocalStateProvider } = await import("./LocalStateProvider-Dp0KkRcw.mjs");
|
|
4698
|
+
const { SSMStateProvider } = await import("./SSMStateProvider-CksOTB8M.mjs");
|
|
4682
4699
|
const { CachedStateProvider: CachedStateProvider$1 } = await import("./CachedStateProvider-CI61keQ1.mjs");
|
|
4683
4700
|
const ssmConfig = config$1;
|
|
4684
4701
|
const local = new LocalStateProvider(workspaceRoot);
|
|
@@ -5111,7 +5128,7 @@ async function sniffAllApps(apps, workspacePath, options = {}) {
|
|
|
5111
5128
|
|
|
5112
5129
|
//#endregion
|
|
5113
5130
|
//#region src/deploy/index.ts
|
|
5114
|
-
const logger$
|
|
5131
|
+
const logger$3 = console;
|
|
5115
5132
|
/**
|
|
5116
5133
|
* Prompt for input
|
|
5117
5134
|
*/
|
|
@@ -5183,7 +5200,7 @@ async function waitForPostgres(host, port, user, password, database, maxRetries
|
|
|
5183
5200
|
return;
|
|
5184
5201
|
} catch {
|
|
5185
5202
|
if (i < maxRetries - 1) {
|
|
5186
|
-
logger$
|
|
5203
|
+
logger$3.log(` Waiting for Postgres... (${i + 1}/${maxRetries})`);
|
|
5187
5204
|
await new Promise((r) => setTimeout(r, retryIntervalMs));
|
|
5188
5205
|
}
|
|
5189
5206
|
}
|
|
@@ -5218,12 +5235,12 @@ async function waitForPostgres(host, port, user, password, database, maxRetries
|
|
|
5218
5235
|
* ```
|
|
5219
5236
|
*/
|
|
5220
5237
|
async function initializePostgresUsers(api, postgres, serverHostname, users) {
|
|
5221
|
-
logger$
|
|
5238
|
+
logger$3.log("\n🔧 Initializing database users...");
|
|
5222
5239
|
const externalPort = 5432;
|
|
5223
|
-
logger$
|
|
5240
|
+
logger$3.log(` Enabling external port ${externalPort}...`);
|
|
5224
5241
|
await api.savePostgresExternalPort(postgres.postgresId, externalPort);
|
|
5225
5242
|
await api.deployPostgres(postgres.postgresId);
|
|
5226
|
-
logger$
|
|
5243
|
+
logger$3.log(` Waiting for Postgres to be accessible at ${serverHostname}:${externalPort}...`);
|
|
5227
5244
|
await waitForPostgres(serverHostname, externalPort, postgres.databaseUser, postgres.databasePassword, postgres.databaseName);
|
|
5228
5245
|
const client = new Client({
|
|
5229
5246
|
host: serverHostname,
|
|
@@ -5236,7 +5253,7 @@ async function initializePostgresUsers(api, postgres, serverHostname, users) {
|
|
|
5236
5253
|
await client.connect();
|
|
5237
5254
|
for (const user of users) {
|
|
5238
5255
|
const schemaName = user.usePublicSchema ? "public" : user.name;
|
|
5239
|
-
logger$
|
|
5256
|
+
logger$3.log(` Creating user "${user.name}" with schema "${schemaName}"...`);
|
|
5240
5257
|
if (user.usePublicSchema) {
|
|
5241
5258
|
await client.query(`
|
|
5242
5259
|
DO $$ BEGIN
|
|
@@ -5271,15 +5288,15 @@ async function initializePostgresUsers(api, postgres, serverHostname, users) {
|
|
|
5271
5288
|
ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT ALL ON TABLES TO "${user.name}";
|
|
5272
5289
|
`);
|
|
5273
5290
|
}
|
|
5274
|
-
logger$
|
|
5291
|
+
logger$3.log(` ✓ User "${user.name}" configured`);
|
|
5275
5292
|
}
|
|
5276
5293
|
} finally {
|
|
5277
5294
|
await client.end();
|
|
5278
5295
|
}
|
|
5279
|
-
logger$
|
|
5296
|
+
logger$3.log(" Disabling external port...");
|
|
5280
5297
|
await api.savePostgresExternalPort(postgres.postgresId, null);
|
|
5281
5298
|
await api.deployPostgres(postgres.postgresId);
|
|
5282
|
-
logger$
|
|
5299
|
+
logger$3.log(" ✓ Database users initialized");
|
|
5283
5300
|
}
|
|
5284
5301
|
/**
|
|
5285
5302
|
* Get the server hostname from the Dokploy endpoint URL
|
|
@@ -5293,24 +5310,24 @@ function getServerHostname(endpoint) {
|
|
|
5293
5310
|
* @internal Exported for testing
|
|
5294
5311
|
*/
|
|
5295
5312
|
async function provisionServices(api, projectId, environmentId, projectName, services, existingServiceIds) {
|
|
5296
|
-
logger$
|
|
5313
|
+
logger$3.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
|
|
5297
5314
|
if (!services || !environmentId) {
|
|
5298
|
-
logger$
|
|
5315
|
+
logger$3.log(" Skipping: no services or no environmentId");
|
|
5299
5316
|
return void 0;
|
|
5300
5317
|
}
|
|
5301
5318
|
const serviceUrls = {};
|
|
5302
5319
|
const serviceIds = {};
|
|
5303
5320
|
if (services.postgres) {
|
|
5304
|
-
logger$
|
|
5321
|
+
logger$3.log("\n🐘 Checking PostgreSQL...");
|
|
5305
5322
|
const postgresName = "db";
|
|
5306
5323
|
try {
|
|
5307
5324
|
let postgres = null;
|
|
5308
5325
|
let created = false;
|
|
5309
5326
|
if (existingServiceIds?.postgresId) {
|
|
5310
|
-
logger$
|
|
5327
|
+
logger$3.log(` Using cached ID: ${existingServiceIds.postgresId}`);
|
|
5311
5328
|
postgres = await api.getPostgres(existingServiceIds.postgresId);
|
|
5312
|
-
if (postgres) logger$
|
|
5313
|
-
else logger$
|
|
5329
|
+
if (postgres) logger$3.log(` ✓ PostgreSQL found: ${postgres.postgresId}`);
|
|
5330
|
+
else logger$3.log(` ⚠ Cached ID invalid, will create new`);
|
|
5314
5331
|
}
|
|
5315
5332
|
if (!postgres) {
|
|
5316
5333
|
const databasePassword = randomBytes(16).toString("hex");
|
|
@@ -5322,10 +5339,10 @@ async function provisionServices(api, projectId, environmentId, projectName, ser
|
|
|
5322
5339
|
postgres = result.postgres;
|
|
5323
5340
|
created = result.created;
|
|
5324
5341
|
if (created) {
|
|
5325
|
-
logger$
|
|
5342
|
+
logger$3.log(` ✓ Created PostgreSQL: ${postgres.postgresId}`);
|
|
5326
5343
|
await api.deployPostgres(postgres.postgresId);
|
|
5327
|
-
logger$
|
|
5328
|
-
} else logger$
|
|
5344
|
+
logger$3.log(" ✓ PostgreSQL deployed");
|
|
5345
|
+
} else logger$3.log(` ✓ PostgreSQL already exists: ${postgres.postgresId}`);
|
|
5329
5346
|
}
|
|
5330
5347
|
serviceIds.postgresId = postgres.postgresId;
|
|
5331
5348
|
serviceUrls.DATABASE_HOST = postgres.appName;
|
|
@@ -5334,23 +5351,23 @@ async function provisionServices(api, projectId, environmentId, projectName, ser
|
|
|
5334
5351
|
serviceUrls.DATABASE_USER = postgres.databaseUser;
|
|
5335
5352
|
serviceUrls.DATABASE_PASSWORD = postgres.databasePassword;
|
|
5336
5353
|
serviceUrls.DATABASE_URL = `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;
|
|
5337
|
-
logger$
|
|
5354
|
+
logger$3.log(` ✓ Database credentials configured`);
|
|
5338
5355
|
} catch (error) {
|
|
5339
5356
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
5340
|
-
logger$
|
|
5357
|
+
logger$3.log(` ⚠ Failed to provision PostgreSQL: ${message}`);
|
|
5341
5358
|
}
|
|
5342
5359
|
}
|
|
5343
5360
|
if (services.redis) {
|
|
5344
|
-
logger$
|
|
5361
|
+
logger$3.log("\n🔴 Checking Redis...");
|
|
5345
5362
|
const redisName = "cache";
|
|
5346
5363
|
try {
|
|
5347
5364
|
let redis = null;
|
|
5348
5365
|
let created = false;
|
|
5349
5366
|
if (existingServiceIds?.redisId) {
|
|
5350
|
-
logger$
|
|
5367
|
+
logger$3.log(` Using cached ID: ${existingServiceIds.redisId}`);
|
|
5351
5368
|
redis = await api.getRedis(existingServiceIds.redisId);
|
|
5352
|
-
if (redis) logger$
|
|
5353
|
-
else logger$
|
|
5369
|
+
if (redis) logger$3.log(` ✓ Redis found: ${redis.redisId}`);
|
|
5370
|
+
else logger$3.log(` ⚠ Cached ID invalid, will create new`);
|
|
5354
5371
|
}
|
|
5355
5372
|
if (!redis) {
|
|
5356
5373
|
const { randomBytes: randomBytes$1 } = await import("node:crypto");
|
|
@@ -5359,10 +5376,10 @@ async function provisionServices(api, projectId, environmentId, projectName, ser
|
|
|
5359
5376
|
redis = result.redis;
|
|
5360
5377
|
created = result.created;
|
|
5361
5378
|
if (created) {
|
|
5362
|
-
logger$
|
|
5379
|
+
logger$3.log(` ✓ Created Redis: ${redis.redisId}`);
|
|
5363
5380
|
await api.deployRedis(redis.redisId);
|
|
5364
|
-
logger$
|
|
5365
|
-
} else logger$
|
|
5381
|
+
logger$3.log(" ✓ Redis deployed");
|
|
5382
|
+
} else logger$3.log(` ✓ Redis already exists: ${redis.redisId}`);
|
|
5366
5383
|
}
|
|
5367
5384
|
serviceIds.redisId = redis.redisId;
|
|
5368
5385
|
serviceUrls.REDIS_HOST = redis.appName;
|
|
@@ -5370,10 +5387,10 @@ async function provisionServices(api, projectId, environmentId, projectName, ser
|
|
|
5370
5387
|
if (redis.databasePassword) serviceUrls.REDIS_PASSWORD = redis.databasePassword;
|
|
5371
5388
|
const password = redis.databasePassword ? `:${redis.databasePassword}@` : "";
|
|
5372
5389
|
serviceUrls.REDIS_URL = `redis://${password}${redis.appName}:6379`;
|
|
5373
|
-
logger$
|
|
5390
|
+
logger$3.log(` ✓ Redis credentials configured`);
|
|
5374
5391
|
} catch (error) {
|
|
5375
5392
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
5376
|
-
logger$
|
|
5393
|
+
logger$3.log(` ⚠ Failed to provision Redis: ${message}`);
|
|
5377
5394
|
}
|
|
5378
5395
|
}
|
|
5379
5396
|
return Object.keys(serviceUrls).length > 0 ? {
|
|
@@ -5385,10 +5402,10 @@ async function provisionServices(api, projectId, environmentId, projectName, ser
|
|
|
5385
5402
|
* Ensure Dokploy is fully configured, recovering/creating resources as needed
|
|
5386
5403
|
*/
|
|
5387
5404
|
async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
5388
|
-
logger$
|
|
5405
|
+
logger$3.log("\n🔧 Checking Dokploy setup...");
|
|
5389
5406
|
let creds = await getDokployCredentials();
|
|
5390
5407
|
if (!creds) {
|
|
5391
|
-
logger$
|
|
5408
|
+
logger$3.log("\n📋 Dokploy credentials not found. Let's set them up.");
|
|
5392
5409
|
const endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
5393
5410
|
const normalizedEndpoint = endpoint.replace(/\/$/, "");
|
|
5394
5411
|
try {
|
|
@@ -5396,9 +5413,9 @@ async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
|
5396
5413
|
} catch {
|
|
5397
5414
|
throw new Error("Invalid URL format");
|
|
5398
5415
|
}
|
|
5399
|
-
logger$
|
|
5416
|
+
logger$3.log(`\nGenerate a token at: ${normalizedEndpoint}/settings/profile\n`);
|
|
5400
5417
|
const token = await prompt("API Token: ", true);
|
|
5401
|
-
logger$
|
|
5418
|
+
logger$3.log("\nValidating credentials...");
|
|
5402
5419
|
const isValid = await validateDokployToken(normalizedEndpoint, token);
|
|
5403
5420
|
if (!isValid) throw new Error("Invalid credentials. Please check your token.");
|
|
5404
5421
|
await storeDokployCredentials(token, normalizedEndpoint);
|
|
@@ -5406,7 +5423,7 @@ async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
|
5406
5423
|
token,
|
|
5407
5424
|
endpoint: normalizedEndpoint
|
|
5408
5425
|
};
|
|
5409
|
-
logger$
|
|
5426
|
+
logger$3.log("✓ Credentials saved");
|
|
5410
5427
|
}
|
|
5411
5428
|
const api = new DokployApi({
|
|
5412
5429
|
baseUrl: creds.endpoint,
|
|
@@ -5414,20 +5431,20 @@ async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
|
5414
5431
|
});
|
|
5415
5432
|
const existingConfig = config$1.providers?.dokploy;
|
|
5416
5433
|
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId && existingConfig.projectId) {
|
|
5417
|
-
logger$
|
|
5434
|
+
logger$3.log("✓ Dokploy config found in gkm.config.ts");
|
|
5418
5435
|
try {
|
|
5419
5436
|
const projectDetails = await api.getProject(existingConfig.projectId);
|
|
5420
|
-
logger$
|
|
5437
|
+
logger$3.log("✓ Project verified");
|
|
5421
5438
|
const storedRegistryId = existingConfig.registryId ?? await getDokployRegistryId();
|
|
5422
5439
|
const environments = projectDetails.environments ?? [];
|
|
5423
5440
|
let environment = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
5424
5441
|
if (!environment) {
|
|
5425
|
-
logger$
|
|
5442
|
+
logger$3.log(` Creating "${stage}" environment...`);
|
|
5426
5443
|
environment = await api.createEnvironment(existingConfig.projectId, stage);
|
|
5427
|
-
logger$
|
|
5444
|
+
logger$3.log(` ✓ Created environment: ${environment.environmentId}`);
|
|
5428
5445
|
}
|
|
5429
5446
|
const environmentId$1 = environment.environmentId;
|
|
5430
|
-
logger$
|
|
5447
|
+
logger$3.log(` Services config: ${JSON.stringify(services)}, envId: ${environmentId$1}`);
|
|
5431
5448
|
const provisionResult$1 = await provisionServices(api, existingConfig.projectId, environmentId$1, dockerConfig.appName, services, void 0);
|
|
5432
5449
|
return {
|
|
5433
5450
|
config: {
|
|
@@ -5440,97 +5457,97 @@ async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
|
5440
5457
|
serviceUrls: provisionResult$1?.serviceUrls
|
|
5441
5458
|
};
|
|
5442
5459
|
} catch {
|
|
5443
|
-
logger$
|
|
5460
|
+
logger$3.log("⚠ Project not found, will recover...");
|
|
5444
5461
|
}
|
|
5445
5462
|
}
|
|
5446
|
-
logger$
|
|
5463
|
+
logger$3.log("\n📁 Looking for project...");
|
|
5447
5464
|
const projectName = dockerConfig.projectName;
|
|
5448
5465
|
const projects = await api.listProjects();
|
|
5449
5466
|
let project = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
5450
5467
|
let environmentId;
|
|
5451
5468
|
if (project) {
|
|
5452
|
-
logger$
|
|
5469
|
+
logger$3.log(` Found existing project: ${project.name} (${project.projectId})`);
|
|
5453
5470
|
const projectDetails = await api.getProject(project.projectId);
|
|
5454
5471
|
const environments = projectDetails.environments ?? [];
|
|
5455
5472
|
const matchingEnv = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
5456
5473
|
if (matchingEnv) {
|
|
5457
5474
|
environmentId = matchingEnv.environmentId;
|
|
5458
|
-
logger$
|
|
5475
|
+
logger$3.log(` Using environment: ${matchingEnv.name}`);
|
|
5459
5476
|
} else {
|
|
5460
|
-
logger$
|
|
5477
|
+
logger$3.log(` Creating "${stage}" environment...`);
|
|
5461
5478
|
const env = await api.createEnvironment(project.projectId, stage);
|
|
5462
5479
|
environmentId = env.environmentId;
|
|
5463
|
-
logger$
|
|
5480
|
+
logger$3.log(` ✓ Created environment: ${stage}`);
|
|
5464
5481
|
}
|
|
5465
5482
|
} else {
|
|
5466
|
-
logger$
|
|
5483
|
+
logger$3.log(` Creating project: ${projectName}`);
|
|
5467
5484
|
const result = await api.createProject(projectName);
|
|
5468
5485
|
project = result.project;
|
|
5469
5486
|
if (result.environment.name.toLowerCase() !== stage.toLowerCase()) {
|
|
5470
|
-
logger$
|
|
5487
|
+
logger$3.log(` Creating "${stage}" environment...`);
|
|
5471
5488
|
const env = await api.createEnvironment(project.projectId, stage);
|
|
5472
5489
|
environmentId = env.environmentId;
|
|
5473
5490
|
} else environmentId = result.environment.environmentId;
|
|
5474
|
-
logger$
|
|
5475
|
-
logger$
|
|
5491
|
+
logger$3.log(` ✓ Created project: ${project.projectId}`);
|
|
5492
|
+
logger$3.log(` ✓ Using environment: ${stage}`);
|
|
5476
5493
|
}
|
|
5477
|
-
logger$
|
|
5494
|
+
logger$3.log("\n📦 Looking for application...");
|
|
5478
5495
|
const appName = dockerConfig.appName;
|
|
5479
5496
|
let applicationId;
|
|
5480
5497
|
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId) {
|
|
5481
5498
|
applicationId = existingConfig.applicationId;
|
|
5482
|
-
logger$
|
|
5499
|
+
logger$3.log(` Using application from config: ${applicationId}`);
|
|
5483
5500
|
} else {
|
|
5484
|
-
logger$
|
|
5501
|
+
logger$3.log(` Creating application: ${appName}`);
|
|
5485
5502
|
const app = await api.createApplication(appName, project.projectId, environmentId);
|
|
5486
5503
|
applicationId = app.applicationId;
|
|
5487
|
-
logger$
|
|
5504
|
+
logger$3.log(` ✓ Created application: ${applicationId}`);
|
|
5488
5505
|
}
|
|
5489
|
-
logger$
|
|
5506
|
+
logger$3.log("\n🐳 Checking registry...");
|
|
5490
5507
|
let registryId = await getDokployRegistryId();
|
|
5491
5508
|
if (registryId) try {
|
|
5492
5509
|
const registry = await api.getRegistry(registryId);
|
|
5493
|
-
logger$
|
|
5510
|
+
logger$3.log(` Using registry: ${registry.registryName}`);
|
|
5494
5511
|
} catch {
|
|
5495
|
-
logger$
|
|
5512
|
+
logger$3.log(" ⚠ Stored registry not found, clearing...");
|
|
5496
5513
|
registryId = void 0;
|
|
5497
5514
|
await storeDokployRegistryId("");
|
|
5498
5515
|
}
|
|
5499
5516
|
if (!registryId) {
|
|
5500
5517
|
const registries = await api.listRegistries();
|
|
5501
5518
|
if (registries.length === 0) if (dockerConfig.registry) {
|
|
5502
|
-
logger$
|
|
5503
|
-
logger$
|
|
5519
|
+
logger$3.log(" No registries found in Dokploy. Let's create one.");
|
|
5520
|
+
logger$3.log(` Registry URL: ${dockerConfig.registry}`);
|
|
5504
5521
|
const username = await prompt("Registry username: ");
|
|
5505
5522
|
const password = await prompt("Registry password/token: ", true);
|
|
5506
5523
|
const registry = await api.createRegistry("Default Registry", dockerConfig.registry, username, password);
|
|
5507
5524
|
registryId = registry.registryId;
|
|
5508
5525
|
await storeDokployRegistryId(registryId);
|
|
5509
|
-
logger$
|
|
5510
|
-
} else logger$
|
|
5526
|
+
logger$3.log(` ✓ Registry created: ${registryId}`);
|
|
5527
|
+
} else logger$3.log(" ⚠ No registry configured. Set docker.registry in gkm.config.ts");
|
|
5511
5528
|
else {
|
|
5512
|
-
logger$
|
|
5529
|
+
logger$3.log(" Available registries:");
|
|
5513
5530
|
registries.forEach((reg, i) => {
|
|
5514
|
-
logger$
|
|
5531
|
+
logger$3.log(` ${i + 1}. ${reg.registryName} (${reg.registryUrl})`);
|
|
5515
5532
|
});
|
|
5516
|
-
if (dockerConfig.registry) logger$
|
|
5533
|
+
if (dockerConfig.registry) logger$3.log(` ${registries.length + 1}. Create new registry`);
|
|
5517
5534
|
const maxOption = dockerConfig.registry ? registries.length + 1 : registries.length;
|
|
5518
5535
|
const selection = await prompt(` Select registry (1-${maxOption}): `);
|
|
5519
5536
|
const index = parseInt(selection, 10) - 1;
|
|
5520
5537
|
if (index >= 0 && index < registries.length) {
|
|
5521
5538
|
registryId = registries[index].registryId;
|
|
5522
5539
|
await storeDokployRegistryId(registryId);
|
|
5523
|
-
logger$
|
|
5540
|
+
logger$3.log(` ✓ Selected: ${registries[index].registryName}`);
|
|
5524
5541
|
} else if (dockerConfig.registry && index === registries.length) {
|
|
5525
|
-
logger$
|
|
5526
|
-
logger$
|
|
5542
|
+
logger$3.log(`\n Creating new registry...`);
|
|
5543
|
+
logger$3.log(` Registry URL: ${dockerConfig.registry}`);
|
|
5527
5544
|
const username = await prompt(" Registry username: ");
|
|
5528
5545
|
const password = await prompt(" Registry password/token: ", true);
|
|
5529
5546
|
const registry = await api.createRegistry(dockerConfig.registry.replace(/^https?:\/\//, ""), dockerConfig.registry, username, password);
|
|
5530
5547
|
registryId = registry.registryId;
|
|
5531
5548
|
await storeDokployRegistryId(registryId);
|
|
5532
|
-
logger$
|
|
5533
|
-
} else logger$
|
|
5549
|
+
logger$3.log(` ✓ Registry created: ${registryId}`);
|
|
5550
|
+
} else logger$3.log(" ⚠ Invalid selection, skipping registry setup");
|
|
5534
5551
|
}
|
|
5535
5552
|
}
|
|
5536
5553
|
const dokployConfig = {
|
|
@@ -5540,10 +5557,10 @@ async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
|
5540
5557
|
registryId: registryId ?? void 0
|
|
5541
5558
|
};
|
|
5542
5559
|
await updateConfig(dokployConfig);
|
|
5543
|
-
logger$
|
|
5544
|
-
logger$
|
|
5545
|
-
logger$
|
|
5546
|
-
if (registryId) logger$
|
|
5560
|
+
logger$3.log("\n✅ Dokploy setup complete!");
|
|
5561
|
+
logger$3.log(` Project: ${project.projectId}`);
|
|
5562
|
+
logger$3.log(` Application: ${applicationId}`);
|
|
5563
|
+
if (registryId) logger$3.log(` Registry: ${registryId}`);
|
|
5547
5564
|
const provisionResult = await provisionServices(api, project.projectId, environmentId, dockerConfig.appName, services, void 0);
|
|
5548
5565
|
return {
|
|
5549
5566
|
config: dokployConfig,
|
|
@@ -5574,45 +5591,45 @@ function generateTag(stage) {
|
|
|
5574
5591
|
async function workspaceDeployCommand(workspace, options) {
|
|
5575
5592
|
const { provider, stage, tag, apps: selectedApps } = options;
|
|
5576
5593
|
if (provider !== "dokploy") throw new Error(`Workspace deployment only supports Dokploy. Got: ${provider}`);
|
|
5577
|
-
logger$
|
|
5578
|
-
logger$
|
|
5594
|
+
logger$3.log(`\n🚀 Deploying workspace "${workspace.name}" to Dokploy...`);
|
|
5595
|
+
logger$3.log(` Stage: ${stage}`);
|
|
5579
5596
|
const imageTag = tag ?? generateTag(stage);
|
|
5580
|
-
logger$
|
|
5597
|
+
logger$3.log(` Tag: ${imageTag}`);
|
|
5581
5598
|
const buildOrder = getAppBuildOrder(workspace);
|
|
5582
5599
|
let appsToDeployNames = buildOrder;
|
|
5583
5600
|
if (selectedApps && selectedApps.length > 0) {
|
|
5584
5601
|
const invalidApps = selectedApps.filter((name$1) => !workspace.apps[name$1]);
|
|
5585
5602
|
if (invalidApps.length > 0) throw new Error(`Unknown apps: ${invalidApps.join(", ")}\nAvailable apps: ${Object.keys(workspace.apps).join(", ")}`);
|
|
5586
5603
|
appsToDeployNames = buildOrder.filter((name$1) => selectedApps.includes(name$1));
|
|
5587
|
-
logger$
|
|
5588
|
-
} else logger$
|
|
5604
|
+
logger$3.log(` Deploying apps: ${appsToDeployNames.join(", ")}`);
|
|
5605
|
+
} else logger$3.log(` Deploying all apps: ${appsToDeployNames.join(", ")}`);
|
|
5589
5606
|
const dokployApps = appsToDeployNames.filter((name$1) => {
|
|
5590
5607
|
const app = workspace.apps[name$1];
|
|
5591
5608
|
const target = app.resolvedDeployTarget;
|
|
5592
5609
|
if (!isDeployTargetSupported(target)) {
|
|
5593
|
-
logger$
|
|
5610
|
+
logger$3.log(` ⚠️ Skipping ${name$1}: ${getDeployTargetError(target, name$1)}`);
|
|
5594
5611
|
return false;
|
|
5595
5612
|
}
|
|
5596
5613
|
return true;
|
|
5597
5614
|
});
|
|
5598
5615
|
if (dokployApps.length === 0) throw new Error("No apps to deploy. All selected apps have unsupported deploy targets.");
|
|
5599
5616
|
appsToDeployNames = dokployApps;
|
|
5600
|
-
logger$
|
|
5617
|
+
logger$3.log("\n🔐 Loading secrets and analyzing environment requirements...");
|
|
5601
5618
|
const stageSecrets = await readStageSecrets(stage, workspace.root);
|
|
5602
5619
|
if (!stageSecrets) {
|
|
5603
|
-
logger$
|
|
5604
|
-
logger$
|
|
5620
|
+
logger$3.log(` ⚠️ No secrets found for stage "${stage}"`);
|
|
5621
|
+
logger$3.log(` Run "gkm secrets:init --stage ${stage}" to create secrets`);
|
|
5605
5622
|
}
|
|
5606
5623
|
const sniffedApps = await sniffAllApps(workspace.apps, workspace.root);
|
|
5607
5624
|
const encryptedSecrets = stageSecrets ? prepareSecretsForAllApps(stageSecrets, sniffedApps) : /* @__PURE__ */ new Map();
|
|
5608
5625
|
if (stageSecrets) {
|
|
5609
5626
|
const report = generateSecretsReport(encryptedSecrets, sniffedApps);
|
|
5610
|
-
if (report.appsWithSecrets.length > 0) logger$
|
|
5611
|
-
if (report.appsWithMissingSecrets.length > 0) for (const { appName, missing } of report.appsWithMissingSecrets) logger$
|
|
5627
|
+
if (report.appsWithSecrets.length > 0) logger$3.log(` ✓ Encrypted secrets for: ${report.appsWithSecrets.join(", ")}`);
|
|
5628
|
+
if (report.appsWithMissingSecrets.length > 0) for (const { appName, missing } of report.appsWithMissingSecrets) logger$3.log(` ⚠️ ${appName}: Missing secrets: ${missing.join(", ")}`);
|
|
5612
5629
|
}
|
|
5613
5630
|
let creds = await getDokployCredentials();
|
|
5614
5631
|
if (!creds) {
|
|
5615
|
-
logger$
|
|
5632
|
+
logger$3.log("\n📋 Dokploy credentials not found. Let's set them up.");
|
|
5616
5633
|
const endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
5617
5634
|
const normalizedEndpoint = endpoint.replace(/\/$/, "");
|
|
5618
5635
|
try {
|
|
@@ -5620,9 +5637,9 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5620
5637
|
} catch {
|
|
5621
5638
|
throw new Error("Invalid URL format");
|
|
5622
5639
|
}
|
|
5623
|
-
logger$
|
|
5640
|
+
logger$3.log(`\nGenerate a token at: ${normalizedEndpoint}/settings/profile\n`);
|
|
5624
5641
|
const token = await prompt("API Token: ", true);
|
|
5625
|
-
logger$
|
|
5642
|
+
logger$3.log("\nValidating credentials...");
|
|
5626
5643
|
const isValid = await validateDokployToken(normalizedEndpoint, token);
|
|
5627
5644
|
if (!isValid) throw new Error("Invalid credentials. Please check your token.");
|
|
5628
5645
|
await storeDokployCredentials(token, normalizedEndpoint);
|
|
@@ -5630,43 +5647,43 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5630
5647
|
token,
|
|
5631
5648
|
endpoint: normalizedEndpoint
|
|
5632
5649
|
};
|
|
5633
|
-
logger$
|
|
5650
|
+
logger$3.log("✓ Credentials saved");
|
|
5634
5651
|
}
|
|
5635
5652
|
const api = new DokployApi({
|
|
5636
5653
|
baseUrl: creds.endpoint,
|
|
5637
5654
|
token: creds.token
|
|
5638
5655
|
});
|
|
5639
|
-
logger$
|
|
5656
|
+
logger$3.log("\n📁 Setting up Dokploy project...");
|
|
5640
5657
|
const projectName = workspace.name;
|
|
5641
5658
|
const projects = await api.listProjects();
|
|
5642
5659
|
let project = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
5643
5660
|
let environmentId;
|
|
5644
5661
|
if (project) {
|
|
5645
|
-
logger$
|
|
5662
|
+
logger$3.log(` Found existing project: ${project.name}`);
|
|
5646
5663
|
const projectDetails = await api.getProject(project.projectId);
|
|
5647
5664
|
const environments = projectDetails.environments ?? [];
|
|
5648
5665
|
const matchingEnv = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
5649
5666
|
if (matchingEnv) {
|
|
5650
5667
|
environmentId = matchingEnv.environmentId;
|
|
5651
|
-
logger$
|
|
5668
|
+
logger$3.log(` Using environment: ${matchingEnv.name}`);
|
|
5652
5669
|
} else {
|
|
5653
|
-
logger$
|
|
5670
|
+
logger$3.log(` Creating "${stage}" environment...`);
|
|
5654
5671
|
const env = await api.createEnvironment(project.projectId, stage);
|
|
5655
5672
|
environmentId = env.environmentId;
|
|
5656
|
-
logger$
|
|
5673
|
+
logger$3.log(` ✓ Created environment: ${stage}`);
|
|
5657
5674
|
}
|
|
5658
5675
|
} else {
|
|
5659
|
-
logger$
|
|
5676
|
+
logger$3.log(` Creating project: ${projectName}`);
|
|
5660
5677
|
const result = await api.createProject(projectName);
|
|
5661
5678
|
project = result.project;
|
|
5662
5679
|
if (result.environment.name.toLowerCase() !== stage.toLowerCase()) {
|
|
5663
|
-
logger$
|
|
5680
|
+
logger$3.log(` Creating "${stage}" environment...`);
|
|
5664
5681
|
const env = await api.createEnvironment(project.projectId, stage);
|
|
5665
5682
|
environmentId = env.environmentId;
|
|
5666
5683
|
} else environmentId = result.environment.environmentId;
|
|
5667
|
-
logger$
|
|
5684
|
+
logger$3.log(` ✓ Created project: ${project.projectId}`);
|
|
5668
5685
|
}
|
|
5669
|
-
logger$
|
|
5686
|
+
logger$3.log("\n📋 Loading deploy state...");
|
|
5670
5687
|
const stateProvider = await createStateProvider({
|
|
5671
5688
|
config: workspace.state,
|
|
5672
5689
|
workspaceRoot: workspace.root,
|
|
@@ -5674,27 +5691,27 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5674
5691
|
});
|
|
5675
5692
|
let state = await stateProvider.read(stage);
|
|
5676
5693
|
if (state) {
|
|
5677
|
-
logger$
|
|
5694
|
+
logger$3.log(` Found existing state for stage "${stage}"`);
|
|
5678
5695
|
if (state.projectId !== project.projectId) {
|
|
5679
|
-
logger$
|
|
5696
|
+
logger$3.log(` ⚠ Project ID changed, updating state`);
|
|
5680
5697
|
state.projectId = project.projectId;
|
|
5681
5698
|
}
|
|
5682
5699
|
if (state.environmentId !== environmentId) {
|
|
5683
|
-
logger$
|
|
5700
|
+
logger$3.log(` ⚠ Environment ID changed, updating state`);
|
|
5684
5701
|
state.environmentId = environmentId;
|
|
5685
5702
|
}
|
|
5686
5703
|
} else {
|
|
5687
|
-
logger$
|
|
5704
|
+
logger$3.log(` Creating new state for stage "${stage}"`);
|
|
5688
5705
|
state = createEmptyState(stage, project.projectId, environmentId);
|
|
5689
5706
|
}
|
|
5690
|
-
logger$
|
|
5707
|
+
logger$3.log("\n🐳 Checking registry...");
|
|
5691
5708
|
let registryId = await getDokployRegistryId();
|
|
5692
5709
|
const registry = workspace.deploy.dokploy?.registry;
|
|
5693
5710
|
if (registryId) try {
|
|
5694
5711
|
const reg = await api.getRegistry(registryId);
|
|
5695
|
-
logger$
|
|
5712
|
+
logger$3.log(` Using registry: ${reg.registryName}`);
|
|
5696
5713
|
} catch {
|
|
5697
|
-
logger$
|
|
5714
|
+
logger$3.log(" ⚠ Stored registry not found, clearing...");
|
|
5698
5715
|
registryId = void 0;
|
|
5699
5716
|
await storeDokployRegistryId("");
|
|
5700
5717
|
}
|
|
@@ -5703,17 +5720,17 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5703
5720
|
if (registries.length > 0) {
|
|
5704
5721
|
registryId = registries[0].registryId;
|
|
5705
5722
|
await storeDokployRegistryId(registryId);
|
|
5706
|
-
logger$
|
|
5723
|
+
logger$3.log(` Using registry: ${registries[0].registryName}`);
|
|
5707
5724
|
} else if (registry) {
|
|
5708
|
-
logger$
|
|
5709
|
-
logger$
|
|
5725
|
+
logger$3.log(" No registries found in Dokploy. Let's create one.");
|
|
5726
|
+
logger$3.log(` Registry URL: ${registry}`);
|
|
5710
5727
|
const username = await prompt("Registry username: ");
|
|
5711
5728
|
const password = await prompt("Registry password/token: ", true);
|
|
5712
5729
|
const reg = await api.createRegistry("Default Registry", registry, username, password);
|
|
5713
5730
|
registryId = reg.registryId;
|
|
5714
5731
|
await storeDokployRegistryId(registryId);
|
|
5715
|
-
logger$
|
|
5716
|
-
} else logger$
|
|
5732
|
+
logger$3.log(` ✓ Registry created: ${registryId}`);
|
|
5733
|
+
} else logger$3.log(" ⚠ No registry configured. Set deploy.dokploy.registry in workspace config");
|
|
5717
5734
|
}
|
|
5718
5735
|
const services = workspace.services;
|
|
5719
5736
|
const dockerServices = {
|
|
@@ -5723,7 +5740,7 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5723
5740
|
let provisionedPostgres = null;
|
|
5724
5741
|
let provisionedRedis = null;
|
|
5725
5742
|
if (dockerServices.postgres || dockerServices.redis) {
|
|
5726
|
-
logger$
|
|
5743
|
+
logger$3.log("\n🔧 Provisioning infrastructure services...");
|
|
5727
5744
|
const existingServiceIds = {
|
|
5728
5745
|
postgresId: getPostgresId(state),
|
|
5729
5746
|
redisId: getRedisId(state)
|
|
@@ -5749,13 +5766,13 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5749
5766
|
return requirements?.requiredEnvVars.includes("DATABASE_URL");
|
|
5750
5767
|
});
|
|
5751
5768
|
if (appsNeedingDb.length > 0) {
|
|
5752
|
-
logger$
|
|
5753
|
-
logger$
|
|
5769
|
+
logger$3.log(`\n🔐 Setting up per-app database credentials...`);
|
|
5770
|
+
logger$3.log(` Apps needing DATABASE_URL: ${appsNeedingDb.join(", ")}`);
|
|
5754
5771
|
const existingCredentials = getAllAppCredentials(state);
|
|
5755
5772
|
const usersToCreate = [];
|
|
5756
5773
|
for (const appName of appsNeedingDb) {
|
|
5757
5774
|
let credentials = existingCredentials[appName];
|
|
5758
|
-
if (credentials) logger$
|
|
5775
|
+
if (credentials) logger$3.log(` ${appName}: Using existing credentials from state`);
|
|
5759
5776
|
else {
|
|
5760
5777
|
const password = randomBytes(16).toString("hex");
|
|
5761
5778
|
credentials = {
|
|
@@ -5763,7 +5780,7 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5763
5780
|
dbPassword: password
|
|
5764
5781
|
};
|
|
5765
5782
|
setAppCredentials(state, appName, credentials);
|
|
5766
|
-
logger$
|
|
5783
|
+
logger$3.log(` ${appName}: Generated new credentials`);
|
|
5767
5784
|
}
|
|
5768
5785
|
perAppDbCredentials.set(appName, credentials);
|
|
5769
5786
|
usersToCreate.push({
|
|
@@ -5777,8 +5794,8 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5777
5794
|
}
|
|
5778
5795
|
}
|
|
5779
5796
|
if (workspace.deploy?.backups && provisionedPostgres) {
|
|
5780
|
-
logger$
|
|
5781
|
-
const { provisionBackupDestination } = await import("./backup-provisioner-
|
|
5797
|
+
logger$3.log("\n💾 Provisioning backup destination...");
|
|
5798
|
+
const { provisionBackupDestination } = await import("./backup-provisioner-BEXoHTuC.mjs");
|
|
5782
5799
|
const backupState = await provisionBackupDestination({
|
|
5783
5800
|
api,
|
|
5784
5801
|
projectId: project.projectId,
|
|
@@ -5786,13 +5803,13 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5786
5803
|
stage,
|
|
5787
5804
|
config: workspace.deploy.backups,
|
|
5788
5805
|
existingState: getBackupState(state),
|
|
5789
|
-
logger: logger$
|
|
5806
|
+
logger: logger$3
|
|
5790
5807
|
});
|
|
5791
5808
|
setBackupState(state, backupState);
|
|
5792
5809
|
if (!backupState.postgresBackupId) {
|
|
5793
5810
|
const backupSchedule = workspace.deploy.backups.schedule ?? "0 2 * * *";
|
|
5794
5811
|
const backupRetention = workspace.deploy.backups.retention ?? 30;
|
|
5795
|
-
logger$
|
|
5812
|
+
logger$3.log(" Creating postgres backup schedule...");
|
|
5796
5813
|
const backup = await api.createPostgresBackup({
|
|
5797
5814
|
schedule: backupSchedule,
|
|
5798
5815
|
prefix: `${stage}/postgres`,
|
|
@@ -5803,8 +5820,8 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5803
5820
|
keepLatestCount: backupRetention
|
|
5804
5821
|
});
|
|
5805
5822
|
setPostgresBackupId(state, backup.backupId);
|
|
5806
|
-
logger$
|
|
5807
|
-
} else logger$
|
|
5823
|
+
logger$3.log(` ✓ Postgres backup schedule created (${backupSchedule})`);
|
|
5824
|
+
} else logger$3.log(" ✓ Using existing postgres backup schedule");
|
|
5808
5825
|
}
|
|
5809
5826
|
const publicUrls = {};
|
|
5810
5827
|
const results = [];
|
|
@@ -5819,25 +5836,25 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5819
5836
|
frontendUrls.push(`https://${hostname}`);
|
|
5820
5837
|
}
|
|
5821
5838
|
if (backendApps.length > 0) {
|
|
5822
|
-
logger$
|
|
5839
|
+
logger$3.log("\n📦 PHASE 1: Deploying backend applications...");
|
|
5823
5840
|
for (const appName of backendApps) {
|
|
5824
5841
|
const app = workspace.apps[appName];
|
|
5825
|
-
logger$
|
|
5842
|
+
logger$3.log(`\n ⚙️ Deploying ${appName}...`);
|
|
5826
5843
|
try {
|
|
5827
5844
|
const dokployAppName = appName;
|
|
5828
5845
|
let application = null;
|
|
5829
5846
|
const cachedAppId = getApplicationId(state, appName);
|
|
5830
5847
|
if (cachedAppId) {
|
|
5831
|
-
logger$
|
|
5848
|
+
logger$3.log(` Using cached ID: ${cachedAppId}`);
|
|
5832
5849
|
application = await api.getApplication(cachedAppId);
|
|
5833
|
-
if (application) logger$
|
|
5834
|
-
else logger$
|
|
5850
|
+
if (application) logger$3.log(` ✓ Application found: ${application.applicationId}`);
|
|
5851
|
+
else logger$3.log(` ⚠ Cached ID invalid, will create new`);
|
|
5835
5852
|
}
|
|
5836
5853
|
if (!application) {
|
|
5837
5854
|
const result = await api.findOrCreateApplication(dokployAppName, project.projectId, environmentId);
|
|
5838
5855
|
application = result.application;
|
|
5839
|
-
if (result.created) logger$
|
|
5840
|
-
else logger$
|
|
5856
|
+
if (result.created) logger$3.log(` Created application: ${application.applicationId}`);
|
|
5857
|
+
else logger$3.log(` Found existing application: ${application.applicationId}`);
|
|
5841
5858
|
}
|
|
5842
5859
|
setApplicationId(state, appName, application.applicationId);
|
|
5843
5860
|
const appSecrets = encryptedSecrets.get(appName);
|
|
@@ -5845,11 +5862,11 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5845
5862
|
if (appSecrets && appSecrets.secretCount > 0) {
|
|
5846
5863
|
buildArgs.push(`GKM_ENCRYPTED_CREDENTIALS=${appSecrets.payload.encrypted}`);
|
|
5847
5864
|
buildArgs.push(`GKM_CREDENTIALS_IV=${appSecrets.payload.iv}`);
|
|
5848
|
-
logger$
|
|
5865
|
+
logger$3.log(` Encrypted ${appSecrets.secretCount} secrets`);
|
|
5849
5866
|
}
|
|
5850
5867
|
const imageName = `${workspace.name}-${appName}`;
|
|
5851
5868
|
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
5852
|
-
logger$
|
|
5869
|
+
logger$3.log(` Building Docker image: ${imageRef}`);
|
|
5853
5870
|
await deployDocker({
|
|
5854
5871
|
stage,
|
|
5855
5872
|
tag: imageTag,
|
|
@@ -5899,10 +5916,10 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5899
5916
|
const { valid, missing, resolved } = validateEnvVars(requiredVars, envContext);
|
|
5900
5917
|
if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
|
|
5901
5918
|
const envVars = Object.entries(resolved).map(([key, value]) => `${key}=${value}`);
|
|
5902
|
-
if (Object.keys(resolved).length > 0) logger$
|
|
5919
|
+
if (Object.keys(resolved).length > 0) logger$3.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
|
|
5903
5920
|
await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
|
|
5904
5921
|
await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
|
|
5905
|
-
logger$
|
|
5922
|
+
logger$3.log(` Deploying to Dokploy...`);
|
|
5906
5923
|
await api.deployApplication(application.applicationId);
|
|
5907
5924
|
const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
|
|
5908
5925
|
const existingDomain = existingDomains.find((d) => d.host === backendHost);
|
|
@@ -5910,7 +5927,7 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5910
5927
|
appHostnames.set(appName, backendHost);
|
|
5911
5928
|
appDomainIds.set(appName, existingDomain.domainId);
|
|
5912
5929
|
publicUrls[appName] = `https://${backendHost}`;
|
|
5913
|
-
logger$
|
|
5930
|
+
logger$3.log(` ✓ Domain: https://${backendHost} (existing)`);
|
|
5914
5931
|
} else try {
|
|
5915
5932
|
const domain = await api.createDomain({
|
|
5916
5933
|
host: backendHost,
|
|
@@ -5922,10 +5939,10 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5922
5939
|
appHostnames.set(appName, backendHost);
|
|
5923
5940
|
appDomainIds.set(appName, domain.domainId);
|
|
5924
5941
|
publicUrls[appName] = `https://${backendHost}`;
|
|
5925
|
-
logger$
|
|
5942
|
+
logger$3.log(` ✓ Domain: https://${backendHost} (created)`);
|
|
5926
5943
|
} catch (domainError) {
|
|
5927
5944
|
const message = domainError instanceof Error ? domainError.message : "Unknown error";
|
|
5928
|
-
logger$
|
|
5945
|
+
logger$3.log(` ⚠ Domain creation failed: ${message}`);
|
|
5929
5946
|
appHostnames.set(appName, backendHost);
|
|
5930
5947
|
publicUrls[appName] = `https://${backendHost}`;
|
|
5931
5948
|
}
|
|
@@ -5936,10 +5953,10 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5936
5953
|
applicationId: application.applicationId,
|
|
5937
5954
|
imageRef
|
|
5938
5955
|
});
|
|
5939
|
-
logger$
|
|
5956
|
+
logger$3.log(` ✓ ${appName} deployed successfully`);
|
|
5940
5957
|
} catch (error) {
|
|
5941
5958
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
5942
|
-
logger$
|
|
5959
|
+
logger$3.log(` ✗ Failed to deploy ${appName}: ${message}`);
|
|
5943
5960
|
results.push({
|
|
5944
5961
|
appName,
|
|
5945
5962
|
type: app.type,
|
|
@@ -5951,25 +5968,25 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5951
5968
|
}
|
|
5952
5969
|
}
|
|
5953
5970
|
if (frontendApps.length > 0) {
|
|
5954
|
-
logger$
|
|
5971
|
+
logger$3.log("\n🌐 PHASE 2: Deploying frontend applications...");
|
|
5955
5972
|
for (const appName of frontendApps) {
|
|
5956
5973
|
const app = workspace.apps[appName];
|
|
5957
|
-
logger$
|
|
5974
|
+
logger$3.log(`\n 🌐 Deploying ${appName}...`);
|
|
5958
5975
|
try {
|
|
5959
5976
|
const dokployAppName = appName;
|
|
5960
5977
|
let application = null;
|
|
5961
5978
|
const cachedAppId = getApplicationId(state, appName);
|
|
5962
5979
|
if (cachedAppId) {
|
|
5963
|
-
logger$
|
|
5980
|
+
logger$3.log(` Using cached ID: ${cachedAppId}`);
|
|
5964
5981
|
application = await api.getApplication(cachedAppId);
|
|
5965
|
-
if (application) logger$
|
|
5966
|
-
else logger$
|
|
5982
|
+
if (application) logger$3.log(` ✓ Application found: ${application.applicationId}`);
|
|
5983
|
+
else logger$3.log(` ⚠ Cached ID invalid, will create new`);
|
|
5967
5984
|
}
|
|
5968
5985
|
if (!application) {
|
|
5969
5986
|
const result = await api.findOrCreateApplication(dokployAppName, project.projectId, environmentId);
|
|
5970
5987
|
application = result.application;
|
|
5971
|
-
if (result.created) logger$
|
|
5972
|
-
else logger$
|
|
5988
|
+
if (result.created) logger$3.log(` Created application: ${application.applicationId}`);
|
|
5989
|
+
else logger$3.log(` Found existing application: ${application.applicationId}`);
|
|
5973
5990
|
}
|
|
5974
5991
|
setApplicationId(state, appName, application.applicationId);
|
|
5975
5992
|
const dependencyUrls = {};
|
|
@@ -5991,17 +6008,17 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
5991
6008
|
const sniffedVars = sniffedApps.get(appName)?.requiredEnvVars ?? [];
|
|
5992
6009
|
const { valid, missing, resolved } = validateEnvVars(sniffedVars, envContext);
|
|
5993
6010
|
if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
|
|
5994
|
-
if (Object.keys(resolved).length > 0) logger$
|
|
6011
|
+
if (Object.keys(resolved).length > 0) logger$3.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
|
|
5995
6012
|
const buildArgs = [];
|
|
5996
6013
|
const publicUrlArgNames = [];
|
|
5997
6014
|
for (const [key, value] of Object.entries(resolved)) if (key.startsWith("NEXT_PUBLIC_")) {
|
|
5998
6015
|
buildArgs.push(`${key}=${value}`);
|
|
5999
6016
|
publicUrlArgNames.push(key);
|
|
6000
6017
|
}
|
|
6001
|
-
if (buildArgs.length > 0) logger$
|
|
6018
|
+
if (buildArgs.length > 0) logger$3.log(` Build args: ${publicUrlArgNames.join(", ")}`);
|
|
6002
6019
|
const imageName = `${workspace.name}-${appName}`;
|
|
6003
6020
|
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
6004
|
-
logger$
|
|
6021
|
+
logger$3.log(` Building Docker image: ${imageRef}`);
|
|
6005
6022
|
await deployDocker({
|
|
6006
6023
|
stage,
|
|
6007
6024
|
tag: imageTag,
|
|
@@ -6022,7 +6039,7 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6022
6039
|
for (const [key, value] of Object.entries(resolved)) envVars.push(`${key}=${value}`);
|
|
6023
6040
|
await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
|
|
6024
6041
|
await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
|
|
6025
|
-
logger$
|
|
6042
|
+
logger$3.log(` Deploying to Dokploy...`);
|
|
6026
6043
|
await api.deployApplication(application.applicationId);
|
|
6027
6044
|
const existingFrontendDomains = await api.getDomainsByApplicationId(application.applicationId);
|
|
6028
6045
|
const existingFrontendDomain = existingFrontendDomains.find((d) => d.host === frontendHost);
|
|
@@ -6030,7 +6047,7 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6030
6047
|
appHostnames.set(appName, frontendHost);
|
|
6031
6048
|
appDomainIds.set(appName, existingFrontendDomain.domainId);
|
|
6032
6049
|
publicUrls[appName] = `https://${frontendHost}`;
|
|
6033
|
-
logger$
|
|
6050
|
+
logger$3.log(` ✓ Domain: https://${frontendHost} (existing)`);
|
|
6034
6051
|
} else try {
|
|
6035
6052
|
const domain = await api.createDomain({
|
|
6036
6053
|
host: frontendHost,
|
|
@@ -6042,10 +6059,10 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6042
6059
|
appHostnames.set(appName, frontendHost);
|
|
6043
6060
|
appDomainIds.set(appName, domain.domainId);
|
|
6044
6061
|
publicUrls[appName] = `https://${frontendHost}`;
|
|
6045
|
-
logger$
|
|
6062
|
+
logger$3.log(` ✓ Domain: https://${frontendHost} (created)`);
|
|
6046
6063
|
} catch (domainError) {
|
|
6047
6064
|
const message = domainError instanceof Error ? domainError.message : "Unknown error";
|
|
6048
|
-
logger$
|
|
6065
|
+
logger$3.log(` ⚠ Domain creation failed: ${message}`);
|
|
6049
6066
|
appHostnames.set(appName, frontendHost);
|
|
6050
6067
|
publicUrls[appName] = `https://${frontendHost}`;
|
|
6051
6068
|
}
|
|
@@ -6056,10 +6073,10 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6056
6073
|
applicationId: application.applicationId,
|
|
6057
6074
|
imageRef
|
|
6058
6075
|
});
|
|
6059
|
-
logger$
|
|
6076
|
+
logger$3.log(` ✓ ${appName} deployed successfully`);
|
|
6060
6077
|
} catch (error) {
|
|
6061
6078
|
const message = error instanceof Error ? error.message : "Unknown error";
|
|
6062
|
-
logger$
|
|
6079
|
+
logger$3.log(` ✗ Failed to deploy ${appName}: ${message}`);
|
|
6063
6080
|
results.push({
|
|
6064
6081
|
appName,
|
|
6065
6082
|
type: app.type,
|
|
@@ -6069,9 +6086,9 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6069
6086
|
}
|
|
6070
6087
|
}
|
|
6071
6088
|
}
|
|
6072
|
-
logger$
|
|
6089
|
+
logger$3.log("\n📋 Saving deploy state...");
|
|
6073
6090
|
await stateProvider.write(stage, state);
|
|
6074
|
-
logger$
|
|
6091
|
+
logger$3.log(" ✓ State saved");
|
|
6075
6092
|
const dnsConfig = workspace.deploy.dns;
|
|
6076
6093
|
if (dnsConfig && appHostnames.size > 0) {
|
|
6077
6094
|
const dnsResult = await orchestrateDns(appHostnames, dnsConfig, creds.endpoint);
|
|
@@ -6080,27 +6097,27 @@ async function workspaceDeployCommand(workspace, options) {
|
|
|
6080
6097
|
await stateProvider.write(stage, state);
|
|
6081
6098
|
}
|
|
6082
6099
|
if (dnsResult?.success && appHostnames.size > 0) {
|
|
6083
|
-
logger$
|
|
6100
|
+
logger$3.log("\n🔒 Validating domains for SSL certificates...");
|
|
6084
6101
|
for (const [appName, hostname] of appHostnames) try {
|
|
6085
6102
|
const result = await api.validateDomain(hostname);
|
|
6086
|
-
if (result.isValid) logger$
|
|
6087
|
-
else logger$
|
|
6103
|
+
if (result.isValid) logger$3.log(` ✓ ${appName}: ${hostname} → ${result.resolvedIp}`);
|
|
6104
|
+
else logger$3.log(` ⚠ ${appName}: ${hostname} not valid`);
|
|
6088
6105
|
} catch (validationError) {
|
|
6089
6106
|
const message = validationError instanceof Error ? validationError.message : "Unknown error";
|
|
6090
|
-
logger$
|
|
6107
|
+
logger$3.log(` ⚠ ${appName}: validation failed - ${message}`);
|
|
6091
6108
|
}
|
|
6092
6109
|
}
|
|
6093
6110
|
}
|
|
6094
6111
|
const successCount = results.filter((r) => r.success).length;
|
|
6095
6112
|
const failedCount = results.filter((r) => !r.success).length;
|
|
6096
|
-
logger$
|
|
6097
|
-
logger$
|
|
6098
|
-
logger$
|
|
6099
|
-
logger$
|
|
6100
|
-
if (failedCount > 0) logger$
|
|
6113
|
+
logger$3.log(`\n${"─".repeat(50)}`);
|
|
6114
|
+
logger$3.log(`\n✅ Workspace deployment complete!`);
|
|
6115
|
+
logger$3.log(` Project: ${project.projectId}`);
|
|
6116
|
+
logger$3.log(` Successful: ${successCount}`);
|
|
6117
|
+
if (failedCount > 0) logger$3.log(` Failed: ${failedCount}`);
|
|
6101
6118
|
if (Object.keys(publicUrls).length > 0) {
|
|
6102
|
-
logger$
|
|
6103
|
-
for (const [name$1, url] of Object.entries(publicUrls)) logger$
|
|
6119
|
+
logger$3.log("\n 📡 Deployed URLs:");
|
|
6120
|
+
for (const [name$1, url] of Object.entries(publicUrls)) logger$3.log(` ${name$1}: ${url}`);
|
|
6104
6121
|
}
|
|
6105
6122
|
return {
|
|
6106
6123
|
apps: results,
|
|
@@ -6116,14 +6133,14 @@ async function deployCommand(options) {
|
|
|
6116
6133
|
const { provider, stage, tag, skipPush, skipBuild } = options;
|
|
6117
6134
|
const loadedConfig = await loadWorkspaceConfig();
|
|
6118
6135
|
if (loadedConfig.type === "workspace") {
|
|
6119
|
-
logger$
|
|
6136
|
+
logger$3.log("📦 Detected workspace configuration");
|
|
6120
6137
|
return workspaceDeployCommand(loadedConfig.workspace, options);
|
|
6121
6138
|
}
|
|
6122
|
-
logger$
|
|
6123
|
-
logger$
|
|
6139
|
+
logger$3.log(`\n🚀 Deploying to ${provider}...`);
|
|
6140
|
+
logger$3.log(` Stage: ${stage}`);
|
|
6124
6141
|
const config$1 = await loadConfig();
|
|
6125
6142
|
const imageTag = tag ?? generateTag(stage);
|
|
6126
|
-
logger$
|
|
6143
|
+
logger$3.log(` Tag: ${imageTag}`);
|
|
6127
6144
|
const dockerConfig = resolveDockerConfig(config$1);
|
|
6128
6145
|
const imageName = dockerConfig.imageName;
|
|
6129
6146
|
const registry = dockerConfig.registry;
|
|
@@ -6132,7 +6149,7 @@ async function deployCommand(options) {
|
|
|
6132
6149
|
let finalRegistry = registry;
|
|
6133
6150
|
if (provider === "dokploy") {
|
|
6134
6151
|
const composeServices = config$1.docker?.compose?.services;
|
|
6135
|
-
logger$
|
|
6152
|
+
logger$3.log(`\n🔍 Docker compose config: ${JSON.stringify(config$1.docker?.compose)}`);
|
|
6136
6153
|
const dockerServices = composeServices ? Array.isArray(composeServices) ? {
|
|
6137
6154
|
postgres: composeServices.includes("postgres"),
|
|
6138
6155
|
redis: composeServices.includes("redis"),
|
|
@@ -6149,7 +6166,7 @@ async function deployCommand(options) {
|
|
|
6149
6166
|
const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1, initStageSecrets } = await import("./storage-Dx_jZbq6.mjs");
|
|
6150
6167
|
let secrets = await readStageSecrets$1(stage);
|
|
6151
6168
|
if (!secrets) {
|
|
6152
|
-
logger$
|
|
6169
|
+
logger$3.log(` Creating secrets file for stage "${stage}"...`);
|
|
6153
6170
|
secrets = initStageSecrets(stage);
|
|
6154
6171
|
}
|
|
6155
6172
|
let updated = false;
|
|
@@ -6164,12 +6181,12 @@ async function deployCommand(options) {
|
|
|
6164
6181
|
const urlKey = key;
|
|
6165
6182
|
if (!secrets.urls[urlKey]) {
|
|
6166
6183
|
secrets.urls[urlKey] = value;
|
|
6167
|
-
logger$
|
|
6184
|
+
logger$3.log(` Saved ${key} to secrets.urls`);
|
|
6168
6185
|
updated = true;
|
|
6169
6186
|
}
|
|
6170
6187
|
} else if (!secrets.custom[key]) {
|
|
6171
6188
|
secrets.custom[key] = value;
|
|
6172
|
-
logger$
|
|
6189
|
+
logger$3.log(` Saved ${key} to secrets.custom`);
|
|
6173
6190
|
updated = true;
|
|
6174
6191
|
}
|
|
6175
6192
|
}
|
|
@@ -6178,14 +6195,14 @@ async function deployCommand(options) {
|
|
|
6178
6195
|
}
|
|
6179
6196
|
let masterKey;
|
|
6180
6197
|
if (!skipBuild) {
|
|
6181
|
-
logger$
|
|
6198
|
+
logger$3.log(`\n📦 Building for production...`);
|
|
6182
6199
|
const buildResult = await buildCommand({
|
|
6183
6200
|
provider: "server",
|
|
6184
6201
|
production: true,
|
|
6185
6202
|
stage
|
|
6186
6203
|
});
|
|
6187
6204
|
masterKey = buildResult.masterKey;
|
|
6188
|
-
} else logger$
|
|
6205
|
+
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
6189
6206
|
let result;
|
|
6190
6207
|
switch (provider) {
|
|
6191
6208
|
case "docker": {
|
|
@@ -6221,8 +6238,8 @@ async function deployCommand(options) {
|
|
|
6221
6238
|
break;
|
|
6222
6239
|
}
|
|
6223
6240
|
case "aws-lambda": {
|
|
6224
|
-
logger$
|
|
6225
|
-
logger$
|
|
6241
|
+
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
6242
|
+
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
6226
6243
|
result = {
|
|
6227
6244
|
imageRef,
|
|
6228
6245
|
masterKey
|
|
@@ -6231,7 +6248,7 @@ async function deployCommand(options) {
|
|
|
6231
6248
|
}
|
|
6232
6249
|
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
6233
6250
|
}
|
|
6234
|
-
logger$
|
|
6251
|
+
logger$3.log("\n✅ Deployment complete!");
|
|
6235
6252
|
return result;
|
|
6236
6253
|
}
|
|
6237
6254
|
|
|
@@ -6403,123 +6420,6 @@ function printStateDetails(state) {
|
|
|
6403
6420
|
}
|
|
6404
6421
|
}
|
|
6405
6422
|
|
|
6406
|
-
//#endregion
|
|
6407
|
-
//#region src/secrets/generator.ts
|
|
6408
|
-
/**
|
|
6409
|
-
* Generate a secure random password using URL-safe base64 characters.
|
|
6410
|
-
* @param length Password length (default: 32)
|
|
6411
|
-
*/
|
|
6412
|
-
function generateSecurePassword(length = 32) {
|
|
6413
|
-
return randomBytes(Math.ceil(length * 3 / 4)).toString("base64url").slice(0, length);
|
|
6414
|
-
}
|
|
6415
|
-
/** Default service configurations */
|
|
6416
|
-
const SERVICE_DEFAULTS = {
|
|
6417
|
-
postgres: {
|
|
6418
|
-
host: "postgres",
|
|
6419
|
-
port: 5432,
|
|
6420
|
-
username: "app",
|
|
6421
|
-
database: "app"
|
|
6422
|
-
},
|
|
6423
|
-
redis: {
|
|
6424
|
-
host: "redis",
|
|
6425
|
-
port: 6379,
|
|
6426
|
-
username: "default"
|
|
6427
|
-
},
|
|
6428
|
-
rabbitmq: {
|
|
6429
|
-
host: "rabbitmq",
|
|
6430
|
-
port: 5672,
|
|
6431
|
-
username: "app",
|
|
6432
|
-
vhost: "/"
|
|
6433
|
-
}
|
|
6434
|
-
};
|
|
6435
|
-
/**
|
|
6436
|
-
* Generate credentials for a specific service.
|
|
6437
|
-
*/
|
|
6438
|
-
function generateServiceCredentials(service) {
|
|
6439
|
-
const defaults = SERVICE_DEFAULTS[service];
|
|
6440
|
-
return {
|
|
6441
|
-
...defaults,
|
|
6442
|
-
password: generateSecurePassword()
|
|
6443
|
-
};
|
|
6444
|
-
}
|
|
6445
|
-
/**
|
|
6446
|
-
* Generate credentials for multiple services.
|
|
6447
|
-
*/
|
|
6448
|
-
function generateServicesCredentials(services) {
|
|
6449
|
-
const result = {};
|
|
6450
|
-
for (const service of services) result[service] = generateServiceCredentials(service);
|
|
6451
|
-
return result;
|
|
6452
|
-
}
|
|
6453
|
-
/**
|
|
6454
|
-
* Generate connection URL for PostgreSQL.
|
|
6455
|
-
*/
|
|
6456
|
-
function generatePostgresUrl(creds) {
|
|
6457
|
-
const { username, password, host, port, database } = creds;
|
|
6458
|
-
return `postgresql://${username}:${encodeURIComponent(password)}@${host}:${port}/${database}`;
|
|
6459
|
-
}
|
|
6460
|
-
/**
|
|
6461
|
-
* Generate connection URL for Redis.
|
|
6462
|
-
*/
|
|
6463
|
-
function generateRedisUrl(creds) {
|
|
6464
|
-
const { password, host, port } = creds;
|
|
6465
|
-
return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
|
|
6466
|
-
}
|
|
6467
|
-
/**
|
|
6468
|
-
* Generate connection URL for RabbitMQ.
|
|
6469
|
-
*/
|
|
6470
|
-
function generateRabbitmqUrl(creds) {
|
|
6471
|
-
const { username, password, host, port, vhost } = creds;
|
|
6472
|
-
const encodedVhost = encodeURIComponent(vhost ?? "/");
|
|
6473
|
-
return `amqp://${username}:${encodeURIComponent(password)}@${host}:${port}/${encodedVhost}`;
|
|
6474
|
-
}
|
|
6475
|
-
/**
|
|
6476
|
-
* Generate connection URLs from service credentials.
|
|
6477
|
-
*/
|
|
6478
|
-
function generateConnectionUrls(services) {
|
|
6479
|
-
const urls = {};
|
|
6480
|
-
if (services.postgres) urls.DATABASE_URL = generatePostgresUrl(services.postgres);
|
|
6481
|
-
if (services.redis) urls.REDIS_URL = generateRedisUrl(services.redis);
|
|
6482
|
-
if (services.rabbitmq) urls.RABBITMQ_URL = generateRabbitmqUrl(services.rabbitmq);
|
|
6483
|
-
return urls;
|
|
6484
|
-
}
|
|
6485
|
-
/**
|
|
6486
|
-
* Create a new StageSecrets object with generated credentials.
|
|
6487
|
-
*/
|
|
6488
|
-
function createStageSecrets(stage, services) {
|
|
6489
|
-
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
6490
|
-
const serviceCredentials = generateServicesCredentials(services);
|
|
6491
|
-
const urls = generateConnectionUrls(serviceCredentials);
|
|
6492
|
-
return {
|
|
6493
|
-
stage,
|
|
6494
|
-
createdAt: now,
|
|
6495
|
-
updatedAt: now,
|
|
6496
|
-
services: serviceCredentials,
|
|
6497
|
-
urls,
|
|
6498
|
-
custom: {}
|
|
6499
|
-
};
|
|
6500
|
-
}
|
|
6501
|
-
/**
|
|
6502
|
-
* Rotate password for a specific service.
|
|
6503
|
-
*/
|
|
6504
|
-
function rotateServicePassword(secrets, service) {
|
|
6505
|
-
const currentCreds = secrets.services[service];
|
|
6506
|
-
if (!currentCreds) throw new Error(`Service "${service}" not configured in secrets`);
|
|
6507
|
-
const newCreds = {
|
|
6508
|
-
...currentCreds,
|
|
6509
|
-
password: generateSecurePassword()
|
|
6510
|
-
};
|
|
6511
|
-
const newServices = {
|
|
6512
|
-
...secrets.services,
|
|
6513
|
-
[service]: newCreds
|
|
6514
|
-
};
|
|
6515
|
-
return {
|
|
6516
|
-
...secrets,
|
|
6517
|
-
updatedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
6518
|
-
services: newServices,
|
|
6519
|
-
urls: generateConnectionUrls(newServices)
|
|
6520
|
-
};
|
|
6521
|
-
}
|
|
6522
|
-
|
|
6523
6423
|
//#endregion
|
|
6524
6424
|
//#region src/init/versions.ts
|
|
6525
6425
|
const require$1 = createRequire(import.meta.url);
|
|
@@ -6545,14 +6445,14 @@ const GEEKMIDAS_VERSIONS = {
|
|
|
6545
6445
|
"@geekmidas/audit": "~1.0.0",
|
|
6546
6446
|
"@geekmidas/auth": "~1.0.0",
|
|
6547
6447
|
"@geekmidas/cache": "~1.0.0",
|
|
6548
|
-
"@geekmidas/client": "~
|
|
6448
|
+
"@geekmidas/client": "~3.0.0",
|
|
6549
6449
|
"@geekmidas/cloud": "~1.0.0",
|
|
6550
|
-
"@geekmidas/constructs": "~
|
|
6450
|
+
"@geekmidas/constructs": "~2.0.0",
|
|
6551
6451
|
"@geekmidas/db": "~1.0.0",
|
|
6552
6452
|
"@geekmidas/emailkit": "~1.0.0",
|
|
6553
6453
|
"@geekmidas/envkit": "~1.0.3",
|
|
6554
6454
|
"@geekmidas/errors": "~1.0.0",
|
|
6555
|
-
"@geekmidas/events": "~1.
|
|
6455
|
+
"@geekmidas/events": "~1.1.0",
|
|
6556
6456
|
"@geekmidas/logger": "~1.0.0",
|
|
6557
6457
|
"@geekmidas/rate-limit": "~1.0.0",
|
|
6558
6458
|
"@geekmidas/schema": "~1.0.0",
|
|
@@ -6560,7 +6460,7 @@ const GEEKMIDAS_VERSIONS = {
|
|
|
6560
6460
|
"@geekmidas/storage": "~1.0.0",
|
|
6561
6461
|
"@geekmidas/studio": "~1.0.0",
|
|
6562
6462
|
"@geekmidas/telescope": "~1.0.0",
|
|
6563
|
-
"@geekmidas/testkit": "~1.0.
|
|
6463
|
+
"@geekmidas/testkit": "~1.0.2",
|
|
6564
6464
|
"@geekmidas/cli": CLI_VERSION
|
|
6565
6465
|
};
|
|
6566
6466
|
|
|
@@ -10664,25 +10564,77 @@ function getRunCommand(pkgManager, script) {
|
|
|
10664
10564
|
default: return `npm run ${script}`;
|
|
10665
10565
|
}
|
|
10666
10566
|
}
|
|
10667
|
-
|
|
10668
|
-
|
|
10669
|
-
|
|
10567
|
+
const lockfileByPm = {
|
|
10568
|
+
pnpm: "pnpm-lock.yaml",
|
|
10569
|
+
yarn: "yarn.lock",
|
|
10570
|
+
npm: "package-lock.json",
|
|
10571
|
+
bun: "bun.lockb"
|
|
10572
|
+
};
|
|
10670
10573
|
/**
|
|
10671
|
-
*
|
|
10574
|
+
* Find the workspace/project root by walking up from cwd.
|
|
10575
|
+
* Checks for PM-specific workspace config, package.json#workspaces,
|
|
10576
|
+
* and lockfiles.
|
|
10672
10577
|
*/
|
|
10673
|
-
function
|
|
10674
|
-
|
|
10578
|
+
function findWorkspaceRoot(cwd, pm) {
|
|
10579
|
+
let dir = cwd;
|
|
10580
|
+
const root = parse(dir).root;
|
|
10581
|
+
const lockfile = lockfileByPm[pm];
|
|
10582
|
+
while (dir !== root) {
|
|
10583
|
+
if (pm === "pnpm" && existsSync(join(dir, "pnpm-workspace.yaml"))) return dir;
|
|
10584
|
+
const pkgJsonPath = join(dir, "package.json");
|
|
10585
|
+
if (existsSync(pkgJsonPath)) try {
|
|
10586
|
+
const pkg$1 = JSON.parse(readFileSync(pkgJsonPath, "utf-8"));
|
|
10587
|
+
if (pkg$1.workspaces) return dir;
|
|
10588
|
+
} catch {}
|
|
10589
|
+
if (existsSync(join(dir, lockfile))) return dir;
|
|
10590
|
+
dir = dirname(dir);
|
|
10591
|
+
}
|
|
10592
|
+
return cwd;
|
|
10675
10593
|
}
|
|
10676
10594
|
/**
|
|
10677
|
-
*
|
|
10678
|
-
*
|
|
10595
|
+
* Get workspace package glob patterns from pnpm-workspace.yaml
|
|
10596
|
+
* or package.json#workspaces.
|
|
10679
10597
|
*/
|
|
10680
|
-
function
|
|
10681
|
-
const
|
|
10682
|
-
|
|
10683
|
-
|
|
10598
|
+
function getWorkspaceGlobs(root) {
|
|
10599
|
+
const pnpmWorkspacePath = join(root, "pnpm-workspace.yaml");
|
|
10600
|
+
if (existsSync(pnpmWorkspacePath)) {
|
|
10601
|
+
const content = readFileSync(pnpmWorkspacePath, "utf-8");
|
|
10602
|
+
const parsed = parse$1(content);
|
|
10603
|
+
return parsed?.packages ?? [];
|
|
10604
|
+
}
|
|
10605
|
+
const rootPkgJsonPath = join(root, "package.json");
|
|
10606
|
+
if (existsSync(rootPkgJsonPath)) {
|
|
10607
|
+
const pkg$1 = JSON.parse(readFileSync(rootPkgJsonPath, "utf-8"));
|
|
10608
|
+
if (Array.isArray(pkg$1.workspaces)) return pkg$1.workspaces;
|
|
10609
|
+
if (pkg$1.workspaces?.packages) return pkg$1.workspaces.packages;
|
|
10610
|
+
}
|
|
10611
|
+
return [];
|
|
10684
10612
|
}
|
|
10685
10613
|
/**
|
|
10614
|
+
* Find all package.json files across a workspace.
|
|
10615
|
+
* Returns the root package.json plus all workspace member package.json paths.
|
|
10616
|
+
*/
|
|
10617
|
+
function findWorkspacePackages(cwd, pm) {
|
|
10618
|
+
const workspaceRoot = findWorkspaceRoot(cwd, pm);
|
|
10619
|
+
const results = [];
|
|
10620
|
+
const rootPkgJson = join(workspaceRoot, "package.json");
|
|
10621
|
+
if (existsSync(rootPkgJson)) results.push(rootPkgJson);
|
|
10622
|
+
const globs = getWorkspaceGlobs(workspaceRoot);
|
|
10623
|
+
for (const glob of globs) {
|
|
10624
|
+
const pattern = `${glob}/package.json`;
|
|
10625
|
+
const matches = fg.sync(pattern, {
|
|
10626
|
+
cwd: workspaceRoot,
|
|
10627
|
+
absolute: true,
|
|
10628
|
+
ignore: ["**/node_modules/**"]
|
|
10629
|
+
});
|
|
10630
|
+
results.push(...matches);
|
|
10631
|
+
}
|
|
10632
|
+
return [...new Set(results)];
|
|
10633
|
+
}
|
|
10634
|
+
|
|
10635
|
+
//#endregion
|
|
10636
|
+
//#region src/init/index.ts
|
|
10637
|
+
/**
|
|
10686
10638
|
* Main init command - scaffolds a new project
|
|
10687
10639
|
*/
|
|
10688
10640
|
async function initCommand(projectName, options = {}) {
|
|
@@ -10984,7 +10936,7 @@ function printNextSteps(projectName, options, pkgManager) {
|
|
|
10984
10936
|
|
|
10985
10937
|
//#endregion
|
|
10986
10938
|
//#region src/secrets/index.ts
|
|
10987
|
-
const logger = console;
|
|
10939
|
+
const logger$2 = console;
|
|
10988
10940
|
/**
|
|
10989
10941
|
* Extract service names from compose config.
|
|
10990
10942
|
*/
|
|
@@ -11000,23 +10952,33 @@ function getServicesFromConfig(services) {
|
|
|
11000
10952
|
async function secretsInitCommand(options) {
|
|
11001
10953
|
const { stage, force } = options;
|
|
11002
10954
|
if (!force && secretsExist(stage)) {
|
|
11003
|
-
logger.error(`Secrets already exist for stage "${stage}". Use --force to overwrite.`);
|
|
10955
|
+
logger$2.error(`Secrets already exist for stage "${stage}". Use --force to overwrite.`);
|
|
11004
10956
|
process.exit(1);
|
|
11005
10957
|
}
|
|
11006
10958
|
const config$1 = await loadConfig();
|
|
11007
10959
|
const services = getServicesFromConfig(config$1.docker?.compose?.services);
|
|
11008
|
-
if (services.length === 0) logger.warn("No services configured in docker.compose.services. Creating secrets with empty services.");
|
|
10960
|
+
if (services.length === 0) logger$2.warn("No services configured in docker.compose.services. Creating secrets with empty services.");
|
|
11009
10961
|
const secrets = createStageSecrets(stage, services);
|
|
10962
|
+
try {
|
|
10963
|
+
const loaded = await loadWorkspaceConfig();
|
|
10964
|
+
const isMultiApp = Object.keys(loaded.workspace.apps).length > 1;
|
|
10965
|
+
if (isMultiApp) {
|
|
10966
|
+
const customSecrets = generateFullstackCustomSecrets(loaded.workspace);
|
|
10967
|
+
secrets.custom = customSecrets;
|
|
10968
|
+
logger$2.log(" Detected workspace mode — generating per-app secrets");
|
|
10969
|
+
}
|
|
10970
|
+
} catch {}
|
|
11010
10971
|
await writeStageSecrets(secrets);
|
|
11011
|
-
logger.log(`\n✓ Secrets initialized for stage "${stage}"`);
|
|
11012
|
-
logger.log(` Location: .gkm/secrets/${stage}.json`);
|
|
11013
|
-
logger.log("\n Generated credentials for:");
|
|
11014
|
-
for (const service of services) logger.log(` - ${service}`);
|
|
11015
|
-
if (secrets.urls.DATABASE_URL) logger.log(`\n DATABASE_URL: ${maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
11016
|
-
if (secrets.urls.REDIS_URL) logger.log(` REDIS_URL: ${maskUrl(secrets.urls.REDIS_URL)}`);
|
|
11017
|
-
if (secrets.urls.RABBITMQ_URL) logger.log(` RABBITMQ_URL: ${maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
11018
|
-
logger.log(`\n
|
|
11019
|
-
logger.log(
|
|
10972
|
+
logger$2.log(`\n✓ Secrets initialized for stage "${stage}"`);
|
|
10973
|
+
logger$2.log(` Location: .gkm/secrets/${stage}.json`);
|
|
10974
|
+
logger$2.log("\n Generated credentials for:");
|
|
10975
|
+
for (const service of services) logger$2.log(` - ${service}`);
|
|
10976
|
+
if (secrets.urls.DATABASE_URL) logger$2.log(`\n DATABASE_URL: ${maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
10977
|
+
if (secrets.urls.REDIS_URL) logger$2.log(` REDIS_URL: ${maskUrl(secrets.urls.REDIS_URL)}`);
|
|
10978
|
+
if (secrets.urls.RABBITMQ_URL) logger$2.log(` RABBITMQ_URL: ${maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
10979
|
+
if (Object.keys(secrets.custom).length > 0) logger$2.log(`\n Custom secrets: ${Object.keys(secrets.custom).length}`);
|
|
10980
|
+
logger$2.log(`\n Use "gkm secrets:show --stage ${stage}" to view secrets`);
|
|
10981
|
+
logger$2.log(" Use \"gkm secrets:set <KEY> <VALUE> --stage " + stage + "\" to add custom secrets");
|
|
11020
10982
|
}
|
|
11021
10983
|
/**
|
|
11022
10984
|
* Read all data from stdin.
|
|
@@ -11035,21 +10997,21 @@ async function secretsSetCommand(key, value, options) {
|
|
|
11035
10997
|
let secretValue = value;
|
|
11036
10998
|
if (!secretValue) {
|
|
11037
10999
|
if (process.stdin.isTTY) {
|
|
11038
|
-
logger.error("No value provided. Use: gkm secrets:set KEY VALUE --stage <stage>");
|
|
11039
|
-
logger.error("Or pipe from stdin: echo \"value\" | gkm secrets:set KEY --stage <stage>");
|
|
11000
|
+
logger$2.error("No value provided. Use: gkm secrets:set KEY VALUE --stage <stage>");
|
|
11001
|
+
logger$2.error("Or pipe from stdin: echo \"value\" | gkm secrets:set KEY --stage <stage>");
|
|
11040
11002
|
process.exit(1);
|
|
11041
11003
|
}
|
|
11042
11004
|
secretValue = await readStdin();
|
|
11043
11005
|
if (!secretValue) {
|
|
11044
|
-
logger.error("No value received from stdin");
|
|
11006
|
+
logger$2.error("No value received from stdin");
|
|
11045
11007
|
process.exit(1);
|
|
11046
11008
|
}
|
|
11047
11009
|
}
|
|
11048
11010
|
try {
|
|
11049
11011
|
await setCustomSecret(stage, key, secretValue);
|
|
11050
|
-
logger.log(`\n✓ Secret "${key}" set for stage "${stage}"`);
|
|
11012
|
+
logger$2.log(`\n✓ Secret "${key}" set for stage "${stage}"`);
|
|
11051
11013
|
} catch (error) {
|
|
11052
|
-
logger.error(error instanceof Error ? error.message : "Failed to set secret");
|
|
11014
|
+
logger$2.error(error instanceof Error ? error.message : "Failed to set secret");
|
|
11053
11015
|
process.exit(1);
|
|
11054
11016
|
}
|
|
11055
11017
|
}
|
|
@@ -11060,32 +11022,32 @@ async function secretsShowCommand(options) {
|
|
|
11060
11022
|
const { stage, reveal } = options;
|
|
11061
11023
|
const secrets = await readStageSecrets(stage);
|
|
11062
11024
|
if (!secrets) {
|
|
11063
|
-
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11025
|
+
logger$2.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11064
11026
|
process.exit(1);
|
|
11065
11027
|
}
|
|
11066
|
-
logger.log(`\nSecrets for stage "${stage}":`);
|
|
11067
|
-
logger.log(` Created: ${secrets.createdAt}`);
|
|
11068
|
-
logger.log(` Updated: ${secrets.updatedAt}`);
|
|
11069
|
-
logger.log("\nService Credentials:");
|
|
11028
|
+
logger$2.log(`\nSecrets for stage "${stage}":`);
|
|
11029
|
+
logger$2.log(` Created: ${secrets.createdAt}`);
|
|
11030
|
+
logger$2.log(` Updated: ${secrets.updatedAt}`);
|
|
11031
|
+
logger$2.log("\nService Credentials:");
|
|
11070
11032
|
for (const [service, creds] of Object.entries(secrets.services)) if (creds) {
|
|
11071
|
-
logger.log(`\n ${service}:`);
|
|
11072
|
-
logger.log(` host: ${creds.host}`);
|
|
11073
|
-
logger.log(` port: ${creds.port}`);
|
|
11074
|
-
logger.log(` username: ${creds.username}`);
|
|
11075
|
-
logger.log(` password: ${reveal ? creds.password : maskPassword(creds.password)}`);
|
|
11076
|
-
if (creds.database) logger.log(` database: ${creds.database}`);
|
|
11077
|
-
if (creds.vhost) logger.log(` vhost: ${creds.vhost}`);
|
|
11078
|
-
}
|
|
11079
|
-
logger.log("\nConnection URLs:");
|
|
11080
|
-
if (secrets.urls.DATABASE_URL) logger.log(` DATABASE_URL: ${reveal ? secrets.urls.DATABASE_URL : maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
11081
|
-
if (secrets.urls.REDIS_URL) logger.log(` REDIS_URL: ${reveal ? secrets.urls.REDIS_URL : maskUrl(secrets.urls.REDIS_URL)}`);
|
|
11082
|
-
if (secrets.urls.RABBITMQ_URL) logger.log(` RABBITMQ_URL: ${reveal ? secrets.urls.RABBITMQ_URL : maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
11033
|
+
logger$2.log(`\n ${service}:`);
|
|
11034
|
+
logger$2.log(` host: ${creds.host}`);
|
|
11035
|
+
logger$2.log(` port: ${creds.port}`);
|
|
11036
|
+
logger$2.log(` username: ${creds.username}`);
|
|
11037
|
+
logger$2.log(` password: ${reveal ? creds.password : maskPassword(creds.password)}`);
|
|
11038
|
+
if (creds.database) logger$2.log(` database: ${creds.database}`);
|
|
11039
|
+
if (creds.vhost) logger$2.log(` vhost: ${creds.vhost}`);
|
|
11040
|
+
}
|
|
11041
|
+
logger$2.log("\nConnection URLs:");
|
|
11042
|
+
if (secrets.urls.DATABASE_URL) logger$2.log(` DATABASE_URL: ${reveal ? secrets.urls.DATABASE_URL : maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
11043
|
+
if (secrets.urls.REDIS_URL) logger$2.log(` REDIS_URL: ${reveal ? secrets.urls.REDIS_URL : maskUrl(secrets.urls.REDIS_URL)}`);
|
|
11044
|
+
if (secrets.urls.RABBITMQ_URL) logger$2.log(` RABBITMQ_URL: ${reveal ? secrets.urls.RABBITMQ_URL : maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
11083
11045
|
const customKeys = Object.keys(secrets.custom);
|
|
11084
11046
|
if (customKeys.length > 0) {
|
|
11085
|
-
logger.log("\nCustom Secrets:");
|
|
11086
|
-
for (const [key, value] of Object.entries(secrets.custom)) logger.log(` ${key}: ${reveal ? value : maskPassword(value)}`);
|
|
11047
|
+
logger$2.log("\nCustom Secrets:");
|
|
11048
|
+
for (const [key, value] of Object.entries(secrets.custom)) logger$2.log(` ${key}: ${reveal ? value : maskPassword(value)}`);
|
|
11087
11049
|
}
|
|
11088
|
-
if (!reveal) logger.log("\nUse --reveal to show actual values");
|
|
11050
|
+
if (!reveal) logger$2.log("\nUse --reveal to show actual values");
|
|
11089
11051
|
}
|
|
11090
11052
|
/**
|
|
11091
11053
|
* Rotate passwords for services.
|
|
@@ -11094,25 +11056,25 @@ async function secretsRotateCommand(options) {
|
|
|
11094
11056
|
const { stage, service } = options;
|
|
11095
11057
|
const secrets = await readStageSecrets(stage);
|
|
11096
11058
|
if (!secrets) {
|
|
11097
|
-
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11059
|
+
logger$2.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11098
11060
|
process.exit(1);
|
|
11099
11061
|
}
|
|
11100
11062
|
if (service) {
|
|
11101
11063
|
if (!secrets.services[service]) {
|
|
11102
|
-
logger.error(`Service "${service}" not configured in stage "${stage}"`);
|
|
11064
|
+
logger$2.error(`Service "${service}" not configured in stage "${stage}"`);
|
|
11103
11065
|
process.exit(1);
|
|
11104
11066
|
}
|
|
11105
11067
|
const updated = rotateServicePassword(secrets, service);
|
|
11106
11068
|
await writeStageSecrets(updated);
|
|
11107
|
-
logger.log(`\n✓ Password rotated for ${service} in stage "${stage}"`);
|
|
11069
|
+
logger$2.log(`\n✓ Password rotated for ${service} in stage "${stage}"`);
|
|
11108
11070
|
} else {
|
|
11109
11071
|
let updated = secrets;
|
|
11110
11072
|
const services = Object.keys(secrets.services);
|
|
11111
11073
|
for (const svc of services) updated = rotateServicePassword(updated, svc);
|
|
11112
11074
|
await writeStageSecrets(updated);
|
|
11113
|
-
logger.log(`\n✓ Passwords rotated for all services in stage "${stage}": ${services.join(", ")}`);
|
|
11075
|
+
logger$2.log(`\n✓ Passwords rotated for all services in stage "${stage}": ${services.join(", ")}`);
|
|
11114
11076
|
}
|
|
11115
|
-
logger.log(`\nUse "gkm secrets:show --stage ${stage}" to view new values`);
|
|
11077
|
+
logger$2.log(`\nUse "gkm secrets:show --stage ${stage}" to view new values`);
|
|
11116
11078
|
}
|
|
11117
11079
|
/**
|
|
11118
11080
|
* Import secrets from a JSON file.
|
|
@@ -11120,7 +11082,7 @@ async function secretsRotateCommand(options) {
|
|
|
11120
11082
|
async function secretsImportCommand(file, options) {
|
|
11121
11083
|
const { stage, merge = true } = options;
|
|
11122
11084
|
if (!existsSync(file)) {
|
|
11123
|
-
logger.error(`File not found: ${file}`);
|
|
11085
|
+
logger$2.error(`File not found: ${file}`);
|
|
11124
11086
|
process.exit(1);
|
|
11125
11087
|
}
|
|
11126
11088
|
let importedSecrets;
|
|
@@ -11130,12 +11092,12 @@ async function secretsImportCommand(file, options) {
|
|
|
11130
11092
|
if (typeof importedSecrets !== "object" || importedSecrets === null) throw new Error("JSON must be an object");
|
|
11131
11093
|
for (const [key, value] of Object.entries(importedSecrets)) if (typeof value !== "string") throw new Error(`Value for "${key}" must be a string, got ${typeof value}`);
|
|
11132
11094
|
} catch (error) {
|
|
11133
|
-
logger.error(`Failed to parse JSON file: ${error instanceof Error ? error.message : "Invalid JSON"}`);
|
|
11095
|
+
logger$2.error(`Failed to parse JSON file: ${error instanceof Error ? error.message : "Invalid JSON"}`);
|
|
11134
11096
|
process.exit(1);
|
|
11135
11097
|
}
|
|
11136
11098
|
const secrets = await readStageSecrets(stage);
|
|
11137
11099
|
if (!secrets) {
|
|
11138
|
-
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11100
|
+
logger$2.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
11139
11101
|
process.exit(1);
|
|
11140
11102
|
}
|
|
11141
11103
|
const updatedCustom = merge ? {
|
|
@@ -11150,10 +11112,10 @@ async function secretsImportCommand(file, options) {
|
|
|
11150
11112
|
await writeStageSecrets(updated);
|
|
11151
11113
|
const importedCount = Object.keys(importedSecrets).length;
|
|
11152
11114
|
const totalCount = Object.keys(updatedCustom).length;
|
|
11153
|
-
logger.log(`\n✓ Imported ${importedCount} secrets for stage "${stage}"`);
|
|
11154
|
-
if (merge && totalCount > importedCount) logger.log(` Total custom secrets: ${totalCount}`);
|
|
11155
|
-
logger.log("\n Imported keys:");
|
|
11156
|
-
for (const key of Object.keys(importedSecrets)) logger.log(` - ${key}`);
|
|
11115
|
+
logger$2.log(`\n✓ Imported ${importedCount} secrets for stage "${stage}"`);
|
|
11116
|
+
if (merge && totalCount > importedCount) logger$2.log(` Total custom secrets: ${totalCount}`);
|
|
11117
|
+
logger$2.log("\n Imported keys:");
|
|
11118
|
+
for (const key of Object.keys(importedSecrets)) logger$2.log(` - ${key}`);
|
|
11157
11119
|
}
|
|
11158
11120
|
/**
|
|
11159
11121
|
* Mask password in a URL for display.
|
|
@@ -11168,6 +11130,139 @@ function maskUrl(url) {
|
|
|
11168
11130
|
}
|
|
11169
11131
|
}
|
|
11170
11132
|
|
|
11133
|
+
//#endregion
|
|
11134
|
+
//#region src/setup/index.ts
|
|
11135
|
+
const logger$1 = console;
|
|
11136
|
+
/**
|
|
11137
|
+
* Setup development environment.
|
|
11138
|
+
*
|
|
11139
|
+
* Orchestrates:
|
|
11140
|
+
* 1. Load workspace config
|
|
11141
|
+
* 2. Resolve secrets (local → SSM → generate fresh)
|
|
11142
|
+
* 3. Write docker/.env from secrets
|
|
11143
|
+
* 4. Start Docker services
|
|
11144
|
+
*/
|
|
11145
|
+
async function setupCommand(options = {}) {
|
|
11146
|
+
const stage = options.stage ?? "development";
|
|
11147
|
+
logger$1.log("\n🔧 Setting up development environment...\n");
|
|
11148
|
+
let loadedConfig;
|
|
11149
|
+
try {
|
|
11150
|
+
loadedConfig = await loadWorkspaceConfig();
|
|
11151
|
+
} catch {
|
|
11152
|
+
logger$1.error("❌ No gkm.config.ts found. Run this command from a workspace root.");
|
|
11153
|
+
process.exit(1);
|
|
11154
|
+
}
|
|
11155
|
+
const { workspace } = loadedConfig;
|
|
11156
|
+
const isMultiApp = Object.keys(workspace.apps).length > 1;
|
|
11157
|
+
logger$1.log(`📦 Workspace: ${workspace.name}`);
|
|
11158
|
+
logger$1.log(`📱 Apps: ${Object.keys(workspace.apps).join(", ")}`);
|
|
11159
|
+
logger$1.log(`🔑 Stage: ${stage}\n`);
|
|
11160
|
+
const secrets = await resolveSecrets(stage, workspace, options);
|
|
11161
|
+
if (!secrets) {
|
|
11162
|
+
logger$1.error("❌ Failed to resolve secrets. Exiting.");
|
|
11163
|
+
process.exit(1);
|
|
11164
|
+
}
|
|
11165
|
+
if (isMultiApp && workspace.services.db) {
|
|
11166
|
+
await writeDockerEnvFromSecrets(secrets, workspace.root);
|
|
11167
|
+
logger$1.log("📄 Generated docker/.env with database passwords");
|
|
11168
|
+
}
|
|
11169
|
+
if (!options.skipDocker) {
|
|
11170
|
+
const composeFile = join(workspace.root, "docker-compose.yml");
|
|
11171
|
+
if (existsSync(composeFile)) {
|
|
11172
|
+
logger$1.log("");
|
|
11173
|
+
await startWorkspaceServices(workspace);
|
|
11174
|
+
} else logger$1.log("⚠️ No docker-compose.yml found. Skipping Docker services.");
|
|
11175
|
+
}
|
|
11176
|
+
printSummary(workspace, stage);
|
|
11177
|
+
}
|
|
11178
|
+
/**
|
|
11179
|
+
* Resolve secrets with priority:
|
|
11180
|
+
* 1. Local secrets exist → use them (preserves manual additions)
|
|
11181
|
+
* 2. SSM configured and has secrets → pull and use
|
|
11182
|
+
* 3. Neither → generate fresh secrets
|
|
11183
|
+
*
|
|
11184
|
+
* --force skips checks 1 and 2 and always regenerates.
|
|
11185
|
+
*/
|
|
11186
|
+
async function resolveSecrets(stage, workspace, options) {
|
|
11187
|
+
if (options.force) {
|
|
11188
|
+
logger$1.log("🔐 Generating fresh secrets (--force)...");
|
|
11189
|
+
return generateFreshSecrets(stage, workspace, options);
|
|
11190
|
+
}
|
|
11191
|
+
if (secretsExist(stage, workspace.root)) {
|
|
11192
|
+
logger$1.log("🔐 Using existing local secrets");
|
|
11193
|
+
const secrets = await readStageSecrets(stage, workspace.root);
|
|
11194
|
+
if (secrets) return secrets;
|
|
11195
|
+
}
|
|
11196
|
+
if (isSSMConfigured(workspace)) {
|
|
11197
|
+
logger$1.log("☁️ Checking for remote secrets in SSM...");
|
|
11198
|
+
try {
|
|
11199
|
+
const remoteSecrets = await pullSecrets(stage, workspace);
|
|
11200
|
+
if (remoteSecrets) {
|
|
11201
|
+
logger$1.log("✅ Pulled secrets from SSM");
|
|
11202
|
+
await writeStageSecrets(remoteSecrets, workspace.root);
|
|
11203
|
+
return remoteSecrets;
|
|
11204
|
+
}
|
|
11205
|
+
logger$1.log(" No remote secrets found");
|
|
11206
|
+
} catch (error) {
|
|
11207
|
+
logger$1.warn(`⚠️ Failed to pull from SSM: ${error.message}`);
|
|
11208
|
+
}
|
|
11209
|
+
}
|
|
11210
|
+
logger$1.log("🔐 Generating fresh development secrets...");
|
|
11211
|
+
return generateFreshSecrets(stage, workspace, options);
|
|
11212
|
+
}
|
|
11213
|
+
/**
|
|
11214
|
+
* Generate fresh secrets for the workspace.
|
|
11215
|
+
*/
|
|
11216
|
+
async function generateFreshSecrets(stage, workspace, options) {
|
|
11217
|
+
const serviceNames = [];
|
|
11218
|
+
if (workspace.services.db) serviceNames.push("postgres");
|
|
11219
|
+
if (workspace.services.cache) serviceNames.push("redis");
|
|
11220
|
+
const secrets = createStageSecrets(stage, serviceNames);
|
|
11221
|
+
const isMultiApp = Object.keys(workspace.apps).length > 1;
|
|
11222
|
+
if (isMultiApp) {
|
|
11223
|
+
const customSecrets = generateFullstackCustomSecrets(workspace);
|
|
11224
|
+
secrets.custom = customSecrets;
|
|
11225
|
+
} else secrets.custom = {
|
|
11226
|
+
NODE_ENV: "development",
|
|
11227
|
+
PORT: "3000",
|
|
11228
|
+
LOG_LEVEL: "debug",
|
|
11229
|
+
JWT_SECRET: `dev-${Date.now()}-${Math.random().toString(36).slice(2)}`
|
|
11230
|
+
};
|
|
11231
|
+
await writeStageSecrets(secrets, workspace.root);
|
|
11232
|
+
logger$1.log(` Secrets written to .gkm/secrets/${stage}.json`);
|
|
11233
|
+
if (isSSMConfigured(workspace) && !options.yes) {
|
|
11234
|
+
const { shouldPush } = await prompts({
|
|
11235
|
+
type: "confirm",
|
|
11236
|
+
name: "shouldPush",
|
|
11237
|
+
message: "Push secrets to SSM for team sharing?",
|
|
11238
|
+
initial: true
|
|
11239
|
+
});
|
|
11240
|
+
if (shouldPush) try {
|
|
11241
|
+
await pushSecrets(stage, workspace);
|
|
11242
|
+
logger$1.log("☁️ Secrets pushed to SSM");
|
|
11243
|
+
} catch (error) {
|
|
11244
|
+
logger$1.warn(`⚠️ Failed to push to SSM: ${error.message}`);
|
|
11245
|
+
}
|
|
11246
|
+
}
|
|
11247
|
+
return secrets;
|
|
11248
|
+
}
|
|
11249
|
+
/**
|
|
11250
|
+
* Print setup summary with next steps.
|
|
11251
|
+
*/
|
|
11252
|
+
function printSummary(workspace, stage) {
|
|
11253
|
+
logger$1.log(`\n${"─".repeat(50)}`);
|
|
11254
|
+
logger$1.log("\n✅ Development environment ready!\n");
|
|
11255
|
+
logger$1.log("📋 Apps:");
|
|
11256
|
+
for (const [name$1, app] of Object.entries(workspace.apps)) {
|
|
11257
|
+
const icon = app.type === "frontend" ? "🌐" : "🔧";
|
|
11258
|
+
logger$1.log(` ${icon} ${name$1} → http://localhost:${app.port}`);
|
|
11259
|
+
}
|
|
11260
|
+
logger$1.log("\n🚀 Next steps:");
|
|
11261
|
+
logger$1.log(" gkm dev # Start all apps");
|
|
11262
|
+
logger$1.log(` gkm secrets:show --stage ${stage} # View secrets`);
|
|
11263
|
+
logger$1.log("");
|
|
11264
|
+
}
|
|
11265
|
+
|
|
11171
11266
|
//#endregion
|
|
11172
11267
|
//#region src/test/index.ts
|
|
11173
11268
|
/**
|
|
@@ -11304,6 +11399,146 @@ async function ensureTestDatabase(env) {
|
|
|
11304
11399
|
}
|
|
11305
11400
|
}
|
|
11306
11401
|
|
|
11402
|
+
//#endregion
|
|
11403
|
+
//#region src/upgrade/index.ts
|
|
11404
|
+
const logger = console;
|
|
11405
|
+
async function upgradeCommand(options = {}) {
|
|
11406
|
+
const cwd = process.cwd();
|
|
11407
|
+
logger.log("\n📦 Scanning workspace for @geekmidas packages...\n");
|
|
11408
|
+
const pm = detectPackageManager(cwd);
|
|
11409
|
+
logger.log(` Package manager: ${pm}`);
|
|
11410
|
+
const packageJsonPaths = findWorkspacePackages(cwd, pm);
|
|
11411
|
+
logger.log(` Found ${packageJsonPaths.length} package(s) in workspace\n`);
|
|
11412
|
+
const dependencies$1 = scanForGeekmidasDeps(packageJsonPaths);
|
|
11413
|
+
if (dependencies$1.length === 0) {
|
|
11414
|
+
logger.log(" No @geekmidas packages found.\n");
|
|
11415
|
+
return;
|
|
11416
|
+
}
|
|
11417
|
+
const uniquePackages = [...new Set(dependencies$1.map((d) => d.packageName))];
|
|
11418
|
+
logger.log(` Checking ${uniquePackages.length} unique @geekmidas package(s) on npm...\n`);
|
|
11419
|
+
const latestVersions = await fetchLatestVersions(uniquePackages);
|
|
11420
|
+
const upgradeInfos = dependencies$1.map((dep) => resolveUpgradeInfo(dep, latestVersions));
|
|
11421
|
+
printUpgradeTable(upgradeInfos);
|
|
11422
|
+
const upgradable = upgradeInfos.filter((info) => info.needsUpgrade && !info.isWorkspaceRef);
|
|
11423
|
+
if (upgradable.length === 0) {
|
|
11424
|
+
logger.log("\n All @geekmidas packages are up to date!\n");
|
|
11425
|
+
return;
|
|
11426
|
+
}
|
|
11427
|
+
logger.log(`\n ${upgradable.length} package(s) can be upgraded.\n`);
|
|
11428
|
+
if (options.dryRun) {
|
|
11429
|
+
logger.log(" --dry-run: No changes made.\n");
|
|
11430
|
+
printUpgradeCommands(upgradable, pm);
|
|
11431
|
+
return;
|
|
11432
|
+
}
|
|
11433
|
+
executeUpgrade(upgradable, pm, cwd);
|
|
11434
|
+
logger.log("\n ✅ Upgrade complete! Run your tests to verify.\n");
|
|
11435
|
+
}
|
|
11436
|
+
function scanForGeekmidasDeps(packageJsonPaths) {
|
|
11437
|
+
const results = [];
|
|
11438
|
+
const depTypes = [
|
|
11439
|
+
"dependencies",
|
|
11440
|
+
"devDependencies",
|
|
11441
|
+
"peerDependencies"
|
|
11442
|
+
];
|
|
11443
|
+
for (const pkgJsonPath of packageJsonPaths) {
|
|
11444
|
+
const pkg$1 = JSON.parse(readFileSync(pkgJsonPath, "utf-8"));
|
|
11445
|
+
const workspaceName = pkg$1.name ?? pkgJsonPath;
|
|
11446
|
+
for (const depType of depTypes) {
|
|
11447
|
+
const deps = pkg$1[depType];
|
|
11448
|
+
if (!deps) continue;
|
|
11449
|
+
for (const [name$1, version$1] of Object.entries(deps)) {
|
|
11450
|
+
if (!name$1.startsWith("@geekmidas/")) continue;
|
|
11451
|
+
results.push({
|
|
11452
|
+
packageName: name$1,
|
|
11453
|
+
currentVersion: version$1,
|
|
11454
|
+
depType,
|
|
11455
|
+
packageJsonPath: pkgJsonPath,
|
|
11456
|
+
workspaceName
|
|
11457
|
+
});
|
|
11458
|
+
}
|
|
11459
|
+
}
|
|
11460
|
+
}
|
|
11461
|
+
return results;
|
|
11462
|
+
}
|
|
11463
|
+
async function fetchLatestVersions(packageNames) {
|
|
11464
|
+
const versions = /* @__PURE__ */ new Map();
|
|
11465
|
+
const results = await Promise.allSettled(packageNames.map(async (name$1) => {
|
|
11466
|
+
const res = await fetch(`https://registry.npmjs.org/${name$1}/latest`);
|
|
11467
|
+
if (!res.ok) throw new Error(`HTTP ${res.status}`);
|
|
11468
|
+
const data = await res.json();
|
|
11469
|
+
return {
|
|
11470
|
+
name: name$1,
|
|
11471
|
+
version: data.version
|
|
11472
|
+
};
|
|
11473
|
+
}));
|
|
11474
|
+
for (const result of results) if (result.status === "fulfilled") versions.set(result.value.name, result.value.version);
|
|
11475
|
+
return versions;
|
|
11476
|
+
}
|
|
11477
|
+
function resolveUpgradeInfo(dep, latestVersions) {
|
|
11478
|
+
const isWorkspaceRef = dep.currentVersion.startsWith("workspace:");
|
|
11479
|
+
const latestVersion = latestVersions.get(dep.packageName) ?? "unknown";
|
|
11480
|
+
const currentBare = dep.currentVersion.replace(/^[\^~>=<]*/g, "");
|
|
11481
|
+
const needsUpgrade = !isWorkspaceRef && latestVersion !== "unknown" && currentBare !== latestVersion;
|
|
11482
|
+
return {
|
|
11483
|
+
...dep,
|
|
11484
|
+
latestVersion,
|
|
11485
|
+
isWorkspaceRef,
|
|
11486
|
+
needsUpgrade
|
|
11487
|
+
};
|
|
11488
|
+
}
|
|
11489
|
+
function printUpgradeTable(infos) {
|
|
11490
|
+
const byPackage = /* @__PURE__ */ new Map();
|
|
11491
|
+
for (const info of infos) if (!byPackage.has(info.packageName)) byPackage.set(info.packageName, info);
|
|
11492
|
+
const nameWidth = 33;
|
|
11493
|
+
const verWidth = 14;
|
|
11494
|
+
const statusWidth = 14;
|
|
11495
|
+
const hr = ` ${"─".repeat(nameWidth + verWidth * 2 + statusWidth + 5)}`;
|
|
11496
|
+
logger.log(hr);
|
|
11497
|
+
logger.log(` ${"Package".padEnd(nameWidth)} ${"Current".padEnd(verWidth)} ${"Latest".padEnd(verWidth)} ${"Status".padEnd(statusWidth)}`);
|
|
11498
|
+
logger.log(hr);
|
|
11499
|
+
for (const [, info] of byPackage) {
|
|
11500
|
+
const name$1 = info.packageName.padEnd(nameWidth);
|
|
11501
|
+
const current = info.currentVersion.padEnd(verWidth);
|
|
11502
|
+
const latest = info.latestVersion.padEnd(verWidth);
|
|
11503
|
+
let status;
|
|
11504
|
+
if (info.isWorkspaceRef) status = "workspace";
|
|
11505
|
+
else if (info.needsUpgrade) status = "⬆ upgrade";
|
|
11506
|
+
else status = "✓ up-to-date";
|
|
11507
|
+
logger.log(` ${name$1} ${current} ${latest} ${status}`);
|
|
11508
|
+
}
|
|
11509
|
+
logger.log(hr);
|
|
11510
|
+
}
|
|
11511
|
+
function printUpgradeCommands(upgradable, pm) {
|
|
11512
|
+
logger.log(" Commands that would be run:\n");
|
|
11513
|
+
const uniquePackages = [...new Set(upgradable.map((i) => i.packageName))];
|
|
11514
|
+
const cmd = getWorkspaceUpgradeCommand(pm, uniquePackages);
|
|
11515
|
+
logger.log(` ${cmd}\n`);
|
|
11516
|
+
}
|
|
11517
|
+
function getWorkspaceUpgradeCommand(pm, packages) {
|
|
11518
|
+
const pkgList = packages.join(" ");
|
|
11519
|
+
switch (pm) {
|
|
11520
|
+
case "pnpm": return `pnpm update -r ${pkgList} --latest`;
|
|
11521
|
+
case "yarn": return `yarn upgrade ${pkgList}`;
|
|
11522
|
+
case "bun": return `bun update ${pkgList}`;
|
|
11523
|
+
case "npm": return `npm update ${pkgList} --workspaces`;
|
|
11524
|
+
default: return `npm update ${pkgList}`;
|
|
11525
|
+
}
|
|
11526
|
+
}
|
|
11527
|
+
function executeUpgrade(upgradable, pm, cwd) {
|
|
11528
|
+
const uniquePackages = [...new Set(upgradable.map((i) => i.packageName))];
|
|
11529
|
+
const cmd = getWorkspaceUpgradeCommand(pm, uniquePackages);
|
|
11530
|
+
logger.log(` Running: ${cmd}\n`);
|
|
11531
|
+
try {
|
|
11532
|
+
execSync(cmd, {
|
|
11533
|
+
cwd,
|
|
11534
|
+
stdio: "inherit",
|
|
11535
|
+
timeout: 12e4
|
|
11536
|
+
});
|
|
11537
|
+
} catch {
|
|
11538
|
+
throw new Error("Package upgrade failed. Check the output above for details.");
|
|
11539
|
+
}
|
|
11540
|
+
}
|
|
11541
|
+
|
|
11307
11542
|
//#endregion
|
|
11308
11543
|
//#region src/index.ts
|
|
11309
11544
|
const program = new Command();
|
|
@@ -11318,6 +11553,16 @@ program.command("init").description("Scaffold a new project").argument("[name]",
|
|
|
11318
11553
|
process.exit(1);
|
|
11319
11554
|
}
|
|
11320
11555
|
});
|
|
11556
|
+
program.command("setup").description("Setup development environment (secrets, Docker, database)").option("--stage <stage>", "Stage name", "development").option("--force", "Regenerate secrets even if they exist").option("--skip-docker", "Skip starting Docker services").option("-y, --yes", "Skip prompts").action(async (options) => {
|
|
11557
|
+
try {
|
|
11558
|
+
const globalOptions = program.opts();
|
|
11559
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
11560
|
+
await setupCommand(options);
|
|
11561
|
+
} catch (error) {
|
|
11562
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
11563
|
+
process.exit(1);
|
|
11564
|
+
}
|
|
11565
|
+
});
|
|
11321
11566
|
program.command("build").description("Build handlers from endpoints, functions, and crons").option("--provider <provider>", "Target provider for generated handlers (aws, server)").option("--providers <providers>", "[DEPRECATED] Use --provider instead. Target providers for generated handlers (comma-separated)").option("--enable-openapi", "Enable OpenAPI documentation generation for server builds").option("--production", "Build for production (no dev tools, bundled output)").option("--skip-bundle", "Skip bundling step in production build").option("--stage <stage>", "Inject encrypted secrets for deployment stage").action(async (options) => {
|
|
11322
11567
|
try {
|
|
11323
11568
|
const globalOptions = program.opts();
|
|
@@ -11514,6 +11759,84 @@ program.command("secrets:import").description("Import secrets from a JSON file")
|
|
|
11514
11759
|
process.exit(1);
|
|
11515
11760
|
}
|
|
11516
11761
|
});
|
|
11762
|
+
program.command("secrets:push").description("Push secrets to remote provider (SSM)").requiredOption("--stage <stage>", "Stage name").action(async (options) => {
|
|
11763
|
+
try {
|
|
11764
|
+
const globalOptions = program.opts();
|
|
11765
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
11766
|
+
const { loadWorkspaceConfig: loadWorkspaceConfig$1 } = await import("./config.mjs");
|
|
11767
|
+
const { pushSecrets: pushSecrets$1 } = await import("./sync-6FoT41G3.mjs");
|
|
11768
|
+
const { reconcileMissingSecrets } = await import("./reconcile-D2WCDQue.mjs");
|
|
11769
|
+
const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1 } = await import("./storage-Dx_jZbq6.mjs");
|
|
11770
|
+
const { workspace } = await loadWorkspaceConfig$1();
|
|
11771
|
+
const secrets = await readStageSecrets$1(options.stage, workspace.root);
|
|
11772
|
+
if (secrets) {
|
|
11773
|
+
const result = reconcileMissingSecrets(secrets, workspace);
|
|
11774
|
+
if (result) {
|
|
11775
|
+
await writeStageSecrets$1(result.secrets, workspace.root);
|
|
11776
|
+
console.log(` Reconciled ${result.addedKeys.length} missing secret(s):`);
|
|
11777
|
+
for (const key of result.addedKeys) console.log(` + ${key}`);
|
|
11778
|
+
}
|
|
11779
|
+
}
|
|
11780
|
+
await pushSecrets$1(options.stage, workspace);
|
|
11781
|
+
console.log(`\n✓ Secrets pushed for stage "${options.stage}"`);
|
|
11782
|
+
} catch (error) {
|
|
11783
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
11784
|
+
process.exit(1);
|
|
11785
|
+
}
|
|
11786
|
+
});
|
|
11787
|
+
program.command("secrets:pull").description("Pull secrets from remote provider (SSM)").requiredOption("--stage <stage>", "Stage name").action(async (options) => {
|
|
11788
|
+
try {
|
|
11789
|
+
const globalOptions = program.opts();
|
|
11790
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
11791
|
+
const { loadWorkspaceConfig: loadWorkspaceConfig$1 } = await import("./config.mjs");
|
|
11792
|
+
const { pullSecrets: pullSecrets$1 } = await import("./sync-6FoT41G3.mjs");
|
|
11793
|
+
const { writeStageSecrets: writeStageSecrets$1 } = await import("./storage-Dx_jZbq6.mjs");
|
|
11794
|
+
const { reconcileMissingSecrets } = await import("./reconcile-D2WCDQue.mjs");
|
|
11795
|
+
const { workspace } = await loadWorkspaceConfig$1();
|
|
11796
|
+
let secrets = await pullSecrets$1(options.stage, workspace);
|
|
11797
|
+
if (!secrets) {
|
|
11798
|
+
console.error(`No remote secrets found for stage "${options.stage}".`);
|
|
11799
|
+
process.exit(1);
|
|
11800
|
+
}
|
|
11801
|
+
const result = reconcileMissingSecrets(secrets, workspace);
|
|
11802
|
+
if (result) {
|
|
11803
|
+
secrets = result.secrets;
|
|
11804
|
+
console.log(` Reconciled ${result.addedKeys.length} missing secret(s):`);
|
|
11805
|
+
for (const key of result.addedKeys) console.log(` + ${key}`);
|
|
11806
|
+
}
|
|
11807
|
+
await writeStageSecrets$1(secrets, workspace.root);
|
|
11808
|
+
console.log(`\n✓ Secrets pulled for stage "${options.stage}"`);
|
|
11809
|
+
} catch (error) {
|
|
11810
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
11811
|
+
process.exit(1);
|
|
11812
|
+
}
|
|
11813
|
+
});
|
|
11814
|
+
// Register `gkm secrets:reconcile`: backfills any custom secret keys declared
// in the workspace config that are missing from the locally stored stage
// secrets file. Exits non-zero when no secrets file exists for the stage or
// when any step throws.
program
	.command("secrets:reconcile")
	.description("Backfill missing custom secrets from workspace config")
	.option("--stage <stage>", "Stage name", "development")
	.action(async (options) => {
		try {
			// Honor the global --cwd flag before resolving any workspace paths.
			const rootOpts = program.opts();
			if (rootOpts.cwd) process.chdir(rootOpts.cwd);

			// Lazy-load the helpers so the CLI stays fast for unrelated commands.
			// Imports are awaited one at a time, matching the original load order.
			const { loadWorkspaceConfig: loadWorkspaceConfig$1 } = await import("./config.mjs");
			const { reconcileMissingSecrets } = await import("./reconcile-D2WCDQue.mjs");
			const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1 } = await import("./storage-Dx_jZbq6.mjs");

			const { workspace } = await loadWorkspaceConfig$1();
			const secrets = await readStageSecrets$1(options.stage, workspace.root);

			// A missing secrets file means secrets:init was never run for this stage.
			if (!secrets) {
				console.error(`No secrets found for stage "${options.stage}". Run "gkm secrets:init --stage ${options.stage}" first.`);
				process.exit(1);
			}

			// A nullish result signals that nothing needed backfilling.
			const reconciled = reconcileMissingSecrets(secrets, workspace);
			if (!reconciled) {
				console.log(`\n✓ Secrets for stage "${options.stage}" are up-to-date`);
				return;
			}

			// Persist the merged secrets, then report each key that was added.
			await writeStageSecrets$1(reconciled.secrets, workspace.root);
			console.log(`\n✓ Reconciled ${reconciled.addedKeys.length} missing secret(s) for stage "${options.stage}":`);
			for (const key of reconciled.addedKeys) console.log(` + ${key}`);
		} catch (error) {
			console.error(error instanceof Error ? error.message : "Command failed");
			process.exit(1);
		}
	});
|
|
11517
11840
|
program.command("deploy").description("Deploy application to a provider").requiredOption("--provider <provider>", "Deploy provider (docker, dokploy, aws-lambda)").requiredOption("--stage <stage>", "Deployment stage (e.g., production, staging)").option("--tag <tag>", "Image tag (default: stage-timestamp)").option("--skip-push", "Skip pushing image to registry").option("--skip-build", "Skip build step (use existing build)").action(async (options) => {
|
|
11518
11841
|
try {
|
|
11519
11842
|
const globalOptions = program.opts();
|
|
@@ -11660,6 +11983,16 @@ program.command("state:diff").description("Compare local and remote deployment s
|
|
|
11660
11983
|
process.exit(1);
|
|
11661
11984
|
}
|
|
11662
11985
|
});
|
|
11986
|
+
// Register `gkm upgrade`: bumps every @geekmidas package to its latest
// published version. Delegates all the work to upgradeCommand; this handler
// only applies the global --cwd flag and normalizes error reporting.
program
	.command("upgrade")
	.description("Upgrade all @geekmidas packages to their latest versions")
	.option("--dry-run", "Show what would be upgraded without making changes")
	.action(async (options) => {
		try {
			// Respect the global --cwd flag before the upgrade touches the filesystem.
			const rootOpts = program.opts();
			if (rootOpts.cwd) process.chdir(rootOpts.cwd);
			await upgradeCommand(options);
		} catch (error) {
			// Surface a readable message and signal failure to the shell.
			console.error(error instanceof Error ? error.message : "Command failed");
			process.exit(1);
		}
	});
|
|
11663
11996
|
// Parse process.argv and dispatch to the matching registered command handler.
program.parse();
|
|
11664
11997
|
|
|
11665
11998
|
//#endregion
|