@geekmidas/cli 0.9.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +525 -0
- package/dist/bundler-DRXCw_YR.mjs +70 -0
- package/dist/bundler-DRXCw_YR.mjs.map +1 -0
- package/dist/bundler-WsEvH_b2.cjs +71 -0
- package/dist/bundler-WsEvH_b2.cjs.map +1 -0
- package/dist/{config-CFls09Ey.cjs → config-AmInkU7k.cjs} +10 -8
- package/dist/config-AmInkU7k.cjs.map +1 -0
- package/dist/{config-Bq72aj8e.mjs → config-DYULeEv8.mjs} +6 -4
- package/dist/config-DYULeEv8.mjs.map +1 -0
- package/dist/config.cjs +1 -1
- package/dist/config.d.cts +2 -1
- package/dist/config.d.cts.map +1 -0
- package/dist/config.d.mts +2 -1
- package/dist/config.d.mts.map +1 -0
- package/dist/config.mjs +1 -1
- package/dist/encryption-C8H-38Yy.mjs +42 -0
- package/dist/encryption-C8H-38Yy.mjs.map +1 -0
- package/dist/encryption-Dyf_r1h-.cjs +44 -0
- package/dist/encryption-Dyf_r1h-.cjs.map +1 -0
- package/dist/index.cjs +2125 -184
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +2143 -197
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi--vOy9mo4.mjs → openapi-BfFlOBCG.mjs} +812 -49
- package/dist/openapi-BfFlOBCG.mjs.map +1 -0
- package/dist/{openapi-CHhTPief.cjs → openapi-Bt_1FDpT.cjs} +805 -42
- package/dist/openapi-Bt_1FDpT.cjs.map +1 -0
- package/dist/{openapi-react-query-o5iMi8tz.cjs → openapi-react-query-B-sNWHFU.cjs} +5 -5
- package/dist/openapi-react-query-B-sNWHFU.cjs.map +1 -0
- package/dist/{openapi-react-query-CcciaVu5.mjs → openapi-react-query-B6XTeGqS.mjs} +5 -5
- package/dist/openapi-react-query-B6XTeGqS.mjs.map +1 -0
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.d.cts.map +1 -0
- package/dist/openapi-react-query.d.mts.map +1 -0
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.cjs +2 -2
- package/dist/openapi.d.cts +1 -1
- package/dist/openapi.d.cts.map +1 -0
- package/dist/openapi.d.mts +1 -1
- package/dist/openapi.d.mts.map +1 -0
- package/dist/openapi.mjs +2 -2
- package/dist/storage-BUYQJgz7.cjs +4 -0
- package/dist/storage-BXoJvmv2.cjs +149 -0
- package/dist/storage-BXoJvmv2.cjs.map +1 -0
- package/dist/storage-C9PU_30f.mjs +101 -0
- package/dist/storage-C9PU_30f.mjs.map +1 -0
- package/dist/storage-DLJAYxzJ.mjs +3 -0
- package/dist/{types-b-vwGpqc.d.cts → types-BR0M2v_c.d.mts} +100 -1
- package/dist/types-BR0M2v_c.d.mts.map +1 -0
- package/dist/{types-DXgiA1sF.d.mts → types-BhkZc-vm.d.cts} +100 -1
- package/dist/types-BhkZc-vm.d.cts.map +1 -0
- package/examples/cron-example.ts +27 -27
- package/examples/env.ts +27 -27
- package/examples/function-example.ts +31 -31
- package/examples/gkm.config.json +20 -20
- package/examples/gkm.config.ts +8 -8
- package/examples/gkm.minimal.config.json +5 -5
- package/examples/gkm.production.config.json +25 -25
- package/examples/logger.ts +2 -2
- package/package.json +6 -6
- package/src/__tests__/EndpointGenerator.hooks.spec.ts +191 -191
- package/src/__tests__/config.spec.ts +55 -55
- package/src/__tests__/loadEnvFiles.spec.ts +93 -93
- package/src/__tests__/normalizeHooksConfig.spec.ts +58 -58
- package/src/__tests__/openapi-react-query.spec.ts +497 -497
- package/src/__tests__/openapi.spec.ts +428 -428
- package/src/__tests__/test-helpers.ts +77 -76
- package/src/auth/__tests__/credentials.spec.ts +204 -0
- package/src/auth/__tests__/index.spec.ts +168 -0
- package/src/auth/credentials.ts +187 -0
- package/src/auth/index.ts +226 -0
- package/src/build/__tests__/index-new.spec.ts +474 -474
- package/src/build/__tests__/manifests.spec.ts +333 -333
- package/src/build/bundler.ts +141 -0
- package/src/build/endpoint-analyzer.ts +236 -0
- package/src/build/handler-templates.ts +1253 -0
- package/src/build/index.ts +250 -179
- package/src/build/manifests.ts +52 -52
- package/src/build/providerResolver.ts +145 -145
- package/src/build/types.ts +64 -43
- package/src/config.ts +39 -37
- package/src/deploy/__tests__/docker.spec.ts +111 -0
- package/src/deploy/__tests__/dokploy.spec.ts +245 -0
- package/src/deploy/__tests__/init.spec.ts +662 -0
- package/src/deploy/docker.ts +128 -0
- package/src/deploy/dokploy.ts +204 -0
- package/src/deploy/index.ts +136 -0
- package/src/deploy/init.ts +484 -0
- package/src/deploy/types.ts +48 -0
- package/src/dev/__tests__/index.spec.ts +266 -266
- package/src/dev/index.ts +647 -593
- package/src/docker/__tests__/compose.spec.ts +531 -0
- package/src/docker/__tests__/templates.spec.ts +280 -0
- package/src/docker/compose.ts +273 -0
- package/src/docker/index.ts +230 -0
- package/src/docker/templates.ts +446 -0
- package/src/generators/CronGenerator.ts +72 -72
- package/src/generators/EndpointGenerator.ts +699 -398
- package/src/generators/FunctionGenerator.ts +84 -84
- package/src/generators/Generator.ts +72 -72
- package/src/generators/OpenApiTsGenerator.ts +589 -589
- package/src/generators/SubscriberGenerator.ts +124 -124
- package/src/generators/__tests__/CronGenerator.spec.ts +433 -433
- package/src/generators/__tests__/EndpointGenerator.spec.ts +532 -382
- package/src/generators/__tests__/FunctionGenerator.spec.ts +244 -244
- package/src/generators/__tests__/SubscriberGenerator.spec.ts +397 -382
- package/src/generators/index.ts +4 -4
- package/src/index.ts +628 -206
- package/src/init/__tests__/generators.spec.ts +334 -334
- package/src/init/__tests__/init.spec.ts +332 -332
- package/src/init/__tests__/utils.spec.ts +89 -89
- package/src/init/generators/config.ts +175 -175
- package/src/init/generators/docker.ts +41 -41
- package/src/init/generators/env.ts +72 -72
- package/src/init/generators/index.ts +1 -1
- package/src/init/generators/models.ts +64 -64
- package/src/init/generators/monorepo.ts +161 -161
- package/src/init/generators/package.ts +71 -71
- package/src/init/generators/source.ts +6 -6
- package/src/init/index.ts +203 -208
- package/src/init/templates/api.ts +115 -115
- package/src/init/templates/index.ts +75 -75
- package/src/init/templates/minimal.ts +98 -98
- package/src/init/templates/serverless.ts +89 -89
- package/src/init/templates/worker.ts +98 -98
- package/src/init/utils.ts +54 -56
- package/src/openapi-react-query.ts +194 -194
- package/src/openapi.ts +63 -63
- package/src/secrets/__tests__/encryption.spec.ts +226 -0
- package/src/secrets/__tests__/generator.spec.ts +319 -0
- package/src/secrets/__tests__/index.spec.ts +91 -0
- package/src/secrets/__tests__/storage.spec.ts +403 -0
- package/src/secrets/encryption.ts +91 -0
- package/src/secrets/generator.ts +164 -0
- package/src/secrets/index.ts +383 -0
- package/src/secrets/storage.ts +134 -0
- package/src/secrets/types.ts +53 -0
- package/src/types.ts +295 -176
- package/tsconfig.json +9 -0
- package/tsdown.config.ts +11 -8
- package/dist/config-Bq72aj8e.mjs.map +0 -1
- package/dist/config-CFls09Ey.cjs.map +0 -1
- package/dist/openapi--vOy9mo4.mjs.map +0 -1
- package/dist/openapi-CHhTPief.cjs.map +0 -1
- package/dist/openapi-react-query-CcciaVu5.mjs.map +0 -1
- package/dist/openapi-react-query-o5iMi8tz.cjs.map +0 -1
package/dist/index.cjs
CHANGED
|
@@ -1,14 +1,17 @@
|
|
|
1
1
|
#!/usr/bin/env -S npx tsx
|
|
2
2
|
const require_chunk = require('./chunk-CUT6urMc.cjs');
|
|
3
|
-
const require_config = require('./config-
|
|
4
|
-
const require_openapi = require('./openapi-
|
|
5
|
-
const require_openapi_react_query = require('./openapi-react-query-
|
|
6
|
-
const
|
|
3
|
+
const require_config = require('./config-AmInkU7k.cjs');
|
|
4
|
+
const require_openapi = require('./openapi-Bt_1FDpT.cjs');
|
|
5
|
+
const require_openapi_react_query = require('./openapi-react-query-B-sNWHFU.cjs');
|
|
6
|
+
const require_storage = require('./storage-BXoJvmv2.cjs');
|
|
7
|
+
const node_fs = require_chunk.__toESM(require("node:fs"));
|
|
8
|
+
const node_path = require_chunk.__toESM(require("node:path"));
|
|
7
9
|
const commander = require_chunk.__toESM(require("commander"));
|
|
10
|
+
const node_process = require_chunk.__toESM(require("node:process"));
|
|
11
|
+
const node_readline_promises = require_chunk.__toESM(require("node:readline/promises"));
|
|
8
12
|
const node_fs_promises = require_chunk.__toESM(require("node:fs/promises"));
|
|
9
|
-
const
|
|
13
|
+
const node_os = require_chunk.__toESM(require("node:os"));
|
|
10
14
|
const node_child_process = require_chunk.__toESM(require("node:child_process"));
|
|
11
|
-
const node_fs = require_chunk.__toESM(require("node:fs"));
|
|
12
15
|
const node_net = require_chunk.__toESM(require("node:net"));
|
|
13
16
|
const chokidar = require_chunk.__toESM(require("chokidar"));
|
|
14
17
|
const dotenv = require_chunk.__toESM(require("dotenv"));
|
|
@@ -17,10 +20,11 @@ const __geekmidas_constructs_crons = require_chunk.__toESM(require("@geekmidas/c
|
|
|
17
20
|
const __geekmidas_constructs_functions = require_chunk.__toESM(require("@geekmidas/constructs/functions"));
|
|
18
21
|
const __geekmidas_constructs_subscribers = require_chunk.__toESM(require("@geekmidas/constructs/subscribers"));
|
|
19
22
|
const prompts = require_chunk.__toESM(require("prompts"));
|
|
23
|
+
const node_crypto = require_chunk.__toESM(require("node:crypto"));
|
|
20
24
|
|
|
21
25
|
//#region package.json
|
|
22
26
|
var name = "@geekmidas/cli";
|
|
23
|
-
var version = "0.
|
|
27
|
+
var version = "0.12.0";
|
|
24
28
|
var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
|
|
25
29
|
var private$1 = false;
|
|
26
30
|
var type = "module";
|
|
@@ -100,6 +104,227 @@ var package_default = {
|
|
|
100
104
|
peerDependenciesMeta
|
|
101
105
|
};
|
|
102
106
|
|
|
107
|
+
//#endregion
|
|
108
|
+
//#region src/auth/credentials.ts
|
|
109
|
+
/**
|
|
110
|
+
* Get the path to the credentials directory
|
|
111
|
+
*/
|
|
112
|
+
function getCredentialsDir(options) {
|
|
113
|
+
const root = options?.root ?? (0, node_os.homedir)();
|
|
114
|
+
return (0, node_path.join)(root, ".gkm");
|
|
115
|
+
}
|
|
116
|
+
/**
|
|
117
|
+
* Get the path to the credentials file
|
|
118
|
+
*/
|
|
119
|
+
function getCredentialsPath(options) {
|
|
120
|
+
return (0, node_path.join)(getCredentialsDir(options), "credentials.json");
|
|
121
|
+
}
|
|
122
|
+
/**
|
|
123
|
+
* Ensure the credentials directory exists
|
|
124
|
+
*/
|
|
125
|
+
function ensureCredentialsDir(options) {
|
|
126
|
+
const dir = getCredentialsDir(options);
|
|
127
|
+
if (!(0, node_fs.existsSync)(dir)) (0, node_fs.mkdirSync)(dir, {
|
|
128
|
+
recursive: true,
|
|
129
|
+
mode: 448
|
|
130
|
+
});
|
|
131
|
+
}
|
|
132
|
+
/**
|
|
133
|
+
* Read stored credentials from disk
|
|
134
|
+
*/
|
|
135
|
+
async function readCredentials(options) {
|
|
136
|
+
const path = getCredentialsPath(options);
|
|
137
|
+
if (!(0, node_fs.existsSync)(path)) return {};
|
|
138
|
+
try {
|
|
139
|
+
const content = await (0, node_fs_promises.readFile)(path, "utf-8");
|
|
140
|
+
return JSON.parse(content);
|
|
141
|
+
} catch {
|
|
142
|
+
return {};
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
/**
|
|
146
|
+
* Write credentials to disk
|
|
147
|
+
*/
|
|
148
|
+
async function writeCredentials(credentials, options) {
|
|
149
|
+
ensureCredentialsDir(options);
|
|
150
|
+
const path = getCredentialsPath(options);
|
|
151
|
+
await (0, node_fs_promises.writeFile)(path, JSON.stringify(credentials, null, 2), { mode: 384 });
|
|
152
|
+
}
|
|
153
|
+
/**
|
|
154
|
+
* Store Dokploy credentials
|
|
155
|
+
*/
|
|
156
|
+
async function storeDokployCredentials(token, endpoint, options) {
|
|
157
|
+
const credentials = await readCredentials(options);
|
|
158
|
+
credentials.dokploy = {
|
|
159
|
+
token,
|
|
160
|
+
endpoint,
|
|
161
|
+
storedAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
162
|
+
};
|
|
163
|
+
await writeCredentials(credentials, options);
|
|
164
|
+
}
|
|
165
|
+
/**
|
|
166
|
+
* Get stored Dokploy credentials
|
|
167
|
+
*/
|
|
168
|
+
async function getDokployCredentials(options) {
|
|
169
|
+
const credentials = await readCredentials(options);
|
|
170
|
+
if (!credentials.dokploy) return null;
|
|
171
|
+
return {
|
|
172
|
+
token: credentials.dokploy.token,
|
|
173
|
+
endpoint: credentials.dokploy.endpoint
|
|
174
|
+
};
|
|
175
|
+
}
|
|
176
|
+
/**
|
|
177
|
+
* Remove Dokploy credentials
|
|
178
|
+
*/
|
|
179
|
+
async function removeDokployCredentials(options) {
|
|
180
|
+
const credentials = await readCredentials(options);
|
|
181
|
+
if (!credentials.dokploy) return false;
|
|
182
|
+
delete credentials.dokploy;
|
|
183
|
+
await writeCredentials(credentials, options);
|
|
184
|
+
return true;
|
|
185
|
+
}
|
|
186
|
+
/**
|
|
187
|
+
* Get Dokploy API token, checking stored credentials first, then environment
|
|
188
|
+
*/
|
|
189
|
+
async function getDokployToken(options) {
|
|
190
|
+
const envToken = process.env.DOKPLOY_API_TOKEN;
|
|
191
|
+
if (envToken) return envToken;
|
|
192
|
+
const stored = await getDokployCredentials(options);
|
|
193
|
+
if (stored) return stored.token;
|
|
194
|
+
return null;
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
//#endregion
|
|
198
|
+
//#region src/auth/index.ts
|
|
199
|
+
const logger$9 = console;
|
|
200
|
+
/**
|
|
201
|
+
* Validate Dokploy token by making a test API call
|
|
202
|
+
*/
|
|
203
|
+
async function validateDokployToken(endpoint, token) {
|
|
204
|
+
try {
|
|
205
|
+
const response = await fetch(`${endpoint}/api/project.all`, {
|
|
206
|
+
method: "GET",
|
|
207
|
+
headers: {
|
|
208
|
+
"Content-Type": "application/json",
|
|
209
|
+
Authorization: `Bearer ${token}`
|
|
210
|
+
}
|
|
211
|
+
});
|
|
212
|
+
return response.ok;
|
|
213
|
+
} catch {
|
|
214
|
+
return false;
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
/**
|
|
218
|
+
* Prompt for input (handles both TTY and non-TTY)
|
|
219
|
+
*/
|
|
220
|
+
async function prompt(message, hidden = false) {
|
|
221
|
+
if (!process.stdin.isTTY) throw new Error("Interactive input required. Please provide --token option.");
|
|
222
|
+
const rl = node_readline_promises.createInterface({
|
|
223
|
+
input: node_process.stdin,
|
|
224
|
+
output: node_process.stdout
|
|
225
|
+
});
|
|
226
|
+
try {
|
|
227
|
+
if (hidden) {
|
|
228
|
+
process.stdout.write(message);
|
|
229
|
+
return new Promise((resolve$1) => {
|
|
230
|
+
let value = "";
|
|
231
|
+
const onData = (char) => {
|
|
232
|
+
const c = char.toString();
|
|
233
|
+
if (c === "\n" || c === "\r") {
|
|
234
|
+
process.stdin.removeListener("data", onData);
|
|
235
|
+
process.stdin.setRawMode(false);
|
|
236
|
+
process.stdout.write("\n");
|
|
237
|
+
resolve$1(value);
|
|
238
|
+
} else if (c === "") process.exit(1);
|
|
239
|
+
else if (c === "" || c === "\b") {
|
|
240
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
241
|
+
} else value += c;
|
|
242
|
+
};
|
|
243
|
+
process.stdin.setRawMode(true);
|
|
244
|
+
process.stdin.resume();
|
|
245
|
+
process.stdin.on("data", onData);
|
|
246
|
+
});
|
|
247
|
+
} else return await rl.question(message);
|
|
248
|
+
} finally {
|
|
249
|
+
rl.close();
|
|
250
|
+
}
|
|
251
|
+
}
|
|
252
|
+
/**
|
|
253
|
+
* Login to a service
|
|
254
|
+
*/
|
|
255
|
+
async function loginCommand(options) {
|
|
256
|
+
const { service, token: providedToken, endpoint: providedEndpoint } = options;
|
|
257
|
+
if (service === "dokploy") {
|
|
258
|
+
logger$9.log("\n🔐 Logging in to Dokploy...\n");
|
|
259
|
+
let endpoint = providedEndpoint;
|
|
260
|
+
if (!endpoint) endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
261
|
+
endpoint = endpoint.replace(/\/$/, "");
|
|
262
|
+
try {
|
|
263
|
+
new URL(endpoint);
|
|
264
|
+
} catch {
|
|
265
|
+
logger$9.error("Invalid URL format");
|
|
266
|
+
process.exit(1);
|
|
267
|
+
}
|
|
268
|
+
let token = providedToken;
|
|
269
|
+
if (!token) {
|
|
270
|
+
logger$9.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
|
|
271
|
+
token = await prompt("API Token: ", true);
|
|
272
|
+
}
|
|
273
|
+
if (!token) {
|
|
274
|
+
logger$9.error("Token is required");
|
|
275
|
+
process.exit(1);
|
|
276
|
+
}
|
|
277
|
+
logger$9.log("\nValidating credentials...");
|
|
278
|
+
const isValid = await validateDokployToken(endpoint, token);
|
|
279
|
+
if (!isValid) {
|
|
280
|
+
logger$9.error("\n✗ Invalid credentials. Please check your token and try again.");
|
|
281
|
+
process.exit(1);
|
|
282
|
+
}
|
|
283
|
+
await storeDokployCredentials(token, endpoint);
|
|
284
|
+
logger$9.log("\n✓ Successfully logged in to Dokploy!");
|
|
285
|
+
logger$9.log(` Endpoint: ${endpoint}`);
|
|
286
|
+
logger$9.log(` Credentials stored in: ${getCredentialsPath()}`);
|
|
287
|
+
logger$9.log("\nYou can now use deploy commands without setting DOKPLOY_API_TOKEN.");
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
/**
|
|
291
|
+
* Logout from a service
|
|
292
|
+
*/
|
|
293
|
+
async function logoutCommand(options) {
|
|
294
|
+
const { service = "dokploy" } = options;
|
|
295
|
+
if (service === "all") {
|
|
296
|
+
const dokployRemoved = await removeDokployCredentials();
|
|
297
|
+
if (dokployRemoved) logger$9.log("\n✓ Logged out from all services");
|
|
298
|
+
else logger$9.log("\nNo stored credentials found");
|
|
299
|
+
return;
|
|
300
|
+
}
|
|
301
|
+
if (service === "dokploy") {
|
|
302
|
+
const removed = await removeDokployCredentials();
|
|
303
|
+
if (removed) logger$9.log("\n✓ Logged out from Dokploy");
|
|
304
|
+
else logger$9.log("\nNo Dokploy credentials found");
|
|
305
|
+
}
|
|
306
|
+
}
|
|
307
|
+
/**
|
|
308
|
+
* Show current login status
|
|
309
|
+
*/
|
|
310
|
+
async function whoamiCommand() {
|
|
311
|
+
logger$9.log("\n📋 Current credentials:\n");
|
|
312
|
+
const dokploy = await getDokployCredentials();
|
|
313
|
+
if (dokploy) {
|
|
314
|
+
logger$9.log(" Dokploy:");
|
|
315
|
+
logger$9.log(` Endpoint: ${dokploy.endpoint}`);
|
|
316
|
+
logger$9.log(` Token: ${maskToken(dokploy.token)}`);
|
|
317
|
+
} else logger$9.log(" Dokploy: Not logged in");
|
|
318
|
+
logger$9.log(`\n Credentials file: ${getCredentialsPath()}`);
|
|
319
|
+
}
|
|
320
|
+
/**
|
|
321
|
+
* Mask a token for display
|
|
322
|
+
*/
|
|
323
|
+
function maskToken(token) {
|
|
324
|
+
if (token.length <= 8) return "****";
|
|
325
|
+
return `${token.slice(0, 4)}...${token.slice(-4)}`;
|
|
326
|
+
}
|
|
327
|
+
|
|
103
328
|
//#endregion
|
|
104
329
|
//#region src/build/providerResolver.ts
|
|
105
330
|
/**
|
|
@@ -176,13 +401,13 @@ function isEnabled(config) {
|
|
|
176
401
|
var CronGenerator = class extends require_openapi.ConstructGenerator {
|
|
177
402
|
async build(context, constructs, outputDir, options) {
|
|
178
403
|
const provider = options?.provider || "aws-lambda";
|
|
179
|
-
const logger$
|
|
404
|
+
const logger$10 = console;
|
|
180
405
|
const cronInfos = [];
|
|
181
406
|
if (constructs.length === 0 || provider !== "aws-lambda") return cronInfos;
|
|
182
407
|
const cronsDir = (0, node_path.join)(outputDir, "crons");
|
|
183
408
|
await (0, node_fs_promises.mkdir)(cronsDir, { recursive: true });
|
|
184
|
-
for (const { key, construct, path
|
|
185
|
-
const handlerFile = await this.generateCronHandler(cronsDir, path
|
|
409
|
+
for (const { key, construct, path } of constructs) {
|
|
410
|
+
const handlerFile = await this.generateCronHandler(cronsDir, path.relative, key, context);
|
|
186
411
|
cronInfos.push({
|
|
187
412
|
name: key,
|
|
188
413
|
handler: (0, node_path.relative)(process.cwd(), handlerFile).replace(/\.ts$/, ".handler"),
|
|
@@ -191,7 +416,7 @@ var CronGenerator = class extends require_openapi.ConstructGenerator {
|
|
|
191
416
|
memorySize: construct.memorySize,
|
|
192
417
|
environment: await construct.getEnvironment()
|
|
193
418
|
});
|
|
194
|
-
logger$
|
|
419
|
+
logger$10.log(`Generated cron handler: ${key}`);
|
|
195
420
|
}
|
|
196
421
|
return cronInfos;
|
|
197
422
|
}
|
|
@@ -227,13 +452,13 @@ var FunctionGenerator = class extends require_openapi.ConstructGenerator {
|
|
|
227
452
|
}
|
|
228
453
|
async build(context, constructs, outputDir, options) {
|
|
229
454
|
const provider = options?.provider || "aws-lambda";
|
|
230
|
-
const logger$
|
|
455
|
+
const logger$10 = console;
|
|
231
456
|
const functionInfos = [];
|
|
232
457
|
if (constructs.length === 0 || provider !== "aws-lambda") return functionInfos;
|
|
233
458
|
const functionsDir = (0, node_path.join)(outputDir, "functions");
|
|
234
459
|
await (0, node_fs_promises.mkdir)(functionsDir, { recursive: true });
|
|
235
|
-
for (const { key, construct, path
|
|
236
|
-
const handlerFile = await this.generateFunctionHandler(functionsDir, path
|
|
460
|
+
for (const { key, construct, path } of constructs) {
|
|
461
|
+
const handlerFile = await this.generateFunctionHandler(functionsDir, path.relative, key, context);
|
|
237
462
|
functionInfos.push({
|
|
238
463
|
name: key,
|
|
239
464
|
handler: (0, node_path.relative)(process.cwd(), handlerFile).replace(/\.ts$/, ".handler"),
|
|
@@ -241,7 +466,7 @@ var FunctionGenerator = class extends require_openapi.ConstructGenerator {
|
|
|
241
466
|
memorySize: construct.memorySize,
|
|
242
467
|
environment: await construct.getEnvironment()
|
|
243
468
|
});
|
|
244
|
-
logger$
|
|
469
|
+
logger$10.log(`Generated function handler: ${key}`);
|
|
245
470
|
}
|
|
246
471
|
return functionInfos;
|
|
247
472
|
}
|
|
@@ -274,19 +499,19 @@ var SubscriberGenerator = class extends require_openapi.ConstructGenerator {
|
|
|
274
499
|
}
|
|
275
500
|
async build(context, constructs, outputDir, options) {
|
|
276
501
|
const provider = options?.provider || "aws-lambda";
|
|
277
|
-
const logger$
|
|
502
|
+
const logger$10 = console;
|
|
278
503
|
const subscriberInfos = [];
|
|
279
504
|
if (provider === "server") {
|
|
280
505
|
await this.generateServerSubscribersFile(outputDir, constructs);
|
|
281
|
-
logger$
|
|
506
|
+
logger$10.log(`Generated server subscribers file with ${constructs.length} subscribers (polling mode)`);
|
|
282
507
|
return subscriberInfos;
|
|
283
508
|
}
|
|
284
509
|
if (constructs.length === 0) return subscriberInfos;
|
|
285
510
|
if (provider !== "aws-lambda") return subscriberInfos;
|
|
286
511
|
const subscribersDir = (0, node_path.join)(outputDir, "subscribers");
|
|
287
512
|
await (0, node_fs_promises.mkdir)(subscribersDir, { recursive: true });
|
|
288
|
-
for (const { key, construct, path
|
|
289
|
-
const handlerFile = await this.generateSubscriberHandler(subscribersDir, path
|
|
513
|
+
for (const { key, construct, path } of constructs) {
|
|
514
|
+
const handlerFile = await this.generateSubscriberHandler(subscribersDir, path.relative, key, construct, context);
|
|
290
515
|
subscriberInfos.push({
|
|
291
516
|
name: key,
|
|
292
517
|
handler: (0, node_path.relative)(process.cwd(), handlerFile).replace(/\.ts$/, ".handler"),
|
|
@@ -295,7 +520,7 @@ var SubscriberGenerator = class extends require_openapi.ConstructGenerator {
|
|
|
295
520
|
memorySize: construct.memorySize,
|
|
296
521
|
environment: await construct.getEnvironment()
|
|
297
522
|
});
|
|
298
|
-
logger$
|
|
523
|
+
logger$10.log(`Generated subscriber handler: ${key}`);
|
|
299
524
|
}
|
|
300
525
|
return subscriberInfos;
|
|
301
526
|
}
|
|
@@ -321,11 +546,11 @@ export const handler = adapter.handler;
|
|
|
321
546
|
const subscribersFileName = "subscribers.ts";
|
|
322
547
|
const subscribersPath = (0, node_path.join)(outputDir, subscribersFileName);
|
|
323
548
|
const importsByFile = /* @__PURE__ */ new Map();
|
|
324
|
-
for (const { path
|
|
325
|
-
const relativePath = (0, node_path.relative)((0, node_path.dirname)(subscribersPath), path
|
|
549
|
+
for (const { path, key } of subscribers) {
|
|
550
|
+
const relativePath = (0, node_path.relative)((0, node_path.dirname)(subscribersPath), path.relative);
|
|
326
551
|
const importPath = relativePath.replace(/\.ts$/, ".js");
|
|
327
552
|
if (!importsByFile.has(importPath)) importsByFile.set(importPath, []);
|
|
328
|
-
importsByFile.get(importPath)
|
|
553
|
+
importsByFile.get(importPath)?.push(key);
|
|
329
554
|
}
|
|
330
555
|
const imports = Array.from(importsByFile.entries()).map(([importPath, exports$2]) => `import { ${exports$2.join(", ")} } from '${importPath}';`).join("\n");
|
|
331
556
|
const allExportNames = subscribers.map(({ key }) => key);
|
|
@@ -380,7 +605,7 @@ export async function setupSubscribers(
|
|
|
380
605
|
return;
|
|
381
606
|
}
|
|
382
607
|
|
|
383
|
-
const serviceDiscovery = ServiceDiscovery.getInstance(
|
|
608
|
+
const serviceDiscovery = ServiceDiscovery.getInstance(envParser);
|
|
384
609
|
|
|
385
610
|
// Create connection once, outside the loop (more efficient)
|
|
386
611
|
// EventConnectionFactory automatically determines the right connection type
|
|
@@ -461,7 +686,7 @@ export async function setupSubscribers(
|
|
|
461
686
|
|
|
462
687
|
//#endregion
|
|
463
688
|
//#region src/dev/index.ts
|
|
464
|
-
const logger$
|
|
689
|
+
const logger$8 = console;
|
|
465
690
|
/**
|
|
466
691
|
* Load environment files
|
|
467
692
|
* @internal Exported for testing
|
|
@@ -512,7 +737,7 @@ async function findAvailablePort(preferredPort, maxAttempts = 10) {
|
|
|
512
737
|
for (let i = 0; i < maxAttempts; i++) {
|
|
513
738
|
const port = preferredPort + i;
|
|
514
739
|
if (await isPortAvailable(port)) return port;
|
|
515
|
-
logger$
|
|
740
|
+
logger$8.log(`⚠️ Port ${port} is in use, trying ${port + 1}...`);
|
|
516
741
|
}
|
|
517
742
|
throw new Error(`Could not find an available port after trying ${maxAttempts} ports starting from ${preferredPort}`);
|
|
518
743
|
}
|
|
@@ -582,33 +807,61 @@ function normalizeHooksConfig(config) {
|
|
|
582
807
|
const resolvedPath = (0, node_path.resolve)(process.cwd(), serverPath);
|
|
583
808
|
return { serverHooksPath: resolvedPath };
|
|
584
809
|
}
|
|
810
|
+
/**
|
|
811
|
+
* Normalize production configuration
|
|
812
|
+
* @internal Exported for testing
|
|
813
|
+
*/
|
|
814
|
+
function normalizeProductionConfig(cliProduction, configProduction) {
|
|
815
|
+
if (!cliProduction) return void 0;
|
|
816
|
+
const config = configProduction ?? {};
|
|
817
|
+
return {
|
|
818
|
+
enabled: true,
|
|
819
|
+
bundle: config.bundle ?? true,
|
|
820
|
+
minify: config.minify ?? true,
|
|
821
|
+
healthCheck: config.healthCheck ?? "/health",
|
|
822
|
+
gracefulShutdown: config.gracefulShutdown ?? true,
|
|
823
|
+
external: config.external ?? [],
|
|
824
|
+
subscribers: config.subscribers ?? "exclude",
|
|
825
|
+
openapi: config.openapi ?? false,
|
|
826
|
+
optimizedHandlers: config.optimizedHandlers ?? true
|
|
827
|
+
};
|
|
828
|
+
}
|
|
829
|
+
/**
|
|
830
|
+
* Get production config from GkmConfig
|
|
831
|
+
* @internal
|
|
832
|
+
*/
|
|
833
|
+
function getProductionConfigFromGkm(config) {
|
|
834
|
+
const serverConfig = config.providers?.server;
|
|
835
|
+
if (typeof serverConfig === "object") return serverConfig.production;
|
|
836
|
+
return void 0;
|
|
837
|
+
}
|
|
585
838
|
async function devCommand(options) {
|
|
586
839
|
const defaultEnv = loadEnvFiles(".env");
|
|
587
|
-
if (defaultEnv.loaded.length > 0) logger$
|
|
840
|
+
if (defaultEnv.loaded.length > 0) logger$8.log(`📦 Loaded env: ${defaultEnv.loaded.join(", ")}`);
|
|
588
841
|
const config = await require_config.loadConfig();
|
|
589
842
|
if (config.env) {
|
|
590
843
|
const { loaded, missing } = loadEnvFiles(config.env);
|
|
591
|
-
if (loaded.length > 0) logger$
|
|
592
|
-
if (missing.length > 0) logger$
|
|
844
|
+
if (loaded.length > 0) logger$8.log(`📦 Loaded env: ${loaded.join(", ")}`);
|
|
845
|
+
if (missing.length > 0) logger$8.warn(`⚠️ Missing env files: ${missing.join(", ")}`);
|
|
593
846
|
}
|
|
594
847
|
const resolved = resolveProviders(config, { provider: "server" });
|
|
595
|
-
logger$
|
|
596
|
-
logger$
|
|
597
|
-
if (config.functions) logger$
|
|
598
|
-
if (config.crons) logger$
|
|
599
|
-
if (config.subscribers) logger$
|
|
600
|
-
logger$
|
|
848
|
+
logger$8.log("🚀 Starting development server...");
|
|
849
|
+
logger$8.log(`Loading routes from: ${config.routes}`);
|
|
850
|
+
if (config.functions) logger$8.log(`Loading functions from: ${config.functions}`);
|
|
851
|
+
if (config.crons) logger$8.log(`Loading crons from: ${config.crons}`);
|
|
852
|
+
if (config.subscribers) logger$8.log(`Loading subscribers from: ${config.subscribers}`);
|
|
853
|
+
logger$8.log(`Using envParser: ${config.envParser}`);
|
|
601
854
|
const { path: envParserPath, importPattern: envParserImportPattern } = require_config.parseModuleConfig(config.envParser, "envParser");
|
|
602
855
|
const { path: loggerPath, importPattern: loggerImportPattern } = require_config.parseModuleConfig(config.logger, "logger");
|
|
603
856
|
const telescope = normalizeTelescopeConfig(config.telescope);
|
|
604
|
-
if (telescope) logger$
|
|
857
|
+
if (telescope) logger$8.log(`🔭 Telescope enabled at ${telescope.path}`);
|
|
605
858
|
const studio = normalizeStudioConfig(config.studio);
|
|
606
|
-
if (studio) logger$
|
|
859
|
+
if (studio) logger$8.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
607
860
|
const hooks = normalizeHooksConfig(config.hooks);
|
|
608
|
-
if (hooks) logger$
|
|
861
|
+
if (hooks) logger$8.log(`🪝 Server hooks enabled from ${config.hooks?.server}`);
|
|
609
862
|
const openApiConfig = require_openapi.resolveOpenApiConfig(config);
|
|
610
863
|
const enableOpenApi = openApiConfig.enabled || resolved.enableOpenApi;
|
|
611
|
-
if (enableOpenApi) logger$
|
|
864
|
+
if (enableOpenApi) logger$8.log(`📄 OpenAPI output: ${require_openapi.OPENAPI_OUTPUT_PATH}`);
|
|
612
865
|
const buildContext = {
|
|
613
866
|
envParserPath,
|
|
614
867
|
envParserImportPattern,
|
|
@@ -623,9 +876,10 @@ async function devCommand(options) {
|
|
|
623
876
|
const runtime = config.runtime ?? "node";
|
|
624
877
|
const devServer = new DevServer(resolved.providers[0], options.port || 3e3, options.portExplicit ?? false, enableOpenApi, telescope, studio, runtime);
|
|
625
878
|
await devServer.start();
|
|
626
|
-
const envParserFile = config.envParser.split("#")[0];
|
|
627
|
-
const loggerFile = config.logger.split("#")[0];
|
|
628
|
-
const
|
|
879
|
+
const envParserFile = config.envParser.split("#")[0] ?? config.envParser;
|
|
880
|
+
const loggerFile = config.logger.split("#")[0] ?? config.logger;
|
|
881
|
+
const hooksFileParts = config.hooks?.server?.split("#");
|
|
882
|
+
const hooksFile = hooksFileParts?.[0];
|
|
629
883
|
const watchPatterns = [
|
|
630
884
|
config.routes,
|
|
631
885
|
...config.functions ? [config.functions] : [],
|
|
@@ -634,41 +888,44 @@ async function devCommand(options) {
|
|
|
634
888
|
envParserFile.endsWith(".ts") ? envParserFile : `${envParserFile}.ts`,
|
|
635
889
|
loggerFile.endsWith(".ts") ? loggerFile : `${loggerFile}.ts`,
|
|
636
890
|
...hooksFile ? [hooksFile.endsWith(".ts") ? hooksFile : `${hooksFile}.ts`] : []
|
|
637
|
-
].flat();
|
|
891
|
+
].flat().filter((p) => typeof p === "string");
|
|
638
892
|
const normalizedPatterns = watchPatterns.map((p) => p.startsWith("./") ? p.slice(2) : p);
|
|
639
|
-
logger$
|
|
893
|
+
logger$8.log(`👀 Watching for changes in: ${normalizedPatterns.join(", ")}`);
|
|
640
894
|
const resolvedFiles = await (0, fast_glob.default)(normalizedPatterns, {
|
|
641
895
|
cwd: process.cwd(),
|
|
642
896
|
absolute: false,
|
|
643
897
|
onlyFiles: true
|
|
644
898
|
});
|
|
645
|
-
const dirsToWatch = [...new Set(resolvedFiles.map((f) =>
|
|
646
|
-
|
|
899
|
+
const dirsToWatch = [...new Set(resolvedFiles.map((f) => {
|
|
900
|
+
const parts = f.split("/");
|
|
901
|
+
return parts.slice(0, -1).join("/");
|
|
902
|
+
}))];
|
|
903
|
+
logger$8.log(`📁 Found ${resolvedFiles.length} files in ${dirsToWatch.length} directories`);
|
|
647
904
|
const watcher = chokidar.default.watch([...resolvedFiles, ...dirsToWatch], {
|
|
648
|
-
ignored: /(^|[
|
|
905
|
+
ignored: /(^|[/\\])\../,
|
|
649
906
|
persistent: true,
|
|
650
907
|
ignoreInitial: true,
|
|
651
908
|
cwd: process.cwd()
|
|
652
909
|
});
|
|
653
910
|
watcher.on("ready", () => {
|
|
654
|
-
logger$
|
|
911
|
+
logger$8.log("🔍 File watcher ready");
|
|
655
912
|
});
|
|
656
913
|
watcher.on("error", (error) => {
|
|
657
|
-
logger$
|
|
914
|
+
logger$8.error("❌ Watcher error:", error);
|
|
658
915
|
});
|
|
659
916
|
let rebuildTimeout = null;
|
|
660
|
-
watcher.on("change", async (path
|
|
661
|
-
logger$
|
|
917
|
+
watcher.on("change", async (path) => {
|
|
918
|
+
logger$8.log(`📝 File changed: ${path}`);
|
|
662
919
|
if (rebuildTimeout) clearTimeout(rebuildTimeout);
|
|
663
920
|
rebuildTimeout = setTimeout(async () => {
|
|
664
921
|
try {
|
|
665
|
-
logger$
|
|
922
|
+
logger$8.log("🔄 Rebuilding...");
|
|
666
923
|
await buildServer(config, buildContext, resolved.providers[0], enableOpenApi);
|
|
667
924
|
if (enableOpenApi) await require_openapi.generateOpenApi(config, { silent: true });
|
|
668
|
-
logger$
|
|
925
|
+
logger$8.log("✅ Rebuild complete, restarting server...");
|
|
669
926
|
await devServer.restart();
|
|
670
927
|
} catch (error) {
|
|
671
|
-
logger$
|
|
928
|
+
logger$8.error("❌ Rebuild failed:", error.message);
|
|
672
929
|
}
|
|
673
930
|
}, 300);
|
|
674
931
|
});
|
|
@@ -676,9 +933,9 @@ async function devCommand(options) {
|
|
|
676
933
|
const shutdown = () => {
|
|
677
934
|
if (isShuttingDown) return;
|
|
678
935
|
isShuttingDown = true;
|
|
679
|
-
logger$
|
|
936
|
+
logger$8.log("\n🛑 Shutting down...");
|
|
680
937
|
Promise.all([watcher.close(), devServer.stop()]).catch((err) => {
|
|
681
|
-
logger$
|
|
938
|
+
logger$8.error("Error during shutdown:", err);
|
|
682
939
|
}).finally(() => {
|
|
683
940
|
process.exit(0);
|
|
684
941
|
});
|
|
@@ -731,11 +988,11 @@ var DevServer = class {
|
|
|
731
988
|
this.actualPort = this.requestedPort;
|
|
732
989
|
} else {
|
|
733
990
|
this.actualPort = await findAvailablePort(this.requestedPort);
|
|
734
|
-
if (this.actualPort !== this.requestedPort) logger$
|
|
991
|
+
if (this.actualPort !== this.requestedPort) logger$8.log(`ℹ️ Port ${this.requestedPort} was in use, using port ${this.actualPort} instead`);
|
|
735
992
|
}
|
|
736
993
|
const serverEntryPath = (0, node_path.join)(process.cwd(), ".gkm", this.provider, "server.ts");
|
|
737
994
|
await this.createServerEntry();
|
|
738
|
-
logger$
|
|
995
|
+
logger$8.log(`\n✨ Starting server on port ${this.actualPort}...`);
|
|
739
996
|
this.serverProcess = (0, node_child_process.spawn)("npx", [
|
|
740
997
|
"tsx",
|
|
741
998
|
serverEntryPath,
|
|
@@ -751,18 +1008,18 @@ var DevServer = class {
|
|
|
751
1008
|
});
|
|
752
1009
|
this.isRunning = true;
|
|
753
1010
|
this.serverProcess.on("error", (error) => {
|
|
754
|
-
logger$
|
|
1011
|
+
logger$8.error("❌ Server error:", error);
|
|
755
1012
|
});
|
|
756
1013
|
this.serverProcess.on("exit", (code, signal) => {
|
|
757
|
-
if (code !== null && code !== 0 && signal !== "SIGTERM") logger$
|
|
1014
|
+
if (code !== null && code !== 0 && signal !== "SIGTERM") logger$8.error(`❌ Server exited with code ${code}`);
|
|
758
1015
|
this.isRunning = false;
|
|
759
1016
|
});
|
|
760
1017
|
await new Promise((resolve$1) => setTimeout(resolve$1, 1e3));
|
|
761
1018
|
if (this.isRunning) {
|
|
762
|
-
logger$
|
|
763
|
-
if (this.enableOpenApi) logger$
|
|
764
|
-
if (this.telescope) logger$
|
|
765
|
-
if (this.studio) logger$
|
|
1019
|
+
logger$8.log(`\n🎉 Server running at http://localhost:${this.actualPort}`);
|
|
1020
|
+
if (this.enableOpenApi) logger$8.log(`📚 API Docs available at http://localhost:${this.actualPort}/__docs`);
|
|
1021
|
+
if (this.telescope) logger$8.log(`🔭 Telescope available at http://localhost:${this.actualPort}${this.telescope.path}`);
|
|
1022
|
+
if (this.studio) logger$8.log(`🗄️ Studio available at http://localhost:${this.actualPort}${this.studio.path}`);
|
|
766
1023
|
}
|
|
767
1024
|
}
|
|
768
1025
|
async stop() {
|
|
@@ -799,10 +1056,10 @@ var DevServer = class {
|
|
|
799
1056
|
await this.start();
|
|
800
1057
|
}
|
|
801
1058
|
async createServerEntry() {
|
|
802
|
-
const { writeFile: writeFile$
|
|
803
|
-
const { relative: relative$5, dirname: dirname$
|
|
1059
|
+
const { writeFile: writeFile$8 } = await import("node:fs/promises");
|
|
1060
|
+
const { relative: relative$5, dirname: dirname$5 } = await import("node:path");
|
|
804
1061
|
const serverPath = (0, node_path.join)(process.cwd(), ".gkm", this.provider, "server.ts");
|
|
805
|
-
const relativeAppPath = relative$5(dirname$
|
|
1062
|
+
const relativeAppPath = relative$5(dirname$5(serverPath), (0, node_path.join)(dirname$5(serverPath), "app.js"));
|
|
806
1063
|
const serveCode = this.runtime === "bun" ? `Bun.serve({
|
|
807
1064
|
port,
|
|
808
1065
|
fetch: app.fetch,
|
|
@@ -822,7 +1079,7 @@ var DevServer = class {
|
|
|
822
1079
|
* Development server entry point
|
|
823
1080
|
* This file is auto-generated by 'gkm dev'
|
|
824
1081
|
*/
|
|
825
|
-
import { createApp } from './${relativeAppPath.startsWith(".") ? relativeAppPath :
|
|
1082
|
+
import { createApp } from './${relativeAppPath.startsWith(".") ? relativeAppPath : `./${relativeAppPath}`}';
|
|
826
1083
|
|
|
827
1084
|
const port = process.argv.includes('--port')
|
|
828
1085
|
? Number.parseInt(process.argv[process.argv.indexOf('--port') + 1])
|
|
@@ -842,15 +1099,15 @@ start({
|
|
|
842
1099
|
process.exit(1);
|
|
843
1100
|
});
|
|
844
1101
|
`;
|
|
845
|
-
await writeFile$
|
|
1102
|
+
await writeFile$8(serverPath, content);
|
|
846
1103
|
}
|
|
847
1104
|
};
|
|
848
1105
|
|
|
849
1106
|
//#endregion
|
|
850
1107
|
//#region src/build/manifests.ts
|
|
851
|
-
const logger$
|
|
1108
|
+
const logger$7 = console;
|
|
852
1109
|
async function generateAwsManifest(outputDir, routes, functions, crons, subscribers) {
|
|
853
|
-
const manifestDir = (0,
|
|
1110
|
+
const manifestDir = (0, node_path.join)(outputDir, "manifest");
|
|
854
1111
|
await (0, node_fs_promises.mkdir)(manifestDir, { recursive: true });
|
|
855
1112
|
const awsRoutes = routes.filter((r) => r.method !== "ALL");
|
|
856
1113
|
const content = `export const manifest = {
|
|
@@ -871,13 +1128,13 @@ export type Authorizer = Route['authorizer'];
|
|
|
871
1128
|
export type HttpMethod = Route['method'];
|
|
872
1129
|
export type RoutePath = Route['path'];
|
|
873
1130
|
`;
|
|
874
|
-
const manifestPath = (0,
|
|
1131
|
+
const manifestPath = (0, node_path.join)(manifestDir, "aws.ts");
|
|
875
1132
|
await (0, node_fs_promises.writeFile)(manifestPath, content);
|
|
876
|
-
logger$
|
|
877
|
-
logger$
|
|
1133
|
+
logger$7.log(`Generated AWS manifest with ${awsRoutes.length} routes, ${functions.length} functions, ${crons.length} crons, ${subscribers.length} subscribers`);
|
|
1134
|
+
logger$7.log(`Manifest: ${(0, node_path.relative)(process.cwd(), manifestPath)}`);
|
|
878
1135
|
}
|
|
879
1136
|
async function generateServerManifest(outputDir, appInfo, routes, subscribers) {
|
|
880
|
-
const manifestDir = (0,
|
|
1137
|
+
const manifestDir = (0, node_path.join)(outputDir, "manifest");
|
|
881
1138
|
await (0, node_fs_promises.mkdir)(manifestDir, { recursive: true });
|
|
882
1139
|
const serverRoutes = routes.filter((r) => r.method !== "ALL").map((r) => ({
|
|
883
1140
|
path: r.path,
|
|
@@ -903,37 +1160,44 @@ export type Authorizer = Route['authorizer'];
|
|
|
903
1160
|
export type HttpMethod = Route['method'];
|
|
904
1161
|
export type RoutePath = Route['path'];
|
|
905
1162
|
`;
|
|
906
|
-
const manifestPath = (0,
|
|
1163
|
+
const manifestPath = (0, node_path.join)(manifestDir, "server.ts");
|
|
907
1164
|
await (0, node_fs_promises.writeFile)(manifestPath, content);
|
|
908
|
-
logger$
|
|
909
|
-
logger$
|
|
1165
|
+
logger$7.log(`Generated server manifest with ${serverRoutes.length} routes, ${serverSubscribers.length} subscribers`);
|
|
1166
|
+
logger$7.log(`Manifest: ${(0, node_path.relative)(process.cwd(), manifestPath)}`);
|
|
910
1167
|
}
|
|
911
1168
|
|
|
912
1169
|
//#endregion
|
|
913
1170
|
//#region src/build/index.ts
|
|
914
|
-
const logger = console;
|
|
1171
|
+
const logger$6 = console;
|
|
915
1172
|
async function buildCommand(options) {
|
|
916
1173
|
const config = await require_config.loadConfig();
|
|
917
1174
|
const resolved = resolveProviders(config, options);
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
if (
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
logger.log(`
|
|
1175
|
+
const productionConfigFromGkm = getProductionConfigFromGkm(config);
|
|
1176
|
+
const production = normalizeProductionConfig(options.production ?? false, productionConfigFromGkm);
|
|
1177
|
+
if (production) logger$6.log(`🏭 Building for PRODUCTION`);
|
|
1178
|
+
logger$6.log(`Building with providers: ${resolved.providers.join(", ")}`);
|
|
1179
|
+
logger$6.log(`Loading routes from: ${config.routes}`);
|
|
1180
|
+
if (config.functions) logger$6.log(`Loading functions from: ${config.functions}`);
|
|
1181
|
+
if (config.crons) logger$6.log(`Loading crons from: ${config.crons}`);
|
|
1182
|
+
if (config.subscribers) logger$6.log(`Loading subscribers from: ${config.subscribers}`);
|
|
1183
|
+
logger$6.log(`Using envParser: ${config.envParser}`);
|
|
924
1184
|
const { path: envParserPath, importPattern: envParserImportPattern } = require_config.parseModuleConfig(config.envParser, "envParser");
|
|
925
1185
|
const { path: loggerPath, importPattern: loggerImportPattern } = require_config.parseModuleConfig(config.logger, "logger");
|
|
926
|
-
const telescope = normalizeTelescopeConfig(config.telescope);
|
|
927
|
-
if (telescope) logger.log(`🔭 Telescope enabled at ${telescope.path}`);
|
|
1186
|
+
const telescope = production ? void 0 : normalizeTelescopeConfig(config.telescope);
|
|
1187
|
+
if (telescope) logger$6.log(`🔭 Telescope enabled at ${telescope.path}`);
|
|
1188
|
+
const studio = production ? void 0 : normalizeStudioConfig(config.studio);
|
|
1189
|
+
if (studio) logger$6.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
928
1190
|
const hooks = normalizeHooksConfig(config.hooks);
|
|
929
|
-
if (hooks) logger.log(`🪝 Server hooks enabled`);
|
|
1191
|
+
if (hooks) logger$6.log(`🪝 Server hooks enabled`);
|
|
930
1192
|
const buildContext = {
|
|
931
1193
|
envParserPath,
|
|
932
1194
|
envParserImportPattern,
|
|
933
1195
|
loggerPath,
|
|
934
1196
|
loggerImportPattern,
|
|
935
1197
|
telescope,
|
|
936
|
-
|
|
1198
|
+
studio,
|
|
1199
|
+
hooks,
|
|
1200
|
+
production
|
|
937
1201
|
};
|
|
938
1202
|
const endpointGenerator = new require_openapi.EndpointGenerator();
|
|
939
1203
|
const functionGenerator = new FunctionGenerator();
|
|
@@ -945,45 +1209,1243 @@ async function buildCommand(options) {
|
|
|
945
1209
|
config.crons ? cronGenerator.load(config.crons) : [],
|
|
946
1210
|
config.subscribers ? subscriberGenerator.load(config.subscribers) : []
|
|
947
1211
|
]);
|
|
948
|
-
logger.log(`Found ${allEndpoints.length} endpoints`);
|
|
949
|
-
logger.log(`Found ${allFunctions.length} functions`);
|
|
950
|
-
logger.log(`Found ${allCrons.length} crons`);
|
|
951
|
-
logger.log(`Found ${allSubscribers.length} subscribers`);
|
|
1212
|
+
logger$6.log(`Found ${allEndpoints.length} endpoints`);
|
|
1213
|
+
logger$6.log(`Found ${allFunctions.length} functions`);
|
|
1214
|
+
logger$6.log(`Found ${allCrons.length} crons`);
|
|
1215
|
+
logger$6.log(`Found ${allSubscribers.length} subscribers`);
|
|
952
1216
|
if (allEndpoints.length === 0 && allFunctions.length === 0 && allCrons.length === 0 && allSubscribers.length === 0) {
|
|
953
|
-
logger.log("No endpoints, functions, crons, or subscribers found to process");
|
|
1217
|
+
logger$6.log("No endpoints, functions, crons, or subscribers found to process");
|
|
1218
|
+
return {};
|
|
1219
|
+
}
|
|
1220
|
+
const rootOutputDir = (0, node_path.join)(process.cwd(), ".gkm");
|
|
1221
|
+
await (0, node_fs_promises.mkdir)(rootOutputDir, { recursive: true });
|
|
1222
|
+
let result = {};
|
|
1223
|
+
for (const provider of resolved.providers) {
|
|
1224
|
+
const providerResult = await buildForProvider(provider, buildContext, rootOutputDir, endpointGenerator, functionGenerator, cronGenerator, subscriberGenerator, allEndpoints, allFunctions, allCrons, allSubscribers, resolved.enableOpenApi, options.skipBundle ?? false, options.stage);
|
|
1225
|
+
if (providerResult.masterKey) result = providerResult;
|
|
1226
|
+
}
|
|
1227
|
+
return result;
|
|
1228
|
+
}
|
|
1229
|
+
async function buildForProvider(provider, context, rootOutputDir, endpointGenerator, functionGenerator, cronGenerator, subscriberGenerator, endpoints, functions, crons, subscribers, enableOpenApi, skipBundle, stage) {
|
|
1230
|
+
const outputDir = (0, node_path.join)(process.cwd(), ".gkm", provider);
|
|
1231
|
+
await (0, node_fs_promises.mkdir)(outputDir, { recursive: true });
|
|
1232
|
+
logger$6.log(`\nGenerating handlers for provider: ${provider}`);
|
|
1233
|
+
const [routes, functionInfos, cronInfos, subscriberInfos] = await Promise.all([
|
|
1234
|
+
endpointGenerator.build(context, endpoints, outputDir, {
|
|
1235
|
+
provider,
|
|
1236
|
+
enableOpenApi
|
|
1237
|
+
}),
|
|
1238
|
+
functionGenerator.build(context, functions, outputDir, { provider }),
|
|
1239
|
+
cronGenerator.build(context, crons, outputDir, { provider }),
|
|
1240
|
+
subscriberGenerator.build(context, subscribers, outputDir, { provider })
|
|
1241
|
+
]);
|
|
1242
|
+
logger$6.log(`Generated ${routes.length} routes, ${functionInfos.length} functions, ${cronInfos.length} crons, ${subscriberInfos.length} subscribers for ${provider}`);
|
|
1243
|
+
if (provider === "server") {
|
|
1244
|
+
const routeMetadata = await Promise.all(endpoints.map(async ({ construct }) => ({
|
|
1245
|
+
path: construct._path,
|
|
1246
|
+
method: construct.method,
|
|
1247
|
+
handler: "",
|
|
1248
|
+
authorizer: construct.authorizer?.name ?? "none"
|
|
1249
|
+
})));
|
|
1250
|
+
const appInfo = {
|
|
1251
|
+
handler: (0, node_path.relative)(process.cwd(), (0, node_path.join)(outputDir, "app.ts")),
|
|
1252
|
+
endpoints: (0, node_path.relative)(process.cwd(), (0, node_path.join)(outputDir, "endpoints.ts"))
|
|
1253
|
+
};
|
|
1254
|
+
await generateServerManifest(rootOutputDir, appInfo, routeMetadata, subscriberInfos);
|
|
1255
|
+
let masterKey;
|
|
1256
|
+
if (context.production?.bundle && !skipBundle) {
|
|
1257
|
+
logger$6.log(`\n📦 Bundling production server...`);
|
|
1258
|
+
const { bundleServer } = await Promise.resolve().then(() => require("./bundler-WsEvH_b2.cjs"));
|
|
1259
|
+
const bundleResult = await bundleServer({
|
|
1260
|
+
entryPoint: (0, node_path.join)(outputDir, "server.ts"),
|
|
1261
|
+
outputDir: (0, node_path.join)(outputDir, "dist"),
|
|
1262
|
+
minify: context.production.minify,
|
|
1263
|
+
sourcemap: false,
|
|
1264
|
+
external: context.production.external,
|
|
1265
|
+
stage
|
|
1266
|
+
});
|
|
1267
|
+
masterKey = bundleResult.masterKey;
|
|
1268
|
+
logger$6.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
|
|
1269
|
+
if (masterKey) {
|
|
1270
|
+
logger$6.log(`\n🔐 Secrets encrypted for deployment`);
|
|
1271
|
+
logger$6.log(` Deploy with: GKM_MASTER_KEY=${masterKey}`);
|
|
1272
|
+
}
|
|
1273
|
+
}
|
|
1274
|
+
return { masterKey };
|
|
1275
|
+
} else await generateAwsManifest(rootOutputDir, routes, functionInfos, cronInfos, subscriberInfos);
|
|
1276
|
+
return {};
|
|
1277
|
+
}
|
|
1278
|
+
|
|
1279
|
+
//#endregion
|
|
1280
|
+
//#region src/deploy/docker.ts
|
|
1281
|
+
const logger$5 = console;
|
|
1282
|
+
/**
|
|
1283
|
+
* Get the full image reference
|
|
1284
|
+
*/
|
|
1285
|
+
function getImageRef(registry, imageName, tag) {
|
|
1286
|
+
if (registry) return `${registry}/${imageName}:${tag}`;
|
|
1287
|
+
return `${imageName}:${tag}`;
|
|
1288
|
+
}
|
|
1289
|
+
/**
|
|
1290
|
+
* Build Docker image
|
|
1291
|
+
*/
|
|
1292
|
+
async function buildImage(imageRef) {
|
|
1293
|
+
logger$5.log(`\n🔨 Building Docker image: ${imageRef}`);
|
|
1294
|
+
try {
|
|
1295
|
+
(0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${imageRef} .`, {
|
|
1296
|
+
cwd: process.cwd(),
|
|
1297
|
+
stdio: "inherit",
|
|
1298
|
+
env: {
|
|
1299
|
+
...process.env,
|
|
1300
|
+
DOCKER_BUILDKIT: "1"
|
|
1301
|
+
}
|
|
1302
|
+
});
|
|
1303
|
+
logger$5.log(`✅ Image built: ${imageRef}`);
|
|
1304
|
+
} catch (error) {
|
|
1305
|
+
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1306
|
+
}
|
|
1307
|
+
}
|
|
1308
|
+
/**
|
|
1309
|
+
* Push Docker image to registry
|
|
1310
|
+
*/
|
|
1311
|
+
async function pushImage(imageRef) {
|
|
1312
|
+
logger$5.log(`\n☁️ Pushing image: ${imageRef}`);
|
|
1313
|
+
try {
|
|
1314
|
+
(0, node_child_process.execSync)(`docker push ${imageRef}`, {
|
|
1315
|
+
cwd: process.cwd(),
|
|
1316
|
+
stdio: "inherit"
|
|
1317
|
+
});
|
|
1318
|
+
logger$5.log(`✅ Image pushed: ${imageRef}`);
|
|
1319
|
+
} catch (error) {
|
|
1320
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1321
|
+
}
|
|
1322
|
+
}
|
|
1323
|
+
/**
|
|
1324
|
+
* Deploy using Docker (build and optionally push image)
|
|
1325
|
+
*/
|
|
1326
|
+
async function deployDocker(options) {
|
|
1327
|
+
const { stage, tag, skipPush, masterKey, config } = options;
|
|
1328
|
+
const imageName = config.imageName ?? "app";
|
|
1329
|
+
const imageRef = getImageRef(config.registry, imageName, tag);
|
|
1330
|
+
await buildImage(imageRef);
|
|
1331
|
+
if (!skipPush) if (!config.registry) logger$5.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
|
|
1332
|
+
else await pushImage(imageRef);
|
|
1333
|
+
logger$5.log("\n✅ Docker deployment ready!");
|
|
1334
|
+
logger$5.log(`\n📋 Deployment details:`);
|
|
1335
|
+
logger$5.log(` Image: ${imageRef}`);
|
|
1336
|
+
logger$5.log(` Stage: ${stage}`);
|
|
1337
|
+
if (masterKey) {
|
|
1338
|
+
logger$5.log(`\n🔐 Deploy with this environment variable:`);
|
|
1339
|
+
logger$5.log(` GKM_MASTER_KEY=${masterKey}`);
|
|
1340
|
+
logger$5.log("\n Example docker run:");
|
|
1341
|
+
logger$5.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
|
|
1342
|
+
}
|
|
1343
|
+
return {
|
|
1344
|
+
imageRef,
|
|
1345
|
+
masterKey
|
|
1346
|
+
};
|
|
1347
|
+
}
|
|
1348
|
+
/**
|
|
1349
|
+
* Resolve Docker deploy config from gkm config
|
|
1350
|
+
*/
|
|
1351
|
+
function resolveDockerConfig$1(config) {
|
|
1352
|
+
return {
|
|
1353
|
+
registry: config.docker?.registry,
|
|
1354
|
+
imageName: config.docker?.imageName
|
|
1355
|
+
};
|
|
1356
|
+
}
|
|
1357
|
+
|
|
1358
|
+
//#endregion
|
|
1359
|
+
//#region src/deploy/dokploy.ts
|
|
1360
|
+
const logger$4 = console;
|
|
1361
|
+
/**
|
|
1362
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
1363
|
+
*/
|
|
1364
|
+
async function getApiToken$1() {
|
|
1365
|
+
const token = await getDokployToken();
|
|
1366
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
1367
|
+
return token;
|
|
1368
|
+
}
|
|
1369
|
+
/**
|
|
1370
|
+
* Make a request to the Dokploy API
|
|
1371
|
+
*/
|
|
1372
|
+
async function dokployRequest$1(endpoint, baseUrl, token, body) {
|
|
1373
|
+
const url = `${baseUrl}/api/${endpoint}`;
|
|
1374
|
+
const response = await fetch(url, {
|
|
1375
|
+
method: "POST",
|
|
1376
|
+
headers: {
|
|
1377
|
+
"Content-Type": "application/json",
|
|
1378
|
+
Authorization: `Bearer ${token}`
|
|
1379
|
+
},
|
|
1380
|
+
body: JSON.stringify(body)
|
|
1381
|
+
});
|
|
1382
|
+
if (!response.ok) {
|
|
1383
|
+
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1384
|
+
try {
|
|
1385
|
+
const errorBody = await response.json();
|
|
1386
|
+
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1387
|
+
if (errorBody.issues?.length) errorMessage += `\n Issues: ${errorBody.issues.map((i) => i.message).join(", ")}`;
|
|
1388
|
+
} catch {}
|
|
1389
|
+
throw new Error(errorMessage);
|
|
1390
|
+
}
|
|
1391
|
+
return response.json();
|
|
1392
|
+
}
|
|
1393
|
+
/**
|
|
1394
|
+
* Update application environment variables
|
|
1395
|
+
*/
|
|
1396
|
+
async function updateEnvironment(baseUrl, token, applicationId, envVars) {
|
|
1397
|
+
logger$4.log(" Updating environment variables...");
|
|
1398
|
+
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
1399
|
+
await dokployRequest$1("application.update", baseUrl, token, {
|
|
1400
|
+
applicationId,
|
|
1401
|
+
env: envString
|
|
1402
|
+
});
|
|
1403
|
+
logger$4.log(" ✓ Environment variables updated");
|
|
1404
|
+
}
|
|
1405
|
+
/**
|
|
1406
|
+
* Trigger application deployment
|
|
1407
|
+
*/
|
|
1408
|
+
async function triggerDeploy(baseUrl, token, applicationId) {
|
|
1409
|
+
logger$4.log(" Triggering deployment...");
|
|
1410
|
+
await dokployRequest$1("application.deploy", baseUrl, token, { applicationId });
|
|
1411
|
+
logger$4.log(" ✓ Deployment triggered");
|
|
1412
|
+
}
|
|
1413
|
+
/**
|
|
1414
|
+
* Deploy to Dokploy
|
|
1415
|
+
*/
|
|
1416
|
+
async function deployDokploy(options) {
|
|
1417
|
+
const { stage, imageRef, masterKey, config } = options;
|
|
1418
|
+
logger$4.log(`\n🎯 Deploying to Dokploy...`);
|
|
1419
|
+
logger$4.log(` Endpoint: ${config.endpoint}`);
|
|
1420
|
+
logger$4.log(` Application: ${config.applicationId}`);
|
|
1421
|
+
const token = await getApiToken$1();
|
|
1422
|
+
const envVars = {};
|
|
1423
|
+
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
1424
|
+
if (Object.keys(envVars).length > 0) await updateEnvironment(config.endpoint, token, config.applicationId, envVars);
|
|
1425
|
+
await triggerDeploy(config.endpoint, token, config.applicationId);
|
|
1426
|
+
logger$4.log("\n✅ Dokploy deployment initiated!");
|
|
1427
|
+
logger$4.log(`\n📋 Deployment details:`);
|
|
1428
|
+
logger$4.log(` Image: ${imageRef}`);
|
|
1429
|
+
logger$4.log(` Stage: ${stage}`);
|
|
1430
|
+
logger$4.log(` Application ID: ${config.applicationId}`);
|
|
1431
|
+
if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
1432
|
+
const deploymentUrl = `${config.endpoint}/project/${config.projectId}`;
|
|
1433
|
+
logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
1434
|
+
return {
|
|
1435
|
+
imageRef,
|
|
1436
|
+
masterKey,
|
|
1437
|
+
url: deploymentUrl
|
|
1438
|
+
};
|
|
1439
|
+
}
|
|
1440
|
+
/**
|
|
1441
|
+
* Validate Dokploy configuration
|
|
1442
|
+
*/
|
|
1443
|
+
function validateDokployConfig(config) {
|
|
1444
|
+
if (!config) return false;
|
|
1445
|
+
const required = [
|
|
1446
|
+
"endpoint",
|
|
1447
|
+
"projectId",
|
|
1448
|
+
"applicationId"
|
|
1449
|
+
];
|
|
1450
|
+
const missing = required.filter((key) => !config[key]);
|
|
1451
|
+
if (missing.length > 0) throw new Error(`Missing Dokploy configuration: ${missing.join(", ")}\nConfigure in gkm.config.ts:
|
|
1452
|
+
providers: {
|
|
1453
|
+
dokploy: {
|
|
1454
|
+
endpoint: 'https://dokploy.example.com',
|
|
1455
|
+
projectId: 'proj_xxx',
|
|
1456
|
+
applicationId: 'app_xxx',
|
|
1457
|
+
},
|
|
1458
|
+
}`);
|
|
1459
|
+
return true;
|
|
1460
|
+
}
|
|
1461
|
+
|
|
1462
|
+
//#endregion
|
|
1463
|
+
//#region src/deploy/index.ts
|
|
1464
|
+
const logger$3 = console;
|
|
1465
|
+
/**
|
|
1466
|
+
* Generate image tag from stage and timestamp
|
|
1467
|
+
*/
|
|
1468
|
+
function generateTag(stage) {
|
|
1469
|
+
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
1470
|
+
return `${stage}-${timestamp}`;
|
|
1471
|
+
}
|
|
1472
|
+
/**
|
|
1473
|
+
* Main deploy command
|
|
1474
|
+
*/
|
|
1475
|
+
async function deployCommand(options) {
|
|
1476
|
+
const { provider, stage, tag, skipPush, skipBuild } = options;
|
|
1477
|
+
logger$3.log(`\n🚀 Deploying to ${provider}...`);
|
|
1478
|
+
logger$3.log(` Stage: ${stage}`);
|
|
1479
|
+
const config = await require_config.loadConfig();
|
|
1480
|
+
const imageTag = tag ?? generateTag(stage);
|
|
1481
|
+
logger$3.log(` Tag: ${imageTag}`);
|
|
1482
|
+
let masterKey;
|
|
1483
|
+
if (!skipBuild) {
|
|
1484
|
+
logger$3.log(`\n📦 Building for production...`);
|
|
1485
|
+
const buildResult = await buildCommand({
|
|
1486
|
+
provider: "server",
|
|
1487
|
+
production: true,
|
|
1488
|
+
stage
|
|
1489
|
+
});
|
|
1490
|
+
masterKey = buildResult.masterKey;
|
|
1491
|
+
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
1492
|
+
const dockerConfig = resolveDockerConfig$1(config);
|
|
1493
|
+
const imageName = dockerConfig.imageName ?? "app";
|
|
1494
|
+
const registry = dockerConfig.registry;
|
|
1495
|
+
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
1496
|
+
let result;
|
|
1497
|
+
switch (provider) {
|
|
1498
|
+
case "docker": {
|
|
1499
|
+
result = await deployDocker({
|
|
1500
|
+
stage,
|
|
1501
|
+
tag: imageTag,
|
|
1502
|
+
skipPush,
|
|
1503
|
+
masterKey,
|
|
1504
|
+
config: dockerConfig
|
|
1505
|
+
});
|
|
1506
|
+
break;
|
|
1507
|
+
}
|
|
1508
|
+
case "dokploy": {
|
|
1509
|
+
const dokployConfigRaw = config.providers?.dokploy;
|
|
1510
|
+
if (typeof dokployConfigRaw === "boolean" || !dokployConfigRaw) throw new Error("Dokploy provider requires configuration.\nConfigure in gkm.config.ts:\n providers: {\n dokploy: {\n endpoint: 'https://dokploy.example.com',\n projectId: 'proj_xxx',\n applicationId: 'app_xxx',\n },\n }");
|
|
1511
|
+
validateDokployConfig(dokployConfigRaw);
|
|
1512
|
+
const dokployConfig = dokployConfigRaw;
|
|
1513
|
+
await deployDocker({
|
|
1514
|
+
stage,
|
|
1515
|
+
tag: imageTag,
|
|
1516
|
+
skipPush: false,
|
|
1517
|
+
masterKey,
|
|
1518
|
+
config: {
|
|
1519
|
+
registry: dokployConfig.registry ?? dockerConfig.registry,
|
|
1520
|
+
imageName: dockerConfig.imageName
|
|
1521
|
+
}
|
|
1522
|
+
});
|
|
1523
|
+
result = await deployDokploy({
|
|
1524
|
+
stage,
|
|
1525
|
+
tag: imageTag,
|
|
1526
|
+
imageRef,
|
|
1527
|
+
masterKey,
|
|
1528
|
+
config: dokployConfig
|
|
1529
|
+
});
|
|
1530
|
+
break;
|
|
1531
|
+
}
|
|
1532
|
+
case "aws-lambda": {
|
|
1533
|
+
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
1534
|
+
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
1535
|
+
result = {
|
|
1536
|
+
imageRef,
|
|
1537
|
+
masterKey
|
|
1538
|
+
};
|
|
1539
|
+
break;
|
|
1540
|
+
}
|
|
1541
|
+
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
1542
|
+
}
|
|
1543
|
+
logger$3.log("\n✅ Deployment complete!");
|
|
1544
|
+
return result;
|
|
1545
|
+
}
|
|
1546
|
+
|
|
1547
|
+
//#endregion
|
|
1548
|
+
//#region src/deploy/init.ts
|
|
1549
|
+
const logger$2 = console;
|
|
1550
|
+
/**
|
|
1551
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
1552
|
+
*/
|
|
1553
|
+
async function getApiToken() {
|
|
1554
|
+
const token = await getDokployToken();
|
|
1555
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
1556
|
+
return token;
|
|
1557
|
+
}
|
|
1558
|
+
/**
|
|
1559
|
+
* Make a request to the Dokploy API
|
|
1560
|
+
*/
|
|
1561
|
+
async function dokployRequest(method, endpoint, baseUrl, token, body) {
|
|
1562
|
+
const url = `${baseUrl}/api/${endpoint}`;
|
|
1563
|
+
const response = await fetch(url, {
|
|
1564
|
+
method,
|
|
1565
|
+
headers: {
|
|
1566
|
+
"Content-Type": "application/json",
|
|
1567
|
+
Authorization: `Bearer ${token}`
|
|
1568
|
+
},
|
|
1569
|
+
body: body ? JSON.stringify(body) : void 0
|
|
1570
|
+
});
|
|
1571
|
+
if (!response.ok) {
|
|
1572
|
+
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1573
|
+
try {
|
|
1574
|
+
const errorBody = await response.json();
|
|
1575
|
+
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1576
|
+
} catch {}
|
|
1577
|
+
throw new Error(errorMessage);
|
|
1578
|
+
}
|
|
1579
|
+
const text = await response.text();
|
|
1580
|
+
if (!text) return {};
|
|
1581
|
+
return JSON.parse(text);
|
|
1582
|
+
}
|
|
1583
|
+
/**
|
|
1584
|
+
* Get all projects from Dokploy
|
|
1585
|
+
*/
|
|
1586
|
+
async function getProjects(baseUrl, token) {
|
|
1587
|
+
return dokployRequest("GET", "project.all", baseUrl, token);
|
|
1588
|
+
}
|
|
1589
|
+
/**
|
|
1590
|
+
* Create a new project in Dokploy
|
|
1591
|
+
*/
|
|
1592
|
+
async function createProject(baseUrl, token, name$1, description$1) {
|
|
1593
|
+
return dokployRequest("POST", "project.create", baseUrl, token, {
|
|
1594
|
+
name: name$1,
|
|
1595
|
+
description: description$1 || `Created by gkm CLI`
|
|
1596
|
+
});
|
|
1597
|
+
}
|
|
1598
|
+
/**
|
|
1599
|
+
* Get project by ID to get environment info
|
|
1600
|
+
*/
|
|
1601
|
+
async function getProject(baseUrl, token, projectId) {
|
|
1602
|
+
return dokployRequest("POST", "project.one", baseUrl, token, { projectId });
|
|
1603
|
+
}
|
|
1604
|
+
/**
|
|
1605
|
+
* Create a new application in Dokploy
|
|
1606
|
+
*/
|
|
1607
|
+
async function createApplication(baseUrl, token, name$1, projectId) {
|
|
1608
|
+
const project = await getProject(baseUrl, token, projectId);
|
|
1609
|
+
let environmentId;
|
|
1610
|
+
const firstEnv = project.environments?.[0];
|
|
1611
|
+
if (firstEnv) environmentId = firstEnv.environmentId;
|
|
1612
|
+
else {
|
|
1613
|
+
const env = await dokployRequest("POST", "environment.create", baseUrl, token, {
|
|
1614
|
+
projectId,
|
|
1615
|
+
name: "production",
|
|
1616
|
+
description: "Production environment"
|
|
1617
|
+
});
|
|
1618
|
+
environmentId = env.environmentId;
|
|
1619
|
+
}
|
|
1620
|
+
return dokployRequest("POST", "application.create", baseUrl, token, {
|
|
1621
|
+
name: name$1,
|
|
1622
|
+
projectId,
|
|
1623
|
+
environmentId
|
|
1624
|
+
});
|
|
1625
|
+
}
|
|
1626
|
+
/**
|
|
1627
|
+
* Configure application for Docker registry deployment
|
|
1628
|
+
*/
|
|
1629
|
+
async function configureApplicationRegistry(baseUrl, token, applicationId, registryId) {
|
|
1630
|
+
await dokployRequest("POST", "application.update", baseUrl, token, {
|
|
1631
|
+
applicationId,
|
|
1632
|
+
registryId
|
|
1633
|
+
});
|
|
1634
|
+
}
|
|
1635
|
+
/**
|
|
1636
|
+
* Get available registries
|
|
1637
|
+
*/
|
|
1638
|
+
async function getRegistries(baseUrl, token) {
|
|
1639
|
+
return dokployRequest("GET", "registry.all", baseUrl, token);
|
|
1640
|
+
}
|
|
1641
|
+
/**
 * Update gkm.config.ts with Dokploy configuration.
 *
 * Three cases:
 *  - no gkm.config.ts: print the snippet for the user to paste manually;
 *  - config has a `providers:` block: splice/replace a `dokploy:` entry;
 *  - no `providers:` block: append one before the final `});`.
 *
 * NOTE(review): the dokploy-replacement regex `/dokploy:\s*\{[^}]*\}/` only
 * matches a flat (non-nested) object literal; indentation of the injected
 * snippets was reconstructed from context — verify against source.
 *
 * @param {{endpoint: string, projectId: string, applicationId: string}} config
 * @param {string} [cwd] - Project root containing gkm.config.ts.
 */
async function updateConfig(config, cwd = process.cwd()) {
  const configPath = (0, node_path.join)(cwd, "gkm.config.ts");
  if (!(0, node_fs.existsSync)(configPath)) {
    // No config file: emit the snippet for manual insertion and bail out.
    logger$2.warn("\n gkm.config.ts not found. Add this configuration manually:\n");
    logger$2.log(` providers: {`);
    logger$2.log(` dokploy: {`);
    logger$2.log(` endpoint: '${config.endpoint}',`);
    logger$2.log(` projectId: '${config.projectId}',`);
    logger$2.log(` applicationId: '${config.applicationId}',`);
    logger$2.log(` },`);
    logger$2.log(` },`);
    return;
  }
  const content = await (0, node_fs_promises.readFile)(configPath, "utf-8");
  if (content.includes("dokploy:") && content.includes("applicationId:")) {
    logger$2.log("\n Dokploy config already exists in gkm.config.ts");
    logger$2.log(" Updating with new values...");
  }
  let newContent;
  // Dangling-else chain: outer test is `providers:` presence, inner is `dokploy:`.
  if (content.includes("providers:")) if (content.includes("dokploy:")) newContent = content.replace(/dokploy:\s*\{[^}]*\}/, `dokploy: {
      endpoint: '${config.endpoint}',
      projectId: '${config.projectId}',
      applicationId: '${config.applicationId}',
    }`);
  // providers block exists but no dokploy entry: insert one right after `providers: {`.
  else newContent = content.replace(/providers:\s*\{/, `providers: {
    dokploy: {
      endpoint: '${config.endpoint}',
      projectId: '${config.projectId}',
      applicationId: '${config.applicationId}',
    },`);
  // No providers block at all: append one before the trailing `});`.
  else newContent = content.replace(/}\s*\)\s*;?\s*$/, `
  providers: {
    dokploy: {
      endpoint: '${config.endpoint}',
      projectId: '${config.projectId}',
      applicationId: '${config.applicationId}',
    },
  },
});`);
  await (0, node_fs_promises.writeFile)(configPath, newContent);
  logger$2.log("\n ✓ Updated gkm.config.ts with Dokploy configuration");
}
|
|
1686
|
+
/**
 * Initialize Dokploy deployment configuration.
 *
 * Resolves the endpoint (flag or stored credentials), finds or creates the
 * Dokploy project, creates the application, optionally attaches a registry,
 * writes the resulting ids into gkm.config.ts and prints next steps.
 *
 * @param {{projectName: string, appName: string, projectId?: string,
 *   registryId?: string, endpoint?: string}} options
 * @returns {Promise<{endpoint: string, projectId: string, applicationId: string}>}
 * @throws {Error} When no endpoint is given and none is stored.
 */
async function deployInitCommand(options) {
  const { projectName, appName, projectId: existingProjectId, registryId } = options;
  // Endpoint resolution: explicit flag wins, then stored login credentials.
  let endpoint = options.endpoint;
  if (!endpoint) {
    const stored = await getDokployCredentials();
    if (stored) endpoint = stored.endpoint;
    else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
  }
  logger$2.log(`\n🚀 Initializing Dokploy deployment...`);
  logger$2.log(` Endpoint: ${endpoint}`);
  const token = await getApiToken();
  // Project resolution: explicit id > case-insensitive name lookup > create new.
  let projectId;
  if (existingProjectId) {
    projectId = existingProjectId;
    logger$2.log(`\n📁 Using existing project: ${projectId}`);
  } else {
    logger$2.log(`\n📁 Looking for project: ${projectName}`);
    const projects = await getProjects(endpoint, token);
    const existingProject = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
    if (existingProject) {
      projectId = existingProject.projectId;
      logger$2.log(` Found existing project: ${projectId}`);
    } else {
      logger$2.log(` Creating new project...`);
      const project = await createProject(endpoint, token, projectName);
      projectId = project.projectId;
      logger$2.log(` ✓ Created project: ${projectId}`);
    }
  }
  logger$2.log(`\n📦 Creating application: ${appName}`);
  const application = await createApplication(endpoint, token, appName, projectId);
  logger$2.log(` ✓ Created application: ${application.applicationId}`);
  if (registryId) {
    logger$2.log(`\n🔧 Configuring registry: ${registryId}`);
    await configureApplicationRegistry(endpoint, token, application.applicationId, registryId);
    logger$2.log(` ✓ Registry configured`);
  } else try {
    // Best-effort: list registries as a hint; failures here are non-fatal.
    const registries = await getRegistries(endpoint, token);
    if (registries.length > 0) {
      logger$2.log(`\n📋 Available registries:`);
      for (const reg of registries) logger$2.log(` - ${reg.registryName}: ${reg.registryUrl} (${reg.registryId})`);
      logger$2.log(`\n To use a registry, run with --registry-id <id>`);
    }
  } catch {}
  const config = {
    endpoint,
    projectId,
    applicationId: application.applicationId
  };
  // Persist the ids so later `gkm deploy` runs can find them.
  await updateConfig(config);
  logger$2.log(`\n✅ Dokploy deployment initialized!`);
  logger$2.log(`\n📋 Configuration:`);
  logger$2.log(` Project ID: ${projectId}`);
  logger$2.log(` Application ID: ${application.applicationId}`);
  logger$2.log(`\n🔗 View in Dokploy: ${endpoint}/project/${projectId}`);
  logger$2.log(`\n📝 Next steps:`);
  logger$2.log(` 1. Initialize secrets: gkm secrets:init --stage production`);
  logger$2.log(` 2. Deploy: gkm deploy --provider dokploy --stage production`);
  return config;
}
|
|
1749
|
+
/**
 * List available Dokploy resources.
 *
 * Supports `resource === "projects"` and `resource === "registries"`; any
 * other value silently prints nothing.
 *
 * @param {{resource: string, endpoint?: string}} options
 * @throws {Error} When no endpoint is given and none is stored.
 */
async function deployListCommand(options) {
  // Endpoint resolution mirrors deployInitCommand: flag, then stored login.
  let endpoint = options.endpoint;
  if (!endpoint) {
    const stored = await getDokployCredentials();
    if (stored) endpoint = stored.endpoint;
    else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
  }
  const { resource } = options;
  const token = await getApiToken();
  if (resource === "projects") {
    logger$2.log(`\n📁 Projects in ${endpoint}:`);
    const projects = await getProjects(endpoint, token);
    if (projects.length === 0) {
      logger$2.log(" No projects found");
      return;
    }
    for (const project of projects) {
      logger$2.log(`\n ${project.name} (${project.projectId})`);
      if (project.description) logger$2.log(` ${project.description}`);
    }
  } else if (resource === "registries") {
    logger$2.log(`\n🐳 Registries in ${endpoint}:`);
    const registries = await getRegistries(endpoint, token);
    if (registries.length === 0) {
      logger$2.log(" No registries configured");
      logger$2.log(" Add a registry in Dokploy: Settings > Docker Registry");
      return;
    }
    for (const registry of registries) {
      logger$2.log(`\n ${registry.registryName} (${registry.registryId})`);
      logger$2.log(` URL: ${registry.registryUrl}`);
      logger$2.log(` Username: ${registry.username}`);
      if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
    }
  }
}
|
|
1788
|
+
|
|
1789
|
+
//#endregion
|
|
1790
|
+
//#region src/docker/compose.ts
|
|
1791
|
+
/** Default Docker image names, keyed by supported service. */
const DEFAULT_SERVICE_IMAGES = {
  postgres: "postgres",
  redis: "redis",
  rabbitmq: "rabbitmq"
};
/** Default Docker image tags, keyed by supported service. */
const DEFAULT_SERVICE_VERSIONS = {
  postgres: "16-alpine",
  redis: "7-alpine",
  rabbitmq: "3-management-alpine"
};
/**
 * Build the default `image:tag` reference for a known service.
 *
 * @param {"postgres"|"redis"|"rabbitmq"} serviceName - Service key.
 * @returns {string} Full image reference, e.g. "postgres:16-alpine".
 */
function getDefaultImage(serviceName) {
  const image = DEFAULT_SERVICE_IMAGES[serviceName];
  const tag = DEFAULT_SERVICE_VERSIONS[serviceName];
  return `${image}:${tag}`;
}
|
|
1807
|
+
/**
 * Normalize a services config into a Map of service name -> full image ref.
 *
 * Accepts either an array of service names (defaults applied via
 * getDefaultImage) or an object whose values are `true` (use defaults) or
 * `{ image?, version? }` overrides. Falsy / non-object values are skipped.
 *
 * @param {string[]|Object} services - Raw services config.
 * @returns {Map<string, string>} Service name to `image:tag` reference.
 */
function normalizeServices(services) {
  const normalized = new Map();
  if (Array.isArray(services)) {
    for (const serviceName of services) {
      normalized.set(serviceName, getDefaultImage(serviceName));
    }
    return normalized;
  }
  for (const [serviceName, entry] of Object.entries(services)) {
    if (entry === true) {
      // Bare `true` opts into the default image for that service.
      normalized.set(serviceName, getDefaultImage(serviceName));
    } else if (entry && typeof entry === "object") {
      if (entry.image) {
        // Full image override wins outright.
        normalized.set(serviceName, entry.image);
      } else {
        // Otherwise combine default image name with an optional tag override.
        const tag = entry.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
        normalized.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${tag}`);
      }
    }
  }
  return normalized;
}
|
|
1825
|
+
/**
 * Generate docker-compose.yml for production deployment.
 *
 * Emits an `api` service built from .gkm/docker/Dockerfile plus optional
 * postgres/redis/rabbitmq services with named volumes, healthchecks and a
 * shared bridge network. Service env vars (DATABASE_URL etc.) are added only
 * when the corresponding service is enabled.
 *
 * NOTE(review): YAML indentation inside the template was reconstructed;
 * verify against src/docker/compose.ts.
 *
 * @param {{imageName: string, registry?: string, port: number,
 *   healthCheckPath: string, services: string[]|Object}} options
 * @returns {string} docker-compose.yml contents.
 */
function generateDockerCompose(options) {
  const { imageName, registry, port, healthCheckPath, services } = options;
  const serviceMap = normalizeServices(services);
  // Registry prefix is overridable at compose time via $REGISTRY.
  const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
  let yaml = `version: '3.8'

services:
  api:
    build:
      context: ../..
      dockerfile: .gkm/docker/Dockerfile
    image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
    container_name: ${imageName}
    restart: unless-stopped
    ports:
      - "\${PORT:-${port}}:${port}"
    environment:
      - NODE_ENV=production
`;
  // Connection-string env vars only for services that are actually enabled.
  if (serviceMap.has("postgres")) yaml += `      - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
`;
  if (serviceMap.has("redis")) yaml += `      - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
`;
  if (serviceMap.has("rabbitmq")) yaml += `      - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
`;
  yaml += `    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
      interval: 30s
      timeout: 3s
      retries: 3
`;
  // The api waits for every backing service to report healthy.
  if (serviceMap.size > 0) {
    yaml += `    depends_on:
`;
    for (const serviceName of serviceMap.keys()) yaml += `      ${serviceName}:
        condition: service_healthy
`;
  }
  yaml += `    networks:
      - app-network
`;
  const postgresImage = serviceMap.get("postgres");
  if (postgresImage) yaml += `
  postgres:
    image: ${postgresImage}
    container_name: postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: \${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: \${POSTGRES_DB:-app}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  const redisImage = serviceMap.get("redis");
  if (redisImage) yaml += `
  redis:
    image: ${redisImage}
    container_name: redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  const rabbitmqImage = serviceMap.get("rabbitmq");
  if (rabbitmqImage) yaml += `
  rabbitmq:
    image: ${rabbitmqImage}
    container_name: rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
      RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
    ports:
      - "15672:15672" # Management UI
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
`;
  // Named volumes, one per enabled stateful service.
  yaml += `
volumes:
`;
  if (serviceMap.has("postgres")) yaml += `  postgres_data:
`;
  if (serviceMap.has("redis")) yaml += `  redis_data:
`;
  if (serviceMap.has("rabbitmq")) yaml += `  rabbitmq_data:
`;
  yaml += `
networks:
  app-network:
    driver: bridge
`;
  return yaml;
}
|
|
1942
|
+
/**
 * Generate a minimal docker-compose.yml for API only.
 *
 * Same `api` service shape as generateDockerCompose but with no backing
 * services, volumes, or service env vars.
 *
 * NOTE(review): YAML indentation inside the template was reconstructed;
 * verify against src/docker/compose.ts.
 *
 * @param {{imageName: string, registry?: string, port: number,
 *   healthCheckPath: string}} options
 * @returns {string} docker-compose.yml contents.
 */
function generateMinimalDockerCompose(options) {
  const { imageName, registry, port, healthCheckPath } = options;
  // Registry prefix is overridable at compose time via $REGISTRY.
  const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
  return `version: '3.8'

services:
  api:
    build:
      context: ../..
      dockerfile: .gkm/docker/Dockerfile
    image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
    container_name: ${imageName}
    restart: unless-stopped
    ports:
      - "\${PORT:-${port}}:${port}"
    environment:
      - NODE_ENV=production
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
      interval: 30s
      timeout: 3s
      retries: 3
    networks:
      - app-network

networks:
  app-network:
    driver: bridge
`;
}
|
|
1975
|
+
|
|
1976
|
+
//#endregion
|
|
1977
|
+
//#region src/docker/templates.ts
|
|
1978
|
+
/**
 * Detect the package manager from lockfiles.
 *
 * Walks up the directory tree starting at `cwd` (monorepo friendly),
 * checking lockfiles in priority order pnpm > bun > yarn > npm at each
 * level, then checks the filesystem root, and finally falls back to "pnpm".
 *
 * @param {string} [cwd] - Directory to start searching from.
 * @returns {"pnpm"|"bun"|"yarn"|"npm"} Detected package manager.
 */
function detectPackageManager$1(cwd = process.cwd()) {
  // Ordered by priority: the first lockfile found at a level wins.
  const lockfileToPm = [
    ["pnpm-lock.yaml", "pnpm"],
    ["bun.lockb", "bun"],
    ["yarn.lock", "yarn"],
    ["package-lock.json", "npm"]
  ];
  const root = (0, node_path.parse)(cwd).root;
  let current = cwd;
  while (current !== root) {
    const match = lockfileToPm.find(([lockfile]) => (0, node_fs.existsSync)((0, node_path.join)(current, lockfile)));
    if (match) return match[1];
    current = (0, node_path.dirname)(current);
  }
  // The loop stops before inspecting the root itself, so check it last.
  const rootMatch = lockfileToPm.find(([lockfile]) => (0, node_fs.existsSync)((0, node_path.join)(root, lockfile)));
  if (rootMatch) return rootMatch[1];
  return "pnpm";
}
|
|
1998
|
+
/**
 * Get package manager specific commands and paths.
 *
 * Resolves the bootstrap install command, lockfile name, fetch/install
 * commands, BuildKit cache mount settings, script-runner prefix and global
 * install command for the given package manager.
 *
 * @param {"pnpm"|"npm"|"yarn"|"bun"} pm - Package manager name.
 * @returns {object|undefined} Config consumed by the Dockerfile templates;
 *   undefined for an unknown manager (matches the original lookup table).
 */
function getPmConfig(pm) {
  switch (pm) {
    case "pnpm":
      return {
        install: "corepack enable && corepack prepare pnpm@latest --activate",
        lockfile: "pnpm-lock.yaml",
        fetch: "pnpm fetch",
        installCmd: "pnpm install --frozen-lockfile --offline",
        cacheTarget: "/root/.local/share/pnpm/store",
        cacheId: "pnpm",
        run: "pnpm",
        addGlobal: "pnpm add -g"
      };
    case "npm":
      return {
        install: "",
        lockfile: "package-lock.json",
        fetch: "",
        installCmd: "npm ci",
        cacheTarget: "/root/.npm",
        cacheId: "npm",
        run: "npm run",
        addGlobal: "npm install -g"
      };
    case "yarn":
      return {
        install: "corepack enable && corepack prepare yarn@stable --activate",
        lockfile: "yarn.lock",
        fetch: "",
        installCmd: "yarn install --frozen-lockfile",
        cacheTarget: "/root/.yarn/cache",
        cacheId: "yarn",
        run: "yarn",
        addGlobal: "yarn global add"
      };
    case "bun":
      return {
        install: "npm install -g bun",
        lockfile: "bun.lockb",
        fetch: "",
        installCmd: "bun install --frozen-lockfile",
        cacheTarget: "/root/.bun/install/cache",
        cacheId: "bun",
        run: "bun run",
        addGlobal: "bun add -g"
      };
  }
}
|
|
2046
|
+
/**
 * Generate a multi-stage Dockerfile for building from source.
 * Optimized for build speed with:
 * - BuildKit cache mounts for package manager store
 * - pnpm fetch for better layer caching (when using pnpm)
 * - Optional turbo prune for monorepos (delegates to generateTurboDockerfile)
 *
 * @param {{baseImage: string, port: number, healthCheckPath: string,
 *   turbo?: boolean, turboPackage?: string, packageManager: string}} options
 * @returns {string} Dockerfile contents.
 */
function generateMultiStageDockerfile(options) {
  const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
  // Monorepo path: hand off to the turbo-prune variant with a default package.
  if (turbo) return generateTurboDockerfile({
    ...options,
    turboPackage: turboPackage ?? "api"
  });
  const pm = getPmConfig(packageManager);
  const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
  // Only pnpm supports a separate fetch step for lockfile-only layer caching.
  const hasFetch = packageManager === "pnpm";
  const depsStage = hasFetch ? `# Copy lockfile first for better caching
COPY ${pm.lockfile} ./

# Fetch dependencies (downloads to virtual store, cached separately)
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.fetch}

# Copy package.json after fetch
COPY package.json ./

# Install from cache (fast - no network needed)
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}` : `# Copy package files
COPY package.json ${pm.lockfile} ./

# Install dependencies with cache
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}`;
  return `# syntax=docker/dockerfile:1
# Stage 1: Dependencies
FROM ${baseImage} AS deps

WORKDIR /app
${installPm}
${depsStage}

# Stage 2: Build
FROM deps AS builder

WORKDIR /app

# Copy source (deps already installed)
COPY . .

# Build production server
RUN ${pm.run} gkm build --provider server --production

# Stage 3: Production
FROM ${baseImage} AS runner

WORKDIR /app

# Install tini for proper signal handling as PID 1
RUN apk add --no-cache tini

# Create non-root user
RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

# Copy bundled server
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./

# Environment
ENV NODE_ENV=production
ENV PORT=${port}

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1

# Switch to non-root user
USER hono

EXPOSE ${port}

# Use tini as entrypoint to handle PID 1 responsibilities
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
}
|
|
2132
|
+
/**
 * Generate a Dockerfile optimized for Turbo monorepos.
 * Uses `turbo prune --docker` to create a minimal Docker context, then
 * installs dependencies from the pruned lockfile with BuildKit cache mounts.
 *
 * @param {{baseImage: string, port: number, healthCheckPath: string,
 *   turboPackage: string, packageManager: string}} options
 * @returns {string} Dockerfile contents.
 */
function generateTurboDockerfile(options) {
  const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
  const pm = getPmConfig(packageManager);
  const installPm = pm.install ? `RUN ${pm.install}` : "";
  // Only pnpm has a dedicated fetch command for lockfile-only caching.
  const hasFetch = packageManager === "pnpm";
  const depsInstall = hasFetch ? `# Fetch and install from cache
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.fetch}

RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}` : `# Install dependencies with cache
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}`;
  return `# syntax=docker/dockerfile:1
# Stage 1: Prune monorepo
FROM ${baseImage} AS pruner

WORKDIR /app

${installPm}
RUN ${pm.addGlobal} turbo

COPY . .

# Prune to only include necessary packages
RUN turbo prune ${turboPackage} --docker

# Stage 2: Install dependencies
FROM ${baseImage} AS deps

WORKDIR /app

${installPm}

# Copy pruned lockfile and package.jsons
COPY --from=pruner /app/out/${pm.lockfile} ./
COPY --from=pruner /app/out/json/ ./

${depsInstall}

# Stage 3: Build
FROM deps AS builder

WORKDIR /app

# Copy pruned source
COPY --from=pruner /app/out/full/ ./

# Build production server
RUN ${pm.run} gkm build --provider server --production

# Stage 4: Production
FROM ${baseImage} AS runner

WORKDIR /app

RUN apk add --no-cache tini

RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./

ENV NODE_ENV=production
ENV PORT=${port}

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1

USER hono

EXPOSE ${port}

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
}
|
|
2213
|
+
/**
 * Generate a slim Dockerfile for pre-built bundles.
 * Single-stage: copies an existing .gkm/server/dist/server.mjs into the
 * image, so a prior `gkm build --provider server --production` is required.
 *
 * @param {{baseImage: string, port: number, healthCheckPath: string}} options
 * @returns {string} Dockerfile contents.
 */
function generateSlimDockerfile(options) {
  const { baseImage, port, healthCheckPath } = options;
  return `# Slim Dockerfile for pre-built production bundle
FROM ${baseImage}

WORKDIR /app

# Install tini for proper signal handling as PID 1
# Handles SIGTERM propagation and zombie process reaping
RUN apk add --no-cache tini

# Create non-root user
RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

# Copy pre-built bundle
COPY .gkm/server/dist/server.mjs ./

# Environment
ENV NODE_ENV=production
ENV PORT=${port}

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1

# Switch to non-root user
USER hono

EXPOSE ${port}

# Use tini as entrypoint to handle PID 1 responsibilities
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
}
|
|
2252
|
+
/**
 * Generate .dockerignore file.
 * Excludes dependencies, non-essential build output, IDE/VCS files, logs,
 * tests, docs, env files and Docker files from the build context, while
 * keeping .gkm/server/dist, README.md and .env.example.
 *
 * @returns {string} .dockerignore contents.
 */
function generateDockerignore() {
  return `# Dependencies
node_modules
.pnpm-store

# Build output (except what we need)
.gkm/aws*
.gkm/server/*.ts
!.gkm/server/dist

# IDE and editor
.idea
.vscode
*.swp
*.swo

# Git
.git
.gitignore

# Logs
*.log
npm-debug.log*
pnpm-debug.log*

# Test files
**/*.test.ts
**/*.spec.ts
**/__tests__
coverage

# Documentation
docs
*.md
!README.md

# Environment files (handle secrets separately)
.env
.env.*
!.env.example

# Docker files (don't copy recursively)
Dockerfile*
docker-compose*
.dockerignore
`;
}
|
|
2302
|
+
/**
 * Generate docker-entrypoint.sh for custom startup logic.
 * The script is a pass-through (`exec "$@"`) with a commented-out example
 * of waiting for a database before handing off to the main command.
 *
 * @returns {string} Shell script contents.
 */
function generateDockerEntrypoint() {
  return `#!/bin/sh
set -e

# Run any custom startup scripts here
# Example: wait for database
# until nc -z $DB_HOST $DB_PORT; do
# echo "Waiting for database..."
# sleep 1
# done

# Execute the main command
exec "$@"
`;
}
|
|
2320
|
+
/**
 * Resolve Docker configuration from GkmConfig with defaults.
 *
 * Missing fields fall back to: empty registry, the package.json name with
 * any npm scope stripped (or "api") for the image name, node:22-alpine as
 * the base image and port 3000.
 *
 * @param {object} config - Loaded gkm config (may omit `docker`).
 * @returns {{registry: string, imageName: string, baseImage: string,
 *   port: number, compose: object|undefined}}
 */
function resolveDockerConfig(config) {
  const dockerOptions = config.docker ?? {};
  // Derive a fallback image name from package.json, stripping "@scope/".
  let fallbackImageName = "api";
  try {
    const packageJson = require(`${process.cwd()}/package.json`);
    if (packageJson.name) {
      fallbackImageName = packageJson.name.replace(/^@[^/]+\//, "");
    }
  } catch {
    // No readable package.json — keep the generic "api" fallback.
  }
  return {
    registry: dockerOptions.registry ?? "",
    imageName: dockerOptions.imageName ?? fallbackImageName,
    baseImage: dockerOptions.baseImage ?? "node:22-alpine",
    port: dockerOptions.port ?? 3000,
    compose: dockerOptions.compose
  };
}
|
|
2338
|
+
|
|
2339
|
+
//#endregion
|
|
2340
|
+
//#region src/docker/index.ts
|
|
2341
|
+
const logger$1 = console;
/**
 * Docker command implementation
 * Generates Dockerfile, docker-compose.yml, and related files
 *
 * Default: Multi-stage Dockerfile that builds from source inside Docker
 * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
 *
 * @param {{slim?: boolean, turbo?: boolean, turboPackage?: string,
 *   registry?: string, build?: boolean, push?: boolean, tag?: string}} options
 * @returns {Promise<{dockerfile: string, dockerCompose: string,
 *   dockerignore: string, entrypoint: string}>} Paths of the generated files.
 * @throws {Error} In slim mode when no pre-built bundle exists.
 */
async function dockerCommand(options) {
  const config = await require_config.loadConfig();
  const dockerConfig = resolveDockerConfig(config);
  const serverConfig = typeof config.providers?.server === "object" ? config.providers.server : void 0;
  const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
  const useSlim = options.slim === true;
  // Slim mode copies an existing bundle, so fail fast when it is missing.
  if (useSlim) {
    const distDir = (0, node_path.join)(process.cwd(), ".gkm", "server", "dist");
    const hasBuild = (0, node_fs.existsSync)((0, node_path.join)(distDir, "server.mjs"));
    if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
  }
  const dockerDir = (0, node_path.join)(process.cwd(), ".gkm", "docker");
  await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
  const packageManager = detectPackageManager$1();
  const templateOptions = {
    imageName: dockerConfig.imageName,
    baseImage: dockerConfig.baseImage,
    port: dockerConfig.port,
    healthCheckPath,
    prebuilt: useSlim,
    turbo: options.turbo,
    turboPackage: options.turboPackage ?? dockerConfig.imageName,
    packageManager
  };
  // Dockerfile: slim (pre-built) or multi-stage (turbo handled inside).
  const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
  const dockerMode = useSlim ? "slim" : options.turbo ? "turbo" : "multi-stage";
  const dockerfilePath = (0, node_path.join)(dockerDir, "Dockerfile");
  await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
  logger$1.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
  const composeOptions = {
    imageName: dockerConfig.imageName,
    registry: options.registry ?? dockerConfig.registry,
    port: dockerConfig.port,
    healthCheckPath,
    services: dockerConfig.compose?.services ?? {}
  };
  // Full compose file only when backing services are configured.
  const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
  const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
  const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
  await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
  logger$1.log("Generated: .gkm/docker/docker-compose.yml");
  const dockerignore = generateDockerignore();
  // .dockerignore must live at the build context root, not in .gkm/docker.
  const dockerignorePath = (0, node_path.join)(process.cwd(), ".dockerignore");
  await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
  logger$1.log("Generated: .dockerignore (project root)");
  const entrypoint = generateDockerEntrypoint();
  const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
  await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
  logger$1.log("Generated: .gkm/docker/docker-entrypoint.sh");
  const result = {
    dockerfile: dockerfilePath,
    dockerCompose: composePath,
    dockerignore: dockerignorePath,
    entrypoint: entrypointPath
  };
  // Optional follow-up actions driven by CLI flags.
  if (options.build) await buildDockerImage(dockerConfig.imageName, options);
  if (options.push) await pushDockerImage(dockerConfig.imageName, options);
  return result;
}
|
|
2408
|
+
/**
 * Build Docker image
 * Uses BuildKit for cache mount support
 *
 * @param {string} imageName - Image name (without registry prefix).
 * @param {{tag?: string, registry?: string}} options - Tag defaults to "latest".
 * @throws {Error} Wrapping the underlying failure when `docker build` fails.
 */
async function buildDockerImage(imageName, options) {
  const tag = options.tag ?? "latest";
  const registry = options.registry;
  const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
  logger$1.log(`\n🐳 Building Docker image: ${fullImageName}`);
  try {
    // DOCKER_BUILDKIT=1 is set both inline and via env for cache-mount support.
    (0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
      cwd: process.cwd(),
      stdio: "inherit",
      env: {
        ...process.env,
        DOCKER_BUILDKIT: "1"
      }
    });
    logger$1.log(`✅ Docker image built: ${fullImageName}`);
  } catch (error) {
    throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
  }
}
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
const
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
handler: "",
|
|
979
|
-
authorizer: construct.authorizer?.name ?? "none"
|
|
980
|
-
})));
|
|
981
|
-
const appInfo = {
|
|
982
|
-
handler: (0, node_path.relative)(process.cwd(), (0, node_path.join)(outputDir, "app.ts")),
|
|
983
|
-
endpoints: (0, node_path.relative)(process.cwd(), (0, node_path.join)(outputDir, "endpoints.ts"))
|
|
984
|
-
};
|
|
985
|
-
await generateServerManifest(rootOutputDir, appInfo, routeMetadata, subscriberInfos);
|
|
986
|
-
} else await generateAwsManifest(rootOutputDir, routes, functionInfos, cronInfos, subscriberInfos);
|
|
2431
|
+
/**
|
|
2432
|
+
* Push Docker image to registry
|
|
2433
|
+
*/
|
|
2434
|
+
async function pushDockerImage(imageName, options) {
|
|
2435
|
+
const tag = options.tag ?? "latest";
|
|
2436
|
+
const registry = options.registry;
|
|
2437
|
+
if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
|
|
2438
|
+
const fullImageName = `${registry}/${imageName}:${tag}`;
|
|
2439
|
+
logger$1.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
|
|
2440
|
+
try {
|
|
2441
|
+
(0, node_child_process.execSync)(`docker push ${fullImageName}`, {
|
|
2442
|
+
cwd: process.cwd(),
|
|
2443
|
+
stdio: "inherit"
|
|
2444
|
+
});
|
|
2445
|
+
logger$1.log(`✅ Docker image pushed: ${fullImageName}`);
|
|
2446
|
+
} catch (error) {
|
|
2447
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2448
|
+
}
|
|
987
2449
|
}
|
|
988
2450
|
|
|
989
2451
|
//#endregion
|
|
@@ -1061,7 +2523,7 @@ export default defineConfig({
|
|
|
1061
2523
|
content: gkmConfig
|
|
1062
2524
|
}, {
|
|
1063
2525
|
path: "tsconfig.json",
|
|
1064
|
-
content: JSON.stringify(tsConfig, null, 2)
|
|
2526
|
+
content: `${JSON.stringify(tsConfig, null, 2)}\n`
|
|
1065
2527
|
}];
|
|
1066
2528
|
const biomeConfig = {
|
|
1067
2529
|
$schema: "https://biomejs.dev/schemas/1.9.4/schema.json",
|
|
@@ -1135,15 +2597,15 @@ export default defineConfig({
|
|
|
1135
2597
|
},
|
|
1136
2598
|
{
|
|
1137
2599
|
path: "tsconfig.json",
|
|
1138
|
-
content: JSON.stringify(tsConfig, null, 2)
|
|
2600
|
+
content: `${JSON.stringify(tsConfig, null, 2)}\n`
|
|
1139
2601
|
},
|
|
1140
2602
|
{
|
|
1141
2603
|
path: "biome.json",
|
|
1142
|
-
content: JSON.stringify(biomeConfig, null, 2)
|
|
2604
|
+
content: `${JSON.stringify(biomeConfig, null, 2)}\n`
|
|
1143
2605
|
},
|
|
1144
2606
|
{
|
|
1145
2607
|
path: "turbo.json",
|
|
1146
|
-
content: JSON.stringify(turboConfig, null, 2)
|
|
2608
|
+
content: `${JSON.stringify(turboConfig, null, 2)}\n`
|
|
1147
2609
|
}
|
|
1148
2610
|
];
|
|
1149
2611
|
}
|
|
@@ -1495,11 +2957,11 @@ export type UpdateUser = z.infer<typeof updateUserSchema>;
|
|
|
1495
2957
|
return [
|
|
1496
2958
|
{
|
|
1497
2959
|
path: "packages/models/package.json",
|
|
1498
|
-
content: JSON.stringify(packageJson, null, 2)
|
|
2960
|
+
content: `${JSON.stringify(packageJson, null, 2)}\n`
|
|
1499
2961
|
},
|
|
1500
2962
|
{
|
|
1501
2963
|
path: "packages/models/tsconfig.json",
|
|
1502
|
-
content: JSON.stringify(tsConfig, null, 2)
|
|
2964
|
+
content: `${JSON.stringify(tsConfig, null, 2)}\n`
|
|
1503
2965
|
},
|
|
1504
2966
|
{
|
|
1505
2967
|
path: "packages/models/src/index.ts",
|
|
@@ -1665,7 +3127,7 @@ coverage/
|
|
|
1665
3127
|
return [
|
|
1666
3128
|
{
|
|
1667
3129
|
path: "package.json",
|
|
1668
|
-
content: JSON.stringify(rootPackageJson, null, 2)
|
|
3130
|
+
content: `${JSON.stringify(rootPackageJson, null, 2)}\n`
|
|
1669
3131
|
},
|
|
1670
3132
|
{
|
|
1671
3133
|
path: "pnpm-workspace.yaml",
|
|
@@ -1673,15 +3135,15 @@ coverage/
|
|
|
1673
3135
|
},
|
|
1674
3136
|
{
|
|
1675
3137
|
path: "tsconfig.json",
|
|
1676
|
-
content: JSON.stringify(tsConfig, null, 2)
|
|
3138
|
+
content: `${JSON.stringify(tsConfig, null, 2)}\n`
|
|
1677
3139
|
},
|
|
1678
3140
|
{
|
|
1679
3141
|
path: "biome.json",
|
|
1680
|
-
content: JSON.stringify(biomeConfig, null, 2)
|
|
3142
|
+
content: `${JSON.stringify(biomeConfig, null, 2)}\n`
|
|
1681
3143
|
},
|
|
1682
3144
|
{
|
|
1683
3145
|
path: "turbo.json",
|
|
1684
|
-
content: JSON.stringify(turboConfig, null, 2)
|
|
3146
|
+
content: `${JSON.stringify(turboConfig, null, 2)}\n`
|
|
1685
3147
|
},
|
|
1686
3148
|
{
|
|
1687
3149
|
path: ".gitignore",
|
|
@@ -2402,19 +3864,19 @@ function generatePackageJson(options, template) {
|
|
|
2402
3864
|
if (studio) dependencies$1["@geekmidas/studio"] = "workspace:*";
|
|
2403
3865
|
if (database) {
|
|
2404
3866
|
dependencies$1["@geekmidas/db"] = "workspace:*";
|
|
2405
|
-
dependencies$1
|
|
2406
|
-
dependencies$1
|
|
3867
|
+
dependencies$1.kysely = "~0.28.2";
|
|
3868
|
+
dependencies$1.pg = "~8.16.0";
|
|
2407
3869
|
devDependencies$1["@types/pg"] = "~8.15.0";
|
|
2408
3870
|
}
|
|
2409
|
-
dependencies$1
|
|
3871
|
+
dependencies$1.zod = "~4.1.0";
|
|
2410
3872
|
if (monorepo) {
|
|
2411
3873
|
delete devDependencies$1["@biomejs/biome"];
|
|
2412
|
-
delete devDependencies$1
|
|
2413
|
-
delete scripts$1
|
|
2414
|
-
delete scripts$1
|
|
3874
|
+
delete devDependencies$1.turbo;
|
|
3875
|
+
delete scripts$1.lint;
|
|
3876
|
+
delete scripts$1.fmt;
|
|
2415
3877
|
delete scripts$1["fmt:check"];
|
|
2416
3878
|
dependencies$1[`@${name$1}/models`] = "workspace:*";
|
|
2417
|
-
delete dependencies$1
|
|
3879
|
+
delete dependencies$1.zod;
|
|
2418
3880
|
}
|
|
2419
3881
|
const sortObject = (obj) => Object.fromEntries(Object.entries(obj).sort(([a], [b]) => a.localeCompare(b)));
|
|
2420
3882
|
let packageName = name$1;
|
|
@@ -2438,7 +3900,7 @@ function generatePackageJson(options, template) {
|
|
|
2438
3900
|
};
|
|
2439
3901
|
return [{
|
|
2440
3902
|
path: "package.json",
|
|
2441
|
-
content: JSON.stringify(packageJson, null, 2)
|
|
3903
|
+
content: `${JSON.stringify(packageJson, null, 2)}\n`
|
|
2442
3904
|
}];
|
|
2443
3905
|
}
|
|
2444
3906
|
|
|
@@ -2498,7 +3960,6 @@ function getInstallCommand(pkgManager) {
|
|
|
2498
3960
|
case "pnpm": return "pnpm install";
|
|
2499
3961
|
case "yarn": return "yarn";
|
|
2500
3962
|
case "bun": return "bun install";
|
|
2501
|
-
case "npm":
|
|
2502
3963
|
default: return "npm install";
|
|
2503
3964
|
}
|
|
2504
3965
|
}
|
|
@@ -2510,7 +3971,6 @@ function getRunCommand(pkgManager, script) {
|
|
|
2510
3971
|
case "pnpm": return `pnpm ${script}`;
|
|
2511
3972
|
case "yarn": return `yarn ${script}`;
|
|
2512
3973
|
case "bun": return `bun run ${script}`;
|
|
2513
|
-
case "npm":
|
|
2514
3974
|
default: return `npm run ${script}`;
|
|
2515
3975
|
}
|
|
2516
3976
|
}
|
|
@@ -2594,21 +4054,12 @@ async function initCommand(projectName, options = {}) {
|
|
|
2594
4054
|
}
|
|
2595
4055
|
], { onCancel });
|
|
2596
4056
|
const name$1 = projectName || answers.name;
|
|
2597
|
-
if (!name$1)
|
|
2598
|
-
console.error(" Error: Project name is required\n");
|
|
2599
|
-
process.exit(1);
|
|
2600
|
-
}
|
|
4057
|
+
if (!name$1) process.exit(1);
|
|
2601
4058
|
if (projectName) {
|
|
2602
4059
|
const nameValid = validateProjectName(projectName);
|
|
2603
|
-
if (nameValid !== true)
|
|
2604
|
-
console.error(` Error: ${nameValid}\n`);
|
|
2605
|
-
process.exit(1);
|
|
2606
|
-
}
|
|
4060
|
+
if (nameValid !== true) process.exit(1);
|
|
2607
4061
|
const dirValid = checkDirectoryExists(projectName, cwd);
|
|
2608
|
-
if (dirValid !== true)
|
|
2609
|
-
console.error(` Error: ${dirValid}\n`);
|
|
2610
|
-
process.exit(1);
|
|
2611
|
-
}
|
|
4062
|
+
if (dirValid !== true) process.exit(1);
|
|
2612
4063
|
}
|
|
2613
4064
|
const monorepo = options.monorepo ?? (options.yes ? false : answers.monorepo ?? false);
|
|
2614
4065
|
const database = options.yes ? true : answers.database ?? true;
|
|
@@ -2638,14 +4089,14 @@ async function initCommand(projectName, options = {}) {
|
|
|
2638
4089
|
...generateDockerFiles(templateOptions, template)
|
|
2639
4090
|
];
|
|
2640
4091
|
const rootFiles = [...generateMonorepoFiles(templateOptions, template), ...generateModelsPackage(templateOptions)];
|
|
2641
|
-
for (const { path
|
|
2642
|
-
const fullPath = (0, node_path.join)(targetDir, path
|
|
4092
|
+
for (const { path, content } of rootFiles) {
|
|
4093
|
+
const fullPath = (0, node_path.join)(targetDir, path);
|
|
2643
4094
|
await (0, node_fs_promises.mkdir)((0, node_path.dirname)(fullPath), { recursive: true });
|
|
2644
4095
|
await (0, node_fs_promises.writeFile)(fullPath, content);
|
|
2645
4096
|
}
|
|
2646
|
-
for (const { path
|
|
2647
|
-
const fullPath = (0, node_path.join)(appDir, path
|
|
2648
|
-
const
|
|
4097
|
+
for (const { path, content } of appFiles) {
|
|
4098
|
+
const fullPath = (0, node_path.join)(appDir, path);
|
|
4099
|
+
const _displayPath = isMonorepo ? `${apiPath}/${path}` : path;
|
|
2649
4100
|
await (0, node_fs_promises.mkdir)((0, node_path.dirname)(fullPath), { recursive: true });
|
|
2650
4101
|
await (0, node_fs_promises.writeFile)(fullPath, content);
|
|
2651
4102
|
}
|
|
@@ -2655,9 +4106,7 @@ async function initCommand(projectName, options = {}) {
|
|
|
2655
4106
|
cwd: targetDir,
|
|
2656
4107
|
stdio: "inherit"
|
|
2657
4108
|
});
|
|
2658
|
-
} catch {
|
|
2659
|
-
console.error("\n Warning: Failed to install dependencies.");
|
|
2660
|
-
}
|
|
4109
|
+
} catch {}
|
|
2661
4110
|
try {
|
|
2662
4111
|
(0, node_child_process.execSync)("npx @biomejs/biome format --write --unsafe .", {
|
|
2663
4112
|
cwd: targetDir,
|
|
@@ -2665,7 +4114,310 @@ async function initCommand(projectName, options = {}) {
|
|
|
2665
4114
|
});
|
|
2666
4115
|
} catch {}
|
|
2667
4116
|
}
|
|
2668
|
-
const
|
|
4117
|
+
const _devCommand = getRunCommand(pkgManager, "dev");
|
|
4118
|
+
}
|
|
4119
|
+
|
|
4120
|
+
//#endregion
|
|
4121
|
+
//#region src/secrets/generator.ts
|
|
4122
|
+
/**
|
|
4123
|
+
* Generate a secure random password using URL-safe base64 characters.
|
|
4124
|
+
* @param length Password length (default: 32)
|
|
4125
|
+
*/
|
|
4126
|
+
function generateSecurePassword(length = 32) {
|
|
4127
|
+
return (0, node_crypto.randomBytes)(Math.ceil(length * 3 / 4)).toString("base64url").slice(0, length);
|
|
4128
|
+
}
|
|
4129
|
+
/** Default service configurations */
|
|
4130
|
+
const SERVICE_DEFAULTS = {
|
|
4131
|
+
postgres: {
|
|
4132
|
+
host: "postgres",
|
|
4133
|
+
port: 5432,
|
|
4134
|
+
username: "app",
|
|
4135
|
+
database: "app"
|
|
4136
|
+
},
|
|
4137
|
+
redis: {
|
|
4138
|
+
host: "redis",
|
|
4139
|
+
port: 6379,
|
|
4140
|
+
username: "default"
|
|
4141
|
+
},
|
|
4142
|
+
rabbitmq: {
|
|
4143
|
+
host: "rabbitmq",
|
|
4144
|
+
port: 5672,
|
|
4145
|
+
username: "app",
|
|
4146
|
+
vhost: "/"
|
|
4147
|
+
}
|
|
4148
|
+
};
|
|
4149
|
+
/**
|
|
4150
|
+
* Generate credentials for a specific service.
|
|
4151
|
+
*/
|
|
4152
|
+
function generateServiceCredentials(service) {
|
|
4153
|
+
const defaults = SERVICE_DEFAULTS[service];
|
|
4154
|
+
return {
|
|
4155
|
+
...defaults,
|
|
4156
|
+
password: generateSecurePassword()
|
|
4157
|
+
};
|
|
4158
|
+
}
|
|
4159
|
+
/**
|
|
4160
|
+
* Generate credentials for multiple services.
|
|
4161
|
+
*/
|
|
4162
|
+
function generateServicesCredentials(services) {
|
|
4163
|
+
const result = {};
|
|
4164
|
+
for (const service of services) result[service] = generateServiceCredentials(service);
|
|
4165
|
+
return result;
|
|
4166
|
+
}
|
|
4167
|
+
/**
|
|
4168
|
+
* Generate connection URL for PostgreSQL.
|
|
4169
|
+
*/
|
|
4170
|
+
function generatePostgresUrl(creds) {
|
|
4171
|
+
const { username, password, host, port, database } = creds;
|
|
4172
|
+
return `postgresql://${username}:${encodeURIComponent(password)}@${host}:${port}/${database}`;
|
|
4173
|
+
}
|
|
4174
|
+
/**
|
|
4175
|
+
* Generate connection URL for Redis.
|
|
4176
|
+
*/
|
|
4177
|
+
function generateRedisUrl(creds) {
|
|
4178
|
+
const { password, host, port } = creds;
|
|
4179
|
+
return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
|
|
4180
|
+
}
|
|
4181
|
+
/**
|
|
4182
|
+
* Generate connection URL for RabbitMQ.
|
|
4183
|
+
*/
|
|
4184
|
+
function generateRabbitmqUrl(creds) {
|
|
4185
|
+
const { username, password, host, port, vhost } = creds;
|
|
4186
|
+
const encodedVhost = encodeURIComponent(vhost ?? "/");
|
|
4187
|
+
return `amqp://${username}:${encodeURIComponent(password)}@${host}:${port}/${encodedVhost}`;
|
|
4188
|
+
}
|
|
4189
|
+
/**
|
|
4190
|
+
* Generate connection URLs from service credentials.
|
|
4191
|
+
*/
|
|
4192
|
+
function generateConnectionUrls(services) {
|
|
4193
|
+
const urls = {};
|
|
4194
|
+
if (services.postgres) urls.DATABASE_URL = generatePostgresUrl(services.postgres);
|
|
4195
|
+
if (services.redis) urls.REDIS_URL = generateRedisUrl(services.redis);
|
|
4196
|
+
if (services.rabbitmq) urls.RABBITMQ_URL = generateRabbitmqUrl(services.rabbitmq);
|
|
4197
|
+
return urls;
|
|
4198
|
+
}
|
|
4199
|
+
/**
|
|
4200
|
+
* Create a new StageSecrets object with generated credentials.
|
|
4201
|
+
*/
|
|
4202
|
+
function createStageSecrets(stage, services) {
|
|
4203
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
4204
|
+
const serviceCredentials = generateServicesCredentials(services);
|
|
4205
|
+
const urls = generateConnectionUrls(serviceCredentials);
|
|
4206
|
+
return {
|
|
4207
|
+
stage,
|
|
4208
|
+
createdAt: now,
|
|
4209
|
+
updatedAt: now,
|
|
4210
|
+
services: serviceCredentials,
|
|
4211
|
+
urls,
|
|
4212
|
+
custom: {}
|
|
4213
|
+
};
|
|
4214
|
+
}
|
|
4215
|
+
/**
|
|
4216
|
+
* Rotate password for a specific service.
|
|
4217
|
+
*/
|
|
4218
|
+
function rotateServicePassword(secrets, service) {
|
|
4219
|
+
const currentCreds = secrets.services[service];
|
|
4220
|
+
if (!currentCreds) throw new Error(`Service "${service}" not configured in secrets`);
|
|
4221
|
+
const newCreds = {
|
|
4222
|
+
...currentCreds,
|
|
4223
|
+
password: generateSecurePassword()
|
|
4224
|
+
};
|
|
4225
|
+
const newServices = {
|
|
4226
|
+
...secrets.services,
|
|
4227
|
+
[service]: newCreds
|
|
4228
|
+
};
|
|
4229
|
+
return {
|
|
4230
|
+
...secrets,
|
|
4231
|
+
updatedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
4232
|
+
services: newServices,
|
|
4233
|
+
urls: generateConnectionUrls(newServices)
|
|
4234
|
+
};
|
|
4235
|
+
}
|
|
4236
|
+
|
|
4237
|
+
//#endregion
|
|
4238
|
+
//#region src/secrets/index.ts
|
|
4239
|
+
const logger = console;
|
|
4240
|
+
/**
|
|
4241
|
+
* Extract service names from compose config.
|
|
4242
|
+
*/
|
|
4243
|
+
function getServicesFromConfig(services) {
|
|
4244
|
+
if (!services) return [];
|
|
4245
|
+
if (Array.isArray(services)) return services;
|
|
4246
|
+
return Object.entries(services).filter(([, config]) => config).map(([name$1]) => name$1);
|
|
4247
|
+
}
|
|
4248
|
+
/**
|
|
4249
|
+
* Initialize secrets for a stage.
|
|
4250
|
+
* Generates secure random passwords for configured services.
|
|
4251
|
+
*/
|
|
4252
|
+
async function secretsInitCommand(options) {
|
|
4253
|
+
const { stage, force } = options;
|
|
4254
|
+
if (!force && require_storage.secretsExist(stage)) {
|
|
4255
|
+
logger.error(`Secrets already exist for stage "${stage}". Use --force to overwrite.`);
|
|
4256
|
+
process.exit(1);
|
|
4257
|
+
}
|
|
4258
|
+
const config = await require_config.loadConfig();
|
|
4259
|
+
const services = getServicesFromConfig(config.docker?.compose?.services);
|
|
4260
|
+
if (services.length === 0) logger.warn("No services configured in docker.compose.services. Creating secrets with empty services.");
|
|
4261
|
+
const secrets = createStageSecrets(stage, services);
|
|
4262
|
+
await require_storage.writeStageSecrets(secrets);
|
|
4263
|
+
logger.log(`\n✓ Secrets initialized for stage "${stage}"`);
|
|
4264
|
+
logger.log(` Location: .gkm/secrets/${stage}.json`);
|
|
4265
|
+
logger.log("\n Generated credentials for:");
|
|
4266
|
+
for (const service of services) logger.log(` - ${service}`);
|
|
4267
|
+
if (secrets.urls.DATABASE_URL) logger.log(`\n DATABASE_URL: ${maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
4268
|
+
if (secrets.urls.REDIS_URL) logger.log(` REDIS_URL: ${maskUrl(secrets.urls.REDIS_URL)}`);
|
|
4269
|
+
if (secrets.urls.RABBITMQ_URL) logger.log(` RABBITMQ_URL: ${maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
4270
|
+
logger.log(`\n Use "gkm secrets:show --stage ${stage}" to view secrets`);
|
|
4271
|
+
logger.log(" Use \"gkm secrets:set <KEY> <VALUE> --stage " + stage + "\" to add custom secrets");
|
|
4272
|
+
}
|
|
4273
|
+
/**
|
|
4274
|
+
* Read all data from stdin.
|
|
4275
|
+
*/
|
|
4276
|
+
async function readStdin() {
|
|
4277
|
+
const chunks = [];
|
|
4278
|
+
for await (const chunk of process.stdin) chunks.push(chunk);
|
|
4279
|
+
return Buffer.concat(chunks).toString("utf-8").trim();
|
|
4280
|
+
}
|
|
4281
|
+
/**
|
|
4282
|
+
* Set a custom secret.
|
|
4283
|
+
* If value is not provided, reads from stdin.
|
|
4284
|
+
*/
|
|
4285
|
+
async function secretsSetCommand(key, value, options) {
|
|
4286
|
+
const { stage } = options;
|
|
4287
|
+
let secretValue = value;
|
|
4288
|
+
if (!secretValue) {
|
|
4289
|
+
if (process.stdin.isTTY) {
|
|
4290
|
+
logger.error("No value provided. Use: gkm secrets:set KEY VALUE --stage <stage>");
|
|
4291
|
+
logger.error("Or pipe from stdin: echo \"value\" | gkm secrets:set KEY --stage <stage>");
|
|
4292
|
+
process.exit(1);
|
|
4293
|
+
}
|
|
4294
|
+
secretValue = await readStdin();
|
|
4295
|
+
if (!secretValue) {
|
|
4296
|
+
logger.error("No value received from stdin");
|
|
4297
|
+
process.exit(1);
|
|
4298
|
+
}
|
|
4299
|
+
}
|
|
4300
|
+
try {
|
|
4301
|
+
await require_storage.setCustomSecret(stage, key, secretValue);
|
|
4302
|
+
logger.log(`\n✓ Secret "${key}" set for stage "${stage}"`);
|
|
4303
|
+
} catch (error) {
|
|
4304
|
+
logger.error(error instanceof Error ? error.message : "Failed to set secret");
|
|
4305
|
+
process.exit(1);
|
|
4306
|
+
}
|
|
4307
|
+
}
|
|
4308
|
+
/**
|
|
4309
|
+
* Show secrets for a stage.
|
|
4310
|
+
*/
|
|
4311
|
+
async function secretsShowCommand(options) {
|
|
4312
|
+
const { stage, reveal } = options;
|
|
4313
|
+
const secrets = await require_storage.readStageSecrets(stage);
|
|
4314
|
+
if (!secrets) {
|
|
4315
|
+
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
4316
|
+
process.exit(1);
|
|
4317
|
+
}
|
|
4318
|
+
logger.log(`\nSecrets for stage "${stage}":`);
|
|
4319
|
+
logger.log(` Created: ${secrets.createdAt}`);
|
|
4320
|
+
logger.log(` Updated: ${secrets.updatedAt}`);
|
|
4321
|
+
logger.log("\nService Credentials:");
|
|
4322
|
+
for (const [service, creds] of Object.entries(secrets.services)) if (creds) {
|
|
4323
|
+
logger.log(`\n ${service}:`);
|
|
4324
|
+
logger.log(` host: ${creds.host}`);
|
|
4325
|
+
logger.log(` port: ${creds.port}`);
|
|
4326
|
+
logger.log(` username: ${creds.username}`);
|
|
4327
|
+
logger.log(` password: ${reveal ? creds.password : require_storage.maskPassword(creds.password)}`);
|
|
4328
|
+
if (creds.database) logger.log(` database: ${creds.database}`);
|
|
4329
|
+
if (creds.vhost) logger.log(` vhost: ${creds.vhost}`);
|
|
4330
|
+
}
|
|
4331
|
+
logger.log("\nConnection URLs:");
|
|
4332
|
+
if (secrets.urls.DATABASE_URL) logger.log(` DATABASE_URL: ${reveal ? secrets.urls.DATABASE_URL : maskUrl(secrets.urls.DATABASE_URL)}`);
|
|
4333
|
+
if (secrets.urls.REDIS_URL) logger.log(` REDIS_URL: ${reveal ? secrets.urls.REDIS_URL : maskUrl(secrets.urls.REDIS_URL)}`);
|
|
4334
|
+
if (secrets.urls.RABBITMQ_URL) logger.log(` RABBITMQ_URL: ${reveal ? secrets.urls.RABBITMQ_URL : maskUrl(secrets.urls.RABBITMQ_URL)}`);
|
|
4335
|
+
const customKeys = Object.keys(secrets.custom);
|
|
4336
|
+
if (customKeys.length > 0) {
|
|
4337
|
+
logger.log("\nCustom Secrets:");
|
|
4338
|
+
for (const [key, value] of Object.entries(secrets.custom)) logger.log(` ${key}: ${reveal ? value : require_storage.maskPassword(value)}`);
|
|
4339
|
+
}
|
|
4340
|
+
if (!reveal) logger.log("\nUse --reveal to show actual values");
|
|
4341
|
+
}
|
|
4342
|
+
/**
|
|
4343
|
+
* Rotate passwords for services.
|
|
4344
|
+
*/
|
|
4345
|
+
async function secretsRotateCommand(options) {
|
|
4346
|
+
const { stage, service } = options;
|
|
4347
|
+
const secrets = await require_storage.readStageSecrets(stage);
|
|
4348
|
+
if (!secrets) {
|
|
4349
|
+
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
4350
|
+
process.exit(1);
|
|
4351
|
+
}
|
|
4352
|
+
if (service) {
|
|
4353
|
+
if (!secrets.services[service]) {
|
|
4354
|
+
logger.error(`Service "${service}" not configured in stage "${stage}"`);
|
|
4355
|
+
process.exit(1);
|
|
4356
|
+
}
|
|
4357
|
+
const updated = rotateServicePassword(secrets, service);
|
|
4358
|
+
await require_storage.writeStageSecrets(updated);
|
|
4359
|
+
logger.log(`\n✓ Password rotated for ${service} in stage "${stage}"`);
|
|
4360
|
+
} else {
|
|
4361
|
+
let updated = secrets;
|
|
4362
|
+
const services = Object.keys(secrets.services);
|
|
4363
|
+
for (const svc of services) updated = rotateServicePassword(updated, svc);
|
|
4364
|
+
await require_storage.writeStageSecrets(updated);
|
|
4365
|
+
logger.log(`\n✓ Passwords rotated for all services in stage "${stage}": ${services.join(", ")}`);
|
|
4366
|
+
}
|
|
4367
|
+
logger.log(`\nUse "gkm secrets:show --stage ${stage}" to view new values`);
|
|
4368
|
+
}
|
|
4369
|
+
/**
|
|
4370
|
+
* Import secrets from a JSON file.
|
|
4371
|
+
*/
|
|
4372
|
+
async function secretsImportCommand(file, options) {
|
|
4373
|
+
const { stage, merge = true } = options;
|
|
4374
|
+
if (!(0, node_fs.existsSync)(file)) {
|
|
4375
|
+
logger.error(`File not found: ${file}`);
|
|
4376
|
+
process.exit(1);
|
|
4377
|
+
}
|
|
4378
|
+
let importedSecrets;
|
|
4379
|
+
try {
|
|
4380
|
+
const content = await (0, node_fs_promises.readFile)(file, "utf-8");
|
|
4381
|
+
importedSecrets = JSON.parse(content);
|
|
4382
|
+
if (typeof importedSecrets !== "object" || importedSecrets === null) throw new Error("JSON must be an object");
|
|
4383
|
+
for (const [key, value] of Object.entries(importedSecrets)) if (typeof value !== "string") throw new Error(`Value for "${key}" must be a string, got ${typeof value}`);
|
|
4384
|
+
} catch (error) {
|
|
4385
|
+
logger.error(`Failed to parse JSON file: ${error instanceof Error ? error.message : "Invalid JSON"}`);
|
|
4386
|
+
process.exit(1);
|
|
4387
|
+
}
|
|
4388
|
+
const secrets = await require_storage.readStageSecrets(stage);
|
|
4389
|
+
if (!secrets) {
|
|
4390
|
+
logger.error(`No secrets found for stage "${stage}". Run "gkm secrets:init --stage ${stage}" first.`);
|
|
4391
|
+
process.exit(1);
|
|
4392
|
+
}
|
|
4393
|
+
const updatedCustom = merge ? {
|
|
4394
|
+
...secrets.custom,
|
|
4395
|
+
...importedSecrets
|
|
4396
|
+
} : importedSecrets;
|
|
4397
|
+
const updated = {
|
|
4398
|
+
...secrets,
|
|
4399
|
+
updatedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
4400
|
+
custom: updatedCustom
|
|
4401
|
+
};
|
|
4402
|
+
await require_storage.writeStageSecrets(updated);
|
|
4403
|
+
const importedCount = Object.keys(importedSecrets).length;
|
|
4404
|
+
const totalCount = Object.keys(updatedCustom).length;
|
|
4405
|
+
logger.log(`\n✓ Imported ${importedCount} secrets for stage "${stage}"`);
|
|
4406
|
+
if (merge && totalCount > importedCount) logger.log(` Total custom secrets: ${totalCount}`);
|
|
4407
|
+
logger.log("\n Imported keys:");
|
|
4408
|
+
for (const key of Object.keys(importedSecrets)) logger.log(` - ${key}`);
|
|
4409
|
+
}
|
|
4410
|
+
/**
|
|
4411
|
+
* Mask password in a URL for display.
|
|
4412
|
+
*/
|
|
4413
|
+
function maskUrl(url) {
|
|
4414
|
+
try {
|
|
4415
|
+
const parsed = new URL(url);
|
|
4416
|
+
if (parsed.password) parsed.password = require_storage.maskPassword(parsed.password);
|
|
4417
|
+
return parsed.toString();
|
|
4418
|
+
} catch {
|
|
4419
|
+
return url;
|
|
4420
|
+
}
|
|
2669
4421
|
}
|
|
2670
4422
|
|
|
2671
4423
|
//#endregion
|
|
@@ -2677,34 +4429,39 @@ program.command("init").description("Scaffold a new project").argument("[name]",
|
|
|
2677
4429
|
const globalOptions = program.opts();
|
|
2678
4430
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
2679
4431
|
await initCommand(name$1, options);
|
|
2680
|
-
} catch (
|
|
2681
|
-
console.error("Init failed:", error.message);
|
|
4432
|
+
} catch (_error) {
|
|
2682
4433
|
process.exit(1);
|
|
2683
4434
|
}
|
|
2684
4435
|
});
|
|
2685
|
-
program.command("build").description("Build handlers from endpoints, functions, and crons").option("--provider <provider>", "Target provider for generated handlers (aws, server)").option("--providers <providers>", "[DEPRECATED] Use --provider instead. Target providers for generated handlers (comma-separated)").option("--enable-openapi", "Enable OpenAPI documentation generation for server builds").action(async (options) => {
|
|
4436
|
+
program.command("build").description("Build handlers from endpoints, functions, and crons").option("--provider <provider>", "Target provider for generated handlers (aws, server)").option("--providers <providers>", "[DEPRECATED] Use --provider instead. Target providers for generated handlers (comma-separated)").option("--enable-openapi", "Enable OpenAPI documentation generation for server builds").option("--production", "Build for production (no dev tools, bundled output)").option("--skip-bundle", "Skip bundling step in production build").option("--stage <stage>", "Inject encrypted secrets for deployment stage").action(async (options) => {
|
|
2686
4437
|
try {
|
|
2687
4438
|
const globalOptions = program.opts();
|
|
2688
4439
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
2689
4440
|
if (options.provider) {
|
|
2690
|
-
if (!["aws", "server"].includes(options.provider))
|
|
2691
|
-
console.error(`Invalid provider: ${options.provider}. Must be 'aws' or 'server'.`);
|
|
2692
|
-
process.exit(1);
|
|
2693
|
-
}
|
|
4441
|
+
if (!["aws", "server"].includes(options.provider)) process.exit(1);
|
|
2694
4442
|
await buildCommand({
|
|
2695
4443
|
provider: options.provider,
|
|
2696
|
-
enableOpenApi: options.enableOpenapi || false
|
|
4444
|
+
enableOpenApi: options.enableOpenapi || false,
|
|
4445
|
+
production: options.production || false,
|
|
4446
|
+
skipBundle: options.skipBundle || false,
|
|
4447
|
+
stage: options.stage
|
|
2697
4448
|
});
|
|
2698
4449
|
} else if (options.providers) {
|
|
2699
|
-
console.warn("⚠️ --providers flag is deprecated. Use --provider instead.");
|
|
2700
4450
|
const providerList = [...new Set(options.providers.split(",").map((p) => p.trim()))];
|
|
2701
4451
|
await buildCommand({
|
|
2702
4452
|
providers: providerList,
|
|
2703
|
-
enableOpenApi: options.enableOpenapi || false
|
|
4453
|
+
enableOpenApi: options.enableOpenapi || false,
|
|
4454
|
+
production: options.production || false,
|
|
4455
|
+
skipBundle: options.skipBundle || false,
|
|
4456
|
+
stage: options.stage
|
|
2704
4457
|
});
|
|
2705
|
-
} else await buildCommand({
|
|
2706
|
-
|
|
2707
|
-
|
|
4458
|
+
} else await buildCommand({
|
|
4459
|
+
enableOpenApi: options.enableOpenapi || false,
|
|
4460
|
+
production: options.production || false,
|
|
4461
|
+
skipBundle: options.skipBundle || false,
|
|
4462
|
+
stage: options.stage
|
|
4463
|
+
});
|
|
4464
|
+
} catch (_error) {
|
|
2708
4465
|
process.exit(1);
|
|
2709
4466
|
}
|
|
2710
4467
|
});
|
|
@@ -2713,12 +4470,11 @@ program.command("dev").description("Start development server with automatic relo
|
|
|
2713
4470
|
const globalOptions = program.opts();
|
|
2714
4471
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
2715
4472
|
await devCommand({
|
|
2716
|
-
port: options.port ? Number.parseInt(options.port) : 3e3,
|
|
4473
|
+
port: options.port ? Number.parseInt(options.port, 10) : 3e3,
|
|
2717
4474
|
portExplicit: !!options.port,
|
|
2718
4475
|
enableOpenApi: options.enableOpenapi ?? true
|
|
2719
4476
|
});
|
|
2720
|
-
} catch (
|
|
2721
|
-
console.error("Dev server failed:", error.message);
|
|
4477
|
+
} catch (_error) {
|
|
2722
4478
|
process.exit(1);
|
|
2723
4479
|
}
|
|
2724
4480
|
});
|
|
@@ -2742,8 +4498,7 @@ program.command("openapi").description("Generate OpenAPI specification from endp
|
|
|
2742
4498
|
const globalOptions = program.opts();
|
|
2743
4499
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
2744
4500
|
await require_openapi.openapiCommand({});
|
|
2745
|
-
} catch (
|
|
2746
|
-
console.error("OpenAPI generation failed:", error.message);
|
|
4501
|
+
} catch (_error) {
|
|
2747
4502
|
process.exit(1);
|
|
2748
4503
|
}
|
|
2749
4504
|
});
|
|
@@ -2752,8 +4507,194 @@ program.command("generate:react-query").description("Generate React Query hooks
|
|
|
2752
4507
|
const globalOptions = program.opts();
|
|
2753
4508
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
2754
4509
|
await require_openapi_react_query.generateReactQueryCommand(options);
|
|
4510
|
+
} catch (_error) {
|
|
4511
|
+
process.exit(1);
|
|
4512
|
+
}
|
|
4513
|
+
});
|
|
4514
|
+
program.command("docker").description("Generate Docker deployment files").option("--build", "Build Docker image after generating files").option("--push", "Push image to registry after building").option("--tag <tag>", "Image tag", "latest").option("--registry <registry>", "Container registry URL").option("--slim", "Use slim Dockerfile (assumes pre-built bundle exists)").option("--turbo", "Use turbo prune for monorepo optimization").option("--turbo-package <name>", "Package name for turbo prune").action(async (options) => {
|
|
4515
|
+
try {
|
|
4516
|
+
const globalOptions = program.opts();
|
|
4517
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4518
|
+
await dockerCommand(options);
|
|
4519
|
+
} catch (_error) {
|
|
4520
|
+
process.exit(1);
|
|
4521
|
+
}
|
|
4522
|
+
});
|
|
4523
|
+
program.command("prepack").description("Generate Docker files for production deployment").option("--build", "Build Docker image after generating files").option("--push", "Push image to registry after building").option("--tag <tag>", "Image tag", "latest").option("--registry <registry>", "Container registry URL").option("--slim", "Build locally first, then use slim Dockerfile").option("--skip-bundle", "Skip bundling step (only with --slim)").option("--turbo", "Use turbo prune for monorepo optimization").option("--turbo-package <name>", "Package name for turbo prune").action(async (options) => {
|
|
4524
|
+
try {
|
|
4525
|
+
const globalOptions = program.opts();
|
|
4526
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4527
|
+
if (options.slim) await buildCommand({
|
|
4528
|
+
provider: "server",
|
|
4529
|
+
production: true,
|
|
4530
|
+
skipBundle: options.skipBundle
|
|
4531
|
+
});
|
|
4532
|
+
await dockerCommand({
|
|
4533
|
+
build: options.build,
|
|
4534
|
+
push: options.push,
|
|
4535
|
+
tag: options.tag,
|
|
4536
|
+
registry: options.registry,
|
|
4537
|
+
slim: options.slim,
|
|
4538
|
+
turbo: options.turbo,
|
|
4539
|
+
turboPackage: options.turboPackage
|
|
4540
|
+
});
|
|
4541
|
+
if (options.slim) {}
|
|
4542
|
+
if (options.build) {
|
|
4543
|
+
const tag = options.tag ?? "latest";
|
|
4544
|
+
const registry = options.registry;
|
|
4545
|
+
const _imageRef = registry ? `${registry}/api:${tag}` : `api:${tag}`;
|
|
4546
|
+
}
|
|
4547
|
+
} catch (_error) {
|
|
4548
|
+
process.exit(1);
|
|
4549
|
+
}
|
|
4550
|
+
});
|
|
4551
|
+
program.command("secrets:init").description("Initialize secrets for a deployment stage").requiredOption("--stage <stage>", "Stage name (e.g., production, staging)").option("--force", "Overwrite existing secrets").action(async (options) => {
|
|
4552
|
+
try {
|
|
4553
|
+
const globalOptions = program.opts();
|
|
4554
|
+
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4555
|
+
await secretsInitCommand(options);
|
|
4556
|
+
} catch (_error) {
|
|
4557
|
+
process.exit(1);
|
|
4558
|
+
}
|
|
4559
|
+
});
|
|
4560
|
+
// Register `secrets:set`: stores a single custom secret for a stage.
// The value may be omitted, in which case it is read from stdin.
program
	.command("secrets:set")
	.description("Set a custom secret for a stage")
	.argument("<key>", "Secret key (e.g., API_KEY)")
	.argument("[value]", "Secret value (reads from stdin if omitted)")
	.requiredOption("--stage <stage>", "Stage name")
	.action(async (key, value, options) => {
		try {
			const globalOptions = program.opts();
			// Honor the global --cwd flag before touching the secrets store.
			if (globalOptions.cwd) process.chdir(globalOptions.cwd);
			await secretsSetCommand(key, value, options);
		} catch (error) {
			// Surface the failure reason; previously the error was silently
			// swallowed and the process just exited with code 1.
			console.error(error instanceof Error ? error.message : "Failed to set secret");
			process.exit(1);
		}
	});
|
|
4569
|
+
// Register `secrets:show`: prints a stage's secrets (masked unless --reveal).
program
	.command("secrets:show")
	.description("Show secrets for a stage")
	.requiredOption("--stage <stage>", "Stage name")
	.option("--reveal", "Show actual secret values (not masked)")
	.action(async (options) => {
		try {
			const globalOptions = program.opts();
			// Honor the global --cwd flag before reading the secrets store.
			if (globalOptions.cwd) process.chdir(globalOptions.cwd);
			await secretsShowCommand(options);
		} catch (error) {
			// Surface the failure reason; previously the error was silently
			// swallowed and the process just exited with code 1.
			console.error(error instanceof Error ? error.message : "Failed to show secrets");
			process.exit(1);
		}
	});
|
|
4578
|
+
// Register `secrets:rotate`: regenerates service passwords for a stage,
// optionally restricted to one service via --service.
program
	.command("secrets:rotate")
	.description("Rotate service passwords")
	.requiredOption("--stage <stage>", "Stage name")
	.option("--service <service>", "Specific service to rotate (postgres, redis, rabbitmq)")
	.action(async (options) => {
		try {
			const globalOptions = program.opts();
			// Honor the global --cwd flag before touching the secrets store.
			if (globalOptions.cwd) process.chdir(globalOptions.cwd);
			await secretsRotateCommand(options);
		} catch (error) {
			// Surface the failure reason; previously the error was silently
			// swallowed and the process just exited with code 1.
			console.error(error instanceof Error ? error.message : "Failed to rotate secrets");
			process.exit(1);
		}
	});
|
|
4587
|
+
// Register `secrets:import`: loads secrets from a JSON file into a stage.
// By default new keys are merged over existing ones; --no-merge replaces all.
program
	.command("secrets:import")
	.description("Import secrets from a JSON file")
	.argument("<file>", "JSON file path (e.g., secrets.json)")
	.requiredOption("--stage <stage>", "Stage name")
	.option("--no-merge", "Replace all custom secrets instead of merging")
	.action(async (file, options) => {
		try {
			const globalOptions = program.opts();
			// Honor the global --cwd flag so relative file paths resolve correctly.
			if (globalOptions.cwd) process.chdir(globalOptions.cwd);
			await secretsImportCommand(file, options);
		} catch (error) {
			// Surface the failure reason; previously the error was silently
			// swallowed and the process just exited with code 1.
			console.error(error instanceof Error ? error.message : "Failed to import secrets");
			process.exit(1);
		}
	});
|
|
4596
|
+
// Register `deploy`: runs a deployment to one of the supported providers
// after validating the --provider flag against the known list.
program
	.command("deploy")
	.description("Deploy application to a provider")
	.requiredOption("--provider <provider>", "Deploy provider (docker, dokploy, aws-lambda)")
	.requiredOption("--stage <stage>", "Deployment stage (e.g., production, staging)")
	.option("--tag <tag>", "Image tag (default: stage-timestamp)")
	.option("--skip-push", "Skip pushing image to registry")
	.option("--skip-build", "Skip build step (use existing build)")
	.action(async (options) => {
		try {
			const globalOptions = program.opts();
			// Honor the global --cwd flag before building/deploying.
			if (globalOptions.cwd) process.chdir(globalOptions.cwd);
			// Fail fast with a clear message on an unsupported provider.
			const validProviders = [
				"docker",
				"dokploy",
				"aws-lambda"
			];
			if (!validProviders.includes(options.provider)) {
				console.error(`Invalid provider: ${options.provider}\nValid providers: ${validProviders.join(", ")}`);
				process.exit(1);
			}
			await deployCommand({
				provider: options.provider,
				stage: options.stage,
				tag: options.tag,
				skipPush: options.skipPush,
				skipBuild: options.skipBuild
			});
		} catch (error) {
			// Surface the failure reason; previously the error was silently
			// swallowed and the process just exited with code 1.
			console.error(error instanceof Error ? error.message : "Failed to deploy");
			process.exit(1);
		}
	});
|
|
4620
|
+
// Register `deploy:init`: sets up a Dokploy project + application, mapping
// CLI flags onto the option names deployInitCommand expects.
program
	.command("deploy:init")
	.description("Initialize Dokploy deployment (create project and application)")
	.option("--endpoint <url>", "Dokploy server URL (uses stored credentials if logged in)")
	.requiredOption("--project <name>", "Project name (creates if not exists)")
	.requiredOption("--app <name>", "Application name")
	.option("--project-id <id>", "Use existing project ID instead of creating")
	.option("--registry-id <id>", "Configure registry for the application")
	.action(async (opts) => {
		try {
			// Honor the global --cwd flag before any config lookups.
			const { cwd } = program.opts();
			if (cwd) process.chdir(cwd);
			await deployInitCommand({
				endpoint: opts.endpoint,
				projectName: opts.project,
				appName: opts.app,
				projectId: opts.projectId,
				registryId: opts.registryId
			});
		} catch (err) {
			const message = err instanceof Error ? err.message : "Failed to initialize deployment";
			console.error(message);
			process.exit(1);
		}
	});
|
|
4636
|
+
// Register `deploy:list`: lists Dokploy projects and/or registries.
// Passing neither --projects nor --registries lists both, projects first.
program
	.command("deploy:list")
	.description("List Dokploy resources (projects, registries)")
	.option("--endpoint <url>", "Dokploy server URL (uses stored credentials if logged in)")
	.option("--projects", "List projects")
	.option("--registries", "List registries")
	.action(async (opts) => {
		try {
			// Honor the global --cwd flag before any config lookups.
			const { cwd } = program.opts();
			if (cwd) process.chdir(cwd);
			// With no explicit filter, fall back to listing everything.
			const listAll = !opts.projects && !opts.registries;
			if (opts.projects || listAll) await deployListCommand({
				endpoint: opts.endpoint,
				resource: "projects"
			});
			if (opts.registries || listAll) await deployListCommand({
				endpoint: opts.endpoint,
				resource: "registries"
			});
		} catch (err) {
			const message = err instanceof Error ? err.message : "Failed to list resources";
			console.error(message);
			process.exit(1);
		}
	});
|
|
4663
|
+
// Register `login`: stores credentials for a deployment service.
// Only "dokploy" is supported today; other values exit with an error.
program
	.command("login")
	.description("Authenticate with a deployment service")
	.option("--service <service>", "Service to login to (dokploy)", "dokploy")
	.option("--token <token>", "API token (will prompt if not provided)")
	.option("--endpoint <url>", "Service endpoint URL")
	.action(async (opts) => {
		try {
			// Honor the global --cwd flag before any config lookups.
			const { cwd } = program.opts();
			if (cwd) process.chdir(cwd);
			const { service, token, endpoint } = opts;
			if (service !== "dokploy") {
				console.error(`Unknown service: ${service}. Supported: dokploy`);
				process.exit(1);
			}
			await loginCommand({ service, token, endpoint });
		} catch (err) {
			const message = err instanceof Error ? err.message : "Failed to login";
			console.error(message);
			process.exit(1);
		}
	});
|
|
4681
|
+
// Register `logout`: deletes stored credentials for a service (or all).
program
	.command("logout")
	.description("Remove stored credentials")
	.option("--service <service>", "Service to logout from (dokploy, all)", "dokploy")
	.action(async (opts) => {
		try {
			// Honor the global --cwd flag before any config lookups.
			const { cwd } = program.opts();
			if (cwd) process.chdir(cwd);
			await logoutCommand({ service: opts.service });
		} catch (err) {
			const message = err instanceof Error ? err.message : "Failed to logout";
			console.error(message);
			process.exit(1);
		}
	});
|
|
4691
|
+
// Register `whoami`: prints the current authentication status.
program
	.command("whoami")
	.description("Show current authentication status")
	.action(async () => {
		try {
			// Honor the global --cwd flag before any config lookups.
			const { cwd } = program.opts();
			if (cwd) process.chdir(cwd);
			await whoamiCommand();
		} catch (err) {
			const message = err instanceof Error ? err.message : "Failed to get status";
			console.error(message);
			process.exit(1);
		}
	});
|