@brewnet/cli 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/LICENSE +184 -0
  2. package/dist/admin-server-DQVIEHV3.js +14 -0
  3. package/dist/admin-server-DQVIEHV3.js.map +1 -0
  4. package/dist/boilerplate-manager-P6QYUU7Q.js +29 -0
  5. package/dist/boilerplate-manager-P6QYUU7Q.js.map +1 -0
  6. package/dist/chunk-2VWMDHGI.js +1393 -0
  7. package/dist/chunk-2VWMDHGI.js.map +1 -0
  8. package/dist/chunk-4TJMJZMO.js +1173 -0
  9. package/dist/chunk-4TJMJZMO.js.map +1 -0
  10. package/dist/chunk-BAVGYMGA.js +114 -0
  11. package/dist/chunk-BAVGYMGA.js.map +1 -0
  12. package/dist/chunk-DH2VK3YI.js +293 -0
  13. package/dist/chunk-DH2VK3YI.js.map +1 -0
  14. package/dist/chunk-HCHY5UIQ.js +301 -0
  15. package/dist/chunk-HCHY5UIQ.js.map +1 -0
  16. package/dist/chunk-JFPHGZ6Z.js +254 -0
  17. package/dist/chunk-JFPHGZ6Z.js.map +1 -0
  18. package/dist/chunk-SIXBB6JU.js +2973 -0
  19. package/dist/chunk-SIXBB6JU.js.map +1 -0
  20. package/dist/chunk-SYV6PK3R.js +181 -0
  21. package/dist/chunk-SYV6PK3R.js.map +1 -0
  22. package/dist/chunk-ZKMWE5AH.js +444 -0
  23. package/dist/chunk-ZKMWE5AH.js.map +1 -0
  24. package/dist/cloudflare-client-TFT6VCXF.js +32 -0
  25. package/dist/cloudflare-client-TFT6VCXF.js.map +1 -0
  26. package/dist/compose-generator-O7GSIJ2S.js +19 -0
  27. package/dist/compose-generator-O7GSIJ2S.js.map +1 -0
  28. package/dist/frameworks-Z7VXDGP4.js +18 -0
  29. package/dist/frameworks-Z7VXDGP4.js.map +1 -0
  30. package/dist/index.d.ts +22 -0
  31. package/dist/index.js +7897 -0
  32. package/dist/index.js.map +1 -0
  33. package/dist/services/admin-daemon.d.ts +2 -0
  34. package/dist/services/admin-daemon.js +33 -0
  35. package/dist/services/admin-daemon.js.map +1 -0
  36. package/dist/stacks-M4FBTVO5.js +16 -0
  37. package/dist/stacks-M4FBTVO5.js.map +1 -0
  38. package/dist/state-2SI3P4JG.js +27 -0
  39. package/dist/state-2SI3P4JG.js.map +1 -0
  40. package/package.json +44 -0
@@ -0,0 +1,2973 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ DomainManager,
4
+ addApp,
5
+ addService,
6
+ appendDeployHistory,
7
+ createBackup,
8
+ getLogStats,
9
+ listBackups,
10
+ queryLogs,
11
+ readApps,
12
+ readDeployHistory,
13
+ removeApp,
14
+ removeService,
15
+ updateApp
16
+ } from "./chunk-2VWMDHGI.js";
17
+ import {
18
+ verifyToken
19
+ } from "./chunk-JFPHGZ6Z.js";
20
+ import {
21
+ SERVICE_REGISTRY,
22
+ getServiceDefinition
23
+ } from "./chunk-4TJMJZMO.js";
24
+ import {
25
+ getLastProject,
26
+ loadState,
27
+ logger
28
+ } from "./chunk-ZKMWE5AH.js";
29
+ import {
30
+ getStackById
31
+ } from "./chunk-SYV6PK3R.js";
32
+
33
+ // src/services/admin-server.ts
34
+ import { createServer } from "http";
35
+ import { createConnection } from "net";
36
+ import { join as join2, resolve, extname } from "path";
37
+ import { existsSync as existsSync3, readFileSync as readFileSync3, writeFileSync as writeFileSync3, statSync, createReadStream } from "fs";
38
+ import { fileURLToPath } from "url";
39
+ import { homedir as homedir2 } from "os";
40
+ import Dockerode from "dockerode";
41
+
42
+ // src/services/app-manager.ts
43
+ import { existsSync as existsSync2, readFileSync as readFileSync2, writeFileSync as writeFileSync2, readdirSync } from "fs";
44
+ import { join } from "path";
45
+ import { homedir } from "os";
46
+ import { randomBytes } from "crypto";
47
+ import { execa } from "execa";
48
+
49
+ // src/services/gitea-client.ts
50
+ import { existsSync, readFileSync, writeFileSync, chmodSync, mkdirSync, unlinkSync } from "fs";
51
+ import { dirname } from "path";
52
+ import { execSync } from "child_process";
53
/**
 * Thin client for the local Gitea instance's REST API (v1).
 *
 * Authentication model: a long-lived API token is created once via Basic Auth
 * (`_createToken`) and cached on disk at `config.tokenPath`; all subsequent
 * calls send `Authorization: token <token>` headers built by `authHeaders()`.
 *
 * config fields read by this class: { baseUrl, username, password, tokenPath }.
 */
var GiteaClient = class {
  // Connection/auth settings injected by the caller; never mutated after construction.
  config;
  constructor(config) {
    this.config = config;
  }
  // ---------------------------------------------------------------------------
  // Token management
  // ---------------------------------------------------------------------------
  /**
   * Create a Gitea API token via Basic Auth.
   * If the admin account has mustChangePassword=true (403), auto-fixes via docker exec and retries.
   * Saves the token to tokenPath on success.
   * Returns { wasFixed } so callers can report whether the auto-fix ran.
   */
  async _createToken() {
    const { tokenPath, baseUrl, username, password } = this.config;
    const basic = Buffer.from(`${username}:${password}`).toString("base64");
    // Each attempt gets a unique token name (timestamped) and an 8s timeout.
    const makeRequest = () => fetch(`${baseUrl}/api/v1/users/${username}/tokens`, {
      method: "POST",
      headers: { Authorization: `Basic ${basic}`, "Content-Type": "application/json" },
      body: JSON.stringify({
        name: `brewnet-${Date.now()}`,
        scopes: ["write:repository", "read:repository", "write:user", "read:user"]
      }),
      signal: AbortSignal.timeout(8e3)
    });
    let res = await makeRequest();
    let wasFixed = false;
    if (!res.ok) {
      const body = await res.text();
      // Gitea returns 403 with a "must change password" message when the admin
      // account is still flagged; clear the flag inside the container and retry once.
      if (res.status === 403 && body.includes("must change")) {
        try {
          execSync(
            `docker exec -u git brewnet-gitea gitea admin user change-password --username ${username} --password ${password} --must-change-password=false`,
            { stdio: "pipe" }
          );
        } catch (e) {
          const stderr = e.stderr?.toString().trim() ?? String(e);
          throw new Error(
            `Gitea admin requires password change \u2014 auto-fix failed:
${stderr}
Manual fix: docker exec -u git brewnet-gitea gitea admin user change-password --username ${username} --password <password> --must-change-password=false`
          );
        }
        wasFixed = true;
        res = await makeRequest();
        if (!res.ok) {
          throw new Error(
            `Gitea token creation failed after auto-fix: ${res.status} ${await res.text()}`
          );
        }
      } else {
        throw new Error(`Gitea token creation failed: ${res.status} ${body}`);
      }
    }
    const data = await res.json();
    // Persist the token (Gitea returns it in `sha1`); 384 decimal == 0o600 (owner read/write only).
    mkdirSync(dirname(tokenPath), { recursive: true });
    writeFileSync(tokenPath, data.sha1, "utf-8");
    chmodSync(tokenPath, 384);
    return { wasFixed };
  }
  /**
   * Explicit setup step — call once before any API operations.
   * Validates any cached token; deletes and re-creates if stale (401).
   * Returns what happened so the caller can surface it in job step logs.
   */
  async prepare() {
    const { tokenPath, baseUrl } = this.config;
    if (existsSync(tokenPath)) {
      const token = readFileSync(tokenPath, "utf-8").trim();
      try {
        const check = await fetch(`${baseUrl}/api/v1/user`, {
          headers: { Authorization: `token ${token}` },
          signal: AbortSignal.timeout(8e3)
        });
        // Any status except 401 means the token is still accepted (or the
        // failure is not auth-related), so keep the cached token.
        if (check.status !== 401) {
          return { autoFixed: false, message: "token cached" };
        }
        unlinkSync(tokenPath);
      } catch {
        // Network error during validation: optimistically keep the cached token.
        return { autoFixed: false, message: "token cached (network check skipped)" };
      }
    }
    const { wasFixed } = await this._createToken();
    return {
      autoFixed: wasFixed,
      message: wasFixed ? "mustChangePassword was set \u2014 auto-fixed via docker exec; token created" : "token created"
    };
  }
  /** Return the cached token, creating one first if no cache file exists. */
  async ensureToken() {
    const { tokenPath } = this.config;
    if (existsSync(tokenPath)) {
      return readFileSync(tokenPath, "utf-8").trim();
    }
    await this._createToken();
    return readFileSync(tokenPath, "utf-8").trim();
  }
  /** Standard JSON + token-auth headers for all API calls below. */
  async authHeaders() {
    return {
      Authorization: `token ${await this.ensureToken()}`,
      "Content-Type": "application/json"
    };
  }
  // ---------------------------------------------------------------------------
  // Repository operations
  // ---------------------------------------------------------------------------
  /** True iff GET /repos/{username}/{name} returns 200. */
  async repoExists(name) {
    const { baseUrl, username } = this.config;
    const res = await fetch(
      `${baseUrl}/api/v1/repos/${username}/${name}`,
      { headers: await this.authHeaders() }
    );
    return res.status === 200;
  }
  /** Returns true if the repo exists but has no commits (empty: true from Gitea API). */
  async repoIsEmpty(name) {
    const { baseUrl, username } = this.config;
    const res = await fetch(
      `${baseUrl}/api/v1/repos/${username}/${name}`,
      { headers: await this.authHeaders() }
    );
    if (res.status !== 200) return false;
    const data = await res.json();
    return data.empty === true;
  }
  /**
   * Creates a repo and returns the clone URL.
   * NOTE: despite older docs, this creates a PUBLIC repo (`private: false` below).
   * Recovery paths: 409 (already exists) returns the existing repo's clone URL;
   * 500 with "files already exist" deletes the stale repo and retries once.
   */
  async createRepo(name, description = "") {
    const { baseUrl } = this.config;
    const res = await fetch(`${baseUrl}/api/v1/user/repos`, {
      method: "POST",
      headers: await this.authHeaders(),
      body: JSON.stringify({ name, description, private: false, auto_init: false })
    });
    if (!res.ok) {
      const body = await res.text();
      if (res.status === 409) {
        // Conflict: the repo already exists — treat as success if we can fetch it.
        const existing = await fetch(`${baseUrl}/api/v1/repos/${this.config.username}/${name}`, {
          headers: await this.authHeaders()
        });
        if (existing.ok) {
          const data2 = await existing.json();
          return data2.clone_url;
        }
      }
      if (res.status === 500 && body.includes("files already exist")) {
        // Orphaned repo files on disk without a DB record: delete and retry once.
        await this.deleteRepo(name).catch(() => {
        });
        const retry = await fetch(`${baseUrl}/api/v1/user/repos`, {
          method: "POST",
          headers: await this.authHeaders(),
          body: JSON.stringify({ name, description, private: false, auto_init: false })
        });
        if (!retry.ok) {
          throw new Error(`Gitea createRepo retry failed: ${retry.status} ${await retry.text()}`);
        }
        const retryData = await retry.json();
        return retryData.clone_url;
      }
      throw new Error(`Gitea createRepo failed: ${res.status} ${body}`);
    }
    const data = await res.json();
    return data.clone_url;
  }
  /** Patch a repo from private to public visibility. No-op if already public. */
  async makeRepoPublic(name) {
    const { baseUrl, username } = this.config;
    const res = await fetch(`${baseUrl}/api/v1/repos/${username}/${name}`, {
      method: "PATCH",
      headers: await this.authHeaders(),
      body: JSON.stringify({ private: false })
    });
    if (!res.ok) throw new Error(`Gitea makeRepoPublic failed: ${res.status} ${await res.text()}`);
  }
  /** Best-effort delete: the response status is intentionally ignored. */
  async deleteRepo(name) {
    const { baseUrl, username } = this.config;
    await fetch(`${baseUrl}/api/v1/repos/${username}/${name}`, {
      method: "DELETE",
      headers: await this.authHeaders()
    });
  }
  /** Returns all repos accessible to the authenticated user. */
  async listRepos() {
    const { baseUrl } = this.config;
    const res = await fetch(`${baseUrl}/api/v1/user/repos`, {
      headers: await this.authHeaders(),
      signal: AbortSignal.timeout(8e3)
    });
    if (!res.ok) {
      throw new Error(`Gitea listRepos failed: ${res.status} ${await res.text()}`);
    }
    return await res.json();
  }
  /** Fetch a single repo's detail (includes default_branch, ssh_url). */
  async getRepo(name) {
    const { baseUrl, username } = this.config;
    const res = await fetch(`${baseUrl}/api/v1/repos/${username}/${name}`, {
      headers: await this.authHeaders()
    });
    if (!res.ok) throw new Error(`Gitea getRepo failed: ${res.status} ${await res.text()}`);
    return res.json();
  }
  /** Get the latest commit on a branch. Returns null for empty repos. */
  async getLatestCommit(repoName, branch) {
    const { baseUrl, username } = this.config;
    const res = await fetch(
      `${baseUrl}/api/v1/repos/${username}/${repoName}/commits?sha=${encodeURIComponent(branch)}&limit=1`,
      { headers: await this.authHeaders() }
    );
    // Errors are swallowed into null — callers treat "no commit info" as non-fatal.
    if (!res.ok) return null;
    const commits = await res.json();
    if (!commits.length) return null;
    const c = commits[0];
    return {
      hash: c.sha,
      shortHash: c.sha.slice(0, 7),
      // First line of the commit message only.
      message: c.commit.message.split("\n")[0],
      date: c.commit.committer.date
    };
  }
  /** Register a push webhook on the repo. */
  async createWebhook(repoName, webhookUrl, secret) {
    const { baseUrl, username } = this.config;
    const res = await fetch(`${baseUrl}/api/v1/repos/${username}/${repoName}/hooks`, {
      method: "POST",
      headers: await this.authHeaders(),
      body: JSON.stringify({
        type: "gitea",
        config: { url: webhookUrl, content_type: "json", secret },
        events: ["push"],
        active: true
      })
    });
    if (!res.ok) throw new Error(`Gitea createWebhook failed: ${res.status} ${await res.text()}`);
  }
  /** Returns branch names for a repo. Falls back to empty array on error. */
  async listBranches(repoName) {
    const { baseUrl, username } = this.config;
    const res = await fetch(
      `${baseUrl}/api/v1/repos/${username}/${repoName}/branches?limit=50`,
      { headers: await this.authHeaders(), signal: AbortSignal.timeout(8e3) }
    );
    if (!res.ok) return [];
    const data = await res.json();
    return data.map((b) => b.name);
  }
  /** URL suitable for git remote add — includes credentials in URL (stored in .git/config which is chmod 600). */
  authedCloneUrl(cloneUrl) {
    const { username, password } = this.config;
    // URL-encode so special characters in credentials don't break the URL.
    const encUser = encodeURIComponent(username);
    const encPass = encodeURIComponent(password);
    return cloneUrl.replace("http://", `http://${encUser}:${encPass}@`);
  }
};
305
+
306
// src/services/app-manager.ts
// Per-user data directory holding all Brewnet CLI state files.
var BREWNET_DIR = join(homedir(), ".brewnet");
// Cached Gitea API token written by GiteaClient (chmod 0o600).
var GITEA_TOKEN_PATH = join(BREWNET_DIR, "gitea-token");
// Append-only deploy/rollback history consumed by getDeployHistory().
var DEPLOY_HISTORY_PATH = join(BREWNET_DIR, "deploy-history.json");
// In-memory registry of running deploy/rollback jobs, keyed by jobId.
// Process-local only: job state is lost on restart.
var jobs = /* @__PURE__ */ new Map();
311
/** Absolute path of the apps registry file (~/.brewnet/apps.json). */
function resolveAppsJsonPath() {
  const appsJsonPath = join(BREWNET_DIR, "apps.json");
  return appsJsonPath;
}
314
/**
 * Read a single `KEY=value` entry from a dotenv-style file.
 * Returns the trimmed value of the first matching line, or "" when the file
 * is missing or the key is absent. Lines are matched after trimming, so
 * commented lines (`# KEY=...`) never match.
 */
function readDotEnvValue(envPath, key) {
  if (!existsSync2(envPath)) return "";
  const prefix = `${key}=`;
  for (const rawLine of readFileSync2(envPath, "utf-8").split("\n")) {
    const line = rawLine.trim();
    if (line.startsWith(prefix)) {
      return line.slice(prefix.length).trim();
    }
  }
  return "";
}
325
// One-shot guard: boilerplate registration below runs at most once per process.
var _boilerplateRegistered = false;
/**
 * Return all registered apps from apps.json.
 *
 * On the FIRST call per process, also scans the active project for a
 * `.brewnet-boilerplate.json` manifest and registers any boilerplate apps
 * that are not yet in apps.json (matched by appDir or stackId), persisting
 * the merged list back to disk. All errors in that migration path are
 * deliberately swallowed — listing apps must never fail because of it.
 */
async function listApps() {
  const appsJson = resolveAppsJsonPath();
  const apps = readApps(appsJson);
  if (_boilerplateRegistered) return apps;
  _boilerplateRegistered = true;
  try {
    const ctx = resolveContext();
    const bpPath = join(ctx.projectPath, ".brewnet-boilerplate.json");
    if (existsSync2(bpPath)) {
      // Manifest may be a single object or an array; normalize to an array.
      const raw = JSON.parse(readFileSync2(bpPath, "utf-8"));
      const bpMetas = Array.isArray(raw) ? raw : [raw];
      let changed = false;
      for (const bp of bpMetas) {
        // Skip malformed entries lacking the two required identity fields.
        if (!bp.stackId || !bp.appDir) continue;
        const exists = apps.some((a) => a.appDir === bp.appDir || a.stackId === bp.stackId);
        if (!exists) {
          // Derive the port from the manifest's backendUrl; default to 8080.
          const port = bp.backendUrl ? parseInt(new URL(bp.backendUrl).port || "8080", 10) : 8080;
          const entry = {
            name: bp.stackId,
            mode: "boilerplate",
            stackId: bp.stackId,
            appDir: bp.appDir,
            lang: bp.lang,
            framework: bp.frameworkId,
            port,
            status: bp.status || "running",
            createdAt: (/* @__PURE__ */ new Date()).toISOString()
          };
          apps.push(entry);
          changed = true;
        }
      }
      if (changed) {
        writeFileSync2(appsJson, JSON.stringify(apps, null, 2), "utf-8");
      }
    }
  } catch {
    // Best-effort migration: any failure still returns whatever readApps gave us.
  }
  return apps;
}
366
/**
 * Read the deploy history log, optionally filtered to a single app.
 * With no appName, every recorded entry is returned.
 */
function getDeployHistory(appName) {
  const entries = readDeployHistory(DEPLOY_HISTORY_PATH);
  return appName ? entries.filter((e) => e.appName === appName) : entries;
}
371
/**
 * List all repos in the local Gitea instance for the configured admin user.
 * Ensures a valid API token first via GiteaClient.prepare().
 */
async function listGiteaRepos() {
  const { giteaBaseUrl, giteaUser, giteaPassword } = resolveContext();
  const client = new GiteaClient({
    baseUrl: giteaBaseUrl,
    username: giteaUser,
    password: giteaPassword,
    tokenPath: GITEA_TOKEN_PATH
  });
  await client.prepare();
  return client.listRepos();
}
382
/**
 * Per-app deploy settings, defaulting to manual deploys from "main"
 * when the app is unknown or has no settings stored.
 */
function getDeploySettings(appName) {
  const app = readApps(resolveAppsJsonPath()).find((a) => a.name === appName);
  return app?.deploySettings ?? { autoDeploy: false, deployBranch: "main" };
}
388
/**
 * Merge partial deploy settings into an app's stored settings and persist.
 * Throws when the app is not registered in apps.json.
 */
function updateDeploySettings(appName, settings) {
  const appsJson = resolveAppsJsonPath();
  const app = readApps(appsJson).find((a) => a.name === appName);
  if (!app) throw new Error(`App "${appName}" not found`);
  const base = app.deploySettings ?? { autoDeploy: false, deployBranch: "main" };
  app.deploySettings = { ...base, ...settings };
  updateApp(appsJson, appName, app);
}
397
/**
 * Collect git/Gitea metadata for an app: web URL, HTTP/SSH clone URLs,
 * local path, default branch, and latest commit.
 *
 * Gitea failures are non-fatal: sensible fallbacks (branch "main",
 * constructed SSH URL, latestCommit null) are returned and a warning logged.
 * As a side effect, a still-private repo is patched to public (best-effort).
 * Throws only when the app is not registered in apps.json.
 */
async function getAppGitInfo(appName) {
  const ctx = resolveContext();
  const apps = readApps(resolveAppsJsonPath());
  const app = apps.find((a) => a.name === appName);
  if (!app) throw new Error(`App "${appName}" not found`);
  const gitea = new GiteaClient({
    baseUrl: ctx.giteaBaseUrl,
    username: ctx.giteaUser,
    password: ctx.giteaPassword,
    tokenPath: GITEA_TOKEN_PATH
  });
  // Fallback values used when the Gitea API is unreachable.
  let branch = "main";
  let latestCommit = null;
  // Port 2222 is assumed to be the host-mapped Gitea SSH port — overridden by
  // the repo's real ssh_url when the API call succeeds.
  let cloneUrlSsh = `ssh://git@localhost:2222/${ctx.giteaUser}/${appName}.git`;
  try {
    const repo = await gitea.getRepo(appName);
    branch = repo.default_branch || "main";
    cloneUrlSsh = repo.ssh_url || cloneUrlSsh;
    if (repo.private) {
      // Best-effort visibility fix; failure only logs a warning.
      await gitea.makeRepoPublic(appName).catch((e) => {
        console.warn(`[app-manager] makeRepoPublic failed for ${appName}: ${e instanceof Error ? e.message : String(e)}`);
      });
    }
    latestCommit = await gitea.getLatestCommit(appName, branch);
  } catch (e) {
    console.warn(`[app-manager] getAppGitInfo Gitea call failed (${appName}): ${e instanceof Error ? e.message : String(e)}`);
  }
  return {
    giteaUrl: `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${appName}`,
    cloneUrlHttp: `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${appName}.git`,
    cloneUrlSsh,
    localPath: app.appDir,
    branch,
    latestCommit
  };
}
433
/**
 * Start an asynchronous rollback job for an app and return its jobId.
 * The actual work runs in _runRollback on the next event-loop turn;
 * callers poll progress via getJobStatus(jobId).
 */
async function rollbackApp(appName, commitHash) {
  const rollbackJob = newJob(appName, ["Checkout", "Build & Start", "Health check"]);
  jobs.set(rollbackJob.jobId, rollbackJob);
  setImmediate(() => void _runRollback(rollbackJob, appName, commitHash));
  return rollbackJob.jobId;
}
439
/**
 * Execute a rollback job: git checkout the target commit, rebuild via
 * docker compose, poll the health endpoint, then record the outcome in
 * deploy history and on the job object.
 *
 * @param job        mutable job record created by newJob(); steps/status updated in place
 * @param appName    registered app name (looked up in apps.json)
 * @param commitHash optional commit to roll back to; falls back to HEAD~1
 */
async function _runRollback(job, appName, commitHash) {
  try {
    const apps = readApps(resolveAppsJsonPath());
    const app = apps.find((a) => a.name === appName);
    if (!app) throw new Error(`App "${appName}" not found`);
    // Fall back to the previous commit when no explicit hash was supplied.
    const target = commitHash || "HEAD~1";
    // FIX: the original built history messages with commitHash.slice(0, 7)
    // unconditionally, which threw a TypeError when commitHash was undefined
    // (the documented HEAD~1 fallback). Label from the resolved target instead.
    const label = `Rollback to ${target.slice(0, 7)}`;
    setStep(job, 0, "running", `git checkout ${target.slice(0, 7)}`);
    await execa("git", ["checkout", target], { cwd: app.appDir });
    setStep(job, 0, "done");
    await _injectQuickTunnelIfNeeded(app.appDir, appName, app.port);
    setStep(job, 1, "running", "docker compose up --build");
    await _dockerComposeUp(app.appDir, job);
    setStep(job, 1, "done", "containers started");
    setStep(job, 2, "running");
    const healthUrl = _buildHealthUrl(app.appDir, app.port);
    setStep(job, 2, "running", `polling ${healthUrl}`);
    // Wait up to 120s for the app to answer its health endpoint.
    await _pollHealth(healthUrl, 12e4, job);
    setStep(job, 2, "done");
    updateApp(resolveAppsJsonPath(), appName, { status: "running" });
    appendDeployHistory(DEPLOY_HISTORY_PATH, {
      appName,
      commitHash,
      commitMessage: label,
      status: "success",
      deployedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
    job.status = "done";
  } catch (err) {
    job.status = "failed";
    job.error = err instanceof Error ? err.message : String(err);
    // Any step that hadn't finished is marked failed so the UI shows where it stopped.
    for (const step of job.steps) {
      if (step.status === "running" || step.status === "pending") step.status = "failed";
    }
    appendDeployHistory(DEPLOY_HISTORY_PATH, {
      appName,
      commitHash,
      // Same undefined-safe labeling as the success path.
      commitMessage: `Rollback to ${(commitHash || "HEAD~1").slice(0, 7)}`,
      status: "failed",
      deployedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
}
481
/**
 * Branch names of the app's Gitea repo.
 * GiteaClient.listBranches already degrades to [] on API errors.
 */
async function getAppBranches(appName) {
  const { giteaBaseUrl, giteaUser, giteaPassword } = resolveContext();
  const client = new GiteaClient({
    baseUrl: giteaBaseUrl,
    username: giteaUser,
    password: giteaPassword,
    tokenPath: GITEA_TOKEN_PATH
  });
  return client.listBranches(appName);
}
491
/**
 * Register a push webhook for the app's repo in Gitea.
 * Reuses an already-stored webhook secret when present; otherwise generates
 * a fresh 16-byte hex secret and persists it in the app's deploy settings.
 */
async function setupWebhook(appName, webhookUrl) {
  const { giteaBaseUrl, giteaUser, giteaPassword } = resolveContext();
  const existingSecret = getDeploySettings(appName).webhookSecret;
  const secret = existingSecret ?? randomBytes(16).toString("hex");
  const client = new GiteaClient({
    baseUrl: giteaBaseUrl,
    username: giteaUser,
    password: giteaPassword,
    tokenPath: GITEA_TOKEN_PATH
  });
  await client.createWebhook(appName, webhookUrl, secret);
  // Persist so incoming webhook deliveries can be verified against the same secret.
  updateDeploySettings(appName, { webhookSecret: secret });
}
504
/**
 * Start an asynchronous deploy job for an app and return its jobId.
 * The actual work runs in _runDeploy on the next event-loop turn;
 * callers poll progress via getJobStatus(jobId).
 */
async function deployApp(appName) {
  const deployJob = newJob(appName, ["Pull", "Build & Start", "Health check"]);
  jobs.set(deployJob.jobId, deployJob);
  setImmediate(() => void _runDeploy(deployJob, appName));
  return deployJob.jobId;
}
510
/**
 * Execute a deploy job end to end:
 *   step 0 "Pull"           — sync local checkout with the Gitea repo (non-fatal on failure)
 *   step 1 "Build & Start"  — docker compose up --build (scaffolding Docker config if absent)
 *   step 2 "Health check"   — poll the app's health URL for up to 120s
 * Outcome (success or failure) is recorded in deploy history and on the job object.
 */
async function _runDeploy(job, appName) {
  const apps = readApps(resolveAppsJsonPath());
  const app = apps.find((a) => a.name === appName);
  if (!app) {
    job.status = "failed";
    job.error = `App "${appName}" not found`;
    return;
  }
  try {
    const settings = getDeploySettings(appName);
    setStep(job, 0, "running");
    // --- Pull phase: reconcile local appDir with the Gitea repo. Every failure
    // here is logged and swallowed — a broken Gitea must not block deploys.
    try {
      const ctx = resolveContext();
      const gitea = new GiteaClient({
        baseUrl: ctx.giteaBaseUrl,
        username: ctx.giteaUser,
        password: ctx.giteaPassword,
        tokenPath: GITEA_TOKEN_PATH
      });
      await gitea.prepare();
      const repoExists = await gitea.repoExists(appName);
      if (!repoExists) {
        // Case 1: repo missing in Gitea — recreate it and force-push local code.
        appendLog(job, "[pull] Gitea repo not found \u2014 recreating and pushing local code");
        const cloneUrl = await gitea.createRepo(appName, `Brewnet app: ${appName}`);
        const authedUrl = gitea.authedCloneUrl(cloneUrl);
        // "remote add" fails if the remote exists; fall back to "remote set-url".
        await execa("git", ["remote", "add", "brewnet", authedUrl], { cwd: app.appDir }).catch(
          () => execa("git", ["remote", "set-url", "brewnet", authedUrl], { cwd: app.appDir })
        );
        await execa("git", ["push", "brewnet", "HEAD:main", "--force"], { cwd: app.appDir });
        appendLog(job, "[pull] Gitea repo recreated and code pushed \u2713");
      } else if (!existsSync2(app.appDir)) {
        // Case 2: local checkout missing — re-clone from Gitea.
        appendLog(job, "[pull] appDir missing \u2014 cloning from Gitea");
        const authedUrl = gitea.authedCloneUrl(`${ctx.giteaBaseUrl}/${ctx.giteaUser}/${appName}.git`);
        await execa("git", ["clone", authedUrl, app.appDir]);
        appendLog(job, "[pull] re-cloned from Gitea \u2713");
      } else if (await gitea.repoIsEmpty(appName)) {
        // Case 3: repo exists but has no commits — push local code up.
        appendLog(job, "[pull] Gitea repo is empty \u2014 pushing local code");
        const isShallow = await execa("git", ["rev-parse", "--is-shallow-repository"], { cwd: app.appDir }).then((r) => r.stdout.trim() === "true").catch(() => false);
        if (isShallow) {
          // Shallow clones can't be pushed reliably; unshallow, or re-init git if that fails.
          appendLog(job, "[pull] shallow clone detected \u2014 unshallowing");
          await execa("git", ["fetch", "--unshallow", "origin"], { cwd: app.appDir }).catch(async () => {
            const { reinitGit } = await import("./boilerplate-manager-P6QYUU7Q.js");
            await reinitGit(app.appDir);
          });
        }
        const authedUrl = gitea.authedCloneUrl(`${ctx.giteaBaseUrl}/${ctx.giteaUser}/${appName}.git`);
        await execa("git", ["remote", "add", "brewnet", authedUrl], { cwd: app.appDir }).catch(
          () => execa("git", ["remote", "set-url", "brewnet", authedUrl], { cwd: app.appDir })
        );
        await execa("git", ["push", "brewnet", "HEAD:main", "--force"], { cwd: app.appDir });
        appendLog(job, "[pull] code pushed to Gitea \u2713");
      } else {
        // Case 4 (normal): pull the configured deploy branch; failures are non-critical.
        await execa("git", ["pull", "brewnet", settings.deployBranch], { cwd: app.appDir }).catch((e) => {
          appendLog(job, `[pull] git pull failed (non-critical): ${e instanceof Error ? e.message : String(e)}`);
        });
      }
    } catch (e) {
      appendLog(job, `[pull] Gitea sync failed (non-critical): ${e instanceof Error ? e.message : String(e)}`);
    }
    setStep(job, 0, "done");
    // --- Build phase: ensure there is a compose file, scaffolding one from the
    // detected project type when possible; otherwise the deploy fails with guidance.
    const hasCompose = existsSync2(join(app.appDir, "docker-compose.yml")) || existsSync2(join(app.appDir, "compose.yml"));
    if (!hasCompose) {
      const projectType = _detectProjectType(app.appDir);
      if (projectType) {
        appendLog(job, `[scaffold] Detected ${projectType} project \u2014 generating Docker config`);
        _scaffoldDockerConfig(app.appDir, appName, app.port, job, projectType);
      } else {
        throw new Error(
          "This project has no docker-compose.yml or Dockerfile. Add a Dockerfile and docker-compose.yml to deploy, or use a Brewnet boilerplate."
        );
      }
    }
    await _injectQuickTunnelIfNeeded(app.appDir, appName, app.port);
    setStep(job, 1, "running", "docker compose up --build");
    await _dockerComposeUp(app.appDir, job);
    setStep(job, 1, "done", "containers started");
    // --- Health phase: poll for up to 120s.
    setStep(job, 2, "running");
    const healthUrlDeploy = _buildHealthUrl(app.appDir, app.port);
    setStep(job, 2, "running", `polling ${healthUrlDeploy}`);
    await _pollHealth(healthUrlDeploy, 12e4, job);
    setStep(job, 2, "done");
    updateApp(resolveAppsJsonPath(), appName, { status: "running" });
    // Record what was deployed; git failures degrade to empty hash / generic message.
    const headHash = await execa("git", ["rev-parse", "HEAD"], { cwd: app.appDir }).then((r) => r.stdout.trim()).catch(() => "");
    const headMsg = await execa("git", ["log", "-1", "--format=%s"], { cwd: app.appDir }).then((r) => r.stdout.trim()).catch(() => "Manual deploy");
    appendDeployHistory(DEPLOY_HISTORY_PATH, {
      appName,
      commitHash: headHash,
      commitMessage: headMsg,
      status: "success",
      deployedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
    job.status = "done";
  } catch (err) {
    job.status = "failed";
    job.error = err instanceof Error ? err.message : String(err);
    // Any step that hadn't finished is marked failed so the UI shows where it stopped.
    for (const step of job.steps) {
      if (step.status === "running" || step.status === "pending") step.status = "failed";
    }
    const headHashFail = await execa("git", ["rev-parse", "HEAD"], { cwd: app.appDir }).then((r) => r.stdout.trim()).catch(() => "");
    appendDeployHistory(DEPLOY_HISTORY_PATH, {
      appName,
      commitHash: headHashFail,
      commitMessage: "Manual deploy",
      status: "failed",
      deployedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
}
618
/** Local checkout directory for a registered app, or undefined if unknown. */
function getAppDir(appName) {
  const match = readApps(resolveAppsJsonPath()).find((a) => a.name === appName);
  return match?.appDir;
}
622
/** Look up a deploy/rollback job by id; undefined when unknown or process restarted. */
function getJobStatus(jobId) {
  const job = jobs.get(jobId);
  return job;
}
625
/**
 * Create a fresh job record with a random 16-hex-char id, status "running",
 * and one pending step per label.
 */
function newJob(appName, stepLabels) {
  const steps = stepLabels.map((label) => ({ label, status: "pending" }));
  return {
    jobId: randomBytes(8).toString("hex"),
    appName,
    status: "running",
    steps
  };
}
633
/**
 * Update one step of a job in place. Silently ignores out-of-range indices;
 * the message is only written when provided (truthy).
 */
function setStep(job, index, status, message) {
  const step = job.steps[index];
  if (!step) return;
  step.status = status;
  if (message) step.message = message;
}
640
/**
 * Append a log line to a job, lazily creating the logs array and keeping
 * only the most recent 200 lines (oldest entries are dropped).
 */
function appendLog(job, line) {
  if (!job.logs) job.logs = [];
  job.logs.push(line);
  const overflow = job.logs.length - 200;
  if (overflow > 0) job.logs.splice(0, overflow);
}
645
/**
 * Read `.brewnet-boilerplate.json` from a project directory, normalized to
 * an array. Returns [] when the file is missing or contains invalid JSON.
 */
function readBoilerplateMeta(projectPath) {
  const metaPath = join(projectPath, ".brewnet-boilerplate.json");
  if (!existsSync2(metaPath)) return [];
  try {
    const parsed = JSON.parse(readFileSync2(metaPath, "utf-8"));
    return Array.isArray(parsed) ? parsed : [parsed];
  } catch {
    return [];
  }
}
655
/**
 * Resolve the active project context used by all Gitea operations.
 *
 * Sources, in priority order:
 *  - projectPath: saved state's projectPath (with "~" expanded), else cwd
 *  - giteaUser:   project .env GITEA_ADMIN_USER > state.admin.username > "admin"
 *  - giteaPassword: secrets/admin_password file > .env GITEA_ADMIN_PASSWORD
 *                   > state.admin.password > ""
 *  - giteaBaseUrl: "http://localhost/git" in quick-tunnel mode, else
 *                  http://localhost:<gitServer.port> (default 3000)
 */
function resolveContext() {
  const last = getLastProject();
  const state = loadState(last ?? "");
  const raw = state?.projectPath ?? process.cwd();
  // Expand a leading "~" to the user's home directory.
  const projectPath = raw.startsWith("~") ? join(homedir(), raw.slice(1)) : raw;
  const envPath = join(projectPath, ".env");
  const giteaUser = readDotEnvValue(envPath, "GITEA_ADMIN_USER") || state?.admin?.username || "admin";
  // The secrets file wins over .env and saved state for the admin password.
  const secretsPath = join(projectPath, "secrets", "admin_password");
  const secretsPassword = existsSync2(secretsPath) ? readFileSync2(secretsPath, "utf-8").trim() : "";
  const giteaPassword = secretsPassword || readDotEnvValue(envPath, "GITEA_ADMIN_PASSWORD") || state?.admin?.password || "";
  const tunnelMode = state?.domain?.cloudflare?.tunnelMode ?? "";
  // 3e3 === 3000, the default Gitea HTTP port.
  const gitPort = state?.servers?.gitServer?.port ?? 3e3;
  const giteaBaseUrl = tunnelMode === "quick" ? "http://localhost/git" : `http://localhost:${gitPort}`;
  return { projectPath, giteaBaseUrl, giteaUser, giteaPassword };
}
670
/**
 * In quick-tunnel mode, inject Traefik labels into the app's compose config
 * (delegated to the lazily-imported boilerplate manager). No-op in any other
 * tunnel mode; all failures are logged and swallowed.
 */
async function _injectQuickTunnelIfNeeded(appDir, appName, port) {
  try {
    const state = loadState(getLastProject() ?? "");
    const tunnelMode = state?.domain?.cloudflare?.tunnelMode;
    if (tunnelMode !== "quick") return;
    const { injectTraefikForQuickTunnel } = await import("./boilerplate-manager-P6QYUU7Q.js");
    injectTraefikForQuickTunnel(appDir, appName, port);
  } catch (err) {
    console.error(`[Quick Tunnel] Failed to inject Traefik labels for ${appName}: ${err instanceof Error ? err.message : String(err)}`);
  }
}
681
/**
 * Best-effort project type detection from marker files, checked in priority
 * order (nextjs before nodejs, since Next.js projects also have package.json).
 * A directory containing any *.html file counts as "static". Returns null
 * when nothing matches or the directory can't be read.
 */
function _detectProjectType(dir) {
  const markerTable = [
    [["next.config.ts", "next.config.mjs", "next.config.js"], "nextjs"],
    [["package.json"], "nodejs"],
    [["requirements.txt", "pyproject.toml"], "python"],
    [["go.mod"], "go"],
    [["Cargo.toml"], "rust"],
    [["pom.xml", "build.gradle", "build.gradle.kts"], "java"],
    [["index.html"], "static"]
  ];
  try {
    for (const [markers, type] of markerTable) {
      if (markers.some((m) => existsSync2(join(dir, m)))) return type;
    }
    if (readdirSync(dir).some((f) => f.endsWith(".html"))) return "static";
  } catch {
    // Unreadable directory: fall through to null.
  }
  return null;
}
695
// Auto-generates a Dockerfile, docker-compose.yml, and .dockerignore for a
// project directory based on its detected (or caller-supplied) project type.
//
// Params:
//   dir          - project root to scaffold into
//   _appName     - unused; kept for call-site compatibility
//   port         - host port to publish (also the container port for generic types)
//   job          - optional job handle for progress logging
//   detectedType - when provided, skips auto-detection entirely
//
// Throws when no project type can be determined. An existing Dockerfile or
// .dockerignore is left untouched; docker-compose.yml is always (re)written.
//
// Fix: the rust/java templates previously attached shell redirection
// ("2>/dev/null || true") to COPY instructions. COPY is not run through a
// shell, so Docker parses those as extra source paths and the build fails.
// Artifacts are now gathered with RUN (a real shell) into a staging dir,
// and the runtime stage copies that directory.
function _scaffoldDockerConfig(dir, _appName, port, job, detectedType) {
  const type = detectedType || _detectProjectType(dir);
  if (!type) throw new Error(`Cannot auto-detect project type in ${dir}. Add a Dockerfile and docker-compose.yml manually.`);
  if (job && !detectedType) appendLog(job, `[scaffold] Detected ${type} project \u2014 generating Docker config`);
  let dockerfile = "";
  switch (type) {
    case "nextjs":
      dockerfile = [
        "FROM node:22-alpine",
        "WORKDIR /app",
        "COPY package.json package-lock.json* pnpm-lock.yaml* yarn.lock* ./",
        "RUN npm install --legacy-peer-deps 2>/dev/null || yarn install 2>/dev/null || true",
        "COPY . .",
        "RUN npm run build",
        "ENV PORT=3000 HOSTNAME=0.0.0.0",
        "EXPOSE 3000",
        'CMD ["npm", "start"]'
      ].join("\n");
      break;
    case "nodejs":
      dockerfile = [
        "FROM node:22-alpine",
        "WORKDIR /app",
        "COPY package.json package-lock.json* pnpm-lock.yaml* yarn.lock* ./",
        "RUN npm install --legacy-peer-deps || true",
        "COPY . .",
        "RUN npm run build 2>/dev/null || true",
        "EXPOSE " + port,
        'CMD ["npm", "start"]'
      ].join("\n");
      break;
    case "python":
      dockerfile = [
        "FROM python:3.13-slim",
        "WORKDIR /app",
        "COPY requirements.txt* pyproject.toml* ./",
        "RUN pip install --no-cache-dir -r requirements.txt 2>/dev/null || pip install --no-cache-dir . 2>/dev/null || true",
        "COPY . .",
        "EXPOSE " + port,
        'CMD ["python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "' + port + '"]'
      ].join("\n");
      break;
    case "go":
      dockerfile = [
        "FROM golang:1.22-alpine AS builder",
        "WORKDIR /app",
        "COPY go.mod go.sum* ./",
        "RUN go mod download",
        "COPY . .",
        "RUN CGO_ENABLED=0 go build -o server .",
        "",
        "FROM alpine",
        "WORKDIR /app",
        "COPY --from=builder /app/server .",
        "EXPOSE " + port,
        'CMD ["./server"]'
      ].join("\n");
      break;
    case "rust":
      dockerfile = [
        "FROM rust:1.88 AS builder",
        "WORKDIR /app",
        "COPY . .",
        "RUN cargo build --release",
        // Stage binaries via RUN (shell semantics are valid there), then COPY
        // the staging directory -- COPY itself cannot redirect or fall back.
        "RUN mkdir -p /out && find target/release -maxdepth 1 -type f -executable -exec cp {} /out/ \\;",
        "",
        "FROM debian:bookworm-slim",
        "WORKDIR /app",
        "COPY --from=builder /out/ /app/",
        "EXPOSE " + port,
        'CMD ["./app"]'
      ].join("\n");
      break;
    case "java":
      dockerfile = [
        "FROM gradle:8.12-jdk21 AS builder",
        "WORKDIR /app",
        "COPY . .",
        "RUN gradle build -x test 2>/dev/null || ./gradlew build -x test 2>/dev/null || mvn package -DskipTests 2>/dev/null || true",
        // Stage the built jar (Gradle or Maven layout) with RUN so the
        // fallback chain runs in a shell; COPY the staging dir afterwards.
        "RUN mkdir -p /out && (cp build/libs/*.jar /out/app.jar 2>/dev/null || cp target/*.jar /out/app.jar 2>/dev/null || true)",
        "",
        "FROM eclipse-temurin:21-jre-alpine",
        "WORKDIR /app",
        "COPY --from=builder /out/ /app/",
        "EXPOSE " + port,
        'CMD ["java", "-jar", "app.jar"]'
      ].join("\n");
      break;
    case "static":
      dockerfile = [
        "FROM nginx:1.27-alpine",
        "COPY . /usr/share/nginx/html/",
        "EXPOSE 80"
      ].join("\n");
      break;
  }
  // Next.js always listens on 3000 and nginx on 80 inside the container;
  // every other template uses the requested port on both sides.
  const internalPort = type === "nextjs" ? 3000 : type === "static" ? 80 : port;
  const compose = [
    "services:",
    "  backend:",
    "    build: .",
    "    ports:",
    `      - "${port}:${internalPort}"`,
    "    restart: unless-stopped",
    "    healthcheck:",
    `      test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:${internalPort}/"]`,
    "      interval: 10s",
    "      timeout: 5s",
    "      retries: 5"
  ].join("\n");
  if (!existsSync2(join(dir, "Dockerfile"))) {
    writeFileSync2(join(dir, "Dockerfile"), dockerfile, "utf-8");
    if (job) appendLog(job, "[scaffold] Generated Dockerfile");
  }
  writeFileSync2(join(dir, "docker-compose.yml"), compose, "utf-8");
  if (job) appendLog(job, "[scaffold] Generated docker-compose.yml");
  if (!existsSync2(join(dir, ".dockerignore"))) {
    writeFileSync2(join(dir, ".dockerignore"), "node_modules\n.next\n.git\n*.md\n", "utf-8");
  }
}
814
// Guarantees `dir` has a compose file: when neither docker-compose.yml nor
// compose.yml exists, scaffolds a Dockerfile + compose for the app.
function ensureComposeFile(dir, appName, port, job) {
  const composeNames = ["docker-compose.yml", "compose.yml"];
  const hasCompose = composeNames.some((name) => existsSync2(join(dir, name)));
  if (hasCompose) return;
  _scaffoldDockerConfig(dir, appName, port, job);
}
818
// Resolves the app's backend port: reads BACKEND_PORT from <appDir>/.env and
// falls back to `fallbackPort` when the key is missing or not a valid integer.
function _resolveBackendPort(appDir, fallbackPort) {
  const raw = readDotEnvValue(join(appDir, ".env"), "BACKEND_PORT");
  if (!raw) return fallbackPort;
  const port = parseInt(raw, 10);
  return Number.isNaN(port) ? fallbackPort : port;
}
824
// Extracts the Next.js `basePath` value from whichever next.config variant
// exists in `appDir`. Returns "" when no config declares a basePath.
function detectBasePath(appDir) {
  const configNames = ["next.config.ts", "next.config.mjs", "next.config.js"];
  const basePathRe = /basePath\s*:\s*['"`]([^'"`]+)['"`]/;
  for (const configName of configNames) {
    const configPath = join(appDir, configName);
    if (!existsSync2(configPath)) continue;
    const found = readFileSync2(configPath, "utf-8").match(basePathRe);
    if (found) return found[1];
  }
  return "";
}
835
// Builds the local health-check URL for an app: resolves the backend port
// from .env, honors any Next.js basePath, and targets /health when the app
// looks like a Brewnet boilerplate (.env.example mentions STACK_LANG) or
// ships a recognizable health route; otherwise polls "/".
function _buildHealthUrl(appDir, fallbackPort) {
  const healthPort = _resolveBackendPort(appDir, fallbackPort);
  const basePath = detectBasePath(appDir);
  const envExample = join(appDir, ".env.example");
  const isBoilerplate =
    existsSync2(envExample) && readFileSync2(envExample, "utf-8").includes("STACK_LANG");
  const hasHealthRoute =
    existsSync2(join(appDir, "src", "app", "health")) ||
    existsSync2(join(appDir, "backend", "src"));
  const healthPath = isBoilerplate || hasHealthRoute ? "/health" : "/";
  return `http://127.0.0.1:${healthPort}${basePath}${healthPath}`;
}
843
// Polls `url` (5s per-request timeout, 3s between attempts) until an OK
// response arrives or `maxMs` elapses. Progress is appended to the job log
// when a job is supplied; failed fetches are logged only every third attempt
// to keep the log readable. Throws on timeout.
async function _pollHealth(url, maxMs = 12e4, job) {
  const deadline = Date.now() + maxMs;
  let attempt = 0;
  while (Date.now() < deadline) {
    attempt += 1;
    try {
      const response = await fetch(url, { signal: AbortSignal.timeout(5e3) });
      if (response.ok) {
        if (job) appendLog(job, `[health] \u2713 ${url} \u2192 ${response.status} (attempt ${attempt})`);
        return;
      }
      if (job) appendLog(job, `[health] ${url} \u2192 ${response.status} (attempt ${attempt})`);
    } catch {
      // Connection errors are expected while the container boots.
      if (job && attempt % 3 === 1) appendLog(job, `[health] waiting... ${url} (attempt ${attempt})`);
    }
    await new Promise((resolve) => setTimeout(resolve, 3e3));
  }
  if (job) appendLog(job, `[health] \u2717 timeout after ${maxMs / 1e3}s`);
  throw new Error(`Health check timed out after ${maxMs / 1e3}s: ${url}`);
}
863
// Runs `docker compose up -d --build` in `cwd`, streaming every stdout and
// stderr line into the job log (prefixed "[docker]"). Throws with the
// captured stderr when the command exits non-zero.
async function _dockerComposeUp(cwd, job) {
  appendLog(job, `[docker] $ docker compose up -d --build`);
  appendLog(job, `[docker] cwd: ${cwd}`);
  const proc = execa("docker", ["compose", "up", "-d", "--build"], { cwd, reject: false });
  // Forward both streams line-by-line, dropping empty lines.
  const forward = (chunk) => {
    for (const line of chunk.toString().split("\n")) {
      if (line) appendLog(job, `[docker] ${line}`);
    }
  };
  proc.stdout?.on("data", forward);
  proc.stderr?.on("data", forward);
  const result = await proc;
  if (result.exitCode !== 0) {
    appendLog(job, `[docker] \u2717 exit code ${result.exitCode}`);
    throw new Error(`Command failed with exit code ${result.exitCode}: docker compose up -d --build
${result.stderr}`);
  }
  appendLog(job, `[docker] \u2713 containers started`);
}
885
// Registers a new app-creation job and kicks off the async pipeline on the
// next tick. Returns the job id immediately; callers track progress through
// the shared jobs map.
async function createApp(opts) {
  const stepNames = ["Validating", "Gitea setup", "Gitea repo", "Git push", "Docker up", "Health check"];
  const job = newJob(opts.appName, stepNames);
  jobs.set(job.jobId, job);
  // Fire-and-forget: _runCreateApp records success/failure on the job itself.
  setImmediate(() => void _runCreateApp(job, opts));
  return job.jobId;
}
891
// Executes the app-creation pipeline for a job: validates mode-specific
// options (step 0), prepares the Gitea client (step 1), then dispatches to
// the mode-specific creator for the remaining steps. Any thrown error marks
// the job failed and flips all pending/running steps to "failed".
async function _runCreateApp(job, opts) {
  try {
    const ctx = resolveContext();
    const appsJson = resolveAppsJsonPath();
    // Step 0: option validation.
    setStep(job, 0, "running");
    if (opts.mode === "boilerplate") {
      if (!opts.stackId) throw new Error("stackId is required for boilerplate mode");
      const metas = readBoilerplateMeta(ctx.projectPath);
      const meta = metas.find((m) => m.stackId === opts.stackId);
      if (meta) {
        // Stack already installed locally -- Mode A uses its checkout directly.
        opts._meta = meta;
      } else {
        // Not installed: record the stack id so the dispatch below falls
        // through to Mode C, which clones it fresh from the catalog.
        appendLog(job, `[info] Stack "${opts.stackId}" not installed locally \u2014 cloning fresh from catalog`);
        opts._resolvedStackId = opts.stackId;
      }
    } else if (opts.mode === "git-clone") {
      if (!opts.gitUrl) throw new Error("gitUrl is required for Git Clone mode");
    } else if (opts.mode === "new-project") {
      // Resolve language/framework to a concrete stack id via the lazily
      // loaded frameworks chunk; defaults to nodejs/express.
      const { resolveStackId } = await import("./frameworks-Z7VXDGP4.js");
      const stackId = resolveStackId(opts.language ?? "nodejs", opts.frameworkId ?? "express");
      if (!stackId) throw new Error(`Unknown stack: ${opts.language}/${opts.frameworkId}`);
      opts._resolvedStackId = stackId;
    }
    setStep(job, 0, "done");
    // Step 1: Gitea connectivity/token preparation.
    setStep(job, 1, "running");
    const gitea = new GiteaClient({
      baseUrl: ctx.giteaBaseUrl,
      username: ctx.giteaUser,
      password: ctx.giteaPassword,
      tokenPath: GITEA_TOKEN_PATH
    });
    const giteaPrep = await gitea.prepare();
    setStep(job, 1, "done", giteaPrep.message);
    // Steps 2+: mode dispatch. Boilerplate without local meta intentionally
    // routes to Mode C (fresh clone), matching the fallback set up above.
    if (opts.mode === "boilerplate" && opts._meta) {
      await _createModeA(job, opts, ctx, gitea, appsJson);
    } else if (opts.mode === "git-clone") {
      await _createModeB(job, opts, ctx, gitea, appsJson);
    } else {
      await _createModeC(job, opts, ctx, gitea, appsJson);
    }
    job.status = "done";
  } catch (err) {
    job.status = "failed";
    job.error = err instanceof Error ? err.message : String(err);
    // Keep the step list consistent for the UI: nothing stays "running".
    for (const step of job.steps) {
      if (step.status === "running" || step.status === "pending") step.status = "failed";
    }
  }
}
940
// Mode A: deploy a boilerplate stack already installed locally (opts._meta
// set by _runCreateApp). Pushes the existing checkout to Gitea, brings up
// Docker, polls health, records the app entry, and registers a deploy
// webhook (best-effort).
async function _createModeA(job, opts, ctx, gitea, appsJson) {
  const meta = opts._meta;
  // Port preference: explicit option > meta.port > port parsed off the end
  // of meta.backendUrl > 8080.
  const port = opts.port ?? meta.port ?? parseInt(meta.backendUrl.split(":").pop() ?? "8080", 10);
  setStep(job, 2, "running", `checking ${ctx.giteaUser}/${opts.appName}`);
  const alreadyExists = await gitea.repoExists(opts.appName);
  let cloneUrl;
  if (!alreadyExists) {
    setStep(job, 2, "running", `creating ${ctx.giteaUser}/${opts.appName}`);
    cloneUrl = await gitea.createRepo(opts.appName, `Brewnet app: ${opts.appName}`);
  } else {
    cloneUrl = `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${opts.appName}.git`;
  }
  setStep(job, 2, "done");
  setStep(job, 3, "running", `pushing HEAD:main \u2192 ${ctx.giteaUser}/${opts.appName}`);
  // A shallow checkout cannot be pushed as full history: unshallow first,
  // and if that fails (e.g. origin unreachable) re-init the repo instead.
  const shallowCheck = await execa("git", ["rev-parse", "--is-shallow-repository"], { cwd: meta.appDir }).catch(() => ({ stdout: "false" }));
  if (shallowCheck.stdout.trim() === "true") {
    await execa("git", ["fetch", "--unshallow", "origin"], { cwd: meta.appDir }).catch(async () => {
      const { reinitGit } = await import("./boilerplate-manager-P6QYUU7Q.js");
      await reinitGit(meta.appDir);
    });
  }
  // Add the "brewnet" remote with embedded credentials; if it already exists
  // from a previous run, update its URL instead. Then force-push HEAD:main.
  const authedUrl = gitea.authedCloneUrl(cloneUrl);
  await execa("git", ["remote", "add", "brewnet", authedUrl], { cwd: meta.appDir }).catch(() => {
    return execa("git", ["remote", "set-url", "brewnet", authedUrl], { cwd: meta.appDir });
  });
  await execa("git", ["push", "brewnet", "HEAD:main", "--force"], { cwd: meta.appDir });
  setStep(job, 3, "done");
  setStep(job, 4, "running", "docker compose up --build");
  // Scaffold Docker config if the stack ships none, then start containers.
  ensureComposeFile(meta.appDir, opts.appName, port, job);
  await _injectQuickTunnelIfNeeded(meta.appDir, opts.appName, port);
  await _dockerComposeUp(meta.appDir, job);
  setStep(job, 4, "done", "containers started");
  setStep(job, 5, "running");
  const healthUrlA = _buildHealthUrl(meta.appDir, port);
  setStep(job, 5, "running", `polling ${healthUrlA}`);
  await _pollHealth(healthUrlA, 12e4, job);
  setStep(job, 5, "done");
  // Persist the app record so start/stop/remove and the admin UI can find it.
  addApp(appsJson, {
    name: opts.appName,
    mode: "boilerplate",
    stackId: opts.stackId,
    appDir: meta.appDir,
    lang: meta.lang,
    framework: meta.frameworkId,
    port,
    giteaRepoUrl: `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${opts.appName}`,
    status: "running",
    createdAt: (/* @__PURE__ */ new Date()).toISOString()
  });
  // Webhook registration is best-effort: on failure the app still runs, but
  // push-triggered redeploys won't fire.
  await setupWebhook(opts.appName, "http://localhost:8088/api/deploy/hook").catch((e) => {
    console.warn("[webhook] registration failed (non-critical):", e instanceof Error ? e.message : String(e));
  });
}
993
// Mode B: deploy from an arbitrary external Git URL. Shallow-clones the repo,
// rewrites port settings in .env, resets git history, mirrors to Gitea, and
// -- only when the repo ships its own compose file -- starts Docker and waits
// for health. No compose scaffolding is attempted for external repos.
async function _createModeB(job, opts, ctx, gitea, appsJson) {
  const port = opts.port ?? 8080;
  const appDir = join(ctx.projectPath, "apps", opts.appName);
  setStep(job, 2, "running", "Cloning external repository...");
  const { reinitGit: reinitGitB } = await import("./boilerplate-manager-P6QYUU7Q.js");
  const { rmSync } = await import("fs");
  // Remove leftovers from any previous (possibly failed) attempt.
  if (existsSync2(appDir)) {
    rmSync(appDir, { recursive: true, force: true });
  }
  const cloneArgs = ["clone", "--depth", "1"];
  if (opts.branch) cloneArgs.push("-b", opts.branch);
  cloneArgs.push(opts.gitUrl, appDir);
  await execa("git", cloneArgs);
  // Seed .env: prefer .env.example (overriding BACKEND_PORT and picking a
  // free FRONTEND_PORT); otherwise patch an existing .env's BACKEND_PORT.
  const envExPath = join(appDir, ".env.example");
  const envPath = join(appDir, ".env");
  if (existsSync2(envExPath)) {
    const { findFreePort: findFreePortB } = await import("./boilerplate-manager-P6QYUU7Q.js");
    let envContent = readFileSync2(envExPath, "utf-8");
    envContent = envContent.replace(/^BACKEND_PORT=.*/m, `BACKEND_PORT=${port}`);
    const fePort = await findFreePortB(port + 1);
    envContent = envContent.replace(/^FRONTEND_PORT=.*/m, `FRONTEND_PORT=${fePort}`);
    writeFileSync2(envPath, envContent, "utf-8");
  } else if (existsSync2(join(appDir, ".env"))) {
    let envContent = readFileSync2(join(appDir, ".env"), "utf-8");
    envContent = envContent.replace(/^BACKEND_PORT=.*/m, `BACKEND_PORT=${port}`);
    writeFileSync2(join(appDir, ".env"), envContent, "utf-8");
  }
  // Drop upstream history so the Gitea mirror starts from a fresh root commit.
  await reinitGitB(appDir);
  const alreadyExists = await gitea.repoExists(opts.appName);
  const cloneUrl = alreadyExists ? `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${opts.appName}.git` : await gitea.createRepo(opts.appName, `Brewnet app: ${opts.appName}`);
  setStep(job, 2, "done");
  setStep(job, 3, "running");
  const authedUrl = gitea.authedCloneUrl(cloneUrl);
  await execa("git", ["remote", "add", "brewnet", authedUrl], { cwd: appDir });
  await execa("git", ["push", "brewnet", "HEAD:main", "--force"], { cwd: appDir });
  setStep(job, 3, "done");
  // Start Docker only if the repo provides a compose file; otherwise mark
  // the remaining steps skipped and leave deployment to the user.
  const hasCompose = existsSync2(join(appDir, "docker-compose.yml")) || existsSync2(join(appDir, "compose.yml"));
  if (hasCompose) {
    setStep(job, 4, "running", "docker compose up --build");
    await _injectQuickTunnelIfNeeded(appDir, opts.appName, port);
    await _dockerComposeUp(appDir, job);
    setStep(job, 4, "done", "containers started");
    setStep(job, 5, "running");
    const healthUrlB = _buildHealthUrl(appDir, port);
    setStep(job, 5, "running", `polling ${healthUrlB}`);
    await _pollHealth(healthUrlB, 12e4, job);
    setStep(job, 5, "done");
  } else {
    setStep(job, 4, "done", "skipped \u2014 no docker-compose.yml");
    setStep(job, 5, "done", "skipped \u2014 deploy separately");
    appendLog(job, "[clone] Gitea push completed \u2014 no docker-compose.yml, skipping Docker up");
  }
  // Record the app; status reflects whether containers were started.
  addApp(appsJson, {
    name: opts.appName,
    mode: "git-clone",
    sourceUrl: opts.gitUrl,
    appDir,
    port,
    giteaRepoUrl: `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${opts.appName}`,
    status: hasCompose ? "running" : "stopped",
    createdAt: (/* @__PURE__ */ new Date()).toISOString()
  });
}
1056
// Mode C: create a brand-new project from a catalog stack (also the fallback
// for "boilerplate" mode when the stack isn't installed locally). Clones the
// stack, generates .env with free ports, re-inits git, pushes to Gitea,
// starts Docker, waits for health, and records the app.
async function _createModeC(job, opts, ctx, gitea, appsJson) {
  const { cloneStack, generateEnv, reinitGit, findFreePort } = await import("./boilerplate-manager-P6QYUU7Q.js");
  const { getStackById: getStackById2 } = await import("./stacks-M4FBTVO5.js");
  const stackId = opts._resolvedStackId;
  const requestedPort = opts.port ?? 8080;
  const appDir = join(ctx.projectPath, "apps", opts.appName);
  await cloneStack(stackId, appDir);
  // The requested port may be taken; findFreePort picks the next available.
  const port = await findFreePort(requestedPort);
  const stackInfo = getStackById2(stackId);
  // Split-stack templates (separate frontend) also need a free frontend port.
  const frontendPort = stackInfo && !stackInfo.isUnified ? await findFreePort(port + 1) : void 0;
  generateEnv(appDir, stackId, "sqlite3", { hostPort: port, frontendPort });
  await reinitGit(appDir);
  setStep(job, 2, "running");
  const cloneUrl = await gitea.createRepo(opts.appName, `Brewnet app: ${opts.appName}`);
  setStep(job, 2, "done");
  setStep(job, 3, "running");
  const authedUrl = gitea.authedCloneUrl(cloneUrl);
  await execa("git", ["remote", "add", "brewnet", authedUrl], { cwd: appDir });
  await execa("git", ["push", "brewnet", "HEAD:main", "--force"], { cwd: appDir });
  setStep(job, 3, "done");
  setStep(job, 4, "running", "docker compose up --build");
  // Stacks normally ship compose config; scaffold one only if missing.
  ensureComposeFile(appDir, opts.appName, port, job);
  await _injectQuickTunnelIfNeeded(appDir, opts.appName, port);
  await _dockerComposeUp(appDir, job);
  setStep(job, 4, "done", "containers started");
  setStep(job, 5, "running");
  const healthUrlC = _buildHealthUrl(appDir, port);
  setStep(job, 5, "running", `polling ${healthUrlC}`);
  await _pollHealth(healthUrlC, 12e4, job);
  setStep(job, 5, "done");
  // Persist the app record. Mode stays "boilerplate" when this ran as the
  // fallback for an uninstalled boilerplate stack.
  addApp(appsJson, {
    name: opts.appName,
    mode: opts.mode === "boilerplate" ? "boilerplate" : "new-project",
    stackId,
    appDir,
    lang: opts.language ?? stackInfo?.language,
    framework: opts.frameworkId ?? stackInfo?.framework,
    port,
    giteaRepoUrl: `${ctx.giteaBaseUrl}/${ctx.giteaUser}/${opts.appName}`,
    status: "running",
    createdAt: (/* @__PURE__ */ new Date()).toISOString()
  });
}
1099
// Starts an existing app's containers with docker compose and marks the
// registry entry "running". Throws when the app is not registered.
async function startApp(appName) {
  const appsJson = resolveAppsJsonPath();
  const app = readApps(appsJson).find((entry) => entry.name === appName);
  if (!app) throw new Error(`App "${appName}" not found`);
  await execa("docker", ["compose", "up", "-d"], { cwd: app.appDir });
  updateApp(appsJson, appName, { status: "running" });
}
1107
// Stops an existing app's containers with docker compose and marks the
// registry entry "stopped". Throws when the app is not registered.
async function stopApp(appName) {
  const appsJson = resolveAppsJsonPath();
  const app = readApps(appsJson).find((entry) => entry.name === appName);
  if (!app) throw new Error(`App "${appName}" not found`);
  await execa("docker", ["compose", "down"], { cwd: app.appDir });
  updateApp(appsJson, appName, { status: "stopped" });
}
1115
// Removes an app: tears down its containers (including volumes) and drops it
// from the registry. A compose failure is logged but never blocks removal of
// the registry entry.
async function removeApp2(appName) {
  const appsJson = resolveAppsJsonPath();
  const app = readApps(appsJson).find((entry) => entry.name === appName);
  if (!app) throw new Error(`App "${appName}" not found`);
  try {
    await execa("docker", ["compose", "down", "--volumes"], { cwd: app.appDir });
  } catch (e) {
    console.warn("[removeApp] docker compose down failed:", e instanceof Error ? e.message : String(e));
  }
  removeApp(appsJson, appName);
}
1125
+
1126
// src/services/admin-server.ts
// Package root resolved relative to this bundled file.
// NOTE(review): assumes the bundle sits four directory levels below the
// package root -- confirm against the published dist layout.
var PKG_ROOT = join2(fileURLToPath(import.meta.url), "../../../..");
// Location of the built admin UI assets served by the admin server.
var ADMIN_UI_DIST = join2(PKG_ROOT, "packages/admin-ui/dist");
// File-extension -> Content-Type table used by serveStaticFile; unknown
// extensions fall back to application/octet-stream there.
var MIME_TYPES = {
  ".html": "text/html; charset=utf-8",
  ".js": "application/javascript; charset=utf-8",
  ".mjs": "application/javascript; charset=utf-8",
  ".css": "text/css; charset=utf-8",
  ".json": "application/json; charset=utf-8",
  ".svg": "image/svg+xml",
  ".png": "image/png",
  ".jpg": "image/jpeg",
  ".jpeg": "image/jpeg",
  ".ico": "image/x-icon",
  ".woff": "font/woff",
  ".woff2": "font/woff2",
  ".ttf": "font/ttf",
  ".webmanifest": "application/manifest+json"
};
1145
// Streams a static file to the HTTP response with Content-Type, Content-Length,
// and Cache-Control headers. Files under assets/ with a content-hash suffix in
// the name are served immutable for a year; everything else is no-cache so new
// builds are picked up immediately.
function serveStaticFile(filePath, res, statusCode = 200) {
  const { size } = statSync(filePath);
  const mime = MIME_TYPES[extname(filePath).toLowerCase()] ?? "application/octet-stream";
  const isHashedAsset = /assets\/[^/]+-[A-Za-z0-9]{8,}\.[^.]+$/.test(filePath);
  res.writeHead(statusCode, {
    "Content-Type": mime,
    "Content-Length": size,
    "Cache-Control": isHashedAsset
      ? "public, max-age=31536000, immutable"
      : "no-cache, no-store, must-revalidate"
  });
  createReadStream(filePath).pipe(res);
}
1157
// Admin UI icon SVG markup, resolved once at module load. Tries two candidate
// locations under public/images (covering different install layouts); when
// neither exists, falls back to an embedded copy of the logo so the icon is
// always servable.
var ICON_SVG = (() => {
  const candidates = [
    join2(PKG_ROOT, "public/images/icon.svg"),
    join2(PKG_ROOT, "../public/images/icon.svg")
  ];
  for (const p of candidates) {
    if (existsSync3(p)) return readFileSync3(p, "utf-8");
  }
  return `<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="4 6 38 38" fill="none" stroke="#f5a623" stroke-linecap="round" stroke-linejoin="round"><path d="M8 26H32V34C32 36.8 29.8 39 27 39H13C10.2 39 8 36.8 8 34V26Z" stroke-width="3.5" fill="none"/><path d="M32 28.5C35.5 28.5 37 30.5 37 32.5C37 34.5 35.5 36.5 32 36.5" stroke-width="3.5" fill="none"/><circle cx="20" cy="30" r="2.2" fill="#f5a623" stroke="none"/><path d="M16.5 20a5 5 0 0 1 7 0" stroke-width="3.5" fill="none"/><path d="M13.5 15.5a10 10 0 0 1 13 0" stroke-width="3.5" fill="none"/><path d="M10.5 11a15 15 0 0 1 19 0" stroke-width="3.5" fill="none"/></svg>`;
})();
1167
// Raw favicon bytes, resolved once at module load from the same two candidate
// locations as ICON_SVG. Unlike ICON_SVG there is no embedded fallback: the
// value is null when no candidate file exists, so callers must handle null.
var FAVICON_ICO = (() => {
  const candidates = [
    join2(PKG_ROOT, "public/images/favicon.ico"),
    join2(PKG_ROOT, "../public/images/favicon.ico")
  ];
  for (const p of candidates) {
    // Binary read (no encoding) -- favicon.ico is not text.
    if (existsSync3(p)) return readFileSync3(p);
  }
  return null;
})();
1177
+ var SERVICE_DETAIL_MAP = {
1178
+ Traefik: {
1179
+ description: "Go-based open-source reverse proxy and load balancer",
1180
+ license: "MIT",
1181
+ docs: "https://traefik.io/traefik/",
1182
+ features: [
1183
+ "Docker label-based automatic service discovery",
1184
+ "Let's Encrypt certificate auto-renewal",
1185
+ "Built-in web dashboard for route monitoring",
1186
+ "Middleware chain: BasicAuth, Rate Limit, IP Whitelist",
1187
+ "HTTP to HTTPS automatic redirect"
1188
+ ],
1189
+ credentials: {
1190
+ method: "basicauth",
1191
+ summary: "No login in dev mode (--api.insecure=true). Use BasicAuth middleware for production.",
1192
+ command: "htpasswd -nb admin YOUR_PASSWORD"
1193
+ },
1194
+ tips: [
1195
+ "Remove --api.insecure=true in production and add BasicAuth or Authelia",
1196
+ "Set exposedbydefault=false and explicitly enable each service with traefik.enable=true",
1197
+ "Add --certificatesresolvers.le.acme.email=YOUR_EMAIL for Let's Encrypt"
1198
+ ]
1199
+ },
1200
+ "Traefik Dashboard": {
1201
+ description: "Built-in Traefik web UI for monitoring routes, services, and middleware",
1202
+ license: "MIT",
1203
+ docs: "https://doc.traefik.io/traefik/operations/dashboard/",
1204
+ features: [
1205
+ "Real-time view of HTTP/TCP routers",
1206
+ "Service health and load balancer status",
1207
+ "Middleware chain visualization"
1208
+ ],
1209
+ credentials: {
1210
+ method: "none",
1211
+ summary: "No authentication in dev mode (--api.insecure=true). Protected by BasicAuth in production."
1212
+ },
1213
+ tips: [
1214
+ "Dashboard URL requires trailing slash: /dashboard/",
1215
+ "Secure with BasicAuth middleware before exposing externally"
1216
+ ]
1217
+ },
1218
+ Gitea: {
1219
+ description: "Lightweight self-hosted Git service written in Go",
1220
+ license: "MIT",
1221
+ docs: "https://about.gitea.com/",
1222
+ features: [
1223
+ "GitHub-like web UI with issues, PRs, wiki, project boards",
1224
+ "Gitea Actions \u2014 GitHub Actions compatible CI/CD",
1225
+ "Low memory footprint (~200 MB)",
1226
+ "LDAP, OAuth2, SMTP authentication support",
1227
+ "PostgreSQL, MySQL, SQLite backend support"
1228
+ ],
1229
+ credentials: {
1230
+ method: "wizard",
1231
+ summary: 'First visit shows Installation Wizard. Create admin account in "Administrator Account Settings" section.',
1232
+ command: "docker exec -it brewnet-gitea gitea admin user create --username admin --password PASSWORD --email admin@brewnet.dev --admin"
1233
+ },
1234
+ tips: [
1235
+ "Set DISABLE_REGISTRATION=true to allow only admin-created accounts",
1236
+ "Set REQUIRE_SIGNIN_VIEW=true to prevent anonymous repo browsing",
1237
+ "SSH port mapped to 3022 to avoid conflict with host SSH (22)"
1238
+ ]
1239
+ },
1240
+ Nextcloud: {
1241
+ description: "Self-hosted cloud storage platform (Google Drive/Dropbox alternative)",
1242
+ license: "AGPL-3.0",
1243
+ docs: "https://nextcloud.com/",
1244
+ features: [
1245
+ "File sync, sharing, and collaboration",
1246
+ "200+ app extensions: calendar, contacts, notes, office docs",
1247
+ "WebDAV protocol support",
1248
+ "Desktop and mobile clients available"
1249
+ ],
1250
+ credentials: {
1251
+ method: "env",
1252
+ summary: "Uses admin credentials set in Pre-Step of brewnet init wizard.",
1253
+ command: 'docker exec -u www-data brewnet-nextcloud php occ user:add USERNAME --display-name="Display Name"'
1254
+ },
1255
+ tips: [
1256
+ "Redis connection recommended for file locking and cache performance",
1257
+ "Add all access domains/IPs to NEXTCLOUD_TRUSTED_DOMAINS",
1258
+ "Switch background jobs to cron: docker exec -u www-data brewnet-nextcloud php occ background:cron"
1259
+ ]
1260
+ },
1261
+ PostgreSQL: {
1262
+ description: "Advanced open-source relational database",
1263
+ license: "PostgreSQL (BSD-like)",
1264
+ docs: "https://www.postgresql.org/",
1265
+ features: [
1266
+ "Full ACID compliance with MVCC",
1267
+ "Native JSON/JSONB support",
1268
+ "Full-text search, PostGIS, time-series extensions",
1269
+ "Logical and physical replication"
1270
+ ],
1271
+ credentials: {
1272
+ method: "env",
1273
+ summary: "Configured via POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_DB environment variables.",
1274
+ command: "docker exec -it brewnet-postgresql psql -U brewnet -d brewnet_db"
1275
+ },
1276
+ tips: [
1277
+ "Internal network only (brewnet-internal) \u2014 no host port exposed",
1278
+ "Data persisted in named volume \u2014 safe across container restarts",
1279
+ "Use init SQL scripts in docker-entrypoint-initdb.d/ for multi-DB setup"
1280
+ ]
1281
+ },
1282
+ MySQL: {
1283
+ description: "Popular open-source relational database",
1284
+ license: "GPL-2.0",
1285
+ docs: "https://www.mysql.com/",
1286
+ features: [
1287
+ "InnoDB storage engine with ACID transactions",
1288
+ "JSON support and document store",
1289
+ "Replication and clustering",
1290
+ "Widely supported by web applications"
1291
+ ],
1292
+ credentials: {
1293
+ method: "env",
1294
+ summary: "Configured via MYSQL_ROOT_PASSWORD, MYSQL_DATABASE, MYSQL_USER, MYSQL_PASSWORD environment variables.",
1295
+ command: "docker exec -it brewnet-mysql mysql -u brewnet -p brewnet_db"
1296
+ },
1297
+ tips: [
1298
+ "Internal network only (brewnet-internal) \u2014 no host port exposed",
1299
+ "Root password required at first startup",
1300
+ "Init SQL scripts run once from docker-entrypoint-initdb.d/"
1301
+ ]
1302
+ },
1303
+ Redis: {
1304
+ description: "In-memory key-value store for caching and message brokering",
1305
+ license: "BSD-3",
1306
+ docs: "https://redis.io/",
1307
+ features: [
1308
+ "Session storage, cache, message queue, Pub/Sub",
1309
+ "Single-threaded event loop \u2014 100K+ ops/sec",
1310
+ "RDB + AOF persistence support",
1311
+ "Used by Nextcloud file locking and Gitea caching"
1312
+ ],
1313
+ credentials: {
1314
+ method: "env",
1315
+ summary: "No traditional user accounts. Optionally secured with --requirepass flag.",
1316
+ command: "docker exec -it brewnet-redis redis-cli ping"
1317
+ },
1318
+ tips: [
1319
+ "Set --maxmemory and --maxmemory-policy to prevent unbounded memory growth",
1320
+ "Internal network only \u2014 no host port exposed",
1321
+ "Redis 6+ supports ACL for multi-user access control"
1322
+ ]
1323
+ },
1324
+ pgAdmin: {
1325
+ description: "Web-based administration tool for PostgreSQL",
1326
+ license: "PostgreSQL (BSD-like)",
1327
+ docs: "https://www.pgadmin.org/",
1328
+ features: [
1329
+ "SQL editor with query execution and plan visualization",
1330
+ "Table, index, view, and function GUI management",
1331
+ "Backup and restore (pg_dump, pg_restore)",
1332
+ "Multi-server management via server groups"
1333
+ ],
1334
+ credentials: {
1335
+ method: "env",
1336
+ summary: "Uses admin credentials set in Pre-Step. Email format: {username}@brewnet.dev. Register the DB server after first login."
1337
+ },
1338
+ tips: [
1339
+ 'Connect to PostgreSQL using hostname "postgresql" (Docker container name), port 5432',
1340
+ "Set PGADMIN_CONFIG_SERVER_MODE=False to skip login in dev mode",
1341
+ "Mount servers.json to auto-register DB servers on startup"
1342
+ ]
1343
+ },
1344
+ Jellyfin: {
1345
+ description: "Open-source media server (Plex/Emby free alternative)",
1346
+ license: "GPL-2.0",
1347
+ docs: "https://jellyfin.org/",
1348
+ features: [
1349
+ "Movies, TV, music, photos, and live TV/DVR",
1350
+ "Hardware transcoding (Intel QSV, NVIDIA NVENC, VAAPI)",
1351
+ "Clients for web, Android, iOS, Roku, Fire TV, Kodi",
1352
+ "DLNA support"
1353
+ ],
1354
+ credentials: {
1355
+ method: "wizard",
1356
+ summary: "First visit shows Setup Wizard. Create admin account in step 2 (User)."
1357
+ },
1358
+ tips: [
1359
+ "Mount media folders as read-only (:ro) for safety",
1360
+ "Add --device=/dev/dri:/dev/dri for Intel GPU hardware transcoding",
1361
+ "DLNA requires --net=host (does not work in Docker bridge mode)"
1362
+ ]
1363
+ },
1364
+ "SSH Server": {
1365
+ description: "Industry-standard remote access via OpenSSH in Docker",
1366
+ license: "BSD",
1367
+ docs: "https://www.openssh.com/",
1368
+ features: [
1369
+ "Key-based authentication (more secure than passwords)",
1370
+ "Built-in SFTP \u2014 no separate FTP server needed",
1371
+ "Port forwarding and tunneling support",
1372
+ "Remote management entry point for Brewnet containers"
1373
+ ],
1374
+ credentials: {
1375
+ method: "env",
1376
+ summary: "Uses admin username set in Pre-Step. Password auth enabled (PASSWORD_ACCESS=true); switch to key-only after setup.",
1377
+ command: "ssh -p 2222 USER@localhost"
1378
+ },
1379
+ tips: [
1380
+ "Switch to key-only auth after initial setup: set PASSWORD_ACCESS=false",
1381
+ "Port 2222 avoids conflict with host SSH (port 22)",
1382
+ "SFTP runs as SSH subsystem \u2014 no separate container needed"
1383
+ ]
1384
+ },
1385
+ FileBrowser: {
1386
+ description: "Lightweight web-based file manager written in Go",
1387
+ license: "Apache-2.0",
1388
+ docs: "https://filebrowser.org/",
1389
+ features: [
1390
+ "Upload, download, edit, and delete files via browser",
1391
+ "Multi-user support with per-user directory scoping",
1392
+ "Built-in code editor for text files",
1393
+ "Share link generation and shell command execution"
1394
+ ],
1395
+ credentials: {
1396
+ method: "none",
1397
+ summary: "Default user: admin. Random password printed to container logs on first start.",
1398
+ command: 'docker logs brewnet-filebrowser | grep "password"'
1399
+ },
1400
+ tips: [
1401
+ "Initial password is shown only once in logs \u2014 change it immediately",
1402
+ "Set per-user Scope to restrict directory access",
1403
+ "All settings and user data stored in filebrowser.db file"
1404
+ ]
1405
+ },
1406
+ "MinIO Console": {
1407
+ description: "S3-compatible object storage with a web console",
1408
+ license: "AGPL-3.0",
1409
+ docs: "https://min.io/",
1410
+ features: [
1411
+ "Amazon S3-compatible API",
1412
+ "Web console for bucket and object management",
1413
+ "Erasure coding and bitrot protection",
1414
+ "Multi-user IAM with policies"
1415
+ ],
1416
+ credentials: {
1417
+ method: "env",
1418
+ summary: "Uses admin credentials set in Pre-Step of brewnet init wizard."
1419
+ },
1420
+ tips: [
1421
+ "Console on port 9001, API on port 9000",
1422
+ "Create IAM users with limited policies for application access",
1423
+ "Use mc (MinIO Client) CLI for scripted bucket management"
1424
+ ]
1425
+ },
1426
+ Cloudflared: {
1427
+ description: "Cloudflare Tunnel daemon \u2014 exposes local services to the internet securely",
1428
+ license: "Apache-2.0",
1429
+ docs: "https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/",
1430
+ features: [
1431
+ "No port forwarding or public IP required",
1432
+ "Automatic SSL/TLS via Cloudflare",
1433
+ "Quick Tunnel (*.trycloudflare.com) or Named Tunnel with custom domain",
1434
+ "DDoS protection included"
1435
+ ],
1436
+ credentials: {
1437
+ method: "none",
1438
+ summary: "No login. Quick Tunnel needs no account. Named Tunnel uses TUNNEL_TOKEN from Cloudflare API."
1439
+ },
1440
+ tips: [
1441
+ "Quick Tunnel URL changes on every restart \u2014 use Named Tunnel for permanent access",
1442
+ "Check tunnel status: brewnet domain tunnel status",
1443
+ "Audit logs saved to ~/.brewnet/logs/tunnel.log"
1444
+ ]
1445
+ },
1446
+ Nginx: {
1447
+ description: "High-performance HTTP and reverse proxy server",
1448
+ license: "BSD-2",
1449
+ docs: "https://nginx.org/",
1450
+ features: [
1451
+ "Event-driven architecture \u2014 handles 10K+ concurrent connections",
1452
+ "Static file serving and reverse proxy",
1453
+ "Load balancing with multiple algorithms",
1454
+ "SSL/TLS termination"
1455
+ ],
1456
+ credentials: {
1457
+ method: "none",
1458
+ summary: "No built-in authentication. Use auth_basic module or upstream auth for protection."
1459
+ },
1460
+ tips: [
1461
+ "Default config serves welcome page on port 80",
1462
+ "Use location blocks for path-based routing to upstream services",
1463
+ "Reload config without downtime: nginx -s reload"
1464
+ ]
1465
+ },
1466
+ Caddy: {
1467
+ description: "Modern web server with automatic HTTPS",
1468
+ license: "Apache-2.0",
1469
+ docs: "https://caddyserver.com/",
1470
+ features: [
1471
+ "Automatic HTTPS with Let's Encrypt (zero config)",
1472
+ "HTTP/2 and HTTP/3 support out of the box",
1473
+ "Simple Caddyfile configuration",
1474
+ "Reverse proxy with health checks"
1475
+ ],
1476
+ credentials: {
1477
+ method: "none",
1478
+ summary: "No built-in authentication. Use basicauth directive in Caddyfile for protection."
1479
+ },
1480
+ tips: [
1481
+ "Caddyfile syntax is simpler than Nginx \u2014 great for small setups",
1482
+ "Automatic certificate management requires ports 80 and 443",
1483
+ "Use caddy reload for config changes without downtime"
1484
+ ]
1485
+ },
1486
+ Valkey: {
1487
+ description: "Open-source, high-performance Redis-compatible in-memory data store (Linux Foundation fork)",
1488
+ license: "BSD-3",
1489
+ docs: "https://valkey.io/",
1490
+ features: [
1491
+ "Drop-in Redis replacement \u2014 fully API compatible",
1492
+ "Session storage, cache, message queue, Pub/Sub",
1493
+ "RDB + AOF persistence support",
1494
+ "Active community-driven development post Redis license change"
1495
+ ],
1496
+ credentials: {
1497
+ method: "env",
1498
+ summary: "No traditional user accounts. Optionally secured with --requirepass flag.",
1499
+ command: "docker exec -it brewnet-valkey valkey-cli ping"
1500
+ },
1501
+ tips: [
1502
+ "Set --maxmemory and --maxmemory-policy to prevent unbounded memory growth",
1503
+ "Internal network only \u2014 no host port exposed",
1504
+ "Use OBJECT ENCODING to inspect memory layout of individual keys"
1505
+ ]
1506
+ },
1507
+ KeyDB: {
1508
+ description: "Multithreaded Redis-compatible in-memory database with higher throughput",
1509
+ license: "BSD-3",
1510
+ docs: "https://docs.keydb.dev/",
1511
+ features: [
1512
+ "Multi-threaded architecture \u2014 higher throughput than Redis on multi-core CPUs",
1513
+ "Active-Active replication for multi-master setups",
1514
+ "FLASH storage support for large datasets exceeding RAM",
1515
+ "Drop-in Redis replacement \u2014 fully API compatible"
1516
+ ],
1517
+ credentials: {
1518
+ method: "env",
1519
+ summary: "No traditional user accounts. Optionally secured with requirepass config.",
1520
+ command: "docker exec -it brewnet-keydb keydb-cli ping"
1521
+ },
1522
+ tips: [
1523
+ "Set server-threads to number of CPU cores for best performance",
1524
+ "Internal network only \u2014 no host port exposed",
1525
+ "Use keydb-cli --stat to monitor live throughput"
1526
+ ]
1527
+ }
1528
+ };
1529
// Maps alternate/legacy service display names to the canonical keys used by
// the detail catalog, so lookups succeed for either spelling.
var NAME_ALIASES = {
  "OpenSSH Server": "SSH Server",
  "Cloudflare Tunnel": "Cloudflared",
  "MinIO": "MinIO Console"
};
1534
// Shared Dockerode client used by every admin-server handler below.
var docker = new Dockerode();
// Core services the dashboard refuses to remove (enforced in handleRemoveService)
// and hides from the install catalog (handleGetCatalog).
var REQUIRED_SERVICES = /* @__PURE__ */ new Set(["traefik", "nginx", "caddy", "gitea"]);
// Infrastructure containers excluded entirely from the services listing.
var INTERNAL_SERVICES = /* @__PURE__ */ new Set(["brewnet-welcome", "brewnet-landing", "cloudflared"]);
// Services with no browsable HTTP endpoint — handleGetServices emits no local URL for these.
var NO_HTTP_SERVICES = /* @__PURE__ */ new Set([
  "postgresql",
  "mysql",
  "mariadb",
  "openssh-server"
]);
// Local URL overrides for services reached through Traefik path prefixes
// (used as the default urlMap in handleGetServices).
var TRAEFIK_PATH_SERVICES = {
  traefik: "http://localhost/dashboard/",
  gitea: "http://localhost/git",
  nextcloud: "http://localhost/cloud",
  pgadmin: "http://localhost:5050/pgadmin"
};
// Published ports treated as SSH and therefore skipped by getPrimaryPort.
var KNOWN_SSH_PORTS = /* @__PURE__ */ new Set([22, 2222, 3022]);
1550
// Returns the lowest published TCP port of a container, skipping ports listed
// in KNOWN_SSH_PORTS, or null when no usable public TCP port exists.
function getPrimaryPort(container) {
  let lowest = null;
  for (const binding of container.Ports ?? []) {
    if (binding.Type !== "tcp") continue;
    if (!binding.PublicPort) continue;
    if (KNOWN_SSH_PORTS.has(binding.PublicPort)) continue;
    if (lowest === null || binding.PublicPort < lowest) lowest = binding.PublicPort;
  }
  return lowest;
}
1554
// Serializes `data` and writes a complete JSON HTTP response, setting an
// explicit Content-Length so keep-alive clients know where the body ends.
function json(res, status, data) {
  const body = JSON.stringify(data);
  res.writeHead(status, {
    "Content-Type": "application/json",
    "Content-Length": Buffer.byteLength(body)
  });
  res.end(body);
}
1559
// Buffers the full request body into a string.
//
// Fix: the original listened only for "data"/"end", so a client abort or
// stream error left the returned promise pending forever, leaking the
// request handler. We now resolve (best-effort, with whatever was received)
// on "error" as well — this helper never rejects, matching existing callers.
// @param req - incoming request stream (http.IncomingMessage-like)
// @returns {Promise<string>} concatenated body text (possibly partial on error)
async function readBody(req) {
  return new Promise((resolve2) => {
    let body = "";
    req.on("data", (c) => body += c);
    req.on("end", () => resolve2(body));
    // Don't hang the caller when the client disconnects mid-upload.
    req.on("error", () => resolve2(body));
  });
}
1566
// GET /api/services — lists docker-compose containers as dashboard entries.
// For each container it derives: status, primary published port, a local URL
// (with Traefik path-prefix handling), an external quick-tunnel URL, and
// whether the service may be removed.
// @param urlMap          per-service local URL overrides (defaults to Traefik paths)
// @param quickTunnelUrl  public *.trycloudflare.com base URL, "" when none
// @param allowedDirs     optional whitelist of compose working dirs under _projectPath
async function handleGetServices(_req, res, _parts, _body, _projectPath, urlMap = TRAEFIK_PATH_SERVICES, quickTunnelUrl = "", allowedDirs) {
  try {
    const allContainers = await docker.listContainers({ all: true });
    const services = [];
    for (const c of allContainers) {
      // Only compose-managed containers are listed; infra containers are hidden.
      const composeService = c.Labels?.["com.docker.compose.service"];
      if (!composeService) continue;
      if (INTERNAL_SERVICES.has(composeService)) continue;
      const workingDir = c.Labels?.["com.docker.compose.project.working_dir"] ?? "";
      // When a whitelist is provided, skip project-local stacks outside it
      // (containers from unrelated directories still pass through).
      if (allowedDirs && allowedDirs.size > 0) {
        if (workingDir && workingDir.startsWith(_projectPath) && !allowedDirs.has(workingDir)) {
          continue;
        }
      }
      const def = getServiceDefinition(composeService);
      const s = c.State;
      const status = s === "running" ? "running" : s === "exited" ? "stopped" : "error";
      // Prefer the actually-published port; fall back to the registry default.
      const port = getPrimaryPort(c) ?? def?.ports?.[0] ?? null;
      const labels = c.Labels ?? {};
      // Find the Traefik router rule: the quicktunnel-specific router wins,
      // otherwise any router rule containing a PathPrefix matcher.
      const primaryRouterKey = `traefik.http.routers.quicktunnel-${composeService}.rule`;
      const routerRule = labels[primaryRouterKey] && String(labels[primaryRouterKey]).includes("PathPrefix") ? [primaryRouterKey, labels[primaryRouterKey]] : Object.entries(labels).find(
        ([k, v]) => k.includes("traefik.http.routers.") && k.endsWith(".rule") && String(v).includes("PathPrefix")
      );
      let traefikPath = "";
      if (routerRule) {
        const pathMatch = String(routerRule[1]).match(/PathPrefix\(`([^`]+)`\)/);
        if (pathMatch) traefikPath = pathMatch[1];
      }
      const hasStripPrefix = Object.keys(labels).some(
        (k) => k.includes(".stripprefix.")
      );
      // If Traefik strips the prefix, the backend itself serves at "/".
      const localBasePath = traefikPath && !hasStripPrefix ? traefikPath : "";
      let externalUrl = null;
      const qtUrl = quickTunnelUrl;
      if (qtUrl && traefikPath) {
        let extPath = traefikPath;
        const stackLabel = labels["com.brewnet.stack"] ?? "";
        // Next.js API-only backends get pointed at a known route — TODO confirm
        // this /api/hello probe path is still what the stack template serves.
        if (stackLabel === "nodejs-nextjs" || composeService === "backend" && extPath.includes("nextjs-app")) {
          extPath += "/api/hello";
        }
        externalUrl = qtUrl.replace(/\/$/, "") + extPath;
      }
      // Fallback external paths for well-known services without router labels.
      if (!externalUrl && qtUrl) {
        const EXT_PATH_MAP = {
          traefik: "",
          gitea: "/git",
          nextcloud: "/cloud",
          pgadmin: "/pgadmin",
          jellyfin: "/jellyfin",
          filebrowser: "/files",
          minio: "/minio"
        };
        if (EXT_PATH_MAP[composeService] !== void 0) {
          externalUrl = qtUrl.replace(/\/$/, "") + EXT_PATH_MAP[composeService];
        }
      }
      // stackId = working dir relative to the project root, or undefined.
      const stackId = workingDir && workingDir.startsWith(_projectPath) ? workingDir.slice(_projectPath.length).replace(/^[/\\]/, "") || void 0 : void 0;
      // Generic boilerplate service names are disambiguated with the project name.
      const GENERIC_BOILERPLATE_SERVICES = /* @__PURE__ */ new Set(["frontend", "backend"]);
      const composeProject = c.Labels?.["com.docker.compose.project"] ?? "";
      const isGeneric = GENERIC_BOILERPLATE_SERVICES.has(composeService) && !!composeProject;
      const serviceId = isGeneric ? `${composeProject}-${composeService}` : composeService;
      const serviceName = isGeneric ? `${composeProject}-${composeService === "frontend" ? "front" : "back"}` : def?.name ?? composeService;
      services.push({
        id: serviceId,
        name: serviceName,
        type: def ? inferType(composeService) : "unknown",
        status,
        cpu: "\u2014",
        memory: "\u2014",
        uptime: c.Status?.startsWith("Up") ? c.Status.replace(/^Up /, "") : "\u2014",
        port: port ?? null,
        // Show a local URL for any service with a public HTTP port.
        // Database/queue services (non-HTTP) are excluded via NO_HTTP_SERVICES.
        // urlMap overrides apply first (e.g. Traefik-path services like gitea → /git).
        // localBasePath: Next.js basePath stacks serve at /apps/{name} even locally.
        url: port && !NO_HTTP_SERVICES.has(composeService) ? urlMap[composeService] ?? `http://localhost:${port}${localBasePath}` : null,
        externalUrl,
        removable: !REQUIRED_SERVICES.has(composeService),
        stackId
      });
    }
    const running = services.filter((s) => s.status === "running").length;
    json(res, 200, {
      services,
      summary: { total: services.length, running, stopped: services.length - running }
    });
  } catch (err) {
    json(res, 500, { success: false, error: String(err), code: "BN001" });
  }
}
1656
// Classifies a compose service id into a coarse dashboard category.
// Unrecognized ids default to the generic "app" bucket.
function inferType(id) {
  const categories = {
    web: ["traefik", "nginx", "caddy"],
    db: ["postgresql", "mysql"],
    file: ["nextcloud", "minio", "filebrowser"],
    media: ["jellyfin"],
    git: ["gitea"],
    ssh: ["openssh-server"]
  };
  for (const [type, ids] of Object.entries(categories)) {
    if (ids.includes(id)) return type;
  }
  return "app";
}
1665
// POST /api/services/.../{id}/{start|stop} — starts or stops a compose container.
// parts[3] is the service id and parts[4] the action. Responds 400 on invalid
// input or a no-op transition, 404 when no container carries the matching
// compose label, 500 on docker errors.
async function handleServiceAction(_req, res, parts, _body, _projectPath) {
  const serviceId = parts[3];
  const action = parts[4];
  if (!serviceId || !["start", "stop"].includes(action)) {
    json(res, 400, { success: false, error: "Invalid request" });
    return;
  }
  try {
    const all = await docker.listContainers({ all: true });
    const target = all.find(
      (c) => c.Labels?.["com.docker.compose.service"] === serviceId
    );
    if (!target) {
      json(res, 404, { success: false, error: "Service not found", code: "BN008" });
      return;
    }
    // Reject no-op transitions so the UI receives a distinct error code.
    const isRunning = target.State === "running";
    if (action === "start" && isRunning) {
      json(res, 400, { success: false, error: "Service is already running", code: "ALREADY_RUNNING" });
      return;
    }
    if (action === "stop" && !isRunning) {
      json(res, 400, { success: false, error: "Service is not running", code: "NOT_RUNNING" });
      return;
    }
    const handle = docker.getContainer(target.Id);
    await (action === "start" ? handle.start() : handle.stop());
    json(res, 200, { success: true, id: serviceId, status: action === "start" ? "running" : "stopped" });
  } catch (err) {
    json(res, 500, { success: false, error: String(err), code: "BN001" });
  }
}
1701
// POST /api/services/install — adds a service (by id) to the compose stack.
// Body is JSON: { id }. Responds 202 on success, 409 if the service already
// exists, 400 on missing id, 500 on parse or install failure.
async function handleInstallService(_req, res, _parts, body, projectPath) {
  try {
    const { id } = JSON.parse(body);
    if (!id) {
      json(res, 400, { success: false, error: "Missing service id" });
      return;
    }
    const result = await addService(id, projectPath);
    if (!result.success) {
      // "already" in the error text distinguishes duplicates from real failures.
      const alreadyExists = result.error?.includes("already") ?? false;
      json(res, alreadyExists ? 409 : 500, {
        success: false,
        error: result.error,
        code: alreadyExists ? "ALREADY_EXISTS" : "BN006"
      });
      return;
    }
    json(res, 202, { success: true, id, status: "installed", message: `Service ${id} added` });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
1719
// DELETE /api/services/.../{id}[?purge=true] — removes a service from the stack.
// Refuses anything in REQUIRED_SERVICES. With purge=true the service's data
// volume is deleted as well; otherwise data is preserved.
async function handleRemoveService(req, res, parts, _body, projectPath) {
  const serviceId = parts[3];
  if (!serviceId) {
    json(res, 400, { success: false, error: "Missing service id" });
    return;
  }
  if (REQUIRED_SERVICES.has(serviceId)) {
    json(res, 400, { success: false, error: `Cannot remove required service: ${serviceId}`, code: "REQUIRED_SERVICE" });
    return;
  }
  // Only the query string matters; the host is a dummy base for URL parsing.
  const purge = new URL(req.url ?? "/", "http://localhost").searchParams.get("purge") === "true";
  try {
    const result = await removeService(serviceId, projectPath, { purge });
    if (!result.success) {
      const notFound = result.error?.includes("not found");
      json(res, notFound ? 404 : 500, { success: false, error: result.error, code: "BN008" });
      return;
    }
    json(res, 200, { success: true, id: serviceId, dataPreserved: !purge });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
1742
// GET /api/catalog — lists installable services from SERVICE_REGISTRY,
// excluding required core services and flagging which are already installed
// (i.e. have a compose-labelled container, running or not).
async function handleGetCatalog(_req, res, _parts, _body, _projectPath) {
  try {
    const containers = await docker.listContainers({ all: true });
    const installed = new Set(
      containers.map((c) => c.Labels?.["com.docker.compose.service"]).filter(Boolean)
    );
    const catalog = [];
    for (const def of SERVICE_REGISTRY.values()) {
      if (REQUIRED_SERVICES.has(def.id)) continue;
      catalog.push({
        id: def.id,
        name: def.name,
        description: "",
        category: inferType(def.id),
        image: def.image,
        ramEstimateMB: def.ramMB,
        installed: installed.has(def.id)
      });
    }
    json(res, 200, { catalog });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
1764
// GET /api/backup — lists existing backups; any other method creates one.
// Backups live under ~/.brewnet/backups regardless of the project path.
async function handleBackup(req, res, _parts, _body, projectPath) {
  const backupsDir = join2(homedir2(), ".brewnet", "backups");
  if (req.method === "GET") {
    try {
      json(res, 200, { backups: listBackups(backupsDir) });
    } catch (err) {
      json(res, 500, { success: false, error: String(err) });
    }
    return;
  }
  // Non-GET (POST) — create a new backup synchronously and report its id.
  try {
    const record = createBackup(projectPath, backupsDir);
    json(res, 202, { success: true, backupId: record.id, status: "completed" });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
1782
// Per-app ring buffers of domain-connect log lines, capped at 200 entries
// each (see writeDomainLog).
var domainOpLogs = /* @__PURE__ */ new Map();
// Per-app listener callbacks notified as each new log line is written.
var domainOpListeners = /* @__PURE__ */ new Map();
1784
// Records one domain-connect log line for an app: mirrors it to the shared
// logger, appends it (tagged) to the app's capped in-memory buffer, and
// notifies any registered listeners with the tagged line.
function writeDomainLog(appName, line) {
  const tagged = `[domain-connect] ${line}`;
  logger.info("domain", `[${appName}] ${line}`);
  let buf = domainOpLogs.get(appName);
  if (!buf) {
    buf = [];
    domainOpLogs.set(appName, buf);
  }
  buf.push({ line: tagged, ts: Date.now() });
  // Cap the buffer at 200 entries, dropping the oldest.
  while (buf.length > 200) buf.shift();
  const listeners = domainOpListeners.get(appName);
  if (listeners) {
    for (const fn of listeners) fn(tagged);
  }
}
1793
+ function createAdminServer(options = {}) {
1794
+ const port = options.port ?? 8088;
1795
+ let projectPath = options.projectPath ?? process.cwd();
1796
+ let wizardState = null;
1797
+ const last = getLastProject();
1798
+ if (last) {
1799
+ const state = loadState(last);
1800
+ if (state) {
1801
+ wizardState = state;
1802
+ if (!options.projectPath && state.projectPath) projectPath = state.projectPath;
1803
+ }
1804
+ }
1805
+ if (projectPath.startsWith("~/") || projectPath === "~") {
1806
+ projectPath = join2(homedir2(), projectPath.slice(1));
1807
+ }
1808
+ const username = wizardState?.admin?.username ?? "";
1809
+ const password = wizardState?.admin?.password ?? "";
1810
+ const maskUser = (u) => u.length > 2 ? u.slice(0, -2) + "**" : "**";
1811
+ const maskPass = (p) => p.length > 1 ? p[0] + "*".repeat(p.length - 1) : "********";
1812
+ const allowedWorkingDirs = /* @__PURE__ */ new Set();
1813
+ allowedWorkingDirs.add(projectPath);
1814
+ try {
1815
+ const bpMetaPath2 = join2(projectPath, ".brewnet-boilerplate.json");
1816
+ if (existsSync3(bpMetaPath2)) {
1817
+ const raw2 = JSON.parse(readFileSync3(bpMetaPath2, "utf-8"));
1818
+ const stacks2 = Array.isArray(raw2) ? raw2 : raw2.stackId ? [raw2] : [];
1819
+ for (const s of stacks2) {
1820
+ if (s.appDir) allowedWorkingDirs.add(s.appDir);
1821
+ }
1822
+ }
1823
+ const appsJsonPath = join2(homedir2(), ".brewnet", "apps.json");
1824
+ if (existsSync3(appsJsonPath)) {
1825
+ const apps = JSON.parse(readFileSync3(appsJsonPath, "utf-8"));
1826
+ for (const app of apps) {
1827
+ if (app.appDir) allowedWorkingDirs.add(app.appDir);
1828
+ }
1829
+ }
1830
+ } catch {
1831
+ }
1832
+ const dashConfig = {
1833
+ adminUsername: username ? maskUser(username) : "**",
1834
+ passwordHint: password ? maskPass(password) : "********",
1835
+ domainProvider: wizardState?.domain?.provider ?? "local",
1836
+ quickTunnelUrl: wizardState?.domain?.cloudflare?.quickTunnelUrl ?? "",
1837
+ zoneName: wizardState?.domain?.cloudflare?.zoneName ?? "",
1838
+ tunnelId: wizardState?.domain?.cloudflare?.tunnelId ?? ""
1839
+ };
1840
+ const runtimeUrlMap = {
1841
+ ...TRAEFIK_PATH_SERVICES,
1842
+ jellyfin: "http://localhost:8096/jellyfin/web/"
1843
+ };
1844
+ let quickTunnelDetected = !!dashConfig.quickTunnelUrl;
1845
+ async function detectQuickTunnelUrl() {
1846
+ if (quickTunnelDetected) return;
1847
+ quickTunnelDetected = true;
1848
+ try {
1849
+ const containers = await docker.listContainers({ all: true });
1850
+ const cf = containers.find(
1851
+ (c) => c.Labels?.["com.docker.compose.service"] === "cloudflared"
1852
+ );
1853
+ if (!cf || cf.State !== "running") return;
1854
+ const container = docker.getContainer(cf.Id);
1855
+ const logBuf = await container.logs({ stdout: true, stderr: true, tail: 50 });
1856
+ const logStr = logBuf.toString("utf-8");
1857
+ const match = logStr.match(/https?:\/\/([\w]+-[\w][\w-]*\.trycloudflare\.com)/i);
1858
+ if (match) {
1859
+ dashConfig.quickTunnelUrl = `https://${match[1]}`;
1860
+ dashConfig.domainProvider = "quick-tunnel";
1861
+ }
1862
+ } catch {
1863
+ }
1864
+ }
1865
+ let credentialsDetected = !!(username && password);
1866
+ async function detectCredentials() {
1867
+ if (credentialsDetected) return;
1868
+ credentialsDetected = true;
1869
+ try {
1870
+ const containers = await docker.listContainers({ all: true });
1871
+ const nc = containers.find(
1872
+ (c) => c.Labels?.["com.docker.compose.service"] === "nextcloud"
1873
+ );
1874
+ if (!nc) return;
1875
+ const info = await docker.getContainer(nc.Id).inspect();
1876
+ const envArr = info.Config?.Env ?? [];
1877
+ let u = "";
1878
+ let p = "";
1879
+ for (const entry of envArr) {
1880
+ if (!u && entry.startsWith("NEXTCLOUD_ADMIN_USER=")) {
1881
+ u = entry.split("=").slice(1).join("=");
1882
+ }
1883
+ if (!p && entry.startsWith("NEXTCLOUD_ADMIN_PASSWORD=")) {
1884
+ p = entry.split("=").slice(1).join("=");
1885
+ }
1886
+ }
1887
+ if (u || p) {
1888
+ dashConfig.adminUsername = maskUser(u || "admin");
1889
+ dashConfig.passwordHint = maskPass(p);
1890
+ }
1891
+ } catch {
1892
+ }
1893
+ }
1894
+ const server = createServer(async (req, res) => {
1895
+ const url = req.url ?? "/";
1896
+ const parts = url.split("?")[0].split("/").filter(Boolean);
1897
+ const body = await readBody(req);
1898
+ res.setHeader("Access-Control-Allow-Origin", "*");
1899
+ res.setHeader("Access-Control-Allow-Methods", "GET, POST, DELETE, OPTIONS");
1900
+ res.setHeader("Access-Control-Allow-Headers", "Content-Type, X-Admin-Password");
1901
+ if (req.method === "OPTIONS") {
1902
+ res.writeHead(204);
1903
+ res.end();
1904
+ return;
1905
+ }
1906
+ if (req.method === "GET" && url === "/icon.svg") {
1907
+ res.writeHead(200, { "Content-Type": "image/svg+xml", "Cache-Control": "public, max-age=86400" });
1908
+ res.end(ICON_SVG);
1909
+ return;
1910
+ }
1911
+ if (req.method === "GET" && url === "/favicon.ico") {
1912
+ if (FAVICON_ICO) {
1913
+ res.writeHead(200, { "Content-Type": "image/x-icon", "Cache-Control": "public, max-age=86400" });
1914
+ res.end(FAVICON_ICO);
1915
+ } else {
1916
+ res.writeHead(200, { "Content-Type": "image/svg+xml", "Cache-Control": "public, max-age=86400" });
1917
+ res.end(ICON_SVG);
1918
+ }
1919
+ return;
1920
+ }
1921
+ if (req.method === "GET" && url.startsWith("/assets/")) {
1922
+ const pathname = url.split("?")[0];
1923
+ const safePath = resolve(ADMIN_UI_DIST, "." + pathname);
1924
+ if (!safePath.startsWith(ADMIN_UI_DIST)) {
1925
+ res.writeHead(403);
1926
+ res.end("Forbidden");
1927
+ return;
1928
+ }
1929
+ if (existsSync3(safePath) && statSync(safePath).isFile()) {
1930
+ serveStaticFile(safePath, res);
1931
+ return;
1932
+ }
1933
+ res.writeHead(404);
1934
+ res.end("Not Found");
1935
+ return;
1936
+ }
1937
+ if (req.method === "GET" && !url.startsWith("/api/")) {
1938
+ const pathname = url.split("?")[0];
1939
+ const exactPath = resolve(ADMIN_UI_DIST, "." + (pathname === "/" ? "/index.html" : pathname));
1940
+ if (exactPath.startsWith(ADMIN_UI_DIST) && existsSync3(exactPath) && statSync(exactPath).isFile()) {
1941
+ serveStaticFile(exactPath, res);
1942
+ return;
1943
+ }
1944
+ const indexPath = join2(ADMIN_UI_DIST, "index.html");
1945
+ if (existsSync3(indexPath)) {
1946
+ serveStaticFile(indexPath, res);
1947
+ return;
1948
+ }
1949
+ res.writeHead(503, { "Content-Type": "text/plain" });
1950
+ res.end("Admin UI not built. Run: pnpm --filter @brewnet/admin-ui build");
1951
+ return;
1952
+ }
1953
+ if (parts[0] === "api") {
1954
+ try {
1955
+ if (parts[1] === "health" && req.method === "GET") {
1956
+ if (wizardState?.admin?.password) {
1957
+ const provided = req.headers["x-admin-password"];
1958
+ if (!provided || provided !== wizardState.admin.password) {
1959
+ json(res, 401, { error: "Unauthorized", message: "Admin password required" });
1960
+ return;
1961
+ }
1962
+ }
1963
+ json(res, 200, { status: "ok", version: "1.0.1" });
1964
+ return;
1965
+ }
1966
+ if (parts[1] === "services" && parts[2] === "catalog" && req.method === "GET") {
1967
+ json(res, 200, { catalog: SERVICE_DETAIL_MAP, aliases: NAME_ALIASES });
1968
+ return;
1969
+ }
1970
+ if (parts[1] === "config" && req.method === "GET") {
1971
+ await detectQuickTunnelUrl();
1972
+ await detectCredentials();
1973
+ json(res, 200, {
1974
+ adminUsername: dashConfig.adminUsername,
1975
+ passwordHint: dashConfig.passwordHint,
1976
+ domainProvider: dashConfig.domainProvider,
1977
+ quickTunnelUrl: dashConfig.quickTunnelUrl,
1978
+ zoneName: dashConfig.zoneName,
1979
+ tunnelId: dashConfig.tunnelId
1980
+ });
1981
+ return;
1982
+ }
1983
+ if (parts[1] === "services") {
1984
+ if (req.method === "GET" && parts.length === 2) {
1985
+ await handleGetServices(req, res, parts, body, projectPath, runtimeUrlMap, dashConfig.quickTunnelUrl, allowedWorkingDirs);
1986
+ return;
1987
+ }
1988
+ if (req.method === "POST" && parts[2] === "install") {
1989
+ await handleInstallService(req, res, parts, body, projectPath);
1990
+ return;
1991
+ }
1992
+ if (req.method === "POST" && parts[3] && ["start", "stop"].includes(parts[4] ?? "")) {
1993
+ await handleServiceAction(req, res, parts, body, projectPath);
1994
+ return;
1995
+ }
1996
+ if (req.method === "DELETE" && parts[3]) {
1997
+ await handleRemoveService(req, res, parts, body, projectPath);
1998
+ return;
1999
+ }
2000
+ }
2001
+ if (parts[1] === "catalog" && req.method === "GET") {
2002
+ await handleGetCatalog(req, res, parts, body, projectPath);
2003
+ return;
2004
+ }
2005
+ if (parts[1] === "backup") {
2006
+ await handleBackup(req, res, parts, body, projectPath);
2007
+ return;
2008
+ }
2009
+ if (parts[1] === "logs") {
2010
+ if (req.method === "GET" && parts[2] === "stats") {
2011
+ const stats = await getLogStats(projectPath);
2012
+ json(res, 200, stats);
2013
+ return;
2014
+ }
2015
+ if (req.method === "GET") {
2016
+ const qUrl = new URL(url, "http://localhost");
2017
+ const sources = qUrl.searchParams.get("source");
2018
+ const levels = qUrl.searchParams.get("level");
2019
+ const services = qUrl.searchParams.get("service");
2020
+ const since = qUrl.searchParams.get("since") ?? void 0;
2021
+ const until = qUrl.searchParams.get("until") ?? void 0;
2022
+ const search = qUrl.searchParams.get("search") ?? void 0;
2023
+ const limit = parseInt(qUrl.searchParams.get("limit") ?? "100", 10);
2024
+ const offset = parseInt(qUrl.searchParams.get("offset") ?? "0", 10);
2025
+ const result = await queryLogs(
2026
+ {
2027
+ sources: sources ? [sources] : void 0,
2028
+ levels: levels ? [levels] : void 0,
2029
+ services: services ? [services] : void 0,
2030
+ since,
2031
+ until,
2032
+ search,
2033
+ limit: isNaN(limit) ? 100 : limit,
2034
+ offset: isNaN(offset) ? 0 : offset
2035
+ },
2036
+ projectPath
2037
+ );
2038
+ json(res, 200, result);
2039
+ return;
2040
+ }
2041
+ }
2042
+ if (parts[1] === "apps") {
2043
+ if (req.method === "GET" && parts.length === 2) {
2044
+ const apps = await listApps();
2045
+ const history = getDeployHistory();
2046
+ const historyByApp = /* @__PURE__ */ new Map();
2047
+ for (const h of history) {
2048
+ if (h.status === "success") historyByApp.set(h.appName, h);
2049
+ }
2050
+ const bpMetaPath = join2(projectPath, ".brewnet-boilerplate.json");
2051
+ let bpMetaMap = /* @__PURE__ */ new Map();
2052
+ if (existsSync3(bpMetaPath)) {
2053
+ try {
2054
+ const raw = JSON.parse(readFileSync3(bpMetaPath, "utf-8"));
2055
+ const metas = Array.isArray(raw) ? raw : [raw];
2056
+ for (const m of metas) bpMetaMap.set(m.stackId, m);
2057
+ } catch {
2058
+ }
2059
+ }
2060
+ const enrichedApps = apps.map((a) => {
2061
+ const lastDeploy = historyByApp.get(a.name) ?? null;
2062
+ const qt = dashConfig.quickTunnelUrl;
2063
+ const bpMeta = a.mode === "boilerplate" && a.stackId ? bpMetaMap.get(a.stackId) : void 0;
2064
+ const isNonUnified = bpMeta ? bpMeta.isUnified === false : !!(a.stackId && getStackById(a.stackId)?.isUnified === false);
2065
+ let localUrl;
2066
+ let externalUrl;
2067
+ let backendLocalUrl = null;
2068
+ let backendExternalUrl = null;
2069
+ if (isNonUnified) {
2070
+ let frontendPort = 3e3;
2071
+ const feEnvPath = join2(a.appDir, ".env");
2072
+ if (existsSync3(feEnvPath)) {
2073
+ const feEnvContent = readFileSync3(feEnvPath, "utf-8");
2074
+ const fePortMatch = feEnvContent.match(/^FRONTEND_PORT=(\d+)/m);
2075
+ if (fePortMatch) frontendPort = parseInt(fePortMatch[1], 10);
2076
+ }
2077
+ localUrl = `http://127.0.0.1:${frontendPort}`;
2078
+ externalUrl = qt ? `${qt.replace(/\/$/, "")}/apps/${a.name}-ui` : null;
2079
+ backendLocalUrl = a.port ? `http://127.0.0.1:${a.port}` : null;
2080
+ backendExternalUrl = qt ? `${qt.replace(/\/$/, "")}/apps/${a.name}` : null;
2081
+ } else {
2082
+ localUrl = a.port ? `http://localhost:${a.port}` : null;
2083
+ if (a.appDir) {
2084
+ const bp = detectBasePath(a.appDir);
2085
+ if (bp && localUrl) localUrl += bp;
2086
+ }
2087
+ externalUrl = qt ? `${qt.replace(/\/$/, "")}/apps/${a.name}` : null;
2088
+ }
2089
+ return { ...a, lastDeployedAt: lastDeploy?.deployedAt ?? null, localUrl, externalUrl, backendLocalUrl, backendExternalUrl };
2090
+ });
2091
+ logger.info("admin-server", `[GET /api/apps] returning ${apps.length} app(s): ${JSON.stringify(apps.map((a) => a.name))}`);
2092
+ json(res, 200, { apps: enrichedApps });
2093
+ return;
2094
+ }
2095
+ if (req.method === "GET" && parts[2] === "boilerplates") {
2096
+ const bpPath = join2(projectPath, ".brewnet-boilerplate.json");
2097
+ const metas = [];
2098
+ if (existsSync3(bpPath)) {
2099
+ const raw = JSON.parse(readFileSync3(bpPath, "utf-8"));
2100
+ const wizardMetas = Array.isArray(raw) ? raw : [raw];
2101
+ metas.push(...wizardMetas);
2102
+ }
2103
+ const allApps = await listApps();
2104
+ for (const app of allApps) {
2105
+ if (app.mode !== "boilerplate" || !app.stackId) continue;
2106
+ if (metas.some((m) => m.appDir && app.appDir && m.appDir === app.appDir)) continue;
2107
+ const envPath = join2(app.appDir, ".env");
2108
+ let frontendPort;
2109
+ let dbDriver;
2110
+ let dbUser;
2111
+ let dbName;
2112
+ if (existsSync3(envPath)) {
2113
+ const envContent = readFileSync3(envPath, "utf-8");
2114
+ const fpMatch = envContent.match(/^FRONTEND_PORT=(\d+)/m);
2115
+ if (fpMatch) frontendPort = parseInt(fpMatch[1], 10);
2116
+ const ddMatch = envContent.match(/^DB_DRIVER=(.+)/m);
2117
+ if (ddMatch) dbDriver = ddMatch[1].trim();
2118
+ const duMatch = envContent.match(/^DB_USER=(.+)/m);
2119
+ if (duMatch) dbUser = duMatch[1].trim();
2120
+ const dnMatch = envContent.match(/^DB_NAME=(.+)/m);
2121
+ if (dnMatch) dbName = dnMatch[1].trim();
2122
+ }
2123
+ const stackEntry = getStackById(app.stackId);
2124
+ metas.push({
2125
+ stackId: app.stackId,
2126
+ appDir: app.appDir,
2127
+ backendUrl: `http://127.0.0.1:${app.port}`,
2128
+ frontendUrl: frontendPort ? `http://127.0.0.1:${frontendPort}` : void 0,
2129
+ isUnified: stackEntry?.isUnified ?? false,
2130
+ lang: app.lang,
2131
+ dbDriver,
2132
+ dbUser,
2133
+ dbName,
2134
+ status: app.status
2135
+ });
2136
+ }
2137
+ logger.info("admin-server", `[GET /api/apps/boilerplates] returning ${metas.length} boilerplate(s) (wizard=${metas.length - allApps.filter((a) => a.mode === "boilerplate" && a.stackId).length}, create-app=${allApps.filter((a) => a.mode === "boilerplate" && a.stackId).length})`);
2138
+ json(res, 200, { boilerplates: metas });
2139
+ return;
2140
+ }
2141
+ if (req.method === "POST" && parts[2] === "boilerplates" && parts[3] && (parts[4] === "stop" || parts[4] === "start")) {
2142
+ const stackId = decodeURIComponent(parts[3]);
2143
+ const action = parts[4];
2144
+ const bpPath = join2(projectPath, ".brewnet-boilerplate.json");
2145
+ if (!existsSync3(bpPath)) {
2146
+ json(res, 404, { error: "No boilerplates found" });
2147
+ return;
2148
+ }
2149
+ const bpRaw = JSON.parse(readFileSync3(bpPath, "utf-8"));
2150
+ const bpMetas = Array.isArray(bpRaw) ? bpRaw : [bpRaw];
2151
+ const meta = bpMetas.find((m) => m.stackId === stackId);
2152
+ if (!meta) {
2153
+ json(res, 404, { error: `Boilerplate "${stackId}" not found` });
2154
+ return;
2155
+ }
2156
+ const { execa: execaBp } = await import("execa");
2157
+ if (action === "stop") {
2158
+ await execaBp("docker", ["compose", "down"], { cwd: meta.appDir });
2159
+ meta.status = "stopped";
2160
+ } else {
2161
+ await execaBp("docker", ["compose", "up", "-d"], { cwd: meta.appDir });
2162
+ meta.status = "running";
2163
+ }
2164
+ const { writeFileSync: writeFileSync4 } = await import("fs");
2165
+ writeFileSync4(bpPath, JSON.stringify(bpMetas, null, 2), "utf-8");
2166
+ json(res, 200, { success: true });
2167
+ return;
2168
+ }
2169
+ if (req.method === "POST" && parts[2] === "create") {
2170
+ const opts = JSON.parse(body);
2171
+ const jobId = await createApp(opts);
2172
+ json(res, 202, { jobId });
2173
+ return;
2174
+ }
2175
+ if (req.method === "GET" && parts[2] === "jobs" && parts[3]) {
2176
+ const job = getJobStatus(parts[3]);
2177
+ if (!job) {
2178
+ json(res, 404, { error: "Job not found" });
2179
+ return;
2180
+ }
2181
+ json(res, 200, job);
2182
+ return;
2183
+ }
2184
+ if (req.method === "POST" && parts[3] === "start") {
2185
+ await startApp(decodeURIComponent(parts[2] ?? ""));
2186
+ json(res, 200, { success: true });
2187
+ return;
2188
+ }
2189
+ if (req.method === "POST" && parts[3] === "stop") {
2190
+ await stopApp(decodeURIComponent(parts[2] ?? ""));
2191
+ json(res, 200, { success: true });
2192
+ return;
2193
+ }
2194
+ if (req.method === "DELETE" && parts[2]) {
2195
+ await removeApp2(parts[2]);
2196
+ json(res, 200, { success: true });
2197
+ return;
2198
+ }
2199
+ if (req.method === "GET" && parts[2] && !["boilerplates", "jobs", "check-port"].includes(parts[2]) && parts.length === 3) {
2200
+ const apps = await listApps();
2201
+ const found = apps.find((a) => a.name === decodeURIComponent(parts[2]));
2202
+ if (!found) {
2203
+ json(res, 404, { error: "App not found" });
2204
+ return;
2205
+ }
2206
+ const history = getDeployHistory();
2207
+ const lastDeploy = history.filter((h) => h.appName === found.name && h.status === "success").pop() ?? null;
2208
+ const qt = dashConfig.quickTunnelUrl;
2209
+ const bpMetaSingle = found.mode === "boilerplate" && found.stackId ? (() => {
2210
+ const p = join2(projectPath, ".brewnet-boilerplate.json");
2211
+ if (!existsSync3(p)) return void 0;
2212
+ try {
2213
+ const raw = JSON.parse(readFileSync3(p, "utf-8"));
2214
+ const list = Array.isArray(raw) ? raw : [raw];
2215
+ return list.find((m) => m.stackId === found.stackId);
2216
+ } catch {
2217
+ return void 0;
2218
+ }
2219
+ })() : void 0;
2220
+ const isNonUnified = bpMetaSingle ? bpMetaSingle.isUnified === false : !!(found.stackId && getStackById(found.stackId)?.isUnified === false);
2221
+ let localUrlSingle;
2222
+ let externalUrlSingle;
2223
+ let backendLocalUrlSingle = null;
2224
+ let backendExternalUrlSingle = null;
2225
+ if (isNonUnified) {
2226
+ let frontendPort = 3e3;
2227
+ const feEnvPath = join2(found.appDir, ".env");
2228
+ if (existsSync3(feEnvPath)) {
2229
+ const m = readFileSync3(feEnvPath, "utf-8").match(/^FRONTEND_PORT=(\d+)/m);
2230
+ if (m) frontendPort = parseInt(m[1], 10);
2231
+ }
2232
+ localUrlSingle = `http://127.0.0.1:${frontendPort}`;
2233
+ externalUrlSingle = qt ? `${qt.replace(/\/$/, "")}/apps/${found.name}-ui` : null;
2234
+ backendLocalUrlSingle = found.port ? `http://127.0.0.1:${found.port}` : null;
2235
+ backendExternalUrlSingle = qt ? `${qt.replace(/\/$/, "")}/apps/${found.name}` : null;
2236
+ } else {
2237
+ localUrlSingle = found.port ? `http://localhost:${found.port}` : null;
2238
+ if (found.appDir) {
2239
+ const bp = detectBasePath(found.appDir);
2240
+ if (bp && localUrlSingle) localUrlSingle += bp;
2241
+ }
2242
+ externalUrlSingle = qt ? `${qt.replace(/\/$/, "")}/apps/${found.name}` : null;
2243
+ }
2244
+ const app = { ...found, lastDeployedAt: lastDeploy?.deployedAt ?? null, localUrl: localUrlSingle, externalUrl: externalUrlSingle, backendLocalUrl: backendLocalUrlSingle, backendExternalUrl: backendExternalUrlSingle };
2245
+ json(res, 200, { app });
2246
+ return;
2247
+ }
2248
+ if (req.method === "GET" && parts[3] === "git" && parts.length === 4) {
2249
+ try {
2250
+ const git = await getAppGitInfo(decodeURIComponent(parts[2] ?? ""));
2251
+ json(res, 200, { git });
2252
+ } catch (err) {
2253
+ json(res, 502, { error: String(err) });
2254
+ }
2255
+ return;
2256
+ }
2257
+ if (req.method === "GET" && parts[3] === "branches" && parts.length === 4) {
2258
+ try {
2259
+ const branches = await getAppBranches(decodeURIComponent(parts[2] ?? ""));
2260
+ json(res, 200, { branches });
2261
+ } catch (err) {
2262
+ json(res, 200, { branches: [] });
2263
+ }
2264
+ return;
2265
+ }
2266
+ if (req.method === "GET" && parts[3] === "deploy" && parts[4] === "settings") {
2267
+ const settings = getDeploySettings(decodeURIComponent(parts[2] ?? ""));
2268
+ json(res, 200, settings);
2269
+ return;
2270
+ }
2271
+ if (req.method === "PUT" && parts[3] === "deploy" && parts[4] === "settings") {
2272
+ const opts = JSON.parse(body);
2273
+ updateDeploySettings(decodeURIComponent(parts[2] ?? ""), opts);
2274
+ json(res, 200, { success: true });
2275
+ return;
2276
+ }
2277
+ if (req.method === "POST" && parts[3] === "deploy" && !parts[4]) {
2278
+ const jobId = await deployApp(decodeURIComponent(parts[2] ?? ""));
2279
+ json(res, 202, { jobId });
2280
+ return;
2281
+ }
2282
+ if (req.method === "POST" && parts[3] === "rollback" && !parts[4]) {
2283
+ let parsed = {};
2284
+ try {
2285
+ parsed = JSON.parse(body);
2286
+ } catch {
2287
+ json(res, 400, { error: "Invalid JSON" });
2288
+ return;
2289
+ }
2290
+ if (!parsed.commitHash) {
2291
+ json(res, 400, { error: "commitHash is required" });
2292
+ return;
2293
+ }
2294
+ const jobId = await rollbackApp(decodeURIComponent(parts[2] ?? ""), parsed.commitHash);
2295
+ json(res, 202, { jobId });
2296
+ return;
2297
+ }
2298
+ if (req.method === "GET" && parts[3] === "logs") {
2299
+ const appDir = getAppDir(decodeURIComponent(parts[2] ?? ""));
2300
+ if (!appDir) {
2301
+ json(res, 404, { error: "App not found" });
2302
+ return;
2303
+ }
2304
+ res.writeHead(200, {
2305
+ "Content-Type": "text/event-stream",
2306
+ "Cache-Control": "no-cache",
2307
+ "Connection": "keep-alive"
2308
+ });
2309
+ res.flushHeaders();
2310
+ const { execa: execaLocal } = await import("execa");
2311
+ const proc = execaLocal("docker", ["compose", "logs", "--follow", "--tail", "50"], {
2312
+ cwd: appDir,
2313
+ reject: false,
2314
+ stdout: "pipe",
2315
+ stderr: "pipe"
2316
+ });
2317
+ const sendLine = (line) => {
2318
+ if (line.trim()) res.write(`data: ${line.replace(/\r?\n/g, " ")}
2319
+
2320
+ `);
2321
+ };
2322
+ proc.stdout?.on("data", (chunk) => {
2323
+ for (const line of chunk.toString().split("\n")) sendLine(line);
2324
+ });
2325
+ proc.stderr?.on("data", (chunk) => {
2326
+ for (const line of chunk.toString().split("\n")) sendLine(line);
2327
+ });
2328
+ const sseAppName = decodeURIComponent(parts[2] ?? "");
2329
+ const domainListener = (line) => sendLine(line);
2330
+ if (!domainOpListeners.has(sseAppName)) domainOpListeners.set(sseAppName, /* @__PURE__ */ new Set());
2331
+ domainOpListeners.get(sseAppName).add(domainListener);
2332
+ for (const entry of domainOpLogs.get(sseAppName) ?? []) sendLine(entry.line);
2333
+ req.on("close", () => {
2334
+ try {
2335
+ proc.kill();
2336
+ } catch {
2337
+ }
2338
+ domainOpListeners.get(sseAppName)?.delete(domainListener);
2339
+ });
2340
+ return;
2341
+ }
2342
+ }
2343
+ if (parts[1] === "gitea" && parts[2] === "autologin" && req.method === "GET") {
2344
+ const reqUrl = new URL(req.url ?? "/", "http://localhost");
2345
+ const redirectPath = reqUrl.searchParams.get("redirect") ?? "/git";
2346
+ const giteaBase = "http://localhost/git";
2347
+ const targetUrl = `http://localhost${redirectPath.startsWith("/") ? redirectPath : "/" + redirectPath}`;
2348
+ try {
2349
+ const loginPageRes = await fetch(`${giteaBase}/user/login`, {
2350
+ redirect: "manual",
2351
+ headers: { "User-Agent": "brewnet-admin" },
2352
+ signal: AbortSignal.timeout(5e3)
2353
+ });
2354
+ const rawSetCookies = [];
2355
+ loginPageRes.headers.forEach((val, key) => {
2356
+ if (key.toLowerCase() === "set-cookie") rawSetCookies.push(val);
2357
+ });
2358
+ const csrfCookieFull = rawSetCookies.find((c) => c.startsWith("_csrf=")) ?? "";
2359
+ const csrfCookieVal = csrfCookieFull.split(";")[0] ?? "";
2360
+ const pageHtml = await loginPageRes.text();
2361
+ const csrfField = pageHtml.match(/name="_csrf"\s+value="([^"]+)"/)?.[1] ?? pageHtml.match(/value="([^"]+)"\s+name="_csrf"/)?.[1] ?? csrfCookieVal.replace("_csrf=", "");
2362
+ if (!csrfField) {
2363
+ res.writeHead(302, { Location: targetUrl });
2364
+ res.end();
2365
+ return;
2366
+ }
2367
+ const formBody = new URLSearchParams({
2368
+ _csrf: csrfField,
2369
+ user_name: username,
2370
+ password,
2371
+ remember: "on"
2372
+ });
2373
+ const loginRes = await fetch(`${giteaBase}/user/login`, {
2374
+ method: "POST",
2375
+ headers: {
2376
+ "Content-Type": "application/x-www-form-urlencoded",
2377
+ "Cookie": csrfCookieVal,
2378
+ "User-Agent": "brewnet-admin"
2379
+ },
2380
+ body: formBody.toString(),
2381
+ redirect: "manual",
2382
+ signal: AbortSignal.timeout(5e3)
2383
+ });
2384
+ const respCookies = [];
2385
+ loginRes.headers.forEach((val, key) => {
2386
+ if (key.toLowerCase() === "set-cookie") respCookies.push(val);
2387
+ });
2388
+ const forwardCookies = respCookies.filter((c) => !c.includes("Max-Age=0") && !c.match(/=;\s/));
2389
+ const responseHeaders = { Location: targetUrl };
2390
+ if (forwardCookies.length > 0) {
2391
+ responseHeaders["Set-Cookie"] = forwardCookies;
2392
+ } else {
2393
+ logger.warn("admin-server", "[gitea/autologin] login POST did not return session cookies");
2394
+ }
2395
+ res.writeHead(302, responseHeaders);
2396
+ res.end();
2397
+ } catch (err) {
2398
+ logger.warn("admin-server", `[gitea/autologin] failed: ${String(err)}`);
2399
+ res.writeHead(302, { Location: targetUrl });
2400
+ res.end();
2401
+ }
2402
+ return;
2403
+ }
2404
+ if (parts[1] === "deploy" && parts[2] === "history" && req.method === "GET") {
2405
+ const reqUrl = new URL(req.url ?? "/", "http://localhost");
2406
+ const appFilter = reqUrl.searchParams.get("app") ?? void 0;
2407
+ const entries = getDeployHistory(appFilter);
2408
+ json(res, 200, { history: entries });
2409
+ return;
2410
+ }
2411
+ if (parts[1] === "git" && parts[2] === "repos" && req.method === "GET") {
2412
+ try {
2413
+ const repos = await listGiteaRepos();
2414
+ const appsForEnrich = await listApps();
2415
+ const enriched = repos.map((repo) => {
2416
+ const r = repo;
2417
+ const connectedApp = appsForEnrich.find(
2418
+ (app) => app.giteaRepoUrl && (app.giteaRepoUrl.endsWith("/" + repo.name) || app.giteaRepoUrl.includes("/" + repo.name + "."))
2419
+ );
2420
+ return {
2421
+ ...repo,
2422
+ language: r["language"] ?? "",
2423
+ stars: r["stars_count"] ?? 0,
2424
+ updatedAt: r["updated"] ?? "",
2425
+ appName: connectedApp?.name
2426
+ };
2427
+ });
2428
+ json(res, 200, { repos: enriched });
2429
+ } catch (err) {
2430
+ json(res, 502, { success: false, error: String(err) });
2431
+ }
2432
+ return;
2433
+ }
2434
+ if (parts[1] === "git" && parts[2] === "repos" && parts[3] && parts[4] === "connect" && req.method === "POST") {
2435
+ const repoName = decodeURIComponent(parts[3]);
2436
+ let parsed = {};
2437
+ try {
2438
+ parsed = JSON.parse(body);
2439
+ } catch {
2440
+ json(res, 400, { error: "Invalid JSON" });
2441
+ return;
2442
+ }
2443
+ const appName = parsed.appName?.trim();
2444
+ if (!appName) {
2445
+ json(res, 400, { error: "appName required" });
2446
+ return;
2447
+ }
2448
+ try {
2449
+ const appsPath = join2(homedir2(), ".brewnet", "apps.json");
2450
+ let existing = existsSync3(appsPath) ? JSON.parse(readFileSync3(appsPath, "utf-8")) : [];
2451
+ const repos = await listGiteaRepos();
2452
+ const repo = repos.find((r) => r.name === repoName);
2453
+ if (!repo) {
2454
+ json(res, 404, { error: `Repo '${repoName}' not found in Gitea` });
2455
+ return;
2456
+ }
2457
+ const repoUrl = repo.clone_url.replace(/\.git$/, "");
2458
+ const conflict = Array.isArray(existing) ? existing.find((a) => a.name !== appName && a.giteaRepoUrl && (a.giteaRepoUrl.endsWith("/" + repoName) || a.giteaRepoUrl.includes("/" + repoName + "."))) : null;
2459
+ if (conflict) {
2460
+ json(res, 409, { error: `Repo already connected to app '${conflict.name}'` });
2461
+ return;
2462
+ }
2463
+ let app = Array.isArray(existing) ? existing.find((a) => a.name === appName) : null;
2464
+ if (!app) {
2465
+ const docker2 = new (await import("dockerode")).default();
2466
+ const containers = await docker2.listContainers({ all: true });
2467
+ const matchedContainer = containers.find((c) => {
2468
+ const svc = c.Labels?.["com.docker.compose.service"] ?? "";
2469
+ const proj = c.Labels?.["com.docker.compose.project"] ?? "";
2470
+ return proj.includes(repoName) || proj.includes(appName) || svc === appName;
2471
+ });
2472
+ const port2 = matchedContainer ? parseInt(String(matchedContainer.Ports?.[0]?.PublicPort ?? 0), 10) || 8080 : 8080;
2473
+ const lang = repo["language"] || "";
2474
+ app = {
2475
+ name: appName,
2476
+ mode: "boilerplate",
2477
+ appDir: join2(projectPath, repoName),
2478
+ lang,
2479
+ port: port2,
2480
+ giteaRepoUrl: repoUrl,
2481
+ status: matchedContainer?.State === "running" ? "running" : "stopped",
2482
+ createdAt: (/* @__PURE__ */ new Date()).toISOString()
2483
+ };
2484
+ if (Array.isArray(existing)) {
2485
+ existing.push(app);
2486
+ } else {
2487
+ existing = [app];
2488
+ }
2489
+ } else {
2490
+ app.giteaRepoUrl = repoUrl;
2491
+ }
2492
+ writeFileSync3(appsPath, JSON.stringify(existing, null, 2));
2493
+ json(res, 200, { ok: true });
2494
+ } catch (err) {
2495
+ json(res, 500, { error: String(err) });
2496
+ }
2497
+ return;
2498
+ }
2499
+ if (parts[1] === "apps" && parts[2] === "check-port" && req.method === "GET") {
2500
+ const reqUrl = new URL(req.url ?? "/", "http://localhost");
2501
+ const portStr = reqUrl.searchParams.get("port") ?? "";
2502
+ const port2 = parseInt(portStr, 10);
2503
+ if (!port2 || port2 < 1 || port2 > 65535) {
2504
+ json(res, 400, { error: "Invalid port" });
2505
+ return;
2506
+ }
2507
+ const available = await new Promise((resolve2) => {
2508
+ const sock = createConnection({ port: port2, host: "127.0.0.1" });
2509
+ sock.once("connect", () => {
2510
+ sock.destroy();
2511
+ resolve2(false);
2512
+ });
2513
+ sock.once("error", () => resolve2(true));
2514
+ sock.setTimeout(400, () => {
2515
+ sock.destroy();
2516
+ resolve2(true);
2517
+ });
2518
+ });
2519
+ json(res, 200, { port: port2, available });
2520
+ return;
2521
+ }
2522
+ if (parts[1] === "deploy" && parts[2] === "hook" && req.method === "POST") {
2523
+ try {
2524
+ const payload = JSON.parse(body);
2525
+ const appName = payload.repository?.name;
2526
+ const branch = (payload.ref ?? "").replace("refs/heads/", "");
2527
+ if (appName) {
2528
+ const settings = getDeploySettings(appName);
2529
+ if (settings.autoDeploy && branch === settings.deployBranch) {
2530
+ void deployApp(appName);
2531
+ }
2532
+ }
2533
+ } catch {
2534
+ }
2535
+ json(res, 200, { status: "accepted" });
2536
+ return;
2537
+ }
2538
+ if (parts[1] === "domain") {
2539
+ if (req.method === "GET" && parts[2] === "list") {
2540
+ await handleDomainList(res, wizardState);
2541
+ return;
2542
+ }
2543
+ if (req.method === "GET" && parts[2] === "apps") {
2544
+ handleDomainApps(res, wizardState);
2545
+ return;
2546
+ }
2547
+ if (req.method === "POST" && parts[2] === "connect") {
2548
+ await handleDomainConnect(res, body, wizardState);
2549
+ return;
2550
+ }
2551
+ if (!checkAdminAuth(req, res, wizardState)) return;
2552
+ if (req.method === "DELETE" && parts[2] === "disconnect" && parts[3]) {
2553
+ await handleDomainDisconnect(res, parts[3], wizardState);
2554
+ return;
2555
+ }
2556
+ if (req.method === "GET" && parts[2] === "status" && parts[3]) {
2557
+ await handleDomainStatus(res, parts[3], wizardState);
2558
+ return;
2559
+ }
2560
+ }
2561
+ if (parts[1] === "cloudflare") {
2562
+ if (!checkAdminAuth(req, res, wizardState)) return;
2563
+ if (req.method === "GET" && parts[2] === "zones") {
2564
+ await handleCloudflareZones(res, wizardState);
2565
+ return;
2566
+ }
2567
+ if (req.method === "POST" && parts[2] === "tunnel") {
2568
+ await handleCreateTunnel(res, body, wizardState, projectPath);
2569
+ return;
2570
+ }
2571
+ }
2572
+ if (parts[1] === "settings") {
2573
+ if (!checkAdminAuth(req, res, wizardState)) return;
2574
+ if (req.method === "GET" && parts[2] === "cloudflare") {
2575
+ handleSettingsCloudflareGet(res, wizardState);
2576
+ return;
2577
+ }
2578
+ if (req.method === "PUT" && parts[2] === "cloudflare") {
2579
+ await handleSettingsCloudflarePut(res, body, wizardState);
2580
+ return;
2581
+ }
2582
+ }
2583
+ json(res, 404, { success: false, error: "Not found" });
2584
+ } catch (err) {
2585
+ logger.error("admin-server", "Unhandled error", { error: String(err) });
2586
+ json(res, 500, { success: false, error: "Internal server error" });
2587
+ }
2588
+ return;
2589
+ }
2590
+ res.writeHead(404);
2591
+ res.end("Not found");
2592
+ });
2593
+ return {
2594
+ server,
2595
+ start: () => new Promise((resolve2, reject) => {
2596
+ server.listen(port, "127.0.0.1", () => {
2597
+ logger.info("admin-server", `Listening on http://localhost:${port}`, { port });
2598
+ resolve2(port);
2599
+ });
2600
+ server.once("error", reject);
2601
+ }),
2602
+ stop: () => new Promise((resolve2, reject) => {
2603
+ server.close((err) => err ? reject(err) : resolve2());
2604
+ })
2605
+ };
2606
+ }
2607
/**
 * Guard for admin-only routes: validates the `x-admin-password` request
 * header against the password stored in wizard state.
 *
 * On failure it writes a 401 JSON response itself and returns false, so
 * callers can simply `if (!checkAdminAuth(...)) return;`.
 *
 * @param {import('http').IncomingMessage} req - incoming request (header source)
 * @param {import('http').ServerResponse} res - response to write 401 to on failure
 * @param {object|null|undefined} state - wizard state holding `admin.password`
 * @returns {boolean} true when the caller may proceed
 */
function checkAdminAuth(req, res, state) {
  const expected = state?.admin?.password;
  if (!expected) {
    json(res, 401, { error: "Unauthorized", message: "Admin password not configured" });
    return false;
  }
  const supplied = req.headers["x-admin-password"];
  // NOTE(review): plain string comparison leaks timing information; if this
  // endpoint is ever network-exposed, consider crypto.timingSafeEqual.
  if (supplied && supplied === expected) {
    return true;
  }
  json(res, 401, { error: "Unauthorized", message: "Admin password required for this operation" });
  return false;
}
2619
/**
 * GET /domain/list — reports all domain connections plus named-tunnel health.
 *
 * Responds 200 with `{ connections, tunnel, credentialsConfigured }`. With no
 * project state it returns an empty, unconfigured payload so the UI renders a
 * blank slate rather than an error. Unexpected failures produce a 500.
 *
 * @param {import('http').ServerResponse} res
 * @param {object|null} state - wizard state (projectName, domain.cloudflare)
 */
async function handleDomainList(res, state) {
  if (!state) {
    json(res, 200, { connections: [], tunnel: null, credentialsConfigured: false });
    return;
  }
  try {
    const manager = new DomainManager(state.projectName);
    // Decorate each stored connection with its public HTTPS URL.
    const connections = manager.list().map((conn) => ({
      ...conn,
      externalUrl: `https://${conn.hostname}`
    }));
    const cf = state.domain.cloudflare;
    let tunnel = null;
    if (cf.tunnelId && cf.apiToken && cf.accountId) {
      // Tunnel health is best-effort: a Cloudflare API hiccup must not
      // break the listing, so failures are deliberately swallowed here.
      try {
        const { getTunnelHealth } = await import("./cloudflare-client-TFT6VCXF.js");
        const health = await getTunnelHealth(cf.apiToken, cf.accountId, cf.tunnelId);
        tunnel = { ...health, tunnelName: cf.tunnelName, tunnelId: cf.tunnelId };
      } catch {
      }
    }
    const credentialsConfigured = !!(cf.apiToken && cf.accountId && cf.zoneId && cf.tunnelId);
    json(res, 200, { connections, tunnel, credentialsConfigured });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2646
/**
 * GET /domain/apps — lists apps eligible for a custom-domain connection.
 *
 * Returns `{ apps: [] }` (200) when no project state exists; 500 with the
 * stringified error if DomainManager fails.
 *
 * @param {import('http').ServerResponse} res
 * @param {object|null} state - wizard state holding `projectName`
 */
function handleDomainApps(res, state) {
  if (!state) {
    json(res, 200, { apps: [] });
    return;
  }
  try {
    const manager = new DomainManager(state.projectName);
    json(res, 200, { apps: manager.getConnectableApps() });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2659
/**
 * POST /domain/connect — binds `subdomain.domain` to a running app via the
 * project's Cloudflare tunnel.
 *
 * Validation order: project state present -> body is JSON -> required fields
 * -> subdomain is a legal DNS label -> no local connection conflict. Only
 * then is DomainManager.connect invoked; its step log is streamed to the
 * per-app domain-op log. Responds 200 on success, 4xx/5xx otherwise.
 *
 * @param {import('http').ServerResponse} res
 * @param {string} body - raw request body (expected JSON)
 * @param {object|null} state - wizard state (projectName, domainConnections)
 */
async function handleDomainConnect(res, body, state) {
  if (!state) {
    json(res, 500, { success: false, error: "No project state" });
    return;
  }
  let parsed;
  try {
    parsed = JSON.parse(body);
  } catch {
    json(res, 400, { success: false, error: "INVALID_JSON", message: "Request body must be valid JSON" });
    return;
  }
  const { appName, subdomain, domain } = parsed;
  if (!appName || !subdomain || !domain) {
    json(res, 400, { success: false, error: "MISSING_FIELDS", message: "appName, subdomain, and domain are required" });
    return;
  }
  // RFC 1035-style label: lowercase alphanumerics/hyphens, 1-63 chars,
  // no leading or trailing hyphen.
  if (!/^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$/.test(subdomain)) {
    json(res, 400, { success: false, error: "INVALID_SUBDOMAIN", message: "Subdomain must be a valid DNS label" });
    return;
  }
  const knownConnections = state.domainConnections ?? [];
  const localConflict = knownConnections.find(
    (conn) => conn.subdomain === subdomain && conn.domain === domain && conn.appName !== appName
  );
  if (localConflict) {
    json(res, 409, {
      success: false,
      error: "SUBDOMAIN_CONFLICT_LOCAL",
      message: `Subdomain "${subdomain}.${domain}" is already connected to app "${localConflict.appName}"`,
      conflictingApp: localConflict.appName
    });
    return;
  }
  try {
    const manager = new DomainManager(state.projectName);
    const result = await manager.connect(appName, subdomain, domain, {
      // Strip the connector's own prefix before fanning out to SSE watchers.
      onLog: (line) => writeDomainLog(appName, line.replace("[domain-connect] ", ""))
    });
    if (result.success) {
      json(res, 200, {
        success: true,
        hostname: result.hostname,
        externalUrl: result.externalUrl,
        steps: result.steps
      });
      return;
    }
    if (result.error === "CNAME_CONFLICT") {
      // A DNS record exists in Cloudflare that Brewnet does not own.
      json(res, 409, {
        success: false,
        error: "SUBDOMAIN_CONFLICT_EXTERNAL",
        message: `Subdomain "${subdomain}.${domain}" already has a DNS record in Cloudflare (not created by Brewnet)`,
        steps: result.steps
      });
      return;
    }
    const statusCode = result.error?.startsWith("APP_NOT_RUNNING") ? 503 : 400;
    json(res, statusCode, { success: false, error: result.error, message: result.error, steps: result.steps });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2721
/**
 * DELETE /domain/disconnect/:app — removes the domain connection for an app.
 *
 * Maps a NOT_CONNECTED failure to 404 and other manager failures to 500;
 * unexpected exceptions become a 500 with the stringified error.
 *
 * @param {import('http').ServerResponse} res
 * @param {string} appName - app whose domain connection should be removed
 * @param {object|null} state - wizard state holding `projectName`
 */
async function handleDomainDisconnect(res, appName, state) {
  if (!state) {
    json(res, 500, { success: false, error: "No project state" });
    return;
  }
  try {
    const manager = new DomainManager(state.projectName);
    const outcome = await manager.disconnect(appName);
    if (outcome.success) {
      json(res, 200, {
        success: true,
        appName: outcome.appName,
        removedHostname: outcome.removedHostname,
        steps: outcome.steps
      });
      return;
    }
    const statusCode = outcome.error?.startsWith("NOT_CONNECTED") ? 404 : 500;
    // Error strings are "CODE: detail"; expose the bare code plus full message.
    json(res, statusCode, { success: false, error: outcome.error?.split(":")[0], message: outcome.error });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2744
/**
 * GET /domain/status/:app — returns the first domain-connection status for
 * the given app, or 404 when the app has no connection (or no state exists).
 *
 * @param {import('http').ServerResponse} res
 * @param {string} appName - app to look up
 * @param {object|null} state - wizard state holding `projectName`
 */
async function handleDomainStatus(res, appName, state) {
  if (!state) {
    json(res, 404, { success: false, error: "No project state" });
    return;
  }
  try {
    const manager = new DomainManager(state.projectName);
    const statuses = await manager.status(appName);
    if (statuses.length === 0) {
      json(res, 404, { success: false, error: "NOT_CONNECTED", message: `No domain connection for app: ${appName}` });
      return;
    }
    // Only the first connection's status is reported for a single app.
    json(res, 200, statuses[0]);
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2761
/**
 * GET /cloudflare/zones — lists zones reachable with the stored API token.
 *
 * Responds 400 NO_TOKEN when no token is configured, 200 with the zones
 * (plus a warning when the list is empty), or 400 TOKEN_INVALID when the
 * Cloudflare call fails. As a side effect it backfills a missing account id
 * from the first zone and persists the state.
 *
 * @param {import('http').ServerResponse} res
 * @param {object|null} state - wizard state holding `domain.cloudflare`
 */
async function handleCloudflareZones(res, state) {
  const apiToken = state?.domain.cloudflare.apiToken;
  if (!apiToken) {
    json(res, 400, { success: false, error: "NO_TOKEN", message: "Cloudflare API token not configured. Complete Step 1 first." });
    return;
  }
  try {
    const { getZones } = await import("./cloudflare-client-TFT6VCXF.js");
    const zones = await getZones(apiToken);
    // Opportunistically learn the account id from the first zone so later
    // tunnel operations don't need a separate lookup.
    if (!state.domain.cloudflare.accountId && zones.length > 0) {
      const inferredAccountId = zones[0]?.accountId;
      if (inferredAccountId) {
        state.domain.cloudflare.accountId = inferredAccountId;
        const { saveState: save } = await import("./state-2SI3P4JG.js");
        save(state);
      }
    }
    if (zones.length === 0) {
      json(res, 200, {
        success: true,
        zones: [],
        warning: "No domains found. Ensure the token has Zone:Read permission and at least one domain is registered in your Cloudflare account."
      });
      return;
    }
    json(res, 200, { success: true, zones });
  } catch {
    // Any failure on the Cloudflare side is surfaced as an invalid token.
    json(res, 400, {
      success: false,
      error: "TOKEN_INVALID",
      message: "Stored API token is no longer valid. Please re-enter your token."
    });
  }
}
2799
/**
 * POST /cloudflare/tunnel — creates a named Cloudflare tunnel and rewires the
 * local cloudflared container to use it.
 *
 * Flow: validate state/body/name/credentials -> create the tunnel via the
 * Cloudflare API -> persist tunnel identity into state -> patch the project's
 * docker-compose.yml to the named-tunnel token -> force-recreate the
 * cloudflared service. Compose/restart failures are logged but non-fatal;
 * tunnel-creation failures are classified as name conflict vs. generic error.
 *
 * @param {import('http').ServerResponse} res
 * @param {string} body - raw request body, expected `{ tunnelName }`
 * @param {object|null} state - wizard state holding `domain.cloudflare`
 * @param {string} projectPath - project root containing docker-compose.yml
 */
async function handleCreateTunnel(res, body, state, projectPath) {
  if (!state) {
    json(res, 400, { success: false, error: "CREDENTIALS_INCOMPLETE", message: "API token and zone must be configured before creating a tunnel." });
    return;
  }
  let payload;
  try {
    payload = JSON.parse(body);
  } catch {
    json(res, 400, { success: false, error: "INVALID_JSON", message: "Request body must be valid JSON" });
    return;
  }
  const { tunnelName } = payload;
  if (!tunnelName || !tunnelName.trim()) {
    json(res, 400, { success: false, error: "MISSING_TUNNEL_NAME", message: "tunnelName is required" });
    return;
  }
  const cf = state.domain.cloudflare;
  if (!cf.apiToken || !cf.accountId || !cf.zoneId) {
    json(res, 400, { success: false, error: "CREDENTIALS_INCOMPLETE", message: "API token and zone must be configured before creating a tunnel." });
    return;
  }
  try {
    const trimmedName = tunnelName.trim();
    const { createTunnel: createTunnelRemote } = await import("./cloudflare-client-TFT6VCXF.js");
    const created = await createTunnelRemote(cf.apiToken, cf.accountId, trimmedName);
    // Persist the tunnel identity immediately so a later partial failure
    // (compose patch / container restart) cannot lose the credentials.
    const { saveState: persistState } = await import("./state-2SI3P4JG.js");
    cf.tunnelId = created.tunnelId;
    cf.tunnelToken = created.tunnelToken;
    cf.tunnelName = trimmedName;
    cf.tunnelMode = "named";
    cf.enabled = true;
    persistState(state);
    let composeUpdated = false;
    let containerRestarted = false;
    const composePath = join2(projectPath, "docker-compose.yml");
    const { existsSync: composeExists } = await import("fs");
    if (!composeExists(composePath)) {
      logger.warn("tunnel", `[${tunnelName}] compose file not found at ${composePath} \u2014 cloudflared must be updated manually`);
    } else {
      try {
        const { patchCloudflaredToNamedTunnel } = await import("./compose-generator-O7GSIJ2S.js");
        composeUpdated = patchCloudflaredToNamedTunnel(composePath, created.tunnelToken);
        logger.info("tunnel", `[${tunnelName}] compose patch: composeUpdated=${composeUpdated}`);
      } catch (e) {
        logger.warn("tunnel", `[${tunnelName}] compose patch failed: ${e instanceof Error ? e.message : e}`);
      }
      if (composeUpdated) {
        // Recreate cloudflared so it picks up the new token; reject:false
        // keeps a docker failure from throwing past this point.
        try {
          const { execa } = await import("execa");
          const recreate = await execa(
            "docker",
            ["compose", "-f", composePath, "up", "-d", "--force-recreate", "cloudflared"],
            { cwd: projectPath, reject: false }
          );
          containerRestarted = recreate.exitCode === 0;
          if (containerRestarted) {
            logger.info("tunnel", `[${tunnelName}] cloudflared container recreated`);
          } else {
            logger.warn("tunnel", `[${tunnelName}] cloudflared recreate failed (exit ${recreate.exitCode}): ${recreate.stderr}`);
          }
        } catch (e) {
          logger.warn("tunnel", `[${tunnelName}] cloudflared recreate exception: ${e instanceof Error ? e.message : e}`);
        }
      }
    }
    json(res, 200, {
      success: true,
      tunnelId: created.tunnelId,
      tunnelName: trimmedName,
      composeUpdated,
      containerRestarted
    });
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    if (msg.toLowerCase().includes("already exists")) {
      json(res, 400, {
        success: false,
        error: "TUNNEL_NAME_CONFLICT",
        message: `A tunnel named "${tunnelName}" already exists in your Cloudflare account. Choose a different name.`
      });
    } else {
      json(res, 400, {
        success: false,
        error: "TUNNEL_CREATE_FAILED",
        message: msg
      });
    }
  }
}
2888
/**
 * GET /settings/cloudflare — returns the Cloudflare configuration with
 * secrets masked for display in the settings UI.
 *
 * @param {import('http').ServerResponse} res
 * @param {object|null} state - wizard state holding `domain.cloudflare`
 */
function handleSettingsCloudflareGet(res, state) {
  if (!state) {
    json(res, 200, { configured: false });
    return;
  }
  const cf = state.domain.cloudflare;
  // Reveal just enough of a secret to be recognizable without exposing it:
  // first/last 3 chars for longer values, an opaque marker for short ones.
  const mask = (value) => {
    if (!value) return "not set";
    if (value.length > 6) return value.slice(0, 3) + "***" + value.slice(-3);
    return "***set***";
  };
  json(res, 200, {
    configured: !!(cf.apiToken && cf.accountId && cf.zoneId),
    accountId: mask(cf.accountId),
    zoneId: mask(cf.zoneId),
    zoneName: cf.zoneName || "",
    tunnelId: mask(cf.tunnelId),
    tunnelName: cf.tunnelName || "",
    apiTokenSet: !!cf.apiToken,
    // Token is validated when saved; assumed valid while it remains set.
    apiTokenValid: !!cf.apiToken,
    projectName: state.projectName || ""
  });
}
2908
/**
 * PUT /settings/cloudflare — verifies and stores Cloudflare credentials.
 *
 * Accepts `{ apiToken?, accountId?, zoneId?, tunnelId? }`; an omitted token
 * keeps the previously stored one. The token is verified remotely before any
 * state is written. Account id is resolved from (in order) the request, the
 * stored state, the accounts API, or the selected zone. Zone name resolution
 * is best-effort and non-fatal. Responds 200 with `{ verified, email,
 * zoneName }` on success.
 *
 * @param {import('http').ServerResponse} res
 * @param {string} body - raw request body (expected JSON)
 * @param {object|null} state - wizard state holding `domain.cloudflare`
 */
async function handleSettingsCloudflarePut(res, body, state) {
  if (!state) {
    json(res, 500, { success: false, error: "No project state" });
    return;
  }
  let parsed;
  try {
    parsed = JSON.parse(body);
  } catch {
    json(res, 400, { success: false, error: "INVALID_JSON", message: "Request body must be valid JSON" });
    return;
  }
  const { accountId, zoneId, tunnelId } = parsed;
  const apiToken = parsed.apiToken || state.domain.cloudflare.apiToken;
  if (!apiToken) {
    json(res, 400, { success: false, error: "MISSING_TOKEN", message: "apiToken is required" });
    return;
  }
  try {
    const verification = await verifyToken(apiToken);
    if (!verification.valid) {
      json(res, 400, {
        success: false,
        error: "INVALID_TOKEN",
        message: "API token verification failed. Ensure the token has Tunnel:Edit, DNS:Edit, Zone:Read permissions."
      });
      return;
    }
    const { saveState: persistState } = await import("./state-2SI3P4JG.js");
    state.domain.cloudflare.apiToken = apiToken;
    // Resolve the account id lazily: only hit the accounts API when neither
    // the request nor the stored state already provides one.
    let resolvedAccountId = accountId || state.domain.cloudflare.accountId;
    if (!resolvedAccountId) {
      resolvedAccountId = await (await import("./cloudflare-client-TFT6VCXF.js")).getAccounts(apiToken).then((accounts) => accounts[0]?.id ?? "").catch(() => "");
    }
    if (zoneId) state.domain.cloudflare.zoneId = zoneId;
    if (tunnelId) state.domain.cloudflare.tunnelId = tunnelId;
    let zoneName = state.domain.cloudflare.zoneName;
    if (zoneId) {
      // Best effort: look up the zone's display name (and account id) from
      // the Cloudflare API; failures here are deliberately non-fatal.
      try {
        const { getZones } = await import("./cloudflare-client-TFT6VCXF.js");
        const zones = await getZones(apiToken);
        const matched = zones.find((zone) => zone.id === zoneId);
        if (matched) {
          zoneName = matched.name;
          state.domain.cloudflare.zoneName = zoneName;
          if (!resolvedAccountId && matched.accountId) {
            resolvedAccountId = matched.accountId;
          }
        }
      } catch {
      }
    }
    if (resolvedAccountId) state.domain.cloudflare.accountId = resolvedAccountId;
    persistState(state);
    json(res, 200, {
      success: true,
      verified: true,
      email: verification.email ?? "",
      zoneName
    });
  } catch (err) {
    json(res, 500, { success: false, error: String(err) });
  }
}
2969
+
2970
+ export {
2971
+ createAdminServer
2972
+ };
2973
+ //# sourceMappingURL=chunk-SIXBB6JU.js.map