@brewnet/cli 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +184 -0
- package/dist/admin-server-DQVIEHV3.js +14 -0
- package/dist/admin-server-DQVIEHV3.js.map +1 -0
- package/dist/boilerplate-manager-P6QYUU7Q.js +29 -0
- package/dist/boilerplate-manager-P6QYUU7Q.js.map +1 -0
- package/dist/chunk-2VWMDHGI.js +1393 -0
- package/dist/chunk-2VWMDHGI.js.map +1 -0
- package/dist/chunk-4TJMJZMO.js +1173 -0
- package/dist/chunk-4TJMJZMO.js.map +1 -0
- package/dist/chunk-BAVGYMGA.js +114 -0
- package/dist/chunk-BAVGYMGA.js.map +1 -0
- package/dist/chunk-DH2VK3YI.js +293 -0
- package/dist/chunk-DH2VK3YI.js.map +1 -0
- package/dist/chunk-HCHY5UIQ.js +301 -0
- package/dist/chunk-HCHY5UIQ.js.map +1 -0
- package/dist/chunk-JFPHGZ6Z.js +254 -0
- package/dist/chunk-JFPHGZ6Z.js.map +1 -0
- package/dist/chunk-SIXBB6JU.js +2973 -0
- package/dist/chunk-SIXBB6JU.js.map +1 -0
- package/dist/chunk-SYV6PK3R.js +181 -0
- package/dist/chunk-SYV6PK3R.js.map +1 -0
- package/dist/chunk-ZKMWE5AH.js +444 -0
- package/dist/chunk-ZKMWE5AH.js.map +1 -0
- package/dist/cloudflare-client-TFT6VCXF.js +32 -0
- package/dist/cloudflare-client-TFT6VCXF.js.map +1 -0
- package/dist/compose-generator-O7GSIJ2S.js +19 -0
- package/dist/compose-generator-O7GSIJ2S.js.map +1 -0
- package/dist/frameworks-Z7VXDGP4.js +18 -0
- package/dist/frameworks-Z7VXDGP4.js.map +1 -0
- package/dist/index.d.ts +22 -0
- package/dist/index.js +7897 -0
- package/dist/index.js.map +1 -0
- package/dist/services/admin-daemon.d.ts +2 -0
- package/dist/services/admin-daemon.js +33 -0
- package/dist/services/admin-daemon.js.map +1 -0
- package/dist/stacks-M4FBTVO5.js +16 -0
- package/dist/stacks-M4FBTVO5.js.map +1 -0
- package/dist/state-2SI3P4JG.js +27 -0
- package/dist/state-2SI3P4JG.js.map +1 -0
- package/package.json +44 -0
|
@@ -0,0 +1,1393 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import {
|
|
3
|
+
configureTunnelIngress,
|
|
4
|
+
createDnsRecord,
|
|
5
|
+
deleteDnsRecord,
|
|
6
|
+
getActiveServiceRoutes,
|
|
7
|
+
getDnsRecords,
|
|
8
|
+
getTunnelHealth
|
|
9
|
+
} from "./chunk-JFPHGZ6Z.js";
|
|
10
|
+
import {
|
|
11
|
+
addExternalLabels,
|
|
12
|
+
getServiceDefinition,
|
|
13
|
+
removeExternalLabels
|
|
14
|
+
} from "./chunk-4TJMJZMO.js";
|
|
15
|
+
import {
|
|
16
|
+
loadState,
|
|
17
|
+
runRotation,
|
|
18
|
+
saveState
|
|
19
|
+
} from "./chunk-ZKMWE5AH.js";
|
|
20
|
+
import {
|
|
21
|
+
DOCKER_COMPOSE_FILENAME,
|
|
22
|
+
LOG_QUERY_DEFAULT_LIMIT,
|
|
23
|
+
LOG_QUERY_MAX_LIMIT
|
|
24
|
+
} from "./chunk-HCHY5UIQ.js";
|
|
25
|
+
import {
|
|
26
|
+
getStackById
|
|
27
|
+
} from "./chunk-SYV6PK3R.js";
|
|
28
|
+
|
|
29
|
+
// src/services/service-manager.ts
|
|
30
|
+
import { readFileSync, writeFileSync, existsSync, copyFileSync, readdirSync } from "fs";
|
|
31
|
+
import { join, basename } from "path";
|
|
32
|
+
import yaml from "js-yaml";
|
|
33
|
+
// Name prefix applied to every Brewnet-managed Docker resource
// (container names and named volumes), e.g. "brewnet-gitea", "brewnet_gitea_data".
var BREWNET_PREFIX = "brewnet";
|
|
34
|
+
// Read and parse a docker-compose YAML file from disk.
// Returns the parsed document as a plain object (shape depends on the file).
function readComposeFile(composePath) {
  const rawYaml = readFileSync(composePath, "utf-8");
  const parsed = yaml.load(rawYaml);
  return parsed;
}
|
|
38
|
+
// Serialize a compose object back to YAML and write it to disk.
// sortKeys is disabled so key order (and therefore diffs) stays stable,
// and noRefs avoids YAML anchors/aliases in the output.
function writeComposeFile(composePath, compose) {
  const dumpOptions = {
    indent: 2,
    lineWidth: 120,
    noRefs: true,
    sortKeys: false,
    quotingType: '"',
    forceQuotes: false
  };
  const yamlContent = yaml.dump(compose, dumpOptions);
  writeFileSync(composePath, yamlContent, "utf-8");
}
|
|
49
|
+
// Copy the compose file to a timestamped sibling backup
// (<file>.bak.<ms-timestamp>[.<n>]) and return the backup's path.
// A numeric suffix is appended only if a backup with the same
// timestamp already exists in the directory.
function backupComposeFile(composePath) {
  const now = Date.now();
  const parentDir = join(composePath, "..");
  const fileName = basename(composePath);
  // Snapshot of existing backup names for collision checks.
  const takenNames = new Set(
    readdirSync(parentDir).filter((entry) => entry.startsWith(`${fileName}.bak.`))
  );
  let candidate = `${composePath}.bak.${now}`;
  for (let n = 1; takenNames.has(basename(candidate)); n++) {
    candidate = `${composePath}.bak.${now}.${n}`;
  }
  copyFileSync(composePath, candidate);
  return candidate;
}
|
|
63
|
+
// Map a service id to the volume mounts its container needs.
// Named volumes are prefixed with BREWNET_PREFIX; bind mounts (e.g. the
// Docker socket for traefik) are passed through as-is.
// Unknown services get no volumes.
function getServiceVolumes(serviceId) {
  const volumesByService = {
    traefik: [
      "/var/run/docker.sock:/var/run/docker.sock:ro",
      `${BREWNET_PREFIX}_traefik_certs:/letsencrypt`
    ],
    gitea: [`${BREWNET_PREFIX}_gitea_data:/data`],
    postgresql: [`${BREWNET_PREFIX}_postgres_data:/var/lib/postgresql/data`],
    mysql: [`${BREWNET_PREFIX}_mysql_data:/var/lib/mysql`],
    redis: [`${BREWNET_PREFIX}_redis_data:/data`],
    valkey: [`${BREWNET_PREFIX}_valkey_data:/data`],
    keydb: [`${BREWNET_PREFIX}_keydb_data:/data`],
    nextcloud: [`${BREWNET_PREFIX}_nextcloud_data:/var/www/html`],
    minio: [`${BREWNET_PREFIX}_minio_data:/data`],
    jellyfin: [
      `${BREWNET_PREFIX}_jellyfin_config:/config`,
      `${BREWNET_PREFIX}_jellyfin_media:/media`
    ],
    "openssh-server": [`${BREWNET_PREFIX}_ssh_config:/config`],
    "docker-mailserver": [
      `${BREWNET_PREFIX}_mail_data:/var/mail`,
      `${BREWNET_PREFIX}_mail_state:/var/mail-state`,
      `${BREWNET_PREFIX}_mail_config:/tmp/docker-mailserver`
    ],
    pgadmin: [`${BREWNET_PREFIX}_pgadmin_data:/var/lib/pgadmin`],
    filebrowser: [
      `${BREWNET_PREFIX}_filebrowser_data:/srv`,
      `${BREWNET_PREFIX}_filebrowser_db:/database`
    ],
    cloudflared: []
  };
  // Object.hasOwn guards against inherited keys such as "constructor".
  return Object.hasOwn(volumesByService, serviceId) ? volumesByService[serviceId] : [];
}
|
|
112
|
+
// Build a docker-compose service entry from a service definition.
// Volumes and traefik labels are attached only when the service needs them,
// keeping the generated YAML minimal.
function buildServiceBlock(def) {
  const block = {
    image: def.image,
    container_name: `${BREWNET_PREFIX}-${def.id}`,
    restart: "unless-stopped",
    security_opt: ["no-new-privileges:true"],
    networks: [...def.networks]
  };
  const volumeMounts = getServiceVolumes(def.id);
  if (volumeMounts.length > 0) {
    block.volumes = volumeMounts;
  }
  // Routing labels only make sense when the service is exposed on a subdomain.
  const wantsRouting = Boolean(def.subdomain) && Boolean(def.traefikLabels);
  if (wantsRouting) {
    block.labels = { ...def.traefikLabels };
  }
  return block;
}
|
|
129
|
+
// Extract the named-volume sources from a list of compose mount strings
// ("source:target[:mode]"). Bind mounts (absolute "/..." or relative "./...")
// and empty sources are excluded.
function extractNamedVolumes(volumeMounts) {
  const isNamedVolume = (source) =>
    source !== "" && !source.startsWith("/") && !source.startsWith(".");
  return volumeMounts
    .map((mount) => mount.split(":")[0])
    .filter(isNamedVolume);
}
|
|
139
|
+
// Add a service to the project's docker-compose.yml.
// Backs the file up first, then writes the new service block plus any
// named volumes it requires. Returns { success, composePath, backupPath }
// or { success: false, error } without touching the file.
async function addService(serviceId, projectPath) {
  const def = getServiceDefinition(serviceId);
  if (!def) {
    return { success: false, error: `Unknown service: ${serviceId}` };
  }
  const composePath = join(projectPath, DOCKER_COMPOSE_FILENAME);
  if (!existsSync(composePath)) {
    return {
      success: false,
      error: `docker-compose.yml not found at ${composePath}`
    };
  }
  const compose = readComposeFile(composePath);
  if (compose.services?.[serviceId]) {
    return {
      success: false,
      error: `Service "${serviceId}" already exists in compose`
    };
  }
  // Snapshot before mutating anything on disk.
  const backupPath = backupComposeFile(composePath);
  compose.services ??= {};
  compose.services[serviceId] = buildServiceBlock(def);
  // Register the service's named volumes at the top level; null produces
  // the conventional bare "volume_name:" YAML entry.
  const namedVolumes = extractNamedVolumes(getServiceVolumes(serviceId));
  if (namedVolumes.length > 0) {
    compose.volumes ??= {};
    for (const volumeName of namedVolumes) {
      if (!(volumeName in compose.volumes)) {
        compose.volumes[volumeName] = null;
      }
    }
  }
  writeComposeFile(composePath, compose);
  return { success: true, composePath, backupPath };
}
|
|
179
|
+
// Remove a service from the project's docker-compose.yml.
// Backs the file up first. With options.purge, the service's named volumes
// are also dropped from the top-level volumes section.
// Returns { success, composePath, backupPath } or { success: false, error }.
async function removeService(serviceId, projectPath, options) {
  const composePath = join(projectPath, DOCKER_COMPOSE_FILENAME);
  if (!existsSync(composePath)) {
    return {
      success: false,
      error: `docker-compose.yml not found at ${composePath}`
    };
  }
  const compose = readComposeFile(composePath);
  const serviceEntry = compose.services?.[serviceId];
  if (!serviceEntry) {
    return {
      success: false,
      error: `Service "${serviceId}" not found in compose`
    };
  }
  // Snapshot before mutating anything on disk.
  const backupPath = backupComposeFile(composePath);
  // Capture the volume names before the entry is deleted.
  const namedVolumes = extractNamedVolumes(serviceEntry.volumes || []);
  delete compose.services[serviceId];
  if (options?.purge && compose.volumes) {
    for (const volumeName of namedVolumes) {
      delete compose.volumes[volumeName];
    }
  }
  writeComposeFile(composePath, compose);
  return { success: true, composePath, backupPath };
}
|
|
209
|
+
|
|
210
|
+
// src/utils/log-aggregator.ts
|
|
211
|
+
import { readFileSync as readFileSync2, readdirSync as readdirSync2, existsSync as existsSync2 } from "fs";
|
|
212
|
+
import { join as join2, basename as basename2 } from "path";
|
|
213
|
+
import { homedir } from "os";
|
|
214
|
+
// Convert a time spec to an ISO-8601 timestamp.
// Relative forms ("30m", "1h", "2d") resolve to now-minus-offset;
// anything else is handed to the Date parser (ISO dates recommended).
// Throws on input that is neither.
function parseDuration(input) {
  const relative = input.match(/^(\d+)([hmd])$/);
  if (relative) {
    const msPerUnit = {
      m: 60 * 1e3,
      h: 60 * 60 * 1e3,
      d: 24 * 60 * 60 * 1e3
    };
    const amount = parseInt(relative[1], 10);
    const offsetMs = amount * msPerUnit[relative[2]];
    return new Date(Date.now() - offsetMs).toISOString();
  }
  const absolute = new Date(input);
  if (!Number.isNaN(absolute.getTime())) {
    return absolute.toISOString();
  }
  throw new Error(
    `Invalid time format: '${input}'. Use: 1h, 30m, 1d, or ISO date (2026-03-15)`
  );
}
|
|
242
|
+
// Read Brewnet CLI JSONL logs (brewnet-YYYY-MM-DD.log files) from logsDir.
// Whole files older than the `since` date are skipped cheaply by filename;
// surviving entries are still filtered per-line by timestamp.
// Malformed lines are ignored.
function readCliLogs(logsDir, since) {
  if (!existsSync2(logsDir)) return [];
  const dailyLogPattern = /^brewnet-\d{4}-\d{2}-\d{2}\.log$/;
  const files = readdirSync2(logsDir)
    .filter((name) => dailyLogPattern.test(name))
    .sort();
  const cutoffDate = since ? since.slice(0, 10) : void 0;
  const entries = [];
  for (const file of files) {
    if (cutoffDate) {
      // "brewnet-YYYY-MM-DD.log" -> "YYYY-MM-DD"
      const fileDate = file.slice("brewnet-".length, -".log".length);
      if (fileDate < cutoffDate) continue;
    }
    const lines = readFileSync2(join2(logsDir, file), "utf-8").split("\n");
    for (const rawLine of lines) {
      if (!rawLine.trim()) continue;
      let record;
      try {
        record = JSON.parse(rawLine);
      } catch {
        continue; // not valid JSONL — skip
      }
      if (since && record.timestamp < since) continue;
      entries.push({
        timestamp: record.timestamp,
        source: "cli",
        level: record.level,
        message: record.message,
        metadata: { command: record.command, ...record.metadata }
      });
    }
  }
  return entries;
}
|
|
271
|
+
// Read Cloudflare tunnel JSONL logs from logsDir/tunnel.log.
// Entries with an `error` field are surfaced at error level; the tunnel's
// domain becomes the entry's service. Malformed lines are ignored.
function readTunnelLogs(logsDir, since) {
  const logFile = join2(logsDir, "tunnel.log");
  if (!existsSync2(logFile)) return [];
  const entries = [];
  for (const rawLine of readFileSync2(logFile, "utf-8").split("\n")) {
    if (!rawLine.trim()) continue;
    let record;
    try {
      record = JSON.parse(rawLine);
    } catch {
      continue; // not valid JSONL — skip
    }
    if (since && record.timestamp < since) continue;
    const metadata = {
      event: record.event,
      tunnelMode: record.tunnelMode
    };
    if (record.tunnelId) metadata.tunnelId = record.tunnelId;
    if (record.tunnelName) metadata.tunnelName = record.tunnelName;
    entries.push({
      timestamp: record.timestamp,
      source: "tunnel",
      level: record.error ? "error" : "info",
      service: record.domain,
      message: record.detail,
      metadata
    });
  }
  return entries;
}
|
|
300
|
+
// Read Traefik JSON access logs from <projectPath>/logs/access.log.
// HTTP status maps to level (5xx -> error, 4xx -> warn, else info) and the
// Traefik service name is stripped of its "@provider" suffix.
// Malformed lines are ignored.
function readAccessLogs(projectPath, since) {
  const logFile = join2(projectPath, "logs", "access.log");
  if (!existsSync2(logFile)) return [];
  const entries = [];
  for (const rawLine of readFileSync2(logFile, "utf-8").split("\n")) {
    if (!rawLine.trim()) continue;
    let record;
    try {
      record = JSON.parse(rawLine);
    } catch {
      continue; // not valid JSONL — skip
    }
    if (since && record.StartUTC < since) continue;
    const status = record.OriginStatus;
    let level;
    if (status >= 500) level = "error";
    else if (status >= 400) level = "warn";
    else level = "info";
    const metadata = {};
    if (record.RouterName) metadata.routerName = record.RouterName;
    if (record.ClientAddr) metadata.clientAddr = record.ClientAddr;
    if (record.Duration !== void 0) metadata.duration = record.Duration;
    if (record.RequestHost) metadata.requestHost = record.RequestHost;
    // Traefik emits the header key with a dash; tolerate both spellings.
    const userAgent = record["request_User-Agent"] ?? record.request_User_Agent;
    if (userAgent) metadata.userAgent = userAgent;
    entries.push({
      timestamp: record.StartUTC,
      source: "access",
      level,
      service: record.ServiceName?.split("@")[0],
      message: `${record.RequestMethod} ${record.RequestPath} \u2192 ${record.OriginStatus}`,
      metadata
    });
  }
  return entries;
}
|
|
334
|
+
// Collect recent log lines from the project's running Docker containers.
// Containers are matched by name prefix derived from the project directory.
// Returns [] when dockerode is unavailable or the Docker daemon cannot be
// reached; per-container failures are swallowed so one bad container does
// not abort the whole scan.
async function readServiceLogs(projectPath, opts) {
  let Dockerode;
  try {
    // dockerode is loaded lazily and treated as optional.
    Dockerode = (await import("dockerode")).default;
  } catch {
    return [];
  }
  const docker = new Dockerode();
  const entries = [];
  try {
    const containers = await docker.listContainers({ all: false });
    // Compose-style project name: directory basename, lowercased, stripped
    // of everything but [a-z0-9] (also makes it safe to embed in a RegExp below).
    const projectName = basename2(projectPath).toLowerCase().replace(/[^a-z0-9]/g, "");
    for (const containerInfo of containers) {
      // Docker reports names with a leading slash; fall back to the short id.
      const containerName = containerInfo.Names?.[0]?.replace(/^\//, "") ?? containerInfo.Id.slice(0, 12);
      if (!containerName.toLowerCase().startsWith(projectName)) continue;
      const container = docker.getContainer(containerInfo.Id);
      const logOpts = {
        stdout: true,
        stderr: true,
        timestamps: true
      };
      if (opts?.tail) logOpts.tail = opts.tail;
      if (opts?.since) {
        // Docker expects `since` as Unix seconds.
        logOpts.since = Math.floor(new Date(opts.since).getTime() / 1e3);
      }
      try {
        const logBuffer = await container.logs(logOpts);
        const frames = [];
        if (Buffer.isBuffer(logBuffer)) {
          // Demultiplex what appears to be Docker's multiplexed log stream:
          // 8-byte frame header (byte 0 = stream type, bytes 4-7 = big-endian
          // payload length) followed by the payload.
          let pos = 0;
          while (pos + 8 <= logBuffer.length) {
            const streamType = logBuffer[pos];
            const payloadSize = logBuffer.readUInt32BE(pos + 4);
            pos += 8;
            // Truncated trailing frame — stop rather than read past the end.
            if (pos + payloadSize > logBuffer.length) break;
            const payload = logBuffer.subarray(pos, pos + payloadSize).toString("utf-8");
            frames.push({ stream: streamType, text: payload });
            pos += payloadSize;
          }
        } else {
          // Non-buffer result (e.g. a plain string): treat it all as stdout.
          frames.push({ stream: 1, text: String(logBuffer) });
        }
        for (const frame of frames) {
          // Stream 2 is stderr; map it to error level.
          const level = frame.stream === 2 ? "error" : "info";
          for (const line of frame.text.split("\n")) {
            if (!line.trim()) continue;
            // Each line starts with the Docker-added timestamp; keep the rest
            // as the message. Lines without a parseable timestamp are dropped.
            const tsMatch = line.match(
              /(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d+)?Z?)\s+(.*)/
            );
            if (!tsMatch) continue;
            const timestamp = tsMatch[1].endsWith("Z") ? tsMatch[1] : tsMatch[1] + "Z";
            const message = tsMatch[2];
            if (opts?.since && timestamp < opts.since) continue;
            // "project-service-1" -> "service" (strip project prefix and
            // compose replica suffix). projectName is [a-z0-9]-only, so the
            // dynamic RegExp is safe.
            const serviceName = containerName.replace(new RegExp(`^${projectName}[-_]`), "").replace(/-\d+$/, "");
            entries.push({
              timestamp,
              source: "service",
              level,
              service: serviceName,
              message,
              metadata: { containerId: containerInfo.Id.slice(0, 12) }
            });
          }
        }
      } catch {
        // Best-effort per container: skip containers whose logs can't be read.
      }
    }
  } catch {
    // Docker daemon unreachable — return whatever was collected (usually []).
  }
  return entries;
}
|
|
405
|
+
// Query the aggregated logs (CLI, tunnel, access, service) with optional
// filtering by source, level, service, time window and substring search.
// Results are newest-first and paginated; the limit is capped at
// LOG_QUERY_MAX_LIMIT. Returns { entries, total, hasMore }.
async function queryLogs(query, projectPath) {
  const logsDir = join2(homedir(), ".brewnet", "logs");
  const since = query.since;
  const sourcesToRead = query.sources ?? ["cli", "tunnel", "access", "service"];
  const wants = (src) => sourcesToRead.includes(src);
  // File-based readers are wrapped in resolved promises so all sources can
  // be gathered with a single Promise.all alongside the async Docker reader.
  const readers = [];
  if (wants("cli")) readers.push(Promise.resolve(readCliLogs(logsDir, since)));
  if (wants("tunnel")) readers.push(Promise.resolve(readTunnelLogs(logsDir, since)));
  if (wants("access")) readers.push(Promise.resolve(readAccessLogs(projectPath, since)));
  if (wants("service")) readers.push(readServiceLogs(projectPath, { since }));
  let entries = (await Promise.all(readers)).flat();
  try {
    // Opportunistic log rotation; best-effort and never fatal to a query.
    runRotation(logsDir, projectPath);
  } catch {
  }
  // Build the active filters, then apply them in order.
  const predicates = [];
  if (query.levels?.length) {
    predicates.push((e) => query.levels.includes(e.level));
  }
  if (query.services?.length) {
    predicates.push((e) => e.service && query.services.includes(e.service));
  }
  if (query.until) {
    predicates.push((e) => e.timestamp <= query.until);
  }
  if (query.search) {
    const searchLower = query.search.toLowerCase();
    predicates.push((e) => e.message.toLowerCase().includes(searchLower));
  }
  for (const pred of predicates) {
    entries = entries.filter(pred);
  }
  // Newest first (ISO timestamps sort lexicographically).
  entries.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
  const total = entries.length;
  const limit = Math.min(query.limit ?? LOG_QUERY_DEFAULT_LIMIT, LOG_QUERY_MAX_LIMIT);
  const offset = query.offset ?? 0;
  return {
    entries: entries.slice(offset, offset + limit),
    total,
    hasMore: offset + limit < total
  };
}
|
|
452
|
+
// Summarize all known logs: totals per source and per level, plus the ten
// most recent error entries. Sources are read concurrently.
async function getLogStats(projectPath) {
  const logsDir = join2(homedir(), ".brewnet", "logs");
  const [cliEntries, tunnelEntries, accessEntries, serviceEntries] = await Promise.all([
    Promise.resolve(readCliLogs(logsDir)),
    Promise.resolve(readTunnelLogs(logsDir)),
    Promise.resolve(readAccessLogs(projectPath)),
    readServiceLogs(projectPath, {})
  ]);
  const allEntries = [...cliEntries, ...tunnelEntries, ...accessEntries, ...serviceEntries];
  const bySource = { cli: 0, tunnel: 0, access: 0, service: 0 };
  const byLevel = { info: 0, warn: 0, error: 0, debug: 0 };
  for (const { source, level } of allEntries) {
    bySource[source]++;
    // ?? 0 tolerates levels outside the four pre-seeded buckets.
    byLevel[level] = (byLevel[level] ?? 0) + 1;
  }
  // Newest first (ISO timestamps sort lexicographically).
  allEntries.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
  const recentErrors = allEntries.filter((e) => e.level === "error").slice(0, 10);
  return {
    total: allEntries.length,
    bySource,
    byLevel,
    recentErrors,
    lastUpdated: new Date().toISOString()
  };
}
|
|
477
|
+
|
|
478
|
+
// src/services/backup-manager.ts
|
|
479
|
+
import {
|
|
480
|
+
existsSync as existsSync3,
|
|
481
|
+
mkdirSync,
|
|
482
|
+
readdirSync as readdirSync3,
|
|
483
|
+
statSync
|
|
484
|
+
} from "fs";
|
|
485
|
+
import { join as join3 } from "path";
|
|
486
|
+
import { execSync } from "child_process";
|
|
487
|
+
import crypto from "crypto";
|
|
488
|
+
|
|
489
|
+
// src/utils/errors.ts
|
|
490
|
+
// Structured CLI error with a stable code, an HTTP status for the admin
// server, and a user-facing remediation text. Static factories below cover
// every Brewnet error code. Note that some codes are shared category codes:
// BN002 covers both directory and port conflicts, and BN006 covers all
// scaffolding failures (clone, health-check timeout, build).
var BrewnetError = class _BrewnetError extends Error {
  code;
  httpStatus;
  remediation;
  constructor(code, message, httpStatus, remediation) {
    super(message);
    this.name = "BrewnetError";
    this.code = code;
    this.httpStatus = httpStatus;
    this.remediation = remediation;
    // Restores the prototype chain so `instanceof` works reliably
    // (presumably guarding against down-level transpilation of class
    // extends Error — TODO confirm build target).
    Object.setPrototypeOf(this, new.target.prototype);
  }
  /**
   * Format the error for terminal display.
   *
   * Example output:
   *
   *   Error [BN001]: Docker daemon is not running
   *
   *   Docker is required to manage services. Please start Docker and try again.
   *
   *   Fix:
   *     macOS: Open Docker Desktop
   *     Linux: sudo systemctl start docker
   */
  format() {
    const lines = [
      `Error [${this.code}]: ${this.message}`,
      "",
      ` ${this.remediation}`
    ];
    return lines.join("\n");
  }
  /**
   * Serialize the error for structured logging (JSONL).
   */
  toJSON() {
    return {
      code: this.code,
      message: this.message,
      httpStatus: this.httpStatus,
      remediation: this.remediation
    };
  }
  // ---------------------------------------------------------------------------
  // Factory methods for every Brewnet error code
  // ---------------------------------------------------------------------------
  /**
   * BN001 — Docker daemon is not running (503 Service Unavailable).
   */
  static dockerNotRunning() {
    return new _BrewnetError(
      "BN001",
      "Docker daemon is not running",
      503,
      [
        "Docker is required to manage services. Please start Docker and try again.",
        "",
        " Fix:",
        " macOS: Open Docker Desktop",
        " Linux: sudo systemctl start docker"
      ].join("\n")
    );
  }
  /**
   * BN002 — Project directory already exists (409 Conflict).
   */
  static directoryConflict(name) {
    return new _BrewnetError(
      "BN002",
      `Directory "${name}" already exists`,
      409,
      [
        "The project directory already exists. Choose a different project name or remove it first.",
        "",
        " Fix:",
        ` rm -rf ${name} # remove existing directory`,
        ` brewnet create-app ${name}-v2 # use a different name`
      ].join("\n")
    );
  }
  /**
   * BN002 — Port already in use (409 Conflict).
   */
  static portConflict(port, processInfo) {
    const detail = processInfo ? ` (in use by ${processInfo})` : "";
    return new _BrewnetError(
      "BN002",
      `Port ${port} is already in use${detail}`,
      409,
      [
        `Port ${port} is required by one of your selected services.`,
        "",
        " Fix:",
        ` 1. Find the process: lsof -i :${port}`,
        ` 2. Stop it: kill <PID>`,
        " 3. Or choose a different port in your configuration."
      ].join("\n")
    );
  }
  /**
   * BN003 — SSL certificate issuance failed (500 Internal Server Error).
   */
  static sslFailed(domain) {
    return new _BrewnetError(
      "BN003",
      `SSL certificate issuance failed for ${domain}`,
      500,
      [
        "Let's Encrypt could not issue a certificate. Common causes:",
        "",
        " - DNS records not yet propagated (wait a few minutes and retry)",
        " - Domain does not resolve to this server's public IP",
        " - Rate limit reached (max 5 duplicates per week)",
        "",
        " Fix:",
        ` 1. Verify DNS: dig +short ${domain}`,
        " 2. Retry: brewnet domain ssl " + domain,
        " 3. Use Cloudflare Tunnel for automatic SSL instead."
      ].join("\n")
    );
  }
  /**
   * BN004 — Invalid license key (401 Unauthorized).
   */
  static invalidLicense() {
    return new _BrewnetError(
      "BN004",
      "Invalid or expired license key",
      401,
      [
        "Your Brewnet Pro/Team license key is invalid or has expired.",
        "",
        " Fix:",
        " 1. Check your key at https://brewnet.dev/account",
        " 2. Update it: brewnet config set license <KEY>",
        " 3. Contact support if the issue persists."
      ].join("\n")
    );
  }
  /**
   * BN005 — Rate limit exceeded (429 Too Many Requests).
   */
  static rateLimited() {
    return new _BrewnetError(
      "BN005",
      "Rate limit exceeded",
      429,
      [
        "Too many requests in a short period. Please wait and try again.",
        "",
        " Fix:",
        " Wait a few minutes before retrying.",
        " If using CI, consider adding a delay between requests."
      ].join("\n")
    );
  }
  /**
   * BN006 — Boilerplate clone failed (500 Internal Server Error).
   */
  static cloneFailed(stackId) {
    return new _BrewnetError(
      "BN006",
      `Failed to clone boilerplate stack "${stackId}"`,
      500,
      [
        "Could not download the boilerplate from GitHub. Common causes:",
        "",
        " - No internet connection",
        " - GitHub is temporarily unavailable",
        " - The stack branch does not exist on the remote",
        "",
        " Fix:",
        " 1. Check your internet connection",
        " 2. Verify connectivity: curl -I https://github.com",
        " 3. Retry: brewnet create-app <name> --stack " + stackId
      ].join("\n")
    );
  }
  /**
   * BN006 — Health check timed out after scaffolding (500 Internal Server Error).
   */
  static healthCheckTimeout(timeoutSec) {
    return new _BrewnetError(
      "BN006",
      `Application health check timed out after ${timeoutSec}s`,
      500,
      [
        "The application container started but did not respond to health checks in time.",
        "",
        " Containers are still running. To diagnose:",
        " docker compose logs backend # check for startup errors",
        " docker compose logs # check all services",
        "",
        " Fix:",
        " 1. Check logs for errors (missing env vars, port conflicts, build errors)",
        ' 2. Run "make down" to stop containers',
        " 3. Retry: brewnet create-app <name> --stack <STACK_ID>"
      ].join("\n")
    );
  }
  /**
   * BN006 — Build failed (500 Internal Server Error).
   */
  static buildFailed(logs) {
    // Append the tail of the build output (last 5 lines) when available.
    const logSnippet = logs ? `

Build output (last lines):
 ${logs.split("\n").slice(-5).join("\n ")}` : "";
    return new _BrewnetError(
      "BN006",
      "Application build failed",
      500,
      [
        "The build process exited with a non-zero status.",
        "",
        " Fix:",
        " 1. Check your build command in brewnet.yml",
        " 2. Run the build locally to reproduce the error",
        " 3. Review logs: brewnet logs --build",
        logSnippet
      ].join("\n")
    );
  }
  /**
   * BN007 — Invalid Git repository (400 Bad Request).
   */
  static invalidGitRepo(path2) {
    return new _BrewnetError(
      "BN007",
      `Not a valid Git repository: ${path2}`,
      400,
      [
        "The specified path is not a Git repository or is inaccessible.",
        "",
        " Fix:",
        ` 1. Verify the path exists: ls -la ${path2}`,
        ` 2. Initialize if needed: git init ${path2}`,
        " 3. Or clone an existing repo: git clone <url>"
      ].join("\n")
    );
  }
  /**
   * BN008 — Resource not found (404 Not Found).
   */
  static resourceNotFound(resource) {
    return new _BrewnetError(
      "BN008",
      `Resource not found: ${resource}`,
      404,
      [
        `The requested resource "${resource}" could not be found.`,
        "",
        " Fix:",
        " 1. Check the name or ID for typos",
        " 2. List available resources: brewnet status",
        " 3. The resource may have been removed or renamed."
      ].join("\n")
    );
  }
  /**
   * BN009 — Database error (500 Internal Server Error).
   */
  static databaseError(detail) {
    return new _BrewnetError(
      "BN009",
      `Database error: ${detail}`,
      500,
      [
        "An internal database operation failed.",
        "",
        " Fix:",
        " 1. Check disk space: df -h",
        " 2. Verify DB file permissions: ls -la ~/.brewnet/db/",
        " 3. Try resetting the local DB: brewnet config reset-db",
        " 4. If the issue persists, file a bug report."
      ].join("\n")
    );
  }
  /**
   * BN010 — Feature requires Pro plan (403 Forbidden).
   */
  static proRequired(feature) {
    return new _BrewnetError(
      "BN010",
      `"${feature}" requires a Brewnet Pro subscription`,
      403,
      [
        "This feature is available on the Pro plan ($9/mo) or Team plan ($29/mo/server).",
        "",
        " Upgrade:",
        " https://brewnet.dev/pricing",
        "",
        " Activate:",
        " brewnet config set license <KEY>"
      ].join("\n")
    );
  }
};
|
|
789
|
+
/**
 * Type guard: true when `err` is a BrewnetError instance.
 * @param {unknown} err - Value to test (typically a caught exception).
 * @returns {boolean}
 */
function isBrewnetError(err) {
  const isBrewnet = err instanceof BrewnetError;
  return isBrewnet;
}
|
|
792
|
+
|
|
793
|
+
// src/services/backup-manager.ts
|
|
794
|
+
/**
 * Build a unique backup identifier of the form `backup-<epoch-ms>-<6 hex chars>`.
 * Randomness is non-cryptographic in purpose (collision avoidance only).
 * @returns {string} A new backup ID.
 */
function generateBackupId() {
  const suffix = crypto.randomBytes(3).toString("hex");
  return ["backup", Date.now(), suffix].join("-");
}
|
|
799
|
+
/**
 * Derive a project name from the last path segment of a POSIX-style path.
 * Trailing slashes are ignored; an empty result yields "unknown".
 * @param {string} projectPath - Path to the project directory.
 * @returns {string} The final segment, or "unknown".
 */
function deriveProjectName(projectPath) {
  const trimmed = projectPath.replace(/\/+$/, "");
  const lastSegment = trimmed.split("/").at(-1);
  return lastSegment || "unknown";
}
|
|
803
|
+
/**
 * Map a backup ID to its on-disk archive filename (`<id>.tar.gz`).
 * @param {string} backupId - Backup identifier.
 * @returns {string} Archive filename.
 */
function buildArchiveFilename(backupId) {
  return [backupId, "tar.gz"].join(".");
}
|
|
806
|
+
/**
 * Archive a project directory into `<backupsDir>/<backupId>.tar.gz`.
 * The archive is created relative to the project's parent directory so that
 * the project folder itself becomes the single top-level entry.
 * @param {string} projectPath - Directory to back up.
 * @param {string} backupsDir - Destination directory (created if missing).
 * @returns {{id:string, timestamp:number, path:string, size:number, projectName:string}}
 * @throws If `tar` exits non-zero or the archive cannot be stat'ed.
 */
function createBackup(projectPath, backupsDir) {
  mkdirSync(backupsDir, { recursive: true });
  const backupId = generateBackupId();
  const archivePath = join3(backupsDir, buildArchiveFilename(backupId));
  const timestamp = Date.now();
  const baseName = deriveProjectName(projectPath);
  const parentDir = join3(projectPath, "..");
  // NOTE(review): paths are interpolated into a shell command; a double quote
  // in projectPath/backupsDir would break or inject — consider execFileSync.
  const cmd = `tar -czf "${archivePath}" -C "${parentDir}" "${baseName}"`;
  execSync(cmd, { stdio: "pipe" });
  const { size } = statSync(archivePath);
  return {
    id: backupId,
    timestamp,
    path: archivePath,
    size,
    projectName: baseName
  };
}
|
|
826
|
+
/**
 * Extract a backup archive into `projectPath`, creating the directory first.
 * `--strip-components=1` drops the archived top-level project folder so the
 * contents land directly inside `projectPath`.
 * @param {string} backupId - ID of the backup to restore.
 * @param {string} backupsDir - Directory holding the archives.
 * @param {string} projectPath - Destination directory.
 * @throws {BrewnetError} BN008 when the archive does not exist.
 * @throws If `tar` exits non-zero.
 */
function restoreBackup(backupId, backupsDir, projectPath) {
  const archivePath = join3(backupsDir, buildArchiveFilename(backupId));
  if (!existsSync3(archivePath)) {
    throw BrewnetError.resourceNotFound(`backup:${backupId}`);
  }
  mkdirSync(projectPath, { recursive: true });
  const cmd = `tar -xzf "${archivePath}" -C "${projectPath}" --strip-components=1`;
  execSync(cmd, { stdio: "pipe" });
}
|
|
837
|
+
/**
 * List all backup archives in `backupsDir`, newest first.
 * The timestamp is parsed from the `backup-<epoch-ms>-<rand>` filename when
 * possible, otherwise the file's mtime is used. The project name is read from
 * the archive's first entry; unreadable archives report "unknown".
 * @param {string} backupsDir - Directory holding `.tar.gz` archives.
 * @returns {Array<{id:string, timestamp:number, path:string, size:number, projectName:string}>}
 */
function listBackups(backupsDir) {
  if (!existsSync3(backupsDir)) {
    return [];
  }
  const files = readdirSync3(backupsDir).filter((f) => f.endsWith(".tar.gz"));
  const records = [];
  for (const file of files) {
    const filePath = join3(backupsDir, file);
    const stats = statSync(filePath);
    const backupId = file.replace(/\.tar\.gz$/, "");
    const parts = backupId.split("-");
    // Fix: parseInt on a non-numeric segment (foreign filenames such as
    // "backup-foo.tar.gz") previously produced NaN timestamps, which made the
    // sort below meaningless. Fall back to the file mtime in that case.
    const parsed = parts.length >= 2 ? parseInt(parts[1], 10) : NaN;
    const timestamp = Number.isFinite(parsed) ? parsed : stats.mtimeMs;
    let projectName = "unknown";
    try {
      // The first entry of the archive is the top-level project directory.
      const listing = execSync(`tar -tzf "${filePath}" | head -1`, {
        stdio: "pipe",
        encoding: "utf-8"
      }).trim();
      projectName = listing.replace(/\/$/, "").split("/")[0] || "unknown";
    } catch {
      // Corrupt/unreadable archive: keep "unknown" rather than fail the listing.
    }
    records.push({
      id: backupId,
      timestamp,
      path: filePath,
      size: stats.size,
      projectName
    });
  }
  records.sort((a, b) => b.timestamp - a.timestamp);
  return records;
}
|
|
869
|
+
/**
 * Report available disk space at `path2` using `df -k`.
 * On any failure (df missing, path invalid, unparseable output) `available`
 * is reported as 0, making `sufficient` false for any positive requirement.
 * @param {string} path2 - Filesystem path to inspect.
 * @param {number} [requiredBytes=0] - Bytes the caller intends to write.
 * @returns {{available:number, required:number, sufficient:boolean}}
 */
function checkDiskSpace(path2, requiredBytes = 0) {
  let available;
  try {
    const output = execSync(`df -k "${path2}" | tail -1`, {
      stdio: "pipe",
      encoding: "utf-8"
    }).trim();
    const columns = output.split(/\s+/);
    const availableKB = parseInt(columns[3], 10);
    // Fix: df failures piped through `tail` still exit 0, so empty/foreign
    // output reached parseInt and produced NaN, leaking a NaN `available`.
    // Treat unparseable output as "no space known" (0) instead.
    available = Number.isFinite(availableKB) ? availableKB * 1024 : 0;
  } catch {
    available = 0;
  }
  return {
    available,
    required: requiredBytes,
    sufficient: available >= requiredBytes
  };
}
|
|
888
|
+
|
|
889
|
+
// src/services/domain-manager.ts
|
|
890
|
+
import fs from "fs";
|
|
891
|
+
import path from "path";
|
|
892
|
+
import os from "os";
|
|
893
|
+
import { execa } from "execa";
|
|
894
|
+
|
|
895
|
+
// src/services/app-registry.ts
|
|
896
|
+
import { existsSync as existsSync4, readFileSync as readFileSync3, writeFileSync as writeFileSync2, mkdirSync as mkdirSync2 } from "fs";
|
|
897
|
+
import { dirname } from "path";
|
|
898
|
+
/**
 * Load the registered app list from `appsJsonPath`.
 * Returns [] when the file is missing or contains invalid JSON.
 * @param {string} appsJsonPath - Path to the apps registry JSON file.
 * @returns {Array<object>} Parsed app entries, or [].
 */
function readApps(appsJsonPath) {
  if (!existsSync4(appsJsonPath)) return [];
  let apps = [];
  try {
    apps = JSON.parse(readFileSync3(appsJsonPath, "utf-8"));
  } catch {
    // Corrupt registry: treat as empty rather than crash the CLI.
  }
  return apps;
}
|
|
906
|
+
/**
 * Persist the app list as pretty-printed JSON, creating parent directories.
 * @param {string} appsJsonPath - Destination registry file.
 * @param {Array<object>} apps - Full app list to write (overwrites the file).
 */
function writeApps(appsJsonPath, apps) {
  const parentDir = dirname(appsJsonPath);
  mkdirSync2(parentDir, { recursive: true });
  const body = JSON.stringify(apps, null, 2);
  writeFileSync2(appsJsonPath, body, "utf-8");
}
|
|
910
|
+
/**
 * Register a new app entry in the registry.
 * @param {string} appsJsonPath - Path to the apps registry JSON file.
 * @param {{name:string}} entry - App entry; `name` must be unique.
 * @throws {Error} When an app with `entry.name` is already registered.
 */
function addApp(appsJsonPath, entry) {
  const apps = readApps(appsJsonPath);
  const duplicate = apps.some((a) => a.name === entry.name);
  if (duplicate) {
    throw new Error(`App "${entry.name}" already exists`);
  }
  apps.push(entry);
  writeApps(appsJsonPath, apps);
}
|
|
917
|
+
/**
 * Shallow-merge `patch` into the registered app named `name` and persist.
 * @param {string} appsJsonPath - Path to the apps registry JSON file.
 * @param {string} name - Name of the app to update.
 * @param {object} patch - Fields to overwrite on the entry.
 * @throws {Error} When no app with that name exists.
 */
function updateApp(appsJsonPath, name, patch) {
  const apps = readApps(appsJsonPath);
  const idx = apps.findIndex((a) => a.name === name);
  if (idx === -1) throw new Error(`App "${name}" not found`);
  const merged = { ...apps[idx], ...patch };
  const next = apps.map((a, i) => (i === idx ? merged : a));
  writeApps(appsJsonPath, next);
}
|
|
924
|
+
/**
 * Remove the app named `name` from the registry (no-op if absent).
 * @param {string} appsJsonPath - Path to the apps registry JSON file.
 * @param {string} name - Name of the app to remove.
 */
function removeApp(appsJsonPath, name) {
  const remaining = readApps(appsJsonPath).filter((a) => a.name !== name);
  writeApps(appsJsonPath, remaining);
}
|
|
928
|
+
/**
 * Load the deployment history log from `historyJsonPath`.
 * Returns [] when the file is missing or contains invalid JSON.
 * @param {string} historyJsonPath - Path to the history JSON file.
 * @returns {Array<object>} Parsed history entries, or [].
 */
function readDeployHistory(historyJsonPath) {
  if (!existsSync4(historyJsonPath)) return [];
  let entries = [];
  try {
    entries = JSON.parse(readFileSync3(historyJsonPath, "utf-8"));
  } catch {
    // Corrupt history file: start fresh instead of crashing.
  }
  return entries;
}
|
|
936
|
+
/**
 * Append one deploy record to the history file, creating parent directories.
 * @param {string} historyJsonPath - Path to the history JSON file.
 * @param {object} entry - Deploy record to append.
 */
function appendDeployHistory(historyJsonPath, entry) {
  const entries = readDeployHistory(historyJsonPath);
  entries.push(entry);
  mkdirSync2(dirname(historyJsonPath), { recursive: true });
  writeFileSync2(historyJsonPath, JSON.stringify(entries, null, 2), "utf-8");
}
|
|
941
|
+
|
|
942
|
+
// src/services/domain-manager.ts
|
|
943
|
+
/**
 * Orchestrates connecting/disconnecting local apps to external domains via
 * Cloudflare Tunnel: tunnel ingress, DNS CNAME records, Traefik labels in the
 * project's docker-compose file, and persisted connection state.
 *
 * Credentials and tunnel/zone IDs are read from `state.domain.cloudflare`;
 * project state is loaded/saved through `loadState`/`saveState` (defined
 * elsewhere in this bundle).
 */
var DomainManager = class {
  // Name of the Brewnet project this manager operates on.
  projectName;
  // In-memory copy of the persisted project state; mutated then saved.
  state;
  /**
   * @param {string} projectName - Project whose state to load.
   * @throws {Error} When no persisted state exists for the project.
   */
  constructor(projectName) {
    this.projectName = projectName;
    const loaded = loadState(projectName);
    if (!loaded) {
      throw new Error(`Project "${projectName}" not found. Run \`brewnet init\` first.`);
    }
    this.state = loaded;
  }
  /** Reload state from disk */
  reload() {
    const loaded = loadState(this.projectName);
    if (loaded) this.state = loaded;
  }
  /** Get a copy of the current state */
  getState() {
    return structuredClone(this.state);
  }
  // ── connect ──────────────────────────────────────────────────────────────
  /**
   * Connect a local app to an external domain via Cloudflare Tunnel.
   *
   * Steps: health check → ingress update → DNS create → Traefik labels → persist → poll DNS
   * Rolls back on failure.
   */
  async connect(appName, subdomain, domain, options = {}) {
    const hostname = `${subdomain}.${domain}`;
    const steps = [];
    const cf = this.state.domain.cloudflare;
    const log = (msg) => options.onLog?.(`[domain-connect] ${msg}`);
    log(`start: app=${appName} subdomain=${subdomain} domain=${domain}`);
    log(`cf state: tunnelId=${cf.tunnelId || "(empty)"} apiToken=${cf.apiToken ? "***" : "(empty)"} accountId=${cf.accountId || "(empty)"} zoneId=${cf.zoneId || "(empty)"}`);
    // Preconditions: credentials and a resolvable container port.
    if (!cf.tunnelId || !cf.apiToken) {
      const err = "Cloudflare credentials not configured. Set API token and tunnel ID first.";
      log(`FAIL: ${err}`);
      return {
        success: false,
        hostname,
        externalUrl: `https://${hostname}`,
        steps,
        error: err
      };
    }
    const containerPort = this.resolveContainerPort(appName);
    log(`resolveContainerPort(${appName}) \u2192 ${containerPort ?? "null"}`);
    if (!containerPort) {
      const err = `Cannot determine container port for app "${appName}".`;
      log(`FAIL: ${err}`);
      return {
        success: false,
        hostname,
        externalUrl: `https://${hostname}`,
        steps,
        error: err
      };
    }
    const scenario = options.scenario ?? this.detectScenario();
    log(`scenario: ${scenario}`);
    // Step 1: verify the app answers locally before touching remote config.
    log(`step 1/6: health check \u2192 http://127.0.0.1:${containerPort}/`);
    const healthStart = Date.now();
    try {
      const healthy = await this.checkLocalHealth(appName, containerPort);
      if (!healthy) {
        const err = `App "${appName}" not responding on port ${containerPort}`;
        log(`FAIL step 1: ${err}`);
        steps.push({ step: "health_check", status: "failed", error: err });
        return { success: false, hostname, externalUrl: `https://${hostname}`, steps, error: `APP_NOT_RUNNING: Local health check failed for ${appName} on port ${containerPort}` };
      }
      steps.push({ step: "health_check", status: "completed", durationMs: Date.now() - healthStart });
      log(`step 1 OK (${Date.now() - healthStart}ms)`);
    } catch (err) {
      log(`FAIL step 1 exception: ${err}`);
      steps.push({ step: "health_check", status: "failed", error: String(err) });
      return { success: false, hostname, externalUrl: `https://${hostname}`, steps, error: `Health check failed: ${err}` };
    }
    // Step 2: push the full ingress table (built-in + other external + new
    // route). `previousIngress` is kept for rollback if later steps fail.
    log(`step 2/6: configure tunnel ingress (accountId=${cf.accountId || "(empty)"}, tunnelId=${cf.tunnelId})`);
    const ingressStart = Date.now();
    let previousIngress = null;
    try {
      previousIngress = getActiveServiceRoutes(this.state);
      const projectDomain = this.state.domain.zoneName;
      const builtinRoutes = previousIngress.map((r) => ({ ...r, domain: projectDomain }));
      const existingExtRoutes = (this.state.domainConnections ?? []).filter((c) => c.appName !== appName).map((c) => ({ subdomain: c.subdomain, containerName: this.resolveContainerName(c.appName), port: c.containerPort, domain: c.domain }));
      const newRoute = { subdomain, containerName: this.resolveContainerName(appName), port: containerPort, domain };
      const allRoutes = [...builtinRoutes, ...existingExtRoutes, newRoute];
      log(`ingress routes: ${JSON.stringify(allRoutes.map((r) => `${r.subdomain} \u2192 ${r.containerName}:${r.port}`))}`);
      await configureTunnelIngress(cf.apiToken, cf.accountId, cf.tunnelId, domain, allRoutes);
      steps.push({ step: "ingress_update", status: "completed", durationMs: Date.now() - ingressStart });
      log(`step 2 OK (${Date.now() - ingressStart}ms)`);
    } catch (err) {
      log(`FAIL step 2: ${err}`);
      steps.push({ step: "ingress_update", status: "failed", error: String(err) });
      return { success: false, hostname, externalUrl: `https://${hostname}`, steps, error: `Ingress update failed: ${err}` };
    }
    // Step 3: create the CNAME. Existing records abort (unless forced) and
    // trigger an ingress rollback to the pre-connect table.
    log(`step 3/6: DNS CNAME check/create for ${hostname} (zoneId=${cf.zoneId || "(empty)"})`);
    const dnsStart = Date.now();
    let cnameRecordId = "";
    try {
      const existing = await getDnsRecords(cf.apiToken, cf.zoneId, hostname);
      log(`existing DNS records for ${hostname}: ${existing.length}`);
      if (existing.length > 0 && !options.force) {
        await this.rollbackIngress(cf, previousIngress, domain);
        const err = `CNAME_CONFLICT: A CNAME record already exists for ${hostname}`;
        log(`FAIL step 3: ${err}`);
        steps.push({ step: "dns_creation", status: "failed", error: err });
        return { success: false, hostname, externalUrl: `https://${hostname}`, steps, error: `CNAME_CONFLICT` };
      }
      if (existing.length > 0 && options.force) {
        for (const rec of existing) {
          await deleteDnsRecord(cf.apiToken, cf.zoneId, rec.id);
        }
        log(`deleted ${existing.length} existing CNAME record(s)`);
      }
      await createDnsRecord(cf.apiToken, cf.zoneId, cf.tunnelId, subdomain, domain);
      // Re-fetch to learn the new record's ID for precise deletion later.
      const created = await getDnsRecords(cf.apiToken, cf.zoneId, hostname);
      cnameRecordId = created[0]?.id ?? "";
      steps.push({ step: "dns_creation", status: "completed", durationMs: Date.now() - dnsStart });
      log(`step 3 OK \u2014 cnameRecordId=${cnameRecordId} (${Date.now() - dnsStart}ms)`);
    } catch (err) {
      await this.rollbackIngress(cf, previousIngress, domain);
      log(`FAIL step 3: ${err}`);
      steps.push({ step: "dns_creation", status: "failed", error: String(err) });
      return { success: false, hostname, externalUrl: `https://${hostname}`, steps, error: `DNS creation failed: ${err}` };
    }
    // Step 4: best-effort compose-file labels; failure here is non-fatal.
    log(`step 4/6: Traefik labels for ${appName}`);
    try {
      const composePath = this.getComposePath();
      if (fs.existsSync(composePath)) {
        addExternalLabels(composePath, appName, hostname, containerPort);
        log(`step 4 OK \u2014 labels added to ${composePath}`);
      } else {
        log(`step 4 SKIP \u2014 compose file not found at ${composePath}`);
      }
      steps.push({ step: "traefik_labels", status: "completed" });
    } catch (err) {
      log(`step 4 WARN (non-fatal): ${err}`);
      steps.push({ step: "traefik_labels", status: "failed", error: String(err) });
    }
    // Step 5: persist the connection (replacing any prior one for this app).
    log(`step 5/6: persist connection to state`);
    const connection = {
      appName,
      subdomain,
      domain,
      hostname,
      tunnelId: cf.tunnelId,
      cnameRecordId,
      containerPort,
      connectedAt: (/* @__PURE__ */ new Date()).toISOString(),
      scenario
    };
    if (!this.state.domainConnections) {
      this.state.domainConnections = [];
    }
    this.state.domainConnections = this.state.domainConnections.filter((c) => c.appName !== appName);
    this.state.domainConnections.push(connection);
    saveState(this.state);
    log(`step 5 OK`);
    // Step 6: wait (up to 30s) for the CNAME to become resolvable; a timeout
    // is reported as "skipped", not a failure.
    log(`step 6/6: poll DNS propagation for ${hostname} (timeout 30s)`);
    const pollStart = Date.now();
    try {
      await this.pollDnsPropagation(hostname, 3e4);
      steps.push({ step: "dns_propagation", status: "completed", durationMs: Date.now() - pollStart });
      log(`step 6 OK (${Date.now() - pollStart}ms)`);
    } catch {
      steps.push({ step: "dns_propagation", status: "skipped", durationMs: Date.now() - pollStart });
      log(`step 6 SKIP \u2014 DNS not yet propagated (${Date.now() - pollStart}ms)`);
    }
    log(`SUCCESS: https://${hostname}`);
    return {
      success: true,
      hostname,
      externalUrl: `https://${hostname}`,
      steps
    };
  }
  // ── disconnect ───────────────────────────────────────────────────────────
  /**
   * Disconnect an app from its external domain.
   *
   * Steps: remove ingress → delete DNS → remove Traefik labels → update state
   * Atomic rollback on failure.
   */
  async disconnect(appName) {
    const steps = [];
    const connections = this.state.domainConnections ?? [];
    const conn = connections.find((c) => c.appName === appName);
    if (!conn) {
      return {
        success: false,
        appName,
        removedHostname: "",
        steps,
        error: `NOT_CONNECTED: No external domain connection found for app: ${appName}`
      };
    }
    const cf = this.state.domain.cloudflare;
    // Step 1: rewrite the tunnel ingress without this app's route.
    try {
      const remainingRoutes = getActiveServiceRoutes(this.state);
      const projectDomain = this.state.domain.zoneName;
      const builtinRoutes = remainingRoutes.filter((r) => r.subdomain !== conn.subdomain).map((r) => ({ ...r, domain: projectDomain }));
      const remainingExtRoutes = (this.state.domainConnections ?? []).filter((c) => c.appName !== appName).map((c) => ({ subdomain: c.subdomain, containerName: this.resolveContainerName(c.appName), port: c.containerPort, domain: c.domain }));
      const filteredRoutes = [...builtinRoutes, ...remainingExtRoutes];
      if (cf.apiToken && cf.accountId && cf.tunnelId) {
        await configureTunnelIngress(cf.apiToken, cf.accountId, cf.tunnelId, conn.domain, filteredRoutes);
      }
      steps.push({ step: "ingress_removal", status: "completed" });
    } catch (err) {
      steps.push({ step: "ingress_removal", status: "failed", error: String(err) });
      return { success: false, appName, removedHostname: conn.hostname, steps, error: `Ingress removal failed: ${err}` };
    }
    // Step 2: delete the CNAME (by stored ID when available, otherwise by
    // hostname lookup). On failure, restore the full ingress table.
    try {
      if (cf.apiToken && cf.zoneId) {
        if (conn.cnameRecordId) {
          await deleteDnsRecord(cf.apiToken, cf.zoneId, conn.cnameRecordId);
        } else {
          const records = await getDnsRecords(cf.apiToken, cf.zoneId, conn.hostname);
          for (const rec of records) {
            await deleteDnsRecord(cf.apiToken, cf.zoneId, rec.id);
          }
        }
      }
      steps.push({ step: "dns_deletion", status: "completed" });
    } catch (err) {
      // Rollback: re-publish all routes (including this app's) best-effort.
      try {
        const routes = getActiveServiceRoutes(this.state);
        const projectDomain = this.state.domain.zoneName;
        const allBuiltin = routes.map((r) => ({ ...r, domain: projectDomain }));
        const allExt = (this.state.domainConnections ?? []).map((c) => ({ subdomain: c.subdomain, containerName: this.resolveContainerName(c.appName), port: c.containerPort, domain: c.domain }));
        const allRoutes = [...allBuiltin, ...allExt];
        if (cf.apiToken && cf.accountId && cf.tunnelId) {
          await configureTunnelIngress(cf.apiToken, cf.accountId, cf.tunnelId, conn.domain, allRoutes);
        }
      } catch {
      }
      steps.push({ step: "dns_deletion", status: "failed", error: String(err) });
      return { success: false, appName, removedHostname: conn.hostname, steps, error: `DNS deletion failed: ${err}` };
    }
    // Step 3: best-effort compose-label cleanup; failure is non-fatal.
    try {
      const composePath = this.getComposePath();
      if (fs.existsSync(composePath)) {
        removeExternalLabels(composePath, appName);
      }
      steps.push({ step: "traefik_cleanup", status: "completed" });
    } catch (err) {
      steps.push({ step: "traefik_cleanup", status: "failed", error: String(err) });
    }
    // Step 4: drop the connection from persisted state.
    this.state.domainConnections = connections.filter((c) => c.appName !== appName);
    saveState(this.state);
    return {
      success: true,
      appName,
      removedHostname: conn.hostname,
      steps
    };
  }
  // ── list ─────────────────────────────────────────────────────────────────
  /** Returns all active domain connections. */
  list() {
    return this.state.domainConnections ?? [];
  }
  // ── status ───────────────────────────────────────────────────────────────
  /**
   * Get detailed status for a specific app's domain connection,
   * or all connections if appName is omitted.
   */
  async status(appName) {
    const connections = this.state.domainConnections ?? [];
    const targets = appName ? connections.filter((c) => c.appName === appName) : connections;
    const cf = this.state.domain.cloudflare;
    const results = [];
    for (const conn of targets) {
      // Start from a pessimistic default; each probe below upgrades fields
      // independently and failures are swallowed so one bad probe does not
      // hide the others.
      const info = {
        appName: conn.appName,
        local: { url: `http://localhost:${conn.containerPort}`, healthy: false },
        external: { url: `https://${conn.hostname}`, dnsResolved: false, httpsReachable: false },
        tunnel: { status: "inactive", connectorCount: 0 },
        dns: null
      };
      try {
        info.local.healthy = await this.checkLocalHealth(conn.appName, conn.containerPort);
      } catch {
      }
      if (cf.apiToken && cf.accountId && cf.tunnelId) {
        try {
          const health = await getTunnelHealth(cf.apiToken, cf.accountId, cf.tunnelId);
          info.tunnel = health;
        } catch {
        }
      }
      if (cf.apiToken && cf.zoneId) {
        try {
          const records = await getDnsRecords(cf.apiToken, cf.zoneId, conn.hostname);
          if (records.length > 0) {
            info.dns = {
              type: "CNAME",
              name: records[0].name,
              content: records[0].content,
              proxied: records[0].proxied
            };
            info.external.dnsResolved = true;
          }
        } catch {
        }
      }
      try {
        // Also check public resolvers; either source may confirm resolution.
        const resolved = await this.checkDnsResolution(conn.hostname);
        info.external.dnsResolved = info.external.dnsResolved || resolved;
      } catch {
      }
      try {
        info.external.httpsReachable = await this.checkHttpsReachable(conn.hostname);
      } catch {
      }
      results.push(info);
    }
    return results;
  }
  // ── getConnectableApps ───────────────────────────────────────────────────
  /** Returns apps that can be connected to domains (running services not yet connected). */
  getConnectableApps() {
    const routes = getActiveServiceRoutes(this.state);
    const connections = this.state.domainConnections ?? [];
    return routes.map((route) => {
      const existing = connections.find((c) => c.subdomain === route.subdomain);
      return {
        name: route.subdomain,
        containerName: route.containerName,
        port: route.port,
        running: true,
        // We assume routes represent running services
        alreadyConnected: !!existing,
        hostname: existing?.hostname
      };
    });
  }
  // ── Private helpers ──────────────────────────────────────────────────────
  /**
   * Pick a connect scenario from configured state: "A" when a Cloudflare zone
   * is configured, otherwise "C". (Scenario semantics are defined by callers
   * elsewhere in the CLI — TODO confirm meaning of "A"/"C".)
   */
  detectScenario() {
    const cf = this.state.domain.cloudflare;
    if (cf.zoneId) return "A";
    return "C";
  }
  /**
   * Resolve the local port an app listens on: active project routes first,
   * then the global ~/.brewnet/apps.json registry. For non-unified stacks the
   * frontend port is read from the app's .env (FRONTEND_PORT, default 3000).
   * Returns null when the app is unknown.
   */
  resolveContainerPort(appName) {
    const routes = getActiveServiceRoutes(this.state);
    const route = routes.find((r) => r.subdomain === appName || r.containerName === appName);
    if (route) return route.port;
    const appsJsonPath = path.join(os.homedir(), ".brewnet", "apps.json");
    const apps = readApps(appsJsonPath);
    const found = apps.find((a) => a.name === appName);
    if (!found) return null;
    const stackEntry = found.stackId ? getStackById(found.stackId) : null;
    if (stackEntry?.isUnified === false && found.appDir) {
      let frontendPort = 3e3;
      const envPath = path.join(found.appDir, ".env");
      try {
        const envContent = fs.readFileSync(envPath, "utf-8");
        const match = envContent.match(/^FRONTEND_PORT=(\d+)/m);
        if (match) frontendPort = parseInt(match[1], 10);
      } catch {
      }
      return frontendPort;
    }
    return found.port;
  }
  /**
   * Resolve the ingress target hostname for an app: the route's container
   * name for project services, "host.docker.internal" for registry apps that
   * run on the host, or the app name itself as a last resort.
   */
  resolveContainerName(appName) {
    const routes = getActiveServiceRoutes(this.state);
    const route = routes.find((r) => r.subdomain === appName || r.containerName === appName);
    if (route) return route.containerName;
    const appsJsonPath = path.join(os.homedir(), ".brewnet", "apps.json");
    const apps = readApps(appsJsonPath);
    const found = apps.find((a) => a.name === appName);
    if (found) return "host.docker.internal";
    return appName;
  }
  /**
   * Absolute path to the project's docker-compose.yml, expanding a leading
   * "~" in the stored project path to the user's home directory.
   */
  getComposePath() {
    const projectPath = this.state.projectPath.startsWith("~") ? path.join(os.homedir(), this.state.projectPath.slice(1)) : this.state.projectPath;
    return path.join(projectPath, "docker-compose.yml");
  }
  /**
   * Probe http://127.0.0.1:<port>/ with a 5s timeout. Any response with
   * status < 500 counts as healthy (the `resp.ok` term is subsumed by the
   * status check); network errors/timeouts return false.
   */
  async checkLocalHealth(_appName, port) {
    try {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), 5e3);
      const resp = await fetch(`http://127.0.0.1:${port}/`, { signal: controller.signal });
      clearTimeout(timeout);
      return resp.ok || resp.status < 500;
    } catch {
      return false;
    }
  }
  /**
   * True when `dig @1.1.1.1` returns any CNAME answer for the hostname
   * (10s timeout); false on empty answer or when `dig` is unavailable.
   */
  async checkDnsResolution(hostname) {
    try {
      const result = await execa("dig", ["+short", "CNAME", hostname, "@1.1.1.1"], { timeout: 1e4 });
      return result.stdout.trim().length > 0;
    } catch {
      return false;
    }
  }
  /**
   * HEAD-probe https://<hostname>/ with a 10s timeout; status < 500 counts
   * as reachable, network errors return false.
   */
  async checkHttpsReachable(hostname) {
    try {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), 1e4);
      const resp = await fetch(`https://${hostname}/`, {
        method: "HEAD",
        signal: controller.signal
      });
      clearTimeout(timeout);
      return resp.ok || resp.status < 500;
    } catch {
      return false;
    }
  }
  /**
   * Best-effort restore of a previously captured ingress table; silently
   * no-ops when routes or credentials are missing, and swallows API errors
   * (rollback must never mask the original failure).
   */
  async rollbackIngress(cf, previousRoutes, domain) {
    if (!previousRoutes || !cf.apiToken || !cf.accountId || !cf.tunnelId) return;
    try {
      await configureTunnelIngress(cf.apiToken, cf.accountId, cf.tunnelId, domain, previousRoutes);
    } catch {
    }
  }
  /**
   * Poll checkDnsResolution every 2s until it succeeds or `timeoutMs`
   * elapses; throws "DNS propagation timeout" on expiry.
   */
  async pollDnsPropagation(hostname, timeoutMs) {
    const start = Date.now();
    while (Date.now() - start < timeoutMs) {
      const resolved = await this.checkDnsResolution(hostname);
      if (resolved) return;
      await new Promise((r) => setTimeout(r, 2e3));
    }
    throw new Error("DNS propagation timeout");
  }
};
|
|
1372
|
+
|
|
1373
|
+
// Public surface of this chunk: error types, backup/registry/log helpers,
// and the DomainManager service, re-exported by the bundle entry points.
export {
  BrewnetError,
  isBrewnetError,
  addService,
  removeService,
  parseDuration,
  queryLogs,
  getLogStats,
  createBackup,
  restoreBackup,
  listBackups,
  checkDiskSpace,
  readApps,
  addApp,
  updateApp,
  removeApp,
  readDeployHistory,
  appendDeployHistory,
  DomainManager
};
|
|
1393
|
+
//# sourceMappingURL=chunk-2VWMDHGI.js.map
|