synapse-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +607 -0
- package/dist/constants.d.ts +23 -0
- package/dist/constants.d.ts.map +1 -0
- package/dist/constants.js +58 -0
- package/dist/constants.js.map +1 -0
- package/dist/formatters/index.d.ts +275 -0
- package/dist/formatters/index.d.ts.map +1 -0
- package/dist/formatters/index.js +461 -0
- package/dist/formatters/index.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +178 -0
- package/dist/index.js.map +1 -0
- package/dist/schemas/common.d.ts +48 -0
- package/dist/schemas/common.d.ts.map +1 -0
- package/dist/schemas/common.js +69 -0
- package/dist/schemas/common.js.map +1 -0
- package/dist/schemas/discriminator.d.ts +20 -0
- package/dist/schemas/discriminator.d.ts.map +1 -0
- package/dist/schemas/discriminator.js +25 -0
- package/dist/schemas/discriminator.js.map +1 -0
- package/dist/schemas/flux/compose.d.ts +93 -0
- package/dist/schemas/flux/compose.d.ts.map +1 -0
- package/dist/schemas/flux/compose.js +112 -0
- package/dist/schemas/flux/compose.js.map +1 -0
- package/dist/schemas/flux/container.d.ts +144 -0
- package/dist/schemas/flux/container.d.ts.map +1 -0
- package/dist/schemas/flux/container.js +163 -0
- package/dist/schemas/flux/container.js.map +1 -0
- package/dist/schemas/flux/docker.d.ts +91 -0
- package/dist/schemas/flux/docker.d.ts.map +1 -0
- package/dist/schemas/flux/docker.js +101 -0
- package/dist/schemas/flux/docker.js.map +1 -0
- package/dist/schemas/flux/host.d.ts +61 -0
- package/dist/schemas/flux/host.d.ts.map +1 -0
- package/dist/schemas/flux/host.js +72 -0
- package/dist/schemas/flux/host.js.map +1 -0
- package/dist/schemas/flux/index.d.ts +20 -0
- package/dist/schemas/flux/index.d.ts.map +1 -0
- package/dist/schemas/flux/index.js +88 -0
- package/dist/schemas/flux/index.js.map +1 -0
- package/dist/schemas/index.d.ts +11 -0
- package/dist/schemas/index.d.ts.map +1 -0
- package/dist/schemas/index.js +11 -0
- package/dist/schemas/index.js.map +1 -0
- package/dist/schemas/scout/index.d.ts +151 -0
- package/dist/schemas/scout/index.d.ts.map +1 -0
- package/dist/schemas/scout/index.js +41 -0
- package/dist/schemas/scout/index.js.map +1 -0
- package/dist/schemas/scout/logs.d.ts +48 -0
- package/dist/schemas/scout/logs.d.ts.map +1 -0
- package/dist/schemas/scout/logs.js +47 -0
- package/dist/schemas/scout/logs.js.map +1 -0
- package/dist/schemas/scout/simple.d.ts +68 -0
- package/dist/schemas/scout/simple.d.ts.map +1 -0
- package/dist/schemas/scout/simple.js +75 -0
- package/dist/schemas/scout/simple.js.map +1 -0
- package/dist/schemas/scout/zfs.d.ts +37 -0
- package/dist/schemas/scout/zfs.d.ts.map +1 -0
- package/dist/schemas/scout/zfs.js +36 -0
- package/dist/schemas/scout/zfs.js.map +1 -0
- package/dist/schemas/unified.d.ts +674 -0
- package/dist/schemas/unified.d.ts.map +1 -0
- package/dist/schemas/unified.js +453 -0
- package/dist/schemas/unified.js.map +1 -0
- package/dist/services/compose.d.ts +107 -0
- package/dist/services/compose.d.ts.map +1 -0
- package/dist/services/compose.js +308 -0
- package/dist/services/compose.js.map +1 -0
- package/dist/services/container.d.ts +69 -0
- package/dist/services/container.d.ts.map +1 -0
- package/dist/services/container.js +111 -0
- package/dist/services/container.js.map +1 -0
- package/dist/services/docker.d.ts +243 -0
- package/dist/services/docker.d.ts.map +1 -0
- package/dist/services/docker.js +812 -0
- package/dist/services/docker.js.map +1 -0
- package/dist/services/file-service.d.ts +79 -0
- package/dist/services/file-service.d.ts.map +1 -0
- package/dist/services/file-service.js +226 -0
- package/dist/services/file-service.js.map +1 -0
- package/dist/services/interfaces.d.ts +537 -0
- package/dist/services/interfaces.d.ts.map +1 -0
- package/dist/services/interfaces.js +2 -0
- package/dist/services/interfaces.js.map +1 -0
- package/dist/services/ssh-pool-exec.d.ts +10 -0
- package/dist/services/ssh-pool-exec.d.ts.map +1 -0
- package/dist/services/ssh-pool-exec.js +10 -0
- package/dist/services/ssh-pool-exec.js.map +1 -0
- package/dist/services/ssh-pool.d.ts +66 -0
- package/dist/services/ssh-pool.d.ts.map +1 -0
- package/dist/services/ssh-pool.js +253 -0
- package/dist/services/ssh-pool.js.map +1 -0
- package/dist/services/ssh-service.d.ts +39 -0
- package/dist/services/ssh-service.d.ts.map +1 -0
- package/dist/services/ssh-service.js +143 -0
- package/dist/services/ssh-service.js.map +1 -0
- package/dist/services/ssh.d.ts +37 -0
- package/dist/services/ssh.d.ts.map +1 -0
- package/dist/services/ssh.js +50 -0
- package/dist/services/ssh.js.map +1 -0
- package/dist/tools/flux.d.ts +14 -0
- package/dist/tools/flux.d.ts.map +1 -0
- package/dist/tools/flux.js +86 -0
- package/dist/tools/flux.js.map +1 -0
- package/dist/tools/index.d.ts +7 -0
- package/dist/tools/index.d.ts.map +1 -0
- package/dist/tools/index.js +43 -0
- package/dist/tools/index.js.map +1 -0
- package/dist/tools/scout.d.ts +14 -0
- package/dist/tools/scout.d.ts.map +1 -0
- package/dist/tools/scout.js +96 -0
- package/dist/tools/scout.js.map +1 -0
- package/dist/tools/unified.d.ts +7 -0
- package/dist/tools/unified.d.ts.map +1 -0
- package/dist/tools/unified.js +827 -0
- package/dist/tools/unified.js.map +1 -0
- package/dist/types.d.ts +93 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +7 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/errors.d.ts +60 -0
- package/dist/utils/errors.d.ts.map +1 -0
- package/dist/utils/errors.js +131 -0
- package/dist/utils/errors.js.map +1 -0
- package/dist/utils/help.d.ts +69 -0
- package/dist/utils/help.d.ts.map +1 -0
- package/dist/utils/help.js +259 -0
- package/dist/utils/help.js.map +1 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +4 -0
- package/dist/utils/index.js.map +1 -0
- package/dist/utils/path-security.d.ts +64 -0
- package/dist/utils/path-security.d.ts.map +1 -0
- package/dist/utils/path-security.js +138 -0
- package/dist/utils/path-security.js.map +1 -0
- package/package.json +85 -0
package/dist/services/docker.js
@@ -0,0 +1,812 @@
import Docker from "dockerode";
import { readFileSync, existsSync } from "fs";
import { homedir, hostname } from "os";
import { join } from "path";
import { DEFAULT_DOCKER_SOCKET, API_TIMEOUT, ENV_HOSTS_CONFIG } from "../constants.js";
import { HostOperationError, logError } from "../utils/errors.js";
/**
 * Check if a string looks like a Unix socket path
 */
export function isSocketPath(value) {
    return (value.startsWith("/") &&
        (value.endsWith(".sock") || value.includes("/docker") || value.includes("/run/")));
}
/**
 * Create a default Docker client for a given host configuration
 */
function createDefaultDockerClient(config) {
    // Check for explicit socket path OR socket path in host field
    const socketPath = config.dockerSocketPath || (isSocketPath(config.host) ? config.host : null);
    if (socketPath) {
        // Unix socket connection
        return new Docker({ socketPath });
    }
    else if (config.protocol === "http" || config.protocol === "https") {
        // Remote TCP connection
        return new Docker({
            host: config.host,
            port: config.port || 2375,
            protocol: config.protocol,
            timeout: API_TIMEOUT
        });
    }
    else {
        throw new Error(`Unsupported protocol: ${config.protocol}`);
    }
}
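// Usage sketch (illustrative only; both host objects below are hypothetical examples):
// a socket-style host resolves to a Unix-socket client, an http/https host to a TCP client.
function exampleCreateClients() {
    const socketHost = { name: "local", host: "/var/run/docker.sock", protocol: "http" };
    const tcpHost = { name: "nas", host: "192.168.1.10", protocol: "http", port: 2375 };
    return [
        createDefaultDockerClient(socketHost), // isSocketPath() matches, so new Docker({ socketPath })
        createDefaultDockerClient(tcpHost) // falls through to the protocol branch (TCP on port 2375)
    ];
}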
/**
 * DockerService class implementing IDockerService interface
 * Manages Docker client connections and operations across multiple hosts
 */
export class DockerService {
    dockerFactory;
    clientCache = new Map();
    constructor(dockerFactory = createDefaultDockerClient) {
        this.dockerFactory = dockerFactory;
    }
    /**
     * Get or create Docker client for a host
     */
    getDockerClient(config) {
        const cacheKey = `${config.name}-${config.host}`;
        const cached = this.clientCache.get(cacheKey);
        if (cached) {
            return cached;
        }
        const client = this.dockerFactory(config);
        this.clientCache.set(cacheKey, client);
        return client;
    }
    /**
     * Clears all cached Docker clients.
     *
     * Call this during application shutdown or when you need to force new connections
     * to all Docker hosts. The cached client instances will be removed, and any
     * underlying HTTP/socket connections will be cleaned up by garbage collection
     * when the client objects are no longer referenced.
     *
     * Note: Dockerode clients do not have an explicit close() method. The HTTP agent
     * connections are automatically managed and will be released by the Node.js runtime
     * when the client objects are garbage collected.
     *
     * @example
     * ```typescript
     * // Force fresh connections on next access
     * dockerService.clearClients();
     *
     * // Or during shutdown
     * process.on('SIGTERM', () => {
     *   dockerService.clearClients();
     *   process.exit(0);
     * });
     * ```
     */
    clearClients() {
        this.clientCache.clear();
    }
    /**
     * List containers across all hosts with filtering (parallel execution)
     */
    async listContainers(hosts, options = {}) {
        // Query all hosts in parallel using Promise.allSettled
        const results = await Promise.allSettled(hosts.map((host) => this.listContainersOnHost(host, options)));
        // Collect results from successful queries, log failures
        const containers = [];
        for (let i = 0; i < results.length; i++) {
            const result = results[i];
            if (result.status === "fulfilled") {
                containers.push(...result.value);
            }
            else {
                console.error(`Failed to list containers on ${hosts[i].name}:`, result.reason);
            }
        }
        return containers;
    }
    /**
     * List containers on a single host (internal helper)
     */
    async listContainersOnHost(host, options) {
        const docker = this.getDockerClient(host);
        const listOptions = {
            all: options.state !== "running"
        };
        // Add label filter if specified
        if (options.labelFilter) {
            listOptions.filters = { label: [options.labelFilter] };
        }
        const containers = await docker.listContainers(listOptions);
        const results = [];
        for (const c of containers) {
            const containerState = c.State?.toLowerCase();
            // Apply state filter
            if (options.state && options.state !== "all") {
                if (options.state === "stopped" && containerState !== "exited")
                    continue;
                if (options.state === "paused" && containerState !== "paused")
                    continue;
                if (options.state === "running" && containerState !== "running")
                    continue;
            }
            const name = c.Names[0]?.replace(/^\//, "") || c.Id.slice(0, 12);
            // Apply name filter
            if (options.nameFilter && !name.toLowerCase().includes(options.nameFilter.toLowerCase())) {
                continue;
            }
            // Apply image filter
            if (options.imageFilter &&
                !c.Image.toLowerCase().includes(options.imageFilter.toLowerCase())) {
                continue;
            }
            results.push({
                id: c.Id,
                name,
                image: c.Image,
                state: containerState,
                status: c.Status,
                created: new Date(c.Created * 1000).toISOString(),
                ports: (c.Ports || []).map((p) => ({
                    containerPort: p.PrivatePort,
                    hostPort: p.PublicPort,
                    protocol: p.Type,
                    hostIp: p.IP
                })),
                labels: c.Labels || {},
                hostName: host.name
            });
        }
        return results;
    }
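    // Usage sketch (illustrative; `service`, `hosts` and the filter values are examples only):
    //   const service = new DockerService();
    //   const hosts = loadHostConfigs();
    //   const running = await service.listContainers(hosts, { state: "running", nameFilter: "web" });
    //   const byLabel = await service.listContainers(hosts, { labelFilter: "com.docker.compose.project=media" });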
    /**
     * Find which host a container is on
     */
    async findContainerHost(containerId, hosts) {
        for (const host of hosts) {
            try {
                const docker = this.getDockerClient(host);
                const containers = await docker.listContainers({ all: true });
                const found = containers.find((c) => c.Id.startsWith(containerId) ||
                    c.Names.some((n) => n.replace(/^\//, "") === containerId));
                if (found) {
                    return { host, container: found };
                }
            }
            catch (error) {
                logError(new HostOperationError("Failed to list containers on host", host.name, "findContainerHost", error), { metadata: { containerId } });
            }
        }
        return null;
    }
    /**
     * Perform action on container
     */
    async containerAction(containerId, action, host) {
        const container = await this.getContainer(containerId, host);
        switch (action) {
            case "start":
                await container.start();
                break;
            case "stop":
                await container.stop({ t: 10 });
                break;
            case "restart":
                await container.restart({ t: 10 });
                break;
            case "pause":
                await container.pause();
                break;
            case "unpause":
                await container.unpause();
                break;
        }
    }
    /**
     * Get container by ID or name
     */
    async getContainer(containerId, host) {
        const docker = this.getDockerClient(host);
        return docker.getContainer(containerId);
    }
    /**
     * Get container logs
     */
    async getContainerLogs(containerId, host, options = {}) {
        const container = await this.getContainer(containerId, host);
        const logOptions = {
            stdout: options.stream !== "stderr",
            stderr: options.stream !== "stdout",
            tail: options.lines || 100,
            timestamps: true,
            follow: false
        };
        if (options.since) {
            logOptions.since = parseTimeSpec(options.since);
        }
        if (options.until) {
            logOptions.until = parseTimeSpec(options.until);
        }
        const logs = await container.logs(logOptions);
        return parseDockerLogs(logs.toString());
    }
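    // Usage sketch (illustrative; the container name and filter values are examples only):
    //   last hour of stderr from a container, capped at 500 lines
    //   const entries = await service.getContainerLogs("nginx", host, { since: "1h", stream: "stderr", lines: 500 });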
    /**
     * Get container stats
     */
    async getContainerStats(containerId, host) {
        const container = await this.getContainer(containerId, host);
        const stats = await container.stats({ stream: false });
        // Calculate CPU percentage
        const cpuDelta = stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage;
        const systemDelta = stats.cpu_stats.system_cpu_usage - stats.precpu_stats.system_cpu_usage;
        const cpuCount = stats.cpu_stats.online_cpus || 1;
        const cpuPercent = systemDelta > 0 ? (cpuDelta / systemDelta) * cpuCount * 100 : 0;
        // Memory stats
        const memUsage = stats.memory_stats.usage || 0;
        const memLimit = stats.memory_stats.limit || 1;
        const memPercent = (memUsage / memLimit) * 100;
        // Network stats
        let netRx = 0, netTx = 0;
        if (stats.networks) {
            for (const net of Object.values(stats.networks)) {
                netRx += net.rx_bytes || 0;
                netTx += net.tx_bytes || 0;
            }
        }
        // Block I/O
        let blockRead = 0, blockWrite = 0;
        if (stats.blkio_stats?.io_service_bytes_recursive) {
            for (const entry of stats.blkio_stats.io_service_bytes_recursive) {
                if (entry.op === "read")
                    blockRead += entry.value;
                if (entry.op === "write")
                    blockWrite += entry.value;
            }
        }
        const info = await container.inspect();
        return {
            containerId,
            containerName: info.Name.replace(/^\//, ""),
            cpuPercent: Math.round(cpuPercent * 100) / 100,
            memoryUsage: memUsage,
            memoryLimit: memLimit,
            memoryPercent: Math.round(memPercent * 100) / 100,
            networkRx: netRx,
            networkTx: netTx,
            blockRead,
            blockWrite
        };
    }
    /**
     * Get host status overview (parallel execution)
     */
    async getHostStatus(hosts) {
        // Query all hosts in parallel - errors are handled in getHostStatusSingle
        return Promise.all(hosts.map((host) => this.getHostStatusSingle(host)));
    }
    /**
     * Get status for a single host (internal helper)
     */
    async getHostStatusSingle(host) {
        try {
            const docker = this.getDockerClient(host);
            const containers = await docker.listContainers({ all: true });
            const running = containers.filter((c) => c.State === "running").length;
            return {
                name: host.name,
                host: host.host,
                connected: true,
                containerCount: containers.length,
                runningCount: running
            };
        }
        catch (error) {
            logError(new HostOperationError("Failed to get host info", host.name, "getHostInfo", error), {
                metadata: { host: host.host }
            });
            return {
                name: host.name,
                host: host.host,
                connected: false,
                containerCount: 0,
                runningCount: 0,
                error: error instanceof Error ? error.message : "Connection failed"
            };
        }
    }
    /**
     * List images across all hosts (parallel execution)
     */
    async listImages(hosts, options = {}) {
        const results = await Promise.allSettled(hosts.map((host) => this.listImagesOnHost(host, options)));
        return results
            .filter((r) => r.status === "fulfilled")
            .flatMap((r) => r.value);
    }
    /**
     * List images from a single host (internal helper)
     */
    async listImagesOnHost(host, options) {
        const docker = this.getDockerClient(host);
        const images = await docker.listImages({
            filters: options.danglingOnly ? { dangling: ["true"] } : undefined
        });
        return images.map((img) => ({
            id: formatImageId(img.Id),
            tags: img.RepoTags || ["<none>:<none>"],
            size: img.Size,
            created: new Date(img.Created * 1000).toISOString(),
            containers: img.Containers || 0,
            hostName: host.name
        }));
    }
    /**
     * Inspect container for detailed info
     */
    async inspectContainer(containerId, host) {
        const container = await this.getContainer(containerId, host);
        return container.inspect();
    }
    /**
     * Get Docker system info
     */
    async getDockerInfo(host) {
        const docker = this.getDockerClient(host);
        const info = await docker.info();
        const version = await docker.version();
        return {
            dockerVersion: version.Version || "unknown",
            apiVersion: version.ApiVersion || "unknown",
            os: info.OperatingSystem || info.OSType || "unknown",
            arch: info.Architecture || "unknown",
            kernelVersion: info.KernelVersion || "unknown",
            cpus: info.NCPU || 0,
            memoryBytes: info.MemTotal || 0,
            storageDriver: info.Driver || "unknown",
            rootDir: info.DockerRootDir || "/var/lib/docker",
            containersTotal: info.Containers || 0,
            containersRunning: info.ContainersRunning || 0,
            containersPaused: info.ContainersPaused || 0,
            containersStopped: info.ContainersStopped || 0,
            images: info.Images || 0
        };
    }
    /**
     * Get Docker disk usage (system df)
     */
    async getDockerDiskUsage(host) {
        const docker = this.getDockerClient(host);
        const df = await docker.df();
        const images = df.Images || [];
        const imageSize = images.reduce((sum, i) => sum + (i.Size || 0), 0);
        const imageShared = images.reduce((sum, i) => sum + (i.SharedSize || 0), 0);
        const activeImages = images.filter((i) => i.Containers && i.Containers > 0).length;
        const containers = df.Containers || [];
        const containerSize = containers.reduce((sum, c) => sum + (c.SizeRw || 0), 0);
        const containerRootFs = containers.reduce((sum, c) => sum + (c.SizeRootFs || 0), 0);
        const runningContainers = containers.filter((c) => c.State === "running").length;
        const volumes = df.Volumes || [];
        const volumeSize = volumes.reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0);
        const activeVolumes = volumes.filter((v) => v.UsageData?.RefCount && v.UsageData.RefCount > 0).length;
        const buildCache = df.BuildCache || [];
        const buildCacheSize = buildCache.reduce((sum, b) => sum + (b.Size || 0), 0);
        const buildCacheReclaimable = buildCache
            .filter((b) => !b.InUse)
            .reduce((sum, b) => sum + (b.Size || 0), 0);
        const unusedVolumeSize = volumes
            .filter((v) => !v.UsageData?.RefCount)
            .reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0);
        const totalSize = imageSize + containerSize + volumeSize + buildCacheSize;
        const totalReclaimable = imageSize - imageShared + containerSize + unusedVolumeSize + buildCacheReclaimable;
        return {
            images: {
                total: images.length,
                active: activeImages,
                size: imageSize,
                reclaimable: imageSize - imageShared
            },
            containers: {
                total: containers.length,
                running: runningContainers,
                size: containerSize + containerRootFs,
                reclaimable: containerSize
            },
            volumes: {
                total: volumes.length,
                active: activeVolumes,
                size: volumeSize,
                reclaimable: unusedVolumeSize
            },
            buildCache: {
                total: buildCache.length,
                size: buildCacheSize,
                reclaimable: buildCacheReclaimable
            },
            totalSize,
            totalReclaimable
        };
    }
    /**
     * Prune Docker resources
     */
    async pruneDocker(host, target) {
        const docker = this.getDockerClient(host);
        const results = [];
        const targets = target === "all"
            ? ["containers", "images", "volumes", "networks", "buildcache"]
            : [target];
        for (const t of targets) {
            try {
                switch (t) {
                    case "containers": {
                        const res = await docker.pruneContainers();
                        results.push({
                            type: "containers",
                            spaceReclaimed: res.SpaceReclaimed || 0,
                            itemsDeleted: res.ContainersDeleted?.length || 0,
                            details: res.ContainersDeleted
                        });
                        break;
                    }
                    case "images": {
                        const res = await docker.pruneImages();
                        results.push({
                            type: "images",
                            spaceReclaimed: res.SpaceReclaimed || 0,
                            itemsDeleted: res.ImagesDeleted?.length || 0,
                            details: res.ImagesDeleted?.map((i) => i.Deleted || i.Untagged || "")
                        });
                        break;
                    }
                    case "volumes": {
                        const res = await docker.pruneVolumes();
                        results.push({
                            type: "volumes",
                            spaceReclaimed: res.SpaceReclaimed || 0,
                            itemsDeleted: res.VolumesDeleted?.length || 0,
                            details: res.VolumesDeleted
                        });
                        break;
                    }
                    case "networks": {
                        const res = await docker.pruneNetworks();
                        results.push({
                            type: "networks",
                            spaceReclaimed: 0,
                            itemsDeleted: res.NetworksDeleted?.length || 0,
                            details: res.NetworksDeleted
                        });
                        break;
                    }
                    case "buildcache": {
                        const res = (await docker.pruneBuilder());
                        results.push({
                            type: "buildcache",
                            spaceReclaimed: res.SpaceReclaimed || 0,
                            itemsDeleted: res.CachesDeleted?.length || 0,
                            details: res.CachesDeleted
                        });
                        break;
                    }
                }
            }
            catch (error) {
                logError(new HostOperationError("Docker cleanup failed", host.name, "dockerCleanup", error), {
                    metadata: { type: t }
                });
                results.push({
                    type: t,
                    spaceReclaimed: 0,
                    itemsDeleted: 0,
                    details: [`Error: ${error instanceof Error ? error.message : "Unknown error"}`]
                });
            }
        }
        return results;
    }
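    // Usage sketch (illustrative; `host` is a hypothetical entry from loadHostConfigs()):
    //   const results = await service.pruneDocker(host, "all");
    //   for (const r of results) {
    //       console.error(`${r.type}: ${r.itemsDeleted} deleted, ${formatBytes(r.spaceReclaimed)} reclaimed`);
    //   }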
    /**
     * Pull an image on a host
     */
    async pullImage(imageName, host) {
        if (!imageName || imageName.trim() === "") {
            throw new Error("Image name is required");
        }
        const docker = this.getDockerClient(host);
        return new Promise((resolve, reject) => {
            docker.pull(imageName, (err, stream) => {
                if (err) {
                    reject(new Error(`Failed to pull image: ${err.message}`));
                    return;
                }
                docker.modem.followProgress(stream, (err) => {
                    if (err) {
                        reject(new Error(`Pull failed: ${err.message}`));
                    }
                    else {
                        resolve({ status: `Successfully pulled ${imageName}` });
                    }
                });
            });
        });
    }
    /**
     * Recreate a container (stop, remove, pull latest, start with same config)
     */
    async recreateContainer(containerId, host, options = {}) {
        const docker = this.getDockerClient(host);
        const container = docker.getContainer(containerId);
        // Get current container config
        const info = await container.inspect();
        const imageName = info.Config.Image;
        // Stop container if running
        if (info.State.Running) {
            await container.stop();
        }
        // Remove container
        await container.remove();
        // Pull latest image if requested
        if (options.pull !== false) {
            await this.pullImage(imageName, host);
        }
        // Create new container with same config
        const newContainer = await docker.createContainer({
            ...info.Config,
            HostConfig: info.HostConfig,
            NetworkingConfig: {
                EndpointsConfig: info.NetworkSettings.Networks
            }
        });
        // Start new container
        await newContainer.start();
        return {
            status: "Container recreated successfully",
            containerId: newContainer.id
        };
    }
    /**
     * Remove an image
     */
    async removeImage(imageId, host, options = {}) {
        const docker = this.getDockerClient(host);
        const image = docker.getImage(imageId);
        await image.remove({ force: options.force });
        return { status: `Successfully removed image ${imageId}` };
    }
    /**
     * Build an image from a Dockerfile (SSH-based for remote hosts)
     *
     * SECURITY: Implements path traversal protection (CWE-22)
     * - Requires absolute paths for context and dockerfile
     * - Rejects any path containing .. or . components
     * - Validates character set to prevent injection
     *
     * @param host - Docker host configuration
     * @param options - Build options (context, tag, dockerfile, noCache)
     * @returns Promise resolving to build status
     * @throws Error if paths contain directory traversal or invalid characters
     */
    async buildImage(host, options) {
        // For remote builds, we need to use SSH and docker build command
        // dockerode's build() requires local tar stream which won't work for remote
        const { context, tag, dockerfile, noCache } = options;
        // Validate inputs
        if (!/^[a-zA-Z0-9._\-/:]+$/.test(tag)) {
            throw new Error(`Invalid image tag: ${tag}`);
        }
        // Use secure path validation (prevents directory traversal)
        const { validateSecurePath } = await import("../utils/path-security.js");
        validateSecurePath(context, "context");
        if (dockerfile) {
            validateSecurePath(dockerfile, "dockerfile");
        }
        const args = ["build", "-t", tag];
        if (noCache) {
            args.push("--no-cache");
        }
        if (dockerfile) {
            args.push("-f", dockerfile);
        }
        args.push(context);
        // Execute via SSH for remote hosts, or locally for socket connections
        if (host.host.startsWith("/")) {
            // Local socket - use docker directly
            const { execFile } = await import("child_process");
            const { promisify } = await import("util");
            const execFileAsync = promisify(execFile);
            await execFileAsync("docker", args, { timeout: 600000 }); // 10 min timeout for builds
        }
        else {
            // Remote - use SSH
            const { validateHostForSsh, sanitizeForShell } = await import("./ssh.js");
            const { execFile } = await import("child_process");
            const { promisify } = await import("util");
            const execFileAsync = promisify(execFile);
            validateHostForSsh(host);
            const sshArgs = [
                "-o",
                "BatchMode=yes",
                "-o",
                "ConnectTimeout=5",
                "-o",
                "StrictHostKeyChecking=accept-new",
                sanitizeForShell(host.name),
                `docker ${args.join(" ")}`
            ];
            await execFileAsync("ssh", sshArgs, { timeout: 600000 });
        }
        return { status: `Successfully built image ${tag}` };
    }
}
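// End-to-end sketch (illustrative; nothing here runs automatically and the output format is an example only):
// wire the service to the configured hosts and print a one-line status per host.
async function exampleHostOverview() {
    const hosts = loadHostConfigs();
    const service = new DockerService();
    const statuses = await service.getHostStatus(hosts);
    for (const s of statuses) {
        const summary = s.connected ? `${s.runningCount}/${s.containerCount} running` : `unreachable (${s.error})`;
        console.error(`${s.name}: ${summary}`);
    }
}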
/**
 * Config file search paths (in order of priority)
 */
const CONFIG_PATHS = [
    process.env.HOMELAB_CONFIG_FILE, // Explicit path
    join(process.cwd(), "homelab.config.json"), // Current directory
    join(homedir(), ".config", "homelab-mcp", "config.json"), // XDG style
    join(homedir(), ".homelab-mcp.json") // Dotfile style
].filter(Boolean);
/**
 * Auto-add local Docker socket if it exists and isn't already configured
 */
function ensureLocalSocket(hosts) {
    // Check if local socket exists
    if (!existsSync(DEFAULT_DOCKER_SOCKET)) {
        return hosts;
    }
    // Check if any host already uses the local socket
    const hasLocalSocket = hosts.some((h) => h.dockerSocketPath === DEFAULT_DOCKER_SOCKET ||
        h.host === DEFAULT_DOCKER_SOCKET ||
        (h.host === "localhost" && h.dockerSocketPath));
    if (hasLocalSocket) {
        return hosts;
    }
    // Auto-add local socket entry
    const localName = hostname()
        .toLowerCase()
        .replace(/[^a-z0-9-]/g, "-") || "local";
    console.error(`Auto-adding local Docker socket as "${localName}"`);
    return [
        ...hosts,
        {
            name: localName,
            host: DEFAULT_DOCKER_SOCKET,
            protocol: "http",
            dockerSocketPath: DEFAULT_DOCKER_SOCKET
        }
    ];
}
/**
 * Load host configurations from config file, env var, or defaults
 */
export function loadHostConfigs() {
    let hosts = [];
    // 1. Try config file first
    for (const configPath of CONFIG_PATHS) {
        if (existsSync(configPath)) {
            try {
                const raw = readFileSync(configPath, "utf-8");
                const config = JSON.parse(raw);
                const configHosts = config.hosts || config; // Support { hosts: [...] } or just [...]
                if (Array.isArray(configHosts) && configHosts.length > 0) {
                    console.error(`Loaded ${configHosts.length} hosts from ${configPath}`);
                    hosts = configHosts;
                    break;
                }
            }
            catch (error) {
                logError(error, {
                    operation: "loadHostConfigs",
                    metadata: { configPath, source: "file" }
                });
            }
        }
    }
    // 2. Fall back to env var if no config file
    if (hosts.length === 0) {
        const configJson = process.env[ENV_HOSTS_CONFIG];
        if (configJson) {
            try {
                hosts = JSON.parse(configJson);
                console.error(`Loaded ${hosts.length} hosts from HOMELAB_HOSTS_CONFIG env`);
            }
            catch (error) {
                logError(error, {
                    operation: "loadHostConfigs",
                    metadata: { source: "HOMELAB_HOSTS_CONFIG" }
                });
            }
        }
    }
    // 3. If still no hosts, default to local socket only
    if (hosts.length === 0) {
        console.error("No config found, using local Docker socket");
        return [
            {
                name: "local",
                host: "localhost",
                protocol: "http",
                dockerSocketPath: DEFAULT_DOCKER_SOCKET
            }
        ];
    }
    // 4. Auto-add local socket if exists and not configured
    return ensureLocalSocket(hosts);
}
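// Config sketch (illustrative; the names, address and port below are made up):
// loadHostConfigs() accepts either a bare array or an object with a "hosts" array, so a
// homelab.config.json equivalent to this object would be picked up from CONFIG_PATHS.
const exampleHostsConfig = {
    hosts: [
        { name: "local", host: "/var/run/docker.sock", protocol: "http", dockerSocketPath: "/var/run/docker.sock" },
        { name: "nas", host: "192.168.1.10", protocol: "http", port: 2375 }
    ]
};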
/**
 * Parse time specification (absolute or relative) - pure helper function
 */
function parseTimeSpec(spec) {
    // Check for relative time like "1h", "30m", "2d"
    const relativeMatch = spec.match(/^(\d+)([smhd])$/);
    if (relativeMatch) {
        const value = parseInt(relativeMatch[1], 10);
        const unit = relativeMatch[2];
        const multipliers = {
            s: 1,
            m: 60,
            h: 3600,
            d: 86400
        };
        return Math.floor(Date.now() / 1000) - value * multipliers[unit];
    }
    // Absolute timestamp
    return Math.floor(new Date(spec).getTime() / 1000);
}
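// Behaviour sketch (illustrative; the values in the comments are worked examples):
function exampleTimeSpecs() {
    return [
        parseTimeSpec("1h"), // relative: Unix seconds for one hour ago (now - 3600)
        parseTimeSpec("30m"), // relative: now - 1800
        parseTimeSpec("2024-01-01T00:00:00Z") // absolute: 1704067200
    ];
}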
/**
 * Parse Docker log output into structured entries - pure helper function
 */
function parseDockerLogs(raw) {
    const lines = raw.split("\n").filter((l) => l.trim());
    const entries = [];
    for (const line of lines) {
        // Docker log format: timestamp message
        const match = line.match(/^(\d{4}-\d{2}-\d{2}T[\d:.]+Z)\s+(.*)$/);
        if (match) {
            entries.push({
                timestamp: match[1],
                stream: "stdout", // Default, actual stream info requires demuxing
                message: match[2]
            });
        }
        else if (line.trim()) {
            entries.push({
                timestamp: new Date().toISOString(),
                stream: "stdout",
                message: line
            });
        }
    }
    return entries;
}
/**
 * Format bytes to human readable
 */
export function formatBytes(bytes) {
    if (bytes === 0)
        return "0 B";
    const k = 1024;
    const sizes = ["B", "KB", "MB", "GB", "TB"];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return `${(bytes / Math.pow(k, i)).toFixed(1)} ${sizes[i]}`;
}
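// Behaviour sketch (worked examples):
//   formatBytes(0)          -> "0 B"
//   formatBytes(1536)       -> "1.5 KB"
//   formatBytes(1073741824) -> "1.0 GB"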
/**
 * Format uptime from created timestamp
 */
export function formatUptime(created) {
    const diff = Date.now() - new Date(created).getTime();
    const days = Math.floor(diff / 86400000);
    const hours = Math.floor((diff % 86400000) / 3600000);
    const minutes = Math.floor((diff % 3600000) / 60000);
    if (days > 0)
        return `${days}d ${hours}h`;
    if (hours > 0)
        return `${hours}h ${minutes}m`;
    return `${minutes}m`;
}
/**
 * Format Docker image ID (truncate sha256: prefix and limit to 12 chars)
 */
export function formatImageId(id) {
    const cleaned = id.replace(/^sha256:/, "");
    return cleaned.slice(0, 12) || cleaned;
}
//# sourceMappingURL=docker.js.map