@rog0x/mcp-docker-tools 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +113 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +69 -0
- package/dist/tools/compose-analyzer.d.ts +16 -0
- package/dist/tools/compose-analyzer.js +284 -0
- package/dist/tools/container-list.d.ts +23 -0
- package/dist/tools/container-list.js +132 -0
- package/dist/tools/container-logs.d.ts +38 -0
- package/dist/tools/container-logs.js +126 -0
- package/dist/tools/dockerfile-analyzer.d.ts +21 -0
- package/dist/tools/dockerfile-analyzer.js +278 -0
- package/dist/tools/image-list.d.ts +27 -0
- package/dist/tools/image-list.js +117 -0
- package/package.json +29 -0
- package/src/index.ts +101 -0
- package/src/tools/compose-analyzer.ts +316 -0
- package/src/tools/container-list.ts +158 -0
- package/src/tools/container-logs.ts +131 -0
- package/src/tools/dockerfile-analyzer.ts +314 -0
- package/src/tools/image-list.ts +134 -0
- package/tsconfig.json +19 -0
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
import { execSync } from "node:child_process";
|
|
2
|
+
|
|
3
|
+
// MCP tool descriptor for the docker_container_logs tool.
// Declares the JSON input schema; execution lives in handleContainerLogs below.
export const containerLogsTool = {
  name: "docker_container_logs",
  description:
    "Get logs from a Docker container. Supports retrieving the last N lines, " +
    "filtering by keyword, and including timestamps. Can target a container by name or ID.",
  inputSchema: {
    // 'as const' preserves the literal "object" type expected by MCP schema typings.
    type: "object" as const,
    properties: {
      container: {
        type: "string",
        description: "Container name or ID to get logs from.",
      },
      tail: {
        type: "number",
        description: "Number of lines to retrieve from the end of logs. Defaults to 100.",
        default: 100,
      },
      since: {
        type: "string",
        description: "Show logs since a timestamp (e.g., '2024-01-01T00:00:00') or relative duration (e.g., '1h', '30m').",
      },
      until: {
        type: "string",
        description: "Show logs until a timestamp or relative duration.",
      },
      filter: {
        type: "string",
        description: "Filter log lines to only include those containing this keyword (case-insensitive).",
      },
      timestamps: {
        type: "boolean",
        description: "Include timestamps in log output. Defaults to true.",
        default: true,
      },
    },
    required: ["container"],
  },
};
|
|
41
|
+
|
|
42
|
+
export async function handleContainerLogs(args: Record<string, unknown>): Promise<string> {
|
|
43
|
+
const container = args.container as string;
|
|
44
|
+
const tail = typeof args.tail === "number" ? args.tail : 100;
|
|
45
|
+
const since = args.since as string | undefined;
|
|
46
|
+
const until = args.until as string | undefined;
|
|
47
|
+
const filter = args.filter as string | undefined;
|
|
48
|
+
const timestamps = args.timestamps !== false;
|
|
49
|
+
|
|
50
|
+
if (!container || !container.trim()) {
|
|
51
|
+
return "Error: 'container' parameter is required. Provide a container name or ID.";
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
try {
|
|
55
|
+
// Verify container exists
|
|
56
|
+
try {
|
|
57
|
+
execSync(`docker inspect --type=container ${container}`, {
|
|
58
|
+
encoding: "utf-8",
|
|
59
|
+
timeout: 10000,
|
|
60
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
61
|
+
});
|
|
62
|
+
} catch {
|
|
63
|
+
return `Error: Container '${container}' not found. Use docker_container_list to see available containers.`;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
// Build the logs command
|
|
67
|
+
const parts = ["docker", "logs"];
|
|
68
|
+
if (timestamps) parts.push("--timestamps");
|
|
69
|
+
parts.push(`--tail=${tail}`);
|
|
70
|
+
if (since) parts.push(`--since=${since}`);
|
|
71
|
+
if (until) parts.push(`--until=${until}`);
|
|
72
|
+
parts.push(container);
|
|
73
|
+
|
|
74
|
+
// docker logs outputs to stderr for some containers, capture both
|
|
75
|
+
const raw = execSync(parts.join(" "), {
|
|
76
|
+
encoding: "utf-8",
|
|
77
|
+
timeout: 30000,
|
|
78
|
+
maxBuffer: 10 * 1024 * 1024,
|
|
79
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
// Also capture stderr since docker logs sends some output there
|
|
83
|
+
let combined: string;
|
|
84
|
+
try {
|
|
85
|
+
combined = execSync(parts.join(" ") + " 2>&1", {
|
|
86
|
+
encoding: "utf-8",
|
|
87
|
+
timeout: 30000,
|
|
88
|
+
maxBuffer: 10 * 1024 * 1024,
|
|
89
|
+
});
|
|
90
|
+
} catch {
|
|
91
|
+
combined = raw;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
if (!combined.trim()) {
|
|
95
|
+
return `No logs found for container '${container}' with the specified parameters.`;
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
let lines = combined.split("\n");
|
|
99
|
+
|
|
100
|
+
// Apply keyword filter
|
|
101
|
+
if (filter) {
|
|
102
|
+
const lowerFilter = filter.toLowerCase();
|
|
103
|
+
lines = lines.filter((line) => line.toLowerCase().includes(lowerFilter));
|
|
104
|
+
|
|
105
|
+
if (lines.length === 0) {
|
|
106
|
+
return `No log lines matching '${filter}' found in the last ${tail} lines of container '${container}'.`;
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
const header = `Logs for container '${container}' (${lines.length} lines)`;
|
|
111
|
+
const separator = "=".repeat(Math.min(header.length, 60));
|
|
112
|
+
|
|
113
|
+
const result = [header, separator, ...lines].join("\n");
|
|
114
|
+
|
|
115
|
+
// Truncate if extremely long
|
|
116
|
+
if (result.length > 50000) {
|
|
117
|
+
return result.substring(0, 50000) + "\n\n... [output truncated at 50KB]";
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
return result;
|
|
121
|
+
} catch (err: unknown) {
|
|
122
|
+
const message = err instanceof Error ? err.message : String(err);
|
|
123
|
+
if (message.includes("not found") || message.includes("not recognized") || message.includes("ENOENT")) {
|
|
124
|
+
return "Error: Docker is not installed or not available in PATH. Please install Docker and ensure the 'docker' command is accessible.";
|
|
125
|
+
}
|
|
126
|
+
if (message.includes("Cannot connect") || message.includes("Is the docker daemon running")) {
|
|
127
|
+
return "Error: Cannot connect to the Docker daemon. Is Docker running?";
|
|
128
|
+
}
|
|
129
|
+
return `Error fetching logs for '${container}': ${message}`;
|
|
130
|
+
}
|
|
131
|
+
}
|
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
// MCP tool descriptor for the docker_dockerfile_analyze tool.
// Declares the JSON input schema; execution lives in handleDockerfileAnalyzer.
export const dockerfileAnalyzerTool = {
  name: "docker_dockerfile_analyze",
  description:
    "Analyze a Dockerfile for best practices including multi-stage builds, non-root user, " +
    ".dockerignore usage, layer caching order, image size optimization, and security. " +
    "Provide the Dockerfile content as input and receive a detailed report with suggestions.",
  inputSchema: {
    // 'as const' preserves the literal "object" type expected by MCP schema typings.
    type: "object" as const,
    properties: {
      content: {
        type: "string",
        description: "The full content of the Dockerfile to analyze.",
      },
      checkDockerignore: {
        type: "boolean",
        description: "If true, also checks for common .dockerignore recommendations. Defaults to true.",
        default: true,
      },
    },
    required: ["content"],
  },
};
|
|
23
|
+
|
|
24
|
+
// A single analysis result produced by analyzeDockerfile.
interface Finding {
  category: string;                        // report grouping heading, e.g. "Security"
  severity: "info" | "warning" | "error";  // drives the score penalty and report icon
  message: string;                         // human-readable description of the finding
  suggestion?: string;                     // optional remediation advice
  line?: number;                           // 1-based Dockerfile line, when known
}
|
|
31
|
+
|
|
32
|
+
function analyzeDockerfile(content: string, checkDockerignore: boolean): { findings: Finding[]; summary: Record<string, unknown> } {
|
|
33
|
+
const lines = content.split("\n");
|
|
34
|
+
const findings: Finding[] = [];
|
|
35
|
+
|
|
36
|
+
const fromStatements: { line: number; image: string; alias?: string }[] = [];
|
|
37
|
+
let hasUser = false;
|
|
38
|
+
let hasCopy = false;
|
|
39
|
+
let hasAdd = false;
|
|
40
|
+
let hasHealthcheck = false;
|
|
41
|
+
let hasExpose = false;
|
|
42
|
+
let hasWorkdir = false;
|
|
43
|
+
let runCount = 0;
|
|
44
|
+
let hasAptGetCleanup = false;
|
|
45
|
+
let usesLatestTag = false;
|
|
46
|
+
let hasEnvForVersions = false;
|
|
47
|
+
let copiesBeforeRun = false;
|
|
48
|
+
let lastCopyLine = -1;
|
|
49
|
+
let lastRunLine = -1;
|
|
50
|
+
|
|
51
|
+
for (let i = 0; i < lines.length; i++) {
|
|
52
|
+
const raw = lines[i];
|
|
53
|
+
const trimmed = raw.trim();
|
|
54
|
+
const lineNum = i + 1;
|
|
55
|
+
|
|
56
|
+
// Skip comments and empty
|
|
57
|
+
if (trimmed.startsWith("#") || !trimmed) continue;
|
|
58
|
+
|
|
59
|
+
const instruction = trimmed.split(/\s+/)[0].toUpperCase();
|
|
60
|
+
|
|
61
|
+
if (instruction === "FROM") {
|
|
62
|
+
const rest = trimmed.substring(4).trim();
|
|
63
|
+
const parts = rest.split(/\s+/);
|
|
64
|
+
const image = parts[0];
|
|
65
|
+
const alias = parts.find((_, idx) => idx > 0 && parts[idx - 1]?.toUpperCase() === "AS");
|
|
66
|
+
fromStatements.push({ line: lineNum, image, alias });
|
|
67
|
+
|
|
68
|
+
if (image.endsWith(":latest") || (!image.includes(":") && !image.includes("@"))) {
|
|
69
|
+
usesLatestTag = true;
|
|
70
|
+
findings.push({
|
|
71
|
+
category: "Versioning",
|
|
72
|
+
severity: "warning",
|
|
73
|
+
message: `Line ${lineNum}: Base image '${image}' uses implicit or explicit :latest tag.`,
|
|
74
|
+
suggestion: "Pin to a specific version tag (e.g., node:20-alpine) for reproducible builds.",
|
|
75
|
+
line: lineNum,
|
|
76
|
+
});
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
if (!image.includes("alpine") && !image.includes("slim") && !image.includes("distroless") && !image.includes("scratch")) {
|
|
80
|
+
findings.push({
|
|
81
|
+
category: "Image Size",
|
|
82
|
+
severity: "info",
|
|
83
|
+
message: `Line ${lineNum}: Base image '${image}' is not a minimal variant.`,
|
|
84
|
+
suggestion: "Consider using an alpine, slim, or distroless variant to reduce image size.",
|
|
85
|
+
line: lineNum,
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
if (instruction === "RUN") {
|
|
91
|
+
runCount++;
|
|
92
|
+
lastRunLine = lineNum;
|
|
93
|
+
|
|
94
|
+
if (trimmed.includes("apt-get") || trimmed.includes("apk add")) {
|
|
95
|
+
if (trimmed.includes("rm -rf /var/lib/apt/lists") || trimmed.includes("--no-cache")) {
|
|
96
|
+
hasAptGetCleanup = true;
|
|
97
|
+
} else {
|
|
98
|
+
findings.push({
|
|
99
|
+
category: "Layer Caching",
|
|
100
|
+
severity: "warning",
|
|
101
|
+
message: `Line ${lineNum}: Package install without cache cleanup.`,
|
|
102
|
+
suggestion: "Add 'rm -rf /var/lib/apt/lists/*' in the same RUN layer, or use 'apk add --no-cache'.",
|
|
103
|
+
line: lineNum,
|
|
104
|
+
});
|
|
105
|
+
}
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
if (trimmed.includes("curl") && !trimmed.includes("--fail")) {
|
|
109
|
+
findings.push({
|
|
110
|
+
category: "Reliability",
|
|
111
|
+
severity: "info",
|
|
112
|
+
message: `Line ${lineNum}: curl used without --fail flag.`,
|
|
113
|
+
suggestion: "Use 'curl --fail' so the build fails on HTTP errors.",
|
|
114
|
+
line: lineNum,
|
|
115
|
+
});
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
if (instruction === "COPY") {
|
|
120
|
+
hasCopy = true;
|
|
121
|
+
lastCopyLine = lineNum;
|
|
122
|
+
if (lastRunLine > 0 && lastCopyLine > lastRunLine) {
|
|
123
|
+
// This is normal — but copying app code before dependency install is not
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
if (instruction === "ADD") {
|
|
128
|
+
hasAdd = true;
|
|
129
|
+
if (!trimmed.includes(".tar") && !trimmed.includes("http")) {
|
|
130
|
+
findings.push({
|
|
131
|
+
category: "Best Practice",
|
|
132
|
+
severity: "warning",
|
|
133
|
+
message: `Line ${lineNum}: ADD instruction used instead of COPY.`,
|
|
134
|
+
suggestion: "Use COPY unless you specifically need ADD's tar extraction or URL features.",
|
|
135
|
+
line: lineNum,
|
|
136
|
+
});
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
if (instruction === "USER") {
|
|
141
|
+
hasUser = true;
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
if (instruction === "HEALTHCHECK") {
|
|
145
|
+
hasHealthcheck = true;
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
if (instruction === "EXPOSE") {
|
|
149
|
+
hasExpose = true;
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
if (instruction === "WORKDIR") {
|
|
153
|
+
hasWorkdir = true;
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
if (instruction === "ENV") {
|
|
157
|
+
hasEnvForVersions = true;
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// Multi-stage build check
|
|
162
|
+
const isMultiStage = fromStatements.length > 1;
|
|
163
|
+
if (!isMultiStage) {
|
|
164
|
+
findings.push({
|
|
165
|
+
category: "Multi-stage Build",
|
|
166
|
+
severity: "info",
|
|
167
|
+
message: "Dockerfile does not use multi-stage builds.",
|
|
168
|
+
suggestion:
|
|
169
|
+
"Multi-stage builds reduce final image size by separating build dependencies from runtime. " +
|
|
170
|
+
"Consider adding a build stage and copying only needed artifacts to the final stage.",
|
|
171
|
+
});
|
|
172
|
+
} else {
|
|
173
|
+
findings.push({
|
|
174
|
+
category: "Multi-stage Build",
|
|
175
|
+
severity: "info",
|
|
176
|
+
message: `Good: Uses multi-stage build with ${fromStatements.length} stages.`,
|
|
177
|
+
});
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
// Non-root user
|
|
181
|
+
if (!hasUser) {
|
|
182
|
+
findings.push({
|
|
183
|
+
category: "Security",
|
|
184
|
+
severity: "warning",
|
|
185
|
+
message: "No USER instruction found. Container will run as root by default.",
|
|
186
|
+
suggestion: "Add a USER instruction to run as a non-root user (e.g., USER 1001 or USER appuser).",
|
|
187
|
+
});
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// HEALTHCHECK
|
|
191
|
+
if (!hasHealthcheck) {
|
|
192
|
+
findings.push({
|
|
193
|
+
category: "Reliability",
|
|
194
|
+
severity: "info",
|
|
195
|
+
message: "No HEALTHCHECK instruction found.",
|
|
196
|
+
suggestion: "Add a HEALTHCHECK to enable Docker to monitor container health.",
|
|
197
|
+
});
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
// RUN consolidation
|
|
201
|
+
if (runCount > 5) {
|
|
202
|
+
findings.push({
|
|
203
|
+
category: "Layer Caching",
|
|
204
|
+
severity: "warning",
|
|
205
|
+
message: `Found ${runCount} separate RUN instructions.`,
|
|
206
|
+
suggestion:
|
|
207
|
+
"Consolidate related RUN commands using && to reduce the number of image layers.",
|
|
208
|
+
});
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
// WORKDIR
|
|
212
|
+
if (!hasWorkdir && hasCopy) {
|
|
213
|
+
findings.push({
|
|
214
|
+
category: "Best Practice",
|
|
215
|
+
severity: "info",
|
|
216
|
+
message: "No WORKDIR instruction found.",
|
|
217
|
+
suggestion: "Use WORKDIR to set a working directory instead of relying on the default or using 'cd' in RUN.",
|
|
218
|
+
});
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
// Layer caching order hint
|
|
222
|
+
if (hasCopy && runCount > 0) {
|
|
223
|
+
findings.push({
|
|
224
|
+
category: "Layer Caching",
|
|
225
|
+
severity: "info",
|
|
226
|
+
message: "Tip: Copy dependency manifests (package.json, requirements.txt) before source code.",
|
|
227
|
+
suggestion:
|
|
228
|
+
"COPY package*.json ./ then RUN npm install, then COPY the rest. " +
|
|
229
|
+
"This leverages Docker layer caching so dependencies are only reinstalled when manifests change.",
|
|
230
|
+
});
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
// .dockerignore
|
|
234
|
+
if (checkDockerignore) {
|
|
235
|
+
findings.push({
|
|
236
|
+
category: ".dockerignore",
|
|
237
|
+
severity: "info",
|
|
238
|
+
message: "Ensure a .dockerignore file exists alongside the Dockerfile.",
|
|
239
|
+
suggestion:
|
|
240
|
+
"Common entries: node_modules, .git, .env, dist, *.log, .DS_Store, __pycache__, .venv",
|
|
241
|
+
});
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
const score = calculateScore(findings);
|
|
245
|
+
|
|
246
|
+
return {
|
|
247
|
+
findings,
|
|
248
|
+
summary: {
|
|
249
|
+
totalFindings: findings.length,
|
|
250
|
+
errors: findings.filter((f) => f.severity === "error").length,
|
|
251
|
+
warnings: findings.filter((f) => f.severity === "warning").length,
|
|
252
|
+
info: findings.filter((f) => f.severity === "info").length,
|
|
253
|
+
stages: fromStatements.length,
|
|
254
|
+
isMultiStage,
|
|
255
|
+
hasNonRootUser: hasUser,
|
|
256
|
+
hasHealthcheck,
|
|
257
|
+
runInstructions: runCount,
|
|
258
|
+
usesLatestTag,
|
|
259
|
+
score,
|
|
260
|
+
},
|
|
261
|
+
};
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
function calculateScore(findings: Finding[]): string {
|
|
265
|
+
let score = 100;
|
|
266
|
+
for (const f of findings) {
|
|
267
|
+
if (f.severity === "error") score -= 15;
|
|
268
|
+
if (f.severity === "warning") score -= 7;
|
|
269
|
+
}
|
|
270
|
+
score = Math.max(0, score);
|
|
271
|
+
if (score >= 80) return `${score}/100 (Good)`;
|
|
272
|
+
if (score >= 50) return `${score}/100 (Needs improvement)`;
|
|
273
|
+
return `${score}/100 (Poor)`;
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
export async function handleDockerfileAnalyzer(args: Record<string, unknown>): Promise<string> {
|
|
277
|
+
const content = args.content as string;
|
|
278
|
+
const checkDockerignore = args.checkDockerignore !== false;
|
|
279
|
+
|
|
280
|
+
if (!content || !content.trim()) {
|
|
281
|
+
return "Error: Dockerfile content is empty. Please provide the Dockerfile content to analyze.";
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
const { findings, summary } = analyzeDockerfile(content, checkDockerignore);
|
|
285
|
+
|
|
286
|
+
const sections: string[] = [];
|
|
287
|
+
|
|
288
|
+
sections.push("# Dockerfile Analysis Report\n");
|
|
289
|
+
sections.push(`Score: ${summary.score}`);
|
|
290
|
+
sections.push(`Stages: ${summary.stages} | Multi-stage: ${summary.isMultiStage ? "Yes" : "No"}`);
|
|
291
|
+
sections.push(`Non-root user: ${summary.hasNonRootUser ? "Yes" : "No"} | Healthcheck: ${summary.hasHealthcheck ? "Yes" : "No"}`);
|
|
292
|
+
sections.push(`Findings: ${summary.errors} errors, ${summary.warnings} warnings, ${summary.info} info\n`);
|
|
293
|
+
|
|
294
|
+
const grouped: Record<string, Finding[]> = {};
|
|
295
|
+
for (const f of findings) {
|
|
296
|
+
if (!grouped[f.category]) grouped[f.category] = [];
|
|
297
|
+
grouped[f.category].push(f);
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
for (const [category, items] of Object.entries(grouped)) {
|
|
301
|
+
sections.push(`## ${category}`);
|
|
302
|
+
for (const item of items) {
|
|
303
|
+
const icon = item.severity === "error" ? "[ERROR]" : item.severity === "warning" ? "[WARN]" : "[INFO]";
|
|
304
|
+
const lineRef = item.line ? ` (line ${item.line})` : "";
|
|
305
|
+
sections.push(`${icon}${lineRef} ${item.message}`);
|
|
306
|
+
if (item.suggestion) {
|
|
307
|
+
sections.push(` -> ${item.suggestion}`);
|
|
308
|
+
}
|
|
309
|
+
}
|
|
310
|
+
sections.push("");
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
return sections.join("\n");
|
|
314
|
+
}
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { execSync } from "node:child_process";
|
|
2
|
+
|
|
3
|
+
// MCP tool descriptor for the docker_image_list tool.
// Declares the JSON input schema; execution lives in handleImageList below.
export const imageListTool = {
  name: "docker_image_list",
  description:
    "List Docker images with size, tags, created date, and layer count. " +
    "Optionally filter by repository name.",
  inputSchema: {
    // 'as const' preserves the literal "object" type expected by MCP schema typings.
    type: "object" as const,
    properties: {
      filter: {
        type: "string",
        description: "Filter images by repository name (partial match). Leave empty to list all.",
      },
      showDangling: {
        type: "boolean",
        description: "Include dangling (untagged) images. Defaults to false.",
        default: false,
      },
      format: {
        type: "string",
        enum: ["table", "json"],
        description: "Output format: 'table' or 'json'. Defaults to 'json'.",
        default: "json",
      },
    },
    required: [],
  },
};
|
|
30
|
+
|
|
31
|
+
// Parsed representation of one row of `docker images` output, augmented with
// a layer count obtained via `docker inspect`.
interface ImageInfo {
  id: string;         // image ID as printed by `docker images`
  repository: string; // repository name, e.g. "node"
  tag: string;        // tag, e.g. "20-alpine" (or "<none>")
  size: string;       // human-readable size string as reported by docker
  created: string;    // creation timestamp string as reported by docker
  layers: number;     // filesystem layer count; 0 if inspection failed
}
|
|
39
|
+
|
|
40
|
+
function getLayerCount(imageId: string): number {
|
|
41
|
+
try {
|
|
42
|
+
const raw = execSync(`docker inspect --format="{{len .RootFS.Layers}}" ${imageId}`, {
|
|
43
|
+
encoding: "utf-8",
|
|
44
|
+
timeout: 10000,
|
|
45
|
+
}).trim();
|
|
46
|
+
return parseInt(raw, 10) || 0;
|
|
47
|
+
} catch {
|
|
48
|
+
return 0;
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
export async function handleImageList(args: Record<string, unknown>): Promise<string> {
|
|
53
|
+
const filter = (args.filter as string) || "";
|
|
54
|
+
const showDangling = args.showDangling === true;
|
|
55
|
+
const format = (args.format as string) || "json";
|
|
56
|
+
|
|
57
|
+
try {
|
|
58
|
+
let cmd = `docker images --format "{{.ID}}|||{{.Repository}}|||{{.Tag}}|||{{.Size}}|||{{.CreatedAt}}"`;
|
|
59
|
+
if (!showDangling) {
|
|
60
|
+
cmd += ` --filter "dangling=false"`;
|
|
61
|
+
}
|
|
62
|
+
if (filter) {
|
|
63
|
+
cmd += ` ${filter}`;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
const raw = execSync(cmd, { encoding: "utf-8", timeout: 15000 }).trim();
|
|
67
|
+
|
|
68
|
+
if (!raw) {
|
|
69
|
+
return filter
|
|
70
|
+
? `No images found matching '${filter}'.`
|
|
71
|
+
: "No Docker images found on this system.";
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
const lines = raw.split("\n").filter((l) => l.trim());
|
|
75
|
+
const images: ImageInfo[] = [];
|
|
76
|
+
|
|
77
|
+
for (const line of lines) {
|
|
78
|
+
const parts = line.split("|||");
|
|
79
|
+
if (parts.length < 5) continue;
|
|
80
|
+
|
|
81
|
+
const id = parts[0];
|
|
82
|
+
images.push({
|
|
83
|
+
id,
|
|
84
|
+
repository: parts[1],
|
|
85
|
+
tag: parts[2],
|
|
86
|
+
size: parts[3],
|
|
87
|
+
created: parts[4],
|
|
88
|
+
layers: getLayerCount(id),
|
|
89
|
+
});
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
const totalSize = images
|
|
93
|
+
.map((img) => {
|
|
94
|
+
const s = img.size.toUpperCase();
|
|
95
|
+
if (s.includes("GB")) return parseFloat(s) * 1024;
|
|
96
|
+
if (s.includes("MB")) return parseFloat(s);
|
|
97
|
+
if (s.includes("KB")) return parseFloat(s) / 1024;
|
|
98
|
+
return 0;
|
|
99
|
+
})
|
|
100
|
+
.reduce((a, b) => a + b, 0);
|
|
101
|
+
|
|
102
|
+
const totalSizeStr =
|
|
103
|
+
totalSize >= 1024
|
|
104
|
+
? `${(totalSize / 1024).toFixed(2)} GB`
|
|
105
|
+
: `${totalSize.toFixed(1)} MB`;
|
|
106
|
+
|
|
107
|
+
if (format === "json") {
|
|
108
|
+
return JSON.stringify(
|
|
109
|
+
{ count: images.length, totalSize: totalSizeStr, images },
|
|
110
|
+
null,
|
|
111
|
+
2
|
|
112
|
+
);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
let table = `Found ${images.length} image(s) | Total size: ${totalSizeStr}\n\n`;
|
|
116
|
+
for (const img of images) {
|
|
117
|
+
table += `${img.repository}:${img.tag}\n`;
|
|
118
|
+
table += ` ID: ${img.id}\n`;
|
|
119
|
+
table += ` Size: ${img.size}\n`;
|
|
120
|
+
table += ` Layers: ${img.layers}\n`;
|
|
121
|
+
table += ` Created: ${img.created}\n\n`;
|
|
122
|
+
}
|
|
123
|
+
return table.trim();
|
|
124
|
+
} catch (err: unknown) {
|
|
125
|
+
const message = err instanceof Error ? err.message : String(err);
|
|
126
|
+
if (message.includes("not found") || message.includes("not recognized") || message.includes("ENOENT")) {
|
|
127
|
+
return "Error: Docker is not installed or not available in PATH. Please install Docker and ensure the 'docker' command is accessible.";
|
|
128
|
+
}
|
|
129
|
+
if (message.includes("Cannot connect") || message.includes("Is the docker daemon running")) {
|
|
130
|
+
return "Error: Cannot connect to the Docker daemon. Is Docker running?";
|
|
131
|
+
}
|
|
132
|
+
return `Error listing images: ${message}`;
|
|
133
|
+
}
|
|
134
|
+
}
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2022",
|
|
4
|
+
"module": "Node16",
|
|
5
|
+
"moduleResolution": "Node16",
|
|
6
|
+
"outDir": "dist",
|
|
7
|
+
"rootDir": "src",
|
|
8
|
+
"strict": true,
|
|
9
|
+
"esModuleInterop": true,
|
|
10
|
+
"skipLibCheck": true,
|
|
11
|
+
"forceConsistentCasingInFileNames": true,
|
|
12
|
+
"resolveJsonModule": true,
|
|
13
|
+
"declaration": true,
|
|
14
|
+
"declarationMap": true,
|
|
15
|
+
"sourceMap": true
|
|
16
|
+
},
|
|
17
|
+
"include": ["src/**/*"],
|
|
18
|
+
"exclude": ["node_modules", "dist"]
|
|
19
|
+
}
|