@rog0x/mcp-docker-tools 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +113 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +69 -0
- package/dist/tools/compose-analyzer.d.ts +16 -0
- package/dist/tools/compose-analyzer.js +284 -0
- package/dist/tools/container-list.d.ts +23 -0
- package/dist/tools/container-list.js +132 -0
- package/dist/tools/container-logs.d.ts +38 -0
- package/dist/tools/container-logs.js +126 -0
- package/dist/tools/dockerfile-analyzer.d.ts +21 -0
- package/dist/tools/dockerfile-analyzer.js +278 -0
- package/dist/tools/image-list.d.ts +27 -0
- package/dist/tools/image-list.js +117 -0
- package/package.json +29 -0
- package/src/index.ts +101 -0
- package/src/tools/compose-analyzer.ts +316 -0
- package/src/tools/container-list.ts +158 -0
- package/src/tools/container-logs.ts +131 -0
- package/src/tools/dockerfile-analyzer.ts +314 -0
- package/src/tools/image-list.ts +134 -0
- package/tsconfig.json +19 -0
package/src/index.ts
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
4
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
|
|
7
|
+
import { handleContainerList } from "./tools/container-list.js";
|
|
8
|
+
import { handleImageList } from "./tools/image-list.js";
|
|
9
|
+
import { handleDockerfileAnalyzer } from "./tools/dockerfile-analyzer.js";
|
|
10
|
+
import { handleComposeAnalyzer } from "./tools/compose-analyzer.js";
|
|
11
|
+
import { handleContainerLogs } from "./tools/container-logs.js";
|
|
12
|
+
|
|
13
|
+
const server = new McpServer({
|
|
14
|
+
name: "mcp-docker-tools",
|
|
15
|
+
version: "1.0.0",
|
|
16
|
+
});
|
|
17
|
+
|
|
18
|
+
// Register: docker_container_list
|
|
19
|
+
server.tool(
|
|
20
|
+
"docker_container_list",
|
|
21
|
+
"List Docker containers with status, ports, image, created time, and resource usage. Can show only running containers or all containers including stopped ones.",
|
|
22
|
+
{
|
|
23
|
+
all: z.boolean().optional().default(false).describe("If true, show all containers including stopped ones. Defaults to false (running only)."),
|
|
24
|
+
format: z.enum(["table", "json"]).optional().default("json").describe("Output format: 'table' for readable text, 'json' for structured data. Defaults to 'json'."),
|
|
25
|
+
},
|
|
26
|
+
async (args) => {
|
|
27
|
+
const text = await handleContainerList(args);
|
|
28
|
+
return { content: [{ type: "text" as const, text }] };
|
|
29
|
+
}
|
|
30
|
+
);
|
|
31
|
+
|
|
32
|
+
// Register: docker_image_list
|
|
33
|
+
server.tool(
|
|
34
|
+
"docker_image_list",
|
|
35
|
+
"List Docker images with size, tags, created date, and layer count. Optionally filter by repository name.",
|
|
36
|
+
{
|
|
37
|
+
filter: z.string().optional().describe("Filter images by repository name (partial match). Leave empty to list all."),
|
|
38
|
+
showDangling: z.boolean().optional().default(false).describe("Include dangling (untagged) images. Defaults to false."),
|
|
39
|
+
format: z.enum(["table", "json"]).optional().default("json").describe("Output format: 'table' or 'json'. Defaults to 'json'."),
|
|
40
|
+
},
|
|
41
|
+
async (args) => {
|
|
42
|
+
const text = await handleImageList(args);
|
|
43
|
+
return { content: [{ type: "text" as const, text }] };
|
|
44
|
+
}
|
|
45
|
+
);
|
|
46
|
+
|
|
47
|
+
// Register: docker_dockerfile_analyze
|
|
48
|
+
server.tool(
|
|
49
|
+
"docker_dockerfile_analyze",
|
|
50
|
+
"Analyze a Dockerfile for best practices including multi-stage builds, non-root user, .dockerignore usage, layer caching order, image size optimization, and security.",
|
|
51
|
+
{
|
|
52
|
+
content: z.string().describe("The full content of the Dockerfile to analyze."),
|
|
53
|
+
checkDockerignore: z.boolean().optional().default(true).describe("If true, also checks for common .dockerignore recommendations. Defaults to true."),
|
|
54
|
+
},
|
|
55
|
+
async (args) => {
|
|
56
|
+
const text = await handleDockerfileAnalyzer(args);
|
|
57
|
+
return { content: [{ type: "text" as const, text }] };
|
|
58
|
+
}
|
|
59
|
+
);
|
|
60
|
+
|
|
61
|
+
// Register: docker_compose_analyze
|
|
62
|
+
server.tool(
|
|
63
|
+
"docker_compose_analyze",
|
|
64
|
+
"Analyze docker-compose.yml: list services, ports, volumes, networks, health checks, dependencies. Suggest improvements.",
|
|
65
|
+
{
|
|
66
|
+
content: z.string().describe("The full content of the docker-compose.yml to analyze."),
|
|
67
|
+
},
|
|
68
|
+
async (args) => {
|
|
69
|
+
const text = await handleComposeAnalyzer(args);
|
|
70
|
+
return { content: [{ type: "text" as const, text }] };
|
|
71
|
+
}
|
|
72
|
+
);
|
|
73
|
+
|
|
74
|
+
// Register: docker_container_logs
|
|
75
|
+
server.tool(
|
|
76
|
+
"docker_container_logs",
|
|
77
|
+
"Get logs from a Docker container. Supports retrieving the last N lines, filtering by keyword, and including timestamps.",
|
|
78
|
+
{
|
|
79
|
+
container: z.string().describe("Container name or ID to get logs from."),
|
|
80
|
+
tail: z.number().optional().default(100).describe("Number of lines to retrieve from the end of logs. Defaults to 100."),
|
|
81
|
+
since: z.string().optional().describe("Show logs since a timestamp (e.g., '2024-01-01T00:00:00') or relative duration (e.g., '1h', '30m')."),
|
|
82
|
+
until: z.string().optional().describe("Show logs until a timestamp or relative duration."),
|
|
83
|
+
filter: z.string().optional().describe("Filter log lines to only include those containing this keyword (case-insensitive)."),
|
|
84
|
+
timestamps: z.boolean().optional().default(true).describe("Include timestamps in log output. Defaults to true."),
|
|
85
|
+
},
|
|
86
|
+
async (args) => {
|
|
87
|
+
const text = await handleContainerLogs(args);
|
|
88
|
+
return { content: [{ type: "text" as const, text }] };
|
|
89
|
+
}
|
|
90
|
+
);
|
|
91
|
+
|
|
92
|
+
async function main(): Promise<void> {
|
|
93
|
+
const transport = new StdioServerTransport();
|
|
94
|
+
await server.connect(transport);
|
|
95
|
+
console.error("mcp-docker-tools server running on stdio");
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
main().catch((err) => {
|
|
99
|
+
console.error("Fatal error:", err);
|
|
100
|
+
process.exit(1);
|
|
101
|
+
});
|
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
export const composeAnalyzerTool = {
|
|
2
|
+
name: "docker_compose_analyze",
|
|
3
|
+
description:
|
|
4
|
+
"Analyze a docker-compose.yml file: list services, ports, volumes, networks, " +
|
|
5
|
+
"health checks, dependencies, and environment variables. Suggests improvements " +
|
|
6
|
+
"for production readiness, security, and best practices.",
|
|
7
|
+
inputSchema: {
|
|
8
|
+
type: "object" as const,
|
|
9
|
+
properties: {
|
|
10
|
+
content: {
|
|
11
|
+
type: "string",
|
|
12
|
+
description: "The full content of the docker-compose.yml to analyze.",
|
|
13
|
+
},
|
|
14
|
+
},
|
|
15
|
+
required: ["content"],
|
|
16
|
+
},
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
/** Summary of one service entry parsed out of a docker-compose file. */
interface ServiceInfo {
  // Service key under the top-level `services:` block.
  name: string;
  // Value of `image:` if present (surrounding quotes stripped).
  image?: string;
  // Value of `build:`, or "(context)" when `build:` appears without an inline value.
  build?: string;
  // Entries of the `ports:` list, quotes stripped (e.g. "8080:80").
  ports: string[];
  // Entries of the `volumes:` list, quotes stripped.
  volumes: string[];
  // Entries of the `networks:` list.
  networks: string[];
  // Entries of the `depends_on:` list (simple list form only).
  dependsOn: string[];
  // Entries of the `environment:` list (list form only; "KEY=value" strings).
  environment: string[];
  // True when a `healthcheck:` key was seen for this service.
  hasHealthcheck: boolean;
  // Value of `restart:` if present.
  restart?: string;
  // True when a `deploy:` key was seen (proxy for resource limits being configured).
  hasResourceLimits: boolean;
  // True when `read_only: true` was seen.
  hasReadonlyRootfs: boolean;
  // True when a `security_opt:` key was seen.
  hasSecurityOpt: boolean;
}
|
|
34
|
+
|
|
35
|
+
/** Aggregate result of parsing a docker-compose file plus generated findings. */
interface ComposeAnalysis {
  // Top-level `version:` value if present (obsolete in Compose V2).
  version?: string;
  // One entry per service under `services:`.
  services: ServiceInfo[];
  // Names declared under the top-level `networks:` block.
  topLevelNetworks: string[];
  // Names declared under the top-level `volumes:` block.
  topLevelVolumes: string[];
  // Best-practice findings; severity is "warning" or "info".
  findings: { severity: string; message: string; suggestion?: string }[];
}
|
|
42
|
+
|
|
43
|
+
function parseSimpleYaml(content: string): ComposeAnalysis {
|
|
44
|
+
const lines = content.split("\n");
|
|
45
|
+
const analysis: ComposeAnalysis = {
|
|
46
|
+
services: [],
|
|
47
|
+
topLevelNetworks: [],
|
|
48
|
+
topLevelVolumes: [],
|
|
49
|
+
findings: [],
|
|
50
|
+
};
|
|
51
|
+
|
|
52
|
+
let currentTopLevel = "";
|
|
53
|
+
let currentService = "";
|
|
54
|
+
let currentServiceKey = "";
|
|
55
|
+
let inServiceBlock = false;
|
|
56
|
+
const serviceMap = new Map<string, ServiceInfo>();
|
|
57
|
+
|
|
58
|
+
for (const raw of lines) {
|
|
59
|
+
// Skip comments and empty
|
|
60
|
+
if (raw.trim().startsWith("#") || !raw.trim()) continue;
|
|
61
|
+
|
|
62
|
+
const indent = raw.length - raw.trimStart().length;
|
|
63
|
+
const trimmed = raw.trim();
|
|
64
|
+
|
|
65
|
+
// Top-level keys (no indent)
|
|
66
|
+
if (indent === 0 && trimmed.endsWith(":")) {
|
|
67
|
+
currentTopLevel = trimmed.replace(":", "").trim();
|
|
68
|
+
currentService = "";
|
|
69
|
+
currentServiceKey = "";
|
|
70
|
+
inServiceBlock = currentTopLevel === "services";
|
|
71
|
+
continue;
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
if (indent === 0 && trimmed.includes(":")) {
|
|
75
|
+
const key = trimmed.split(":")[0].trim();
|
|
76
|
+
const value = trimmed.substring(trimmed.indexOf(":") + 1).trim();
|
|
77
|
+
if (key === "version") {
|
|
78
|
+
analysis.version = value.replace(/['"]/g, "");
|
|
79
|
+
}
|
|
80
|
+
currentTopLevel = key;
|
|
81
|
+
inServiceBlock = key === "services";
|
|
82
|
+
currentService = "";
|
|
83
|
+
continue;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// Service-level (indent 2)
|
|
87
|
+
if (inServiceBlock && indent === 2 && trimmed.endsWith(":") && !trimmed.startsWith("-")) {
|
|
88
|
+
currentService = trimmed.replace(":", "").trim();
|
|
89
|
+
currentServiceKey = "";
|
|
90
|
+
if (!serviceMap.has(currentService)) {
|
|
91
|
+
serviceMap.set(currentService, {
|
|
92
|
+
name: currentService,
|
|
93
|
+
ports: [],
|
|
94
|
+
volumes: [],
|
|
95
|
+
networks: [],
|
|
96
|
+
dependsOn: [],
|
|
97
|
+
environment: [],
|
|
98
|
+
hasHealthcheck: false,
|
|
99
|
+
hasResourceLimits: false,
|
|
100
|
+
hasReadonlyRootfs: false,
|
|
101
|
+
hasSecurityOpt: false,
|
|
102
|
+
});
|
|
103
|
+
}
|
|
104
|
+
continue;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
const svc = serviceMap.get(currentService);
|
|
108
|
+
|
|
109
|
+
// Service properties (indent 4)
|
|
110
|
+
if (inServiceBlock && currentService && indent >= 4) {
|
|
111
|
+
if (indent === 4 && trimmed.includes(":")) {
|
|
112
|
+
const key = trimmed.split(":")[0].trim();
|
|
113
|
+
const value = trimmed.substring(trimmed.indexOf(":") + 1).trim();
|
|
114
|
+
currentServiceKey = key;
|
|
115
|
+
|
|
116
|
+
if (svc) {
|
|
117
|
+
if (key === "image") svc.image = value.replace(/['"]/g, "");
|
|
118
|
+
if (key === "build") svc.build = value || "(context)";
|
|
119
|
+
if (key === "restart") svc.restart = value.replace(/['"]/g, "");
|
|
120
|
+
if (key === "healthcheck") svc.hasHealthcheck = true;
|
|
121
|
+
if (key === "read_only" && value === "true") svc.hasReadonlyRootfs = true;
|
|
122
|
+
if (key === "security_opt") svc.hasSecurityOpt = true;
|
|
123
|
+
if (key === "deploy") svc.hasResourceLimits = true;
|
|
124
|
+
|
|
125
|
+
// Inline port or volume
|
|
126
|
+
if (key === "ports" && value && !value.startsWith("[")) {
|
|
127
|
+
// Not a list, skip
|
|
128
|
+
}
|
|
129
|
+
if (key === "environment" && value) {
|
|
130
|
+
// Inline map style
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
continue;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// List items (indent 6 with -)
|
|
137
|
+
if (trimmed.startsWith("-") && svc) {
|
|
138
|
+
const item = trimmed.substring(1).trim().replace(/['"]/g, "");
|
|
139
|
+
if (currentServiceKey === "ports") svc.ports.push(item);
|
|
140
|
+
if (currentServiceKey === "volumes") svc.volumes.push(item);
|
|
141
|
+
if (currentServiceKey === "networks") svc.networks.push(item);
|
|
142
|
+
if (currentServiceKey === "depends_on") svc.dependsOn.push(item);
|
|
143
|
+
if (currentServiceKey === "environment") svc.environment.push(item);
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
// Top-level networks/volumes
|
|
148
|
+
if (currentTopLevel === "networks" && indent === 2 && trimmed.endsWith(":")) {
|
|
149
|
+
analysis.topLevelNetworks.push(trimmed.replace(":", "").trim());
|
|
150
|
+
}
|
|
151
|
+
if (currentTopLevel === "volumes" && indent === 2 && trimmed.endsWith(":")) {
|
|
152
|
+
analysis.topLevelVolumes.push(trimmed.replace(":", "").trim());
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
analysis.services = Array.from(serviceMap.values());
|
|
157
|
+
return analysis;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
function generateFindings(analysis: ComposeAnalysis): void {
|
|
161
|
+
const findings = analysis.findings;
|
|
162
|
+
|
|
163
|
+
if (analysis.version) {
|
|
164
|
+
findings.push({
|
|
165
|
+
severity: "info",
|
|
166
|
+
message: `Compose version: ${analysis.version}. Note: 'version' is obsolete in Compose V2.`,
|
|
167
|
+
suggestion: "The 'version' key can be removed when using Docker Compose V2.",
|
|
168
|
+
});
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
for (const svc of analysis.services) {
|
|
172
|
+
// Image tag
|
|
173
|
+
if (svc.image && (svc.image.endsWith(":latest") || (!svc.image.includes(":") && !svc.image.includes("@")))) {
|
|
174
|
+
findings.push({
|
|
175
|
+
severity: "warning",
|
|
176
|
+
message: `Service '${svc.name}': image '${svc.image}' uses :latest or no tag.`,
|
|
177
|
+
suggestion: "Pin to a specific version for reproducible deployments.",
|
|
178
|
+
});
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
// Restart policy
|
|
182
|
+
if (!svc.restart) {
|
|
183
|
+
findings.push({
|
|
184
|
+
severity: "warning",
|
|
185
|
+
message: `Service '${svc.name}': no restart policy defined.`,
|
|
186
|
+
suggestion: "Add 'restart: unless-stopped' or 'restart: on-failure' for production.",
|
|
187
|
+
});
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// Health check
|
|
191
|
+
if (!svc.hasHealthcheck) {
|
|
192
|
+
findings.push({
|
|
193
|
+
severity: "info",
|
|
194
|
+
message: `Service '${svc.name}': no healthcheck defined.`,
|
|
195
|
+
suggestion: "Add a healthcheck for better orchestration and monitoring.",
|
|
196
|
+
});
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
// Resource limits
|
|
200
|
+
if (!svc.hasResourceLimits) {
|
|
201
|
+
findings.push({
|
|
202
|
+
severity: "info",
|
|
203
|
+
message: `Service '${svc.name}': no resource limits (deploy.resources) configured.`,
|
|
204
|
+
suggestion: "Set memory and CPU limits to prevent a single container from consuming all host resources.",
|
|
205
|
+
});
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
// Privileged ports
|
|
209
|
+
for (const port of svc.ports) {
|
|
210
|
+
const hostPort = port.split(":")[0];
|
|
211
|
+
const portNum = parseInt(hostPort, 10);
|
|
212
|
+
if (portNum > 0 && portNum < 1024) {
|
|
213
|
+
findings.push({
|
|
214
|
+
severity: "info",
|
|
215
|
+
message: `Service '${svc.name}': uses privileged port ${portNum}.`,
|
|
216
|
+
suggestion: "Privileged ports (<1024) may require elevated permissions.",
|
|
217
|
+
});
|
|
218
|
+
}
|
|
219
|
+
if (port.startsWith("0.0.0.0:") || (!port.includes("127.0.0.1") && port.includes(":"))) {
|
|
220
|
+
// Bound to all interfaces by default
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
// Host volumes
|
|
225
|
+
for (const vol of svc.volumes) {
|
|
226
|
+
if (vol.includes("/var/run/docker.sock")) {
|
|
227
|
+
findings.push({
|
|
228
|
+
severity: "warning",
|
|
229
|
+
message: `Service '${svc.name}': mounts Docker socket.`,
|
|
230
|
+
suggestion: "Mounting the Docker socket gives full control over the Docker daemon. Use with extreme caution.",
|
|
231
|
+
});
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
// Environment secrets
|
|
236
|
+
for (const env of svc.environment) {
|
|
237
|
+
const lower = env.toLowerCase();
|
|
238
|
+
if (lower.includes("password") || lower.includes("secret") || lower.includes("api_key") || lower.includes("token")) {
|
|
239
|
+
findings.push({
|
|
240
|
+
severity: "warning",
|
|
241
|
+
message: `Service '${svc.name}': environment variable '${env.split("=")[0]}' may contain a secret.`,
|
|
242
|
+
suggestion: "Use Docker secrets or an .env file (not committed to VCS) instead of inline values.",
|
|
243
|
+
});
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
// Dependencies without healthcheck
|
|
249
|
+
for (const svc of analysis.services) {
|
|
250
|
+
for (const dep of svc.dependsOn) {
|
|
251
|
+
const depSvc = analysis.services.find((s) => s.name === dep);
|
|
252
|
+
if (depSvc && !depSvc.hasHealthcheck) {
|
|
253
|
+
findings.push({
|
|
254
|
+
severity: "info",
|
|
255
|
+
message: `Service '${svc.name}' depends on '${dep}', but '${dep}' has no healthcheck.`,
|
|
256
|
+
suggestion: "Add a healthcheck to the dependency and use 'condition: service_healthy' for reliable startup order.",
|
|
257
|
+
});
|
|
258
|
+
}
|
|
259
|
+
}
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
export async function handleComposeAnalyzer(args: Record<string, unknown>): Promise<string> {
|
|
264
|
+
const content = args.content as string;
|
|
265
|
+
|
|
266
|
+
if (!content || !content.trim()) {
|
|
267
|
+
return "Error: docker-compose.yml content is empty. Please provide the file content to analyze.";
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
const analysis = parseSimpleYaml(content);
|
|
271
|
+
generateFindings(analysis);
|
|
272
|
+
|
|
273
|
+
const sections: string[] = [];
|
|
274
|
+
|
|
275
|
+
sections.push("# Docker Compose Analysis Report\n");
|
|
276
|
+
|
|
277
|
+
// Services overview
|
|
278
|
+
sections.push(`## Services (${analysis.services.length})\n`);
|
|
279
|
+
for (const svc of analysis.services) {
|
|
280
|
+
sections.push(`### ${svc.name}`);
|
|
281
|
+
if (svc.image) sections.push(` Image: ${svc.image}`);
|
|
282
|
+
if (svc.build) sections.push(` Build: ${svc.build}`);
|
|
283
|
+
if (svc.ports.length > 0) sections.push(` Ports: ${svc.ports.join(", ")}`);
|
|
284
|
+
if (svc.volumes.length > 0) sections.push(` Volumes: ${svc.volumes.join(", ")}`);
|
|
285
|
+
if (svc.networks.length > 0) sections.push(` Networks: ${svc.networks.join(", ")}`);
|
|
286
|
+
if (svc.dependsOn.length > 0) sections.push(` Depends on: ${svc.dependsOn.join(", ")}`);
|
|
287
|
+
if (svc.environment.length > 0) sections.push(` Environment: ${svc.environment.length} variable(s)`);
|
|
288
|
+
sections.push(` Restart: ${svc.restart || "not set"}`);
|
|
289
|
+
sections.push(` Healthcheck: ${svc.hasHealthcheck ? "Yes" : "No"}`);
|
|
290
|
+
sections.push("");
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
// Networks & Volumes
|
|
294
|
+
if (analysis.topLevelNetworks.length > 0) {
|
|
295
|
+
sections.push(`## Networks: ${analysis.topLevelNetworks.join(", ")}\n`);
|
|
296
|
+
}
|
|
297
|
+
if (analysis.topLevelVolumes.length > 0) {
|
|
298
|
+
sections.push(`## Volumes: ${analysis.topLevelVolumes.join(", ")}\n`);
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
// Findings
|
|
302
|
+
const warnings = analysis.findings.filter((f) => f.severity === "warning");
|
|
303
|
+
const infos = analysis.findings.filter((f) => f.severity === "info");
|
|
304
|
+
|
|
305
|
+
sections.push(`## Findings (${warnings.length} warnings, ${infos.length} info)\n`);
|
|
306
|
+
for (const f of warnings) {
|
|
307
|
+
sections.push(`[WARN] ${f.message}`);
|
|
308
|
+
if (f.suggestion) sections.push(` -> ${f.suggestion}`);
|
|
309
|
+
}
|
|
310
|
+
for (const f of infos) {
|
|
311
|
+
sections.push(`[INFO] ${f.message}`);
|
|
312
|
+
if (f.suggestion) sections.push(` -> ${f.suggestion}`);
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
return sections.join("\n");
|
|
316
|
+
}
|
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
import { execSync } from "node:child_process";
|
|
2
|
+
|
|
3
|
+
export const containerListTool = {
|
|
4
|
+
name: "docker_container_list",
|
|
5
|
+
description:
|
|
6
|
+
"List Docker containers with status, ports, image, created time, and resource usage. " +
|
|
7
|
+
"Can show only running containers or all containers including stopped ones.",
|
|
8
|
+
inputSchema: {
|
|
9
|
+
type: "object" as const,
|
|
10
|
+
properties: {
|
|
11
|
+
all: {
|
|
12
|
+
type: "boolean",
|
|
13
|
+
description:
|
|
14
|
+
"If true, show all containers including stopped ones. Defaults to false (running only).",
|
|
15
|
+
default: false,
|
|
16
|
+
},
|
|
17
|
+
format: {
|
|
18
|
+
type: "string",
|
|
19
|
+
enum: ["table", "json"],
|
|
20
|
+
description: "Output format: 'table' for readable text, 'json' for structured data. Defaults to 'json'.",
|
|
21
|
+
default: "json",
|
|
22
|
+
},
|
|
23
|
+
},
|
|
24
|
+
required: [],
|
|
25
|
+
},
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
/** One row of container metadata, merged from `docker ps` and `docker stats`. */
interface ContainerInfo {
  // Container ID truncated to 12 characters.
  id: string;
  // Container name from `{{.Names}}`.
  name: string;
  // Image reference the container was created from.
  image: string;
  // Human-readable status string, e.g. "Up 2 hours".
  status: string;
  // Short state, e.g. "running" / "exited".
  state: string;
  // Port mappings string, or "none" when empty.
  ports: string;
  // Creation timestamp as reported by `{{.CreatedAt}}`.
  created: string;
  // Fields below come from `docker stats`; "N/A" for stopped containers.
  cpu: string;
  memory: string;
  memoryLimit: string;
  memoryPercent: string;
  networkIO: string;
  blockIO: string;
}
|
|
43
|
+
|
|
44
|
+
function getContainerStats(containerIds: string[]): Map<string, { cpu: string; memory: string; memoryLimit: string; memoryPercent: string; networkIO: string; blockIO: string }> {
|
|
45
|
+
const statsMap = new Map<string, { cpu: string; memory: string; memoryLimit: string; memoryPercent: string; networkIO: string; blockIO: string }>();
|
|
46
|
+
if (containerIds.length === 0) return statsMap;
|
|
47
|
+
|
|
48
|
+
try {
|
|
49
|
+
const raw = execSync(
|
|
50
|
+
`docker stats --no-stream --format "{{.ID}}|||{{.CPUPerc}}|||{{.MemUsage}}|||{{.MemPerc}}|||{{.NetIO}}|||{{.BlockIO}}"`,
|
|
51
|
+
{ encoding: "utf-8", timeout: 30000 }
|
|
52
|
+
).trim();
|
|
53
|
+
|
|
54
|
+
for (const line of raw.split("\n")) {
|
|
55
|
+
if (!line.trim()) continue;
|
|
56
|
+
const parts = line.split("|||");
|
|
57
|
+
if (parts.length >= 6) {
|
|
58
|
+
const memParts = parts[2].split(" / ");
|
|
59
|
+
statsMap.set(parts[0], {
|
|
60
|
+
cpu: parts[1],
|
|
61
|
+
memory: memParts[0]?.trim() ?? "N/A",
|
|
62
|
+
memoryLimit: memParts[1]?.trim() ?? "N/A",
|
|
63
|
+
memoryPercent: parts[3],
|
|
64
|
+
networkIO: parts[4],
|
|
65
|
+
blockIO: parts[5],
|
|
66
|
+
});
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
} catch {
|
|
70
|
+
// Stats unavailable for stopped containers — that's fine
|
|
71
|
+
}
|
|
72
|
+
return statsMap;
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
/**
 * List Docker containers by shelling out to `docker ps`, enriching running
 * containers with `docker stats` usage data.
 *
 * @param args.all    When strictly `true`, include stopped containers (-a).
 * @param args.format "table" for readable text; anything else yields JSON.
 * @returns JSON or table text on success; a human-readable "Error: ..."
 *          string on failure (never throws).
 */
export async function handleContainerList(args: Record<string, unknown>): Promise<string> {
  // Strict check: only the literal boolean `true` enables -a (a string "true" does not).
  const showAll = args.all === true;
  const format = (args.format as string) || "json";

  try {
    const allFlag = showAll ? " -a" : "";
    // "|||" is the field separator; it is unlikely to appear in any field value.
    const raw = execSync(
      `docker ps${allFlag} --format "{{.ID}}|||{{.Names}}|||{{.Image}}|||{{.Status}}|||{{.State}}|||{{.Ports}}|||{{.CreatedAt}}" --no-trunc`,
      { encoding: "utf-8", timeout: 15000 }
    ).trim();

    if (!raw) {
      return showAll
        ? "No containers found."
        : "No running containers. Use { \"all\": true } to see stopped containers.";
    }

    const lines = raw.split("\n").filter((l) => l.trim());
    const containerIds: string[] = [];
    const containers: ContainerInfo[] = [];

    for (const line of lines) {
      const parts = line.split("|||");
      // Malformed rows (fewer than the 7 expected fields) are skipped.
      if (parts.length < 7) continue;
      // --no-trunc yields the full 64-char ID; truncate to 12 to match the
      // IDs reported by `docker stats`.
      const id = parts[0].substring(0, 12);
      containerIds.push(id);
      containers.push({
        id,
        name: parts[1],
        image: parts[2],
        status: parts[3],
        state: parts[4],
        ports: parts[5] || "none",
        created: parts[6],
        // Stats placeholders; overwritten below for running containers.
        cpu: "N/A",
        memory: "N/A",
        memoryLimit: "N/A",
        memoryPercent: "N/A",
        networkIO: "N/A",
        blockIO: "N/A",
      });
    }

    // Merge in live usage; stopped containers simply keep "N/A".
    const stats = getContainerStats(containerIds);
    for (const c of containers) {
      const s = stats.get(c.id);
      if (s) {
        c.cpu = s.cpu;
        c.memory = s.memory;
        c.memoryLimit = s.memoryLimit;
        c.memoryPercent = s.memoryPercent;
        c.networkIO = s.networkIO;
        c.blockIO = s.blockIO;
      }
    }

    if (format === "json") {
      return JSON.stringify({ count: containers.length, containers }, null, 2);
    }

    // Plain-text table output.
    let table = `Found ${containers.length} container(s):\n\n`;
    for (const c of containers) {
      table += `--- ${c.name} (${c.id}) ---\n`;
      table += ` Image: ${c.image}\n`;
      table += ` State: ${c.state} | ${c.status}\n`;
      table += ` Ports: ${c.ports}\n`;
      table += ` Created: ${c.created}\n`;
      table += ` CPU: ${c.cpu}\n`;
      table += ` Memory: ${c.memory} / ${c.memoryLimit} (${c.memoryPercent})\n`;
      table += ` Net I/O: ${c.networkIO}\n`;
      table += ` Block IO: ${c.blockIO}\n\n`;
    }
    return table.trim();
  } catch (err: unknown) {
    const message = err instanceof Error ? err.message : String(err);
    // Classify common failure modes by matching the CLI/OS error text.
    if (message.includes("not found") || message.includes("not recognized") || message.includes("ENOENT")) {
      return "Error: Docker is not installed or not available in PATH. Please install Docker and ensure the 'docker' command is accessible.";
    }
    if (message.includes("Cannot connect") || message.includes("permission denied") || message.includes("Is the docker daemon running")) {
      return "Error: Cannot connect to the Docker daemon. Is Docker running? You may also need appropriate permissions.";
    }
    return `Error listing containers: ${message}`;
  }
}
|