@rog0x/mcp-docker-tools 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +113 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +69 -0
- package/dist/tools/compose-analyzer.d.ts +16 -0
- package/dist/tools/compose-analyzer.js +284 -0
- package/dist/tools/container-list.d.ts +23 -0
- package/dist/tools/container-list.js +132 -0
- package/dist/tools/container-logs.d.ts +38 -0
- package/dist/tools/container-logs.js +126 -0
- package/dist/tools/dockerfile-analyzer.d.ts +21 -0
- package/dist/tools/dockerfile-analyzer.js +278 -0
- package/dist/tools/image-list.d.ts +27 -0
- package/dist/tools/image-list.js +117 -0
- package/package.json +29 -0
- package/src/index.ts +101 -0
- package/src/tools/compose-analyzer.ts +316 -0
- package/src/tools/container-list.ts +158 -0
- package/src/tools/container-logs.ts +131 -0
- package/src/tools/dockerfile-analyzer.ts +314 -0
- package/src/tools/image-list.ts +134 -0
- package/tsconfig.json +19 -0
package/README.md
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
# mcp-docker-tools
|
|
2
|
+
|
|
3
|
+
Docker management tools for AI agents, built on the [Model Context Protocol](https://modelcontextprotocol.io).
|
|
4
|
+
|
|
5
|
+
Provides five tools for inspecting containers, images, Dockerfiles, and Compose configurations — all accessible to LLMs through MCP.
|
|
6
|
+
|
|
7
|
+
## Tools
|
|
8
|
+
|
|
9
|
+
| Tool | Description |
|
|
10
|
+
|------|-------------|
|
|
11
|
+
| `docker_container_list` | List running or all containers with status, ports, image, created time, and resource usage (CPU, memory, network/block I/O) |
|
|
12
|
+
| `docker_image_list` | List Docker images with size, tags, created date, and layer count |
|
|
13
|
+
| `docker_dockerfile_analyze` | Analyze a Dockerfile for best practices: multi-stage builds, non-root user, layer caching, image size, security |
|
|
14
|
+
| `docker_compose_analyze` | Analyze docker-compose.yml: services, ports, volumes, networks, health checks, dependencies, and improvement suggestions |
|
|
15
|
+
| `docker_container_logs` | Get container logs with tail, keyword filter, time range, and timestamp support |
|
|
16
|
+
|
|
17
|
+
## Prerequisites
|
|
18
|
+
|
|
19
|
+
- **Node.js** >= 18
|
|
20
|
+
- **Docker** CLI installed and accessible in PATH
|
|
21
|
+
- Docker daemon running (for container/image tools)
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
git clone <repo-url>
|
|
27
|
+
cd mcp-docker-tools
|
|
28
|
+
npm install
|
|
29
|
+
npm run build
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Usage with Claude Desktop
|
|
33
|
+
|
|
34
|
+
Add to your Claude Desktop configuration (`claude_desktop_config.json`):
|
|
35
|
+
|
|
36
|
+
```json
|
|
37
|
+
{
|
|
38
|
+
"mcpServers": {
|
|
39
|
+
"docker-tools": {
|
|
40
|
+
"command": "node",
|
|
41
|
+
"args": ["/absolute/path/to/mcp-docker-tools/dist/index.js"]
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Usage with Claude Code
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
claude mcp add docker-tools node /absolute/path/to/mcp-docker-tools/dist/index.js
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
Or add to `.claude/settings.json`:
|
|
54
|
+
|
|
55
|
+
```json
|
|
56
|
+
{
|
|
57
|
+
"mcpServers": {
|
|
58
|
+
"docker-tools": {
|
|
59
|
+
"command": "node",
|
|
60
|
+
"args": ["/absolute/path/to/mcp-docker-tools/dist/index.js"]
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## Tool Examples
|
|
67
|
+
|
|
68
|
+
### List running containers
|
|
69
|
+
```json
|
|
70
|
+
{ "tool": "docker_container_list" }
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
### List all containers including stopped
|
|
74
|
+
```json
|
|
75
|
+
{ "tool": "docker_container_list", "args": { "all": true, "format": "table" } }
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### List images filtered by name
|
|
79
|
+
```json
|
|
80
|
+
{ "tool": "docker_image_list", "args": { "filter": "node" } }
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
### Analyze a Dockerfile
|
|
84
|
+
```json
|
|
85
|
+
{
|
|
86
|
+
"tool": "docker_dockerfile_analyze",
|
|
87
|
+
"args": {
|
|
88
|
+
"content": "FROM node:20\nCOPY . .\nRUN npm install\nCMD [\"node\", \"index.js\"]"
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### Analyze docker-compose.yml
|
|
94
|
+
```json
|
|
95
|
+
{
|
|
96
|
+
"tool": "docker_compose_analyze",
|
|
97
|
+
"args": {
|
|
98
|
+
"content": "services:\n web:\n image: nginx\n ports:\n - 80:80"
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
### Get container logs
|
|
104
|
+
```json
|
|
105
|
+
{
|
|
106
|
+
"tool": "docker_container_logs",
|
|
107
|
+
"args": { "container": "my-app", "tail": 50, "filter": "error" }
|
|
108
|
+
}
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## License
|
|
112
|
+
|
|
113
|
+
MIT
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";
// Entry point for the mcp-docker-tools MCP server: registers the five Docker
// tools on an McpServer and serves them over the stdio transport.
Object.defineProperty(exports, "__esModule", { value: true });
const { McpServer } = require("@modelcontextprotocol/sdk/server/mcp.js");
const { StdioServerTransport } = require("@modelcontextprotocol/sdk/server/stdio.js");
const { z } = require("zod");
const { handleContainerList } = require("./tools/container-list.js");
const { handleImageList } = require("./tools/image-list.js");
const { handleDockerfileAnalyzer } = require("./tools/dockerfile-analyzer.js");
const { handleComposeAnalyzer } = require("./tools/compose-analyzer.js");
const { handleContainerLogs } = require("./tools/container-logs.js");

const server = new McpServer({
    name: "mcp-docker-tools",
    version: "1.0.0",
});

// Every tool handler returns a plain string; this adapter wraps it into the
// MCP text-content response shape that server.tool() callbacks must return.
const asTextResult = (handler) => async (args) => ({
    content: [{ type: "text", text: await handler(args) }],
});

// docker_container_list
server.tool("docker_container_list", "List Docker containers with status, ports, image, created time, and resource usage. Can show only running containers or all containers including stopped ones.", {
    all: z.boolean().optional().default(false).describe("If true, show all containers including stopped ones. Defaults to false (running only)."),
    format: z.enum(["table", "json"]).optional().default("json").describe("Output format: 'table' for readable text, 'json' for structured data. Defaults to 'json'."),
}, asTextResult(handleContainerList));

// docker_image_list
server.tool("docker_image_list", "List Docker images with size, tags, created date, and layer count. Optionally filter by repository name.", {
    filter: z.string().optional().describe("Filter images by repository name (partial match). Leave empty to list all."),
    showDangling: z.boolean().optional().default(false).describe("Include dangling (untagged) images. Defaults to false."),
    format: z.enum(["table", "json"]).optional().default("json").describe("Output format: 'table' or 'json'. Defaults to 'json'."),
}, asTextResult(handleImageList));

// docker_dockerfile_analyze
server.tool("docker_dockerfile_analyze", "Analyze a Dockerfile for best practices including multi-stage builds, non-root user, .dockerignore usage, layer caching order, image size optimization, and security.", {
    content: z.string().describe("The full content of the Dockerfile to analyze."),
    checkDockerignore: z.boolean().optional().default(true).describe("If true, also checks for common .dockerignore recommendations. Defaults to true."),
}, asTextResult(handleDockerfileAnalyzer));

// docker_compose_analyze
server.tool("docker_compose_analyze", "Analyze docker-compose.yml: list services, ports, volumes, networks, health checks, dependencies. Suggest improvements.", {
    content: z.string().describe("The full content of the docker-compose.yml to analyze."),
}, asTextResult(handleComposeAnalyzer));

// docker_container_logs
server.tool("docker_container_logs", "Get logs from a Docker container. Supports retrieving the last N lines, filtering by keyword, and including timestamps.", {
    container: z.string().describe("Container name or ID to get logs from."),
    tail: z.number().optional().default(100).describe("Number of lines to retrieve from the end of logs. Defaults to 100."),
    since: z.string().optional().describe("Show logs since a timestamp (e.g., '2024-01-01T00:00:00') or relative duration (e.g., '1h', '30m')."),
    until: z.string().optional().describe("Show logs until a timestamp or relative duration."),
    filter: z.string().optional().describe("Filter log lines to only include those containing this keyword (case-insensitive)."),
    timestamps: z.boolean().optional().default(true).describe("Include timestamps in log output. Defaults to true."),
}, asTextResult(handleContainerLogs));

async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
    // Log to stderr: stdout is reserved for the MCP stdio protocol.
    console.error("mcp-docker-tools server running on stdio");
}

main().catch((err) => {
    console.error("Fatal error:", err);
    process.exit(1);
});
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
// Declarations for the docker_compose_analyze tool.
// Static tool metadata: name, description, and a JSON-schema style input
// definition consumed by MCP clients.
export declare const composeAnalyzerTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            content: {
                type: string;
                description: string;
            };
        };
        required: string[];
    };
};
// Parses the docker-compose.yml text in args.content and resolves to a
// human-readable analysis report (or an error message string).
export declare function handleComposeAnalyzer(args: Record<string, unknown>): Promise<string>;
//# sourceMappingURL=compose-analyzer.d.ts.map
|
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
"use strict";
// Compiled module for the docker_compose_analyze MCP tool.
Object.defineProperty(exports, "__esModule", { value: true });
exports.composeAnalyzerTool = void 0;
// handleComposeAnalyzer is a hoisted function declaration defined later in this file.
exports.handleComposeAnalyzer = handleComposeAnalyzer;
// Static tool metadata (name, description, JSON-schema input) consumed by MCP clients.
exports.composeAnalyzerTool = {
    name: "docker_compose_analyze",
    description: "Analyze a docker-compose.yml file: list services, ports, volumes, networks, " +
        "health checks, dependencies, and environment variables. Suggests improvements " +
        "for production readiness, security, and best practices.",
    inputSchema: {
        type: "object",
        properties: {
            content: {
                type: "string",
                description: "The full content of the docker-compose.yml to analyze.",
            },
        },
        required: ["content"],
    },
};
|
|
21
|
+
/**
 * Minimal indentation-based parser for docker-compose YAML.
 *
 * Not a full YAML parser: it assumes the conventional 2-space compose layout
 * (top-level keys at column 0, service names at indent 2, service properties
 * at indent 4, list items deeper) and extracts just enough structure for the
 * analyzer: per-service image/build/restart, ports/volumes/networks/
 * depends_on/environment lists, a few boolean security flags, and the
 * top-level `networks:` / `volumes:` names.
 *
 * Returns { services, topLevelNetworks, topLevelVolumes, findings, version? }
 * with `findings` left empty for generateFindings() to fill in.
 */
function parseSimpleYaml(content) {
    const lines = content.split("\n");
    const analysis = {
        services: [],
        topLevelNetworks: [],
        topLevelVolumes: [],
        findings: [],
    };
    // Parser cursor state: which top-level section we are in, which service,
    // and which service property the current list items belong to.
    let currentTopLevel = "";
    let currentService = "";
    let currentServiceKey = "";
    let inServiceBlock = false;
    const serviceMap = new Map();
    for (const raw of lines) {
        // Skip comments and empty lines
        if (raw.trim().startsWith("#") || !raw.trim())
            continue;
        const indent = raw.length - raw.trimStart().length;
        const trimmed = raw.trim();
        // Top-level keys with no inline value (no indent), e.g. "services:"
        if (indent === 0 && trimmed.endsWith(":")) {
            currentTopLevel = trimmed.replace(":", "").trim();
            currentService = "";
            currentServiceKey = "";
            inServiceBlock = currentTopLevel === "services";
            continue;
        }
        // Top-level "key: value" pairs, e.g. version: "3.8"
        if (indent === 0 && trimmed.includes(":")) {
            const key = trimmed.split(":")[0].trim();
            const value = trimmed.substring(trimmed.indexOf(":") + 1).trim();
            if (key === "version") {
                // Strip surrounding quotes from the version string.
                analysis.version = value.replace(/['"]/g, "");
            }
            currentTopLevel = key;
            inServiceBlock = key === "services";
            currentService = "";
            continue;
        }
        // Service-level (indent 2): start of a new service definition
        if (inServiceBlock && indent === 2 && trimmed.endsWith(":") && !trimmed.startsWith("-")) {
            currentService = trimmed.replace(":", "").trim();
            currentServiceKey = "";
            if (!serviceMap.has(currentService)) {
                serviceMap.set(currentService, {
                    name: currentService,
                    ports: [],
                    volumes: [],
                    networks: [],
                    dependsOn: [],
                    environment: [],
                    hasHealthcheck: false,
                    hasResourceLimits: false,
                    hasReadonlyRootfs: false,
                    hasSecurityOpt: false,
                });
            }
            continue;
        }
        // undefined when we are not inside a service block.
        const svc = serviceMap.get(currentService);
        // Service properties (indent 4)
        if (inServiceBlock && currentService && indent >= 4) {
            if (indent === 4 && trimmed.includes(":")) {
                const key = trimmed.split(":")[0].trim();
                const value = trimmed.substring(trimmed.indexOf(":") + 1).trim();
                // Remember the property so deeper list items can be attributed to it.
                currentServiceKey = key;
                if (svc) {
                    if (key === "image")
                        svc.image = value.replace(/['"]/g, "");
                    if (key === "build")
                        svc.build = value || "(context)";
                    if (key === "restart")
                        svc.restart = value.replace(/['"]/g, "");
                    if (key === "healthcheck")
                        svc.hasHealthcheck = true;
                    if (key === "read_only" && value === "true")
                        svc.hasReadonlyRootfs = true;
                    if (key === "security_opt")
                        svc.hasSecurityOpt = true;
                    // NOTE(review): any 'deploy' key is treated as "has resource
                    // limits", even if deploy.resources is absent — a coarse
                    // approximation.
                    if (key === "deploy")
                        svc.hasResourceLimits = true;
                    // Inline port or volume
                    if (key === "ports" && value && !value.startsWith("[")) {
                        // Not a list, skip
                    }
                    if (key === "environment" && value) {
                        // Inline map style
                    }
                }
                continue;
            }
            // List items (indent 6 with -): attributed to the last property seen.
            if (trimmed.startsWith("-") && svc) {
                const item = trimmed.substring(1).trim().replace(/['"]/g, "");
                if (currentServiceKey === "ports")
                    svc.ports.push(item);
                if (currentServiceKey === "volumes")
                    svc.volumes.push(item);
                if (currentServiceKey === "networks")
                    svc.networks.push(item);
                if (currentServiceKey === "depends_on")
                    svc.dependsOn.push(item);
                if (currentServiceKey === "environment")
                    svc.environment.push(item);
            }
        }
        // Top-level networks/volumes: collect the names declared at indent 2.
        if (currentTopLevel === "networks" && indent === 2 && trimmed.endsWith(":")) {
            analysis.topLevelNetworks.push(trimmed.replace(":", "").trim());
        }
        if (currentTopLevel === "volumes" && indent === 2 && trimmed.endsWith(":")) {
            analysis.topLevelVolumes.push(trimmed.replace(":", "").trim());
        }
    }
    analysis.services = Array.from(serviceMap.values());
    return analysis;
}
|
|
137
|
+
function generateFindings(analysis) {
|
|
138
|
+
const findings = analysis.findings;
|
|
139
|
+
if (analysis.version) {
|
|
140
|
+
findings.push({
|
|
141
|
+
severity: "info",
|
|
142
|
+
message: `Compose version: ${analysis.version}. Note: 'version' is obsolete in Compose V2.`,
|
|
143
|
+
suggestion: "The 'version' key can be removed when using Docker Compose V2.",
|
|
144
|
+
});
|
|
145
|
+
}
|
|
146
|
+
for (const svc of analysis.services) {
|
|
147
|
+
// Image tag
|
|
148
|
+
if (svc.image && (svc.image.endsWith(":latest") || (!svc.image.includes(":") && !svc.image.includes("@")))) {
|
|
149
|
+
findings.push({
|
|
150
|
+
severity: "warning",
|
|
151
|
+
message: `Service '${svc.name}': image '${svc.image}' uses :latest or no tag.`,
|
|
152
|
+
suggestion: "Pin to a specific version for reproducible deployments.",
|
|
153
|
+
});
|
|
154
|
+
}
|
|
155
|
+
// Restart policy
|
|
156
|
+
if (!svc.restart) {
|
|
157
|
+
findings.push({
|
|
158
|
+
severity: "warning",
|
|
159
|
+
message: `Service '${svc.name}': no restart policy defined.`,
|
|
160
|
+
suggestion: "Add 'restart: unless-stopped' or 'restart: on-failure' for production.",
|
|
161
|
+
});
|
|
162
|
+
}
|
|
163
|
+
// Health check
|
|
164
|
+
if (!svc.hasHealthcheck) {
|
|
165
|
+
findings.push({
|
|
166
|
+
severity: "info",
|
|
167
|
+
message: `Service '${svc.name}': no healthcheck defined.`,
|
|
168
|
+
suggestion: "Add a healthcheck for better orchestration and monitoring.",
|
|
169
|
+
});
|
|
170
|
+
}
|
|
171
|
+
// Resource limits
|
|
172
|
+
if (!svc.hasResourceLimits) {
|
|
173
|
+
findings.push({
|
|
174
|
+
severity: "info",
|
|
175
|
+
message: `Service '${svc.name}': no resource limits (deploy.resources) configured.`,
|
|
176
|
+
suggestion: "Set memory and CPU limits to prevent a single container from consuming all host resources.",
|
|
177
|
+
});
|
|
178
|
+
}
|
|
179
|
+
// Privileged ports
|
|
180
|
+
for (const port of svc.ports) {
|
|
181
|
+
const hostPort = port.split(":")[0];
|
|
182
|
+
const portNum = parseInt(hostPort, 10);
|
|
183
|
+
if (portNum > 0 && portNum < 1024) {
|
|
184
|
+
findings.push({
|
|
185
|
+
severity: "info",
|
|
186
|
+
message: `Service '${svc.name}': uses privileged port ${portNum}.`,
|
|
187
|
+
suggestion: "Privileged ports (<1024) may require elevated permissions.",
|
|
188
|
+
});
|
|
189
|
+
}
|
|
190
|
+
if (port.startsWith("0.0.0.0:") || (!port.includes("127.0.0.1") && port.includes(":"))) {
|
|
191
|
+
// Bound to all interfaces by default
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
// Host volumes
|
|
195
|
+
for (const vol of svc.volumes) {
|
|
196
|
+
if (vol.includes("/var/run/docker.sock")) {
|
|
197
|
+
findings.push({
|
|
198
|
+
severity: "warning",
|
|
199
|
+
message: `Service '${svc.name}': mounts Docker socket.`,
|
|
200
|
+
suggestion: "Mounting the Docker socket gives full control over the Docker daemon. Use with extreme caution.",
|
|
201
|
+
});
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
// Environment secrets
|
|
205
|
+
for (const env of svc.environment) {
|
|
206
|
+
const lower = env.toLowerCase();
|
|
207
|
+
if (lower.includes("password") || lower.includes("secret") || lower.includes("api_key") || lower.includes("token")) {
|
|
208
|
+
findings.push({
|
|
209
|
+
severity: "warning",
|
|
210
|
+
message: `Service '${svc.name}': environment variable '${env.split("=")[0]}' may contain a secret.`,
|
|
211
|
+
suggestion: "Use Docker secrets or an .env file (not committed to VCS) instead of inline values.",
|
|
212
|
+
});
|
|
213
|
+
}
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
// Dependencies without healthcheck
|
|
217
|
+
for (const svc of analysis.services) {
|
|
218
|
+
for (const dep of svc.dependsOn) {
|
|
219
|
+
const depSvc = analysis.services.find((s) => s.name === dep);
|
|
220
|
+
if (depSvc && !depSvc.hasHealthcheck) {
|
|
221
|
+
findings.push({
|
|
222
|
+
severity: "info",
|
|
223
|
+
message: `Service '${svc.name}' depends on '${dep}', but '${dep}' has no healthcheck.`,
|
|
224
|
+
suggestion: "Add a healthcheck to the dependency and use 'condition: service_healthy' for reliable startup order.",
|
|
225
|
+
});
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
/**
 * Entry point for the docker_compose_analyze tool.
 *
 * Runs the lightweight YAML reader over args.content, derives findings, and
 * renders a markdown-ish report: services overview, top-level networks and
 * volumes, then warnings followed by info findings.
 */
async function handleComposeAnalyzer(args) {
    const content = args.content;
    if (!content || !content.trim()) {
        return "Error: docker-compose.yml content is empty. Please provide the file content to analyze.";
    }
    const analysis = parseSimpleYaml(content);
    generateFindings(analysis);
    const report = [];
    report.push("# Docker Compose Analysis Report\n");
    // Per-service overview: omit empty fields, always show restart/healthcheck.
    report.push(`## Services (${analysis.services.length})\n`);
    for (const svc of analysis.services) {
        report.push(`### ${svc.name}`);
        if (svc.image) {
            report.push(`  Image: ${svc.image}`);
        }
        if (svc.build) {
            report.push(`  Build: ${svc.build}`);
        }
        if (svc.ports.length > 0) {
            report.push(`  Ports: ${svc.ports.join(", ")}`);
        }
        if (svc.volumes.length > 0) {
            report.push(`  Volumes: ${svc.volumes.join(", ")}`);
        }
        if (svc.networks.length > 0) {
            report.push(`  Networks: ${svc.networks.join(", ")}`);
        }
        if (svc.dependsOn.length > 0) {
            report.push(`  Depends on: ${svc.dependsOn.join(", ")}`);
        }
        if (svc.environment.length > 0) {
            report.push(`  Environment: ${svc.environment.length} variable(s)`);
        }
        report.push(`  Restart: ${svc.restart || "not set"}`);
        report.push(`  Healthcheck: ${svc.hasHealthcheck ? "Yes" : "No"}`);
        report.push("");
    }
    // Top-level networks and volumes, if declared.
    if (analysis.topLevelNetworks.length > 0) {
        report.push(`## Networks: ${analysis.topLevelNetworks.join(", ")}\n`);
    }
    if (analysis.topLevelVolumes.length > 0) {
        report.push(`## Volumes: ${analysis.topLevelVolumes.join(", ")}\n`);
    }
    // Findings: warnings first, then info.
    const warnings = [];
    const infos = [];
    for (const finding of analysis.findings) {
        (finding.severity === "warning" ? warnings : infos).push(finding);
    }
    report.push(`## Findings (${warnings.length} warnings, ${infos.length} info)\n`);
    for (const finding of warnings.concat(infos)) {
        const tag = finding.severity === "warning" ? "[WARN]" : "[INFO]";
        report.push(`${tag} ${finding.message}`);
        if (finding.suggestion) {
            report.push(`  -> ${finding.suggestion}`);
        }
    }
    return report.join("\n");
}
|
|
284
|
+
//# sourceMappingURL=compose-analyzer.js.map
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
// Declarations for the docker_container_list tool.
// Static tool metadata: name, description, and a JSON-schema style input
// definition consumed by MCP clients.
export declare const containerListTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            all: {
                type: string;
                description: string;
                default: boolean;
            };
            format: {
                type: string;
                enum: string[];
                description: string;
                default: string;
            };
        };
        // No required arguments: both parameters have defaults.
        required: never[];
    };
};
// Lists containers via the Docker CLI and resolves to a JSON or table string
// (or an error message string).
export declare function handleContainerList(args: Record<string, unknown>): Promise<string>;
//# sourceMappingURL=container-list.d.ts.map
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"use strict";
// Compiled module for the docker_container_list MCP tool.
Object.defineProperty(exports, "__esModule", { value: true });
exports.containerListTool = void 0;
// handleContainerList is a hoisted function declaration defined later in this file.
exports.handleContainerList = handleContainerList;
const node_child_process_1 = require("node:child_process");
// Static tool metadata (name, description, JSON-schema input) consumed by MCP clients.
exports.containerListTool = {
    name: "docker_container_list",
    description: "List Docker containers with status, ports, image, created time, and resource usage. " +
        "Can show only running containers or all containers including stopped ones.",
    inputSchema: {
        type: "object",
        properties: {
            all: {
                type: "boolean",
                description: "If true, show all containers including stopped ones. Defaults to false (running only).",
                default: false,
            },
            format: {
                type: "string",
                enum: ["table", "json"],
                description: "Output format: 'table' for readable text, 'json' for structured data. Defaults to 'json'.",
                default: "json",
            },
        },
        required: [],
    },
};
|
|
28
|
+
/**
 * Collect live resource usage (CPU, memory, network and block I/O) via a
 * single `docker stats --no-stream` invocation.
 *
 * Note: the stats call is not filtered to `containerIds` — docker reports all
 * running containers, and the ids are only used to skip the call entirely
 * when the caller found no containers.
 *
 * Returns a Map keyed by container id (docker stats emits 12-char ids). On
 * any docker failure the map is returned as-is: stats are best-effort and
 * unavailable for stopped containers.
 */
function getContainerStats(containerIds) {
    const statsMap = new Map();
    if (containerIds.length === 0) {
        return statsMap;
    }
    try {
        const output = node_child_process_1
            .execSync(`docker stats --no-stream --format "{{.ID}}|||{{.CPUPerc}}|||{{.MemUsage}}|||{{.MemPerc}}|||{{.NetIO}}|||{{.BlockIO}}"`, { encoding: "utf-8", timeout: 30000 })
            .trim();
        for (const line of output.split("\n")) {
            if (!line.trim()) {
                continue;
            }
            const fields = line.split("|||");
            if (fields.length < 6) {
                continue;
            }
            // MemUsage is reported as "used / limit".
            const [memUsed, memLimit] = fields[2].split(" / ");
            statsMap.set(fields[0], {
                cpu: fields[1],
                memory: memUsed?.trim() ?? "N/A",
                memoryLimit: memLimit?.trim() ?? "N/A",
                memoryPercent: fields[3],
                networkIO: fields[4],
                blockIO: fields[5],
            });
        }
    }
    catch {
        // Stats unavailable (daemon down, or stopped containers only) — fine.
    }
    return statsMap;
}
|
|
56
|
+
/**
 * Entry point for the docker_container_list tool.
 *
 * Shells out to `docker ps` (optionally with -a), parses the delimited
 * output into container records, merges in best-effort stats from
 * getContainerStats, and renders either JSON or a readable table.
 * Docker/daemon failures are returned as friendly error strings rather
 * than thrown.
 */
async function handleContainerList(args) {
    const includeStopped = args.all === true;
    const outputFormat = args.format || "json";
    try {
        const psOutput = node_child_process_1
            .execSync(`docker ps${includeStopped ? " -a" : ""} --format "{{.ID}}|||{{.Names}}|||{{.Image}}|||{{.Status}}|||{{.State}}|||{{.Ports}}|||{{.CreatedAt}}" --no-trunc`, { encoding: "utf-8", timeout: 15000 })
            .trim();
        if (!psOutput) {
            return includeStopped
                ? "No containers found."
                : "No running containers. Use { \"all\": true } to see stopped containers.";
        }
        const containers = [];
        for (const row of psOutput.split("\n")) {
            if (!row.trim()) {
                continue;
            }
            const fields = row.split("|||");
            if (fields.length < 7) {
                continue;
            }
            containers.push({
                // --no-trunc prints full ids; shorten to the familiar 12 chars
                // (also the key format used by docker stats).
                id: fields[0].substring(0, 12),
                name: fields[1],
                image: fields[2],
                status: fields[3],
                state: fields[4],
                ports: fields[5] || "none",
                created: fields[6],
                cpu: "N/A",
                memory: "N/A",
                memoryLimit: "N/A",
                memoryPercent: "N/A",
                networkIO: "N/A",
                blockIO: "N/A",
            });
        }
        // Overlay live stats where available (running containers only).
        const stats = getContainerStats(containers.map((c) => c.id));
        for (const c of containers) {
            const s = stats.get(c.id);
            if (s) {
                Object.assign(c, s);
            }
        }
        if (outputFormat === "json") {
            return JSON.stringify({ count: containers.length, containers }, null, 2);
        }
        // Human-readable table format.
        const out = [`Found ${containers.length} container(s):`, ""];
        for (const c of containers) {
            out.push(`--- ${c.name} (${c.id}) ---`, `  Image: ${c.image}`, `  State: ${c.state} | ${c.status}`, `  Ports: ${c.ports}`, `  Created: ${c.created}`, `  CPU: ${c.cpu}`, `  Memory: ${c.memory} / ${c.memoryLimit} (${c.memoryPercent})`, `  Net I/O: ${c.networkIO}`, `  Block IO: ${c.blockIO}`, "");
        }
        return out.join("\n").trim();
    }
    catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        if (message.includes("not found") || message.includes("not recognized") || message.includes("ENOENT")) {
            return "Error: Docker is not installed or not available in PATH. Please install Docker and ensure the 'docker' command is accessible.";
        }
        if (message.includes("Cannot connect") || message.includes("permission denied") || message.includes("Is the docker daemon running")) {
            return "Error: Cannot connect to the Docker daemon. Is Docker running? You may also need appropriate permissions.";
        }
        return `Error listing containers: ${message}`;
    }
}
|
|
132
|
+
//# sourceMappingURL=container-list.js.map
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
// Declarations for the docker_container_logs tool.
// Static tool metadata: name, description, and a JSON-schema style input
// definition consumed by MCP clients.
export declare const containerLogsTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            container: {
                type: string;
                description: string;
            };
            tail: {
                type: string;
                description: string;
                default: number;
            };
            since: {
                type: string;
                description: string;
            };
            until: {
                type: string;
                description: string;
            };
            filter: {
                type: string;
                description: string;
            };
            timestamps: {
                type: string;
                description: string;
                default: boolean;
            };
        };
        required: string[];
    };
};
// Fetches logs via the Docker CLI and resolves to the (optionally filtered)
// log text, or an error message string.
export declare function handleContainerLogs(args: Record<string, unknown>): Promise<string>;
//# sourceMappingURL=container-logs.d.ts.map
|