configsentry 0.0.13 → 0.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -5
- package/dist/cli.js +0 -0
- package/package.json +3 -3
- package/dist/baseline.test.js +0 -12
- package/dist/rules.test.js +0 -53
- package/src/baseline.test.ts +0 -15
- package/src/baseline.ts +0 -68
- package/src/cli.ts +0 -123
- package/src/compose.ts +0 -19
- package/src/rules.test.ts +0 -63
- package/src/rules.ts +0 -242
- package/src/sarif.ts +0 -74
- package/src/scan.ts +0 -47
- package/src/types.ts +0 -16
package/README.md
CHANGED
|
@@ -99,7 +99,7 @@ jobs:
|
|
|
99
99
|
runs-on: ubuntu-latest
|
|
100
100
|
steps:
|
|
101
101
|
- uses: actions/checkout@v4
|
|
102
|
-
- uses: alfredMorgenstern/configsentry@v0.0.
|
|
102
|
+
- uses: alfredMorgenstern/configsentry@v0.0.17
|
|
103
103
|
with:
|
|
104
104
|
target: .
|
|
105
105
|
# optional: baseline: .configsentry-baseline.json
|
|
@@ -111,7 +111,7 @@ jobs:
|
|
|
111
111
|
# security-events: write
|
|
112
112
|
```
|
|
113
113
|
|
|
114
|
-
> Tip: pin to a tag (like `v0.0.
|
|
114
|
+
> Tip: pin to a tag (like `v0.0.13`) for reproducible builds.
|
|
115
115
|
|
|
116
116
|
## Exit codes
|
|
117
117
|
- `0` no findings
|
|
@@ -125,7 +125,7 @@ node dist/cli.js ./example.docker-compose.yml
|
|
|
125
125
|
```
|
|
126
126
|
|
|
127
127
|
## Next steps
|
|
128
|
-
-
|
|
129
|
-
-
|
|
130
|
-
-
|
|
128
|
+
- GitHub Marketplace listing (Action)
|
|
129
|
+
- more rules (policy packs for common stacks)
|
|
130
|
+
- PR annotations/comments (optional)
|
|
131
131
|
- autofix mode (`--fix`) for safe transforms
|
package/dist/cli.js
CHANGED
|
File without changes
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "configsentry",
|
|
3
|
-
"version": "0.0.
|
|
3
|
+
"version": "0.0.17",
|
|
4
4
|
"description": "Developer-first guardrails for docker-compose.yml (security + ops footguns).",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"license": "MIT",
|
|
@@ -18,7 +18,7 @@
|
|
|
18
18
|
},
|
|
19
19
|
"files": [
|
|
20
20
|
"dist/",
|
|
21
|
-
"
|
|
21
|
+
"!dist/**/*.test.js",
|
|
22
22
|
"README.md",
|
|
23
23
|
"LICENSE"
|
|
24
24
|
],
|
|
@@ -27,7 +27,7 @@
|
|
|
27
27
|
},
|
|
28
28
|
"scripts": {
|
|
29
29
|
"test": "node --test dist/**/*.test.js",
|
|
30
|
-
"build": "tsc -p tsconfig.json",
|
|
30
|
+
"build": "node -e \"require('node:fs').rmSync('dist',{recursive:true,force:true})\" && tsc -p tsconfig.json",
|
|
31
31
|
"prepack": "npm run build",
|
|
32
32
|
"start": "node dist/cli.js",
|
|
33
33
|
"dev": "node --loader ts-node/esm src/cli.ts",
|
package/dist/baseline.test.js
DELETED
|
@@ -1,12 +0,0 @@
|
|
|
1
|
-
import test from 'node:test';
|
|
2
|
-
import assert from 'node:assert/strict';
|
|
3
|
-
import { fingerprintFinding, applyBaseline } from './baseline.js';
|
|
4
|
-
test('baseline suppression works', () => {
|
|
5
|
-
const f1 = { id: 'r1', title: 't', severity: 'low', message: 'm', service: 'svc', path: '/tmp/x#p' };
|
|
6
|
-
const f2 = { id: 'r2', title: 't', severity: 'low', message: 'm', service: 'svc', path: '/tmp/x#p2' };
|
|
7
|
-
const set = new Set([fingerprintFinding(f1)]);
|
|
8
|
-
const { kept, suppressed } = applyBaseline([f1, f2], set);
|
|
9
|
-
assert.equal(kept.length, 1);
|
|
10
|
-
assert.equal(suppressed.length, 1);
|
|
11
|
-
assert.equal(kept[0].id, 'r2');
|
|
12
|
-
});
|
package/dist/rules.test.js
DELETED
|
@@ -1,53 +0,0 @@
|
|
|
1
|
-
import test from 'node:test';
|
|
2
|
-
import assert from 'node:assert/strict';
|
|
3
|
-
import { runRules } from './rules.js';
|
|
4
|
-
test('detects privileged container', () => {
|
|
5
|
-
const compose = { services: { app: { privileged: true } } };
|
|
6
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
7
|
-
assert.ok(findings.some((f) => f.id === 'compose.privileged' && f.service === 'app'));
|
|
8
|
-
});
|
|
9
|
-
test('detects sensitive port exposed', () => {
|
|
10
|
-
const compose = { services: { db: { ports: ['5432:5432'] } } };
|
|
11
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
12
|
-
assert.ok(findings.some((f) => f.id === 'compose.exposed-sensitive-port' && f.service === 'db'));
|
|
13
|
-
});
|
|
14
|
-
test('detects docker socket mount', () => {
|
|
15
|
-
const compose = { services: { runner: { volumes: ['/var/run/docker.sock:/var/run/docker.sock'] } } };
|
|
16
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
17
|
-
assert.ok(findings.some((f) => f.id === 'compose.docker-socket' && f.service === 'runner'));
|
|
18
|
-
});
|
|
19
|
-
test('detects cap_add: ALL', () => {
|
|
20
|
-
const compose = { services: { app: { cap_add: ['ALL'] } } };
|
|
21
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
22
|
-
assert.ok(findings.some((f) => f.id === 'compose.cap-add-all' && f.service === 'app'));
|
|
23
|
-
});
|
|
24
|
-
test('detects network_mode: host', () => {
|
|
25
|
-
const compose = { services: { app: { network_mode: 'host' } } };
|
|
26
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
27
|
-
assert.ok(findings.some((f) => f.id === 'compose.network-host' && f.service === 'app'));
|
|
28
|
-
});
|
|
29
|
-
test('detects pid: host', () => {
|
|
30
|
-
const compose = { services: { app: { pid: 'host' } } };
|
|
31
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
32
|
-
assert.ok(findings.some((f) => f.id === 'compose.pid-host' && f.service === 'app'));
|
|
33
|
-
});
|
|
34
|
-
test('detects ipc: host', () => {
|
|
35
|
-
const compose = { services: { app: { ipc: 'host' } } };
|
|
36
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
37
|
-
assert.ok(findings.some((f) => f.id === 'compose.ipc-host' && f.service === 'app'));
|
|
38
|
-
});
|
|
39
|
-
test('detects unconfined security_opt', () => {
|
|
40
|
-
const compose = { services: { app: { security_opt: ['seccomp=unconfined'] } } };
|
|
41
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
42
|
-
assert.ok(findings.some((f) => f.id === 'compose.security-unconfined' && f.service === 'app'));
|
|
43
|
-
});
|
|
44
|
-
test('detects host /dev mount', () => {
|
|
45
|
-
const compose = { services: { app: { volumes: ['/dev:/dev'] } } };
|
|
46
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
47
|
-
assert.ok(findings.some((f) => f.id === 'compose.host-dev-mount' && f.service === 'app'));
|
|
48
|
-
});
|
|
49
|
-
test('detects dangerous device mapping', () => {
|
|
50
|
-
const compose = { services: { app: { devices: ['/dev/kmsg:/dev/kmsg'] } } };
|
|
51
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
52
|
-
assert.ok(findings.some((f) => f.id === 'compose.dangerous-device' && f.service === 'app'));
|
|
53
|
-
});
|
package/src/baseline.test.ts
DELETED
|
@@ -1,15 +0,0 @@
|
|
|
1
|
-
import test from 'node:test';
|
|
2
|
-
import assert from 'node:assert/strict';
|
|
3
|
-
import { fingerprintFinding, applyBaseline } from './baseline.js';
|
|
4
|
-
|
|
5
|
-
test('baseline suppression works', () => {
|
|
6
|
-
const f1 = { id: 'r1', title: 't', severity: 'low', message: 'm', service: 'svc', path: '/tmp/x#p' };
|
|
7
|
-
const f2 = { id: 'r2', title: 't', severity: 'low', message: 'm', service: 'svc', path: '/tmp/x#p2' };
|
|
8
|
-
|
|
9
|
-
const set = new Set([fingerprintFinding(f1 as any)]);
|
|
10
|
-
const { kept, suppressed } = applyBaseline([f1 as any, f2 as any], set);
|
|
11
|
-
|
|
12
|
-
assert.equal(kept.length, 1);
|
|
13
|
-
assert.equal(suppressed.length, 1);
|
|
14
|
-
assert.equal(kept[0].id, 'r2');
|
|
15
|
-
});
|
package/src/baseline.ts
DELETED
|
@@ -1,68 +0,0 @@
|
|
|
1
|
-
import fs from 'node:fs/promises';
|
|
2
|
-
import crypto from 'node:crypto';
|
|
3
|
-
import type { Finding } from './types.js';
|
|
4
|
-
|
|
5
|
-
export type BaselineEntry = {
|
|
6
|
-
fingerprint: string;
|
|
7
|
-
id: string;
|
|
8
|
-
service?: string;
|
|
9
|
-
path?: string;
|
|
10
|
-
};
|
|
11
|
-
|
|
12
|
-
export type BaselineFile = {
|
|
13
|
-
version: 1;
|
|
14
|
-
generatedAt: string;
|
|
15
|
-
tool: string;
|
|
16
|
-
entries: BaselineEntry[];
|
|
17
|
-
};
|
|
18
|
-
|
|
19
|
-
export function fingerprintFinding(f: Finding): string {
|
|
20
|
-
const h = crypto.createHash('sha256');
|
|
21
|
-
// Keep stable + minimal; avoid messages that could change wording.
|
|
22
|
-
h.update(String(f.id));
|
|
23
|
-
h.update('\n');
|
|
24
|
-
h.update(String(f.service ?? ''));
|
|
25
|
-
h.update('\n');
|
|
26
|
-
h.update(String(f.path ?? ''));
|
|
27
|
-
return h.digest('hex');
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
export async function writeBaseline(path: string, findings: Finding[]) {
|
|
31
|
-
const entries: BaselineEntry[] = findings.map((f) => ({
|
|
32
|
-
fingerprint: fingerprintFinding(f),
|
|
33
|
-
id: f.id,
|
|
34
|
-
service: f.service,
|
|
35
|
-
path: f.path,
|
|
36
|
-
}));
|
|
37
|
-
|
|
38
|
-
const file: BaselineFile = {
|
|
39
|
-
version: 1,
|
|
40
|
-
generatedAt: new Date().toISOString(),
|
|
41
|
-
tool: 'ConfigSentry',
|
|
42
|
-
entries,
|
|
43
|
-
};
|
|
44
|
-
|
|
45
|
-
await fs.writeFile(path, JSON.stringify(file, null, 2) + '\n', 'utf8');
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
export async function loadBaseline(path: string): Promise<Set<string>> {
|
|
49
|
-
const raw = await fs.readFile(path, 'utf8');
|
|
50
|
-
const json = JSON.parse(raw);
|
|
51
|
-
const entries: any[] = Array.isArray(json?.entries) ? json.entries : [];
|
|
52
|
-
const set = new Set<string>();
|
|
53
|
-
for (const e of entries) {
|
|
54
|
-
if (typeof e?.fingerprint === 'string') set.add(e.fingerprint);
|
|
55
|
-
}
|
|
56
|
-
return set;
|
|
57
|
-
}
|
|
58
|
-
|
|
59
|
-
export function applyBaseline(findings: Finding[], baselineFingerprints: Set<string>) {
|
|
60
|
-
const kept: Finding[] = [];
|
|
61
|
-
const suppressed: Finding[] = [];
|
|
62
|
-
for (const f of findings) {
|
|
63
|
-
const fp = fingerprintFinding(f);
|
|
64
|
-
if (baselineFingerprints.has(fp)) suppressed.push(f);
|
|
65
|
-
else kept.push(f);
|
|
66
|
-
}
|
|
67
|
-
return { kept, suppressed };
|
|
68
|
-
}
|
package/src/cli.ts
DELETED
|
@@ -1,123 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
import path from 'node:path';
|
|
3
|
-
import process from 'node:process';
|
|
4
|
-
import fs from 'node:fs/promises';
|
|
5
|
-
import { fileURLToPath } from 'node:url';
|
|
6
|
-
import { loadCompose } from './compose.js';
|
|
7
|
-
import { runRules } from './rules.js';
|
|
8
|
-
import { findingsToSarif } from './sarif.js';
|
|
9
|
-
import { resolveTargets } from './scan.js';
|
|
10
|
-
import { applyBaseline, loadBaseline, writeBaseline } from './baseline.js';
|
|
11
|
-
|
|
12
|
-
function usage() {
|
|
13
|
-
console.log(`ConfigSentry (MVP)\n\nUsage:\n configsentry <file-or-dir> [--json|--sarif] [--baseline <file>] [--write-baseline <file>]\n\nOutput:\n --json machine-readable findings\n --sarif SARIF 2.1.0 (for GitHub code scanning)\n\nBaselines:\n --baseline <file> suppress findings present in a baseline file\n --write-baseline <file> write baseline file for current findings and exit 0\n\nExit codes:\n 0 = no findings (after baseline suppression)
|
|
14
|
-
2 = findings present
|
|
15
|
-
1 = error
|
|
16
|
-
`);
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
async function main() {
|
|
20
|
-
const args = process.argv.slice(2);
|
|
21
|
-
|
|
22
|
-
if (args.includes('-v') || args.includes('--version')) {
|
|
23
|
-
try {
|
|
24
|
-
const here = path.dirname(fileURLToPath(import.meta.url));
|
|
25
|
-
const pkgPath = path.resolve(here, '../package.json');
|
|
26
|
-
const raw = await fs.readFile(pkgPath, 'utf8');
|
|
27
|
-
const pkg = JSON.parse(raw);
|
|
28
|
-
console.log(pkg.version || 'unknown');
|
|
29
|
-
} catch {
|
|
30
|
-
console.log('unknown');
|
|
31
|
-
}
|
|
32
|
-
process.exit(0);
|
|
33
|
-
}
|
|
34
|
-
|
|
35
|
-
if (args.length === 0 || args.includes('-h') || args.includes('--help')) {
|
|
36
|
-
usage();
|
|
37
|
-
process.exit(0);
|
|
38
|
-
}
|
|
39
|
-
|
|
40
|
-
const json = args.includes('--json');
|
|
41
|
-
const sarif = args.includes('--sarif');
|
|
42
|
-
if (json && sarif) {
|
|
43
|
-
console.error('Error: choose only one output mode: --json or --sarif');
|
|
44
|
-
process.exit(1);
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
const baselineIdx = args.indexOf('--baseline');
|
|
48
|
-
const baselinePath = baselineIdx >= 0 ? args[baselineIdx + 1] : undefined;
|
|
49
|
-
const writeBaselineIdx = args.indexOf('--write-baseline');
|
|
50
|
-
const writeBaselinePath = writeBaselineIdx >= 0 ? args[writeBaselineIdx + 1] : undefined;
|
|
51
|
-
|
|
52
|
-
const target = args.find((a) => !a.startsWith('-'));
|
|
53
|
-
if (!target) {
|
|
54
|
-
usage();
|
|
55
|
-
process.exit(1);
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
const targetPaths = await resolveTargets(target);
|
|
59
|
-
if (targetPaths.length === 0) {
|
|
60
|
-
console.error(`No compose files found in: ${target}`);
|
|
61
|
-
process.exit(1);
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
let allFindings = [] as any[];
|
|
65
|
-
for (const targetPath of targetPaths) {
|
|
66
|
-
const { compose } = await loadCompose(targetPath);
|
|
67
|
-
allFindings = allFindings.concat(runRules(compose, targetPath));
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
// Baseline suppression
|
|
71
|
-
let suppressed: any[] = [];
|
|
72
|
-
let findings = allFindings;
|
|
73
|
-
if (baselinePath) {
|
|
74
|
-
const set = await loadBaseline(path.resolve(baselinePath));
|
|
75
|
-
const res = applyBaseline(allFindings, set);
|
|
76
|
-
findings = res.kept;
|
|
77
|
-
suppressed = res.suppressed;
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
// Baseline generation mode
|
|
81
|
-
if (writeBaselinePath) {
|
|
82
|
-
await writeBaseline(path.resolve(writeBaselinePath), allFindings);
|
|
83
|
-
console.log(`Wrote baseline: ${path.resolve(writeBaselinePath)} (${allFindings.length} finding(s))`);
|
|
84
|
-
process.exit(0);
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
if (json) {
|
|
88
|
-
console.log(JSON.stringify({ targetPaths, findings, suppressedCount: suppressed.length }, null, 2));
|
|
89
|
-
} else if (sarif) {
|
|
90
|
-
console.log(JSON.stringify(findingsToSarif(findings), null, 2));
|
|
91
|
-
} else {
|
|
92
|
-
const scope = targetPaths.length === 1 ? targetPaths[0] : `${targetPaths.length} file(s)`;
|
|
93
|
-
|
|
94
|
-
if (findings.length === 0) {
|
|
95
|
-
console.log(`✅ No findings for ${scope}`);
|
|
96
|
-
if (suppressed.length > 0) {
|
|
97
|
-
console.log(`(suppressed by baseline: ${suppressed.length})`);
|
|
98
|
-
}
|
|
99
|
-
} else {
|
|
100
|
-
console.log(`❌ ${findings.length} finding(s) for ${scope}`);
|
|
101
|
-
if (suppressed.length > 0) {
|
|
102
|
-
console.log(`(suppressed by baseline: ${suppressed.length})`);
|
|
103
|
-
}
|
|
104
|
-
console.log('');
|
|
105
|
-
for (const f of findings) {
|
|
106
|
-
console.log(`[${f.severity.toUpperCase()}] ${f.title}`);
|
|
107
|
-
console.log(`- service: ${f.service ?? '-'}
|
|
108
|
-
- rule: ${f.id}
|
|
109
|
-
- where: ${f.path ?? '-'}
|
|
110
|
-
- msg: ${f.message}`);
|
|
111
|
-
if (f.suggestion) console.log(`- fix: ${f.suggestion}`);
|
|
112
|
-
console.log('');
|
|
113
|
-
}
|
|
114
|
-
}
|
|
115
|
-
}
|
|
116
|
-
|
|
117
|
-
process.exit(findings.length === 0 ? 0 : 2);
|
|
118
|
-
}
|
|
119
|
-
|
|
120
|
-
main().catch((err) => {
|
|
121
|
-
console.error('Error:', err);
|
|
122
|
-
process.exit(1);
|
|
123
|
-
});
|
package/src/compose.ts
DELETED
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
import fs from 'node:fs/promises';
|
|
2
|
-
import path from 'node:path';
|
|
3
|
-
import YAML from 'yaml';
|
|
4
|
-
|
|
5
|
-
export type ComposeFile = {
|
|
6
|
-
services?: Record<string, any>;
|
|
7
|
-
};
|
|
8
|
-
|
|
9
|
-
export async function loadCompose(filePath: string): Promise<{ compose: ComposeFile; raw: any }> {
|
|
10
|
-
const abs = path.resolve(filePath);
|
|
11
|
-
const text = await fs.readFile(abs, 'utf8');
|
|
12
|
-
|
|
13
|
-
// Support multi-document YAML (---). If multiple docs exist, Compose content is typically the first.
|
|
14
|
-
const docs = YAML.parseAllDocuments(text);
|
|
15
|
-
const first = docs[0];
|
|
16
|
-
const doc = first ? first.toJSON() : YAML.parse(text);
|
|
17
|
-
|
|
18
|
-
return { compose: doc as ComposeFile, raw: doc };
|
|
19
|
-
}
|
package/src/rules.test.ts
DELETED
|
@@ -1,63 +0,0 @@
|
|
|
1
|
-
import test from 'node:test';
|
|
2
|
-
import assert from 'node:assert/strict';
|
|
3
|
-
import { runRules } from './rules.js';
|
|
4
|
-
|
|
5
|
-
test('detects privileged container', () => {
|
|
6
|
-
const compose = { services: { app: { privileged: true } } };
|
|
7
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
8
|
-
assert.ok(findings.some((f) => f.id === 'compose.privileged' && f.service === 'app'));
|
|
9
|
-
});
|
|
10
|
-
|
|
11
|
-
test('detects sensitive port exposed', () => {
|
|
12
|
-
const compose = { services: { db: { ports: ['5432:5432'] } } };
|
|
13
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
14
|
-
assert.ok(findings.some((f) => f.id === 'compose.exposed-sensitive-port' && f.service === 'db'));
|
|
15
|
-
});
|
|
16
|
-
|
|
17
|
-
test('detects docker socket mount', () => {
|
|
18
|
-
const compose = { services: { runner: { volumes: ['/var/run/docker.sock:/var/run/docker.sock'] } } };
|
|
19
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
20
|
-
assert.ok(findings.some((f) => f.id === 'compose.docker-socket' && f.service === 'runner'));
|
|
21
|
-
});
|
|
22
|
-
|
|
23
|
-
test('detects cap_add: ALL', () => {
|
|
24
|
-
const compose = { services: { app: { cap_add: ['ALL'] } } };
|
|
25
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
26
|
-
assert.ok(findings.some((f) => f.id === 'compose.cap-add-all' && f.service === 'app'));
|
|
27
|
-
});
|
|
28
|
-
|
|
29
|
-
test('detects network_mode: host', () => {
|
|
30
|
-
const compose = { services: { app: { network_mode: 'host' } } };
|
|
31
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
32
|
-
assert.ok(findings.some((f) => f.id === 'compose.network-host' && f.service === 'app'));
|
|
33
|
-
});
|
|
34
|
-
|
|
35
|
-
test('detects pid: host', () => {
|
|
36
|
-
const compose = { services: { app: { pid: 'host' } } };
|
|
37
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
38
|
-
assert.ok(findings.some((f) => f.id === 'compose.pid-host' && f.service === 'app'));
|
|
39
|
-
});
|
|
40
|
-
|
|
41
|
-
test('detects ipc: host', () => {
|
|
42
|
-
const compose = { services: { app: { ipc: 'host' } } };
|
|
43
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
44
|
-
assert.ok(findings.some((f) => f.id === 'compose.ipc-host' && f.service === 'app'));
|
|
45
|
-
});
|
|
46
|
-
|
|
47
|
-
test('detects unconfined security_opt', () => {
|
|
48
|
-
const compose = { services: { app: { security_opt: ['seccomp=unconfined'] } } };
|
|
49
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
50
|
-
assert.ok(findings.some((f) => f.id === 'compose.security-unconfined' && f.service === 'app'));
|
|
51
|
-
});
|
|
52
|
-
|
|
53
|
-
test('detects host /dev mount', () => {
|
|
54
|
-
const compose = { services: { app: { volumes: ['/dev:/dev'] } } };
|
|
55
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
56
|
-
assert.ok(findings.some((f) => f.id === 'compose.host-dev-mount' && f.service === 'app'));
|
|
57
|
-
});
|
|
58
|
-
|
|
59
|
-
test('detects dangerous device mapping', () => {
|
|
60
|
-
const compose = { services: { app: { devices: ['/dev/kmsg:/dev/kmsg'] } } };
|
|
61
|
-
const findings = runRules(compose, 'docker-compose.yml');
|
|
62
|
-
assert.ok(findings.some((f) => f.id === 'compose.dangerous-device' && f.service === 'app'));
|
|
63
|
-
});
|
package/src/rules.ts
DELETED
|
@@ -1,242 +0,0 @@
|
|
|
1
|
-
import type { Finding } from './types.js';
|
|
2
|
-
|
|
3
|
-
const SENSITIVE_PORTS = new Set([5432, 3306, 6379, 27017, 9200]);
|
|
4
|
-
|
|
5
|
-
function normalizePorts(ports: any): Array<{ hostIp?: string; hostPort?: number; containerPort?: number; raw: string }> {
|
|
6
|
-
if (!Array.isArray(ports)) return [];
|
|
7
|
-
const res: Array<{ hostIp?: string; hostPort?: number; containerPort?: number; raw: string }> = [];
|
|
8
|
-
for (const p of ports) {
|
|
9
|
-
if (typeof p === 'number') {
|
|
10
|
-
res.push({ containerPort: p, raw: String(p) });
|
|
11
|
-
continue;
|
|
12
|
-
}
|
|
13
|
-
if (typeof p !== 'string') continue;
|
|
14
|
-
// patterns:
|
|
15
|
-
// "8080:80"
|
|
16
|
-
// "127.0.0.1:8080:80"
|
|
17
|
-
// "5432:5432"
|
|
18
|
-
const parts = p.split(':');
|
|
19
|
-
if (parts.length === 2) {
|
|
20
|
-
const hostPort = Number(parts[0]);
|
|
21
|
-
const containerPort = Number(parts[1]);
|
|
22
|
-
res.push({ hostPort: Number.isFinite(hostPort) ? hostPort : undefined, containerPort: Number.isFinite(containerPort) ? containerPort : undefined, raw: p });
|
|
23
|
-
} else if (parts.length === 3) {
|
|
24
|
-
const hostIp = parts[0];
|
|
25
|
-
const hostPort = Number(parts[1]);
|
|
26
|
-
const containerPort = Number(parts[2]);
|
|
27
|
-
res.push({ hostIp, hostPort: Number.isFinite(hostPort) ? hostPort : undefined, containerPort: Number.isFinite(containerPort) ? containerPort : undefined, raw: p });
|
|
28
|
-
} else {
|
|
29
|
-
res.push({ raw: p });
|
|
30
|
-
}
|
|
31
|
-
}
|
|
32
|
-
return res;
|
|
33
|
-
}
|
|
34
|
-
|
|
35
|
-
export function runRules(compose: any, targetPath: string): Finding[] {
|
|
36
|
-
const findings: Finding[] = [];
|
|
37
|
-
const services: Record<string, any> = compose?.services ?? {};
|
|
38
|
-
|
|
39
|
-
for (const [serviceName, svc] of Object.entries(services)) {
|
|
40
|
-
// Rule: privileged
|
|
41
|
-
if (svc?.privileged === true) {
|
|
42
|
-
findings.push({
|
|
43
|
-
id: 'compose.privileged',
|
|
44
|
-
title: 'Privileged container',
|
|
45
|
-
severity: 'high',
|
|
46
|
-
message: `Service '${serviceName}' runs with privileged: true.`,
|
|
47
|
-
service: serviceName,
|
|
48
|
-
path: `${targetPath}#services.${serviceName}.privileged`,
|
|
49
|
-
suggestion: 'Remove privileged: true unless absolutely required; prefer adding only the needed capabilities.'
|
|
50
|
-
});
|
|
51
|
-
}
|
|
52
|
-
|
|
53
|
-
// Rule: cap_add: [ALL]
|
|
54
|
-
const capAdd: any[] = Array.isArray(svc?.cap_add) ? svc.cap_add : [];
|
|
55
|
-
if (capAdd.some((c) => String(c).toUpperCase() === 'ALL')) {
|
|
56
|
-
findings.push({
|
|
57
|
-
id: 'compose.cap-add-all',
|
|
58
|
-
title: 'Dangerous Linux capabilities (cap_add: ALL)',
|
|
59
|
-
severity: 'high',
|
|
60
|
-
message: `Service '${serviceName}' uses cap_add: [ALL], which is effectively privileged in many cases.`,
|
|
61
|
-
service: serviceName,
|
|
62
|
-
path: `${targetPath}#services.${serviceName}.cap_add`,
|
|
63
|
-
suggestion: 'Remove cap_add: ALL. Add only the specific capabilities required (e.g. NET_BIND_SERVICE) or redesign to avoid it.'
|
|
64
|
-
});
|
|
65
|
-
}
|
|
66
|
-
|
|
67
|
-
// Rule: host namespaces (network/pid/ipc)
|
|
68
|
-
if (svc?.network_mode === 'host') {
|
|
69
|
-
findings.push({
|
|
70
|
-
id: 'compose.network-host',
|
|
71
|
-
title: 'Host network namespace (network_mode: host)',
|
|
72
|
-
severity: 'high',
|
|
73
|
-
message: `Service '${serviceName}' uses network_mode: host, bypassing Docker network isolation.`,
|
|
74
|
-
service: serviceName,
|
|
75
|
-
path: `${targetPath}#services.${serviceName}.network_mode`,
|
|
76
|
-
suggestion: 'Avoid host networking. Prefer explicit port mappings or internal networks.'
|
|
77
|
-
});
|
|
78
|
-
}
|
|
79
|
-
if (svc?.pid === 'host') {
|
|
80
|
-
findings.push({
|
|
81
|
-
id: 'compose.pid-host',
|
|
82
|
-
title: 'Host PID namespace (pid: host)',
|
|
83
|
-
severity: 'high',
|
|
84
|
-
message: `Service '${serviceName}' uses pid: host, exposing host process namespace to the container.`,
|
|
85
|
-
service: serviceName,
|
|
86
|
-
path: `${targetPath}#services.${serviceName}.pid`,
|
|
87
|
-
suggestion: 'Avoid pid: host unless you are building low-level host tooling and understand the security implications.'
|
|
88
|
-
});
|
|
89
|
-
}
|
|
90
|
-
if (svc?.ipc === 'host') {
|
|
91
|
-
findings.push({
|
|
92
|
-
id: 'compose.ipc-host',
|
|
93
|
-
title: 'Host IPC namespace (ipc: host)',
|
|
94
|
-
severity: 'high',
|
|
95
|
-
message: `Service '${serviceName}' uses ipc: host, exposing host IPC namespace to the container.`,
|
|
96
|
-
service: serviceName,
|
|
97
|
-
path: `${targetPath}#services.${serviceName}.ipc`,
|
|
98
|
-
suggestion: 'Avoid ipc: host. Prefer explicit shared volumes or redesign if IPC sharing is required.'
|
|
99
|
-
});
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
// Rule: unconfined security profiles
|
|
103
|
-
const securityOpt: any[] = Array.isArray(svc?.security_opt) ? svc.security_opt : [];
|
|
104
|
-
const sec = securityOpt.map((x) => String(x).toLowerCase());
|
|
105
|
-
const hasUnconfined = sec.some((x) => x.includes('seccomp') && x.includes('unconfined')) || sec.some((x) => x.includes('apparmor') && x.includes('unconfined')) || sec.some((x) => x.includes('label:disable'));
|
|
106
|
-
if (hasUnconfined) {
|
|
107
|
-
findings.push({
|
|
108
|
-
id: 'compose.security-unconfined',
|
|
109
|
-
title: 'Security profile disabled (unconfined)',
|
|
110
|
-
severity: 'high',
|
|
111
|
-
message: `Service '${serviceName}' disables container security profiles via security_opt (${securityOpt.join(', ')}).`,
|
|
112
|
-
service: serviceName,
|
|
113
|
-
path: `${targetPath}#services.${serviceName}.security_opt`,
|
|
114
|
-
suggestion: 'Avoid unconfined security profiles. Remove the option or use a minimal custom seccomp/apparmor profile.'
|
|
115
|
-
});
|
|
116
|
-
}
|
|
117
|
-
|
|
118
|
-
// Rule: docker socket mount
|
|
119
|
-
const volumes: any[] = Array.isArray(svc?.volumes) ? svc.volumes : [];
|
|
120
|
-
for (const v of volumes) {
|
|
121
|
-
if (typeof v !== 'string') continue;
|
|
122
|
-
if (v.includes('/var/run/docker.sock')) {
|
|
123
|
-
findings.push({
|
|
124
|
-
id: 'compose.docker-socket',
|
|
125
|
-
title: 'Docker socket mounted',
|
|
126
|
-
severity: 'high',
|
|
127
|
-
message: `Service '${serviceName}' mounts /var/run/docker.sock which effectively grants root-on-host.`,
|
|
128
|
-
service: serviceName,
|
|
129
|
-
path: `${targetPath}#services.${serviceName}.volumes`,
|
|
130
|
-
suggestion: 'Avoid mounting the docker socket. If you need it, isolate the runner and treat it as privileged infrastructure.'
|
|
131
|
-
});
|
|
132
|
-
}
|
|
133
|
-
if (v.startsWith('/:') || v.startsWith('/:/')) {
|
|
134
|
-
findings.push({
|
|
135
|
-
id: 'compose.host-root-mount',
|
|
136
|
-
title: 'Host root mounted',
|
|
137
|
-
severity: 'high',
|
|
138
|
-
message: `Service '${serviceName}' appears to mount the host root filesystem ('${v}').`,
|
|
139
|
-
service: serviceName,
|
|
140
|
-
path: `${targetPath}#services.${serviceName}.volumes`,
|
|
141
|
-
suggestion: 'Avoid mounting /. Mount only specific directories required by the app.'
|
|
142
|
-
});
|
|
143
|
-
}
|
|
144
|
-
if (v.startsWith('/dev:/dev') || v.startsWith('/dev/:/dev')) {
|
|
145
|
-
findings.push({
|
|
146
|
-
id: 'compose.host-dev-mount',
|
|
147
|
-
title: 'Host /dev mounted into container',
|
|
148
|
-
severity: 'high',
|
|
149
|
-
message: `Service '${serviceName}' mounts host /dev into the container ('${v}'), which can enable device access and privilege escalation.`,
|
|
150
|
-
service: serviceName,
|
|
151
|
-
path: `${targetPath}#services.${serviceName}.volumes`,
|
|
152
|
-
suggestion: 'Avoid mounting /dev. If hardware access is required, map only the specific device(s) needed via devices:.'
|
|
153
|
-
});
|
|
154
|
-
}
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
// Rule: dangerous device mappings
|
|
158
|
-
const devices: any[] = Array.isArray(svc?.devices) ? svc.devices : [];
|
|
159
|
-
for (const d of devices) {
|
|
160
|
-
if (typeof d !== 'string') continue;
|
|
161
|
-
const lower = d.toLowerCase();
|
|
162
|
-
if (lower.includes('/dev/mem') || lower.includes('/dev/kmem') || lower.includes('/dev/kmsg')) {
|
|
163
|
-
findings.push({
|
|
164
|
-
id: 'compose.dangerous-device',
|
|
165
|
-
title: 'Dangerous device mapped into container',
|
|
166
|
-
severity: 'high',
|
|
167
|
-
message: `Service '${serviceName}' maps a sensitive device into the container ('${d}').`,
|
|
168
|
-
service: serviceName,
|
|
169
|
-
path: `${targetPath}#services.${serviceName}.devices`,
|
|
170
|
-
suggestion: 'Avoid mapping kernel/memory/log devices into containers. If absolutely required, isolate the host and restrict container privileges.'
|
|
171
|
-
});
|
|
172
|
-
}
|
|
173
|
-
}
|
|
174
|
-
|
|
175
|
-
// Rule: restart policy
|
|
176
|
-
if (svc?.restart == null) {
|
|
177
|
-
findings.push({
|
|
178
|
-
id: 'compose.missing-restart',
|
|
179
|
-
title: 'Missing restart policy',
|
|
180
|
-
severity: 'medium',
|
|
181
|
-
message: `Service '${serviceName}' has no restart policy.`,
|
|
182
|
-
service: serviceName,
|
|
183
|
-
path: `${targetPath}#services.${serviceName}.restart`,
|
|
184
|
-
suggestion: "Set restart: unless-stopped (or on-failure) to improve resilience."
|
|
185
|
-
});
|
|
186
|
-
}
|
|
187
|
-
|
|
188
|
-
// Rule: healthcheck
|
|
189
|
-
if (svc?.healthcheck == null) {
|
|
190
|
-
findings.push({
|
|
191
|
-
id: 'compose.missing-healthcheck',
|
|
192
|
-
title: 'Missing healthcheck',
|
|
193
|
-
severity: 'medium',
|
|
194
|
-
message: `Service '${serviceName}' has no healthcheck.`,
|
|
195
|
-
service: serviceName,
|
|
196
|
-
path: `${targetPath}#services.${serviceName}.healthcheck`,
|
|
197
|
-
suggestion: 'Add a healthcheck so orchestrators can detect broken containers (and dependent services can wait on healthy state).'
|
|
198
|
-
});
|
|
199
|
-
}
|
|
200
|
-
|
|
201
|
-
// Rule: runs as root
|
|
202
|
-
const user = svc?.user;
|
|
203
|
-
if (user == null || user === '0' || user === 0 || user === 'root') {
|
|
204
|
-
findings.push({
|
|
205
|
-
id: 'compose.runs-as-root',
|
|
206
|
-
title: 'Container likely runs as root',
|
|
207
|
-
severity: 'high',
|
|
208
|
-
message: `Service '${serviceName}' does not specify a non-root user (user:).`,
|
|
209
|
-
service: serviceName,
|
|
210
|
-
path: `${targetPath}#services.${serviceName}.user`,
|
|
211
|
-
suggestion: 'Set user: "1000:1000" (or a dedicated UID/GID) and ensure the image supports running unprivileged.'
|
|
212
|
-
});
|
|
213
|
-
}
|
|
214
|
-
|
|
215
|
-
// Rule: exposed sensitive ports
|
|
216
|
-
const ports = normalizePorts(svc?.ports);
|
|
217
|
-
for (const p of ports) {
|
|
218
|
-
const hostIp = p.hostIp;
|
|
219
|
-
const hostPort = p.hostPort;
|
|
220
|
-
const containerPort = p.containerPort;
|
|
221
|
-
|
|
222
|
-
const checkPort = containerPort ?? hostPort;
|
|
223
|
-
if (checkPort == null) continue;
|
|
224
|
-
if (!SENSITIVE_PORTS.has(checkPort)) continue;
|
|
225
|
-
|
|
226
|
-
const bindsAll = hostIp == null || hostIp === '0.0.0.0' || hostIp === '';
|
|
227
|
-
if (bindsAll) {
|
|
228
|
-
findings.push({
|
|
229
|
-
id: 'compose.exposed-sensitive-port',
|
|
230
|
-
title: 'Sensitive port exposed publicly',
|
|
231
|
-
severity: 'high',
|
|
232
|
-
message: `Service '${serviceName}' exposes a commonly sensitive port (${checkPort}) on all interfaces (ports: '${p.raw}').`,
|
|
233
|
-
service: serviceName,
|
|
234
|
-
path: `${targetPath}#services.${serviceName}.ports`,
|
|
235
|
-
suggestion: `Bind to 127.0.0.1 (e.g. '127.0.0.1:${hostPort ?? checkPort}:${containerPort ?? checkPort}') or remove the port and use an internal network.`
|
|
236
|
-
});
|
|
237
|
-
}
|
|
238
|
-
}
|
|
239
|
-
}
|
|
240
|
-
|
|
241
|
-
return findings;
|
|
242
|
-
}
|
package/src/sarif.ts
DELETED
|
@@ -1,74 +0,0 @@
|
|
|
1
|
-
import type { Finding } from './types.js';
|
|
2
|
-
|
|
3
|
-
// Minimal SARIF 2.1.0 generator for GitHub code scanning.
|
|
4
|
-
// Docs: https://docs.oasis-open.org/sarif/sarif/v2.1.0/sarif-v2.1.0.html
|
|
5
|
-
|
|
6
|
-
function level(sev: string): 'error' | 'warning' | 'note' {
|
|
7
|
-
const s = String(sev || '').toLowerCase();
|
|
8
|
-
if (s === 'high' || s === 'critical' || s === 'error') return 'error';
|
|
9
|
-
if (s === 'medium' || s === 'warn' || s === 'warning') return 'warning';
|
|
10
|
-
return 'note';
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
export function findingsToSarif(findings: Finding[], opts: { toolName?: string; repoRoot?: string } = {}) {
|
|
14
|
-
const toolName = opts.toolName || 'ConfigSentry';
|
|
15
|
-
|
|
16
|
-
const rulesById = new Map<string, any>();
|
|
17
|
-
for (const f of findings) {
|
|
18
|
-
if (!rulesById.has(f.id)) {
|
|
19
|
-
rulesById.set(f.id, {
|
|
20
|
-
id: f.id,
|
|
21
|
-
name: f.id,
|
|
22
|
-
shortDescription: { text: f.title },
|
|
23
|
-
fullDescription: { text: f.message },
|
|
24
|
-
help: { text: f.suggestion ? `${f.message}\n\nFix: ${f.suggestion}` : f.message },
|
|
25
|
-
defaultConfiguration: { level: level(f.severity) },
|
|
26
|
-
});
|
|
27
|
-
}
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
const results = findings.map((f) => {
|
|
31
|
-
const res: any = {
|
|
32
|
-
ruleId: f.id,
|
|
33
|
-
level: level(f.severity),
|
|
34
|
-
message: { text: f.suggestion ? `${f.message} Fix: ${f.suggestion}` : f.message },
|
|
35
|
-
properties: {
|
|
36
|
-
severity: f.severity,
|
|
37
|
-
service: f.service ?? undefined,
|
|
38
|
-
},
|
|
39
|
-
};
|
|
40
|
-
|
|
41
|
-
// Best-effort location: we store a pseudo "where" path today.
|
|
42
|
-
// If it contains "file#pointer", split it; else treat it as a file uri.
|
|
43
|
-
if (f.path) {
|
|
44
|
-
const [file, fragment] = String(f.path).split('#');
|
|
45
|
-
res.locations = [
|
|
46
|
-
{
|
|
47
|
-
physicalLocation: {
|
|
48
|
-
artifactLocation: { uri: file },
|
|
49
|
-
region: fragment ? { snippet: { text: fragment } } : undefined,
|
|
50
|
-
},
|
|
51
|
-
},
|
|
52
|
-
];
|
|
53
|
-
}
|
|
54
|
-
|
|
55
|
-
return res;
|
|
56
|
-
});
|
|
57
|
-
|
|
58
|
-
return {
|
|
59
|
-
version: '2.1.0',
|
|
60
|
-
$schema: 'https://json.schemastore.org/sarif-2.1.0.json',
|
|
61
|
-
runs: [
|
|
62
|
-
{
|
|
63
|
-
tool: {
|
|
64
|
-
driver: {
|
|
65
|
-
name: toolName,
|
|
66
|
-
informationUri: 'https://github.com/alfredMorgenstern/configsentry',
|
|
67
|
-
rules: Array.from(rulesById.values()),
|
|
68
|
-
},
|
|
69
|
-
},
|
|
70
|
-
results,
|
|
71
|
-
},
|
|
72
|
-
],
|
|
73
|
-
};
|
|
74
|
-
}
|
package/src/scan.ts
DELETED
|
@@ -1,47 +0,0 @@
|
|
|
1
|
-
import fs from 'node:fs/promises';
|
|
2
|
-
import path from 'node:path';
|
|
3
|
-
|
|
4
|
-
// Exact file names that `docker compose` recognizes by default. Directory
// scans match these verbatim; environment-specific variants such as
// docker-compose.prod.yml are matched by regex in resolveTargets instead.
const COMPOSE_FILENAMES = new Set([
  'docker-compose.yml',
  'docker-compose.yaml',
  'compose.yml',
  'compose.yaml',
]);
|
|
10
|
-
|
|
11
|
-
async function isFile(p: string) {
|
|
12
|
-
try {
|
|
13
|
-
return (await fs.stat(p)).isFile();
|
|
14
|
-
} catch {
|
|
15
|
-
return false;
|
|
16
|
-
}
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
async function isDir(p: string) {
|
|
20
|
-
try {
|
|
21
|
-
return (await fs.stat(p)).isDirectory();
|
|
22
|
-
} catch {
|
|
23
|
-
return false;
|
|
24
|
-
}
|
|
25
|
-
}
|
|
26
|
-
|
|
27
|
-
export async function resolveTargets(input: string): Promise<string[]> {
|
|
28
|
-
const abs = path.resolve(input);
|
|
29
|
-
|
|
30
|
-
if (await isFile(abs)) return [abs];
|
|
31
|
-
|
|
32
|
-
if (await isDir(abs)) {
|
|
33
|
-
const entries = await fs.readdir(abs);
|
|
34
|
-
const hits: string[] = [];
|
|
35
|
-
for (const e of entries) {
|
|
36
|
-
if (COMPOSE_FILENAMES.has(e)) hits.push(path.join(abs, e));
|
|
37
|
-
// Common pattern: docker-compose.prod.yml etc.
|
|
38
|
-
if (/^docker-compose\..+\.ya?ml$/i.test(e)) hits.push(path.join(abs, e));
|
|
39
|
-
if (/^compose\..+\.ya?ml$/i.test(e)) hits.push(path.join(abs, e));
|
|
40
|
-
}
|
|
41
|
-
// de-dupe
|
|
42
|
-
return Array.from(new Set(hits)).sort();
|
|
43
|
-
}
|
|
44
|
-
|
|
45
|
-
// Not a file/dir: treat as a path anyway (will fail later with a nice error)
|
|
46
|
-
return [abs];
|
|
47
|
-
}
|
package/src/types.ts
DELETED
|
@@ -1,16 +0,0 @@
|
|
|
1
|
-
// Severity buckets produced by rules. sarif.ts maps 'high' -> error,
// 'medium' -> warning, and everything else (incl. 'low') -> note.
export type Severity = 'low' | 'medium' | 'high';

// A single issue raised by one rule against one compose service/file.
export type Finding = {
  id: string; // stable rule identifier, e.g. 'compose.runs-as-root'
  title: string; // short human-readable rule summary
  severity: Severity;
  message: string; // full explanation (interpolates service name / port details)
  service?: string; // compose service the finding applies to, when applicable
  path?: string; // pseudo-location 'file#yaml.pointer'; split on '#' by the SARIF emitter
  suggestion?: string; // optional remediation hint, appended to SARIF help/message text
};

// Result of scanning one target file.
export type Report = {
  targetPath: string; // path of the scanned compose file
  findings: Finding[]; // all findings for that file (empty when clean)
};
|