guardrail-security 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/attack-surface/analyzer.d.ts +50 -0
- package/dist/attack-surface/analyzer.d.ts.map +1 -0
- package/dist/attack-surface/analyzer.js +83 -0
- package/dist/attack-surface/index.d.ts +5 -0
- package/dist/attack-surface/index.d.ts.map +1 -0
- package/dist/attack-surface/index.js +20 -0
- package/dist/index.d.ts +15 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +33 -0
- package/dist/languages/index.d.ts +21 -0
- package/dist/languages/index.d.ts.map +1 -0
- package/dist/languages/index.js +78 -0
- package/dist/languages/java-analyzer.d.ts +72 -0
- package/dist/languages/java-analyzer.d.ts.map +1 -0
- package/dist/languages/java-analyzer.js +417 -0
- package/dist/languages/python-analyzer.d.ts +70 -0
- package/dist/languages/python-analyzer.d.ts.map +1 -0
- package/dist/languages/python-analyzer.js +425 -0
- package/dist/license/compatibility-matrix.d.ts +28 -0
- package/dist/license/compatibility-matrix.d.ts.map +1 -0
- package/dist/license/compatibility-matrix.js +323 -0
- package/dist/license/engine.d.ts +77 -0
- package/dist/license/engine.d.ts.map +1 -0
- package/dist/license/engine.js +264 -0
- package/dist/license/index.d.ts +6 -0
- package/dist/license/index.d.ts.map +1 -0
- package/dist/license/index.js +21 -0
- package/dist/sbom/generator.d.ts +108 -0
- package/dist/sbom/generator.d.ts.map +1 -0
- package/dist/sbom/generator.js +271 -0
- package/dist/sbom/index.d.ts +5 -0
- package/dist/sbom/index.d.ts.map +1 -0
- package/dist/sbom/index.js +20 -0
- package/dist/secrets/guardian.d.ts +113 -0
- package/dist/secrets/guardian.d.ts.map +1 -0
- package/dist/secrets/guardian.js +334 -0
- package/dist/secrets/index.d.ts +10 -0
- package/dist/secrets/index.d.ts.map +1 -0
- package/dist/secrets/index.js +30 -0
- package/dist/secrets/patterns.d.ts +42 -0
- package/dist/secrets/patterns.d.ts.map +1 -0
- package/dist/secrets/patterns.js +165 -0
- package/dist/secrets/pre-commit.d.ts +39 -0
- package/dist/secrets/pre-commit.d.ts.map +1 -0
- package/dist/secrets/pre-commit.js +127 -0
- package/dist/secrets/vault-integration.d.ts +83 -0
- package/dist/secrets/vault-integration.d.ts.map +1 -0
- package/dist/secrets/vault-integration.js +295 -0
- package/dist/secrets/vault-providers.d.ts +110 -0
- package/dist/secrets/vault-providers.d.ts.map +1 -0
- package/dist/secrets/vault-providers.js +417 -0
- package/dist/supply-chain/detector.d.ts +80 -0
- package/dist/supply-chain/detector.d.ts.map +1 -0
- package/dist/supply-chain/detector.js +168 -0
- package/dist/supply-chain/index.d.ts +11 -0
- package/dist/supply-chain/index.d.ts.map +1 -0
- package/dist/supply-chain/index.js +26 -0
- package/dist/supply-chain/malicious-db.d.ts +41 -0
- package/dist/supply-chain/malicious-db.d.ts.map +1 -0
- package/dist/supply-chain/malicious-db.js +82 -0
- package/dist/supply-chain/script-analyzer.d.ts +54 -0
- package/dist/supply-chain/script-analyzer.d.ts.map +1 -0
- package/dist/supply-chain/script-analyzer.js +160 -0
- package/dist/supply-chain/typosquat.d.ts +58 -0
- package/dist/supply-chain/typosquat.d.ts.map +1 -0
- package/dist/supply-chain/typosquat.js +257 -0
- package/dist/supply-chain/vulnerability-db.d.ts +114 -0
- package/dist/supply-chain/vulnerability-db.d.ts.map +1 -0
- package/dist/supply-chain/vulnerability-db.js +310 -0
- package/package.json +34 -0
- package/src/__tests__/license/engine.test.ts +250 -0
- package/src/__tests__/supply-chain/typosquat.test.ts +191 -0
- package/src/attack-surface/analyzer.ts +152 -0
- package/src/attack-surface/index.ts +5 -0
- package/src/index.ts +21 -0
- package/src/languages/index.ts +91 -0
- package/src/languages/java-analyzer.ts +490 -0
- package/src/languages/python-analyzer.ts +498 -0
- package/src/license/compatibility-matrix.ts +366 -0
- package/src/license/engine.ts +345 -0
- package/src/license/index.ts +6 -0
- package/src/sbom/generator.ts +355 -0
- package/src/sbom/index.ts +5 -0
- package/src/secrets/guardian.ts +448 -0
- package/src/secrets/index.ts +10 -0
- package/src/secrets/patterns.ts +186 -0
- package/src/secrets/pre-commit.ts +158 -0
- package/src/secrets/vault-integration.ts +360 -0
- package/src/secrets/vault-providers.ts +446 -0
- package/src/supply-chain/detector.ts +252 -0
- package/src/supply-chain/index.ts +11 -0
- package/src/supply-chain/malicious-db.ts +103 -0
- package/src/supply-chain/script-analyzer.ts +194 -0
- package/src/supply-chain/typosquat.ts +302 -0
- package/src/supply-chain/vulnerability-db.ts +386 -0
package/package.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "name": "guardrail-security",
+  "version": "1.0.0",
+  "main": "./dist/index.js",
+  "files": ["dist/**/*", "src/**/*"],
+  "types": "./dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "default": "./dist/index.js"
+    }
+  },
+  "scripts": {
+    "build": "tsc --skipLibCheck",
+    "dev": "tsc --watch",
+    "test": "vitest",
+    "clean": "rm -rf dist"
+  },
+  "dependencies": {
+    "@guardrail/core": "workspace:*",
+    "@guardrail/database": "workspace:*",
+    "@aws-sdk/client-secrets-manager": "^3.490.0",
+    "@azure/keyvault-secrets": "^4.8.0",
+    "@azure/identity": "^4.0.0",
+    "@google-cloud/secret-manager": "^5.0.0",
+    "node-vault": "^0.10.2",
+    "glob": "^10.3.10"
+  },
+  "devDependencies": {
+    "@types/node": "^20.10.0",
+    "typescript": "^5.3.3",
+    "vitest": "^1.2.0"
+  }
+}
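The manifest points both "main" and the root "exports" entry at ./dist/index.js, with ./dist/index.d.ts supplying TypeScript types. Because an "exports" map is present, Node.js resolves only the subpaths it declares, so consumers go through the compiled barrel. A minimal consumer sketch (assumes the package is installed as a normal dependency; SBOMGenerator is re-exported from the root entry shown in package/src/index.ts at the end of this diff):

// Sketch: importing through the root "exports" entry, the only declared subpath.
import { SBOMGenerator } from "guardrail-security";

// Deep imports are not listed in "exports", so Node.js would refuse to resolve them:
// import { SBOMGenerator } from "guardrail-security/dist/sbom/generator";

export { SBOMGenerator };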
package/src/__tests__/license/engine.test.ts
ADDED
@@ -0,0 +1,250 @@
+/**
+ * License Compliance Engine Tests
+ *
+ * Test suite for the LicenseComplianceEngine class
+ */
+
+import { describe, it, expect, beforeEach, jest } from "@jest/globals";
+import { LicenseComplianceEngine } from "../../license/engine";
+
+// Import ResponseType
+type ResponseType =
+  | "basic"
+  | "cors"
+  | "default"
+  | "error"
+  | "opaque"
+  | "opaqueredirect";
+
+// Mock fs
+jest.mock("fs", () => ({
+  readFileSync: jest.fn<(path: string) => string>().mockReturnValue(""),
+  existsSync: jest.fn<(path: string) => boolean>(() => true),
+}));
+
+// Mock prisma
+jest.mock("@guardrail/database", () => ({
+  prisma: {
+    licenseAnalysis: {
+      findMany: jest.fn<() => Promise<any[]>>().mockResolvedValue([]),
+      create: jest.fn<() => Promise<any>>().mockResolvedValue({}),
+      update: jest.fn<() => Promise<any>>().mockResolvedValue({}),
+      delete: jest.fn<() => Promise<any>>().mockResolvedValue({}),
+      findUnique: jest.fn<() => Promise<any>>().mockResolvedValue(null),
+    },
+  },
+}));
+
+// Mock fetch
+const mockFetch = jest.fn<() => Promise<Response>>();
+global.fetch = mockFetch;
+
+describe("LicenseComplianceEngine", () => {
+  let engine: LicenseComplianceEngine;
+
+  beforeEach(() => {
+    engine = new LicenseComplianceEngine();
+    jest.clearAllMocks();
+  });
+
+  describe("license fetching", () => {
+    it("should fetch license from npm registry", async () => {
+      const mockResponse = {
+        ok: true,
+        json: jest.fn<() => Promise<any>>().mockResolvedValue({
+          "dist-tags": { latest: "1.0.0" },
+          versions: {
+            "1.0.0": {
+              license: "MIT",
+            },
+          },
+        }),
+        headers: new Headers(),
+        status: 200,
+        statusText: "OK",
+        type: "basic" as ResponseType,
+        url: "",
+        redirected: false,
+        clone: jest.fn<() => Response>(),
+        body: null,
+        bodyUsed: false,
+        bytes: jest.fn<() => Promise<Uint8Array<ArrayBufferLike>>>(),
+        text: jest.fn<() => Promise<string>>(),
+        blob: jest.fn<() => Promise<Blob>>(),
+        formData: jest.fn<() => Promise<FormData>>(),
+        arrayBuffer: jest.fn<() => Promise<ArrayBuffer>>(),
+      };
+
+      mockFetch.mockResolvedValue(mockResponse);
+
+      // Access private method through type assertion
+      const result = await (engine as any).fetchLicenseFromRegistry(
+        "test-package",
+      );
+
+      expect(result.license).toBe("MIT");
+      expect(result.category).toBe("permissive");
+    });
+
+    it("should handle SPDX expressions", async () => {
+      const mockResponse = {
+        ok: true,
+        json: jest.fn<() => Promise<any>>().mockResolvedValue({
+          "dist-tags": { latest: "1.0.0" },
+          versions: {
+            "1.0.0": {
+              license: {
+                type: "MIT",
+              },
+            },
+          },
+        }),
+        headers: new Headers(),
+        status: 200,
+        statusText: "OK",
+        type: "basic" as ResponseType,
+        url: "",
+        redirected: false,
+        clone: jest.fn<() => Response>(),
+        body: null,
+        bodyUsed: false,
+        bytes: jest.fn<() => Promise<Uint8Array<ArrayBufferLike>>>(),
+        text: jest.fn<() => Promise<string>>(),
+        blob: jest.fn<() => Promise<Blob>>(),
+        formData: jest.fn<() => Promise<FormData>>(),
+        arrayBuffer: jest.fn<() => Promise<ArrayBuffer>>(),
+      };
+
+      mockFetch.mockResolvedValue(mockResponse);
+
+      const result = await (engine as any).fetchLicenseFromRegistry(
+        "test-package",
+      );
+
+      expect(result.license).toBe("MIT");
+    });
+
+    it("should normalize license names", async () => {
+      // Test the normalizeLicenseName method directly
+      const result = (engine as any).normalizeLicenseName("Apache 2.0");
+      expect(result).toBe("Apache-2.0");
+    });
+
+    it("should use fallback on network error", async () => {
+      mockFetch.mockRejectedValue(new Error("Network error"));
+
+      const result = await (engine as any).fetchLicenseFromRegistry(
+        "private-package",
+      );
+
+      expect(result.license).toBe("UNKNOWN");
+      expect(result.category).toBe("unknown");
+    });
+  });
+
+  describe("license categorization", () => {
+    it("should categorize MIT as permissive", () => {
+      const result = (engine as any).categorizeLicense("MIT");
+      expect(result).toBe("permissive");
+    });
+
+    it("should categorize GPL as copyleft", () => {
+      const result = (engine as any).categorizeLicense("GPL-3.0");
+      expect(result).toBe("copyleft");
+    });
+
+    it("should categorize LGPL as weak-copyleft", () => {
+      const result = (engine as any).categorizeLicense("LGPL-3.0");
+      expect(result).toBe("weak-copyleft");
+    });
+
+    it("should categorize unknown licenses", () => {
+      const result = (engine as any).categorizeLicense("UNKNOWN-LICENSE");
+      expect(result).toBe("unknown");
+    });
+  });
+
+  describe("conflict detection", () => {
+    it("should detect GPL contamination in proprietary project", () => {
+      const dependencies = [
+        { name: "dep1", license: "MIT", category: "permissive" },
+        { name: "dep2", license: "GPL-3.0", category: "copyleft" },
+      ];
+
+      const conflicts = (engine as any).detectGPLContamination(
+        dependencies,
+        "Proprietary",
+      );
+
+      expect(conflicts).toHaveLength(2);
+      expect(conflicts[0].dependency).toBe("dep1");
+      expect(conflicts[0].severity).toBe("warning");
+      expect(conflicts[1].dependency).toBe("dep2");
+      expect(conflicts[1].severity).toBe("error");
+    });
+
+    it("should allow LGPL in Apache project", () => {
+      const dependencies = [
+        { name: "dep1", license: "Apache-2.0", category: "permissive" },
+        { name: "dep2", license: "LGPL-3.0", category: "weak-copyleft" },
+      ];
+
+      const conflicts = (engine as any).detectGPLContamination(
+        dependencies,
+        "Apache-2.0",
+      );
+
+      expect(conflicts).toHaveLength(0);
+    });
+  });
+
+  describe("project analysis", () => {
+    it("should analyze simple project", async () => {
+      const mockFs = await import("fs");
+      jest.mocked(mockFs.readFileSync).mockReturnValue(
+        JSON.stringify({
+          dependencies: {
+            express: "^4.18.0",
+            lodash: "^4.17.21",
+          },
+        }),
+      );
+
+      // Mock fetch responses
+      mockFetch.mockResolvedValue({
+        ok: true,
+        json: jest.fn<() => Promise<any>>().mockResolvedValue({
+          "dist-tags": { latest: "1.0.0" },
+          versions: {
+            "1.0.0": { license: "MIT" },
+          },
+        }),
+        headers: new Headers(),
+        status: 200,
+        statusText: "OK",
+        type: "basic" as ResponseType,
+        url: "",
+        redirected: false,
+        clone: jest.fn<() => Response>(),
+        body: null,
+        bodyUsed: false,
+        bytes: jest.fn<() => Promise<Uint8Array<ArrayBufferLike>>>(),
+        text: jest.fn<() => Promise<string>>(),
+        blob: jest.fn<() => Promise<Blob>>(),
+        formData: jest.fn<() => Promise<FormData>>(),
+        arrayBuffer: jest.fn<() => Promise<ArrayBuffer>>(),
+      });
+
+      const result = await engine.analyzeProject(
+        "/test/path",
+        "test-project",
+        "MIT",
+      );
+
+      expect(result.projectId).toBe("test-project");
+      expect(result.projectLicense).toBe("MIT");
+      expect(result.dependencies).toHaveLength(2);
+      expect(result.overallStatus).toBe("compliant");
+    });
+  });
+});
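The mocked responses above mirror the shape of an npm registry packument: GET https://registry.npmjs.org/<name> returns a "dist-tags".latest pointer and a versions map whose entries carry a license field, either an SPDX string or the legacy { type: "..." } object form the second test exercises. A rough sketch of the lookup the mock stands in for (the exact URL, categorization, and fallback logic inside fetchLicenseFromRegistry are not shown in this diff and are assumptions; only the response shape and the UNKNOWN/unknown fallback come from the tests):

// Sketch of a registry license lookup matching the mocked packument shape above.
async function lookupLicense(pkg: string): Promise<{ license: string; category: string }> {
  try {
    const res = await fetch(`https://registry.npmjs.org/${encodeURIComponent(pkg)}`);
    if (!res.ok) throw new Error(`registry responded ${res.status}`);
    const doc: any = await res.json();
    const latest = doc["dist-tags"]?.latest;
    const raw = doc.versions?.[latest]?.license;
    // Legacy manifests publish { type: "MIT" } instead of a plain SPDX string.
    const license = typeof raw === "string" ? raw : (raw?.type ?? "UNKNOWN");
    // Placeholder categorization; the real engine presumably consults compatibility-matrix.ts.
    const category = license === "MIT" ? "permissive" : "unknown";
    return { license, category };
  } catch {
    return { license: "UNKNOWN", category: "unknown" }; // mirrors the network-error test
  }
}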
package/src/__tests__/supply-chain/typosquat.test.ts
ADDED
@@ -0,0 +1,191 @@
+/**
+ * Typosquat Detector Tests
+ *
+ * Test suite for the TyposquatDetector class
+ */
+
+import { describe, it, expect, beforeEach } from "@jest/globals";
+import { TyposquatDetector } from "../../supply-chain/typosquat";
+
+describe("TyposquatDetector", () => {
+  let detector: TyposquatDetector;
+
+  beforeEach(() => {
+    detector = new TyposquatDetector();
+  });
+
+  describe("Exact Match Detection", () => {
+    const legitimatePackages = [
+      "react",
+      "vue",
+      "express",
+      "lodash",
+      "typescript",
+      "axios",
+      "webpack",
+      "jest",
+    ];
+
+    it.each(legitimatePackages)(
+      "should NOT flag legitimate package: %s",
+      async (pkg) => {
+        const result = await detector.detectTyposquatting(pkg);
+        expect(result.isTyposquat).toBe(false);
+      },
+    );
+  });
+
+  describe("Character Swap Detection", () => {
+    const swapAttempts = [
+      { input: "raect", target: "react" },
+      { input: "exrpess", target: "express" },
+      { input: "loadsh", target: "lodash" },
+      { input: "axois", target: "axios" },
+    ];
+
+    it.each(swapAttempts)(
+      "should detect character swap: $input -> $target",
+      async ({ input, target }) => {
+        const result = await detector.detectTyposquatting(input);
+        expect(result.isTyposquat).toBe(true);
+        expect(result.targetPackage).toBe(target);
+        expect(result.patterns).toContain("character_swap");
+      },
+    );
+  });
+
+  describe("Missing Character Detection", () => {
+    const missingCharAttempts = [
+      { input: "rect", target: "react" },
+      { input: "expres", target: "express" },
+      { input: "lodas", target: "lodash" },
+      { input: "webpck", target: "webpack" },
+    ];
+
+    it.each(missingCharAttempts)(
+      "should detect missing character: $input -> $target",
+      async ({ input, target }) => {
+        const result = await detector.detectTyposquatting(input);
+        expect(result.isTyposquat).toBe(true);
+        expect(result.targetPackage).toBe(target);
+        expect(result.patterns).toContain("missing_character");
+      },
+    );
+  });
+
+  describe("Extra Character Detection", () => {
+    const extraCharAttempts = [
+      { input: "reactt", target: "react" },
+      { input: "expresss", target: "express" },
+      { input: "lodassh", target: "lodash" },
+      { input: "axioss", target: "axios" },
+    ];
+
+    it.each(extraCharAttempts)(
+      "should detect extra character: $input -> $target",
+      async ({ input, target }) => {
+        const result = await detector.detectTyposquatting(input);
+        expect(result.isTyposquat).toBe(true);
+        expect(result.targetPackage).toBe(target);
+        expect(result.patterns).toContain("extra_character");
+      },
+    );
+  });
+
+  describe("Homoglyph Detection", () => {
+    const homoglyphAttempts = [
+      { input: "reаct", description: "cyrillic a" },
+      { input: "exprеss", description: "cyrillic e" },
+      { input: "l0dash", description: "zero for o" },
+      { input: "ax1os", description: "one for i" },
+    ];
+
+    it.each(homoglyphAttempts)(
+      "should detect homoglyph attack: $input ($description)",
+      async ({ input }) => {
+        const result = await detector.detectTyposquatting(input);
+        // Homoglyph detection may vary based on implementation
+        expect(result.similarity).toBeGreaterThan(0);
+      },
+    );
+  });
+
+  describe("Combosquatting Detection", () => {
+    const comboAttempts = [
+      { input: "react-native-helper", target: "react" },
+      { input: "express-security", target: "express" },
+      { input: "lodash-utils", target: "lodash" },
+      { input: "axios-wrapper", target: "axios" },
+    ];
+
+    it.each(comboAttempts)(
+      "should detect combosquatting: $input -> $target",
+      async ({ input }) => {
+        const result = await detector.detectTyposquatting(input);
+        if (result.isTyposquat) {
+          expect(result.patterns).toContain("combosquatting");
+        }
+      },
+    );
+  });
+
+  describe("Levenshtein Distance", () => {
+    it("should calculate similarity based on Levenshtein distance", async () => {
+      const result = await detector.detectTyposquatting("reakt");
+      expect(result.similarity).toBeGreaterThan(0.7);
+    });
+
+    it("should have low similarity for unrelated packages", async () => {
+      const result = await detector.detectTyposquatting(
+        "completely-different-package",
+      );
+      expect(result.similarity).toBeLessThan(0.5);
+    });
+  });
+
+  describe("Result Structure", () => {
+    it("should return complete result structure", async () => {
+      const result = await detector.detectTyposquatting("reakt");
+
+      expect(result).toHaveProperty("isTyposquat");
+      expect(result).toHaveProperty("suspiciousPackage");
+      expect(result).toHaveProperty("similarity");
+      expect(result).toHaveProperty("patterns");
+      expect(Array.isArray(result.patterns)).toBe(true);
+    });
+
+    it("should include target package when typosquat detected", async () => {
+      const result = await detector.detectTyposquatting("reakt");
+
+      if (result.isTyposquat) {
+        expect(result.targetPackage).toBeDefined();
+      }
+    });
+  });
+
+  describe("Edge Cases", () => {
+    it("should handle empty string", async () => {
+      const result = await detector.detectTyposquatting("");
+      expect(result.isTyposquat).toBe(false);
+    });
+
+    it("should handle very long package names", async () => {
+      const longName = "a".repeat(100);
+      const result = await detector.detectTyposquatting(longName);
+      expect(result).toBeDefined();
+    });
+
+    it("should handle special characters", async () => {
+      const result = await detector.detectTyposquatting("@scope/react");
+      expect(result).toBeDefined();
+    });
+
+    it("should be case insensitive", async () => {
+      const result1 = await detector.detectTyposquatting("REACT");
+      const result2 = await detector.detectTyposquatting("React");
+      // Both should handle case variations
+      expect(result1).toBeDefined();
+      expect(result2).toBeDefined();
+    });
+  });
+});
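The similarity thresholds in the Levenshtein tests (above 0.7 for "reakt" vs "react", below 0.5 for an unrelated name) are consistent with an edit distance normalized by the longer string's length: levenshtein("reakt", "react") is 1, so 1 - 1/5 = 0.8. The package's actual formula is not visible in this diff; the sketch below is one common normalization that satisfies those expectations:

// Sketch: edit-distance similarity consistent with the test thresholds above.
// Illustrative only; not the package's own typosquat.ts implementation.
function levenshtein(a: string, b: string): number {
  const dp: number[][] = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)),
  );
  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      const cost = a[i - 1] === b[j - 1] ? 0 : 1;
      dp[i][j] = Math.min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost);
    }
  }
  return dp[a.length][b.length];
}

function similarity(candidate: string, target: string): number {
  const distance = levenshtein(candidate.toLowerCase(), target.toLowerCase());
  return 1 - distance / Math.max(candidate.length, target.length, 1);
}

similarity("reakt", "react"); // 0.8, above the 0.7 threshold
similarity("completely-different-package", "react"); // well below 0.5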
package/src/attack-surface/analyzer.ts
ADDED
@@ -0,0 +1,152 @@
+import { prisma } from "@guardrail/database";
+
+export interface EntryPoint {
+  type: "http" | "graphql" | "websocket" | "grpc";
+  path: string;
+  method?: string;
+  file: string;
+  line: number;
+  authentication?: string;
+  rateLimit?: string;
+  parameters: ParameterInfo[];
+}
+
+export interface ParameterInfo {
+  name: string;
+  type: string;
+  required: boolean;
+  validated: boolean;
+}
+
+export interface APISecurityFinding {
+  category: string;
+  severity: "low" | "medium" | "high" | "critical";
+  endpoint: string;
+  description: string;
+  recommendation: string;
+}
+
+export interface AttackPath {
+  id: string;
+  entry: string;
+  steps: string[];
+  impact: string;
+  likelihood: "low" | "medium" | "high";
+}
+
+export interface AttackSurfaceAnalysisResult {
+  projectId: string;
+  summary: {
+    totalEntryPoints: number;
+    byType: Record<string, number>;
+    risksByLevel: Record<string, number>;
+  };
+  entryPoints: EntryPoint[];
+  attackPaths: AttackPath[];
+  apiFindings: APISecurityFinding[];
+}
+
+export class AttackSurfaceAnalyzer {
+  async analyzeProject(
+    projectPath: string,
+    projectId: string,
+  ): Promise<AttackSurfaceAnalysisResult> {
+    const entryPoints = await this.scanHTTPEndpoints(projectPath);
+    const apiFindings = await this.analyzeEndpoints(entryPoints);
+    const attackPaths = await this.buildAttackPaths(entryPoints, apiFindings);
+
+    const byType: Record<string, number> = {};
+    const risksByLevel: Record<string, number> = {};
+
+    for (const ep of entryPoints) {
+      byType[ep.type] = (byType[ep.type] || 0) + 1;
+    }
+
+    for (const finding of apiFindings) {
+      risksByLevel[finding.severity] =
+        (risksByLevel[finding.severity] || 0) + 1;
+    }
+
+    const result: AttackSurfaceAnalysisResult = {
+      projectId,
+      summary: {
+        totalEntryPoints: entryPoints.length,
+        byType,
+        risksByLevel,
+      },
+      entryPoints,
+      attackPaths,
+      apiFindings,
+    };
+
+    await prisma.attackSurfaceAnalysis.create({
+      data: {
+        projectId,
+        summary: JSON.parse(JSON.stringify(result.summary)),
+        endpoints: JSON.parse(JSON.stringify(entryPoints)),
+        attackPaths: JSON.parse(JSON.stringify(attackPaths)),
+        apiFindings: JSON.parse(JSON.stringify(apiFindings)),
+      },
+    });
+
+    return result;
+  }
+
+  private async scanHTTPEndpoints(_projectPath: string): Promise<EntryPoint[]> {
+    // In production, would use AST parsing to find routes
+    return [];
+  }
+
+  private async analyzeEndpoints(
+    entryPoints: EntryPoint[],
+  ): Promise<APISecurityFinding[]> {
+    const findings: APISecurityFinding[] = [];
+
+    for (const ep of entryPoints) {
+      if (!ep.authentication) {
+        findings.push({
+          category: "Broken Authentication",
+          severity: "high",
+          endpoint: ep.path,
+          description: "No authentication detected",
+          recommendation: "Add authentication middleware",
+        });
+      }
+
+      if (!ep.rateLimit) {
+        findings.push({
+          category: "Unrestricted Resource Consumption",
+          severity: "medium",
+          endpoint: ep.path,
+          description: "No rate limiting detected",
+          recommendation: "Add rate limiting middleware",
+        });
+      }
+    }
+
+    return findings;
+  }
+
+  private async buildAttackPaths(
+    _entryPoints: EntryPoint[],
+    _findings: APISecurityFinding[],
+  ): Promise<AttackPath[]> {
+    return [];
+  }
+
+  async generateVisualization(
+    analysis: AttackSurfaceAnalysisResult,
+  ): Promise<string> {
+    let mermaid = "graph TD\n";
+    mermaid += " Start[External User]\n";
+
+    for (const ep of analysis.entryPoints) {
+      const epId = ep.path.replace(/[^a-zA-Z0-9]/g, "_");
+      mermaid += ` Start --> ${epId}[${ep.method} ${ep.path}]\n`;
+    }
+
+    return mermaid;
+  }
+}
+
+export const attackSurfaceAnalyzer = new AttackSurfaceAnalyzer();
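analyzeProject scans for entry points, flags endpoints without authentication (high) or rate limiting (medium), rolls the findings up into summary counters, persists a JSON snapshot through the @guardrail/database Prisma client, and returns the result; generateVisualization then renders the entry points as a Mermaid graph. A minimal usage sketch (assumes the attack-surface barrel re-exports the attackSurfaceAnalyzer singleton and that a Prisma client with an attackSurfaceAnalysis model is configured; the path and id are placeholders):

// Sketch: running the analyzer and printing its Mermaid attack-surface graph.
// analyzeProject() writes to the database via prisma.attackSurfaceAnalysis.create(),
// so a configured @guardrail/database client is required.
import { attackSurfaceAnalyzer } from "guardrail-security";

async function run() {
  const analysis = await attackSurfaceAnalyzer.analyzeProject(
    "/path/to/project", // placeholder project path
    "project-123", // placeholder project id
  );
  console.log(`entry points: ${analysis.summary.totalEntryPoints}`);
  console.log(await attackSurfaceAnalyzer.generateVisualization(analysis)); // "graph TD\n..."
}

run().catch(console.error);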
package/src/index.ts
ADDED
@@ -0,0 +1,21 @@
+/**
+ * Guardrail Security Package
+ *
+ * Comprehensive security layer including:
+ * - Secrets & Credential Guardian
+ * - Supply Chain Attack Detection
+ * - License Compliance Engine
+ * - Attack Surface Analyzer
+ */
+
+export * from './secrets';
+export * from './supply-chain';
+export * from './license';
+export * from './attack-surface';
+export {
+  SBOMGenerator,
+  sbomGenerator,
+  type SBOMFormat,
+  type SBOMGeneratorOptions,
+  type SBOMDependency,
+} from './sbom';
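The root barrel re-exports the secrets, supply-chain, license, and attack-surface modules wholesale and names the SBOM generator explicitly, so everything in this diff is reachable from a single import. A short sketch tying it together (the class names and signatures are the ones exercised by the test files above; that the sub-module barrels re-export them is an assumption, since their index files appear in this diff only as line counts):

// Sketch: consuming the root barrel. analyzeProject(path, id, license) and
// detectTyposquatting(name) follow the signatures used in the test files.
import { LicenseComplianceEngine, TyposquatDetector } from "guardrail-security";

async function audit(projectPath: string, projectId: string) {
  const licenses = await new LicenseComplianceEngine().analyzeProject(
    projectPath,
    projectId,
    "MIT", // declared project license, as in the engine tests
  );
  const squat = await new TyposquatDetector().detectTyposquatting("lodahs"); // hypothetical name
  return { licenses: licenses.overallStatus, typosquat: squat.isTyposquat };
}

export { audit };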