@mainahq/core 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -1
- package/src/ai/__tests__/delegation.test.ts +105 -0
- package/src/ai/delegation.ts +111 -0
- package/src/ai/try-generate.ts +17 -0
- package/src/cloud/__tests__/auth.test.ts +164 -0
- package/src/cloud/__tests__/client.test.ts +253 -0
- package/src/cloud/auth.ts +232 -0
- package/src/cloud/client.ts +190 -0
- package/src/cloud/types.ts +106 -0
- package/src/context/relevance.ts +5 -0
- package/src/context/retrieval.ts +3 -0
- package/src/context/semantic.ts +3 -0
- package/src/feedback/__tests__/trace-analysis.test.ts +98 -0
- package/src/feedback/trace-analysis.ts +153 -0
- package/src/index.ts +55 -0
- package/src/init/__tests__/init.test.ts +51 -0
- package/src/init/index.ts +43 -0
- package/src/language/__tests__/detect.test.ts +61 -1
- package/src/language/__tests__/profile.test.ts +68 -1
- package/src/language/detect.ts +33 -3
- package/src/language/profile.ts +67 -2
- package/src/ticket/index.ts +5 -0
- package/src/verify/__tests__/consistency.test.ts +98 -0
- package/src/verify/__tests__/lighthouse.test.ts +215 -0
- package/src/verify/__tests__/linters/checkstyle.test.ts +23 -0
- package/src/verify/__tests__/linters/dotnet-format.test.ts +18 -0
- package/src/verify/__tests__/pipeline.test.ts +21 -2
- package/src/verify/__tests__/typecheck.test.ts +160 -0
- package/src/verify/__tests__/zap.test.ts +188 -0
- package/src/verify/consistency.ts +199 -0
- package/src/verify/detect.ts +13 -1
- package/src/verify/lighthouse.ts +173 -0
- package/src/verify/linters/checkstyle.ts +41 -0
- package/src/verify/linters/dotnet-format.ts +37 -0
- package/src/verify/pipeline.ts +20 -2
- package/src/verify/syntax-guard.ts +8 -0
- package/src/verify/typecheck.ts +178 -0
- package/src/verify/zap.ts +189 -0
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
import { describe, expect, it } from "bun:test";
|
|
2
|
+
import { parseZapJson, runZap } from "../zap";
|
|
3
|
+
|
|
4
|
+
// ─── parseZapJson ─────────────────────────────────────────────────────────
|
|
5
|
+
|
|
6
|
+
describe("parseZapJson", () => {
|
|
7
|
+
it("should return empty array for empty alerts", () => {
|
|
8
|
+
const json = JSON.stringify({ site: [{ alerts: [] }] });
|
|
9
|
+
const findings = parseZapJson(json);
|
|
10
|
+
expect(findings).toEqual([]);
|
|
11
|
+
});
|
|
12
|
+
|
|
13
|
+
it("should parse a single alert from ZAP JSON output", () => {
|
|
14
|
+
const json = JSON.stringify({
|
|
15
|
+
site: [
|
|
16
|
+
{
|
|
17
|
+
alerts: [
|
|
18
|
+
{
|
|
19
|
+
pluginid: "10021",
|
|
20
|
+
alert: "X-Content-Type-Options Header Missing",
|
|
21
|
+
riskdesc: "Low (Medium)",
|
|
22
|
+
desc: "The Anti-MIME-Sniffing header is not set.",
|
|
23
|
+
instances: [
|
|
24
|
+
{
|
|
25
|
+
uri: "https://example.com/api/health",
|
|
26
|
+
method: "GET",
|
|
27
|
+
},
|
|
28
|
+
],
|
|
29
|
+
},
|
|
30
|
+
],
|
|
31
|
+
},
|
|
32
|
+
],
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
const findings = parseZapJson(json);
|
|
36
|
+
expect(findings.length).toBe(1);
|
|
37
|
+
expect(findings[0]?.tool).toBe("zap");
|
|
38
|
+
expect(findings[0]?.file).toBe("https://example.com/api/health");
|
|
39
|
+
expect(findings[0]?.line).toBe(0);
|
|
40
|
+
expect(findings[0]?.message).toContain(
|
|
41
|
+
"X-Content-Type-Options Header Missing",
|
|
42
|
+
);
|
|
43
|
+
expect(findings[0]?.severity).toBe("info");
|
|
44
|
+
expect(findings[0]?.ruleId).toBe("10021");
|
|
45
|
+
});
|
|
46
|
+
|
|
47
|
+
it("should map ZAP risk levels to severity correctly", () => {
|
|
48
|
+
const makeZap = (riskdesc: string) =>
|
|
49
|
+
JSON.stringify({
|
|
50
|
+
site: [
|
|
51
|
+
{
|
|
52
|
+
alerts: [
|
|
53
|
+
{
|
|
54
|
+
pluginid: "10001",
|
|
55
|
+
alert: "Test Alert",
|
|
56
|
+
riskdesc,
|
|
57
|
+
desc: "Test description",
|
|
58
|
+
instances: [{ uri: "https://example.com", method: "GET" }],
|
|
59
|
+
},
|
|
60
|
+
],
|
|
61
|
+
},
|
|
62
|
+
],
|
|
63
|
+
});
|
|
64
|
+
|
|
65
|
+
expect(parseZapJson(makeZap("High (Medium)"))[0]?.severity).toBe("error");
|
|
66
|
+
expect(parseZapJson(makeZap("Medium (Low)"))[0]?.severity).toBe("warning");
|
|
67
|
+
expect(parseZapJson(makeZap("Low (Medium)"))[0]?.severity).toBe("info");
|
|
68
|
+
expect(parseZapJson(makeZap("Informational (Low)"))[0]?.severity).toBe(
|
|
69
|
+
"info",
|
|
70
|
+
);
|
|
71
|
+
});
|
|
72
|
+
|
|
73
|
+
it("should handle multiple alerts with multiple instances", () => {
|
|
74
|
+
const json = JSON.stringify({
|
|
75
|
+
site: [
|
|
76
|
+
{
|
|
77
|
+
alerts: [
|
|
78
|
+
{
|
|
79
|
+
pluginid: "10021",
|
|
80
|
+
alert: "Alert A",
|
|
81
|
+
riskdesc: "High (Medium)",
|
|
82
|
+
desc: "Description A",
|
|
83
|
+
instances: [
|
|
84
|
+
{ uri: "https://example.com/a", method: "GET" },
|
|
85
|
+
{ uri: "https://example.com/b", method: "POST" },
|
|
86
|
+
],
|
|
87
|
+
},
|
|
88
|
+
{
|
|
89
|
+
pluginid: "10022",
|
|
90
|
+
alert: "Alert B",
|
|
91
|
+
riskdesc: "Low (Low)",
|
|
92
|
+
desc: "Description B",
|
|
93
|
+
instances: [{ uri: "https://example.com/c", method: "GET" }],
|
|
94
|
+
},
|
|
95
|
+
],
|
|
96
|
+
},
|
|
97
|
+
],
|
|
98
|
+
});
|
|
99
|
+
|
|
100
|
+
const findings = parseZapJson(json);
|
|
101
|
+
expect(findings.length).toBe(3);
|
|
102
|
+
expect(findings[0]?.file).toBe("https://example.com/a");
|
|
103
|
+
expect(findings[1]?.file).toBe("https://example.com/b");
|
|
104
|
+
expect(findings[2]?.file).toBe("https://example.com/c");
|
|
105
|
+
});
|
|
106
|
+
|
|
107
|
+
it("should handle alerts with no instances", () => {
|
|
108
|
+
const json = JSON.stringify({
|
|
109
|
+
site: [
|
|
110
|
+
{
|
|
111
|
+
alerts: [
|
|
112
|
+
{
|
|
113
|
+
pluginid: "10021",
|
|
114
|
+
alert: "No Instances Alert",
|
|
115
|
+
riskdesc: "Medium (Medium)",
|
|
116
|
+
desc: "No instances",
|
|
117
|
+
instances: [],
|
|
118
|
+
},
|
|
119
|
+
],
|
|
120
|
+
},
|
|
121
|
+
],
|
|
122
|
+
});
|
|
123
|
+
|
|
124
|
+
const findings = parseZapJson(json);
|
|
125
|
+
expect(findings.length).toBe(1);
|
|
126
|
+
expect(findings[0]?.file).toBe("");
|
|
127
|
+
expect(findings[0]?.message).toContain("No Instances Alert");
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
it("should return empty array for invalid JSON", () => {
|
|
131
|
+
const findings = parseZapJson("not valid json {{{");
|
|
132
|
+
expect(findings).toEqual([]);
|
|
133
|
+
});
|
|
134
|
+
|
|
135
|
+
it("should return empty array for malformed structure", () => {
|
|
136
|
+
const findings = parseZapJson(JSON.stringify({ unexpected: true }));
|
|
137
|
+
expect(findings).toEqual([]);
|
|
138
|
+
});
|
|
139
|
+
|
|
140
|
+
it("should handle missing site array gracefully", () => {
|
|
141
|
+
const findings = parseZapJson(JSON.stringify({ site: "not-an-array" }));
|
|
142
|
+
expect(findings).toEqual([]);
|
|
143
|
+
});
|
|
144
|
+
|
|
145
|
+
it("should handle site entries without alerts", () => {
|
|
146
|
+
const json = JSON.stringify({
|
|
147
|
+
site: [{ name: "example.com" }],
|
|
148
|
+
});
|
|
149
|
+
const findings = parseZapJson(json);
|
|
150
|
+
expect(findings).toEqual([]);
|
|
151
|
+
});
|
|
152
|
+
});
|
|
153
|
+
|
|
154
|
+
// ─── runZap ───────────────────────────────────────────────────────────────
|
|
155
|
+
|
|
156
|
+
describe("runZap", () => {
|
|
157
|
+
it("should skip when docker is not available", async () => {
|
|
158
|
+
const result = await runZap({
|
|
159
|
+
targetUrl: "https://example.com",
|
|
160
|
+
cwd: "/tmp",
|
|
161
|
+
available: false,
|
|
162
|
+
});
|
|
163
|
+
expect(result.findings).toEqual([]);
|
|
164
|
+
expect(result.skipped).toBe(true);
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
it("should skip when no targetUrl is provided", async () => {
|
|
168
|
+
const result = await runZap({
|
|
169
|
+
targetUrl: "",
|
|
170
|
+
cwd: "/tmp",
|
|
171
|
+
available: true,
|
|
172
|
+
});
|
|
173
|
+
expect(result.findings).toEqual([]);
|
|
174
|
+
expect(result.skipped).toBe(true);
|
|
175
|
+
});
|
|
176
|
+
|
|
177
|
+
it("should return correct result shape", async () => {
|
|
178
|
+
const result = await runZap({
|
|
179
|
+
targetUrl: "https://example.com",
|
|
180
|
+
cwd: "/tmp",
|
|
181
|
+
available: false,
|
|
182
|
+
});
|
|
183
|
+
expect(result).toHaveProperty("findings");
|
|
184
|
+
expect(result).toHaveProperty("skipped");
|
|
185
|
+
expect(Array.isArray(result.findings)).toBe(true);
|
|
186
|
+
expect(typeof result.skipped).toBe("boolean");
|
|
187
|
+
});
|
|
188
|
+
});
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cross-function Consistency Check — deterministic AST-based analysis.
|
|
3
|
+
*
|
|
4
|
+
* Catches the class of bug that lost Maina 2 points in the Tier 3 benchmark:
|
|
5
|
+
* functions that call a validator on one code path but skip it on another.
|
|
6
|
+
*
|
|
7
|
+
* Two modes:
|
|
8
|
+
* 1. Spec-based: reads spec.md / constitution.md for stated constraints,
|
|
9
|
+
* builds a rule set, checks compliance
|
|
10
|
+
* 2. Heuristic: if no spec exists, looks for inconsistent validator usage
|
|
11
|
+
* patterns across related functions
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { existsSync, readFileSync } from "node:fs";
|
|
15
|
+
import { join } from "node:path";
|
|
16
|
+
|
|
17
|
+
import type { Finding } from "./diff-filter";
|
|
18
|
+
|
|
19
|
+
// ─── Types ────────────────────────────────────────────────────────────────

/** A single consistency rule to enforce across functions in a file. */
export interface ConsistencyRule {
  /** Identifier the rule refers to — a `\w+` name captured from the spec
   *  text (e.g. a validator function name like `isValid`). */
  pattern: string;
  /** Origin of the rule: parsed from spec/constitution.md, or the built-in
   *  heuristic validator-usage check. */
  source: "spec" | "heuristic";
}

/** Aggregate result of one consistency-check run. */
export interface ConsistencyResult {
  /** Suspected inconsistencies across the checked files. */
  findings: Finding[];
  /** Number of rules evaluated: spec-derived rules plus one for the
   *  always-on heuristic pass. */
  rulesChecked: number;
}
|
|
30
|
+
|
|
31
|
+
// ─── Rule Extraction ─────────────────────────────────────────────────────
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Extract consistency rules from spec/constitution content.
|
|
35
|
+
* Looks for patterns like "use X when Y", "always call X", "validate with X".
|
|
36
|
+
*/
|
|
37
|
+
function extractRulesFromSpec(content: string): ConsistencyRule[] {
|
|
38
|
+
const rules: ConsistencyRule[] = [];
|
|
39
|
+
const patterns = [
|
|
40
|
+
/always (?:use|call|check|validate with) (\w+)/gi,
|
|
41
|
+
/must (?:use|call|check) (\w+)/gi,
|
|
42
|
+
/validate.*(?:with|using) (\w+)/gi,
|
|
43
|
+
];
|
|
44
|
+
|
|
45
|
+
for (const pattern of patterns) {
|
|
46
|
+
for (const match of content.matchAll(pattern)) {
|
|
47
|
+
rules.push({
|
|
48
|
+
pattern: match[1] ?? "",
|
|
49
|
+
source: "spec",
|
|
50
|
+
});
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
return rules;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Load spec/constitution content from the maina directory.
|
|
59
|
+
*/
|
|
60
|
+
function loadSpecContent(mainaDir: string): string {
|
|
61
|
+
const paths = [join(mainaDir, "constitution.md"), join(mainaDir, "spec.md")];
|
|
62
|
+
|
|
63
|
+
const parts: string[] = [];
|
|
64
|
+
for (const p of paths) {
|
|
65
|
+
if (existsSync(p)) {
|
|
66
|
+
parts.push(readFileSync(p, "utf-8"));
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
return parts.join("\n");
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
// ─── Heuristic Analysis ──────────────────────────────────────────────────
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* Find functions that call validators inconsistently.
|
|
77
|
+
* If functionA calls isValid(x) and functionB doesn't but both take similar
|
|
78
|
+
* params, that's a potential inconsistency.
|
|
79
|
+
*/
|
|
80
|
+
function findHeuristicIssues(source: string, file: string): Finding[] {
|
|
81
|
+
const findings: Finding[] = [];
|
|
82
|
+
|
|
83
|
+
// Extract function calls per function body
|
|
84
|
+
const functionPattern =
|
|
85
|
+
/function\s+(\w+)\s*\([^)]*\)\s*\{([^}]*(?:\{[^}]*\}[^}]*)*)\}/g;
|
|
86
|
+
const functions: Array<{ name: string; body: string; line: number }> = [];
|
|
87
|
+
|
|
88
|
+
for (const match of source.matchAll(functionPattern)) {
|
|
89
|
+
const lineNumber = source.substring(0, match.index ?? 0).split("\n").length;
|
|
90
|
+
functions.push({
|
|
91
|
+
name: match[1] ?? "",
|
|
92
|
+
body: match[2] ?? "",
|
|
93
|
+
line: lineNumber,
|
|
94
|
+
});
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// Find validator-like calls (isX, validateX, checkX)
|
|
98
|
+
const validatorPattern = /\b(is[A-Z]\w+|validate\w+|check\w+)\s*\(/g;
|
|
99
|
+
const validatorsByFunction = new Map<string, Set<string>>();
|
|
100
|
+
|
|
101
|
+
for (const fn of functions) {
|
|
102
|
+
const validators = new Set<string>();
|
|
103
|
+
for (const validatorMatch of fn.body.matchAll(validatorPattern)) {
|
|
104
|
+
validators.add(validatorMatch[1] ?? "");
|
|
105
|
+
}
|
|
106
|
+
validatorsByFunction.set(fn.name, validators);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// Compare: if most functions use a validator but one doesn't, flag it
|
|
110
|
+
const allValidators = new Set<string>();
|
|
111
|
+
for (const validators of validatorsByFunction.values()) {
|
|
112
|
+
for (const v of validators) allValidators.add(v);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
if (functions.length >= 2) {
|
|
116
|
+
for (const validator of allValidators) {
|
|
117
|
+
const usersCount = Array.from(validatorsByFunction.values()).filter((v) =>
|
|
118
|
+
v.has(validator),
|
|
119
|
+
).length;
|
|
120
|
+
|
|
121
|
+
// If majority uses it but some don't, flag the ones that don't
|
|
122
|
+
if (usersCount > 0 && usersCount < functions.length) {
|
|
123
|
+
for (const fn of functions) {
|
|
124
|
+
const fnValidators = validatorsByFunction.get(fn.name);
|
|
125
|
+
if (fnValidators && !fnValidators.has(validator)) {
|
|
126
|
+
findings.push({
|
|
127
|
+
tool: "consistency",
|
|
128
|
+
file,
|
|
129
|
+
line: fn.line,
|
|
130
|
+
message: `Function '${fn.name}' does not call '${validator}' — other functions in this file do. Possible inconsistency.`,
|
|
131
|
+
severity: "warning",
|
|
132
|
+
ruleId: `consistency/${validator}`,
|
|
133
|
+
});
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
return findings;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
// ─── Main ────────────────────────────────────────────────────────────────
|
|
144
|
+
|
|
145
|
+
export async function checkConsistency(
|
|
146
|
+
files: string[],
|
|
147
|
+
cwd: string,
|
|
148
|
+
mainaDir: string,
|
|
149
|
+
): Promise<ConsistencyResult> {
|
|
150
|
+
const specContent = existsSync(mainaDir) ? loadSpecContent(mainaDir) : "";
|
|
151
|
+
const specRules = extractRulesFromSpec(specContent);
|
|
152
|
+
const allFindings: Finding[] = [];
|
|
153
|
+
|
|
154
|
+
for (const file of files) {
|
|
155
|
+
const filePath = join(cwd, file);
|
|
156
|
+
if (!existsSync(filePath)) continue;
|
|
157
|
+
|
|
158
|
+
const source = readFileSync(filePath, "utf-8");
|
|
159
|
+
|
|
160
|
+
// Check spec-based rules
|
|
161
|
+
for (const rule of specRules) {
|
|
162
|
+
const callPattern = new RegExp(`\\b${rule.pattern}\\s*\\(`, "g");
|
|
163
|
+
const fnPattern = /function\s+(\w+)/g;
|
|
164
|
+
|
|
165
|
+
// Find functions that should use this pattern but don't
|
|
166
|
+
for (const fnMatch of source.matchAll(fnPattern)) {
|
|
167
|
+
const fnStart = fnMatch.index ?? 0;
|
|
168
|
+
const fnEnd = source.indexOf("}", fnStart + 1);
|
|
169
|
+
if (fnEnd === -1) continue;
|
|
170
|
+
|
|
171
|
+
const fnBody = source.substring(fnStart, fnEnd);
|
|
172
|
+
if (!callPattern.test(fnBody)) {
|
|
173
|
+
const relatedTerms = rule.pattern
|
|
174
|
+
.toLowerCase()
|
|
175
|
+
.replace(/^is|^validate|^check/, "");
|
|
176
|
+
if (fnBody.toLowerCase().includes(relatedTerms)) {
|
|
177
|
+
const line = source.substring(0, fnStart).split("\n").length;
|
|
178
|
+
allFindings.push({
|
|
179
|
+
tool: "consistency",
|
|
180
|
+
file,
|
|
181
|
+
line,
|
|
182
|
+
message: `Spec requires '${rule.pattern}' — function '${fnMatch[1]}' may need it.`,
|
|
183
|
+
severity: "warning",
|
|
184
|
+
ruleId: `consistency/spec-${rule.pattern}`,
|
|
185
|
+
});
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
// Heuristic checks (always run)
|
|
192
|
+
allFindings.push(...findHeuristicIssues(source, file));
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
return {
|
|
196
|
+
findings: allFindings,
|
|
197
|
+
rulesChecked: specRules.length + 1, // +1 for heuristic check
|
|
198
|
+
};
|
|
199
|
+
}
|
package/src/verify/detect.ts
CHANGED
|
@@ -22,7 +22,13 @@ export type ToolName =
|
|
|
22
22
|
| "golangci-lint"
|
|
23
23
|
| "cargo-clippy"
|
|
24
24
|
| "cargo-audit"
|
|
25
|
-
| "playwright"
|
|
25
|
+
| "playwright"
|
|
26
|
+
| "dotnet-format"
|
|
27
|
+
| "checkstyle"
|
|
28
|
+
| "spotbugs"
|
|
29
|
+
| "pmd"
|
|
30
|
+
| "zap"
|
|
31
|
+
| "lighthouse";
|
|
26
32
|
|
|
27
33
|
export interface DetectedTool {
|
|
28
34
|
name: string;
|
|
@@ -47,6 +53,12 @@ export const TOOL_REGISTRY: Record<
|
|
|
47
53
|
"cargo-clippy": { command: "cargo", versionFlag: "clippy --version" },
|
|
48
54
|
"cargo-audit": { command: "cargo-audit", versionFlag: "--version" },
|
|
49
55
|
playwright: { command: "npx", versionFlag: "playwright --version" },
|
|
56
|
+
"dotnet-format": { command: "dotnet", versionFlag: "format --version" },
|
|
57
|
+
checkstyle: { command: "checkstyle", versionFlag: "--version" },
|
|
58
|
+
spotbugs: { command: "spotbugs", versionFlag: "-version" },
|
|
59
|
+
pmd: { command: "pmd", versionFlag: "--version" },
|
|
60
|
+
zap: { command: "docker", versionFlag: "--version" },
|
|
61
|
+
lighthouse: { command: "lighthouse", versionFlag: "--version" },
|
|
50
62
|
};
|
|
51
63
|
|
|
52
64
|
/**
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Lighthouse Integration for the Verify Engine.
|
|
3
|
+
*
|
|
4
|
+
* Runs Google Lighthouse against a URL and checks category scores
|
|
5
|
+
* against configurable thresholds.
|
|
6
|
+
* Generates findings when scores fall below thresholds.
|
|
7
|
+
* Gracefully skips if lighthouse is not installed.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { isToolAvailable } from "./detect";
|
|
11
|
+
import type { Finding } from "./diff-filter";
|
|
12
|
+
|
|
13
|
+
// ─── Types ────────────────────────────────────────────────────────────────

/** Options controlling a Lighthouse run. */
export interface LighthouseOptions {
  /** Target URL to audit. An empty string causes the run to be skipped. */
  url: string;
  /** Working directory for the spawned lighthouse process. */
  cwd: string;
  /** Pre-resolved availability — skips redundant detection if provided. */
  available?: boolean;
  /** Score thresholds (0-100). Categories below threshold generate findings. */
  thresholds?: Record<string, number>;
}

/** Outcome of a Lighthouse run. */
export interface LighthouseResult {
  /** Findings for categories that scored below their threshold. */
  findings: Finding[];
  /** True when the run was skipped (no URL, or lighthouse unavailable). */
  skipped: boolean;
  /** Category scores scaled to 0-100, keyed by category name. */
  scores: Record<string, number>;
}
|
|
29
|
+
|
|
30
|
+
/** Default thresholds — 90 for the three core categories. */
|
|
31
|
+
const DEFAULT_THRESHOLDS: Record<string, number> = {
|
|
32
|
+
performance: 90,
|
|
33
|
+
accessibility: 90,
|
|
34
|
+
seo: 90,
|
|
35
|
+
};
|
|
36
|
+
|
|
37
|
+
// ─── JSON Parsing ─────────────────────────────────────────────────────────
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* Determine severity based on how far below the threshold a score is.
|
|
41
|
+
* Below 50: error. Below threshold: warning.
|
|
42
|
+
*/
|
|
43
|
+
function scoreSeverity(
|
|
44
|
+
score: number,
|
|
45
|
+
_threshold: number,
|
|
46
|
+
): "error" | "warning" | "info" {
|
|
47
|
+
if (score < 50) {
|
|
48
|
+
return "error";
|
|
49
|
+
}
|
|
50
|
+
return "warning";
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/**
|
|
54
|
+
* Parse Lighthouse JSON output into findings and scores.
|
|
55
|
+
*
|
|
56
|
+
* Lighthouse JSON has this structure:
|
|
57
|
+
* ```json
|
|
58
|
+
* {
|
|
59
|
+
* "requestedUrl": "https://example.com",
|
|
60
|
+
* "categories": {
|
|
61
|
+
* "performance": { "score": 0.95 },
|
|
62
|
+
* "accessibility": { "score": 0.88 },
|
|
63
|
+
* "seo": { "score": 0.92 },
|
|
64
|
+
* "best-practices": { "score": 0.85 }
|
|
65
|
+
* }
|
|
66
|
+
* }
|
|
67
|
+
* ```
|
|
68
|
+
*
|
|
69
|
+
* Scores are 0-1 floats. We multiply by 100 for human-readable percentages.
|
|
70
|
+
* Findings are generated for categories whose scores fall below the thresholds.
|
|
71
|
+
*
|
|
72
|
+
* Handles malformed JSON and unexpected structures gracefully.
|
|
73
|
+
*/
|
|
74
|
+
export function parseLighthouseJson(
|
|
75
|
+
json: string,
|
|
76
|
+
thresholds?: Record<string, number>,
|
|
77
|
+
): Omit<LighthouseResult, "skipped"> {
|
|
78
|
+
let parsed: Record<string, unknown>;
|
|
79
|
+
try {
|
|
80
|
+
parsed = JSON.parse(json) as Record<string, unknown>;
|
|
81
|
+
} catch {
|
|
82
|
+
return { findings: [], scores: {} };
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
const categories = parsed.categories;
|
|
86
|
+
if (typeof categories !== "object" || categories === null) {
|
|
87
|
+
return { findings: [], scores: {} };
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
const requestedUrl = (parsed.requestedUrl as string) ?? "";
|
|
91
|
+
const activeThresholds = thresholds ?? DEFAULT_THRESHOLDS;
|
|
92
|
+
const scores: Record<string, number> = {};
|
|
93
|
+
const findings: Finding[] = [];
|
|
94
|
+
|
|
95
|
+
const cats = categories as Record<string, unknown>;
|
|
96
|
+
|
|
97
|
+
for (const [categoryName, categoryData] of Object.entries(cats)) {
|
|
98
|
+
const cat = categoryData as Record<string, unknown> | null;
|
|
99
|
+
if (!cat || typeof cat.score !== "number") {
|
|
100
|
+
continue;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
const rawScore = cat.score as number;
|
|
104
|
+
const score = Math.round(rawScore * 100);
|
|
105
|
+
scores[categoryName] = score;
|
|
106
|
+
|
|
107
|
+
const threshold = activeThresholds[categoryName];
|
|
108
|
+
if (threshold !== undefined && score < threshold) {
|
|
109
|
+
findings.push({
|
|
110
|
+
tool: "lighthouse",
|
|
111
|
+
file: requestedUrl,
|
|
112
|
+
line: 0,
|
|
113
|
+
message: `Lighthouse ${categoryName} score ${score} is below threshold ${threshold}`,
|
|
114
|
+
severity: scoreSeverity(score, threshold),
|
|
115
|
+
ruleId: `lighthouse/${categoryName}`,
|
|
116
|
+
});
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
return { findings, scores };
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
// ─── Runner ───────────────────────────────────────────────────────────────
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Run Lighthouse and return parsed findings with scores.
|
|
127
|
+
*
|
|
128
|
+
* If lighthouse is not installed or no URL is provided,
|
|
129
|
+
* returns `{ findings: [], skipped: true, scores: {} }`.
|
|
130
|
+
* If lighthouse fails, returns `{ findings: [], skipped: false, scores: {} }`.
|
|
131
|
+
*/
|
|
132
|
+
export async function runLighthouse(
|
|
133
|
+
options: LighthouseOptions,
|
|
134
|
+
): Promise<LighthouseResult> {
|
|
135
|
+
if (!options.url) {
|
|
136
|
+
return { findings: [], skipped: true, scores: {} };
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
const toolAvailable =
|
|
140
|
+
options.available ?? (await isToolAvailable("lighthouse"));
|
|
141
|
+
if (!toolAvailable) {
|
|
142
|
+
return { findings: [], skipped: true, scores: {} };
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
const cwd = options.cwd;
|
|
146
|
+
|
|
147
|
+
const args = [
|
|
148
|
+
"lighthouse",
|
|
149
|
+
options.url,
|
|
150
|
+
"--output=json",
|
|
151
|
+
'--chrome-flags="--headless --no-sandbox"',
|
|
152
|
+
];
|
|
153
|
+
|
|
154
|
+
try {
|
|
155
|
+
const proc = Bun.spawn(args, {
|
|
156
|
+
cwd,
|
|
157
|
+
stdout: "pipe",
|
|
158
|
+
stderr: "pipe",
|
|
159
|
+
});
|
|
160
|
+
|
|
161
|
+
const stdout = await new Response(proc.stdout).text();
|
|
162
|
+
await new Response(proc.stderr).text();
|
|
163
|
+
await proc.exited;
|
|
164
|
+
|
|
165
|
+
const { findings, scores } = parseLighthouseJson(
|
|
166
|
+
stdout,
|
|
167
|
+
options.thresholds,
|
|
168
|
+
);
|
|
169
|
+
return { findings, skipped: false, scores };
|
|
170
|
+
} catch {
|
|
171
|
+
return { findings: [], skipped: false, scores: {} };
|
|
172
|
+
}
|
|
173
|
+
}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Checkstyle output parser for Java linting.
|
|
3
|
+
* Parses Checkstyle XML output.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import type { SyntaxDiagnostic } from "../syntax-guard";
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Parse Checkstyle XML output into SyntaxDiagnostic[].
|
|
10
|
+
*/
|
|
11
|
+
export function parseCheckstyleOutput(xml: string): SyntaxDiagnostic[] {
|
|
12
|
+
const diagnostics: SyntaxDiagnostic[] = [];
|
|
13
|
+
|
|
14
|
+
// Simple XML parsing — extract <error> elements
|
|
15
|
+
// Format: <file name="path"><error line="10" column="5" severity="error" message="desc" source="rule"/></file>
|
|
16
|
+
const filePattern = /<file\s+name="([^"]+)">([\s\S]*?)<\/file>/g;
|
|
17
|
+
const errorPattern =
|
|
18
|
+
/<error\s+line="(\d+)"\s+(?:column="(\d+)"\s+)?severity="(\w+)"\s+message="([^"]+)"/g;
|
|
19
|
+
|
|
20
|
+
for (const fileMatch of xml.matchAll(filePattern)) {
|
|
21
|
+
const filePath = fileMatch[1] ?? "";
|
|
22
|
+
const fileContent = fileMatch[2] ?? "";
|
|
23
|
+
|
|
24
|
+
for (const errorMatch of fileContent.matchAll(errorPattern)) {
|
|
25
|
+
const line = Number.parseInt(errorMatch[1] ?? "0", 10);
|
|
26
|
+
const column = Number.parseInt(errorMatch[2] ?? "0", 10);
|
|
27
|
+
const severity = errorMatch[3] ?? "warning";
|
|
28
|
+
const message = errorMatch[4] ?? "";
|
|
29
|
+
|
|
30
|
+
diagnostics.push({
|
|
31
|
+
file: filePath,
|
|
32
|
+
line,
|
|
33
|
+
column,
|
|
34
|
+
message,
|
|
35
|
+
severity: severity === "error" ? "error" : "warning",
|
|
36
|
+
});
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
return diagnostics;
|
|
41
|
+
}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* dotnet format output parser for C# linting.
|
|
3
|
+
* Parses `dotnet format --verify-no-changes` output.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import type { SyntaxDiagnostic } from "../syntax-guard";
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Parse dotnet format text output into SyntaxDiagnostic[].
|
|
10
|
+
* dotnet format outputs: "path/file.cs(line,col): severity CODE: message"
|
|
11
|
+
*/
|
|
12
|
+
export function parseDotnetFormatOutput(output: string): SyntaxDiagnostic[] {
|
|
13
|
+
const diagnostics: SyntaxDiagnostic[] = [];
|
|
14
|
+
const lines = output.split("\n");
|
|
15
|
+
|
|
16
|
+
for (const line of lines) {
|
|
17
|
+
if (!line.trim()) continue;
|
|
18
|
+
// Format: file.cs(line,col): warning CS1234: message
|
|
19
|
+
const match = line.match(
|
|
20
|
+
/^(.+?)\((\d+),(\d+)\):\s*(error|warning)\s+(\w+):\s*(.+)$/,
|
|
21
|
+
);
|
|
22
|
+
if (!match) continue;
|
|
23
|
+
|
|
24
|
+
const [, file, lineStr, colStr, severity, _code, message] = match;
|
|
25
|
+
if (!file || !lineStr || !message) continue;
|
|
26
|
+
|
|
27
|
+
diagnostics.push({
|
|
28
|
+
file,
|
|
29
|
+
line: Number.parseInt(lineStr, 10),
|
|
30
|
+
column: Number.parseInt(colStr ?? "0", 10),
|
|
31
|
+
message,
|
|
32
|
+
severity: severity === "error" ? "error" : "warning",
|
|
33
|
+
});
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
return diagnostics;
|
|
37
|
+
}
|