semlint-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +162 -0
- package/dist/backend.js +205 -0
- package/dist/cli.js +93 -0
- package/dist/config.js +108 -0
- package/dist/diagnostics.js +91 -0
- package/dist/filter.js +144 -0
- package/dist/git.js +85 -0
- package/dist/main.js +186 -0
- package/dist/reporter.js +42 -0
- package/dist/rules.js +88 -0
- package/dist/types.js +2 -0
- package/package.json +51 -0
- package/rules/SEMLINT_NAMING_001.json +12 -0
- package/rules/SEMLINT_PATTERN_002.json +12 -0
- package/rules/SEMLINT_SWE_003.json +12 -0
package/README.md
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
# Semlint CLI MVP
|
|
2
|
+
|
|
3
|
+
Semlint is a deterministic semantic lint CLI that:
|
|
4
|
+
|
|
5
|
+
- reads a git diff,
|
|
6
|
+
- runs enabled semantic rules in parallel,
|
|
7
|
+
- executes an external backend command per rule,
|
|
8
|
+
- prints text or JSON diagnostics,
|
|
9
|
+
- returns CI-friendly exit codes.
|
|
10
|
+
|
|
11
|
+
## Install
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
pnpm install
|
|
15
|
+
pnpm build
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Command
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
semlint check
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
If running from source:
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pnpm check
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## CLI flags
|
|
31
|
+
|
|
32
|
+
- `--backend <name>`: override backend name
|
|
33
|
+
- `--model <name>`: override model name
|
|
34
|
+
- `--config <path>`: config file path
|
|
35
|
+
- `--format <text|json>`: output format
|
|
36
|
+
- `--base <ref>`: base git ref for explicit ref-to-ref diff
|
|
37
|
+
- `--head <ref>`: head git ref for explicit ref-to-ref diff
|
|
38
|
+
- `--fail-on <error|warn|never>`: failure threshold (default `error`)
|
|
39
|
+
- `--batch`: run all selected rules in one backend call
|
|
40
|
+
- `--debug`: enable debug logs to stderr
|
|
41
|
+
|
|
42
|
+
Default diff behavior (without `--base`/`--head`) uses your local branch state:
|
|
43
|
+
|
|
44
|
+
- tracked changes across commits since merge-base,
|
|
45
|
+
- staged changes,
|
|
46
|
+
- unstaged changes,
|
|
47
|
+
- untracked files.
|
|
48
|
+
|
|
49
|
+
If you pass `--base` or `--head`, Semlint uses explicit `git diff <base> <head>` mode.
|
|
50
|
+
|
|
51
|
+
## Exit codes
|
|
52
|
+
|
|
53
|
+
- `0`: no blocking diagnostics
|
|
54
|
+
- `1`: blocking diagnostics found
|
|
55
|
+
- `2`: backend/runtime failure
|
|
56
|
+
|
|
57
|
+
## Config discovery order
|
|
58
|
+
|
|
59
|
+
1. `--config <path>`
|
|
60
|
+
2. `./semlint.json`
|
|
61
|
+
3. `./.semlint.json`
|
|
62
|
+
4. defaults
|
|
63
|
+
|
|
64
|
+
Unknown fields are ignored.
|
|
65
|
+
|
|
66
|
+
## Minimal config example
|
|
67
|
+
|
|
68
|
+
```json
|
|
69
|
+
{
|
|
70
|
+
"backend": "cursor-cli",
|
|
71
|
+
"model": "auto",
|
|
72
|
+
"budgets": {
|
|
73
|
+
"timeout_ms": 120000
|
|
74
|
+
},
|
|
75
|
+
"output": {
|
|
76
|
+
"format": "text"
|
|
77
|
+
},
|
|
78
|
+
"execution": {
|
|
79
|
+
"batch": false
|
|
80
|
+
},
|
|
81
|
+
"rules": {
|
|
82
|
+
"disable": ["SEMLINT_EXAMPLE_001"],
|
|
83
|
+
"severity_overrides": {
|
|
84
|
+
"SEMLINT_API_001": "error"
|
|
85
|
+
}
|
|
86
|
+
},
|
|
87
|
+
"backends": {
|
|
88
|
+
"cursor-cli": {
|
|
89
|
+
"executable": "agent"
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
## Rule files
|
|
96
|
+
|
|
97
|
+
Rule JSON files are loaded from `rules/`.
|
|
98
|
+
|
|
99
|
+
Required fields:
|
|
100
|
+
|
|
101
|
+
- `id` (string, unique)
|
|
102
|
+
- `title` (string)
|
|
103
|
+
- `severity_default` (`error|warn|info`)
|
|
104
|
+
- `prompt` (non-empty string)
|
|
105
|
+
|
|
106
|
+
Optional fields:
|
|
107
|
+
|
|
108
|
+
- `include_globs`: string[]
|
|
109
|
+
- `exclude_globs`: string[]
|
|
110
|
+
- `diff_regex`: string[]
|
|
111
|
+
|
|
112
|
+
Invalid rules cause runtime failure with exit code `2`.
|
|
113
|
+
|
|
114
|
+
## Backend contract
|
|
115
|
+
|
|
116
|
+
For backend `cursor-cli`, Semlint executes:
|
|
117
|
+
|
|
118
|
+
```bash
|
|
119
|
+
cursor agent "<prompt>" --model <model> --print --mode ask --output-format text
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
For `cursor-cli`, Semlint always uses `cursor agent` directly.
|
|
123
|
+
Other backend names still resolve executables from config:
|
|
124
|
+
|
|
125
|
+
- `backends.<backend>.executable` if provided
|
|
126
|
+
|
|
127
|
+
Backend stdout must be valid JSON with shape:
|
|
128
|
+
|
|
129
|
+
```json
|
|
130
|
+
{
|
|
131
|
+
"diagnostics": [
|
|
132
|
+
{
|
|
133
|
+
"rule_id": "SEMLINT_API_001",
|
|
134
|
+
"severity": "warn",
|
|
135
|
+
"message": "text",
|
|
136
|
+
"file": "src/file.ts",
|
|
137
|
+
"line": 42
|
|
138
|
+
}
|
|
139
|
+
]
|
|
140
|
+
}
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
If parsing fails, Semlint retries once with appended instruction:
|
|
144
|
+
`Return valid JSON only.` (plus additional strict-formatting directives: no markdown fences, no surrounding commentary, and the exact diagnostics JSON shape).
|
|
145
|
+
|
|
146
|
+
## Batch mode
|
|
147
|
+
|
|
148
|
+
Use batch mode to reduce cost by evaluating all runnable rules in a single backend call:
|
|
149
|
+
|
|
150
|
+
```bash
|
|
151
|
+
semlint check --batch
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
Or configure it in `semlint.json`:
|
|
155
|
+
|
|
156
|
+
```json
|
|
157
|
+
{
|
|
158
|
+
"execution": {
|
|
159
|
+
"batch": true
|
|
160
|
+
}
|
|
161
|
+
}
|
|
162
|
+
```
|
package/dist/backend.js
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.createBackendRunner = createBackendRunner;
|
|
4
|
+
const node_child_process_1 = require("node:child_process");
|
|
5
|
+
const STRICT_JSON_RETRY_INSTRUCTION = [
|
|
6
|
+
"Return valid JSON only.",
|
|
7
|
+
"Do not include markdown fences.",
|
|
8
|
+
"Do not include commentary, headings, or any text before/after JSON.",
|
|
9
|
+
"The first character of your response must be '{' and the last must be '}'.",
|
|
10
|
+
'Output must match: {"diagnostics":[{"rule_id":"<id>","severity":"error|warn|info","message":"<text>","file":"<path>","line":1}]}'
|
|
11
|
+
].join(" ");
|
|
12
|
+
/**
 * Returns true when `error` looks like a Node.js spawn failure for a
 * missing executable (errno code "ENOENT").
 */
function isEnoentError(error) {
    if (typeof error !== "object" || error === null) {
        return false;
    }
    return "code" in error && error.code === "ENOENT";
}
|
|
18
|
+
/**
 * Writes `message` to stderr with a "[debug] " prefix, but only when
 * debug logging is enabled.
 */
function debugLog(enabled, message) {
    if (!enabled) {
        return;
    }
    process.stderr.write(`[debug] ${message}\n`);
}
|
|
23
|
+
/**
 * Resolves the spawn spec(s) for the configured backend.
 * The "cursor-cli" backend is hard-wired to `cursor agent`; any other
 * backend must declare its executable under backends.<name>.executable.
 * @throws when a non-cursor backend has no configured executable.
 */
function resolveCommandSpecs(config) {
    if (config.backend === "cursor-cli") {
        // Hard-wired: cursor-cli always runs `cursor agent`.
        return [{ executable: "cursor", argsPrefix: ["agent"] }];
    }
    const configuredExecutable = config.backendExecutables[config.backend];
    if (!configuredExecutable) {
        throw new Error(`No executable configured for backend "${config.backend}". Configure it under backends.${config.backend}.executable`);
    }
    return [{ executable: configuredExecutable, argsPrefix: [] }];
}
|
|
34
|
+
/**
 * Spawns `executable args...` with stdin ignored, buffering stdout and
 * stderr, while enforcing a hard timeout.
 *
 * Resolves with { stdout, stderr, elapsedMs } on exit code 0.
 * Rejects on spawn failure (e.g. ENOENT), on a non-zero exit code, or
 * when the timeout fires — in the timeout case the child is SIGKILLed
 * and the eventual "close" event is mapped to a timeout error.
 */
function executeBackendCommand(executable, args, timeoutMs) {
    return new Promise((resolve, reject) => {
        const startedAt = Date.now();
        const child = (0, node_child_process_1.spawn)(executable, args, {
            stdio: ["ignore", "pipe", "pipe"]
        });
        let stdout = "";
        let stderr = "";
        let timedOut = false;
        // Hard deadline: kill the child and let the "close" handler
        // translate the flag into a rejection.
        const timer = setTimeout(() => {
            timedOut = true;
            child.kill("SIGKILL");
        }, timeoutMs);
        child.stdout.on("data", (chunk) => {
            stdout += String(chunk);
        });
        child.stderr.on("data", (chunk) => {
            stderr += String(chunk);
        });
        // "error" fires when spawning itself fails (e.g. missing binary).
        child.on("error", (error) => {
            clearTimeout(timer);
            reject(error);
        });
        child.on("close", (code) => {
            clearTimeout(timer);
            if (timedOut) {
                reject(new Error(`Backend timed out after ${timeoutMs}ms`));
                return;
            }
            if (code !== 0) {
                reject(new Error(`Backend command failed with code ${code}. stderr: ${stderr.trim() || "(empty)"}`));
                return;
            }
            resolve({ stdout, stderr, elapsedMs: Date.now() - startedAt });
        });
    });
}
|
|
71
|
+
/**
 * Scans `raw` for the first balanced top-level JSON object ("{...}")
 * and returns that substring, or undefined when no object closes.
 * String literals are tracked so braces inside quoted strings (and
 * escaped quotes/backslashes) do not affect the depth count.
 */
function extractFirstJsonObject(raw) {
    let openIndex = -1;
    let braceDepth = 0;
    let insideString = false;
    let pendingEscape = false;
    for (let pos = 0; pos < raw.length; pos += 1) {
        const current = raw[pos];
        if (openIndex === -1) {
            // Still searching for the opening brace.
            if (current === "{") {
                openIndex = pos;
                braceDepth = 1;
            }
        }
        else if (insideString) {
            if (pendingEscape) {
                pendingEscape = false;
            }
            else if (current === "\\") {
                pendingEscape = true;
            }
            else if (current === "\"") {
                insideString = false;
            }
        }
        else if (current === "\"") {
            insideString = true;
        }
        else if (current === "{") {
            braceDepth += 1;
        }
        else if (current === "}") {
            braceDepth -= 1;
            if (braceDepth === 0) {
                return raw.slice(openIndex, pos + 1);
            }
        }
    }
    return undefined;
}
|
|
116
|
+
/**
 * Parses backend stdout into `{ diagnostics }`.
 * When the whole output is not valid JSON, falls back to extracting the
 * first balanced JSON object (the backend may wrap JSON in commentary).
 * @throws when no JSON object can be parsed, when the JSON root is not
 *         a plain object, or when `diagnostics` is not an array.
 */
function parseBackendResult(raw) {
    const trimmed = raw.trim();
    const parsed = (() => {
        try {
            return JSON.parse(trimmed);
        }
        catch {
            const embedded = extractFirstJsonObject(trimmed);
            if (embedded === undefined) {
                throw new Error(`Backend output is not valid JSON: ${trimmed.slice(0, 200)}`);
            }
            return JSON.parse(embedded);
        }
    })();
    const isPlainObject = typeof parsed === "object" && parsed !== null && !Array.isArray(parsed);
    if (!isPlainObject) {
        throw new Error("Backend output JSON root must be an object");
    }
    if (!Array.isArray(parsed.diagnostics)) {
        throw new Error("Backend output must contain diagnostics array");
    }
    return { diagnostics: parsed.diagnostics };
}
|
|
138
|
+
/**
 * Builds the backend runner for the resolved config.
 *
 * runPrompt executes the backend CLI with the given prompt; on any
 * failure (spawn error, non-zero exit, timeout, or unparseable JSON)
 * it retries once with STRICT_JSON_RETRY_INSTRUCTION appended. When a
 * command spec fails with ENOENT (binary missing) the next spec in
 * `commandSpecs` is tried; otherwise the first attempt's error is
 * surfaced to the caller.
 */
function createBackendRunner(config) {
    const commandSpecs = resolveCommandSpecs(config);
    return {
        async runPrompt(input) {
            let lastError;
            for (const spec of commandSpecs) {
                // Human-readable command name for debug logs only.
                const commandName = [spec.executable, ...spec.argsPrefix].join(" ");
                const baseArgs = [
                    ...spec.argsPrefix,
                    input.prompt,
                    "--model",
                    config.model,
                    "--print",
                    "--mode",
                    "ask",
                    "--output-format",
                    "text"
                ];
                try {
                    debugLog(config.debug, `${input.label}: backend attempt 1 via "${commandName}" (timeout ${input.timeoutMs}ms)`);
                    const first = await executeBackendCommand(spec.executable, baseArgs, input.timeoutMs);
                    debugLog(config.debug, `${input.label}: backend attempt 1 completed in ${first.elapsedMs}ms`);
                    return parseBackendResult(first.stdout.trim());
                }
                catch (firstError) {
                    debugLog(config.debug, `${input.label}: backend attempt 1 failed (${firstError instanceof Error ? firstError.message : String(firstError)})`);
                    // Retry once with the strict-JSON instruction appended.
                    const retryPrompt = `${input.prompt}\n\n${STRICT_JSON_RETRY_INSTRUCTION}`;
                    const retryArgs = [
                        ...spec.argsPrefix,
                        retryPrompt,
                        "--model",
                        config.model,
                        "--print",
                        "--mode",
                        "ask",
                        "--output-format",
                        "text"
                    ];
                    try {
                        debugLog(config.debug, `${input.label}: backend attempt 2 via "${commandName}" (timeout ${input.timeoutMs}ms)`);
                        const second = await executeBackendCommand(spec.executable, retryArgs, input.timeoutMs);
                        debugLog(config.debug, `${input.label}: backend attempt 2 completed in ${second.elapsedMs}ms`);
                        return parseBackendResult(second.stdout.trim());
                    }
                    catch (secondError) {
                        debugLog(config.debug, `${input.label}: backend attempt 2 failed (${secondError instanceof Error ? secondError.message : String(secondError)})`);
                        lastError = secondError;
                        // Missing binary on either attempt: try the next command spec.
                        if (isEnoentError(secondError)) {
                            continue;
                        }
                        if (isEnoentError(firstError)) {
                            continue;
                        }
                        // NOTE(review): the first attempt's error is surfaced, not the
                        // second's — presumably to report the original failure; confirm.
                        throw firstError;
                    }
                }
            }
            // All command specs exhausted (ENOENT everywhere).
            throw lastError instanceof Error ? lastError : new Error(String(lastError));
        },
        // Convenience wrapper labelling the prompt run with its rule id.
        async runRule(input) {
            return this.runPrompt({
                label: `Rule ${input.ruleId}`,
                prompt: input.prompt,
                timeoutMs: input.timeoutMs
            });
        }
    };
}
|
package/dist/cli.js
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
"use strict";
|
|
3
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
4
|
+
const main_1 = require("./main");
|
|
5
|
+
const FLAGS_WITH_VALUES = new Set([
|
|
6
|
+
"--backend",
|
|
7
|
+
"--model",
|
|
8
|
+
"--config",
|
|
9
|
+
"--format",
|
|
10
|
+
"--base",
|
|
11
|
+
"--head",
|
|
12
|
+
"--fail-on"
|
|
13
|
+
]);
|
|
14
|
+
// True when `value` names a supported output format.
function isOutputFormat(value) {
    return ["text", "json"].includes(value);
}
|
|
17
|
+
// True when `value` is a valid --fail-on threshold.
function isFailOn(value) {
    return ["error", "warn", "never"].includes(value);
}
|
|
20
|
+
/**
 * Parses CLI argv (everything after `semlint`) into an options object.
 * The only supported command is "check"; flags may appear in any order.
 * @throws on a missing/unknown command, an unknown flag, a missing
 *         flag value, or an invalid --format / --fail-on value.
 */
function parseArgs(argv) {
    const [command, ...rest] = argv;
    if (command !== "check") {
        throw new Error("Usage: semlint check [--backend <name>] [--model <name>] [--config <path>] [--format <text|json>] [--base <ref>] [--head <ref>] [--fail-on <error|warn|never>] [--batch] [--debug]");
    }
    const options = { command: "check", debug: false };
    let i = 0;
    while (i < rest.length) {
        const token = rest[i];
        i += 1;
        // Boolean flags take no value.
        if (token === "--debug") {
            options.debug = true;
            continue;
        }
        if (token === "--batch") {
            options.batch = true;
            continue;
        }
        if (!FLAGS_WITH_VALUES.has(token)) {
            throw new Error(`Unknown flag: ${token}`);
        }
        const value = rest[i];
        if (!value || value.startsWith("--")) {
            throw new Error(`Missing value for flag: ${token}`);
        }
        i += 1;
        if (token === "--backend") {
            options.backend = value;
        }
        else if (token === "--model") {
            options.model = value;
        }
        else if (token === "--config") {
            options.configPath = value;
        }
        else if (token === "--format") {
            if (!isOutputFormat(value)) {
                throw new Error(`Invalid --format value: ${value}`);
            }
            options.format = value;
        }
        else if (token === "--base") {
            options.base = value;
        }
        else if (token === "--head") {
            options.head = value;
        }
        else {
            // token === "--fail-on" (the only remaining value flag)
            if (!isFailOn(value)) {
                throw new Error(`Invalid --fail-on value: ${value}`);
            }
            options.failOn = value;
        }
    }
    return options;
}
|
|
81
|
+
/**
 * CLI entry point: parses argv, runs semlint, and sets the process
 * exit code. Any thrown error is printed to stderr and mapped to exit
 * code 2 (runtime failure); success paths use runSemlint's code.
 */
async function main() {
    try {
        const options = parseArgs(process.argv.slice(2));
        process.exitCode = await (0, main_1.runSemlint)(options);
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        process.stderr.write(`${message}\n`);
        process.exitCode = 2;
    }
}
void main();
|
package/dist/config.js
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.resolveConfigPath = resolveConfigPath;
|
|
7
|
+
exports.loadEffectiveConfig = loadEffectiveConfig;
|
|
8
|
+
const node_fs_1 = __importDefault(require("node:fs"));
|
|
9
|
+
const node_path_1 = __importDefault(require("node:path"));
|
|
10
|
+
const VALID_SEVERITIES = new Set(["error", "warn", "info"]);
|
|
11
|
+
const DEFAULTS = {
|
|
12
|
+
backend: "cursor-cli",
|
|
13
|
+
model: "auto",
|
|
14
|
+
timeoutMs: 120000,
|
|
15
|
+
format: "text",
|
|
16
|
+
failOn: "error",
|
|
17
|
+
base: "origin/main",
|
|
18
|
+
head: "HEAD",
|
|
19
|
+
debug: false,
|
|
20
|
+
batchMode: false,
|
|
21
|
+
rulesDisable: [],
|
|
22
|
+
severityOverrides: {},
|
|
23
|
+
backendExecutables: {
|
|
24
|
+
"cursor-cli": "cursor"
|
|
25
|
+
}
|
|
26
|
+
};
|
|
27
|
+
/**
 * Reads and parses a JSON file when it exists.
 * @returns the parsed object, or undefined when the file is absent.
 * @throws when the file contains invalid JSON or a non-object root.
 */
function readJsonIfExists(filePath) {
    if (!node_fs_1.default.existsSync(filePath)) {
        return undefined;
    }
    const parsed = JSON.parse(node_fs_1.default.readFileSync(filePath, "utf8"));
    const isPlainObject = typeof parsed === "object" && parsed !== null && !Array.isArray(parsed);
    if (!isPlainObject) {
        throw new Error(`Config at ${filePath} must be a JSON object`);
    }
    return parsed;
}
|
|
38
|
+
/**
 * Determines which config file to load.
 * Order: explicit --config path (resolved to absolute), ./semlint.json,
 * ./.semlint.json; returns undefined when none apply (defaults win).
 */
function resolveConfigPath(explicitPath) {
    if (explicitPath) {
        return node_path_1.default.resolve(explicitPath);
    }
    const cwd = process.cwd();
    for (const fileName of ["semlint.json", ".semlint.json"]) {
        const candidate = node_path_1.default.join(cwd, fileName);
        if (node_fs_1.default.existsSync(candidate)) {
            return candidate;
        }
    }
    return undefined;
}
|
|
53
|
+
/**
 * Keeps only override entries whose value is a recognized severity
 * string ("error" | "warn" | "info"); returns {} for missing input.
 */
function sanitizeSeverityOverrides(value) {
    if (!value) {
        return {};
    }
    const validEntries = Object.entries(value).filter(([, severity]) => typeof severity === "string" && VALID_SEVERITIES.has(severity));
    return Object.fromEntries(validEntries);
}
|
|
65
|
+
/**
 * Merges user-configured backend executables over the built-in
 * defaults. Only entries shaped like { executable: "<non-empty>" } are
 * accepted; the executable string is stored trimmed.
 */
function sanitizeBackendExecutables(value) {
    const out = { ...DEFAULTS.backendExecutables };
    if (!value) {
        return out;
    }
    for (const [name, candidate] of Object.entries(value)) {
        if (typeof candidate !== "object" || candidate === null) {
            continue;
        }
        if (!("executable" in candidate) || typeof candidate.executable !== "string") {
            continue;
        }
        const trimmed = candidate.executable.trim();
        if (trimmed !== "") {
            out[name] = trimmed;
        }
    }
    return out;
}
|
|
83
|
+
/**
 * Merges CLI options, the discovered config file, and DEFAULTS into
 * the effective runtime config. Precedence: CLI flag > config file >
 * default — except failOn/base/head, which only consider CLI flags.
 * NOTE(review): failOn/base/head ignoring the config file looks
 * intentional (they are diff/CI-run specific) — confirm.
 */
function loadEffectiveConfig(options) {
    const configPath = resolveConfigPath(options.configPath);
    const parsed = configPath ? readJsonIfExists(configPath) : undefined;
    // Unknown fields in the file are simply ignored below.
    const fileConfig = (parsed ?? {});
    return {
        backend: options.backend ?? fileConfig.backend ?? DEFAULTS.backend,
        model: options.model ?? fileConfig.model ?? DEFAULTS.model,
        // budgets.timeout_ms must be a number to be honored.
        timeoutMs: typeof fileConfig.budgets?.timeout_ms === "number"
            ? fileConfig.budgets.timeout_ms
            : DEFAULTS.timeoutMs,
        format: options.format ?? fileConfig.output?.format ?? DEFAULTS.format,
        failOn: options.failOn ?? DEFAULTS.failOn,
        base: options.base ?? DEFAULTS.base,
        head: options.head ?? DEFAULTS.head,
        debug: options.debug || DEFAULTS.debug,
        // CLI --batch wins; otherwise execution.batch must be a boolean.
        batchMode: options.batch ??
            (typeof fileConfig.execution?.batch === "boolean"
                ? fileConfig.execution.batch
                : DEFAULTS.batchMode),
        // Non-string entries in rules.disable are silently dropped.
        rulesDisable: Array.isArray(fileConfig.rules?.disable)
            ? fileConfig.rules?.disable.filter((item) => typeof item === "string")
            : DEFAULTS.rulesDisable,
        severityOverrides: sanitizeSeverityOverrides((fileConfig.rules?.severity_overrides ?? undefined)),
        backendExecutables: sanitizeBackendExecutables((fileConfig.backends ?? undefined))
    };
}
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.normalizeDiagnostics = normalizeDiagnostics;
|
|
7
|
+
exports.sortDiagnostics = sortDiagnostics;
|
|
8
|
+
exports.hasBlockingDiagnostic = hasBlockingDiagnostic;
|
|
9
|
+
const node_fs_1 = __importDefault(require("node:fs"));
|
|
10
|
+
const node_path_1 = __importDefault(require("node:path"));
|
|
11
|
+
const VALID_SEVERITIES = new Set(["error", "warn", "info"]);
|
|
12
|
+
// True for integers >= 1; rejects non-numbers, floats, zero, negatives.
function isPositiveInteger(value) {
    return Number.isInteger(value) && value >= 1;
}
|
|
15
|
+
/**
 * Validates raw backend diagnostics for one rule and returns only the
 * well-formed ones. A diagnostic is kept when it is an object whose
 * rule_id equals `ruleId`, whose file/message are non-blank strings,
 * whose severity is error|warn|info, whose line is a positive integer,
 * and whose file path exists on disk (resolved from the cwd).
 * Dropped entries are reported to stderr when `debug` is true.
 */
function normalizeDiagnostics(ruleId, diagnostics, debug) {
    const out = [];
    for (const raw of diagnostics) {
        if (typeof raw !== "object" || raw === null || Array.isArray(raw)) {
            if (debug) {
                process.stderr.write(`[debug] Dropped diagnostic for ${ruleId}: not an object\n`);
            }
            continue;
        }
        const candidate = raw;
        const severity = candidate.severity;
        const file = candidate.file;
        const line = candidate.line;
        const message = candidate.message;
        const candidateRuleId = candidate.rule_id;
        // Required-field validation: any failure drops the diagnostic.
        if (typeof candidateRuleId !== "string" ||
            candidateRuleId !== ruleId ||
            typeof file !== "string" ||
            file.trim() === "" ||
            typeof message !== "string" ||
            message.trim() === "" ||
            typeof severity !== "string" ||
            !VALID_SEVERITIES.has(severity) ||
            !isPositiveInteger(line)) {
            if (debug) {
                process.stderr.write(`[debug] Dropped diagnostic for ${ruleId}: failed required field validation\n`);
            }
            continue;
        }
        // Guard against hallucinated paths: the file must exist locally.
        const absolute = node_path_1.default.resolve(file);
        if (!node_fs_1.default.existsSync(absolute)) {
            if (debug) {
                process.stderr.write(`[debug] Dropped diagnostic for ${ruleId}: file does not exist (${file})\n`);
            }
            continue;
        }
        // Optional fields are kept only when well-typed; otherwise undefined.
        out.push({
            rule_id: candidateRuleId,
            severity: severity,
            message,
            file,
            line,
            column: isPositiveInteger(candidate.column) ? candidate.column : undefined,
            end_line: isPositiveInteger(candidate.end_line) ? candidate.end_line : undefined,
            end_column: isPositiveInteger(candidate.end_column) ? candidate.end_column : undefined,
            evidence: typeof candidate.evidence === "string" ? candidate.evidence : undefined,
            confidence: typeof candidate.confidence === "number" ? candidate.confidence : undefined
        });
    }
    return out;
}
|
|
66
|
+
// Numeric ranking used to order severities (higher = more severe).
const SEVERITY_ORDER = {
    error: 3,
    warn: 2,
    info: 1
};
/**
 * Returns a new array sorted by file path (locale order), then line
 * number ascending, then severity descending (error > warn > info).
 * The input array is not mutated.
 */
function sortDiagnostics(input) {
    const copy = [...input];
    copy.sort((left, right) => {
        const byFile = left.file.localeCompare(right.file);
        if (byFile !== 0) {
            return byFile;
        }
        const byLine = left.line - right.line;
        if (byLine !== 0) {
            return byLine;
        }
        return SEVERITY_ORDER[right.severity] - SEVERITY_ORDER[left.severity];
    });
    return copy;
}
|
|
83
|
+
/**
 * Decides whether diagnostics should fail the run for the given
 * --fail-on threshold: "never" never blocks, "warn" blocks on warn or
 * error, and any other threshold ("error") blocks only on error.
 */
function hasBlockingDiagnostic(diagnostics, threshold) {
    switch (threshold) {
        case "never":
            return false;
        case "warn":
            return diagnostics.some((d) => d.severity === "warn" || d.severity === "error");
        default:
            return diagnostics.some((d) => d.severity === "error");
    }
}
|
package/dist/filter.js
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.extractChangedFilesFromDiff = extractChangedFilesFromDiff;
|
|
7
|
+
exports.getRuleCandidateFiles = getRuleCandidateFiles;
|
|
8
|
+
exports.shouldRunRule = shouldRunRule;
|
|
9
|
+
exports.buildScopedDiff = buildScopedDiff;
|
|
10
|
+
exports.buildRulePrompt = buildRulePrompt;
|
|
11
|
+
const picomatch_1 = __importDefault(require("picomatch"));
|
|
12
|
+
/**
 * Strips surrounding double quotes from a git diff path and unescapes
 * \" and \\ sequences; unquoted paths pass through unchanged.
 */
function unquoteDiffPath(raw) {
    const isQuoted = raw.length >= 2 && raw.startsWith("\"") && raw.endsWith("\"");
    if (!isQuoted) {
        return raw;
    }
    return raw.slice(1, -1).replace(/\\"/g, "\"").replace(/\\\\/g, "\\");
}
|
|
18
|
+
/**
 * Parses a `diff --git a/<path> b/<path>` header line, accepting both
 * plain paths and git's quoted form ("a/<path with specials>").
 * NOTE(review): quoted captures exclude the surrounding quotes, so
 * unquoteDiffPath leaves their escapes untouched — confirm intended.
 * @returns { aPath, bPath } or undefined when the line does not match.
 */
function parseDiffGitHeader(line) {
    const headerPattern = /^diff --git (?:"a\/((?:[^"\\]|\\.)+)"|a\/(\S+)) (?:"b\/((?:[^"\\]|\\.)+)"|b\/(\S+))$/;
    const match = headerPattern.exec(line);
    if (match === null) {
        return undefined;
    }
    const [, aQuoted, aPlain, bQuoted, bPlain] = match;
    const aRaw = aQuoted ?? aPlain;
    const bRaw = bQuoted ?? bPlain;
    if (!aRaw || !bRaw) {
        return undefined;
    }
    return {
        aPath: unquoteDiffPath(aRaw),
        bPath: unquoteDiffPath(bRaw)
    };
}
|
|
33
|
+
/**
 * Collects the post-change (b-side) path of every file touched in a
 * unified diff, skipping /dev/null targets and de-duplicating.
 */
function extractChangedFilesFromDiff(diff) {
    const files = new Set();
    for (const line of diff.split("\n")) {
        if (!line.startsWith("diff --git ")) {
            continue;
        }
        const header = parseDiffGitHeader(line);
        if (header && header.bPath !== "/dev/null") {
            files.add(header.bPath);
        }
    }
    return [...files];
}
|
|
51
|
+
// True when filePath matches at least one picomatch glob pattern.
function matchesAnyGlob(filePath, globs) {
    for (const glob of globs) {
        if ((0, picomatch_1.default)(glob)(filePath)) {
            return true;
        }
    }
    return false;
}
|
|
54
|
+
/**
 * Tests the diff text against a list of user-supplied regex sources,
 * each compiled with the "m" (multiline) flag.
 *
 * Bug fix: an invalid pattern previously aborted the whole scan by
 * returning false immediately; it is now skipped so later valid
 * patterns are still evaluated.
 *
 * @returns true when any valid pattern matches the diff.
 */
function matchesAnyRegex(diff, regexes) {
    for (const candidate of regexes) {
        try {
            const pattern = new RegExp(candidate, "m");
            if (pattern.test(diff)) {
                return true;
            }
        }
        catch {
            // Invalid regex source: ignore this pattern, keep checking the rest.
            continue;
        }
    }
    return false;
}
|
|
68
|
+
/**
 * Applies a rule's include/exclude globs to the changed-file list.
 * When include_globs is set, files must match at least one include
 * (an empty include result short-circuits to []); exclude_globs then
 * remove any matching files.
 */
function getRuleCandidateFiles(rule, changedFiles) {
    let candidates = changedFiles;
    const includes = rule.include_globs;
    if (includes && includes.length > 0) {
        candidates = changedFiles.filter((filePath) => matchesAnyGlob(filePath, includes));
        if (candidates.length === 0) {
            return [];
        }
    }
    const excludes = rule.exclude_globs;
    if (excludes && excludes.length > 0) {
        candidates = candidates.filter((filePath) => !matchesAnyGlob(filePath, excludes));
    }
    return candidates;
}
|
|
81
|
+
/**
 * A rule runs only when it has at least one candidate file and, if it
 * declares diff_regex patterns, at least one of them matches the diff.
 */
function shouldRunRule(rule, changedFiles, diff) {
    if (getRuleCandidateFiles(rule, changedFiles).length === 0) {
        return false;
    }
    const patterns = rule.diff_regex;
    if (!patterns || patterns.length === 0) {
        return true;
    }
    return matchesAnyRegex(diff, patterns);
}
|
|
91
|
+
/**
 * Splits a unified diff into per-file chunks keyed by each chunk's
 * b-side path. Any preamble before the first "diff --git" header — or
 * a header that fails to parse — yields a chunk whose file is "".
 */
function splitDiffIntoFileChunks(diff) {
    const chunks = [];
    let bufferedLines = [];
    let bufferedFile = "";
    const flush = () => {
        if (bufferedLines.length > 0) {
            chunks.push({ file: bufferedFile, chunk: bufferedLines.join("\n") });
            bufferedLines = [];
            bufferedFile = "";
        }
    };
    for (const line of diff.split("\n")) {
        if (line.startsWith("diff --git ")) {
            flush();
            const header = parseDiffGitHeader(line);
            bufferedFile = header ? header.bPath : "";
        }
        bufferedLines.push(line);
    }
    flush();
    return chunks;
}
|
|
120
|
+
/**
 * Reduces the full diff to just the chunks for the rule's candidate
 * files. Falls back to the full diff when the rule has no candidates
 * or when scoping would produce an empty diff.
 */
function buildScopedDiff(rule, fullDiff, changedFiles) {
    const candidates = new Set(getRuleCandidateFiles(rule, changedFiles));
    if (candidates.size === 0) {
        return fullDiff;
    }
    const parts = [];
    for (const { file, chunk } of splitDiffIntoFileChunks(fullDiff)) {
        if (file !== "" && candidates.has(file)) {
            parts.push(chunk);
        }
    }
    const scoped = parts.join("\n");
    return scoped.trim() === "" ? fullDiff : scoped;
}
|
|
132
|
+
/**
 * Assembles the prompt sent to the backend for one rule: a rule header,
 * the (scoped) diff, then the rule's own instructions.
 */
function buildRulePrompt(rule, diff) {
    const header = [
        `RULE_ID: ${rule.id}`,
        `RULE_TITLE: ${rule.title}`,
        `SEVERITY_DEFAULT: ${rule.effectiveSeverity}`
    ];
    const body = ["", "DIFF:", diff, "", "INSTRUCTIONS:", rule.prompt];
    return [...header, ...body].join("\n");
}
|
package/dist/git.js
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.getGitDiff = getGitDiff;
|
|
4
|
+
exports.getLocalBranchDiff = getLocalBranchDiff;
|
|
5
|
+
const node_child_process_1 = require("node:child_process");
|
|
6
|
+
const node_os_1 = require("node:os");
|
|
7
|
+
/**
 * Spawns `git args...` with stdin ignored, buffering stdout/stderr.
 *
 * Resolves with { code, stdout, stderr } when the exit code is in
 * `okExitCodes` (default [0]); rejects on spawn failure or any other
 * exit code. A null exit code (killed by signal) is treated as 1.
 */
function runGitCommand(args, okExitCodes = [0]) {
    return new Promise((resolve, reject) => {
        const child = (0, node_child_process_1.spawn)("git", args, {
            stdio: ["ignore", "pipe", "pipe"]
        });
        let stdout = "";
        let stderr = "";
        child.stdout.on("data", (chunk) => {
            stdout += String(chunk);
        });
        child.stderr.on("data", (chunk) => {
            stderr += String(chunk);
        });
        // "error" fires when git itself cannot be spawned.
        child.on("error", (error) => reject(error));
        child.on("close", (code) => {
            // Signal-terminated children report code null; map to failure.
            const exitCode = code ?? 1;
            if (!okExitCodes.includes(exitCode)) {
                reject(new Error(`git ${args.join(" ")} failed with code ${exitCode}: ${stderr.trim()}`));
                return;
            }
            resolve({
                code: exitCode,
                stdout,
                stderr
            });
        });
    });
}
|
|
35
|
+
/**
 * Runs `git diff <base> <head>` and returns its stdout (unified diff).
 */
async function getGitDiff(base, head) {
    const { stdout } = await runGitCommand(["diff", base, head]);
    return stdout;
}
|
|
39
|
+
// True when a git failure message indicates that a ref simply does not exist
// (as opposed to a genuine git error), so the caller can try the next
// candidate ref instead of aborting.
function isMissingRefError(message) {
    const missingRefPattern = /(not a valid object name|unknown revision|bad revision|no upstream configured|no upstream branch)/i;
    return missingRefPattern.test(message);
}
|
|
42
|
+
// Pick the base commit for the implicit local diff: the merge-base of HEAD
// and the first candidate ref that exists, preferring the configured
// upstream, then origin/main, then a local main branch. Falls back to
// "HEAD" (which yields an empty tracked diff) when none resolve.
async function resolveLocalComparisonBase() {
    const candidates = ["@{upstream}", "origin/main", "main"];
    for (const candidate of candidates) {
        try {
            const result = await runGitCommand(["merge-base", "HEAD", candidate]);
            const mergeBase = result.stdout.trim();
            if (mergeBase !== "") {
                return mergeBase;
            }
        }
        catch (error) {
            const message = error instanceof Error ? error.message : String(error);
            // Only swallow "ref does not exist" failures; any other git
            // error is real and must surface to the caller.
            if (!isMissingRefError(message)) {
                throw error;
            }
            continue;
        }
    }
    return "HEAD";
}
|
|
62
|
+
// List untracked (but not gitignored) paths in the working tree.
async function getUntrackedFiles() {
    const { stdout } = await runGitCommand(["ls-files", "--others", "--exclude-standard"]);
    const paths = [];
    for (const rawLine of stdout.split("\n")) {
        const trimmed = rawLine.trim();
        if (trimmed.length > 0) {
            paths.push(trimmed);
        }
    }
    return paths;
}
|
|
69
|
+
// Produce an "added file" style patch for an untracked file by diffing it
// against the platform null device. git exits with 1 when differences exist,
// so both 0 and 1 are accepted as success.
async function getNoIndexDiffForFile(filePath) {
    const acceptableExitCodes = [0, 1];
    const { stdout } = await runGitCommand(["diff", "--no-index", "--", node_os_1.devNull, filePath], acceptableExitCodes);
    return stdout;
}
|
|
73
|
+
// Build the implicit local diff: tracked changes versus the resolved base
// ref, plus an "added file" patch for every untracked file, joined into a
// single patch string.
async function getLocalBranchDiff() {
    const base = await resolveLocalComparisonBase();
    const trackedDiff = await runGitCommand(["diff", base]);
    const untrackedFiles = await getUntrackedFiles();
    // The per-file no-index diffs are independent of each other, so run them
    // in parallel instead of awaiting one git invocation at a time.
    // Promise.all preserves input order, keeping output deterministic.
    const fileDiffs = await Promise.all(untrackedFiles.map((filePath) => getNoIndexDiffForFile(filePath)));
    const untrackedDiffChunks = fileDiffs.filter((fileDiff) => fileDiff.trim() !== "");
    return [trackedDiff.stdout, ...untrackedDiffChunks].filter((chunk) => chunk !== "").join("\n");
}
|
package/dist/main.js
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.runSemlint = runSemlint;
|
|
7
|
+
const node_path_1 = __importDefault(require("node:path"));
|
|
8
|
+
const backend_1 = require("./backend");
|
|
9
|
+
const config_1 = require("./config");
|
|
10
|
+
const diagnostics_1 = require("./diagnostics");
|
|
11
|
+
const filter_1 = require("./filter");
|
|
12
|
+
const git_1 = require("./git");
|
|
13
|
+
const reporter_1 = require("./reporter");
|
|
14
|
+
const rules_1 = require("./rules");
|
|
15
|
+
const VERSION = "0.1.0";
|
|
16
|
+
// Emit a "[debug] ..." line on stderr, but only when debug mode is on.
function debugLog(enabled, message) {
    if (!enabled) {
        return;
    }
    process.stderr.write(`[debug] ${message}\n`);
}
|
|
21
|
+
// Run a synchronous action, debug-log how long it took, and return its result.
function timed(enabled, label, action) {
    const start = Date.now();
    const value = action();
    const elapsedMs = Date.now() - start;
    debugLog(enabled, `${label} in ${elapsedMs}ms`);
    return value;
}
|
|
27
|
+
// Await an async action, debug-log how long it took, and return its result.
async function timedAsync(enabled, label, action) {
    const start = Date.now();
    const value = await action();
    const elapsedMs = Date.now() - start;
    debugLog(enabled, `${label} in ${elapsedMs}ms`);
    return value;
}
|
|
33
|
+
// Render the batch-mode prompt: global instructions, every runnable rule's
// block separated by "---" dividers, then the shared DIFF payload.
function buildBatchPrompt(rules, diff) {
    const blocks = [];
    for (const rule of rules) {
        blocks.push([
            `RULE_ID: ${rule.id}`,
            `RULE_TITLE: ${rule.title}`,
            `SEVERITY_DEFAULT: ${rule.effectiveSeverity}`,
            "INSTRUCTIONS:",
            rule.prompt
        ].join("\n"));
    }
    const ruleBlocks = blocks.join("\n\n---\n\n");
    const promptLines = [
        "BATCH_MODE: true",
        "Evaluate all rules below against the DIFF in one pass.",
        "Return valid JSON only with shape {\"diagnostics\":[...]}",
        "Each diagnostic must include: rule_id, severity, message, file, line.",
        "",
        "RULES:",
        ruleBlocks,
        "",
        "DIFF:",
        diff
    ];
    return promptLines.join("\n");
}
|
|
56
|
+
// Orchestrate one full lint run: load config and rules, compute the diff,
// filter runnable rules, execute the backend (batch or parallel per-rule),
// render output, and map the outcome to an exit code.
// Exit codes: 0 = clean, 1 = blocking diagnostics found, 2 = backend or
// internal error.
async function runSemlint(options) {
    const startedAt = Date.now();
    try {
        const config = timed(options.debug, "Loaded effective config", () => (0, config_1.loadEffectiveConfig)(options));
        // Rules are always read from ./rules relative to the working directory.
        const rulesDir = node_path_1.default.join(process.cwd(), "rules");
        const rules = timed(config.debug, "Loaded and validated rules", () => (0, rules_1.loadRules)(rulesDir, config.rulesDisable, config.severityOverrides));
        debugLog(config.debug, `Loaded ${rules.length} rule(s)`);
        debugLog(config.debug, `Rule IDs: ${rules.map((rule) => rule.id).join(", ")}`);
        // Without explicit --base/--head, diff the local branch state instead
        // of two fixed refs.
        const useLocalBranchDiff = !options.base && !options.head;
        const diff = await timedAsync(config.debug, "Computed git diff", () => useLocalBranchDiff ? (0, git_1.getLocalBranchDiff)() : (0, git_1.getGitDiff)(config.base, config.head));
        const changedFiles = timed(config.debug, "Parsed changed files from diff", () => (0, filter_1.extractChangedFilesFromDiff)(diff));
        debugLog(config.debug, useLocalBranchDiff
            ? "Using local branch diff (tracked + staged + unstaged + untracked)"
            : `Using explicit ref diff (${config.base}..${config.head})`);
        debugLog(config.debug, `Detected ${changedFiles.length} changed file(s)`);
        const backend = timed(config.debug, "Initialized backend runner", () => (0, backend_1.createBackendRunner)(config));
        // Keep only rules whose glob/regex filters match the changed files.
        const runnableRules = rules.filter((rule) => {
            const filterStartedAt = Date.now();
            const shouldRun = (0, filter_1.shouldRunRule)(rule, changedFiles, diff);
            debugLog(config.debug, `Rule ${rule.id}: filter check in ${Date.now() - filterStartedAt}ms`);
            if (!shouldRun) {
                debugLog(config.debug, `Skipping rule ${rule.id}: filters did not match`);
                return false;
            }
            return true;
        });
        const diagnostics = [];
        const rulesRun = runnableRules.length;
        let backendErrors = 0;
        if (config.batchMode && runnableRules.length > 0) {
            // Batch mode: one backend invocation covering every runnable rule.
            debugLog(config.debug, `Running ${runnableRules.length} rule(s) in batch mode`);
            const combinedDiff = timed(config.debug, "Batch: combined scoped diff build", () => runnableRules
                .map((rule) => (0, filter_1.buildScopedDiff)(rule, diff, changedFiles))
                .filter((chunk) => chunk.trim() !== "")
                .join("\n"));
            // Fall back to the full diff when every scoped diff was empty.
            const batchPrompt = timed(config.debug, "Batch: prompt build", () => buildBatchPrompt(runnableRules, combinedDiff || diff));
            try {
                const batchResult = await timedAsync(config.debug, "Batch: backend run", () => backend.runPrompt({
                    label: "Batch",
                    prompt: batchPrompt,
                    timeoutMs: config.timeoutMs
                }));
                // Group the backend's flat diagnostics list by rule_id,
                // dropping any entry without a usable rule_id string.
                const groupedByRule = new Map();
                for (const diagnostic of batchResult.diagnostics) {
                    if (typeof diagnostic === "object" &&
                        diagnostic !== null &&
                        !Array.isArray(diagnostic) &&
                        typeof diagnostic.rule_id === "string") {
                        const ruleId = diagnostic.rule_id;
                        const current = groupedByRule.get(ruleId) ?? [];
                        current.push(diagnostic);
                        groupedByRule.set(ruleId, current);
                    }
                    else {
                        debugLog(config.debug, "Batch: dropped diagnostic without valid rule_id");
                    }
                }
                // Diagnostics for rule ids the backend invented are only
                // logged here; the loop below never reads them.
                const validRuleIds = new Set(runnableRules.map((rule) => rule.id));
                for (const [ruleId] of groupedByRule) {
                    if (!validRuleIds.has(ruleId)) {
                        debugLog(config.debug, `Batch: dropped diagnostic for unknown rule_id ${ruleId}`);
                    }
                }
                for (const rule of runnableRules) {
                    const normalized = timed(config.debug, `Batch: diagnostics normalization for ${rule.id}`, () => (0, diagnostics_1.normalizeDiagnostics)(rule.id, groupedByRule.get(rule.id) ?? [], config.debug));
                    diagnostics.push(...normalized);
                }
            }
            catch (error) {
                // A batch failure is counted once, regardless of rule count.
                backendErrors += 1;
                const message = error instanceof Error ? error.message : String(error);
                debugLog(config.debug, `Batch backend error: ${message}`);
            }
        }
        else {
            // Per-rule mode: one backend invocation per rule, run in parallel.
            debugLog(config.debug, `Running ${runnableRules.length} rule(s) in parallel`);
            const runResults = await Promise.all(runnableRules.map(async (rule) => {
                let backendError = false;
                let normalized = [];
                const ruleStartedAt = Date.now();
                debugLog(config.debug, `Rule ${rule.id}: started`);
                const scopedDiff = timed(config.debug, `Rule ${rule.id}: scoped diff build`, () => (0, filter_1.buildScopedDiff)(rule, diff, changedFiles));
                const prompt = timed(config.debug, `Rule ${rule.id}: prompt build`, () => (0, filter_1.buildRulePrompt)(rule, scopedDiff));
                try {
                    const result = await timedAsync(config.debug, `Rule ${rule.id}: backend run`, () => backend.runRule({
                        ruleId: rule.id,
                        prompt,
                        timeoutMs: config.timeoutMs
                    }));
                    normalized = timed(config.debug, `Rule ${rule.id}: diagnostics normalization`, () => (0, diagnostics_1.normalizeDiagnostics)(rule.id, result.diagnostics, config.debug));
                }
                catch (error) {
                    // Per-rule failures are recorded but never reject the
                    // Promise.all, so sibling rules still complete.
                    backendError = true;
                    const message = error instanceof Error ? error.message : String(error);
                    debugLog(config.debug, `Backend error for rule ${rule.id}: ${message}`);
                }
                debugLog(config.debug, `Rule ${rule.id}: finished in ${Date.now() - ruleStartedAt}ms`);
                return { backendError, normalized };
            }));
            for (const result of runResults) {
                if (result.backendError) {
                    backendErrors += 1;
                }
                diagnostics.push(...result.normalized);
            }
        }
        const sorted = timed(config.debug, "Sorted diagnostics", () => (0, diagnostics_1.sortDiagnostics)(diagnostics));
        const durationMs = Date.now() - startedAt;
        const outputStartedAt = Date.now();
        if (config.format === "json") {
            process.stdout.write(`${(0, reporter_1.formatJsonOutput)(VERSION, sorted, { rulesRun, durationMs, backendErrors })}\n`);
        }
        else {
            process.stdout.write(`${(0, reporter_1.formatTextOutput)(sorted)}\n`);
        }
        debugLog(config.debug, `Rendered output in ${Date.now() - outputStartedAt}ms`);
        debugLog(config.debug, `Total run duration ${durationMs}ms`);
        // Backend failures take precedence over lint findings.
        if (backendErrors > 0) {
            return 2;
        }
        if ((0, diagnostics_1.hasBlockingDiagnostic)(sorted, config.failOn)) {
            return 1;
        }
        return 0;
    }
    catch (error) {
        // Any unexpected failure (config, git, rules) reports on stderr and
        // exits with the internal-error code.
        const message = error instanceof Error ? error.message : String(error);
        process.stderr.write(`${message}\n`);
        return 2;
    }
}
|
package/dist/reporter.js
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.formatJsonOutput = formatJsonOutput;
|
|
4
|
+
exports.formatTextOutput = formatTextOutput;
|
|
5
|
+
// Serialize the machine-readable report: tool identity, diagnostics, and run
// statistics, pretty-printed with two-space indentation.
function formatJsonOutput(version, diagnostics, stats) {
    const tool = { name: "semlint", version };
    const statsPayload = {
        rules_run: stats.rulesRun,
        duration_ms: stats.durationMs,
        backend_errors: stats.backendErrors
    };
    return JSON.stringify({ tool, diagnostics, stats: statsPayload }, null, 2);
}
|
|
20
|
+
// Render the human-readable report. Diagnostics are expected pre-sorted, so
// a file header is emitted each time the file changes, followed by one
// "line:column severity rule message" entry per diagnostic and a summary.
function formatTextOutput(diagnostics) {
    const lines = [];
    let activeFile = "";
    for (const diagnostic of diagnostics) {
        if (diagnostic.file !== activeFile) {
            // Blank separator between file groups, but not before the first.
            if (lines.length > 0) {
                lines.push("");
            }
            activeFile = diagnostic.file;
            lines.push(activeFile);
        }
        lines.push(` ${diagnostic.line}:${diagnostic.column ?? 1} ${diagnostic.severity} ${diagnostic.rule_id} ${diagnostic.message}`);
    }
    let errors = 0;
    let warnings = 0;
    for (const diagnostic of diagnostics) {
        if (diagnostic.severity === "error") {
            errors += 1;
        }
        else if (diagnostic.severity === "warn") {
            warnings += 1;
        }
    }
    if (lines.length > 0) {
        lines.push("");
    }
    lines.push(`✖ ${diagnostics.length} problems (${errors} errors, ${warnings} warnings)`);
    return lines.join("\n");
}
|
package/dist/rules.js
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.loadRules = loadRules;
|
|
7
|
+
const node_fs_1 = __importDefault(require("node:fs"));
|
|
8
|
+
const node_path_1 = __importDefault(require("node:path"));
|
|
9
|
+
const VALID_SEVERITIES = new Set(["error", "warn", "info"]);
|
|
10
|
+
// Validate that a rule field is a non-empty (after trimming) string;
// returns the original value or throws a descriptive error.
function assertNonEmptyString(value, fieldName, filePath) {
    const isValid = typeof value === "string" && value.trim() !== "";
    if (!isValid) {
        throw new Error(`Invalid rule in ${filePath}: "${fieldName}" must be a non-empty string`);
    }
    return value;
}
|
|
16
|
+
// Validate an optional string-array rule field; undefined passes through
// unchanged, anything else must be an array whose items are all strings.
function assertStringArray(value, fieldName, filePath) {
    if (value === undefined) {
        return undefined;
    }
    const isValid = Array.isArray(value) && value.every((item) => typeof item === "string");
    if (!isValid) {
        throw new Error(`Invalid rule in ${filePath}: "${fieldName}" must be an array of strings`);
    }
    return value;
}
|
|
25
|
+
// Shape-check one parsed rule JSON document and return the normalized rule.
// Throws a descriptive error naming the file and the first offending field.
function validateRuleObject(raw, filePath) {
    const isPlainObject = typeof raw === "object" && raw !== null && !Array.isArray(raw);
    if (!isPlainObject) {
        throw new Error(`Invalid rule in ${filePath}: root must be a JSON object`);
    }
    const obj = raw;
    // severity_default must be a non-empty string AND one of the known levels.
    const severity = assertNonEmptyString(obj.severity_default, "severity_default", filePath);
    if (!VALID_SEVERITIES.has(severity)) {
        throw new Error(`Invalid rule in ${filePath}: "severity_default" must be one of error|warn|info`);
    }
    const id = assertNonEmptyString(obj.id, "id", filePath);
    const title = assertNonEmptyString(obj.title, "title", filePath);
    const prompt = assertNonEmptyString(obj.prompt, "prompt", filePath);
    return {
        id,
        title,
        severity_default: severity,
        prompt,
        include_globs: assertStringArray(obj.include_globs, "include_globs", filePath),
        exclude_globs: assertStringArray(obj.exclude_globs, "exclude_globs", filePath),
        diff_regex: assertStringArray(obj.diff_regex, "diff_regex", filePath)
    };
}
|
|
44
|
+
// Load, validate, and normalize every *.json rule file in `rulesDir`.
// - A missing directory yields no rules; a non-directory path is an error.
// - Files are processed in deterministic (locale-sorted) order.
// - Duplicate ids are rejected (even for disabled rules), disabled ids are
//   skipped, and per-rule severity overrides are applied and validated on
//   top of each rule's declared default.
// Returns the loaded rules sorted by id.
function loadRules(rulesDir, disabledRuleIds, severityOverrides) {
    if (!node_fs_1.default.existsSync(rulesDir)) {
        return [];
    }
    if (!node_fs_1.default.statSync(rulesDir).isDirectory()) {
        throw new Error(`Rules path is not a directory: ${rulesDir}`);
    }
    const entries = node_fs_1.default
        .readdirSync(rulesDir)
        .filter((name) => name.endsWith(".json"))
        .sort((a, b) => a.localeCompare(b));
    const seenIds = new Set();
    const disabled = new Set(disabledRuleIds);
    const loaded = [];
    for (const fileName of entries) {
        const filePath = node_path_1.default.join(rulesDir, fileName);
        let parsed;
        try {
            parsed = JSON.parse(node_fs_1.default.readFileSync(filePath, "utf8"));
        }
        catch (error) {
            const message = error instanceof Error ? error.message : String(error);
            throw new Error(`Failed to parse rule JSON in ${filePath}: ${message}`);
        }
        const validated = validateRuleObject(parsed, filePath);
        if (seenIds.has(validated.id)) {
            throw new Error(`Duplicate rule id detected: ${validated.id}`);
        }
        seenIds.add(validated.id);
        if (disabled.has(validated.id)) {
            continue;
        }
        // Use an own-property lookup so keys inherited from Object.prototype
        // (e.g. a rule id of "constructor") can never leak in as "overrides".
        const overrideSeverity = Object.hasOwn(severityOverrides, validated.id)
            ? severityOverrides[validated.id]
            : undefined;
        const effectiveSeverity = overrideSeverity ?? validated.severity_default;
        if (!VALID_SEVERITIES.has(effectiveSeverity)) {
            throw new Error(`Invalid severity override for rule ${validated.id}`);
        }
        loaded.push({
            ...validated,
            sourcePath: filePath,
            effectiveSeverity
        });
    }
    return loaded.sort((a, b) => a.id.localeCompare(b.id));
}
|
package/dist/types.js
ADDED
package/package.json
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "semlint-cli",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Semantic lint CLI — runs LLM-backed rules on your git diff and returns CI-friendly exit codes",
|
|
5
|
+
"type": "commonjs",
|
|
6
|
+
"main": "dist/main.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"semlint": "dist/cli.js"
|
|
9
|
+
},
|
|
10
|
+
"files": [
|
|
11
|
+
"dist",
|
|
12
|
+
"rules",
|
|
13
|
+
"README.md"
|
|
14
|
+
],
|
|
15
|
+
"scripts": {
|
|
16
|
+
"build": "tsc -p tsconfig.json",
|
|
17
|
+
"prepublishOnly": "pnpm run build",
|
|
18
|
+
"check": "node dist/cli.js check",
|
|
19
|
+
"start": "node dist/cli.js check",
|
|
20
|
+
"serve:docs": "serve docs"
|
|
21
|
+
},
|
|
22
|
+
"keywords": [
|
|
23
|
+
"lint",
|
|
24
|
+
"semantic",
|
|
25
|
+
"git",
|
|
26
|
+
"diff",
|
|
27
|
+
"ci",
|
|
28
|
+
"llm",
|
|
29
|
+
"cursor",
|
|
30
|
+
"code-review"
|
|
31
|
+
],
|
|
32
|
+
"author": "bduron",
|
|
33
|
+
"license": "ISC",
|
|
34
|
+
"repository": {
|
|
35
|
+
"type": "git",
|
|
36
|
+
"url": "git+https://github.com/bduron/semlint.git"
|
|
37
|
+
},
|
|
38
|
+
"engines": {
|
|
39
|
+
"node": ">=18"
|
|
40
|
+
},
|
|
41
|
+
"packageManager": "pnpm@10.29.2",
|
|
42
|
+
"dependencies": {
|
|
43
|
+
"picomatch": "^4.0.3"
|
|
44
|
+
},
|
|
45
|
+
"devDependencies": {
|
|
46
|
+
"@types/node": "^25.3.0",
|
|
47
|
+
"@types/picomatch": "^4.0.2",
|
|
48
|
+
"serve": "^14.2.5",
|
|
49
|
+
"typescript": "^5.9.3"
|
|
50
|
+
}
|
|
51
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "SEMLINT_NAMING_001",
|
|
3
|
+
"title": "Ambient naming convention consistency",
|
|
4
|
+
"severity_default": "warn",
|
|
5
|
+
"include_globs": ["src/**/*.ts"],
|
|
6
|
+
"exclude_globs": ["**/*.test.ts", "**/*.spec.ts"],
|
|
7
|
+
"diff_regex": [
|
|
8
|
+
"^[+-].*\\b(const|let|var|function|class|interface|type|enum)\\b",
|
|
9
|
+
"^[+-].*\\b[A-Za-z_][A-Za-z0-9_]*\\b"
|
|
10
|
+
],
|
|
11
|
+
"prompt": "You are Semlint. Review ONLY the modified code in the provided DIFF and verify naming is consistent with the ambient naming conventions already used in surrounding code. Focus on identifier styles such as camelCase for variables/functions, PascalCase for classes/types/interfaces, and ALL_CAPS for constants where that convention is clearly established nearby. Flag only clear inconsistencies in newly added or renamed identifiers. Ignore untouched legacy naming unless directly impacted by the change. Return valid JSON only with shape {\"diagnostics\":[...]} and each diagnostic must include: rule_id, severity, message, file, line."
|
|
12
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "SEMLINT_PATTERN_002",
|
|
3
|
+
"title": "Ambient pattern is respected",
|
|
4
|
+
"severity_default": "warn",
|
|
5
|
+
"include_globs": ["src/**/*.ts"],
|
|
6
|
+
"exclude_globs": ["**/*.test.ts", "**/*.spec.ts"],
|
|
7
|
+
"diff_regex": [
|
|
8
|
+
"^[+-].*\\b(async|await|Promise|try|catch|throw|switch|map|filter|reduce|forEach)\\b",
|
|
9
|
+
"^[+-].*\\b(import|export|class|interface|type|function|return)\\b"
|
|
10
|
+
],
|
|
11
|
+
"prompt": "You are Semlint. Review ONLY the modified code in the provided DIFF and check whether ambient implementation patterns are respected. Compare new or changed code against nearby established patterns for control flow, async handling, error propagation, data transformation, module boundaries, and function/class structure. Flag clear regressions where the proposed change deviates from consistent local patterns without obvious justification. Ignore untouched legacy code and acceptable intentional improvements. Return valid JSON only with shape {\"diagnostics\":[...]} and each diagnostic must include: rule_id, severity, message, file, line."
|
|
12
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "SEMLINT_SWE_003",
|
|
3
|
+
"title": "Obvious SWE mistakes",
|
|
4
|
+
"severity_default": "warn",
|
|
5
|
+
"include_globs": ["src/**/*.ts"],
|
|
6
|
+
"exclude_globs": ["**/*.test.ts", "**/*.spec.ts"],
|
|
7
|
+
"diff_regex": [
|
|
8
|
+
"^[+-].*\\b(any|as\\s+any|TODO|FIXME|console\\.log|@ts-ignore|throw\\s+new\\s+Error|catch\\s*\\()\\b",
|
|
9
|
+
"^[+-].*\\b(if|else|switch|return|await|Promise|map|forEach|reduce)\\b"
|
|
10
|
+
],
|
|
11
|
+
"prompt": "You are Semlint. Review ONLY the modified code in the DIFF and find obvious software-engineering mistakes in the proposed change. Focus on clear issues such as dead code, side-effect misuse, swallowed errors, unsafe any-casts, accidental debug leftovers, contradictory conditions, and obvious maintainability hazards that are likely unintended (these are only examples, there are many more). Do not nitpick style or architecture unless the issue is clearly harmful. Report only high-signal findings tied to changed lines. Return valid JSON only with shape {\"diagnostics\":[...]} and each diagnostic must include: rule_id, severity, message, file, line."
|
|
12
|
+
}
|