diff-hound 1.0.2 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +105 -25
- package/dist/cli/index.js +14 -3
- package/dist/config/index.js +3 -3
- package/dist/config/index.test.d.ts +1 -0
- package/dist/config/index.test.js +330 -0
- package/dist/core/parseUnifiedDiff.test.d.ts +1 -0
- package/dist/core/parseUnifiedDiff.test.js +310 -0
- package/dist/index.js +17 -9
- package/dist/models/base.d.ts +74 -0
- package/dist/models/base.js +236 -0
- package/dist/models/base.test.d.ts +1 -0
- package/dist/models/base.test.js +241 -0
- package/dist/models/index.d.ts +6 -2
- package/dist/models/index.js +9 -2
- package/dist/models/ollama.d.ts +28 -0
- package/dist/models/ollama.js +88 -0
- package/dist/models/ollama.test.d.ts +1 -0
- package/dist/models/ollama.test.js +235 -0
- package/dist/models/openai.d.ts +14 -17
- package/dist/models/openai.js +41 -125
- package/dist/models/openai.test.d.ts +1 -0
- package/dist/models/openai.test.js +209 -0
- package/dist/platforms/index.d.ts +3 -2
- package/dist/platforms/index.js +8 -1
- package/dist/platforms/local.d.ts +41 -0
- package/dist/platforms/local.js +247 -0
- package/dist/schemas/review-response.d.ts +37 -0
- package/dist/schemas/review-response.js +39 -0
- package/dist/schemas/review-response.json +68 -0
- package/dist/schemas/validate.d.ts +27 -0
- package/dist/schemas/validate.js +108 -0
- package/dist/schemas/validate.test.d.ts +1 -0
- package/dist/schemas/validate.test.js +484 -0
- package/dist/types/index.d.ts +7 -2
- package/package.json +12 -3
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.LocalPlatform = void 0;
|
|
7
|
+
const child_process_1 = require("child_process");
|
|
8
|
+
const util_1 = require("util");
|
|
9
|
+
const fs_1 = __importDefault(require("fs"));
|
|
10
|
+
const path_1 = __importDefault(require("path"));
|
|
11
|
+
const execFileAsync = (0, util_1.promisify)(child_process_1.execFile);
|
|
12
|
+
/**
 * Local platform adapter — reviews local git diffs without any remote API calls.
 * Always operates in dry-run mode (output to stdout).
 *
 * Exposes getPullRequests / getPullRequestDiff / postComment / hasAICommented
 * so it can stand in for a remote platform adapter. It reads changes either
 * from `git diff base...head` in the current working directory or from a
 * unified-diff patch file supplied via `config.patch`.
 */
class LocalPlatform {
    /**
     * @param config - `{ base?, head?, patch? }`. `base` defaults to "" (a
     *   real default is resolved later by create()); `head` defaults to "HEAD".
     * NOTE: no validation happens here — use the static create() factory,
     * which validates the patch file / git refs before returning an instance.
     */
    constructor(config) {
        this.repoPath = process.cwd();
        this.base = config.base || "";
        this.head = config.head || "HEAD";
        this.patchFile = config.patch;
    }
    /**
     * Async factory that builds and validates a LocalPlatform.
     * Patch mode: resolves the patch path and requires the file to exist.
     * Git mode: requires the cwd to be a git repository, resolves a default
     * base ref when none was supplied, and verifies both refs exist.
     * Throws an Error with a user-facing message on any validation failure.
     */
    static async create(config) {
        const platform = new LocalPlatform(config);
        // If using a patch file, validate it exists
        if (platform.patchFile) {
            const patchPath = path_1.default.resolve(platform.patchFile);
            if (!fs_1.default.existsSync(patchPath)) {
                throw new Error(`Patch file not found: ${patchPath}`);
            }
            // Store the absolute path so later reads don't depend on cwd.
            platform.patchFile = patchPath;
            return platform;
        }
        // Validate we're in a git repo
        try {
            await execFileAsync("git", ["rev-parse", "--git-dir"], {
                cwd: platform.repoPath,
            });
        }
        catch {
            throw new Error(`Not a git repository: ${platform.repoPath}. ` +
                "Run this command from inside a git repository.");
        }
        // Resolve base ref if not provided
        if (!platform.base) {
            platform.base = await platform.resolveDefaultBase();
        }
        // Validate refs exist
        await platform.validateRef(platform.base);
        await platform.validateRef(platform.head);
        return platform;
    }
    /**
     * Determine a sensible default base ref:
     * 1. If current branch has an upstream, use the merge base with it
     * 2. Otherwise, use HEAD~1
     */
    async resolveDefaultBase() {
        try {
            // Try to get the upstream tracking branch
            const { stdout } = await execFileAsync("git", ["rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{upstream}"], { cwd: this.repoPath });
            const upstream = stdout.trim();
            if (upstream) {
                // Use merge-base for a cleaner diff
                const { stdout: mergeBase } = await execFileAsync("git", ["merge-base", upstream, this.head], { cwd: this.repoPath });
                return mergeBase.trim();
            }
        }
        catch {
            // No upstream — fall through
        }
        // Fallback: diff against previous commit
        return "HEAD~1";
    }
    /**
     * Verify that `ref` resolves in this repository via `git rev-parse --verify`.
     * Throws a user-facing Error when the ref does not exist.
     */
    async validateRef(ref) {
        try {
            await execFileAsync("git", ["rev-parse", "--verify", ref], {
                cwd: this.repoPath,
            });
        }
        catch {
            throw new Error(`Invalid git ref: '${ref}'. Make sure it exists in this repository.`);
        }
    }
    /**
     * Returns a single synthetic PullRequest from local git metadata.
     * Patch mode returns a fixed "local-patch" entry; git mode fills in
     * branch/title/author from the repository, falling back to safe defaults
     * ("unknown" / "Local diff: …" / "local") when a git command fails.
     */
    async getPullRequests(_repo) {
        if (this.patchFile) {
            return [
                {
                    id: "local-patch",
                    number: 0,
                    title: `Patch: ${path_1.default.basename(this.patchFile)}`,
                    author: "local",
                    branch: "patch-file",
                    baseBranch: "N/A",
                    updatedAt: new Date(),
                },
            ];
        }
        // Get current branch name
        let branch = "unknown";
        try {
            const { stdout } = await execFileAsync("git", ["rev-parse", "--abbrev-ref", "HEAD"], { cwd: this.repoPath });
            branch = stdout.trim();
        }
        catch {
            // Detached HEAD — use commit hash
            try {
                const { stdout } = await execFileAsync("git", ["rev-parse", "--short", "HEAD"], { cwd: this.repoPath });
                branch = stdout.trim();
            }
            catch {
                // ignore
            }
        }
        // Get a short description from the latest commit
        let title = `Local diff: ${this.base}...${this.head}`;
        try {
            const { stdout } = await execFileAsync("git", ["log", "--format=%s", "-1", this.head], { cwd: this.repoPath });
            title = stdout.trim() || title;
        }
        catch {
            // ignore
        }
        // Get author
        let author = "local";
        try {
            const { stdout } = await execFileAsync("git", ["config", "user.name"], { cwd: this.repoPath });
            author = stdout.trim() || author;
        }
        catch {
            // ignore
        }
        return [
            {
                id: "local",
                number: 0,
                title,
                author,
                branch,
                baseBranch: this.base,
                updatedAt: new Date(),
            },
        ];
    }
    /**
     * Get file changes by running `git diff` or reading a patch file.
     * Returns [] when the diff is empty/whitespace-only.
     */
    async getPullRequestDiff(_repo, _prId) {
        let diffOutput;
        if (this.patchFile) {
            diffOutput = fs_1.default.readFileSync(this.patchFile, "utf-8");
        }
        else {
            // Triple-dot range: diffs head against the merge base with base,
            // i.e. only the changes introduced on the head side.
            // maxBuffer raised to 10 MiB — default execFile buffer is too small
            // for large diffs.
            const { stdout } = await execFileAsync("git", ["diff", `${this.base}...${this.head}`, "--unified=3"], { cwd: this.repoPath, maxBuffer: 10 * 1024 * 1024 });
            diffOutput = stdout;
        }
        if (!diffOutput.trim()) {
            return [];
        }
        return this.parseGitDiff(diffOutput);
    }
    /**
     * Parse raw `git diff` output into FileChange objects.
     * This parses the full unified diff format including file headers.
     */
    parseGitDiff(diffOutput) {
        const files = [];
        // Split on diff headers: "diff --git a/path b/path"
        const fileDiffs = diffOutput.split(/^diff --git /m).filter(Boolean);
        for (const fileDiff of fileDiffs) {
            const lines = fileDiff.split("\n");
            // Parse the file paths from the first line: "a/path b/path"
            // NOTE(review): this regex assumes unquoted paths; entries that git
            // quotes (paths containing spaces or special characters) won't
            // match and are silently skipped — confirm this is acceptable.
            const headerMatch = lines[0].match(/^a\/(.+?)\s+b\/(.+)$/);
            if (!headerMatch)
                continue;
            const oldPath = headerMatch[1];
            const newPath = headerMatch[2];
            // Determine status from the diff metadata lines
            let status = "modified";
            let previousFilename;
            for (const line of lines.slice(1)) {
                if (line.startsWith("new file")) {
                    status = "added";
                    break;
                }
                else if (line.startsWith("deleted file")) {
                    status = "deleted";
                    break;
                }
                else if (line.startsWith("rename from")) {
                    status = "renamed";
                    previousFilename = oldPath;
                    break;
                }
                else if (line.startsWith("@@")) {
                    break; // No more metadata lines
                }
            }
            // Extract the patch content (from first @@ to end)
            const patchStartIdx = fileDiff.indexOf("\n@@");
            let patch;
            if (patchStartIdx !== -1) {
                patch = fileDiff.substring(patchStartIdx + 1); // +1 to skip the leading \n
            }
            // Count additions and deletions from the patch
            // (the "+++"/"---" file-header lines are excluded from the counts)
            let additions = 0;
            let deletions = 0;
            if (patch) {
                for (const patchLine of patch.split("\n")) {
                    if (patchLine.startsWith("+") && !patchLine.startsWith("+++")) {
                        additions++;
                    }
                    else if (patchLine.startsWith("-") &&
                        !patchLine.startsWith("---")) {
                        deletions++;
                    }
                }
            }
            files.push({
                filename: newPath,
                status,
                additions,
                deletions,
                patch,
                previousFilename,
            });
        }
        return files;
    }
    /**
     * In local mode, "posting" a comment means printing to stdout.
     */
    async postComment(_repo, _prId, _comment) {
        // Local mode output is handled in main — this is a no-op.
        // The main loop already handles dry-run printing.
    }
    /**
     * Always returns false — no duplicate tracking in local mode.
     */
    async hasAICommented(_repo, _prId) {
        return false;
    }
}
exports.LocalPlatform = LocalPlatform;
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
/**
 * Types for structured LLM review responses
 * Replaces free-text parsing with JSON Schema validation
 */
export type CommentSeverity = "critical" | "warning" | "suggestion" | "nitpick";
export type CommentCategory = "bug" | "security" | "performance" | "style" | "architecture" | "testing";
/**
 * A single structured comment from the AI review
 */
export interface StructuredComment {
    /** Path to the file being commented on */
    file: string;
    /** Line number in the file (1-based) */
    line: number;
    severity: CommentSeverity;
    category: CommentCategory;
    /** Confidence score from 0.0 to 1.0 */
    confidence: number;
    /** One-line summary (max 80 characters) */
    title: string;
    /** Detailed explanation of the issue */
    explanation: string;
    /** Suggested code fix — required field, but may be an empty string */
    suggestion: string;
}
/**
 * Structured review response from LLM
 */
export interface StructuredReviewResponse {
    /** Overall summary of the review — required field, but may be empty */
    summary: string;
    comments: StructuredComment[];
}
/**
 * Convert a StructuredComment to the legacy AIComment format
 * for backward compatibility with platform adapters.
 * Severity is collapsed: critical -> "error", warning -> "warning",
 * suggestion and nitpick -> "suggestion".
 */
export declare function toAIComment(comment: StructuredComment): {
    type: "inline";
    path: string;
    line: number;
    content: string;
    severity: "error" | "warning" | "suggestion";
};
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Types for structured LLM review responses
|
|
4
|
+
* Replaces free-text parsing with JSON Schema validation
|
|
5
|
+
*/
|
|
6
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
7
|
+
exports.toAIComment = toAIComment;
|
|
8
|
+
/**
 * Convert a StructuredComment to the legacy AIComment format
 * for backward compatibility with platform adapters.
 *
 * The rendered content is:
 *   "**[Category] Title** (confidence: NN%)\n\nexplanation"
 * followed, when a non-blank suggestion is present, by a fenced code block.
 */
function toAIComment(comment) {
    // Legacy severity buckets: "nitpick" collapses into "suggestion".
    const legacyBySeverity = {
        critical: "error",
        warning: "warning",
        suggestion: "suggestion",
        nitpick: "suggestion",
    };
    const confidencePercent = Math.round(comment.confidence * 100);
    // Assemble the markdown body as paragraphs joined by blank lines.
    const paragraphs = [
        `**[${capitalize(comment.category)}] ${comment.title}** (confidence: ${confidencePercent}%)`,
        comment.explanation,
    ];
    // Blank or whitespace-only suggestions are omitted entirely.
    if (comment.suggestion && comment.suggestion.trim().length > 0) {
        paragraphs.push(`**Suggestion:**\n\`\`\`\n${comment.suggestion}\n\`\`\``);
    }
    return {
        type: "inline",
        path: comment.file,
        line: comment.line,
        content: paragraphs.join("\n\n"),
        severity: legacyBySeverity[comment.severity],
    };
}
// Uppercase the first character (used to render category labels like "Bug").
function capitalize(str) {
    return str.charAt(0).toUpperCase() + str.slice(1);
}
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
{
|
|
2
|
+
"$schema": "http://json-schema.org/draft-07/schema#",
|
|
3
|
+
"title": "StructuredReviewResponse",
|
|
4
|
+
"description": "Structured output format for code review responses",
|
|
5
|
+
"type": "object",
|
|
6
|
+
"properties": {
|
|
7
|
+
"summary": {
|
|
8
|
+
"type": "string",
|
|
9
|
+
"description": "Optional overall summary of the review"
|
|
10
|
+
},
|
|
11
|
+
"comments": {
|
|
12
|
+
"type": "array",
|
|
13
|
+
"description": "List of review comments",
|
|
14
|
+
"items": {
|
|
15
|
+
"$ref": "#/definitions/StructuredComment"
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
},
|
|
19
|
+
"required": ["summary", "comments"],
|
|
20
|
+
"additionalProperties": false,
|
|
21
|
+
"definitions": {
|
|
22
|
+
"StructuredComment": {
|
|
23
|
+
"type": "object",
|
|
24
|
+
"properties": {
|
|
25
|
+
"file": {
|
|
26
|
+
"type": "string",
|
|
27
|
+
"description": "Path to the file being commented on"
|
|
28
|
+
},
|
|
29
|
+
"line": {
|
|
30
|
+
"type": "integer",
|
|
31
|
+
"description": "Line number in the file (1-based)",
|
|
32
|
+
"minimum": 1
|
|
33
|
+
},
|
|
34
|
+
"severity": {
|
|
35
|
+
"type": "string",
|
|
36
|
+
"enum": ["critical", "warning", "suggestion", "nitpick"],
|
|
37
|
+
"description": "Severity level of the issue"
|
|
38
|
+
},
|
|
39
|
+
"category": {
|
|
40
|
+
"type": "string",
|
|
41
|
+
"enum": ["bug", "security", "performance", "style", "architecture", "testing"],
|
|
42
|
+
"description": "Category of the issue"
|
|
43
|
+
},
|
|
44
|
+
"confidence": {
|
|
45
|
+
"type": "number",
|
|
46
|
+
"description": "Confidence score from 0.0 to 1.0",
|
|
47
|
+
"minimum": 0,
|
|
48
|
+
"maximum": 1
|
|
49
|
+
},
|
|
50
|
+
"title": {
|
|
51
|
+
"type": "string",
|
|
52
|
+
"description": "One-line summary (max 80 characters)",
|
|
53
|
+
"maxLength": 80
|
|
54
|
+
},
|
|
55
|
+
"explanation": {
|
|
56
|
+
"type": "string",
|
|
57
|
+
"description": "Detailed explanation of the issue"
|
|
58
|
+
},
|
|
59
|
+
"suggestion": {
|
|
60
|
+
"type": "string",
|
|
61
|
+
"description": "Suggested code fix (optional)"
|
|
62
|
+
}
|
|
63
|
+
},
|
|
64
|
+
"required": ["file", "line", "severity", "category", "confidence", "title", "explanation", "suggestion"],
|
|
65
|
+
"additionalProperties": false
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import type { StructuredReviewResponse } from "./review-response";
/**
 * Simple validation for structured review response
 * Note: OpenAI's JSON Schema response_format already validates the structure,
 * but we add runtime validation for safety and to handle any edge cases.
 */
export interface ValidationResult {
    /** True when no validation errors were found */
    valid: boolean;
    /** One human-readable message per failed check (empty when valid) */
    errors: string[];
}
/**
 * Validate a structured review response
 */
export declare function validateStructuredResponse(data: unknown): ValidationResult;
/**
 * Parse and validate a JSON string into a structured response.
 * On success returns `{ success: true, data }`; on parse or validation
 * failure returns `{ success: false, error }` with a descriptive message.
 */
export declare function parseStructuredResponse(json: string): {
    success: boolean;
    data?: StructuredReviewResponse;
    error?: string;
};
/**
 * Check if a response looks like it might be structured JSON
 * (starts with { and contains expected fields)
 */
export declare function looksLikeStructuredResponse(response: string): boolean;
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.validateStructuredResponse = validateStructuredResponse;
|
|
4
|
+
exports.parseStructuredResponse = parseStructuredResponse;
|
|
5
|
+
exports.looksLikeStructuredResponse = looksLikeStructuredResponse;
|
|
6
|
+
/**
 * Validate a structured review response.
 *
 * @param {unknown} data - Parsed JSON value to check against the
 *   StructuredReviewResponse shape.
 * @returns {{valid: boolean, errors: string[]}} Aggregated result; `errors`
 *   lists every problem found (one entry per failed field check).
 */
function validateStructuredResponse(data) {
    const errors = [];
    if (typeof data !== "object" || data === null) {
        return { valid: false, errors: ["Response must be an object"] };
    }
    const response = data;
    // Check comments array exists
    if (!Array.isArray(response.comments)) {
        errors.push("'comments' must be an array");
        return { valid: false, errors };
    }
    // Validate each comment
    const validSeverities = ["critical", "warning", "suggestion", "nitpick"];
    const validCategories = ["bug", "security", "performance", "style", "architecture", "testing"];
    for (let i = 0; i < response.comments.length; i++) {
        const comment = response.comments[i];
        const prefix = `comments[${i}]`;
        if (typeof comment !== "object" || comment === null) {
            errors.push(`${prefix} must be an object`);
            continue;
        }
        const c = comment;
        // Required fields
        if (typeof c.file !== "string" || c.file.length === 0) {
            errors.push(`${prefix}.file is required and must be a non-empty string`);
        }
        if (typeof c.line !== "number" || !Number.isInteger(c.line) || c.line < 1) {
            errors.push(`${prefix}.line must be a positive integer`);
        }
        if (!validSeverities.includes(c.severity)) {
            errors.push(`${prefix}.severity must be one of: ${validSeverities.join(", ")}`);
        }
        if (!validCategories.includes(c.category)) {
            errors.push(`${prefix}.category must be one of: ${validCategories.join(", ")}`);
        }
        // BUGFIX: NaN previously slipped through the range check (NaN < 0 and
        // NaN > 1 are both false), so `confidence: NaN` was accepted. Require
        // a finite number in [0, 1], matching the schema's minimum/maximum.
        if (typeof c.confidence !== "number" ||
            !Number.isFinite(c.confidence) ||
            c.confidence < 0 ||
            c.confidence > 1) {
            errors.push(`${prefix}.confidence must be a number between 0 and 1`);
        }
        if (typeof c.title !== "string" || c.title.length === 0) {
            errors.push(`${prefix}.title is required and must be a non-empty string`);
        }
        if (typeof c.explanation !== "string" || c.explanation.length === 0) {
            errors.push(`${prefix}.explanation is required and must be a non-empty string`);
        }
        // Required fields (OpenAI strict mode requires all properties)
        if (typeof c.suggestion !== "string") {
            errors.push(`${prefix}.suggestion is required and must be a string (can be empty)`);
        }
    }
    // Check summary (required but can be empty)
    if (typeof response.summary !== "string") {
        errors.push("'summary' is required and must be a string (can be empty)");
    }
    return {
        valid: errors.length === 0,
        errors,
    };
}
|
|
67
|
+
/**
 * Parse and validate a JSON string into a structured response.
 * Returns { success: true, data } on success, otherwise
 * { success: false, error } with a descriptive message.
 */
function parseStructuredResponse(json) {
    let parsed;
    try {
        parsed = JSON.parse(json);
    }
    catch (e) {
        const reason = e instanceof Error ? e.message : String(e);
        return {
            success: false,
            error: `JSON parse error: ${reason}`,
        };
    }
    // Structural validation happens after parsing; validateStructuredResponse
    // never throws, so it lives outside the try block.
    const validation = validateStructuredResponse(parsed);
    if (validation.valid) {
        return {
            success: true,
            data: parsed,
        };
    }
    return {
        success: false,
        error: `Validation failed: ${validation.errors.join("; ")}`,
    };
}
|
|
92
|
+
/**
 * Check if a response looks like it might be structured JSON
 * (starts with { and contains expected fields)
 */
function looksLikeStructuredResponse(response) {
    const candidate = response.trim();
    // Cheap pre-check before attempting a full JSON parse.
    if (!candidate.startsWith("{")) {
        return false;
    }
    let parsed;
    try {
        parsed = JSON.parse(candidate);
    }
    catch {
        // Not valid JSON at all.
        return false;
    }
    // "Structured" means the payload carries a comments array.
    return Array.isArray(parsed.comments);
}
|
|
// Empty module stub: `export {}` marks this declaration file as a module
// with no exported types (presumably one of the generated *.test.d.ts files).
export {};