@klitchevo/code-council 0.2.3 → 0.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/chunk-ZDBY7AGR.js → chunk-H4YGXQ7Q.js CHANGED
@@ -911,30 +911,56 @@ var SEVERITY_DESCRIPTIONS = {
911
911
  low: "Nice to fix - minor improvements, style issues, small optimizations",
912
912
  info: "Informational - suggestions, best practices, educational notes"
913
913
  };
914
- var EXTRACTION_SYSTEM_PROMPT = `You are a precise extraction system. Your task is to parse a code review and extract individual findings into a structured JSON format.
914
+ var EXTRACTION_SYSTEM_PROMPT = `You are a precise extraction system. Your task is to parse a code review and extract individual findings into a structured YAML format.
915
915
 
916
916
  ## Output Format
917
917
 
918
- You MUST respond with valid JSON matching this schema:
919
- {
920
- "findings": [
921
- {
922
- "category": "security" | "performance" | "bug" | "maintainability" | "accessibility" | "architecture" | "style" | "documentation" | "testing" | "other",
923
- "severity": "critical" | "high" | "medium" | "low" | "info",
924
- "title": "Short, descriptive title",
925
- "description": "Detailed explanation of the issue",
926
- "location": {
927
- "file": "path/to/file.ts",
928
- "line": 42,
929
- "endLine": 45
930
- },
931
- "suggestion": "How to fix or improve",
932
- "suggestedCode": "The actual corrected code that should replace the problematic code (if applicable)",
933
- "rawExcerpt": "The original text from the review that describes this finding",
934
- "confidence": 0.95
935
- }
936
- ]
937
- }
918
+ You MUST respond with valid YAML matching this schema. Use block scalars (|) for multi-line text:
919
+
920
+ \`\`\`yaml
921
+ findings:
922
+ - category: security
923
+ severity: high
924
+ title: |
925
+ Short, descriptive title
926
+ description: |
927
+ Detailed explanation of the issue.
928
+ location:
929
+ file: path/to/file.ts
930
+ line: 42
931
+ endLine: 45
932
+ suggestion: |
933
+ How to fix (text explanation)
934
+ suggestedCode: |
935
+ // The actual fixed code that should replace the buggy code
936
+ const sanitized = escapeHtml(userInput);
937
+ return sanitized;
938
+ confidence: 0.95
939
+ \`\`\`
940
+
941
+ ## CRITICAL: suggestedCode Field
942
+
943
+ The \`suggestedCode\` field is ESSENTIAL for GitHub's "Apply suggestion" feature. When the review mentions a code fix:
944
+
945
+ 1. **Extract the actual code** that should replace the buggy code
946
+ 2. Put it in \`suggestedCode\` as a block scalar (|)
947
+ 3. This becomes a clickable "Apply suggestion" button on GitHub
948
+
949
+ Example - if reviewer says "change \`exec(userInput)\` to \`execFile('ls', [userInput])\`":
950
+ \`\`\`yaml
951
+ suggestedCode: |
952
+ execFile('ls', [userInput], (error, stdout) => {
953
+ console.log(stdout);
954
+ });
955
+ \`\`\`
956
+
957
+ If no specific code fix is mentioned, omit \`suggestedCode\` entirely.
958
+
959
+ ## Available Values
960
+
961
+ **Categories**: security, performance, bug, maintainability, accessibility, architecture, style, documentation, testing, other
962
+
963
+ **Severities**: critical, high, medium, low, info
938
964
 
939
965
  ## Category Definitions
940
966
 
@@ -947,8 +973,8 @@ ${Object.entries(SEVERITY_DESCRIPTIONS).map(([sev, desc]) => `- **${sev}**: ${de
947
973
  ## Extraction Rules
948
974
 
949
975
  1. **Be thorough**: Extract ALL distinct issues mentioned in the review
950
- 2. **Be precise**: Use exact quotes for rawExcerpt when possible
951
- 3. **Infer location**: If file paths or line numbers are mentioned, include them
976
+ 2. **Infer location**: If file paths or line numbers are mentioned, include them
977
+ 3. **Extract code fixes**: If the reviewer suggests specific code changes, put them in \`suggestedCode\`
952
978
  4. **Normalize severity**: Map vague language to specific severity levels:
953
979
  - "critical", "severe", "urgent", "must fix" \u2192 critical
954
980
  - "important", "significant", "should fix" \u2192 high
@@ -956,15 +982,17 @@ ${Object.entries(SEVERITY_DESCRIPTIONS).map(([sev, desc]) => `- **${sev}**: ${de
956
982
  - "minor", "nitpick", "suggestion" \u2192 low/info
957
983
  5. **Set confidence**: Higher (0.8-1.0) for clear, explicit issues; lower (0.5-0.7) for inferred or ambiguous ones
958
984
  6. **Don't duplicate**: Each distinct issue should appear once
959
- 7. **Handle empty reviews**: If no issues found, return {"findings": []}
960
- 8. **Include code fixes**: When a fix involves specific code changes, provide the actual corrected code in the suggestedCode field (not explanation, just the code)
985
+ 7. **Handle empty reviews**: If no issues found, return \`findings: []\`
961
986
 
962
987
  ## Important
963
988
 
964
989
  - DO NOT add findings not present in the review
965
990
  - DO NOT interpret or expand on the reviewer's points
966
- - If the review is empty or only contains praise, return an empty findings array
967
- - Always respond with valid JSON - no markdown code blocks, no explanatory text`;
991
+ - If the review is empty or only contains praise, return empty findings array
992
+ - Always respond with valid YAML
993
+ - Use block scalars (|) for any text that might contain special characters
994
+ - The location.line field is important - always include it when mentioned in the review
995
+ - The suggestedCode field enables GitHub's "Apply suggestion" button - ALWAYS include it when a code fix is provided`;
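The switch from JSON to YAML output above is what the new js-yaml dependency in this release supports. As a minimal sketch of how a response in this shape loads, assuming only that the reply follows the schema requested by the prompt (the Finding interface and the sample text are illustrative, not taken from the package):

```ts
// Minimal sketch: loading a YAML extraction response with js-yaml.
// The Finding interface is an illustrative subset, not the package's schema.
import yaml from "js-yaml";

interface Finding {
  category: string;
  severity: string;
  title: string;
  description: string;
  suggestedCode?: string;
  confidence?: number;
}

const sampleResponse = `
findings:
  - category: security
    severity: high
    title: Unsanitized user input passed to exec()
    description: |
      The request parameter is interpolated into a shell command.
    suggestedCode: |
      execFile('ls', [userInput], (error, stdout) => {
        console.log(stdout);
      });
    confidence: 0.9
`;

// Block scalars (|) keep the multi-line suggestedCode intact without the
// \n escaping that a JSON payload would need.
const parsed = yaml.load(sampleResponse) as { findings: Finding[] };
console.log(parsed.findings[0].suggestedCode);
```

Presumably this is why the prompt insists on block scalars: multi-line code fixes survive round-tripping without escape-sequence damage.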
968
996
  function buildExtractionUserMessage(reviewText, modelName) {
969
997
  return `Extract all findings from this code review by ${modelName}:
970
998
 
@@ -972,7 +1000,7 @@ function buildExtractionUserMessage(reviewText, modelName) {
972
1000
  ${reviewText}
973
1001
  ---
974
1002
 
975
- Respond with ONLY valid JSON. Do not include markdown code blocks or any other text.`;
1003
+ Respond with ONLY valid YAML. Use block scalars (|) for multi-line text. No markdown code blocks or explanatory text.`;
976
1004
  }
977
1005
 
978
1006
  // src/schemas/consensus.ts
@@ -1024,12 +1052,15 @@ var OUTPUT_FORMATS = [
1024
1052
  var DEFAULT_CONSENSUS_CONFIG = {
1025
1053
  enabled: false,
1026
1054
  modelWeights: {},
1027
- highConfidenceThreshold: 0.8,
1028
- moderateConfidenceThreshold: 0.5,
1055
+ highConfidenceThreshold: 0.7,
1056
+ // 4+ models for high (was 0.8)
1057
+ moderateConfidenceThreshold: 0.33,
1058
+ // 2+ models for moderate (was 0.5)
1029
1059
  extractionModel: "anthropic/claude-3-haiku",
1030
1060
  fallbackOnError: true,
1031
1061
  lineProximity: 5,
1032
- similarityThreshold: 0.7,
1062
+ similarityThreshold: 0.5,
1063
+ // Allow more clustering (was 0.7)
1033
1064
  hostExtraction: true
1034
1065
  // Recommended: let MCP host model do extraction
1035
1066
  };
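The inline comments above suggest cluster confidence is treated as the fraction of participating models that report a finding. Under that assumption, a hypothetical helper (not part of the package) shows how the loosened defaults bucket clusters:

```ts
// Hypothetical sketch: bucketing by the new default thresholds,
// assuming confidence ~= agreeingModels / totalModels.
type Bucket = "high" | "moderate" | "low";

function classifyConfidence(
  agreeing: number,
  total: number,
  high = 0.7,      // was 0.8
  moderate = 0.33, // was 0.5
): Bucket {
  const confidence = total > 0 ? agreeing / total : 0;
  if (confidence >= high) return "high";
  if (confidence >= moderate) return "moderate";
  return "low";
}

// With 5 participating models: 4 agreeing -> high (0.8 >= 0.7),
// 2 agreeing -> moderate (0.4 >= 0.33), 1 agreeing -> low (0.2).
console.log(classifyConfidence(4, 5), classifyConfidence(2, 5), classifyConfidence(1, 5));
```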
@@ -1049,25 +1080,41 @@ var FindingSchema = z2.object({
1049
1080
  description: z2.string().min(1, "Description cannot be empty"),
1050
1081
  location: CodeLocationSchema.optional(),
1051
1082
  suggestion: z2.string().optional(),
1052
- rawExcerpt: z2.string(),
1083
+ rawExcerpt: z2.string().optional(),
1084
+ // Made optional - not all extractions include this
1053
1085
  extractedAt: z2.string().datetime(),
1054
1086
  confidence: z2.number().min(0).max(1).optional()
1055
1087
  });
1088
+ var severitySchema = z2.string().transform((val) => val.toLowerCase()).pipe(z2.enum(FINDING_SEVERITIES));
1089
+ var categorySchema = z2.string().transform((val) => val.toLowerCase()).transform((val) => {
1090
+ if (!FINDING_CATEGORIES.includes(val)) {
1091
+ return "other";
1092
+ }
1093
+ return val;
1094
+ }).pipe(z2.enum(FINDING_CATEGORIES));
1095
+ var lineNumberSchema = z2.union([z2.number(), z2.string()]).transform((val) => {
1096
+ if (typeof val === "string") {
1097
+ const parsed = Number.parseInt(val, 10);
1098
+ return Number.isNaN(parsed) ? void 0 : parsed;
1099
+ }
1100
+ return val;
1101
+ }).pipe(z2.number().int().nonnegative().optional()).nullish();
1056
1102
  var ExtractionResponseSchema = z2.object({
1057
1103
  findings: z2.array(
1058
1104
  z2.object({
1059
- category: z2.enum(FINDING_CATEGORIES),
1060
- severity: z2.enum(FINDING_SEVERITIES),
1105
+ category: categorySchema,
1106
+ severity: severitySchema,
1061
1107
  title: z2.string(),
1062
1108
  description: z2.string(),
1063
1109
  location: z2.object({
1064
- file: z2.string(),
1065
- line: z2.number().nullish(),
1066
- endLine: z2.number().nullish()
1110
+ file: z2.string().nullish(),
1111
+ // Allow null - some models return null for general findings
1112
+ line: lineNumberSchema,
1113
+ endLine: lineNumberSchema
1067
1114
  }).nullish(),
1068
1115
  suggestion: z2.string().nullish(),
1069
1116
  suggestedCode: z2.string().nullish(),
1070
- rawExcerpt: z2.string(),
1117
+ rawExcerpt: z2.string().nullish(),
1071
1118
  confidence: z2.number().min(0).max(1).nullish()
1072
1119
  })
1073
1120
  )
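The relaxed schema above now tolerates common model quirks: uppercase enum values, unknown categories, stringified line numbers, and missing rawExcerpt or file. A standalone sketch of the same coercion pattern with zod (abbreviated enums; the number handling is simplified relative to lineNumberSchema, which also maps unparsable strings to undefined):

```ts
// Sketch of the lenient coercion style used above, with abbreviated enums.
import { z } from "zod";

const severitySchema = z
  .string()
  .transform((v) => v.toLowerCase()) // accept "HIGH", "High", ...
  .pipe(z.enum(["critical", "high", "medium", "low", "info"]));

const lineSchema = z
  .union([z.number(), z.string()])
  .transform((v) => (typeof v === "string" ? Number.parseInt(v, 10) : v))
  .pipe(z.number().int().nonnegative());

console.log(severitySchema.parse("HIGH")); // "high"
console.log(lineSchema.parse("42"));       // 42
```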
@@ -1145,7 +1192,270 @@ var ConsensusOptionsSchema = z2.object({
1145
1192
  outputFormat: z2.enum(OUTPUT_FORMATS).optional()
1146
1193
  });
1147
1194
 
1195
+ // src/consensus/yaml-parser.ts
1196
+ init_esm_shims();
1197
+ import yaml from "js-yaml";
1198
+ function stripMarkdownCodeBlocks(text) {
1199
+ let result = text.trim();
1200
+ if (result.startsWith("```yaml")) {
1201
+ result = result.slice(7);
1202
+ } else if (result.startsWith("```json")) {
1203
+ result = result.slice(7);
1204
+ } else if (result.startsWith("```")) {
1205
+ result = result.slice(3);
1206
+ }
1207
+ if (result.endsWith("```")) {
1208
+ result = result.slice(0, -3);
1209
+ }
1210
+ return result.trim();
1211
+ }
1212
+ function addBlockScalarIndicators(text) {
1213
+ return text.replace(
1214
+ /^(\s*)(\w+):\s*"([^"]*\\[^"]*)"$/gm,
1215
+ (_, indent, key, value) => {
1216
+ const unescaped = value.replace(/\\n/g, "\n").replace(/\\t/g, " ").replace(/\\"/g, '"').replace(/\\\\/g, "\\");
1217
+ return `${indent}${key}: |
1218
+ ${indent} ${unescaped.replace(/\n/g, `
1219
+ ${indent} `)}`;
1220
+ }
1221
+ );
1222
+ }
1223
+ function fixBlockScalarIndent(text) {
1224
+ return text.replace(/\|(?!\d)/g, "|2");
1225
+ }
1226
+ function extractYamlFromMarkers(text, firstKey, lastKey) {
1227
+ const firstKeyIndex = text.indexOf(`${firstKey}:`);
1228
+ if (firstKeyIndex === -1) return null;
1229
+ let endIndex = text.length;
1230
+ if (lastKey) {
1231
+ const lastKeyIndex = text.lastIndexOf(`${lastKey}:`);
1232
+ if (lastKeyIndex !== -1) {
1233
+ const afterLastKey = text.slice(lastKeyIndex);
1234
+ const nextKeyMatch = afterLastKey.match(/\n[a-zA-Z_]/);
1235
+ if (nextKeyMatch?.index) {
1236
+ endIndex = lastKeyIndex + nextKeyMatch.index;
1237
+ }
1238
+ }
1239
+ }
1240
+ return text.slice(firstKeyIndex, endIndex).trim();
1241
+ }
1242
+ function removeCurlyBrackets(text) {
1243
+ let result = text.trim();
1244
+ if (result.startsWith("{") && result.endsWith("}")) {
1245
+ result = result.slice(1, -1).trim();
1246
+ }
1247
+ return result;
1248
+ }
1249
+ function removeLeadingPlusSymbols(text) {
1250
+ return text.replace(/^\+\s*/gm, "");
1251
+ }
1252
+ function extractFromFindingsKeyword(text) {
1253
+ const stripped = stripMarkdownCodeBlocks(text);
1254
+ const findingsIndex = stripped.indexOf("findings:");
1255
+ if (findingsIndex === -1) {
1256
+ return stripped;
1257
+ }
1258
+ return stripped.slice(findingsIndex).trim();
1259
+ }
1260
+ function removeDuplicateKeys(text) {
1261
+ const lines = text.split("\n");
1262
+ const result = [];
1263
+ const seenKeys = /* @__PURE__ */ new Map();
1264
+ for (const line of lines) {
1265
+ const match = line.match(/^(\s*)(\w+):/);
1266
+ if (match) {
1267
+ const indent = match[1]?.length ?? 0;
1268
+ const key = match[2] ?? "";
1269
+ for (const [level] of seenKeys) {
1270
+ if (level > indent) {
1271
+ seenKeys.delete(level);
1272
+ }
1273
+ }
1274
+ const keysAtLevel = seenKeys.get(indent) ?? /* @__PURE__ */ new Set();
1275
+ if (keysAtLevel.has(key)) {
1276
+ continue;
1277
+ }
1278
+ keysAtLevel.add(key);
1279
+ seenKeys.set(indent, keysAtLevel);
1280
+ }
1281
+ result.push(line);
1282
+ }
1283
+ return result.join("\n");
1284
+ }
1285
+ function replaceTabsWithSpaces(text) {
1286
+ return text.replace(/\t/g, " ");
1287
+ }
1288
+ function tryFixYamlIssues(text) {
1289
+ let result = text;
1290
+ result = result.replace(
1291
+ /^(\s*)(\w+):\s+([^|\n]*:[^\n]*)$/gm,
1292
+ (_, indent, key, value) => {
1293
+ const trimmedValue = value.trim();
1294
+ if (trimmedValue.startsWith('"') || trimmedValue.startsWith("'") || trimmedValue.startsWith("|") || trimmedValue.startsWith(">") || trimmedValue.startsWith("-") || trimmedValue.startsWith("[") || trimmedValue.startsWith("{")) {
1295
+ return `${indent}${key}: ${value}`;
1296
+ }
1297
+ return `${indent}${key}: "${trimmedValue.replace(/"/g, '\\"')}"`;
1298
+ }
1299
+ );
1300
+ return result;
1301
+ }
1302
+ function attemptParse(text, strategyName) {
1303
+ try {
1304
+ const data = yaml.load(text);
1305
+ if (data !== null && data !== void 0) {
1306
+ return { success: true, data, strategy: strategyName };
1307
+ }
1308
+ return { success: false, error: "Parsed to null/undefined" };
1309
+ } catch (error) {
1310
+ const message = error instanceof Error ? error.message : "Unknown error";
1311
+ return { success: false, error: message };
1312
+ }
1313
+ }
1314
+ function attemptJsonParse(text) {
1315
+ try {
1316
+ const data = JSON.parse(text);
1317
+ return { success: true, data, strategy: "json-fallback" };
1318
+ } catch (error) {
1319
+ const message = error instanceof Error ? error.message : "Unknown error";
1320
+ return { success: false, error: message };
1321
+ }
1322
+ }
1323
+ function parseYamlWithFallbacks(text, firstKey, lastKey) {
1324
+ const strategies = [
1325
+ // Try simple approaches first (least likely to break valid YAML)
1326
+ { name: "raw", transform: (t) => t },
1327
+ { name: "strip-markdown", transform: stripMarkdownCodeBlocks },
1328
+ {
1329
+ name: "extract-findings-keyword",
1330
+ transform: extractFromFindingsKeyword
1331
+ },
1332
+ {
1333
+ name: "strip-markdown+tabs",
1334
+ transform: (t) => replaceTabsWithSpaces(stripMarkdownCodeBlocks(t))
1335
+ },
1336
+ {
1337
+ name: "remove-duplicate-keys",
1338
+ transform: (t) => removeDuplicateKeys(stripMarkdownCodeBlocks(t))
1339
+ },
1340
+ {
1341
+ name: "extract-findings+dedup",
1342
+ transform: (t) => removeDuplicateKeys(extractFromFindingsKeyword(t))
1343
+ },
1344
+ {
1345
+ name: "extract-findings+dedup+tabs",
1346
+ transform: (t) => replaceTabsWithSpaces(
1347
+ removeDuplicateKeys(extractFromFindingsKeyword(t))
1348
+ )
1349
+ },
1350
+ // Then try more aggressive fixes
1351
+ {
1352
+ name: "strip-markdown+fix-issues",
1353
+ transform: (t) => tryFixYamlIssues(stripMarkdownCodeBlocks(t))
1354
+ },
1355
+ {
1356
+ name: "block-scalars",
1357
+ transform: (t) => addBlockScalarIndicators(stripMarkdownCodeBlocks(t))
1358
+ },
1359
+ {
1360
+ name: "fix-indent",
1361
+ transform: (t) => fixBlockScalarIndent(stripMarkdownCodeBlocks(t))
1362
+ },
1363
+ {
1364
+ name: "remove-curly",
1365
+ transform: (t) => removeCurlyBrackets(stripMarkdownCodeBlocks(t))
1366
+ },
1367
+ {
1368
+ name: "remove-plus",
1369
+ transform: (t) => removeLeadingPlusSymbols(stripMarkdownCodeBlocks(t))
1370
+ },
1371
+ {
1372
+ name: "tabs-to-spaces",
1373
+ transform: (t) => replaceTabsWithSpaces(stripMarkdownCodeBlocks(t))
1374
+ },
1375
+ {
1376
+ name: "combined-fixes",
1377
+ transform: (t) => tryFixYamlIssues(
1378
+ replaceTabsWithSpaces(
1379
+ removeLeadingPlusSymbols(
1380
+ removeCurlyBrackets(stripMarkdownCodeBlocks(t))
1381
+ )
1382
+ )
1383
+ )
1384
+ },
1385
+ {
1386
+ name: "extract-findings-keyword+fixes",
1387
+ transform: (t) => tryFixYamlIssues(
1388
+ replaceTabsWithSpaces(extractFromFindingsKeyword(t))
1389
+ )
1390
+ },
1391
+ {
1392
+ name: "remove-duplicate-keys+all-fixes",
1393
+ transform: (t) => tryFixYamlIssues(
1394
+ replaceTabsWithSpaces(
1395
+ removeDuplicateKeys(extractFromFindingsKeyword(t))
1396
+ )
1397
+ )
1398
+ }
1399
+ ];
1400
+ if (firstKey) {
1401
+ strategies.push({
1402
+ name: "extract-by-markers",
1403
+ transform: (t) => {
1404
+ const extracted = extractYamlFromMarkers(
1405
+ stripMarkdownCodeBlocks(t),
1406
+ firstKey,
1407
+ lastKey
1408
+ );
1409
+ return extracted ?? t;
1410
+ }
1411
+ });
1412
+ strategies.push({
1413
+ name: "extract-by-markers+dedup",
1414
+ transform: (t) => {
1415
+ const extracted = extractYamlFromMarkers(
1416
+ stripMarkdownCodeBlocks(t),
1417
+ firstKey,
1418
+ lastKey
1419
+ );
1420
+ return removeDuplicateKeys(extracted ?? t);
1421
+ }
1422
+ });
1423
+ }
1424
+ const errors = [];
1425
+ for (const strategy of strategies) {
1426
+ const transformed = strategy.transform(text);
1427
+ const result = attemptParse(transformed, strategy.name);
1428
+ if (result.success) {
1429
+ logger.debug("YAML parsed successfully", { strategy: strategy.name });
1430
+ return result.data;
1431
+ }
1432
+ errors.push(`${strategy.name}: ${result.error}`);
1433
+ }
1434
+ const strippedText = stripMarkdownCodeBlocks(text);
1435
+ const jsonResult = attemptJsonParse(strippedText);
1436
+ if (jsonResult.success) {
1437
+ logger.debug("Fell back to JSON parsing");
1438
+ return jsonResult.data;
1439
+ }
1440
+ errors.push(`json-fallback: ${jsonResult.error}`);
1441
+ const errorSummary = errors.slice(-3).join("; ");
1442
+ const truncatedInput = text.length > 200 ? `${text.slice(0, 200)}...` : text;
1443
+ logger.error("YAML/JSON parsing failed", {
1444
+ inputLength: text.length,
1445
+ inputPreview: truncatedInput,
1446
+ strategiesTried: strategies.length + 1,
1447
+ lastErrors: errors.slice(-3)
1448
+ });
1449
+ throw new Error(
1450
+ `Failed to parse YAML/JSON after ${strategies.length + 1} attempts. Last errors: ${errorSummary}`
1451
+ );
1452
+ }
1453
+ function parseExtractionYaml(text) {
1454
+ return parseYamlWithFallbacks(text, "findings");
1455
+ }
1456
+
1148
1457
  // src/consensus/extractor.ts
1458
+ var MIN_CONFIDENCE_THRESHOLD = 0.3;
1149
1459
  function generateFindingId() {
1150
1460
  const timestamp = Date.now().toString(36);
1151
1461
  const random = Math.random().toString(36).substring(2, 8);
@@ -1153,25 +1463,40 @@ function generateFindingId() {
1153
1463
  }
1154
1464
  function repairJson(jsonStr) {
1155
1465
  let repaired = jsonStr;
1156
- repaired = repaired.replace(/,(\s*[}\]])/g, "$1");
1157
- repaired = repaired.replace(/"([^"]*(?:\\"[^"]*)*)"/g, (match) => {
1158
- return match.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t");
1466
+ repaired = repaired.replace(/\\'/g, "'");
1467
+ repaired = repaired.replace(/\\`/g, "`");
1468
+ repaired = repaired.replace(/"([^"]*?)"/g, (_, content) => {
1469
+ const fixed = content.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t");
1470
+ return `"${fixed}"`;
1159
1471
  });
1472
+ repaired = repaired.replace(/,(\s*[}\]])/g, "$1");
1160
1473
  return repaired;
1161
1474
  }
1162
1475
  function parseExtractionResponse(responseText, sourceModel) {
1163
- let jsonStr = responseText.trim();
1164
- if (jsonStr.startsWith("```json")) {
1165
- jsonStr = jsonStr.slice(7);
1166
- } else if (jsonStr.startsWith("```")) {
1167
- jsonStr = jsonStr.slice(3);
1168
- }
1169
- if (jsonStr.endsWith("```")) {
1170
- jsonStr = jsonStr.slice(0, -3);
1171
- }
1172
- jsonStr = jsonStr.trim();
1173
- jsonStr = repairJson(jsonStr);
1174
- const raw = JSON.parse(jsonStr);
1476
+ let raw;
1477
+ try {
1478
+ raw = parseExtractionYaml(responseText);
1479
+ logger.debug("Parsed extraction response via YAML parser", { sourceModel });
1480
+ } catch (yamlError) {
1481
+ logger.debug("YAML parsing failed, trying JSON repair", {
1482
+ sourceModel,
1483
+ error: yamlError instanceof Error ? yamlError.message : "Unknown"
1484
+ });
1485
+ let jsonStr = responseText.trim();
1486
+ if (jsonStr.startsWith("```json")) {
1487
+ jsonStr = jsonStr.slice(7);
1488
+ } else if (jsonStr.startsWith("```yaml")) {
1489
+ jsonStr = jsonStr.slice(7);
1490
+ } else if (jsonStr.startsWith("```")) {
1491
+ jsonStr = jsonStr.slice(3);
1492
+ }
1493
+ if (jsonStr.endsWith("```")) {
1494
+ jsonStr = jsonStr.slice(0, -3);
1495
+ }
1496
+ jsonStr = jsonStr.trim();
1497
+ jsonStr = repairJson(jsonStr);
1498
+ raw = JSON.parse(jsonStr);
1499
+ }
1175
1500
  const parsed = ExtractionResponseSchema.parse(raw);
1176
1501
  const now = (/* @__PURE__ */ new Date()).toISOString();
1177
1502
  return parsed.findings.map(
@@ -1182,14 +1507,14 @@ function parseExtractionResponse(responseText, sourceModel) {
1182
1507
  severity: f.severity,
1183
1508
  title: f.title,
1184
1509
  description: f.description,
1185
- location: f.location ? {
1510
+ location: f.location?.file ? {
1186
1511
  file: f.location.file,
1187
1512
  line: f.location.line ?? void 0,
1188
1513
  endLine: f.location.endLine ?? void 0
1189
1514
  } : void 0,
1190
1515
  suggestion: f.suggestion ?? void 0,
1191
1516
  suggestedCode: f.suggestedCode ?? void 0,
1192
- rawExcerpt: f.rawExcerpt,
1517
+ rawExcerpt: f.rawExcerpt ?? void 0,
1193
1518
  extractedAt: now,
1194
1519
  confidence: f.confidence ?? void 0
1195
1520
  })
@@ -1219,7 +1544,21 @@ async function extractFindings(reviewText, sourceModel, chatFn) {
1219
1544
  EXTRACTION_SYSTEM_PROMPT,
1220
1545
  userMessage
1221
1546
  );
1222
- const findings = parseExtractionResponse(response, sourceModel);
1547
+ const allFindings = parseExtractionResponse(response, sourceModel);
1548
+ const findings = allFindings.filter((f) => {
1549
+ if (f.confidence === void 0) {
1550
+ return true;
1551
+ }
1552
+ return f.confidence >= MIN_CONFIDENCE_THRESHOLD;
1553
+ });
1554
+ const filteredCount = allFindings.length - findings.length;
1555
+ if (filteredCount > 0) {
1556
+ logger.debug("Filtered low-confidence findings", {
1557
+ sourceModel,
1558
+ filtered: filteredCount,
1559
+ threshold: MIN_CONFIDENCE_THRESHOLD
1560
+ });
1561
+ }
1223
1562
  logger.debug("Extracted findings", {
1224
1563
  sourceModel,
1225
1564
  count: findings.length
@@ -1400,11 +1739,11 @@ function normalizeLocation(location) {
1400
1739
  const file = normalizeFilePath(location.file);
1401
1740
  let line = location.line;
1402
1741
  let endLine = location.endLine;
1403
- if (line !== void 0 && line < 1) {
1742
+ if (line !== void 0 && line < 0) {
1404
1743
  line = void 0;
1405
1744
  }
1406
1745
  if (endLine !== void 0) {
1407
- if (endLine < 1) {
1746
+ if (endLine < 0) {
1408
1747
  endLine = void 0;
1409
1748
  } else if (line !== void 0 && endLine < line) {
1410
1749
  [line, endLine] = [endLine, line];
@@ -1427,7 +1766,7 @@ function normalizeFinding(finding) {
1427
1766
  description: finding.description.trim(),
1428
1767
  location: normalizeLocation(finding.location),
1429
1768
  suggestion: finding.suggestion?.trim(),
1430
- rawExcerpt: finding.rawExcerpt.trim(),
1769
+ rawExcerpt: finding.rawExcerpt?.trim(),
1431
1770
  confidence: finding.confidence !== void 0 ? Math.max(0, Math.min(1, finding.confidence)) : void 0
1432
1771
  };
1433
1772
  }
@@ -1580,8 +1919,10 @@ init_esm_shims();
1580
1919
  // src/consensus/clustering.ts
1581
1920
  init_esm_shims();
1582
1921
  var DEFAULT_CLUSTERING_CONFIG = {
1583
- lineProximity: 5,
1584
- similarityThreshold: 0.7
1922
+ lineProximity: 15,
1923
+ // More lenient - models often point to different lines for same issue
1924
+ similarityThreshold: 0.55
1925
+ // Lower threshold - models describe same issues differently
1585
1926
  };
1586
1927
  function generateClusterId() {
1587
1928
  const timestamp = Date.now().toString(36);
@@ -1625,9 +1966,54 @@ function createClusterTitle(findings) {
1625
1966
  }
1626
1967
  return titles[0] ?? "Unnamed finding";
1627
1968
  }
1969
+ var ISSUE_KEYWORDS = {
1970
+ sql_injection: ["sql", "injection", "sqli", "cwe-89", "query"],
1971
+ xss: ["xss", "cross-site", "scripting", "cwe-79", "sanitiz"],
1972
+ command_injection: ["command", "injection", "exec", "shell", "cwe-78", "rce"],
1973
+ hardcoded_secrets: [
1974
+ "hardcoded",
1975
+ "secret",
1976
+ "credential",
1977
+ "password",
1978
+ "api key",
1979
+ "cwe-798"
1980
+ ],
1981
+ auth_bypass: ["auth", "bypass", "backdoor", "authentication"],
1982
+ weak_crypto: ["crypto", "random", "math.random", "weak", "cwe-338"],
1983
+ info_disclosure: ["disclosure", "exposure", "logging", "cwe-532"],
1984
+ race_condition: ["race", "condition", "toctou", "concurrent"],
1985
+ memory_leak: ["memory", "leak", "unbounded", "cache"],
1986
+ missing_validation: ["validation", "sanitiz", "escape", "input"]
1987
+ };
1988
+ function extractIssueTypes(text) {
1989
+ const lower = text.toLowerCase();
1990
+ const types = /* @__PURE__ */ new Set();
1991
+ for (const [issueType, keywords] of Object.entries(ISSUE_KEYWORDS)) {
1992
+ for (const keyword of keywords) {
1993
+ if (lower.includes(keyword)) {
1994
+ types.add(issueType);
1995
+ break;
1996
+ }
1997
+ }
1998
+ }
1999
+ return types;
2000
+ }
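To see why the keyword table helps clustering, here is a trimmed, self-contained restatement of extractIssueTypes applied to two differently worded findings; the keyword lists are abbreviated and the review sentences are invented:

```ts
// Trimmed restatement of the keyword lookup above, for illustration only.
const ISSUE_KEYWORDS: Record<string, string[]> = {
  sql_injection: ["sql", "injection", "sqli", "cwe-89", "query"],
  xss: ["xss", "cross-site", "scripting", "cwe-79", "sanitiz"],
};

function extractIssueTypes(text: string): Set<string> {
  const lower = text.toLowerCase();
  const types = new Set<string>();
  for (const [issueType, keywords] of Object.entries(ISSUE_KEYWORDS)) {
    if (keywords.some((k) => lower.includes(k))) types.add(issueType);
  }
  return types;
}

// Two differently worded reports of the same bug map to the same issue type,
// which lets textSimilarity short-circuit to a high score (>= 0.8).
console.log(extractIssueTypes("Unparameterized SQL query allows injection")); // Set { 'sql_injection' }
console.log(extractIssueTypes("User input concatenated into query (CWE-89)")); // Set { 'sql_injection' }
```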
1628
2001
  function textSimilarity(text1, text2) {
1629
2002
  const t1 = text1 ?? "";
1630
2003
  const t2 = text2 ?? "";
2004
+ const types1 = extractIssueTypes(t1);
2005
+ const types2 = extractIssueTypes(t2);
2006
+ if (types1.size > 0 && types2.size > 0) {
2007
+ let typeOverlap = 0;
2008
+ for (const type of types1) {
2009
+ if (types2.has(type)) {
2010
+ typeOverlap++;
2011
+ }
2012
+ }
2013
+ if (typeOverlap > 0) {
2014
+ return Math.min(1, 0.8 + typeOverlap * 0.05);
2015
+ }
2016
+ }
1631
2017
  const words1 = new Set(
1632
2018
  t1.toLowerCase().split(/\s+/).filter((w) => w.length > 2)
1633
2019
  );
@@ -1658,20 +2044,41 @@ function findingSimilarity(finding1, finding2, config = DEFAULT_CLUSTERING_CONFI
1658
2044
  finding2.location,
1659
2045
  config.lineProximity
1660
2046
  );
2047
+ const sameFile = finding1.location?.file && finding2.location?.file && finding1.location.file === finding2.location.file;
2048
+ const types1 = extractIssueTypes(`${finding1.title} ${finding1.description}`);
2049
+ const types2 = extractIssueTypes(`${finding2.title} ${finding2.description}`);
2050
+ let sharedIssueType = false;
2051
+ for (const type of types1) {
2052
+ if (types2.has(type)) {
2053
+ sharedIssueType = true;
2054
+ break;
2055
+ }
2056
+ }
1661
2057
  const titleSim = textSimilarity(finding1.title, finding2.title);
1662
2058
  const descSim = textSimilarity(finding1.description, finding2.description);
2059
+ if (sharedIssueType && sameFile) {
2060
+ return Math.min(1, 0.75 + titleSim * 0.15 + descSim * 0.1);
2061
+ }
2062
+ if (finding1.category === "security" && sameFile) {
2063
+ return Math.min(1, 0.6 + titleSim * 0.2 + descSim * 0.1);
2064
+ }
2065
+ if (sharedIssueType) {
2066
+ return Math.min(1, 0.5 + titleSim * 0.25 + descSim * 0.15);
2067
+ }
1663
2068
  let score = 0;
1664
- score += 0.2;
1665
- if (locationMatch) {
1666
- if (finding1.location?.line && finding2.location?.line) {
1667
- score += 0.4;
1668
- } else if (finding1.location?.file && finding2.location?.file) {
1669
- score += 0.2;
2069
+ score += 0.25;
2070
+ if (sameFile) {
2071
+ if (locationMatch) {
2072
+ score += 0.35;
2073
+ } else {
2074
+ score += 0.25;
1670
2075
  }
2076
+ } else if (locationMatch) {
2077
+ score += 0.15;
1671
2078
  }
1672
2079
  score += titleSim * 0.25;
1673
2080
  score += descSim * 0.15;
1674
- return Math.min(1, score);
2081
+ return Math.min(1, Math.max(0, score));
1675
2082
  }
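A worked example of the fast paths above, with illustrative titleSim/descSim values:

```ts
// Worked numbers for the three fast paths (titleSim/descSim are invented).
const titleSim = 0.4; // word-overlap similarity of the titles
const descSim = 0.2;  // word-overlap similarity of the descriptions

const sharedTypeSameFile = Math.min(1, 0.75 + titleSim * 0.15 + descSim * 0.1); // 0.83
const securitySameFile   = Math.min(1, 0.6 + titleSim * 0.2 + descSim * 0.1);   // 0.70
const sharedTypeOnly     = Math.min(1, 0.5 + titleSim * 0.25 + descSim * 0.15); // 0.63

console.log(sharedTypeSameFile, securitySameFile, sharedTypeOnly);
// Against the new clustering default similarityThreshold of 0.55 all three
// pairs qualify; under the old 0.7 threshold only the first two would.
```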
1676
2083
  function shouldJoinCluster(finding, cluster, config) {
1677
2084
  for (const existing of cluster) {
@@ -1682,20 +2089,40 @@ function shouldJoinCluster(finding, cluster, config) {
1682
2089
  }
1683
2090
  return false;
1684
2091
  }
2092
+ function getClusterKey(finding) {
2093
+ const file = finding.location?.file ?? "no-file";
2094
+ const category = finding.category;
2095
+ return `${file}::${category}`;
2096
+ }
1685
2097
  function initialClustering(findings, config) {
1686
- const clusters = [];
2098
+ const preGroups = /* @__PURE__ */ new Map();
1687
2099
  for (const finding of findings) {
1688
- let joined = false;
1689
- for (const cluster of clusters) {
1690
- if (shouldJoinCluster(finding, cluster, config)) {
1691
- cluster.push(finding);
1692
- joined = true;
1693
- break;
1694
- }
2100
+ const key = getClusterKey(finding);
2101
+ const group = preGroups.get(key) ?? [];
2102
+ group.push(finding);
2103
+ preGroups.set(key, group);
2104
+ }
2105
+ const clusters = [];
2106
+ for (const group of preGroups.values()) {
2107
+ if (group.length === 1) {
2108
+ clusters.push(group);
2109
+ continue;
1695
2110
  }
1696
- if (!joined) {
1697
- clusters.push([finding]);
2111
+ const subClusters = [];
2112
+ for (const finding of group) {
2113
+ let joined = false;
2114
+ for (const subCluster of subClusters) {
2115
+ if (shouldJoinCluster(finding, subCluster, config)) {
2116
+ subCluster.push(finding);
2117
+ joined = true;
2118
+ break;
2119
+ }
2120
+ }
2121
+ if (!joined) {
2122
+ subClusters.push([finding]);
2123
+ }
1698
2124
  }
2125
+ clusters.push(...subClusters);
1699
2126
  }
1700
2127
  return clusters;
1701
2128
  }
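A standalone sketch of the new pre-grouping step, with invented findings; it shows that pairwise similarity now only runs inside a file::category bucket during this initial pass:

```ts
// Standalone sketch of the "file::category" pre-grouping above.
interface MiniFinding {
  category: string;
  file?: string;
  title: string;
}

function getClusterKey(f: MiniFinding): string {
  return `${f.file ?? "no-file"}::${f.category}`;
}

const findings: MiniFinding[] = [
  { category: "security", file: "src/db.ts", title: "SQL injection in query builder" },
  { category: "security", file: "src/db.ts", title: "Unparameterized query" },
  { category: "performance", file: "src/db.ts", title: "N+1 query in loop" },
];

const groups = new Map<string, MiniFinding[]>();
for (const f of findings) {
  const key = getClusterKey(f);
  const group = groups.get(key) ?? [];
  group.push(f);
  groups.set(key, group);
}

// Only the two security findings share a bucket and get compared in this pass;
// the performance finding is never grouped with them here (cluster-level
// merging happens later in mergeSimilarClusters).
console.log([...groups.keys()]); // [ 'src/db.ts::security', 'src/db.ts::performance' ]
```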
@@ -1712,16 +2139,20 @@ function mergeSimilarClusters(clusters, config) {
1712
2139
  const cluster1 = merged[i];
1713
2140
  const cluster2 = merged[j];
1714
2141
  if (!cluster1 || !cluster2) continue;
2142
+ let maxSim = 0;
1715
2143
  let totalSim = 0;
1716
2144
  let pairs = 0;
1717
2145
  for (const f1 of cluster1) {
1718
2146
  for (const f2 of cluster2) {
1719
- totalSim += findingSimilarity(f1, f2, config);
2147
+ const sim = findingSimilarity(f1, f2, config);
2148
+ maxSim = Math.max(maxSim, sim);
2149
+ totalSim += sim;
1720
2150
  pairs++;
1721
2151
  }
1722
2152
  }
1723
2153
  const avgSim = pairs > 0 ? totalSim / pairs : 0;
1724
- if (avgSim >= config.similarityThreshold) {
2154
+ const shouldMerge = maxSim >= 0.75 || avgSim >= config.similarityThreshold;
2155
+ if (shouldMerge) {
1725
2156
  merged[i] = [...cluster1, ...cluster2];
1726
2157
  merged.splice(j, 1);
1727
2158
  changed = true;
@@ -2230,4 +2661,4 @@ export {
2230
2661
  gitReviewSchema,
2231
2662
  handleGitReview
2232
2663
  };
2233
- //# sourceMappingURL=chunk-ZDBY7AGR.js.map
2664
+ //# sourceMappingURL=chunk-H4YGXQ7Q.js.map
package/dist/cli-PHU3RI5B.js → cli-QCODHVTG.js CHANGED
@@ -4,8 +4,9 @@ import {
4
4
  buildConsensusReport,
5
5
  formatForHostExtraction,
6
6
  handleGitReview,
7
- initializeConfig
8
- } from "./chunk-ZDBY7AGR.js";
7
+ initializeConfig,
8
+ logger
9
+ } from "./chunk-H4YGXQ7Q.js";
9
10
  import "./chunk-AEDZOTVA.js";
10
11
  import "./chunk-HVF7WG6A.js";
11
12
  import {
@@ -128,29 +129,77 @@ function normalizePath(path) {
128
129
  }
129
130
 
130
131
  // src/consensus/comment-mapper.ts
132
+ var DEFAULT_LINE_TOLERANCE = 20;
133
+ function findNearestChangedLine(changedLines, targetLine, tolerance = DEFAULT_LINE_TOLERANCE) {
134
+ if (changedLines.size === 0) {
135
+ return void 0;
136
+ }
137
+ if (changedLines.has(targetLine)) {
138
+ return targetLine;
139
+ }
140
+ let nearestLine;
141
+ let minDistance = tolerance + 1;
142
+ for (const line of changedLines) {
143
+ const distance = Math.abs(line - targetLine);
144
+ if (distance <= tolerance && distance < minDistance) {
145
+ minDistance = distance;
146
+ nearestLine = line;
147
+ }
148
+ }
149
+ return nearestLine;
150
+ }
151
+ function isWithinHunkBoundary(hunks, lineNumber) {
152
+ for (const hunk of hunks) {
153
+ const hunkEnd = hunk.newStart + hunk.newCount - 1;
154
+ if (lineNumber >= hunk.newStart && lineNumber <= hunkEnd) {
155
+ return true;
156
+ }
157
+ }
158
+ return false;
159
+ }
160
+ function getFirstChangedLine(changedLines) {
161
+ if (changedLines.size === 0) {
162
+ return 1;
163
+ }
164
+ let minLine = Number.POSITIVE_INFINITY;
165
+ for (const line of changedLines) {
166
+ if (line < minLine) {
167
+ minLine = line;
168
+ }
169
+ }
170
+ return minLine;
171
+ }
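A trimmed, self-contained version of findNearestChangedLine above, showing the fallback order a cluster's line goes through: exact match, then the nearest changed line within the tolerance, otherwise the caller falls back to the first changed line. The changed-line set is invented:

```ts
// Trimmed restatement of the nearest-changed-line lookup above.
function nearestChangedLine(changed: Set<number>, target: number, tolerance = 20): number | undefined {
  if (changed.has(target)) return target;
  let best: number | undefined;
  let bestDistance = tolerance + 1;
  for (const line of changed) {
    const distance = Math.abs(line - target);
    if (distance <= tolerance && distance < bestDistance) {
      bestDistance = distance;
      best = line;
    }
  }
  return best;
}

const changedLines = new Set([10, 11, 12, 80]);
console.log(nearestChangedLine(changedLines, 11)); // 11 (exact match)
console.log(nearestChangedLine(changedLines, 20)); // 12 (within the 20-line tolerance)
console.log(nearestChangedLine(changedLines, 50)); // undefined -> caller uses the first changed line (10)
```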
131
172
  function findMatchingDiffFile(diff, filePath) {
132
173
  const normalizedTarget = normalizePath(filePath);
133
174
  for (const file of diff.files) {
134
175
  const normalizedNew = normalizePath(file.newPath);
135
176
  const normalizedOld = normalizePath(file.oldPath);
136
177
  if (normalizedNew === normalizedTarget || normalizedOld === normalizedTarget) {
137
- return { path: file.newPath, changedLines: file.changedLines };
178
+ return {
179
+ path: file.newPath,
180
+ changedLines: file.changedLines,
181
+ hunks: file.hunks
182
+ };
138
183
  }
139
184
  const targetFilename = normalizedTarget.split("/").pop();
140
185
  const newFilename = normalizedNew.split("/").pop();
141
186
  if (targetFilename && newFilename && targetFilename === newFilename) {
142
187
  if (normalizedNew.endsWith(normalizedTarget) || normalizedTarget.endsWith(normalizedNew)) {
143
- return { path: file.newPath, changedLines: file.changedLines };
188
+ return {
189
+ path: file.newPath,
190
+ changedLines: file.changedLines,
191
+ hunks: file.hunks
192
+ };
144
193
  }
145
194
  }
146
195
  }
147
196
  return void 0;
148
197
  }
149
- function mapClustersToComments(clusters, diff) {
198
+ function mapClustersToComments(clusters, diff, lineTolerance = DEFAULT_LINE_TOLERANCE) {
150
199
  const comments = [];
151
200
  const unmapped = [];
152
201
  for (const cluster of clusters) {
153
- if (!cluster.canonicalLocation?.file || !cluster.canonicalLocation?.line) {
202
+ if (!cluster.canonicalLocation?.file) {
154
203
  unmapped.push(cluster);
155
204
  continue;
156
205
  }
@@ -160,14 +209,52 @@ function mapClustersToComments(clusters, diff) {
160
209
  unmapped.push(cluster);
161
210
  continue;
162
211
  }
163
- if (!matchingFile.changedLines.has(line)) {
164
- unmapped.push(cluster);
165
- continue;
212
+ let targetLine;
213
+ if (line !== void 0) {
214
+ if (matchingFile.changedLines.has(line)) {
215
+ targetLine = line;
216
+ } else {
217
+ const nearestLine = findNearestChangedLine(
218
+ matchingFile.changedLines,
219
+ line,
220
+ lineTolerance
221
+ );
222
+ if (nearestLine !== void 0) {
223
+ logger.debug("Using nearest changed line", {
224
+ file,
225
+ originalLine: line,
226
+ mappedLine: nearestLine
227
+ });
228
+ targetLine = nearestLine;
229
+ } else {
230
+ targetLine = getFirstChangedLine(matchingFile.changedLines);
231
+ logger.debug("Using first changed line as fallback", {
232
+ file,
233
+ originalLine: line,
234
+ mappedLine: targetLine
235
+ });
236
+ }
237
+ }
238
+ } else {
239
+ targetLine = getFirstChangedLine(matchingFile.changedLines);
240
+ logger.debug("No line specified, using first changed line", {
241
+ file,
242
+ mappedLine: targetLine
243
+ });
244
+ }
245
+ if (!isWithinHunkBoundary(matchingFile.hunks, targetLine)) {
246
+ logger.warn("Target line not within hunk boundary", {
247
+ file,
248
+ targetLine,
249
+ hunks: matchingFile.hunks.map(
250
+ (h) => `${h.newStart}-${h.newStart + h.newCount - 1}`
251
+ )
252
+ });
166
253
  }
167
254
  comments.push({
168
255
  cluster,
169
256
  path: matchingFile.path,
170
- line
257
+ line: targetLine
171
258
  });
172
259
  }
173
260
  return { comments, unmapped };
@@ -178,8 +265,18 @@ init_esm_shims();
178
265
  var DEFAULT_OPTIONS = {
179
266
  maxComments: 30,
180
267
  includeLowSeverity: false,
181
- showModelAgreement: true
268
+ showModelAgreement: true,
269
+ minConfidence: 0.2
270
+ // Show findings with 2+ models agreeing (was 0.5)
182
271
  };
272
+ var GITHUB_MAX_COMMENT_LENGTH = 65e3;
273
+ function truncateToLimit(text, limit = GITHUB_MAX_COMMENT_LENGTH) {
274
+ if (text.length <= limit) {
275
+ return text;
276
+ }
277
+ const truncationMarker = "\n\n... (truncated due to GitHub character limit)";
278
+ return text.slice(0, limit - truncationMarker.length) + truncationMarker;
279
+ }
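A quick behavior check of the truncation pattern above, using a deliberately small limit so the effect is visible; the helper is re-stated here so the snippet stands alone:

```ts
// The marker is counted inside the limit, so the result never exceeds it.
function truncateToLimit(text: string, limit: number): string {
  if (text.length <= limit) return text;
  const marker = "\n\n... (truncated due to GitHub character limit)";
  return text.slice(0, limit - marker.length) + marker;
}

const body = "x".repeat(100);
const truncated = truncateToLimit(body, 80);
console.log(truncated.length);                        // 80
console.log(truncated.endsWith("character limit)"));  // true
```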
183
280
  function formatSeverity(severity) {
184
281
  const badges = {
185
282
  critical: "CRITICAL",
@@ -193,20 +290,27 @@ function formatSeverity(severity) {
193
290
  function formatCategory(category) {
194
291
  return category.split("_").map((word) => word.charAt(0).toUpperCase() + word.slice(1)).join(" ");
195
292
  }
196
- function formatModelAgreement(cluster) {
197
- const total = cluster.agreeingModels.length + cluster.silentModels.length + cluster.disagreingModels.length;
293
+ function formatModelName(fullName) {
294
+ const slashIndex = fullName.indexOf("/");
295
+ if (slashIndex !== -1) {
296
+ return fullName.slice(slashIndex + 1);
297
+ }
298
+ return fullName;
299
+ }
300
+ function formatModelAgreement(cluster, totalModels) {
198
301
  const agreeing = cluster.agreeingModels.length;
199
302
  const confidence = Math.round(cluster.confidence * 100);
200
- return `${agreeing}/${total} models | Confidence: ${confidence}%`;
303
+ const modelNames = cluster.agreeingModels.map(formatModelName).join(", ");
304
+ return `Found by: ${modelNames} (${agreeing}/${totalModels}) | ${confidence}% confidence`;
201
305
  }
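A sketch of the new agreement line, with illustrative model IDs (anthropic/claude-3-haiku appears elsewhere in this diff; the second ID is invented):

```ts
// Provider prefixes are dropped and the agreeing models are named explicitly.
function formatModelName(fullName: string): string {
  const slash = fullName.indexOf("/");
  return slash === -1 ? fullName : fullName.slice(slash + 1);
}

const agreeingModels = ["anthropic/claude-3-haiku", "openai/gpt-4o-mini"];
const totalModels = 5;
const confidence = Math.round(0.4 * 100);

const line = `Found by: ${agreeingModels.map(formatModelName).join(", ")} (${agreeingModels.length}/${totalModels}) | ${confidence}% confidence`;
console.log(line);
// Found by: claude-3-haiku, gpt-4o-mini (2/5) | 40% confidence
```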
202
- function formatCommentBody(cluster, showModelAgreement) {
306
+ function formatCommentBody(cluster, showModelAgreement, totalModels) {
203
307
  const lines = [];
204
308
  lines.push(
205
309
  `**[${formatSeverity(cluster.severity)}] ${cluster.title}** (${formatCategory(cluster.category)})`
206
310
  );
207
311
  lines.push("");
208
312
  if (showModelAgreement) {
209
- lines.push(formatModelAgreement(cluster));
313
+ lines.push(formatModelAgreement(cluster, totalModels));
210
314
  lines.push("");
211
315
  }
212
316
  const firstFinding = cluster.findings[0];
@@ -251,7 +355,10 @@ function formatSummaryBody(report, mappingResult, inlineCommentCount) {
251
355
  lines.push("## Code Council Multi-Model Review");
252
356
  lines.push("");
253
357
  const totalFindings = report.highConfidence.length + report.moderateConfidence.length + report.lowConfidence.length;
254
- lines.push(`**${report.participatingModels.length} models** participated`);
358
+ const modelNames = report.participatingModels.map(formatModelName).join(", ");
359
+ lines.push(
360
+ `**${report.participatingModels.length} models** participated: ${modelNames}`
361
+ );
255
362
  lines.push(`**${totalFindings} findings** total`);
256
363
  lines.push(`**${inlineCommentCount} inline comments** on changed lines`);
257
364
  lines.push(
@@ -301,8 +408,15 @@ function formatPrComments(report, mappingResult, options = {}) {
301
408
  (c) => c.cluster.severity !== "low" && c.cluster.severity !== "info"
302
409
  );
303
410
  }
411
+ if (opts.minConfidence > 0) {
412
+ commentsToInclude = commentsToInclude.filter(
413
+ (c) => c.cluster.confidence >= opts.minConfidence
414
+ );
415
+ }
304
416
  const severityOrder = ["critical", "high", "medium", "low", "info"];
305
417
  commentsToInclude.sort((a, b) => {
418
+ const confidenceDiff = b.cluster.confidence - a.cluster.confidence;
419
+ if (Math.abs(confidenceDiff) > 0.01) return confidenceDiff;
306
420
  const severityDiff = severityOrder.indexOf(a.cluster.severity) - severityOrder.indexOf(b.cluster.severity);
307
421
  if (severityDiff !== 0) return severityDiff;
308
422
  return a.line - b.line;
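An illustration of the new ordering, with invented clusters: confidence now sorts ahead of severity, so a finding many models agree on outranks a single-model critical:

```ts
// The same comparator shape as above, applied to invented comment data.
interface MiniComment { confidence: number; severity: string; line: number }

const severityOrder = ["critical", "high", "medium", "low", "info"];
const comments: MiniComment[] = [
  { confidence: 0.5, severity: "critical", line: 10 },
  { confidence: 0.9, severity: "medium", line: 40 },
  { confidence: 0.9, severity: "high", line: 5 },
];

comments.sort((a, b) => {
  const confidenceDiff = b.confidence - a.confidence;
  if (Math.abs(confidenceDiff) > 0.01) return confidenceDiff;
  const severityDiff = severityOrder.indexOf(a.severity) - severityOrder.indexOf(b.severity);
  if (severityDiff !== 0) return severityDiff;
  return a.line - b.line;
});

console.log(comments.map((c) => `${c.severity}@${c.line}`));
// [ 'high@5', 'medium@40', 'critical@10' ]
```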
@@ -310,12 +424,21 @@ function formatPrComments(report, mappingResult, options = {}) {
310
424
  if (commentsToInclude.length > opts.maxComments) {
311
425
  commentsToInclude = commentsToInclude.slice(0, opts.maxComments);
312
426
  }
313
- const comments = commentsToInclude.map((mapped) => ({
314
- path: mapped.path,
315
- line: mapped.line,
316
- body: formatCommentBody(mapped.cluster, opts.showModelAgreement)
317
- }));
318
- const body = formatSummaryBody(report, mappingResult, comments.length);
427
+ const totalModels = report.participatingModels.length;
428
+ const comments = commentsToInclude.map((mapped) => {
429
+ const rawBody2 = formatCommentBody(
430
+ mapped.cluster,
431
+ opts.showModelAgreement,
432
+ totalModels
433
+ );
434
+ return {
435
+ path: mapped.path,
436
+ line: mapped.line,
437
+ body: truncateToLimit(rawBody2)
438
+ };
439
+ });
440
+ const rawBody = formatSummaryBody(report, mappingResult, comments.length);
441
+ const body = truncateToLimit(rawBody);
319
442
  return {
320
443
  body,
321
444
  event: "COMMENT",
@@ -582,7 +705,7 @@ async function handleCliCodeReview(client, models, format, context, filePath, op
582
705
  const fullContext = language ? `Language: ${language}${context ? `
583
706
  ${context}` : ""}` : context;
584
707
  const results = await client.reviewCode(inputResult, models, fullContext);
585
- return formatReviewOutput(results, format);
708
+ return formatReviewOutput(results, format, client);
586
709
  }
587
710
  async function handleCliGitReview(client, models, format, context, options) {
588
711
  const reviewType = options["review-type"] || "staged";
@@ -602,7 +725,7 @@ async function handleCliGitReview(client, models, format, context, options) {
602
725
  if (format === "pr-comments") {
603
726
  return formatPrCommentsOutput(result.results, result.diffText, client);
604
727
  }
605
- return formatReviewOutput(result.results, format);
728
+ return formatReviewOutput(result.results, format, client);
606
729
  }
607
730
  async function handleCliFrontendReview(client, models, format, context, filePath, options) {
608
731
  const inputResult = await getInput(filePath);
@@ -616,7 +739,7 @@ async function handleCliFrontendReview(client, models, format, context, filePath
616
739
  reviewType,
617
740
  context
618
741
  });
619
- return formatReviewOutput(results, format);
742
+ return formatReviewOutput(results, format, client);
620
743
  }
621
744
  async function handleCliBackendReview(client, models, format, context, filePath, options) {
622
745
  const inputResult = await getInput(filePath);
@@ -630,7 +753,7 @@ async function handleCliBackendReview(client, models, format, context, filePath,
630
753
  reviewType,
631
754
  context
632
755
  });
633
- return formatReviewOutput(results, format);
756
+ return formatReviewOutput(results, format, client);
634
757
  }
635
758
  async function handleCliPlanReview(client, models, format, context, filePath, options) {
636
759
  const inputResult = await getInput(filePath);
@@ -642,14 +765,25 @@ async function handleCliPlanReview(client, models, format, context, filePath, op
642
765
  reviewType,
643
766
  context
644
767
  });
645
- return formatReviewOutput(results, format);
768
+ return formatReviewOutput(results, format, client);
646
769
  }
647
- function formatReviewOutput(results, format) {
648
- const { formatted } = formatForHostExtraction(results, format);
649
- return {
650
- exitCode: ExitCode.SUCCESS,
651
- stdout: formatted
652
- };
770
+ async function formatReviewOutput(results, format, reviewClient) {
771
+ const consensusClient = new ConsensusClient(reviewClient);
772
+ try {
773
+ const { formatted } = await buildConsensusReport(results, consensusClient, {
774
+ outputFormat: format
775
+ });
776
+ return {
777
+ exitCode: ExitCode.SUCCESS,
778
+ stdout: formatted
779
+ };
780
+ } catch (error) {
781
+ const { formatted } = formatForHostExtraction(results, format);
782
+ return {
783
+ exitCode: ExitCode.SUCCESS,
784
+ stdout: formatted
785
+ };
786
+ }
653
787
  }
654
788
  async function formatPrCommentsOutput(results, diffText, reviewClient) {
655
789
  const consensusClient = new ConsensusClient(reviewClient);
@@ -1048,4 +1182,4 @@ export {
1048
1182
  processResult,
1049
1183
  runCli
1050
1184
  };
1051
- //# sourceMappingURL=cli-PHU3RI5B.js.map
1185
+ //# sourceMappingURL=cli-QCODHVTG.js.map
package/dist/index.js CHANGED
@@ -18,7 +18,7 @@ import {
18
18
  handleGitReview,
19
19
  initializeConfig,
20
20
  logger
21
- } from "./chunk-ZDBY7AGR.js";
21
+ } from "./chunk-H4YGXQ7Q.js";
22
22
  import "./chunk-AEDZOTVA.js";
23
23
  import "./chunk-HVF7WG6A.js";
24
24
  import {
@@ -1905,7 +1905,7 @@ ${r.review}
1905
1905
  var args = process.argv.slice(2);
1906
1906
  var command = args[0];
1907
1907
  if (command === "review" || command === "setup") {
1908
- import("./cli-PHU3RI5B.js").then(async ({ processResult, runCli }) => {
1908
+ import("./cli-QCODHVTG.js").then(async ({ processResult, runCli }) => {
1909
1909
  try {
1910
1910
  const result = await runCli(process.argv);
1911
1911
  if (result) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@klitchevo/code-council",
3
- "version": "0.2.3",
3
+ "version": "0.2.4",
4
4
  "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",
@@ -75,10 +75,12 @@
75
75
  "@openrouter/sdk": "0.3.10",
76
76
  "ignore": "^7.0.5",
77
77
  "jiti": "^2.6.1",
78
+ "js-yaml": "^4.1.1",
78
79
  "zod": "4.2.1"
79
80
  },
80
81
  "devDependencies": {
81
82
  "@biomejs/biome": "2.3.10",
83
+ "@types/js-yaml": "^4.0.9",
82
84
  "@types/node": "25.0.3",
83
85
  "@vitest/coverage-v8": "4.0.16",
84
86
  "lefthook": "2.0.12",