@klitchevo/code-council 0.2.1 → 0.2.4

This diff compares the published contents of the two package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
package/README.md CHANGED
@@ -119,7 +119,7 @@ npx @klitchevo/code-council review --help
 
 ## GitHub Actions
 
-Automatically review PRs with multiple AI models. Findings appear as **inline comments** on the exact lines of code.
+Automatically review PRs with multiple AI models. Findings appear as **inline comments** on the exact lines of code. Code fixes use GitHub's **suggestion syntax** for one-click apply. Re-runs automatically clean up old comments.
 
 ### Quick Setup
 
@@ -237,7 +237,7 @@ function toNumber(val) {
   if (typeof val === "number") return val;
   if (typeof val === "string") {
     const n = parseFloat(val);
-    return isNaN(n) ? null : n;
+    return Number.isNaN(n) ? null : n;
   }
   return null;
 }
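The suggestion-syntax feature described in the README change above relies on GitHub's fenced `suggestion` blocks inside review comments. Below is a minimal sketch of how a fix from the review could be wrapped into such a block; `buildSuggestionBody` is a hypothetical illustration, not an export of this package.

```js
// Minimal sketch (assumption): wrapping a proposed fix in GitHub's
// suggestion fence so it becomes a one-click "Apply suggestion" button.
// `buildSuggestionBody` is hypothetical, not part of this package.
const FENCE = "`".repeat(3);

function buildSuggestionBody(description, suggestedCode) {
  return [
    description,
    "",
    `${FENCE}suggestion`,
    suggestedCode.trimEnd(),
    FENCE,
  ].join("\n");
}

console.log(
  buildSuggestionBody(
    "Use Number.isNaN to avoid implicit coercion.",
    "    return Number.isNaN(n) ? null : n;"
  )
);
```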
@@ -312,4 +312,4 @@ export {
   buildSynthesisUserMessage,
   parseTpsAnalysis
 };
-//# sourceMappingURL=chunk-IVKLQD6M.js.map
+//# sourceMappingURL=chunk-AEDZOTVA.js.map
@@ -5,11 +5,11 @@ import {
   buildBatchUserMessage,
   buildSynthesisUserMessage,
   buildUserMessage
-} from "./chunk-IVKLQD6M.js";
+} from "./chunk-AEDZOTVA.js";
 import {
   formatter_exports,
   init_formatter
-} from "./chunk-SYMFCPGM.js";
+} from "./chunk-HVF7WG6A.js";
 import {
   __toCommonJS,
   init_esm_shims
@@ -911,30 +911,56 @@ var SEVERITY_DESCRIPTIONS = {
911
911
  low: "Nice to fix - minor improvements, style issues, small optimizations",
912
912
  info: "Informational - suggestions, best practices, educational notes"
913
913
  };
914
- var EXTRACTION_SYSTEM_PROMPT = `You are a precise extraction system. Your task is to parse a code review and extract individual findings into a structured JSON format.
914
+ var EXTRACTION_SYSTEM_PROMPT = `You are a precise extraction system. Your task is to parse a code review and extract individual findings into a structured YAML format.
915
915
 
916
916
  ## Output Format
917
917
 
918
- You MUST respond with valid JSON matching this schema:
919
- {
920
- "findings": [
921
- {
922
- "category": "security" | "performance" | "bug" | "maintainability" | "accessibility" | "architecture" | "style" | "documentation" | "testing" | "other",
923
- "severity": "critical" | "high" | "medium" | "low" | "info",
924
- "title": "Short, descriptive title",
925
- "description": "Detailed explanation of the issue",
926
- "location": {
927
- "file": "path/to/file.ts",
928
- "line": 42,
929
- "endLine": 45
930
- },
931
- "suggestion": "How to fix or improve",
932
- "suggestedCode": "The actual corrected code that should replace the problematic code (if applicable)",
933
- "rawExcerpt": "The original text from the review that describes this finding",
934
- "confidence": 0.95
935
- }
936
- ]
937
- }
918
+ You MUST respond with valid YAML matching this schema. Use block scalars (|) for multi-line text:
919
+
920
+ \`\`\`yaml
921
+ findings:
922
+ - category: security
923
+ severity: high
924
+ title: |
925
+ Short, descriptive title
926
+ description: |
927
+ Detailed explanation of the issue.
928
+ location:
929
+ file: path/to/file.ts
930
+ line: 42
931
+ endLine: 45
932
+ suggestion: |
933
+ How to fix (text explanation)
934
+ suggestedCode: |
935
+ // The actual fixed code that should replace the buggy code
936
+ const sanitized = escapeHtml(userInput);
937
+ return sanitized;
938
+ confidence: 0.95
939
+ \`\`\`
940
+
941
+ ## CRITICAL: suggestedCode Field
942
+
943
+ The \`suggestedCode\` field is ESSENTIAL for GitHub's "Apply suggestion" feature. When the review mentions a code fix:
944
+
945
+ 1. **Extract the actual code** that should replace the buggy code
946
+ 2. Put it in \`suggestedCode\` as a block scalar (|)
947
+ 3. This becomes a clickable "Apply suggestion" button on GitHub
948
+
949
+ Example - if reviewer says "change \`exec(userInput)\` to \`execFile('ls', [userInput])\`":
950
+ \`\`\`yaml
951
+ suggestedCode: |
952
+ execFile('ls', [userInput], (error, stdout) => {
953
+ console.log(stdout);
954
+ });
955
+ \`\`\`
956
+
957
+ If no specific code fix is mentioned, omit \`suggestedCode\` entirely.
958
+
959
+ ## Available Values
960
+
961
+ **Categories**: security, performance, bug, maintainability, accessibility, architecture, style, documentation, testing, other
962
+
963
+ **Severities**: critical, high, medium, low, info
938
964
 
939
965
  ## Category Definitions
940
966
 
@@ -947,8 +973,8 @@ ${Object.entries(SEVERITY_DESCRIPTIONS).map(([sev, desc]) => `- **${sev}**: ${de
 ## Extraction Rules
 
 1. **Be thorough**: Extract ALL distinct issues mentioned in the review
-2. **Be precise**: Use exact quotes for rawExcerpt when possible
-3. **Infer location**: If file paths or line numbers are mentioned, include them
+2. **Infer location**: If file paths or line numbers are mentioned, include them
+3. **Extract code fixes**: If the reviewer suggests specific code changes, put them in \`suggestedCode\`
 4. **Normalize severity**: Map vague language to specific severity levels:
    - "critical", "severe", "urgent", "must fix" \u2192 critical
    - "important", "significant", "should fix" \u2192 high
@@ -956,15 +982,17 @@ ${Object.entries(SEVERITY_DESCRIPTIONS).map(([sev, desc]) => `- **${sev}**: ${de
    - "minor", "nitpick", "suggestion" \u2192 low/info
 5. **Set confidence**: Higher (0.8-1.0) for clear, explicit issues; lower (0.5-0.7) for inferred or ambiguous ones
 6. **Don't duplicate**: Each distinct issue should appear once
-7. **Handle empty reviews**: If no issues found, return {"findings": []}
-8. **Include code fixes**: When a fix involves specific code changes, provide the actual corrected code in the suggestedCode field (not explanation, just the code)
+7. **Handle empty reviews**: If no issues found, return \`findings: []\`
 
 ## Important
 
 - DO NOT add findings not present in the review
 - DO NOT interpret or expand on the reviewer's points
-- If the review is empty or only contains praise, return an empty findings array
-- Always respond with valid JSON - no markdown code blocks, no explanatory text`;
+- If the review is empty or only contains praise, return empty findings array
+- Always respond with valid YAML
+- Use block scalars (|) for any text that might contain special characters
+- The location.line field is important - always include it when mentioned in the review
+- The suggestedCode field enables GitHub's "Apply suggestion" button - ALWAYS include it when a code fix is provided`;
 function buildExtractionUserMessage(reviewText, modelName) {
   return `Extract all findings from this code review by ${modelName}:
 
@@ -972,7 +1000,7 @@ function buildExtractionUserMessage(reviewText, modelName) {
 ${reviewText}
 ---
 
-Respond with ONLY valid JSON. Do not include markdown code blocks or any other text.`;
+Respond with ONLY valid YAML. Use block scalars (|) for multi-line text. No markdown code blocks or explanatory text.`;
 }
 
 // src/schemas/consensus.ts
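The extraction prompt now asks models for YAML with block scalars instead of raw JSON. A small sketch of what parsing such a response looks like with js-yaml (added as a dependency in this release); the sample response is illustrative only.

```js
// Sketch: parsing a block-scalar YAML extraction response with js-yaml.
import yaml from "js-yaml";

const response = `
findings:
  - category: security
    severity: high
    title: |
      Unsanitized user input
    suggestedCode: |
      const sanitized = escapeHtml(userInput);
`;

const parsed = yaml.load(response);
console.log(parsed.findings[0].severity);      // "high"
console.log(parsed.findings[0].suggestedCode); // "const sanitized = escapeHtml(userInput);\n"
```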
@@ -1024,12 +1052,15 @@ var OUTPUT_FORMATS = [
 var DEFAULT_CONSENSUS_CONFIG = {
   enabled: false,
   modelWeights: {},
-  highConfidenceThreshold: 0.8,
-  moderateConfidenceThreshold: 0.5,
+  highConfidenceThreshold: 0.7,
+  // 4+ models for high (was 0.8)
+  moderateConfidenceThreshold: 0.33,
+  // 2+ models for moderate (was 0.5)
   extractionModel: "anthropic/claude-3-haiku",
   fallbackOnError: true,
   lineProximity: 5,
-  similarityThreshold: 0.7,
+  similarityThreshold: 0.5,
+  // Allow more clustering (was 0.7)
   hostExtraction: true
   // Recommended: let MCP host model do extraction
 };
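The inline comments suggest these thresholds are interpreted as the fraction of participating models that must report a finding. A sketch under that assumption; `classifyConfidence` is illustrative, not part of the package.

```js
// Sketch (assumption): confidence = agreeing models / total models,
// bucketed by the new thresholds. Not an export of this package.
const config = { highConfidenceThreshold: 0.7, moderateConfidenceThreshold: 0.33 };

function classifyConfidence(agreeing, totalModels, cfg = config) {
  const ratio = agreeing / totalModels;
  if (ratio >= cfg.highConfidenceThreshold) return "high";
  if (ratio >= cfg.moderateConfidenceThreshold) return "moderate";
  return "low";
}

console.log(classifyConfidence(4, 5)); // "high"     (0.8 >= 0.7)
console.log(classifyConfidence(2, 5)); // "moderate" (0.4 >= 0.33)
console.log(classifyConfidence(1, 5)); // "low"
```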
@@ -1049,26 +1080,42 @@ var FindingSchema = z2.object({
1049
1080
  description: z2.string().min(1, "Description cannot be empty"),
1050
1081
  location: CodeLocationSchema.optional(),
1051
1082
  suggestion: z2.string().optional(),
1052
- rawExcerpt: z2.string(),
1083
+ rawExcerpt: z2.string().optional(),
1084
+ // Made optional - not all extractions include this
1053
1085
  extractedAt: z2.string().datetime(),
1054
1086
  confidence: z2.number().min(0).max(1).optional()
1055
1087
  });
1088
+ var severitySchema = z2.string().transform((val) => val.toLowerCase()).pipe(z2.enum(FINDING_SEVERITIES));
1089
+ var categorySchema = z2.string().transform((val) => val.toLowerCase()).transform((val) => {
1090
+ if (!FINDING_CATEGORIES.includes(val)) {
1091
+ return "other";
1092
+ }
1093
+ return val;
1094
+ }).pipe(z2.enum(FINDING_CATEGORIES));
1095
+ var lineNumberSchema = z2.union([z2.number(), z2.string()]).transform((val) => {
1096
+ if (typeof val === "string") {
1097
+ const parsed = Number.parseInt(val, 10);
1098
+ return Number.isNaN(parsed) ? void 0 : parsed;
1099
+ }
1100
+ return val;
1101
+ }).pipe(z2.number().int().nonnegative().optional()).nullish();
1056
1102
  var ExtractionResponseSchema = z2.object({
1057
1103
  findings: z2.array(
1058
1104
  z2.object({
1059
- category: z2.enum(FINDING_CATEGORIES),
1060
- severity: z2.enum(FINDING_SEVERITIES),
1105
+ category: categorySchema,
1106
+ severity: severitySchema,
1061
1107
  title: z2.string(),
1062
1108
  description: z2.string(),
1063
1109
  location: z2.object({
1064
- file: z2.string(),
1065
- line: z2.number().optional(),
1066
- endLine: z2.number().optional()
1067
- }).optional(),
1068
- suggestion: z2.string().optional(),
1069
- suggestedCode: z2.string().optional(),
1070
- rawExcerpt: z2.string(),
1071
- confidence: z2.number().min(0).max(1).optional()
1110
+ file: z2.string().nullish(),
1111
+ // Allow null - some models return null for general findings
1112
+ line: lineNumberSchema,
1113
+ endLine: lineNumberSchema
1114
+ }).nullish(),
1115
+ suggestion: z2.string().nullish(),
1116
+ suggestedCode: z2.string().nullish(),
1117
+ rawExcerpt: z2.string().nullish(),
1118
+ confidence: z2.number().min(0).max(1).nullish()
1072
1119
  })
1073
1120
  )
1074
1121
  });
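The new `severitySchema`, `categorySchema`, and `lineNumberSchema` normalize messy model output before validation: severities are lowercased, unknown categories fall back to "other", and string line numbers are coerced to integers. A standalone sketch of the same transform-then-pipe pattern, assuming zod 4 (the package pins zod 4.2.1); the category list is truncated for brevity.

```js
// Standalone sketch of the normalization pattern above (zod 4 assumed).
import { z } from "zod";

const FINDING_CATEGORIES = ["security", "performance", "bug", "other"]; // subset

const categorySchema = z
  .string()
  .transform((val) => val.toLowerCase())
  .transform((val) => (FINDING_CATEGORIES.includes(val) ? val : "other"))
  .pipe(z.enum(FINDING_CATEGORIES));

const lineNumberSchema = z
  .union([z.number(), z.string()])
  .transform((val) => {
    if (typeof val === "string") {
      const parsed = Number.parseInt(val, 10);
      return Number.isNaN(parsed) ? undefined : parsed;
    }
    return val;
  })
  .pipe(z.number().int().nonnegative().optional())
  .nullish();

console.log(categorySchema.parse("Security"));   // "security"
console.log(categorySchema.parse("code smell")); // "other"
console.log(lineNumberSchema.parse("42"));       // 42
```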
@@ -1145,24 +1192,311 @@ var ConsensusOptionsSchema = z2.object({
1145
1192
  outputFormat: z2.enum(OUTPUT_FORMATS).optional()
1146
1193
  });
1147
1194
 
1195
+ // src/consensus/yaml-parser.ts
1196
+ init_esm_shims();
1197
+ import yaml from "js-yaml";
1198
+ function stripMarkdownCodeBlocks(text) {
1199
+ let result = text.trim();
1200
+ if (result.startsWith("```yaml")) {
1201
+ result = result.slice(7);
1202
+ } else if (result.startsWith("```json")) {
1203
+ result = result.slice(7);
1204
+ } else if (result.startsWith("```")) {
1205
+ result = result.slice(3);
1206
+ }
1207
+ if (result.endsWith("```")) {
1208
+ result = result.slice(0, -3);
1209
+ }
1210
+ return result.trim();
1211
+ }
1212
+ function addBlockScalarIndicators(text) {
1213
+ return text.replace(
1214
+ /^(\s*)(\w+):\s*"([^"]*\\[^"]*)"$/gm,
1215
+ (_, indent, key, value) => {
1216
+ const unescaped = value.replace(/\\n/g, "\n").replace(/\\t/g, " ").replace(/\\"/g, '"').replace(/\\\\/g, "\\");
1217
+ return `${indent}${key}: |
1218
+ ${indent} ${unescaped.replace(/\n/g, `
1219
+ ${indent} `)}`;
1220
+ }
1221
+ );
1222
+ }
1223
+ function fixBlockScalarIndent(text) {
1224
+ return text.replace(/\|(?!\d)/g, "|2");
1225
+ }
1226
+ function extractYamlFromMarkers(text, firstKey, lastKey) {
1227
+ const firstKeyIndex = text.indexOf(`${firstKey}:`);
1228
+ if (firstKeyIndex === -1) return null;
1229
+ let endIndex = text.length;
1230
+ if (lastKey) {
1231
+ const lastKeyIndex = text.lastIndexOf(`${lastKey}:`);
1232
+ if (lastKeyIndex !== -1) {
1233
+ const afterLastKey = text.slice(lastKeyIndex);
1234
+ const nextKeyMatch = afterLastKey.match(/\n[a-zA-Z_]/);
1235
+ if (nextKeyMatch?.index) {
1236
+ endIndex = lastKeyIndex + nextKeyMatch.index;
1237
+ }
1238
+ }
1239
+ }
1240
+ return text.slice(firstKeyIndex, endIndex).trim();
1241
+ }
1242
+ function removeCurlyBrackets(text) {
1243
+ let result = text.trim();
1244
+ if (result.startsWith("{") && result.endsWith("}")) {
1245
+ result = result.slice(1, -1).trim();
1246
+ }
1247
+ return result;
1248
+ }
1249
+ function removeLeadingPlusSymbols(text) {
1250
+ return text.replace(/^\+\s*/gm, "");
1251
+ }
1252
+ function extractFromFindingsKeyword(text) {
1253
+ const stripped = stripMarkdownCodeBlocks(text);
1254
+ const findingsIndex = stripped.indexOf("findings:");
1255
+ if (findingsIndex === -1) {
1256
+ return stripped;
1257
+ }
1258
+ return stripped.slice(findingsIndex).trim();
1259
+ }
1260
+ function removeDuplicateKeys(text) {
1261
+ const lines = text.split("\n");
1262
+ const result = [];
1263
+ const seenKeys = /* @__PURE__ */ new Map();
1264
+ for (const line of lines) {
1265
+ const match = line.match(/^(\s*)(\w+):/);
1266
+ if (match) {
1267
+ const indent = match[1]?.length ?? 0;
1268
+ const key = match[2] ?? "";
1269
+ for (const [level] of seenKeys) {
1270
+ if (level > indent) {
1271
+ seenKeys.delete(level);
1272
+ }
1273
+ }
1274
+ const keysAtLevel = seenKeys.get(indent) ?? /* @__PURE__ */ new Set();
1275
+ if (keysAtLevel.has(key)) {
1276
+ continue;
1277
+ }
1278
+ keysAtLevel.add(key);
1279
+ seenKeys.set(indent, keysAtLevel);
1280
+ }
1281
+ result.push(line);
1282
+ }
1283
+ return result.join("\n");
1284
+ }
1285
+ function replaceTabsWithSpaces(text) {
1286
+ return text.replace(/\t/g, " ");
1287
+ }
1288
+ function tryFixYamlIssues(text) {
1289
+ let result = text;
1290
+ result = result.replace(
1291
+ /^(\s*)(\w+):\s+([^|\n]*:[^\n]*)$/gm,
1292
+ (_, indent, key, value) => {
1293
+ const trimmedValue = value.trim();
1294
+ if (trimmedValue.startsWith('"') || trimmedValue.startsWith("'") || trimmedValue.startsWith("|") || trimmedValue.startsWith(">") || trimmedValue.startsWith("-") || trimmedValue.startsWith("[") || trimmedValue.startsWith("{")) {
1295
+ return `${indent}${key}: ${value}`;
1296
+ }
1297
+ return `${indent}${key}: "${trimmedValue.replace(/"/g, '\\"')}"`;
1298
+ }
1299
+ );
1300
+ return result;
1301
+ }
1302
+ function attemptParse(text, strategyName) {
1303
+ try {
1304
+ const data = yaml.load(text);
1305
+ if (data !== null && data !== void 0) {
1306
+ return { success: true, data, strategy: strategyName };
1307
+ }
1308
+ return { success: false, error: "Parsed to null/undefined" };
1309
+ } catch (error) {
1310
+ const message = error instanceof Error ? error.message : "Unknown error";
1311
+ return { success: false, error: message };
1312
+ }
1313
+ }
1314
+ function attemptJsonParse(text) {
1315
+ try {
1316
+ const data = JSON.parse(text);
1317
+ return { success: true, data, strategy: "json-fallback" };
1318
+ } catch (error) {
1319
+ const message = error instanceof Error ? error.message : "Unknown error";
1320
+ return { success: false, error: message };
1321
+ }
1322
+ }
1323
+ function parseYamlWithFallbacks(text, firstKey, lastKey) {
1324
+ const strategies = [
1325
+ // Try simple approaches first (least likely to break valid YAML)
1326
+ { name: "raw", transform: (t) => t },
1327
+ { name: "strip-markdown", transform: stripMarkdownCodeBlocks },
1328
+ {
1329
+ name: "extract-findings-keyword",
1330
+ transform: extractFromFindingsKeyword
1331
+ },
1332
+ {
1333
+ name: "strip-markdown+tabs",
1334
+ transform: (t) => replaceTabsWithSpaces(stripMarkdownCodeBlocks(t))
1335
+ },
1336
+ {
1337
+ name: "remove-duplicate-keys",
1338
+ transform: (t) => removeDuplicateKeys(stripMarkdownCodeBlocks(t))
1339
+ },
1340
+ {
1341
+ name: "extract-findings+dedup",
1342
+ transform: (t) => removeDuplicateKeys(extractFromFindingsKeyword(t))
1343
+ },
1344
+ {
1345
+ name: "extract-findings+dedup+tabs",
1346
+ transform: (t) => replaceTabsWithSpaces(
1347
+ removeDuplicateKeys(extractFromFindingsKeyword(t))
1348
+ )
1349
+ },
1350
+ // Then try more aggressive fixes
1351
+ {
1352
+ name: "strip-markdown+fix-issues",
1353
+ transform: (t) => tryFixYamlIssues(stripMarkdownCodeBlocks(t))
1354
+ },
1355
+ {
1356
+ name: "block-scalars",
1357
+ transform: (t) => addBlockScalarIndicators(stripMarkdownCodeBlocks(t))
1358
+ },
1359
+ {
1360
+ name: "fix-indent",
1361
+ transform: (t) => fixBlockScalarIndent(stripMarkdownCodeBlocks(t))
1362
+ },
1363
+ {
1364
+ name: "remove-curly",
1365
+ transform: (t) => removeCurlyBrackets(stripMarkdownCodeBlocks(t))
1366
+ },
1367
+ {
1368
+ name: "remove-plus",
1369
+ transform: (t) => removeLeadingPlusSymbols(stripMarkdownCodeBlocks(t))
1370
+ },
1371
+ {
1372
+ name: "tabs-to-spaces",
1373
+ transform: (t) => replaceTabsWithSpaces(stripMarkdownCodeBlocks(t))
1374
+ },
1375
+ {
1376
+ name: "combined-fixes",
1377
+ transform: (t) => tryFixYamlIssues(
1378
+ replaceTabsWithSpaces(
1379
+ removeLeadingPlusSymbols(
1380
+ removeCurlyBrackets(stripMarkdownCodeBlocks(t))
1381
+ )
1382
+ )
1383
+ )
1384
+ },
1385
+ {
1386
+ name: "extract-findings-keyword+fixes",
1387
+ transform: (t) => tryFixYamlIssues(
1388
+ replaceTabsWithSpaces(extractFromFindingsKeyword(t))
1389
+ )
1390
+ },
1391
+ {
1392
+ name: "remove-duplicate-keys+all-fixes",
1393
+ transform: (t) => tryFixYamlIssues(
1394
+ replaceTabsWithSpaces(
1395
+ removeDuplicateKeys(extractFromFindingsKeyword(t))
1396
+ )
1397
+ )
1398
+ }
1399
+ ];
1400
+ if (firstKey) {
1401
+ strategies.push({
1402
+ name: "extract-by-markers",
1403
+ transform: (t) => {
1404
+ const extracted = extractYamlFromMarkers(
1405
+ stripMarkdownCodeBlocks(t),
1406
+ firstKey,
1407
+ lastKey
1408
+ );
1409
+ return extracted ?? t;
1410
+ }
1411
+ });
1412
+ strategies.push({
1413
+ name: "extract-by-markers+dedup",
1414
+ transform: (t) => {
1415
+ const extracted = extractYamlFromMarkers(
1416
+ stripMarkdownCodeBlocks(t),
1417
+ firstKey,
1418
+ lastKey
1419
+ );
1420
+ return removeDuplicateKeys(extracted ?? t);
1421
+ }
1422
+ });
1423
+ }
1424
+ const errors = [];
1425
+ for (const strategy of strategies) {
1426
+ const transformed = strategy.transform(text);
1427
+ const result = attemptParse(transformed, strategy.name);
1428
+ if (result.success) {
1429
+ logger.debug("YAML parsed successfully", { strategy: strategy.name });
1430
+ return result.data;
1431
+ }
1432
+ errors.push(`${strategy.name}: ${result.error}`);
1433
+ }
1434
+ const strippedText = stripMarkdownCodeBlocks(text);
1435
+ const jsonResult = attemptJsonParse(strippedText);
1436
+ if (jsonResult.success) {
1437
+ logger.debug("Fell back to JSON parsing");
1438
+ return jsonResult.data;
1439
+ }
1440
+ errors.push(`json-fallback: ${jsonResult.error}`);
1441
+ const errorSummary = errors.slice(-3).join("; ");
1442
+ const truncatedInput = text.length > 200 ? `${text.slice(0, 200)}...` : text;
1443
+ logger.error("YAML/JSON parsing failed", {
1444
+ inputLength: text.length,
1445
+ inputPreview: truncatedInput,
1446
+ strategiesTried: strategies.length + 1,
1447
+ lastErrors: errors.slice(-3)
1448
+ });
1449
+ throw new Error(
1450
+ `Failed to parse YAML/JSON after ${strategies.length + 1} attempts. Last errors: ${errorSummary}`
1451
+ );
1452
+ }
1453
+ function parseExtractionYaml(text) {
1454
+ return parseYamlWithFallbacks(text, "findings");
1455
+ }
1456
+
1148
1457
  // src/consensus/extractor.ts
1458
+ var MIN_CONFIDENCE_THRESHOLD = 0.3;
1149
1459
  function generateFindingId() {
1150
1460
  const timestamp = Date.now().toString(36);
1151
1461
  const random = Math.random().toString(36).substring(2, 8);
1152
1462
  return toFindingId(`finding-${timestamp}-${random}`);
1153
1463
  }
1464
+ function repairJson(jsonStr) {
1465
+ let repaired = jsonStr;
1466
+ repaired = repaired.replace(/\\'/g, "'");
1467
+ repaired = repaired.replace(/\\`/g, "`");
1468
+ repaired = repaired.replace(/"([^"]*?)"/g, (_, content) => {
1469
+ const fixed = content.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t");
1470
+ return `"${fixed}"`;
1471
+ });
1472
+ repaired = repaired.replace(/,(\s*[}\]])/g, "$1");
1473
+ return repaired;
1474
+ }
1154
1475
  function parseExtractionResponse(responseText, sourceModel) {
1155
- let jsonStr = responseText.trim();
1156
- if (jsonStr.startsWith("```json")) {
1157
- jsonStr = jsonStr.slice(7);
1158
- } else if (jsonStr.startsWith("```")) {
1159
- jsonStr = jsonStr.slice(3);
1160
- }
1161
- if (jsonStr.endsWith("```")) {
1162
- jsonStr = jsonStr.slice(0, -3);
1476
+ let raw;
1477
+ try {
1478
+ raw = parseExtractionYaml(responseText);
1479
+ logger.debug("Parsed extraction response via YAML parser", { sourceModel });
1480
+ } catch (yamlError) {
1481
+ logger.debug("YAML parsing failed, trying JSON repair", {
1482
+ sourceModel,
1483
+ error: yamlError instanceof Error ? yamlError.message : "Unknown"
1484
+ });
1485
+ let jsonStr = responseText.trim();
1486
+ if (jsonStr.startsWith("```json")) {
1487
+ jsonStr = jsonStr.slice(7);
1488
+ } else if (jsonStr.startsWith("```yaml")) {
1489
+ jsonStr = jsonStr.slice(7);
1490
+ } else if (jsonStr.startsWith("```")) {
1491
+ jsonStr = jsonStr.slice(3);
1492
+ }
1493
+ if (jsonStr.endsWith("```")) {
1494
+ jsonStr = jsonStr.slice(0, -3);
1495
+ }
1496
+ jsonStr = jsonStr.trim();
1497
+ jsonStr = repairJson(jsonStr);
1498
+ raw = JSON.parse(jsonStr);
1163
1499
  }
1164
- jsonStr = jsonStr.trim();
1165
- const raw = JSON.parse(jsonStr);
1166
1500
  const parsed = ExtractionResponseSchema.parse(raw);
1167
1501
  const now = (/* @__PURE__ */ new Date()).toISOString();
1168
1502
  return parsed.findings.map(
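`parseYamlWithFallbacks` above runs a chain of progressively more aggressive repair strategies and finally falls back to JSON. A much-reduced standalone sketch of that idea (strip a markdown fence, try YAML, then JSON); the real implementation tries many more strategies.

```js
// Reduced sketch of the fallback idea: strip fence, try YAML, fall back to JSON.
import yaml from "js-yaml";

const FENCE = "`".repeat(3);

function stripFence(text) {
  return text
    .trim()
    .replace(/^`{3}(?:yaml|json)?\s*/, "")
    .replace(/`{3}\s*$/, "")
    .trim();
}

function parseLoose(text) {
  const stripped = stripFence(text);
  try {
    const data = yaml.load(stripped);
    if (data != null) return data;
  } catch {
    // fall through to JSON
  }
  return JSON.parse(stripped);
}

console.log(parseLoose(`${FENCE}yaml\nfindings: []\n${FENCE}`)); // { findings: [] }
```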
@@ -1173,16 +1507,16 @@ function parseExtractionResponse(responseText, sourceModel) {
1173
1507
  severity: f.severity,
1174
1508
  title: f.title,
1175
1509
  description: f.description,
1176
- location: f.location ? {
1510
+ location: f.location?.file ? {
1177
1511
  file: f.location.file,
1178
- line: f.location.line,
1179
- endLine: f.location.endLine
1512
+ line: f.location.line ?? void 0,
1513
+ endLine: f.location.endLine ?? void 0
1180
1514
  } : void 0,
1181
- suggestion: f.suggestion,
1182
- suggestedCode: f.suggestedCode,
1183
- rawExcerpt: f.rawExcerpt,
1515
+ suggestion: f.suggestion ?? void 0,
1516
+ suggestedCode: f.suggestedCode ?? void 0,
1517
+ rawExcerpt: f.rawExcerpt ?? void 0,
1184
1518
  extractedAt: now,
1185
- confidence: f.confidence
1519
+ confidence: f.confidence ?? void 0
1186
1520
  })
1187
1521
  );
1188
1522
  }
@@ -1210,7 +1544,21 @@ async function extractFindings(reviewText, sourceModel, chatFn) {
1210
1544
  EXTRACTION_SYSTEM_PROMPT,
1211
1545
  userMessage
1212
1546
  );
1213
- const findings = parseExtractionResponse(response, sourceModel);
1547
+ const allFindings = parseExtractionResponse(response, sourceModel);
1548
+ const findings = allFindings.filter((f) => {
1549
+ if (f.confidence === void 0) {
1550
+ return true;
1551
+ }
1552
+ return f.confidence >= MIN_CONFIDENCE_THRESHOLD;
1553
+ });
1554
+ const filteredCount = allFindings.length - findings.length;
1555
+ if (filteredCount > 0) {
1556
+ logger.debug("Filtered low-confidence findings", {
1557
+ sourceModel,
1558
+ filtered: filteredCount,
1559
+ threshold: MIN_CONFIDENCE_THRESHOLD
1560
+ });
1561
+ }
1214
1562
  logger.debug("Extracted findings", {
1215
1563
  sourceModel,
1216
1564
  count: findings.length
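Extraction now drops findings whose reported confidence falls below `MIN_CONFIDENCE_THRESHOLD` (0.3) while keeping findings that report no confidence at all. A tiny illustration with made-up data:

```js
// Illustration of the low-confidence filter above; sample data is made up.
const MIN_CONFIDENCE_THRESHOLD = 0.3;

const allFindings = [
  { title: "SQL injection", confidence: 0.9 },
  { title: "Vague style nit", confidence: 0.2 },
  { title: "No confidence reported" },
];

const findings = allFindings.filter(
  (f) => f.confidence === undefined || f.confidence >= MIN_CONFIDENCE_THRESHOLD
);

console.log(findings.map((f) => f.title));
// ["SQL injection", "No confidence reported"]
```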
@@ -1391,11 +1739,11 @@ function normalizeLocation(location) {
1391
1739
  const file = normalizeFilePath(location.file);
1392
1740
  let line = location.line;
1393
1741
  let endLine = location.endLine;
1394
- if (line !== void 0 && line < 1) {
1742
+ if (line !== void 0 && line < 0) {
1395
1743
  line = void 0;
1396
1744
  }
1397
1745
  if (endLine !== void 0) {
1398
- if (endLine < 1) {
1746
+ if (endLine < 0) {
1399
1747
  endLine = void 0;
1400
1748
  } else if (line !== void 0 && endLine < line) {
1401
1749
  [line, endLine] = [endLine, line];
@@ -1418,7 +1766,7 @@ function normalizeFinding(finding) {
1418
1766
  description: finding.description.trim(),
1419
1767
  location: normalizeLocation(finding.location),
1420
1768
  suggestion: finding.suggestion?.trim(),
1421
- rawExcerpt: finding.rawExcerpt.trim(),
1769
+ rawExcerpt: finding.rawExcerpt?.trim(),
1422
1770
  confidence: finding.confidence !== void 0 ? Math.max(0, Math.min(1, finding.confidence)) : void 0
1423
1771
  };
1424
1772
  }
@@ -1571,8 +1919,10 @@ init_esm_shims();
 // src/consensus/clustering.ts
 init_esm_shims();
 var DEFAULT_CLUSTERING_CONFIG = {
-  lineProximity: 5,
-  similarityThreshold: 0.7
+  lineProximity: 15,
+  // More lenient - models often point to different lines for same issue
+  similarityThreshold: 0.55
+  // Lower threshold - models describe same issues differently
 };
 function generateClusterId() {
   const timestamp = Date.now().toString(36);
@@ -1616,9 +1966,54 @@ function createClusterTitle(findings) {
1616
1966
  }
1617
1967
  return titles[0] ?? "Unnamed finding";
1618
1968
  }
1969
+ var ISSUE_KEYWORDS = {
1970
+ sql_injection: ["sql", "injection", "sqli", "cwe-89", "query"],
1971
+ xss: ["xss", "cross-site", "scripting", "cwe-79", "sanitiz"],
1972
+ command_injection: ["command", "injection", "exec", "shell", "cwe-78", "rce"],
1973
+ hardcoded_secrets: [
1974
+ "hardcoded",
1975
+ "secret",
1976
+ "credential",
1977
+ "password",
1978
+ "api key",
1979
+ "cwe-798"
1980
+ ],
1981
+ auth_bypass: ["auth", "bypass", "backdoor", "authentication"],
1982
+ weak_crypto: ["crypto", "random", "math.random", "weak", "cwe-338"],
1983
+ info_disclosure: ["disclosure", "exposure", "logging", "cwe-532"],
1984
+ race_condition: ["race", "condition", "toctou", "concurrent"],
1985
+ memory_leak: ["memory", "leak", "unbounded", "cache"],
1986
+ missing_validation: ["validation", "sanitiz", "escape", "input"]
1987
+ };
1988
+ function extractIssueTypes(text) {
1989
+ const lower = text.toLowerCase();
1990
+ const types = /* @__PURE__ */ new Set();
1991
+ for (const [issueType, keywords] of Object.entries(ISSUE_KEYWORDS)) {
1992
+ for (const keyword of keywords) {
1993
+ if (lower.includes(keyword)) {
1994
+ types.add(issueType);
1995
+ break;
1996
+ }
1997
+ }
1998
+ }
1999
+ return types;
2000
+ }
1619
2001
  function textSimilarity(text1, text2) {
1620
2002
  const t1 = text1 ?? "";
1621
2003
  const t2 = text2 ?? "";
2004
+ const types1 = extractIssueTypes(t1);
2005
+ const types2 = extractIssueTypes(t2);
2006
+ if (types1.size > 0 && types2.size > 0) {
2007
+ let typeOverlap = 0;
2008
+ for (const type of types1) {
2009
+ if (types2.has(type)) {
2010
+ typeOverlap++;
2011
+ }
2012
+ }
2013
+ if (typeOverlap > 0) {
2014
+ return Math.min(1, 0.8 + typeOverlap * 0.05);
2015
+ }
2016
+ }
1622
2017
  const words1 = new Set(
1623
2018
  t1.toLowerCase().split(/\s+/).filter((w) => w.length > 2)
1624
2019
  );
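The keyword table above lets two differently worded findings about the same class of issue short-circuit to a high similarity score instead of relying on word overlap. A standalone sketch of that idea with a trimmed keyword list and illustrative texts:

```js
// Standalone sketch of the keyword short-circuit; keyword lists are a
// small subset of the real ISSUE_KEYWORDS table, texts are illustrative.
const ISSUE_KEYWORDS = {
  sql_injection: ["sql", "injection", "cwe-89"],
  xss: ["xss", "cross-site", "sanitiz"],
};

function extractIssueTypes(text) {
  const lower = text.toLowerCase();
  const types = new Set();
  for (const [type, keywords] of Object.entries(ISSUE_KEYWORDS)) {
    if (keywords.some((k) => lower.includes(k))) types.add(type);
  }
  return types;
}

const a = extractIssueTypes("Possible SQL injection via string concatenation");
const b = extractIssueTypes("User input reaches the query unescaped (CWE-89)");
const overlap = [...a].filter((t) => b.has(t)).length;
console.log(overlap > 0 ? Math.min(1, 0.8 + overlap * 0.05) : "word-overlap fallback");
// ~0.85: both texts map to sql_injection despite sharing few words
```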
@@ -1649,20 +2044,41 @@ function findingSimilarity(finding1, finding2, config = DEFAULT_CLUSTERING_CONFI
1649
2044
  finding2.location,
1650
2045
  config.lineProximity
1651
2046
  );
2047
+ const sameFile = finding1.location?.file && finding2.location?.file && finding1.location.file === finding2.location.file;
2048
+ const types1 = extractIssueTypes(`${finding1.title} ${finding1.description}`);
2049
+ const types2 = extractIssueTypes(`${finding2.title} ${finding2.description}`);
2050
+ let sharedIssueType = false;
2051
+ for (const type of types1) {
2052
+ if (types2.has(type)) {
2053
+ sharedIssueType = true;
2054
+ break;
2055
+ }
2056
+ }
1652
2057
  const titleSim = textSimilarity(finding1.title, finding2.title);
1653
2058
  const descSim = textSimilarity(finding1.description, finding2.description);
2059
+ if (sharedIssueType && sameFile) {
2060
+ return Math.min(1, 0.75 + titleSim * 0.15 + descSim * 0.1);
2061
+ }
2062
+ if (finding1.category === "security" && sameFile) {
2063
+ return Math.min(1, 0.6 + titleSim * 0.2 + descSim * 0.1);
2064
+ }
2065
+ if (sharedIssueType) {
2066
+ return Math.min(1, 0.5 + titleSim * 0.25 + descSim * 0.15);
2067
+ }
1654
2068
  let score = 0;
1655
- score += 0.2;
1656
- if (locationMatch) {
1657
- if (finding1.location?.line && finding2.location?.line) {
1658
- score += 0.4;
1659
- } else if (finding1.location?.file && finding2.location?.file) {
1660
- score += 0.2;
2069
+ score += 0.25;
2070
+ if (sameFile) {
2071
+ if (locationMatch) {
2072
+ score += 0.35;
2073
+ } else {
2074
+ score += 0.25;
1661
2075
  }
2076
+ } else if (locationMatch) {
2077
+ score += 0.15;
1662
2078
  }
1663
2079
  score += titleSim * 0.25;
1664
2080
  score += descSim * 0.15;
1665
- return Math.min(1, score);
2081
+ return Math.min(1, Math.max(0, score));
1666
2082
  }
1667
2083
  function shouldJoinCluster(finding, cluster, config) {
1668
2084
  for (const existing of cluster) {
@@ -1673,20 +2089,40 @@ function shouldJoinCluster(finding, cluster, config) {
1673
2089
  }
1674
2090
  return false;
1675
2091
  }
2092
+ function getClusterKey(finding) {
2093
+ const file = finding.location?.file ?? "no-file";
2094
+ const category = finding.category;
2095
+ return `${file}::${category}`;
2096
+ }
1676
2097
  function initialClustering(findings, config) {
1677
- const clusters = [];
2098
+ const preGroups = /* @__PURE__ */ new Map();
1678
2099
  for (const finding of findings) {
1679
- let joined = false;
1680
- for (const cluster of clusters) {
1681
- if (shouldJoinCluster(finding, cluster, config)) {
1682
- cluster.push(finding);
1683
- joined = true;
1684
- break;
1685
- }
2100
+ const key = getClusterKey(finding);
2101
+ const group = preGroups.get(key) ?? [];
2102
+ group.push(finding);
2103
+ preGroups.set(key, group);
2104
+ }
2105
+ const clusters = [];
2106
+ for (const group of preGroups.values()) {
2107
+ if (group.length === 1) {
2108
+ clusters.push(group);
2109
+ continue;
1686
2110
  }
1687
- if (!joined) {
1688
- clusters.push([finding]);
2111
+ const subClusters = [];
2112
+ for (const finding of group) {
2113
+ let joined = false;
2114
+ for (const subCluster of subClusters) {
2115
+ if (shouldJoinCluster(finding, subCluster, config)) {
2116
+ subCluster.push(finding);
2117
+ joined = true;
2118
+ break;
2119
+ }
2120
+ }
2121
+ if (!joined) {
2122
+ subClusters.push([finding]);
2123
+ }
1689
2124
  }
2125
+ clusters.push(...subClusters);
1690
2126
  }
1691
2127
  return clusters;
1692
2128
  }
@@ -1703,16 +2139,20 @@ function mergeSimilarClusters(clusters, config) {
1703
2139
  const cluster1 = merged[i];
1704
2140
  const cluster2 = merged[j];
1705
2141
  if (!cluster1 || !cluster2) continue;
2142
+ let maxSim = 0;
1706
2143
  let totalSim = 0;
1707
2144
  let pairs = 0;
1708
2145
  for (const f1 of cluster1) {
1709
2146
  for (const f2 of cluster2) {
1710
- totalSim += findingSimilarity(f1, f2, config);
2147
+ const sim = findingSimilarity(f1, f2, config);
2148
+ maxSim = Math.max(maxSim, sim);
2149
+ totalSim += sim;
1711
2150
  pairs++;
1712
2151
  }
1713
2152
  }
1714
2153
  const avgSim = pairs > 0 ? totalSim / pairs : 0;
1715
- if (avgSim >= config.similarityThreshold) {
2154
+ const shouldMerge = maxSim >= 0.75 || avgSim >= config.similarityThreshold;
2155
+ if (shouldMerge) {
1716
2156
  merged[i] = [...cluster1, ...cluster2];
1717
2157
  merged.splice(j, 1);
1718
2158
  changed = true;
@@ -2077,7 +2517,7 @@ async function buildConsensusReport(reviews, consensusClient, options = {}) {
2077
2517
  disagreements: disagreements.length,
2078
2518
  executionMs: report.executionTimeMs
2079
2519
  });
2080
- const { formatReport } = await import("./formatter-D42TRSLL.js");
2520
+ const { formatReport } = await import("./formatter-FIH7J57R.js");
2081
2521
  const formatted = formatReport(report, outputFormat);
2082
2522
  return { report, formatted };
2083
2523
  }
@@ -2221,4 +2661,4 @@ export {
2221
2661
  gitReviewSchema,
2222
2662
  handleGitReview
2223
2663
  };
2224
- //# sourceMappingURL=chunk-YWK4IFSW.js.map
2664
+ //# sourceMappingURL=chunk-H4YGXQ7Q.js.map
@@ -15,7 +15,6 @@ function formatReport(report, format = "markdown") {
       return formatJson(report);
     case "html":
       return formatHtml(report);
-    case "markdown":
     default:
       return formatMarkdown(report);
   }
@@ -396,4 +395,4 @@ export {
396
395
  formatter_exports,
397
396
  init_formatter
398
397
  };
399
- //# sourceMappingURL=chunk-SYMFCPGM.js.map
398
+ //# sourceMappingURL=chunk-HVF7WG6A.js.map
@@ -4,10 +4,11 @@ import {
4
4
  buildConsensusReport,
5
5
  formatForHostExtraction,
6
6
  handleGitReview,
7
- initializeConfig
8
- } from "./chunk-YWK4IFSW.js";
9
- import "./chunk-IVKLQD6M.js";
10
- import "./chunk-SYMFCPGM.js";
7
+ initializeConfig,
8
+ logger
9
+ } from "./chunk-H4YGXQ7Q.js";
10
+ import "./chunk-AEDZOTVA.js";
11
+ import "./chunk-HVF7WG6A.js";
11
12
  import {
12
13
  init_esm_shims
13
14
  } from "./chunk-UFR2SVK2.js";
@@ -43,7 +44,7 @@ function parseUnifiedDiff(diffText) {
43
44
  });
44
45
  }
45
46
  const match = line.match(/^diff --git a\/(.+) b\/(.+)$/);
46
- if (match && match[1] && match[2]) {
47
+ if (match?.[1] && match[2]) {
47
48
  currentFile = {
48
49
  oldPath: match[1],
49
50
  newPath: match[2],
@@ -74,7 +75,7 @@ function parseUnifiedDiff(diffText) {
74
75
  const hunkMatch = line.match(
75
76
  /@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/
76
77
  );
77
- if (hunkMatch && hunkMatch[1] && hunkMatch[3]) {
78
+ if (hunkMatch?.[1] && hunkMatch[3]) {
78
79
  const hunk = {
79
80
  oldStart: parseInt(hunkMatch[1], 10),
80
81
  oldCount: hunkMatch[2] ? parseInt(hunkMatch[2], 10) : 1,
@@ -128,29 +129,77 @@ function normalizePath(path) {
128
129
  }
129
130
 
130
131
  // src/consensus/comment-mapper.ts
132
+ var DEFAULT_LINE_TOLERANCE = 20;
133
+ function findNearestChangedLine(changedLines, targetLine, tolerance = DEFAULT_LINE_TOLERANCE) {
134
+ if (changedLines.size === 0) {
135
+ return void 0;
136
+ }
137
+ if (changedLines.has(targetLine)) {
138
+ return targetLine;
139
+ }
140
+ let nearestLine;
141
+ let minDistance = tolerance + 1;
142
+ for (const line of changedLines) {
143
+ const distance = Math.abs(line - targetLine);
144
+ if (distance <= tolerance && distance < minDistance) {
145
+ minDistance = distance;
146
+ nearestLine = line;
147
+ }
148
+ }
149
+ return nearestLine;
150
+ }
151
+ function isWithinHunkBoundary(hunks, lineNumber) {
152
+ for (const hunk of hunks) {
153
+ const hunkEnd = hunk.newStart + hunk.newCount - 1;
154
+ if (lineNumber >= hunk.newStart && lineNumber <= hunkEnd) {
155
+ return true;
156
+ }
157
+ }
158
+ return false;
159
+ }
160
+ function getFirstChangedLine(changedLines) {
161
+ if (changedLines.size === 0) {
162
+ return 1;
163
+ }
164
+ let minLine = Number.POSITIVE_INFINITY;
165
+ for (const line of changedLines) {
166
+ if (line < minLine) {
167
+ minLine = line;
168
+ }
169
+ }
170
+ return minLine;
171
+ }
131
172
  function findMatchingDiffFile(diff, filePath) {
132
173
  const normalizedTarget = normalizePath(filePath);
133
174
  for (const file of diff.files) {
134
175
  const normalizedNew = normalizePath(file.newPath);
135
176
  const normalizedOld = normalizePath(file.oldPath);
136
177
  if (normalizedNew === normalizedTarget || normalizedOld === normalizedTarget) {
137
- return { path: file.newPath, changedLines: file.changedLines };
178
+ return {
179
+ path: file.newPath,
180
+ changedLines: file.changedLines,
181
+ hunks: file.hunks
182
+ };
138
183
  }
139
184
  const targetFilename = normalizedTarget.split("/").pop();
140
185
  const newFilename = normalizedNew.split("/").pop();
141
186
  if (targetFilename && newFilename && targetFilename === newFilename) {
142
187
  if (normalizedNew.endsWith(normalizedTarget) || normalizedTarget.endsWith(normalizedNew)) {
143
- return { path: file.newPath, changedLines: file.changedLines };
188
+ return {
189
+ path: file.newPath,
190
+ changedLines: file.changedLines,
191
+ hunks: file.hunks
192
+ };
144
193
  }
145
194
  }
146
195
  }
147
196
  return void 0;
148
197
  }
149
- function mapClustersToComments(clusters, diff) {
198
+ function mapClustersToComments(clusters, diff, lineTolerance = DEFAULT_LINE_TOLERANCE) {
150
199
  const comments = [];
151
200
  const unmapped = [];
152
201
  for (const cluster of clusters) {
153
- if (!cluster.canonicalLocation?.file || !cluster.canonicalLocation?.line) {
202
+ if (!cluster.canonicalLocation?.file) {
154
203
  unmapped.push(cluster);
155
204
  continue;
156
205
  }
@@ -160,14 +209,52 @@ function mapClustersToComments(clusters, diff) {
160
209
  unmapped.push(cluster);
161
210
  continue;
162
211
  }
163
- if (!matchingFile.changedLines.has(line)) {
164
- unmapped.push(cluster);
165
- continue;
212
+ let targetLine;
213
+ if (line !== void 0) {
214
+ if (matchingFile.changedLines.has(line)) {
215
+ targetLine = line;
216
+ } else {
217
+ const nearestLine = findNearestChangedLine(
218
+ matchingFile.changedLines,
219
+ line,
220
+ lineTolerance
221
+ );
222
+ if (nearestLine !== void 0) {
223
+ logger.debug("Using nearest changed line", {
224
+ file,
225
+ originalLine: line,
226
+ mappedLine: nearestLine
227
+ });
228
+ targetLine = nearestLine;
229
+ } else {
230
+ targetLine = getFirstChangedLine(matchingFile.changedLines);
231
+ logger.debug("Using first changed line as fallback", {
232
+ file,
233
+ originalLine: line,
234
+ mappedLine: targetLine
235
+ });
236
+ }
237
+ }
238
+ } else {
239
+ targetLine = getFirstChangedLine(matchingFile.changedLines);
240
+ logger.debug("No line specified, using first changed line", {
241
+ file,
242
+ mappedLine: targetLine
243
+ });
244
+ }
245
+ if (!isWithinHunkBoundary(matchingFile.hunks, targetLine)) {
246
+ logger.warn("Target line not within hunk boundary", {
247
+ file,
248
+ targetLine,
249
+ hunks: matchingFile.hunks.map(
250
+ (h) => `${h.newStart}-${h.newStart + h.newCount - 1}`
251
+ )
252
+ });
166
253
  }
167
254
  comments.push({
168
255
  cluster,
169
256
  path: matchingFile.path,
170
- line
257
+ line: targetLine
171
258
  });
172
259
  }
173
260
  return { comments, unmapped };
@@ -178,8 +265,18 @@ init_esm_shims();
178
265
  var DEFAULT_OPTIONS = {
179
266
  maxComments: 30,
180
267
  includeLowSeverity: false,
181
- showModelAgreement: true
268
+ showModelAgreement: true,
269
+ minConfidence: 0.2
270
+ // Show findings with 2+ models agreeing (was 0.5)
182
271
  };
272
+ var GITHUB_MAX_COMMENT_LENGTH = 65e3;
273
+ function truncateToLimit(text, limit = GITHUB_MAX_COMMENT_LENGTH) {
274
+ if (text.length <= limit) {
275
+ return text;
276
+ }
277
+ const truncationMarker = "\n\n... (truncated due to GitHub character limit)";
278
+ return text.slice(0, limit - truncationMarker.length) + truncationMarker;
279
+ }
183
280
  function formatSeverity(severity) {
184
281
  const badges = {
185
282
  critical: "CRITICAL",
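Comment bodies are now capped at `GITHUB_MAX_COMMENT_LENGTH` (65,000 characters) with a truncation marker so oversized bodies are not rejected by GitHub. A standalone sketch of the cap:

```js
// Sketch of the truncation guard above.
const GITHUB_MAX_COMMENT_LENGTH = 65000;

function truncateToLimit(text, limit = GITHUB_MAX_COMMENT_LENGTH) {
  if (text.length <= limit) return text;
  const marker = "\n\n... (truncated due to GitHub character limit)";
  return text.slice(0, limit - marker.length) + marker;
}

const body = "x".repeat(70000);
console.log(truncateToLimit(body).length); // 65000
```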
@@ -193,20 +290,27 @@ function formatSeverity(severity) {
193
290
  function formatCategory(category) {
194
291
  return category.split("_").map((word) => word.charAt(0).toUpperCase() + word.slice(1)).join(" ");
195
292
  }
196
- function formatModelAgreement(cluster) {
197
- const total = cluster.agreeingModels.length + cluster.silentModels.length + cluster.disagreingModels.length;
293
+ function formatModelName(fullName) {
294
+ const slashIndex = fullName.indexOf("/");
295
+ if (slashIndex !== -1) {
296
+ return fullName.slice(slashIndex + 1);
297
+ }
298
+ return fullName;
299
+ }
300
+ function formatModelAgreement(cluster, totalModels) {
198
301
  const agreeing = cluster.agreeingModels.length;
199
302
  const confidence = Math.round(cluster.confidence * 100);
200
- return `${agreeing}/${total} models | Confidence: ${confidence}%`;
303
+ const modelNames = cluster.agreeingModels.map(formatModelName).join(", ");
304
+ return `Found by: ${modelNames} (${agreeing}/${totalModels}) | ${confidence}% confidence`;
201
305
  }
202
- function formatCommentBody(cluster, showModelAgreement) {
306
+ function formatCommentBody(cluster, showModelAgreement, totalModels) {
203
307
  const lines = [];
204
308
  lines.push(
205
309
  `**[${formatSeverity(cluster.severity)}] ${cluster.title}** (${formatCategory(cluster.category)})`
206
310
  );
207
311
  lines.push("");
208
312
  if (showModelAgreement) {
209
- lines.push(formatModelAgreement(cluster));
313
+ lines.push(formatModelAgreement(cluster, totalModels));
210
314
  lines.push("");
211
315
  }
212
316
  const firstFinding = cluster.findings[0];
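The agreement line now names the agreeing models (with the provider prefix stripped) instead of only showing a count. A sketch with illustrative model ids and cluster data:

```js
// Sketch of the new agreement line; model ids and cluster data are illustrative.
function formatModelName(fullName) {
  const slashIndex = fullName.indexOf("/");
  return slashIndex !== -1 ? fullName.slice(slashIndex + 1) : fullName;
}

const cluster = {
  agreeingModels: ["anthropic/claude-3-haiku", "openai/gpt-4o"],
  confidence: 0.67,
};
const totalModels = 3;

const names = cluster.agreeingModels.map(formatModelName).join(", ");
console.log(
  `Found by: ${names} (${cluster.agreeingModels.length}/${totalModels}) | ${Math.round(cluster.confidence * 100)}% confidence`
);
// Found by: claude-3-haiku, gpt-4o (2/3) | 67% confidence
```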
@@ -251,7 +355,10 @@ function formatSummaryBody(report, mappingResult, inlineCommentCount) {
251
355
  lines.push("## Code Council Multi-Model Review");
252
356
  lines.push("");
253
357
  const totalFindings = report.highConfidence.length + report.moderateConfidence.length + report.lowConfidence.length;
254
- lines.push(`**${report.participatingModels.length} models** participated`);
358
+ const modelNames = report.participatingModels.map(formatModelName).join(", ");
359
+ lines.push(
360
+ `**${report.participatingModels.length} models** participated: ${modelNames}`
361
+ );
255
362
  lines.push(`**${totalFindings} findings** total`);
256
363
  lines.push(`**${inlineCommentCount} inline comments** on changed lines`);
257
364
  lines.push(
@@ -301,8 +408,15 @@ function formatPrComments(report, mappingResult, options = {}) {
301
408
  (c) => c.cluster.severity !== "low" && c.cluster.severity !== "info"
302
409
  );
303
410
  }
411
+ if (opts.minConfidence > 0) {
412
+ commentsToInclude = commentsToInclude.filter(
413
+ (c) => c.cluster.confidence >= opts.minConfidence
414
+ );
415
+ }
304
416
  const severityOrder = ["critical", "high", "medium", "low", "info"];
305
417
  commentsToInclude.sort((a, b) => {
418
+ const confidenceDiff = b.cluster.confidence - a.cluster.confidence;
419
+ if (Math.abs(confidenceDiff) > 0.01) return confidenceDiff;
306
420
  const severityDiff = severityOrder.indexOf(a.cluster.severity) - severityOrder.indexOf(b.cluster.severity);
307
421
  if (severityDiff !== 0) return severityDiff;
308
422
  return a.line - b.line;
@@ -310,12 +424,21 @@ function formatPrComments(report, mappingResult, options = {}) {
310
424
  if (commentsToInclude.length > opts.maxComments) {
311
425
  commentsToInclude = commentsToInclude.slice(0, opts.maxComments);
312
426
  }
313
- const comments = commentsToInclude.map((mapped) => ({
314
- path: mapped.path,
315
- line: mapped.line,
316
- body: formatCommentBody(mapped.cluster, opts.showModelAgreement)
317
- }));
318
- const body = formatSummaryBody(report, mappingResult, comments.length);
427
+ const totalModels = report.participatingModels.length;
428
+ const comments = commentsToInclude.map((mapped) => {
429
+ const rawBody2 = formatCommentBody(
430
+ mapped.cluster,
431
+ opts.showModelAgreement,
432
+ totalModels
433
+ );
434
+ return {
435
+ path: mapped.path,
436
+ line: mapped.line,
437
+ body: truncateToLimit(rawBody2)
438
+ };
439
+ });
440
+ const rawBody = formatSummaryBody(report, mappingResult, comments.length);
441
+ const body = truncateToLimit(rawBody);
319
442
  return {
320
443
  body,
321
444
  event: "COMMENT",
@@ -582,7 +705,7 @@ async function handleCliCodeReview(client, models, format, context, filePath, op
582
705
  const fullContext = language ? `Language: ${language}${context ? `
583
706
  ${context}` : ""}` : context;
584
707
  const results = await client.reviewCode(inputResult, models, fullContext);
585
- return formatReviewOutput(results, format);
708
+ return formatReviewOutput(results, format, client);
586
709
  }
587
710
  async function handleCliGitReview(client, models, format, context, options) {
588
711
  const reviewType = options["review-type"] || "staged";
@@ -602,7 +725,7 @@ async function handleCliGitReview(client, models, format, context, options) {
602
725
  if (format === "pr-comments") {
603
726
  return formatPrCommentsOutput(result.results, result.diffText, client);
604
727
  }
605
- return formatReviewOutput(result.results, format);
728
+ return formatReviewOutput(result.results, format, client);
606
729
  }
607
730
  async function handleCliFrontendReview(client, models, format, context, filePath, options) {
608
731
  const inputResult = await getInput(filePath);
@@ -616,7 +739,7 @@ async function handleCliFrontendReview(client, models, format, context, filePath
616
739
  reviewType,
617
740
  context
618
741
  });
619
- return formatReviewOutput(results, format);
742
+ return formatReviewOutput(results, format, client);
620
743
  }
621
744
  async function handleCliBackendReview(client, models, format, context, filePath, options) {
622
745
  const inputResult = await getInput(filePath);
@@ -630,7 +753,7 @@ async function handleCliBackendReview(client, models, format, context, filePath,
630
753
  reviewType,
631
754
  context
632
755
  });
633
- return formatReviewOutput(results, format);
756
+ return formatReviewOutput(results, format, client);
634
757
  }
635
758
  async function handleCliPlanReview(client, models, format, context, filePath, options) {
636
759
  const inputResult = await getInput(filePath);
@@ -642,14 +765,25 @@ async function handleCliPlanReview(client, models, format, context, filePath, op
642
765
  reviewType,
643
766
  context
644
767
  });
645
- return formatReviewOutput(results, format);
768
+ return formatReviewOutput(results, format, client);
646
769
  }
647
- function formatReviewOutput(results, format) {
648
- const { formatted } = formatForHostExtraction(results, format);
649
- return {
650
- exitCode: ExitCode.SUCCESS,
651
- stdout: formatted
652
- };
770
+ async function formatReviewOutput(results, format, reviewClient) {
771
+ const consensusClient = new ConsensusClient(reviewClient);
772
+ try {
773
+ const { formatted } = await buildConsensusReport(results, consensusClient, {
774
+ outputFormat: format
775
+ });
776
+ return {
777
+ exitCode: ExitCode.SUCCESS,
778
+ stdout: formatted
779
+ };
780
+ } catch (error) {
781
+ const { formatted } = formatForHostExtraction(results, format);
782
+ return {
783
+ exitCode: ExitCode.SUCCESS,
784
+ stdout: formatted
785
+ };
786
+ }
653
787
  }
654
788
  async function formatPrCommentsOutput(results, diffText, reviewClient) {
655
789
  const consensusClient = new ConsensusClient(reviewClient);
@@ -1048,4 +1182,4 @@ export {
1048
1182
  processResult,
1049
1183
  runCli
1050
1184
  };
1051
- //# sourceMappingURL=cli-223AZHWH.js.map
1185
+ //# sourceMappingURL=cli-QCODHVTG.js.map
@@ -1,10 +1,10 @@
1
1
  import {
2
2
  formatReport,
3
3
  init_formatter
4
- } from "./chunk-SYMFCPGM.js";
4
+ } from "./chunk-HVF7WG6A.js";
5
5
  import "./chunk-UFR2SVK2.js";
6
6
  init_formatter();
7
7
  export {
8
8
  formatReport
9
9
  };
10
- //# sourceMappingURL=formatter-D42TRSLL.js.map
10
+ //# sourceMappingURL=formatter-FIH7J57R.js.map
package/dist/index.js CHANGED
@@ -18,9 +18,9 @@ import {
18
18
  handleGitReview,
19
19
  initializeConfig,
20
20
  logger
21
- } from "./chunk-YWK4IFSW.js";
22
- import "./chunk-IVKLQD6M.js";
23
- import "./chunk-SYMFCPGM.js";
21
+ } from "./chunk-H4YGXQ7Q.js";
22
+ import "./chunk-AEDZOTVA.js";
23
+ import "./chunk-HVF7WG6A.js";
24
24
  import {
25
25
  init_esm_shims
26
26
  } from "./chunk-UFR2SVK2.js";
@@ -1233,7 +1233,7 @@ function isSensitiveFile(filename) {
1233
1233
  for (const pattern of SENSITIVE_FILE_PATTERNS) {
1234
1234
  if (pattern.includes("*")) {
1235
1235
  const regex = new RegExp(
1236
- "^" + pattern.replace(/\./g, "\\.").replace(/\*/g, ".*") + "$",
1236
+ `^${pattern.replace(/\./g, "\\.").replace(/\*/g, ".*")}$`,
1237
1237
  "i"
1238
1238
  );
1239
1239
  if (regex.test(lowerName)) {
@@ -1661,7 +1661,7 @@ async function handleTpsAudit(client, models, input) {
1661
1661
  });
1662
1662
  for (const result of results) {
1663
1663
  if (!result.error && result.review) {
1664
- const { parseTpsAnalysis } = await import("./tps-audit-DPIJH6JK.js");
1664
+ const { parseTpsAnalysis } = await import("./tps-audit-2DYJMPJ5.js");
1665
1665
  analysis = parseTpsAnalysis(result.review);
1666
1666
  if (analysis) break;
1667
1667
  }
@@ -1676,7 +1676,7 @@ async function handleTpsAudit(client, models, input) {
1676
1676
  };
1677
1677
  }
1678
1678
  async function handleBatchedTpsAudit(client, models, scanResult, focusAreas) {
1679
- const { parseTpsAnalysis } = await import("./tps-audit-DPIJH6JK.js");
1679
+ const { parseTpsAnalysis } = await import("./tps-audit-2DYJMPJ5.js");
1680
1680
  const repoName = scanResult.repoRoot.split("/").pop() ?? "unknown";
1681
1681
  const batches = createFileBatches(scanResult.files);
1682
1682
  const batchContents = batches.map((batch) => ({
@@ -1821,7 +1821,6 @@ function formatTpsAuditResults(auditResult) {
1821
1821
  );
1822
1822
  break;
1823
1823
  }
1824
- case "markdown":
1825
1824
  default: {
1826
1825
  const parts = [];
1827
1826
  parts.push("# TPS Audit Report\n");
@@ -1906,7 +1905,7 @@ ${r.review}
1906
1905
  var args = process.argv.slice(2);
1907
1906
  var command = args[0];
1908
1907
  if (command === "review" || command === "setup") {
1909
- import("./cli-223AZHWH.js").then(async ({ processResult, runCli }) => {
1908
+ import("./cli-QCODHVTG.js").then(async ({ processResult, runCli }) => {
1910
1909
  try {
1911
1910
  const result = await runCli(process.argv);
1912
1911
  if (result) {
@@ -6,7 +6,7 @@ import {
6
6
  buildSynthesisUserMessage,
7
7
  buildUserMessage,
8
8
  parseTpsAnalysis
9
- } from "./chunk-IVKLQD6M.js";
9
+ } from "./chunk-AEDZOTVA.js";
10
10
  import "./chunk-UFR2SVK2.js";
11
11
  export {
12
12
  BATCH_SYSTEM_PROMPT,
@@ -17,4 +17,4 @@ export {
17
17
  buildUserMessage,
18
18
  parseTpsAnalysis
19
19
  };
20
- //# sourceMappingURL=tps-audit-DPIJH6JK.js.map
20
+ //# sourceMappingURL=tps-audit-2DYJMPJ5.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@klitchevo/code-council",
3
- "version": "0.2.1",
3
+ "version": "0.2.4",
4
4
  "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",
@@ -75,10 +75,12 @@
75
75
  "@openrouter/sdk": "0.3.10",
76
76
  "ignore": "^7.0.5",
77
77
  "jiti": "^2.6.1",
78
+ "js-yaml": "^4.1.1",
78
79
  "zod": "4.2.1"
79
80
  },
80
81
  "devDependencies": {
81
82
  "@biomejs/biome": "2.3.10",
83
+ "@types/js-yaml": "^4.0.9",
82
84
  "@types/node": "25.0.3",
83
85
  "@vitest/coverage-v8": "4.0.16",
84
86
  "lefthook": "2.0.12",