@ranger1/dx 0.1.69 → 0.1.71

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,866 @@
1
+ #!/usr/bin/env python3
2
+ # Deterministic PR review aggregation (script owns all rules).
3
+ #
4
+ # Workflow:
5
+ # - Mode A: read contextFile + reviewFile(s) from project cache: ./.cache/, parse findings, merge duplicates,
6
+ # post a single PR comment, and generate a fixFile for fixer.
7
+ # - Mode B: read fixReportFile from cache and post it as a PR comment.
8
+ #
9
+ # Input rules:
10
+ # - Callers should pass repo-relative paths (e.g. ./.cache/foo.md). For backward-compat, basenames are also accepted.
11
+ # - Duplicate groups come from LLM but are passed as an argument (NOT written to disk).
12
+ # - Prefer: --duplicate-groups-b64 <base64(json)>
13
+ # - Also supported: --duplicate-groups-json '<json>'
14
+ # - Invalid/missing duplicate groups => treated as no dedupe (do not fail).
15
+ #
16
+ # Output rules:
17
+ # - Stdout must print exactly ONE JSON object and nothing else.
18
+ # - Mode A: {"stop":true} OR {"stop":false,"fixFile":"..."}
19
+ # - Mode B: {"ok":true}
20
+ #
21
+ # PR comment rules:
22
+ # - Every comment must include marker: <!-- pr-review-loop-marker -->
23
+ # - Comment body must NOT contain local filesystem paths (this script scrubs cache paths, $HOME, and repo absolute paths).
24
+ #
25
+ # fixFile rules:
26
+ # - fixFile includes all findings, split into:
27
+ # - IssuesToFix: P0/P1 (must fix)
28
+ # - OptionalIssues: P2/P3 (fixer may decide)
29
+ # - Each merged duplicate group keeps ONE canonical id; merged IDs are appended into canonical description.
30
+ # - Do NOT rewrite id prefixes (e.g. SEC-/LOG-/STY-/GHR-); preserve reviewer-provided finding IDs.
31
+
32
+ import argparse
33
+ import base64
34
+ import json
35
+ import os
36
+ import re
37
+ import subprocess
38
+ import sys
39
+ from pathlib import Path
40
+
41
+
42
# Hidden HTML marker embedded in every bot comment so later rounds can
# recognize (and de-duplicate against) comments posted by this tool.
MARKER = "<!-- pr-review-loop-marker -->"
43
+
44
+
45
def _repo_root():
    """Return the git repository root, falling back to the CWD.

    Uses `git rev-parse --show-toplevel`; any failure (git missing, not a
    repo, non-zero exit, empty output) yields Path.cwd() instead of raising.
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
        )
        top = (proc.stdout or "").strip()
        if proc.returncode == 0 and top:
            return Path(top)
    except Exception:
        pass
    return Path.cwd()
59
+
60
+
61
+ def _cache_dir(repo_root):
62
+ return (repo_root / ".cache").resolve()
63
+
64
+
65
+ def _is_safe_relpath(p):
66
+ if p.is_absolute():
67
+ return False
68
+ if any(part in ("..",) for part in p.parts):
69
+ return False
70
+ return True
71
+
72
+
73
def _resolve_ref(repo_root, cache_dir, ref):
    """Resolve a caller-supplied file reference to an absolute Path, or None.

    Accepted forms:
      - repo-relative paths (e.g. ./.cache/foo.md), resolved against repo_root;
      - absolute paths, but only if they land inside cache_dir;
      - bare basenames (backward-compat), resolved inside cache_dir.
    Anything unsafe (traversal, foreign absolute path, empty) yields None.
    """
    if not ref:
        return None
    text = str(ref).strip()
    if not text:
        return None

    # A separator or leading dot means the caller passed a path, not a basename.
    if "/" in text or "\\" in text or text.startswith("."):
        candidate = Path(text)
        if candidate.is_absolute():
            # Absolute refs are only honored when they live under cache_dir.
            try:
                resolved = candidate.resolve()
                resolved.relative_to(cache_dir.resolve())
            except Exception:
                return None
            return resolved
        if _is_safe_relpath(candidate):
            return (repo_root / candidate).resolve()
        return None

    # Backward-compat: a bare basename is placed in the cache directory.
    base = _safe_basename(text)
    return (cache_dir / base).resolve() if base else None
101
+
102
+
103
+ def _repo_relpath(repo_root, p):
104
+ try:
105
+ rel = p.resolve().relative_to(repo_root.resolve())
106
+ return "./" + rel.as_posix()
107
+ except Exception:
108
+ return os.path.basename(str(p))
109
+
110
+
111
# Resolved once at import time; every cache/path helper below keys off these.
REPO_ROOT = _repo_root()
CACHE_DIR = _cache_dir(REPO_ROOT)
113
+
114
+
115
+ def _json_out(obj):
116
+ sys.stdout.write(json.dumps(obj, ensure_ascii=True))
117
+ sys.stdout.write("\n")
118
+
119
+
120
+ def _safe_basename(name):
121
+ if not name:
122
+ return None
123
+ base = os.path.basename(name.strip())
124
+ if base != name.strip():
125
+ return None
126
+ if base in (".", ".."):
127
+ return None
128
+ return base
129
+
130
+
131
def _read_cache_text(ref):
    """Read a cache-ref'd file as UTF-8 text (undecodable bytes replaced).

    Raises FileNotFoundError("INVALID_CACHE_REF") when the ref is unsafe.
    """
    path = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
    if path is None:
        raise FileNotFoundError("INVALID_CACHE_REF")
    return path.read_text(encoding="utf-8", errors="replace")
136
+
137
+
138
def _write_cache_text(ref, content):
    """Write content (UTF-8, LF newlines) to a cache-ref'd path.

    Creates the cache directory and the target's parent as needed.
    Raises ValueError("INVALID_CACHE_REF") when the ref is unsafe.
    """
    path = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
    if path is None:
        raise ValueError("INVALID_CACHE_REF")
    for directory in (CACHE_DIR, path.parent):
        directory.mkdir(parents=True, exist_ok=True)
    path.write_text(content, encoding="utf-8", newline="\n")
145
+
146
+
147
+ def _priority_rank(p):
148
+ if not isinstance(p, str):
149
+ return 99
150
+ p = p.strip().upper()
151
+ if p == "P0":
152
+ return 0
153
+ if p == "P1":
154
+ return 1
155
+ if p == "P2":
156
+ return 2
157
+ if p == "P3":
158
+ return 3
159
+ return 99
160
+
161
+
162
def _sanitize_for_comment(text):
    """Scrub local filesystem paths from text destined for a PR comment.

    Per the header contract, comment bodies must not leak local paths: the
    cache directory becomes the neutral token "[cache]", the repo-root
    prefix is dropped, and $HOME is collapsed to "~".
    """
    if not isinstance(text, str):
        text = str(text)

    home = str(Path.home())
    cache_abs = str(CACHE_DIR.resolve())
    repo_abs = str(REPO_ROOT.resolve())

    # Scrub the local cache path — with and without a trailing slash, so a
    # bare reference to the directory itself cannot leak either.
    text = text.replace(cache_abs + "/", "[cache]/")
    text = text.replace(cache_abs, "[cache]")

    # Avoid leaking absolute local repo paths (guard against repo_abs == "/",
    # which would mangle every path in the text).
    if repo_abs and repo_abs != "/":
        text = text.replace(repo_abs + "/", "")

    # Bug fix: `home` was previously computed but never applied, so $HOME
    # could leak into PR comments despite the documented contract.
    if home and home != "/":
        text = text.replace(home, "~")

    return text
178
+
179
+
180
+ def _parse_duplicate_groups_json(s):
181
+ if not s:
182
+ return []
183
+ try:
184
+ data = json.loads(s)
185
+ except Exception:
186
+ return []
187
+
188
+ groups = []
189
+ if isinstance(data, dict) and isinstance(data.get("duplicateGroups"), list):
190
+ groups = data.get("duplicateGroups")
191
+ elif isinstance(data, list):
192
+ groups = data
193
+ else:
194
+ return []
195
+
196
+ out = []
197
+ for g in (groups or []):
198
+ if not isinstance(g, list):
199
+ continue
200
+ ids = []
201
+ for it in g:
202
+ if isinstance(it, str) and it.strip():
203
+ ids.append(it.strip())
204
+ ids = list(dict.fromkeys(ids))
205
+ if len(ids) >= 2:
206
+ out.append(ids)
207
+ return out
208
+
209
+
210
def _parse_duplicate_groups_b64(s):
    """Strictly base64-decode s, then parse it as duplicate-group JSON.

    Any decoding failure yields [] (treated as "no dedupe", never fatal).
    """
    if not s:
        return []
    try:
        decoded = base64.b64decode(s.encode("ascii"), validate=True)
        text = decoded.decode("utf-8", errors="replace")
        return _parse_duplicate_groups_json(text)
    except Exception:
        return []
218
+
219
+
220
+ def _parse_escalation_groups_json(s):
221
+ """Parse escalation groups JSON (same format as duplicate groups)."""
222
+ if not s:
223
+ return []
224
+ try:
225
+ data = json.loads(s)
226
+ except Exception:
227
+ return []
228
+
229
+ groups = []
230
+ if isinstance(data, dict) and isinstance(data.get("escalationGroups"), list):
231
+ groups = data.get("escalationGroups")
232
+ elif isinstance(data, list):
233
+ groups = data
234
+ else:
235
+ return []
236
+
237
+ out = []
238
+ for g in (groups or []):
239
+ if not isinstance(g, list):
240
+ continue
241
+ ids = []
242
+ for it in g:
243
+ if isinstance(it, str) and it.strip():
244
+ ids.append(it.strip())
245
+ ids = list(dict.fromkeys(ids))
246
+ if len(ids) >= 2:
247
+ out.append(ids)
248
+ return out
249
+
250
+
251
def _parse_escalation_groups_b64(s):
    """Strictly base64-decode s, then parse it as escalation-group JSON.

    Any decoding failure yields [] (never fatal).
    """
    if not s:
        return []
    try:
        decoded = base64.b64decode(s.encode("ascii"), validate=True)
        text = decoded.decode("utf-8", errors="replace")
        return _parse_escalation_groups_json(text)
    except Exception:
        return []
260
+
261
+
262
+ def _parse_decision_log(md_text):
263
+ """
264
+ Parse decision log markdown and extract fixed/rejected decisions.
265
+
266
+ Format:
267
+ # Decision Log
268
+ PR: 123
269
+ ## Round 1
270
+ ### Fixed
271
+ - id: SEC-001
272
+ commit: abc123
273
+ essence: JSON.parse error handling
274
+ ### Rejected
275
+ - id: STY-004
276
+ priority: P2
277
+ reason: needs product decision
278
+ essence: component split suggestion
279
+
280
+ Returns: [
281
+ {"id": "SEC-001", "status": "fixed", "essence": "...", "commit": "..."},
282
+ {"id": "STY-004", "status": "rejected", "essence": "...", "reason": "...", "priority": "P2"}
283
+ ]
284
+ """
285
+ if not md_text:
286
+ return []
287
+
288
+ lines = md_text.splitlines()
289
+ decisions = []
290
+
291
+ current_status = None # "fixed" or "rejected"
292
+ current_entry = None
293
+
294
+ for raw in lines:
295
+ line = raw.rstrip("\n")
296
+
297
+ # Detect status section headers
298
+ if line.strip().lower() == "### fixed":
299
+ current_status = "fixed"
300
+ if current_entry:
301
+ decisions.append(current_entry)
302
+ current_entry = None
303
+ continue
304
+
305
+ if line.strip().lower() == "### rejected":
306
+ current_status = "rejected"
307
+ if current_entry:
308
+ decisions.append(current_entry)
309
+ current_entry = None
310
+ continue
311
+
312
+ # Reset on new round headers
313
+ if line.startswith("## Round "):
314
+ if current_entry:
315
+ decisions.append(current_entry)
316
+ current_entry = None
317
+ continue
318
+
319
+ # Start new entry
320
+ if line.startswith("- id:") and current_status:
321
+ if current_entry:
322
+ decisions.append(current_entry)
323
+ fid = line.split(":", 1)[1].strip()
324
+ current_entry = {"id": fid, "status": current_status}
325
+ continue
326
+
327
+ # Parse entry fields
328
+ if current_entry and line.startswith(" "):
329
+ m = re.match(r"^\s{2}([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
330
+ if m:
331
+ k = m.group(1).strip()
332
+ v = m.group(2).strip()
333
+ current_entry[k] = v
334
+
335
+ # Don't forget last entry
336
+ if current_entry:
337
+ decisions.append(current_entry)
338
+
339
+ return decisions
340
+
341
+
342
def _filter_by_decision_log(findings, prior_decisions, escalation_groups):
    """
    Filter findings based on decision log.

    Rules:
    1. Filter out findings matching any "fixed" decision (by escalation group)
    2. Filter out findings matching "rejected" decisions UNLESS in escalation group

    Args:
        findings: list of finding dicts
        prior_decisions: list from _parse_decision_log()
        escalation_groups: list of [rejected_id, new_finding_id] pairs

    Returns:
        filtered list of findings
    """
    # Nothing decided previously => nothing to filter against.
    if not prior_decisions:
        return findings

    # escalation_map: prior finding id -> set of new finding ids that
    # re-raise (escalate) that prior finding in the current round.
    escalation_map = {}
    for group in escalation_groups:
        if len(group) >= 2:
            prior_id = group[0]
            new_finding_ids = group[1:]
            if prior_id not in escalation_map:
                escalation_map[prior_id] = set()
            escalation_map[prior_id].update(new_finding_ids)

    fixed_ids = set()
    rejected_ids = set()

    # Bucket prior decisions by status; entries without an id are ignored.
    for dec in prior_decisions:
        status = dec.get("status", "").lower()
        fid = dec.get("id", "").strip()
        if not fid:
            continue

        if status == "fixed":
            fixed_ids.add(fid)
        elif status == "rejected":
            rejected_ids.add(fid)

    filtered = []
    for f in findings:
        fid = f.get("id", "").strip()
        # NOTE(review): id-less findings are dropped here but kept when
        # prior_decisions is empty — confirm this asymmetry is intended.
        if not fid:
            continue

        should_filter = False

        # Rule 1a: exact match against a previously fixed finding.
        if fid in fixed_ids:
            should_filter = True

        # Rule 1b: a new finding escalating an already-fixed one is also
        # dropped — the underlying issue was addressed.
        if not should_filter:
            for fixed_id in fixed_ids:
                if fixed_id in escalation_map and fid in escalation_map[fixed_id]:
                    should_filter = True
                    break

        # Rule 2: previously rejected findings are dropped, UNLESS this
        # finding appears in an escalation group for some rejected id —
        # then it is deliberately kept (re-raised with new evidence).
        if not should_filter:
            if fid in rejected_ids:
                should_filter = True

            for rejected_id in rejected_ids:
                if rejected_id in escalation_map and fid in escalation_map[rejected_id]:
                    should_filter = False
                    break

        if not should_filter:
            filtered.append(f)

    return filtered
414
+
415
+
416
+ def _parse_review_findings(md_text):
417
+ lines = md_text.splitlines()
418
+ items = []
419
+
420
+ cur = None
421
+ for raw in lines:
422
+ line = raw.rstrip("\n")
423
+ m_id = re.match(r"^\s*(?:-\s*)?id:\s*(.+)$", line)
424
+ if m_id:
425
+ if cur:
426
+ items.append(cur)
427
+ cur = {"id": m_id.group(1).strip()}
428
+ continue
429
+ if cur:
430
+ m = re.match(r"^\s*([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
431
+ if not m:
432
+ continue
433
+ k = m.group(1).strip()
434
+ if k == "id":
435
+ continue
436
+ v = m.group(2)
437
+ cur[k] = v.strip()
438
+
439
+ if cur:
440
+ items.append(cur)
441
+
442
+ normalized = []
443
+ for it in items:
444
+ fid = (it.get("id") or "").strip()
445
+ if not fid:
446
+ continue
447
+ normalized.append(
448
+ {
449
+ "id": fid,
450
+ "priority": (it.get("priority") or "P3").strip(),
451
+ "category": (it.get("category") or "quality").strip(),
452
+ "file": (it.get("file") or "<unknown>").strip(),
453
+ "line": (it.get("line") or "null").strip(),
454
+ "title": (it.get("title") or "").strip(),
455
+ "description": (it.get("description") or "").strip(),
456
+ "suggestion": (it.get("suggestion") or "(no suggestion provided)").strip(),
457
+ }
458
+ )
459
+ return normalized
460
+
461
+
462
def _merge_duplicates(findings, duplicate_groups):
    """Collapse each duplicate group onto one canonical finding.

    The canonical member is the highest-priority finding in the group
    (ties broken by lexicographic id); the other ids are appended to the
    canonical description as "Also reported as: ...". Returns a tuple of
    (findings sorted by priority then id, {canonical_id: [merged ids]}).
    Reviewer-provided ids are never rewritten.
    """
    by_id = {f["id"]: dict(f) for f in findings}
    merged_map = {}
    dropped = set()

    for group in duplicate_groups:
        known = list(dict.fromkeys(i for i in group if i in by_id))
        if len(known) < 2:
            continue

        canonical = min(
            known,
            key=lambda fid: (_priority_rank(by_id[fid].get("priority")), fid),
        )
        absorbed = [i for i in known if i != canonical]
        if not absorbed:
            continue

        merged_map[canonical] = absorbed
        dropped.update(absorbed)

    survivors = []
    for fid, finding in by_id.items():
        if fid in dropped:
            continue

        if fid in merged_map:
            note = "Also reported as: " + ", ".join(merged_map[fid])
            base = finding.get("description") or ""
            finding = dict(finding)
            finding["description"] = base + "\n" + note if base else note

        survivors.append(finding)

    survivors.sort(key=lambda x: (_priority_rank(x.get("priority")), x.get("id") or ""))
    return survivors, merged_map
506
+
507
+
508
+ def _counts(findings):
509
+ c = {"P0": 0, "P1": 0, "P2": 0, "P3": 0}
510
+ for f in findings:
511
+ p = (f.get("priority") or "").strip().upper()
512
+ if p in c:
513
+ c[p] += 1
514
+ return c
515
+
516
+
517
def _check_existing_comment(pr_number, run_id, round_num, comment_type):
    """
    Return True when a comment with the same runId/round/type already exists
    on the PR (caller should skip posting). Best-effort: any gh/JSON failure
    returns False, so the caller simply posts.

    comment_type: "review-summary" or "fix-report" or "final-report"
    """
    try:
        listing = subprocess.run(
            ["gh", "api", f"repos/:owner/:repo/issues/{pr_number}/comments", "--paginate"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
        )
        if listing.returncode != 0:
            return False

        # NOTE(review): with multiple pages, `--paginate` emits concatenated
        # JSON arrays which json.loads rejects; that path lands in the except
        # below and we re-post (safe, just non-idempotent) — confirm if the
        # comment volume can ever exceed one page.
        comments = json.loads(listing.stdout or "[]")

        header_by_type = {
            "review-summary": f"## Review Summary (Round {round_num})",
            "fix-report": f"## Fix Report (Round {round_num})",
            "final-report": "## Final Report",
        }
        type_header = header_by_type.get(comment_type)
        if type_header is None:
            return False

        needle = f"RunId: {run_id}"
        return any(
            MARKER in comment.get("body", "")
            and type_header in comment.get("body", "")
            and needle in comment.get("body", "")
            for comment in comments
        )
    except Exception:
        return False
555
+
556
+
557
def _post_pr_comment(pr_number, body_ref, run_id=None, round_num=None, comment_type=None):
    """
    Post body_ref (a Path or a cache ref string) as a PR comment via `gh`.

    When run_id, round_num, and comment_type are all provided, an
    idempotency check skips posting if the same comment already exists.

    Returns: True if posted successfully or skipped (idempotent), False on error
    """
    if isinstance(body_ref, Path):
        path = body_ref
    else:
        path = _resolve_ref(REPO_ROOT, CACHE_DIR, body_ref)
        if not path:
            return False

    if run_id and round_num and comment_type:
        if _check_existing_comment(pr_number, run_id, round_num, comment_type):
            return True

    completed = subprocess.run(
        ["gh", "pr", "comment", str(pr_number), "--body-file", str(path)],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return completed.returncode == 0
584
+
585
+
586
def _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews):
    """Build the Mode A (review summary) PR comment body.

    Includes the marker, per-priority counts, the P0/P1 must-fix list (with
    merged-duplicate ids), and the raw reviews in a collapsed section.
    Suggestion text and raw review content are path-scrubbed.
    """
    out = [
        MARKER,
        "",
        f"## Review Summary (Round {round_num})",
        "",
        f"- PR: #{pr_number}",
        f"- RunId: {run_id}",
        f"- P0: {counts['P0']} P1: {counts['P1']} P2: {counts['P2']} P3: {counts['P3']}",
        "",
    ]

    if must_fix:
        out += ["## Must Fix (P0/P1)", ""]
        for finding in must_fix:
            fid = finding.get("id") or ""
            title = finding.get("title") or ""
            pri = (finding.get("priority") or "").strip()
            file = finding.get("file") or "<unknown>"
            line = finding.get("line") or "null"
            sugg = finding.get("suggestion") or ""
            out.append(f"- {fid} ({pri}) {title}")
            out.append(f" - {file}:{line}")
            if fid in merged_map:
                out.append(f" - merged: {', '.join(merged_map[fid])}")
            if sugg:
                out.append(f" - suggestion: {_sanitize_for_comment(sugg)}")
        out.append("")
    else:
        out += ["## Result", "", "No P0/P1 issues found.", ""]

    out += ["<details>", "<summary>Raw Reviews</summary>", ""]
    for name, content in raw_reviews:
        out += [f"### {name}", "", "```md", _sanitize_for_comment(content), "```", ""]
    out += ["</details>", ""]
    return "\n".join(out)
633
+
634
+
635
def _render_mode_b_comment(pr_number, round_num, run_id, fix_report_md):
    """Build the Mode B (fix report) PR comment body around the fixer's markdown."""
    parts = [
        MARKER,
        "",
        f"## Fix Report (Round {round_num})",
        "",
        f"- PR: #{pr_number}",
        f"- RunId: {run_id}",
        "",
        _sanitize_for_comment(fix_report_md),
        "",
    ]
    return "\n".join(parts)
647
+
648
+
649
def _render_final_comment(pr_number, round_num, run_id, status):
    """Build the final-report PR comment body.

    status == "RESOLVED" renders the all-clear text; anything else renders
    the max-rounds-reached warning.
    """
    out = [
        MARKER,
        "",
        "## Final Report",
        "",
        f"- PR: #{pr_number}",
        f"- Total Rounds: {round_num}",
        f"- RunId: {run_id}",
        "",
    ]

    if status == "RESOLVED":
        out += [
            "### Status: ✅ All issues resolved",
            "",
            "All P0/P1 issues from the automated review have been addressed.",
            "The PR is ready for human review and merge.",
        ]
    else:
        out += [
            "### Status: ⚠️ Max rounds reached",
            "",
            "The automated review loop has completed the maximum number of rounds (3).",
            "Some issues may still remain. Please review the PR comments above for details.",
        ]

    out.append("")
    return "\n".join(out)
673
+
674
+
675
def main(argv):
    """CLI entry point. Prints exactly ONE JSON object on stdout; returns exit code.

    Dispatch order (first match wins):
      --final-report   -> post the closing comment            -> {"ok":true,"final":true}
      --fix-report-file-> Mode B: post fixer's report         -> {"ok":true}
      otherwise        -> Mode A: aggregate reviews, comment, -> {"stop":true} or
                          maybe write a fixFile                  {"stop":false,"fixFile":...}
    """
    class _ArgParser(argparse.ArgumentParser):
        # argparse exits the process and prints usage on error by default;
        # raise instead so we can keep the single-JSON-object stdout contract.
        def error(self, message):
            raise ValueError(message)

    parser = _ArgParser(add_help=False)
    parser.add_argument("--pr", type=int, required=True)
    parser.add_argument("--round", type=int, default=1)
    parser.add_argument("--run-id", required=True)
    parser.add_argument("--context-file")
    parser.add_argument("--review-file", action="append", default=[])
    parser.add_argument("--fix-report-file")
    parser.add_argument("--final-report")
    parser.add_argument("--duplicate-groups-json")
    parser.add_argument("--duplicate-groups-b64")
    parser.add_argument("--decision-log-file")
    parser.add_argument("--escalation-groups-b64")

    try:
        args = parser.parse_args(argv)
    except ValueError:
        _json_out({"error": "INVALID_ARGS"})
        return 2

    pr_number = args.pr
    round_num = args.round
    run_id = str(args.run_id)

    # Normalize optional string args: empty / whitespace-only -> None.
    final_report = (args.final_report or "").strip() or None
    fix_report_file = (args.fix_report_file or "").strip() or None
    context_file = (args.context_file or "").strip() or None
    review_files = []
    for rf in args.review_file or []:
        s = (rf or "").strip()
        if s:
            review_files.append(s)

    # Final-report mode: post the closing comment and stop.
    if final_report:
        body = _render_final_comment(pr_number, round_num, run_id, final_report)
        body_basename = f"review-aggregate-final-pr{pr_number}-{run_id}.md"
        body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
        _write_cache_text(body_ref, body)
        if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="final-report"):
            _json_out({"error": "GH_PR_COMMENT_FAILED"})
            return 1
        _json_out({"ok": True, "final": True})
        return 0

    # Mode B: post the fixer's report as a PR comment.
    if fix_report_file:
        fix_p = _resolve_ref(REPO_ROOT, CACHE_DIR, fix_report_file)
        if not fix_p or not fix_p.exists():
            _json_out({"error": "FIX_REPORT_FILE_NOT_FOUND"})
            return 1
        fix_md = _read_cache_text(fix_report_file)
        body = _render_mode_b_comment(pr_number, round_num, run_id, fix_md)
        body_basename = f"review-aggregate-fix-comment-pr{pr_number}-r{round_num}-{run_id}.md"
        body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
        _write_cache_text(body_ref, body)
        if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="fix-report"):
            _json_out({"error": "GH_PR_COMMENT_FAILED"})
            return 1
        _json_out({"ok": True})
        return 0

    # Mode A requires a context file plus at least one review file.
    if not context_file:
        _json_out({"error": "MISSING_CONTEXT_FILE"})
        return 1
    if not review_files:
        _json_out({"error": "MISSING_REVIEW_FILES"})
        return 1

    ctx_p = _resolve_ref(REPO_ROOT, CACHE_DIR, context_file)
    if not ctx_p or not ctx_p.exists():
        _json_out({"error": "CONTEXT_FILE_NOT_FOUND"})
        return 1

    # Silently drop review refs that don't resolve/exist; fail only if none remain.
    valid_review_files = []
    for rf in review_files:
        p = _resolve_ref(REPO_ROOT, CACHE_DIR, rf)
        if p and p.exists():
            valid_review_files.append(rf)
    review_files = valid_review_files
    if not review_files:
        _json_out({"error": "REVIEW_FILES_NOT_FOUND"})
        return 1

    # Collect raw markdown (for the collapsed section) and parsed findings.
    raw_reviews = []
    all_findings = []
    for rf in review_files:
        md = _read_cache_text(rf)
        raw_reviews.append((rf, md))
        all_findings.extend(_parse_review_findings(md))

    # Duplicate groups come from the caller (LLM output); JSON arg wins over b64.
    duplicate_groups = _parse_duplicate_groups_json(args.duplicate_groups_json or "")
    if not duplicate_groups:
        duplicate_groups = _parse_duplicate_groups_b64(args.duplicate_groups_b64 or "")
    merged_findings, merged_map = _merge_duplicates(all_findings, duplicate_groups)

    # Prior-round decisions are optional; unreadable log => no filtering.
    decision_log_file = (args.decision_log_file or "").strip() or None
    prior_decisions = []
    if decision_log_file:
        try:
            decision_log_md = _read_cache_text(decision_log_file)
            prior_decisions = _parse_decision_log(decision_log_md)
        except Exception:
            pass

    escalation_groups = _parse_escalation_groups_b64(args.escalation_groups_b64 or "")

    if prior_decisions:
        merged_findings = _filter_by_decision_log(merged_findings, prior_decisions, escalation_groups)

    counts = _counts(merged_findings)

    # P0/P1 must be fixed; P2/P3 are optional for the fixer. Unknown
    # priorities rank 99 and therefore land in `optional`.
    must_fix = [f for f in merged_findings if _priority_rank(f.get("priority")) <= 1]
    optional = [f for f in merged_findings if _priority_rank(f.get("priority")) >= 2]
    stop = len(must_fix) == 0

    body = _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews)
    body_basename = f"review-aggregate-comment-pr{pr_number}-r{round_num}-{run_id}.md"
    body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
    _write_cache_text(body_ref, body)
    if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="review-summary"):
        _json_out({"error": "GH_PR_COMMENT_FAILED"})
        return 1

    if stop:
        _json_out({"stop": True})
        return 0

    # Generate the fixFile consumed by the fixer (see header "fixFile rules").
    fix_file = f"fix-pr{pr_number}-r{round_num}-{run_id}.md"
    lines = []
    lines.append("# Fix File")
    lines.append("")
    lines.append(f"PR: {pr_number}")
    lines.append(f"Round: {round_num}")
    lines.append("")
    lines.append("## IssuesToFix")
    lines.append("")
    for f in must_fix:
        fid = f.get("id") or ""
        pri = (f.get("priority") or "P3").strip()
        cat = (f.get("category") or "quality").strip()
        file = (f.get("file") or "<unknown>").strip()
        line = (f.get("line") or "null").strip()
        title = (f.get("title") or "").strip()
        # Newlines are escaped so each field stays on a single line.
        desc = (f.get("description") or "").replace("\n", "\\n").strip()
        sugg = (f.get("suggestion") or "(no suggestion provided)").replace("\n", "\\n").strip()

        lines.append(f"- id: {fid}")
        lines.append(f" priority: {pri}")
        lines.append(f" category: {cat}")
        lines.append(f" file: {file}")
        lines.append(f" line: {line}")
        lines.append(f" title: {title}")
        lines.append(f" description: {desc}")
        lines.append(f" suggestion: {sugg}")

    lines.append("")
    lines.append("## OptionalIssues")
    lines.append("")
    for f in optional:
        fid = f.get("id") or ""
        pri = (f.get("priority") or "P3").strip()
        cat = (f.get("category") or "quality").strip()
        file = (f.get("file") or "<unknown>").strip()
        line = (f.get("line") or "null").strip()
        title = (f.get("title") or "").strip()
        desc = (f.get("description") or "").replace("\n", "\\n").strip()
        sugg = (f.get("suggestion") or "(no suggestion provided)").replace("\n", "\\n").strip()

        lines.append(f"- id: {fid}")
        lines.append(f" priority: {pri}")
        lines.append(f" category: {cat}")
        lines.append(f" file: {file}")
        lines.append(f" line: {line}")
        lines.append(f" title: {title}")
        lines.append(f" description: {desc}")
        lines.append(f" suggestion: {sugg}")

    fix_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / fix_file)
    _write_cache_text(fix_ref, "\n".join(lines) + "\n")
    _json_out({"stop": False, "fixFile": fix_ref})
    return 0
859
+
860
+
861
if __name__ == "__main__":
    # Last-resort guard: preserve the "exactly one JSON object on stdout"
    # contract even if main() raises unexpectedly. SystemExit from a normal
    # return is not an Exception and passes through untouched.
    try:
        exit_code = main(sys.argv[1:])
    except Exception:
        _json_out({"error": "AGGREGATE_SCRIPT_FAILED"})
        exit_code = 1
    raise SystemExit(exit_code)