@ranger1/dx 0.1.91 → 0.1.92

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +2 -2
  2. package/lib/cli/help.js +1 -1
  3. package/lib/codex-initial.js +19 -215
  4. package/package.json +2 -2
  5. package/skills/backend-layering-audit-fixer/SKILL.md +180 -0
  6. package/{codex/skills → skills}/doctor/SKILL.md +2 -9
  7. package/{codex/skills → skills}/doctor/scripts/doctor.sh +2 -253
  8. package/skills/git-pr-ship/SKILL.md +481 -0
  9. package/skills/naming-audit-fixer/SKILL.md +149 -0
  10. package/skills/naming-audit-fixer/references/fix-guide.md +93 -0
  11. package/skills/naming-audit-fixer/scripts/audit_naming.py +534 -0
  12. package/codex/agents/fixer.toml +0 -37
  13. package/codex/agents/orchestrator.toml +0 -11
  14. package/codex/agents/reviewer.toml +0 -52
  15. package/codex/agents/spark.toml +0 -18
  16. package/codex/skills/pr-review-loop/SKILL.md +0 -209
  17. package/codex/skills/pr-review-loop/agents/openai.yaml +0 -4
  18. package/codex/skills/pr-review-loop/references/agents/pr-context.md +0 -73
  19. package/codex/skills/pr-review-loop/references/agents/pr-precheck.md +0 -161
  20. package/codex/skills/pr-review-loop/references/agents/pr-review-aggregate.md +0 -188
  21. package/codex/skills/pr-review-loop/references/skill-layout.md +0 -25
  22. package/codex/skills/pr-review-loop/scripts/gh_review_harvest.py +0 -292
  23. package/codex/skills/pr-review-loop/scripts/pr_context.py +0 -351
  24. package/codex/skills/pr-review-loop/scripts/pr_review_aggregate.py +0 -951
  25. package/codex/skills/pr-review-loop/scripts/test_pr_review_aggregate.py +0 -876
  26. package/codex/skills/pr-review-loop/scripts/test_validate_reviewer_prompts.py +0 -92
  27. package/codex/skills/pr-review-loop/scripts/validate_reviewer_prompts.py +0 -87
  28. /package/{codex/skills → skills}/doctor/agents/openai.yaml +0 -0
  29. /package/{codex/skills → skills}/e2e-audit-fixer/SKILL.md +0 -0
  30. /package/{codex/skills → skills}/e2e-audit-fixer/agents/openai.yaml +0 -0
  31. /package/{codex/skills → skills}/e2e-audit-fixer/scripts/e2e_e2e_audit.py +0 -0
  32. /package/{codex/skills → skills}/env-accessor-audit-fixer/SKILL.md +0 -0
  33. /package/{codex/skills → skills}/env-accessor-audit-fixer/agents/openai.yaml +0 -0
  34. /package/{codex/skills → skills}/env-accessor-audit-fixer/references/bootstrap-env-foundation.md +0 -0
  35. /package/{codex/skills → skills}/env-accessor-audit-fixer/scripts/env_accessor_audit.py +0 -0
  36. /package/{codex/skills → skills}/error-handling-audit-fixer/SKILL.md +0 -0
  37. /package/{codex/skills → skills}/error-handling-audit-fixer/agents/openai.yaml +0 -0
  38. /package/{codex/skills → skills}/error-handling-audit-fixer/references/error-handling-standard.md +0 -0
  39. /package/{codex/skills → skills}/error-handling-audit-fixer/references/foundation-bootstrap.md +0 -0
  40. /package/{codex/skills → skills}/error-handling-audit-fixer/scripts/error_handling_audit.py +0 -0
  41. /package/{codex/skills → skills}/gh-dependabot-cleanup/SKILL.md +0 -0
  42. /package/{codex/skills → skills}/gh-dependabot-cleanup/agents/openai.yaml +0 -0
  43. /package/{codex/skills → skills}/git-commit-and-pr/SKILL.md +0 -0
  44. /package/{codex/skills → skills}/git-commit-and-pr/agents/openai.yaml +0 -0
  45. /package/{codex/skills → skills}/git-release/SKILL.md +0 -0
  46. /package/{codex/skills → skills}/git-release/agents/openai.yaml +0 -0
  47. /package/{codex/skills → skills}/online-debug-guard/SKILL.md +0 -0
  48. /package/{codex/skills → skills}/online-debug-guard/agents/openai.yaml +0 -0
  49. /package/{codex/skills → skills}/pagination-dto-audit-fixer/SKILL.md +0 -0
  50. /package/{codex/skills → skills}/pagination-dto-audit-fixer/agents/openai.yaml +0 -0
  51. /package/{codex/skills → skills}/pagination-dto-audit-fixer/references/pagination-standard.md +0 -0
  52. /package/{codex/skills → skills}/pagination-dto-audit-fixer/scripts/pagination_dto_audit.py +0 -0
@@ -1,951 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Deterministic PR review aggregation (script owns all rules).
3
- #
4
- # Workflow:
5
- # - Mode A: read contextFile + reviewFile(s) from project cache: ./.cache/, consume an LLM-produced aggregate result,
6
- # post a single PR comment, and generate a fixFile for fixer.
7
- # - Mode B: read fixReportFile from cache and post it as a PR comment.
8
- #
9
- # Input rules:
10
- # - Callers should pass repo-relative paths (e.g. ./.cache/foo.md). For backward-compat, basenames are also accepted.
11
- # - Aggregate result comes from LLM and is passed as an argument (NOT written to disk).
12
- # - Prefer: --aggregate-result-b64 <base64(json)>
13
- # - Also supported: --aggregate-result-json '<json>'
14
- # - Missing/invalid aggregate result => fail closed.
15
- # - Duplicate groups / escalation groups may still be passed for backward-compatible tooling,
16
- # but they are no longer used by this script to decide findings or stop.
17
- #
18
- # Output rules:
19
- # - Stdout must print exactly ONE JSON object and nothing else.
20
- # - Mode A: {"stop":true} OR {"stop":false,"fixFile":"..."}
21
- # - Mode B: {"ok":true}
22
- #
23
- # PR comment rules:
24
- # - Every comment must include marker: <!-- pr-review-loop-marker -->
25
- # - Comment body must NOT contain local filesystem paths (this script scrubs cache paths, $HOME, and repo absolute paths).
26
- #
27
- # fixFile rules:
28
- # - fixFile includes all findings, split into:
29
- # - IssuesToFix: P0/P1 (must fix)
30
- # - OptionalIssues: P2/P3 (fixer may decide)
31
- # - Each merged duplicate group keeps ONE canonical id; merged IDs are appended into canonical description.
32
- # - Do NOT rewrite id prefixes (e.g. SEC-/LOG-/STY-/GHR-); preserve reviewer-provided finding IDs.
33
-
34
- import argparse
35
- import base64
36
- import json
37
- import os
38
- import re
39
- import subprocess
40
- import sys
41
- from pathlib import Path
42
-
43
-
44
- MARKER = "<!-- pr-review-loop-marker -->"
45
-
46
-
47
- def _repo_root():
48
- try:
49
- p = subprocess.run(
50
- ["git", "rev-parse", "--show-toplevel"],
51
- stdout=subprocess.PIPE,
52
- stderr=subprocess.DEVNULL,
53
- text=True,
54
- )
55
- out = (p.stdout or "").strip()
56
- if p.returncode == 0 and out:
57
- return Path(out)
58
- except Exception:
59
- pass
60
- return Path.cwd()
61
-
62
-
63
- def _cache_dir(repo_root):
64
- return (repo_root / ".cache").resolve()
65
-
66
-
67
- def _is_safe_relpath(p):
68
- if p.is_absolute():
69
- return False
70
- if any(part in ("..",) for part in p.parts):
71
- return False
72
- return True
73
-
74
-
75
- def _resolve_ref(repo_root, cache_dir, ref):
76
- if not ref:
77
- return None
78
- s = str(ref).strip()
79
- if not s:
80
- return None
81
-
82
- # If caller already passes a repo-relative path like ./.cache/foo.md
83
- looks_like_path = ("/" in s) or ("\\" in s) or s.startswith(".")
84
- if looks_like_path:
85
- p = Path(s)
86
- if p.is_absolute():
87
- # Only allow absolute paths under cache_dir.
88
- try:
89
- p2 = p.resolve()
90
- p2.relative_to(cache_dir.resolve())
91
- return p2
92
- except Exception:
93
- return None
94
- if not _is_safe_relpath(p):
95
- return None
96
- return (repo_root / p).resolve()
97
-
98
- # Backward-compat: accept basename-only.
99
- b = _safe_basename(s)
100
- if not b:
101
- return None
102
- return (cache_dir / b).resolve()
103
-
104
-
105
- def _repo_relpath(repo_root, p):
106
- try:
107
- rel = p.resolve().relative_to(repo_root.resolve())
108
- return "./" + rel.as_posix()
109
- except Exception:
110
- return os.path.basename(str(p))
111
-
112
-
113
- REPO_ROOT = _repo_root()
114
- CACHE_DIR = _cache_dir(REPO_ROOT)
115
-
116
-
117
- def _json_out(obj):
118
- sys.stdout.write(json.dumps(obj, ensure_ascii=True))
119
- sys.stdout.write("\n")
120
-
121
-
122
- def _safe_basename(name):
123
- if not name:
124
- return None
125
- base = os.path.basename(name.strip())
126
- if base != name.strip():
127
- return None
128
- if base in (".", ".."):
129
- return None
130
- return base
131
-
132
-
133
- def _read_cache_text(ref):
134
- p = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
135
- if not p:
136
- raise FileNotFoundError("INVALID_CACHE_REF")
137
- return p.read_text(encoding="utf-8", errors="replace")
138
-
139
-
140
- def _write_cache_text(ref, content):
141
- p = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
142
- if not p:
143
- raise ValueError("INVALID_CACHE_REF")
144
- CACHE_DIR.mkdir(parents=True, exist_ok=True)
145
- p.parent.mkdir(parents=True, exist_ok=True)
146
- p.write_text(content, encoding="utf-8", newline="\n")
147
-
148
-
149
- def _priority_rank(p):
150
- if not isinstance(p, str):
151
- return 99
152
- p = p.strip().upper()
153
- if p == "P0":
154
- return 0
155
- if p == "P1":
156
- return 1
157
- if p == "P2":
158
- return 2
159
- if p == "P3":
160
- return 3
161
- return 99
162
-
163
-
164
- def _sanitize_for_comment(text):
165
- if not isinstance(text, str):
166
- text = str(text)
167
-
168
- home = str(Path.home())
169
- cache_abs = str(CACHE_DIR.resolve())
170
- repo_abs = str(REPO_ROOT.resolve())
171
-
172
- # Scrub local cache absolute path.
173
- text = text.replace(cache_abs + "/", "[cache]/")
174
-
175
- # Avoid leaking absolute local repo paths.
176
- if repo_abs:
177
- text = text.replace(repo_abs + "/", "")
178
-
179
- return text
180
-
181
-
182
- def _parse_duplicate_groups_json(s):
183
- if not s:
184
- return []
185
- try:
186
- data = json.loads(s)
187
- except Exception:
188
- return []
189
-
190
- groups = []
191
- if isinstance(data, dict) and isinstance(data.get("duplicateGroups"), list):
192
- groups = data.get("duplicateGroups")
193
- elif isinstance(data, list):
194
- groups = data
195
- else:
196
- return []
197
-
198
- out = []
199
- for g in (groups or []):
200
- if not isinstance(g, list):
201
- continue
202
- ids = []
203
- for it in g:
204
- if isinstance(it, str) and it.strip():
205
- ids.append(it.strip())
206
- ids = list(dict.fromkeys(ids))
207
- if len(ids) >= 2:
208
- out.append(ids)
209
- return out
210
-
211
-
212
- def _parse_duplicate_groups_b64(s):
213
- if not s:
214
- return []
215
- try:
216
- raw = base64.b64decode(s.encode("ascii"), validate=True)
217
- return _parse_duplicate_groups_json(raw.decode("utf-8", errors="replace"))
218
- except Exception:
219
- return []
220
-
221
-
222
- def _parse_escalation_groups_json(s):
223
- """Parse escalation groups JSON (same format as duplicate groups)."""
224
- if not s:
225
- return []
226
- try:
227
- data = json.loads(s)
228
- except Exception:
229
- return []
230
-
231
- groups = []
232
- if isinstance(data, dict) and isinstance(data.get("escalationGroups"), list):
233
- groups = data.get("escalationGroups")
234
- elif isinstance(data, list):
235
- groups = data
236
- else:
237
- return []
238
-
239
- out = []
240
- for g in (groups or []):
241
- if not isinstance(g, list):
242
- continue
243
- ids = []
244
- for it in g:
245
- if isinstance(it, str) and it.strip():
246
- ids.append(it.strip())
247
- ids = list(dict.fromkeys(ids))
248
- if len(ids) >= 2:
249
- out.append(ids)
250
- return out
251
-
252
-
253
- def _parse_escalation_groups_b64(s):
254
- """Decode base64 escalation groups JSON."""
255
- if not s:
256
- return []
257
- try:
258
- raw = base64.b64decode(s.encode("ascii"), validate=True)
259
- return _parse_escalation_groups_json(raw.decode("utf-8", errors="replace"))
260
- except Exception:
261
- return []
262
-
263
-
264
- def _parse_decision_log(md_text):
265
- """
266
- Parse decision log markdown and extract fixed/rejected decisions.
267
-
268
- Format:
269
- # Decision Log
270
- PR: 123
271
- ## Round 1
272
- ### Fixed
273
- - id: SEC-001
274
- commit: abc123
275
- essence: JSON.parse error handling
276
- ### Rejected
277
- - id: STY-004
278
- priority: P2
279
- reason: needs product decision
280
- essence: component split suggestion
281
-
282
- Returns: [
283
- {"id": "SEC-001", "status": "fixed", "essence": "...", "commit": "..."},
284
- {"id": "STY-004", "status": "rejected", "essence": "...", "reason": "...", "priority": "P2"}
285
- ]
286
- """
287
- if not md_text:
288
- return []
289
-
290
- lines = md_text.splitlines()
291
- decisions = []
292
-
293
- current_status = None # "fixed" or "rejected"
294
- current_entry = None
295
-
296
- for raw in lines:
297
- line = raw.rstrip("\n")
298
-
299
- # Detect status section headers
300
- if line.strip().lower() == "### fixed":
301
- current_status = "fixed"
302
- if current_entry:
303
- decisions.append(current_entry)
304
- current_entry = None
305
- continue
306
-
307
- if line.strip().lower() == "### rejected":
308
- current_status = "rejected"
309
- if current_entry:
310
- decisions.append(current_entry)
311
- current_entry = None
312
- continue
313
-
314
- # Reset on new round headers
315
- if line.startswith("## Round "):
316
- if current_entry:
317
- decisions.append(current_entry)
318
- current_entry = None
319
- continue
320
-
321
- # Start new entry
322
- if line.startswith("- id:") and current_status:
323
- if current_entry:
324
- decisions.append(current_entry)
325
- fid = line.split(":", 1)[1].strip()
326
- current_entry = {"id": fid, "status": current_status}
327
- continue
328
-
329
- # Parse entry fields
330
- if current_entry and line.startswith(" "):
331
- m = re.match(r"^\s{2}([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
332
- if m:
333
- k = m.group(1).strip()
334
- v = m.group(2).strip()
335
- current_entry[k] = v
336
-
337
- # Don't forget last entry
338
- if current_entry:
339
- decisions.append(current_entry)
340
-
341
- return decisions
342
-
343
-
344
- def _filter_by_decision_log(findings, prior_decisions, escalation_groups):
345
- """
346
- Filter findings based on decision log.
347
-
348
- Rules:
349
- 1. Filter out findings matching any "fixed" decision (by escalation group)
350
- 2. Filter out findings matching "rejected" decisions UNLESS in escalation group
351
-
352
- Args:
353
- findings: list of finding dicts
354
- prior_decisions: list from _parse_decision_log()
355
- escalation_groups: list of [rejected_id, new_finding_id] pairs
356
-
357
- Returns:
358
- filtered list of findings
359
- """
360
- if not prior_decisions:
361
- return findings
362
-
363
- escalation_map = {}
364
- for group in escalation_groups:
365
- if len(group) >= 2:
366
- prior_id = group[0]
367
- new_finding_ids = group[1:]
368
- if prior_id not in escalation_map:
369
- escalation_map[prior_id] = set()
370
- escalation_map[prior_id].update(new_finding_ids)
371
-
372
- fixed_ids = set()
373
- rejected_ids = set()
374
-
375
- for dec in prior_decisions:
376
- status = dec.get("status", "").lower()
377
- fid = dec.get("id", "").strip()
378
- if not fid:
379
- continue
380
-
381
- if status == "fixed":
382
- fixed_ids.add(fid)
383
- elif status == "rejected":
384
- rejected_ids.add(fid)
385
-
386
- filtered = []
387
- for f in findings:
388
- fid = f.get("id", "").strip()
389
- if not fid:
390
- continue
391
-
392
- should_filter = False
393
-
394
- if fid in fixed_ids:
395
- should_filter = True
396
-
397
- if not should_filter:
398
- for fixed_id in fixed_ids:
399
- if fixed_id in escalation_map and fid in escalation_map[fixed_id]:
400
- should_filter = True
401
- break
402
-
403
- if not should_filter:
404
- if fid in rejected_ids:
405
- should_filter = True
406
-
407
- for rejected_id in rejected_ids:
408
- if rejected_id in escalation_map and fid in escalation_map[rejected_id]:
409
- should_filter = False
410
- break
411
-
412
- if not should_filter:
413
- filtered.append(f)
414
-
415
- return filtered
416
-
417
-
418
- def _parse_review_findings(md_text):
419
- lines = md_text.splitlines()
420
- items = []
421
-
422
- cur = None
423
- for raw in lines:
424
- line = raw.rstrip("\n")
425
- m_id = re.match(r"^\s*(?:-\s*)?id:\s*(.+)$", line)
426
- if m_id:
427
- if cur:
428
- items.append(cur)
429
- cur = {"id": m_id.group(1).strip()}
430
- continue
431
- if cur:
432
- m = re.match(r"^\s*([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
433
- if not m:
434
- continue
435
- k = m.group(1).strip()
436
- if k == "id":
437
- continue
438
- v = m.group(2)
439
- cur[k] = v.strip()
440
-
441
- if cur:
442
- items.append(cur)
443
-
444
- normalized = []
445
- for it in items:
446
- fid = (it.get("id") or "").strip()
447
- if not fid:
448
- continue
449
- normalized.append(
450
- {
451
- "id": fid,
452
- "priority": (it.get("priority") or "P3").strip(),
453
- "category": (it.get("category") or "quality").strip(),
454
- "file": (it.get("file") or "<unknown>").strip(),
455
- "line": (it.get("line") or "null").strip(),
456
- "title": (it.get("title") or "").strip(),
457
- "description": (it.get("description") or "").strip(),
458
- "suggestion": (it.get("suggestion") or "(no suggestion provided)").strip(),
459
- }
460
- )
461
- return normalized
462
-
463
-
464
- def _normalize_aggregate_finding(it):
465
- if not isinstance(it, dict):
466
- return None
467
-
468
- required_fields = [
469
- "id",
470
- "priority",
471
- "category",
472
- "file",
473
- "line",
474
- "title",
475
- "description",
476
- "suggestion",
477
- ]
478
- for field in required_fields:
479
- val = it.get(field)
480
- if not isinstance(val, str) or not val.strip():
481
- return None
482
-
483
- priority = it.get("priority", "").strip().upper()
484
- if priority not in {"P0", "P1", "P2", "P3"}:
485
- return None
486
-
487
- return {
488
- "id": it["id"].strip(),
489
- "priority": priority,
490
- "category": it["category"].strip(),
491
- "file": it["file"].strip(),
492
- "line": it["line"].strip(),
493
- "title": it["title"].strip(),
494
- "description": it["description"].strip(),
495
- "suggestion": it["suggestion"].strip(),
496
- }
497
-
498
-
499
- def _normalize_aggregate_findings(items):
500
- if not isinstance(items, list):
501
- return None
502
-
503
- out = []
504
- for it in items:
505
- normalized = _normalize_aggregate_finding(it)
506
- if not normalized:
507
- return None
508
- out.append(normalized)
509
- return out
510
-
511
-
512
- def _parse_aggregate_result_json(s):
513
- if not s:
514
- return None
515
- try:
516
- data = json.loads(s)
517
- except Exception:
518
- return None
519
-
520
- if not isinstance(data, dict):
521
- return None
522
-
523
- stop = data.get("stop")
524
- if not isinstance(stop, bool):
525
- return None
526
-
527
- must_fix = _normalize_aggregate_findings(data.get("mustFixFindings"))
528
- optional = _normalize_aggregate_findings(data.get("optionalFindings"))
529
- if must_fix is None or optional is None:
530
- return None
531
-
532
- if any(_priority_rank(f.get("priority")) > 1 for f in must_fix):
533
- return None
534
- if any(_priority_rank(f.get("priority")) < 2 for f in optional):
535
- return None
536
-
537
- if stop and must_fix:
538
- return None
539
- if (not stop) and (not must_fix):
540
- return None
541
-
542
- return {
543
- "stop": stop,
544
- "mustFixFindings": must_fix,
545
- "optionalFindings": optional,
546
- }
547
-
548
-
549
- def _parse_aggregate_result_b64(s):
550
- if not s:
551
- return None
552
- try:
553
- raw = base64.b64decode(s.encode("ascii"), validate=True)
554
- return _parse_aggregate_result_json(raw.decode("utf-8", errors="replace"))
555
- except Exception:
556
- return None
557
-
558
-
559
- def _merge_duplicates(findings, duplicate_groups):
560
- by_id = {f["id"]: dict(f) for f in findings}
561
- merged_map = {}
562
- seen = set()
563
-
564
- for group in duplicate_groups:
565
- ids = [i for i in group if i in by_id]
566
- ids = list(dict.fromkeys(ids))
567
- if len(ids) < 2:
568
- continue
569
-
570
- def sort_key(fid):
571
- f = by_id[fid]
572
- return (_priority_rank(f.get("priority")), fid)
573
-
574
- canonical = sorted(ids, key=sort_key)[0]
575
- merged = [i for i in ids if i != canonical]
576
- if not merged:
577
- continue
578
-
579
- merged_map[canonical] = merged
580
- for mid in merged:
581
- seen.add(mid)
582
-
583
- out = []
584
- for fid, f in by_id.items():
585
- if fid in seen:
586
- continue
587
-
588
- if fid in merged_map:
589
- also = ", ".join(merged_map[fid])
590
- desc = (f.get("description") or "")
591
- suffix = f"Also reported as: {also}"
592
- if desc:
593
- desc = desc + "\n" + suffix
594
- else:
595
- desc = suffix
596
- f = dict(f)
597
- f["description"] = desc
598
-
599
- out.append(f)
600
-
601
- out.sort(key=lambda x: (_priority_rank(x.get("priority")), x.get("id") or ""))
602
- return out, merged_map
603
-
604
-
605
- def _counts(findings):
606
- c = {"P0": 0, "P1": 0, "P2": 0, "P3": 0}
607
- for f in findings:
608
- p = (f.get("priority") or "").strip().upper()
609
- if p in c:
610
- c[p] += 1
611
- return c
612
-
613
-
614
- def _check_existing_comment(pr_number, run_id, round_num, comment_type):
615
- """
616
- Check if a comment with same runId/round/type already exists.
617
- Returns True if duplicate exists (should skip posting).
618
-
619
- comment_type: "review-summary" or "fix-report" or "final-report"
620
- """
621
- try:
622
- result = subprocess.run(
623
- ["gh", "api", f"repos/:owner/:repo/issues/{pr_number}/comments", "--paginate"],
624
- stdout=subprocess.PIPE,
625
- stderr=subprocess.DEVNULL,
626
- text=True,
627
- )
628
- if result.returncode != 0:
629
- return False
630
-
631
- comments = json.loads(result.stdout or "[]")
632
-
633
- if comment_type == "review-summary":
634
- type_header = f"## Review Summary (Round {round_num})"
635
- elif comment_type == "fix-report":
636
- type_header = f"## Fix Report (Round {round_num})"
637
- elif comment_type == "final-report":
638
- type_header = "## Final Report"
639
- else:
640
- return False
641
-
642
- run_id_pattern = f"RunId: {run_id}"
643
-
644
- for comment in comments:
645
- body = comment.get("body", "")
646
- if MARKER in body and type_header in body and run_id_pattern in body:
647
- return True
648
-
649
- return False
650
- except Exception:
651
- return False
652
-
653
-
654
- def _post_pr_comment(pr_number, body_ref, run_id=None, round_num=None, comment_type=None):
655
- """
656
- Post a PR comment with idempotency check.
657
-
658
- If run_id, round_num, and comment_type are provided, checks for existing
659
- duplicate before posting and skips if already posted.
660
-
661
- Returns: True if posted successfully or skipped (idempotent), False on error
662
- """
663
- if isinstance(body_ref, Path):
664
- p = body_ref
665
- else:
666
- p = _resolve_ref(REPO_ROOT, CACHE_DIR, body_ref)
667
- if not p:
668
- return False
669
-
670
- if run_id and round_num and comment_type:
671
- if _check_existing_comment(pr_number, run_id, round_num, comment_type):
672
- return True
673
-
674
- body_path = str(p)
675
- rc = subprocess.run(
676
- ["gh", "pr", "comment", str(pr_number), "--body-file", body_path],
677
- stdout=subprocess.DEVNULL,
678
- stderr=subprocess.DEVNULL,
679
- ).returncode
680
- return rc == 0
681
-
682
-
683
- def _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews):
684
- lines = []
685
- lines.append(MARKER)
686
- lines.append("")
687
- lines.append(f"## Review Summary (Round {round_num})")
688
- lines.append("")
689
- lines.append(f"- PR: #{pr_number}")
690
- lines.append(f"- RunId: {run_id}")
691
- lines.append(f"- P0: {counts['P0']} P1: {counts['P1']} P2: {counts['P2']} P3: {counts['P3']}")
692
- lines.append("")
693
-
694
- if must_fix:
695
- lines.append("## Must Fix (P0/P1)")
696
- lines.append("")
697
- for f in must_fix:
698
- fid = f.get("id") or ""
699
- title = f.get("title") or ""
700
- pri = (f.get("priority") or "").strip()
701
- file = f.get("file") or "<unknown>"
702
- line = f.get("line") or "null"
703
- sugg = f.get("suggestion") or ""
704
- lines.append(f"- {fid} ({pri}) {title}")
705
- lines.append(f" - {file}:{line}")
706
- if fid in merged_map:
707
- lines.append(f" - merged: {', '.join(merged_map[fid])}")
708
- if sugg:
709
- lines.append(f" - suggestion: {_sanitize_for_comment(sugg)}")
710
- lines.append("")
711
- else:
712
- lines.append("## Result")
713
- lines.append("")
714
- lines.append("No P0/P1 issues found.")
715
- lines.append("")
716
-
717
- lines.append("<details>")
718
- lines.append("<summary>Raw Reviews</summary>")
719
- lines.append("")
720
- for name, content in raw_reviews:
721
- lines.append(f"### {name}")
722
- lines.append("")
723
- lines.append("```md")
724
- lines.append(_sanitize_for_comment(content))
725
- lines.append("```")
726
- lines.append("")
727
- lines.append("</details>")
728
- lines.append("")
729
- return "\n".join(lines)
730
-
731
-
732
- def _render_mode_b_comment(pr_number, round_num, run_id, fix_report_md):
733
- body = []
734
- body.append(MARKER)
735
- body.append("")
736
- body.append(f"## Fix Report (Round {round_num})")
737
- body.append("")
738
- body.append(f"- PR: #{pr_number}")
739
- body.append(f"- RunId: {run_id}")
740
- body.append("")
741
- body.append(_sanitize_for_comment(fix_report_md))
742
- body.append("")
743
- return "\n".join(body)
744
-
745
-
746
- def _render_final_comment(pr_number, round_num, run_id, status):
747
- lines = []
748
- lines.append(MARKER)
749
- lines.append("")
750
- lines.append("## Final Report")
751
- lines.append("")
752
- lines.append(f"- PR: #{pr_number}")
753
- lines.append(f"- Total Rounds: {round_num}")
754
- lines.append(f"- RunId: {run_id}")
755
- lines.append("")
756
-
757
- if status == "RESOLVED":
758
- lines.append("### Status: ✅ All issues resolved")
759
- lines.append("")
760
- lines.append("All P0/P1 issues from the automated review have been addressed.")
761
- lines.append("The PR is ready for human review and merge.")
762
- else:
763
- lines.append("### Status: ⚠️ Max rounds reached")
764
- lines.append("")
765
- lines.append("The automated review loop has completed the maximum number of rounds (3).")
766
- lines.append("Some issues may still remain. Please review the PR comments above for details.")
767
-
768
- lines.append("")
769
- return "\n".join(lines)
770
-
771
-
772
- def main(argv):
773
- class _ArgParser(argparse.ArgumentParser):
774
- def error(self, message):
775
- raise ValueError(message)
776
-
777
- parser = _ArgParser(add_help=False)
778
- parser.add_argument("--pr", type=int, required=True)
779
- parser.add_argument("--round", type=int, default=1)
780
- parser.add_argument("--run-id", required=True)
781
- parser.add_argument("--context-file")
782
- parser.add_argument("--review-file", action="append", default=[])
783
- parser.add_argument("--fix-report-file")
784
- parser.add_argument("--final-report")
785
- parser.add_argument("--aggregate-result-json")
786
- parser.add_argument("--aggregate-result-b64")
787
- parser.add_argument("--duplicate-groups-json")
788
- parser.add_argument("--duplicate-groups-b64")
789
- parser.add_argument("--decision-log-file")
790
- parser.add_argument("--escalation-groups-b64")
791
-
792
- try:
793
- args = parser.parse_args(argv)
794
- except ValueError:
795
- _json_out({"error": "INVALID_ARGS"})
796
- return 2
797
-
798
- pr_number = args.pr
799
- round_num = args.round
800
- run_id = str(args.run_id)
801
-
802
- final_report = (args.final_report or "").strip() or None
803
- fix_report_file = (args.fix_report_file or "").strip() or None
804
- context_file = (args.context_file or "").strip() or None
805
- review_files = []
806
- for rf in args.review_file or []:
807
- s = (rf or "").strip()
808
- if s:
809
- review_files.append(s)
810
-
811
- if final_report:
812
- body = _render_final_comment(pr_number, round_num, run_id, final_report)
813
- body_basename = f"review-aggregate-final-pr{pr_number}-{run_id}.md"
814
- body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
815
- _write_cache_text(body_ref, body)
816
- if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="final-report"):
817
- _json_out({"error": "GH_PR_COMMENT_FAILED"})
818
- return 1
819
- _json_out({"ok": True, "final": True})
820
- return 0
821
-
822
- if fix_report_file:
823
- fix_p = _resolve_ref(REPO_ROOT, CACHE_DIR, fix_report_file)
824
- if not fix_p or not fix_p.exists():
825
- _json_out({"error": "FIX_REPORT_FILE_NOT_FOUND"})
826
- return 1
827
- fix_md = _read_cache_text(fix_report_file)
828
- body = _render_mode_b_comment(pr_number, round_num, run_id, fix_md)
829
- body_basename = f"review-aggregate-fix-comment-pr{pr_number}-r{round_num}-{run_id}.md"
830
- body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
831
- _write_cache_text(body_ref, body)
832
- if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="fix-report"):
833
- _json_out({"error": "GH_PR_COMMENT_FAILED"})
834
- return 1
835
- _json_out({"ok": True})
836
- return 0
837
-
838
- if not context_file:
839
- _json_out({"error": "MISSING_CONTEXT_FILE"})
840
- return 1
841
- if not review_files:
842
- _json_out({"error": "MISSING_REVIEW_FILES"})
843
- return 1
844
-
845
- ctx_p = _resolve_ref(REPO_ROOT, CACHE_DIR, context_file)
846
- if not ctx_p or not ctx_p.exists():
847
- _json_out({"error": "CONTEXT_FILE_NOT_FOUND"})
848
- return 1
849
-
850
- valid_review_files = []
851
- for rf in review_files:
852
- p = _resolve_ref(REPO_ROOT, CACHE_DIR, rf)
853
- if p and p.exists():
854
- valid_review_files.append(rf)
855
- review_files = valid_review_files
856
- if not review_files:
857
- _json_out({"error": "REVIEW_FILES_NOT_FOUND"})
858
- return 1
859
-
860
- raw_reviews = []
861
- for rf in review_files:
862
- md = _read_cache_text(rf)
863
- raw_reviews.append((rf, md))
864
- aggregate_result = _parse_aggregate_result_json(args.aggregate_result_json or "")
865
- if not aggregate_result:
866
- aggregate_result = _parse_aggregate_result_b64(args.aggregate_result_b64 or "")
867
- if not aggregate_result:
868
- _json_out({"error": "INVALID_AGGREGATE_RESULT"})
869
- return 1
870
-
871
- must_fix = list(aggregate_result["mustFixFindings"])
872
- optional = list(aggregate_result["optionalFindings"])
873
- merged_findings = must_fix + optional
874
- merged_map = {}
875
- counts = _counts(merged_findings)
876
- stop = bool(aggregate_result["stop"])
877
-
878
- body = _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews)
879
- body_basename = f"review-aggregate-comment-pr{pr_number}-r{round_num}-{run_id}.md"
880
- body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
881
- _write_cache_text(body_ref, body)
882
- if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="review-summary"):
883
- _json_out({"error": "GH_PR_COMMENT_FAILED"})
884
- return 1
885
-
886
- if stop:
887
- _json_out({"stop": True})
888
- return 0
889
-
890
- fix_file = f"fix-pr{pr_number}-r{round_num}-{run_id}.md"
891
- lines = []
892
- lines.append("# Fix File")
893
- lines.append("")
894
- lines.append(f"PR: {pr_number}")
895
- lines.append(f"Round: {round_num}")
896
- lines.append("")
897
- lines.append("## IssuesToFix")
898
- lines.append("")
899
- for f in must_fix:
900
- fid = f.get("id") or ""
901
- pri = (f.get("priority") or "P3").strip()
902
- cat = (f.get("category") or "quality").strip()
903
- file = (f.get("file") or "<unknown>").strip()
904
- line = (f.get("line") or "null").strip()
905
- title = (f.get("title") or "").strip()
906
- desc = (f.get("description") or "").replace("\n", "\\n").strip()
907
- sugg = (f.get("suggestion") or "(no suggestion provided)").replace("\n", "\\n").strip()
908
-
909
- lines.append(f"- id: {fid}")
910
- lines.append(f" priority: {pri}")
911
- lines.append(f" category: {cat}")
912
- lines.append(f" file: {file}")
913
- lines.append(f" line: {line}")
914
- lines.append(f" title: {title}")
915
- lines.append(f" description: {desc}")
916
- lines.append(f" suggestion: {sugg}")
917
-
918
- lines.append("")
919
- lines.append("## OptionalIssues")
920
- lines.append("")
921
- for f in optional:
922
- fid = f.get("id") or ""
923
- pri = (f.get("priority") or "P3").strip()
924
- cat = (f.get("category") or "quality").strip()
925
- file = (f.get("file") or "<unknown>").strip()
926
- line = (f.get("line") or "null").strip()
927
- title = (f.get("title") or "").strip()
928
- desc = (f.get("description") or "").replace("\n", "\\n").strip()
929
- sugg = (f.get("suggestion") or "(no suggestion provided)").replace("\n", "\\n").strip()
930
-
931
- lines.append(f"- id: {fid}")
932
- lines.append(f" priority: {pri}")
933
- lines.append(f" category: {cat}")
934
- lines.append(f" file: {file}")
935
- lines.append(f" line: {line}")
936
- lines.append(f" title: {title}")
937
- lines.append(f" description: {desc}")
938
- lines.append(f" suggestion: {sugg}")
939
-
940
- fix_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / fix_file)
941
- _write_cache_text(fix_ref, "\n".join(lines) + "\n")
942
- _json_out({"stop": False, "fixFile": fix_ref})
943
- return 0
944
-
945
-
946
- if __name__ == "__main__":
947
- try:
948
- raise SystemExit(main(sys.argv[1:]))
949
- except Exception:
950
- _json_out({"error": "AGGREGATE_SCRIPT_FAILED"})
951
- raise SystemExit(1)