@ranger1/dx 0.1.76 → 0.1.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +92 -31
  2. package/bin/dx.js +3 -3
  3. package/lib/cli/commands/deploy.js +2 -1
  4. package/lib/cli/commands/stack.js +198 -237
  5. package/lib/cli/commands/start.js +0 -6
  6. package/lib/cli/dx-cli.js +10 -1
  7. package/lib/cli/help.js +8 -7
  8. package/lib/{opencode-initial.js → codex-initial.js} +3 -82
  9. package/lib/vercel-deploy.js +14 -27
  10. package/package.json +1 -2
  11. package/@opencode/agents/__pycache__/gh_review_harvest.cpython-314.pyc +0 -0
  12. package/@opencode/agents/__pycache__/pr_context.cpython-314.pyc +0 -0
  13. package/@opencode/agents/__pycache__/pr_precheck.cpython-314.pyc +0 -0
  14. package/@opencode/agents/__pycache__/pr_review_aggregate.cpython-314.pyc +0 -0
  15. package/@opencode/agents/__pycache__/test_pr_review_aggregate.cpython-314-pytest-9.0.2.pyc +0 -0
  16. package/@opencode/agents/__pycache__/test_pr_review_aggregate.cpython-314.pyc +0 -0
  17. package/@opencode/agents/claude-reviewer.md +0 -82
  18. package/@opencode/agents/codex-reviewer.md +0 -83
  19. package/@opencode/agents/gemini-reviewer.md +0 -82
  20. package/@opencode/agents/gh-thread-reviewer.md +0 -122
  21. package/@opencode/agents/gh_review_harvest.py +0 -292
  22. package/@opencode/agents/pr-context.md +0 -82
  23. package/@opencode/agents/pr-fix.md +0 -243
  24. package/@opencode/agents/pr-precheck.md +0 -89
  25. package/@opencode/agents/pr-review-aggregate.md +0 -151
  26. package/@opencode/agents/pr_context.py +0 -351
  27. package/@opencode/agents/pr_precheck.py +0 -505
  28. package/@opencode/agents/pr_review_aggregate.py +0 -868
  29. package/@opencode/agents/test_pr_review_aggregate.py +0 -701
  30. package/@opencode/commands/doctor.md +0 -271
  31. package/@opencode/commands/git-commit-and-pr.md +0 -282
  32. package/@opencode/commands/git-release.md +0 -642
  33. package/@opencode/commands/oh_attach.json +0 -92
  34. package/@opencode/commands/opencode_attach.json +0 -29
  35. package/@opencode/commands/opencode_attach.py +0 -142
  36. package/@opencode/commands/pr-review-loop.md +0 -211
@@ -1,868 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Deterministic PR review aggregation (script owns all rules).
3
- #
4
- # Workflow:
5
- # - Mode A: read contextFile + reviewFile(s) from project cache: ./.cache/, parse findings, merge duplicates,
6
- # post a single PR comment, and optionally generate a fixFile for pr-fix.
7
- # - Mode B: read fixReportFile from cache and post it as a PR comment.
8
- #
9
- # Input rules:
10
- # - Callers should pass repo-relative paths (e.g. ./.cache/foo.md). For backward-compat, basenames are also accepted.
11
- # - Duplicate groups come from LLM but are passed as an argument (NOT written to disk).
12
- # - Prefer: --duplicate-groups-b64 <base64(json)>
13
- # - Also supported: --duplicate-groups-json '<json>'
14
- # - Invalid/missing duplicate groups => treated as no dedupe (do not fail).
15
- #
16
- # Output rules:
17
- # - Stdout must print exactly ONE JSON object and nothing else.
18
- # - Mode A: {"stop":true} OR {"stop":false,"fixFile":"..."}
19
- # - Mode B: {"ok":true}
20
- #
21
- # PR comment rules:
22
- # - Every comment must include marker: <!-- pr-review-loop-marker -->
23
- # - Comment body must NOT contain local filesystem paths (this script scrubs cache paths, $HOME, and repo absolute paths).
24
- #
25
- # fixFile rules:
26
- # - fixFile includes all findings, split into:
27
- # - IssuesToFix: P0/P1 (must fix)
28
- # - OptionalIssues: P2/P3 (pr-fix may decide)
29
- # - Each merged duplicate group keeps ONE canonical id; merged IDs are appended into canonical description.
30
- # - Do NOT rewrite id prefixes (CDX-/CLD-/GMN-); preserve reviewer-provided finding IDs.
31
-
32
- import argparse
33
- import base64
34
- import json
35
- import os
36
- import re
37
- import subprocess
38
- import sys
39
- from pathlib import Path
40
-
41
-
42
# Marker embedded in every bot-posted PR comment so prior comments can be
# recognized (idempotency checks) and attributed to this review loop.
MARKER = "<!-- pr-review-loop-marker -->"
43
-
44
-
45
def _repo_root():
    """Best-effort repository root.

    Asks `git rev-parse --show-toplevel`; falls back to the current
    working directory when git is unavailable or the command fails.
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
        )
    except Exception:
        return Path.cwd()
    toplevel = (proc.stdout or "").strip()
    if proc.returncode == 0 and toplevel:
        return Path(toplevel)
    return Path.cwd()
59
-
60
-
61
- def _cache_dir(repo_root):
62
- return (repo_root / ".cache").resolve()
63
-
64
-
65
- def _is_safe_relpath(p):
66
- if p.is_absolute():
67
- return False
68
- if any(part in ("..",) for part in p.parts):
69
- return False
70
- return True
71
-
72
-
73
def _resolve_ref(repo_root, cache_dir, ref):
    """Resolve a caller-supplied file reference to an absolute Path, or None.

    Accepted forms:
      - repo-relative path (e.g. ``./.cache/foo.md``) — validated to be
        relative and free of ".." segments, then anchored at *repo_root*;
      - absolute path — allowed only when it resolves under *cache_dir*;
      - bare basename (backward compat) — resolved against *cache_dir*.

    Returns None for empty refs, unsafe relative paths, absolute paths
    outside the cache dir, or invalid basenames; callers treat None as
    "invalid reference" (this is the path-traversal guard for all
    cache reads/writes).
    """
    if not ref:
        return None
    s = str(ref).strip()
    if not s:
        return None

    # If caller already passes a repo-relative path like ./.cache/foo.md
    looks_like_path = ("/" in s) or ("\\" in s) or s.startswith(".")
    if looks_like_path:
        p = Path(s)
        if p.is_absolute():
            # Only allow absolute paths under cache_dir.
            try:
                p2 = p.resolve()
                # relative_to raises when p2 is outside the cache dir.
                p2.relative_to(cache_dir.resolve())
                return p2
            except Exception:
                return None
        if not _is_safe_relpath(p):
            return None
        return (repo_root / p).resolve()

    # Backward-compat: accept basename-only.
    b = _safe_basename(s)
    if not b:
        return None
    return (cache_dir / b).resolve()
101
-
102
-
103
- def _repo_relpath(repo_root, p):
104
- try:
105
- rel = p.resolve().relative_to(repo_root.resolve())
106
- return "./" + rel.as_posix()
107
- except Exception:
108
- return os.path.basename(str(p))
109
-
110
-
111
# Resolved once at import time; every cache file reference in this script
# is validated against these two anchors.
REPO_ROOT = _repo_root()
CACHE_DIR = _cache_dir(REPO_ROOT)
113
-
114
-
115
- def _json_out(obj):
116
- sys.stdout.write(json.dumps(obj, ensure_ascii=True))
117
- sys.stdout.write("\n")
118
-
119
-
120
- def _safe_basename(name):
121
- if not name:
122
- return None
123
- base = os.path.basename(name.strip())
124
- if base != name.strip():
125
- return None
126
- if base in (".", ".."):
127
- return None
128
- return base
129
-
130
-
131
def _read_cache_text(ref):
    """Read a validated cache/repo file as UTF-8 text (bad bytes replaced).

    Raises FileNotFoundError("INVALID_CACHE_REF") when *ref* fails
    validation in _resolve_ref.
    """
    path = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
    if path is None:
        raise FileNotFoundError("INVALID_CACHE_REF")
    return path.read_text(encoding="utf-8", errors="replace")
136
-
137
-
138
def _write_cache_text(ref, content):
    """Write *content* to a validated cache/repo path, creating parents.

    Raises ValueError("INVALID_CACHE_REF") when *ref* fails validation.
    Output always uses "\n" line endings.
    """
    path = _resolve_ref(REPO_ROOT, CACHE_DIR, ref)
    if path is None:
        raise ValueError("INVALID_CACHE_REF")
    # Ensure both the shared cache dir and the target's own parent exist.
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(content, encoding="utf-8", newline="\n")
145
-
146
-
147
- def _priority_rank(p):
148
- if not isinstance(p, str):
149
- return 99
150
- p = p.strip().upper()
151
- if p == "P0":
152
- return 0
153
- if p == "P1":
154
- return 1
155
- if p == "P2":
156
- return 2
157
- if p == "P3":
158
- return 3
159
- return 99
160
-
161
-
162
def _sanitize_for_comment(text):
    """Scrub local filesystem paths from *text* before posting to GitHub.

    Rewrites legacy and current cache locations to "[cache]/" and strips
    the absolute repo prefix entirely so comments stay machine-agnostic.
    Non-string input is stringified first.
    """
    if not isinstance(text, str):
        text = str(text)

    home = str(Path.home())
    cache_abs = str(CACHE_DIR.resolve())
    repo_abs = str(REPO_ROOT.resolve())

    # Legacy cache locations (backward compat).
    legacy_prefixes = ["~/.opencode/cache/"]
    if home:
        legacy_prefixes.append(home + "/.opencode/cache/")
    for prefix in legacy_prefixes:
        text = text.replace(prefix, "[cache]/")

    # Current cache location.
    text = text.replace(cache_abs + "/", "[cache]/")

    # Avoid leaking absolute local repo paths.
    if repo_abs:
        text = text.replace(repo_abs + "/", "")

    return text
183
-
184
-
185
- def _parse_duplicate_groups_json(s):
186
- if not s:
187
- return []
188
- try:
189
- data = json.loads(s)
190
- except Exception:
191
- return []
192
-
193
- groups = []
194
- if isinstance(data, dict) and isinstance(data.get("duplicateGroups"), list):
195
- groups = data.get("duplicateGroups")
196
- elif isinstance(data, list):
197
- groups = data
198
- else:
199
- return []
200
-
201
- out = []
202
- for g in (groups or []):
203
- if not isinstance(g, list):
204
- continue
205
- ids = []
206
- for it in g:
207
- if isinstance(it, str) and it.strip():
208
- ids.append(it.strip())
209
- ids = list(dict.fromkeys(ids))
210
- if len(ids) >= 2:
211
- out.append(ids)
212
- return out
213
-
214
-
215
def _parse_duplicate_groups_b64(s):
    """Decode strictly-validated base64 and delegate to the JSON parser.

    Any decoding problem is treated as "no duplicate groups" ([]).
    """
    if not s:
        return []
    try:
        decoded = base64.b64decode(s.encode("ascii"), validate=True)
        text = decoded.decode("utf-8", errors="replace")
    except Exception:
        return []
    return _parse_duplicate_groups_json(text)
223
-
224
-
225
- def _parse_escalation_groups_json(s):
226
- """Parse escalation groups JSON (same format as duplicate groups)."""
227
- if not s:
228
- return []
229
- try:
230
- data = json.loads(s)
231
- except Exception:
232
- return []
233
-
234
- groups = []
235
- if isinstance(data, dict) and isinstance(data.get("escalationGroups"), list):
236
- groups = data.get("escalationGroups")
237
- elif isinstance(data, list):
238
- groups = data
239
- else:
240
- return []
241
-
242
- out = []
243
- for g in (groups or []):
244
- if not isinstance(g, list):
245
- continue
246
- ids = []
247
- for it in g:
248
- if isinstance(it, str) and it.strip():
249
- ids.append(it.strip())
250
- ids = list(dict.fromkeys(ids))
251
- if len(ids) >= 2:
252
- out.append(ids)
253
- return out
254
-
255
-
256
def _parse_escalation_groups_b64(s):
    """Decode strictly-validated base64 escalation groups; [] on any error."""
    if not s:
        return []
    try:
        decoded = base64.b64decode(s.encode("ascii"), validate=True)
        text = decoded.decode("utf-8", errors="replace")
    except Exception:
        return []
    return _parse_escalation_groups_json(text)
265
-
266
-
267
- def _parse_decision_log(md_text):
268
- """
269
- Parse decision log markdown and extract fixed/rejected decisions.
270
-
271
- Format:
272
- # Decision Log
273
- PR: 123
274
- ## Round 1
275
- ### Fixed
276
- - id: CDX-001
277
- commit: abc123
278
- essence: JSON.parse error handling
279
- ### Rejected
280
- - id: GMN-004
281
- priority: P2
282
- reason: needs product decision
283
- essence: component split suggestion
284
-
285
- Returns: [
286
- {"id": "CDX-001", "status": "fixed", "essence": "...", "commit": "..."},
287
- {"id": "GMN-004", "status": "rejected", "essence": "...", "reason": "...", "priority": "P2"}
288
- ]
289
- """
290
- if not md_text:
291
- return []
292
-
293
- lines = md_text.splitlines()
294
- decisions = []
295
-
296
- current_status = None # "fixed" or "rejected"
297
- current_entry = None
298
-
299
- for raw in lines:
300
- line = raw.rstrip("\n")
301
-
302
- # Detect status section headers
303
- if line.strip().lower() == "### fixed":
304
- current_status = "fixed"
305
- if current_entry:
306
- decisions.append(current_entry)
307
- current_entry = None
308
- continue
309
-
310
- if line.strip().lower() == "### rejected":
311
- current_status = "rejected"
312
- if current_entry:
313
- decisions.append(current_entry)
314
- current_entry = None
315
- continue
316
-
317
- # Reset on new round headers
318
- if line.startswith("## Round "):
319
- if current_entry:
320
- decisions.append(current_entry)
321
- current_entry = None
322
- continue
323
-
324
- # Start new entry
325
- if line.startswith("- id:") and current_status:
326
- if current_entry:
327
- decisions.append(current_entry)
328
- fid = line.split(":", 1)[1].strip()
329
- current_entry = {"id": fid, "status": current_status}
330
- continue
331
-
332
- # Parse entry fields
333
- if current_entry and line.startswith(" "):
334
- m = re.match(r"^\s{2}([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
335
- if m:
336
- k = m.group(1).strip()
337
- v = m.group(2).strip()
338
- current_entry[k] = v
339
-
340
- # Don't forget last entry
341
- if current_entry:
342
- decisions.append(current_entry)
343
-
344
- return decisions
345
-
346
-
347
- def _filter_by_decision_log(findings, prior_decisions, escalation_groups):
348
- """
349
- Filter findings based on decision log.
350
-
351
- Rules:
352
- 1. Filter out findings matching any "fixed" decision (by escalation group)
353
- 2. Filter out findings matching "rejected" decisions UNLESS in escalation group
354
-
355
- Args:
356
- findings: list of finding dicts
357
- prior_decisions: list from _parse_decision_log()
358
- escalation_groups: list of [rejected_id, new_finding_id] pairs
359
-
360
- Returns:
361
- filtered list of findings
362
- """
363
- if not prior_decisions:
364
- return findings
365
-
366
- escalation_map = {}
367
- for group in escalation_groups:
368
- if len(group) >= 2:
369
- prior_id = group[0]
370
- new_finding_ids = group[1:]
371
- if prior_id not in escalation_map:
372
- escalation_map[prior_id] = set()
373
- escalation_map[prior_id].update(new_finding_ids)
374
-
375
- fixed_ids = set()
376
- rejected_ids = set()
377
-
378
- for dec in prior_decisions:
379
- status = dec.get("status", "").lower()
380
- fid = dec.get("id", "").strip()
381
- if not fid:
382
- continue
383
-
384
- if status == "fixed":
385
- fixed_ids.add(fid)
386
- elif status == "rejected":
387
- rejected_ids.add(fid)
388
-
389
- filtered = []
390
- for f in findings:
391
- fid = f.get("id", "").strip()
392
- if not fid:
393
- continue
394
-
395
- should_filter = False
396
-
397
- if fid in fixed_ids:
398
- should_filter = True
399
-
400
- if not should_filter:
401
- for fixed_id in fixed_ids:
402
- if fixed_id in escalation_map and fid in escalation_map[fixed_id]:
403
- should_filter = True
404
- break
405
-
406
- if not should_filter:
407
- if fid in rejected_ids:
408
- should_filter = True
409
-
410
- for rejected_id in rejected_ids:
411
- if rejected_id in escalation_map and fid in escalation_map[rejected_id]:
412
- should_filter = False
413
- break
414
-
415
- if not should_filter:
416
- filtered.append(f)
417
-
418
- return filtered
419
-
420
-
421
- def _parse_review_findings(md_text):
422
- lines = md_text.splitlines()
423
- items = []
424
-
425
- cur = None
426
- for raw in lines:
427
- line = raw.rstrip("\n")
428
- if line.startswith("- id:"):
429
- if cur:
430
- items.append(cur)
431
- cur = {"id": line.split(":", 1)[1].strip()}
432
- continue
433
- if cur and line.startswith(" "):
434
- m = re.match(r"^\s{2}([a-zA-Z][a-zA-Z0-9]*):\s*(.*)$", line)
435
- if not m:
436
- continue
437
- k = m.group(1).strip()
438
- v = m.group(2)
439
- cur[k] = v.strip()
440
-
441
- if cur:
442
- items.append(cur)
443
-
444
- normalized = []
445
- for it in items:
446
- fid = (it.get("id") or "").strip()
447
- if not fid:
448
- continue
449
- normalized.append(
450
- {
451
- "id": fid,
452
- "priority": (it.get("priority") or "P3").strip(),
453
- "category": (it.get("category") or "quality").strip(),
454
- "file": (it.get("file") or "<unknown>").strip(),
455
- "line": (it.get("line") or "null").strip(),
456
- "title": (it.get("title") or "").strip(),
457
- "description": (it.get("description") or "").strip(),
458
- "suggestion": (it.get("suggestion") or "(no suggestion provided)").strip(),
459
- }
460
- )
461
- return normalized
462
-
463
-
464
def _merge_duplicates(findings, duplicate_groups):
    """Collapse LLM-identified duplicate findings into canonical entries.

    For each group, the finding with the most urgent priority (ties
    broken by id) becomes canonical; the other ids are dropped and listed
    in the canonical finding's description ("Also reported as: ...").
    Reviewer-provided id prefixes are preserved as-is.

    Returns (findings sorted by priority then id, {canonical: [merged ids]}).
    """
    # Later findings with a repeated id overwrite earlier ones, as before.
    by_id = {f["id"]: dict(f) for f in findings}
    merged_map = {}
    dropped = set()

    for group in duplicate_groups:
        known = list(dict.fromkeys(i for i in group if i in by_id))
        if len(known) < 2:
            continue

        canonical = min(
            known,
            key=lambda fid: (_priority_rank(by_id[fid].get("priority")), fid),
        )
        absorbed = [i for i in known if i != canonical]
        if not absorbed:
            continue

        merged_map[canonical] = absorbed
        dropped.update(absorbed)

    out = []
    for fid, finding in by_id.items():
        if fid in dropped:
            continue
        if fid in merged_map:
            note = "Also reported as: " + ", ".join(merged_map[fid])
            existing = finding.get("description") or ""
            finding = dict(finding)
            finding["description"] = existing + "\n" + note if existing else note
        out.append(finding)

    out.sort(key=lambda f: (_priority_rank(f.get("priority")), f.get("id") or ""))
    return out, merged_map
508
-
509
-
510
- def _counts(findings):
511
- c = {"P0": 0, "P1": 0, "P2": 0, "P3": 0}
512
- for f in findings:
513
- p = (f.get("priority") or "").strip().upper()
514
- if p in c:
515
- c[p] += 1
516
- return c
517
-
518
-
519
def _check_existing_comment(pr_number, run_id, round_num, comment_type):
    """
    Check if a comment with same runId/round/type already exists.
    Returns True if duplicate exists (should skip posting).

    comment_type: "review-summary" or "fix-report" or "final-report"

    Best-effort: any gh/JSON failure is treated as "no duplicate", so the
    caller proceeds to post (double-posting is preferred over silence).
    """
    try:
        # NOTE(review): with multiple pages, `gh api --paginate` emits one
        # JSON array per page back-to-back; json.loads would then fail and
        # this returns False via the except clause. Confirm against gh docs
        # (e.g. `--slurp`) if PRs with 100+ comments matter.
        result = subprocess.run(
            ["gh", "api", f"repos/:owner/:repo/issues/{pr_number}/comments", "--paginate"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
        )
        if result.returncode != 0:
            return False

        comments = json.loads(result.stdout or "[]")

        # The per-round section header distinguishes comment generations.
        if comment_type == "review-summary":
            type_header = f"## Review Summary (Round {round_num})"
        elif comment_type == "fix-report":
            type_header = f"## Fix Report (Round {round_num})"
        elif comment_type == "final-report":
            type_header = "## Final Report"
        else:
            return False

        run_id_pattern = f"RunId: {run_id}"

        # A duplicate must carry our marker, the same header, and the run id.
        for comment in comments:
            body = comment.get("body", "")
            if MARKER in body and type_header in body and run_id_pattern in body:
                return True

        return False
    except Exception:
        return False
557
-
558
-
559
def _post_pr_comment(pr_number, body_ref, run_id=None, round_num=None, comment_type=None):
    """Post a PR comment via `gh`, with an optional idempotency pre-check.

    *body_ref* may be a ready Path or a cache ref to validate. When
    run_id, round_num, and comment_type are all provided (truthy), an
    existing matching comment makes the post a no-op treated as success.

    Returns True on success or idempotent skip, False otherwise.
    """
    if isinstance(body_ref, Path):
        body_path = body_ref
    else:
        body_path = _resolve_ref(REPO_ROOT, CACHE_DIR, body_ref)
        if not body_path:
            return False

    if run_id and round_num and comment_type:
        if _check_existing_comment(pr_number, run_id, round_num, comment_type):
            return True

    proc = subprocess.run(
        ["gh", "pr", "comment", str(pr_number), "--body-file", str(body_path)],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return proc.returncode == 0
586
-
587
-
588
def _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews):
    """Render the Mode-A "Review Summary" PR comment body (markdown)."""
    out = [
        MARKER,
        "",
        f"## Review Summary (Round {round_num})",
        "",
        f"- PR: #{pr_number}",
        f"- RunId: {run_id}",
        f"- P0: {counts['P0']} P1: {counts['P1']} P2: {counts['P2']} P3: {counts['P3']}",
        "",
    ]

    if must_fix:
        out += ["## Must Fix (P0/P1)", ""]
        for finding in must_fix:
            fid = finding.get("id") or ""
            pri = (finding.get("priority") or "").strip()
            title = finding.get("title") or ""
            file = finding.get("file") or "<unknown>"
            line = finding.get("line") or "null"
            suggestion = finding.get("suggestion") or ""

            out.append(f"- {fid} ({pri}) {title}")
            out.append(f"  - {file}:{line}")
            if fid in merged_map:
                out.append(f"  - merged: {', '.join(merged_map[fid])}")
            if suggestion:
                out.append(f"  - suggestion: {_sanitize_for_comment(suggestion)}")
        out.append("")
    else:
        out += ["## Result", "", "No P0/P1 issues found.", ""]

    # Full reviewer output goes into a collapsed <details> section.
    out += ["<details>", "<summary>Raw Reviews</summary>", ""]
    for name, content in raw_reviews:
        out += [f"### {name}", "", "```md", _sanitize_for_comment(content), "```", ""]
    out += ["</details>", ""]
    return "\n".join(out)
635
-
636
-
637
def _render_mode_b_comment(pr_number, round_num, run_id, fix_report_md):
    """Render the Mode-B "Fix Report" PR comment body (markdown)."""
    parts = (
        MARKER,
        "",
        f"## Fix Report (Round {round_num})",
        "",
        f"- PR: #{pr_number}",
        f"- RunId: {run_id}",
        "",
        _sanitize_for_comment(fix_report_md),
        "",
    )
    return "\n".join(parts)
649
-
650
-
651
def _render_final_comment(pr_number, round_num, run_id, status):
    """Render the final wrap-up PR comment.

    *status* == "RESOLVED" yields the success body; anything else yields
    the max-rounds-reached body.
    """
    out = [
        MARKER,
        "",
        "## Final Report",
        "",
        f"- PR: #{pr_number}",
        f"- Total Rounds: {round_num}",
        f"- RunId: {run_id}",
        "",
    ]

    if status == "RESOLVED":
        out += [
            "### Status: ✅ All issues resolved",
            "",
            "All P0/P1 issues from the automated review have been addressed.",
            "The PR is ready for human review and merge.",
        ]
    else:
        out += [
            "### Status: ⚠️ Max rounds reached",
            "",
            "The automated review loop has completed the maximum number of rounds (3).",
            "Some issues may still remain. Please review the PR comments above for details.",
        ]

    out.append("")
    return "\n".join(out)
675
-
676
-
677
def _append_fix_entries(lines, findings):
    """Append findings to a fix-file body in the canonical key/value layout.

    Newlines inside description/suggestion are escaped to literal "\\n" so
    every field stays on a single line for the downstream parser. Extracted
    so IssuesToFix and OptionalIssues share one renderer instead of two
    duplicated loops.
    """
    for f in findings:
        fid = f.get("id") or ""
        pri = (f.get("priority") or "P3").strip()
        cat = (f.get("category") or "quality").strip()
        file = (f.get("file") or "<unknown>").strip()
        line = (f.get("line") or "null").strip()
        title = (f.get("title") or "").strip()
        desc = (f.get("description") or "").replace("\n", "\\n").strip()
        sugg = (f.get("suggestion") or "(no suggestion provided)").replace("\n", "\\n").strip()

        lines.append(f"- id: {fid}")
        lines.append(f"  priority: {pri}")
        lines.append(f"  category: {cat}")
        lines.append(f"  file: {file}")
        lines.append(f"  line: {line}")
        lines.append(f"  title: {title}")
        lines.append(f"  description: {desc}")
        lines.append(f"  suggestion: {sugg}")


def main(argv):
    """CLI entry point; prints exactly one JSON object to stdout.

    Dispatch order (first match wins):
      1. --final-report      post the final wrap-up -> {"ok": true, "final": true}
      2. --fix-report-file   Mode B fix report      -> {"ok": true}
      3. otherwise Mode A: parse context/review files, merge duplicates,
         apply the decision log, post a summary, then emit either
         {"stop": true} or {"stop": false, "fixFile": "<repo-relative>"}.

    Returns a process exit code: 0 on success, 1 on runtime failure,
    2 on bad arguments.
    """

    class _ArgParser(argparse.ArgumentParser):
        # argparse normally exits the process on bad args; convert that to
        # an exception so the single-JSON-object stdout contract holds.
        def error(self, message):
            raise ValueError(message)

    parser = _ArgParser(add_help=False)
    parser.add_argument("--pr", type=int, required=True)
    parser.add_argument("--round", type=int, default=1)
    parser.add_argument("--run-id", required=True)
    parser.add_argument("--context-file")
    parser.add_argument("--review-file", action="append", default=[])
    parser.add_argument("--fix-report-file")
    parser.add_argument("--final-report")
    parser.add_argument("--duplicate-groups-json")
    parser.add_argument("--duplicate-groups-b64")
    parser.add_argument("--decision-log-file")
    parser.add_argument("--escalation-groups-b64")

    try:
        args = parser.parse_args(argv)
    except ValueError:
        _json_out({"error": "INVALID_ARGS"})
        return 2

    pr_number = args.pr
    round_num = args.round
    run_id = str(args.run_id)

    final_report = (args.final_report or "").strip() or None
    fix_report_file = (args.fix_report_file or "").strip() or None
    context_file = (args.context_file or "").strip() or None
    review_files = [s for s in ((rf or "").strip() for rf in args.review_file or []) if s]

    # Final-report mode: post the wrap-up comment and stop.
    if final_report:
        body = _render_final_comment(pr_number, round_num, run_id, final_report)
        body_basename = f"review-aggregate-final-pr{pr_number}-{run_id}.md"
        body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
        _write_cache_text(body_ref, body)
        if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="final-report"):
            _json_out({"error": "GH_PR_COMMENT_FAILED"})
            return 1
        _json_out({"ok": True, "final": True})
        return 0

    # Mode B: post an existing fix report as a PR comment.
    if fix_report_file:
        fix_p = _resolve_ref(REPO_ROOT, CACHE_DIR, fix_report_file)
        if not fix_p or not fix_p.exists():
            _json_out({"error": "FIX_REPORT_FILE_NOT_FOUND"})
            return 1
        fix_md = _read_cache_text(fix_report_file)
        body = _render_mode_b_comment(pr_number, round_num, run_id, fix_md)
        body_basename = f"review-aggregate-fix-comment-pr{pr_number}-r{round_num}-{run_id}.md"
        body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
        _write_cache_text(body_ref, body)
        if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="fix-report"):
            _json_out({"error": "GH_PR_COMMENT_FAILED"})
            return 1
        _json_out({"ok": True})
        return 0

    # Mode A needs a context file and at least one readable review file.
    if not context_file:
        _json_out({"error": "MISSING_CONTEXT_FILE"})
        return 1
    if not review_files:
        _json_out({"error": "MISSING_REVIEW_FILES"})
        return 1

    ctx_p = _resolve_ref(REPO_ROOT, CACHE_DIR, context_file)
    if not ctx_p or not ctx_p.exists():
        _json_out({"error": "CONTEXT_FILE_NOT_FOUND"})
        return 1

    valid_review_files = []
    for rf in review_files:
        p = _resolve_ref(REPO_ROOT, CACHE_DIR, rf)
        if p and p.exists():
            valid_review_files.append(rf)
    review_files = valid_review_files
    if not review_files:
        _json_out({"error": "REVIEW_FILES_NOT_FOUND"})
        return 1

    # Collect raw review markdown and parsed findings.
    raw_reviews = []
    all_findings = []
    for rf in review_files:
        md = _read_cache_text(rf)
        raw_reviews.append((rf, md))
        all_findings.extend(_parse_review_findings(md))

    # Duplicate groups arrive via CLI args; b64 is the fallback form.
    duplicate_groups = _parse_duplicate_groups_json(args.duplicate_groups_json or "")
    if not duplicate_groups:
        duplicate_groups = _parse_duplicate_groups_b64(args.duplicate_groups_b64 or "")
    merged_findings, merged_map = _merge_duplicates(all_findings, duplicate_groups)

    # Prior-round decisions (best effort) filter already-handled findings.
    decision_log_file = (args.decision_log_file or "").strip() or None
    prior_decisions = []
    if decision_log_file:
        try:
            prior_decisions = _parse_decision_log(_read_cache_text(decision_log_file))
        except Exception:
            pass

    escalation_groups = _parse_escalation_groups_b64(args.escalation_groups_b64 or "")

    if prior_decisions:
        merged_findings = _filter_by_decision_log(merged_findings, prior_decisions, escalation_groups)

    counts = _counts(merged_findings)

    must_fix = [f for f in merged_findings if _priority_rank(f.get("priority")) <= 1]
    optional = [f for f in merged_findings if _priority_rank(f.get("priority")) >= 2]
    stop = len(must_fix) == 0

    body = _render_mode_a_comment(pr_number, round_num, run_id, counts, must_fix, merged_map, raw_reviews)
    body_basename = f"review-aggregate-comment-pr{pr_number}-r{round_num}-{run_id}.md"
    body_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / body_basename)
    _write_cache_text(body_ref, body)
    if not _post_pr_comment(pr_number, body_ref, run_id=run_id, round_num=round_num, comment_type="review-summary"):
        _json_out({"error": "GH_PR_COMMENT_FAILED"})
        return 1

    if stop:
        _json_out({"stop": True})
        return 0

    # Build the fix file consumed by pr-fix: mandatory P0/P1 first,
    # then optional P2/P3.
    fix_file = f"fix-pr{pr_number}-r{round_num}-{run_id}.md"
    lines = []
    lines.append("# Fix File")
    lines.append("")
    lines.append(f"PR: {pr_number}")
    lines.append(f"Round: {round_num}")
    lines.append("")
    lines.append("## IssuesToFix")
    lines.append("")
    _append_fix_entries(lines, must_fix)

    lines.append("")
    lines.append("## OptionalIssues")
    lines.append("")
    _append_fix_entries(lines, optional)

    fix_ref = _repo_relpath(REPO_ROOT, CACHE_DIR / fix_file)
    _write_cache_text(fix_ref, "\n".join(lines) + "\n")
    _json_out({"stop": False, "fixFile": fix_ref})
    return 0
861
-
862
-
863
if __name__ == "__main__":
    # Last-resort guard: the stdout contract is "print exactly one JSON
    # object", so an unexpected crash still emits a machine-readable error
    # before exiting 1. SystemExit is not an Exception, so normal exits
    # pass through untouched.
    # NOTE(review): if main() crashes after already printing its JSON, a
    # second JSON object is emitted here — confirm downstream parsers
    # read only the first line.
    try:
        raise SystemExit(main(sys.argv[1:]))
    except Exception:
        _json_out({"error": "AGGREGATE_SCRIPT_FAILED"})
        raise SystemExit(1)