@ranger1/dx 0.1.76 → 0.1.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +92 -31
  2. package/bin/dx.js +3 -3
  3. package/lib/cli/commands/deploy.js +2 -1
  4. package/lib/cli/commands/stack.js +198 -237
  5. package/lib/cli/commands/start.js +0 -6
  6. package/lib/cli/dx-cli.js +10 -1
  7. package/lib/cli/help.js +8 -7
  8. package/lib/{opencode-initial.js → codex-initial.js} +3 -82
  9. package/lib/vercel-deploy.js +14 -27
  10. package/package.json +1 -2
  11. package/@opencode/agents/__pycache__/gh_review_harvest.cpython-314.pyc +0 -0
  12. package/@opencode/agents/__pycache__/pr_context.cpython-314.pyc +0 -0
  13. package/@opencode/agents/__pycache__/pr_precheck.cpython-314.pyc +0 -0
  14. package/@opencode/agents/__pycache__/pr_review_aggregate.cpython-314.pyc +0 -0
  15. package/@opencode/agents/__pycache__/test_pr_review_aggregate.cpython-314-pytest-9.0.2.pyc +0 -0
  16. package/@opencode/agents/__pycache__/test_pr_review_aggregate.cpython-314.pyc +0 -0
  17. package/@opencode/agents/claude-reviewer.md +0 -82
  18. package/@opencode/agents/codex-reviewer.md +0 -83
  19. package/@opencode/agents/gemini-reviewer.md +0 -82
  20. package/@opencode/agents/gh-thread-reviewer.md +0 -122
  21. package/@opencode/agents/gh_review_harvest.py +0 -292
  22. package/@opencode/agents/pr-context.md +0 -82
  23. package/@opencode/agents/pr-fix.md +0 -243
  24. package/@opencode/agents/pr-precheck.md +0 -89
  25. package/@opencode/agents/pr-review-aggregate.md +0 -151
  26. package/@opencode/agents/pr_context.py +0 -351
  27. package/@opencode/agents/pr_precheck.py +0 -505
  28. package/@opencode/agents/pr_review_aggregate.py +0 -868
  29. package/@opencode/agents/test_pr_review_aggregate.py +0 -701
  30. package/@opencode/commands/doctor.md +0 -271
  31. package/@opencode/commands/git-commit-and-pr.md +0 -282
  32. package/@opencode/commands/git-release.md +0 -642
  33. package/@opencode/commands/oh_attach.json +0 -92
  34. package/@opencode/commands/opencode_attach.json +0 -29
  35. package/@opencode/commands/opencode_attach.py +0 -142
  36. package/@opencode/commands/pr-review-loop.md +0 -211
@@ -1,701 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Unit tests for pr_review_aggregate.py decision log parsing and filtering.
4
-
5
- Tests cover:
6
- 1. _parse_decision_log() - parsing markdown decision logs
7
- 2. _filter_by_decision_log() - filtering findings based on prior decisions
8
- 3. Edge cases: empty input, malformed data, cross-reviewer matching
9
- """
10
-
11
- import importlib.util
12
- import json
13
- import subprocess
14
- from collections.abc import Mapping, Sequence
15
- from pathlib import Path
16
- from types import SimpleNamespace
17
- from typing import Callable, cast
18
-
19
- import pytest
20
-
21
-
22
- def _load_pr_review_aggregate_module():
23
- module_path = Path(__file__).with_name("pr_review_aggregate.py")
24
- spec = importlib.util.spec_from_file_location("pr_review_aggregate", module_path)
25
- if spec is None or spec.loader is None:
26
- raise RuntimeError(f"Failed to load module spec: {module_path}")
27
- module = importlib.util.module_from_spec(spec)
28
- spec.loader.exec_module(module)
29
- return module
30
-
31
-
32
- _pr_review_aggregate = _load_pr_review_aggregate_module()
33
-
34
- _parse_decision_log = cast(Callable[[str], list[dict[str, object]]], getattr(_pr_review_aggregate, "_parse_decision_log"))
35
- _filter_by_decision_log = cast(
36
- Callable[[Sequence[Mapping[str, object]], Sequence[Mapping[str, object]], list[list[str]]], list[dict[str, object]]],
37
- getattr(_pr_review_aggregate, "_filter_by_decision_log"),
38
- )
39
- _parse_escalation_groups_json = cast(
40
- Callable[[str], list[list[str]]],
41
- getattr(_pr_review_aggregate, "_parse_escalation_groups_json"),
42
- )
43
- _parse_escalation_groups_b64 = cast(
44
- Callable[[str], list[list[str]]],
45
- getattr(_pr_review_aggregate, "_parse_escalation_groups_b64"),
46
- )
47
- _check_existing_comment = cast(
48
- Callable[[int, str, int, str], bool],
49
- getattr(_pr_review_aggregate, "_check_existing_comment"),
50
- )
51
- _MARKER = cast(str, getattr(_pr_review_aggregate, "MARKER"))
52
-
53
-
54
- # ============================================================
55
- # Fixtures
56
- # ============================================================
57
-
58
- @pytest.fixture
59
- def empty_decision_log() -> str:
60
- """Empty decision log markdown."""
61
- return ""
62
-
63
-
64
- @pytest.fixture
65
- def valid_decision_log() -> str:
66
- """Valid decision log with Fixed and Rejected entries."""
67
- return """# Decision Log
68
-
69
- PR: 123
70
-
71
- ## Round 1
72
-
73
- ### Fixed
74
- - id: CDX-001
75
- file: apps/backend/src/api.ts
76
- commit: abc123
77
- essence: JSON.parse 未捕获异常
78
-
79
- - id: GMN-002
80
- file: apps/front/src/ErrorBoundary.tsx
81
- commit: def456
82
- essence: 缺少错误边界处理
83
-
84
- ### Rejected
85
- - id: GMN-004
86
- file: apps/front/src/Component.tsx
87
- priority: P2
88
- reason: 需要产品决策,超出 PR 范围
89
- essence: 组件拆分建议
90
-
91
- - id: CLD-003
92
- file: apps/backend/src/db.ts
93
- priority: P3
94
- reason: 性能优化非当前优先级
95
- essence: 批量查询优化
96
- """
97
-
98
-
99
- @pytest.fixture
100
- def valid_decision_log_legacy_no_file() -> str:
101
- """Legacy decision log fixture without the file: field (backward compat)."""
102
- return """# Decision Log
103
-
104
- PR: 123
105
-
106
- ## Round 1
107
-
108
- ### Fixed
109
- - id: CDX-001
110
- commit: abc123
111
- essence: JSON.parse 未捕获异常
112
-
113
- - id: GMN-002
114
- commit: def456
115
- essence: 缺少错误边界处理
116
-
117
- ### Rejected
118
- - id: GMN-004
119
- priority: P2
120
- reason: 需要产品决策,超出 PR 范围
121
- essence: 组件拆分建议
122
-
123
- - id: CLD-003
124
- priority: P3
125
- reason: 性能优化非当前优先级
126
- essence: 批量查询优化
127
- """
128
-
129
-
130
- @pytest.fixture
131
- def malformed_decision_log() -> str:
132
- """Malformed decision log with missing fields and bad formatting."""
133
- return """# Decision Log
134
-
135
- PR: 123
136
-
137
- ### Fixed
138
- - id: BROKEN-001
139
- # Missing essence field
140
-
141
- ### Rejected
142
- - id: BROKEN-002
143
- priority: P2
144
- # Missing essence and reason
145
-
146
- Some random text that should be ignored
147
-
148
- - id: BROKEN-003
149
- this is not a valid field format
150
- """
151
-
152
-
153
- @pytest.fixture
154
- def sample_findings() -> list[dict[str, object]]:
155
- """Sample findings list for filter tests."""
156
- return [
157
- {
158
- "id": "CDX-001",
159
- "priority": "P1",
160
- "category": "bug",
161
- "file": "api.ts",
162
- "line": "42",
163
- "title": "JSON parse error",
164
- "description": "JSON.parse 未捕获异常",
165
- "suggestion": "Add try-catch"
166
- },
167
- {
168
- "id": "GMN-004",
169
- "priority": "P2",
170
- "category": "quality",
171
- "file": "Component.tsx",
172
- "line": "100",
173
- "title": "Component split",
174
- "description": "组件拆分建议",
175
- "suggestion": "Split into smaller components"
176
- },
177
- {
178
- "id": "CLD-007",
179
- "priority": "P0",
180
- "category": "bug",
181
- "file": "Component.tsx",
182
- "line": "100",
183
- "title": "Component split (escalated)",
184
- "description": "组件拆分建议 - 升级为 P0",
185
- "suggestion": "Split into smaller components - critical"
186
- },
187
- {
188
- "id": "NEW-001",
189
- "priority": "P1",
190
- "category": "bug",
191
- "file": "utils.ts",
192
- "line": "20",
193
- "title": "New issue",
194
- "description": "This is a new issue",
195
- "suggestion": "Fix it"
196
- }
197
- ]
198
-
199
-
200
- @pytest.fixture
201
- def prior_decisions() -> list[dict[str, object]]:
202
- """Sample prior decisions from _parse_decision_log."""
203
- return [
204
- {
205
- "id": "CDX-001",
206
- "status": "fixed",
207
- "commit": "abc123",
208
- "essence": "JSON.parse 未捕获异常"
209
- },
210
- {
211
- "id": "GMN-004",
212
- "status": "rejected",
213
- "priority": "P2",
214
- "reason": "需要产品决策,超出 PR 范围",
215
- "essence": "组件拆分建议"
216
- }
217
- ]
218
-
219
-
220
- # ============================================================
221
- # Test: _parse_decision_log() - Empty Input
222
- # ============================================================
223
-
224
- def test_parse_decision_log_empty(empty_decision_log: str) -> None:
225
- """
226
- Test that empty decision log returns empty list.
227
-
228
- Given: empty string
229
- When: _parse_decision_log() is called
230
- Then: returns []
231
- """
232
- result = _parse_decision_log(empty_decision_log)
233
- assert result == []
234
- assert isinstance(result, list)
235
-
236
-
237
- # ============================================================
238
- # Test: _parse_decision_log() - Valid Input
239
- # ============================================================
240
-
241
- def test_parse_decision_log_valid(valid_decision_log: str) -> None:
242
- """
243
- Test that valid decision log is parsed into structured data.
244
-
245
- Given: valid markdown with Fixed and Rejected sections
246
- When: _parse_decision_log() is called
247
- Then: returns list of dicts with id, status, essence, and optional fields
248
- """
249
- result = _parse_decision_log(valid_decision_log)
250
-
251
- # Should have 4 entries (2 Fixed, 2 Rejected)
252
- assert len(result) == 4
253
-
254
- # Verify first Fixed entry
255
- fixed_1 = result[0]
256
- assert fixed_1["id"] == "CDX-001"
257
- assert fixed_1["status"] == "fixed"
258
- assert fixed_1["file"] == "apps/backend/src/api.ts"
259
- assert fixed_1["commit"] == "abc123"
260
- assert fixed_1["essence"] == "JSON.parse 未捕获异常"
261
-
262
- # Verify second Fixed entry
263
- fixed_2 = result[1]
264
- assert fixed_2["id"] == "GMN-002"
265
- assert fixed_2["status"] == "fixed"
266
- assert fixed_2["file"] == "apps/front/src/ErrorBoundary.tsx"
267
- assert fixed_2["commit"] == "def456"
268
- assert fixed_2["essence"] == "缺少错误边界处理"
269
-
270
- # Verify first Rejected entry
271
- rejected_1 = result[2]
272
- assert rejected_1["id"] == "GMN-004"
273
- assert rejected_1["status"] == "rejected"
274
- assert rejected_1["file"] == "apps/front/src/Component.tsx"
275
- assert rejected_1["priority"] == "P2"
276
- assert rejected_1["reason"] == "需要产品决策,超出 PR 范围"
277
- assert rejected_1["essence"] == "组件拆分建议"
278
-
279
- # Verify second Rejected entry
280
- rejected_2 = result[3]
281
- assert rejected_2["id"] == "CLD-003"
282
- assert rejected_2["status"] == "rejected"
283
- assert rejected_2["file"] == "apps/backend/src/db.ts"
284
- assert rejected_2["priority"] == "P3"
285
-
286
-
287
- def test_parse_decision_log_legacy_without_file(valid_decision_log_legacy_no_file: str) -> None:
288
- """Decision log entries without file: should still parse (backward compat)."""
289
- result = _parse_decision_log(valid_decision_log_legacy_no_file)
290
-
291
- # Should have 4 entries (2 Fixed, 2 Rejected)
292
- assert len(result) == 4
293
-
294
- # Basic shape should still be present
295
- for entry in result:
296
- assert "id" in entry
297
- assert "status" in entry
298
-
299
- # And file should be optional
300
- assert all(("file" not in e) or (e["file"] in (None, "")) for e in result)
301
-
302
-
303
- # ============================================================
304
- # Test: _parse_decision_log() - Malformed Input
305
- # ============================================================
306
-
307
- def test_parse_decision_log_malformed(malformed_decision_log: str) -> None:
308
- """
309
- Test that malformed decision log degrades gracefully.
310
-
311
- Given: decision log with missing required fields
312
- When: _parse_decision_log() is called
313
- Then: returns partial data without raising exceptions
314
- """
315
- # Should not raise exception
316
- result = _parse_decision_log(malformed_decision_log)
317
-
318
- # Should return some data (even if incomplete)
319
- assert isinstance(result, list)
320
-
321
- # Entries should have at least id and status
322
- for entry in result:
323
- assert "id" in entry
324
- assert "status" in entry
325
-
326
-
327
- # ============================================================
328
- # Test: _filter_by_decision_log() - Fixed Issues
329
- # ============================================================
330
-
331
- def test_filter_fixed_issues(sample_findings: list[dict[str, object]], prior_decisions: list[dict[str, object]]) -> None:
332
- """
333
- Test that findings matching Fixed decisions are filtered out.
334
-
335
- Given: findings containing CDX-001 which is in Fixed decisions
336
- When: _filter_by_decision_log() is called with empty escalation_groups
337
- Then: CDX-001 is filtered out
338
- """
339
- escalation_groups: list[list[str]] = []
340
-
341
- result = _filter_by_decision_log(sample_findings, prior_decisions, escalation_groups)
342
-
343
- # CDX-001 should be filtered (it's in Fixed decisions)
344
- result_ids = [f["id"] for f in result]
345
- assert "CDX-001" not in result_ids
346
-
347
- # Other findings should remain
348
- assert "GMN-004" in result_ids or "CLD-007" in result_ids or "NEW-001" in result_ids
349
-
350
-
351
- # ============================================================
352
- # Test: _filter_by_decision_log() - Rejected Without Escalation
353
- # ============================================================
354
-
355
- def test_filter_rejected_without_escalation(sample_findings: list[dict[str, object]], prior_decisions: list[dict[str, object]]) -> None:
356
- """
357
- Test that findings matching Rejected decisions are filtered out when NOT in escalation_groups.
358
-
359
- Given: findings containing GMN-004 which is in Rejected decisions
360
- and escalation_groups is empty
361
- When: _filter_by_decision_log() is called
362
- Then: GMN-004 is filtered out
363
- """
364
- escalation_groups: list[list[str]] = []
365
-
366
- result = _filter_by_decision_log(sample_findings, prior_decisions, escalation_groups)
367
-
368
- # GMN-004 should be filtered (it's Rejected and not escalated)
369
- result_ids = [f["id"] for f in result]
370
- assert "GMN-004" not in result_ids
371
-
372
- # New findings should remain
373
- assert "NEW-001" in result_ids
374
-
375
-
376
- # ============================================================
377
- # Test: _filter_by_decision_log() - Rejected With Escalation
378
- # ============================================================
379
-
380
- def test_filter_rejected_with_escalation(sample_findings: list[dict[str, object]], prior_decisions: list[dict[str, object]]) -> None:
381
- """
382
- Test that findings matching Rejected decisions are kept when in escalation_groups.
383
-
384
- Given: findings containing CLD-007 which is an escalation of GMN-004
385
- and escalation_groups contains ["GMN-004", "CLD-007"]
386
- When: _filter_by_decision_log() is called
387
- Then: CLD-007 is NOT filtered (it's an escalation)
388
- """
389
- # GMN-004 (Rejected P2) -> CLD-007 (escalated to P0, ≥2 level jump)
390
- escalation_groups = [["GMN-004", "CLD-007"]]
391
-
392
- result = _filter_by_decision_log(sample_findings, prior_decisions, escalation_groups)
393
-
394
- # CLD-007 should NOT be filtered (it's an escalation)
395
- result_ids = [f["id"] for f in result]
396
- assert "CLD-007" in result_ids
397
-
398
- # GMN-004 itself (P2) should still be filtered
399
- assert "GMN-004" not in result_ids
400
-
401
-
402
- # ============================================================
403
- # Test: _filter_by_decision_log() - Cross-Reviewer Match
404
- # ============================================================
405
-
406
- def test_filter_cross_reviewer_match() -> None:
407
- """
408
- Test that findings with different reviewer IDs but same essence are filtered.
409
-
410
- Given: findings containing GMN-005 (different ID from CDX-001)
411
- but prior decisions contain CDX-001 as Fixed
412
- and escalation_groups links them: ["CDX-001", "GMN-005"]
413
- When: _filter_by_decision_log() is called
414
- Then: GMN-005 is filtered (matched via escalation group to Fixed decision)
415
- """
416
- findings = [
417
- {
418
- "id": "GMN-005",
419
- "priority": "P1",
420
- "category": "bug",
421
- "file": "api.ts",
422
- "line": "42",
423
- "title": "JSON parse error",
424
- "description": "JSON.parse 未捕获异常 (same essence as CDX-001)",
425
- "suggestion": "Add try-catch"
426
- },
427
- {
428
- "id": "NEW-002",
429
- "priority": "P2",
430
- "category": "quality",
431
- "file": "utils.ts",
432
- "line": "10",
433
- "title": "Different issue",
434
- "description": "Completely different",
435
- "suggestion": "Fix differently"
436
- }
437
- ]
438
-
439
- prior_decisions = [
440
- {
441
- "id": "CDX-001",
442
- "status": "fixed",
443
- "commit": "abc123",
444
- "essence": "JSON.parse 未捕获异常"
445
- }
446
- ]
447
-
448
- # Escalation group indicates GMN-005 is related to CDX-001
449
- escalation_groups = [["CDX-001", "GMN-005"]]
450
-
451
- result = _filter_by_decision_log(findings, prior_decisions, escalation_groups)
452
-
453
- # GMN-005 should be filtered (linked to Fixed CDX-001 via escalation group)
454
- result_ids = [f["id"] for f in result]
455
- assert "GMN-005" not in result_ids
456
-
457
- # NEW-002 should remain
458
- assert "NEW-002" in result_ids
459
-
460
-
461
- # ============================================================
462
- # Test: _parse_escalation_groups_json()
463
- # ============================================================
464
-
465
- def test_parse_escalation_groups_json_valid() -> None:
466
- """Test parsing valid escalation groups JSON."""
467
- json_str = '{"escalationGroups": [["GMN-004", "CLD-007"], ["CDX-001", "GMN-005"]]}'
468
- result = _parse_escalation_groups_json(json_str)
469
-
470
- assert len(result) == 2
471
- assert ["GMN-004", "CLD-007"] in result
472
- assert ["CDX-001", "GMN-005"] in result
473
-
474
-
475
- def test_parse_escalation_groups_json_empty() -> None:
476
- """Test parsing empty escalation groups JSON."""
477
- result = _parse_escalation_groups_json("")
478
- assert result == []
479
-
480
-
481
- def test_parse_escalation_groups_json_malformed() -> None:
482
- """Test parsing malformed JSON returns empty list."""
483
- result = _parse_escalation_groups_json("not valid json {{{")
484
- assert result == []
485
-
486
-
487
- # ============================================================
488
- # Test: _parse_escalation_groups_b64()
489
- # ============================================================
490
-
491
- def test_parse_escalation_groups_b64_valid() -> None:
492
- """Test parsing valid base64-encoded escalation groups."""
493
- import base64
494
- json_str = '{"escalationGroups": [["GMN-004", "CLD-007"]]}'
495
- b64_str = base64.b64encode(json_str.encode("utf-8")).decode("ascii")
496
-
497
- result = _parse_escalation_groups_b64(b64_str)
498
-
499
- assert len(result) == 1
500
- assert ["GMN-004", "CLD-007"] in result
501
-
502
-
503
- def test_parse_escalation_groups_b64_empty() -> None:
504
- """Test parsing empty base64 string."""
505
- result = _parse_escalation_groups_b64("")
506
- assert result == []
507
-
508
-
509
- def test_parse_escalation_groups_b64_invalid() -> None:
510
- """Test parsing invalid base64 returns empty list."""
511
- result = _parse_escalation_groups_b64("not-valid-base64!!!")
512
- assert result == []
513
-
514
-
515
- # ============================================================
516
- # Test: Integration - Full Workflow
517
- # ============================================================
518
-
519
- def test_integration_full_filter_workflow() -> None:
520
- """
521
- Integration test: parse decision log and filter findings.
522
-
523
- Simulates real workflow:
524
- 1. Parse decision log markdown
525
- 2. Parse escalation groups
526
- 3. Filter findings based on decisions and escalations
527
- """
528
- decision_log_md = """# Decision Log
529
-
530
- PR: 456
531
-
532
- ## Round 1
533
-
534
- ### Fixed
535
- - id: CDX-010
536
- commit: sha1
537
- essence: 类型错误修复
538
-
539
- ### Rejected
540
- - id: GMN-020
541
- priority: P3
542
- reason: 低优先级优化
543
- essence: 性能优化建议
544
- """
545
-
546
- findings = [
547
- {"id": "CDX-010", "priority": "P1", "category": "bug", "file": "a.ts", "line": "1", "title": "Type error", "description": "类型错误修复", "suggestion": "Fix"},
548
- {"id": "GMN-020", "priority": "P3", "category": "perf", "file": "b.ts", "line": "2", "title": "Perf opt", "description": "性能优化建议", "suggestion": "Optimize"},
549
- {"id": "CLD-030", "priority": "P1", "category": "perf", "file": "b.ts", "line": "2", "title": "Perf opt escalated", "description": "性能优化建议 - 升级", "suggestion": "Optimize now"},
550
- {"id": "NEW-100", "priority": "P2", "category": "quality", "file": "c.ts", "line": "3", "title": "New", "description": "新问题", "suggestion": "Fix new"},
551
- ]
552
-
553
- # Parse decision log
554
- prior_decisions = _parse_decision_log(decision_log_md)
555
- assert len(prior_decisions) == 2
556
-
557
- # Escalation: GMN-020 (P3) -> CLD-030 (P1, ≥2 level jump)
558
- escalation_groups = [["GMN-020", "CLD-030"]]
559
-
560
- # Filter findings
561
- result = _filter_by_decision_log(findings, prior_decisions, escalation_groups)
562
- result_ids = [f["id"] for f in result]
563
-
564
- # CDX-010 should be filtered (Fixed)
565
- assert "CDX-010" not in result_ids
566
-
567
- # GMN-020 should be filtered (Rejected, not escalated)
568
- assert "GMN-020" not in result_ids
569
-
570
- # CLD-030 should remain (escalation of Rejected)
571
- assert "CLD-030" in result_ids
572
-
573
- # NEW-100 should remain (new issue)
574
- assert "NEW-100" in result_ids
575
-
576
- # Final count: 2 findings remain
577
- assert len(result) == 2
578
-
579
-
580
- # ============================================================
581
- # Test: _check_existing_comment() - PR comment idempotency
582
- # ============================================================
583
-
584
-
585
- def _patch_subprocess_run_for_gh_comments(monkeypatch: pytest.MonkeyPatch, comments: list[dict[str, object]], returncode: int = 0) -> None:
586
- stdout = json.dumps(comments, ensure_ascii=True)
587
-
588
- def _fake_run(*_args: object, **_kwargs: object) -> SimpleNamespace:
589
- return SimpleNamespace(returncode=returncode, stdout=stdout)
590
-
591
- monkeypatch.setattr(subprocess, "run", _fake_run)
592
-
593
-
594
- @pytest.mark.parametrize(
595
- "comment_type,round_num,expected_header",
596
- [
597
- ("review-summary", 2, "## Review Summary (Round 2)"),
598
- ("fix-report", 2, "## Fix Report (Round 2)"),
599
- ("final-report", 2, "## Final Report"),
600
- ],
601
- )
602
- def test_check_existing_comment_true_when_marker_header_and_runid_match(
603
- monkeypatch: pytest.MonkeyPatch, comment_type: str, round_num: int, expected_header: str
604
- ) -> None:
605
- pr_number = 123
606
- run_id = "run-abc"
607
- body = "\n".join([_MARKER, "", expected_header, "", f"RunId: {run_id}"])
608
- _patch_subprocess_run_for_gh_comments(monkeypatch, [{"body": body}])
609
-
610
- assert _check_existing_comment(pr_number, run_id, round_num, comment_type) is True
611
-
612
-
613
- @pytest.mark.parametrize(
614
- "comment_type,round_num,expected_header",
615
- [
616
- ("review-summary", 3, "## Review Summary (Round 3)"),
617
- ("fix-report", 3, "## Fix Report (Round 3)"),
618
- ("final-report", 3, "## Final Report"),
619
- ],
620
- )
621
- def test_check_existing_comment_false_when_marker_missing(
622
- monkeypatch: pytest.MonkeyPatch, comment_type: str, round_num: int, expected_header: str
623
- ) -> None:
624
- pr_number = 456
625
- run_id = "run-xyz"
626
- body = "\n".join(["", expected_header, "", f"RunId: {run_id}"])
627
- _patch_subprocess_run_for_gh_comments(monkeypatch, [{"body": body}])
628
-
629
- assert _check_existing_comment(pr_number, run_id, round_num, comment_type) is False
630
-
631
-
632
- @pytest.mark.parametrize(
633
- "comment_type,round_num,expected_header,wrong_header",
634
- [
635
- (
636
- "review-summary",
637
- 2,
638
- "## Review Summary (Round 2)",
639
- "## Fix Report (Round 2)",
640
- ),
641
- (
642
- "fix-report",
643
- 2,
644
- "## Fix Report (Round 2)",
645
- "## Review Summary (Round 2)",
646
- ),
647
- (
648
- "final-report",
649
- 2,
650
- "## Final Report",
651
- "## Review Summary (Round 2)",
652
- ),
653
- ],
654
- )
655
- def test_check_existing_comment_false_when_header_mismatched(
656
- monkeypatch: pytest.MonkeyPatch, comment_type: str, round_num: int, expected_header: str, wrong_header: str
657
- ) -> None:
658
- pr_number = 789
659
- run_id = "run-123"
660
-
661
- body = "\n".join([_MARKER, "", wrong_header, "", f"RunId: {run_id}"])
662
- _patch_subprocess_run_for_gh_comments(monkeypatch, [{"body": body}])
663
-
664
- assert expected_header not in body
665
- assert _check_existing_comment(pr_number, run_id, round_num, comment_type) is False
666
-
667
-
668
- @pytest.mark.parametrize(
669
- "comment_type,round_num,expected_header",
670
- [
671
- ("review-summary", 1, "## Review Summary (Round 1)"),
672
- ("fix-report", 1, "## Fix Report (Round 1)"),
673
- ("final-report", 1, "## Final Report"),
674
- ],
675
- )
676
- def test_check_existing_comment_false_when_runid_mismatched(
677
- monkeypatch: pytest.MonkeyPatch, comment_type: str, round_num: int, expected_header: str
678
- ) -> None:
679
- pr_number = 101
680
- run_id = "run-a"
681
- other_run_id = "run-b"
682
-
683
- body = "\n".join([_MARKER, "", expected_header, "", f"RunId: {other_run_id}"])
684
- _patch_subprocess_run_for_gh_comments(monkeypatch, [{"body": body}])
685
-
686
- assert _check_existing_comment(pr_number, run_id, round_num, comment_type) is False
687
-
688
-
689
- def test_check_existing_comment_false_when_subprocess_run_nonzero(monkeypatch: pytest.MonkeyPatch) -> None:
690
- pr_number = 999
691
- run_id = "run-nonzero"
692
- round_num = 2
693
- comment_type = "review-summary"
694
- body = "\n".join([_MARKER, "", "## Review Summary (Round 2)", "", f"RunId: {run_id}"])
695
-
696
- _patch_subprocess_run_for_gh_comments(monkeypatch, [{"body": body}], returncode=1)
697
- assert _check_existing_comment(pr_number, run_id, round_num, comment_type) is False
698
-
699
-
700
- if __name__ == "__main__":
701
- _ = pytest.main([__file__, "-v"])