jleechanorg-pr-automation 0.2.41__tar.gz → 0.2.48__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/PKG-INFO +1 -1
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/jleechanorg_pr_monitor.py +99 -14
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/codex_github_mentions.py +145 -0
- jleechanorg_pr_automation-0.2.48/jleechanorg_pr_automation/openai_automation/test_auth_restoration.py +244 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/orchestrated_pr_runner.py +37 -8
- jleechanorg_pr_automation-0.2.48/jleechanorg_pr_automation/tests/test_cleanup_pending_reviews.py +320 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_model_parameter.py +4 -11
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_pr_targeting.py +1 -1
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/utils.py +1 -1
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/PKG-INFO +1 -1
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/SOURCES.txt +2 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/pyproject.toml +1 -1
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/README.md +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/STORAGE_STATE_TESTING_PROTOCOL.md +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/__init__.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/automation_safety_manager.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/automation_safety_wrapper.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/automation_utils.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/check_codex_comment.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/codex_branch_updater.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/codex_config.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/logging_utils.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/__init__.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/debug_page_content.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/oracle_cli.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/test_codex_comprehensive.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/test_codex_integration.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/__init__.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/conftest.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_actionable_counting_matrix.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_attempt_limit_logic.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_automation_marker_functions.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_automation_over_running_reproduction.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_automation_safety_limits.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_automation_safety_manager_comprehensive.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_codex_actor_matching.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_fixpr_prompt.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_fixpr_return_value.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_graphql_error_handling.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_orchestrated_pr_runner.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_packaging_integration.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_pr_filtering_matrix.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_pr_monitor_eligibility.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_version_consistency.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_workflow_specific_limits.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_workspace_dispatch_missing_dir.py +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/dependency_links.txt +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/entry_points.txt +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/requires.txt +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/top_level.txt +0 -0
- {jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/setup.cfg +0 -0
{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/jleechanorg_pr_monitor.py

@@ -778,6 +778,78 @@ class JleechanorgPRMonitor:

         return None

+    def _cleanup_pending_reviews(self, repo_full: str, pr_number: int) -> None:
+        """Delete any pending reviews for the current automation user to prevent review clutter.
+
+        This is a safety measure to clean up pending reviews that may have been left behind
+        by agents that use MCP tools (create_pending_pull_request_review) without submitting.
+        """
+        try:
+            # Extract owner and repo from repo_full
+            parts = repo_full.split("/")
+            if len(parts) != 2:
+                self.logger.warning(f"Cannot parse repo_full '{repo_full}' for pending review cleanup")
+                return
+
+            owner, repo = parts
+
+            # Fetch all reviews for the PR
+            reviews_cmd = [
+                "gh", "api",
+                f"repos/{owner}/{repo}/pulls/{pr_number}/reviews",
+                "--paginate", "-q", ".[]",
+            ]
+            result = AutomationUtils.execute_subprocess_with_timeout(
+                reviews_cmd, timeout=30, check=False
+            )
+
+            if result.returncode != 0:
+                self.logger.debug(f"Could not fetch reviews for pending cleanup: {result.stderr}")
+                return
+
+            # Parse reviews and find pending ones from automation user
+            pending_deleted = 0
+            for line in (result.stdout or "").splitlines():
+                if not line.strip():
+                    continue
+                try:
+                    review = json.loads(line)
+                    if review.get("state") == "PENDING":
+                        user_info = review.get("user")
+                        if isinstance(user_info, dict):
+                            review_user = user_info.get("login", "")
+                        else:
+                            review_user = ""
+                        if review_user == self.automation_username:
+                            review_id = review.get("id")
+                            if review_id:
+                                delete_cmd = [
+                                    "gh", "api",
+                                    f"repos/{owner}/{repo}/pulls/{pr_number}/reviews/{review_id}",
+                                    "-X", "DELETE",
+                                ]
+                                delete_result = AutomationUtils.execute_subprocess_with_timeout(
+                                    delete_cmd, timeout=30, check=False
+                                )
+                                if delete_result.returncode == 0:
+                                    self.logger.info(
+                                        f"🧹 Deleted pending review #{review_id} from {review_user} on PR #{pr_number}"
+                                    )
+                                    pending_deleted += 1
+                                else:
+                                    self.logger.debug(
+                                        f"Could not delete pending review #{review_id}: {delete_result.stderr}"
+                                    )
+                except json.JSONDecodeError:
+                    continue
+
+            if pending_deleted > 0:
+                self.logger.info(f"✅ Cleaned up {pending_deleted} pending review(s) on PR #{pr_number}")
+
+        except Exception as exc:
+            self.logger.debug(f"Pending review cleanup failed for PR #{pr_number}: {exc}")
+            # Non-fatal - continue with the workflow
+
     def post_codex_instruction_simple(self, repository: str, pr_number: int, pr_data: Dict) -> str:
         """Post codex instruction comment to PR"""
         repo_full = self._normalize_repository_name(repository)

@@ -1003,6 +1075,16 @@ Use your judgment to fix comments from everyone or explain why it should not be
             "Goal: address all review comments with explicit action-based replies, "
             "fix failing tests, and resolve merge conflicts.\n\n"
             f"CLI chain: {agent_cli}. Start immediately.\n\n"
+            "⚠️ CRITICAL - REVIEW COMMENT REPLY API (MUST READ):\n"
+            " To reply to inline review comments WITHOUT starting a pending review, use ONLY:\n"
+            f" `gh api /repos/{repository}/pulls/{pr_number}/comments -f body='...' -F in_reply_to={{comment_id}}`\n"
+            " This `/comments` endpoint with `in_reply_to` parameter creates a threaded reply WITHOUT starting a review.\n"
+            " ⚠️ IMPORTANT: Use `-f` for body (string) and `-F` for in_reply_to (numeric comment ID).\n\n"
+            " ❌ NEVER USE these (they create pending reviews that clutter the PR):\n"
+            " - `create_pending_pull_request_review` MCP tool\n"
+            " - `add_comment_to_pending_review` MCP tool\n"
+            " - `POST /repos/.../pulls/.../reviews` endpoint\n"
+            " - Any GitHub review workflow that requires 'submit'\n\n"
             "Steps:\n"
             f"1) gh pr checkout {pr_number}\n\n"
             "2) Fetch ALL PR feedback sources (pagination-safe) using correct GitHub API endpoints:\n"

@@ -1019,9 +1101,9 @@ Use your judgment to fix comments from everyone or explain why it should not be
             " - **DEFERRED**: Created issue for future work → include issue URL and reason\n"
             " - **ACKNOWLEDGED**: Noted but not actionable → include explanation\n"
             " - **NOT DONE**: Cannot implement → include specific technical reason\n\n"
-            " **Reply Methods
-            f" - Inline review comments: `gh api /repos/{repository}/pulls/{pr_number}/comments
-            f" - Issue/PR comments: `gh pr comment {pr_number} --body '[Response
+            " **Reply Methods (ONLY use these - no pending reviews!):**\n"
+            f" - Inline review comments: `gh api /repos/{repository}/pulls/{pr_number}/comments -f body='[Response]' -F in_reply_to={{comment_id}}`\n"
+            f" - Issue/PR comments: `gh pr comment {pr_number} --body '[Response]'`\n"
             " - Do NOT post mega-comments consolidating multiple responses; reply individually to each comment.\n\n"
             "4) Run tests and fix failures (block completion on critical/blocking test failures)\n\n"
             "5) Resolve merge conflicts (prefer merge over rebase)\n\n"

@@ -1451,13 +1533,6 @@ Use your judgment to fix comments from everyone or explain why it should not be
         if not branch:
             branch = f"pr-{pr_number}"

-        if model and "claude" not in agent_cli.lower():
-            self.logger.warning(
-                "⚠️ Model '%s' specified but agent CLI is '%s'. Model parameter is only supported for Claude.",
-                model,
-                agent_cli,
-            )
-
         head_sha = pr_data.get("headRefOid")
         task_description = self._build_fix_comment_prompt_body(
             repo_full,

@@ -1541,6 +1616,9 @@ Use your judgment to fix comments from everyone or explain why it should not be
             self._record_processed_pr(repo_name, branch_name, pr_number, head_sha)
             return "skipped"

+        # Cleanup any pending reviews left behind by previous automation runs
+        self._cleanup_pending_reviews(repo_full, pr_number)
+
         if not self.dispatch_fix_comment_agent(repo_full, pr_number, pr_data, agent_cli=agent_cli, model=model):
             return "failed"

@@ -1609,6 +1687,9 @@ Use your judgment to fix comments from everyone or explain why it should not be
            )
            return "skipped"

+        # Cleanup any pending reviews left behind by previous automation runs
+        self._cleanup_pending_reviews(repo_full, pr_number)
+
        # Dispatch agent for fixpr
        try:
            base_dir = ensure_base_clone(repo_full)

@@ -2146,8 +2227,10 @@ Use your judgment to fix comments from everyone or explain why it should not be
        repo_full = self._normalize_repository_name(repository)
        self.logger.info(f"🎯 Processing target PR: {repo_full} #{pr_number}")

-        # Check global automation limits
-
+        # Check global automation limits (fixpr uses per-PR limits only)
+        # Note: fixpr workflow bypasses global limit check - it uses per-PR fixpr_limit instead
+        # This allows fixpr to process PRs independently based on per-PR comment counts
+        if not fixpr and not self.safety_manager.can_start_global_run():
            self.logger.warning("🚫 Global automation limit reached - cannot process target PR")
            return False

@@ -2268,7 +2351,9 @@ Use your judgment to fix comments from everyone or explain why it should not be
        mode_label = "fix-comment" if fix_comment else ("fixpr" if fixpr else "comment")
        self.logger.info("🚀 Starting jleechanorg PR monitoring cycle (%s mode)", mode_label)

-
+        # FixPR workflow uses per-PR limits only, not global limit
+        # Other workflows (fix-comment, pr_automation) still check global limit
+        if not fixpr and not self.safety_manager.can_start_global_run():
            current_runs = self.safety_manager.get_global_runs()
            self.logger.warning(
                "🚫 Global automation limit reached %s/%s",

@@ -2712,7 +2797,7 @@ def main():
        "--model",
        type=str,
        default=None,
-        help="Model to use for
+        help="Model to use for agent CLI. Examples: sonnet/opus/haiku (Claude), gemini-3-pro-preview/gemini-3-auto (Gemini), composer-1 (Cursor). If not specified, CLI-specific defaults are used.",
    )
    parser.add_argument("--list-eligible", action="store_true",
                        help="Dry-run listing of PRs eligible for fixpr (conflicts/failing checks)")

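For orientation, here is a minimal, self-contained sketch of the cleanup flow that the new `_cleanup_pending_reviews` method adds to the monitor, assuming an authenticated `gh` CLI on PATH; the function name and argument list below are illustrative and not part of the package API.

```python
#!/usr/bin/env python3
"""Hedged sketch: delete a bot's leftover PENDING reviews on a PR via the gh CLI."""
import json
import subprocess


def cleanup_pending_reviews(owner: str, repo: str, pr_number: int, automation_user: str) -> int:
    """Return how many pending reviews owned by automation_user were deleted."""
    # List every review on the PR, one JSON object per line (jq filter ".[]").
    listing = subprocess.run(
        ["gh", "api", f"repos/{owner}/{repo}/pulls/{pr_number}/reviews", "--paginate", "-q", ".[]"],
        capture_output=True, text=True, check=False,
    )
    if listing.returncode != 0:
        return 0

    deleted = 0
    for line in listing.stdout.splitlines():
        if not line.strip():
            continue
        review = json.loads(line)
        user = review.get("user") or {}
        if review.get("state") == "PENDING" and user.get("login") == automation_user:
            # DELETE on .../reviews/{review_id} removes a review that was never submitted.
            subprocess.run(
                ["gh", "api", f"repos/{owner}/{repo}/pulls/{pr_number}/reviews/{review['id']}", "-X", "DELETE"],
                check=False,
            )
            deleted += 1
    return deleted
```

The packaged version routes the same two `gh api` calls through `AutomationUtils.execute_subprocess_with_timeout` and the monitor's logger instead of calling `subprocess` directly.
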
{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/openai_automation/codex_github_mentions.py

@@ -21,6 +21,7 @@ Usage:

 import argparse
 import asyncio
+import json
 import logging
 import sys
 import time

@@ -28,6 +29,7 @@ import traceback
 from datetime import datetime
 from pathlib import Path
 from typing import Dict, List, Optional, Set
+from urllib.parse import urlparse

 from playwright.async_api import (
     Browser,

@@ -283,6 +285,149 @@ class CodexGitHubMentionsAutomation:

             return True
         except PlaywrightTimeoutError:
+            # If not logged in, try to restore from auth state file first (even in CDP mode)
+            if AUTH_STATE_PATH.exists():
+                print(f"🔄 Not logged in. Attempting to restore auth state from {AUTH_STATE_PATH}...")
+                try:
+                    _ensure_auth_state_permissions(AUTH_STATE_PATH)
+                    state_content = AUTH_STATE_PATH.read_text()
+                    state_data = json.loads(state_content)
+
+                    cookies = state_data.get("cookies")
+                    if isinstance(cookies, list):
+                        valid_cookies = []
+                        # Required fields for Playwright add_cookies
+                        # Must have name, value AND (url OR (domain AND path))
+                        required_fields = {"name", "value"}
+                        domain_fields = {"domain", "path"}
+
+                        for cookie in cookies:
+                            if not isinstance(cookie, dict):
+                                logger.warning("Skipping non-dict cookie entry")
+                                continue
+
+                            # Check basic fields
+                            if not required_fields.issubset(cookie.keys()):
+                                cookie_name = cookie.get("name", "<unknown>")
+                                logger.warning(
+                                    "Skipping malformed cookie '%s' missing required fields %s",
+                                    cookie_name,
+                                    required_fields,
+                                )
+                                continue
+
+                            # Check domain/path vs url constraint
+                            has_url = "url" in cookie
+                            has_domain_and_path = domain_fields.issubset(cookie.keys())
+
+                            if not (has_url or has_domain_and_path):
+                                cookie_name = cookie.get("name", "<unknown>")
+                                logger.warning(
+                                    "Skipping cookie '%s' missing either 'url' or both 'domain' and 'path'",
+                                    cookie_name,
+                                )
+                                continue
+
+                            valid_cookies.append(cookie)
+
+                        if valid_cookies:
+                            await self.context.add_cookies(valid_cookies)
+                            print("✅ Injected cookies from auth state file")
+                            logger.info(
+                                "Injected %d cookies from auth state file %s",
+                                len(valid_cookies),
+                                AUTH_STATE_PATH,
+                            )
+
+                            # Restore localStorage from origins
+                            origins = state_data.get("origins", [])
+                            if origins:
+                                try:
+                                    current_url = self.page.url
+                                    current_parsed = urlparse(current_url)
+                                    injected_origins = 0
+
+                                    for origin_data in origins:
+                                        origin = origin_data.get("origin")
+                                        if not origin:
+                                            continue
+
+                                        # Use exact origin matching (scheme + netloc)
+                                        origin_parsed = urlparse(origin)
+                                        origin_matches = (
+                                            current_parsed.scheme == origin_parsed.scheme
+                                            and current_parsed.netloc == origin_parsed.netloc
+                                        )
+
+                                        if origin_matches:
+                                            logger.info(f"Restoring localStorage for origin {origin}")
+                                            storage_items = origin_data.get("localStorage", [])
+                                            items_injected = 0
+                                            if storage_items:
+                                                for item in storage_items:
+                                                    key = item.get("name")
+                                                    value = item.get("value")
+                                                    # Allow empty strings as valid values (use None check)
+                                                    if key is not None and value is not None:
+                                                        await self.page.evaluate(
+                                                            f"window.localStorage.setItem({json.dumps(key)}, {json.dumps(value)})"
+                                                        )
+                                                        items_injected += 1
+                                            if items_injected > 0:
+                                                injected_origins += 1
+
+                                    if injected_origins > 0:
+                                        print(f"✅ Injected localStorage for {injected_origins} origin(s)")
+                                        logger.info(f"Injected localStorage for {injected_origins} origin(s)")
+                                except Exception as storage_err:
+                                    logger.warning(f"Failed to restore localStorage: {storage_err}")
+                                    print(f"⚠️ Failed to restore localStorage: {storage_err}")
+
+                            # Refresh page to apply cookies and storage
+                            await self.page.reload(wait_until="domcontentloaded")
+                            await asyncio.sleep(3)
+
+                            # Check login again
+                            try:
+                                await self.page.wait_for_selector(
+                                    'button[aria-label*="User"], [data-testid="profile-button"]',
+                                    timeout=5000,
+                                )
+                                print("✅ Successfully restored session from auth state")
+                                return True
+                            except PlaywrightTimeoutError:
+                                print("⚠️ Session restore failed - cookies might be expired")
+                        else:
+                            print("⚠️ No valid cookies found in auth state file")
+                            logger.warning(
+                                "No valid cookies found in auth state file %s; skipping cookie injection",
+                                AUTH_STATE_PATH,
+                            )
+                    else:
+                        if "cookies" not in state_data:
+                            print("⚠️ No 'cookies' key found in auth state file")
+                            logger.warning(
+                                "Auth state file %s has no 'cookies' key",
+                                AUTH_STATE_PATH,
+                            )
+                        elif cookies is None:
+                            print("⚠️ Cookies are null in auth state file")
+                            logger.warning(
+                                "Auth state file %s has null 'cookies' value",
+                                AUTH_STATE_PATH,
+                            )
+                        else:
+                            print("⚠️ Invalid cookies format in auth state file (expected list)")
+                            logger.warning(
+                                "Invalid cookies format in auth state file %s: expected list, got %s",
+                                AUTH_STATE_PATH,
+                                type(cookies).__name__,
+                            )
+
+                except Exception as restore_err:
+                    logger.exception("Failed to restore auth state from %s", AUTH_STATE_PATH)
+                    print(f"⚠️ Failed to restore auth state: {restore_err!r}")
+
             try:
                 await self.page.wait_for_selector(
                     'text="Log in", button:has-text("Log in")',

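The block above injects cookies and localStorage by hand because it must reuse an already-running (CDP-attached) context. When a fresh browser context can be created instead, Playwright can load the same storage-state JSON directly; the following is a hedged sketch of that simpler path, assuming the file follows Playwright's `storage_state` format, and the file path shown is hypothetical rather than the package's `AUTH_STATE_PATH`.

```python
import asyncio

from playwright.async_api import async_playwright

AUTH_STATE_FILE = "auth_state.json"  # hypothetical path; the package defines its own AUTH_STATE_PATH


async def open_authenticated_page() -> None:
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        # storage_state restores both cookies and localStorage origins that were
        # saved earlier via context.storage_state(path=...).
        context = await browser.new_context(storage_state=AUTH_STATE_FILE)
        page = await context.new_page()
        await page.goto("https://chatgpt.com/", wait_until="domcontentloaded")
        await page.close()
        await browser.close()


if __name__ == "__main__":
    asyncio.run(open_authenticated_page())
```
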
jleechanorg_pr_automation-0.2.48/jleechanorg_pr_automation/openai_automation/test_auth_restoration.py
ADDED

@@ -0,0 +1,244 @@
+#!/usr/bin/env python3
+"""
+Unit tests for authentication state restoration in CodexGitHubMentionsAutomation.
+Focuses on cookie validation, localStorage restoration, and error handling.
+"""
+
+import json
+from unittest.mock import AsyncMock, Mock, patch
+import pytest
+from playwright.async_api import TimeoutError as PlaywrightTimeoutError
+from jleechanorg_pr_automation.openai_automation.codex_github_mentions import (
+    CodexGitHubMentionsAutomation,
+    AUTH_STATE_PATH,
+)
+
+@pytest.fixture
+def automation():
+    """Create automation instance with mocked browser/context/page."""
+    auto = CodexGitHubMentionsAutomation()
+    auto.context = AsyncMock()
+
+    # Setup page mock
+    page_mock = AsyncMock()
+    # is_closed is synchronous in Playwright
+    page_mock.is_closed = Mock(return_value=False)
+    page_mock.url = "https://chatgpt.com/c/123"
+
+    auto.page = page_mock
+    return auto
+
+@pytest.mark.asyncio
+async def test_auth_restoration_cookies_and_localstorage(automation):
+    """Test full restoration of cookies and localStorage from valid auth state."""
+
+    mock_state = {
+        "cookies": [
+            {
+                "name": "session_token",
+                "value": "xyz",
+                "domain": ".chatgpt.com",
+                "path": "/"
+            }
+        ],
+        "origins": [
+            {
+                "origin": "https://chatgpt.com",
+                "localStorage": [
+                    {"name": "theme", "value": "dark"},
+                    {"name": "feature_flags", "value": "true"}
+                ]
+            }
+        ]
+    }
+
+    # Mock file existence and read
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions") as mock_perms:
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+        mock_path.__str__.return_value = "/tmp/fake_path"
+
+        # Configure wait_for_selector to fail first, then succeed
+        automation.page.wait_for_selector.side_effect = [
+            PlaywrightTimeoutError("Not logged in"),
+            True
+        ]
+
+        result = await automation.ensure_openai_login()
+
+        # Verify permissions check
+        mock_perms.assert_called_once_with(mock_path)
+
+        # Verify cookie injection
+        automation.context.add_cookies.assert_awaited_once_with(mock_state["cookies"])
+
+        # Verify localStorage injection via page.evaluate
+        assert automation.page.evaluate.call_count == 2
+
+        # Verify calls contain the correct keys/values
+        call_args = automation.page.evaluate.await_args_list
+        # Note: calls might be in any order if list iteration order varies, but list is ordered here
+        assert 'setItem("theme", "dark")' in call_args[0][0][0]
+        assert 'setItem("feature_flags", "true")' in call_args[1][0][0]
+
+        assert result is True
+
+@pytest.mark.asyncio
+async def test_auth_restoration_origin_mismatch(automation):
+    """Test that localStorage is NOT injected if origin doesn't match."""
+
+    mock_state = {
+        "cookies": [{"name": "c", "value": "v", "domain": "chatgpt.com", "path": "/"}],
+        "origins": [
+            {
+                "origin": "https://other-domain.com",
+                "localStorage": [{"name": "secret", "value": "fail"}]
+            }
+        ]
+    }
+
+    # Page URL is chatgpt.com (from fixture), so origin shouldn't match
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        automation.page.wait_for_selector.side_effect = [PlaywrightTimeoutError("Fail"), True]
+
+        await automation.ensure_openai_login()
+
+        # Should NOT call evaluate to set items
+        automation.page.evaluate.assert_not_awaited()
+
+@pytest.mark.asyncio
+async def test_auth_restoration_secure_origin_matching(automation):
+    """Test that subdomain matching prevents injection into wrong subdomains."""
+
+    # State has origin https://chatgpt.com
+    mock_state = {
+        "cookies": [{"name": "c", "value": "v", "domain": "chatgpt.com", "path": "/"}],
+        "origins": [
+            {
+                "origin": "https://chatgpt.com",
+                "localStorage": [{"name": "key", "value": "val"}]
+            }
+        ]
+    }
+
+    # 1. Malicious subdomain matching test
+    automation.page.url = "https://chatgpt.com.evil.com/login"
+
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        automation.page.wait_for_selector = AsyncMock(side_effect=[
+            PlaywrightTimeoutError("Fail"),
+            True
+        ])
+        await automation.ensure_openai_login()
+        automation.page.evaluate.assert_not_awaited()
+
+    # 2. Correct domain test
+    automation.page.url = "https://chatgpt.com/c/123"
+
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        # Reset mock for second run
+        automation.page.evaluate = AsyncMock()
+        automation.page.wait_for_selector = AsyncMock(side_effect=[
+            PlaywrightTimeoutError("Fail"),
+            True
+        ])
+
+        await automation.ensure_openai_login()
+        automation.page.evaluate.assert_awaited()
+
+@pytest.mark.asyncio
+async def test_auth_restoration_cookie_validation(automation):
+    """Test validation of cookies (missing fields, incomplete data)."""
+
+    mock_state = {
+        "cookies": [
+            {"name": "valid", "value": "1", "domain": ".com", "path": "/"}, # Valid
+            {"name": "bad1"}, # Missing value
+            {"name": "bad2", "value": "2"}, # Missing domain/path AND url
+            {"not_a_dict": True}, # Invalid type
+        ]
+    }
+
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        automation.page.wait_for_selector.side_effect = [PlaywrightTimeoutError("Fail"), True]
+
+        await automation.ensure_openai_login()
+
+        # Should only inject the one valid cookie
+        automation.context.add_cookies.assert_awaited_once()
+        call_args = automation.context.add_cookies.call_args[0][0]
+        assert len(call_args) == 1
+        assert call_args[0]["name"] == "valid"
+
+@pytest.mark.asyncio
+async def test_auth_restoration_null_cookies(automation):
+    """Test handling of 'cookies': null in JSON."""
+
+    mock_state = {"cookies": None}
+
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        automation.page.wait_for_selector.side_effect = [PlaywrightTimeoutError("Fail"), True]
+
+        await automation.ensure_openai_login()
+
+        # Should not crash, should not call add_cookies
+        automation.context.add_cookies.assert_not_awaited()
+
+@pytest.mark.asyncio
+async def test_auth_restoration_empty_localstorage_values(automation):
+    """Test that empty string values in localStorage are preserved (not skipped)."""
+
+    mock_state = {
+        "cookies": [{"name": "c", "value": "v", "domain": "chatgpt.com", "path": "/"}],
+        "origins": [
+            {
+                "origin": "https://chatgpt.com",
+                "localStorage": [
+                    {"name": "empty_val", "value": ""}, # Should be kept
+                    {"name": "null_val", "value": None} # Should be skipped if logic checks for None
+                ]
+            }
+        ]
+    }
+
+    with patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions.AUTH_STATE_PATH") as mock_path, \
+         patch("jleechanorg_pr_automation.openai_automation.codex_github_mentions._ensure_auth_state_permissions"):
+
+        mock_path.exists.return_value = True
+        mock_path.read_text.return_value = json.dumps(mock_state)
+
+        automation.page.wait_for_selector.side_effect = [PlaywrightTimeoutError("Fail"), True]
+        await automation.ensure_openai_login()
+
+        # Should call evaluate for empty string value
+        # But NOT for None value (logic: if key is not None and value is not None)
+        assert automation.page.evaluate.call_count == 1
+        call_arg = automation.page.evaluate.call_args[0][0]
+        assert 'setItem("empty_val", "")' in call_arg

{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/orchestrated_pr_runner.py

@@ -35,6 +35,26 @@ def log(msg: str) -> None:
     print(f"{LOG_PREFIX} {msg}")


+def display_log_viewing_command(session_name: str) -> None:
+    """Display formatted log viewing commands for the given session."""
+    # Use same path resolution as task_dispatcher (relative to orchestration module)
+    try:
+        import orchestration.task_dispatcher
+        script_path = Path(orchestration.task_dispatcher.__file__).resolve().parent / "stream_logs.sh"
+    except (ImportError, AttributeError):
+        # Fallback to relative path if orchestration module not available
+        script_path = Path(__file__).resolve().parent.parent.parent / "orchestration" / "stream_logs.sh"
+
+    if script_path.exists():
+        log("")
+        log("📺 View formatted logs:")
+        log(f" {script_path} {session_name}")
+        log("")
+        log(" Or use the shorter command:")
+        log(f" ./orchestration/stream_logs.sh {session_name}")
+        log("")
+
+
 def run_cmd(
     cmd: List[str],
     cwd: Optional[Path] = None,

@@ -342,18 +362,16 @@ def dispatch_agent_for_pr_with_task(
            "workspace_name": workspace_name,
        },
    )
-    # Inject model parameter
+    # Inject model parameter for all CLIs in the chain (Gemini, Claude, Cursor, etc.)
    if model:
-
-        if "
-
-
-            log(f"❌ Invalid model name requested: {raw_model!r}")
-            return False
-        agent_spec["model"] = raw_model
+        raw_model = str(model).strip()
+        if not re.fullmatch(r"[A-Za-z0-9_.-]+", raw_model):
+            log(f"❌ Invalid model name requested: {raw_model!r}")
+            return False
    ok = dispatcher.create_dynamic_agent(agent_spec)
    if ok:
        log(f"Spawned agent for {repo_full}#{pr_number} at /tmp/{repo}/{workspace_name}")
+        display_log_viewing_command(session_name)
        success = True
    else:
        log(f"Failed to spawn agent for {repo_full}#{pr_number}")

@@ -394,6 +412,16 @@ def dispatch_agent_for_pr(
        f"FIXPR TASK (SELF-CONTAINED): Update PR #{pr_number} in {repo_full} (branch {branch}). "
        "Goal: resolve merge conflicts and failing checks. Also review and address any reviewer feedback that is blocking CI or mergeability. "
        f"CLI chain: {agent_cli}. DO NOT wait for additional input—start immediately.\n\n"
+        "⚠️ CRITICAL - REVIEW COMMENT REPLY API (MUST READ):\n"
+        " To reply to inline review comments WITHOUT starting a pending review, use ONLY:\n"
+        f" `gh api /repos/{repo_full}/pulls/{pr_number}/comments -f body='...' -F in_reply_to={{comment_id}}`\n"
+        " This `/comments` endpoint with `in_reply_to` parameter creates a threaded reply WITHOUT starting a review.\n"
+        " ⚠️ IMPORTANT: Use `-f` for body (string) and `-F` for in_reply_to (numeric comment ID).\n\n"
+        " ❌ NEVER USE these (they create pending reviews that clutter the PR):\n"
+        " - `create_pending_pull_request_review` MCP tool\n"
+        " - `add_comment_to_pending_review` MCP tool\n"
+        " - `POST /repos/.../pulls/.../reviews` endpoint\n"
+        " - Any GitHub review workflow that requires 'submit'\n\n"
        "If /fixpr is unavailable, follow these steps explicitly (fallback for all CLIs including Claude):\n"
        f"1) gh pr checkout {pr_number}\n"
        "2) git status && git branch --show-current\n"

@@ -433,6 +461,7 @@ def dispatch_agent_for_pr(
    ok = dispatcher.create_dynamic_agent(agent_spec)
    if ok:
        log(f"Spawned agent for {repo_full}#{pr_number} at /tmp/{repo}/{workspace_name}")
+        display_log_viewing_command(session_name)
        success = True
    else:
        log(f"Failed to spawn agent for {repo_full}#{pr_number}")

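Both updated prompts above center on one REST call: a threaded reply to an inline review comment that never opens a pending review. Here is a minimal, hedged sketch of that call from Python, assuming an authenticated `gh` CLI; the helper name and signature are illustrative only and do not exist in the package.

```python
import subprocess


def reply_to_review_comment(repo_full: str, pr_number: int, comment_id: int, body: str) -> None:
    """Post a threaded reply to an inline review comment without creating a pending review."""
    subprocess.run(
        [
            "gh", "api",
            f"repos/{repo_full}/pulls/{pr_number}/comments",
            "-f", f"body={body}",               # -f sends a raw string field
            "-F", f"in_reply_to={comment_id}",  # -F sends a typed (numeric) field
        ],
        check=True,
    )


# Example (hypothetical values): reply_to_review_comment("owner/repo", 123, 456789, "Done in abc123.")
```
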
jleechanorg_pr_automation-0.2.48/jleechanorg_pr_automation/tests/test_cleanup_pending_reviews.py
ADDED

@@ -0,0 +1,320 @@
+#!/usr/bin/env python3
+"""
+Tests for _cleanup_pending_reviews method and prompt API endpoint verification.
+
+Tests ensure:
+1. _cleanup_pending_reviews correctly identifies and deletes pending reviews
+2. Prompts contain the correct API endpoint for threaded replies
+"""
+
+import json
+import unittest
+from pathlib import Path
+from types import SimpleNamespace
+from unittest.mock import Mock, patch
+
+from automation.jleechanorg_pr_automation import orchestrated_pr_runner as runner
+from jleechanorg_pr_automation.jleechanorg_pr_monitor import JleechanorgPRMonitor
+
+
+class TestCleanupPendingReviews(unittest.TestCase):
+    """Test _cleanup_pending_reviews method"""
+
+    def setUp(self):
+        """Set up test environment"""
+        with patch('jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationSafetyManager'):
+            self.monitor = JleechanorgPRMonitor(automation_username="test-automation-user")
+        self.monitor.logger = Mock()
+
+    def test_cleanup_pending_reviews_deletes_pending_reviews(self):
+        """Test that pending reviews from automation user are deleted"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        # Mock reviews API response with pending review from automation user
+        reviews_response = [
+            {
+                "id": 1001,
+                "state": "PENDING",
+                "user": {"login": "test-automation-user"},
+            },
+            {
+                "id": 1002,
+                "state": "APPROVED",
+                "user": {"login": "test-automation-user"},
+            },
+            {
+                "id": 1003,
+                "state": "PENDING",
+                "user": {"login": "other-user"},
+            },
+        ]
+
+        delete_calls = []
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            if "reviews" in " ".join(cmd) and "DELETE" not in cmd:
+                # Fetch reviews command
+                return SimpleNamespace(
+                    returncode=0,
+                    stdout="\n".join(json.dumps(r) for r in reviews_response),
+                    stderr="",
+                )
+            elif "DELETE" in cmd:
+                # Delete review command
+                delete_calls.append(cmd)
+                return SimpleNamespace(returncode=0, stdout="", stderr="")
+            return SimpleNamespace(returncode=0, stdout="", stderr="")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        # Should only delete pending review from automation user (id 1001)
+        assert len(delete_calls) == 1, f"Expected 1 delete call, got {len(delete_calls)}"
+        assert "1001" in " ".join(delete_calls[0]), "Should delete review #1001"
+        self.monitor.logger.info.assert_any_call(
+            "🧹 Deleted pending review #1001 from test-automation-user on PR #123"
+        )
+        self.monitor.logger.info.assert_any_call("✅ Cleaned up 1 pending review(s) on PR #123")
+
+    def test_cleanup_pending_reviews_handles_no_pending_reviews(self):
+        """Test that method handles case with no pending reviews gracefully"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        # Mock reviews API response with no pending reviews
+        reviews_response = [
+            {"id": 1001, "state": "APPROVED", "user": {"login": "test-automation-user"}},
+            {"id": 1002, "state": "COMMENTED", "user": {"login": "other-user"}},
+        ]
+
+        delete_calls = []
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            if "reviews" in " ".join(cmd) and "DELETE" not in cmd:
+                return SimpleNamespace(
+                    returncode=0,
+                    stdout="\n".join(json.dumps(r) for r in reviews_response),
+                    stderr="",
+                )
+            elif "DELETE" in cmd:
+                delete_calls.append(cmd)
+            return SimpleNamespace(returncode=0, stdout="", stderr="")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        # Should not delete anything
+        assert len(delete_calls) == 0, "Should not delete any reviews when none are pending"
+        # Should not log cleanup success message
+        cleanup_logs = [
+            call.args[0] for call in self.monitor.logger.info.call_args_list if "Cleaned up" in call.args[0]
+        ]
+        assert len(cleanup_logs) == 0, "Should not log cleanup when no reviews deleted"
+
+    def test_cleanup_pending_reviews_handles_api_failure(self):
+        """Test that method handles API failure gracefully"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            if "reviews" in " ".join(cmd) and "DELETE" not in cmd:
+                # Simulate API failure
+                return SimpleNamespace(returncode=1, stdout="", stderr="API error")
+            return SimpleNamespace(returncode=0, stdout="", stderr="")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            # Should not raise exception
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        self.monitor.logger.debug.assert_called()
+
+    def test_cleanup_pending_reviews_handles_invalid_repo_format(self):
+        """Test that method handles invalid repo_full format"""
+        repo_full = "invalid-format"
+        pr_number = 123
+
+        self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        self.monitor.logger.warning.assert_called_with(
+            "Cannot parse repo_full 'invalid-format' for pending review cleanup"
+        )
+
+    def test_cleanup_pending_reviews_handles_exception(self):
+        """Test that method handles exceptions gracefully"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            raise Exception("Unexpected error")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            # Should not raise exception
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        self.monitor.logger.debug.assert_called()
+
+    def test_cleanup_pending_reviews_deletes_multiple_pending_reviews(self):
+        """Test that multiple pending reviews from automation user are all deleted"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        # Mock reviews API response with multiple pending reviews from automation user
+        reviews_response = [
+            {"id": 1001, "state": "PENDING", "user": {"login": "test-automation-user"}},
+            {"id": 1002, "state": "PENDING", "user": {"login": "test-automation-user"}},
+            {"id": 1003, "state": "PENDING", "user": {"login": "other-user"}},
+        ]
+
+        delete_calls = []
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            if "reviews" in " ".join(cmd) and "DELETE" not in cmd:
+                return SimpleNamespace(
+                    returncode=0,
+                    stdout="\n".join(json.dumps(r) for r in reviews_response),
+                    stderr="",
+                )
+            elif "DELETE" in cmd:
+                delete_calls.append(cmd)
+                return SimpleNamespace(returncode=0, stdout="", stderr="")
+            return SimpleNamespace(returncode=0, stdout="", stderr="")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        # Should delete both pending reviews from automation user
+        assert len(delete_calls) == 2, f"Expected 2 delete calls, got {len(delete_calls)}"
+        self.monitor.logger.info.assert_any_call("✅ Cleaned up 2 pending review(s) on PR #123")
+
+    def test_cleanup_pending_reviews_handles_null_user(self):
+        """Test that method handles null user gracefully (prevents AttributeError)"""
+        repo_full = "owner/repo"
+        pr_number = 123
+
+        # Mock reviews API response with pending review with null user
+        reviews_response = [
+            {
+                "id": 1001,
+                "state": "PENDING",
+                "user": None, # API can return null user
+            },
+            {
+                "id": 1002,
+                "state": "PENDING",
+                "user": {"login": "test-automation-user"},
+            },
+        ]
+
+        delete_calls = []
+
+        def mock_execute_subprocess(cmd, timeout=None, check=False):
+            if "reviews" in " ".join(cmd) and "DELETE" not in cmd:
+                return SimpleNamespace(
+                    returncode=0,
+                    stdout="\n".join(json.dumps(r) for r in reviews_response),
+                    stderr="",
+                )
+            elif "DELETE" in cmd:
+                delete_calls.append(cmd)
+                return SimpleNamespace(returncode=0, stdout="", stderr="")
+            return SimpleNamespace(returncode=0, stdout="", stderr="")
+
+        with patch(
+            "jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationUtils.execute_subprocess_with_timeout",
+            side_effect=mock_execute_subprocess,
+        ):
+            # Should not raise AttributeError
+            self.monitor._cleanup_pending_reviews(repo_full, pr_number)
+
+        # Should only delete pending review from automation user (id 1002), not null user (id 1001)
+        assert len(delete_calls) == 1, f"Expected 1 delete call, got {len(delete_calls)}"
+        assert "1002" in " ".join(delete_calls[0]), "Should delete review #1002 (with valid user)"
+        assert "1001" not in " ".join(delete_calls[0]), "Should NOT delete review #1001 (null user)"
+
+
+class TestPromptAPIEndpoint(unittest.TestCase):
+    """Test that prompts contain correct API endpoint"""
+
+    def setUp(self):
+        """Set up test environment"""
+        with patch('jleechanorg_pr_automation.jleechanorg_pr_monitor.AutomationSafetyManager'):
+            self.monitor = JleechanorgPRMonitor(automation_username="test-automation-user")
+
+    def test_fix_comment_prompt_contains_correct_endpoint(self):
+        """Test that fix-comment prompt contains correct API endpoint"""
+        repository = "owner/repo"
+        pr_number = 123
+        pr_data = {"headRefName": "feature/branch"}
+        head_sha = "abc123def456"
+
+        prompt = self.monitor._build_fix_comment_prompt_body(
+            repository, pr_number, pr_data, head_sha, agent_cli="claude"
+        )
+
+        # Should contain correct endpoint: /comments with in_reply_to parameter
+        assert "/pulls/123/comments" in prompt, "Prompt should contain /comments endpoint"
+        assert "in_reply_to" in prompt, "Prompt should contain in_reply_to parameter"
+        assert "-F in_reply_to" in prompt, "Prompt should use -F flag for numeric parameter"
+        assert "-f body" in prompt, "Prompt should use -f flag for string parameter"
+
+        # Should NOT contain incorrect endpoint
+        assert "/comments/{comment_id}/replies" not in prompt, "Prompt should NOT contain incorrect /replies endpoint"
+
+    def test_orchestrated_pr_runner_prompt_contains_correct_endpoint(self):
+        """Test that orchestrated_pr_runner prompt contains correct API endpoint"""
+        repo_full = "owner/repo"
+        pr_number = 123
+        branch = "feature/branch"
+        agent_cli = "claude"
+
+        # Get the task description from dispatch_agent_for_pr
+        class FakeDispatcher:
+            def __init__(self):
+                self.task_description = None
+
+            def analyze_task_and_create_agents(self, task_description, forced_cli=None):
+                self.task_description = task_description
+                return [{"id": "agent"}]
+
+            def create_dynamic_agent(self, spec):
+                return True
+
+        dispatcher = FakeDispatcher()
+        pr = {"repo_full": repo_full, "repo": "repo", "number": pr_number, "branch": branch}
+
+        with patch.object(runner, "WORKSPACE_ROOT_BASE", Path("/tmp")):
+            with patch.object(runner, "kill_tmux_session_if_exists", lambda _: None):
+                with patch.object(runner, "prepare_workspace_dir", lambda repo, name: None):
+                    runner.dispatch_agent_for_pr(dispatcher, pr, agent_cli=agent_cli)
+
+        assert dispatcher.task_description is not None, "Task description should be set"
+        prompt = dispatcher.task_description
+
+        # Should contain correct endpoint: /comments with in_reply_to parameter
+        assert "/pulls/123/comments" in prompt, "Prompt should contain /comments endpoint"
+        assert "in_reply_to" in prompt, "Prompt should contain in_reply_to parameter"
+        assert "-F in_reply_to" in prompt, "Prompt should use -F flag for numeric parameter"
+        assert "-f body" in prompt, "Prompt should use -f flag for string parameter"
+
+        # Should NOT contain incorrect endpoint
+        assert "/comments/{comment_id}/replies" not in prompt, "Prompt should NOT contain incorrect /replies endpoint"
+
+
+if __name__ == "__main__":
+    unittest.main()

{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_model_parameter.py

@@ -10,7 +10,10 @@ import unittest
 from types import SimpleNamespace
 from unittest.mock import MagicMock, patch

-from jleechanorg_pr_automation.jleechanorg_pr_monitor import
+from jleechanorg_pr_automation.jleechanorg_pr_monitor import (
+    JleechanorgPRMonitor,
+    _normalize_model,
+)


 class TestModelParameter(unittest.TestCase):

@@ -228,15 +231,11 @@ class TestModelParameter(unittest.TestCase):

     def test_normalize_model_none_returns_none(self):
         """Test that _normalize_model returns None for None input."""
-        from jleechanorg_pr_automation.jleechanorg_pr_monitor import _normalize_model
-
         result = _normalize_model(None)
         self.assertIsNone(result)

     def test_normalize_model_empty_string_returns_none(self):
         """Test that _normalize_model returns None for empty string."""
-        from jleechanorg_pr_automation.jleechanorg_pr_monitor import _normalize_model
-
         result = _normalize_model("")
         self.assertIsNone(result)

@@ -245,8 +244,6 @@ class TestModelParameter(unittest.TestCase):

     def test_normalize_model_valid_names(self):
         """Test that _normalize_model accepts valid model names."""
-        from jleechanorg_pr_automation.jleechanorg_pr_monitor import _normalize_model
-
         valid_models = [
             "sonnet",
             "opus",

@@ -272,8 +269,6 @@ class TestModelParameter(unittest.TestCase):

     def test_normalize_model_invalid_names_raises_error(self):
         """Test that _normalize_model rejects invalid model names."""
-        from jleechanorg_pr_automation.jleechanorg_pr_monitor import _normalize_model
-
         invalid_models = [
             "model with spaces",
             "model@invalid",

@@ -311,8 +306,6 @@ class TestModelParameter(unittest.TestCase):

     def test_normalize_model_strips_whitespace(self):
         """Test that _normalize_model strips whitespace from valid names."""
-        from jleechanorg_pr_automation.jleechanorg_pr_monitor import _normalize_model
-
         result = _normalize_model(" sonnet ")
         self.assertEqual(result, "sonnet")

{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/tests/test_pr_targeting.py

@@ -190,7 +190,7 @@ class TestPRTargeting(unittest.TestCase):
        self.assertIn("review comments", prompt)
        self.assertIn("issue comments", prompt)
        # After fix for comment #2669657213, prompt clarifies:
-        # - Inline review comments use: /pulls/{pr_number}/comments
+        # - Inline review comments use: /pulls/{pr_number}/comments with -F in_reply_to={comment_id}
        # - Issue comments don't support threading (top-level comments only)
        self.assertIn("pulls/42/comments", prompt)  # Updated to match actual PR number in prompt
        self.assertIn("reply individually to each comment", prompt)  # Issue comments clarification

{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation/utils.py

@@ -189,7 +189,7 @@ def get_automation_limits_with_overrides(overrides: Optional[Mapping[str, Any]]
    defaults: Dict[str, int] = {
        # Global PR limit: counts ALL attempts across ALL workflows
        "pr_limit": pr_limit,
-        "global_limit": coerce_positive_int(os.getenv("AUTOMATION_GLOBAL_LIMIT"), default=
+        "global_limit": coerce_positive_int(os.getenv("AUTOMATION_GLOBAL_LIMIT"), default=100),
        "approval_hours": 24,
        "subprocess_timeout": 300,
        # Workflow-specific limits: 10 attempts per workflow (counts ALL attempts)

{jleechanorg_pr_automation-0.2.41 → jleechanorg_pr_automation-0.2.48}/jleechanorg_pr_automation.egg-info/SOURCES.txt

@@ -22,6 +22,7 @@ jleechanorg_pr_automation/openai_automation/__init__.py
 jleechanorg_pr_automation/openai_automation/codex_github_mentions.py
 jleechanorg_pr_automation/openai_automation/debug_page_content.py
 jleechanorg_pr_automation/openai_automation/oracle_cli.py
+jleechanorg_pr_automation/openai_automation/test_auth_restoration.py
 jleechanorg_pr_automation/openai_automation/test_codex_comprehensive.py
 jleechanorg_pr_automation/openai_automation/test_codex_integration.py
 jleechanorg_pr_automation/tests/__init__.py

@@ -32,6 +33,7 @@ jleechanorg_pr_automation/tests/test_automation_marker_functions.py
 jleechanorg_pr_automation/tests/test_automation_over_running_reproduction.py
 jleechanorg_pr_automation/tests/test_automation_safety_limits.py
 jleechanorg_pr_automation/tests/test_automation_safety_manager_comprehensive.py
+jleechanorg_pr_automation/tests/test_cleanup_pending_reviews.py
 jleechanorg_pr_automation/tests/test_codex_actor_matching.py
 jleechanorg_pr_automation/tests/test_fixpr_prompt.py
 jleechanorg_pr_automation/tests/test_fixpr_return_value.py

@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
|
|
|
7
7
|
|
|
8
8
|
[project]
|
|
9
9
|
name = "jleechanorg-pr-automation"
|
|
10
|
-
version = "0.2.
|
|
10
|
+
version = "0.2.48"
|
|
11
11
|
description = "GitHub PR automation system with safety limits and actionable counting"
|
|
12
12
|
authors = [
|
|
13
13
|
{ name = "jleechan", email = "jlee@jleechan.org" },
|
|