jleechanorg-pr-automation 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,7 +16,7 @@ from .utils import (
  json_manager,
  )
 
- __version__ = "0.1.0"
+ __version__ = "0.1.1"
  __author__ = "jleechan"
  __email__ = "jlee@jleechan.org"
 
@@ -4,7 +4,7 @@ Automation Safety Manager - GREEN Phase Implementation
 
  Minimal implementation to pass the RED phase tests with:
  - PR attempt limits (max 5 per PR)
- - Global run limits (max 50 total)
+ - Global run limits (max 50 per day with automatic midnight reset)
  - Manual approval system
  - Thread-safe operations
  - Email notifications
@@ -25,6 +25,12 @@ from email.mime.text import MIMEText
  from email.mime.multipart import MIMEMultipart
  from typing import Dict, Optional, Union
 
+
+ REAL_DATETIME = datetime
+
+ # Number of characters in the ISO 8601 date prefix ("YYYY-MM-DD").
+ ISO_DATE_PREFIX_LENGTH = len("YYYY-MM-DD")
+
  # Optional keyring import for email functionality
  _keyring_spec = importlib.util.find_spec("keyring")
  if _keyring_spec:
@@ -88,9 +94,14 @@ class AutomationSafetyManager:
  self._write_json_file(self.pr_attempts_file, {})
 
  if not os.path.exists(self.global_runs_file):
+ now = datetime.now()
+ today = now.date().isoformat()
  self._write_json_file(self.global_runs_file, {
  "total_runs": 0,
- "start_date": datetime.now().isoformat()
+ "start_date": now.isoformat(),
+ "current_date": today,
+ "last_run": None,
+ "last_reset": now.isoformat(),
  })
 
  if not os.path.exists(self.approval_file):
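 
For reference, a freshly seeded global-runs file under 0.1.1 now carries the date fields the reset logic relies on. A minimal sketch of the seeded payload, with illustrative timestamps (the file lives at whatever path `global_runs_file` points to):

    # Illustrative contents of the global runs file right after seeding;
    # timestamps are examples, not values from a real run.
    seeded_global_runs = {
        "total_runs": 0,
        "start_date": "2025-10-06T09:00:00",
        "current_date": "2025-10-06",
        "last_run": None,
        "last_reset": "2025-10-06T09:00:00",
    }
 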
@@ -121,7 +132,6 @@ class AutomationSafetyManager:
  default_config = {
  "global_limit": self.global_limit,
  "pr_limit": self.pr_limit,
- "daily_limit": 100
  }
  self._write_json_file(self.config_file, default_config)
 
@@ -145,11 +155,6 @@ class AutomationSafetyManager:
  # Sync PR attempts - keys already normalized
  self._write_json_file(self.pr_attempts_file, self._pr_attempts_cache)
 
- # Sync global runs
- global_data = self._read_json_file(self.global_runs_file)
- global_data["total_runs"] = self._global_runs_cache
- self._write_json_file(self.global_runs_file, global_data)
-
  # Sync inflight cache to prevent concurrent processing
  self._write_json_file(self.inflight_file, self._pr_inflight_cache)
 
@@ -218,6 +223,99 @@ class AutomationSafetyManager:
  except Exception as e:
  self.logger.error(f"Exception writing safety data file {file_path}: {e}")
 
+ def _normalize_global_run_payload(
+ self,
+ payload: Optional[dict],
+ *,
+ now: Optional[datetime] = None,
+ ) -> tuple[dict, int, bool]:
+ """Normalize persisted global run data and determine if it is stale."""
+
+ current_time = now or datetime.now()
+ today = current_time.date().isoformat()
+
+ if isinstance(payload, dict):
+ data = dict(payload)
+ else:
+ data = {}
+
+ # Ensure start_date is always present and well-formed
+ start_date = data.get("start_date")
+ if isinstance(start_date, str):
+ try:
+ REAL_DATETIME.fromisoformat(start_date)
+ except ValueError:
+ data["start_date"] = current_time.isoformat()
+ else:
+ data["start_date"] = current_time.isoformat()
+
+ stored_date = data.get("current_date")
+ normalized_date: Optional[str] = None
+ if isinstance(stored_date, str):
+ try:
+ normalized_date = REAL_DATETIME.fromisoformat(stored_date).date().isoformat()
+ except ValueError:
+ # Support legacy data that stored raw dates without full ISO format
+ if len(stored_date) >= ISO_DATE_PREFIX_LENGTH:
+ candidate = stored_date[:ISO_DATE_PREFIX_LENGTH]
+ try:
+ normalized_date = REAL_DATETIME.fromisoformat(candidate).date().isoformat()
+ except ValueError:
+ normalized_date = None
+ else:
+ normalized_date = None
+
+ if normalized_date is None:
+ try:
+ normalized_date = REAL_DATETIME.fromisoformat(data["start_date"]).date().isoformat()
+ except ValueError:
+ normalized_date = None
+
+ is_stale = normalized_date != today
+
+ if is_stale:
+ normalized_date = today
+ data["total_runs"] = 0
+ data["last_reset"] = current_time.isoformat()
+
+ data["current_date"] = normalized_date or today
+
+ raw_total = data.get("total_runs", 0)
+ try:
+ total_runs = int(raw_total)
+ except (TypeError, ValueError):
+ total_runs = 0
+
+ if total_runs < 0:
+ total_runs = 0
+
+ data["total_runs"] = total_runs
+
+ last_run = data.get("last_run")
+ if last_run is not None:
+ if not isinstance(last_run, str):
+ data.pop("last_run", None)
+ else:
+ try:
+ REAL_DATETIME.fromisoformat(last_run)
+ except ValueError:
+ data.pop("last_run", None)
+
+ last_reset = data.get("last_reset")
+ if last_reset is not None:
+ if not isinstance(last_reset, str):
+ data.pop("last_reset", None)
+ else:
+ try:
+ REAL_DATETIME.fromisoformat(last_reset)
+ except ValueError:
+ data.pop("last_reset", None)
+
+ if "last_reset" not in data:
+ data["last_reset"] = data["start_date"]
+
+ return data, total_runs, is_stale
+
  def can_process_pr(self, pr_number: Union[int, str], repo: str = None, branch: str = None) -> bool:
  """Check if PR can be processed (under attempt limit)"""
  with self.lock:
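 
The normalizer above boils down to one rule: if the stored current_date no longer matches today, the payload is stale, so the counter resets and last_reset is stamped. A minimal standalone sketch of just that rule, using a hypothetical helper name rather than the package API:

    from datetime import datetime
    from typing import Optional

    def apply_daily_reset(payload: dict, now: Optional[datetime] = None) -> dict:
        # Reset the run counter when the stored date is not today (illustration only).
        current = now or datetime.now()
        today = current.date().isoformat()
        data = dict(payload)
        if data.get("current_date") != today:
            data["total_runs"] = 0
            data["current_date"] = today
            data["last_reset"] = current.isoformat()
        return data

    # A counter recorded yesterday is cleared on the first check after midnight.
    stale = {"total_runs": 50, "current_date": "2025-10-05"}
    print(apply_daily_reset(stale, datetime(2025, 10, 6, 0, 0, 1))["total_runs"])  # 0
 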
@@ -330,8 +428,8 @@ class AutomationSafetyManager:
  def can_start_global_run(self) -> bool:
  """Check if a global run can be started"""
  with self.lock:
- # Use cache for testing, file for production
- runs = self._global_runs_cache if hasattr(self, '_global_runs_cache') else self.get_global_runs()
+ # Always refresh from file to detect external resets
+ runs = self.get_global_runs()
 
  if runs < self.global_limit:
  return True
@@ -345,29 +443,58 @@ class AutomationSafetyManager:
  return False
 
  def get_global_runs(self) -> int:
- """Get total number of global runs"""
+ """Get total number of global runs (resets daily)"""
  with self.lock:
- # Reload from disk to ensure consistency across multiple managers
- data = self._read_json_file(self.global_runs_file)
- self._global_runs_cache = data.get("total_runs", 0)
- return self._global_runs_cache
+ normalized_total = 0
+
+ def _refresh(payload: Optional[dict]):
+ nonlocal normalized_total
+ normalized, total, _ = self._normalize_global_run_payload(payload)
+ normalized_total = normalized["total_runs"]
+ return normalized
+
+ if not json_manager.update_json(self.global_runs_file, _refresh):
+ self.logger.warning(
+ "Falling back to manual refresh for global run counter"
+ )
+ payload = self._read_json_file(self.global_runs_file)
+ normalized, total, _ = self._normalize_global_run_payload(payload)
+ normalized_total = total
+ self._write_json_file(self.global_runs_file, normalized)
+
+ self._global_runs_cache = normalized_total
+ return normalized_total
 
  def record_global_run(self):
  """Record a global automation run atomically"""
  with self.lock:
- try:
- # Update in-memory cache
- self._global_runs_cache += 1
-
- # Sync to file for persistence (atomic operation)
- data = self._read_json_file(self.global_runs_file)
- data["total_runs"] = self._global_runs_cache
- data["last_run"] = datetime.now().isoformat()
- self._write_json_file(self.global_runs_file, data)
- except Exception:
- # Rollback cache if file write failed
- self._global_runs_cache -= 1
- raise
+ new_total = 0
+ current_time = datetime.now()
+
+ def _increment(payload: Optional[dict]):
+ nonlocal new_total
+ normalized, total, _ = self._normalize_global_run_payload(payload, now=current_time)
+ total += 1
+ normalized["total_runs"] = total
+ normalized["current_date"] = current_time.date().isoformat()
+ normalized["last_run"] = current_time.isoformat()
+ new_total = total
+ return normalized
+
+ if not json_manager.update_json(self.global_runs_file, _increment):
+ self.logger.warning(
+ "Falling back to manual increment for global run counter"
+ )
+ payload = self._read_json_file(self.global_runs_file)
+ normalized, total, _ = self._normalize_global_run_payload(payload, now=current_time)
+ total += 1
+ normalized["total_runs"] = total
+ normalized["current_date"] = current_time.date().isoformat()
+ normalized["last_run"] = current_time.isoformat()
+ new_total = total
+ self._write_json_file(self.global_runs_file, normalized)
+
+ self._global_runs_cache = new_total
 
  def requires_manual_approval(self) -> bool:
  """Check if manual approval is required"""
@@ -387,7 +514,7 @@ class AutomationSafetyManager:
  return False
 
  try:
- approval_date = datetime.fromisoformat(approval_date_str)
+ approval_date = REAL_DATETIME.fromisoformat(approval_date_str)
  except (TypeError, ValueError):
  return False
  approval_hours = get_automation_limits()['approval_hours']
@@ -532,6 +659,9 @@ This is an automated notification from the WorldArchitect.AI automation system.
  data = self._read_json_file(self.global_runs_file)
  data["total_runs"] = 0
  data["last_run"] = None
+ now = datetime.now()
+ data["current_date"] = now.date().isoformat()
+ data["last_reset"] = now.isoformat()
  self._write_json_file(self.global_runs_file, data)
 
  def _clear_pr_attempts(self):
@@ -7,9 +7,23 @@ from dataclasses import dataclass
 
  DEFAULT_ASSISTANT_HANDLE = "coderabbitai"
 
- # Core instruction template with hardcoded AI assistant mentions
+
+ def compose_assistant_mentions(assistant_handle: str) -> str:
+ """Return the canonical mention list for the supplied assistant handle."""
+
+ return f"@codex @{assistant_handle} @copilot @cursor"
+
+
+ DEFAULT_ASSISTANT_MENTIONS = compose_assistant_mentions(DEFAULT_ASSISTANT_HANDLE)
+
+ CODEX_COMMENT_INTRO_BODY = (
+ "[AI automation] Codex will implement the code updates while {review_assistants_clause} "
+ "review support. Please make the following changes to this PR."
+ )
+
+ # Core instruction template with shared AI assistant intro text
  CODEX_COMMENT_TEMPLATE = (
- "@codex @coderabbitai @copilot @cursor [AI automation] Please make the following changes to this PR\n\n"
+ "{comment_intro}\n\n"
  "Use your judgment to fix comments from everyone or explain why it should not be fixed. "
  "Follow binary response protocol - every comment needs \"DONE\" or \"NOT DONE\" classification "
  "explicitly with an explanation. Address all comments on this PR. Fix any failing tests and "
@@ -32,11 +46,63 @@ def normalise_handle(assistant_handle: str | None) -> str:
  return cleaned or DEFAULT_ASSISTANT_HANDLE
 
 
+ def _extract_review_assistants(assistant_mentions: str) -> list[str]:
+ """Return the assistant mentions that participate in review support."""
+
+ tokens = assistant_mentions.split()
+ return [
+ token
+ for token in tokens
+ if token.startswith("@") and token.lower() != "@codex"
+ ]
+
+
+ def _format_review_assistants(review_assistants: list[str]) -> str:
+ """Return a human readable list of review assistants for prose usage."""
+
+ if not review_assistants:
+ return "the review assistants"
+
+ # Strip leading "@" handles so we don't ping reviewers twice inside the prose.
+ prose_names = [assistant.lstrip("@") or assistant for assistant in review_assistants]
+
+ if len(prose_names) == 1:
+ return prose_names[0]
+
+ if len(prose_names) == 2:
+ return f"{prose_names[0]} and {prose_names[1]}"
+
+ return ", ".join(prose_names[:-1]) + f", and {prose_names[-1]}"
+
+
+ def build_comment_intro(
+ assistant_mentions: str | None = None,
+ assistant_handle: str | None = None,
+ ) -> str:
+ """Return the shared Codex automation intro text for comment bodies."""
+
+ mentions = assistant_mentions
+ if mentions is None:
+ mentions = compose_assistant_mentions(normalise_handle(assistant_handle))
+ review_assistants = _extract_review_assistants(mentions)
+ assistants_text = _format_review_assistants(review_assistants)
+ if len(review_assistants) == 1:
+ clause = f"{assistants_text} focuses on"
+ else:
+ clause = f"{assistants_text} focus on"
+ intro_body = CODEX_COMMENT_INTRO_BODY.format(
+ review_assistants_clause=clause
+ )
+ intro_prefix = f"{mentions} " if mentions else ""
+ return f"{intro_prefix}{intro_body}"
+
+
  def build_default_comment(assistant_handle: str | None = None) -> str:
  """Return the default Codex instruction text for the given handle."""
 
- handle = normalise_handle(assistant_handle)
- return CODEX_COMMENT_TEMPLATE.format(assistant_handle=handle)
+ return CODEX_COMMENT_TEMPLATE.format(
+ comment_intro=build_comment_intro(assistant_handle=assistant_handle)
+ )
 
 
  @dataclass(frozen=True)
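 
Given the helpers added above, the default mention list renders an intro roughly like the one below; this is a quick sanity check derived from the code in this diff, not an official fixture:

    from jleechanorg_pr_automation.codex_config import build_comment_intro

    intro = build_comment_intro(assistant_mentions="@codex @coderabbitai @copilot @cursor")
    print(intro)
    # Expected shape:
    # "@codex @coderabbitai @copilot @cursor [AI automation] Codex will implement the
    #  code updates while coderabbitai, copilot, and cursor focus on review support.
    #  Please make the following changes to this PR."
 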
@@ -25,10 +25,13 @@ from .automation_utils import AutomationUtils
  from .codex_config import (
  CODEX_COMMIT_MARKER_PREFIX as SHARED_MARKER_PREFIX,
  CODEX_COMMIT_MARKER_SUFFIX as SHARED_MARKER_SUFFIX,
+ build_comment_intro,
  )
 
 
  class JleechanorgPRMonitor:
+ """Cross-organization PR monitoring with Codex automation comments"""
+
  @staticmethod
  def _redact_email(email: Optional[str]) -> Optional[str]:
  """Redact email for logging while preserving domain for debugging"""
@@ -38,7 +41,6 @@ class JleechanorgPRMonitor:
  if len(user) <= 2:
  return f"***@{domain}"
  return f"{user[:2]}***@{domain}"
- """Cross-organization PR monitoring with Codex automation comments"""
 
  CODEX_COMMIT_MARKER_PREFIX = SHARED_MARKER_PREFIX
  CODEX_COMMIT_MARKER_SUFFIX = SHARED_MARKER_SUFFIX
@@ -531,6 +533,10 @@ class JleechanorgPRMonitor:
  repo_full = self._normalize_repository_name(repository)
  self.logger.info(f"💬 Requesting Codex support for {repo_full} PR #{pr_number}")
 
+ # Extract repo name and branch from PR data
+ repo_name = repo_full.split('/')[-1]
+ branch_name = pr_data.get('headRefName', 'unknown')
+
  # Get current PR state including commit SHA
  head_sha, comments = self._get_pr_comment_state(repo_full, pr_number)
  head_commit_details = None
@@ -543,12 +549,9 @@ class JleechanorgPRMonitor:
  repo_full,
  pr_number,
  )
+ self._record_processed_pr(repo_name, branch_name, pr_number, head_sha)
  return "skipped"
 
- # Extract repo name and branch from PR data
- repo_name = repo_full.split('/')[-1]
- branch_name = pr_data.get('headRefName', 'unknown')
-
  if not head_sha:
  self.logger.warning(
  f"⚠️ Could not determine commit SHA for PR #{pr_number}; proceeding without marker gating"
@@ -656,7 +659,8 @@ class JleechanorgPRMonitor:
  ) -> str:
  """Build comment body that tells all AI assistants to fix PR comments, tests, and merge conflicts"""
 
- comment_body = f"""{self.assistant_mentions} [AI automation] Please make the following changes to this PR
+ intro_line = build_comment_intro(assistant_mentions=self.assistant_mentions)
+ comment_body = f"""{intro_line}
 
  **Summary (Execution Flow):**
  1. Review every outstanding PR comment to understand required fixes and clarifications.
@@ -37,14 +37,20 @@ class TestAutomationOverRunningReproduction(unittest.TestCase):
 
  Manual override allows up to 2x the normal limit (100 runs) but no more.
  """
- # Set up scenario: we're at the 50 run limit
- self.manager._global_runs_cache = 50
+ # Set up scenario: we're at the 2x limit + 1 (101 runs)
+ # We need to write to the file since get_global_runs() reads from file
+ today = datetime.now().date().isoformat()
+ data = {
+ "total_runs": 101,
+ "start_date": datetime.now().isoformat(),
+ "current_date": today
+ }
+ self.manager._write_json_file(self.manager.global_runs_file, data)
 
  # Manual approval should NOT allow unlimited runs
  self.manager.grant_manual_approval("test@example.com")
 
  # This should be FALSE after 2x the limit (100 runs)
- self.manager._global_runs_cache = 101
  result = self.manager.can_start_global_run()
 
  # FIXED: This should now be FALSE (blocked) at 101 runs
@@ -91,14 +97,18 @@ class TestAutomationOverRunningReproduction(unittest.TestCase):
 
  The system now blocks excessive runs even with manual override.
  """
- # Simulate the exact scenario from the bug report
- self.manager._global_runs_cache = 0
-
  # Grant approval (simulating what happened Sept 27)
  self.manager.grant_manual_approval("jleechan@anthropic.com")
 
  # Simulate running 346 times (what actually happened)
- self.manager._global_runs_cache = 346
+ # We need to write to the file since get_global_runs() reads from file
+ today = datetime.now().date().isoformat()
+ data = {
+ "total_runs": 346,
+ "start_date": datetime.now().isoformat(),
+ "current_date": today
+ }
+ self.manager._write_json_file(self.manager.global_runs_file, data)
 
  # This should now be FALSE (blocked) with fixed logic
  result = self.manager.can_start_global_run()
@@ -264,6 +264,128 @@ class TestAutomationSafetyLimits(unittest.TestCase):
  self.assertEqual(manager.pr_limit, 5)
  self.assertEqual(manager.global_limit, 50)
 
+ # Matrix 8: Daily Reset Functionality (50 runs per day)
+ def test_daily_reset_first_run_of_day(self):
+ """RED: First run of the day should be allowed with counter at 0"""
+ result = self.automation_manager.can_start_global_run()
+ self.assertTrue(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+
+ def test_daily_reset_49th_run_same_day(self):
+ """RED: 49th run on same day should be allowed"""
+ # Record 48 runs on same day
+ for _ in range(48):
+ self.automation_manager.record_global_run()
+
+ result = self.automation_manager.can_start_global_run()
+ self.assertTrue(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 48)
+
+ def test_daily_reset_50th_run_same_day(self):
+ """RED: 50th run on same day should be allowed (at limit)"""
+ # Record 49 runs on same day
+ for _ in range(49):
+ self.automation_manager.record_global_run()
+
+ result = self.automation_manager.can_start_global_run()
+ self.assertTrue(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 49)
+
+ def test_daily_reset_51st_run_same_day_blocked(self):
+ """RED: 51st run on same day should be blocked"""
+ # Record 50 runs on same day (hit daily limit)
+ for _ in range(50):
+ self.automation_manager.record_global_run()
+
+ result = self.automation_manager.can_start_global_run()
+ self.assertFalse(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 50)
+
+ def test_daily_reset_missing_current_date_resets_counter(self):
+ """Legacy counters without current_date should reset after upgrade"""
+ legacy_data = {
+ "total_runs": 50,
+ "start_date": datetime(2025, 9, 30, 12, 0, 0).isoformat()
+ }
+ with open(self.global_runs_file, 'w') as f:
+ json.dump(legacy_data, f)
+
+ if hasattr(self, '_automation_manager'):
+ del self._automation_manager
+
+ # First run after upgrade should reset the stale counter
+ self.assertTrue(self.automation_manager.can_start_global_run())
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+
+ @patch('jleechanorg_pr_automation.automation_safety_manager.datetime')
+ def test_daily_reset_new_day_resets_counter(self, mock_datetime):
+ """RED: Counter should reset to 0 when a new day starts"""
+ # Day 1: Record 50 runs
+ day1 = datetime(2025, 10, 1, 10, 0, 0)
+ mock_datetime.now.return_value = day1
+ mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
+
+ for _ in range(50):
+ self.automation_manager.record_global_run()
+
+ # Should be at limit on Day 1
+ self.assertEqual(self.automation_manager.get_global_runs(), 50)
+ self.assertFalse(self.automation_manager.can_start_global_run())
+
+ # Day 2: Counter should reset
+ day2 = datetime(2025, 10, 2, 10, 0, 0)
+ mock_datetime.now.return_value = day2
+
+ # Should allow runs again with reset counter
+ result = self.automation_manager.can_start_global_run()
+ self.assertTrue(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+
+ @patch('jleechanorg_pr_automation.automation_safety_manager.datetime')
+ def test_daily_reset_multiple_days(self, mock_datetime):
+ """RED: Counter should reset each day for multiple days"""
+ mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
+
+ # Day 1: 50 runs
+ day1 = datetime(2025, 10, 1, 10, 0, 0)
+ mock_datetime.now.return_value = day1
+ for _ in range(50):
+ self.automation_manager.record_global_run()
+ self.assertEqual(self.automation_manager.get_global_runs(), 50)
+
+ # Day 2: Reset to 0, then 30 runs
+ day2 = datetime(2025, 10, 2, 10, 0, 0)
+ mock_datetime.now.return_value = day2
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+ for _ in range(30):
+ self.automation_manager.record_global_run()
+ self.assertEqual(self.automation_manager.get_global_runs(), 30)
+
+ # Day 3: Reset to 0 again
+ day3 = datetime(2025, 10, 3, 10, 0, 0)
+ mock_datetime.now.return_value = day3
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+
+ @patch('jleechanorg_pr_automation.automation_safety_manager.datetime')
+ def test_daily_reset_midnight_transition(self, mock_datetime):
+ """RED: Counter should reset at midnight transition"""
+ mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
+
+ # 23:59:59 on Day 1 - at limit
+ before_midnight = datetime(2025, 10, 1, 23, 59, 59)
+ mock_datetime.now.return_value = before_midnight
+ for _ in range(50):
+ self.automation_manager.record_global_run()
+ self.assertFalse(self.automation_manager.can_start_global_run())
+
+ # 00:00:01 on Day 2 - should reset
+ after_midnight = datetime(2025, 10, 2, 0, 0, 1)
+ mock_datetime.now.return_value = after_midnight
+
+ result = self.automation_manager.can_start_global_run()
+ self.assertTrue(result)
+ self.assertEqual(self.automation_manager.get_global_runs(), 0)
+
  @property
  def automation_manager(self):
  """RED: This property will fail - no AutomationSafetyManager exists yet"""
@@ -273,7 +395,7 @@ class TestAutomationSafetyLimits(unittest.TestCase):
  return self._automation_manager
 
 
- # Matrix 8: Integration with Existing Automation
+ # Matrix 9: Integration with Existing Automation
  class TestAutomationIntegration(unittest.TestCase):
  """Integration tests with existing simple_pr_batch.sh script"""
 
@@ -16,6 +16,7 @@ from unittest.mock import Mock, patch, MagicMock
 
  # Import the automation safety manager using proper Python module path
  from jleechanorg_pr_automation.automation_safety_manager import AutomationSafetyManager
+ from jleechanorg_pr_automation.utils import json_manager
 
 
  class TestAutomationSafetyManagerInit:
@@ -150,6 +151,49 @@ class TestGlobalLimits:
  manager._clear_global_runs()
  assert manager.get_global_runs() == 0
 
+ def test_global_runs_auto_resets_daily(self, manager):
+ """Daily reset should clear the counter and allow automation without manual approval."""
+ manager._clear_global_runs()
+
+ # Simulate crossing the daily limit
+ for _ in range(manager.global_limit):
+ manager.record_global_run()
+
+ assert manager.requires_manual_approval() is True
+
+ now = datetime.now()
+ stale_payload = {
+ "total_runs": manager.global_limit,
+ "start_date": (now - timedelta(days=4)).isoformat(),
+ "current_date": (now - timedelta(days=1)).date().isoformat(),
+ "last_run": (now - timedelta(hours=2)).isoformat(),
+ "last_reset": (now - timedelta(days=2)).isoformat(),
+ }
+ json_manager.write_json(manager.global_runs_file, stale_payload)
+
+ refreshed_runs = manager.get_global_runs()
+ expected_today = datetime.now().date().isoformat()
+ assert refreshed_runs == 0
+ assert manager.requires_manual_approval() is False
+
+ normalized = manager._read_json_file(manager.global_runs_file)
+ assert normalized["current_date"] == expected_today
+ assert normalized["total_runs"] == 0
+
+ # Ensure the reset timestamp is updated and sane
+ last_reset = normalized.get("last_reset")
+ assert last_reset is not None
+ parsed_reset = datetime.fromisoformat(last_reset)
+ assert (
+ parsed_reset is not None
+ ), "last_reset should be a valid ISO datetime"
+
+ # Record another run and verify counters/log fields move forward
+ manager.record_global_run()
+ normalized = manager._read_json_file(manager.global_runs_file)
+ assert normalized["total_runs"] == 1
+ assert datetime.fromisoformat(normalized["last_run"])
+
 
  class TestPRLimits:
  """Test suite for per-PR automation limits"""
@@ -408,7 +408,7 @@ class TestPRFilteringMatrix(unittest.TestCase):
  mock_has_comment.assert_not_called()
  mock_build_body.assert_not_called()
  mock_subprocess.assert_not_called()
- mock_record_processed.assert_not_called()
+ mock_record_processed.assert_called_once_with('repo', 'feature', 456, 'sha123')
 
  def test_process_pr_comment_only_returns_true_for_posted(self):
  """GREEN: _process_pr_comment should only return True when comment actually posted"""
@@ -5,6 +5,7 @@ Test PR targeting functionality for jleechanorg_pr_monitor - Codex Strategy Test
 
  import unittest
 
+ from jleechanorg_pr_automation.codex_config import build_comment_intro
  from jleechanorg_pr_automation.jleechanorg_pr_monitor import JleechanorgPRMonitor
 
 
@@ -14,10 +15,32 @@ class TestPRTargeting(unittest.TestCase):
  def test_extract_commit_marker(self):
  """Commit markers can be parsed from Codex comments"""
  monitor = JleechanorgPRMonitor()
- test_comment = f"@codex @coderabbitai @copilot @cursor [AI automation] Test comment\n\n{monitor.CODEX_COMMIT_MARKER_PREFIX}abc123{monitor.CODEX_COMMIT_MARKER_SUFFIX}"
+ intro_line = build_comment_intro(
+ assistant_mentions=monitor.assistant_mentions
+ )
+ test_comment = (
+ f"{intro_line} Test comment\n\n"
+ f"{monitor.CODEX_COMMIT_MARKER_PREFIX}abc123{monitor.CODEX_COMMIT_MARKER_SUFFIX}"
+ )
  marker = monitor._extract_commit_marker(test_comment)
  self.assertEqual(marker, "abc123")
 
+ def test_intro_prose_avoids_duplicate_mentions(self):
+ """Review assistants should not retain '@' prefixes in prose text."""
+
+ intro_line = build_comment_intro(
+ assistant_mentions="@codex @coderabbitai @copilot @cursor"
+ )
+ _, _, intro_body = intro_line.partition("] ")
+ self.assertIn("coderabbitai", intro_body)
+ self.assertNotIn("@coderabbitai", intro_body)
+
+ def test_intro_without_mentions_has_no_leading_space(self):
+ """Explicitly blank mention lists should not add stray whitespace."""
+
+ intro_line = build_comment_intro(assistant_mentions="")
+ self.assertTrue(intro_line.startswith("[AI automation]"))
+
  def test_detect_pending_codex_commit(self):
  """Codex bot summary comments referencing head commit trigger pending detection."""
  monitor = JleechanorgPRMonitor()
@@ -1,16 +1,15 @@
  Metadata-Version: 2.4
  Name: jleechanorg-pr-automation
- Version: 0.1.0
+ Version: 0.1.1
  Summary: GitHub PR automation system with safety limits and actionable counting
  Author-email: jleechan <jlee@jleechan.org>
- License: MIT
+ License-Expression: MIT
  Project-URL: Homepage, https://github.com/jleechanorg/worldarchitect.ai
  Project-URL: Repository, https://github.com/jleechanorg/worldarchitect.ai
  Project-URL: Issues, https://github.com/jleechanorg/worldarchitect.ai/issues
  Keywords: github,automation,pr,pull-request,monitoring
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
@@ -206,6 +205,12 @@ MIT License - see LICENSE file for details.
 
  ## Changelog
 
+ ### 0.1.1 (2025-10-06)
+
+ - Fix daily reset of global automation limit so automation never stalls overnight
+ - Track latest reset timestamp in safety data for observability
+ - Expand safety manager tests to cover daily rollover behaviour
+
  ### 0.1.0 (2025-09-28)
 
  - Initial release
@@ -1,23 +1,23 @@
- jleechanorg_pr_automation/__init__.py,sha256=qYUTmbETCH775SM_6ln8jX05WzAGPErD-JN7OTIzXrY,852
- jleechanorg_pr_automation/automation_safety_manager.py,sha256=La3GStljdkxsSJBklWVj8CKAx6ndCsyI_aS6tSBZGJ8,27531
+ jleechanorg_pr_automation/__init__.py,sha256=3-fg5lcfMZsBZYAF9ZspC55gAVDVhj7lIkMxozQWItc,852
+ jleechanorg_pr_automation/automation_safety_manager.py,sha256=0cNfrHV2VXGqIpFnVBUMjovn8zKOdM4H3_x8NVR2eLs,32247
  jleechanorg_pr_automation/automation_safety_wrapper.py,sha256=Oa88wGH7XKfOywVnDMxwL0EP_v5YHvkKaSHxTvyiR48,4059
  jleechanorg_pr_automation/automation_utils.py,sha256=MInKTLuQSPwIXga8em70JtgGED3KU_OsV3cvX-4tN-Y,11608
  jleechanorg_pr_automation/check_codex_comment.py,sha256=ccs4XjPLPnsuMiSS_pXo_u-EzNldR34PqyWTlu0U0H0,2221
  jleechanorg_pr_automation/codex_branch_updater.py,sha256=490tjJDOCm58LoRS3F7vslOINVBU4F2C__2dWYRKcFs,9135
- jleechanorg_pr_automation/codex_config.py,sha256=JOrAzVwdDac9gHdLFSYDco0fJ13pdOQT22kAiMYRHhg,2101
- jleechanorg_pr_automation/jleechanorg_pr_monitor.py,sha256=6WDl0zwbJJKrRrHLZQ87J-680FC2DbLCZJ4Q2Cn62dU,48700
+ jleechanorg_pr_automation/codex_config.py,sha256=WFWh-jXEXIkrbtr4QsMiBnyD4cG3nERbRx0QGOaB0nw,4229
+ jleechanorg_pr_automation/jleechanorg_pr_monitor.py,sha256=RB8l2racnmzYPkEJuzu_4UPis1l7ewA6NvUKnGPSa_4,48824
  jleechanorg_pr_automation/utils.py,sha256=nhPyR7_wm2Bh-D2aE6ueqWVWLelzzXz_BCwkdC920ig,8168
  jleechanorg_pr_automation/tests/conftest.py,sha256=GVE-RLmzVM9yBNVgfZOHu39onmZPMPjN_vrIsmcWlvU,388
  jleechanorg_pr_automation/tests/test_actionable_counting_matrix.py,sha256=rQxFH6ontV3GpZigFRiHCXRkX0mEnespJv73L1DfPw0,12174
- jleechanorg_pr_automation/tests/test_automation_over_running_reproduction.py,sha256=X1sIKr-ZpAp-lks0wVGnNDM4Yas1uCrgSokW6rYGm38,5759
- jleechanorg_pr_automation/tests/test_automation_safety_limits.py,sha256=xFQIxLsXlP0VLnZRueFEUYOFhX1d9fxXXJf_1EzBY2Q,13340
- jleechanorg_pr_automation/tests/test_automation_safety_manager_comprehensive.py,sha256=CLyWuhl3TQrMKDoLxUikpuMtSotm6FZZZXT_WPBCI_w,22740
+ jleechanorg_pr_automation/tests/test_automation_over_running_reproduction.py,sha256=UP8DqJ5bn525Cx3TdpIApb7ts9Fi0GSJfubI3xyB21U,6230
+ jleechanorg_pr_automation/tests/test_automation_safety_limits.py,sha256=IUOs0VgX9EhGKOJ1Tz2tVQjwGn8jp_IKtLaILV6NRiQ,18678
+ jleechanorg_pr_automation/tests/test_automation_safety_manager_comprehensive.py,sha256=6mUp4uDesecqYr9sjU4m2oZXTUUzxo3LSFNbXrOxNuY,24619
  jleechanorg_pr_automation/tests/test_codex_actor_matching.py,sha256=rY6AGM1DatKrYwGIryM36zYLBBK--ydOwJwkH2T7uRk,5252
  jleechanorg_pr_automation/tests/test_graphql_error_handling.py,sha256=Fv0BWLSF-eQ4n5gHwf_JSc42Y0RizrjPPlCB4THLzaI,6401
- jleechanorg_pr_automation/tests/test_pr_filtering_matrix.py,sha256=IBRn9APpNBSpedsvmKVb26ZRrHHj0EzE4F5AvCH9mP8,21415
- jleechanorg_pr_automation/tests/test_pr_targeting.py,sha256=YXyWUGQj2LzUDyZ1NU57SoRXtNcUCuRNMtgCcqrcoGU,3798
- jleechanorg_pr_automation-0.1.0.dist-info/METADATA,sha256=IUHkLXylUcpFalTqBYu82Rk35s1XmHYTSB-0_lHWUOQ,6333
- jleechanorg_pr_automation-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- jleechanorg_pr_automation-0.1.0.dist-info/entry_points.txt,sha256=QD8UUHJ4H09_beMvHzZ5SOy5cRbthvD11mXLzaWjwrg,178
- jleechanorg_pr_automation-0.1.0.dist-info/top_level.txt,sha256=1DJKrq0Be2B5_NL5jTICV1rvnqaMXFmyJpuOTUatcig,26
- jleechanorg_pr_automation-0.1.0.dist-info/RECORD,,
+ jleechanorg_pr_automation/tests/test_pr_filtering_matrix.py,sha256=PC87Vp4dp_TcHPzTIyJpckFK3Zjc10JbLdBMIhxN4II,21453
+ jleechanorg_pr_automation/tests/test_pr_targeting.py,sha256=RN8-3SsiCp7ARvK0x8qKLwrIj3YqjFHSh5QliXH5JoM,4671
+ jleechanorg_pr_automation-0.1.1.dist-info/METADATA,sha256=g-_YGo6-iKIc02V9zKrJbU40aM3jN9vrPbBncYERwsY,6528
+ jleechanorg_pr_automation-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ jleechanorg_pr_automation-0.1.1.dist-info/entry_points.txt,sha256=QD8UUHJ4H09_beMvHzZ5SOy5cRbthvD11mXLzaWjwrg,178
+ jleechanorg_pr_automation-0.1.1.dist-info/top_level.txt,sha256=1DJKrq0Be2B5_NL5jTICV1rvnqaMXFmyJpuOTUatcig,26
+ jleechanorg_pr_automation-0.1.1.dist-info/RECORD,,