jleechanorg-pr-automation 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jleechanorg-pr-automation might be problematic. Click here for more details.
- jleechanorg_pr_automation/__init__.py +32 -0
- jleechanorg_pr_automation/automation_safety_manager.py +700 -0
- jleechanorg_pr_automation/automation_safety_wrapper.py +116 -0
- jleechanorg_pr_automation/automation_utils.py +314 -0
- jleechanorg_pr_automation/check_codex_comment.py +76 -0
- jleechanorg_pr_automation/codex_branch_updater.py +272 -0
- jleechanorg_pr_automation/codex_config.py +57 -0
- jleechanorg_pr_automation/jleechanorg_pr_monitor.py +1202 -0
- jleechanorg_pr_automation/tests/conftest.py +12 -0
- jleechanorg_pr_automation/tests/test_actionable_counting_matrix.py +221 -0
- jleechanorg_pr_automation/tests/test_automation_over_running_reproduction.py +147 -0
- jleechanorg_pr_automation/tests/test_automation_safety_limits.py +340 -0
- jleechanorg_pr_automation/tests/test_automation_safety_manager_comprehensive.py +615 -0
- jleechanorg_pr_automation/tests/test_codex_actor_matching.py +137 -0
- jleechanorg_pr_automation/tests/test_graphql_error_handling.py +155 -0
- jleechanorg_pr_automation/tests/test_pr_filtering_matrix.py +473 -0
- jleechanorg_pr_automation/tests/test_pr_targeting.py +95 -0
- jleechanorg_pr_automation/utils.py +232 -0
- jleechanorg_pr_automation-0.1.0.dist-info/METADATA +217 -0
- jleechanorg_pr_automation-0.1.0.dist-info/RECORD +23 -0
- jleechanorg_pr_automation-0.1.0.dist-info/WHEEL +5 -0
- jleechanorg_pr_automation-0.1.0.dist-info/entry_points.txt +3 -0
- jleechanorg_pr_automation-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,700 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Automation Safety Manager - GREEN Phase Implementation
|
|
4
|
+
|
|
5
|
+
Minimal implementation to pass the RED phase tests with:
|
|
6
|
+
- PR attempt limits (max 5 per PR)
|
|
7
|
+
- Global run limits (max 50 total)
|
|
8
|
+
- Manual approval system
|
|
9
|
+
- Thread-safe operations
|
|
10
|
+
- Email notifications
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import argparse
|
|
14
|
+
import fcntl
|
|
15
|
+
import importlib.util
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import os
|
|
19
|
+
import smtplib
|
|
20
|
+
import sys
|
|
21
|
+
import tempfile
|
|
22
|
+
import threading
|
|
23
|
+
from datetime import datetime, timedelta
|
|
24
|
+
from email.mime.text import MIMEText
|
|
25
|
+
from email.mime.multipart import MIMEMultipart
|
|
26
|
+
from typing import Dict, Optional, Union
|
|
27
|
+
|
|
28
|
+
# Optional keyring import for email functionality.
# Probe with find_spec first so a missing package degrades gracefully
# (HAS_KEYRING=False) instead of raising ImportError at import time.
_keyring_spec = importlib.util.find_spec("keyring")
if _keyring_spec:
    import keyring  # type: ignore
    HAS_KEYRING = True
else:
    keyring = None  # type: ignore
    HAS_KEYRING = False
|
36
|
+
|
|
37
|
+
# Import shared utilities
|
|
38
|
+
from .utils import (
|
|
39
|
+
json_manager,
|
|
40
|
+
setup_logging,
|
|
41
|
+
get_email_config,
|
|
42
|
+
validate_email_config,
|
|
43
|
+
get_automation_limits,
|
|
44
|
+
format_timestamp,
|
|
45
|
+
parse_timestamp,
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AutomationSafetyManager:
|
|
50
|
+
"""Thread-safe automation safety manager with configurable limits"""
|
|
51
|
+
|
|
52
|
+
def __init__(self, data_dir: str):
    """Initialize the safety manager rooted at *data_dir*.

    Creates the data directory and tracking files on first use, applies
    any limit overrides from the on-disk config, and primes the in-memory
    caches from the persisted state.
    """
    self.data_dir = data_dir
    self.lock = threading.RLock()  # Use RLock to prevent deadlock (methods call each other under the lock)
    self.logger = setup_logging(__name__)

    # Get limits from shared utility (may be overridden by config file below)
    limits = get_automation_limits()
    self.pr_limit = limits['pr_limit']
    self.global_limit = limits['global_limit']

    # File paths for persisted state
    self.pr_attempts_file = os.path.join(data_dir, "pr_attempts.json")
    self.global_runs_file = os.path.join(data_dir, "global_runs.json")
    self.approval_file = os.path.join(data_dir, "manual_approval.json")
    self.config_file = os.path.join(data_dir, "automation_safety_config.json")
    self.inflight_file = os.path.join(data_dir, "pr_inflight.json")  # NEW: Persist inflight cache

    # In-memory counters for thread safety
    self._pr_attempts_cache: Dict[str, list] = {}
    self._global_runs_cache: int = 0
    self._pr_inflight_cache: Dict[str, int] = {}

    # Initialize files if they don't exist
    self._ensure_files_exist()

    # Load configuration from file if it exists
    self._load_config_if_exists()

    # Load initial state from files
    self._load_state_from_files()
def _ensure_files_exist(self):
    """Create the data directory and seed any missing tracking files."""
    os.makedirs(self.data_dir, exist_ok=True)

    # (path, seed payload) pairs, written only when the file is absent.
    seeds = [
        (self.pr_attempts_file, {}),
        (self.global_runs_file, {
            "total_runs": 0,
            "start_date": datetime.now().isoformat(),
        }),
        (self.approval_file, {
            "approved": False,
            "approval_date": None,
        }),
        (self.inflight_file, {}),
    ]
    for path, payload in seeds:
        if not os.path.exists(path):
            self._write_json_file(path, payload)
def _load_config_if_exists(self):
    """Load limit overrides from the config file; create a default one if absent.

    Falls back silently to the built-in limits when the file is unreadable
    or contains invalid JSON, so a corrupt config can never block automation.
    """
    if os.path.exists(self.config_file):
        try:
            with open(self.config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
            # Update limits from config
            if 'pr_limit' in config:
                self.pr_limit = config['pr_limit']
            if 'global_limit' in config:
                self.global_limit = config['global_limit']
        except (OSError, json.JSONDecodeError):
            # FIX: catch OSError, not just FileNotFoundError — a permission
            # error or a path turned into a directory must not crash
            # initialization; defaults are used instead.
            pass  # Use defaults
    else:
        # Create a default config so operators can discover the tunables.
        default_config = {
            "global_limit": self.global_limit,
            "pr_limit": self.pr_limit,
            "daily_limit": 100,
        }
        self._write_json_file(self.config_file, default_config)
def _load_state_from_files(self):
    """Prime the in-memory caches from the persisted JSON files."""
    with self.lock:
        # PR attempts (keys migrated to the labeled format on load).
        raw_attempts = self._read_json_file(self.pr_attempts_file)
        self._pr_attempts_cache = self._normalize_pr_attempt_keys(raw_attempts)

        # Global run counter.
        self._global_runs_cache = self._read_json_file(
            self.global_runs_file
        ).get("total_runs", 0)

        # Inflight reservations (values coerced to int).
        raw_inflight = self._read_json_file(self.inflight_file)
        self._pr_inflight_cache = {
            key: int(count) for key, count in raw_inflight.items()
        }
def _sync_state_to_files(self):
    """Flush the in-memory attempt/run/inflight state to their JSON files."""
    with self.lock:
        # PR attempts: cache keys are already normalized, write verbatim.
        self._write_json_file(self.pr_attempts_file, self._pr_attempts_cache)

        # Global runs: merge the counter into the existing payload so any
        # sibling fields (start_date, last_run) survive the rewrite.
        payload = self._read_json_file(self.global_runs_file)
        payload["total_runs"] = self._global_runs_cache
        self._write_json_file(self.global_runs_file, payload)

        # Inflight reservations guard against concurrent processing.
        self._write_json_file(self.inflight_file, self._pr_inflight_cache)
def _make_pr_key(
    self,
    pr_number: Union[int, str],
    repo: Optional[str] = None,
    branch: Optional[str] = None,
) -> str:
    """Build the labeled 'r=<repo>||p=<pr>||b=<branch>' tracking key.

    Missing repo/branch collapse to empty labels so the key shape is
    stable regardless of how much context the caller has.
    """
    return f"r={repo or ''}||p={pr_number}||b={branch or ''}"
def _normalize_pr_attempt_keys(self, raw_data: Dict) -> Dict[str, list]:
    """Normalize legacy PR attempt keys to the labeled 'r=..||p=..||b=..' format.

    Also migrates legacy scalar values (bare attempt counts) into lists of
    failure records so every value is a uniform list of dicts.
    """
    normalized: Dict[str, list] = {}

    for key, value in (raw_data or {}).items():
        if not isinstance(value, list):
            # Older versions stored counts; expand into failure records.
            try:
                count = int(value)
                # FIX: build a distinct dict per record. The previous
                # `[{...}] * count` idiom aliased ONE shared dict N times,
                # so mutating any record silently mutated them all.
                value = [{"result": "failure"} for _ in range(count)]
            except (TypeError, ValueError):
                value = []

        if isinstance(key, str) and "||p=" in key:
            # Already in the labeled format — keep as-is.
            normalized[key] = value
            continue

        repo = None
        branch = None
        pr_number: Union[str, int] = ""

        if isinstance(key, str):
            # Legacy '::'-separated keys: "pr", "repo::pr", "repo::pr::branch".
            parts = key.split("::")
            if len(parts) == 1:
                pr_number = parts[0]
            elif len(parts) == 2:
                repo, pr_number = parts
            elif len(parts) >= 3:
                repo, pr_number, branch = parts[0], parts[1], parts[2]
            else:
                pr_number = key  # unreachable (split never yields []); kept defensively
        else:
            pr_number = key

        normalized[self._make_pr_key(pr_number, repo, branch)] = value

    return normalized
def _read_json_file(self, file_path: str) -> dict:
    """Safely read *file_path* as JSON via the shared manager; {} on failure."""
    return json_manager.read_json(file_path, {})
def _write_json_file(self, file_path: str, data: dict):
    """Atomically persist *data* as JSON; failures are logged, never raised."""
    try:
        ok = json_manager.write_json(file_path, data)
    except Exception as e:
        self.logger.error(f"Exception writing safety data file {file_path}: {e}")
        return
    if not ok:
        # The shared writer signals soft failure via its return value.
        self.logger.error(f"Failed to write safety data file {file_path}")
def can_process_pr(self, pr_number: Union[int, str], repo: str = None, branch: str = None) -> bool:
    """Return True when the PR is under both attempt limits.

    Two independent gates: total recorded attempts, and the trailing run
    of consecutive failures (a success resets the streak).
    """
    with self.lock:
        # Refresh from disk so concurrent manager instances stay consistent.
        on_disk = self._read_json_file(self.pr_attempts_file)
        self._pr_attempts_cache = self._normalize_pr_attempt_keys(on_disk)

        key = self._make_pr_key(pr_number, repo, branch)
        history = list(self._pr_attempts_cache.get(key, []))

        # Gate 1: hard cap on total attempts.
        if len(history) >= self.pr_limit:
            return False

        # Gate 2: trailing consecutive-failure streak.
        streak = 0
        for record in reversed(history):
            if record.get("result") != "failure":
                break
            streak += 1
        return streak < self.pr_limit
def try_process_pr(self, pr_number: Union[int, str], repo: str = None, branch: str = None) -> bool:
    """Atomically reserve a processing slot for the PR; False when blocked."""
    with self.lock:
        # Attempt-history limits are enforced first.
        if not self.can_process_pr(pr_number, repo, branch):
            return False

        key = self._make_pr_key(pr_number, repo, branch)
        active = self._pr_inflight_cache.get(key, 0)

        # Refuse when this PR already holds the maximum concurrent slots.
        if active >= self.pr_limit:
            return False

        self._pr_inflight_cache[key] = active + 1
        # Persist immediately so concurrent cron jobs see the reservation.
        self._write_json_file(self.inflight_file, self._pr_inflight_cache)
        return True
def release_pr_slot(self, pr_number: Union[int, str], repo: str = None, branch: str = None):
    """Return a previously reserved slot (intended for use in finally blocks)."""
    with self.lock:
        key = self._make_pr_key(pr_number, repo, branch)
        active = self._pr_inflight_cache.get(key, 0)
        if active <= 0:
            return  # nothing reserved; releasing is a no-op
        self._pr_inflight_cache[key] = active - 1
        # Persist immediately to keep concurrent runners consistent.
        self._write_json_file(self.inflight_file, self._pr_inflight_cache)
def get_pr_attempts(self, pr_number: Union[int, str], repo: str = None, branch: str = None):
    """Return the length of the PR's trailing consecutive-failure streak."""
    with self.lock:
        key = self._make_pr_key(pr_number, repo, branch)
        history = list(self._pr_attempts_cache.get(key, []))

        streak = 0
        # Walk newest-to-oldest; the first non-failure ends the streak.
        for record in reversed(history):
            if record.get("result") != "failure":
                break
            streak += 1
        return streak
def get_pr_attempt_list(self, pr_number: Union[int, str], repo: str = None, branch: str = None):
    """Return the raw attempt records for a PR, refreshed from disk first."""
    with self.lock:
        # Reload so multiple manager instances observe each other's writes.
        disk_state = self._read_json_file(self.pr_attempts_file)
        self._pr_attempts_cache = self._normalize_pr_attempt_keys(disk_state)
        key = self._make_pr_key(pr_number, repo, branch)
        return self._pr_attempts_cache.get(key, [])
def record_pr_attempt(self, pr_number: Union[int, str], result: str, repo: str = None, branch: str = None):
    """Append a success/failure record for a PR and release one inflight slot."""
    with self.lock:
        key = self._make_pr_key(pr_number, repo, branch)

        entry = {
            "result": result,
            "timestamp": datetime.now().isoformat(),
            "pr_number": pr_number,
            "repo": repo,
            "branch": branch,
        }
        self._pr_attempts_cache.setdefault(key, []).append(entry)

        # A completed attempt releases its reservation; drop the key once
        # no reservations remain so the inflight file stays compact.
        active = self._pr_inflight_cache.get(key, 0)
        if active == 1:
            self._pr_inflight_cache.pop(key, None)
        elif active > 1:
            self._pr_inflight_cache[key] = active - 1

        # Persist all three state files.
        self._sync_state_to_files()
def can_start_global_run(self) -> bool:
    """Return True when another global automation run may start.

    Under the limit: always allowed. Between 1x and 2x the limit: allowed
    only with a valid manual approval. At or above 2x: hard stop.
    """
    with self.lock:
        # Cache is used when present (tests); otherwise fall back to disk.
        runs = self._global_runs_cache if hasattr(self, '_global_runs_cache') else self.get_global_runs()

        if runs < self.global_limit:
            return True

        # Manual override extends the budget, but never past 2x the limit.
        return self.has_manual_approval() and runs < (self.global_limit * 2)
def get_global_runs(self) -> int:
    """Return the persisted total run count, refreshing the cache from disk."""
    with self.lock:
        # Reload so multiple manager instances stay consistent.
        payload = self._read_json_file(self.global_runs_file)
        self._global_runs_cache = payload.get("total_runs", 0)
        return self._global_runs_cache
def record_global_run(self):
    """Increment the global run counter, rolling back the cache on write failure."""
    with self.lock:
        self._global_runs_cache += 1
        try:
            payload = self._read_json_file(self.global_runs_file)
            payload["total_runs"] = self._global_runs_cache
            payload["last_run"] = datetime.now().isoformat()
            self._write_json_file(self.global_runs_file, payload)
        except Exception:
            # Undo the in-memory bump so cache and file stay consistent.
            self._global_runs_cache -= 1
            raise
def requires_manual_approval(self) -> bool:
    """True once the global run count has reached the configured limit."""
    return self.get_global_runs() >= self.global_limit
def has_manual_approval(self) -> bool:
    """True when a non-expired manual approval record exists on disk."""
    with self.lock:
        record = self._read_json_file(self.approval_file)

        if not record.get("approved", False):
            return False

        stamp = record.get("approval_date")
        if not stamp:
            return False

        try:
            granted = datetime.fromisoformat(stamp)
        except (TypeError, ValueError):
            # Corrupt timestamp: treat as not approved.
            return False

        # Approvals expire after a configurable number of hours.
        window = timedelta(hours=get_automation_limits()['approval_hours'])
        return datetime.now() < granted + window
def check_and_notify_limits(self):
    """Scan cached counters and email a notification for each limit reached.

    FIX: state is snapshotted under the lock, but the (slow, network-bound)
    SMTP sends now happen OUTSIDE it, so a stalled mail server cannot block
    every other automation thread waiting on the RLock. Also drops the
    pointless f-prefix on placeholder-free subject strings.

    Returns:
        list[str]: labels describing each notification attempted.
    """
    pending = []  # (subject, message, label) tuples collected under the lock

    with self.lock:
        # PRs that have exhausted their attempt budget.
        for pr_key, attempts in self._pr_attempts_cache.items():
            if len(attempts) >= self.pr_limit:
                pending.append((
                    "PR Automation Limit Reached",
                    f"PR {pr_key} has reached the maximum attempt limit of {self.pr_limit}.",
                    f"PR {pr_key}",
                ))

        # Global run budget exhausted.
        if self._global_runs_cache >= self.global_limit:
            pending.append((
                "Global Automation Limit Reached",
                f"Global automation runs have reached the maximum limit of {self.global_limit}.",
                "Global limit",
            ))

    notifications_sent = []
    for subject, message, label in pending:
        self._send_limit_notification(subject, message)
        notifications_sent.append(label)
    return notifications_sent
def _send_limit_notification(self, subject: str, message: str):
    """Best-effort email for a reached limit; failures are logged, never raised."""
    try:
        # Try to use the more complete email notification method
        self._send_notification(subject, message)
    except Exception as e:
        # If email fails, just log it - don't break automation
        self.logger.error("Failed to send email notification: %s", e)
        self.logger.debug("Notification subject: %s", subject)
        self.logger.debug("Notification body: %s", message)
def grant_manual_approval(self, approver_email: str, approval_time: Optional[datetime] = None):
    """Record a manual approval, timestamped now unless a time is supplied."""
    with self.lock:
        granted_at = approval_time or datetime.now()
        self._write_json_file(self.approval_file, {
            "approved": True,
            "approval_date": granted_at.isoformat(),
            "approver": approver_email,
        })
def _get_smtp_credentials(self):
    """Fetch SMTP username/password from keyring, falling back to env vars.

    Returns a (username, password) tuple; either element may be None when
    no source provides it.
    """
    username = password = None

    if HAS_KEYRING:
        try:
            username = keyring.get_password("worldarchitect-automation", "smtp_username")
            password = keyring.get_password("worldarchitect-automation", "smtp_password")
        except Exception:
            # Keyring backends can fail in headless environments; fall through.
            self.logger.debug("Keyring lookup failed for SMTP credentials", exc_info=True)
            username = password = None

    # Environment variables fill whatever keyring did not provide.
    if username is None:
        username = os.environ.get('SMTP_USERNAME') or os.environ.get('EMAIL_USER')
    if password is None:
        password = os.environ.get('SMTP_PASSWORD') or os.environ.get('EMAIL_PASS')

    return username, password
def _send_notification(self, subject: str, message: str) -> bool:
    """Send an email notification with secure credential handling.

    Returns True on success, False on any configuration or delivery
    problem; never raises, so email issues cannot break automation.
    """
    try:
        # Load email configuration
        smtp_server = os.environ.get('SMTP_SERVER', 'smtp.gmail.com')
        smtp_port = int(os.environ.get('SMTP_PORT', '587'))
        username, password = self._get_smtp_credentials()
        to_email = os.environ.get('EMAIL_TO')
        from_email = os.environ.get('EMAIL_FROM') or username

        if not (username and password and to_email and from_email):
            self.logger.info("Email configuration incomplete - skipping notification")
            return False

        msg = MIMEMultipart()
        msg['From'] = from_email
        msg['To'] = to_email
        msg['Subject'] = f"[WorldArchitect Automation] {subject}"

        body = f"""
{message}

Time: {datetime.now().isoformat()}
System: PR Automation Safety Manager

This is an automated notification from the WorldArchitect.AI automation system.
"""

        msg.attach(MIMEText(body, 'plain'))

        # FIX: bound the connection with a timeout so a hung SMTP server
        # cannot stall an automation run indefinitely (default is no timeout).
        server = smtplib.SMTP(smtp_server, smtp_port, timeout=30)
        try:
            server.ehlo()
            server.starttls()
            server.ehlo()  # re-identify over the now-encrypted channel
            if username and password:
                server.login(username, password)
            server.send_message(msg)
        finally:
            server.quit()
        self.logger.info("Email notification sent successfully: %s", subject)
        return True

    except smtplib.SMTPAuthenticationError as e:
        self.logger.error(f"SMTP authentication failed - check credentials: {e}")
        return False
    except smtplib.SMTPRecipientsRefused as e:
        self.logger.error(f"Email recipients refused: {e}")
        return False
    except smtplib.SMTPException as e:
        self.logger.error(f"SMTP error sending notification: {e}")
        return False
    except OSError as e:
        self.logger.error(f"Network error sending notification: {e}")
        return False
    except Exception as e:
        # Log error but don't fail automation
        self.logger.error(f"Unexpected error sending notification: {e}")
        return False
def _clear_global_runs(self):
    """Reset the global run counter to zero (testing hook)."""
    with self.lock:
        self._global_runs_cache = 0
        payload = self._read_json_file(self.global_runs_file)
        payload["total_runs"] = 0
        payload["last_run"] = None
        self._write_json_file(self.global_runs_file, payload)
def _clear_pr_attempts(self):
    """Reset all PR attempt tracking, in memory and on disk (testing hook)."""
    with self.lock:
        self._pr_attempts_cache.clear()
        self._write_json_file(self.pr_attempts_file, {})
def load_config(self, config_file: str) -> dict:
    """Load limits from *config_file*, applying pr_limit/global_limit overrides.

    Returns the parsed config dict, or {} when the file is missing,
    unreadable, or not valid JSON (limits are left unchanged in that case).
    """
    try:
        with open(config_file, 'r', encoding='utf-8') as f:
            config = json.load(f)
    except (OSError, json.JSONDecodeError):
        # FIX: OSError also covers permission and I/O errors, not just a
        # missing file, so a bad config path cannot crash the caller.
        return {}

    # Update limits from config
    if 'pr_limit' in config:
        self.pr_limit = config['pr_limit']
    if 'global_limit' in config:
        self.global_limit = config['global_limit']
    return config
def save_config(self, config_file: str, config: dict):
    """Persist *config* to *config_file* via the atomic JSON writer."""
    self._write_json_file(config_file, config)
def has_email_config(self) -> bool:
    """True when an SMTP server plus credentials are discoverable."""
    try:
        username, password = self._get_smtp_credentials()
        server = os.environ.get('SMTP_SERVER')
        return bool(server and username and password)
    except Exception:
        # Any lookup failure means email is effectively unconfigured.
        return False
def send_notification(self, subject: str, message: str) -> bool:
    """Public wrapper around _send_notification that never raises.

    Returns False on any failure instead of propagating the exception.
    """
    try:
        return self._send_notification(subject, message)
    except Exception:
        return False
def _is_email_configured(self) -> bool:
    """True only when every setting needed to actually send mail is present."""
    try:
        username, password = self._get_smtp_credentials()
        required = (
            os.environ.get('SMTP_SERVER'),
            os.environ.get('SMTP_PORT'),
            os.environ.get('EMAIL_TO'),
            username,
            password,
        )
        return all(required)
    except Exception:
        return False
def main():
    """CLI interface for the safety manager.

    Exit codes: 0 when the checked action is allowed, 1 when blocked.
    """
    parser = argparse.ArgumentParser(description='Automation Safety Manager')
    parser.add_argument('--data-dir', default='/tmp/automation_safety',
                        help='Directory for safety data files')
    parser.add_argument('--check-pr', type=int, metavar='PR_NUMBER',
                        help='Check if PR can be processed')
    parser.add_argument('--record-pr', nargs=2, metavar=('PR_NUMBER', 'RESULT'),
                        help='Record PR attempt (result: success|failure)')
    parser.add_argument('--repo', type=str,
                        help='Repository name (owner/repo) for PR attempt operations')
    parser.add_argument('--branch', type=str,
                        help='Branch name for PR attempt tracking')
    parser.add_argument('--check-global', action='store_true',
                        help='Check if global run can start')
    parser.add_argument('--record-global', action='store_true',
                        help='Record global run')
    parser.add_argument('--manual_override', type=str, metavar='EMAIL',
                        help='Grant manual override (emergency use only)')
    parser.add_argument('--status', action='store_true',
                        help='Show current status')

    args = parser.parse_args()

    # Ensure data directory exists
    os.makedirs(args.data_dir, exist_ok=True)

    manager = AutomationSafetyManager(args.data_dir)

    # FIX: compare against None instead of relying on truthiness —
    # '--check-pr 0' parses to 0, which is falsy and was silently ignored.
    if args.check_pr is not None:
        can_process = manager.can_process_pr(args.check_pr, repo=args.repo, branch=args.branch)
        attempts = manager.get_pr_attempts(args.check_pr, repo=args.repo, branch=args.branch)
        repo_label = f" ({args.repo})" if args.repo else ""
        branch_label = f" [{args.branch}]" if args.branch else ""
        print(
            f"PR #{args.check_pr}{repo_label}{branch_label}: "
            f"{'ALLOWED' if can_process else 'BLOCKED'} ({attempts}/{manager.pr_limit} attempts)"
        )
        sys.exit(0 if can_process else 1)

    elif args.record_pr:
        pr_number, result = args.record_pr
        manager.record_pr_attempt(int(pr_number), result, repo=args.repo, branch=args.branch)
        print(
            f"Recorded {result} for PR #{pr_number}"
            f"{' in ' + args.repo if args.repo else ''}"
            f"{' [' + args.branch + ']' if args.branch else ''}"
        )

    elif args.check_global:
        can_start = manager.can_start_global_run()
        runs = manager.get_global_runs()
        print(f"Global runs: {'ALLOWED' if can_start else 'BLOCKED'} ({runs}/{manager.global_limit} runs)")
        sys.exit(0 if can_start else 1)

    elif args.record_global:
        manager.record_global_run()
        runs = manager.get_global_runs()
        print(f"Recorded global run #{runs}")

    elif args.manual_override:
        manager.grant_manual_approval(args.manual_override)
        print(f"Manual override granted by {args.manual_override}")

    elif args.status:
        runs = manager.get_global_runs()
        has_approval = manager.has_manual_approval()
        requires_approval = manager.requires_manual_approval()

        print(f"Global runs: {runs}/{manager.global_limit}")
        print(f"Requires approval: {requires_approval}")
        print(f"Has approval: {has_approval}")

        pr_data = manager._read_json_file(manager.pr_attempts_file)

        if pr_data:
            print("PR attempts:")
            for pr_key, attempts in pr_data.items():
                # Legacy files may still hold bare counts instead of lists.
                count = len(attempts) if isinstance(attempts, list) else int(attempts or 0)
                status = "BLOCKED" if count >= manager.pr_limit else "OK"

                # Decode the labeled 'r=..||p=..||b=..' key for display.
                repo_label = ""
                branch_label = ""
                pr_label = pr_key
                if "||" in pr_key:
                    segments = {}
                    for segment in pr_key.split("||"):
                        if "=" in segment:
                            k, v = segment.split("=", 1)
                            segments[k] = v
                    repo_label = segments.get("r", "")
                    pr_label = segments.get("p", pr_label)
                    branch_label = segments.get("b", "")

                display = f"PR #{pr_label}"
                if repo_label:
                    display += f" ({repo_label})"
                if branch_label:
                    display += f" [{branch_label}]"

                print(f"  {display}: {count}/{manager.pr_limit} ({status})")
        else:
            print("No PR attempts recorded")

    else:
        parser.print_help()
# Script entry point: delegate to the CLI handler.
if __name__ == '__main__':
    main()