ghostfix-ai 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent/__init__.py +1 -0
- agent/daemon.py +438 -0
- agent/daemon_runtime.py +110 -0
- agent/terminal_watcher.py +585 -0
- cli/__init__.py +1 -0
- cli/main.py +1266 -0
- core/__init__.py +1 -0
- core/autofix.py +355 -0
- core/command_rerunner.py +35 -0
- core/confidence.py +23 -0
- core/config.py +78 -0
- core/context.py +75 -0
- core/data_generator.py +272 -0
- core/decision_engine.py +1145 -0
- core/demo_report.py +379 -0
- core/detector.py +83 -0
- core/doctor.py +155 -0
- core/event_classifier.py +236 -0
- core/feedback.py +75 -0
- core/fix_audit.py +76 -0
- core/formatter.py +296 -0
- core/incidents.py +137 -0
- core/language_diagnostics.py +229 -0
- core/local_llm.py +253 -0
- core/log_events.py +220 -0
- core/logger.py +71 -0
- core/memory.py +302 -0
- core/parser.py +319 -0
- core/patch_generator.py +205 -0
- core/patch_validator.py +173 -0
- core/production_signals.py +43 -0
- core/production_validator.py +216 -0
- core/project_context.py +401 -0
- core/release_verifier.py +125 -0
- core/root_cause_analyzer.py +538 -0
- core/rules.py +15 -0
- core/runner.py +215 -0
- core/runtime_detector.py +8 -0
- core/safety_policy.py +77 -0
- core/training_export.py +227 -0
- core/training_memory.py +93 -0
- ghostfix/__init__.py +2 -0
- ghostfix_ai-0.2.0.dist-info/METADATA +542 -0
- ghostfix_ai-0.2.0.dist-info/RECORD +78 -0
- ghostfix_ai-0.2.0.dist-info/WHEEL +5 -0
- ghostfix_ai-0.2.0.dist-info/entry_points.txt +2 -0
- ghostfix_ai-0.2.0.dist-info/licenses/LICENSE +21 -0
- ghostfix_ai-0.2.0.dist-info/top_level.txt +6 -0
- ml/__init__.py +1 -0
- ml/aggressive_prune.py +292 -0
- ml/brain_v3_features.py +346 -0
- ml/brain_v4_inference.py +701 -0
- ml/check_brain_v4_model.py +178 -0
- ml/configs/brain_v4_lora_config.yaml +34 -0
- ml/download_base_model.py +37 -0
- ml/embedding_retriever.py +108 -0
- ml/evaluate_brain_v2_safety.py +304 -0
- ml/evaluate_brain_v31.py +280 -0
- ml/evaluate_brain_v33.py +50 -0
- ml/evaluate_brain_v4.py +337 -0
- ml/evaluate_runtime_brain_v4.py +766 -0
- ml/evaluate_watch_mode.py +296 -0
- ml/feedback_logger.py +115 -0
- ml/ghostfix_brain_predict.py +213 -0
- ml/ghostfix_brain_v2_predict.py +183 -0
- ml/ghostfix_brain_v33_predict.py +162 -0
- ml/model_inference.py +485 -0
- ml/predict_fix.py +235 -0
- ml/prepare_brain_v4_lora_dataset.py +738 -0
- ml/project_audit.py +393 -0
- ml/retriever_router.py +52 -0
- ml/shadow_mode_runner.py +238 -0
- ml/train_brain_v4_lora.py +525 -0
- ml/train_ghostfix_brain.py +299 -0
- ml/validate_brain_v33_production_candidate.py +234 -0
- utils/__init__.py +1 -0
- utils/env.py +16 -0
- utils/logger.py +9 -0
agent/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
agent/daemon.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
1
|
+
"""
|
|
2
|
+
GhostFix AI - Background Daemon
|
|
3
|
+
Monitors terminal processes and detects errors automatically
|
|
4
|
+
"""
|
|
5
|
+
import os
|
|
6
|
+
import sys
|
|
7
|
+
import time
|
|
8
|
+
import signal
|
|
9
|
+
import threading
|
|
10
|
+
import subprocess
|
|
11
|
+
import queue
|
|
12
|
+
import re
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Optional, List, Dict, Callable
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from datetime import datetime
|
|
17
|
+
import json
|
|
18
|
+
|
|
19
|
+
# Add parent to path
|
|
20
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
21
|
+
|
|
22
|
+
from core.memory import LocalMemory
|
|
23
|
+
from core.detector import detect_error
|
|
24
|
+
from core.parser import parse_error
|
|
25
|
+
from core.decision_engine import decide_fix
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class DaemonConfig:
    """Configuration for the GhostFix background daemon.

    Controls polling cadence, which monitors are enabled, and how detected
    errors are reported.
    """
    # Seconds between monitor polls (shared by process and file monitors).
    poll_interval: float = 0.5  # seconds
    # Maximum output lines retained per monitored source.
    max_buffer_lines: int = 1000
    # Optional path to custom error-pattern definitions (unused here; presumably
    # consumed by the detector — TODO confirm).
    error_patterns_path: Optional[Path] = None
    # When True, detected errors trigger automatic fixing (not yet implemented).
    auto_fix: bool = False
    # When True, print a human-readable notification for each detected error.
    notify: bool = True
    # If set, daemon logging is also written to this file.
    log_file: Optional[Path] = None
    # Enable the subprocess stderr monitor.
    watch_processes: bool = True
    # Enable the log-file tail monitor.
    watch_files: bool = False
    # Directories to watch (currently not consumed by the daemon in this file).
    watch_directories: List[str] = field(default_factory=list)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class DetectedError:
    """A single error event detected by a monitor.

    Produced by ProcessMonitor/FileMonitor and consumed by GhostFixDaemon.
    """
    # When the error was detected (local time).
    timestamp: datetime
    # Parsed error class name, e.g. "ValueError"; "Unknown" if unparsed.
    error_type: str
    # Parsed error message; falls back to the raw line.
    error_message: str
    # Raw text the error was detected in (a single line in this daemon).
    traceback: str
    # Source file path from the parsed error, if available.
    file_path: Optional[str]
    # Source line number from the parsed error, if available.
    line_number: Optional[int]
    # PID of the monitored process, or None for file-based detections.
    process_id: Optional[int]
    # Surrounding context; currently the same raw line.
    context: str
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ProcessMonitor:
    """Watch spawned subprocesses and surface error lines from their stderr.

    A background daemon thread polls each registered process; lines that the
    detector classifies as errors are parsed and queued as DetectedError
    objects for the daemon's main loop to drain via get_errors().
    """

    def __init__(self, config: DaemonConfig):
        self.config = config
        self.running = False
        # Live processes keyed by PID; finished ones are reaped each poll.
        self.processes: Dict[int, subprocess.Popen] = {}
        # Thread-safe hand-off of DetectedError objects to the consumer.
        self.error_queue: queue.Queue = queue.Queue()

    def start(self):
        """Launch the background polling thread."""
        self.running = True
        self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self._monitor_thread.start()

    def stop(self):
        """Ask the polling thread to exit and wait briefly for it."""
        self.running = False
        if hasattr(self, '_monitor_thread'):
            self._monitor_thread.join(timeout=2)

    def _monitor_loop(self):
        """Poll watched processes until stop() is called."""
        while self.running:
            self._check_processes()
            time.sleep(self.config.poll_interval)

    def _check_processes(self):
        """Reap finished processes and scan live stderr streams."""
        for pid in list(self.processes):
            proc = self.processes[pid]
            if proc.poll() is not None:
                # Process exited; stop tracking it.
                del self.processes[pid]
                continue

            try:
                stream = proc.stderr
                if stream:
                    import select
                    # Non-blocking readability probe (timeout=0). NOTE(review):
                    # select on pipes is POSIX-only; on Windows this raises and
                    # is swallowed below.
                    readable, _, _ = select.select([stream], [], [], 0)
                    if readable:
                        text = stream.readline()
                        if text:
                            self._check_output(text, pid)
            except Exception:
                # Best-effort polling: platform quirks must not kill the
                # monitor thread.
                pass

    def _check_output(self, line: str, pid: int):
        """Classify one output line; enqueue a DetectedError on a match."""
        verdict = detect_error(line)
        if not (verdict and verdict.get("status") == "error"):
            return
        parsed = parse_error(line)
        if not parsed:
            return
        self.error_queue.put(
            DetectedError(
                timestamp=datetime.now(),
                error_type=parsed.get("type", "Unknown"),
                error_message=parsed.get("message", line),
                traceback=line,
                file_path=parsed.get("file"),
                line_number=parsed.get("line"),
                process_id=pid,
                context=line,
            )
        )

    def watch_process(self, proc: subprocess.Popen):
        """Register an already-started process for monitoring."""
        self.processes[proc.pid] = proc

    def get_errors(self) -> List[DetectedError]:
        """Drain and return every queued error (non-blocking)."""
        drained: List[DetectedError] = []
        while True:
            try:
                drained.append(self.error_queue.get_nowait())
            except queue.Empty:
                break
        return drained
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
class FileMonitor:
    """Monitor files (e.g. log files) for newly appended error lines.

    Each watched file is tailed from its last-seen offset; only text appended
    since the previous poll is scanned, so each error is reported exactly once.
    """

    def __init__(self, config: DaemonConfig):
        self.config = config
        self.running = False
        # Last-read offset per watched file path (tail position).
        self.file_positions: Dict[str, int] = {}
        # Thread-safe hand-off of DetectedError objects to the consumer.
        self.error_queue: queue.Queue = queue.Queue()

    def start(self):
        """Launch the background polling thread."""
        self.running = True
        self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self._monitor_thread.start()

    def stop(self):
        """Ask the polling thread to exit and wait briefly for it."""
        self.running = False
        if hasattr(self, '_monitor_thread'):
            self._monitor_thread.join(timeout=2)

    def watch_file(self, path: str):
        """Add a file to watch; existing content is skipped (tail semantics)."""
        if os.path.exists(path):
            self.file_positions[path] = os.path.getsize(path)
        else:
            self.file_positions[path] = 0

    def _monitor_loop(self):
        """Poll all watched files until stop() is called."""
        while self.running:
            self._check_files()
            time.sleep(self.config.poll_interval)

    def _check_files(self):
        """Scan each watched file's newly appended lines for errors."""
        for path, position in list(self.file_positions.items()):
            try:
                if not os.path.exists(path):
                    continue

                current_size = os.path.getsize(path)
                if current_size < position:
                    # File was truncated/rotated: restart from the beginning.
                    # BUGFIX: also persist the reset, otherwise the stale large
                    # offset would suppress reads until the file regrew past it.
                    position = 0
                    self.file_positions[path] = 0

                if current_size > position:
                    with open(path, 'r', encoding='utf-8', errors='ignore') as f:
                        f.seek(position)
                        new_lines = f.readlines()
                        # BUGFIX: remember where reading stopped. The previous
                        # code did f.seek(0) before f.tell(), storing offset 0
                        # and therefore re-scanning (and re-reporting) the
                        # whole file on every poll.
                        self.file_positions[path] = f.tell()

                    for line in new_lines:
                        result = detect_error(line)
                        if result and result.get("status") == "error":
                            error = parse_error(line)
                            if error:
                                detected = DetectedError(
                                    timestamp=datetime.now(),
                                    error_type=error.get("type", "Unknown"),
                                    error_message=error.get("message", line),
                                    traceback=line,
                                    file_path=error.get("file"),
                                    line_number=error.get("line"),
                                    process_id=None,
                                    context=line
                                )
                                self.error_queue.put(detected)

            except Exception as e:
                # Best-effort: keep monitoring other files even if one fails.
                print(f"Error checking file {path}: {e}")

    def get_errors(self) -> List[DetectedError]:
        """Drain and return every queued error (non-blocking)."""
        errors = []
        while not self.error_queue.empty():
            try:
                errors.append(self.error_queue.get_nowait())
            except queue.Empty:
                break
        return errors
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
class GhostFixDaemon:
    """Main daemon: owns the process/file monitors and reacts to errors.

    Lifecycle: construct (registers SIGINT/SIGTERM handlers), then start(),
    which runs the blocking main loop until a shutdown signal or stop().
    Detected errors are persisted to local memory, run through the decision
    engine for a fix suggestion, dispatched to registered callbacks, and
    optionally printed as a notification.
    """

    def __init__(self, config: Optional[DaemonConfig] = None):
        self.config = config or DaemonConfig()
        try:
            self.memory = LocalMemory()
        except Exception:
            # Memory is optional: daemon still runs without persistence.
            self.memory = None
        self.process_monitor = ProcessMonitor(self.config)
        self.file_monitor = FileMonitor(self.config)
        self.running = False
        # Callables invoked as callback(error, fix) for every detected error.
        self.callbacks: List[Callable] = []
        # Placeholder; never assigned elsewhere in this file (only checked in
        # stop()) — presumably lazily set by external code. TODO confirm.
        self.inference_engine = None

        # Setup logging
        self._setup_logging()

        # Setup signal handlers
        # NOTE: signal.signal only works from the main thread; constructing
        # the daemon elsewhere would raise.
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

    def _setup_logging(self):
        """Configure root logging to file + console when a log file is set."""
        if self.config.log_file:
            import logging
            # NOTE(review): FileHandler requires the log file's parent
            # directory to exist — confirm callers create it.
            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                handlers=[
                    logging.FileHandler(self.config.log_file),
                    logging.StreamHandler()
                ]
            )

    def _signal_handler(self, signum, frame):
        """Handle SIGINT/SIGTERM: stop monitors and exit the process."""
        print("\n🛑 Received shutdown signal, stopping daemon...")
        self.stop()
        sys.exit(0)

    def start(self):
        """Start enabled monitors and enter the blocking main loop."""
        print("\n" + "=" * 50)
        print("👻 GhostFix Daemon Starting...")
        print("=" * 50)

        self.running = True

        # Start monitors
        if self.config.watch_processes:
            print("📡 Starting process monitor...")
            self.process_monitor.start()

        if self.config.watch_files:
            print("📁 Starting file monitor...")
            self.file_monitor.start()

        # Initialize inference engine (lazy load)
        # (No engine is actually created here yet — only announced.)
        print("🧠 Initializing inference engine...")

        print("\n✅ Daemon started successfully!")
        print(f"   Poll interval: {self.config.poll_interval}s")
        print(f"   Auto-fix: {self.config.auto_fix}")
        print(f"   Notify: {self.config.notify}")

        # Main loop
        self._main_loop()

    def stop(self):
        """Stop both monitors and clean up the inference engine, if any."""
        print("\n👻 GhostFix Daemon Stopping...")

        self.running = False
        self.process_monitor.stop()
        self.file_monitor.stop()

        if self.inference_engine:
            self.inference_engine.cleanup()

        print("✅ Daemon stopped")

    def _main_loop(self):
        """Drain errors from both monitors each poll interval until stopped."""
        while self.running:
            # Check for errors from process monitor
            errors = self.process_monitor.get_errors()
            for error in errors:
                self._handle_error(error)

            # Check for errors from file monitor
            errors = self.file_monitor.get_errors()
            for error in errors:
                self._handle_error(error)

            time.sleep(self.config.poll_interval)

    def _handle_error(self, error: DetectedError):
        """Process one detected error: persist, suggest fix, notify, auto-fix."""
        print(f"\n🚨 Error detected: {error.error_type}")
        print(f"   Message: {error.error_message[:100]}...")

        if self.memory:
            # Persist with empty cause/fix; the suggestion below is not
            # written back to memory here.
            self.memory.save_error(
                error_type=error.error_type,
                error_message=error.error_message,
                cause="",
                fix="",
                context=error.context
            )

        # Get fix suggestion
        fix = self._get_fix(error)

        # Notify callbacks
        for callback in self.callbacks:
            try:
                callback(error, fix)
            except Exception as e:
                # A failing callback must not break error handling.
                print(f"Error in callback: {e}")

        # Print notification
        if self.config.notify:
            self._notify_error(error, fix)

        # Auto-fix if enabled
        if self.config.auto_fix and fix.get("fix"):
            self._apply_fix(error, fix)

    def _get_fix(self, error: DetectedError) -> Dict:
        """Run the decision engine on the error; returns its dict form."""
        # Re-shape DetectedError into the parsed-error dict decide_fix expects.
        parsed = {
            "raw": error.traceback,
            "type": error.error_type,
            "message": error.error_message,
            "file": error.file_path,
            "line": error.line_number,
            "missing_package": None,
        }
        return decide_fix(parsed, {"snippet": error.context}).to_dict()

    def _notify_error(self, error: DetectedError, fix: Dict):
        """Print a formatted notification with cause and suggested fix."""
        print("\n" + "=" * 50)
        print(f"🚨 {error.error_type}")
        print("=" * 50)
        print(f"Message: {error.error_message}")

        if fix.get("cause"):
            print(f"\n📍 Cause: {fix['cause']}")

        if fix.get("fix"):
            print(f"\n🔧 Fix:\n{fix['fix']}")

        print("=" * 50)

    def _apply_fix(self, error: DetectedError, fix: Dict):
        """Apply automatic fix (placeholder — intentionally not implemented)."""
        # This is dangerous - implement with caution
        print(f"\n⚠️  Auto-fix requested but not implemented yet")
        pass

    def add_callback(self, callback: Callable):
        """Register callback(error, fix) to be invoked on each error."""
        self.callbacks.append(callback)

    def watch_file(self, path: str):
        """Add a file to the file monitor's watch list."""
        self.file_monitor.watch_file(path)

    def run_command(self, cmd: List[str], cwd: Optional[str] = None) -> subprocess.Popen:
        """Spawn *cmd* with piped output and monitor it; returns the Popen."""
        proc = subprocess.Popen(
            cmd,
            cwd=cwd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=1
        )

        self.process_monitor.watch_process(proc)
        return proc
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def create_daemon(
    poll_interval: float = 0.5,
    auto_fix: bool = False,
    notify: bool = True,
    log_file: Optional[str] = None
) -> GhostFixDaemon:
    """Build a GhostFixDaemon from the given settings.

    Convenience wrapper that assembles a DaemonConfig (converting *log_file*
    to a Path when provided) and hands it to the daemon constructor.
    """
    log_path = Path(log_file) if log_file else None
    settings = DaemonConfig(
        poll_interval=poll_interval,
        auto_fix=auto_fix,
        notify=notify,
        log_file=log_path,
    )
    return GhostFixDaemon(settings)
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
if __name__ == "__main__":
    # Example usage: run with notifications enabled and a log file.
    daemon = create_daemon(
        poll_interval=0.5,
        notify=True,
        log_file="ghostfix/daemon.log"
    )

    def my_callback(error: DetectedError, fix: Dict):
        # Demonstrates how consumers can hook error notifications.
        print(f"Custom callback: {error.error_type}")

    # Register the demo callback, then enter the blocking daemon loop.
    daemon.add_callback(my_callback)
    daemon.start()
|
agent/daemon_runtime.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import time
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from agent.terminal_watcher import watch_command
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Directory (relative to the project root) holding daemon runtime files.
DAEMON_DIR = ".ghostfix"
# JSON file recording the daemon's current status, pid, and run count.
STATE_FILE = "daemon.json"
# Sentinel file whose presence asks the daemon loop to stop.
STOP_FILE = "daemon.stop"
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def daemon_dir(root: Optional[Path] = None) -> Path:
    """Return the GhostFix daemon directory under *root* (default: cwd)."""
    base = root or Path.cwd()
    return base / DAEMON_DIR
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def daemon_state_path(root: Optional[Path] = None) -> Path:
    """Return the path of the daemon's JSON state file under *root*."""
    base = daemon_dir(root)
    return base / STATE_FILE
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def daemon_stop_path(root: Optional[Path] = None) -> Path:
    """Return the path of the daemon's stop-sentinel file under *root*."""
    base = daemon_dir(root)
    return base / STOP_FILE
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def read_daemon_status(root: Optional[Path] = None) -> dict:
    """Return the persisted daemon state, or a stub when absent or corrupt.

    The returned dict always carries a "status" key; when a state file exists
    its path is recorded under "state_file".
    """
    state_file = daemon_state_path(root)
    if not state_file.exists():
        return {"status": "stopped"}
    try:
        status = json.loads(state_file.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        # Unparseable state file: report the path so callers can inspect it.
        return {"status": "unknown", "state_file": str(state_file)}
    status.setdefault("status", "unknown")
    status["state_file"] = str(state_file)
    return status
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def request_daemon_stop(root: Optional[Path] = None) -> Path:
    """Drop a stop-sentinel file that the daemon loop polls for; return its path."""
    stop_file = daemon_stop_path(root)
    stop_file.parent.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().isoformat(timespec="seconds")
    stop_file.write_text(stamp, encoding="utf-8")
    return stop_file
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def start_daemon(
    command: str,
    *,
    cwd: Optional[str] = None,
    auto_fix: bool = False,
    verbose: bool = False,
    restart_delay: float = 1.0,
    max_runs: Optional[int] = None,
) -> int:
    """Run a foreground daemon loop around watch mode.

    Repeatedly runs *command* under the terminal watcher, restarting after
    ``restart_delay`` seconds, until the stop-sentinel file appears (see
    ``request_daemon_stop``), ``max_runs`` is reached, or the user hits
    Ctrl-C. State (pid, run count, status) is persisted to the daemon state
    file before and after each run.

    Returns the return code of the last completed run (0 if none ran).
    """
    root = Path(cwd) if cwd else Path.cwd()
    daemon_dir(root).mkdir(parents=True, exist_ok=True)
    stop_path = daemon_stop_path(root)
    if stop_path.exists():
        # Consume any leftover stop request from a previous session so it
        # cannot immediately abort this one.
        stop_path.unlink()

    _write_state(root, status="running", command=command, runs=0)
    runs = 0
    interrupted = False
    last_returncode = 0

    try:
        while not stop_path.exists():
            runs += 1
            _write_state(root, status="running", command=command, runs=runs)
            result = watch_command(command, cwd=str(root), auto_fix=auto_fix, verbose=verbose)
            last_returncode = result.returncode if result.returncode is not None else 0

            if max_runs is not None and runs >= max_runs:
                break
            if stop_path.exists():
                # Re-check before sleeping so stop requests act promptly.
                break
            time.sleep(max(0.0, restart_delay))
    except KeyboardInterrupt:
        interrupted = True
    finally:
        # Always consume the stop file and record the final state, even when
        # the loop exits via interrupt or error.
        if stop_path.exists():
            stop_path.unlink()
        _write_state(
            root,
            status="stopped",
            command=command,
            runs=runs,
            stopped_at=datetime.now().isoformat(timespec="seconds"),
            interrupted=interrupted,
            last_returncode=last_returncode,
        )

    return last_returncode
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _write_state(root: Path, **data) -> None:
    """Persist the daemon state file, merging *data* over pid/cwd/timestamp."""
    target = daemon_state_path(root)
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = {
        "pid": os.getpid(),
        "cwd": str(root),
        "updated_at": datetime.now().isoformat(timespec="seconds"),
    }
    # Caller-supplied fields override the defaults (same as {**base, **data}).
    payload.update(data)
    target.write_text(json.dumps(payload, indent=2), encoding="utf-8")
|