devloop 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devloop/__init__.py +3 -0
- devloop/agents/__init__.py +33 -0
- devloop/agents/agent_health_monitor.py +105 -0
- devloop/agents/ci_monitor.py +237 -0
- devloop/agents/code_rabbit.py +248 -0
- devloop/agents/doc_lifecycle.py +374 -0
- devloop/agents/echo.py +24 -0
- devloop/agents/file_logger.py +46 -0
- devloop/agents/formatter.py +511 -0
- devloop/agents/git_commit_assistant.py +421 -0
- devloop/agents/linter.py +399 -0
- devloop/agents/performance_profiler.py +284 -0
- devloop/agents/security_scanner.py +322 -0
- devloop/agents/snyk.py +292 -0
- devloop/agents/test_runner.py +484 -0
- devloop/agents/type_checker.py +242 -0
- devloop/cli/__init__.py +1 -0
- devloop/cli/commands/__init__.py +1 -0
- devloop/cli/commands/custom_agents.py +144 -0
- devloop/cli/commands/feedback.py +161 -0
- devloop/cli/commands/summary.py +50 -0
- devloop/cli/main.py +430 -0
- devloop/cli/main_v1.py +144 -0
- devloop/collectors/__init__.py +17 -0
- devloop/collectors/base.py +55 -0
- devloop/collectors/filesystem.py +126 -0
- devloop/collectors/git.py +171 -0
- devloop/collectors/manager.py +159 -0
- devloop/collectors/process.py +221 -0
- devloop/collectors/system.py +195 -0
- devloop/core/__init__.py +21 -0
- devloop/core/agent.py +206 -0
- devloop/core/agent_template.py +498 -0
- devloop/core/amp_integration.py +166 -0
- devloop/core/auto_fix.py +224 -0
- devloop/core/config.py +272 -0
- devloop/core/context.py +0 -0
- devloop/core/context_store.py +530 -0
- devloop/core/contextual_feedback.py +311 -0
- devloop/core/custom_agent.py +439 -0
- devloop/core/debug_trace.py +289 -0
- devloop/core/event.py +105 -0
- devloop/core/event_store.py +316 -0
- devloop/core/feedback.py +311 -0
- devloop/core/learning.py +351 -0
- devloop/core/manager.py +219 -0
- devloop/core/performance.py +433 -0
- devloop/core/proactive_feedback.py +302 -0
- devloop/core/summary_formatter.py +159 -0
- devloop/core/summary_generator.py +275 -0
- devloop-0.2.0.dist-info/METADATA +705 -0
- devloop-0.2.0.dist-info/RECORD +55 -0
- devloop-0.2.0.dist-info/WHEEL +4 -0
- devloop-0.2.0.dist-info/entry_points.txt +3 -0
- devloop-0.2.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
"""Debug tracing utilities for agent execution flow.
|
|
2
|
+
|
|
3
|
+
Provides decorators and utilities for tracing agent execution,
|
|
4
|
+
detecting failures, and diagnosing issues.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import functools
|
|
9
|
+
import logging
|
|
10
|
+
import time
|
|
11
|
+
from datetime import datetime, UTC
|
|
12
|
+
from typing import Any, Callable, Dict, Optional
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ExecutionTrace:
    """Records timing, result, and failure details for one traced call."""

    def __init__(self, name: str, args: tuple = None, kwargs: dict = None):
        """
        Args:
            name: Human-readable identifier for the traced call.
            args: Positional arguments of the call (kept for diagnostics only).
            kwargs: Keyword arguments of the call (kept for diagnostics only).
        """
        self.name = name
        self.args = args or ()
        self.kwargs = kwargs or {}
        self.start_time: Optional[float] = None
        self.end_time: Optional[float] = None
        self.duration: Optional[float] = None
        self.result: Any = None
        self.exception: Optional[Exception] = None
        self.success = False

    @property
    def is_running(self) -> bool:
        """True while start() has been called but end() has not."""
        return self.start_time is not None and self.end_time is None

    @property
    def is_complete(self) -> bool:
        """True once end() has been called."""
        return self.end_time is not None

    def start(self):
        """Mark start of execution"""
        self.start_time = time.time()
        logger.debug(f"[TRACE] {self.name} START")

    def end(self, result: Any = None, exception: Exception = None):
        """Mark end of execution

        Args:
            result: Return value of the traced call, if any.
            exception: Exception raised by the traced call, if any.
        """
        self.end_time = time.time()
        self.duration = (
            self.end_time - self.start_time if self.start_time is not None else None
        )
        self.result = result
        self.exception = exception
        self.success = exception is None

        # BUG FIX: duration is None when end() is called without a prior
        # start(); formatting None with ':.3f' raised TypeError here.
        duration_text = (
            f"{self.duration:.3f}s" if self.duration is not None else "unknown duration"
        )

        if exception:
            logger.error(
                f"[TRACE] {self.name} END (failed after {duration_text}): {exception}"
            )
        else:
            logger.debug(f"[TRACE] {self.name} END ({duration_text})")

    def to_dict(self) -> Dict[str, Any]:
        """Convert trace to a JSON-serializable dictionary"""
        return {
            "name": self.name,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "duration": self.duration,
            "success": self.success,
            "error": str(self.exception) if self.exception else None,
        }
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
# Global trace history (in-memory for now).
# Appended to by the tracing decorators below; read via get_trace_history()
# and reset via clear_trace_history().
_trace_history: list[ExecutionTrace] = []
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def get_trace_history(limit: int = 100) -> list[ExecutionTrace]:
    """Return up to *limit* of the most recently recorded traces (oldest first)."""
    recent = _trace_history[-limit:]
    return recent
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def clear_trace_history():
    """Reset the global trace history to an empty list."""
    global _trace_history
    _trace_history = []
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def trace_execution(name: str = None):
    """Decorator that records an ExecutionTrace for each call of the wrapped
    function (sync or async).

    Args:
        name: Trace name; defaults to ``module.function_name``.

    Usage:
        @trace_execution("my_function")
        async def my_func():
            pass
    """

    def decorator(func: Callable) -> Callable:
        trace_name = name or f"{func.__module__}.{func.__name__}"

        def _begin_trace(args, kwargs) -> ExecutionTrace:
            # Create, start, and register a trace for one invocation.
            trace = ExecutionTrace(trace_name, args, kwargs)
            trace.start()
            _trace_history.append(trace)
            # BUG FIX: the history previously grew without bound in
            # long-running processes; cap it (mirroring the 100-entry cap
            # EventBus applies to its own event log).
            if len(_trace_history) > 1000:
                del _trace_history[: len(_trace_history) - 1000]
            return trace

        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            trace = _begin_trace(args, kwargs)
            try:
                result = await func(*args, **kwargs)
                trace.end(result=result)
                return result
            except Exception as e:
                trace.end(exception=e)
                raise

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            trace = _begin_trace(args, kwargs)
            try:
                result = func(*args, **kwargs)
                trace.end(result=result)
                return result
            except Exception as e:
                trace.end(exception=e)
                raise

        # Use appropriate wrapper based on function type
        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        return sync_wrapper

    return decorator
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def trace_agent_execution(agent_name: str):
    """Specialized decorator for an agent's async handle(event) method.

    Records an ExecutionTrace per event and logs processing, findings,
    and failures under the agent's name.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        async def wrapper(self, event, *args, **kwargs):
            trace = ExecutionTrace(
                f"agent.{agent_name}.handle",
                (event.type, event.source),
                {"event_id": event.id},
            )
            trace.start()
            _trace_history.append(trace)

            try:
                logger.debug(
                    f"[AGENT] {agent_name} processing event: {event.type} from {event.source}"
                )

                outcome = await func(self, event, *args, **kwargs)

                findings = getattr(outcome, "findings", None)
                if findings:
                    logger.debug(
                        f"[AGENT] {agent_name} found {len(findings)} issues"
                    )

                trace.end(result=outcome)
                return outcome

            except Exception as exc:
                logger.error(f"[AGENT] {agent_name} failed: {exc}")
                trace.end(exception=exc)
                raise

        return wrapper

    return decorator
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def trace_context_store(operation: str):
    """Trace context store operations (supports sync and async methods)."""

    def decorator(func: Callable) -> Callable:
        label = f"context_store.{operation}"

        def _open_trace() -> ExecutionTrace:
            # One trace per call, registered in the global history.
            t = ExecutionTrace(label)
            t.start()
            _trace_history.append(t)
            return t

        @functools.wraps(func)
        async def async_wrapper(self, *args, **kwargs):
            t = _open_trace()
            try:
                logger.debug(f"[CONTEXT] {operation} starting...")
                value = await func(self, *args, **kwargs)
                logger.debug(f"[CONTEXT] {operation} completed successfully")
                t.end(result=value)
                return value
            except Exception as exc:
                logger.error(f"[CONTEXT] {operation} failed: {exc}")
                t.end(exception=exc)
                raise

        @functools.wraps(func)
        def sync_wrapper(self, *args, **kwargs):
            t = _open_trace()
            try:
                logger.debug(f"[CONTEXT] {operation} starting...")
                value = func(self, *args, **kwargs)
                logger.debug(f"[CONTEXT] {operation} completed successfully")
                t.end(result=value)
                return value
            except Exception as exc:
                logger.error(f"[CONTEXT] {operation} failed: {exc}")
                t.end(exception=exc)
                raise

        return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper

    return decorator
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
class FailureDetector:
|
|
226
|
+
"""Detects and reports agent failures"""
|
|
227
|
+
|
|
228
|
+
def __init__(self, alert_threshold: int = 5):
|
|
229
|
+
"""
|
|
230
|
+
Initialize failure detector
|
|
231
|
+
|
|
232
|
+
Args:
|
|
233
|
+
alert_threshold: Number of consecutive failures before alerting
|
|
234
|
+
"""
|
|
235
|
+
self.alert_threshold = alert_threshold
|
|
236
|
+
self.agent_failures: Dict[str, int] = {}
|
|
237
|
+
self.agent_last_success: Dict[str, datetime] = {}
|
|
238
|
+
|
|
239
|
+
def record_failure(self, agent_name: str):
|
|
240
|
+
"""Record a failure for an agent"""
|
|
241
|
+
self.agent_failures[agent_name] = self.agent_failures.get(agent_name, 0) + 1
|
|
242
|
+
|
|
243
|
+
failure_count = self.agent_failures[agent_name]
|
|
244
|
+
|
|
245
|
+
if failure_count >= self.alert_threshold:
|
|
246
|
+
logger.error(
|
|
247
|
+
f"[FAILURE] Agent '{agent_name}' failed {failure_count} times. "
|
|
248
|
+
f"Agent may be broken and needs investigation."
|
|
249
|
+
)
|
|
250
|
+
|
|
251
|
+
def record_success(self, agent_name: str):
|
|
252
|
+
"""Record a successful execution"""
|
|
253
|
+
self.agent_failures[agent_name] = 0
|
|
254
|
+
self.agent_last_success[agent_name] = datetime.now(UTC)
|
|
255
|
+
logger.debug(f"[SUCCESS] Agent '{agent_name}' executed successfully")
|
|
256
|
+
|
|
257
|
+
def get_status(self) -> Dict[str, Any]:
|
|
258
|
+
"""Get overall failure status"""
|
|
259
|
+
failing_agents = {
|
|
260
|
+
name: count for name, count in self.agent_failures.items() if count > 0
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
return {
|
|
264
|
+
"failing_agents": failing_agents,
|
|
265
|
+
"has_failures": len(failing_agents) > 0,
|
|
266
|
+
"critical_failures": {
|
|
267
|
+
name: count
|
|
268
|
+
for name, count in failing_agents.items()
|
|
269
|
+
if count >= self.alert_threshold
|
|
270
|
+
},
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
# Global failure detector (module-level singleton; prefer the
# get_failure_detector() accessor over touching this directly).
_failure_detector = FailureDetector()
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def get_failure_detector() -> FailureDetector:
    """Return the module-wide FailureDetector singleton."""
    return _failure_detector
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def report_diagnostics() -> Dict[str, Any]:
    """Build a diagnostic snapshot: timestamp, recent traces, failure status."""
    snapshot = {
        "timestamp": datetime.now(UTC).isoformat(),
        "traces": [trace.to_dict() for trace in get_trace_history()],
        "failures": _failure_detector.get_status(),
    }
    return snapshot
|
devloop/core/event.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
"""Event system core - simplified for prototype."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import time
|
|
7
|
+
import uuid
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from enum import Enum
|
|
10
|
+
from typing import Any, Dict, Set
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Priority(Enum):
    """Event priority levels.

    Higher numeric values are more urgent; Event.__lt__ compares these
    values so events can be ordered in a priority queue.
    """

    LOW = 0
    NORMAL = 1
    HIGH = 2
    CRITICAL = 3
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class Event:
    """Base event class.

    A plain dataclass; instances are orderable (see __lt__) so they can be
    stored in a priority queue.
    """

    # Event type string matched against subscription patterns by EventBus
    # (e.g. "file:created" matches pattern "file:*").
    type: str
    # Arbitrary event data; stored as JSON by the event store.
    payload: Dict[str, Any]
    id: str = field(default_factory=lambda: str(uuid.uuid4()))  # unique per event
    timestamp: float = field(default_factory=time.time)  # creation time, epoch seconds
    source: str = "unknown"  # identifier of the producing collector/agent
    priority: Priority = Priority.NORMAL

    def __lt__(self, other: Event) -> bool:
        """Compare events by priority for priority queue.

        Deliberately inverted (uses >) so that HIGHER-priority events sort
        as "smaller" and are therefore popped first from a min-heap queue.
        """
        return self.priority.value > other.priority.value
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class EventBus:
    """Central event bus for publishing and subscribing to events.

    Subscribers register an asyncio.Queue against an event-type pattern
    (exact type, "*", or a trailing-star prefix). emit() fans each event
    out to every matching queue and persists it via the event store.
    """

    def __init__(self):
        # pattern -> set of subscriber queues
        self._subscribers: Dict[str, Set[asyncio.Queue]] = {}
        self._event_log: list[Event] = []  # For debugging (last 100 events)
        # BUG FIX: asyncio holds only weak references to tasks, so the
        # fire-and-forget storage tasks created in emit() could be
        # garbage-collected before completing. Keep strong references here.
        self._storage_tasks = set()

    async def subscribe(self, event_type: str, queue: asyncio.Queue) -> None:
        """Subscribe *queue* to events matching *event_type* (may be a pattern)."""
        self._subscribers.setdefault(event_type, set()).add(queue)

    async def unsubscribe(self, event_type: str, queue: asyncio.Queue) -> None:
        """Unsubscribe *queue*; a no-op if it was never subscribed."""
        if event_type in self._subscribers:
            self._subscribers[event_type].discard(queue)

    async def emit(self, event: Event) -> None:
        """Emit an event to all subscribers whose pattern matches its type."""
        # Log event for debugging, keeping only the last 100
        self._event_log.append(event)
        if len(self._event_log) > 100:
            self._event_log.pop(0)

        # Store event in event store (async, non-blocking). Imported lazily
        # to avoid a circular import at module load time.
        from .event_store import event_store

        # Fire-and-forget for performance, but retain a strong reference so
        # the task cannot be garbage-collected mid-flight; the done-callback
        # drops the reference once the task finishes.
        task = asyncio.create_task(event_store.store_event(event))
        self._storage_tasks.add(task)
        task.add_done_callback(self._storage_tasks.discard)

        # Emit to matching subscribers (supporting patterns), notifying each
        # queue at most once even if several of its patterns match.
        notified_queues = set()

        for pattern, queues in self._subscribers.items():
            if not self._matches_pattern(event.type, pattern):
                continue
            for queue in list(queues):
                if queue in notified_queues:
                    continue  # Avoid duplicate notifications
                try:
                    await queue.put(event)
                    notified_queues.add(queue)
                except (
                    asyncio.QueueFull,
                    asyncio.CancelledError,
                    RuntimeError,
                ):
                    pass  # Queue might be closed or in a bad state

    def _matches_pattern(self, event_type: str, pattern: str) -> bool:
        """Check if event type matches a subscription pattern."""
        # Exact match
        if event_type == pattern:
            return True

        # Global wildcard
        if pattern == "*":
            return True

        # Pattern matching (e.g., "file:*" matches "file:created")
        if pattern.endswith("*"):
            prefix = pattern[:-1]  # Remove the *
            return event_type.startswith(prefix)

        return False

    def get_recent_events(self, count: int = 10) -> list[Event]:
        """Get recent events for debugging."""
        return self._event_log[-count:]
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
"""Event store for persistent event logging using SQLite."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
import sqlite3
|
|
9
|
+
from datetime import datetime, UTC
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any, Dict, List, Optional
|
|
12
|
+
|
|
13
|
+
from .event import Event
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class EventStore:
|
|
19
|
+
"""SQLite-based event store for persistent event logging."""
|
|
20
|
+
|
|
21
|
+
def __init__(self, db_path: Path):
|
|
22
|
+
self.db_path = db_path
|
|
23
|
+
self._lock = asyncio.Lock()
|
|
24
|
+
self._connection: Optional[sqlite3.Connection] = None
|
|
25
|
+
|
|
26
|
+
@property
|
|
27
|
+
def connection(self) -> sqlite3.Connection:
|
|
28
|
+
"""Get the database connection, raising an exception if not initialized."""
|
|
29
|
+
if self._connection is None:
|
|
30
|
+
raise RuntimeError(
|
|
31
|
+
"Database connection not initialized. Call initialize() first."
|
|
32
|
+
)
|
|
33
|
+
return self._connection
|
|
34
|
+
|
|
35
|
+
async def initialize(self) -> None:
|
|
36
|
+
"""Initialize the event store database."""
|
|
37
|
+
async with self._lock:
|
|
38
|
+
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
|
39
|
+
|
|
40
|
+
# Use thread pool for SQLite operations since it's not async
|
|
41
|
+
loop = asyncio.get_event_loop()
|
|
42
|
+
await loop.run_in_executor(None, self._init_db)
|
|
43
|
+
|
|
44
|
+
def _init_db(self) -> None:
|
|
45
|
+
"""Initialize database schema (runs in thread pool)."""
|
|
46
|
+
self._connection = sqlite3.connect(str(self.db_path), check_same_thread=False)
|
|
47
|
+
self.connection.execute(
|
|
48
|
+
"""
|
|
49
|
+
CREATE TABLE IF NOT EXISTS events (
|
|
50
|
+
id TEXT PRIMARY KEY,
|
|
51
|
+
type TEXT NOT NULL,
|
|
52
|
+
timestamp REAL NOT NULL,
|
|
53
|
+
source TEXT NOT NULL,
|
|
54
|
+
payload TEXT NOT NULL,
|
|
55
|
+
priority INTEGER NOT NULL,
|
|
56
|
+
created_at REAL NOT NULL
|
|
57
|
+
)
|
|
58
|
+
"""
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
# Create indexes for efficient queries
|
|
62
|
+
self.connection.execute(
|
|
63
|
+
"""
|
|
64
|
+
CREATE INDEX IF NOT EXISTS idx_events_type ON events(type)
|
|
65
|
+
"""
|
|
66
|
+
)
|
|
67
|
+
self.connection.execute(
|
|
68
|
+
"""
|
|
69
|
+
CREATE INDEX IF NOT EXISTS idx_events_timestamp ON events(timestamp)
|
|
70
|
+
"""
|
|
71
|
+
)
|
|
72
|
+
self.connection.execute(
|
|
73
|
+
"""
|
|
74
|
+
CREATE INDEX IF NOT EXISTS idx_events_source ON events(source)
|
|
75
|
+
"""
|
|
76
|
+
)
|
|
77
|
+
|
|
78
|
+
self.connection.commit()
|
|
79
|
+
logger.info(f"Event store initialized at {self.db_path}")
|
|
80
|
+
|
|
81
|
+
async def store_event(self, event: Event) -> None:
|
|
82
|
+
"""Store an event in the database."""
|
|
83
|
+
async with self._lock:
|
|
84
|
+
if not self._connection:
|
|
85
|
+
logger.warning("Event store not initialized, skipping event storage")
|
|
86
|
+
return
|
|
87
|
+
|
|
88
|
+
loop = asyncio.get_event_loop()
|
|
89
|
+
await loop.run_in_executor(None, self._store_event_sync, event)
|
|
90
|
+
|
|
91
|
+
def _store_event_sync(self, event: Event) -> None:
|
|
92
|
+
"""Store event synchronously (runs in thread pool)."""
|
|
93
|
+
try:
|
|
94
|
+
# Convert event to dict for JSON storage
|
|
95
|
+
event_dict = {
|
|
96
|
+
"id": event.id,
|
|
97
|
+
"type": event.type,
|
|
98
|
+
"timestamp": event.timestamp,
|
|
99
|
+
"source": event.source,
|
|
100
|
+
"payload": event.payload,
|
|
101
|
+
"priority": (
|
|
102
|
+
event.priority.value
|
|
103
|
+
if hasattr(event.priority, "value")
|
|
104
|
+
else event.priority
|
|
105
|
+
),
|
|
106
|
+
"created_at": datetime.now(UTC).timestamp(),
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
self.connection.execute(
|
|
110
|
+
"""
|
|
111
|
+
INSERT OR REPLACE INTO events
|
|
112
|
+
(id, type, timestamp, source, payload, priority, created_at)
|
|
113
|
+
VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
114
|
+
""",
|
|
115
|
+
(
|
|
116
|
+
event_dict["id"],
|
|
117
|
+
event_dict["type"],
|
|
118
|
+
event_dict["timestamp"],
|
|
119
|
+
event_dict["source"],
|
|
120
|
+
json.dumps(event_dict["payload"]),
|
|
121
|
+
event_dict["priority"],
|
|
122
|
+
event_dict["created_at"],
|
|
123
|
+
),
|
|
124
|
+
)
|
|
125
|
+
|
|
126
|
+
self.connection.commit()
|
|
127
|
+
|
|
128
|
+
except Exception as e:
|
|
129
|
+
logger.error(f"Failed to store event {event.id}: {e}")
|
|
130
|
+
|
|
131
|
+
async def get_events(
|
|
132
|
+
self,
|
|
133
|
+
event_type: Optional[str] = None,
|
|
134
|
+
source: Optional[str] = None,
|
|
135
|
+
limit: int = 100,
|
|
136
|
+
offset: int = 0,
|
|
137
|
+
since: Optional[float] = None,
|
|
138
|
+
) -> List[Event]:
|
|
139
|
+
"""Retrieve events from the database."""
|
|
140
|
+
async with self._lock:
|
|
141
|
+
if not self._connection:
|
|
142
|
+
return []
|
|
143
|
+
|
|
144
|
+
loop = asyncio.get_event_loop()
|
|
145
|
+
return await loop.run_in_executor(
|
|
146
|
+
None, self._get_events_sync, event_type, source, limit, offset, since
|
|
147
|
+
)
|
|
148
|
+
|
|
149
|
+
def _get_events_sync(
|
|
150
|
+
self,
|
|
151
|
+
event_type: Optional[str],
|
|
152
|
+
source: Optional[str],
|
|
153
|
+
limit: int,
|
|
154
|
+
offset: int,
|
|
155
|
+
since: Optional[float],
|
|
156
|
+
) -> List[Event]:
|
|
157
|
+
"""Retrieve events synchronously."""
|
|
158
|
+
try:
|
|
159
|
+
query = "SELECT id, type, timestamp, source, payload, priority FROM events WHERE 1=1"
|
|
160
|
+
params: List[Any] = []
|
|
161
|
+
|
|
162
|
+
if event_type:
|
|
163
|
+
query += " AND type = ?"
|
|
164
|
+
params.append(event_type)
|
|
165
|
+
|
|
166
|
+
if source:
|
|
167
|
+
query += " AND source = ?"
|
|
168
|
+
params.append(source)
|
|
169
|
+
|
|
170
|
+
if since:
|
|
171
|
+
query += " AND timestamp >= ?"
|
|
172
|
+
params.append(since)
|
|
173
|
+
|
|
174
|
+
query += " ORDER BY timestamp DESC LIMIT ? OFFSET ?"
|
|
175
|
+
params.extend([limit, offset])
|
|
176
|
+
|
|
177
|
+
cursor = self.connection.execute(query, params)
|
|
178
|
+
rows = cursor.fetchall()
|
|
179
|
+
|
|
180
|
+
events = []
|
|
181
|
+
for row in rows:
|
|
182
|
+
event_id, event_type, timestamp, source, payload_json, priority = row
|
|
183
|
+
|
|
184
|
+
# Parse JSON
|
|
185
|
+
payload = json.loads(payload_json)
|
|
186
|
+
|
|
187
|
+
# Reconstruct event
|
|
188
|
+
event = Event(
|
|
189
|
+
type=event_type,
|
|
190
|
+
payload=payload,
|
|
191
|
+
id=event_id,
|
|
192
|
+
timestamp=timestamp,
|
|
193
|
+
source=source,
|
|
194
|
+
priority=priority,
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
events.append(event)
|
|
198
|
+
|
|
199
|
+
return events
|
|
200
|
+
|
|
201
|
+
except Exception as e:
|
|
202
|
+
logger.error(f"Failed to retrieve events: {e}")
|
|
203
|
+
return []
|
|
204
|
+
|
|
205
|
+
async def get_event_stats(self) -> Dict[str, Any]:
|
|
206
|
+
"""Get statistics about stored events."""
|
|
207
|
+
async with self._lock:
|
|
208
|
+
if not self._connection:
|
|
209
|
+
return {}
|
|
210
|
+
|
|
211
|
+
loop = asyncio.get_event_loop()
|
|
212
|
+
return await loop.run_in_executor(None, self._get_event_stats_sync)
|
|
213
|
+
|
|
214
|
+
def _get_event_stats_sync(self) -> Dict[str, Any]:
|
|
215
|
+
"""Get event statistics synchronously."""
|
|
216
|
+
try:
|
|
217
|
+
# Total events
|
|
218
|
+
cursor = self.connection.execute("SELECT COUNT(*) FROM events")
|
|
219
|
+
total_events = cursor.fetchone()[0]
|
|
220
|
+
|
|
221
|
+
# Events by type
|
|
222
|
+
cursor = self.connection.execute(
|
|
223
|
+
"""
|
|
224
|
+
SELECT type, COUNT(*) as count
|
|
225
|
+
FROM events
|
|
226
|
+
GROUP BY type
|
|
227
|
+
ORDER BY count DESC
|
|
228
|
+
"""
|
|
229
|
+
)
|
|
230
|
+
events_by_type = {row[0]: row[1] for row in cursor.fetchall()}
|
|
231
|
+
|
|
232
|
+
# Events by source
|
|
233
|
+
cursor = self.connection.execute(
|
|
234
|
+
"""
|
|
235
|
+
SELECT source, COUNT(*) as count
|
|
236
|
+
FROM events
|
|
237
|
+
GROUP BY source
|
|
238
|
+
ORDER BY count DESC
|
|
239
|
+
"""
|
|
240
|
+
)
|
|
241
|
+
events_by_source = {row[0]: row[1] for row in cursor.fetchall()}
|
|
242
|
+
|
|
243
|
+
# Time range
|
|
244
|
+
cursor = self.connection.execute(
|
|
245
|
+
"""
|
|
246
|
+
SELECT MIN(timestamp), MAX(timestamp) FROM events
|
|
247
|
+
"""
|
|
248
|
+
)
|
|
249
|
+
time_range = cursor.fetchone()
|
|
250
|
+
oldest_timestamp = time_range[0] if time_range[0] else None
|
|
251
|
+
newest_timestamp = time_range[1] if time_range[1] else None
|
|
252
|
+
|
|
253
|
+
return {
|
|
254
|
+
"total_events": total_events,
|
|
255
|
+
"events_by_type": events_by_type,
|
|
256
|
+
"events_by_source": events_by_source,
|
|
257
|
+
"oldest_timestamp": oldest_timestamp,
|
|
258
|
+
"newest_timestamp": newest_timestamp,
|
|
259
|
+
"database_size": (
|
|
260
|
+
self.db_path.stat().st_size if self.db_path.exists() else 0
|
|
261
|
+
),
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
except Exception as e:
|
|
265
|
+
logger.error(f"Failed to get event stats: {e}")
|
|
266
|
+
return {}
|
|
267
|
+
|
|
268
|
+
async def cleanup_old_events(self, days_to_keep: int = 30) -> int:
|
|
269
|
+
"""Clean up events older than specified days."""
|
|
270
|
+
async with self._lock:
|
|
271
|
+
if not self._connection:
|
|
272
|
+
return 0
|
|
273
|
+
|
|
274
|
+
cutoff_timestamp = datetime.now(UTC).timestamp() - (
|
|
275
|
+
days_to_keep * 24 * 60 * 60
|
|
276
|
+
)
|
|
277
|
+
|
|
278
|
+
loop = asyncio.get_event_loop()
|
|
279
|
+
return await loop.run_in_executor(
|
|
280
|
+
None,
|
|
281
|
+
lambda: self._cleanup_old_events_sync(cutoff_timestamp, days_to_keep),
|
|
282
|
+
)
|
|
283
|
+
|
|
284
|
+
def _cleanup_old_events_sync(
|
|
285
|
+
self, cutoff_timestamp: float, days_to_keep: int
|
|
286
|
+
) -> int:
|
|
287
|
+
"""Clean up old events synchronously."""
|
|
288
|
+
try:
|
|
289
|
+
cursor = self.connection.execute(
|
|
290
|
+
"DELETE FROM events WHERE timestamp < ?", (cutoff_timestamp,)
|
|
291
|
+
)
|
|
292
|
+
deleted_count = cursor.rowcount
|
|
293
|
+
self.connection.commit()
|
|
294
|
+
|
|
295
|
+
if deleted_count > 0:
|
|
296
|
+
logger.info(
|
|
297
|
+
f"Cleaned up {deleted_count} events older than {days_to_keep} days"
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
return deleted_count
|
|
301
|
+
|
|
302
|
+
except Exception as e:
|
|
303
|
+
logger.error(f"Failed to cleanup old events: {e}")
|
|
304
|
+
return 0
|
|
305
|
+
|
|
306
|
+
async def close(self) -> None:
|
|
307
|
+
"""Close the database connection."""
|
|
308
|
+
async with self._lock:
|
|
309
|
+
if self._connection:
|
|
310
|
+
self._connection.close()
|
|
311
|
+
self._connection = None
|
|
312
|
+
logger.info("Event store closed")
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
# Global instance.
# NOTE: the database lives under .devloop/ relative to the current working
# directory; initialize() must be awaited before events are persisted
# (store_event() warns and skips otherwise).
event_store = EventStore(Path(".devloop/events.db"))
|