devloop 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. devloop/__init__.py +3 -0
  2. devloop/agents/__init__.py +33 -0
  3. devloop/agents/agent_health_monitor.py +105 -0
  4. devloop/agents/ci_monitor.py +237 -0
  5. devloop/agents/code_rabbit.py +248 -0
  6. devloop/agents/doc_lifecycle.py +374 -0
  7. devloop/agents/echo.py +24 -0
  8. devloop/agents/file_logger.py +46 -0
  9. devloop/agents/formatter.py +511 -0
  10. devloop/agents/git_commit_assistant.py +421 -0
  11. devloop/agents/linter.py +399 -0
  12. devloop/agents/performance_profiler.py +284 -0
  13. devloop/agents/security_scanner.py +322 -0
  14. devloop/agents/snyk.py +292 -0
  15. devloop/agents/test_runner.py +484 -0
  16. devloop/agents/type_checker.py +242 -0
  17. devloop/cli/__init__.py +1 -0
  18. devloop/cli/commands/__init__.py +1 -0
  19. devloop/cli/commands/custom_agents.py +144 -0
  20. devloop/cli/commands/feedback.py +161 -0
  21. devloop/cli/commands/summary.py +50 -0
  22. devloop/cli/main.py +430 -0
  23. devloop/cli/main_v1.py +144 -0
  24. devloop/collectors/__init__.py +17 -0
  25. devloop/collectors/base.py +55 -0
  26. devloop/collectors/filesystem.py +126 -0
  27. devloop/collectors/git.py +171 -0
  28. devloop/collectors/manager.py +159 -0
  29. devloop/collectors/process.py +221 -0
  30. devloop/collectors/system.py +195 -0
  31. devloop/core/__init__.py +21 -0
  32. devloop/core/agent.py +206 -0
  33. devloop/core/agent_template.py +498 -0
  34. devloop/core/amp_integration.py +166 -0
  35. devloop/core/auto_fix.py +224 -0
  36. devloop/core/config.py +272 -0
  37. devloop/core/context.py +0 -0
  38. devloop/core/context_store.py +530 -0
  39. devloop/core/contextual_feedback.py +311 -0
  40. devloop/core/custom_agent.py +439 -0
  41. devloop/core/debug_trace.py +289 -0
  42. devloop/core/event.py +105 -0
  43. devloop/core/event_store.py +316 -0
  44. devloop/core/feedback.py +311 -0
  45. devloop/core/learning.py +351 -0
  46. devloop/core/manager.py +219 -0
  47. devloop/core/performance.py +433 -0
  48. devloop/core/proactive_feedback.py +302 -0
  49. devloop/core/summary_formatter.py +159 -0
  50. devloop/core/summary_generator.py +275 -0
  51. devloop-0.2.0.dist-info/METADATA +705 -0
  52. devloop-0.2.0.dist-info/RECORD +55 -0
  53. devloop-0.2.0.dist-info/WHEEL +4 -0
  54. devloop-0.2.0.dist-info/entry_points.txt +3 -0
  55. devloop-0.2.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,219 @@
1
+ """Agent manager for centralized control with feedback and performance."""
2
+
3
+ import asyncio
4
+ import logging
5
+ from pathlib import Path
6
+ from typing import Any, Dict, List, Optional
7
+
8
+ from devloop.core.agent import Agent
9
+ from devloop.core.context_store import context_store
10
+ from devloop.core.event import EventBus
11
+ from devloop.core.feedback import FeedbackAPI, FeedbackStore
12
+ from devloop.core.performance import PerformanceMonitor
13
+
14
+
15
class AgentManager:
    """Manages agent lifecycle and coordination with feedback and performance.

    Responsibilities:
    - Register agents and inject the shared feedback/performance systems.
    - Start, stop, enable/disable, and pause/resume agents.
    - Listen for ``agent:*:completed`` events and refresh the consolidated
      results index for downstream tooling.
    """

    def __init__(
        self,
        event_bus: "EventBus",
        project_dir: Optional[Path] = None,
        enable_feedback: bool = True,
        enable_performance: bool = True,
    ):
        """Initialize the manager.

        Args:
            event_bus: Shared event bus used by all managed agents.
            project_dir: Project root; defaults to the current working directory.
            enable_feedback: Create a FeedbackAPI backed by ``.devloop/feedback``.
            enable_performance: Create a PerformanceMonitor backed by
                ``.devloop/performance``.
        """
        self.event_bus = event_bus
        self.agents: Dict[str, "Agent"] = {}
        self.logger = logging.getLogger("agent_manager")
        self._paused_agents: set[str] = set()
        # FIX: initialize here so stop_all() is safe to call even when
        # start_all() was never invoked (previously an AttributeError).
        self.completion_listener_task: Optional[asyncio.Task] = None

        # Initialize feedback and performance systems
        self.project_dir = project_dir or Path.cwd()
        self.feedback_api = None
        self.performance_monitor = None

        if enable_feedback:
            feedback_storage = self.project_dir / ".devloop" / "feedback"
            feedback_store = FeedbackStore(feedback_storage)
            self.feedback_api = FeedbackAPI(feedback_store)

        if enable_performance:
            performance_storage = self.project_dir / ".devloop" / "performance"
            self.performance_monitor = PerformanceMonitor(performance_storage)

    def register(self, agent: "Agent") -> None:
        """Register an agent, injecting feedback/performance systems if unset."""
        # Inject only when the agent exposes the attribute and has no value yet,
        # so agents constructed with their own systems are left untouched.
        if hasattr(agent, "feedback_api") and agent.feedback_api is None:
            agent.feedback_api = self.feedback_api
        if hasattr(agent, "performance_monitor") and agent.performance_monitor is None:
            agent.performance_monitor = self.performance_monitor

        self.agents[agent.name] = agent
        self.logger.info(f"Registered agent: {agent.name}")

    def create_agent(
        self, agent_class, name: str, triggers: List[str], **kwargs
    ) -> "Agent":
        """Create and register an agent with feedback/performance systems.

        Args:
            agent_class: Agent subclass to instantiate.
            name: Unique agent name.
            triggers: Event patterns the agent subscribes to.
            **kwargs: Extra constructor arguments forwarded to ``agent_class``.

        Returns:
            The newly constructed and registered agent instance.
        """
        import inspect

        # Build kwargs for the agent constructor.
        agent_kwargs = {
            "name": name,
            "triggers": triggers,
            "event_bus": self.event_bus,
            **kwargs,
        }

        # Only pass feedback/performance parameters if the agent class
        # actually declares them, so simpler agents still construct cleanly.
        sig = inspect.signature(agent_class.__init__)
        if "feedback_api" in sig.parameters:
            agent_kwargs["feedback_api"] = self.feedback_api
        if "performance_monitor" in sig.parameters:
            agent_kwargs["performance_monitor"] = self.performance_monitor

        agent = agent_class(**agent_kwargs)
        self.register(agent)
        return agent

    async def start_all(self) -> None:
        """Start all enabled agents and the completion-event listener."""
        # Subscribe to agent completion events for consolidated results.
        queue: asyncio.Queue = asyncio.Queue()
        await self.event_bus.subscribe("agent:*:completed", queue)
        self.completion_listener_task = asyncio.create_task(
            self._listen_for_agent_completion(queue)
        )

        tasks = [agent.start() for agent in self.agents.values() if agent.enabled]
        await asyncio.gather(*tasks)
        self.logger.info(
            f"Started {len([a for a in self.agents.values() if a.enabled])} agents"
        )

    async def stop_all(self) -> None:
        """Stop all agents and cancel the completion listener (if running)."""
        # FIX: guard against stop_all() being called before start_all();
        # the original unconditionally dereferenced the task attribute.
        if self.completion_listener_task is not None:
            self.completion_listener_task.cancel()
            self.completion_listener_task = None
        tasks = [agent.stop() for agent in self.agents.values()]
        await asyncio.gather(*tasks)
        self.logger.info("Stopped all agents")

    async def start_agent(self, name: str) -> bool:
        """Start a specific agent. Returns False if the name is unknown."""
        if name in self.agents:
            await self.agents[name].start()
            return True
        return False

    async def stop_agent(self, name: str) -> bool:
        """Stop a specific agent. Returns False if the name is unknown."""
        if name in self.agents:
            await self.agents[name].stop()
            return True
        return False

    def enable_agent(self, name: str) -> bool:
        """Enable an agent. Returns False if the name is unknown."""
        if name in self.agents:
            self.agents[name].enabled = True
            self.logger.info(f"Enabled agent: {name}")
            return True
        return False

    def disable_agent(self, name: str) -> bool:
        """Disable an agent. Returns False if the name is unknown."""
        if name in self.agents:
            self.agents[name].enabled = False
            self.logger.info(f"Disabled agent: {name}")
            return True
        return False

    async def pause_agents(
        self, agents: Optional[List[str]] = None, reason: str = ""
    ) -> None:
        """Pause specific agents (or all when ``agents`` is None/empty).

        Paused agents are disabled and remembered so resume_agents() can
        re-enable exactly the set that was paused.
        """
        target_agents = agents or list(self.agents.keys())

        for agent_name in target_agents:
            if agent_name in self.agents:
                self.agents[agent_name].enabled = False
                self._paused_agents.add(agent_name)

        self.logger.info(f"Paused agents: {target_agents} (reason: {reason})")

    async def resume_agents(self, agents: Optional[List[str]] = None) -> None:
        """Resume paused agents (all previously paused ones by default)."""
        target_agents = agents or list(self._paused_agents)

        for agent_name in target_agents:
            if agent_name in self.agents:
                self.agents[agent_name].enabled = True
                self._paused_agents.discard(agent_name)

        self.logger.info(f"Resumed agents: {target_agents}")

    def get_status(self) -> Dict[str, Dict[str, Any]]:
        """Get per-agent status: running, enabled, paused, and triggers."""
        return {
            name: {
                "running": agent._running,
                "enabled": agent.enabled,
                "paused": name in self._paused_agents,
                "triggers": agent.triggers,
            }
            for name, agent in self.agents.items()
        }

    async def get_agent_insights(self, agent_name: str) -> Optional[Dict[str, Any]]:
        """Get feedback insights for an agent; None when feedback is disabled."""
        if not self.feedback_api:
            return None
        return await self.feedback_api.get_agent_insights(agent_name)

    async def get_system_health(self) -> Optional[Dict[str, Any]]:
        """Get system health metrics; None when performance monitoring is off."""
        if not self.performance_monitor:
            return None
        return await self.performance_monitor.get_system_health()

    async def submit_feedback(
        self,
        agent_name: str,
        feedback_type,
        value,
        event_type=None,
        comment=None,
        context=None,
    ):
        """Submit feedback for an agent; returns None when feedback is disabled.

        ``event_type`` defaults to "manual" when not supplied.
        """
        if not self.feedback_api:
            return None
        return await self.feedback_api.submit_feedback(
            agent_name=agent_name,
            event_type=event_type or "manual",
            feedback_type=feedback_type,
            value=value,
            comment=comment,
            context=context,
        )

    def get_agent(self, name: str) -> Optional["Agent"]:
        """Get an agent by name, or None if not registered."""
        return self.agents.get(name)

    def list_agents(self) -> List[str]:
        """List all registered agent names."""
        return list(self.agents.keys())

    async def _listen_for_agent_completion(self, queue: asyncio.Queue):
        """Listen for agent completion events and refresh consolidated results.

        Runs until cancelled by stop_all(). Failures to update the index are
        logged and swallowed deliberately: a broken index write must not kill
        the listener loop.
        """
        while True:
            await queue.get()
            try:
                # Update consolidated results for Claude Code integration
                await context_store._update_index()
            except Exception as e:
                self.logger.error(f"Failed to write consolidated results: {e}")
            finally:
                queue.task_done()
@@ -0,0 +1,433 @@
1
+ """Performance monitoring and resource usage analytics."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import json
7
+ import psutil
8
+ import time
9
+ from contextlib import asynccontextmanager
10
+ from dataclasses import dataclass
11
+ from pathlib import Path
12
+ from typing import Any, Dict, List, Optional
13
+
14
+ import aiofiles
15
+
16
+
17
@dataclass
class ResourceUsage:
    """A point-in-time snapshot of process and system-wide resource usage."""

    timestamp: float
    cpu_percent: float
    memory_mb: float
    memory_percent: float
    disk_read_mb: float = 0.0
    disk_write_mb: float = 0.0
    network_bytes_sent: int = 0
    network_bytes_recv: int = 0

    @classmethod
    def snapshot(cls) -> "ResourceUsage":
        """Capture current usage for this process plus system-wide I/O totals."""
        proc = psutil.Process()
        mem = proc.memory_info()
        cpu = proc.cpu_percent(interval=0.1)

        # System-wide disk counters are used because per-process I/O
        # counters are not available on every platform.
        disk_read = disk_write = 0.0
        try:
            disk = psutil.disk_io_counters()
            if disk:
                disk_read = disk.read_bytes / (1024 * 1024)
                disk_write = disk.write_bytes / (1024 * 1024)
        except (AttributeError, psutil.AccessDenied):
            pass

        sent = recv = 0
        try:
            net = psutil.net_io_counters()
            if net:
                sent = net.bytes_sent
                recv = net.bytes_recv
        except (AttributeError, psutil.AccessDenied):
            pass

        return cls(
            timestamp=time.time(),
            cpu_percent=cpu,
            memory_mb=mem.rss / (1024 * 1024),  # bytes -> MB
            memory_percent=proc.memory_percent(),
            disk_read_mb=disk_read,
            disk_write_mb=disk_write,
            network_bytes_sent=sent,
            network_bytes_recv=recv,
        )
69
+
70
+
71
@dataclass
class PerformanceMetrics:
    """Timing, outcome, and resource-delta metrics for a single operation."""

    operation_name: str
    start_time: float
    end_time: Optional[float] = None
    duration: Optional[float] = None
    resource_usage_start: Optional["ResourceUsage"] = None
    resource_usage_end: Optional["ResourceUsage"] = None
    cpu_used: Optional[float] = None
    memory_used_mb: Optional[float] = None
    success: Optional[bool] = None
    error_message: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None

    def complete(self, success: bool, error_message: Optional[str] = None) -> None:
        """Finalize the record: stamp end time, duration, outcome, and deltas."""
        now = time.time()
        self.end_time = now
        self.duration = now - self.start_time
        self.success = success
        self.error_message = error_message

        start, end = self.resource_usage_start, self.resource_usage_end
        if start and end:
            # Deltas between the two snapshots; cpu delta can be negative
            # since each snapshot is an independent percentage sample.
            self.cpu_used = end.cpu_percent - start.cpu_percent
            self.memory_used_mb = end.memory_mb - start.memory_mb
102
+
103
+
104
class PerformanceMonitor:
    """Record and analyze performance and resource usage of operations.

    Metrics are appended as JSON lines to ``metrics.jsonl`` under
    ``storage_path``; entries older than ``retention_days`` are pruned
    periodically.
    """

    # Minimum seconds between retention sweeps. FIX: the original rewrote
    # the entire journal after *every* append, which is quadratic I/O over
    # the lifetime of the file; pruning is now throttled to once per hour.
    _CLEANUP_INTERVAL_SECONDS = 3600.0

    def __init__(self, storage_path: Path, retention_days: int = 30):
        """Create the monitor, ensuring the storage directory exists.

        Args:
            storage_path: Directory that holds the metrics journal.
            retention_days: How long metrics are kept before pruning.
        """
        self.storage_path = storage_path
        self.storage_path.mkdir(parents=True, exist_ok=True)
        self.metrics_file = storage_path / "metrics.jsonl"
        self.retention_days = retention_days
        self._last_cleanup = 0.0  # wall-clock time of the last retention sweep

    @asynccontextmanager
    async def monitor_operation(
        self, operation_name: str, metadata: Optional[Dict[str, Any]] = None
    ):
        """Async context manager that records metrics for the wrapped block.

        Yields a PerformanceMetrics object; on exit (success or exception)
        an end snapshot is taken, the record is completed, and it is
        persisted. Exceptions from the wrapped block propagate.
        """
        start_usage = ResourceUsage.snapshot()
        start_time = time.time()

        metrics = PerformanceMetrics(
            operation_name=operation_name,
            start_time=start_time,
            resource_usage_start=start_usage,
            metadata=metadata or {},
        )

        try:
            yield metrics
            metrics.resource_usage_end = ResourceUsage.snapshot()
            metrics.complete(success=True)
        except Exception as e:
            metrics.resource_usage_end = ResourceUsage.snapshot()
            metrics.complete(success=False, error_message=str(e))
            raise
        finally:
            # Persist in all cases so failures are also visible in summaries.
            await self._store_metrics(metrics)

    async def get_system_health(self) -> Dict[str, Any]:
        """Get current process and system-wide health metrics."""
        usage = ResourceUsage.snapshot()

        # System-wide metrics. NOTE(review): disk_usage("/") assumes a
        # POSIX root; on Windows this path may not reflect the install
        # drive — confirm if Windows support is required.
        system_cpu = psutil.cpu_percent(interval=0.1)
        system_memory = psutil.virtual_memory()
        system_disk = psutil.disk_usage("/")

        return {
            "timestamp": usage.timestamp,
            "process": {
                "cpu_percent": usage.cpu_percent,
                "memory_mb": usage.memory_mb,
                "memory_percent": usage.memory_percent,
            },
            "system": {
                "cpu_percent": system_cpu,
                "memory_percent": system_memory.percent,
                "memory_used_gb": system_memory.used / (1024**3),
                "memory_total_gb": system_memory.total / (1024**3),
                "disk_percent": system_disk.percent,
                "disk_used_gb": system_disk.used / (1024**3),
                "disk_total_gb": system_disk.total / (1024**3),
            },
        }

    async def get_performance_summary(
        self, operation_name: Optional[str] = None, hours: int = 24
    ) -> Dict[str, Any]:
        """Summarize success rate, duration, and resource usage of operations.

        Args:
            operation_name: Restrict to one operation; None means all.
            hours: Look-back window.

        Returns:
            Dict with total count, success rate (percent), and averages;
            zeros when no matching operations exist.
        """
        cutoff_time = time.time() - (hours * 3600)

        operations = await self._load_recent_metrics(cutoff_time)
        if operation_name:
            operations = [
                op for op in operations if op.operation_name == operation_name
            ]

        if not operations:
            return {
                "operation_name": operation_name or "all",
                "time_range_hours": hours,
                "total_operations": 0,
                "success_rate": 0.0,
                "average_duration": 0.0,
                "average_cpu_usage": 0.0,
                "average_memory_usage_mb": 0.0,
            }

        successful_ops = [op for op in operations if op.success]
        success_rate = len(successful_ops) / len(operations) * 100

        durations = [op.duration for op in operations if op.duration is not None]
        avg_duration = sum(durations) / len(durations) if durations else 0.0

        cpu_usages = [op.cpu_used for op in operations if op.cpu_used is not None]
        avg_cpu = sum(cpu_usages) / len(cpu_usages) if cpu_usages else 0.0

        memory_usages = [
            op.memory_used_mb for op in operations if op.memory_used_mb is not None
        ]
        avg_memory = sum(memory_usages) / len(memory_usages) if memory_usages else 0.0

        return {
            "operation_name": operation_name or "all",
            "time_range_hours": hours,
            "total_operations": len(operations),
            "success_rate": round(success_rate, 1),
            "average_duration": round(avg_duration, 2),
            "average_cpu_usage": round(avg_cpu, 1),
            "average_memory_usage_mb": round(avg_memory, 2),
        }

    async def get_resource_trends(self, hours: int = 24) -> List[Dict[str, Any]]:
        """Get per-hour resource usage trends, sorted by timestamp."""
        cutoff_time = time.time() - (hours * 3600)
        operations = await self._load_recent_metrics(cutoff_time)

        # Bucket operations by wall-clock hour.
        hourly_data: Dict[int, Dict[str, Any]] = {}
        for op in operations:
            if op.resource_usage_start:
                hour = int(op.start_time // 3600)
                if hour not in hourly_data:
                    hourly_data[hour] = {
                        "hour": hour,
                        "timestamp": hour * 3600,
                        "operations": 0,
                        "avg_cpu": 0.0,
                        "avg_memory_mb": 0.0,
                        "cpu_samples": [],
                        "memory_samples": [],
                    }

                hourly_data[hour]["operations"] += 1
                if op.cpu_used is not None:
                    hourly_data[hour]["cpu_samples"].append(op.cpu_used)
                if op.memory_used_mb is not None:
                    hourly_data[hour]["memory_samples"].append(op.memory_used_mb)

        # Collapse samples into averages and drop the raw sample lists.
        trends = []
        for hour_data in hourly_data.values():
            if hour_data["cpu_samples"]:
                hour_data["avg_cpu"] = sum(hour_data["cpu_samples"]) / len(
                    hour_data["cpu_samples"]
                )
            if hour_data["memory_samples"]:
                hour_data["avg_memory_mb"] = sum(hour_data["memory_samples"]) / len(
                    hour_data["memory_samples"]
                )

            del hour_data["cpu_samples"]
            del hour_data["memory_samples"]
            trends.append(hour_data)

        return sorted(trends, key=lambda x: x["timestamp"])

    @staticmethod
    def _usage_to_dict(usage: "ResourceUsage") -> Dict[str, Any]:
        """Serialize a ResourceUsage snapshot to a JSON-compatible dict."""
        return {
            "timestamp": usage.timestamp,
            "cpu_percent": usage.cpu_percent,
            "memory_mb": usage.memory_mb,
            "memory_percent": usage.memory_percent,
            "disk_read_mb": usage.disk_read_mb,
            "disk_write_mb": usage.disk_write_mb,
            "network_bytes_sent": usage.network_bytes_sent,
            "network_bytes_recv": usage.network_bytes_recv,
        }

    async def _store_metrics(self, metrics: "PerformanceMetrics") -> None:
        """Append one metrics record to the journal, pruning old data periodically."""
        metrics_dict: Dict[str, Any] = {
            "operation_name": metrics.operation_name,
            "start_time": metrics.start_time,
            "end_time": metrics.end_time,
            "duration": metrics.duration,
            "success": metrics.success,
            "error_message": metrics.error_message,
            "metadata": metrics.metadata,
        }

        if metrics.resource_usage_start:
            metrics_dict["resource_usage_start"] = self._usage_to_dict(
                metrics.resource_usage_start
            )
        if metrics.resource_usage_end:
            metrics_dict["resource_usage_end"] = self._usage_to_dict(
                metrics.resource_usage_end
            )

        if metrics.cpu_used is not None:
            metrics_dict["cpu_used"] = metrics.cpu_used
        if metrics.memory_used_mb is not None:
            metrics_dict["memory_used_mb"] = metrics.memory_used_mb

        async with aiofiles.open(self.metrics_file, "a") as f:
            await f.write(json.dumps(metrics_dict) + "\n")

        # FIX: cleanup used to run on every append (full rewrite of the
        # journal each time). Throttle it to one sweep per interval.
        now = time.time()
        if now - self._last_cleanup >= self._CLEANUP_INTERVAL_SECONDS:
            self._last_cleanup = now
            await self._cleanup_old_metrics()

    async def _load_recent_metrics(
        self, cutoff_time: float
    ) -> List["PerformanceMetrics"]:
        """Load metrics newer than ``cutoff_time``; malformed lines are skipped."""
        metrics: List[PerformanceMetrics] = []

        if not self.metrics_file.exists():
            return metrics

        async with aiofiles.open(self.metrics_file, "r") as f:
            lines = await f.readlines()

        for line in lines:
            try:
                data = json.loads(line.strip())
                if data["start_time"] >= cutoff_time:
                    start_usage = None
                    end_usage = None

                    if "resource_usage_start" in data:
                        start_usage = ResourceUsage(**data["resource_usage_start"])

                    if "resource_usage_end" in data:
                        end_usage = ResourceUsage(**data["resource_usage_end"])

                    metrics.append(
                        PerformanceMetrics(
                            operation_name=data["operation_name"],
                            start_time=data["start_time"],
                            end_time=data.get("end_time"),
                            duration=data.get("duration"),
                            resource_usage_start=start_usage,
                            resource_usage_end=end_usage,
                            cpu_used=data.get("cpu_used"),
                            memory_used_mb=data.get("memory_used_mb"),
                            success=data.get("success"),
                            error_message=data.get("error_message"),
                            metadata=data.get("metadata", {}),
                        )
                    )
            except (json.JSONDecodeError, KeyError):
                # Tolerate corrupt/partial lines (e.g. interrupted writes).
                continue

        return metrics

    async def _cleanup_old_metrics(self) -> None:
        """Rewrite the journal keeping only records within the retention window."""
        cutoff_time = time.time() - (self.retention_days * 24 * 3600)

        if not self.metrics_file.exists():
            return

        # Read all metrics.
        async with aiofiles.open(self.metrics_file, "r") as f:
            lines = await f.readlines()

        # Keep only lines that parse and fall within retention.
        recent_lines = []
        for line in lines:
            try:
                data = json.loads(line.strip())
                if data["start_time"] >= cutoff_time:
                    recent_lines.append(line)
            except (json.JSONDecodeError, KeyError):
                continue

        # Write back the retained records.
        async with aiofiles.open(self.metrics_file, "w") as f:
            await f.writelines(recent_lines)
378
+
379
+
380
+ class PerformanceOptimizer:
381
+ """Optimize performance based on monitoring data."""
382
+
383
+ def __init__(self, performance_monitor: PerformanceMonitor):
384
+ self.monitor = performance_monitor
385
+ self._debounce_cache: Dict[str, float] = {}
386
+ self._concurrency_limits: Dict[str, asyncio.Semaphore] = {}
387
+
388
+ async def should_skip_operation(
389
+ self, operation_key: str, debounce_seconds: float = 1.0
390
+ ) -> bool:
391
+ """Check if operation should be debounced."""
392
+ now = time.time()
393
+ last_run = self._debounce_cache.get(operation_key, 0)
394
+
395
+ if now - last_run < debounce_seconds:
396
+ return True
397
+
398
+ self._debounce_cache[operation_key] = now
399
+ return False
400
+
401
+ def get_concurrency_limiter(
402
+ self, operation_type: str, max_concurrent: int
403
+ ) -> asyncio.Semaphore:
404
+ """Get a semaphore for limiting concurrency."""
405
+ if operation_type not in self._concurrency_limits:
406
+ self._concurrency_limits[operation_type] = asyncio.Semaphore(max_concurrent)
407
+ return self._concurrency_limits[operation_type]
408
+
409
+ async def get_optimal_config(self, operation_name: str) -> Dict[str, Any]:
410
+ """Get optimal configuration based on performance history."""
411
+ summary = await self.monitor.get_performance_summary(operation_name, hours=24)
412
+
413
+ # Simple optimization logic based on performance data
414
+ config = {}
415
+
416
+ if summary["total_operations"] > 10: # Need some data
417
+ # If average duration is high, suggest debouncing
418
+ if summary["average_duration"] > 2.0:
419
+ config["debounce_seconds"] = min(summary["average_duration"] * 0.5, 5.0)
420
+
421
+ # If CPU usage is high, suggest lower concurrency
422
+ if summary["average_cpu_usage"] > 50:
423
+ config["max_concurrent"] = max(
424
+ 1, int(10 / (summary["average_cpu_usage"] / 10))
425
+ )
426
+
427
+ # If memory usage is high, suggest smaller batches
428
+ if summary["average_memory_usage_mb"] > 100:
429
+ config["batch_size"] = max(
430
+ 1, int(100 / summary["average_memory_usage_mb"] * 10)
431
+ )
432
+
433
+ return config