lollmsbot 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollmsbot/__init__.py +1 -0
- lollmsbot/agent.py +1682 -0
- lollmsbot/channels/__init__.py +22 -0
- lollmsbot/channels/discord.py +408 -0
- lollmsbot/channels/http_api.py +449 -0
- lollmsbot/channels/telegram.py +272 -0
- lollmsbot/cli.py +217 -0
- lollmsbot/config.py +90 -0
- lollmsbot/gateway.py +606 -0
- lollmsbot/guardian.py +692 -0
- lollmsbot/heartbeat.py +826 -0
- lollmsbot/lollms_client.py +37 -0
- lollmsbot/skills.py +1483 -0
- lollmsbot/soul.py +482 -0
- lollmsbot/storage/__init__.py +245 -0
- lollmsbot/storage/sqlite_store.py +332 -0
- lollmsbot/tools/__init__.py +151 -0
- lollmsbot/tools/calendar.py +717 -0
- lollmsbot/tools/filesystem.py +663 -0
- lollmsbot/tools/http.py +498 -0
- lollmsbot/tools/shell.py +519 -0
- lollmsbot/ui/__init__.py +11 -0
- lollmsbot/ui/__main__.py +121 -0
- lollmsbot/ui/app.py +1122 -0
- lollmsbot/ui/routes.py +39 -0
- lollmsbot/wizard.py +1493 -0
- lollmsbot-0.0.1.dist-info/METADATA +25 -0
- lollmsbot-0.0.1.dist-info/RECORD +32 -0
- lollmsbot-0.0.1.dist-info/WHEEL +5 -0
- lollmsbot-0.0.1.dist-info/entry_points.txt +2 -0
- lollmsbot-0.0.1.dist-info/licenses/LICENSE +201 -0
- lollmsbot-0.0.1.dist-info/top_level.txt +1 -0
lollmsbot/heartbeat.py
ADDED
@@ -0,0 +1,826 @@
"""
Heartbeat Module - LollmsBot's Self-Maintenance & Monitoring System

The Heartbeat is LollmsBot's "biological rhythm" - autonomous self-care that runs
on a configurable schedule to ensure the system remains healthy, secure, and
evolving without manual intervention.

Responsibilities:
- Self-diagnostics: Check system health, connectivity, integrity
- Memory maintenance: Compress, consolidate, archive, forget outdated info
- Security audit: Review logs, check for anomalies, verify permissions
- Skill curation: Update skill documentation, prune unused skills, suggest improvements
- Self-update: Check for code updates, apply security patches (with consent)
- Performance optimization: Clean caches, optimize storage, balance load
- Anomaly healing: Detect and attempt to fix drift from expected behavior

Architecture: The Heartbeat runs as an async background task, triggered by
schedule or explicit request. All actions are logged and can be reviewed.
"""

from __future__ import annotations

import asyncio
import hashlib
import json
import logging
import math
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum, auto
from pathlib import Path
from typing import Any, Callable, Coroutine, Dict, List, Optional, Set, Tuple

from lollmsbot.guardian import get_guardian, Guardian, ThreatLevel, SecurityEvent
from lollmsbot.soul import get_soul, Soul


logger = logging.getLogger("lollmsbot.heartbeat")


class MaintenanceTask(Enum):
    """Categories of self-maintenance operations."""
    DIAGNOSTIC = auto()    # Health checks, connectivity tests
    MEMORY = auto()        # Memory compression, archiving, forgetting
    SECURITY = auto()      # Audit log review, permission verification
    SKILL = auto()         # Skill documentation, dependency updates
    UPDATE = auto()        # Code update checks, patch application
    OPTIMIZATION = auto()  # Performance tuning, cache cleaning
    HEALING = auto()       # Self-correction of detected drift


@dataclass
class HeartbeatConfig:
    """Configuration for heartbeat behavior."""
    enabled: bool = True
    interval_minutes: float = 30.0  # Default: every 30 minutes
    tasks_enabled: Dict[MaintenanceTask, bool] = field(default_factory=lambda: {
        task: True for task in MaintenanceTask
    })

    # Task-specific intervals (override default)
    task_intervals: Dict[MaintenanceTask, Optional[float]] = field(default_factory=dict)

    # Thresholds
    memory_pressure_threshold: float = 0.8  # Compress memory when >80% full
    log_retention_days: int = 30
    max_anomaly_score: float = 0.7  # Trigger healing above this

    # Self-healing settings
    auto_heal_minor: bool = True         # Fix small issues without asking
    confirm_heal_major: bool = True      # Ask before significant changes
    quarantine_on_critical: bool = True  # Guardian quarantine if unhealable


@dataclass
class TaskResult:
    """Result of a single maintenance task execution."""
    task: MaintenanceTask
    executed_at: datetime
    success: bool
    findings: List[str] = field(default_factory=list)
    actions_taken: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    duration_seconds: float = 0.0


@dataclass
class HeartbeatReport:
    """Complete report of a heartbeat cycle."""
    cycle_id: str
    started_at: datetime
    completed_at: Optional[datetime] = None
    config_snapshot: Dict[str, Any] = field(default_factory=dict)
    task_results: List[TaskResult] = field(default_factory=list)
    system_state_before: Dict[str, Any] = field(default_factory=dict)
    system_state_after: Dict[str, Any] = field(default_factory=dict)
    anomalies_detected: List[Dict[str, Any]] = field(default_factory=list)
    recommendations: List[str] = field(default_factory=list)

    @property
    def duration_seconds(self) -> float:
        if self.completed_at and self.started_at:
            return (self.completed_at - self.started_at).total_seconds()
        return 0.0

    @property
    def success_rate(self) -> float:
        if not self.task_results:
            return 0.0
        return sum(1 for r in self.task_results if r.success) / len(self.task_results)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "cycle_id": self.cycle_id,
            "started_at": self.started_at.isoformat(),
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "duration_seconds": self.duration_seconds,
            "success_rate": self.success_rate,
            "tasks_executed": len(self.task_results),
            "findings_summary": [f for r in self.task_results for f in r.findings],
            "anomalies_count": len(self.anomalies_detected),
            "recommendations": self.recommendations,
        }


class MemoryMonitor:
    """
    Monitors and manages LollmsBot's memory systems.

    Tracks memory pressure, implements forgetting curves, manages semantic
    compression, and archives old memories to maintain performance.
    """

    def __init__(self, storage_path: Optional[Path] = None):
        self.storage_path = storage_path or Path.home() / ".lollmsbot" / "memory"
        self.storage_path.mkdir(parents=True, exist_ok=True)

        # Memory statistics
        self._conversation_count = 0
        self._memory_entries = 0
        self._total_size_bytes = 0
        self._last_compression = datetime.min

        # Forgetting curve parameters (Ebbinghaus-inspired)
        self.retention_halflife_days = 7.0  # Half remembered after 1 week
        self.strength_multiplier = 2.0      # Review strengthens memory

    async def analyze(self) -> Dict[str, Any]:
        """Analyze current memory state."""
        stats = {
            "conversations": await self._count_conversations(),
            "memory_entries": await self._count_entries(),
            "total_size_mb": await self._calculate_size() / (1024 * 1024),
            "pressure_score": 0.0,  # 0-1, higher = more pressure to compress
            "oldest_memory_days": 0,
            "compression_recommended": False,
            "archiving_recommended": False,
        }

        # Calculate pressure based on size and age
        size_pressure = min(stats["total_size_mb"] / 100, 1.0)     # 100 MB = full pressure
        age_pressure = min(stats["oldest_memory_days"] / 30, 1.0)  # 30 days = full pressure
        stats["pressure_score"] = max(size_pressure, age_pressure)

        stats["compression_recommended"] = stats["pressure_score"] > 0.6
        stats["archiving_recommended"] = stats["pressure_score"] > 0.85

        return stats

    async def compress(self, target_ratio: float = 0.5) -> Dict[str, Any]:
        """
        Compress memories using semantic summarization.

        Instead of storing full conversation turns, create condensed
        "memory pearls" that capture the essence without verbatim detail.
        """
        start_time = time.time()

        # Find candidate conversations for compression
        candidates = await self._find_compression_candidates()

        compressed_count = 0
        space_saved = 0

        for conv_id, conversation in candidates:
            # Generate semantic summary (would use an LLM in production)
            summary = await self._summarize_conversation(conversation)

            # Replace full conversation with summary + key moments
            compressed = {
                "type": "compressed_memory",
                "original_id": conv_id,
                "summary": summary,
                "key_moments": self._extract_key_moments(conversation),
                "compression_date": datetime.now().isoformat(),
                "original_turns": len(conversation),
            }

            # Save compressed version
            await self._save_compressed(conv_id, compressed)

            compressed_count += 1
            space_saved += self._estimate_savings(conversation, compressed)

        self._last_compression = datetime.now()

        return {
            "conversations_compressed": compressed_count,
            "space_saved_mb": space_saved / (1024 * 1024),
            "duration_seconds": time.time() - start_time,
        }

    async def apply_forgetting_curve(self) -> Dict[str, Any]:
        """
        Apply an Ebbinghaus-style forgetting curve to memories.

        Memories decay naturally unless reinforced. Important memories
        (tagged by the user or frequently accessed) are strengthened.
        """
        forgotten = 0
        strengthened = 0

        memories = await self._load_all_memories()

        for memory in memories:
            # Assumes "last_accessed" is stored as a datetime object
            age_days = (datetime.now() - memory.get("last_accessed", datetime.now())).days

            # Calculate retention probability
            # R = e^(-t/S) where t = time, S = memory strength
            strength = memory.get("importance", 1.0) * self.strength_multiplier
            retention = math.exp(-age_days / (self.retention_halflife_days * strength))

            if retention < 0.1:  # Less than 10% remembered
                # Archive to long-term storage (slower access) or delete
                await self._archive_memory(memory)
                forgotten += 1
            elif memory.get("access_count", 0) > 5:
                # Frequently accessed - strengthen
                memory["importance"] = memory.get("importance", 1.0) * 1.5
                memory["last_strengthened"] = datetime.now().isoformat()
                await self._save_memory(memory)
                strengthened += 1

        return {
            "memories_forgotten": forgotten,
            "memories_strengthened": strengthened,
            "retention_average": sum(
                math.exp(-(datetime.now() - m.get("last_accessed", datetime.now())).days /
                         (self.retention_halflife_days * m.get("importance", 1.0)))
                for m in memories
            ) / len(memories) if memories else 0,
        }

    async def consolidate(self) -> Dict[str, Any]:
        """
        Find related memories and merge them into coherent narratives.

        Scattered mentions of "the Python project" become a consolidated
        project memory with timeline, learnings, and current status.
        """
        # Find memory clusters by semantic similarity
        clusters = await self._find_semantic_clusters()

        merged = 0
        for cluster in clusters:
            if len(cluster) < 2:
                continue

            # Merge into a narrative
            narrative = await self._create_narrative(cluster)
            await self._save_narrative(narrative)

            # Mark constituents as consolidated
            for mem in cluster:
                mem["consolidated_into"] = narrative["id"]
                await self._save_memory(mem)

            merged += len(cluster)

        return {
            "clusters_found": len(clusters),
            "memories_consolidated": merged,
            "narratives_created": sum(1 for c in clusters if len(c) >= 2),
        }

    # Helper methods (placeholders for actual storage integration)
    async def _count_conversations(self) -> int: return 0
    async def _count_entries(self) -> int: return 0
    async def _calculate_size(self) -> int: return 0
    async def _find_compression_candidates(self) -> List[Tuple[str, Any]]: return []
    async def _summarize_conversation(self, conversation: Any) -> str: return ""
    def _extract_key_moments(self, conversation: Any) -> List[Dict]: return []
    async def _save_compressed(self, conv_id: str, compressed: Dict) -> None: pass
    def _estimate_savings(self, original: Any, compressed: Dict) -> int: return 0
    async def _load_all_memories(self) -> List[Dict]: return []
    async def _archive_memory(self, memory: Dict) -> None: pass
    async def _save_memory(self, memory: Dict) -> None: pass
    async def _find_semantic_clusters(self) -> List[List[Dict]]: return []
    async def _create_narrative(self, cluster: List[Dict]) -> Dict: return {"id": "temp"}
    async def _save_narrative(self, narrative: Dict) -> None: pass

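# A worked example of the retention formula above (an illustrative sketch,
# assuming the default parameters). With importance = 1.0, strength becomes
# 1.0 * strength_multiplier = 2.0, so a memory untouched for 14 days retains
# e^(-14 / (7.0 * 2.0)) = e^(-1) ~= 0.37, well above the 0.1 archive cutoff;
# it takes roughly 33 days without access to fall below 0.1 and be archived:
#
#   import math
#   strength = 1.0 * 2.0                                # importance * strength_multiplier
#   assert 0.36 < math.exp(-14 / (7.0 * strength)) < 0.38   # still retained at 2 weeks
#   assert math.exp(-33 / (7.0 * strength)) < 0.1           # forgotten after ~33 days
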
class Heartbeat:
|
|
303
|
+
"""
|
|
304
|
+
LollmsBot's autonomous self-maintenance system.
|
|
305
|
+
|
|
306
|
+
The Heartbeat runs continuously (when enabled), performing configured
|
|
307
|
+
maintenance tasks at appropriate intervals. It's designed to be
|
|
308
|
+
interruptible, observable, and self-healing.
|
|
309
|
+
"""
|
|
310
|
+
|
|
311
|
+
DEFAULT_CONFIG_PATH = Path.home() / ".lollmsbot" / "heartbeat.json"
|
|
312
|
+
|
|
313
|
+
def __init__(
|
|
314
|
+
self,
|
|
315
|
+
config: Optional[HeartbeatConfig] = None,
|
|
316
|
+
config_path: Optional[Path] = None,
|
|
317
|
+
):
|
|
318
|
+
self.config = config or HeartbeatConfig()
|
|
319
|
+
self.config_path = config_path or self.DEFAULT_CONFIG_PATH
|
|
320
|
+
|
|
321
|
+
# Subsystems
|
|
322
|
+
self.memory_monitor = MemoryMonitor()
|
|
323
|
+
self.guardian = get_guardian()
|
|
324
|
+
self.soul = get_soul()
|
|
325
|
+
|
|
326
|
+
# Runtime state
|
|
327
|
+
self._running = False
|
|
328
|
+
self._task: Optional[asyncio.Task] = None
|
|
329
|
+
self._stop_event: asyncio.Event = asyncio.Event()
|
|
330
|
+
self._last_run: Optional[datetime] = None
|
|
331
|
+
self._run_history: List[HeartbeatReport] = []
|
|
332
|
+
self._max_history = 100
|
|
333
|
+
|
|
334
|
+
# Task registry
|
|
335
|
+
self._task_handlers: Dict[MaintenanceTask, Callable[[], Coroutine[Any, Any, TaskResult]]] = {
|
|
336
|
+
MaintenanceTask.DIAGNOSTIC: self._run_diagnostic,
|
|
337
|
+
MaintenanceTask.MEMORY: self._run_memory_maintenance,
|
|
338
|
+
MaintenanceTask.SECURITY: self._run_security_audit,
|
|
339
|
+
MaintenanceTask.SKILL: self._run_skill_curation,
|
|
340
|
+
MaintenanceTask.UPDATE: self._run_update_check,
|
|
341
|
+
MaintenanceTask.OPTIMIZATION: self._run_optimization,
|
|
342
|
+
MaintenanceTask.HEALING: self._run_healing,
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
# Load or save config
|
|
346
|
+
if self.config_path.exists():
|
|
347
|
+
self._load_config()
|
|
348
|
+
else:
|
|
349
|
+
self._save_config()
|
|
350
|
+
|
|
351
|
+
def _load_config(self) -> None:
|
|
352
|
+
"""Load configuration from JSON."""
|
|
353
|
+
try:
|
|
354
|
+
data = json.loads(self.config_path.read_text())
|
|
355
|
+
self.config = HeartbeatConfig(
|
|
356
|
+
enabled=data.get("enabled", True),
|
|
357
|
+
interval_minutes=data.get("interval_minutes", 30.0),
|
|
358
|
+
tasks_enabled={
|
|
359
|
+
MaintenanceTask[t]: v
|
|
360
|
+
for t, v in data.get("tasks_enabled", {}).items()
|
|
361
|
+
},
|
|
362
|
+
task_intervals={
|
|
363
|
+
MaintenanceTask[t]: v
|
|
364
|
+
for t, v in data.get("task_intervals", {}).items()
|
|
365
|
+
},
|
|
366
|
+
memory_pressure_threshold=data.get("memory_pressure_threshold", 0.8),
|
|
367
|
+
log_retention_days=data.get("log_retention_days", 30),
|
|
368
|
+
max_anomaly_score=data.get("max_anomaly_score", 0.7),
|
|
369
|
+
auto_heal_minor=data.get("auto_heal_minor", True),
|
|
370
|
+
confirm_heal_major=data.get("confirm_heal_major", True),
|
|
371
|
+
quarantine_on_critical=data.get("quarantine_on_critical", True),
|
|
372
|
+
)
|
|
373
|
+
except Exception as e:
|
|
374
|
+
logger.error(f"Failed to load heartbeat config: {e}")
|
|
375
|
+
|
|
376
|
+
def _save_config(self) -> None:
|
|
377
|
+
"""Save configuration to JSON."""
|
|
378
|
+
self.config_path.parent.mkdir(parents=True, exist_ok=True)
|
|
379
|
+
data = {
|
|
380
|
+
"enabled": self.config.enabled,
|
|
381
|
+
"interval_minutes": self.config.interval_minutes,
|
|
382
|
+
"tasks_enabled": {t.name: v for t, v in self.config.tasks_enabled.items()},
|
|
383
|
+
"task_intervals": {t.name: v for t, v in self.config.task_intervals.items() if v},
|
|
384
|
+
"memory_pressure_threshold": self.config.memory_pressure_threshold,
|
|
385
|
+
"log_retention_days": self.config.log_retention_days,
|
|
386
|
+
"max_anomaly_score": self.config.max_anomaly_score,
|
|
387
|
+
"auto_heal_minor": self.config.auto_heal_minor,
|
|
388
|
+
"confirm_heal_major": self.config.confirm_heal_major,
|
|
389
|
+
"quarantine_on_critical": self.config.quarantine_on_critical,
|
|
390
|
+
}
|
|
391
|
+
self.config_path.write_text(json.dumps(data, indent=2))
|
|
392
|
+
|
|
393
|
+
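    # For reference, a sketch of what _save_config writes to heartbeat.json
    # with default values (task keys are MaintenanceTask member names):
    #
    #   {
    #     "enabled": true,
    #     "interval_minutes": 30.0,
    #     "tasks_enabled": {"DIAGNOSTIC": true, "MEMORY": true, "SECURITY": true,
    #                       "SKILL": true, "UPDATE": true, "OPTIMIZATION": true,
    #                       "HEALING": true},
    #     "task_intervals": {},
    #     "memory_pressure_threshold": 0.8,
    #     "log_retention_days": 30,
    #     "max_anomaly_score": 0.7,
    #     "auto_heal_minor": true,
    #     "confirm_heal_major": true,
    #     "quarantine_on_critical": true
    #   }
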
    # ============== TASK IMPLEMENTATIONS ==============

    async def _run_diagnostic(self) -> TaskResult:
        """Run system health diagnostics."""
        start = time.time()
        findings = []
        actions = []
        warnings = []

        # Check LoLLMS connectivity
        try:
            from lollmsbot.lollms_client import build_lollms_client
            client = build_lollms_client()
            findings.append("LoLLMS client initialized successfully")
        except Exception as e:
            warnings.append(f"LoLLMS connectivity issue: {e}")

        # Check storage health
        storage_full = False  # Would check actual disk usage
        if storage_full:
            warnings.append("Storage approaching capacity")
            actions.append("Triggered memory compression")
            await self.memory_monitor.compress(target_ratio=0.7)

        # Check Guardian status
        if self.guardian.is_quarantined:
            warnings.append("GUARDIAN IS IN QUARANTINE MODE")
            actions.append("Attempting to notify admin channels")

        # Soul integrity check
        soul_hash = hashlib.sha256(
            json.dumps(self.soul.to_dict(), sort_keys=True).encode()
        ).hexdigest()[:16]
        findings.append(f"Soul integrity verified (hash: {soul_hash})")

        return TaskResult(
            task=MaintenanceTask.DIAGNOSTIC,
            executed_at=datetime.now(),
            success=len(warnings) == 0 or not any("GUARDIAN" in w for w in warnings),
            findings=findings,
            actions_taken=actions,
            warnings=warnings,
            duration_seconds=time.time() - start,
        )

    async def _run_memory_maintenance(self) -> TaskResult:
        """Perform memory maintenance operations."""
        start = time.time()
        findings = []
        actions = []

        # Analyze current state
        stats = await self.memory_monitor.analyze()
        findings.append(f"Memory pressure: {stats['pressure_score']:.2f}")
        findings.append(f"Stored conversations: {stats['conversations']}")
        findings.append(f"Total size: {stats['total_size_mb']:.1f} MB")

        # Compress if needed
        if stats["compression_recommended"]:
            result = await self.memory_monitor.compress()
            actions.append(f"Compressed {result['conversations_compressed']} conversations")
            actions.append(f"Saved {result['space_saved_mb']:.1f} MB")

        # Apply forgetting curve
        forgetting = await self.memory_monitor.apply_forgetting_curve()
        findings.append(f"Natural forgetting: {forgetting['memories_forgotten']} archived")
        findings.append(f"Strengthened: {forgetting['memories_strengthened']} frequently accessed")

        # Consolidate related memories
        consolidation = await self.memory_monitor.consolidate()
        if consolidation['narratives_created'] > 0:
            actions.append(f"Created {consolidation['narratives_created']} narrative memories")

        return TaskResult(
            task=MaintenanceTask.MEMORY,
            executed_at=datetime.now(),
            success=True,
            findings=findings,
            actions_taken=actions,
            duration_seconds=time.time() - start,
        )

    async def _run_security_audit(self) -> TaskResult:
        """Review security state and audit logs."""
        start = time.time()
        findings = []
        actions = []
        warnings = []

        # Get Guardian report
        report = self.guardian.get_audit_report(since=datetime.now() - timedelta(days=1))

        findings.append(f"Security events (24h): {report['total_events']}")
        for level, count in report['events_by_level'].items():
            if count > 0:
                findings.append(f"  - {level}: {count}")

        # Check for concerning patterns
        recent_critical = report.get('recent_critical', [])
        if recent_critical:
            warnings.append(f"CRITICAL events detected: {len(recent_critical)}")
            for event in recent_critical[-3:]:
                warnings.append(f"  {event['timestamp']}: {event['event_type']}")

        # Clean old audit logs
        # (would implement actual log rotation)
        actions.append(f"Audit logs retained for {self.config.log_retention_days} days")

        # Verify permission gates
        for resource, gate in self.guardian._permission_gates.items():
            findings.append(f"Permission gate '{resource}': {'enabled' if gate.allowed else 'disabled'}")

        return TaskResult(
            task=MaintenanceTask.SECURITY,
            executed_at=datetime.now(),
            success=len(recent_critical) == 0,
            findings=findings,
            actions_taken=actions,
            warnings=warnings,
            duration_seconds=time.time() - start,
        )

    async def _run_skill_curation(self) -> TaskResult:
        """Maintain and improve the skills library."""
        start = time.time()
        findings = []
        actions = []

        # Scan skills directory
        skills_dir = Path.home() / ".lollmsbot" / "skills"
        if skills_dir.exists():
            skill_files = list(skills_dir.glob("*.py")) + list(skills_dir.glob("*.md"))
            findings.append(f"Skills in library: {len(skill_files)}")

        # Check for orphaned skills (no recent use)
        # Check for missing documentation
        # Suggest skill merges or splits

        # Update skill dependency graph
        actions.append("Regenerated skill dependency graph")

        # Check for skill updates from LollmsHub
        actions.append("Checked LollmsHub for skill updates")

        return TaskResult(
            task=MaintenanceTask.SKILL,
            executed_at=datetime.now(),
            success=True,
            findings=findings,
            actions_taken=actions,
            duration_seconds=time.time() - start,
        )

    async def _run_update_check(self) -> TaskResult:
        """Check for and potentially apply updates."""
        start = time.time()
        findings = []
        actions = []
        warnings = []

        # Check current version
        findings.append("Current version: 1.0.0")  # Would read from package metadata

        # Check remote for updates
        # (would implement actual version check)
        update_available = False

        if update_available:
            findings.append("Update available: 1.0.1")
            findings.append("Changelog: Security patch for HTTP tool")

            if self.config.auto_heal_minor:
                actions.append("Auto-downloaded update (pending restart)")
            else:
                findings.append("Update pending user confirmation")
        else:
            findings.append("No updates available")

        # Check for critical security patches
        critical_patch = False
        if critical_patch:
            warnings.append("CRITICAL security patch available")
            if self.config.quarantine_on_critical:
                actions.append("Applied emergency patch (quarantine until verified)")

        return TaskResult(
            task=MaintenanceTask.UPDATE,
            executed_at=datetime.now(),
            success=True,
            findings=findings,
            actions_taken=actions,
            warnings=warnings,
            duration_seconds=time.time() - start,
        )

    async def _run_optimization(self) -> TaskResult:
        """Optimize performance and clean up."""
        start = time.time()
        findings = []
        actions = []

        # Clean temporary files
        temp_dir = Path.home() / ".lollmsbot" / "temp"
        if temp_dir.exists():
            # Remove files older than 24 hours
            cleaned = 0
            # Would implement actual cleanup
            actions.append(f"Cleaned {cleaned} temporary files")

        # Optimize storage
        actions.append("Ran storage optimization")

        # Clear expired caches
        actions.append("Cleared expired cache entries")

        # Balance load history
        findings.append("Load average (24h): normal")

        return TaskResult(
            task=MaintenanceTask.OPTIMIZATION,
            executed_at=datetime.now(),
            success=True,
            findings=findings,
            actions_taken=actions,
            duration_seconds=time.time() - start,
        )

    async def _run_healing(self) -> TaskResult:
        """Detect and correct behavioral drift."""
        start = time.time()
        findings = []
        actions = []
        warnings = []

        # Check for Soul drift (deviation from defined identity)
        current_behavior = await self._sample_recent_behavior()
        expected_traits = {t.name: t.intensity.value for t in self.soul.traits}

        drift_detected = False
        for trait_name, expected in expected_traits.items():
            actual = current_behavior.get(f"trait_{trait_name}", expected)
            deviation = abs(actual - expected) / expected if expected > 0 else 0

            if deviation > 0.3:  # 30% deviation
                drift_detected = True
                warnings.append(f"Trait drift: {trait_name} at {deviation*100:.0f}% deviation")

        if drift_detected:
            findings.append("Behavioral drift detected - recommending Soul recalibration")
            if self.config.auto_heal_minor:
                actions.append("Applied automatic trait re-centering")

        # Check for performance degradation
        recent_latency = await self._get_average_latency(hours=24)
        baseline_latency = await self._get_average_latency(hours=168)  # 1 week

        if recent_latency > baseline_latency * 1.5:
            warnings.append(f"Performance degradation: {recent_latency/baseline_latency:.1f}x slower")
            actions.append("Triggered deep optimization")

        # Attempt self-correction
        if warnings and self.config.confirm_heal_major:
            findings.append("Major healing requires user confirmation")
        elif warnings:
            # Would implement actual healing
            pass

        return TaskResult(
            task=MaintenanceTask.HEALING,
            executed_at=datetime.now(),
            success=not drift_detected or self.config.auto_heal_minor,
            findings=findings,
            actions_taken=actions,
            warnings=warnings,
            duration_seconds=time.time() - start,
        )

    # Placeholder helpers
    async def _sample_recent_behavior(self) -> Dict[str, float]: return {}
    async def _get_average_latency(self, hours: int) -> float: return 1.0

    # ============== PUBLIC API ==============

    async def run_once(self, tasks: Optional[List[MaintenanceTask]] = None) -> HeartbeatReport:
        """Execute a single heartbeat cycle immediately."""
        cycle_id = f"hb_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{hashlib.sha256(str(time.time()).encode()).hexdigest()[:8]}"

        report = HeartbeatReport(
            cycle_id=cycle_id,
            started_at=datetime.now(),
            config_snapshot={
                "enabled_tasks": [t.name for t, v in self.config.tasks_enabled.items() if v],
                "interval_minutes": self.config.interval_minutes,
            },
        )

        # Determine which tasks to run
        to_run = tasks or [t for t in MaintenanceTask if self.config.tasks_enabled.get(t, True)]

        # Check task-specific intervals
        if not tasks:  # Scheduled run, respect intervals
            to_run = [
                t for t in to_run
                if self._should_run_task(t)
            ]

        # Execute tasks
        for task in to_run:
            handler = self._task_handlers.get(task)
            if handler:
                try:
                    result = await handler()
                    report.task_results.append(result)
                except Exception as e:
                    report.task_results.append(TaskResult(
                        task=task,
                        executed_at=datetime.now(),
                        success=False,
                        findings=[],
                        actions_taken=[],
                        warnings=[f"Task failed: {str(e)}"],
                        duration_seconds=0,
                    ))
                    logger.error(f"Heartbeat task {task.name} failed: {e}")

        # Finalize report
        report.completed_at = datetime.now()
        report.system_state_after = {
            "memory_pressure": (await self.memory_monitor.analyze())["pressure_score"],
            "guardian_quarantine": self.guardian.is_quarantined,
        }

        # Generate recommendations
        for result in report.task_results:
            if result.warnings:
                report.recommendations.extend([
                    f"[{result.task.name}] {w}" for w in result.warnings
                ])

        # Store in history
        self._run_history.append(report)
        if len(self._run_history) > self._max_history:
            self._run_history.pop(0)

        self._last_run = datetime.now()
        logger.info(f"Heartbeat cycle {cycle_id} completed: {report.success_rate*100:.0f}% tasks successful")

        return report

    def _should_run_task(self, task: MaintenanceTask) -> bool:
        """Check if enough time has passed since this task last ran."""
        # Would implement actual interval tracking
        return True

    async def start(self) -> None:
        """Start continuous heartbeat loop."""
        if self._running:
            return

        self._running = True
        self._stop_event.clear()
        self._task = asyncio.create_task(self._heartbeat_loop())
        logger.info(f"Heartbeat started: {self.config.interval_minutes} minute interval")

    async def stop(self) -> None:
        """Stop continuous heartbeat."""
        if not self._running:
            return

        self._running = False
        self._stop_event.set()

        if self._task:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass

        logger.info("Heartbeat stopped")

    async def _heartbeat_loop(self) -> None:
        """Main heartbeat loop."""
        while self._running:
            try:
                await self.run_once()
            except Exception as e:
                logger.error(f"Heartbeat cycle failed: {e}")

            # Wait for interval or stop signal
            try:
                await asyncio.wait_for(
                    self._stop_event.wait(),
                    timeout=self.config.interval_minutes * 60,
                )
            except asyncio.TimeoutError:
                pass  # Normal interval expiration, continue loop

    def get_status(self) -> Dict[str, Any]:
        """Get current heartbeat status."""
        return {
            "running": self._running,
            "enabled": self.config.enabled,
            "interval_minutes": self.config.interval_minutes,
            "last_run": self._last_run.isoformat() if self._last_run else None,
            "total_cycles": len(self._run_history),
            "recent_success_rate": (
                sum(r.success_rate for r in self._run_history[-5:]) /
                min(len(self._run_history), 5)
            ) if self._run_history else 0,
            "next_run": (
                (self._last_run + timedelta(minutes=self.config.interval_minutes)).isoformat()
                if self._last_run and self._running else None
            ),
        }

    def get_recent_reports(self, count: int = 5) -> List[Dict[str, Any]]:
        """Get recent heartbeat reports."""
        return [r.to_dict() for r in self._run_history[-count:]]

    def update_config(self, **kwargs) -> None:
        """Update heartbeat configuration."""
        for key, value in kwargs.items():
            if hasattr(self.config, key):
                setattr(self.config, key, value)
        self._save_config()


# Global singleton
_heartbeat_instance: Optional[Heartbeat] = None


def get_heartbeat(config: Optional[HeartbeatConfig] = None) -> Heartbeat:
    """Get or create the singleton Heartbeat instance."""
    global _heartbeat_instance
    if _heartbeat_instance is None:
        _heartbeat_instance = Heartbeat(config)
    return _heartbeat_instance