alma_memory-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1396 @@
"""
ALMA Forgetting Mechanism.

Implements intelligent memory pruning to prevent bloat and maintain relevance.

Features:
- Confidence decay over time (exponential, linear, step functions)
- Staleness detection based on last_validated timestamps
- Automated cleanup job scheduling
- Memory growth monitoring and alerting
"""

import logging
import time
import threading
import math
from datetime import datetime, timezone, timedelta
from typing import Optional, List, Dict, Any, Callable
from dataclasses import dataclass, field
from enum import Enum
from abc import ABC, abstractmethod

from alma.types import Heuristic, Outcome, DomainKnowledge, AntiPattern
from alma.storage.base import StorageBackend

logger = logging.getLogger(__name__)


class PruneReason(Enum):
    """Reason for pruning a memory item."""
    STALE = "stale"                    # Too old without validation
    LOW_CONFIDENCE = "low_confidence"  # Below confidence threshold
    LOW_SUCCESS_RATE = "low_success"   # Too many failures
    SUPERSEDED = "superseded"          # Replaced by better heuristic
    DUPLICATE = "duplicate"            # Duplicate of another item
    QUOTA_EXCEEDED = "quota"           # Agent memory quota exceeded


@dataclass
class PruneResult:
    """Result of a prune operation."""
    reason: PruneReason
    item_type: str
    item_id: str
    agent: str
    project_id: str
    details: str = ""


@dataclass
class PruneSummary:
    """Summary of a complete prune operation."""
    outcomes_pruned: int = 0
    heuristics_pruned: int = 0
    knowledge_pruned: int = 0
    anti_patterns_pruned: int = 0
    total_pruned: int = 0
    pruned_items: List[PruneResult] = field(default_factory=list)
    execution_time_ms: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "outcomes_pruned": self.outcomes_pruned,
            "heuristics_pruned": self.heuristics_pruned,
            "knowledge_pruned": self.knowledge_pruned,
            "anti_patterns_pruned": self.anti_patterns_pruned,
            "total_pruned": self.total_pruned,
            "execution_time_ms": self.execution_time_ms,
        }


@dataclass
class PrunePolicy:
    """
    Configuration for memory pruning behavior.

    Defines thresholds and quotas for different memory types.
    """
    # Age-based pruning
    outcome_max_age_days: int = 90
    knowledge_max_age_days: int = 180
    anti_pattern_max_age_days: int = 365

    # Confidence thresholds
    heuristic_min_confidence: float = 0.3
    knowledge_min_confidence: float = 0.5

    # Success rate thresholds
    heuristic_min_success_rate: float = 0.4
    min_occurrences_before_prune: int = 5  # Don't prune until enough data

    # Quota limits (per agent)
    max_heuristics_per_agent: int = 100
    max_outcomes_per_agent: int = 500
    max_knowledge_per_agent: int = 200
    max_anti_patterns_per_agent: int = 50

    # Staleness (time since last validation)
    heuristic_stale_days: int = 60
    knowledge_stale_days: int = 90
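
# A minimal configuration sketch (illustrative, not part of the published
# module): tightening the defaults above for a small deployment. All values
# shown are hypothetical.
#
#     strict_policy = PrunePolicy(
#         outcome_max_age_days=30,       # drop raw outcomes after a month
#         heuristic_min_confidence=0.5,  # prune weak heuristics sooner
#         max_heuristics_per_agent=50,   # halve the per-agent quota
#     )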


class ForgettingEngine:
    """
    Manages memory pruning and forgetting.

    Implements multiple strategies:
    - Age-based decay (old memories are pruned)
    - Confidence-based pruning (low-confidence items removed)
    - Success-rate-based pruning (unsuccessful patterns removed)
    - Quota enforcement (prevents memory bloat)
    - Staleness detection (unvalidated memories removed)
    """

    def __init__(
        self,
        storage: StorageBackend,
        policy: Optional[PrunePolicy] = None,
    ):
        """
        Initialize forgetting engine.

        Args:
            storage: Storage backend to prune
            policy: Pruning policy configuration
        """
        self.storage = storage
        self.policy = policy or PrunePolicy()

    def prune(
        self,
        project_id: str,
        agent: Optional[str] = None,
        dry_run: bool = False,
    ) -> PruneSummary:
        """
        Run a complete prune operation.

        Args:
            project_id: Project to prune
            agent: Specific agent or None for all
            dry_run: If True, only report what would be pruned

        Returns:
            PruneSummary with details
        """
        start_time = time.time()

        summary = PruneSummary()

        # Prune each memory type
        summary.outcomes_pruned = self._prune_stale_outcomes(
            project_id, agent, dry_run, summary.pruned_items
        )

        summary.heuristics_pruned = self._prune_heuristics(
            project_id, agent, dry_run, summary.pruned_items
        )

        summary.knowledge_pruned = self._prune_domain_knowledge(
            project_id, agent, dry_run, summary.pruned_items
        )

        summary.anti_patterns_pruned = self._prune_anti_patterns(
            project_id, agent, dry_run, summary.pruned_items
        )

        # Enforce quotas
        quota_pruned = self._enforce_quotas(
            project_id, agent, dry_run, summary.pruned_items
        )
        summary.heuristics_pruned += quota_pruned.get("heuristics", 0)
        summary.outcomes_pruned += quota_pruned.get("outcomes", 0)

        summary.total_pruned = (
            summary.outcomes_pruned +
            summary.heuristics_pruned +
            summary.knowledge_pruned +
            summary.anti_patterns_pruned
        )

        summary.execution_time_ms = int((time.time() - start_time) * 1000)

        action = "Would prune" if dry_run else "Pruned"
        logger.info(
            f"{action} {summary.total_pruned} items for project={project_id}, "
            f"agent={agent or 'all'}"
        )

        return summary
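
    # A dry-run-first workflow sketch (illustrative; assumes a concrete
    # StorageBackend instance named `storage`):
    #
    #     engine = ForgettingEngine(storage)
    #     preview = engine.prune(project_id="demo", dry_run=True)
    #     for item in preview.pruned_items:
    #         print(item.reason.value, item.item_type, item.details)
    #     engine.prune(project_id="demo")  # apply for real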

    def _prune_stale_outcomes(
        self,
        project_id: str,
        agent: Optional[str],
        dry_run: bool,
        results: List[PruneResult],
    ) -> int:
        """Prune outcomes older than max age."""
        cutoff = datetime.now(timezone.utc) - timedelta(
            days=self.policy.outcome_max_age_days
        )

        if dry_run:
            # Report the outcomes that would be pruned, without deleting
            outcomes = self.storage.get_outcomes(
                project_id=project_id,
                agent=agent,
                top_k=10000,
                success_only=False,
            )
            count = 0
            for o in outcomes:
                if o.timestamp < cutoff:
                    count += 1
                    results.append(PruneResult(
                        reason=PruneReason.STALE,
                        item_type="outcome",
                        item_id=o.id,
                        agent=o.agent,
                        project_id=project_id,
                        details=f"Older than {self.policy.outcome_max_age_days} days",
                    ))
            return count
        else:
            return self.storage.delete_outcomes_older_than(
                project_id=project_id,
                older_than=cutoff,
                agent=agent,
            )

    def _prune_heuristics(
        self,
        project_id: str,
        agent: Optional[str],
        dry_run: bool,
        results: List[PruneResult],
    ) -> int:
        """Prune heuristics based on confidence and success rate."""
        heuristics = self.storage.get_heuristics(
            project_id=project_id,
            agent=agent,
            top_k=10000,
            min_confidence=0.0,
        )

        to_delete = []
        now = datetime.now(timezone.utc)
        stale_cutoff = now - timedelta(days=self.policy.heuristic_stale_days)

        for h in heuristics:
            reason = None
            details = ""

            # Check confidence
            if h.confidence < self.policy.heuristic_min_confidence:
                reason = PruneReason.LOW_CONFIDENCE
                details = f"Confidence {h.confidence:.2f} < {self.policy.heuristic_min_confidence}"

            # Check success rate (only if enough occurrences)
            elif (h.occurrence_count >= self.policy.min_occurrences_before_prune and
                  h.success_rate < self.policy.heuristic_min_success_rate):
                reason = PruneReason.LOW_SUCCESS_RATE
                details = f"Success rate {h.success_rate:.2f} < {self.policy.heuristic_min_success_rate}"

            # Check staleness
            elif h.last_validated < stale_cutoff:
                reason = PruneReason.STALE
                details = f"Not validated since {h.last_validated.date()}"

            if reason:
                to_delete.append(h)
                results.append(PruneResult(
                    reason=reason,
                    item_type="heuristic",
                    item_id=h.id,
                    agent=h.agent,
                    project_id=project_id,
                    details=details,
                ))

        if not dry_run:
            for h in to_delete:
                self.storage.delete_heuristic(h.id)

        return len(to_delete)

    def _prune_domain_knowledge(
        self,
        project_id: str,
        agent: Optional[str],
        dry_run: bool,
        results: List[PruneResult],
    ) -> int:
        """Prune old or low-confidence domain knowledge."""
        knowledge = self.storage.get_domain_knowledge(
            project_id=project_id,
            agent=agent,
            top_k=10000,
        )

        to_delete = []
        now = datetime.now(timezone.utc)
        age_cutoff = now - timedelta(days=self.policy.knowledge_max_age_days)
        stale_cutoff = now - timedelta(days=self.policy.knowledge_stale_days)

        for dk in knowledge:
            reason = None
            details = ""

            # Check confidence
            if dk.confidence < self.policy.knowledge_min_confidence:
                reason = PruneReason.LOW_CONFIDENCE
                details = f"Confidence {dk.confidence:.2f} < {self.policy.knowledge_min_confidence}"

            # Check age (measured from last verification, not creation)
            elif dk.last_verified < age_cutoff:
                reason = PruneReason.STALE
                details = f"Older than {self.policy.knowledge_max_age_days} days"

            # Check staleness
            elif dk.last_verified < stale_cutoff:
                reason = PruneReason.STALE
                details = f"Not verified since {dk.last_verified.date()}"

            if reason:
                to_delete.append(dk)
                results.append(PruneResult(
                    reason=reason,
                    item_type="domain_knowledge",
                    item_id=dk.id,
                    agent=dk.agent,
                    project_id=project_id,
                    details=details,
                ))

        if not dry_run:
            for dk in to_delete:
                self.storage.delete_domain_knowledge(dk.id)

        return len(to_delete)

    def _prune_anti_patterns(
        self,
        project_id: str,
        agent: Optional[str],
        dry_run: bool,
        results: List[PruneResult],
    ) -> int:
        """Prune old anti-patterns."""
        anti_patterns = self.storage.get_anti_patterns(
            project_id=project_id,
            agent=agent,
            top_k=10000,
        )

        to_delete = []
        now = datetime.now(timezone.utc)
        age_cutoff = now - timedelta(days=self.policy.anti_pattern_max_age_days)

        for ap in anti_patterns:
            if ap.last_seen < age_cutoff:
                to_delete.append(ap)
                results.append(PruneResult(
                    reason=PruneReason.STALE,
                    item_type="anti_pattern",
                    item_id=ap.id,
                    agent=ap.agent,
                    project_id=project_id,
                    details=f"Not seen since {ap.last_seen.date()}",
                ))

        if not dry_run:
            for ap in to_delete:
                self.storage.delete_anti_pattern(ap.id)

        return len(to_delete)

    def _enforce_quotas(
        self,
        project_id: str,
        agent: Optional[str],
        dry_run: bool,
        results: List[PruneResult],
    ) -> Dict[str, int]:
        """Enforce per-agent memory quotas."""
        pruned = {"heuristics": 0, "outcomes": 0}

        if agent:
            agents = [agent]
        else:
            # Get all agents with data
            stats = self.storage.get_stats(project_id=project_id)
            agents = stats.get("agents", [])

        for ag in agents:
            # Check heuristic quota
            heuristics = self.storage.get_heuristics(
                project_id=project_id,
                agent=ag,
                top_k=self.policy.max_heuristics_per_agent + 100,
                min_confidence=0.0,
            )

            if len(heuristics) > self.policy.max_heuristics_per_agent:
                # Sort by confidence (lowest first) so the weakest are removed
                sorted_h = sorted(heuristics, key=lambda x: x.confidence)
                to_remove = len(heuristics) - self.policy.max_heuristics_per_agent

                for h in sorted_h[:to_remove]:
                    results.append(PruneResult(
                        reason=PruneReason.QUOTA_EXCEEDED,
                        item_type="heuristic",
                        item_id=h.id,
                        agent=ag,
                        project_id=project_id,
                        details=f"Exceeded quota of {self.policy.max_heuristics_per_agent}",
                    ))
                    if not dry_run:
                        self.storage.delete_heuristic(h.id)
                    # Counted on dry runs too, so the summary reflects
                    # what would be removed
                    pruned["heuristics"] += 1

            # Check outcome quota
            outcomes = self.storage.get_outcomes(
                project_id=project_id,
                agent=ag,
                top_k=self.policy.max_outcomes_per_agent + 100,
                success_only=False,
            )

            if len(outcomes) > self.policy.max_outcomes_per_agent:
                # Sort by timestamp (oldest first) so the oldest are removed
                sorted_o = sorted(outcomes, key=lambda x: x.timestamp)
                to_remove = len(outcomes) - self.policy.max_outcomes_per_agent

                for o in sorted_o[:to_remove]:
                    results.append(PruneResult(
                        reason=PruneReason.QUOTA_EXCEEDED,
                        item_type="outcome",
                        item_id=o.id,
                        agent=ag,
                        project_id=project_id,
                        details=f"Exceeded quota of {self.policy.max_outcomes_per_agent}",
                    ))
                    if not dry_run:
                        self.storage.delete_outcome(o.id)
                    pruned["outcomes"] += 1

        return pruned
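
    # Worked example (hypothetical numbers): with max_heuristics_per_agent=100,
    # an agent holding 120 heuristics has its 20 lowest-confidence entries
    # removed; with max_outcomes_per_agent=500, an agent with 530 outcomes
    # loses its 30 oldest. Note that top_k is capped at quota + 100, so a
    # single pass trims at most 100 items over quota per type.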

    def compute_decay_score(
        self,
        item_age_days: float,
        confidence: float,
        success_rate: float,
        occurrence_count: int,
    ) -> float:
        """
        Compute a decay score for an item (lower = more likely to forget).

        Factors:
        - Recency (newer = higher)
        - Confidence (higher = higher)
        - Success rate (higher = higher)
        - Validation frequency (more = higher)

        Returns:
            Score between 0 and 1
        """
        # Age decay (half-life of 30 days)
        age_score = 0.5 ** (item_age_days / 30.0)

        # Normalize occurrence count (cap at 20)
        occurrence_score = min(occurrence_count / 20.0, 1.0)

        # Weighted combination
        return (
            0.3 * age_score +
            0.3 * confidence +
            0.2 * success_rate +
            0.2 * occurrence_score
        )
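
    # Worked example (hypothetical values): a 30-day-old heuristic with
    # confidence 0.8, success rate 0.7, and 10 occurrences scores
    #   age_score        = 0.5 ** (30 / 30)  = 0.5
    #   occurrence_score = min(10 / 20, 1.0) = 0.5
    #   score = 0.3*0.5 + 0.3*0.8 + 0.2*0.7 + 0.2*0.5 = 0.63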

    def identify_candidates(
        self,
        project_id: str,
        agent: Optional[str] = None,
        max_candidates: int = 20,
    ) -> List[Dict[str, Any]]:
        """
        Identify memory items that are candidates for pruning.

        Returns items with the lowest decay scores. Currently only
        heuristics are analyzed.

        Args:
            project_id: Project to analyze
            agent: Specific agent or None for all
            max_candidates: Maximum candidates to return

        Returns:
            List of candidate items with scores
        """
        candidates = []
        now = datetime.now(timezone.utc)

        # Analyze heuristics
        heuristics = self.storage.get_heuristics(
            project_id=project_id,
            agent=agent,
            top_k=1000,
            min_confidence=0.0,
        )

        for h in heuristics:
            age_days = (now - h.created_at).total_seconds() / (24 * 60 * 60)
            score = self.compute_decay_score(
                item_age_days=age_days,
                confidence=h.confidence,
                success_rate=h.success_rate,
                occurrence_count=h.occurrence_count,
            )
            candidates.append({
                "type": "heuristic",
                "id": h.id,
                "agent": h.agent,
                "score": score,
                "age_days": int(age_days),
                "confidence": h.confidence,
                "summary": h.strategy[:50],
            })

        # Sort by score (lowest first = best candidates for pruning)
        candidates.sort(key=lambda x: x["score"])

        return candidates[:max_candidates]


# ==================== DECAY FUNCTIONS ====================


class DecayFunction(ABC):
    """Abstract base class for confidence decay functions."""

    @abstractmethod
    def compute_decay(self, days_since_validation: float) -> float:
        """
        Compute decay multiplier for a given time since validation.

        Args:
            days_since_validation: Days since last validation

        Returns:
            Multiplier between 0 and 1 to apply to confidence
        """
        pass

    @abstractmethod
    def get_name(self) -> str:
        """Return the name of this decay function."""
        pass


class ExponentialDecay(DecayFunction):
    """
    Exponential decay with configurable half-life.

    Confidence = original * 0.5^(days/half_life)
    """

    def __init__(self, half_life_days: float = 30.0):
        """
        Initialize exponential decay.

        Args:
            half_life_days: Days until confidence halves
        """
        self.half_life_days = half_life_days

    def compute_decay(self, days_since_validation: float) -> float:
        """Compute exponential decay multiplier."""
        return 0.5 ** (days_since_validation / self.half_life_days)

    def get_name(self) -> str:
        return f"exponential(half_life={self.half_life_days}d)"


class LinearDecay(DecayFunction):
    """
    Linear decay toward a floor over a specified period.

    Confidence decreases linearly from 1 to min_value over decay_period_days.
    """

    def __init__(
        self,
        decay_period_days: float = 90.0,
        min_value: float = 0.1,
    ):
        """
        Initialize linear decay.

        Args:
            decay_period_days: Days until confidence reaches min_value
            min_value: Minimum confidence value (floor)
        """
        self.decay_period_days = decay_period_days
        self.min_value = min_value

    def compute_decay(self, days_since_validation: float) -> float:
        """Compute linear decay multiplier."""
        decay = 1.0 - (days_since_validation / self.decay_period_days) * (1.0 - self.min_value)
        return max(self.min_value, decay)

    def get_name(self) -> str:
        return f"linear(period={self.decay_period_days}d, min={self.min_value})"


class StepDecay(DecayFunction):
    """
    Step-wise decay with configurable thresholds.

    Confidence drops at specific day thresholds.
    """

    def __init__(
        self,
        steps: Optional[List[tuple]] = None,
    ):
        """
        Initialize step decay.

        Args:
            steps: List of (days, multiplier) tuples, sorted by days ascending.
                Default: [(30, 0.9), (60, 0.7), (90, 0.5), (180, 0.3)]
        """
        self.steps = steps or [
            (30, 0.9),
            (60, 0.7),
            (90, 0.5),
            (180, 0.3),
        ]
        # Ensure sorted
        self.steps = sorted(self.steps, key=lambda x: x[0])

    def compute_decay(self, days_since_validation: float) -> float:
        """Compute step decay multiplier."""
        multiplier = 1.0
        for threshold_days, step_multiplier in self.steps:
            if days_since_validation >= threshold_days:
                multiplier = step_multiplier
            else:
                break
        return multiplier

    def get_name(self) -> str:
        return f"step({len(self.steps)} steps)"


class NoDecay(DecayFunction):
    """No decay - confidence remains constant."""

    def compute_decay(self, days_since_validation: float) -> float:
        return 1.0

    def get_name(self) -> str:
        return "none"
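

# Comparison sketch (illustrative): multipliers each function yields at
# 60 days since validation, using the defaults above.
#
#     ExponentialDecay(30).compute_decay(60)   # 0.5 ** 2        = 0.25
#     LinearDecay(90, 0.1).compute_decay(60)   # 1 - (60/90)*0.9 = 0.40
#     StepDecay().compute_decay(60)            # 60-day step     = 0.70
#     NoDecay().compute_decay(60)              # always            1.00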


# ==================== CONFIDENCE DECAYER ====================


@dataclass
class DecayResult:
    """Result of applying confidence decay."""
    items_processed: int = 0
    items_updated: int = 0
    items_pruned: int = 0
    avg_decay_applied: float = 0.0
    execution_time_ms: int = 0
683
+ class ConfidenceDecayer:
684
+ """
685
+ Applies confidence decay to memories based on time since validation.
686
+
687
+ Unlike pruning (which removes items), decay reduces confidence over time,
688
+ making items less likely to be retrieved while preserving them for potential
689
+ revalidation.
690
+ """
691
+
692
+ def __init__(
693
+ self,
694
+ storage: StorageBackend,
695
+ decay_function: Optional[DecayFunction] = None,
696
+ prune_below_confidence: float = 0.1,
697
+ ):
698
+ """
699
+ Initialize confidence decayer.
700
+
701
+ Args:
702
+ storage: Storage backend to update
703
+ decay_function: Function to compute decay (default: ExponentialDecay)
704
+ prune_below_confidence: Auto-prune items that decay below this threshold
705
+ """
706
+ self.storage = storage
707
+ self.decay_function = decay_function or ExponentialDecay(half_life_days=30.0)
708
+ self.prune_below_confidence = prune_below_confidence
709
+
710
+ def apply_decay(
711
+ self,
712
+ project_id: str,
713
+ agent: Optional[str] = None,
714
+ dry_run: bool = False,
715
+ ) -> DecayResult:
716
+ """
717
+ Apply confidence decay to all eligible memories.
718
+
719
+ Args:
720
+ project_id: Project to process
721
+ agent: Specific agent or None for all
722
+ dry_run: If True, calculate but don't update
723
+
724
+ Returns:
725
+ DecayResult with statistics
726
+ """
727
+ start_time = time.time()
728
+ result = DecayResult()
729
+ now = datetime.now(timezone.utc)
730
+ total_decay = 0.0
731
+
732
+ # Process heuristics
733
+ heuristics = self.storage.get_heuristics(
734
+ project_id=project_id,
735
+ agent=agent,
736
+ top_k=10000,
737
+ min_confidence=0.0,
738
+ )
739
+
740
+ for h in heuristics:
741
+ result.items_processed += 1
742
+ days_since = (now - h.last_validated).total_seconds() / (24 * 60 * 60)
743
+ decay_multiplier = self.decay_function.compute_decay(days_since)
744
+
745
+ new_confidence = h.confidence * decay_multiplier
746
+ total_decay += (1.0 - decay_multiplier)
747
+
748
+ if new_confidence != h.confidence:
749
+ if new_confidence < self.prune_below_confidence:
750
+ # Below threshold - prune
751
+ if not dry_run:
752
+ self.storage.delete_heuristic(h.id)
753
+ result.items_pruned += 1
754
+ else:
755
+ # Update confidence
756
+ if not dry_run:
757
+ self.storage.update_heuristic_confidence(h.id, new_confidence)
758
+ result.items_updated += 1
759
+
760
+ # Process domain knowledge
761
+ knowledge = self.storage.get_domain_knowledge(
762
+ project_id=project_id,
763
+ agent=agent,
764
+ top_k=10000,
765
+ )
766
+
767
+ for dk in knowledge:
768
+ result.items_processed += 1
769
+ days_since = (now - dk.last_verified).total_seconds() / (24 * 60 * 60)
770
+ decay_multiplier = self.decay_function.compute_decay(days_since)
771
+
772
+ new_confidence = dk.confidence * decay_multiplier
773
+ total_decay += (1.0 - decay_multiplier)
774
+
775
+ if new_confidence != dk.confidence:
776
+ if new_confidence < self.prune_below_confidence:
777
+ if not dry_run:
778
+ self.storage.delete_domain_knowledge(dk.id)
779
+ result.items_pruned += 1
780
+ else:
781
+ if not dry_run:
782
+ self.storage.update_knowledge_confidence(dk.id, new_confidence)
783
+ result.items_updated += 1
784
+
785
+ result.execution_time_ms = int((time.time() - start_time) * 1000)
786
+ if result.items_processed > 0:
787
+ result.avg_decay_applied = total_decay / result.items_processed
788
+
789
+ action = "Would apply" if dry_run else "Applied"
790
+ logger.info(
791
+ f"{action} decay to {result.items_processed} items: "
792
+ f"{result.items_updated} updated, {result.items_pruned} pruned "
793
+ f"(avg decay: {result.avg_decay_applied:.2%})"
794
+ )
795
+
796
+ return result
797
+
798
+
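

# Usage sketch (illustrative; `storage` stands in for any concrete
# StorageBackend). Pairs a gentler half-life with a lower prune floor:
#
#     decayer = ConfidenceDecayer(
#         storage,
#         decay_function=ExponentialDecay(half_life_days=60.0),
#         prune_below_confidence=0.05,
#     )
#     stats = decayer.apply_decay(project_id="demo", dry_run=True)
#     print(stats.items_updated, stats.items_pruned)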


# ==================== MEMORY HEALTH MONITOR ====================


@dataclass
class MemoryHealthMetrics:
    """Metrics about memory health and growth."""
    total_items: int = 0
    heuristic_count: int = 0
    outcome_count: int = 0
    knowledge_count: int = 0
    anti_pattern_count: int = 0
    avg_heuristic_confidence: float = 0.0
    avg_heuristic_age_days: float = 0.0
    stale_heuristic_count: int = 0
    low_confidence_count: int = 0
    storage_bytes: int = 0
    agents_count: int = 0
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "total_items": self.total_items,
            "heuristic_count": self.heuristic_count,
            "outcome_count": self.outcome_count,
            "knowledge_count": self.knowledge_count,
            "anti_pattern_count": self.anti_pattern_count,
            "avg_heuristic_confidence": round(self.avg_heuristic_confidence, 3),
            "avg_heuristic_age_days": round(self.avg_heuristic_age_days, 1),
            "stale_heuristic_count": self.stale_heuristic_count,
            "low_confidence_count": self.low_confidence_count,
            "storage_bytes": self.storage_bytes,
            "agents_count": self.agents_count,
            "timestamp": self.timestamp.isoformat(),
        }


@dataclass
class HealthAlert:
    """An alert about memory health issues."""
    level: str  # "warning" or "critical"
    category: str
    message: str
    current_value: Any
    threshold: Any
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))


@dataclass
class HealthThresholds:
    """Thresholds for health monitoring alerts."""
    # Warning thresholds
    max_total_items_warning: int = 5000
    max_stale_percentage_warning: float = 0.3
    min_avg_confidence_warning: float = 0.5
    max_agent_items_warning: int = 500

    # Critical thresholds
    max_total_items_critical: int = 10000
    max_stale_percentage_critical: float = 0.5
    min_avg_confidence_critical: float = 0.3
    max_storage_bytes_critical: int = 100 * 1024 * 1024  # 100 MB


class MemoryHealthMonitor:
    """
    Monitors memory health and growth, generating alerts when thresholds
    are exceeded.

    Tracks:
    - Total memory item counts
    - Average confidence levels
    - Staleness ratios
    - Storage size
    - Per-agent statistics
    """

    def __init__(
        self,
        storage: StorageBackend,
        thresholds: Optional[HealthThresholds] = None,
        stale_days: int = 60,
        low_confidence_threshold: float = 0.3,
    ):
        """
        Initialize health monitor.

        Args:
            storage: Storage backend to monitor
            thresholds: Alert thresholds
            stale_days: Days since validation to consider stale
            low_confidence_threshold: Confidence below which to count as low
        """
        self.storage = storage
        self.thresholds = thresholds or HealthThresholds()
        self.stale_days = stale_days
        self.low_confidence_threshold = low_confidence_threshold

        # History for trend analysis
        self._metrics_history: List[MemoryHealthMetrics] = []
        self._max_history = 100

        # Alert callbacks
        self._alert_handlers: List[Callable[[HealthAlert], None]] = []

    def add_alert_handler(self, handler: Callable[[HealthAlert], None]) -> None:
        """Add a callback to be called when alerts are generated."""
        self._alert_handlers.append(handler)

    def collect_metrics(self, project_id: str) -> MemoryHealthMetrics:
        """
        Collect current memory health metrics.

        Args:
            project_id: Project to analyze

        Returns:
            MemoryHealthMetrics snapshot
        """
        now = datetime.now(timezone.utc)
        stale_cutoff = now - timedelta(days=self.stale_days)

        metrics = MemoryHealthMetrics()

        # Get all heuristics
        heuristics = self.storage.get_heuristics(
            project_id=project_id,
            top_k=10000,
            min_confidence=0.0,
        )
        metrics.heuristic_count = len(heuristics)

        if heuristics:
            total_confidence = 0.0
            total_age = 0.0
            for h in heuristics:
                total_confidence += h.confidence
                age_days = (now - h.created_at).total_seconds() / (24 * 60 * 60)
                total_age += age_days
                if h.last_validated < stale_cutoff:
                    metrics.stale_heuristic_count += 1
                if h.confidence < self.low_confidence_threshold:
                    metrics.low_confidence_count += 1

            metrics.avg_heuristic_confidence = total_confidence / len(heuristics)
            metrics.avg_heuristic_age_days = total_age / len(heuristics)

        # Get other counts
        outcomes = self.storage.get_outcomes(
            project_id=project_id,
            top_k=10000,
            success_only=False,
        )
        metrics.outcome_count = len(outcomes)

        knowledge = self.storage.get_domain_knowledge(
            project_id=project_id,
            top_k=10000,
        )
        metrics.knowledge_count = len(knowledge)

        anti_patterns = self.storage.get_anti_patterns(
            project_id=project_id,
            top_k=10000,
        )
        metrics.anti_pattern_count = len(anti_patterns)

        metrics.total_items = (
            metrics.heuristic_count +
            metrics.outcome_count +
            metrics.knowledge_count +
            metrics.anti_pattern_count
        )

        # Get agent count
        stats = self.storage.get_stats(project_id=project_id)
        metrics.agents_count = len(stats.get("agents", []))

        # Estimate storage size (rough approximation: ~500 bytes per item)
        metrics.storage_bytes = metrics.total_items * 500

        # Store in history
        self._metrics_history.append(metrics)
        if len(self._metrics_history) > self._max_history:
            self._metrics_history = self._metrics_history[-self._max_history:]

        return metrics

    def check_health(self, project_id: str) -> List[HealthAlert]:
        """
        Check memory health and generate alerts if thresholds are exceeded.

        Args:
            project_id: Project to check

        Returns:
            List of health alerts (empty if healthy)
        """
        metrics = self.collect_metrics(project_id)
        alerts: List[HealthAlert] = []
        t = self.thresholds

        # Check total items
        if metrics.total_items >= t.max_total_items_critical:
            alerts.append(HealthAlert(
                level="critical",
                category="total_items",
                message="Memory item count critically high",
                current_value=metrics.total_items,
                threshold=t.max_total_items_critical,
            ))
        elif metrics.total_items >= t.max_total_items_warning:
            alerts.append(HealthAlert(
                level="warning",
                category="total_items",
                message="Memory item count approaching limit",
                current_value=metrics.total_items,
                threshold=t.max_total_items_warning,
            ))

        # Check staleness
        if metrics.heuristic_count > 0:
            stale_percentage = metrics.stale_heuristic_count / metrics.heuristic_count
            if stale_percentage >= t.max_stale_percentage_critical:
                alerts.append(HealthAlert(
                    level="critical",
                    category="staleness",
                    message="Too many stale heuristics",
                    current_value=f"{stale_percentage:.0%}",
                    threshold=f"{t.max_stale_percentage_critical:.0%}",
                ))
            elif stale_percentage >= t.max_stale_percentage_warning:
                alerts.append(HealthAlert(
                    level="warning",
                    category="staleness",
                    message="Many heuristics are stale",
                    current_value=f"{stale_percentage:.0%}",
                    threshold=f"{t.max_stale_percentage_warning:.0%}",
                ))

        # Check average confidence
        if metrics.heuristic_count > 0:
            if metrics.avg_heuristic_confidence < t.min_avg_confidence_critical:
                alerts.append(HealthAlert(
                    level="critical",
                    category="confidence",
                    message="Average heuristic confidence critically low",
                    current_value=f"{metrics.avg_heuristic_confidence:.2f}",
                    threshold=f"{t.min_avg_confidence_critical:.2f}",
                ))
            elif metrics.avg_heuristic_confidence < t.min_avg_confidence_warning:
                alerts.append(HealthAlert(
                    level="warning",
                    category="confidence",
                    message="Average heuristic confidence is low",
                    current_value=f"{metrics.avg_heuristic_confidence:.2f}",
                    threshold=f"{t.min_avg_confidence_warning:.2f}",
                ))

        # Check storage size
        if metrics.storage_bytes >= t.max_storage_bytes_critical:
            alerts.append(HealthAlert(
                level="critical",
                category="storage",
                message="Memory storage size critically high",
                current_value=f"{metrics.storage_bytes / (1024*1024):.1f}MB",
                threshold=f"{t.max_storage_bytes_critical / (1024*1024):.1f}MB",
            ))

        # Notify handlers
        for alert in alerts:
            for handler in self._alert_handlers:
                try:
                    handler(alert)
                except Exception as e:
                    logger.error(f"Alert handler error: {e}")

        return alerts
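
    # Sketch of wiring an alert handler (illustrative; the lambda target is
    # hypothetical):
    #
    #     monitor = MemoryHealthMonitor(storage)
    #     monitor.add_alert_handler(
    #         lambda alert: logger.warning("%s: %s", alert.level, alert.message)
    #     )
    #     for alert in monitor.check_health("demo"):
    #         print(alert.category, alert.current_value, alert.threshold)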

    def get_growth_trend(self, project_id: str) -> Dict[str, Any]:
        """
        Analyze memory growth trend from history.

        Note: the trend is computed from this monitor's in-process metrics
        history; project_id is currently unused.

        Args:
            project_id: Project to analyze

        Returns:
            Trend analysis
        """
        if len(self._metrics_history) < 2:
            return {
                "status": "insufficient_data",
                "samples": len(self._metrics_history),
            }

        first = self._metrics_history[0]
        last = self._metrics_history[-1]

        time_span = (last.timestamp - first.timestamp).total_seconds()
        if time_span <= 0:
            return {"status": "insufficient_time_span"}

        days_span = time_span / (24 * 60 * 60)
        item_growth = last.total_items - first.total_items
        growth_per_day = item_growth / days_span if days_span > 0 else 0

        return {
            "status": "ok",
            "samples": len(self._metrics_history),
            "time_span_days": round(days_span, 1),
            "total_growth": item_growth,
            "growth_per_day": round(growth_per_day, 2),
            "first_total": first.total_items,
            "last_total": last.total_items,
            "confidence_trend": round(
                last.avg_heuristic_confidence - first.avg_heuristic_confidence, 3
            ),
        }


# ==================== CLEANUP SCHEDULER ====================


@dataclass
class CleanupJob:
    """Configuration for a scheduled cleanup job."""
    name: str
    project_id: str
    interval_hours: float
    agent: Optional[str] = None
    policy: Optional[PrunePolicy] = None
    apply_decay: bool = True
    last_run: Optional[datetime] = None
    next_run: Optional[datetime] = None
    enabled: bool = True


@dataclass
class CleanupResult:
    """Result of a cleanup job execution."""
    job_name: str
    project_id: str
    started_at: datetime
    completed_at: datetime
    prune_summary: Optional[PruneSummary] = None
    decay_result: Optional[DecayResult] = None
    alerts: List[HealthAlert] = field(default_factory=list)
    success: bool = True
    error: Optional[str] = None
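
# Example job definition (illustrative values; a per-job policy overrides
# the scheduler-wide default for that job only):
#
#     nightly = CleanupJob(
#         name="nightly-demo",
#         project_id="demo",
#         interval_hours=24.0,
#         policy=PrunePolicy(outcome_max_age_days=30),
#         apply_decay=True,
#     )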


class CleanupScheduler:
    """
    Schedules and executes automated memory cleanup jobs.

    Features:
    - Configurable job intervals
    - Prune + decay in a single operation
    - Health check integration
    - Job execution history
    - Thread-safe operation
    """

    def __init__(
        self,
        storage: StorageBackend,
        forgetting_engine: Optional[ForgettingEngine] = None,
        decayer: Optional[ConfidenceDecayer] = None,
        health_monitor: Optional[MemoryHealthMonitor] = None,
    ):
        """
        Initialize cleanup scheduler.

        Args:
            storage: Storage backend
            forgetting_engine: Engine for pruning (created if not provided)
            decayer: Engine for decay (created if not provided)
            health_monitor: Health monitor (created if not provided)
        """
        self.storage = storage
        self.forgetting_engine = forgetting_engine or ForgettingEngine(storage)
        self.decayer = decayer or ConfidenceDecayer(storage)
        self.health_monitor = health_monitor or MemoryHealthMonitor(storage)

        self._jobs: Dict[str, CleanupJob] = {}
        self._history: List[CleanupResult] = []
        self._max_history = 50
        self._lock = threading.RLock()
        self._running = False
        self._stop_event = threading.Event()  # lets stop_background interrupt the sleep
        self._thread: Optional[threading.Thread] = None

    def register_job(self, job: CleanupJob) -> None:
        """
        Register a cleanup job.

        Args:
            job: Job configuration
        """
        with self._lock:
            now = datetime.now(timezone.utc)
            job.next_run = now + timedelta(hours=job.interval_hours)
            self._jobs[job.name] = job
            logger.info(f"Registered cleanup job '{job.name}' for project {job.project_id}")

    def unregister_job(self, name: str) -> bool:
        """
        Unregister a cleanup job.

        Args:
            name: Job name

        Returns:
            True if job was found and removed
        """
        with self._lock:
            if name in self._jobs:
                del self._jobs[name]
                logger.info(f"Unregistered cleanup job '{name}'")
                return True
            return False

    def run_job(self, name: str, dry_run: bool = False) -> CleanupResult:
        """
        Manually run a specific job.

        Args:
            name: Job name
            dry_run: If True, don't actually modify data

        Returns:
            CleanupResult with execution details
        """
        with self._lock:
            if name not in self._jobs:
                raise ValueError(f"Job '{name}' not found")
            job = self._jobs[name]

        return self._execute_job(job, dry_run)

    def run_all_due(self) -> List[CleanupResult]:
        """
        Run all jobs that are due.

        Returns:
            List of results for executed jobs
        """
        results = []
        now = datetime.now(timezone.utc)

        with self._lock:
            due_jobs = [
                job for job in self._jobs.values()
                if job.enabled and job.next_run and job.next_run <= now
            ]

        for job in due_jobs:
            try:
                result = self._execute_job(job)
                results.append(result)
            except Exception as e:
                logger.error(f"Error running job '{job.name}': {e}")
                results.append(CleanupResult(
                    job_name=job.name,
                    project_id=job.project_id,
                    started_at=now,
                    completed_at=datetime.now(timezone.utc),
                    success=False,
                    error=str(e),
                ))

        return results

    def _execute_job(self, job: CleanupJob, dry_run: bool = False) -> CleanupResult:
        """Execute a cleanup job."""
        started_at = datetime.now(timezone.utc)
        result = CleanupResult(
            job_name=job.name,
            project_id=job.project_id,
            started_at=started_at,
            completed_at=started_at,
        )

        try:
            # Run prune
            engine = ForgettingEngine(
                self.storage,
                job.policy or self.forgetting_engine.policy,
            )
            result.prune_summary = engine.prune(
                project_id=job.project_id,
                agent=job.agent,
                dry_run=dry_run,
            )

            # Run decay if enabled
            if job.apply_decay:
                result.decay_result = self.decayer.apply_decay(
                    project_id=job.project_id,
                    agent=job.agent,
                    dry_run=dry_run,
                )

            # Check health
            result.alerts = self.health_monitor.check_health(job.project_id)

            # Update job timing
            with self._lock:
                now = datetime.now(timezone.utc)
                job.last_run = now
                job.next_run = now + timedelta(hours=job.interval_hours)

            result.success = True

        except Exception as e:
            result.success = False
            result.error = str(e)
            logger.error(f"Cleanup job '{job.name}' failed: {e}")

        result.completed_at = datetime.now(timezone.utc)

        # Store in history
        with self._lock:
            self._history.append(result)
            if len(self._history) > self._max_history:
                self._history = self._history[-self._max_history:]

        return result

    def start_background(self, check_interval_seconds: int = 60) -> None:
        """
        Start background job execution thread.

        Args:
            check_interval_seconds: How often to check for due jobs
        """
        if self._running:
            logger.warning("Scheduler already running")
            return

        self._running = True
        self._stop_event.clear()

        def run():
            while self._running:
                try:
                    self.run_all_due()
                except Exception as e:
                    logger.error(f"Scheduler error: {e}")
                # Event-based wait so stop_background() can interrupt the
                # sleep instead of blocking for a full interval
                self._stop_event.wait(check_interval_seconds)

        self._thread = threading.Thread(target=run, daemon=True)
        self._thread.start()
        logger.info(f"Cleanup scheduler started (interval: {check_interval_seconds}s)")

    def stop_background(self) -> None:
        """Stop the background execution thread."""
        self._running = False
        self._stop_event.set()
        if self._thread:
            self._thread.join(timeout=5)
            self._thread = None
        logger.info("Cleanup scheduler stopped")

    def get_jobs(self) -> List[Dict[str, Any]]:
        """Get all registered jobs."""
        with self._lock:
            return [
                {
                    "name": job.name,
                    "project_id": job.project_id,
                    "interval_hours": job.interval_hours,
                    "agent": job.agent,
                    "enabled": job.enabled,
                    "last_run": job.last_run.isoformat() if job.last_run else None,
                    "next_run": job.next_run.isoformat() if job.next_run else None,
                }
                for job in self._jobs.values()
            ]

    def get_history(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get recent job execution history."""
        with self._lock:
            recent = self._history[-limit:]
            return [
                {
                    "job_name": r.job_name,
                    "project_id": r.project_id,
                    "started_at": r.started_at.isoformat(),
                    "completed_at": r.completed_at.isoformat(),
                    "duration_ms": int(
                        (r.completed_at - r.started_at).total_seconds() * 1000
                    ),
                    "success": r.success,
                    "items_pruned": r.prune_summary.total_pruned if r.prune_summary else 0,
                    "items_decayed": r.decay_result.items_updated if r.decay_result else 0,
                    "alerts": len(r.alerts),
                    "error": r.error,
                }
                for r in reversed(recent)
            ]
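

# End-to-end sketch (illustrative; `storage` stands in for any concrete
# StorageBackend implementation):
#
#     scheduler = CleanupScheduler(storage)
#     scheduler.register_job(CleanupJob(
#         name="nightly-demo", project_id="demo", interval_hours=24.0,
#     ))
#     scheduler.start_background(check_interval_seconds=300)
#     ...
#     scheduler.stop_background()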