truthound-dashboard 1.3.1__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. truthound_dashboard/api/alerts.py +258 -0
  2. truthound_dashboard/api/anomaly.py +1302 -0
  3. truthound_dashboard/api/cross_alerts.py +352 -0
  4. truthound_dashboard/api/deps.py +143 -0
  5. truthound_dashboard/api/drift_monitor.py +540 -0
  6. truthound_dashboard/api/lineage.py +1151 -0
  7. truthound_dashboard/api/maintenance.py +363 -0
  8. truthound_dashboard/api/middleware.py +373 -1
  9. truthound_dashboard/api/model_monitoring.py +805 -0
  10. truthound_dashboard/api/notifications_advanced.py +2452 -0
  11. truthound_dashboard/api/plugins.py +2096 -0
  12. truthound_dashboard/api/profile.py +211 -14
  13. truthound_dashboard/api/reports.py +853 -0
  14. truthound_dashboard/api/router.py +147 -0
  15. truthound_dashboard/api/rule_suggestions.py +310 -0
  16. truthound_dashboard/api/schema_evolution.py +231 -0
  17. truthound_dashboard/api/sources.py +47 -3
  18. truthound_dashboard/api/triggers.py +190 -0
  19. truthound_dashboard/api/validations.py +13 -0
  20. truthound_dashboard/api/validators.py +333 -4
  21. truthound_dashboard/api/versioning.py +309 -0
  22. truthound_dashboard/api/websocket.py +301 -0
  23. truthound_dashboard/core/__init__.py +27 -0
  24. truthound_dashboard/core/anomaly.py +1395 -0
  25. truthound_dashboard/core/anomaly_explainer.py +633 -0
  26. truthound_dashboard/core/cache.py +206 -0
  27. truthound_dashboard/core/cached_services.py +422 -0
  28. truthound_dashboard/core/charts.py +352 -0
  29. truthound_dashboard/core/connections.py +1069 -42
  30. truthound_dashboard/core/cross_alerts.py +837 -0
  31. truthound_dashboard/core/drift_monitor.py +1477 -0
  32. truthound_dashboard/core/drift_sampling.py +669 -0
  33. truthound_dashboard/core/i18n/__init__.py +42 -0
  34. truthound_dashboard/core/i18n/detector.py +173 -0
  35. truthound_dashboard/core/i18n/messages.py +564 -0
  36. truthound_dashboard/core/lineage.py +971 -0
  37. truthound_dashboard/core/maintenance.py +443 -5
  38. truthound_dashboard/core/model_monitoring.py +1043 -0
  39. truthound_dashboard/core/notifications/channels.py +1020 -1
  40. truthound_dashboard/core/notifications/deduplication/__init__.py +143 -0
  41. truthound_dashboard/core/notifications/deduplication/policies.py +274 -0
  42. truthound_dashboard/core/notifications/deduplication/service.py +400 -0
  43. truthound_dashboard/core/notifications/deduplication/stores.py +2365 -0
  44. truthound_dashboard/core/notifications/deduplication/strategies.py +422 -0
  45. truthound_dashboard/core/notifications/dispatcher.py +43 -0
  46. truthound_dashboard/core/notifications/escalation/__init__.py +149 -0
  47. truthound_dashboard/core/notifications/escalation/backends.py +1384 -0
  48. truthound_dashboard/core/notifications/escalation/engine.py +429 -0
  49. truthound_dashboard/core/notifications/escalation/models.py +336 -0
  50. truthound_dashboard/core/notifications/escalation/scheduler.py +1187 -0
  51. truthound_dashboard/core/notifications/escalation/state_machine.py +330 -0
  52. truthound_dashboard/core/notifications/escalation/stores.py +2896 -0
  53. truthound_dashboard/core/notifications/events.py +49 -0
  54. truthound_dashboard/core/notifications/metrics/__init__.py +115 -0
  55. truthound_dashboard/core/notifications/metrics/base.py +528 -0
  56. truthound_dashboard/core/notifications/metrics/collectors.py +583 -0
  57. truthound_dashboard/core/notifications/routing/__init__.py +169 -0
  58. truthound_dashboard/core/notifications/routing/combinators.py +184 -0
  59. truthound_dashboard/core/notifications/routing/config.py +375 -0
  60. truthound_dashboard/core/notifications/routing/config_parser.py +867 -0
  61. truthound_dashboard/core/notifications/routing/engine.py +382 -0
  62. truthound_dashboard/core/notifications/routing/expression_engine.py +1269 -0
  63. truthound_dashboard/core/notifications/routing/jinja2_engine.py +774 -0
  64. truthound_dashboard/core/notifications/routing/rules.py +625 -0
  65. truthound_dashboard/core/notifications/routing/validator.py +678 -0
  66. truthound_dashboard/core/notifications/service.py +2 -0
  67. truthound_dashboard/core/notifications/stats_aggregator.py +850 -0
  68. truthound_dashboard/core/notifications/throttling/__init__.py +83 -0
  69. truthound_dashboard/core/notifications/throttling/builder.py +311 -0
  70. truthound_dashboard/core/notifications/throttling/stores.py +1859 -0
  71. truthound_dashboard/core/notifications/throttling/throttlers.py +633 -0
  72. truthound_dashboard/core/openlineage.py +1028 -0
  73. truthound_dashboard/core/plugins/__init__.py +39 -0
  74. truthound_dashboard/core/plugins/docs/__init__.py +39 -0
  75. truthound_dashboard/core/plugins/docs/extractor.py +703 -0
  76. truthound_dashboard/core/plugins/docs/renderers.py +804 -0
  77. truthound_dashboard/core/plugins/hooks/__init__.py +63 -0
  78. truthound_dashboard/core/plugins/hooks/decorators.py +367 -0
  79. truthound_dashboard/core/plugins/hooks/manager.py +403 -0
  80. truthound_dashboard/core/plugins/hooks/protocols.py +265 -0
  81. truthound_dashboard/core/plugins/lifecycle/__init__.py +41 -0
  82. truthound_dashboard/core/plugins/lifecycle/hot_reload.py +584 -0
  83. truthound_dashboard/core/plugins/lifecycle/machine.py +419 -0
  84. truthound_dashboard/core/plugins/lifecycle/states.py +266 -0
  85. truthound_dashboard/core/plugins/loader.py +504 -0
  86. truthound_dashboard/core/plugins/registry.py +810 -0
  87. truthound_dashboard/core/plugins/reporter_executor.py +588 -0
  88. truthound_dashboard/core/plugins/sandbox/__init__.py +59 -0
  89. truthound_dashboard/core/plugins/sandbox/code_validator.py +243 -0
  90. truthound_dashboard/core/plugins/sandbox/engines.py +770 -0
  91. truthound_dashboard/core/plugins/sandbox/protocols.py +194 -0
  92. truthound_dashboard/core/plugins/sandbox.py +617 -0
  93. truthound_dashboard/core/plugins/security/__init__.py +68 -0
  94. truthound_dashboard/core/plugins/security/analyzer.py +535 -0
  95. truthound_dashboard/core/plugins/security/policies.py +311 -0
  96. truthound_dashboard/core/plugins/security/protocols.py +296 -0
  97. truthound_dashboard/core/plugins/security/signing.py +842 -0
  98. truthound_dashboard/core/plugins/security.py +446 -0
  99. truthound_dashboard/core/plugins/validator_executor.py +401 -0
  100. truthound_dashboard/core/plugins/versioning/__init__.py +51 -0
  101. truthound_dashboard/core/plugins/versioning/constraints.py +377 -0
  102. truthound_dashboard/core/plugins/versioning/dependencies.py +541 -0
  103. truthound_dashboard/core/plugins/versioning/semver.py +266 -0
  104. truthound_dashboard/core/profile_comparison.py +601 -0
  105. truthound_dashboard/core/report_history.py +570 -0
  106. truthound_dashboard/core/reporters/__init__.py +57 -0
  107. truthound_dashboard/core/reporters/base.py +296 -0
  108. truthound_dashboard/core/reporters/csv_reporter.py +155 -0
  109. truthound_dashboard/core/reporters/html_reporter.py +598 -0
  110. truthound_dashboard/core/reporters/i18n/__init__.py +65 -0
  111. truthound_dashboard/core/reporters/i18n/base.py +494 -0
  112. truthound_dashboard/core/reporters/i18n/catalogs.py +930 -0
  113. truthound_dashboard/core/reporters/json_reporter.py +160 -0
  114. truthound_dashboard/core/reporters/junit_reporter.py +233 -0
  115. truthound_dashboard/core/reporters/markdown_reporter.py +207 -0
  116. truthound_dashboard/core/reporters/pdf_reporter.py +209 -0
  117. truthound_dashboard/core/reporters/registry.py +272 -0
  118. truthound_dashboard/core/rule_generator.py +2088 -0
  119. truthound_dashboard/core/scheduler.py +822 -12
  120. truthound_dashboard/core/schema_evolution.py +858 -0
  121. truthound_dashboard/core/services.py +152 -9
  122. truthound_dashboard/core/statistics.py +718 -0
  123. truthound_dashboard/core/streaming_anomaly.py +883 -0
  124. truthound_dashboard/core/triggers/__init__.py +45 -0
  125. truthound_dashboard/core/triggers/base.py +226 -0
  126. truthound_dashboard/core/triggers/evaluators.py +609 -0
  127. truthound_dashboard/core/triggers/factory.py +363 -0
  128. truthound_dashboard/core/unified_alerts.py +870 -0
  129. truthound_dashboard/core/validation_limits.py +509 -0
  130. truthound_dashboard/core/versioning.py +709 -0
  131. truthound_dashboard/core/websocket/__init__.py +59 -0
  132. truthound_dashboard/core/websocket/manager.py +512 -0
  133. truthound_dashboard/core/websocket/messages.py +130 -0
  134. truthound_dashboard/db/__init__.py +30 -0
  135. truthound_dashboard/db/models.py +3375 -3
  136. truthound_dashboard/main.py +22 -0
  137. truthound_dashboard/schemas/__init__.py +396 -1
  138. truthound_dashboard/schemas/anomaly.py +1258 -0
  139. truthound_dashboard/schemas/base.py +4 -0
  140. truthound_dashboard/schemas/cross_alerts.py +334 -0
  141. truthound_dashboard/schemas/drift_monitor.py +890 -0
  142. truthound_dashboard/schemas/lineage.py +428 -0
  143. truthound_dashboard/schemas/maintenance.py +154 -0
  144. truthound_dashboard/schemas/model_monitoring.py +374 -0
  145. truthound_dashboard/schemas/notifications_advanced.py +1363 -0
  146. truthound_dashboard/schemas/openlineage.py +704 -0
  147. truthound_dashboard/schemas/plugins.py +1293 -0
  148. truthound_dashboard/schemas/profile.py +420 -34
  149. truthound_dashboard/schemas/profile_comparison.py +242 -0
  150. truthound_dashboard/schemas/reports.py +285 -0
  151. truthound_dashboard/schemas/rule_suggestion.py +434 -0
  152. truthound_dashboard/schemas/schema_evolution.py +164 -0
  153. truthound_dashboard/schemas/source.py +117 -2
  154. truthound_dashboard/schemas/triggers.py +511 -0
  155. truthound_dashboard/schemas/unified_alerts.py +223 -0
  156. truthound_dashboard/schemas/validation.py +25 -1
  157. truthound_dashboard/schemas/validators/__init__.py +11 -0
  158. truthound_dashboard/schemas/validators/base.py +151 -0
  159. truthound_dashboard/schemas/versioning.py +152 -0
  160. truthound_dashboard/static/index.html +2 -2
  161. {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/METADATA +142 -22
  162. truthound_dashboard-1.4.0.dist-info/RECORD +239 -0
  163. truthound_dashboard/static/assets/index-BZG20KuF.js +0 -586
  164. truthound_dashboard/static/assets/index-D_HyZ3pb.css +0 -1
  165. truthound_dashboard/static/assets/unmerged_dictionaries-CtpqQBm0.js +0 -1
  166. truthound_dashboard-1.3.1.dist-info/RECORD +0 -110
  167. {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/WHEEL +0 -0
  168. {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/entry_points.txt +0 -0
  169. {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1859 @@
+ """Storage backends for throttling state.
+
+ This module provides storage backends for tracking rate limit
+ counters and token buckets.
+
+ Storage Backends:
+     - InMemoryThrottlingStore: Simple in-memory storage with TTL and LRU eviction
+     - SQLiteThrottlingStore: Persistent SQLite storage
+     - RedisThrottlingStore: Redis-based storage for distributed deployments
+
+ Features:
+     - TTL-based automatic expiration of old entries
+     - LRU eviction when max entries exceeded
+     - Periodic background cleanup task
+     - Configurable cleanup interval and max entries
+     - Memory usage metrics
+ """
+
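As a minimal sketch of how a caller might pick among the three backends this docstring lists; the `make_throttling_store` helper and the `TRUTHOUND_THROTTLE_BACKEND` variable are illustrative, not names from the package:

```python
import os

def make_throttling_store():
    """Pick a backend from a hypothetical TRUTHOUND_THROTTLE_BACKEND variable."""
    backend = os.getenv("TRUTHOUND_THROTTLE_BACKEND", "memory")
    if backend == "redis":
        return RedisThrottlingStore()                   # distributed deployments
    if backend == "sqlite":
        return SQLiteThrottlingStore("throttling.db")   # survives restarts
    return InMemoryThrottlingStore()                    # single process, default
```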
+ from __future__ import annotations
+
+ import asyncio
+ import json
+ import logging
+ import os
+ import sqlite3
+ import threading
+ import time
+ import weakref
+ from abc import ABC, abstractmethod
+ from collections import OrderedDict
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+ # Optional Redis dependency
+ try:
+     import redis
+     import redis.asyncio
+
+     REDIS_AVAILABLE = True
+ except ImportError:
+     REDIS_AVAILABLE = False
+     redis = None  # type: ignore[assignment]
+
+ if TYPE_CHECKING:
+     import redis as redis_sync
+     import redis.asyncio as redis_async
+
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class ThrottlingEntry:
+     """A stored throttling entry.
+
+     Attributes:
+         key: Unique key identifying the throttled entity.
+         count: Current count within the window.
+         window_start: Start of the current window.
+         tokens: Current token count (for token bucket).
+         last_refill: Last token refill time.
+         last_accessed: Last access timestamp for LRU tracking.
+         metadata: Arbitrary extra data attached to the entry.
+     """
+
+     key: str
+     count: int = 0
+     window_start: float = 0.0
+     tokens: float = 0.0
+     last_refill: float = 0.0
+     last_accessed: float = field(default_factory=time.time)
+     metadata: dict[str, Any] = field(default_factory=dict)
+
+
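The `tokens`/`last_refill` pair exists to support token buckets; the actual consumers presumably live in the sibling `throttlers.py`. A minimal sketch of the refill-then-consume step those fields enable — `capacity` and `refill_rate` are illustrative parameters, and persisting the mutated entry back via `store.set(entry)` is left to the caller:

```python
import time

def try_consume(entry: ThrottlingEntry, capacity: float, refill_rate: float) -> bool:
    """Refill the bucket for elapsed time, then spend one token if available."""
    now = time.time()
    elapsed = max(0.0, now - entry.last_refill)
    entry.tokens = min(capacity, entry.tokens + elapsed * refill_rate)
    entry.last_refill = now
    if entry.tokens >= 1.0:
        entry.tokens -= 1.0
        return True
    return False
```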
+ @dataclass
+ class ThrottlingMetrics:
+     """Metrics for throttling store operations.
+
+     Attributes:
+         total_entries: Current number of entries.
+         peak_entries: Maximum entries ever stored.
+         expired_removed: Number of entries removed by TTL expiration.
+         lru_evicted: Number of entries evicted by LRU policy.
+         cleanup_runs: Number of cleanup cycles executed.
+         last_cleanup_time: Timestamp of last cleanup.
+         last_cleanup_removed: Entries removed in last cleanup.
+         memory_bytes_estimate: Estimated memory usage in bytes.
+     """
+
+     total_entries: int = 0
+     peak_entries: int = 0
+     expired_removed: int = 0
+     lru_evicted: int = 0
+     cleanup_runs: int = 0
+     last_cleanup_time: float = 0.0
+     last_cleanup_removed: int = 0
+     memory_bytes_estimate: int = 0
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert metrics to dictionary."""
+         return {
+             "total_entries": self.total_entries,
+             "peak_entries": self.peak_entries,
+             "expired_removed": self.expired_removed,
+             "lru_evicted": self.lru_evicted,
+             "cleanup_runs": self.cleanup_runs,
+             "last_cleanup_time": self.last_cleanup_time,
+             "last_cleanup_removed": self.last_cleanup_removed,
+             "memory_bytes_estimate": self.memory_bytes_estimate,
+         }
+
+
+ class BaseThrottlingStore(ABC):
+     """Abstract base class for throttling storage.
+
+     All stores must implement methods for getting and updating
+     throttling state.
+     """
+
+     @abstractmethod
+     def get(self, key: str) -> ThrottlingEntry | None:
+         """Get entry by key.
+
+         Args:
+             key: The throttling key.
+
+         Returns:
+             Entry if found, None otherwise.
+         """
+         ...
+
+     @abstractmethod
+     def set(self, entry: ThrottlingEntry) -> None:
+         """Set or update an entry.
+
+         Args:
+             entry: The entry to store.
+         """
+         ...
+
+     @abstractmethod
+     def increment(self, key: str, window_start: float) -> int:
+         """Increment counter and return new count.
+
+         If the entry doesn't exist or the window has changed,
+         creates a new entry with count=1.
+
+         Args:
+             key: The throttling key.
+             window_start: Start of current window.
+
+         Returns:
+             New count value.
+         """
+         ...
+
+     @abstractmethod
+     def cleanup(self, max_age_seconds: int) -> int:
+         """Remove old entries.
+
+         Args:
+             max_age_seconds: Maximum age of entries to keep.
+
+         Returns:
+             Number of entries removed.
+         """
+         ...
+
+     @abstractmethod
+     def clear(self) -> None:
+         """Clear all entries."""
+         ...
+
+     def get_metrics(self) -> dict[str, Any]:
+         """Get store metrics.
+
+         Returns:
+             Dictionary with metrics data.
+         """
+         return {}
+
+
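This contract is all a rate limiter needs. As a minimal sketch of how a fixed-window check can be written against `BaseThrottlingStore` alone — the `allow` helper is illustrative, and it assumes windows are aligned by flooring the timestamp to a multiple of the period:

```python
import time

def allow(store: BaseThrottlingStore, key: str, limit: int, window_seconds: float) -> bool:
    """True while the count for the current fixed window stays within limit."""
    now = time.time()
    window_start = now - (now % window_seconds)  # align windows on period boundaries
    return store.increment(key, window_start) <= limit
```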
+ class InMemoryThrottlingStore(BaseThrottlingStore):
+     """In-memory throttling storage with TTL and LRU eviction.
+
+     Thread-safe storage suitable for development and single-process
+     deployments with automatic memory management.
+
+     Features:
+         - TTL-based automatic expiration of old entries
+         - LRU eviction when max entries exceeded
+         - Periodic background cleanup task
+         - Configurable cleanup interval and max entries
+         - Memory usage metrics
+
+     Configuration via environment variables:
+         TRUTHOUND_THROTTLE_MAX_ENTRIES: Maximum entries (default: 10000)
+         TRUTHOUND_THROTTLE_DEFAULT_TTL: Default TTL in seconds (default: 3600)
+         TRUTHOUND_THROTTLE_CLEANUP_INTERVAL: Cleanup interval in seconds (default: 60)
+         TRUTHOUND_THROTTLE_LRU_EVICTION_PERCENT: Percent to evict on overflow (default: 10)
+
+     Example:
+         # Basic usage
+         store = InMemoryThrottlingStore()
+
+         # Custom configuration
+         store = InMemoryThrottlingStore(
+             max_entries=50000,
+             default_ttl=7200,
+             cleanup_interval=120,
+         )
+
+         # With background cleanup
+         await store.start_background_cleanup()
+         # ... use store ...
+         await store.stop_background_cleanup()
+     """
+
+     # Track all instances for proper cleanup
+     _instances: weakref.WeakSet["InMemoryThrottlingStore"] = weakref.WeakSet()
+
+     def __init__(
+         self,
+         max_entries: int | None = None,
+         default_ttl: int | None = None,
+         cleanup_interval: int | None = None,
+         lru_eviction_percent: int | None = None,
+         auto_start_cleanup: bool = False,
+     ) -> None:
+         """Initialize in-memory store with memory management.
+
+         Args:
+             max_entries: Maximum number of entries before LRU eviction.
+             default_ttl: Default TTL in seconds for entries.
+             cleanup_interval: Interval in seconds between cleanup runs.
+             lru_eviction_percent: Percentage of entries to evict on overflow.
+             auto_start_cleanup: Whether to auto-start background cleanup.
+         """
+         # Configuration from environment or parameters
+         self.max_entries = max_entries or int(
+             os.getenv("TRUTHOUND_THROTTLE_MAX_ENTRIES", "10000")
+         )
+         self.default_ttl = default_ttl or int(
+             os.getenv("TRUTHOUND_THROTTLE_DEFAULT_TTL", "3600")
+         )
+         self.cleanup_interval = cleanup_interval or int(
+             os.getenv("TRUTHOUND_THROTTLE_CLEANUP_INTERVAL", "60")
+         )
+         self.lru_eviction_percent = lru_eviction_percent or int(
+             os.getenv("TRUTHOUND_THROTTLE_LRU_EVICTION_PERCENT", "10")
+         )
+
+         # Use OrderedDict for LRU tracking
+         self._entries: OrderedDict[str, ThrottlingEntry] = OrderedDict()
+         self._lock = threading.RLock()
+
+         # Background cleanup task
+         self._cleanup_task: asyncio.Task[None] | None = None
+         self._cleanup_running = False
+         self._shutdown_event: asyncio.Event | None = None
+
+         # Metrics
+         self._metrics = ThrottlingMetrics()
+
+         # Track instance for cleanup
+         InMemoryThrottlingStore._instances.add(self)
+
+         # Auto-start background cleanup if requested
+         self._auto_start_cleanup = auto_start_cleanup
+
+     def get(self, key: str) -> ThrottlingEntry | None:
+         """Get entry by key and update access time for LRU."""
+         with self._lock:
+             entry = self._entries.get(key)
+             if entry is not None:
+                 # Update access time
+                 entry.last_accessed = time.time()
+                 # Move to end for LRU (most recently used)
+                 self._entries.move_to_end(key)
+             return entry
+
+     def set(self, entry: ThrottlingEntry) -> None:
+         """Set or update an entry with LRU tracking."""
+         entry.last_accessed = time.time()
+
+         with self._lock:
+             # Check if we need to evict entries
+             if len(self._entries) >= self.max_entries and entry.key not in self._entries:
+                 self._evict_lru()
+
+             # Set entry
+             self._entries[entry.key] = entry
+             # Move to end for LRU
+             self._entries.move_to_end(entry.key)
+
+             # Update metrics
+             self._metrics.total_entries = len(self._entries)
+             if self._metrics.total_entries > self._metrics.peak_entries:
+                 self._metrics.peak_entries = self._metrics.total_entries
+             self._update_memory_estimate()
+
+     def increment(self, key: str, window_start: float) -> int:
+         """Increment counter and return new count."""
+         with self._lock:
+             entry = self._entries.get(key)
+
+             if entry is None or entry.window_start != window_start:
+                 # Check if we need to evict
+                 if len(self._entries) >= self.max_entries and key not in self._entries:
+                     self._evict_lru()
+
+                 # New window
+                 entry = ThrottlingEntry(
+                     key=key,
+                     count=1,
+                     window_start=window_start,
+                     last_accessed=time.time(),
+                 )
+                 self._entries[key] = entry
+
+                 # Update metrics
+                 self._metrics.total_entries = len(self._entries)
+                 if self._metrics.total_entries > self._metrics.peak_entries:
+                     self._metrics.peak_entries = self._metrics.total_entries
+
+                 return 1
+
+             # Same window, increment
+             entry.count += 1
+             entry.last_accessed = time.time()
+             # Move to end for LRU
+             self._entries.move_to_end(key)
+
+             return entry.count
+
+     def cleanup(self, max_age_seconds: int) -> int:
+         """Remove old entries based on TTL."""
+         cutoff = time.time() - max_age_seconds
+         removed = 0
+
+         with self._lock:
+             # Collect expired keys
+             expired = [
+                 key for key, entry in self._entries.items()
+                 if entry.window_start < cutoff and entry.last_refill < cutoff
+             ]
+
+             # Remove expired entries
+             for key in expired:
+                 del self._entries[key]
+                 removed += 1
+
+             # Update metrics
+             self._metrics.expired_removed += removed
+             self._metrics.total_entries = len(self._entries)
+             self._metrics.cleanup_runs += 1
+             self._metrics.last_cleanup_time = time.time()
+             self._metrics.last_cleanup_removed = removed
+             self._update_memory_estimate()
+
+         if removed > 0:
+             logger.debug(f"Throttling store cleanup: removed {removed} expired entries")
+
+         return removed
+
+     def _evict_lru(self) -> None:
+         """Evict least recently used entries.
+
+         Called when max_entries is reached. Evicts a percentage
+         of entries based on lru_eviction_percent.
+         """
+         evict_count = max(1, int(len(self._entries) * self.lru_eviction_percent / 100))
+
+         # Remove oldest entries (at the beginning of OrderedDict)
+         for _ in range(evict_count):
+             if not self._entries:
+                 break
+             # popitem(last=False) removes from beginning (oldest)
+             self._entries.popitem(last=False)
+             self._metrics.lru_evicted += 1
+
+         self._metrics.total_entries = len(self._entries)
+
+         logger.debug(f"Throttling store LRU eviction: evicted {evict_count} entries")
+
+     def _update_memory_estimate(self) -> None:
+         """Estimate memory usage of stored entries."""
+         # Rough estimate: key (~50 bytes) + entry object (~200 bytes)
+         entry_size_estimate = 250
+         self._metrics.memory_bytes_estimate = len(self._entries) * entry_size_estimate
+
+     def clear(self) -> None:
+         """Clear all entries."""
+         with self._lock:
+             self._entries.clear()
+             self._metrics.total_entries = 0
+             self._update_memory_estimate()
+
+     def get_metrics(self) -> dict[str, Any]:
+         """Get store metrics."""
+         with self._lock:
+             self._metrics.total_entries = len(self._entries)
+             self._update_memory_estimate()
+             return self._metrics.to_dict()
+
+     def count(self) -> int:
+         """Get total entry count."""
+         with self._lock:
+             return len(self._entries)
+
+     # =========================================================================
+     # Background Cleanup
+     # =========================================================================
+
+     async def start_background_cleanup(self) -> None:
+         """Start background cleanup task.
+
+         Creates an asyncio task that periodically runs cleanup
+         to remove expired entries.
+         """
+         if self._cleanup_task is not None and not self._cleanup_task.done():
+             logger.warning("Background cleanup already running")
+             return
+
+         self._cleanup_running = True
+         self._shutdown_event = asyncio.Event()
+         self._cleanup_task = asyncio.create_task(self._background_cleanup_loop())
+         logger.info(
+             f"Started throttling store background cleanup "
+             f"(interval={self.cleanup_interval}s, ttl={self.default_ttl}s)"
+         )
+
+     async def stop_background_cleanup(self) -> None:
+         """Stop background cleanup task."""
+         if not self._cleanup_running:
+             return
+
+         self._cleanup_running = False
+
+         if self._shutdown_event:
+             self._shutdown_event.set()
+
+         if self._cleanup_task:
+             try:
+                 # Wait for task to complete with timeout
+                 await asyncio.wait_for(self._cleanup_task, timeout=5.0)
+             except asyncio.TimeoutError:
+                 self._cleanup_task.cancel()
+                 try:
+                     await self._cleanup_task
+                 except asyncio.CancelledError:
+                     pass
+
+         self._cleanup_task = None
+         self._shutdown_event = None
+         logger.info("Stopped throttling store background cleanup")
+
+     async def _background_cleanup_loop(self) -> None:
+         """Background task that periodically runs cleanup."""
+         while self._cleanup_running:
+             try:
+                 # Wait for cleanup interval or shutdown
+                 if self._shutdown_event:
+                     try:
+                         await asyncio.wait_for(
+                             self._shutdown_event.wait(),
+                             timeout=self.cleanup_interval,
+                         )
+                         # If we get here, shutdown was requested
+                         break
+                     except asyncio.TimeoutError:
+                         # Timeout means it's time to cleanup
+                         pass
+
+                 # Run cleanup
+                 removed = self.cleanup(self.default_ttl)
+                 if removed > 0:
+                     logger.debug(
+                         f"Background cleanup: removed {removed} expired entries, "
+                         f"{self.count()} remaining"
+                     )
+
+             except asyncio.CancelledError:
+                 logger.debug("Background cleanup task cancelled")
+                 break
+             except Exception as e:
+                 logger.error(f"Error in background cleanup: {e}")
+                 # Continue running despite errors
+                 await asyncio.sleep(self.cleanup_interval)
+
+     def __del__(self) -> None:
+         """Cleanup on deletion."""
+         # Note: Cannot await in __del__, so just flag for cleanup
+         self._cleanup_running = False
+
+     # =========================================================================
+     # Context Manager Support
+     # =========================================================================
+
+     async def __aenter__(self) -> "InMemoryThrottlingStore":
+         """Async context manager entry."""
+         if self._auto_start_cleanup:
+             await self.start_background_cleanup()
+         return self
+
+     async def __aexit__(
+         self, exc_type: Any, exc_val: Any, exc_tb: Any
+     ) -> None:
+         """Async context manager exit."""
+         await self.stop_background_cleanup()
+
+
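A small worked example of the eviction behavior described above, under an assumed toy configuration:

```python
import time

# Tiny store: 4 entries max, evict 25% (i.e. one entry) on overflow.
store = InMemoryThrottlingStore(max_entries=4, lru_eviction_percent=25)
now = time.time()
for k in ("a", "b", "c", "d"):
    store.increment(k, window_start=now)

store.get("a")                           # touch "a"; "b" is now least recently used
store.increment("e", window_start=now)   # triggers _evict_lru(), dropping "b"

print(store.count())                        # 4
print(store.get_metrics()["lru_evicted"])   # 1
```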
+ class SQLiteThrottlingStore(BaseThrottlingStore):
+     """SQLite-based persistent throttling storage.
+
+     Provides durable storage that survives process restarts.
+     """
+
+     def __init__(self, db_path: str | Path = "throttling.db") -> None:
+         """Initialize SQLite store.
+
+         Args:
+             db_path: Path to database file.
+         """
+         self.db_path = Path(db_path)
+         self._local = threading.local()
+         self._init_db()
+
+     def _get_connection(self) -> sqlite3.Connection:
+         """Get thread-local database connection."""
+         if not hasattr(self._local, "connection"):
+             self._local.connection = sqlite3.connect(
+                 str(self.db_path),
+                 check_same_thread=False,
+             )
+             self._local.connection.row_factory = sqlite3.Row
+         return self._local.connection
+
+     def _init_db(self) -> None:
+         """Initialize database schema."""
+         conn = self._get_connection()
+         conn.execute("""
+             CREATE TABLE IF NOT EXISTS throttling_entries (
+                 key TEXT PRIMARY KEY,
+                 count INTEGER NOT NULL DEFAULT 0,
+                 window_start REAL NOT NULL,
+                 tokens REAL NOT NULL DEFAULT 0,
+                 last_refill REAL NOT NULL DEFAULT 0,
+                 last_accessed REAL NOT NULL DEFAULT 0,
+                 metadata TEXT
+             )
+         """)
+         conn.execute("""
+             CREATE INDEX IF NOT EXISTS idx_throttle_window
+             ON throttling_entries(window_start)
+         """)
+         conn.execute("""
+             CREATE INDEX IF NOT EXISTS idx_throttle_accessed
+             ON throttling_entries(last_accessed)
+         """)
+         conn.commit()
+
+     def get(self, key: str) -> ThrottlingEntry | None:
+         """Get entry by key."""
+         conn = self._get_connection()
+         now = time.time()
+
+         # Update last_accessed
+         cursor = conn.execute(
+             """
+             UPDATE throttling_entries
+             SET last_accessed = ?
+             WHERE key = ?
+             RETURNING key, count, window_start, tokens, last_refill, last_accessed, metadata
+             """,
+             (now, key),
+         )
+         row = cursor.fetchone()
+
+         if row is None:
+             # Fall back to SELECT if UPDATE returned nothing
+             cursor = conn.execute(
+                 """
+                 SELECT key, count, window_start, tokens, last_refill, last_accessed, metadata
+                 FROM throttling_entries
+                 WHERE key = ?
+                 """,
+                 (key,),
+             )
+             row = cursor.fetchone()
+
+         if row is None:
+             return None
+
+         metadata = {}
+         if row["metadata"]:
+             try:
+                 metadata = json.loads(row["metadata"])
+             except json.JSONDecodeError:
+                 pass
+
+         return ThrottlingEntry(
+             key=row["key"],
+             count=row["count"],
+             window_start=row["window_start"],
+             tokens=row["tokens"],
+             last_refill=row["last_refill"],
+             last_accessed=row["last_accessed"],
+             metadata=metadata,
+         )
+
+     def set(self, entry: ThrottlingEntry) -> None:
+         """Set or update an entry."""
+         conn = self._get_connection()
+         metadata_json = json.dumps(entry.metadata) if entry.metadata else None
+         now = time.time()
+
+         conn.execute(
+             """
+             INSERT OR REPLACE INTO throttling_entries
+             (key, count, window_start, tokens, last_refill, last_accessed, metadata)
+             VALUES (?, ?, ?, ?, ?, ?, ?)
+             """,
+             (
+                 entry.key,
+                 entry.count,
+                 entry.window_start,
+                 entry.tokens,
+                 entry.last_refill,
+                 now,
+                 metadata_json,
+             ),
+         )
+         conn.commit()
+
+     def increment(self, key: str, window_start: float) -> int:
+         """Increment counter and return new count."""
+         conn = self._get_connection()
+         now = time.time()
+
+         # Check if entry exists and is in same window
+         cursor = conn.execute(
+             """
+             SELECT count, window_start FROM throttling_entries
+             WHERE key = ?
+             """,
+             (key,),
+         )
+         row = cursor.fetchone()
+
+         if row is None or row["window_start"] != window_start:
+             # New window
+             conn.execute(
+                 """
+                 INSERT OR REPLACE INTO throttling_entries
+                 (key, count, window_start, tokens, last_refill, last_accessed)
+                 VALUES (?, 1, ?, 0, 0, ?)
+                 """,
+                 (key, window_start, now),
+             )
+             conn.commit()
+             return 1
+
+         # Same window, increment
+         conn.execute(
+             """
+             UPDATE throttling_entries
+             SET count = count + 1, last_accessed = ?
+             WHERE key = ?
+             """,
+             (now, key),
+         )
+         conn.commit()
+
+         return row["count"] + 1
+
+     def cleanup(self, max_age_seconds: int) -> int:
+         """Remove old entries."""
+         conn = self._get_connection()
+         cutoff = time.time() - max_age_seconds
+
+         cursor = conn.execute(
+             """
+             DELETE FROM throttling_entries
+             WHERE window_start < ? AND last_refill < ?
+             """,
+             (cutoff, cutoff),
+         )
+         conn.commit()
+
+         return cursor.rowcount
+
+     def clear(self) -> None:
+         """Clear all entries."""
+         conn = self._get_connection()
+         conn.execute("DELETE FROM throttling_entries")
+         conn.commit()
+
+     def count(self) -> int:
+         """Get total entry count."""
+         conn = self._get_connection()
+         cursor = conn.execute("SELECT COUNT(*) FROM throttling_entries")
+         return cursor.fetchone()[0]
+
+     def get_metrics(self) -> dict[str, Any]:
+         """Get store metrics."""
+         return {
+             "total_entries": self.count(),
+             "db_path": str(self.db_path),
+         }
+
+     def close(self) -> None:
+         """Close database connection."""
+         if hasattr(self._local, "connection"):
+             self._local.connection.close()
+             del self._local.connection
+
+
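A sketch of the durability this store adds over the in-memory backend; the path is illustrative:

```python
import time
from pathlib import Path

db = Path("/tmp/truthound_throttle.db")   # illustrative location

store = SQLiteThrottlingStore(db)
window = time.time()
store.increment("channel-1", window)
store.close()

# A new process (or a restart) sees the same counter on disk.
store2 = SQLiteThrottlingStore(db)
print(store2.increment("channel-1", window))  # 2, not 1
```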
719
+ class RedisThrottlingStore(BaseThrottlingStore):
720
+ """Redis-based throttling store for distributed deployments.
721
+
722
+ Uses Redis for shared throttling state across multiple processes
723
+ or containers. Supports both sync and async operations with
724
+ connection pooling.
725
+
726
+ Features:
727
+ - Connection pool management with configurable pool size
728
+ - Automatic reconnection with exponential backoff
729
+ - TTL management for automatic expiration
730
+ - Graceful degradation (fallback to InMemory on Redis failure)
731
+ - Health check endpoint support
732
+ - Comprehensive metrics collection
733
+ - Async support for high-performance applications
734
+
735
+ Configuration via environment variables:
736
+ TRUTHOUND_THROTTLE_REDIS_URL: Redis connection URL (default: redis://localhost:6379/0)
737
+ TRUTHOUND_THROTTLE_REDIS_PREFIX: Key prefix (default: truthound:throttle:)
738
+ TRUTHOUND_THROTTLE_REDIS_TTL: Default TTL in seconds (default: 3600)
739
+ TRUTHOUND_THROTTLE_REDIS_POOL_SIZE: Connection pool size (default: 10)
740
+ TRUTHOUND_THROTTLE_REDIS_SOCKET_TIMEOUT: Socket timeout (default: 5.0)
741
+ TRUTHOUND_THROTTLE_REDIS_CONNECT_TIMEOUT: Connection timeout (default: 5.0)
742
+ TRUTHOUND_THROTTLE_REDIS_MAX_RETRIES: Max retry attempts (default: 3)
743
+ TRUTHOUND_THROTTLE_REDIS_RETRY_BASE_DELAY: Base delay for exponential backoff (default: 1.0)
744
+ TRUTHOUND_THROTTLE_FALLBACK_ENABLED: Enable fallback to InMemory (default: true)
745
+
746
+ Example:
747
+ # Basic usage
748
+ store = RedisThrottlingStore()
749
+
750
+ # Custom configuration
751
+ store = RedisThrottlingStore(
752
+ redis_url="redis://myredis:6379/1",
753
+ default_ttl=7200,
754
+ max_connections=20,
755
+ )
756
+
757
+ # With context manager
758
+ async with RedisThrottlingStore() as store:
759
+ result = throttler.allow("channel-1")
760
+
761
+ Note: Requires the 'redis' optional dependency.
762
+ Install with: pip install truthound-dashboard[redis]
763
+ """
764
+
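The fallback described above trades consistency for availability: after a Redis error the store keeps serving from a per-process `InMemoryThrottlingStore`, so counters are no longer shared across instances. A sketch of the two postures the `enable_fallback` constructor flag selects (values illustrative):

```python
# Tests: fail hard so a Redis outage is not silently masked by the fallback.
strict_store = RedisThrottlingStore(
    redis_url="redis://localhost:6379/0",
    enable_fallback=False,
)

# Production-style instance: on a Redis error, operations log the failure,
# flip to the in-memory fallback, and keep serving (per-process state only).
lenient_store = RedisThrottlingStore(enable_fallback=True, default_ttl=7200)
```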
+     # Hash field names
+     FIELD_COUNT = "count"
+     FIELD_WINDOW_START = "window_start"
+     FIELD_TOKENS = "tokens"
+     FIELD_LAST_REFILL = "last_refill"
+     FIELD_LAST_ACCESSED = "last_accessed"
+     FIELD_METADATA = "metadata"
+
+     def __init__(
+         self,
+         redis_url: str | None = None,
+         key_prefix: str | None = None,
+         default_ttl: int | None = None,
+         max_connections: int | None = None,
+         socket_timeout: float | None = None,
+         socket_connect_timeout: float | None = None,
+         max_retries: int | None = None,
+         retry_base_delay: float | None = None,
+         enable_fallback: bool | None = None,
+     ) -> None:
+         """Initialize Redis throttling store.
+
+         All parameters can be configured via environment variables if not
+         explicitly provided.
+
+         Args:
+             redis_url: Redis connection URL.
+             key_prefix: Prefix for all Redis keys.
+             default_ttl: Default TTL in seconds for entries.
+             max_connections: Maximum connections in the pool.
+             socket_timeout: Socket timeout in seconds.
+             socket_connect_timeout: Connection timeout in seconds.
+             max_retries: Maximum retry attempts for reconnection.
+             retry_base_delay: Base delay for exponential backoff.
+             enable_fallback: Enable fallback to InMemory on Redis failure.
+
+         Raises:
+             ImportError: If redis package is not installed.
+         """
+         if not REDIS_AVAILABLE:
+             raise ImportError(
+                 "Redis support requires the 'redis' package. "
+                 "Install with: pip install truthound-dashboard[redis] "
+                 "or pip install redis"
+             )
+
+         # Configuration from environment or parameters
+         self.redis_url = redis_url or os.getenv(
+             "TRUTHOUND_THROTTLE_REDIS_URL", "redis://localhost:6379/0"
+         )
+         self.key_prefix = key_prefix or os.getenv(
+             "TRUTHOUND_THROTTLE_REDIS_PREFIX", "truthound:throttle:"
+         )
+         self.default_ttl = default_ttl or int(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_TTL", "3600")
+         )
+         self.max_connections = max_connections or int(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_POOL_SIZE", "10")
+         )
+         self.socket_timeout = socket_timeout or float(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_SOCKET_TIMEOUT", "5.0")
+         )
+         self.socket_connect_timeout = socket_connect_timeout or float(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_CONNECT_TIMEOUT", "5.0")
+         )
+         self.max_retries = max_retries or int(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_MAX_RETRIES", "3")
+         )
+         self.retry_base_delay = retry_base_delay or float(
+             os.getenv("TRUTHOUND_THROTTLE_REDIS_RETRY_BASE_DELAY", "1.0")
+         )
+
+         fallback_env = os.getenv("TRUTHOUND_THROTTLE_FALLBACK_ENABLED", "true")
+         self.enable_fallback = (
+             enable_fallback
+             if enable_fallback is not None
+             else fallback_env.lower() == "true"
+         )
+
+         # Connection pool for sync client
+         self._pool: redis.ConnectionPool | None = None
+         self._client: redis.Redis | None = None
+
+         # Connection pool for async client
+         self._async_pool: redis.asyncio.ConnectionPool | None = None
+         self._async_client: redis.asyncio.Redis | None = None
+
+         # Locks for thread-safe initialization
+         self._lock = threading.Lock()
+         self._async_lock: asyncio.Lock | None = None
+
+         # Fallback store for graceful degradation
+         self._fallback_store: InMemoryThrottlingStore | None = None
+         self._using_fallback = False
+
+         # Connection state tracking
+         self._connected = False
+         self._retry_count = 0
+         self._last_error: Exception | None = None
+         self._last_error_time: float | None = None
+
+         # Metrics
+         self._metrics = ThrottlingMetrics()
+         self._redis_errors = 0
+         self._fallback_count = 0
+         self._reconnections = 0
+
+         # Index tracking key
+         self._index_key = f"{self.key_prefix}index"
+
+     def _get_key(self, key: str) -> str:
+         """Get full Redis key for throttling entry.
+
+         Args:
+             key: The throttling key.
+
+         Returns:
+             Full Redis key with prefix.
+         """
+         return f"{self.key_prefix}entry:{key}"
+
+     def _create_pool(self) -> "redis.ConnectionPool":
+         """Create a connection pool for sync client.
+
+         Returns:
+             Configured connection pool.
+         """
+         return redis.ConnectionPool.from_url(
+             self.redis_url,
+             max_connections=self.max_connections,
+             socket_timeout=self.socket_timeout,
+             socket_connect_timeout=self.socket_connect_timeout,
+             retry_on_timeout=True,
+             decode_responses=True,
+         )
+
+     async def _create_async_pool(self) -> "redis.asyncio.ConnectionPool":
+         """Create a connection pool for async client.
+
+         Returns:
+             Configured async connection pool.
+         """
+         return redis.asyncio.ConnectionPool.from_url(
+             self.redis_url,
+             max_connections=self.max_connections,
+             socket_timeout=self.socket_timeout,
+             socket_connect_timeout=self.socket_connect_timeout,
+             retry_on_timeout=True,
+             decode_responses=True,
+         )
+
+     def _get_fallback_store(self) -> InMemoryThrottlingStore:
+         """Get or create fallback in-memory store.
+
+         Returns:
+             InMemoryThrottlingStore instance.
+         """
+         if self._fallback_store is None:
+             self._fallback_store = InMemoryThrottlingStore()
+         return self._fallback_store
+
+     def _calculate_backoff_delay(self) -> float:
+         """Calculate exponential backoff delay.
+
+         Returns:
+             Delay in seconds.
+         """
+         import random
+
+         # Exponential backoff with jitter
+         delay = self.retry_base_delay * (2 ** self._retry_count)
+         # Add jitter (up to 25% of delay)
+         jitter = delay * random.uniform(0, 0.25)
+         return min(delay + jitter, 60.0)  # Cap at 60 seconds
+
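Worked through with the defaults (retry_base_delay 1.0, max_retries 3): successive reconnection attempts wait roughly 1 s, 2 s, and 4 s, each inflated by up to 25% jitter and capped at 60 s, after which `_try_reconnect_sync` gives up and, when fallback is enabled, the store degrades to `InMemoryThrottlingStore`.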
+     def _handle_redis_error(self, error: Exception, operation: str) -> None:
+         """Handle Redis errors with logging and metrics.
+
+         Args:
+             error: The exception that occurred.
+             operation: Name of the operation that failed.
+         """
+         self._redis_errors += 1
+         self._last_error = error
+         self._last_error_time = time.time()
+         self._connected = False
+
+         logger.error(
+             f"Redis throttling store error during {operation}: {error}",
+             extra={
+                 "operation": operation,
+                 "error_type": type(error).__name__,
+                 "retry_count": self._retry_count,
+             },
+         )
+
+     def _try_reconnect_sync(self) -> bool:
+         """Attempt to reconnect to Redis synchronously.
+
+         Returns:
+             True if reconnection successful, False otherwise.
+         """
+         if self._retry_count >= self.max_retries:
+             logger.warning(
+                 f"Max retries ({self.max_retries}) reached, using fallback"
+             )
+             return False
+
+         delay = self._calculate_backoff_delay()
+         logger.info(
+             f"Attempting Redis reconnection in {delay:.2f}s "
+             f"(attempt {self._retry_count + 1}/{self.max_retries})"
+         )
+
+         time.sleep(delay)
+         self._retry_count += 1
+
+         try:
+             # Close existing connections
+             if self._client:
+                 try:
+                     self._client.close()
+                 except Exception:
+                     pass
+                 self._client = None
+
+             if self._pool:
+                 try:
+                     self._pool.disconnect()
+                 except Exception:
+                     pass
+                 self._pool = None
+
+             # Create new connection
+             self._pool = self._create_pool()
+             self._client = redis.Redis(connection_pool=self._pool)
+
+             # Test connection
+             if self._client.ping():
+                 self._connected = True
+                 self._retry_count = 0
+                 self._using_fallback = False
+                 self._reconnections += 1
+                 logger.info("Redis throttling store reconnection successful")
+                 return True
+         except Exception as e:
+             logger.warning(f"Reconnection attempt failed: {e}")
+
+         return False
+
+     @property
+     def client(self) -> "redis.Redis":
+         """Get sync Redis client with connection pooling.
+
+         Creates the connection pool and client on first access.
+         Handles reconnection on failure.
+
+         Returns:
+             Redis client instance.
+         """
+         if self._client is None or not self._connected:
+             with self._lock:
+                 if self._client is None or not self._connected:
+                     try:
+                         self._pool = self._create_pool()
+                         self._client = redis.Redis(connection_pool=self._pool)
+                         # Test connection
+                         self._client.ping()
+                         self._connected = True
+                         self._retry_count = 0
+                         logger.debug("Redis throttling store sync client connected")
+                     except Exception as e:
+                         self._handle_redis_error(e, "client_init")
+                         raise
+         return self._client
+
+     async def get_async_client(self) -> "redis.asyncio.Redis":
+         """Get async Redis client with connection pooling.
+
+         Creates the async connection pool and client on first access.
+
+         Returns:
+             Async Redis client instance.
+         """
+         if self._async_lock is None:
+             self._async_lock = asyncio.Lock()
+
+         if self._async_client is None or not self._connected:
+             async with self._async_lock:
+                 if self._async_client is None or not self._connected:
+                     try:
+                         self._async_pool = await self._create_async_pool()
+                         self._async_client = redis.asyncio.Redis(
+                             connection_pool=self._async_pool
+                         )
+                         # Test connection
+                         await self._async_client.ping()
+                         self._connected = True
+                         self._retry_count = 0
+                         logger.debug("Redis throttling store async client connected")
+                     except Exception as e:
+                         self._handle_redis_error(e, "async_client_init")
+                         raise
+         return self._async_client
+
+     def _serialize_entry(self, entry: ThrottlingEntry) -> dict[str, str]:
+         """Serialize entry for Redis storage.
+
+         Args:
+             entry: The entry to serialize.
+
+         Returns:
+             Dictionary suitable for Redis HSET.
+         """
+         return {
+             self.FIELD_COUNT: str(entry.count),
+             self.FIELD_WINDOW_START: str(entry.window_start),
+             self.FIELD_TOKENS: str(entry.tokens),
+             self.FIELD_LAST_REFILL: str(entry.last_refill),
+             self.FIELD_LAST_ACCESSED: str(entry.last_accessed),
+             self.FIELD_METADATA: json.dumps(entry.metadata) if entry.metadata else "{}",
+         }
+
+     def _deserialize_entry(self, key: str, data: dict[str, str]) -> ThrottlingEntry:
+         """Deserialize entry from Redis storage.
+
+         Args:
+             key: The throttling key.
+             data: Dictionary from Redis HGETALL.
+
+         Returns:
+             ThrottlingEntry instance.
+         """
+         metadata = {}
+         if data.get(self.FIELD_METADATA):
+             try:
+                 metadata = json.loads(data[self.FIELD_METADATA])
+             except json.JSONDecodeError:
+                 pass
+
+         return ThrottlingEntry(
+             key=key,
+             count=int(data.get(self.FIELD_COUNT, 0)),
+             window_start=float(data.get(self.FIELD_WINDOW_START, 0)),
+             tokens=float(data.get(self.FIELD_TOKENS, 0)),
+             last_refill=float(data.get(self.FIELD_LAST_REFILL, 0)),
+             last_accessed=float(data.get(self.FIELD_LAST_ACCESSED, 0)),
+             metadata=metadata,
+         )
+
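To make the storage layout concrete, a sketch of what `_serialize_entry` produces and where `set()` puts it, assuming the default prefix (calling the private method here purely for illustration):

```python
store = RedisThrottlingStore()  # requires the redis package; no connection at init
entry = ThrottlingEntry(key="channel-1", count=3, window_start=1_717_430_400.0)

store._serialize_entry(entry)
# {'count': '3', 'window_start': '1717430400.0', 'tokens': '0.0',
#  'last_refill': '0.0', 'last_accessed': '<creation time>', 'metadata': '{}'}
#
# set() writes this mapping to the hash "truthound:throttle:entry:channel-1"
# (key_prefix + "entry:" + key), sets EXPIRE to default_ttl, and records the
# raw key in the "truthound:throttle:index" set used by cleanup()/count().
```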
1115
+ def get(self, key: str) -> ThrottlingEntry | None:
1116
+ """Get entry by key.
1117
+
1118
+ Falls back to InMemory store on Redis failure if enabled.
1119
+
1120
+ Args:
1121
+ key: The throttling key.
1122
+
1123
+ Returns:
1124
+ Entry if found, None otherwise.
1125
+ """
1126
+ if self._using_fallback and self.enable_fallback:
1127
+ return self._get_fallback_store().get(key)
1128
+
1129
+ try:
1130
+ redis_key = self._get_key(key)
1131
+ now = time.time()
1132
+
1133
+ # Get and update last_accessed atomically
1134
+ pipe = self.client.pipeline()
1135
+ pipe.hgetall(redis_key)
1136
+ pipe.hset(redis_key, self.FIELD_LAST_ACCESSED, str(now))
1137
+ pipe.expire(redis_key, self.default_ttl)
1138
+ results = pipe.execute()
1139
+
1140
+ data = results[0]
1141
+ if not data:
1142
+ return None
1143
+
1144
+ return self._deserialize_entry(key, data)
1145
+
1146
+ except Exception as e:
1147
+ self._handle_redis_error(e, "get")
1148
+
1149
+ if self.enable_fallback:
1150
+ self._using_fallback = True
1151
+ self._fallback_count += 1
1152
+ logger.warning("Falling back to InMemory throttling store")
1153
+ return self._get_fallback_store().get(key)
1154
+
1155
+ raise
1156
+
1157
+ async def get_async(self, key: str) -> ThrottlingEntry | None:
1158
+ """Async get entry by key.
1159
+
1160
+ Args:
1161
+ key: The throttling key.
1162
+
1163
+ Returns:
1164
+ Entry if found, None otherwise.
1165
+ """
1166
+ if self._using_fallback and self.enable_fallback:
1167
+ return self._get_fallback_store().get(key)
1168
+
1169
+ try:
1170
+ client = await self.get_async_client()
1171
+ redis_key = self._get_key(key)
1172
+ now = time.time()
1173
+
1174
+ # Get and update last_accessed atomically
1175
+ pipe = client.pipeline()
1176
+ pipe.hgetall(redis_key)
1177
+ pipe.hset(redis_key, self.FIELD_LAST_ACCESSED, str(now))
1178
+ pipe.expire(redis_key, self.default_ttl)
1179
+ results = await pipe.execute()
1180
+
1181
+ data = results[0]
1182
+ if not data:
1183
+ return None
1184
+
1185
+ return self._deserialize_entry(key, data)
1186
+
1187
+ except Exception as e:
1188
+ self._handle_redis_error(e, "get_async")
1189
+
1190
+ if self.enable_fallback:
1191
+ self._using_fallback = True
1192
+ self._fallback_count += 1
1193
+ logger.warning("Falling back to InMemory throttling store")
1194
+ return self._get_fallback_store().get(key)
1195
+
1196
+ raise
1197
+
1198
+ def set(self, entry: ThrottlingEntry) -> None:
1199
+ """Set or update an entry.
1200
+
1201
+ Args:
1202
+ entry: The entry to store.
1203
+ """
1204
+ if self._using_fallback and self.enable_fallback:
1205
+ self._get_fallback_store().set(entry)
1206
+ return
1207
+
1208
+ try:
1209
+ redis_key = self._get_key(entry.key)
1210
+ entry.last_accessed = time.time()
1211
+
1212
+ pipe = self.client.pipeline()
1213
+ pipe.hset(redis_key, mapping=self._serialize_entry(entry))
1214
+ pipe.expire(redis_key, self.default_ttl)
1215
+ pipe.sadd(self._index_key, entry.key)
1216
+ pipe.expire(self._index_key, self.default_ttl * 2)
1217
+ pipe.execute()
1218
+
1219
+ except Exception as e:
1220
+ self._handle_redis_error(e, "set")
1221
+
1222
+ if self.enable_fallback:
1223
+ self._using_fallback = True
1224
+ self._fallback_count += 1
1225
+ logger.warning("Falling back to InMemory throttling store")
1226
+ self._get_fallback_store().set(entry)
1227
+ return
1228
+
1229
+ raise
1230
+
1231
+ async def set_async(self, entry: ThrottlingEntry) -> None:
1232
+ """Async set or update an entry.
1233
+
1234
+ Args:
1235
+ entry: The entry to store.
1236
+ """
1237
+ if self._using_fallback and self.enable_fallback:
1238
+ self._get_fallback_store().set(entry)
1239
+ return
1240
+
1241
+ try:
1242
+ client = await self.get_async_client()
1243
+ redis_key = self._get_key(entry.key)
1244
+ entry.last_accessed = time.time()
1245
+
1246
+ pipe = client.pipeline()
1247
+ pipe.hset(redis_key, mapping=self._serialize_entry(entry))
1248
+ pipe.expire(redis_key, self.default_ttl)
1249
+ pipe.sadd(self._index_key, entry.key)
1250
+ pipe.expire(self._index_key, self.default_ttl * 2)
1251
+ await pipe.execute()
1252
+
1253
+ except Exception as e:
1254
+ self._handle_redis_error(e, "set_async")
1255
+
1256
+ if self.enable_fallback:
1257
+ self._using_fallback = True
1258
+ self._fallback_count += 1
1259
+ logger.warning("Falling back to InMemory throttling store")
1260
+ self._get_fallback_store().set(entry)
1261
+ return
1262
+
1263
+ raise
1264
+
1265
+ def increment(self, key: str, window_start: float) -> int:
1266
+ """Increment counter and return new count.
1267
+
1268
+ Uses Lua script for atomic check-and-increment.
1269
+
1270
+ Args:
1271
+ key: The throttling key.
1272
+ window_start: Start of current window.
1273
+
1274
+ Returns:
1275
+ New count value.
1276
+ """
1277
+ if self._using_fallback and self.enable_fallback:
1278
+ return self._get_fallback_store().increment(key, window_start)
1279
+
1280
+ # Lua script for atomic increment with window check
1281
+ lua_script = """
1282
+ local key = KEYS[1]
1283
+ local index_key = KEYS[2]
1284
+ local window_start = tonumber(ARGV[1])
1285
+ local now = tonumber(ARGV[2])
1286
+ local ttl = tonumber(ARGV[3])
1287
+ local raw_key = ARGV[4]
1288
+
1289
+ -- Get current window_start
1290
+ local current_window = redis.call('HGET', key, 'window_start')
1291
+
1292
+ if current_window == false or tonumber(current_window) ~= window_start then
1293
+ -- New window, reset count to 1
1294
+ redis.call('HMSET', key,
1295
+ 'count', 1,
1296
+ 'window_start', window_start,
1297
+ 'tokens', 0,
1298
+ 'last_refill', 0,
1299
+ 'last_accessed', now,
1300
+ 'metadata', '{}'
1301
+ )
1302
+ redis.call('EXPIRE', key, ttl)
1303
+ redis.call('SADD', index_key, raw_key)
1304
+ redis.call('EXPIRE', index_key, ttl * 2)
1305
+ return 1
1306
+ else
1307
+ -- Same window, increment
1308
+ local new_count = redis.call('HINCRBY', key, 'count', 1)
1309
+ redis.call('HSET', key, 'last_accessed', now)
1310
+ redis.call('EXPIRE', key, ttl)
1311
+ return new_count
1312
+ end
1313
+ """
1314
+
1315
+ try:
1316
+ redis_key = self._get_key(key)
1317
+ now = time.time()
1318
+
1319
+ result = self.client.eval(
1320
+ lua_script,
1321
+ 2, # number of keys
1322
+ redis_key,
1323
+ self._index_key,
1324
+ str(window_start),
1325
+ str(now),
1326
+ str(self.default_ttl),
1327
+ key,
1328
+ )
1329
+
1330
+ return int(result)
1331
+
1332
+ except Exception as e:
1333
+ self._handle_redis_error(e, "increment")
1334
+
1335
+ if self.enable_fallback:
1336
+ self._using_fallback = True
1337
+ self._fallback_count += 1
1338
+ logger.warning("Falling back to InMemory throttling store")
1339
+ return self._get_fallback_store().increment(key, window_start)
1340
+
1341
+ raise
1342
+
1343
+ async def increment_async(self, key: str, window_start: float) -> int:
1344
+ """Async increment counter and return new count.
1345
+
1346
+ Args:
1347
+ key: The throttling key.
1348
+ window_start: Start of current window.
1349
+
1350
+ Returns:
1351
+ New count value.
1352
+ """
1353
+ if self._using_fallback and self.enable_fallback:
1354
+ return self._get_fallback_store().increment(key, window_start)
1355
+
1356
+ # Lua script for atomic increment
1357
+ lua_script = """
1358
+ local key = KEYS[1]
1359
+ local index_key = KEYS[2]
1360
+ local window_start = tonumber(ARGV[1])
1361
+ local now = tonumber(ARGV[2])
1362
+ local ttl = tonumber(ARGV[3])
1363
+ local raw_key = ARGV[4]
1364
+
1365
+ local current_window = redis.call('HGET', key, 'window_start')
1366
+
1367
+ if current_window == false or tonumber(current_window) ~= window_start then
1368
+ redis.call('HMSET', key,
1369
+ 'count', 1,
1370
+ 'window_start', window_start,
1371
+ 'tokens', 0,
1372
+ 'last_refill', 0,
1373
+ 'last_accessed', now,
1374
+ 'metadata', '{}'
1375
+ )
1376
+ redis.call('EXPIRE', key, ttl)
1377
+ redis.call('SADD', index_key, raw_key)
1378
+ redis.call('EXPIRE', index_key, ttl * 2)
1379
+ return 1
1380
+ else
1381
+ local new_count = redis.call('HINCRBY', key, 'count', 1)
1382
+ redis.call('HSET', key, 'last_accessed', now)
1383
+ redis.call('EXPIRE', key, ttl)
1384
+ return new_count
1385
+ end
1386
+ """
1387
+
1388
+ try:
1389
+ client = await self.get_async_client()
1390
+ redis_key = self._get_key(key)
1391
+ now = time.time()
1392
+
1393
+ result = await client.eval(
1394
+ lua_script,
1395
+ 2,
1396
+ redis_key,
1397
+ self._index_key,
1398
+ str(window_start),
1399
+ str(now),
1400
+ str(self.default_ttl),
1401
+ key,
1402
+ )
1403
+
1404
+ return int(result)
1405
+
1406
+ except Exception as e:
1407
+ self._handle_redis_error(e, "increment_async")
1408
+
1409
+ if self.enable_fallback:
1410
+ self._using_fallback = True
1411
+ self._fallback_count += 1
1412
+ logger.warning("Falling back to InMemory throttling store")
1413
+ return self._get_fallback_store().increment(key, window_start)
1414
+
1415
+ raise
1416
+
1417
+ def cleanup(self, max_age_seconds: int) -> int:
1418
+ """Remove old entries.
1419
+
1420
+ Redis handles TTL automatically, but this method can force
1421
+ cleanup of entries older than max_age_seconds.
1422
+
1423
+ Args:
1424
+ max_age_seconds: Maximum age of entries to keep.
1425
+
1426
+ Returns:
1427
+ Number of entries removed.
1428
+ """
1429
+ if self._using_fallback and self.enable_fallback:
1430
+ return self._get_fallback_store().cleanup(max_age_seconds)
1431
+
1432
+ try:
1433
+ cutoff = time.time() - max_age_seconds
1434
+ removed = 0
1435
+
1436
+ # Get all keys from index
1437
+ keys = self.client.smembers(self._index_key)
1438
+
1439
+ for key in keys:
1440
+ redis_key = self._get_key(key)
1441
+ data = self.client.hgetall(redis_key)
1442
+
1443
+ if not data:
1444
+ # Entry expired, remove from index
1445
+ self.client.srem(self._index_key, key)
1446
+ removed += 1
1447
+ elif float(data.get(self.FIELD_WINDOW_START, 0)) < cutoff:
1448
+ # Entry is old, delete it
1449
+ self.client.delete(redis_key)
1450
+ self.client.srem(self._index_key, key)
1451
+ removed += 1
1452
+
1453
+ return removed
1454
+
1455
+ except Exception as e:
1456
+ self._handle_redis_error(e, "cleanup")
1457
+
1458
+ if self.enable_fallback:
1459
+ self._using_fallback = True
1460
+ return self._get_fallback_store().cleanup(max_age_seconds)
1461
+
1462
+ raise
1463
+
+    async def cleanup_async(self, max_age_seconds: int) -> int:
+        """Async remove old entries.
+
+        Args:
+            max_age_seconds: Maximum age of entries to keep.
+
+        Returns:
+            Number of entries removed.
+        """
+        if self._using_fallback and self.enable_fallback:
+            return self._get_fallback_store().cleanup(max_age_seconds)
+
+        try:
+            client = await self.get_async_client()
+            cutoff = time.time() - max_age_seconds
+            removed = 0
+
+            # Get all keys from index
+            keys = await client.smembers(self._index_key)
+
+            for key in keys:
+                redis_key = self._get_key(key)
+                data = await client.hgetall(redis_key)
+
+                if not data:
+                    # Entry expired via TTL, remove dangling index member
+                    await client.srem(self._index_key, key)
+                    removed += 1
+                elif float(data.get(self.FIELD_WINDOW_START, 0)) < cutoff:
+                    # Entry is older than the cutoff, delete it
+                    await client.delete(redis_key)
+                    await client.srem(self._index_key, key)
+                    removed += 1
+
+            return removed
+
+        except Exception as e:
+            self._handle_redis_error(e, "cleanup_async")
+
+            if self.enable_fallback:
+                self._using_fallback = True
+                return self._get_fallback_store().cleanup(max_age_seconds)
+
+            raise
+
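Because Redis expires the hashes on its own, the per-entry TTL does most of the work; cleanup mainly prunes index members whose hashes have already vanished. A sketch of wiring it to a background task, with an assumed interval and age cutoff:

    import asyncio

    async def throttle_cleanup_loop(store: RedisThrottlingStore) -> None:
        # Prune every five minutes; entries older than one hour are dropped,
        # along with index members left behind by TTL expiry.
        while True:
            removed = await store.cleanup_async(max_age_seconds=3600)
            logger.debug("Throttling cleanup removed %d entries", removed)
            await asyncio.sleep(300)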
+    def clear(self) -> None:
+        """Clear all throttling entries."""
+        if self._using_fallback and self.enable_fallback:
+            self._get_fallback_store().clear()
+            return
+
+        try:
+            # Get all keys from index
+            keys = self.client.smembers(self._index_key)
+
+            if keys:
+                # Delete all entry keys
+                redis_keys = [self._get_key(k) for k in keys]
+                self.client.delete(*redis_keys)
+
+            # Delete index
+            self.client.delete(self._index_key)
+
+        except Exception as e:
+            self._handle_redis_error(e, "clear")
+
+            if self.enable_fallback:
+                self._using_fallback = True
+                self._get_fallback_store().clear()
+                return
+
+            raise
+
+    async def clear_async(self) -> None:
+        """Async clear all throttling entries."""
+        if self._using_fallback and self.enable_fallback:
+            self._get_fallback_store().clear()
+            return
+
+        try:
+            client = await self.get_async_client()
+
+            # Get all keys from index
+            keys = await client.smembers(self._index_key)
+
+            if keys:
+                redis_keys = [self._get_key(k) for k in keys]
+                await client.delete(*redis_keys)
+
+            await client.delete(self._index_key)
+
+        except Exception as e:
+            self._handle_redis_error(e, "clear_async")
+
+            if self.enable_fallback:
+                self._using_fallback = True
+                self._get_fallback_store().clear()
+                return
+
+            raise
+
+    def count(self) -> int:
+        """Get total entry count."""
+        if self._using_fallback and self.enable_fallback:
+            return self._get_fallback_store().count()
+
+        try:
+            return self.client.scard(self._index_key)
+
+        except Exception as e:
+            self._handle_redis_error(e, "count")
+
+            if self.enable_fallback:
+                self._using_fallback = True
+                return self._get_fallback_store().count()
+
+            raise
+
+    async def count_async(self) -> int:
+        """Async get total entry count."""
+        if self._using_fallback and self.enable_fallback:
+            return self._get_fallback_store().count()
+
+        try:
+            client = await self.get_async_client()
+            return await client.scard(self._index_key)
+
+        except Exception as e:
+            self._handle_redis_error(e, "count_async")
+
+            if self.enable_fallback:
+                self._using_fallback = True
+                return self._get_fallback_store().count()
+
+            raise
+
+    def health_check(self) -> dict[str, Any]:
+        """Perform health check and return status.
+
+        Returns:
+            Dictionary with health status information.
+        """
+        import re
+
+        result = {
+            "healthy": False,
+            "connected": self._connected,
+            "using_fallback": self._using_fallback,
+            # Mask credentials embedded in the URL before reporting it
+            "redis_url": re.sub(r"://[^:]+:[^@]+@", "://***:***@", self.redis_url),
+        }
+
+        if self._using_fallback and self.enable_fallback:
+            result["healthy"] = True
+            result["mode"] = "fallback"
+            result["fallback_entries"] = self._get_fallback_store().count()
+            return result
+
+        try:
+            ping_ok = self.client.ping()
+
+            if ping_ok:
+                result["healthy"] = True
+                result["mode"] = "redis"
+                result["entries"] = self.count()
+
+                # Get Redis info
+                info = self.client.info(section="server")
+                result["redis_info"] = {
+                    "version": info.get("redis_version"),
+                    "uptime_seconds": info.get("uptime_in_seconds"),
+                }
+
+        except Exception as e:
+            result["error"] = str(e)
+            if self._last_error_time:
+                from datetime import datetime
+
+                result["last_error_time"] = datetime.fromtimestamp(
+                    self._last_error_time
+                ).isoformat()
+
+        return result
+
+    async def health_check_async(self) -> dict[str, Any]:
+        """Async perform health check and return status.
+
+        Returns:
+            Dictionary with health status information.
+        """
+        import re
+
+        result = {
+            "healthy": False,
+            "connected": self._connected,
+            "using_fallback": self._using_fallback,
+            # Mask credentials embedded in the URL before reporting it
+            "redis_url": re.sub(r"://[^:]+:[^@]+@", "://***:***@", self.redis_url),
+        }
+
+        if self._using_fallback and self.enable_fallback:
+            result["healthy"] = True
+            result["mode"] = "fallback"
+            result["fallback_entries"] = self._get_fallback_store().count()
+            return result
+
+        try:
+            client = await self.get_async_client()
+            ping_ok = await client.ping()
+
+            if ping_ok:
+                result["healthy"] = True
+                result["mode"] = "redis"
+                result["entries"] = await self.count_async()
+
+                info = await client.info(section="server")
+                result["redis_info"] = {
+                    "version": info.get("redis_version"),
+                    "uptime_seconds": info.get("uptime_in_seconds"),
+                }
+
+        except Exception as e:
+            result["error"] = str(e)
+            if self._last_error_time:
+                from datetime import datetime
+
+                result["last_error_time"] = datetime.fromtimestamp(
+                    self._last_error_time
+                ).isoformat()
+
+        return result
+
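The health payload distinguishes a degraded-but-working fallback from a hard failure, which makes it a natural readiness signal. A sketch assuming a FastAPI route; the app object, path, and endpoint are illustrative and not part of this package:

    from fastapi import FastAPI, Response

    app = FastAPI()

    @app.get("/internal/throttling/health")
    async def throttling_health(response: Response) -> dict:
        status = await store.health_check_async()
        # Fallback mode reports healthy=True with mode="fallback", so operators
        # can alert on degradation without failing the probe outright.
        if not status["healthy"]:
            response.status_code = 503
        return status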
+    def get_metrics(self) -> dict[str, Any]:
+        """Get store metrics.
+
+        Returns:
+            Dictionary with metrics data.
+        """
+        metrics = {
+            "redis_errors": self._redis_errors,
+            "fallback_count": self._fallback_count,
+            "reconnections": self._reconnections,
+            "connected": self._connected,
+            "using_fallback": self._using_fallback,
+        }
+
+        if self._using_fallback and self._fallback_store:
+            metrics["fallback_metrics"] = self._fallback_store.get_metrics()
+        else:
+            try:
+                metrics["total_entries"] = self.count()
+            except Exception:
+                metrics["total_entries"] = -1
+
+        return metrics
+
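These counters are cheap to poll from whatever monitoring loop already exists; a sketch that surfaces sustained degradation (the threshold is illustrative):

    metrics = store.get_metrics()
    if metrics["using_fallback"] and metrics["fallback_count"] > 10:
        # Repeated fallbacks suggest Redis is down rather than flapping.
        logger.error("Throttling store degraded after %d fallbacks: %s",
                     metrics["fallback_count"], metrics)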
+    def close(self) -> None:
+        """Close all connections and pools."""
+        if self._client is not None:
+            try:
+                self._client.close()
+            except Exception:
+                pass
+            self._client = None
+
+        if self._pool is not None:
+            try:
+                self._pool.disconnect()
+            except Exception:
+                pass
+            self._pool = None
+
+        self._connected = False
+
+    async def close_async(self) -> None:
+        """Async close all connections and pools."""
+        if self._async_client is not None:
+            try:
+                await self._async_client.close()
+            except Exception:
+                pass
+            self._async_client = None
+
+        if self._async_pool is not None:
+            try:
+                await self._async_pool.disconnect()
+            except Exception:
+                pass
+            self._async_pool = None
+
+        self._connected = False
+
+    def __enter__(self) -> "RedisThrottlingStore":
+        """Context manager entry."""
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        """Context manager exit, closes connections."""
+        self.close()
+
+    async def __aenter__(self) -> "RedisThrottlingStore":
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        """Async context manager exit, closes connections."""
+        await self.close_async()
+
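Both protocols delegate to the matching close method, so a store can be scoped to a request or a job without leaking pool connections. A usage sketch, with the constructor arguments assumed:

    # Synchronous scope: close() runs on exit.
    with RedisThrottlingStore(redis_url="redis://localhost:6379/0") as store:
        store.count()

    # Asynchronous scope: close_async() runs on __aexit__.
    async with RedisThrottlingStore(redis_url="redis://localhost:6379/0") as store:
        await store.count_async()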
+
+# =============================================================================
+# Factory Function
+# =============================================================================
+
+
+class ThrottlingStoreType:
+    """Store type constants."""
+
+    MEMORY = "memory"
+    SQLITE = "sqlite"
+    REDIS = "redis"
+
+
+def create_throttling_store(
+    store_type: str | None = None,
+    **kwargs: Any,
+) -> BaseThrottlingStore:
+    """Factory function to create the appropriate throttling store.
+
+    Selects the store type based on configuration or environment variables.
+
+    Environment variables:
+        TRUTHOUND_THROTTLE_STORE_TYPE: Store type (memory, sqlite, redis)
+        TRUTHOUND_THROTTLE_SQLITE_PATH: SQLite database path
+        TRUTHOUND_THROTTLE_REDIS_URL: Redis connection URL (enables redis)
+
+    Args:
+        store_type: Explicit store type override. If None, auto-detects.
+        **kwargs: Additional arguments passed to the store constructor.
+
+    Returns:
+        Configured BaseThrottlingStore instance.
+
+    Example:
+        # Auto-detect based on environment
+        store = create_throttling_store()
+
+        # Explicit type
+        store = create_throttling_store("redis", default_ttl=7200)
+
+        # SQLite with custom path
+        store = create_throttling_store("sqlite", db_path="/tmp/throttle.db")
+    """
+    # Determine store type
+    if store_type is None:
+        store_type = os.getenv("TRUTHOUND_THROTTLE_STORE_TYPE")
+
+    # Auto-detect if still None
+    if store_type is None:
+        redis_url = os.getenv("TRUTHOUND_THROTTLE_REDIS_URL")
+        if redis_url and REDIS_AVAILABLE:
+            store_type = ThrottlingStoreType.REDIS
+            logger.info(
+                "Auto-detected Redis throttling store from TRUTHOUND_THROTTLE_REDIS_URL"
+            )
+        elif os.getenv("TRUTHOUND_THROTTLE_SQLITE_PATH"):
+            store_type = ThrottlingStoreType.SQLITE
+            logger.info("Auto-detected SQLite store from TRUTHOUND_THROTTLE_SQLITE_PATH")
+        else:
+            store_type = ThrottlingStoreType.MEMORY
+            logger.info("Using default InMemory throttling store")
+
+    # Normalize store type
+    store_type = store_type.lower().strip()
+
+    # Create store based on type
+    if store_type == ThrottlingStoreType.MEMORY:
+        logger.info("Creating InMemory throttling store")
+        return InMemoryThrottlingStore(**kwargs)
+
+    elif store_type == ThrottlingStoreType.SQLITE:
+        db_path = kwargs.pop("db_path", None) or os.getenv(
+            "TRUTHOUND_THROTTLE_SQLITE_PATH", "throttling.db"
+        )
+        logger.info(f"Creating SQLite throttling store at {db_path}")
+        # Forward remaining kwargs so options such as default_ttl are not dropped
+        return SQLiteThrottlingStore(db_path=db_path, **kwargs)
+
+    elif store_type == ThrottlingStoreType.REDIS:
+        if not REDIS_AVAILABLE:
+            logger.warning(
+                "Redis not available, falling back to InMemory store. "
+                "Install with: pip install truthound-dashboard[redis]"
+            )
+            return InMemoryThrottlingStore(**kwargs)
+
+        logger.info("Creating Redis throttling store")
+        return RedisThrottlingStore(**kwargs)
+
+    else:
+        logger.warning(
+            f"Unknown store type '{store_type}', falling back to InMemory store"
+        )
+        return InMemoryThrottlingStore(**kwargs)
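A sketch of environment-driven selection using the variables documented in the docstring; it assumes the Redis store reads its URL from the same environment or from kwargs:

    import os

    os.environ["TRUTHOUND_THROTTLE_STORE_TYPE"] = "redis"
    os.environ["TRUTHOUND_THROTTLE_REDIS_URL"] = "redis://localhost:6379/0"
    store = create_throttling_store()

    # Or bypass the environment entirely:
    store = create_throttling_store("sqlite", db_path="/var/lib/truthound/throttle.db")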