coredis-5.2.0-cp314-cp314t-macosx_10_13_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (100)
  1. 22fe76227e35f92ab5c3__mypyc.cpython-314t-darwin.so +0 -0
  2. coredis/__init__.py +42 -0
  3. coredis/_enum.py +42 -0
  4. coredis/_json.py +11 -0
  5. coredis/_packer.cpython-314t-darwin.so +0 -0
  6. coredis/_packer.py +71 -0
  7. coredis/_protocols.py +50 -0
  8. coredis/_py_311_typing.py +20 -0
  9. coredis/_py_312_typing.py +17 -0
  10. coredis/_sidecar.py +114 -0
  11. coredis/_utils.cpython-314t-darwin.so +0 -0
  12. coredis/_utils.py +440 -0
  13. coredis/_version.py +34 -0
  14. coredis/_version.pyi +1 -0
  15. coredis/cache.py +801 -0
  16. coredis/client/__init__.py +6 -0
  17. coredis/client/basic.py +1238 -0
  18. coredis/client/cluster.py +1264 -0
  19. coredis/commands/__init__.py +64 -0
  20. coredis/commands/_key_spec.py +517 -0
  21. coredis/commands/_utils.py +108 -0
  22. coredis/commands/_validators.py +159 -0
  23. coredis/commands/_wrappers.py +175 -0
  24. coredis/commands/bitfield.py +110 -0
  25. coredis/commands/constants.py +662 -0
  26. coredis/commands/core.py +8484 -0
  27. coredis/commands/function.py +408 -0
  28. coredis/commands/monitor.py +168 -0
  29. coredis/commands/pubsub.py +905 -0
  30. coredis/commands/request.py +108 -0
  31. coredis/commands/script.py +296 -0
  32. coredis/commands/sentinel.py +246 -0
  33. coredis/config.py +50 -0
  34. coredis/connection.py +906 -0
  35. coredis/constants.cpython-314t-darwin.so +0 -0
  36. coredis/constants.py +37 -0
  37. coredis/credentials.py +45 -0
  38. coredis/exceptions.py +360 -0
  39. coredis/experimental/__init__.py +1 -0
  40. coredis/globals.py +23 -0
  41. coredis/modules/__init__.py +117 -0
  42. coredis/modules/autocomplete.py +138 -0
  43. coredis/modules/base.py +262 -0
  44. coredis/modules/filters.py +1319 -0
  45. coredis/modules/graph.py +362 -0
  46. coredis/modules/json.py +691 -0
  47. coredis/modules/response/__init__.py +0 -0
  48. coredis/modules/response/_callbacks/__init__.py +0 -0
  49. coredis/modules/response/_callbacks/autocomplete.py +42 -0
  50. coredis/modules/response/_callbacks/graph.py +237 -0
  51. coredis/modules/response/_callbacks/json.py +21 -0
  52. coredis/modules/response/_callbacks/search.py +221 -0
  53. coredis/modules/response/_callbacks/timeseries.py +158 -0
  54. coredis/modules/response/types.py +179 -0
  55. coredis/modules/search.py +1089 -0
  56. coredis/modules/timeseries.py +1139 -0
  57. coredis/parser.cpython-314t-darwin.so +0 -0
  58. coredis/parser.py +344 -0
  59. coredis/pipeline.py +1225 -0
  60. coredis/pool/__init__.py +11 -0
  61. coredis/pool/basic.py +453 -0
  62. coredis/pool/cluster.py +517 -0
  63. coredis/pool/nodemanager.py +340 -0
  64. coredis/py.typed +0 -0
  65. coredis/recipes/__init__.py +0 -0
  66. coredis/recipes/credentials/__init__.py +5 -0
  67. coredis/recipes/credentials/iam_provider.py +63 -0
  68. coredis/recipes/locks/__init__.py +5 -0
  69. coredis/recipes/locks/extend.lua +17 -0
  70. coredis/recipes/locks/lua_lock.py +281 -0
  71. coredis/recipes/locks/release.lua +10 -0
  72. coredis/response/__init__.py +5 -0
  73. coredis/response/_callbacks/__init__.py +538 -0
  74. coredis/response/_callbacks/acl.py +32 -0
  75. coredis/response/_callbacks/cluster.py +183 -0
  76. coredis/response/_callbacks/command.py +86 -0
  77. coredis/response/_callbacks/connection.py +31 -0
  78. coredis/response/_callbacks/geo.py +58 -0
  79. coredis/response/_callbacks/hash.py +85 -0
  80. coredis/response/_callbacks/keys.py +59 -0
  81. coredis/response/_callbacks/module.py +33 -0
  82. coredis/response/_callbacks/script.py +85 -0
  83. coredis/response/_callbacks/sentinel.py +179 -0
  84. coredis/response/_callbacks/server.py +241 -0
  85. coredis/response/_callbacks/sets.py +44 -0
  86. coredis/response/_callbacks/sorted_set.py +204 -0
  87. coredis/response/_callbacks/streams.py +185 -0
  88. coredis/response/_callbacks/strings.py +70 -0
  89. coredis/response/_callbacks/vector_sets.py +159 -0
  90. coredis/response/_utils.py +33 -0
  91. coredis/response/types.py +416 -0
  92. coredis/retry.py +233 -0
  93. coredis/sentinel.py +477 -0
  94. coredis/stream.py +369 -0
  95. coredis/tokens.py +2286 -0
  96. coredis/typing.py +580 -0
  97. coredis-5.2.0.dist-info/METADATA +211 -0
  98. coredis-5.2.0.dist-info/RECORD +100 -0
  99. coredis-5.2.0.dist-info/WHEEL +6 -0
  100. coredis-5.2.0.dist-info/licenses/LICENSE +23 -0
coredis/cache.py ADDED
@@ -0,0 +1,801 @@
+ from __future__ import annotations
+
+ import asyncio
+ import dataclasses
+ import time
+ import weakref
+ from abc import ABC, abstractmethod
+ from collections import Counter
+ from typing import TYPE_CHECKING, Any
+
+ from coredis._sidecar import Sidecar
+ from coredis._utils import b, make_hashable
+ from coredis.commands import PubSub
+ from coredis.connection import BaseConnection
+ from coredis.typing import (
+     Generic,
+     Hashable,
+     Literal,
+     ModuleType,
+     OrderedDict,
+     RedisValueT,
+     ResponseType,
+     TypeVar,
+ )
+
+ asizeof: ModuleType | None = None
+
+ try:
+     from pympler import asizeof
+ except (AttributeError, KeyError):
+     # Not available in pypy
+     pass
+
+ if TYPE_CHECKING:
+     import coredis.client
+
+
+ @dataclasses.dataclass
+ class CacheStats:
+     """
+     Summary of statistics to be used by instances of :class:`coredis.cache.AbstractCache`.
+     The individual counters exposed are not guaranteed to retain fine grained per key
+     metrics, but the totals (returned by :attr:`coredis.cache.CacheStats.summary`) are
+     maintained in aggregate.
+     """
+
+     #: summary of hits by key (for all commands)
+     hits: Counter[bytes] = dataclasses.field(default_factory=Counter)
+     #: summary of misses by key (for all commands)
+     misses: Counter[bytes] = dataclasses.field(default_factory=Counter)
+     #: number of invalidations including server side and local invalidations
+     invalidations: Counter[bytes] = dataclasses.field(default_factory=Counter)
+     #: counter of keys which returned dirty results based on confidence testing
+     dirty: Counter[bytes] = dataclasses.field(default_factory=Counter)
+
+     def clear(self) -> None:
+         self.hits.clear()
+         self.misses.clear()
+         self.invalidations.clear()
+         self.dirty.clear()
+
+     def compact(self) -> None:
+         """
+         Collapse totals into a single key to avoid unbounded growth of stats
+
+         :meta private:
+         """
+
+         for counter in [self.hits, self.misses, self.invalidations, self.dirty]:
+             total = sum(counter.values())
+             counter.clear()
+             counter[b"__coredis__internal__stats__total"] = total
+
+     def hit(self, key: RedisValueT) -> None:
+         self.hits[b(key)] += 1
+
+     def miss(self, key: RedisValueT) -> None:
+         self.misses[b(key)] += 1
+
+     def invalidate(self, key: RedisValueT) -> None:
+         self.invalidations[b(key)] += 1
+
+     def mark_dirty(self, key: RedisValueT) -> None:
+         self.dirty[b(key)] += 1
+
+     @property
+     def summary(self) -> dict[str, int]:
+         """
+         Aggregated totals of ``hits``, ``misses``, ``dirty_hits``
+         and ``invalidations``
+         """
+
+         return {
+             "hits": sum(self.hits.values()),
+             "misses": sum(self.misses.values()),
+             "dirty_hits": sum(self.dirty.values()),
+             "invalidations": sum(self.invalidations.values()),
+         }
+
+     def __repr__(self) -> str:
+         summary = self.summary
+
+         return (
+             f"CacheStats<hits={summary['hits']}, "
+             f"misses={summary['misses']}, "
+             f"dirty_hits={summary['dirty_hits']}, "
+             f"invalidations={summary['invalidations']}>"
+         )
+
+
+ class AbstractCache(ABC):
+     """
+     Abstract class representing a local cache that can be used by
+     :class:`coredis.Redis` or :class:`coredis.RedisCluster`
+     """
+
+     @abstractmethod
+     async def initialize(
+         self,
+         client: coredis.client.Redis[Any] | coredis.client.RedisCluster[Any],
+     ) -> AbstractCache:
+         """
+         Associate and initialize this cache with the provided client
+         """
+         ...
+
+     @property
+     @abstractmethod
+     def healthy(self) -> bool:
+         """
+         Whether the cache is healthy and should be taken seriously
+         """
+         ...
+
+     @abstractmethod
+     def get(self, command: bytes, key: RedisValueT, *args: RedisValueT) -> ResponseType:
+         """
+         Fetch the cached response for command/key/args combination
+         """
+         ...
+
+     @abstractmethod
+     def put(
+         self, command: bytes, key: RedisValueT, *args: RedisValueT, value: ResponseType
+     ) -> None:
+         """
+         Cache the response for command/key/args combination
+         """
+         ...
+
+     @abstractmethod
+     def invalidate(self, *keys: RedisValueT) -> None:
+         """
+         Invalidate any cached entries for the provided keys
+         """
+         ...
+
+     @property
+     @abstractmethod
+     def stats(self) -> CacheStats:
+         """
+         Returns the current stats for the cache
+         """
+         ...
+
+     @property
+     @abstractmethod
+     def confidence(self) -> float:
+         """
+         Confidence in cached values between 0 - 100. Lower values
+         will result in the client discarding and / or validating the
+         cached responses
+         """
+         ...
+
+     @abstractmethod
+     def feedback(self, command: bytes, key: RedisValueT, *args: RedisValueT, match: bool) -> None:
+         """
+         Provide feedback about a key as having either a match or drift from the actual
+         server side value
+         """
+         ...
+
+     @abstractmethod
+     def get_client_id(self, connection: BaseConnection) -> int | None:
+         """
+         If the cache supports receiving invalidation events from the server
+         return the ``client_id`` that the :paramref:`connection` should send
+         redirects to.
+         """
+         ...
+
+     @abstractmethod
+     def reset(self) -> None:
+         """
+         Reset the cache
+         """
+         ...
+
+     @abstractmethod
+     def shutdown(self) -> None:
+         """
+         Explicitly shutdown the cache
+         """
+         ...
+
+
+ ET = TypeVar("ET")
+
+
+ class LRUCache(Generic[ET]):
+     def __init__(self, max_items: int = -1, max_bytes: int = -1):
+         self.max_items = max_items
+         self.max_bytes = max_bytes
+         self.__cache: OrderedDict[Hashable, ET] = OrderedDict()
+
+         if self.max_bytes > 0 and asizeof is not None:
+             self.max_bytes += asizeof.asizeof(self.__cache)
+         elif self.max_bytes > 0:
+             raise RuntimeError("max_bytes not supported as dependency pympler not available")
+
+     def get(self, key: Hashable) -> ET:
+         if key not in self.__cache:
+             raise KeyError(key)
+         self.__cache.move_to_end(key)
+
+         return self.__cache[key]
+
+     def insert(self, key: Hashable, value: ET) -> None:
+         self.__check_capacity()
+         self.__cache[key] = value
+         self.__cache.move_to_end(key)
+
+     def setdefault(self, key: Hashable, value: ET) -> ET:
+         try:
+             self.__check_capacity()
+
+             return self.get(key)
+         except KeyError:
+             self.insert(key, value)
+
+             return self.get(key)
+
+     def remove(self, key: Hashable) -> None:
+         if key in self.__cache:
+             self.__cache.pop(key)
+
+     def clear(self) -> None:
+         self.__cache.clear()
+
+     def popitem(self) -> tuple[Any, Any] | None:
+         """
+         Recursively remove the oldest entry. If
+         the oldest entry is another LRUCache, trigger
+         the removal of its oldest entry, and if that
+         turns out to be an empty LRUCache, remove that.
+         """
+         try:
+             oldest = next(iter(self.__cache))
+             item = self.__cache[oldest]
+         except StopIteration:
+             return None
+
+         if isinstance(item, LRUCache):
+             if popped := item.popitem():
+                 return popped
+         if entry := self.__cache.popitem(last=False):
+             return entry
+         return None
+
+     def shrink(self) -> None:
+         """
+         Remove old entries until the size of the cache
+         is less than :paramref:`LRUCache.max_bytes` or
+         there is nothing left to remove.
+         """
+
+         if self.max_bytes > 0 and asizeof is not None:
+             cur_size = asizeof.asizeof(self.__cache)
+             while cur_size > self.max_bytes:
+                 if (popped := self.popitem()) is None:
+                     return
+                 cur_size -= asizeof.asizeof(popped[0]) + asizeof.asizeof(popped[1])
+
+     def __repr__(self) -> str:
+         if asizeof is not None:
+             return (
+                 f"LruCache<max_items={self.max_items}, "
+                 f"current_items={len(self.__cache)}, "
+                 f"max_bytes={self.max_bytes}, "
+                 f"current_size_bytes={asizeof.asizeof(self.__cache)}>"
+             )
+         else:
+             return f"LruCache<max_items={self.max_items}, current_items={len(self.__cache)}>"
+
+     def __check_capacity(self) -> None:
+         if len(self.__cache) == self.max_items:
+             self.__cache.popitem(last=False)
+
+
+ class NodeTrackingCache(
+     Sidecar,
+     AbstractCache,
+ ):
+     """
+     An LRU cache that uses server assisted client caching
+     to ensure local cache entries are invalidated if any
+     operations are performed on the keys by another client.
+     """
+
+     def __init__(
+         self,
+         max_keys: int = 2**12,
+         max_size_bytes: int = 64 * 1024 * 1024,
+         max_idle_seconds: int = 5,
+         confidence: float = 100,
+         dynamic_confidence: bool = False,
+         cache: LRUCache[LRUCache[LRUCache[ResponseType]]] | None = None,
+         stats: CacheStats | None = None,
+     ) -> None:
+         """
+         :param max_keys: maximum keys to cache. A negative value represents
+          an unbounded cache.
+         :param max_size_bytes: maximum size in bytes for the local cache.
+          A negative value represents an unbounded cache.
+         :param max_idle_seconds: maximum duration to tolerate no updates
+          from the server. When the duration is exceeded, the connection
+          and cache will be reset.
+         :param confidence: 0 - 100. Lower values will result in the client
+          discarding and / or validating the cached responses
+         :param dynamic_confidence: Whether to adjust the confidence based on
+          sampled validations. Tainted values drop the confidence by 0.1% and
+          confirmations of correct cached values will increase the confidence by 0.01%,
+          up to 100.
+         """
+         super().__init__({b"invalidate"}, max(1, max_idle_seconds - 1))
+         self.__protocol_version: Literal[2, 3] | None = None
+         self.__invalidation_task: asyncio.Task[None] | None = None
+         self.__compact_task: asyncio.Task[None] | None = None
+         self.__max_idle_seconds = max_idle_seconds
+         self.__confidence = self.__original_confidence = confidence
+         self.__dynamic_confidence = dynamic_confidence
+         self.__stats = stats or CacheStats()
+         self.__cache: LRUCache[LRUCache[LRUCache[ResponseType]]] = cache or LRUCache(
+             max_keys, max_size_bytes
+         )
+
+     @property
+     def healthy(self) -> bool:
+         return bool(
+             self.connection
+             and self.connection.is_connected
+             and time.monotonic() - self.last_checkin < self.__max_idle_seconds
+         )
+
+     @property
+     def confidence(self) -> float:
+         return self.__confidence
+
+     @property
+     def stats(self) -> CacheStats:
+         return self.__stats
+
+     def get(self, command: bytes, key: RedisValueT, *args: RedisValueT) -> ResponseType:
+         try:
+             cached = self.__cache.get(b(key)).get(command).get(make_hashable(*args))
+             self.__stats.hit(key)
+
+             return cached
+         except KeyError:
+             self.__stats.miss(key)
+             raise
+
+     def put(
+         self, command: bytes, key: RedisValueT, *args: RedisValueT, value: ResponseType
+     ) -> None:
+         self.__cache.setdefault(b(key), LRUCache()).setdefault(command, LRUCache()).insert(
+             make_hashable(*args), value
+         )
+
+     def invalidate(self, *keys: RedisValueT) -> None:
+         for key in keys:
+             self.__stats.invalidate(key)
+             self.__cache.remove(b(key))
+
+     def feedback(self, command: bytes, key: RedisValueT, *args: RedisValueT, match: bool) -> None:
+         if not match:
+             self.__stats.mark_dirty(key)
+             self.invalidate(key)
+
+         if self.__dynamic_confidence:
+             self.__confidence = min(
+                 100.0,
+                 max(0.0, self.__confidence * (1.0001 if match else 0.999)),
+             )
+
+     def reset(self) -> None:
+         self.__cache.clear()
+         self.__stats.compact()
+         self.__confidence = self.__original_confidence
+
+     def process_message(self, message: ResponseType) -> tuple[ResponseType, ...]:
+         assert isinstance(message, list)
+
+         if self.__protocol_version == 2:
+             assert isinstance(message[0], bytes)
+
+             if b(message[0]) in PubSub.SUBUNSUB_MESSAGE_TYPES:
+                 return ()
+             elif message[2] is not None:
+                 assert isinstance(message[2], list)
+
+                 return tuple(k for k in message[2])
+         elif message[1] is not None:
+             assert isinstance(message[1], list)
+
+             return tuple(k for k in message[1])
+
+         return ()  # noqa
+
+     async def initialize(
+         self,
+         client: coredis.client.Redis[Any] | coredis.client.RedisCluster[Any],
+     ) -> NodeTrackingCache:
+         self.__protocol_version = client.protocol_version
+         await super().start(client)
+
+         if not self.__invalidation_task or self.__invalidation_task.done():
+             self.__invalidation_task = asyncio.create_task(self.__invalidate())
+
+         if not self.__compact_task or self.__compact_task.done():
+             self.__compact_task = asyncio.create_task(self.__compact())
+
+         return self
+
+     async def on_reconnect(self, connection: BaseConnection) -> None:
+         self.__cache.clear()
+         await super().on_reconnect(connection)
+
+         if self.__protocol_version == 2 and self.connection:
+             await self.connection.send_command(b"SUBSCRIBE", b"__redis__:invalidate")
+
+     def shutdown(self) -> None:
+         try:
+             asyncio.get_running_loop()
+
+             if self.__invalidation_task:
+                 self.__invalidation_task.cancel()
+
+             if self.__compact_task:
+                 self.__compact_task.cancel()
+             super().stop()
+         except RuntimeError:
+             pass
+
+     def get_client_id(self, client: BaseConnection) -> int | None:
+         if self.connection and self.connection.is_connected:
+             return self.client_id
+
+         return None
+
+     async def __compact(self) -> None:
+         while True:
+             try:
+                 self.__cache.shrink()
+                 self.__stats.compact()
+                 await asyncio.sleep(max(1, self.__max_idle_seconds - 1))
+             except asyncio.CancelledError:
+                 break
+
+     async def __invalidate(self) -> None:
+         while True:
+             try:
+                 key = b(await self.messages.get())
+                 self.invalidate(key)
+                 self.messages.task_done()
+             except asyncio.CancelledError:
+                 break
+             except RuntimeError:  # noqa
+                 break
+
+
+ class ClusterTrackingCache(AbstractCache):
+     """
+     An LRU cache for redis cluster that uses server assisted client caching
+     to ensure local cache entries are invalidated if any operations are performed
+     on the keys by another client.
+
+     The cache maintains an additional connection per node (including replicas)
+     in the cluster to listen to invalidation events
+     """
+
+     def __init__(
+         self,
+         max_keys: int = 2**12,
+         max_size_bytes: int = 64 * 1024 * 1024,
+         max_idle_seconds: int = 5,
+         confidence: float = 100,
+         dynamic_confidence: bool = False,
+         cache: LRUCache[LRUCache[LRUCache[ResponseType]]] | None = None,
+         stats: CacheStats | None = None,
+     ) -> None:
+         """
+         :param max_keys: maximum keys to cache. A negative value represents
+          an unbounded cache.
+         :param max_size_bytes: maximum size in bytes for the local cache.
+          A negative value represents an unbounded cache.
+         :param max_idle_seconds: maximum duration to tolerate no updates
+          from the server. When the duration is exceeded, the connection
+          and cache will be reset.
+         :param confidence: 0 - 100. Lower values will result in the client
+          discarding and / or validating the cached responses
+         :param dynamic_confidence: Whether to adjust the confidence based on
+          sampled validations. Tainted values drop the confidence by 0.1% and
+          confirmations of correct cached values will increase the confidence by 0.01%,
+          up to 100.
+         """
+         self.node_caches: dict[str, NodeTrackingCache] = {}
+         self.__protocol_version: Literal[2, 3] | None = None
+         self.__cache: LRUCache[LRUCache[LRUCache[ResponseType]]] = cache or LRUCache(
+             max_keys, max_size_bytes
+         )
+         self.__nodes: list[coredis.client.Redis[Any]] = []
+         self.__max_idle_seconds = max_idle_seconds
+         self.__confidence = self.__original_confidence = confidence
+         self.__dynamic_confidence = dynamic_confidence
+         self.__stats = stats or CacheStats()
+         self.__client: weakref.ReferenceType[coredis.client.RedisCluster[Any]] | None = None
+
+     async def initialize(
+         self,
+         client: coredis.client.Redis[Any] | coredis.client.RedisCluster[Any],
+     ) -> ClusterTrackingCache:
+         import coredis.client
+
+         assert isinstance(client, coredis.client.RedisCluster)
+
+         self.__client = weakref.ref(client)
+         self.__cache.clear()
+
+         for sidecar in self.node_caches.values():
+             sidecar.shutdown()
+         self.node_caches.clear()
+         self.__nodes = list(client.all_nodes)
+
+         for node in self.__nodes:
+             node_cache = NodeTrackingCache(
+                 max_idle_seconds=self.__max_idle_seconds,
+                 confidence=self.__confidence,
+                 dynamic_confidence=self.__dynamic_confidence,
+                 cache=self.__cache,
+                 stats=self.__stats,
+             )
+             await node_cache.initialize(node)
+             assert node_cache.connection
+             self.node_caches[node_cache.connection.location] = node_cache
+
+         return self
+
+     @property
+     def client(self) -> coredis.client.RedisCluster[Any] | None:
+         if self.__client:
+             return self.__client()
+
+         return None  # noqa
+
+     @property
+     def healthy(self) -> bool:
+         return bool(
+             self.client
+             and self.client.connection_pool.initialized
+             and self.node_caches
+             and all(cache.healthy for cache in self.node_caches.values())
+         )
+
+     @property
+     def confidence(self) -> float:
+         return self.__confidence
+
+     @property
+     def stats(self) -> CacheStats:
+         return self.__stats
+
+     def get_client_id(self, connection: BaseConnection) -> int | None:
+         try:
+             return self.node_caches[connection.location].get_client_id(connection)
+         except KeyError:
+             return None
+
+     def get(self, command: bytes, key: RedisValueT, *args: RedisValueT) -> ResponseType:
+         try:
+             cached = self.__cache.get(b(key)).get(command).get(make_hashable(*args))
+             self.__stats.hit(key)
+
+             return cached
+         except KeyError:
+             self.__stats.miss(key)
+             raise
+
+     def put(
+         self, command: bytes, key: RedisValueT, *args: RedisValueT, value: ResponseType
+     ) -> None:
+         self.__cache.setdefault(b(key), LRUCache()).setdefault(command, LRUCache()).insert(
+             make_hashable(*args), value
+         )
+
+     def invalidate(self, *keys: RedisValueT) -> None:
+         for key in keys:
+             self.__stats.invalidate(key)
+             self.__cache.remove(b(key))
+
+     def feedback(self, command: bytes, key: RedisValueT, *args: RedisValueT, match: bool) -> None:
+         if not match:
+             self.__stats.mark_dirty(key)
+             self.invalidate(key)
+
+         if self.__dynamic_confidence:
+             self.__confidence = min(
+                 100.0,
+                 max(0.0, self.__confidence * (1.0001 if match else 0.999)),
+             )
+
+     def reset(self) -> None:
+         self.__cache.clear()
+         self.__stats.compact()
+         self.__confidence = self.__original_confidence
+
+     def shutdown(self) -> None:
+         if self.node_caches:
+             for sidecar in self.node_caches.values():
+                 sidecar.shutdown()
+             self.node_caches.clear()
+             self.__nodes.clear()
+
+     def __del__(self) -> None:
+         self.shutdown()
+
+
+ class TrackingCache(AbstractCache):
+     """
+     An LRU cache that uses server assisted client caching to ensure local cache entries
+     are invalidated if any operations are performed on the keys by another client.
+
+     This class proxies to either :class:`~coredis.cache.NodeTrackingCache`
+     or :class:`~coredis.cache.ClusterTrackingCache` depending on which type of client
+     it is passed into.
+     """
+
+     def __init__(
+         self,
+         max_keys: int = 2**12,
+         max_size_bytes: int = 64 * 1024 * 1024,
+         max_idle_seconds: int = 5,
+         confidence: float = 100.0,
+         dynamic_confidence: bool = False,
+         cache: LRUCache[LRUCache[LRUCache[ResponseType]]] | None = None,
+         stats: CacheStats | None = None,
+     ) -> None:
+         """
+         :param max_keys: maximum keys to cache. A negative value represents
+          an unbounded cache.
+         :param max_size_bytes: maximum size in bytes for the local cache.
+          A negative value represents an unbounded cache.
+         :param max_idle_seconds: maximum duration to tolerate no updates
+          from the server. When the duration is exceeded, the connection
+          and cache will be reset.
+         :param confidence: 0 - 100. Lower values will result in the client
+          discarding and / or validating the cached responses
+         :param dynamic_confidence: Whether to adjust the confidence based on
+          sampled validations. Tainted values drop the confidence by 0.1% and
+          confirmations of correct cached values will increase the confidence by 0.01%,
+          up to 100.
+         """
+         self.instance: ClusterTrackingCache | NodeTrackingCache | None = None
+         self.__max_keys = max_keys
+         self.__max_size_bytes = max_size_bytes
+         self.__max_idle_seconds = max_idle_seconds
+         self.__confidence = confidence
+         self.__dynamic_confidence = dynamic_confidence
+         self.__cache: LRUCache[LRUCache[LRUCache[ResponseType]]] = cache or LRUCache(
+             max_keys, max_size_bytes
+         )
+         self.__client: (
+             None
+             | (weakref.ReferenceType[coredis.client.Redis[Any] | coredis.client.RedisCluster[Any],])
+         ) = None
+         self.__stats = stats or CacheStats()
+
+     async def initialize(
+         self,
+         client: coredis.client.Redis[Any] | coredis.client.RedisCluster[Any],
+     ) -> TrackingCache:
+         import coredis.client
+
+         if self.__client and self.__client() != client:
+             copy = self.share()
+
+             return await copy.initialize(client)
+
+         self.__client = weakref.ref(client)
+
+         if not self.instance:
+             if isinstance(client, coredis.client.RedisCluster):
+                 self.instance = ClusterTrackingCache(
+                     self.__max_keys,
+                     self.__max_size_bytes,
+                     self.__max_idle_seconds,
+                     confidence=self.__confidence,
+                     dynamic_confidence=self.__dynamic_confidence,
+                     cache=self.__cache,
+                     stats=self.__stats,
+                 )
+             else:
+                 self.instance = NodeTrackingCache(
+                     self.__max_keys,
+                     self.__max_size_bytes,
+                     self.__max_idle_seconds,
+                     confidence=self.__confidence,
+                     dynamic_confidence=self.__dynamic_confidence,
+                     cache=self.__cache,
+                     stats=self.__stats,
+                 )
+         await self.instance.initialize(client)
+
+         return self
+
+     @property
+     def healthy(self) -> bool:
+         return bool(self.instance and self.instance.healthy)
+
+     @property
+     def confidence(self) -> float:
+         if not self.instance:
+             return self.__confidence
+
+         return self.instance.confidence
+
+     @property
+     def stats(self) -> CacheStats:
+         return self.__stats
+
+     def get_client_id(self, connection: BaseConnection) -> int | None:
+         if self.instance:
+             return self.instance.get_client_id(connection)
+
+         return None
+
+     def get(self, command: bytes, key: RedisValueT, *args: RedisValueT) -> ResponseType:
+         assert self.instance
+
+         return self.instance.get(command, key, *args)
+
+     def put(
+         self, command: bytes, key: RedisValueT, *args: RedisValueT, value: ResponseType
+     ) -> None:
+         if self.instance:
+             self.instance.put(command, key, *args, value=value)
+
+     def invalidate(self, *keys: RedisValueT) -> None:
+         if self.instance:
+             self.instance.invalidate(*keys)
+
+     def feedback(self, command: bytes, key: RedisValueT, *args: RedisValueT, match: bool) -> None:
+         if self.instance:
+             self.instance.feedback(command, key, *args, match=match)
+
+     def reset(self) -> None:
+         if self.instance:
+             self.instance.reset()
+
+     def shutdown(self) -> None:
+         if self.instance:
+             self.instance.shutdown()
+         self.__client = None
+
+     def share(self) -> TrackingCache:
+         """
+         Create a copy of this cache that can be used to share
+         memory with another client.
+
+         In the example below ``c1`` and ``c2`` have their own
+         instances of :class:`~coredis.cache.TrackingCache` but
+         share the same in-memory local cached responses::
+
+             c1 = await coredis.Redis(cache=TrackingCache())
+             c2 = await coredis.Redis(cache=c1.cache.share())
+         """
+         copy = self.__class__(
+             self.__max_keys,
+             self.__max_size_bytes,
+             self.__max_idle_seconds,
+             self.__confidence,
+             self.__dynamic_confidence,
+             self.__cache,
+             self.__stats,
+         )
+
+         return copy
+
+     def __del__(self) -> None:
+         self.shutdown()
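
Usage sketch (not part of the package contents above): the example below shows how the TrackingCache defined in this file might be wired into coredis clients, following the pattern in the share() docstring. The cache= keyword and the awaited client constructor are taken from that docstring; the key name and the dynamic_confidence setting are illustrative assumptions::

    import asyncio

    import coredis
    from coredis.cache import TrackingCache


    async def main() -> None:
        # Defaults mirrored from the constructor above: 2**12 keys, 64MiB,
        # with confidence sampling enabled.
        cache = TrackingCache(
            max_keys=2**12,
            max_size_bytes=64 * 1024 * 1024,
            dynamic_confidence=True,
        )
        c1 = await coredis.Redis(cache=cache)
        # A second client sharing the same in-memory entries and stats.
        c2 = await coredis.Redis(cache=cache.share())

        await c1.set("greeting", "hello")
        await c2.get("greeting")

        # Aggregated totals maintained by CacheStats.
        print(cache.stats.summary)
        cache.shutdown()


    asyncio.run(main())

Locally cached entries are dropped when the server publishes invalidation events for the affected keys, which is what the per-node sidecar connections maintained by NodeTrackingCache and ClusterTrackingCache listen for.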