redis 5.2.0.tar.gz → 5.3.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. {redis-5.2.0/redis.egg-info → redis-5.3.0}/PKG-INFO +1 -1
  2. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/helpers.py +11 -4
  3. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/client.py +49 -12
  4. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/cluster.py +101 -12
  5. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/connection.py +85 -12
  6. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/sentinel.py +1 -5
  7. redis-5.3.0/redis/auth/err.py +31 -0
  8. redis-5.3.0/redis/auth/idp.py +28 -0
  9. redis-5.3.0/redis/auth/token.py +126 -0
  10. redis-5.3.0/redis/auth/token_manager.py +370 -0
  11. {redis-5.2.0 → redis-5.3.0}/redis/backoff.py +15 -0
  12. {redis-5.2.0 → redis-5.3.0}/redis/client.py +116 -56
  13. {redis-5.2.0 → redis-5.3.0}/redis/cluster.py +157 -33
  14. {redis-5.2.0 → redis-5.3.0}/redis/connection.py +103 -11
  15. redis-5.3.0/redis/credentials.py +65 -0
  16. redis-5.3.0/redis/event.py +394 -0
  17. {redis-5.2.0 → redis-5.3.0}/redis/typing.py +1 -1
  18. {redis-5.2.0 → redis-5.3.0}/redis/utils.py +65 -0
  19. {redis-5.2.0 → redis-5.3.0/redis.egg-info}/PKG-INFO +1 -1
  20. {redis-5.2.0 → redis-5.3.0}/redis.egg-info/SOURCES.txt +10 -0
  21. {redis-5.2.0 → redis-5.3.0}/redis.egg-info/requires.txt +1 -0
  22. {redis-5.2.0 → redis-5.3.0}/setup.py +3 -1
  23. {redis-5.2.0 → redis-5.3.0}/tests/conftest.py +217 -25
  24. redis-5.3.0/tests/ssl_utils.py +43 -0
  25. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/conftest.py +164 -0
  26. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_cluster.py +159 -43
  27. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_connect.py +16 -13
  28. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_connection.py +15 -17
  29. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_connection_pool.py +39 -32
  30. redis-5.3.0/tests/test_asyncio/test_credentials.py +696 -0
  31. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_cwe_404.py +2 -2
  32. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_encoding.py +1 -1
  33. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_graph.py +20 -20
  34. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_hash.py +7 -6
  35. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_retry.py +2 -2
  36. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_search.py +2 -0
  37. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_sentinel.py +20 -0
  38. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_timeseries.py +5 -0
  39. redis-5.3.0/tests/test_auth/__init__.py +0 -0
  40. redis-5.3.0/tests/test_auth/test_token.py +76 -0
  41. redis-5.3.0/tests/test_auth/test_token_manager.py +560 -0
  42. redis-5.3.0/tests/test_backoff.py +17 -0
  43. {redis-5.2.0 → redis-5.3.0}/tests/test_cache.py +3 -3
  44. {redis-5.2.0 → redis-5.3.0}/tests/test_cluster.py +164 -24
  45. {redis-5.2.0 → redis-5.3.0}/tests/test_commands.py +1 -1
  46. {redis-5.2.0 → redis-5.3.0}/tests/test_connect.py +14 -13
  47. {redis-5.2.0 → redis-5.3.0}/tests/test_connection.py +14 -15
  48. {redis-5.2.0 → redis-5.3.0}/tests/test_connection_pool.py +68 -33
  49. redis-5.3.0/tests/test_credentials.py +659 -0
  50. {redis-5.2.0 → redis-5.3.0}/tests/test_graph.py +23 -23
  51. redis-5.3.0/tests/test_graph_utils/__init__.py +0 -0
  52. {redis-5.2.0 → redis-5.3.0}/tests/test_graph_utils/test_edge.py +4 -4
  53. {redis-5.2.0 → redis-5.3.0}/tests/test_graph_utils/test_node.py +3 -3
  54. {redis-5.2.0 → redis-5.3.0}/tests/test_graph_utils/test_path.py +5 -5
  55. {redis-5.2.0 → redis-5.3.0}/tests/test_hash.py +7 -6
  56. {redis-5.2.0 → redis-5.3.0}/tests/test_json.py +2 -2
  57. {redis-5.2.0 → redis-5.3.0}/tests/test_multiprocessing.py +43 -5
  58. {redis-5.2.0 → redis-5.3.0}/tests/test_retry.py +2 -2
  59. {redis-5.2.0 → redis-5.3.0}/tests/test_search.py +13 -0
  60. {redis-5.2.0 → redis-5.3.0}/tests/test_sentinel.py +1 -1
  61. {redis-5.2.0 → redis-5.3.0}/tests/test_ssl.py +29 -26
  62. redis-5.2.0/redis/credentials.py +0 -26
  63. redis-5.2.0/tests/ssl_utils.py +0 -14
  64. redis-5.2.0/tests/test_asyncio/test_credentials.py +0 -283
  65. redis-5.2.0/tests/test_credentials.py +0 -250
  66. {redis-5.2.0 → redis-5.3.0}/INSTALL +0 -0
  67. {redis-5.2.0 → redis-5.3.0}/LICENSE +0 -0
  68. {redis-5.2.0 → redis-5.3.0}/MANIFEST.in +0 -0
  69. {redis-5.2.0 → redis-5.3.0}/README.md +0 -0
  70. {redis-5.2.0 → redis-5.3.0}/redis/__init__.py +0 -0
  71. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/__init__.py +0 -0
  72. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/base.py +0 -0
  73. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/commands.py +0 -0
  74. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/encoders.py +0 -0
  75. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/hiredis.py +0 -0
  76. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/resp2.py +0 -0
  77. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/resp3.py +0 -0
  78. {redis-5.2.0 → redis-5.3.0}/redis/_parsers/socket.py +0 -0
  79. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/__init__.py +0 -0
  80. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/lock.py +0 -0
  81. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/retry.py +0 -0
  82. {redis-5.2.0 → redis-5.3.0}/redis/asyncio/utils.py +0 -0
  83. {redis-5.2.0/tests → redis-5.3.0/redis/auth}/__init__.py +0 -0
  84. {redis-5.2.0 → redis-5.3.0}/redis/cache.py +0 -0
  85. {redis-5.2.0 → redis-5.3.0}/redis/commands/__init__.py +0 -0
  86. {redis-5.2.0 → redis-5.3.0}/redis/commands/bf/__init__.py +0 -0
  87. {redis-5.2.0 → redis-5.3.0}/redis/commands/bf/commands.py +0 -0
  88. {redis-5.2.0 → redis-5.3.0}/redis/commands/bf/info.py +0 -0
  89. {redis-5.2.0 → redis-5.3.0}/redis/commands/cluster.py +0 -0
  90. {redis-5.2.0 → redis-5.3.0}/redis/commands/core.py +0 -0
  91. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/__init__.py +0 -0
  92. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/commands.py +0 -0
  93. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/edge.py +0 -0
  94. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/exceptions.py +0 -0
  95. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/execution_plan.py +0 -0
  96. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/node.py +0 -0
  97. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/path.py +0 -0
  98. {redis-5.2.0 → redis-5.3.0}/redis/commands/graph/query_result.py +0 -0
  99. {redis-5.2.0 → redis-5.3.0}/redis/commands/helpers.py +0 -0
  100. {redis-5.2.0 → redis-5.3.0}/redis/commands/json/__init__.py +0 -0
  101. {redis-5.2.0 → redis-5.3.0}/redis/commands/json/_util.py +0 -0
  102. {redis-5.2.0 → redis-5.3.0}/redis/commands/json/commands.py +0 -0
  103. {redis-5.2.0 → redis-5.3.0}/redis/commands/json/decoders.py +0 -0
  104. {redis-5.2.0 → redis-5.3.0}/redis/commands/json/path.py +0 -0
  105. {redis-5.2.0 → redis-5.3.0}/redis/commands/redismodules.py +0 -0
  106. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/__init__.py +0 -0
  107. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/_util.py +0 -0
  108. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/aggregation.py +0 -0
  109. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/commands.py +0 -0
  110. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/document.py +0 -0
  111. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/field.py +0 -0
  112. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/indexDefinition.py +0 -0
  113. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/query.py +0 -0
  114. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/querystring.py +0 -0
  115. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/reducers.py +0 -0
  116. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/result.py +0 -0
  117. {redis-5.2.0 → redis-5.3.0}/redis/commands/search/suggestion.py +0 -0
  118. {redis-5.2.0 → redis-5.3.0}/redis/commands/sentinel.py +0 -0
  119. {redis-5.2.0 → redis-5.3.0}/redis/commands/timeseries/__init__.py +0 -0
  120. {redis-5.2.0 → redis-5.3.0}/redis/commands/timeseries/commands.py +0 -0
  121. {redis-5.2.0 → redis-5.3.0}/redis/commands/timeseries/info.py +0 -0
  122. {redis-5.2.0 → redis-5.3.0}/redis/commands/timeseries/utils.py +0 -0
  123. {redis-5.2.0 → redis-5.3.0}/redis/crc.py +0 -0
  124. {redis-5.2.0 → redis-5.3.0}/redis/exceptions.py +0 -0
  125. {redis-5.2.0 → redis-5.3.0}/redis/lock.py +0 -0
  126. {redis-5.2.0 → redis-5.3.0}/redis/ocsp.py +0 -0
  127. {redis-5.2.0 → redis-5.3.0}/redis/retry.py +0 -0
  128. {redis-5.2.0 → redis-5.3.0}/redis/sentinel.py +0 -0
  129. {redis-5.2.0 → redis-5.3.0}/redis.egg-info/dependency_links.txt +0 -0
  130. {redis-5.2.0 → redis-5.3.0}/redis.egg-info/top_level.txt +0 -0
  131. {redis-5.2.0 → redis-5.3.0}/setup.cfg +0 -0
  132. {redis-5.2.0/tests/test_asyncio → redis-5.3.0/tests}/__init__.py +0 -0
  133. {redis-5.2.0 → redis-5.3.0}/tests/mocks.py +0 -0
  134. {redis-5.2.0/tests/test_graph_utils → redis-5.3.0/tests/test_asyncio}/__init__.py +0 -0
  135. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/compat.py +0 -0
  136. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/mocks.py +0 -0
  137. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_bloom.py +0 -0
  138. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_commands.py +0 -0
  139. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_json.py +0 -0
  140. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_lock.py +0 -0
  141. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_monitor.py +0 -0
  142. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_pipeline.py +0 -0
  143. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_pubsub.py +0 -0
  144. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_scripting.py +0 -0
  145. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/test_sentinel_managed_connection.py +0 -0
  146. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/testdata/jsontestdata.py +0 -0
  147. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/testdata/titles.csv +0 -0
  148. {redis-5.2.0 → redis-5.3.0}/tests/test_asyncio/testdata/will_play_text.csv.bz2 +0 -0
  149. {redis-5.2.0 → redis-5.3.0}/tests/test_bloom.py +0 -0
  150. {redis-5.2.0 → redis-5.3.0}/tests/test_command_parser.py +0 -0
  151. {redis-5.2.0 → redis-5.3.0}/tests/test_encoding.py +0 -0
  152. {redis-5.2.0 → redis-5.3.0}/tests/test_function.py +0 -0
  153. {redis-5.2.0 → redis-5.3.0}/tests/test_helpers.py +0 -0
  154. {redis-5.2.0 → redis-5.3.0}/tests/test_lock.py +0 -0
  155. {redis-5.2.0 → redis-5.3.0}/tests/test_monitor.py +0 -0
  156. {redis-5.2.0 → redis-5.3.0}/tests/test_parsers/test_helpers.py +0 -0
  157. {redis-5.2.0 → redis-5.3.0}/tests/test_pipeline.py +0 -0
  158. {redis-5.2.0 → redis-5.3.0}/tests/test_pubsub.py +0 -0
  159. {redis-5.2.0 → redis-5.3.0}/tests/test_scripting.py +0 -0
  160. {redis-5.2.0 → redis-5.3.0}/tests/test_timeseries.py +0 -0
  161. {redis-5.2.0 → redis-5.3.0}/tests/test_utils.py +0 -0
  162. {redis-5.2.0 → redis-5.3.0}/tests/testdata/jsontestdata.py +0 -0
  163. {redis-5.2.0 → redis-5.3.0}/tests/testdata/titles.csv +0 -0
  164. {redis-5.2.0 → redis-5.3.0}/tests/testdata/will_play_text.csv.bz2 +0 -0
{redis-5.2.0/redis.egg-info → redis-5.3.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: redis
- Version: 5.2.0
+ Version: 5.3.0
  Summary: Python client for Redis database and key-value store
  Home-page: https://github.com/redis/redis-py
  Author: Redis Inc.
{redis-5.2.0 → redis-5.3.0}/redis/_parsers/helpers.py
@@ -396,13 +396,20 @@ def parse_slowlog_get(response, **options):
  # an O(N) complexity) instead of the command.
  if isinstance(item[3], list):
  result["command"] = space.join(item[3])
- result["client_address"] = item[4]
- result["client_name"] = item[5]
+
+ # These fields are optional, depends on environment.
+ if len(item) >= 6:
+ result["client_address"] = item[4]
+ result["client_name"] = item[5]
  else:
  result["complexity"] = item[3]
  result["command"] = space.join(item[4])
- result["client_address"] = item[5]
- result["client_name"] = item[6]
+
+ # These fields are optional, depends on environment.
+ if len(item) >= 7:
+ result["client_address"] = item[5]
+ result["client_name"] = item[6]
+
  return result

  return [parse_item(item) for item in response]
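
Note: the hunk above makes the trailing client_address and client_name fields of a SLOWLOG GET entry optional, since some server builds omit them. A minimal sketch of consuming the parsed entries (the host/port are illustrative, not part of this diff):

    import redis

    r = redis.Redis(host="localhost", port=6379)  # illustrative connection
    for entry in r.slowlog_get(10):
        # client_address / client_name may be absent on servers that omit them
        print(entry["id"], entry["command"], entry.get("client_address"))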
{redis-5.2.0 → redis-5.3.0}/redis/asyncio/client.py
@@ -53,6 +53,13 @@ from redis.commands import (
  list_or_args,
  )
  from redis.credentials import CredentialProvider
+ from redis.event import (
+ AfterPooledConnectionsInstantiationEvent,
+ AfterPubSubConnectionInstantiationEvent,
+ AfterSingleConnectionInstantiationEvent,
+ ClientType,
+ EventDispatcher,
+ )
  from redis.exceptions import (
  ConnectionError,
  ExecAbortError,
@@ -233,6 +240,7 @@ class Redis(
  redis_connect_func=None,
  credential_provider: Optional[CredentialProvider] = None,
  protocol: Optional[int] = 2,
+ event_dispatcher: Optional[EventDispatcher] = None,
  ):
  """
  Initialize a new Redis client.
@@ -242,6 +250,10 @@ class Redis(
  To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
  """
  kwargs: Dict[str, Any]
+ if event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()
+ else:
+ self._event_dispatcher = event_dispatcher
  # auto_close_connection_pool only has an effect if connection_pool is
  # None. It is assumed that if connection_pool is not None, the user
  # wants to manage the connection pool themselves.
@@ -320,9 +332,19 @@ class Redis(
  # This arg only used if no pool is passed in
  self.auto_close_connection_pool = auto_close_connection_pool
  connection_pool = ConnectionPool(**kwargs)
+ self._event_dispatcher.dispatch(
+ AfterPooledConnectionsInstantiationEvent(
+ [connection_pool], ClientType.ASYNC, credential_provider
+ )
+ )
  else:
  # If a pool is passed in, do not close it
  self.auto_close_connection_pool = False
+ self._event_dispatcher.dispatch(
+ AfterPooledConnectionsInstantiationEvent(
+ [connection_pool], ClientType.ASYNC, credential_provider
+ )
+ )

  self.connection_pool = connection_pool
  self.single_connection_client = single_connection_client
@@ -353,7 +375,13 @@ class Redis(
  if self.single_connection_client:
  async with self._single_conn_lock:
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("_")
+ self.connection = await self.connection_pool.get_connection()
+
+ self._event_dispatcher.dispatch(
+ AfterSingleConnectionInstantiationEvent(
+ self.connection, ClientType.ASYNC, self._single_conn_lock
+ )
+ )
  return self

  def set_response_callback(self, command: str, callback: ResponseCallbackT):
@@ -521,7 +549,9 @@ class Redis(
  subscribe to channels and listen for messages that get published to
  them.
  """
- return PubSub(self.connection_pool, **kwargs)
+ return PubSub(
+ self.connection_pool, event_dispatcher=self._event_dispatcher, **kwargs
+ )

  def monitor(self) -> "Monitor":
  return Monitor(self.connection_pool)
@@ -608,7 +638,7 @@ class Redis(
  await self.initialize()
  pool = self.connection_pool
  command_name = args[0]
- conn = self.connection or await pool.get_connection(command_name, **options)
+ conn = self.connection or await pool.get_connection()

  if self.single_connection_client:
  await self._single_conn_lock.acquire()
@@ -682,7 +712,7 @@ class Monitor:

  async def connect(self):
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection("MONITOR")
+ self.connection = await self.connection_pool.get_connection()

  async def __aenter__(self):
  await self.connect()
@@ -759,7 +789,12 @@ class PubSub:
  ignore_subscribe_messages: bool = False,
  encoder=None,
  push_handler_func: Optional[Callable] = None,
+ event_dispatcher: Optional["EventDispatcher"] = None,
  ):
+ if event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()
+ else:
+ self._event_dispatcher = event_dispatcher
  self.connection_pool = connection_pool
  self.shard_hint = shard_hint
  self.ignore_subscribe_messages = ignore_subscribe_messages
@@ -865,9 +900,7 @@ class PubSub:
  Ensure that the PubSub is connected
  """
  if self.connection is None:
- self.connection = await self.connection_pool.get_connection(
- "pubsub", self.shard_hint
- )
+ self.connection = await self.connection_pool.get_connection()
  # register a callback that re-subscribes to any channels we
  # were listening to when we were disconnected
  self.connection.register_connect_callback(self.on_connect)
@@ -876,6 +909,12 @@ class PubSub:
  if self.push_handler_func is not None and not HIREDIS_AVAILABLE:
  self.connection._parser.set_pubsub_push_handler(self.push_handler_func)

+ self._event_dispatcher.dispatch(
+ AfterPubSubConnectionInstantiationEvent(
+ self.connection, self.connection_pool, ClientType.ASYNC, self._lock
+ )
+ )
+
  async def _disconnect_raise_connect(self, conn, error):
  """
  Close the connection and raise an exception
@@ -1329,9 +1368,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  conn = self.connection
  # if this is the first call, we need a connection
  if not conn:
- conn = await self.connection_pool.get_connection(
- command_name, self.shard_hint
- )
+ conn = await self.connection_pool.get_connection()
  self.connection = conn

  return await conn.retry.call_with_retry(
@@ -1513,7 +1550,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]
  await self.reset()
  raise

- async def execute(self, raise_on_error: bool = True):
+ async def execute(self, raise_on_error: bool = True) -> List[Any]:
  """Execute all the commands in the current pipeline"""
  stack = self.command_stack
  if not stack and not self.watching:
@@ -1527,7 +1564,7 @@ class Pipeline(Redis): # lgtm [py/init-calls-subclass]

  conn = self.connection
  if not conn:
- conn = await self.connection_pool.get_connection("MULTI", self.shard_hint)
+ conn = await self.connection_pool.get_connection()
  # assign to self.connection so reset() releases the connection
  # back to the pool after we're done
  self.connection = conn
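
Note: the client changes above thread an optional EventDispatcher through Redis, PubSub and Pipeline, and the call sites now use ConnectionPool.get_connection() without arguments. A minimal usage sketch, assuming a local server (host/port are illustrative):

    import asyncio

    from redis.asyncio import Redis
    from redis.event import EventDispatcher

    async def main():
        # event_dispatcher is optional; a default EventDispatcher is created when omitted
        client = Redis(host="localhost", port=6379, event_dispatcher=EventDispatcher())
        await client.set("greeting", "hello")
        print(await client.get("greeting"))
        await client.aclose()

    asyncio.run(main())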
{redis-5.2.0 → redis-5.3.0}/redis/asyncio/cluster.py
@@ -29,6 +29,7 @@ from redis.asyncio.client import ResponseCallbackT
  from redis.asyncio.connection import Connection, DefaultParser, SSLConnection, parse_url
  from redis.asyncio.lock import Lock
  from redis.asyncio.retry import Retry
+ from redis.auth.token import TokenInterface
  from redis.backoff import default_backoff
  from redis.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractRedis
  from redis.cluster import (
@@ -38,6 +39,7 @@ from redis.cluster import (
  SLOT_ID,
  AbstractRedisCluster,
  LoadBalancer,
+ LoadBalancingStrategy,
  block_pipeline_command,
  get_node_name,
  parse_cluster_slots,
@@ -45,6 +47,7 @@ from redis.cluster import (
  from redis.commands import READ_COMMANDS, AsyncRedisClusterCommands
  from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
  from redis.credentials import CredentialProvider
+ from redis.event import AfterAsyncClusterInstantiationEvent, EventDispatcher
  from redis.exceptions import (
  AskError,
  BusyLoadingError,
@@ -57,6 +60,7 @@ from redis.exceptions import (
  MaxConnectionsError,
  MovedError,
  RedisClusterException,
+ RedisError,
  ResponseError,
  SlotNotCoveredError,
  TimeoutError,
@@ -64,6 +68,7 @@ from redis.exceptions import (
  )
  from redis.typing import AnyKeyT, EncodableT, KeyT
  from redis.utils import (
+ deprecated_args,
  deprecated_function,
  dict_merge,
  get_lib_version,
@@ -130,9 +135,17 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  | See:
  https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
  :param read_from_replicas:
- | Enable read from replicas in READONLY mode. You can read possibly stale data.
+ | @deprecated - please use load_balancing_strategy instead
+ | Enable read from replicas in READONLY mode.
  When set to true, read commands will be assigned between the primary and
  its replications in a Round-Robin manner.
+ The data read from replicas is eventually consistent
+ with the data in primary nodes.
+ :param load_balancing_strategy:
+ | Enable read from replicas in READONLY mode and defines the load balancing
+ strategy that will be used for cluster node selection.
+ The data read from replicas is eventually consistent
+ with the data in primary nodes.
  :param reinitialize_steps:
  | Specifies the number of MOVED errors that need to occur before reinitializing
  the whole cluster topology. If a MOVED error occurs and the cluster does not
@@ -225,6 +238,11 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  "result_callbacks",
  )

+ @deprecated_args(
+ args_to_warn=["read_from_replicas"],
+ reason="Please configure the 'load_balancing_strategy' instead",
+ version="5.3.0",
+ )
  def __init__(
  self,
  host: Optional[str] = None,
@@ -233,6 +251,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  startup_nodes: Optional[List["ClusterNode"]] = None,
  require_full_coverage: bool = True,
  read_from_replicas: bool = False,
+ load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
  reinitialize_steps: int = 5,
  cluster_error_retry_attempts: int = 3,
  connection_error_retry_attempts: int = 3,
@@ -270,6 +289,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  ssl_ciphers: Optional[str] = None,
  protocol: Optional[int] = 2,
  address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
+ event_dispatcher: Optional[EventDispatcher] = None,
  ) -> None:
  if db:
  raise RedisClusterException(
@@ -331,7 +351,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  }
  )

- if read_from_replicas:
+ if read_from_replicas or load_balancing_strategy:
  # Call our on_connect function to configure READONLY mode
  kwargs["redis_connect_func"] = self.on_connect

@@ -366,14 +386,21 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  if host and port:
  startup_nodes.append(ClusterNode(host, port, **self.connection_kwargs))

+ if event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()
+ else:
+ self._event_dispatcher = event_dispatcher
+
  self.nodes_manager = NodesManager(
  startup_nodes,
  require_full_coverage,
  kwargs,
  address_remap=address_remap,
+ event_dispatcher=self._event_dispatcher,
  )
  self.encoder = Encoder(encoding, encoding_errors, decode_responses)
  self.read_from_replicas = read_from_replicas
+ self.load_balancing_strategy = load_balancing_strategy
  self.reinitialize_steps = reinitialize_steps
  self.cluster_error_retry_attempts = cluster_error_retry_attempts
  self.connection_error_retry_attempts = connection_error_retry_attempts
@@ -592,6 +619,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  self.nodes_manager.get_node_from_slot(
  await self._determine_slot(command, *args),
  self.read_from_replicas and command in READ_COMMANDS,
+ self.load_balancing_strategy if command in READ_COMMANDS else None,
  )
  ]

@@ -772,7 +800,13 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # refresh the target node
  slot = await self._determine_slot(*args)
  target_node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and args[0] in READ_COMMANDS
+ slot,
+ self.read_from_replicas and args[0] in READ_COMMANDS,
+ (
+ self.load_balancing_strategy
+ if args[0] in READ_COMMANDS
+ else None
+ ),
  )
  moved = False

@@ -789,10 +823,16 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
  # and try again with the new setup
  await self.aclose()
  raise
- except ClusterDownError:
+ except (ClusterDownError, SlotNotCoveredError):
  # ClusterDownError can occur during a failover and to get
  # self-healed, we will try to reinitialize the cluster layout
  # and retry executing the command
+
+ # SlotNotCoveredError can occur when the cluster is not fully
+ # initialized or can be temporary issue.
+ # We will try to reinitialize the cluster topology
+ # and retry executing the command
+
  await self.aclose()
  await asyncio.sleep(0.25)
  raise
@@ -929,6 +969,8 @@ class ClusterNode:
  __slots__ = (
  "_connections",
  "_free",
+ "_lock",
+ "_event_dispatcher",
  "connection_class",
  "connection_kwargs",
  "host",
@@ -966,6 +1008,9 @@ class ClusterNode:

  self._connections: List[Connection] = []
  self._free: Deque[Connection] = collections.deque(maxlen=self.max_connections)
+ self._event_dispatcher = self.connection_kwargs.get("event_dispatcher", None)
+ if self._event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()

  def __repr__(self) -> str:
  return (
@@ -1082,10 +1127,38 @@ class ClusterNode:

  return ret

+ async def re_auth_callback(self, token: TokenInterface):
+ tmp_queue = collections.deque()
+ while self._free:
+ conn = self._free.popleft()
+ await conn.retry.call_with_retry(
+ lambda: conn.send_command(
+ "AUTH", token.try_get("oid"), token.get_value()
+ ),
+ lambda error: self._mock(error),
+ )
+ await conn.retry.call_with_retry(
+ lambda: conn.read_response(), lambda error: self._mock(error)
+ )
+ tmp_queue.append(conn)
+
+ while tmp_queue:
+ conn = tmp_queue.popleft()
+ self._free.append(conn)
+
+ async def _mock(self, error: RedisError):
+ """
+ Dummy functions, needs to be passed as error callback to retry object.
+ :param error:
+ :return:
+ """
+ pass
+

  class NodesManager:
  __slots__ = (
  "_moved_exception",
+ "_event_dispatcher",
  "connection_kwargs",
  "default_node",
  "nodes_cache",
@@ -1102,6 +1175,7 @@ class NodesManager:
  require_full_coverage: bool,
  connection_kwargs: Dict[str, Any],
  address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
+ event_dispatcher: Optional[EventDispatcher] = None,
  ) -> None:
  self.startup_nodes = {node.name: node for node in startup_nodes}
  self.require_full_coverage = require_full_coverage
@@ -1113,6 +1187,10 @@ class NodesManager:
  self.slots_cache: Dict[int, List["ClusterNode"]] = {}
  self.read_load_balancer = LoadBalancer()
  self._moved_exception: MovedError = None
+ if event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()
+ else:
+ self._event_dispatcher = event_dispatcher

  def get_node(
  self,
@@ -1129,9 +1207,7 @@ class NodesManager:
  return self.nodes_cache.get(node_name)
  else:
  raise DataError(
- "get_node requires one of the following: "
- "1. node name "
- "2. host and port"
+ "get_node requires one of the following: 1. node name 2. host and port"
  )

  def set_nodes(
@@ -1191,17 +1267,24 @@ class NodesManager:
  self._moved_exception = None

  def get_node_from_slot(
- self, slot: int, read_from_replicas: bool = False
+ self,
+ slot: int,
+ read_from_replicas: bool = False,
+ load_balancing_strategy=None,
  ) -> "ClusterNode":
  if self._moved_exception:
  self._update_moved_slots()

+ if read_from_replicas is True and load_balancing_strategy is None:
+ load_balancing_strategy = LoadBalancingStrategy.ROUND_ROBIN
+
  try:
- if read_from_replicas:
- # get the server index in a Round-Robin manner
+ if len(self.slots_cache[slot]) > 1 and load_balancing_strategy:
+ # get the server index using the strategy defined
+ # in load_balancing_strategy
  primary_name = self.slots_cache[slot][0].name
  node_idx = self.read_load_balancer.get_server_index(
- primary_name, len(self.slots_cache[slot])
+ primary_name, len(self.slots_cache[slot]), load_balancing_strategy
  )
  return self.slots_cache[slot][node_idx]
  return self.slots_cache[slot][0]
@@ -1230,6 +1313,12 @@ class NodesManager:
  try:
  # Make sure cluster mode is enabled on this node
  try:
+ self._event_dispatcher.dispatch(
+ AfterAsyncClusterInstantiationEvent(
+ self.nodes_cache,
+ self.connection_kwargs.get("credential_provider", None),
+ )
+ )
  cluster_slots = await startup_node.execute_command("CLUSTER SLOTS")
  except ResponseError:
  raise RedisClusterException(
@@ -1307,7 +1396,7 @@ class NodesManager:
  if len(disagreements) > 5:
  raise RedisClusterException(
  f"startup_nodes could not agree on a valid "
- f'slots cache: {", ".join(disagreements)}'
+ f"slots cache: {', '.join(disagreements)}"
  )

  # Validate if all slots are covered or if we should try next startup node
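
Note: the cluster changes above deprecate read_from_replicas in favour of load_balancing_strategy, which likewise enables READONLY mode on replica connections. A hedged sketch of the new parameter (the node address is illustrative; ROUND_ROBIN is the strategy referenced in the hunk above):

    import asyncio

    from redis.asyncio import RedisCluster
    from redis.cluster import LoadBalancingStrategy

    async def main():
        # replaces read_from_replicas=True; read commands are balanced
        # across the nodes serving the key's slot
        rc = RedisCluster(
            host="localhost",
            port=7000,
            load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN,
        )
        print(await rc.get("key"))
        await rc.aclose()

    asyncio.run(main())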
{redis-5.2.0 → redis-5.3.0}/redis/asyncio/connection.py
@@ -27,7 +27,9 @@ from typing import (
  )
  from urllib.parse import ParseResult, parse_qs, unquote, urlparse

- from ..utils import format_error_message
+ from ..auth.token import TokenInterface
+ from ..event import AsyncAfterConnectionReleasedEvent, EventDispatcher
+ from ..utils import deprecated_args, format_error_message

  # the functionality is available in 3.11.x but has a major issue before
  # 3.11.3. See https://github.com/redis/redis-py/issues/2633
@@ -148,6 +150,7 @@ class AbstractConnection:
  encoder_class: Type[Encoder] = Encoder,
  credential_provider: Optional[CredentialProvider] = None,
  protocol: Optional[int] = 2,
+ event_dispatcher: Optional[EventDispatcher] = None,
  ):
  if (username or password) and credential_provider is not None:
  raise DataError(
@@ -156,6 +159,10 @@ class AbstractConnection:
  "1. 'password' and (optional) 'username'\n"
  "2. 'credential_provider'"
  )
+ if event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()
+ else:
+ self._event_dispatcher = event_dispatcher
  self.db = db
  self.client_name = client_name
  self.lib_name = lib_name
@@ -195,6 +202,8 @@ class AbstractConnection:
  self.set_parser(parser_class)
  self._connect_callbacks: List[weakref.WeakMethod[ConnectCallbackT]] = []
  self._buffer_cutoff = 6000
+ self._re_auth_token: Optional[TokenInterface] = None
+
  try:
  p = int(protocol)
  except TypeError:
@@ -214,7 +223,13 @@ class AbstractConnection:
  _warnings.warn(
  f"unclosed Connection {self!r}", ResourceWarning, source=self
  )
- self._close()
+
+ try:
+ asyncio.get_running_loop()
+ self._close()
+ except RuntimeError:
+ # No actions been taken if pool already closed.
+ pass

  def _close(self):
  """
@@ -321,6 +336,9 @@ class AbstractConnection:
  def _error_message(self, exception: BaseException) -> str:
  return format_error_message(self._host_error(), exception)

+ def get_protocol(self):
+ return self.protocol
+
  async def on_connect(self) -> None:
  """Initialize the connection, authenticate and select a database"""
  self._parser.on_connect(self)
@@ -333,7 +351,8 @@ class AbstractConnection:
  self.credential_provider
  or UsernamePasswordCredentialProvider(self.username, self.password)
  )
- auth_args = cred_provider.get_credentials()
+ auth_args = await cred_provider.get_credentials_async()
+
  # if resp version is specified and we have auth args,
  # we need to send them via HELLO
  if auth_args and self.protocol not in [2, "2"]:
@@ -655,6 +674,19 @@ class AbstractConnection:
  while not self._socket_is_empty():
  await self.read_response(push_request=True)

+ def set_re_auth_token(self, token: TokenInterface):
+ self._re_auth_token = token
+
+ async def re_auth(self):
+ if self._re_auth_token is not None:
+ await self.send_command(
+ "AUTH",
+ self._re_auth_token.try_get("oid"),
+ self._re_auth_token.get_value(),
+ )
+ await self.read_response()
+ self._re_auth_token = None
+

  class Connection(AbstractConnection):
  "Manages TCP communication to and from a Redis server"
@@ -1033,6 +1065,10 @@ class ConnectionPool:
  self._available_connections: List[AbstractConnection] = []
  self._in_use_connections: Set[AbstractConnection] = set()
  self.encoder_class = self.connection_kwargs.get("encoder_class", Encoder)
+ self._lock = asyncio.Lock()
+ self._event_dispatcher = self.connection_kwargs.get("event_dispatcher", None)
+ if self._event_dispatcher is None:
+ self._event_dispatcher = EventDispatcher()

  def __repr__(self):
  return (
@@ -1051,14 +1087,20 @@ class ConnectionPool:
  or len(self._in_use_connections) < self.max_connections
  )

- async def get_connection(self, command_name, *keys, **options):
- """Get a connected connection from the pool"""
- connection = self.get_available_connection()
- try:
- await self.ensure_connection(connection)
- except BaseException:
- await self.release(connection)
- raise
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
+ async with self._lock:
+ """Get a connected connection from the pool"""
+ connection = self.get_available_connection()
+ try:
+ await self.ensure_connection(connection)
+ except BaseException:
+ await self.release(connection)
+ raise

  return connection

@@ -1108,6 +1150,9 @@ class ConnectionPool:
  # not doing so is an error that will cause an exception here.
  self._in_use_connections.remove(connection)
  self._available_connections.append(connection)
+ await self._event_dispatcher.dispatch_async(
+ AsyncAfterConnectionReleasedEvent(connection)
+ )

  async def disconnect(self, inuse_connections: bool = True):
  """
@@ -1141,6 +1186,29 @@ class ConnectionPool:
  for conn in self._in_use_connections:
  conn.retry = retry

+ async def re_auth_callback(self, token: TokenInterface):
+ async with self._lock:
+ for conn in self._available_connections:
+ await conn.retry.call_with_retry(
+ lambda: conn.send_command(
+ "AUTH", token.try_get("oid"), token.get_value()
+ ),
+ lambda error: self._mock(error),
+ )
+ await conn.retry.call_with_retry(
+ lambda: conn.read_response(), lambda error: self._mock(error)
+ )
+ for conn in self._in_use_connections:
+ conn.set_re_auth_token(token)
+
+ async def _mock(self, error: RedisError):
+ """
+ Dummy functions, needs to be passed as error callback to retry object.
+ :param error:
+ :return:
+ """
+ pass
+

  class BlockingConnectionPool(ConnectionPool):
  """
@@ -1192,7 +1260,12 @@ class BlockingConnectionPool(ConnectionPool):
  self._condition = asyncio.Condition()
  self.timeout = timeout

- async def get_connection(self, command_name, *keys, **options):
+ @deprecated_args(
+ args_to_warn=["*"],
+ reason="Use get_connection() without args instead",
+ version="5.3.0",
+ )
+ async def get_connection(self, command_name=None, *keys, **options):
  """Gets a connection from the pool, blocking until one is available"""
  try:
  async with self._condition:
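
Note: ConnectionPool.get_connection() and BlockingConnectionPool.get_connection() now warn when called with the old command_name/keys arguments, and re-auth tokens set on in-use connections are applied when those connections are reused. A small sketch of the argument-less call (the address is illustrative):

    import asyncio

    from redis.asyncio import ConnectionPool

    async def main():
        pool = ConnectionPool(host="localhost", port=6379)  # illustrative address
        conn = await pool.get_connection()  # 5.3.0 form: no command name or keys
        try:
            await conn.send_command("PING")
            print(await conn.read_response())
        finally:
            await pool.release(conn)
            await pool.disconnect()

    asyncio.run(main())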
{redis-5.2.0 → redis-5.3.0}/redis/asyncio/sentinel.py
@@ -29,11 +29,7 @@ class SentinelManagedConnection(Connection):
  super().__init__(**kwargs)

  def __repr__(self):
- pool = self.connection_pool
- s = (
- f"<{self.__class__.__module__}.{self.__class__.__name__}"
- f"(service={pool.service_name}"
- )
+ s = f"<{self.__class__.__module__}.{self.__class__.__name__}"
  if self.host:
  host_info = f",host={self.host},port={self.port}"
  s += host_info