redis 6.3.0__py3-none-any.whl → 7.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
redis/client.py CHANGED
@@ -56,6 +56,10 @@ from redis.exceptions import (
     WatchError,
 )
 from redis.lock import Lock
+from redis.maintenance_events import (
+    MaintenanceEventPoolHandler,
+    MaintenanceEventsConfig,
+)
 from redis.retry import Retry
 from redis.utils import (
     _set_info_logger,
@@ -244,6 +248,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         cache: Optional[CacheInterface] = None,
         cache_config: Optional[CacheConfig] = None,
         event_dispatcher: Optional[EventDispatcher] = None,
+        maintenance_events_config: Optional[MaintenanceEventsConfig] = None,
     ) -> None:
         """
         Initialize a new Redis client.
@@ -368,9 +373,24 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         ]:
             raise RedisError("Client caching is only supported with RESP version 3")
 
-        # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
-        # TODO: Remove this before next major version (7.0.0)
-        self.single_connection_lock = threading.Lock()
+        if maintenance_events_config and self.connection_pool.get_protocol() not in [
+            3,
+            "3",
+        ]:
+            raise RedisError(
+                "Push handlers on connection are only supported with RESP version 3"
+            )
+        if maintenance_events_config and maintenance_events_config.enabled:
+            self.maintenance_events_pool_handler = MaintenanceEventPoolHandler(
+                self.connection_pool, maintenance_events_config
+            )
+            self.connection_pool.set_maintenance_events_pool_handler(
+                self.maintenance_events_pool_handler
+            )
+        else:
+            self.maintenance_events_pool_handler = None
+
+        self.single_connection_lock = threading.RLock()
         self.connection = None
         self._single_connection_client = single_connection_client
         if self._single_connection_client:
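
The hunk above adds an opt-in maintenance-events feature: passing a maintenance_events_config to the Redis constructor requires RESP3 and, when the config is enabled, attaches a MaintenanceEventPoolHandler to the connection pool. A minimal usage sketch inferred from this diff; the exact MaintenanceEventsConfig constructor arguments are an assumption (the diff only shows that the object exposes an .enabled attribute):

    # Hedged sketch: MaintenanceEventsConfig(enabled=True) is assumed from the
    # .enabled check in __init__; adjust to the real constructor signature.
    import redis
    from redis.maintenance_events import MaintenanceEventsConfig

    r = redis.Redis(
        host="localhost",
        port=6379,
        protocol=3,  # the new check raises RedisError unless RESP3 is in use
        maintenance_events_config=MaintenanceEventsConfig(enabled=True),
    )
    r.ping()
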
@@ -567,8 +587,15 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
         return Monitor(self.connection_pool)
 
     def client(self):
+        maintenance_events_config = (
+            None
+            if self.maintenance_events_pool_handler is None
+            else self.maintenance_events_pool_handler.config
+        )
         return self.__class__(
-            connection_pool=self.connection_pool, single_connection_client=True
+            connection_pool=self.connection_pool,
+            single_connection_client=True,
+            maintenance_events_config=maintenance_events_config,
         )
 
     def __enter__(self):
@@ -637,7 +664,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
                 ),
                 lambda _: self._close_connection(conn),
             )
+
         finally:
+            if conn and conn.should_reconnect():
+                self._close_connection(conn)
+                conn.connect()
             if self._single_connection_client:
                 self.single_connection_lock.release()
             if not self.connection:
@@ -688,11 +719,7 @@ class Monitor:
         self.connection = self.connection_pool.get_connection()
 
     def __enter__(self):
-        self.connection.send_command("MONITOR")
-        # check that monitor returns 'OK', but don't return it to user
-        response = self.connection.read_response()
-        if not bool_ok(response):
-            raise RedisError(f"MONITOR failed: {response}")
+        self._start_monitor()
         return self
 
     def __exit__(self, *args):
@@ -702,8 +729,13 @@ class Monitor:
     def next_command(self):
         """Parse the response from a monitor command"""
         response = self.connection.read_response()
+
+        if response is None:
+            return None
+
         if isinstance(response, bytes):
             response = self.connection.encoder.decode(response, force=True)
+
         command_time, command_data = response.split(" ", 1)
         m = self.monitor_re.match(command_data)
         db_id, client_info, command = m.groups()
@@ -739,6 +771,14 @@ class Monitor:
         while True:
             yield self.next_command()
 
+    def _start_monitor(self):
+        self.connection.send_command("MONITOR")
+        # check that monitor returns 'OK', but don't return it to user
+        response = self.connection.read_response()
+
+        if not bool_ok(response):
+            raise RedisError(f"MONITOR failed: {response}")
+
 
 class PubSub:
     """
@@ -776,9 +816,7 @@ class PubSub:
         else:
             self._event_dispatcher = event_dispatcher
 
-        # TODO: To avoid breaking changes during the bug fix, we have to keep non-reentrant lock.
-        # TODO: Remove this before next major version (7.0.0)
-        self._lock = threading.Lock()
+        self._lock = threading.RLock()
         if self.encoder is None:
             self.encoder = self.connection_pool.get_encoder()
         self.health_check_response_b = self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
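
Both Redis.__init__ and PubSub.__init__ replace threading.Lock with threading.RLock. The practical difference is reentrancy: the same thread may re-acquire an RLock it already holds, where a plain Lock would deadlock. A standalone illustration of that property:

    import threading

    lock = threading.RLock()

    def outer():
        with lock:      # first acquisition
            inner()

    def inner():
        with lock:      # same thread re-acquires; a plain Lock would block here
            pass

    outer()  # completes; with threading.Lock() this call would deadlock
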
@@ -885,7 +923,7 @@ class PubSub:
         """
         ttl = 10
         conn = self.connection
-        while self.health_check_response_counter > 0 and ttl > 0:
+        while conn and self.health_check_response_counter > 0 and ttl > 0:
             if self._execute(conn, conn.can_read, timeout=conn.socket_timeout):
                 response = self._execute(conn, conn.read_response)
                 if self.is_health_check_response(response):
@@ -915,11 +953,17 @@ class PubSub:
         called by the # connection to resubscribe us to any channels and
         patterns we were previously listening to
         """
-        return conn.retry.call_with_retry(
+
+        if conn.should_reconnect():
+            self._reconnect(conn)
+
+        response = conn.retry.call_with_retry(
             lambda: command(*args, **kwargs),
             lambda _: self._reconnect(conn),
         )
 
+        return response
+
     def parse_response(self, block=True, timeout=0):
         """Parse the response from a publish/subscribe command"""
         conn = self.connection
@@ -1129,6 +1173,7 @@ class PubSub:
                 return None
 
         response = self.parse_response(block=(timeout is None), timeout=timeout)
+
         if response:
             return self.handle_message(response, ignore_subscribe_messages)
         return None
@@ -1152,6 +1197,7 @@ class PubSub:
             return None
         if isinstance(response, bytes):
             response = [b"pong", response] if response != b"PONG" else [b"pong", b""]
+
         message_type = str_if_bytes(response[0])
         if message_type == "pmessage":
             message = {
@@ -1355,6 +1401,7 @@ class Pipeline(Redis):
         # clean up the other instance attributes
         self.watching = False
         self.explicit_transaction = False
+
         # we can safely return the connection to the pool here since we're
         # sure we're no longer WATCHing anything
         if self.connection:
@@ -1514,6 +1561,7 @@ class Pipeline(Redis):
                 if command_name in self.response_callbacks:
                     r = self.response_callbacks[command_name](r, **options)
             data.append(r)
+
         return data
 
     def _execute_pipeline(self, connection, commands, raise_on_error):
@@ -1521,16 +1569,17 @@ class Pipeline(Redis):
         all_cmds = connection.pack_commands([args for args, _ in commands])
         connection.send_packed_command(all_cmds)
 
-        response = []
+        responses = []
         for args, options in commands:
             try:
-                response.append(self.parse_response(connection, args[0], **options))
+                responses.append(self.parse_response(connection, args[0], **options))
             except ResponseError as e:
-                response.append(e)
+                responses.append(e)
 
         if raise_on_error:
-            self.raise_first_error(commands, response)
-        return response
+            self.raise_first_error(commands, responses)
+
+        return responses
 
     def raise_first_error(self, commands, response):
         for i, r in enumerate(response):
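
The rename from response to responses in _execute_pipeline is cosmetic, but the surrounding code is a reminder that with raise_on_error=False the per-command errors are returned inside the result list rather than raised. For example:

    import redis

    r = redis.Redis(host="localhost", port=6379)
    pipe = r.pipeline(transaction=False)
    pipe.set("k", "v")
    pipe.incr("k")  # fails server-side: "k" does not hold an integer
    results = pipe.execute(raise_on_error=False)
    for res in results:
        if isinstance(res, redis.exceptions.ResponseError):
            print("command failed:", res)  # the error object sits in the list
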
@@ -1615,6 +1664,8 @@ class Pipeline(Redis):
                 lambda error: self._disconnect_raise_on_watching(conn, error),
             )
         finally:
+            # in reset() the connection is disconnected before returned to the pool if
+            # it is marked for reconnect.
             self.reset()
 
     def discard(self):
redis/cluster.py CHANGED
@@ -170,6 +170,7 @@ REDIS_ALLOWED_KEYS = (
     "redis_connect_func",
     "password",
     "port",
+    "timeout",
     "queue_class",
     "retry",
     "retry_on_timeout",
@@ -2716,8 +2717,8 @@ class PipelineStrategy(AbstractStrategy):
 
         If one of the retryable exceptions has been thrown we assume that:
          - connection_pool was disconnected
-         - connection_pool was reseted
-         - refereh_table_asap set to True
+         - connection_pool was reset
+         - refresh_table_asap set to True
 
         It will try the number of times specified by
         the retries in config option "self.retry"
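
The corrected docstring refers to the retry count configured through the cluster's retry option ("retry" also appears in REDIS_ALLOWED_KEYS above). A sketch of supplying that policy with redis-py's Retry and ExponentialBackoff helpers; everything beyond the retry keyword itself is an assumption about the caller's setup:

    from redis.backoff import ExponentialBackoff
    from redis.cluster import RedisCluster
    from redis.retry import Retry

    rc = RedisCluster(
        host="localhost",
        port=7000,
        retry=Retry(ExponentialBackoff(), retries=3),  # attempts on retryable errors
    )
    rc.ping()
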