redis-cluster-client 0.4.3 → 0.7.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 4e29792dd4eb3b7609baa8943b25a385c87dd36767ba679e9c9209be765a670b
- data.tar.gz: 367d7cb722ef026b76c7f200237c1c64a0dced84026c78a43feb55a72eb7accb
+ metadata.gz: 82e7044600bf789f17ab9db7287beb76ebdb8c1b24c3816523da64af6a836038
+ data.tar.gz: ca55b62fc6a4252999f4fd0080cc267bfa31609eb8c9645265072bf374e61231
  SHA512:
- metadata.gz: c3b7eb12f99b928072eeb00f4465afc5be3a3cb07aeabfc821369151f94ebbf12029409317c21b57cb628710e786e1e423b84f543b7430c23d89f71e9fe9339b
- data.tar.gz: c317f99c5bb51be184f4aab415017260237e3614e535565649bb90d79a2b47c7b9edfb07c83b3a96ead85849c7bd97639ce6d60eb1f957df351908ae098f6795
+ metadata.gz: fe4e0e00d82ec9afab61a96265d1f0eb6f580ce6f9b1721df1135b2b298b21c99de46f0f04cba48a5173a299896897de80037e3256ed39f9605c6f51d3c3fd0c
+ data.tar.gz: 956d3d22f40b00c6ec8608d06d33bb85e2ba0af810ffa68a30c51469423c7542eb52f33f71695dfe02a4c2e2577b4f870dd7684c920276adb205bf5df61630bd
@@ -7,6 +7,8 @@ require 'redis_client/cluster/normalized_cmd_name'
  class RedisClient
  class Cluster
  class Command
+ SLOW_COMMAND_TIMEOUT = Float(ENV.fetch('REDIS_CLIENT_SLOW_COMMAND_TIMEOUT', -1))
+
  EMPTY_STRING = ''
  LEFT_BRACKET = '{'
  RIGHT_BRACKET = '}'
@@ -25,7 +27,10 @@ class RedisClient
  cmd = errors = nil

  nodes&.each do |node|
+ regular_timeout = node.read_timeout
+ node.read_timeout = SLOW_COMMAND_TIMEOUT > 0.0 ? SLOW_COMMAND_TIMEOUT : regular_timeout
  reply = node.call('COMMAND')
+ node.read_timeout = regular_timeout
  commands = parse_command_reply(reply)
  cmd = ::RedisClient::Cluster::Command.new(commands)
  break
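The new SLOW_COMMAND_TIMEOUT constant is read from the environment once, when the class file is loaded, and only widens the read timeout around the slow COMMAND metadata fetch; regular traffic keeps the client's normal read_timeout. A minimal tuning sketch, assuming the variable is exported before the library is required (the value is illustrative):

  # Allow slow metadata commands such as COMMAND and CLUSTER NODES up to 10 seconds.
  ENV['REDIS_CLIENT_SLOW_COMMAND_TIMEOUT'] = '10'

  require 'redis_cluster_client'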
@@ -0,0 +1,27 @@
+ # frozen_string_literal: true
+
+ class RedisClient
+ class Cluster
+ module ConcurrentWorker
+ class None
+ def new_group(size:)
+ ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+ worker: self,
+ queue: [],
+ size: size
+ )
+ end
+
+ def push(task)
+ task.exec
+ end
+
+ def close; end
+
+ def inspect
+ "#<#{self.class.name} main thread only>"
+ end
+ end
+ end
+ end
+ end
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ class RedisClient
+ class Cluster
+ module ConcurrentWorker
+ class OnDemand
+ def initialize(size:)
+ @q = SizedQueue.new(size)
+ end
+
+ def new_group(size:)
+ ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+ worker: self,
+ queue: SizedQueue.new(size),
+ size: size
+ )
+ end
+
+ def push(task)
+ @q << spawn_worker(task, @q)
+ end
+
+ def close
+ @q.clear
+ @q.close
+ nil
+ end
+
+ def inspect
+ "#<#{self.class.name} active: #{@q.size}, max: #{@q.max}>"
+ end
+
+ private
+
+ def spawn_worker(task, queue)
+ Thread.new(task, queue) do |t, q|
+ t.exec
+ q.pop
+ end
+ end
+ end
+ end
+ end
+ end
@@ -0,0 +1,72 @@
+ # frozen_string_literal: true
+
+ require 'redis_client/pid_cache'
+
+ class RedisClient
+ class Cluster
+ module ConcurrentWorker
+ # This class is just an experimental implementation.
+ # Ruby VM allocates 1 MB memory as a stack for a thread.
+ # It is a fixed size but we can modify the size with some environment variables.
+ # So it consumes memory 1 MB multiplied a number of workers.
+ class Pooled
+ def initialize(size:)
+ @size = size
+ setup
+ end
+
+ def new_group(size:)
+ reset if @pid != ::RedisClient::PIDCache.pid
+ ensure_workers if @workers.first.nil?
+ ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+ worker: self,
+ queue: SizedQueue.new(size),
+ size: size
+ )
+ end
+
+ def push(task)
+ @q << task
+ end
+
+ def close
+ @q.clear
+ @workers.each { |t| t&.exit }
+ @workers.clear
+ @q.close
+ @pid = nil
+ nil
+ end
+
+ def inspect
+ "#<#{self.class.name} tasks: #{@q.size}, workers: #{@size}>"
+ end
+
+ private
+
+ def setup
+ @q = Queue.new
+ @workers = Array.new(@size)
+ @pid = ::RedisClient::PIDCache.pid
+ end
+
+ def reset
+ close
+ setup
+ end
+
+ def ensure_workers
+ @size.times do |i|
+ @workers[i] = spawn_worker unless @workers[i]&.alive?
+ end
+ end
+
+ def spawn_worker
+ Thread.new(@q) do |q|
+ loop { q.pop.exec }
+ end
+ end
+ end
+ end
+ end
+ end
@@ -0,0 +1,86 @@
+ # frozen_string_literal: true
+
+ require 'redis_client/cluster/concurrent_worker/on_demand'
+ require 'redis_client/cluster/concurrent_worker/pooled'
+ require 'redis_client/cluster/concurrent_worker/none'
+
+ class RedisClient
+ class Cluster
+ module ConcurrentWorker
+ InvalidNumberOfTasks = Class.new(StandardError)
+
+ class Group
+ Task = Struct.new(
+ 'RedisClusterClientConcurrentWorkerTask',
+ :id, :queue, :args, :kwargs, :block, :result,
+ keyword_init: true
+ ) do
+ def exec
+ self[:result] = block&.call(*args, **kwargs)
+ rescue StandardError => e
+ self[:result] = e
+ ensure
+ done
+ end
+
+ def done
+ queue&.push(self)
+ rescue ClosedQueueError
+ # something was wrong
+ end
+ end
+
+ def initialize(worker:, queue:, size:)
+ @worker = worker
+ @queue = queue
+ @size = size
+ @count = 0
+ end
+
+ def push(id, *args, **kwargs, &block)
+ raise InvalidNumberOfTasks, "max size reached: #{@count}" if @count == @size
+
+ task = Task.new(id: id, queue: @queue, args: args, kwargs: kwargs, block: block)
+ @worker.push(task)
+ @count += 1
+ nil
+ end
+
+ def each
+ raise InvalidNumberOfTasks, "expected: #{@size}, actual: #{@count}" if @count != @size
+
+ @size.times do
+ task = @queue.pop
+ yield(task.id, task.result)
+ end
+
+ nil
+ end
+
+ def close
+ @queue.clear
+ @queue.close if @queue.respond_to?(:close)
+ @count = 0
+ nil
+ end
+
+ def inspect
+ "#<#{self.class.name} size: #{@count}, max: #{@size}, worker: #{@worker.class.name}>"
+ end
+ end
+
+ module_function
+
+ def create(model: :on_demand, size: 5)
+ size = size.positive? ? size : 5
+
+ case model
+ when :on_demand, nil then ::RedisClient::Cluster::ConcurrentWorker::OnDemand.new(size: size)
+ when :pooled then ::RedisClient::Cluster::ConcurrentWorker::Pooled.new(size: size)
+ when :none then ::RedisClient::Cluster::ConcurrentWorker::None.new
+ else raise ArgumentError, "Unknown model: #{model}"
+ end
+ end
+ end
+ end
+ end
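The three worker models above share one small contract: new_group, push, and close, with Group collecting results. A minimal usage sketch that drives the code in this hunk directly (the task IDs and block are illustrative):

  require 'redis_client/cluster/concurrent_worker'

  worker = RedisClient::Cluster::ConcurrentWorker.create(model: :on_demand, size: 3)
  group = worker.new_group(size: 3)

  %w[a b c].each { |id| group.push(id, id) { |v| v.upcase } }
  group.each { |id, result| puts "#{id} => #{result}" } # yielded in completion order
  group.close
  worker.close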
@@ -10,14 +10,14 @@ class RedisClient

  attr_reader :replica_clients

- DUMMY_LATENCY_NSEC = 100 * 1000 * 1000 * 1000
+ DUMMY_LATENCY_MSEC = 100 * 1000 * 1000
  MEASURE_ATTEMPT_COUNT = 10

- def initialize(replications, options, pool, **kwargs)
+ def initialize(replications, options, pool, concurrent_worker, **kwargs)
  super

  all_replica_clients = @clients.select { |k, _| @replica_node_keys.include?(k) }
- latencies = measure_latencies(all_replica_clients)
+ latencies = measure_latencies(all_replica_clients, concurrent_worker)
  @replications.each_value { |keys| keys.sort_by! { |k| latencies.fetch(k) } }
  @replica_clients = select_replica_clients(@replications, @clients)
  @clients_for_scanning = select_clients_for_scanning(@replications, @clients)
@@ -39,31 +39,35 @@ class RedisClient

  private

- def measure_latencies(clients) # rubocop:disable Metrics/AbcSize
- clients.each_slice(::RedisClient::Cluster::Node::MAX_THREADS).each_with_object({}) do |chuncked_clients, acc|
- threads = chuncked_clients.map do |k, v|
- Thread.new(k, v) do |node_key, client|
- Thread.current[:node_key] = node_key
-
- min = DUMMY_LATENCY_NSEC
- MEASURE_ATTEMPT_COUNT.times do
- starting = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
- client.call_once('PING')
- duration = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond) - starting
- min = duration if duration < min
- end
-
- Thread.current[:latency] = min
- rescue StandardError
- Thread.current[:latency] = DUMMY_LATENCY_NSEC
+ def measure_latencies(clients, concurrent_worker) # rubocop:disable Metrics/AbcSize
+ return {} if clients.empty?
+
+ work_group = concurrent_worker.new_group(size: clients.size)
+
+ clients.each do |node_key, client|
+ work_group.push(node_key, client) do |cli|
+ min = DUMMY_LATENCY_MSEC
+ MEASURE_ATTEMPT_COUNT.times do
+ starting = obtain_current_time
+ cli.call_once('PING')
+ duration = obtain_current_time - starting
+ min = duration if duration < min
  end
- end

- threads.each do |t|
- t.join
- acc[t[:node_key]] = t[:latency]
+ min
+ rescue StandardError
+ DUMMY_LATENCY_MSEC
  end
  end
+
+ latencies = {}
+ work_group.each { |node_key, v| latencies[node_key] = v }
+ work_group.close
+ latencies
+ end
+
+ def obtain_current_time
+ Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
  end
  end
  def select_replica_clients(replications, clients)
@@ -6,7 +6,7 @@ class RedisClient
  class PrimaryOnly
  attr_reader :clients

- def initialize(replications, options, pool, **kwargs)
+ def initialize(replications, options, pool, _concurrent_worker, **kwargs)
  @primary_node_keys = replications.keys.sort
  @clients = build_clients(@primary_node_keys, options, pool, **kwargs)
  end
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ require 'redis_client/cluster/node/replica_mixin'
+
+ class RedisClient
+ class Cluster
+ class Node
+ class RandomReplicaOrPrimary
+ include ::RedisClient::Cluster::Node::ReplicaMixin
+
+ def replica_clients
+ keys = @replications.values.filter_map(&:sample)
+ @clients.select { |k, _| keys.include?(k) }
+ end
+
+ def clients_for_scanning(seed: nil)
+ random = seed.nil? ? Random : Random.new(seed)
+ keys = @replications.map do |primary_node_key, replica_node_keys|
+ decide_use_primary?(random, replica_node_keys.size) ? primary_node_key : replica_node_keys.sample(random: random)
+ end
+
+ clients.select { |k, _| keys.include?(k) }
+ end
+
+ def find_node_key_of_replica(primary_node_key, seed: nil)
+ random = seed.nil? ? Random : Random.new(seed)
+
+ replica_node_keys = @replications.fetch(primary_node_key, EMPTY_ARRAY)
+ if decide_use_primary?(random, replica_node_keys.size)
+ primary_node_key
+ else
+ replica_node_keys.sample(random: random) || primary_node_key
+ end
+ end
+
+ def any_replica_node_key(seed: nil)
+ random = seed.nil? ? Random : Random.new(seed)
+ @replica_node_keys.sample(random: random) || any_primary_node_key(seed: seed)
+ end
+
+ private
+
+ # Randomly equally likely choose node to read between primary and all replicas
+ # e.g. 1 primary + 1 replica = 50% probability to read from primary
+ # e.g. 1 primary + 2 replica = 33% probability to read from primary
+ # e.g. 1 primary + 0 replica = 100% probability to read from primary
+ def decide_use_primary?(random, replica_nodes)
+ primary_nodes = 1.0
+ total = primary_nodes + replica_nodes
+ random.rand < primary_nodes / total
+ end
+ end
+ end
+ end
+ end
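The decide_use_primary? comment can be sanity-checked with a small simulation; this is a hypothetical illustration, not part of the gem:

  # Each node in a shard (1 primary + N replicas) is picked with equal probability,
  # so the primary serves roughly 1 / (1 + N) of the reads.
  replicas = 2
  samples = 100_000
  primary_reads = samples.times.count { Random.rand < 1.0 / (1 + replicas) }
  puts primary_reads.fdiv(samples) # ~0.33 for 1 primary + 2 replicas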
@@ -8,7 +8,7 @@ class RedisClient

  EMPTY_ARRAY = [].freeze

- def initialize(replications, options, pool, **kwargs)
+ def initialize(replications, options, pool, _concurrent_worker, **kwargs)
  @replications = replications
  @primary_node_keys = @replications.keys.sort
  @replica_node_keys = @replications.values.flatten.sort
@@ -24,12 +24,12 @@ class RedisClient
  private

  def build_clients(primary_node_keys, options, pool, **kwargs)
- options.filter_map do |node_key, option|
+ options.to_h do |node_key, option|
  option = option.merge(kwargs.reject { |k, _| ::RedisClient::Cluster::Node::IGNORE_GENERIC_CONFIG_KEYS.include?(k) })
  config = ::RedisClient::Cluster::Node::Config.new(scale_read: !primary_node_keys.include?(node_key), **option)
  client = pool.nil? ? config.new_client : config.new_pool(**pool)
  [node_key, client]
- end.to_h
+ end
  end
  end
  end
@@ -5,6 +5,7 @@ require 'redis_client/config'
  require 'redis_client/cluster/errors'
  require 'redis_client/cluster/node/primary_only'
  require 'redis_client/cluster/node/random_replica'
+ require 'redis_client/cluster/node/random_replica_or_primary'
  require 'redis_client/cluster/node/latency_replica'

  class RedisClient
@@ -12,11 +13,18 @@ class RedisClient
  class Node
  include Enumerable

+ # It affects to strike a balance between load and stability in initialization or changed states.
+ MAX_STARTUP_SAMPLE = Integer(ENV.fetch('REDIS_CLIENT_MAX_STARTUP_SAMPLE', 3))
+
+ # It's used with slow queries of fetching meta data like CLUSTER NODES, COMMAND and so on.
+ SLOW_COMMAND_TIMEOUT = Float(ENV.fetch('REDIS_CLIENT_SLOW_COMMAND_TIMEOUT', -1))
+
+ # less memory consumption, but slow
+ USE_CHAR_ARRAY_SLOT = Integer(ENV.fetch('REDIS_CLIENT_USE_CHAR_ARRAY_SLOT', 1)) == 1
+
  SLOT_SIZE = 16_384
  MIN_SLOT = 0
  MAX_SLOT = SLOT_SIZE - 1
- MAX_STARTUP_SAMPLE = 37
- MAX_THREADS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
  IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
  DEAD_FLAGS = %w[fail? fail handshake noaddr noflags].freeze
  ROLE_FLAGS = %w[master slave].freeze
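These constants are evaluated from the environment at load time. A hedged tuning sketch (values are illustrative and must be set before the library is required):

  # Probe more nodes during startup on a large cluster, and trade the compact
  # char-array slot table for a plain Array (more memory, faster lookups).
  ENV['REDIS_CLIENT_MAX_STARTUP_SAMPLE'] = '5'
  ENV['REDIS_CLIENT_USE_CHAR_ARRAY_SLOT'] = '0'

  require 'redis_cluster_client'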
@@ -88,36 +96,43 @@ class RedisClient
  end

  class << self
- def load_info(options, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+ def load_info(options, concurrent_worker, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+ raise ::RedisClient::Cluster::InitialSetupError, [] if options.nil? || options.empty?
+
  startup_size = options.size > MAX_STARTUP_SAMPLE ? MAX_STARTUP_SAMPLE : options.size
- node_info_list = errors = nil
- startup_options = options.to_a.sample(MAX_STARTUP_SAMPLE).to_h
- startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, **kwargs)
- startup_nodes.each_slice(MAX_THREADS).with_index do |chuncked_startup_nodes, chuncked_idx|
- threads = chuncked_startup_nodes.each_with_index.map do |raw_client, idx|
- Thread.new(raw_client, (MAX_THREADS * chuncked_idx) + idx) do |cli, i|
- Thread.current[:index] = i
- reply = cli.call('CLUSTER', 'NODES')
- Thread.current[:info] = parse_cluster_node_reply(reply)
- rescue StandardError => e
- Thread.current[:error] = e
- ensure
- cli&.close
- end
+ startup_options = options.to_a.sample(startup_size).to_h
+ startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, concurrent_worker, **kwargs)
+ work_group = concurrent_worker.new_group(size: startup_size)
+
+ startup_nodes.each_with_index do |raw_client, i|
+ work_group.push(i, raw_client) do |client|
+ regular_timeout = client.read_timeout
+ client.read_timeout = SLOW_COMMAND_TIMEOUT > 0.0 ? SLOW_COMMAND_TIMEOUT : regular_timeout
+ reply = client.call('CLUSTER', 'NODES')
+ client.read_timeout = regular_timeout
+ parse_cluster_node_reply(reply)
+ rescue StandardError => e
+ e
+ ensure
+ client&.close
  end
+ end
+
+ node_info_list = errors = nil

- threads.each do |t|
- t.join
- if t.key?(:info)
- node_info_list ||= Array.new(startup_size)
- node_info_list[t[:index]] = t[:info]
- elsif t.key?(:error)
- errors ||= Array.new(startup_size)
- errors[t[:index]] = t[:error]
- end
+ work_group.each do |i, v|
+ case v
+ when StandardError
+ errors ||= Array.new(startup_size)
+ errors[i] = v
+ else
+ node_info_list ||= Array.new(startup_size)
+ node_info_list[i] = v
  end
  end

+ work_group.close
+
  raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?

  grouped = node_info_list.compact.group_by do |info_list|
@@ -180,6 +195,7 @@ class RedisClient

  def initialize(
  options,
+ concurrent_worker,
  node_info_list: [],
  with_replica: false,
  replica_affinity: :random,
@@ -187,9 +203,11 @@ class RedisClient
  **kwargs
  )

+ @concurrent_worker = concurrent_worker
  @slots = build_slot_node_mappings(node_info_list)
  @replications = build_replication_mappings(node_info_list)
- @topology = make_topology_class(with_replica, replica_affinity).new(@replications, options, pool, **kwargs)
+ klass = make_topology_class(with_replica, replica_affinity)
+ @topology = klass.new(@replications, options, pool, @concurrent_worker, **kwargs)
  @mutex = Mutex.new
  end

@@ -240,6 +258,10 @@ class RedisClient
  @topology.clients_for_scanning(seed: seed).values.sort_by { |c| "#{c.config.host}-#{c.config.port}" }
  end

+ def replica_clients
+ @topology.replica_clients.values
+ end
+
  def find_node_key_of_primary(slot)
  return if slot.nil?

@@ -278,6 +300,8 @@ class RedisClient
  def make_topology_class(with_replica, replica_affinity)
  if with_replica && replica_affinity == :random
  ::RedisClient::Cluster::Node::RandomReplica
+ elsif with_replica && replica_affinity == :random_with_primary
+ ::RedisClient::Cluster::Node::RandomReplicaOrPrimary
  elsif with_replica && replica_affinity == :latency
  ::RedisClient::Cluster::Node::LatencyReplica
  else
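The new :random_with_primary affinity selects the RandomReplicaOrPrimary topology shown earlier. A configuration sketch, assuming the RedisClient.cluster helper from this gem (the node URL is a placeholder):

  config = RedisClient.cluster(
    nodes: 'redis://127.0.0.1:6379',
    replica: true,
    replica_affinity: :random_with_primary # reads spread over the primary and its replicas
  )
  client = config.new_client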
@@ -297,7 +321,7 @@ class RedisClient
  end

  def make_array_for_slot_node_mappings(node_info_list)
- return Array.new(SLOT_SIZE) if node_info_list.count(&:primary?) > 256
+ return Array.new(SLOT_SIZE) if !USE_CHAR_ARRAY_SLOT || node_info_list.count(&:primary?) > 256

  primary_node_keys = node_info_list.select(&:primary?).map(&:node_key)
  ::RedisClient::Cluster::Node::CharArray.new(SLOT_SIZE, primary_node_keys)
@@ -327,31 +351,34 @@ class RedisClient
  raise ::RedisClient::Cluster::ErrorCollection, errors
  end

- def try_map(clients) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
- results = errors = nil
- clients.each_slice(MAX_THREADS) do |chuncked_clients|
- threads = chuncked_clients.map do |k, v|
- Thread.new(k, v) do |node_key, client|
- Thread.current[:node_key] = node_key
- reply = yield(node_key, client)
- Thread.current[:result] = reply
- rescue StandardError => e
- Thread.current[:error] = e
- end
+ def try_map(clients, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity
+ return [{}, {}] if clients.empty?
+
+ work_group = @concurrent_worker.new_group(size: clients.size)
+
+ clients.each do |node_key, client|
+ work_group.push(node_key, node_key, client, block) do |nk, cli, blk|
+ blk.call(nk, cli)
+ rescue StandardError => e
+ e
  end
+ end

- threads.each do |t|
- t.join
- if t.key?(:result)
- results ||= {}
- results[t[:node_key]] = t[:result]
- elsif t.key?(:error)
- errors ||= {}
- errors[t[:node_key]] = t[:error]
- end
+ results = errors = nil
+
+ work_group.each do |node_key, v|
+ case v
+ when StandardError
+ errors ||= {}
+ errors[node_key] = v
+ else
+ results ||= {}
+ results[node_key] = v
  end
  end

+ work_group.close
+
  [results, errors]
  end
  end
@@ -95,11 +95,10 @@ class RedisClient
  attr_accessor :replies, :indices
  end

- MAX_THREADS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
-
- def initialize(router, command_builder, seed: Random.new_seed)
+ def initialize(router, command_builder, concurrent_worker, seed: Random.new_seed)
  @router = router
  @command_builder = command_builder
+ @concurrent_worker = concurrent_worker
  @seed = seed
  @pipelines = nil
  @size = 0
@@ -146,42 +145,39 @@ class RedisClient
  end

  def execute # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
- all_replies = errors = nil
- @pipelines&.each_slice(MAX_THREADS) do |chuncked_pipelines|
- threads = chuncked_pipelines.map do |node_key, pipeline|
- Thread.new(node_key, pipeline) do |nk, pl|
- Thread.current[:node_key] = nk
- replies = do_pipelining(@router.find_node(nk), pl)
- raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
-
- Thread.current[:replies] = replies
- rescue ::RedisClient::Cluster::Pipeline::RedirectionNeeded => e
- Thread.current[:redirection_needed] = e
- rescue StandardError => e
- Thread.current[:error] = e
- end
+ return if @pipelines.nil? || @pipelines.empty?
+
+ work_group = @concurrent_worker.new_group(size: @pipelines.size)
+
+ @pipelines.each do |node_key, pipeline|
+ work_group.push(node_key, @router.find_node(node_key), pipeline) do |cli, pl|
+ replies = do_pipelining(cli, pl)
+ raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
+
+ replies
  end
+ end

- threads.each(&:join)
- threads.each do |t|
- if t.key?(:replies)
- all_replies ||= Array.new(@size)
- @pipelines[t[:node_key]]
- .outer_indices
- .each_with_index { |outer, inner| all_replies[outer] = t[:replies][inner] }
- elsif t.key?(:redirection_needed)
- all_replies ||= Array.new(@size)
- pipeline = @pipelines[t[:node_key]]
- err = t[:redirection_needed]
- err.indices.each { |i| err.replies[i] = handle_redirection(err.replies[i], pipeline, i) }
- pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = err.replies[inner] }
- elsif t.key?(:error)
- errors ||= {}
- errors[t[:node_key]] = t[:error]
- end
+ all_replies = errors = nil
+
+ work_group.each do |node_key, v|
+ case v
+ when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
+ all_replies ||= Array.new(@size)
+ pipeline = @pipelines[node_key]
+ v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
+ pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+ when StandardError
+ errors ||= {}
+ errors[node_key] = v
+ else
+ all_replies ||= Array.new(@size)
+ @pipelines[node_key].outer_indices.each_with_index { |outer, inner| all_replies[outer] = v[inner] }
  end
  end

+ work_group.close
+
  raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?

  all_replies
@@ -1,35 +1,143 @@
  # frozen_string_literal: true

+ require 'redis_client'
+ require 'redis_client/cluster/normalized_cmd_name'
+
  class RedisClient
  class Cluster
  class PubSub
+ class State
+ def initialize(client, queue)
+ @client = client
+ @worker = nil
+ @queue = queue
+ end
+
+ def call(command)
+ @client.call_v(command)
+ end
+
+ def ensure_worker
+ @worker = spawn_worker(@client, @queue) unless @worker&.alive?
+ end
+
+ def close
+ @worker.exit if @worker&.alive?
+ @client.close
+ end
+
+ private
+
+ def spawn_worker(client, queue)
+ # Ruby VM allocates 1 MB memory as a stack for a thread.
+ # It is a fixed size but we can modify the size with some environment variables.
+ # So it consumes memory 1 MB multiplied a number of workers.
+ Thread.new(client, queue) do |pubsub, q|
+ loop do
+ q << pubsub.next_event
+ rescue StandardError => e
+ q << e
+ end
+ end
+ end
+ end
+
+ BUF_SIZE = Integer(ENV.fetch('REDIS_CLIENT_PUBSUB_BUF_SIZE', 1024))
+
  def initialize(router, command_builder)
  @router = router
  @command_builder = command_builder
- @pubsub = nil
+ @queue = SizedQueue.new(BUF_SIZE)
+ @state_dict = {}
  end

  def call(*args, **kwargs)
- close
- command = @command_builder.generate(args, kwargs)
- @pubsub = @router.assign_node(command).pubsub
- @pubsub.call_v(command)
+ _call(@command_builder.generate(args, kwargs))
+ nil
  end

  def call_v(command)
- close
- command = @command_builder.generate(command)
- @pubsub = @router.assign_node(command).pubsub
- @pubsub.call_v(command)
+ _call(@command_builder.generate(command))
+ nil
  end

  def close
- @pubsub&.close
- @pubsub = nil
+ @state_dict.each_value(&:close)
+ @state_dict.clear
+ @queue.clear
+ @queue.close
+ nil
  end

  def next_event(timeout = nil)
- @pubsub&.next_event(timeout)
+ @state_dict.each_value(&:ensure_worker)
+ max_duration = calc_max_duration(timeout)
+ starting = obtain_current_time
+
+ loop do
+ break if max_duration > 0 && obtain_current_time - starting > max_duration
+
+ case event = @queue.pop(true)
+ when StandardError then raise event
+ when Array then break event
+ end
+ rescue ThreadError
+ sleep 0.005
+ end
+ end
+
+ private
+
+ def _call(command)
+ case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
+ when 'subscribe', 'psubscribe', 'ssubscribe' then call_to_single_state(command)
+ when 'unsubscribe', 'punsubscribe' then call_to_all_states(command)
+ when 'sunsubscribe' then call_for_sharded_states(command)
+ else call_to_single_state(command)
+ end
+ end
+
+ def call_to_single_state(command)
+ node_key = @router.find_node_key(command)
+ try_call(node_key, command)
+ end
+
+ def call_to_all_states(command)
+ @state_dict.each_value { |s| s.call(command) }
+ end
+
+ def call_for_sharded_states(command)
+ if command.size == 1
+ call_to_all_states(command)
+ else
+ call_to_single_state(command)
+ end
+ end
+
+ def try_call(node_key, command, retry_count: 1)
+ add_state(node_key).call(command)
+ rescue ::RedisClient::CommandError => e
+ raise if !e.message.start_with?('MOVED') || retry_count <= 0
+
+ # for sharded pub/sub
+ node_key = e.message.split[2]
+ retry_count -= 1
+ retry
+ end
+
+ def add_state(node_key)
+ return @state_dict[node_key] if @state_dict.key?(node_key)
+
+ state = State.new(@router.find_node(node_key).pubsub, @queue)
+ @state_dict[node_key] = state
+ end
+
+ def obtain_current_time
+ Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
+ end
+
+ def calc_max_duration(timeout)
+ timeout.nil? || timeout < 0 ? 0 : timeout * 1_000_000
  end
  end
  end
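The reworked PubSub keeps one connection and one reader thread per node and funnels every event into a single queue, which is what allows subscriptions that span nodes, including sharded channels. A rough usage sketch, assuming cluster_client is a RedisClient::Cluster instance and the channel names are placeholders:

  ps = cluster_client.pubsub
  ps.call('SSUBSCRIBE', 'chat:{room1}')
  ps.call('SSUBSCRIBE', 'chat:{room2}') # may be served by a different node

  loop do
    kind, channel, message = ps.next_event(0.1) # returns nil on timeout
    puts "#{channel}: #{message}" if kind == 'smessage'
  end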
@@ -16,14 +16,13 @@ class RedisClient
  METHODS_FOR_BLOCKING_CMD = %i[blocking_call_v blocking_call].freeze
  TSF = ->(f, x) { f.nil? ? x : f.call(x) }.curry

- attr_reader :node
-
- def initialize(config, pool: nil, **kwargs)
+ def initialize(config, concurrent_worker, pool: nil, **kwargs)
  @config = config.dup
+ @concurrent_worker = concurrent_worker
  @pool = pool
  @client_kwargs = kwargs
- @node = fetch_cluster_info(@config, pool: @pool, **@client_kwargs)
- @command = ::RedisClient::Cluster::Command.load(@node)
+ @node = fetch_cluster_info(@config, @concurrent_worker, pool: @pool, **@client_kwargs)
+ @command = ::RedisClient::Cluster::Command.load(@node.replica_clients.shuffle)
  @mutex = Mutex.new
  @command_builder = @config.command_builder
  end
@@ -180,6 +179,12 @@ class RedisClient
  end
  end

+ def find_primary_node_key(command)
+ key = @command.extract_first_key(command)
+ slot = key.empty? ? nil : ::RedisClient::Cluster::KeySlotConverter.convert(key)
+ @node.find_node_key_of_primary(slot)
+ end
+
  def find_node(node_key, retry_count: 3)
  @node.find_by(node_key)
  rescue ::RedisClient::Cluster::Node::ReloadNeeded
@@ -206,6 +211,14 @@ class RedisClient
  find_node(node_key)
  end

+ def node_keys
+ @node.node_keys
+ end
+
+ def close
+ @node.each(&:close)
+ end
+
  private

  def send_wait_command(method, command, args, retry_count: 3, &block) # rubocop:disable Metrics/AbcSize
@@ -275,21 +288,29 @@ class RedisClient

  def send_pubsub_command(method, command, args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
  case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
- when 'channels' then @node.call_all(method, command, args).flatten.uniq.sort_by(&:to_s).then(&TSF.call(block))
+ when 'channels'
+ @node.call_all(method, command, args).flatten.uniq.sort_by(&:to_s).then(&TSF.call(block))
+ when 'shardchannels'
+ @node.call_replicas(method, command, args).flatten.uniq.sort_by(&:to_s).then(&TSF.call(block))
+ when 'numpat'
+ @node.call_all(method, command, args).select { |e| e.is_a?(Integer) }.sum.then(&TSF.call(block))
  when 'numsub'
  @node.call_all(method, command, args).reject(&:empty?).map { |e| Hash[*e] }
  .reduce({}) { |a, e| a.merge(e) { |_, v1, v2| v1 + v2 } }.then(&TSF.call(block))
- when 'numpat' then @node.call_all(method, command, args).select { |e| e.is_a?(Integer) }.sum.then(&TSF.call(block))
+ when 'shardnumsub'
+ @node.call_replicas(method, command, args).reject(&:empty?).map { |e| Hash[*e] }
+ .reduce({}) { |a, e| a.merge(e) { |_, v1, v2| v1 + v2 } }.then(&TSF.call(block))
  else assign_node(command).public_send(method, *args, command, &block)
  end
  end

- def fetch_cluster_info(config, pool: nil, **kwargs)
- node_info_list = ::RedisClient::Cluster::Node.load_info(config.per_node_key, **kwargs)
+ def fetch_cluster_info(config, concurrent_worker, pool: nil, **kwargs)
+ node_info_list = ::RedisClient::Cluster::Node.load_info(config.per_node_key, concurrent_worker, **kwargs)
  node_addrs = node_info_list.map { |i| ::RedisClient::Cluster::NodeKey.hashify(i.node_key) }
  config.update_node(node_addrs)
  ::RedisClient::Cluster::Node.new(
  config.per_node_key,
+ concurrent_worker,
  node_info_list: node_info_list,
  pool: pool,
  with_replica: config.use_replica?,
@@ -308,7 +329,7 @@ class RedisClient
  # ignore
  end

- @node = fetch_cluster_info(@config, pool: @pool, **@client_kwargs)
+ @node = fetch_cluster_info(@config, @concurrent_worker, pool: @pool, **@client_kwargs)
  end
  end
  end
@@ -0,0 +1,57 @@
+ # frozen_string_literal: true
+
+ require 'redis_client'
+
+ class RedisClient
+ class Cluster
+ class Transaction
+ ConsistencyError = Class.new(::RedisClient::Error)
+
+ def initialize(router, command_builder)
+ @router = router
+ @command_builder = command_builder
+ @node_key = nil
+ end
+
+ def call(*command, **kwargs, &_)
+ command = @command_builder.generate(command, kwargs)
+ ensure_node_key(command)
+ end
+
+ def call_v(command, &_)
+ command = @command_builder.generate(command)
+ ensure_node_key(command)
+ end
+
+ def call_once(*command, **kwargs, &_)
+ command = @command_builder.generate(command, kwargs)
+ ensure_node_key(command)
+ end
+
+ def call_once_v(command, &_)
+ command = @command_builder.generate(command)
+ ensure_node_key(command)
+ end
+
+ def execute(watch: nil, &block)
+ yield self
+ raise ArgumentError, 'empty transaction' if @node_key.nil?
+
+ node = @router.find_node(@node_key)
+ @router.try_delegate(node, :multi, watch: watch, &block)
+ end
+
+ private
+
+ def ensure_node_key(command)
+ node_key = @router.find_primary_node_key(command)
+ raise ConsistencyError, "Client couldn't determine the node to be executed the transaction by: #{command}" if node_key.nil?
+
+ @node_key ||= node_key
+ raise ConsistencyError, "The transaction should be done for single node: #{@node_key}, #{node_key}" if node_key != @node_key
+
+ nil
+ end
+ end
+ end
+ end
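Every command in the transaction must resolve to the same primary, so keys typically share a hash tag; otherwise ensure_node_key raises ConsistencyError. A usage sketch, assuming cluster_client is a RedisClient::Cluster instance (keys are placeholders):

  # Both keys carry the {user1} hash tag, so they map to the same slot and node.
  cluster_client.multi(watch: ['{user1}:balance']) do |tx|
    tx.call('INCRBY', '{user1}:balance', -100)
    tx.call('INCRBY', '{user1}:fees', 1)
  end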
@@ -1,8 +1,10 @@
  # frozen_string_literal: true

+ require 'redis_client/cluster/concurrent_worker'
  require 'redis_client/cluster/pipeline'
  require 'redis_client/cluster/pub_sub'
  require 'redis_client/cluster/router'
+ require 'redis_client/cluster/transaction'

  class RedisClient
  class Cluster
@@ -10,14 +12,15 @@ class RedisClient

  attr_reader :config

- def initialize(config, pool: nil, **kwargs)
+ def initialize(config, pool: nil, concurrency: nil, **kwargs)
  @config = config
- @router = ::RedisClient::Cluster::Router.new(config, pool: pool, **kwargs)
+ @concurrent_worker = ::RedisClient::Cluster::ConcurrentWorker.create(**(concurrency || {}))
+ @router = ::RedisClient::Cluster::Router.new(config, @concurrent_worker, pool: pool, **kwargs)
  @command_builder = config.command_builder
  end

  def inspect
- "#<#{self.class.name} #{@router.node.node_keys.join(', ')}>"
+ "#<#{self.class.name} #{@router.node_keys.join(', ')}>"
  end

  def call(*args, **kwargs, &block)
@@ -79,19 +82,24 @@ class RedisClient

  def pipelined
  seed = @config.use_replica? && @config.replica_affinity == :random ? nil : Random.new_seed
- pipeline = ::RedisClient::Cluster::Pipeline.new(@router, @command_builder, seed: seed)
+ pipeline = ::RedisClient::Cluster::Pipeline.new(@router, @command_builder, @concurrent_worker, seed: seed)
  yield pipeline
  return [] if pipeline.empty?

  pipeline.execute
  end

+ def multi(watch: nil, &block)
+ ::RedisClient::Cluster::Transaction.new(@router, @command_builder).execute(watch: watch, &block)
+ end
+
  def pubsub
  ::RedisClient::Cluster::PubSub.new(@router, @command_builder)
  end

  def close
- @router.node.each(&:close)
+ @concurrent_worker.close
+ @router.close
  nil
  end

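The pipelining entry point keeps its shape but now hands the shared worker to the Pipeline. A short usage sketch, assuming cluster_client is a RedisClient::Cluster instance (keys and values are placeholders):

  replies = cluster_client.pipelined do |pipe|
    pipe.call('SET', 'key1', 'a')
    pipe.call('SET', 'key2', 'b')
    pipe.call('GET', 'key1')
  end
  # => ["OK", "OK", "a"]; replies keep the order in which commands were queued

  cluster_client.close # closes node connections and shuts the concurrent worker down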
@@ -17,6 +17,7 @@ class RedisClient
  VALID_NODES_KEYS = %i[ssl username password host port db].freeze
  MERGE_CONFIG_KEYS = %i[ssl username password].freeze
  IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
+ MAX_WORKERS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))

  InvalidClientConfigError = Class.new(::RedisClient::Error)

@@ -27,7 +28,8 @@ class RedisClient
  replica: false,
  replica_affinity: :random,
  fixed_hostname: '',
- client_implementation: Cluster,
+ concurrency: nil,
+ client_implementation: ::RedisClient::Cluster, # for redis gem
  **client_config
  )

@@ -38,6 +40,7 @@ class RedisClient
  client_config = client_config.reject { |k, _| IGNORE_GENERIC_CONFIG_KEYS.include?(k) }
  @command_builder = client_config.fetch(:command_builder, ::RedisClient::CommandBuilder)
  @client_config = merge_generic_config(client_config, @node_configs)
+ @concurrency = merge_concurrency_option(concurrency)
  @client_implementation = client_implementation
  @mutex = Mutex.new
  end
@@ -48,6 +51,7 @@ class RedisClient
  replica: @replica,
  replica_affinity: @replica_affinity,
  fixed_hostname: @fixed_hostname,
+ concurrency: @concurrency,
  client_implementation: @client_implementation,
  **@client_config
  )
@@ -58,15 +62,20 @@ class RedisClient
  end

  def read_timeout
- @client_config[:read_timeout] || @client_config[:timeout] || RedisClient::Config::DEFAULT_TIMEOUT
+ @client_config[:read_timeout] || @client_config[:timeout] || ::RedisClient::Config::DEFAULT_TIMEOUT
  end

  def new_pool(size: 5, timeout: 5, **kwargs)
- @client_implementation.new(self, pool: { size: size, timeout: timeout }, **kwargs)
+ @client_implementation.new(
+ self,
+ pool: { size: size, timeout: timeout },
+ concurrency: @concurrency,
+ **kwargs
+ )
  end

  def new_client(**kwargs)
- @client_implementation.new(self, **kwargs)
+ @client_implementation.new(self, concurrency: @concurrency, **kwargs)
  end

  def per_node_key
@@ -96,6 +105,15 @@ class RedisClient

  private

+ def merge_concurrency_option(option)
+ case option
+ when Hash
+ option = option.transform_keys(&:to_sym)
+ { size: MAX_WORKERS }.merge(option)
+ else { size: MAX_WORKERS }
+ end
+ end
+
  def build_node_configs(addrs)
  configs = Array[addrs].flatten.filter_map { |addr| parse_node_addr(addr) }
  raise InvalidClientConfigError, '`nodes` option is empty' if configs.empty?
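merge_concurrency_option fills in a default size (MAX_WORKERS) and the result is forwarded to ConcurrentWorker.create through the client constructors. A configuration sketch, assuming the RedisClient.cluster helper from this gem (host and sizes are placeholders):

  require 'redis_cluster_client'

  config = RedisClient.cluster(
    nodes: 'redis://node1:6379',
    concurrency: { model: :pooled, size: 10 } # or { model: :on_demand } / { model: :none }
  )
  client = config.new_client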
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: redis-cluster-client
  version: !ruby/object:Gem::Version
- version: 0.4.3
+ version: 0.7.4
  platform: ruby
  authors:
  - Taishi Kasuga
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-03-08 00:00:00.000000000 Z
+ date: 2023-10-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: redis-client
@@ -34,18 +34,24 @@ files:
  - lib/redis-cluster-client.rb
  - lib/redis_client/cluster.rb
  - lib/redis_client/cluster/command.rb
+ - lib/redis_client/cluster/concurrent_worker.rb
+ - lib/redis_client/cluster/concurrent_worker/none.rb
+ - lib/redis_client/cluster/concurrent_worker/on_demand.rb
+ - lib/redis_client/cluster/concurrent_worker/pooled.rb
  - lib/redis_client/cluster/errors.rb
  - lib/redis_client/cluster/key_slot_converter.rb
  - lib/redis_client/cluster/node.rb
  - lib/redis_client/cluster/node/latency_replica.rb
  - lib/redis_client/cluster/node/primary_only.rb
  - lib/redis_client/cluster/node/random_replica.rb
+ - lib/redis_client/cluster/node/random_replica_or_primary.rb
  - lib/redis_client/cluster/node/replica_mixin.rb
  - lib/redis_client/cluster/node_key.rb
  - lib/redis_client/cluster/normalized_cmd_name.rb
  - lib/redis_client/cluster/pipeline.rb
  - lib/redis_client/cluster/pub_sub.rb
  - lib/redis_client/cluster/router.rb
+ - lib/redis_client/cluster/transaction.rb
  - lib/redis_client/cluster_config.rb
  - lib/redis_cluster_client.rb
  homepage: https://github.com/redis-rb/redis-cluster-client
@@ -69,7 +75,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.6
+ rubygems_version: 3.4.19
  signing_key:
  specification_version: 4
  summary: A Redis cluster client for Ruby