redis-cluster-client 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ac7d6ca1474a42ad2d4027de72dbd087f6d248b3678ad2e360ed55b56dc60790
-  data.tar.gz: ce4de83f3f601772df8c95381a65172245967bd9568ff7959aec5858e55fb7af
+  metadata.gz: aa2c8f0e32f5e18ea48cd9ceffa81fa084d8634481aa9362d1c5572231ef6491
+  data.tar.gz: ad6f608804f372a6488db63e0182d585c72954f9654590cacdcdc3d116602096
 SHA512:
-  metadata.gz: 3cb2167fb97c6ab7ccba66e888521a1a119e2f17a0c373829bce0abb41518ead4005c0a7c176373c45942e9cd9fca71355b9898e7cc2b67db22d6291a76b324e
-  data.tar.gz: 1da6192fcb6f33359b508ff4e3445ec029373ff9606f78170051a682a29371524d6f08864f4a0887b3cbc298e462d13fe8cc9080eb29bd9cf8d965c7a766409e
+  metadata.gz: 025a8f3d1359eb027cd07effc689f00bf466aee783563b2552a489f24dcdffeb88aa7863f7158af922f61f20987a93205865e7d46ab695b8aa1b194fba0af449
+  data.tar.gz: 67e46fc5427a5f29d4d90e65662591ddb1ca354f333831ac3f908a8b23fd5ef1d0bf30c11195d26391783b11f6ca56f8eb92ad70622da47da63b7520adffdc81
lib/redis_client/cluster/concurrent_worker/none.rb ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+class RedisClient
+  class Cluster
+    module ConcurrentWorker
+      class None
+        def new_group(size:)
+          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+            worker: self,
+            queue: Array.new(size),
+            size: size
+          )
+        end
+
+        def push(task)
+          task.exec
+        end
+
+        def close; end
+      end
+    end
+  end
+end
lib/redis_client/cluster/concurrent_worker/on_demand.rb ADDED
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+class RedisClient
+  class Cluster
+    module ConcurrentWorker
+      class OnDemand
+        def initialize(size:)
+          @q = SizedQueue.new(size)
+        end
+
+        def new_group(size:)
+          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+            worker: self,
+            queue: SizedQueue.new(size),
+            size: size
+          )
+        end
+
+        def push(task)
+          @q << spawn_worker(task, @q)
+        end
+
+        def close
+          @q.clear
+          @q.close
+          nil
+        end
+
+        private
+
+        def spawn_worker(task, queue)
+          Thread.new(task, queue) do |t, q|
+            t.exec
+            q.pop
+          end
+        end
+      end
+    end
+  end
+end
lib/redis_client/cluster/concurrent_worker/pooled.rb ADDED
@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+require 'redis_client/pid_cache'
+
+class RedisClient
+  class Cluster
+    module ConcurrentWorker
+      # This class is just an experimental implementation.
+      # Ruby VM allocates 1 MB memory as a stack for a thread.
+      # It is a fixed size but we can modify the size with some environment variables.
+      # So it consumes memory 1 MB multiplied a number of workers.
+      class Pooled
+        def initialize(size:)
+          @size = size
+          setup
+        end
+
+        def new_group(size:)
+          reset if @pid != ::RedisClient::PIDCache.pid
+          ensure_workers if @workers.first.nil?
+          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
+            worker: self,
+            queue: SizedQueue.new(size),
+            size: size
+          )
+        end
+
+        def push(task)
+          @q << task
+        end
+
+        def close
+          @q.clear
+          @workers.each { |t| t&.exit }
+          @workers.clear
+          @q.close
+          @pid = nil
+          nil
+        end
+
+        private
+
+        def setup
+          @q = Queue.new
+          @workers = Array.new(@size)
+          @pid = ::RedisClient::PIDCache.pid
+        end
+
+        def reset
+          close
+          setup
+        end
+
+        def ensure_workers
+          @workers.size.times do |i|
+            @workers[i] = spawn_worker unless @workers[i]&.alive?
+          end
+        end
+
+        def spawn_worker
+          Thread.new(@q) { |q| loop { q.pop.exec } }
+        end
+      end
+    end
+  end
+end
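As the comment in Pooled notes, each worker thread reserves roughly 1 MB of stack, so a pool of size 5 keeps on the order of 5 MB of thread stacks alive for the lifetime of the client. A minimal sketch of selecting this experimental model through the factory defined in the next file (the size here is only illustrative):

    require 'redis_client/cluster/concurrent_worker'

    # Build a fixed pool of 5 worker threads that pop tasks from a shared queue.
    worker = RedisClient::Cluster::ConcurrentWorker.create(model: :pooled, size: 5)
    # ... use the worker, then release its threads explicitly.
    worker.close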
lib/redis_client/cluster/concurrent_worker.rb ADDED
@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+
+require 'redis_client/cluster/concurrent_worker/on_demand'
+require 'redis_client/cluster/concurrent_worker/pooled'
+require 'redis_client/cluster/concurrent_worker/none'
+
+class RedisClient
+  class Cluster
+    module ConcurrentWorker
+      InvalidNumberOfTasks = Class.new(::RedisClient::Error)
+
+      class Group
+        Task = Struct.new(
+          'RedisClusterClientConcurrentWorkerTask',
+          :id, :queue, :args, :kwargs, :block, :result,
+          keyword_init: true
+        ) do
+          def exec
+            self[:result] = block&.call(*args, **kwargs)
+          rescue StandardError => e
+            self[:result] = e
+          ensure
+            done
+          end
+
+          def done
+            queue&.push(self)
+          rescue ClosedQueueError
+            # something was wrong
+          end
+        end
+
+        def initialize(worker:, queue:, size:)
+          @worker = worker
+          @queue = queue
+          @size = size
+          @count = 0
+        end
+
+        def push(id, *args, **kwargs, &block)
+          raise InvalidNumberOfTasks, "max size reached: #{@count}" if @count == @size
+
+          task = Task.new(id: id, queue: @queue, args: args, kwargs: kwargs, block: block)
+          @worker.push(task)
+          @count += 1
+          nil
+        end
+
+        def each
+          raise InvalidNumberOfTasks, "expected: #{@size}, actual: #{@count}" if @count != @size
+
+          @size.times do
+            task = @queue.pop
+            yield(task.id, task.result)
+          end
+
+          nil
+        end
+
+        def close
+          @queue.clear
+          @queue.close if @queue.respond_to?(:close)
+          @count = 0
+          nil
+        end
+      end
+
+      module_function
+
+      def create(model: :on_demand, size: 5)
+        size = size.positive? ? size : 5
+
+        case model
+        when :on_demand, nil then ::RedisClient::Cluster::ConcurrentWorker::OnDemand.new(size: size)
+        when :pooled then ::RedisClient::Cluster::ConcurrentWorker::Pooled.new(size: size)
+        when :none then ::RedisClient::Cluster::ConcurrentWorker::None.new
+        else raise ArgumentError, "Unknown model: #{model}"
+        end
+      end
+    end
+  end
+end
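The node.rb and pipeline.rb hunks below swap their hand-rolled thread slicing for this Group API. A minimal sketch of the lifecycle, assuming a worker obtained from the create factory above (the task ids and blocks are placeholders):

    worker = RedisClient::Cluster::ConcurrentWorker.create(model: :on_demand, size: 5)
    group = worker.new_group(size: 2)   # must match the number of tasks pushed before calling #each
    group.push(:foo) { 1 + 1 }          # each block runs on the worker; exceptions are captured as results
    group.push(:bar) { 2 * 3 }
    group.each { |id, result| puts "#{id}: #{result.inspect}" }  # blocks until every task has reported back
    group.close
    worker.close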
@@ -13,11 +13,11 @@ class RedisClient
         DUMMY_LATENCY_MSEC = 100 * 1000 * 1000
         MEASURE_ATTEMPT_COUNT = 10
 
-        def initialize(replications, options, pool, **kwargs)
+        def initialize(replications, options, pool, concurrent_worker, **kwargs)
           super
 
           all_replica_clients = @clients.select { |k, _| @replica_node_keys.include?(k) }
-          latencies = measure_latencies(all_replica_clients)
+          latencies = measure_latencies(all_replica_clients, concurrent_worker)
           @replications.each_value { |keys| keys.sort_by! { |k| latencies.fetch(k) } }
           @replica_clients = select_replica_clients(@replications, @clients)
           @clients_for_scanning = select_clients_for_scanning(@replications, @clients)
@@ -39,28 +39,35 @@ class RedisClient
 
         private
 
-        def measure_latencies(clients)
-          clients.each_slice(::RedisClient::Cluster::Node::MAX_THREADS).each_with_object({}) do |chuncked_clients, acc|
-            chuncked_clients
-              .map { |node_key, client| [node_key, build_thread_for_measuring_latency(client)] }
-              .each { |node_key, thread| acc[node_key] = thread.value }
-          end
-        end
+        def measure_latencies(clients, concurrent_worker) # rubocop:disable Metrics/AbcSize
+          return {} if clients.empty?
 
-        def build_thread_for_measuring_latency(client)
-          Thread.new(client) do |cli|
-            min = DUMMY_LATENCY_MSEC
-            MEASURE_ATTEMPT_COUNT.times do
-              starting = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
-              cli.call_once('PING')
-              duration = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond) - starting
-              min = duration if duration < min
-            end
+          work_group = concurrent_worker.new_group(size: clients.size)
+
+          clients.each do |node_key, client|
+            work_group.push(node_key, client) do |cli|
+              min = DUMMY_LATENCY_MSEC
+              MEASURE_ATTEMPT_COUNT.times do
+                starting = obtain_current_time
+                cli.call_once('PING')
+                duration = obtain_current_time - starting
+                min = duration if duration < min
+              end
 
-            min
-          rescue StandardError
-            DUMMY_LATENCY_MSEC
+              min
+            rescue StandardError
+              DUMMY_LATENCY_MSEC
+            end
           end
+
+          latencies = {}
+          work_group.each { |node_key, v| latencies[node_key] = v }
+          work_group.close
+          latencies
+        end
+
+        def obtain_current_time
+          Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
         end
 
         def select_replica_clients(replications, clients)
@@ -6,7 +6,7 @@ class RedisClient
       class PrimaryOnly
         attr_reader :clients
 
-        def initialize(replications, options, pool, **kwargs)
+        def initialize(replications, options, pool, _concurrent_worker, **kwargs)
           @primary_node_keys = replications.keys.sort
           @clients = build_clients(@primary_node_keys, options, pool, **kwargs)
         end
@@ -8,7 +8,7 @@ class RedisClient
 
         EMPTY_ARRAY = [].freeze
 
-        def initialize(replications, options, pool, **kwargs)
+        def initialize(replications, options, pool, _concurrent_worker, **kwargs)
           @replications = replications
           @primary_node_keys = @replications.keys.sort
           @replica_node_keys = @replications.values.flatten.sort
@@ -17,7 +17,6 @@ class RedisClient
       MIN_SLOT = 0
       MAX_SLOT = SLOT_SIZE - 1
       MAX_STARTUP_SAMPLE = Integer(ENV.fetch('REDIS_CLIENT_MAX_STARTUP_SAMPLE', 3))
-      MAX_THREADS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
       IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
       DEAD_FLAGS = %w[fail? fail handshake noaddr noflags].freeze
       ROLE_FLAGS = %w[master slave].freeze
@@ -89,27 +88,40 @@ class RedisClient
       end
 
       class << self
-        def load_info(options, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+        def load_info(options, concurrent_worker, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+          raise ::RedisClient::Cluster::InitialSetupError, [] if options.nil? || options.empty?
+
           startup_size = options.size > MAX_STARTUP_SAMPLE ? MAX_STARTUP_SAMPLE : options.size
+          startup_options = options.to_a.sample(startup_size).to_h
+          startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, concurrent_worker, **kwargs)
+          work_group = concurrent_worker.new_group(size: startup_size)
+
+          startup_nodes.each_with_index do |raw_client, i|
+            work_group.push(i, raw_client) do |client|
+              reply = client.call('CLUSTER', 'NODES')
+              parse_cluster_node_reply(reply)
+            rescue StandardError => e
+              e
+            ensure
+              client&.close
+            end
+          end
+
           node_info_list = errors = nil
-          startup_options = options.to_a.sample(MAX_STARTUP_SAMPLE).to_h
-          startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, **kwargs)
-          startup_nodes.each_slice(MAX_THREADS).with_index do |chuncked_startup_nodes, chuncked_idx|
-            chuncked_startup_nodes
-              .each_with_index
-              .map { |raw_client, idx| [(MAX_THREADS * chuncked_idx) + idx, build_thread_for_cluster_node(raw_client)] }
-              .each do |i, t|
-                case v = t.value
-                when StandardError
-                  errors ||= Array.new(startup_size)
-                  errors[i] = v
-                else
-                  node_info_list ||= Array.new(startup_size)
-                  node_info_list[i] = v
-                end
-              end
+
+          work_group.each do |i, v|
+            case v
+            when StandardError
+              errors ||= Array.new(startup_size)
+              errors[i] = v
+            else
+              node_info_list ||= Array.new(startup_size)
+              node_info_list[i] = v
+            end
           end
 
+          work_group.close
+
           raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
 
           grouped = node_info_list.compact.group_by do |info_list|
@@ -124,17 +136,6 @@ class RedisClient
 
         private
 
-        def build_thread_for_cluster_node(raw_client)
-          Thread.new(raw_client) do |client|
-            reply = client.call('CLUSTER', 'NODES')
-            parse_cluster_node_reply(reply)
-          rescue StandardError => e
-            e
-          ensure
-            client&.close
-          end
-        end
-
         # @see https://redis.io/commands/cluster-nodes/
         # @see https://github.com/redis/redis/blob/78960ad57b8a5e6af743d789ed8fd767e37d42b8/src/cluster.c#L4660-L4683
         def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
@@ -183,6 +184,7 @@ class RedisClient
 
       def initialize(
        options,
+        concurrent_worker,
        node_info_list: [],
        with_replica: false,
        replica_affinity: :random,
@@ -190,9 +192,11 @@ class RedisClient
        **kwargs
      )
 
+        @concurrent_worker = concurrent_worker
        @slots = build_slot_node_mappings(node_info_list)
        @replications = build_replication_mappings(node_info_list)
-        @topology = make_topology_class(with_replica, replica_affinity).new(@replications, options, pool, **kwargs)
+        klass = make_topology_class(with_replica, replica_affinity)
+        @topology = klass.new(@replications, options, pool, @concurrent_worker, **kwargs)
        @mutex = Mutex.new
      end
@@ -336,32 +340,35 @@ class RedisClient
         raise ::RedisClient::Cluster::ErrorCollection, errors
       end
 
-      def try_map(clients, &block)
-        results = errors = nil
-        clients.each_slice(MAX_THREADS) do |chuncked_clients|
-          chuncked_clients
-            .map { |node_key, client| [node_key, build_thread_for_command(node_key, client, &block)] }
-            .each do |node_key, thread|
-              case v = thread.value
-              when StandardError
-                errors ||= {}
-                errors[node_key] = v
-              else
-                results ||= {}
-                results[node_key] = v
-              end
-            end
+      def try_map(clients, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity
+        return [{}, {}] if clients.empty?
+
+        work_group = @concurrent_worker.new_group(size: clients.size)
+
+        clients.each do |node_key, client|
+          work_group.push(node_key, node_key, client, block) do |nk, cli, blk|
+            blk.call(nk, cli)
+          rescue StandardError => e
+            e
+          end
         end
 
-        [results, errors]
-      end
+        results = errors = nil
 
-      def build_thread_for_command(node_key, client)
-        Thread.new(node_key, client) do |nk, cli|
-          yield(nk, cli)
-        rescue StandardError => e
-          e
+        work_group.each do |node_key, v|
+          case v
+          when StandardError
+            errors ||= {}
+            errors[node_key] = v
+          else
+            results ||= {}
+            results[node_key] = v
+          end
         end
+
+        work_group.close
+
+        [results, errors]
       end
     end
   end
@@ -95,11 +95,10 @@ class RedisClient
         attr_accessor :replies, :indices
       end
 
-      MAX_THREADS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
-
-      def initialize(router, command_builder, seed: Random.new_seed)
+      def initialize(router, command_builder, concurrent_worker, seed: Random.new_seed)
        @router = router
        @command_builder = command_builder
+        @concurrent_worker = concurrent_worker
        @seed = seed
        @pipelines = nil
        @size = 0
@@ -146,27 +145,39 @@ class RedisClient
       end
 
       def execute # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+        return if @pipelines.nil? || @pipelines.empty?
+
+        work_group = @concurrent_worker.new_group(size: @pipelines.size)
+
+        @pipelines.each do |node_key, pipeline|
+          work_group.push(node_key, @router.find_node(node_key), pipeline) do |cli, pl|
+            replies = do_pipelining(cli, pl)
+            raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
+
+            replies
+          end
+        end
+
         all_replies = errors = nil
-        @pipelines&.each_slice(MAX_THREADS) do |chuncked_pipelines|
-          chuncked_pipelines
-            .map { |node_key, pipeline| [node_key, build_thread_for_pipeline(@router.find_node(node_key), pipeline)] }
-            .each do |node_key, thread|
-              case v = thread.value
-              when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
-                all_replies ||= Array.new(@size)
-                pipeline = @pipelines[node_key]
-                v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
-                pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
-              when StandardError
-                errors ||= {}
-                errors[node_key] = v
-              else
-                all_replies ||= Array.new(@size)
-                @pipelines[node_key].outer_indices.each_with_index { |outer, inner| all_replies[outer] = v[inner] }
-              end
-            end
+
+        work_group.each do |node_key, v|
+          case v
+          when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
+            all_replies ||= Array.new(@size)
+            pipeline = @pipelines[node_key]
+            v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
+            pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+          when StandardError
+            errors ||= {}
+            errors[node_key] = v
+          else
+            all_replies ||= Array.new(@size)
+            @pipelines[node_key].outer_indices.each_with_index { |outer, inner| all_replies[outer] = v[inner] }
+          end
         end
 
+        work_group.close
+
         raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?
 
         all_replies
@@ -182,17 +193,6 @@ class RedisClient
         @pipelines[node_key]
       end
 
-      def build_thread_for_pipeline(client, pipeline)
-        Thread.new(client, pipeline) do |cli, pl|
-          replies = do_pipelining(cli, pl)
-          raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
-
-          replies
-        rescue StandardError => e
-          e
-        end
-      end
-
       def do_pipelining(client, pipeline)
         case client
         when ::RedisClient then send_pipeline(client, pipeline)
@@ -28,6 +28,9 @@ class RedisClient
         private
 
         def spawn_worker(client, queue)
+          # Ruby VM allocates 1 MB memory as a stack for a thread.
+          # It is a fixed size but we can modify the size with some environment variables.
+          # So it consumes memory 1 MB multiplied a number of workers.
           Thread.new(client, queue) do |pubsub, q|
             loop do
               q << pubsub.next_event
@@ -59,6 +62,8 @@ class RedisClient
       @state_dict.each_value(&:close)
       @state_dict.clear
       @queue.clear
+      @queue.close
+      nil
     end
 
     def next_event(timeout = nil)
@@ -16,13 +16,12 @@ class RedisClient
       METHODS_FOR_BLOCKING_CMD = %i[blocking_call_v blocking_call].freeze
       TSF = ->(f, x) { f.nil? ? x : f.call(x) }.curry
 
-      attr_reader :node
-
-      def initialize(config, pool: nil, **kwargs)
+      def initialize(config, concurrent_worker, pool: nil, **kwargs)
        @config = config.dup
+        @concurrent_worker = concurrent_worker
        @pool = pool
        @client_kwargs = kwargs
-        @node = fetch_cluster_info(@config, pool: @pool, **@client_kwargs)
+        @node = fetch_cluster_info(@config, @concurrent_worker, pool: @pool, **@client_kwargs)
        @command = ::RedisClient::Cluster::Command.load(@node.shuffled_nodes)
        @mutex = Mutex.new
        @command_builder = @config.command_builder
@@ -206,6 +205,14 @@ class RedisClient
        find_node(node_key)
      end
 
+      def node_keys
+        @node.node_keys
+      end
+
+      def close
+        @node.each(&:close)
+      end
+
      private
 
      def send_wait_command(method, command, args, retry_count: 3, &block) # rubocop:disable Metrics/AbcSize
@@ -284,12 +291,13 @@ class RedisClient
        end
      end
 
-      def fetch_cluster_info(config, pool: nil, **kwargs)
-        node_info_list = ::RedisClient::Cluster::Node.load_info(config.per_node_key, **kwargs)
+      def fetch_cluster_info(config, concurrent_worker, pool: nil, **kwargs)
+        node_info_list = ::RedisClient::Cluster::Node.load_info(config.per_node_key, concurrent_worker, **kwargs)
        node_addrs = node_info_list.map { |i| ::RedisClient::Cluster::NodeKey.hashify(i.node_key) }
        config.update_node(node_addrs)
        ::RedisClient::Cluster::Node.new(
          config.per_node_key,
+          concurrent_worker,
          node_info_list: node_info_list,
          pool: pool,
          with_replica: config.use_replica?,
@@ -308,7 +316,7 @@ class RedisClient
          # ignore
        end
 
-        @node = fetch_cluster_info(@config, pool: @pool, **@client_kwargs)
+        @node = fetch_cluster_info(@config, @concurrent_worker, pool: @pool, **@client_kwargs)
      end
    end
  end
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
 
+require 'redis_client/cluster/concurrent_worker'
 require 'redis_client/cluster/pipeline'
 require 'redis_client/cluster/pub_sub'
 require 'redis_client/cluster/router'
@@ -10,14 +11,15 @@ class RedisClient
 
     attr_reader :config
 
-    def initialize(config, pool: nil, **kwargs)
+    def initialize(config, pool: nil, concurrency: nil, **kwargs)
       @config = config
-      @router = ::RedisClient::Cluster::Router.new(config, pool: pool, **kwargs)
+      @concurrent_worker = ::RedisClient::Cluster::ConcurrentWorker.create(**(concurrency || {}))
+      @router = ::RedisClient::Cluster::Router.new(config, @concurrent_worker, pool: pool, **kwargs)
       @command_builder = config.command_builder
     end
 
     def inspect
-      "#<#{self.class.name} #{@router.node.node_keys.join(', ')}>"
+      "#<#{self.class.name} #{@router.node_keys.join(', ')}>"
     end
 
     def call(*args, **kwargs, &block)
@@ -79,7 +81,7 @@ class RedisClient
 
     def pipelined
       seed = @config.use_replica? && @config.replica_affinity == :random ? nil : Random.new_seed
-      pipeline = ::RedisClient::Cluster::Pipeline.new(@router, @command_builder, seed: seed)
+      pipeline = ::RedisClient::Cluster::Pipeline.new(@router, @command_builder, @concurrent_worker, seed: seed)
       yield pipeline
       return [] if pipeline.empty?
 
@@ -91,7 +93,8 @@ class RedisClient
     end
 
     def close
-      @router.node.each(&:close)
+      @concurrent_worker.close
+      @router.close
       nil
     end
 
@@ -17,6 +17,7 @@ class RedisClient
     VALID_NODES_KEYS = %i[ssl username password host port db].freeze
     MERGE_CONFIG_KEYS = %i[ssl username password].freeze
     IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
+    MAX_WORKERS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
 
     InvalidClientConfigError = Class.new(::RedisClient::Error)
 
@@ -27,7 +28,8 @@ class RedisClient
       replica: false,
       replica_affinity: :random,
       fixed_hostname: '',
-      client_implementation: Cluster,
+      concurrency: nil,
+      client_implementation: ::RedisClient::Cluster, # for redis gem
       **client_config
     )
 
@@ -38,6 +40,7 @@ class RedisClient
       client_config = client_config.reject { |k, _| IGNORE_GENERIC_CONFIG_KEYS.include?(k) }
       @command_builder = client_config.fetch(:command_builder, ::RedisClient::CommandBuilder)
       @client_config = merge_generic_config(client_config, @node_configs)
+      @concurrency = merge_concurrency_option(concurrency)
       @client_implementation = client_implementation
       @mutex = Mutex.new
     end
@@ -48,6 +51,7 @@ class RedisClient
        replica: @replica,
        replica_affinity: @replica_affinity,
        fixed_hostname: @fixed_hostname,
+        concurrency: @concurrency,
        client_implementation: @client_implementation,
        **@client_config
      )
@@ -58,15 +62,20 @@ class RedisClient
     end
 
     def read_timeout
-      @client_config[:read_timeout] || @client_config[:timeout] || RedisClient::Config::DEFAULT_TIMEOUT
+      @client_config[:read_timeout] || @client_config[:timeout] || ::RedisClient::Config::DEFAULT_TIMEOUT
     end
 
     def new_pool(size: 5, timeout: 5, **kwargs)
-      @client_implementation.new(self, pool: { size: size, timeout: timeout }, **kwargs)
+      @client_implementation.new(
+        self,
+        pool: { size: size, timeout: timeout },
+        concurrency: @concurrency,
+        **kwargs
+      )
     end
 
     def new_client(**kwargs)
-      @client_implementation.new(self, **kwargs)
+      @client_implementation.new(self, concurrency: @concurrency, **kwargs)
     end
 
     def per_node_key
@@ -96,6 +105,15 @@ class RedisClient
 
     private
 
+    def merge_concurrency_option(option)
+      case option
+      when Hash
+        option = option.transform_keys(&:to_sym)
+        { size: MAX_WORKERS }.merge(option)
+      else { size: MAX_WORKERS }
+      end
+    end
+
     def build_node_configs(addrs)
       configs = Array[addrs].flatten.filter_map { |addr| parse_node_addr(addr) }
       raise InvalidClientConfigError, '`nodes` option is empty' if configs.empty?
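Taken together, the concurrency model becomes a public knob: ClusterConfig fills in a default size of REDIS_CLIENT_MAX_THREADS (5 unless overridden) via merge_concurrency_option and forwards the hash through new_client/new_pool to Cluster, which builds its worker with ConcurrentWorker.create. A minimal sketch, assuming the gem's nodes: option and placeholder endpoints:

    config = RedisClient::ClusterConfig.new(
      nodes: %w[redis://127.0.0.1:6379 redis://127.0.0.1:6380 redis://127.0.0.1:6381],
      concurrency: { model: :on_demand, size: 10 }  # also accepts model: :pooled or model: :none
    )
    client = config.new_client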
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: redis-cluster-client
 version: !ruby/object:Gem::Version
-  version: 0.5.0
+  version: 0.6.0
 platform: ruby
 authors:
 - Taishi Kasuga
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-09-06 00:00:00.000000000 Z
+date: 2023-09-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis-client
@@ -34,6 +34,10 @@ files:
 - lib/redis-cluster-client.rb
 - lib/redis_client/cluster.rb
 - lib/redis_client/cluster/command.rb
+- lib/redis_client/cluster/concurrent_worker.rb
+- lib/redis_client/cluster/concurrent_worker/none.rb
+- lib/redis_client/cluster/concurrent_worker/on_demand.rb
+- lib/redis_client/cluster/concurrent_worker/pooled.rb
 - lib/redis_client/cluster/errors.rb
 - lib/redis_client/cluster/key_slot_converter.rb
 - lib/redis_client/cluster/node.rb