redis-cluster-client 0.7.5 → 0.7.7

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -2,6 +2,7 @@
 
  require 'redis_client'
  require 'redis_client/config'
+ require 'redis_client/cluster/error_identification'
  require 'redis_client/cluster/errors'
  require 'redis_client/cluster/node/primary_only'
  require 'redis_client/cluster/node/random_replica'
@@ -22,10 +23,10 @@ class RedisClient
  SLOT_SIZE = 16_384
  MIN_SLOT = 0
  MAX_SLOT = SLOT_SIZE - 1
- IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
  DEAD_FLAGS = %w[fail? fail handshake noaddr noflags].freeze
  ROLE_FLAGS = %w[master slave].freeze
  EMPTY_ARRAY = [].freeze
+ EMPTY_HASH = {}.freeze
 
  ReloadNeeded = Class.new(::RedisClient::Error)
 
@@ -78,9 +79,11 @@ class RedisClient
  end
 
  class Config < ::RedisClient::Config
- def initialize(scale_read: false, **kwargs)
+ def initialize(scale_read: false, middlewares: nil, **kwargs)
  @scale_read = scale_read
- super(**kwargs)
+ middlewares ||= []
+ middlewares.unshift ErrorIdentification::Middleware
+ super(middlewares: middlewares, **kwargs)
  end
 
  private
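
Note on the Config change above: the gem now prepends its ErrorIdentification::Middleware to whatever middleware list the caller provides (or to an empty one), instead of discarding the keyword. For orientation, redis-client middlewares are plain modules whose hook methods delegate with `super`; a minimal sketch of a caller-side middleware that could be passed through this keyword (the CommandLogger name and timing logic are illustrative, not part of either gem):

    # Illustrative middleware following redis-client's module convention:
    # each hook wraps the underlying call via `super`.
    module CommandLogger
      def call(command, redis_config)
        started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        super
      ensure
        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
        warn format('%s took %.1fms', command.first, elapsed * 1000.0)
      end
    end

    # With the change above, passing middlewares: [CommandLogger] means
    # [ErrorIdentification::Middleware, CommandLogger] is handed to super.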
@@ -92,119 +95,19 @@ class RedisClient
  end
  end
 
- class << self
- def load_info(options, concurrent_worker, slow_command_timeout: -1, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
- raise ::RedisClient::Cluster::InitialSetupError, [] if options.nil? || options.empty?
-
- startup_size = options.size > MAX_STARTUP_SAMPLE ? MAX_STARTUP_SAMPLE : options.size
- startup_options = options.to_a.sample(startup_size).to_h
- startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, concurrent_worker, **kwargs)
- work_group = concurrent_worker.new_group(size: startup_size)
-
- startup_nodes.each_with_index do |raw_client, i|
- work_group.push(i, raw_client) do |client|
- regular_timeout = client.read_timeout
- client.read_timeout = slow_command_timeout > 0.0 ? slow_command_timeout : regular_timeout
- reply = client.call('CLUSTER', 'NODES')
- client.read_timeout = regular_timeout
- parse_cluster_node_reply(reply)
- rescue StandardError => e
- e
- ensure
- client&.close
- end
- end
-
- node_info_list = errors = nil
-
- work_group.each do |i, v|
- case v
- when StandardError
- errors ||= Array.new(startup_size)
- errors[i] = v
- else
- node_info_list ||= Array.new(startup_size)
- node_info_list[i] = v
- end
- end
-
- work_group.close
-
- raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
-
- grouped = node_info_list.compact.group_by do |info_list|
- info_list.sort_by!(&:id)
- info_list.each_with_object(String.new(capacity: 128 * info_list.size)) do |e, a|
- a << e.id << e.node_key << e.role << e.primary_id << e.config_epoch
- end
- end
-
- grouped.max_by { |_, v| v.size }[1].first.freeze
- end
-
- private
-
- # @see https://redis.io/commands/cluster-nodes/
- # @see https://github.com/redis/redis/blob/78960ad57b8a5e6af743d789ed8fd767e37d42b8/src/cluster.c#L4660-L4683
- def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
- reply.each_line("\n", chomp: true).filter_map do |line|
- fields = line.split
- flags = fields[2].split(',')
- next unless fields[7] == 'connected' && (flags & DEAD_FLAGS).empty?
-
- slots = if fields[8].nil?
- EMPTY_ARRAY
- else
- fields[8..].reject { |str| str.start_with?('[') }
- .map { |str| str.split('-').map { |s| Integer(s) } }
- .map { |a| a.size == 1 ? a << a.first : a }
- .map(&:sort)
- end
-
- ::RedisClient::Cluster::Node::Info.new(
- id: fields[0],
- node_key: parse_node_key(fields[1]),
- role: (flags & ROLE_FLAGS).first,
- primary_id: fields[3],
- ping_sent: fields[4],
- pong_recv: fields[5],
- config_epoch: fields[6],
- link_state: fields[7],
- slots: slots
- )
- end
- end
-
- # As redirection node_key is dependent on `cluster-preferred-endpoint-type` config,
- # node_key should use hostname if present in CLUSTER NODES output.
- #
- # See https://redis.io/commands/cluster-nodes/ for details on the output format.
- # node_address matches fhe format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
- def parse_node_key(node_address)
- ip_chunk, hostname, _auxiliaries = node_address.split(',')
- ip_port_string = ip_chunk.split('@').first
- return ip_port_string if hostname.nil? || hostname.empty?
-
- port = ip_port_string.split(':')[1]
- "#{hostname}:#{port}"
- end
- end
-
  def initialize(
- options,
  concurrent_worker,
- node_info_list: [],
- with_replica: false,
- replica_affinity: :random,
+ config:,
  pool: nil,
  **kwargs
  )
 
  @concurrent_worker = concurrent_worker
- @slots = build_slot_node_mappings(node_info_list)
- @replications = build_replication_mappings(node_info_list)
- klass = make_topology_class(with_replica, replica_affinity)
- @topology = klass.new(@replications, options, pool, @concurrent_worker, **kwargs)
+ @slots = build_slot_node_mappings(EMPTY_ARRAY)
+ @replications = build_replication_mappings(EMPTY_ARRAY)
+ klass = make_topology_class(config.use_replica?, config.replica_affinity)
+ @topology = klass.new(pool, @concurrent_worker, **kwargs)
+ @config = config
  @mutex = Mutex.new
  end
 
@@ -255,6 +158,14 @@ class RedisClient
  @topology.clients_for_scanning(seed: seed).values.sort_by { |c| "#{c.config.host}-#{c.config.port}" }
  end
 
+ def clients
+ @topology.clients.values
+ end
+
+ def primary_clients
+ @topology.primary_clients.values
+ end
+
  def replica_clients
  @topology.replica_clients.values
  end
@@ -292,6 +203,24 @@ class RedisClient
  end
  end
 
+ def reload!
+ with_reload_lock do
+ with_startup_clients(MAX_STARTUP_SAMPLE) do |startup_clients|
+ @node_info = refetch_node_info_list(startup_clients)
+ @node_configs = @node_info.to_h do |node_info|
+ [node_info.node_key, @config.client_config_for_node(node_info.node_key)]
+ end
+ @slots = build_slot_node_mappings(@node_info)
+ @replications = build_replication_mappings(@node_info)
+ @topology.process_topology_update!(@replications, @node_configs)
+ end
+ end
+ end
+
+ def owns_error?(err)
+ any? { |c| ErrorIdentification.client_owns_error?(err, c) }
+ end
+
  private
 
  def make_topology_class(with_replica, replica_affinity)
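
The new reload! performs the topology fetch that previously happened in the removed class-level load_info, and owns_error? lets callers check whether an exception came from one of this node set's own connections before reacting to it (for example, before triggering a reload). A self-contained sketch of the error-ownership idea, assuming nothing about the gem's actual ErrorIdentification implementation:

    # Illustration only: tag an exception with the client that raised it, then
    # test ownership the way owns_error? does. The gem achieves this with its
    # ErrorIdentification middleware; the tagging below is a stand-in.
    module ErrorTagging
      def self.tag(error, client)
        error.instance_variable_set(:@raising_client, client)
        error
      end

      def self.owned_by?(error, client)
        error.instance_variable_get(:@raising_client).equal?(client)
      end
    end

    clients = [Object.new, Object.new]
    err = ErrorTagging.tag(RuntimeError.new('boom'), clients.first)
    clients.any? { |c| ErrorTagging.owned_by?(err, c) }  # => true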
@@ -378,6 +307,137 @@ class RedisClient
 
  [results, errors]
  end
+
+ def refetch_node_info_list(startup_clients) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+ startup_size = startup_clients.size
+ work_group = @concurrent_worker.new_group(size: startup_size)
+
+ startup_clients.each_with_index do |raw_client, i|
+ work_group.push(i, raw_client) do |client|
+ regular_timeout = client.read_timeout
+ client.read_timeout = @config.slow_command_timeout > 0.0 ? @config.slow_command_timeout : regular_timeout
+ reply = client.call('CLUSTER', 'NODES')
+ client.read_timeout = regular_timeout
+ parse_cluster_node_reply(reply)
+ rescue StandardError => e
+ e
+ ensure
+ client&.close
+ end
+ end
+
+ node_info_list = errors = nil
+
+ work_group.each do |i, v|
+ case v
+ when StandardError
+ errors ||= Array.new(startup_size)
+ errors[i] = v
+ else
+ node_info_list ||= Array.new(startup_size)
+ node_info_list[i] = v
+ end
+ end
+
+ work_group.close
+
+ raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
+
+ grouped = node_info_list.compact.group_by do |info_list|
+ info_list.sort_by!(&:id)
+ info_list.each_with_object(String.new(capacity: 128 * info_list.size)) do |e, a|
+ a << e.id << e.node_key << e.role << e.primary_id << e.config_epoch
+ end
+ end
+
+ grouped.max_by { |_, v| v.size }[1].first
+ end
+
+ def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+ reply.each_line("\n", chomp: true).filter_map do |line|
+ fields = line.split
+ flags = fields[2].split(',')
+ next unless fields[7] == 'connected' && (flags & DEAD_FLAGS).empty?
+
+ slots = if fields[8].nil?
+ EMPTY_ARRAY
+ else
+ fields[8..].reject { |str| str.start_with?('[') }
+ .map { |str| str.split('-').map { |s| Integer(s) } }
+ .map { |a| a.size == 1 ? a << a.first : a }
+ .map(&:sort)
+ end
+
+ ::RedisClient::Cluster::Node::Info.new(
+ id: fields[0],
+ node_key: parse_node_key(fields[1]),
+ role: (flags & ROLE_FLAGS).first,
+ primary_id: fields[3],
+ ping_sent: fields[4],
+ pong_recv: fields[5],
+ config_epoch: fields[6],
+ link_state: fields[7],
+ slots: slots
+ )
+ end
+ end
+
+ # As redirection node_key is dependent on `cluster-preferred-endpoint-type` config,
+ # node_key should use hostname if present in CLUSTER NODES output.
+ #
+ # See https://redis.io/commands/cluster-nodes/ for details on the output format.
+ # node_address matches fhe format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
+ def parse_node_key(node_address)
+ ip_chunk, hostname, _auxiliaries = node_address.split(',')
+ ip_port_string = ip_chunk.split('@').first
+ return ip_port_string if hostname.nil? || hostname.empty?
+
+ port = ip_port_string.split(':')[1]
+ "#{hostname}:#{port}"
+ end
+
+ def with_startup_clients(count) # rubocop:disable Metrics/AbcSize
+ if @config.connect_with_original_config
+ # If connect_with_original_config is set, that means we need to build actual client objects
+ # and close them, so that we e.g. re-resolve a DNS entry with the cluster nodes in it.
+ begin
+ # Memoize the startup clients, so we maintain RedisClient's internal circuit breaker configuration
+ # if it's set.
+ @startup_clients ||= @config.startup_nodes.values.sample(count).map do |node_config|
+ ::RedisClient::Cluster::Node::Config.new(**node_config).new_client
+ end
+ yield @startup_clients
+ ensure
+ # Close the startup clients when we're done, so we don't maintain pointless open connections to
+ # the cluster though
+ @startup_clients&.each(&:close)
+ end
+ else
+ # (re-)connect using nodes we already know about.
+ # If this is the first time we're connecting to the cluster, we need to seed the topology with the
+ # startup clients though.
+ @topology.process_topology_update!({}, @config.startup_nodes) if @topology.clients.empty?
+ yield @topology.clients.values.sample(count)
+ end
+ end
+
+ def with_reload_lock
+ # What should happen with concurrent calls #reload? This is a realistic possibility if the cluster goes into
+ # a CLUSTERDOWN state, and we're using a pooled backend. Every thread will independently discover this, and
+ # call reload!.
+ # For now, if a reload is in progress, wait for that to complete, and consider that the same as us having
+ # performed the reload.
+ # Probably in the future we should add a circuit breaker to #reload itself, and stop trying if the cluster is
+ # obviously not working.
+ wait_start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ @mutex.synchronize do
+ return if @last_reloaded_at && @last_reloaded_at > wait_start
+
+ r = yield
+ @last_reloaded_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ r
+ end
+ end
  end
  end
  end
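
parse_node_key above (moved from the removed class-level helpers) prefers the optional hostname field of a CLUSTER NODES address over the IP. A standalone reproduction of the rule, with sample addresses in the documented <ip:port@cport[,hostname[,auxiliary_field=value]*]> format (the addresses themselves are made up):

    # Same parsing rule as the diff, reproduced outside the gem for illustration.
    def parse_node_key(node_address)
      ip_chunk, hostname, _auxiliaries = node_address.split(',')
      ip_port_string = ip_chunk.split('@').first
      return ip_port_string if hostname.nil? || hostname.empty?

      "#{hostname}:#{ip_port_string.split(':')[1]}"
    end

    parse_node_key('10.0.0.1:6379@16379')                  # => "10.0.0.1:6379"
    parse_node_key('10.0.0.1:6379@16379,cache-1.internal') # => "cache-1.internal:6379"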
@@ -31,6 +31,10 @@ class RedisClient
  def build_from_host_port(host, port)
  "#{host}#{DELIMITER}#{port}"
  end
+
+ def build_from_client(client)
+ "#{client.config.host}#{DELIMITER}#{client.config.port}"
+ end
  end
  end
  end
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ require 'redis_client'
+ require 'redis_client/cluster/key_slot_converter'
+ require 'redis_client/cluster/transaction'
+
+ class RedisClient
+   class Cluster
+     class OptimisticLocking
+       def initialize(keys, router)
+         @node = find_node!(keys, router)
+         @keys = keys
+       end
+
+       def watch
+         @node.with do |c|
+           c.call('WATCH', *@keys)
+           reply = yield(c)
+           c.call('UNWATCH')
+           reply
+         end
+       end
+
+       private
+
+       def find_node!(keys, router)
+         raise ::RedisClient::Cluster::Transaction::ConsistencyError, "unsafe watch: #{keys.join(' ')}" unless safe?(keys)
+
+         node_key = router.find_primary_node_key(['WATCH', *keys])
+         raise ::RedisClient::Cluster::Transaction::ConsistencyError, "couldn't determine the node" if node_key.nil?
+
+         router.find_node(node_key)
+       end
+
+       def safe?(keys)
+         return false if keys.empty?
+
+         slots = keys.map do |k|
+           return false if k.nil? || k.empty?
+
+           ::RedisClient::Cluster::KeySlotConverter.convert(k)
+         end
+
+         slots.uniq.size == 1
+       end
+     end
+   end
+ end
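
A hedged usage sketch for the new OptimisticLocking class above: it refuses key sets that do not hash to a single slot, WATCHes the keys on the owning primary, yields the raw connection, and UNWATCHes afterwards. The router object and the manual MULTI/EXEC below are assumptions for illustration; in the gem this is driven by its transaction support rather than called directly:

    # Hypothetical caller; `router` stands in for the gem's internal router.
    keys = ['{user1}:balance', '{user1}:events']  # hashtag keeps both keys in one slot
    locking = RedisClient::Cluster::OptimisticLocking.new(keys, router)
    locking.watch do |conn|
      balance = conn.call('GET', keys.first).to_i
      conn.call('MULTI')
      conn.call('SET', keys.first, balance - 10)
      conn.call('RPUSH', keys.last, 'debit 10')
      conn.call('EXEC')  # returns nil if a WATCHed key changed in the meantime
    end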
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ class RedisClient
+   class Cluster
+     class PinningNode
+       def initialize(client)
+         @client = client
+       end
+
+       def call(*args, **kwargs, &block)
+         @client.call(*args, **kwargs, &block)
+       end
+
+       def call_v(args, &block)
+         @client.call_v(args, &block)
+       end
+
+       def call_once(*args, **kwargs, &block)
+         @client.call_once(*args, **kwargs, &block)
+       end
+
+       def call_once_v(args, &block)
+         @client.call_once_v(args, &block)
+       end
+
+       def blocking_call(timeout, *args, **kwargs, &block)
+         @client.blocking_call(timeout, *args, **kwargs, &block)
+       end
+
+       def blocking_call_v(timeout, args, &block)
+         @client.blocking_call_v(timeout, args, &block)
+       end
+     end
+   end
+ end
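
PinningNode is a thin delegator: every call variant is forwarded to the single wrapped client, so a sequence of commands can be pinned to the node that owns a slot (for example during a transaction). A brief sketch, with `raw_client` standing in for a RedisClient instance obtained elsewhere:

    pinned = RedisClient::Cluster::PinningNode.new(raw_client)
    pinned.call('SET', '{cart:42}:items', '3')
    pinned.call_v(['GET', '{cart:42}:items'])
    pinned.blocking_call(1.0, 'BLPOP', '{cart:42}:queue', 0.5)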
@@ -50,28 +50,31 @@ class RedisClient
  end
 
  ::RedisClient::ConnectionMixin.module_eval do
- def call_pipelined_aware_of_redirection(commands, timeouts) # rubocop:disable Metrics/AbcSize
+ def call_pipelined_aware_of_redirection(commands, timeouts) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
  size = commands.size
  results = Array.new(commands.size)
  @pending_reads += size
  write_multi(commands)
 
  redirection_indices = nil
+ exception = nil
  size.times do |index|
  timeout = timeouts && timeouts[index]
  result = read(timeout)
  @pending_reads -= 1
- if result.is_a?(CommandError)
+ if result.is_a?(::RedisClient::Error)
  result._set_command(commands[index])
- if result.message.start_with?('MOVED', 'ASK')
+ if result.is_a?(::RedisClient::CommandError) && result.message.start_with?('MOVED', 'ASK')
  redirection_indices ||= []
  redirection_indices << index
+ else
+ exception ||= result
  end
  end
-
  results[index] = result
  end
 
+ raise exception if exception
  return results if redirection_indices.nil?
 
  err = ::RedisClient::Cluster::Pipeline::RedirectionNeeded.new
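
The pipeline change above widens the error check from CommandError to any RedisClient::Error: MOVED/ASK replies are still collected for redirection, while other error replies are remembered and raised only after every pending reply has been read, so the @pending_reads bookkeeping stays balanced. A toy, self-contained illustration of that drain-then-raise pattern (plain Ruby, no Redis):

    replies = ['OK', RuntimeError.new('unexpected reply'), 'OK']
    exception = nil
    results = replies.map do |reply|
      exception ||= reply if reply.is_a?(StandardError)  # remember the first error
      reply                                              # but keep consuming replies
    end

    begin
      raise exception if exception
    rescue RuntimeError => e
      puts "raised after draining all #{results.size} replies: #{e.message}"
    end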
@@ -158,15 +161,13 @@ class RedisClient
  end
  end
 
- all_replies = errors = nil
+ all_replies = errors = required_redirections = nil
 
  work_group.each do |node_key, v|
  case v
  when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
- all_replies ||= Array.new(@size)
- pipeline = @pipelines[node_key]
- v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
- pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+ required_redirections ||= {}
+ required_redirections[node_key] = v
  when StandardError
  errors ||= {}
  errors[node_key] = v
@@ -177,9 +178,15 @@ class RedisClient
  end
  end
 
  work_group.close
-
  raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?
 
+ required_redirections&.each do |node_key, v|
+ all_replies ||= Array.new(@size)
+ pipeline = @pipelines[node_key]
+ v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
+ pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+ end
+
  all_replies
  end
 
@@ -217,21 +224,15 @@ class RedisClient
 
  if err.message.start_with?('MOVED')
  node = @router.assign_redirection_node(err.message)
- try_redirection(node, pipeline, inner_index)
+ redirect_command(node, pipeline, inner_index)
  elsif err.message.start_with?('ASK')
  node = @router.assign_asking_node(err.message)
- try_asking(node) ? try_redirection(node, pipeline, inner_index) : err
+ try_asking(node) ? redirect_command(node, pipeline, inner_index) : err
  else
  err
  end
  end
 
- def try_redirection(node, pipeline, inner_index)
- redirect_command(node, pipeline, inner_index)
- rescue StandardError => e
- e
- end
-
  def redirect_command(node, pipeline, inner_index)
  method = pipeline.get_callee_method(inner_index)
  command = pipeline.get_command(inner_index)