redis-cluster-client 0.13.5 → 0.16.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/redis_client/cluster/concurrent_worker/pooled.rb +16 -2
- data/lib/redis_client/cluster/key_slot_converter.rb +1 -1
- data/lib/redis_client/cluster/node.rb +73 -48
- data/lib/redis_client/cluster/pub_sub.rb +21 -10
- data/lib/redis_client/cluster/router.rb +19 -33
- data/lib/redis_client/cluster.rb +20 -2
- data/lib/redis_client/cluster_config.rb +3 -4
- metadata +4 -4
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e90bf81ffcf5eb68e78018e950813bdb727274c17157088645e1d58d763252a9
+  data.tar.gz: d4185f0e3d0b7d5069a9e156c607d39d0d10bd348f602a13ab53a68fa716e9fe
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e30aa213ff6f7be6e70bcc2bc7329b8f0d9358f98169cea311c9e2afa66bf3fc28db5e8eb84776b09564ab7095d08c7744405065e7fbd2bf8fec483b4ddfba67
+  data.tar.gz: 5c8952e4eccce33fc59697e42bb135ecaaa25241e5491e1c854f9fb1f42b9ef03ae9044c83add3d21d4a2ac755d6d1d6f11eca1c7d6c41f47f7a31b15cc03d22
data/lib/redis_client/cluster/concurrent_worker/pooled.rb
CHANGED

@@ -10,6 +10,10 @@ class RedisClient
 # It is a fixed size but we can modify the size with some environment variables.
 # So it consumes memory 1 MB multiplied a number of workers.
 class Pooled
+  IO_ERROR_NEVER = { IOError => :never }.freeze
+  IO_ERROR_IMMEDIATE = { IOError => :immediate }.freeze
+  private_constant :IO_ERROR_NEVER, :IO_ERROR_IMMEDIATE
+
   def initialize(size:)
     raise ArgumentError, "size must be positive: #{size}" unless size.positive?

@@ -33,7 +37,9 @@ class RedisClient

   def close
     @q.clear
-    @workers.
+    workers = @workers.compact
+    workers.each(&:exit)
+    workers.each(&:join)
     @workers.clear
     @q.close
     @pid = nil

@@ -65,7 +71,15 @@ class RedisClient

   def spawn_worker
     Thread.new(@q) do |q|
-
+      Thread.handle_interrupt(IO_ERROR_NEVER) do
+        loop do
+          Thread.handle_interrupt(IO_ERROR_IMMEDIATE) do
+            q.pop.exec
+          end
+        end
+      end
+    rescue IOError
+      # stream closed in another thread
     end
   end
 end
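The interesting change here is the Thread.handle_interrupt sandwich: the worker body runs with IOError deferred (:never) and only the blocking dequeue runs with it enabled (:immediate), so a worker can be torn down while it waits for work but never in the middle of executing a job. Below is a minimal, self-contained sketch of that pattern; the queue, the fake job, and the timings are illustrative, not the gem's API.

queue = Queue.new

worker = Thread.new(queue) do |q|
  Thread.handle_interrupt(IOError => :never) do
    loop do
      # An asynchronous IOError may only be delivered while we block here.
      job = Thread.handle_interrupt(IOError => :immediate) { q.pop }
      job.call # never interrupted mid-job
    end
  end
rescue IOError
  # Delivered while waiting on the queue; exit the worker cleanly.
end

queue << -> { sleep 0.05; puts 'job finished' }
sleep 0.01
worker.raise(IOError, 'stream closed in another thread') # deferred until the job completes
worker.join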
data/lib/redis_client/cluster/node.rb
CHANGED

@@ -24,10 +24,12 @@ class RedisClient
 ROLE_FLAGS = %w[master slave].freeze
 EMPTY_ARRAY = [].freeze
 EMPTY_HASH = {}.freeze
-
+EMPTY_STRING = ''
+JITTER_WINDOW = (3_000_000...10_000_000).freeze # micro seconds

 private_constant :USE_CHAR_ARRAY_SLOT, :SLOT_SIZE, :MIN_SLOT, :MAX_SLOT,
-                 :DEAD_FLAGS, :ROLE_FLAGS, :EMPTY_ARRAY, :EMPTY_HASH
+                 :DEAD_FLAGS, :ROLE_FLAGS, :EMPTY_ARRAY, :EMPTY_HASH, :EMPTY_STRING,
+                 :JITTER_WINDOW

 ReloadNeeded = Class.new(::RedisClient::Cluster::Error)

@@ -46,7 +48,7 @@ class RedisClient
 end

 def serialize(str)
-  str << id << node_key << role << primary_id
+  str << id << node_key << role << primary_id
 end
 end

@@ -106,8 +108,7 @@ class RedisClient
 @topology = klass.new(pool, @concurrent_worker, **kwargs)
 @config = config
 @mutex = Mutex.new
-@
-@reload_times = 0
+@next_reload_time = nil
 @random = Random.new
 end
@@ -193,26 +194,22 @@ class RedisClient
 end

 def update_slot(slot, node_key)
-  return
+  return unless @mutex.try_lock

-  @
-
-
-
-
-
+  @slots[slot] = node_key
+rescue RangeError
+  @slots = Array.new(SLOT_SIZE) { |i| @slots[i] }
+  @slots[slot] = node_key
+ensure
+  @mutex.unlock if @mutex.owned?
 end

-def
+def try_reload!
   with_reload_lock do
-
-    @
-
-    [node_info.node_key, @config.client_config_for_node(node_info.node_key)]
+    with_reload_jitter do
+      with_startup_clients(@config.max_startup_sample) do |clients|
+        reload!(clients)
       end
-    @slots = build_slot_node_mappings(@node_info)
-    @replications = build_replication_mappings(@node_info)
-    @topology.process_topology_update!(@replications, @node_configs)
     end
   end
 end
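Both update_slot and the reload lock further down switch from blocking synchronization to Mutex#try_lock: whoever gets the lock does the work, and any thread that arrives while it is held simply skips instead of queueing up. A minimal, self-contained sketch of that single-flight shape; the class is invented for the example, only the locking pattern mirrors the diff.

class SingleFlight
  def initialize
    @mutex = Mutex.new
  end

  # Runs the block in at most one thread at a time; concurrent callers return false.
  def try_run
    return false unless @mutex.try_lock

    yield
    true
  ensure
    @mutex.unlock if @mutex.owned? # only the thread that took the lock releases it
  end
end

guard = SingleFlight.new
results = 5.times.map { Thread.new { guard.try_run { sleep 0.05 } } }.map(&:value)
puts results.count(true) # typically 1: the other threads skipped the work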
@@ -312,12 +309,11 @@ class RedisClient
 work_group.push(i, raw_client) do |client|
   regular_timeout = client.read_timeout
   client.read_timeout = @config.slow_command_timeout > 0.0 ? @config.slow_command_timeout : regular_timeout
-
-  client.read_timeout = regular_timeout
-  parse_cluster_node_reply(reply)
+  fetch_cluster_state(client)
 rescue StandardError => e
   e
 ensure
+  client.read_timeout = regular_timeout
   client&.close
 end
 end

@@ -347,6 +343,16 @@ class RedisClient
   grouped.max_by { |_, v| v.size }[1].first
 end

+def fetch_cluster_state(client)
+  reply = client.call_once('cluster', 'shards')
+  parse_cluster_shards_reply(reply)
+rescue ::RedisClient::CommandError => e
+  raise unless e.message.start_with?('ERR Unknown subcommand')
+
+  reply = client.call_once('cluster', 'nodes')
+  parse_cluster_node_reply(reply)
+end
+
 def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
   reply.each_line("\n", chomp: true).filter_map do |line|
     fields = line.split

@@ -390,16 +396,19 @@ class RedisClient
   id: id,
   node_key: NodeKey.build_from_host_port(ip, arr[1]),
   role: role,
-  primary_id: role == 'master' ?
+  primary_id: role == 'master' ? EMPTY_STRING : primary_id,
   slots: role == 'master' ? slots : EMPTY_ARRAY
 )
 end
-end
+end
 end

 def parse_cluster_shards_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
   reply.each_with_object([]) do |shard, acc|
+    resp2 = shard.is_a?(Array)
+    shard = shard.each_slice(2).to_h if resp2
     nodes = shard.fetch('nodes')
+    nodes = nodes.map { |n| n.each_slice(2).to_h } if resp2
     primary_id = nodes.find { |n| n.fetch('role') == 'master' }.fetch('id')

     nodes.each do |node|

@@ -411,18 +420,18 @@ class RedisClient
   id: node.fetch('id'),
   node_key: NodeKey.build_from_host_port(ip, node['port'] || node['tls-port']),
   role: role == 'master' ? role : 'slave',
-  primary_id: role == 'master' ?
+  primary_id: role == 'master' ? EMPTY_STRING : primary_id,
   slots: role == 'master' ? shard.fetch('slots').each_slice(2).to_a.freeze : EMPTY_ARRAY
 )
 end
-end
+end
 end

 # As redirection node_key is dependent on `cluster-preferred-endpoint-type` config,
 # node_key should use hostname if present in CLUSTER NODES output.
 #
 # See https://redis.io/commands/cluster-nodes/ for details on the output format.
-# node_address matches
+# node_address matches the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
 def parse_node_key(node_address)
   ip_chunk, hostname, _auxiliaries = node_address.split(',')
   ip_port_string = ip_chunk.split('@').first
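fetch_cluster_state now prefers CLUSTER SHARDS and falls back to CLUSTER NODES on servers that predate it, and the parser accepts both protocols: RESP3 returns each shard as a map, while RESP2 returns flat [key, value, key, value, ...] arrays that have to be folded into hashes with each_slice(2). A small sketch of that normalization; the sample reply is made up, only the folding mirrors the diff.

resp2_shard = [
  'slots', [0, 5460],
  'nodes', [
    ['id', 'abc123', 'port', 6379, 'ip', '10.0.0.1', 'role', 'master'],
    ['id', 'def456', 'port', 6379, 'ip', '10.0.0.2', 'role', 'replica']
  ]
]

resp2 = resp2_shard.is_a?(Array)
shard = resp2 ? resp2_shard.each_slice(2).to_h : resp2_shard
nodes = shard.fetch('nodes')
nodes = nodes.map { |n| n.each_slice(2).to_h } if resp2

primary_id = nodes.find { |n| n.fetch('role') == 'master' }.fetch('id')
puts primary_id             # => "abc123"
p shard.fetch('slots')      # => [0, 5460]
p nodes.map { |n| n['ip'] } # => ["10.0.0.1", "10.0.0.2"]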
@@ -432,6 +441,16 @@ class RedisClient
   "#{hostname}:#{port}"
 end

+def reload!(clients)
+  @node_info = refetch_node_info_list(clients)
+  @node_configs = @node_info.to_h do |node_info|
+    [node_info.node_key, @config.client_config_for_node(node_info.node_key)]
+  end
+  @slots = build_slot_node_mappings(@node_info)
+  @replications = build_replication_mappings(@node_info)
+  @topology.process_topology_update!(@replications, @node_configs)
+end
+
 def with_startup_clients(count) # rubocop:disable Metrics/AbcSize
   if @config.connect_with_original_config
     # If connect_with_original_config is set, that means we need to build actual client objects

@@ -457,35 +476,41 @@ class RedisClient
   end
 end

+def with_reload_jitter
+  return unless @next_reload_time.nil? || obtain_current_time >= @next_reload_time
+
+  yield
+
+  @next_reload_time = obtain_current_time + @random.rand(JITTER_WINDOW)
+end
+
 def with_reload_lock
-  # What should happen with concurrent calls #
+  # What should happen with concurrent calls #try_reload! This is a realistic possibility if the cluster goes into
   # a CLUSTERDOWN state, and we're using a pooled backend. Every thread will independently discover this, and
-  # call
-  # For now, if a reload is in progress, wait for that to complete, and
-  #
-  # Probably in the future we should add a circuit breaker to #
+  # call #try_reload!.
+  # For now, if a reload is in progress by a thread, the other threads do not wait for that to complete, and
+  # they throw an error.
+  # Probably in the future we should add a circuit breaker to #try_reload! itself, and stop trying if the cluster is
   # obviously not working.
-
-  @mutex.synchronize do
-    return if @last_reloaded_at && @last_reloaded_at > wait_start
-
-    if @last_reloaded_at && @reload_times > 1
-      # Mitigate load of servers by naive logic. Don't sleep with exponential backoff.
-      now = obtain_current_time
-      elapsed = @last_reloaded_at + @random.rand(STATE_REFRESH_INTERVAL) * 1_000_000
-      return if now < elapsed
-    end
+  return unless @mutex.try_lock

-
-
-
-    r
-  end
+  yield
+ensure
+  @mutex.unlock if @mutex.owned?
 end

 def obtain_current_time
   Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
 end
+
+def bypass_reload!
+  # DO NOT USE THIS METHOD
+  with_reload_lock do
+    with_startup_clients(@config.max_startup_sample) do |clients|
+      reload!(clients)
+    end
+  end
+end
 end
 end
 end
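The new with_reload_jitter throttle replaces the old last-reloaded-at bookkeeping: after a successful reload the next attempt is pushed out by a random 3-10 second window, so a fleet of clients that all notice a topology change at once don't refetch the cluster state in lockstep. A standalone sketch of that logic, using the same window as the diff; the class and method names here are illustrative.

class ReloadThrottle
  JITTER_WINDOW = (3_000_000...10_000_000).freeze # microseconds

  def initialize(random: Random.new)
    @random = random
    @next_reload_time = nil
  end

  def with_jitter
    return unless @next_reload_time.nil? || now >= @next_reload_time

    yield
    @next_reload_time = now + @random.rand(JITTER_WINDOW)
  end

  private

  def now
    Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
  end
end

throttle = ReloadThrottle.new
runs = 0
5.times { throttle.with_jitter { runs += 1 } }
puts runs # => 1; the four follow-up attempts fall inside the jitter window and are skipped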
data/lib/redis_client/cluster/pub_sub.rb
CHANGED

@@ -7,6 +7,10 @@ class RedisClient
 class Cluster
 class PubSub
 class State
+  IO_ERROR_NEVER = { IOError => :never }.freeze
+  IO_ERROR_IMMEDIATE = { IOError => :immediate }.freeze
+  private_constant :IO_ERROR_NEVER, :IO_ERROR_IMMEDIATE
+
   def initialize(client, queue)
     @client = client
     @worker = nil

@@ -22,7 +26,11 @@ class RedisClient
 end

 def close
-
+  if @worker&.alive?
+    @worker.exit
+    @worker.join
+  end
+
   @client.close
 rescue ::RedisClient::ConnectionError
   # ignore

@@ -35,21 +43,24 @@ class RedisClient
 # It is a fixed size but we can modify the size with some environment variables.
 # So it consumes memory 1 MB multiplied a number of workers.
 Thread.new(client, queue, nil) do |pubsub, q, prev_err|
-
-
-
-
-
-
-
-
+  Thread.handle_interrupt(IO_ERROR_NEVER) do
+    loop do
+      Thread.handle_interrupt(IO_ERROR_IMMEDIATE) { q << pubsub.next_event }
+      prev_err = nil
+    rescue StandardError => e
+      next sleep 0.005 if e.instance_of?(prev_err.class) && e.message == prev_err&.message
+
+      Thread.handle_interrupt(IO_ERROR_IMMEDIATE) { q << e }
+      prev_err = e
+    end
   end
+rescue IOError
+  # stream closed in another thread
 end
 end
 end

 BUF_SIZE = Integer(ENV.fetch('REDIS_CLIENT_PUBSUB_BUF_SIZE', 1024))
-
 private_constant :BUF_SIZE

 def initialize(router, command_builder)
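Besides the same IOError masking as the pooled worker, the pub/sub loop now de-duplicates consecutive identical errors: if the same exception class and message repeat back to back, it sleeps briefly instead of pushing another copy onto the queue. A self-contained sketch of that dedup-and-backoff loop; the fake event source is invented for the example, the rescue logic mirrors the diff.

queue = Queue.new
events = [IOError.new('boom'), IOError.new('boom'), 'message']

prev_err = nil
loop do
  event = events.shift
  break if event.nil?
  raise event if event.is_a?(Exception)

  queue << event
  prev_err = nil
rescue StandardError => e
  next sleep 0.005 if e.instance_of?(prev_err.class) && e.message == prev_err&.message

  queue << e
  prev_err = e
end

puts queue.size # => 2; the repeated "boom" was swallowed after a short back-off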
data/lib/redis_client/cluster/router.rb
CHANGED

@@ -17,7 +17,6 @@ class RedisClient
 class Router
   ZERO_CURSOR_FOR_SCAN = '0'
   TSF = ->(f, x) { f.nil? ? x : f.call(x) }.curry
-  Ractor.make_shareable(TSF) if Object.const_defined?(:Ractor, false) && Ractor.respond_to?(:make_shareable)
   DEDICATED_ACTIONS = lambda do # rubocop:disable Metrics/BlockLength
     action = Struct.new('RedisCommandRoutingAction', :method_name, :reply_transformer, keyword_init: true)
     pick_first = ->(reply) { reply.first } # rubocop:disable Style/SymbolProc

@@ -59,6 +58,7 @@ class RedisClient
   'bgsave' => all_node_first_action,
   'quit' => all_node_first_action,
   'save' => all_node_first_action,
+  'select' => all_node_first_action,
   'flushall' => primary_first_action,
   'flushdb' => primary_first_action,
   'readonly' => not_supported_action,

@@ -84,7 +84,7 @@ class RedisClient
   @pool = pool
   @client_kwargs = kwargs
   @node = ::RedisClient::Cluster::Node.new(concurrent_worker, config: config, pool: pool, **kwargs)
-  @node.
+  @node.try_reload!
   @command = ::RedisClient::Cluster::Command.load(@node.replica_clients.shuffle, slow_command_timeout: config.slow_command_timeout)
   @command_builder = @config.command_builder
 rescue ::RedisClient::Cluster::InitialSetupError => e

@@ -93,9 +93,8 @@ class RedisClient
 end

 def send_command(method, command, *args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
-  return assign_node_and_send_command(method, command, args, &block) unless DEDICATED_ACTIONS.key?(command.first)
-
   action = DEDICATED_ACTIONS[command.first]
+  return assign_node_and_send_command(method, command, args, &block) if action.nil?
   return send(action.method_name, method, command, args, &block) if action.reply_transformer.nil?

   reply = send(action.method_name, method, command, args)

@@ -167,8 +166,8 @@ class RedisClient
 rescue ::RedisClient::ConnectionError => e
   raise unless ::RedisClient::Cluster::ErrorIdentification.client_owns_error?(e, node)

-  retry_count -= 1
   renew_cluster_state
+  retry_count -= 1

   if retry_count >= 0
     # Find the node to use for this command - if this fails for some reason, though, re-use

@@ -180,7 +179,6 @@ class RedisClient
     retry
   end

-  retry if retry_count >= 0
   raise
 end

@@ -294,7 +292,7 @@ class RedisClient
 end

 def renew_cluster_state
-  @node.
+  @node.try_reload!
 rescue ::RedisClient::Cluster::InitialSetupError
   # ignore
 end

@@ -340,8 +338,8 @@ class RedisClient
   raise if retry_count <= 0
   raise if e.errors.values.none? { |err| err.message.include?('WAIT cannot be used with replica instances') }

-  retry_count -= 1
   renew_cluster_state
+  retry_count -= 1
   retry
 end

@@ -382,35 +380,23 @@ class RedisClient
 end

 def send_cluster_command(method, command, args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
-  if command[1].casecmp('
-
-
-
-
-
-
-
-  elsif command[1].casecmp('meet').zero?
-    fail_not_supported_command(method, command, args, &block)
-  elsif command[1].casecmp('replicate').zero?
-    fail_not_supported_command(method, command, args, &block)
-  elsif command[1].casecmp('reset').zero?
-    fail_not_supported_command(method, command, args, &block)
-  elsif command[1].casecmp('set-config-epoch').zero?
-    fail_not_supported_command(method, command, args, &block)
-  elsif command[1].casecmp('setslot').zero?
-    fail_not_supported_command(method, command, args, &block)
+  if command[1].casecmp('keyslot').zero? ||
+     command[1].casecmp('info').zero? ||
+     command[1].casecmp('nodes').zero? ||
+     command[1].casecmp('slots').zero? ||
+     command[1].casecmp('shards').zero? ||
+     command[1].casecmp('count-failure-reports').zero? ||
+     command[1].casecmp('slaves').zero?
+    assign_node(command).public_send(method, *args, command, &block)
   elsif command[1].casecmp('saveconfig').zero?
     @node.call_all(method, command, args).first.then(&TSF.call(block))
-  elsif command[1].casecmp('getkeysinslot').zero?
-    raise ArgumentError, command.join(' ') if command.size != 4
-
+  elsif command[1].casecmp('countkeysinslot').zero? || command[1].casecmp('getkeysinslot').zero?
     handle_node_reload_error do
-      node_key = @node.find_node_key_of_replica(command[2])
+      node_key = @node.find_node_key_of_replica(command[2]) || @node.any_replica_node_key
       @node.find_by(node_key).public_send(method, *args, command, &block)
     end
   else
-
+    fail_not_supported_command(method, command, args, &block)
   end
 end

@@ -464,7 +450,7 @@ class RedisClient
 end

 def send_multiple_keys_command(method, command, args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
-  # This implementation
+  # This implementation prioritizes performance over readability.
   cmd = command.first
   if cmd.casecmp('mget').zero?
     single_key_cmd = 'get'

@@ -514,8 +500,8 @@ class RedisClient
 rescue ::RedisClient::Cluster::Node::ReloadNeeded
   raise ::RedisClient::Cluster::NodeMightBeDown.new.with_config(@config) if retry_count <= 0

-  retry_count -= 1
   renew_cluster_state
+  retry_count -= 1
   retry
 end
 end
data/lib/redis_client/cluster.rb
CHANGED

@@ -130,8 +130,26 @@ class RedisClient
   ::RedisClient::Cluster::PubSub.new(router, @command_builder)
 end

-
-
+# Compatibility layer for RedisClient::Pooled
+def with(_options = nil)
+  yield self
+end
+alias then with
+
+# Compatibility layer for RedisClient::HashRing
+def node_for(_key)
+  self
+end
+
+# Compatibility layer for RedisClient::HashRing
+def nodes_for(*keys)
+  keys.flatten!
+  { self => keys }
+end
+
+# Compatibility layer for RedisClient::HashRing
+def nodes
+  [self].freeze
 end

 def close
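These additions make a single cluster client quack like the objects redis-client already hands around: a pool (#with / #then) and a hash ring (#node_for / #nodes_for / #nodes). A toy stand-in illustrating the duck type; the class below is invented for the example, only the method bodies mirror the diff.

class PoolLikeClusterClient
  def with(_options = nil)
    yield self
  end
  alias then with

  def node_for(_key)
    self
  end

  def nodes_for(*keys)
    keys.flatten!
    { self => keys }
  end

  def nodes
    [self].freeze
  end

  def call(*command)
    command.join(' ') # stand-in for a real Redis round trip
  end
end

client = PoolLikeClusterClient.new
client.with { |c| puts c.call('SET', 'foo', 'bar') } # pool-style checkout is a no-op here
p client.nodes_for('foo', 'bar')                     # => { client => ["foo", "bar"] }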
data/lib/redis_client/cluster_config.rb
CHANGED

@@ -15,16 +15,15 @@ class RedisClient
 DEFAULT_SCHEME = 'redis'
 SECURE_SCHEME = 'rediss'
 DEFAULT_NODE = "#{DEFAULT_SCHEME}://#{DEFAULT_HOST}:#{DEFAULT_PORT}"
-Ractor.make_shareable(DEFAULT_NODE) if Object.const_defined?(:Ractor, false) && Ractor.respond_to?(:make_shareable)
 DEFAULT_NODES = [DEFAULT_NODE].freeze
 VALID_SCHEMES = [DEFAULT_SCHEME, SECURE_SCHEME].freeze
 VALID_NODES_KEYS = %i[ssl username password host port db].freeze
-MERGE_CONFIG_KEYS = %i[ssl username password].freeze
+MERGE_CONFIG_KEYS = %i[ssl username password db].freeze
 IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
 MAX_WORKERS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', -1)) # for backward compatibility
-#
+# Used for slow commands that fetch metadata, e.g. CLUSTER NODES, COMMAND.
 SLOW_COMMAND_TIMEOUT = Float(ENV.fetch('REDIS_CLIENT_SLOW_COMMAND_TIMEOUT', -1))
-#
+# Controls the balance between startup load and stability during initialization or cluster state changes.
 MAX_STARTUP_SAMPLE = Integer(ENV.fetch('REDIS_CLIENT_MAX_STARTUP_SAMPLE', 3))

 private_constant :DEFAULT_HOST, :DEFAULT_PORT, :DEFAULT_SCHEME, :SECURE_SCHEME, :DEFAULT_NODES,
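MERGE_CONFIG_KEYS now includes :db. A rough illustration of what a "merge key" means here, with simplified stand-in logic (this is not the gem's code): options given at the client level are copied into every per-node config unless the node already specifies its own value.

MERGE_CONFIG_KEYS = %i[ssl username password db].freeze

def merge_generic_config(node_config, client_config)
  MERGE_CONFIG_KEYS.each_with_object(node_config.dup) do |key, acc|
    acc[key] = client_config[key] if !acc.key?(key) && client_config.key?(key)
  end
end

node   = { host: '10.0.0.1', port: 6379 }
client = { username: 'default', password: 's3cr3t', db: 1 }
p merge_generic_config(node, client)
# => {:host=>"10.0.0.1", :port=>6379, :username=>"default", :password=>"s3cr3t", :db=>1}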
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: redis-cluster-client
 version: !ruby/object:Gem::Version
-  version: 0.13.5
+  version: 0.16.1
 platform: ruby
 authors:
 - Taishi Kasuga

@@ -15,14 +15,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: '0.
+      version: '0.28'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-       version: '0.
+       version: '0.28'
 email:
 - proxy0721@gmail.com
 executables: []

@@ -74,7 +74,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version:
+rubygems_version: 4.0.6
 specification_version: 4
 summary: Redis cluster-aware client for Ruby
 test_files: []