redis-cluster-client 0.7.5 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/redis_client/cluster/command.rb +53 -17
- data/lib/redis_client/cluster/error_identification.rb +55 -0
- data/lib/redis_client/cluster/key_slot_converter.rb +18 -0
- data/lib/redis_client/cluster/node/base_topology.rb +60 -0
- data/lib/redis_client/cluster/node/latency_replica.rb +13 -17
- data/lib/redis_client/cluster/node/primary_only.rb +7 -19
- data/lib/redis_client/cluster/node/random_replica.rb +2 -4
- data/lib/redis_client/cluster/node/random_replica_or_primary.rb +2 -4
- data/lib/redis_client/cluster/node.rb +160 -107
- data/lib/redis_client/cluster/node_key.rb +4 -0
- data/lib/redis_client/cluster/optimistic_locking.rb +72 -0
- data/lib/redis_client/cluster/pinning_node.rb +35 -0
- data/lib/redis_client/cluster/pipeline.rb +22 -14
- data/lib/redis_client/cluster/router.rb +75 -80
- data/lib/redis_client/cluster/transaction.rb +140 -20
- data/lib/redis_client/cluster.rb +47 -4
- data/lib/redis_client/cluster_config.rb +30 -45
- metadata +9 -6
- data/lib/redis_client/cluster/node/replica_mixin.rb +0 -37
data/lib/redis_client/cluster/node.rb

@@ -22,10 +22,10 @@ class RedisClient
       SLOT_SIZE = 16_384
       MIN_SLOT = 0
       MAX_SLOT = SLOT_SIZE - 1
-      IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
       DEAD_FLAGS = %w[fail? fail handshake noaddr noflags].freeze
       ROLE_FLAGS = %w[master slave].freeze
       EMPTY_ARRAY = [].freeze
+      EMPTY_HASH = {}.freeze
 
       ReloadNeeded = Class.new(::RedisClient::Error)
 
@@ -92,119 +92,19 @@ class RedisClient
         end
       end
 
-      class << self
-        def load_info(options, concurrent_worker, slow_command_timeout: -1, **kwargs) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
-          raise ::RedisClient::Cluster::InitialSetupError, [] if options.nil? || options.empty?
-
-          startup_size = options.size > MAX_STARTUP_SAMPLE ? MAX_STARTUP_SAMPLE : options.size
-          startup_options = options.to_a.sample(startup_size).to_h
-          startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, concurrent_worker, **kwargs)
-          work_group = concurrent_worker.new_group(size: startup_size)
-
-          startup_nodes.each_with_index do |raw_client, i|
-            work_group.push(i, raw_client) do |client|
-              regular_timeout = client.read_timeout
-              client.read_timeout = slow_command_timeout > 0.0 ? slow_command_timeout : regular_timeout
-              reply = client.call('CLUSTER', 'NODES')
-              client.read_timeout = regular_timeout
-              parse_cluster_node_reply(reply)
-            rescue StandardError => e
-              e
-            ensure
-              client&.close
-            end
-          end
-
-          node_info_list = errors = nil
-
-          work_group.each do |i, v|
-            case v
-            when StandardError
-              errors ||= Array.new(startup_size)
-              errors[i] = v
-            else
-              node_info_list ||= Array.new(startup_size)
-              node_info_list[i] = v
-            end
-          end
-
-          work_group.close
-
-          raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
-
-          grouped = node_info_list.compact.group_by do |info_list|
-            info_list.sort_by!(&:id)
-            info_list.each_with_object(String.new(capacity: 128 * info_list.size)) do |e, a|
-              a << e.id << e.node_key << e.role << e.primary_id << e.config_epoch
-            end
-          end
-
-          grouped.max_by { |_, v| v.size }[1].first.freeze
-        end
-
-        private
-
-        # @see https://redis.io/commands/cluster-nodes/
-        # @see https://github.com/redis/redis/blob/78960ad57b8a5e6af743d789ed8fd767e37d42b8/src/cluster.c#L4660-L4683
-        def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
-          reply.each_line("\n", chomp: true).filter_map do |line|
-            fields = line.split
-            flags = fields[2].split(',')
-            next unless fields[7] == 'connected' && (flags & DEAD_FLAGS).empty?
-
-            slots = if fields[8].nil?
-                      EMPTY_ARRAY
-                    else
-                      fields[8..].reject { |str| str.start_with?('[') }
-                                 .map { |str| str.split('-').map { |s| Integer(s) } }
-                                 .map { |a| a.size == 1 ? a << a.first : a }
-                                 .map(&:sort)
-                    end
-
-            ::RedisClient::Cluster::Node::Info.new(
-              id: fields[0],
-              node_key: parse_node_key(fields[1]),
-              role: (flags & ROLE_FLAGS).first,
-              primary_id: fields[3],
-              ping_sent: fields[4],
-              pong_recv: fields[5],
-              config_epoch: fields[6],
-              link_state: fields[7],
-              slots: slots
-            )
-          end
-        end
-
-        # As redirection node_key is dependent on `cluster-preferred-endpoint-type` config,
-        # node_key should use hostname if present in CLUSTER NODES output.
-        #
-        # See https://redis.io/commands/cluster-nodes/ for details on the output format.
-        # node_address matches the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
-        def parse_node_key(node_address)
-          ip_chunk, hostname, _auxiliaries = node_address.split(',')
-          ip_port_string = ip_chunk.split('@').first
-          return ip_port_string if hostname.nil? || hostname.empty?
-
-          port = ip_port_string.split(':')[1]
-          "#{hostname}:#{port}"
-        end
-      end
-
       def initialize(
-        options,
         concurrent_worker,
-
-        with_replica: false,
-        replica_affinity: :random,
+        config:,
         pool: nil,
         **kwargs
       )
 
         @concurrent_worker = concurrent_worker
-        @slots = build_slot_node_mappings(
-        @replications = build_replication_mappings(
-        klass = make_topology_class(
-        @topology = klass.new(
+        @slots = build_slot_node_mappings(EMPTY_ARRAY)
+        @replications = build_replication_mappings(EMPTY_ARRAY)
+        klass = make_topology_class(config.use_replica?, config.replica_affinity)
+        @topology = klass.new(pool, @concurrent_worker, **kwargs)
+        @config = config
        @mutex = Mutex.new
       end
 
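Note on the constructor change: `Node.new` no longer receives the startup `options` hash and no longer performs discovery up front (the whole `class << self` block around `load_info` is replaced by instance methods further down). The slot and replication mappings start empty and are only populated by `#reload!`. A minimal sketch of the new call shape, assuming a `config` object that responds to `use_replica?`, `replica_affinity`, `startup_nodes`, `slow_command_timeout`, and `client_config_for_node` as the methods in this diff require (the wiring below is hypothetical, not the gem's documented public API):

    # Hypothetical wiring; in real use these objects come from the gem's
    # ClusterConfig and concurrent-worker factory.
    node = ::RedisClient::Cluster::Node.new(concurrent_worker, config: config, pool: nil)
    node.reload! # fetches CLUSTER NODES and fills @slots / @replications / @topology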
@@ -255,6 +155,14 @@ class RedisClient
         @topology.clients_for_scanning(seed: seed).values.sort_by { |c| "#{c.config.host}-#{c.config.port}" }
       end
 
+      def clients
+        @topology.clients.values
+      end
+
+      def primary_clients
+        @topology.primary_clients.values
+      end
+
       def replica_clients
         @topology.replica_clients.values
       end
@@ -292,6 +200,20 @@ class RedisClient
         end
       end
 
+      def reload!
+        with_reload_lock do
+          with_startup_clients(MAX_STARTUP_SAMPLE) do |startup_clients|
+            @node_info = refetch_node_info_list(startup_clients)
+            @node_configs = @node_info.to_h do |node_info|
+              [node_info.node_key, @config.client_config_for_node(node_info.node_key)]
+            end
+            @slots = build_slot_node_mappings(@node_info)
+            @replications = build_replication_mappings(@node_info)
+            @topology.process_topology_update!(@replications, @node_configs)
+          end
+        end
+      end
+
       private
 
       def make_topology_class(with_replica, replica_affinity)
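`reload!` samples up to MAX_STARTUP_SAMPLE startup clients, refetches CLUSTER NODES from each, and keeps the topology view reported by the most nodes (see `refetch_node_info_list` in the next hunk). The majority-vote step can be illustrated standalone; this sketch only mirrors the `group_by`/`max_by` idiom from the diff, it is not the gem's API:

    # Each element stands for one node's view of the cluster, reduced to a
    # signature string; the view shared by the most sampled nodes wins.
    views = [%w[a b c], %w[a b c], %w[a b d]]
    majority = views.group_by(&:join).max_by { |_, group| group.size }[1].first
    majority # => ["a", "b", "c"]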
@@ -378,6 +300,137 @@ class RedisClient
 
         [results, errors]
       end
+
+      def refetch_node_info_list(startup_clients) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+        startup_size = startup_clients.size
+        work_group = @concurrent_worker.new_group(size: startup_size)
+
+        startup_clients.each_with_index do |raw_client, i|
+          work_group.push(i, raw_client) do |client|
+            regular_timeout = client.read_timeout
+            client.read_timeout = @config.slow_command_timeout > 0.0 ? @config.slow_command_timeout : regular_timeout
+            reply = client.call('CLUSTER', 'NODES')
+            client.read_timeout = regular_timeout
+            parse_cluster_node_reply(reply)
+          rescue StandardError => e
+            e
+          ensure
+            client&.close
+          end
+        end
+
+        node_info_list = errors = nil
+
+        work_group.each do |i, v|
+          case v
+          when StandardError
+            errors ||= Array.new(startup_size)
+            errors[i] = v
+          else
+            node_info_list ||= Array.new(startup_size)
+            node_info_list[i] = v
+          end
+        end
+
+        work_group.close
+
+        raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
+
+        grouped = node_info_list.compact.group_by do |info_list|
+          info_list.sort_by!(&:id)
+          info_list.each_with_object(String.new(capacity: 128 * info_list.size)) do |e, a|
+            a << e.id << e.node_key << e.role << e.primary_id << e.config_epoch
+          end
+        end
+
+        grouped.max_by { |_, v| v.size }[1].first
+      end
+
+      def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+        reply.each_line("\n", chomp: true).filter_map do |line|
+          fields = line.split
+          flags = fields[2].split(',')
+          next unless fields[7] == 'connected' && (flags & DEAD_FLAGS).empty?
+
+          slots = if fields[8].nil?
+                    EMPTY_ARRAY
+                  else
+                    fields[8..].reject { |str| str.start_with?('[') }
+                               .map { |str| str.split('-').map { |s| Integer(s) } }
+                               .map { |a| a.size == 1 ? a << a.first : a }
+                               .map(&:sort)
+                  end
+
+          ::RedisClient::Cluster::Node::Info.new(
+            id: fields[0],
+            node_key: parse_node_key(fields[1]),
+            role: (flags & ROLE_FLAGS).first,
+            primary_id: fields[3],
+            ping_sent: fields[4],
+            pong_recv: fields[5],
+            config_epoch: fields[6],
+            link_state: fields[7],
+            slots: slots
+          )
+        end
+      end
+
+      # As redirection node_key is dependent on `cluster-preferred-endpoint-type` config,
+      # node_key should use hostname if present in CLUSTER NODES output.
+      #
+      # See https://redis.io/commands/cluster-nodes/ for details on the output format.
+      # node_address matches the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
+      def parse_node_key(node_address)
+        ip_chunk, hostname, _auxiliaries = node_address.split(',')
+        ip_port_string = ip_chunk.split('@').first
+        return ip_port_string if hostname.nil? || hostname.empty?
+
+        port = ip_port_string.split(':')[1]
+        "#{hostname}:#{port}"
+      end
+
+      def with_startup_clients(count) # rubocop:disable Metrics/AbcSize
+        if @config.connect_with_original_config
+          # If connect_with_original_config is set, that means we need to build actual client objects
+          # and close them, so that we e.g. re-resolve a DNS entry with the cluster nodes in it.
+          begin
+            # Memoize the startup clients, so we maintain RedisClient's internal circuit breaker configuration
+            # if it's set.
+            @startup_clients ||= @config.startup_nodes.values.sample(count).map do |node_config|
+              ::RedisClient::Cluster::Node::Config.new(**node_config).new_client
+            end
+            yield @startup_clients
+          ensure
+            # Close the startup clients when we're done, so we don't maintain pointless open connections to
+            # the cluster though
+            @startup_clients&.each(&:close)
+          end
+        else
+          # (re-)connect using nodes we already know about.
+          # If this is the first time we're connecting to the cluster, we need to seed the topology with the
+          # startup clients though.
+          @topology.process_topology_update!({}, @config.startup_nodes) if @topology.clients.empty?
+          yield @topology.clients.values.sample(count)
+        end
+      end
+
+      def with_reload_lock
+        # What should happen with concurrent calls #reload? This is a realistic possibility if the cluster goes into
+        # a CLUSTERDOWN state, and we're using a pooled backend. Every thread will independently discover this, and
+        # call reload!.
+        # For now, if a reload is in progress, wait for that to complete, and consider that the same as us having
+        # performed the reload.
+        # Probably in the future we should add a circuit breaker to #reload itself, and stop trying if the cluster is
+        # obviously not working.
+        wait_start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+        @mutex.synchronize do
+          return if @last_reloaded_at && @last_reloaded_at > wait_start
+
+          r = yield
+          @last_reloaded_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+          r
+        end
+      end
     end
   end
 end
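The hostname-preference rule in `parse_node_key` is easiest to see with concrete CLUSTER NODES address strings. A standalone re-implementation for illustration only (same logic as above; the helper name is hypothetical):

    def node_key_for(node_address)
      ip_chunk, hostname, _aux = node_address.split(',')
      ip_port = ip_chunk.split('@').first
      return ip_port if hostname.nil? || hostname.empty?

      # Keep the announced port, but prefer the hostname when one is present.
      "#{hostname}:#{ip_port.split(':')[1]}"
    end

    node_key_for('127.0.0.1:7001@17001')              # => "127.0.0.1:7001"
    node_key_for('127.0.0.1:7001@17001,redis-node-1') # => "redis-node-1:7001"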
data/lib/redis_client/cluster/optimistic_locking.rb

@@ -0,0 +1,72 @@
+# frozen_string_literal: true
+
+require 'redis_client'
+require 'redis_client/cluster/transaction'
+
+class RedisClient
+  class Cluster
+    class OptimisticLocking
+      def initialize(router)
+        @router = router
+        @asking = false
+      end
+
+      def watch(keys)
+        slot = find_slot(keys)
+        raise ::RedisClient::Cluster::Transaction::ConsistencyError, "unsafe watch: #{keys.join(' ')}" if slot.nil?
+
+        # We have not yet selected a node for this transaction, initially, which means we can handle
+        # redirections freely initially (i.e. for the first WATCH call)
+        node = @router.find_primary_node_by_slot(slot)
+        handle_redirection(node, retry_count: 1) do |nd|
+          nd.with do |c|
+            c.ensure_connected_cluster_scoped(retryable: false) do
+              c.call('ASKING') if @asking
+              c.call('WATCH', *keys)
+              begin
+                yield(c, slot, @asking)
+              rescue ::RedisClient::ConnectionError
+                # No need to unwatch on a connection error.
+                raise
+              rescue StandardError
+                c.call('UNWATCH')
+                raise
+              end
+            end
+          end
+        end
+      end
+
+      private
+
+      def handle_redirection(node, retry_count: 1, &blk)
+        @router.handle_redirection(node, retry_count: retry_count) do |nd|
+          handle_asking_once(nd, &blk)
+        end
+      end
+
+      def handle_asking_once(node)
+        yield node
+      rescue ::RedisClient::CommandError => e
+        raise unless ErrorIdentification.client_owns_error?(e, node)
+        raise unless e.message.start_with?('ASK')
+
+        node = @router.assign_asking_node(e.message)
+        @asking = true
+        yield node
+      ensure
+        @asking = false
+      end
+
+      def find_slot(keys)
+        return if keys.empty?
+        return if keys.any? { |k| k.nil? || k.empty? }
+
+        slots = keys.map { |k| @router.find_slot_by_key(k) }
+        return if slots.uniq.size != 1
+
+        slots.first
+      end
+    end
+  end
+end
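`OptimisticLocking#watch` resolves the keys to a single slot (all keys must hash to the same slot, hence the hash-tag requirement enforced by `find_slot`), issues WATCH on that slot's primary, and yields the pinned connection along with the slot and the ASKING state. A hedged usage sketch; the `router` wiring and the block body are hypothetical illustrations of the yield contract above, not documented usage:

    locking = ::RedisClient::Cluster::OptimisticLocking.new(router)
    locking.watch(%w[{user1}:cart {user1}:total]) do |connection, slot, asking|
      # Both keys share the {user1} hash tag, so one primary owns them.
      connection.call('MULTI')
      connection.call('INCR', '{user1}:total')
      connection.call('EXEC') # nil reply would mean a watched key changed
    end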
data/lib/redis_client/cluster/pinning_node.rb

@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+class RedisClient
+  class Cluster
+    class PinningNode
+      def initialize(client)
+        @client = client
+      end
+
+      def call(*args, **kwargs, &block)
+        @client.call(*args, **kwargs, &block)
+      end
+
+      def call_v(args, &block)
+        @client.call_v(args, &block)
+      end
+
+      def call_once(*args, **kwargs, &block)
+        @client.call_once(*args, **kwargs, &block)
+      end
+
+      def call_once_v(args, &block)
+        @client.call_once_v(args, &block)
+      end
+
+      def blocking_call(timeout, *args, **kwargs, &block)
+        @client.blocking_call(timeout, *args, **kwargs, &block)
+      end
+
+      def blocking_call_v(timeout, args, &block)
+        @client.blocking_call_v(timeout, args, &block)
+      end
+    end
+  end
+end
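`PinningNode` is a thin delegator exposing the RedisClient call surface (`call`, `call_v`, `call_once`, `blocking_call`, and variants) for exactly one underlying client, so code that pins work to a single node can treat that node like an ordinary client. A sketch, with `raw_client` assumed to be a connected `RedisClient` instance:

    pinned = ::RedisClient::Cluster::PinningNode.new(raw_client)
    pinned.call('SET', 'key', 'value')               # forwarded verbatim
    pinned.blocking_call(1.0, 'BLPOP', 'queue', 0.5) # timeout passed through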
data/lib/redis_client/cluster/pipeline.rb

@@ -12,7 +12,7 @@ class RedisClient
     class Extended < ::RedisClient::Pipeline
       attr_reader :outer_indices
 
-      def initialize(
+      def initialize(...)
         super
         @outer_indices = nil
       end
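`def initialize(...)` uses Ruby 2.7+ argument forwarding, so `Extended` no longer restates its superclass's parameter list; a bare `super` passes everything through. An illustrative sketch with hypothetical classes, not the gem's:

    class Base
      def initialize(name, retries: 3)
        @name = name
        @retries = retries
      end
    end

    class Child < Base
      def initialize(...) # forwards positionals, keywords, and block
        super
        @extra = nil
      end
    end

    Child.new('x', retries: 5) # all arguments reach Base#initialize unchanged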
@@ -50,28 +50,31 @@ class RedisClient
       end
 
       ::RedisClient::ConnectionMixin.module_eval do
-        def call_pipelined_aware_of_redirection(commands, timeouts) # rubocop:disable Metrics/AbcSize
+        def call_pipelined_aware_of_redirection(commands, timeouts, exception:) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
           size = commands.size
           results = Array.new(commands.size)
           @pending_reads += size
           write_multi(commands)
 
           redirection_indices = nil
+          first_exception = nil
           size.times do |index|
             timeout = timeouts && timeouts[index]
             result = read(timeout)
             @pending_reads -= 1
-            if result.is_a?(::RedisClient::CommandError)
+            if result.is_a?(::RedisClient::Error)
               result._set_command(commands[index])
-              if result.message.start_with?('MOVED', 'ASK')
+              if result.is_a?(::RedisClient::CommandError) && result.message.start_with?('MOVED', 'ASK')
                 redirection_indices ||= []
                 redirection_indices << index
+              elsif exception
+                first_exception ||= result
               end
             end
-
             results[index] = result
           end
 
+          raise first_exception if exception && first_exception
           return results if redirection_indices.nil?
 
           err = ::RedisClient::Cluster::Pipeline::RedirectionNeeded.new
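The new `exception:` flag propagates redis-client's `pipelined(exception:)` semantics into the cluster-aware pipeline: every pending reply is still drained (keeping `@pending_reads` balanced), MOVED/ASK redirections are still collected for retry, and only then is the first remaining error raised. A sketch of the two modes at the public API level, assuming a connected cluster client:

    client.pipelined(exception: false) do |pi|
      pi.call('SET', 'key', 'value')
      pi.call('NOSUCHCOMMAND')
    end
    # => ["OK", #<RedisClient::CommandError ...>]  errors returned in place

    client.pipelined(exception: true) do |pi|
      pi.call('NOSUCHCOMMAND')
    end
    # raises the first non-redirection error after all replies are read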
@@ -95,10 +98,11 @@ class RedisClient
         attr_accessor :replies, :indices
       end
 
-      def initialize(router, command_builder, concurrent_worker, seed: Random.new_seed)
+      def initialize(router, command_builder, concurrent_worker, exception:, seed: Random.new_seed)
         @router = router
         @command_builder = command_builder
         @concurrent_worker = concurrent_worker
+        @exception = exception
         @seed = seed
         @pipelines = nil
         @size = 0
@@ -158,15 +162,13 @@ class RedisClient
           end
         end
 
-        all_replies = errors = nil
+        all_replies = errors = required_redirections = nil
 
         work_group.each do |node_key, v|
           case v
           when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
-            all_replies ||= Array.new(@size)
-            pipeline = @pipelines[node_key]
-            v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
-            pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+            required_redirections ||= {}
+            required_redirections[node_key] = v
           when StandardError
             errors ||= {}
             errors[node_key] = v
@@ -177,9 +179,15 @@ class RedisClient
         end
 
         work_group.close
-
         raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?
 
+        required_redirections&.each do |node_key, v|
+          all_replies ||= Array.new(@size)
+          pipeline = @pipelines[node_key]
+          v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
+          pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+        end
+
         all_replies
       end
 
@@ -205,7 +213,7 @@ class RedisClient
         results = client.ensure_connected_cluster_scoped(retryable: pipeline._retryable?) do |connection|
           commands = pipeline._commands
           client.middlewares.call_pipelined(commands, client.config) do
-            connection.call_pipelined_aware_of_redirection(commands, pipeline._timeouts)
+            connection.call_pipelined_aware_of_redirection(commands, pipeline._timeouts, exception: @exception)
           end
         end
 
@@ -229,7 +237,7 @@ class RedisClient
       def try_redirection(node, pipeline, inner_index)
         redirect_command(node, pipeline, inner_index)
       rescue StandardError => e
-        e
+        @exception ? raise : e
       end
 
       def redirect_command(node, pipeline, inner_index)