redis-cluster-client 0.4.15 → 0.4.17

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: be213593746d1aceb880d03bf002812ca650fa0f76aa6e906f52f2650a0fb758
-  data.tar.gz: e5e9d541f024c595a2dac84db7f5a60a9df3b318ebcc20dfc60cbfa91b30cc8a
+  metadata.gz: bc35dc957459313cb97b78fae42fadb612788e8e65eb49449db24b66294057fa
+  data.tar.gz: 03cf3604892eb1a881123bdfeabaeac30816ed940a827c5390b2320b3e9e26f0
 SHA512:
-  metadata.gz: 6d2bbd083b59645a96e3b97096f80cbe22c0788213b9f1b69a21d661fdf88d9f20abf1fdd785ef5e26ee083f5ce42d232da73863bc7015cdc17f836f01440c20
-  data.tar.gz: c792f23f43397ff380f277e1c7b7d9cc54090f9cd6eb302c7aff8bacaacd31da98ba9a810991a566b1081cf7d96b889e608e9ad77239157b7c92ed1461fe2ebe
+  metadata.gz: 7f8de53c53938381fb88c53ac2f13e8b7a78a3ee5653493fe0496acebaf0da57b568fb8c4436f8d19a7ba84d55018d9e4786c3726af2586581d0e7daee7b383f
+  data.tar.gz: 7b65b9e12fd251ca8a6c278c4fb245fc08b114c9cbfac58f88e5aca81d8b9ac9aa4939ef9dbf2ee042f313764b3205a9490453b7a2310915338e50c2871e4c47
@@ -39,30 +39,27 @@ class RedisClient
 
         private
 
-        def measure_latencies(clients) # rubocop:disable Metrics/AbcSize
+        def measure_latencies(clients)
           clients.each_slice(::RedisClient::Cluster::Node::MAX_THREADS).each_with_object({}) do |chuncked_clients, acc|
-            threads = chuncked_clients.map do |k, v|
-              Thread.new(k, v) do |node_key, client|
-                Thread.current[:node_key] = node_key
-
-                min = DUMMY_LATENCY_MSEC
-                MEASURE_ATTEMPT_COUNT.times do
-                  starting = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
-                  client.call_once('PING')
-                  duration = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond) - starting
-                  min = duration if duration < min
-                end
-
-                Thread.current[:latency] = min
-              rescue StandardError
-                Thread.current[:latency] = DUMMY_LATENCY_MSEC
-              end
-            end
+            chuncked_clients
+              .map { |node_key, client| [node_key, build_thread_for_measuring_latency(client)] }
+              .each { |node_key, thread| acc[node_key] = thread.value }
+          end
+        end
 
-            threads.each do |t|
-              t.join
-              acc[t[:node_key]] = t[:latency]
+        def build_thread_for_measuring_latency(client)
+          Thread.new(client) do |cli|
+            min = DUMMY_LATENCY_MSEC
+            MEASURE_ATTEMPT_COUNT.times do
+              starting = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
+              cli.call_once('PING')
+              duration = Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond) - starting
+              min = duration if duration < min
             end
+
+            min
+          rescue StandardError
+            DUMMY_LATENCY_MSEC
          end
        end
 
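
Note: the hunk above swaps the old pattern of stashing results in thread-local variables (Thread.current[:latency]) and joining each thread by hand for Thread#value, which joins the thread and returns the last expression of its block. A standalone sketch of the idea, with illustrative names that are not part of the gem:

    # Each thread's block result becomes thread.value; reading it joins implicitly.
    threads = { 'node-a' => 1, 'node-b' => 2 }.map do |node_key, n|
      [node_key, Thread.new(n) { |i| i * 10 }]
    end

    latencies = threads.to_h { |node_key, thread| [node_key, thread.value] }
    # => {"node-a"=>10, "node-b"=>20}
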
@@ -24,12 +24,12 @@ class RedisClient
         private
 
         def build_clients(primary_node_keys, options, pool, **kwargs)
-          options.filter_map do |node_key, option|
+          options.to_h do |node_key, option|
             option = option.merge(kwargs.reject { |k, _| ::RedisClient::Cluster::Node::IGNORE_GENERIC_CONFIG_KEYS.include?(k) })
             config = ::RedisClient::Cluster::Node::Config.new(scale_read: !primary_node_keys.include?(node_key), **option)
             client = pool.nil? ? config.new_client : config.new_pool(**pool)
             [node_key, client]
-          end.to_h
+          end
         end
       end
     end
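
Note: Hash#to_h with a block (Ruby 2.6+) builds the hash directly from [key, value] pairs, so the previous filter_map ... end.to_h detour through an intermediate array is no longer needed; the block always returned a pair, so nothing was actually being filtered. A minimal sketch with made-up data:

    options = { 'node-a' => { db: 0 }, 'node-b' => { db: 1 } }

    # Before: build an array of pairs, then convert it.
    clients = options.map { |key, opt| [key, opt.merge(timeout: 5)] }.to_h

    # After: to_h with a block skips the intermediate array.
    clients = options.to_h { |key, opt| [key, opt.merge(timeout: 5)] }
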
@@ -94,28 +94,19 @@ class RedisClient
           startup_options = options.to_a.sample(MAX_STARTUP_SAMPLE).to_h
           startup_nodes = ::RedisClient::Cluster::Node.new(startup_options, **kwargs)
           startup_nodes.each_slice(MAX_THREADS).with_index do |chuncked_startup_nodes, chuncked_idx|
-            threads = chuncked_startup_nodes.each_with_index.map do |raw_client, idx|
-              Thread.new(raw_client, (MAX_THREADS * chuncked_idx) + idx) do |cli, i|
-                Thread.current[:index] = i
-                reply = cli.call('CLUSTER', 'NODES')
-                Thread.current[:info] = parse_cluster_node_reply(reply)
-              rescue StandardError => e
-                Thread.current[:error] = e
-              ensure
-                cli&.close
+            chuncked_startup_nodes
+              .each_with_index
+              .map { |raw_client, idx| [(MAX_THREADS * chuncked_idx) + idx, build_thread_for_cluster_node(raw_client)] }
+              .each do |i, t|
+                case v = t.value
+                when StandardError
+                  errors ||= Array.new(startup_size)
+                  errors[i] = v
+                else
+                  node_info_list ||= Array.new(startup_size)
+                  node_info_list[i] = v
+                end
               end
-            end
-
-            threads.each do |t|
-              t.join
-              if t.key?(:info)
-                node_info_list ||= Array.new(startup_size)
-                node_info_list[t[:index]] = t[:info]
-              elsif t.key?(:error)
-                errors ||= Array.new(startup_size)
-                errors[t[:index]] = t[:error]
-              end
-            end
           end
 
           raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?
@@ -132,6 +123,17 @@ class RedisClient
 
         private
 
+        def build_thread_for_cluster_node(raw_client)
+          Thread.new(raw_client) do |client|
+            reply = client.call('CLUSTER', 'NODES')
+            parse_cluster_node_reply(reply)
+          rescue StandardError => e
+            e
+          ensure
+            client&.close
+          end
+        end
+
         # @see https://redis.io/commands/cluster-nodes/
         # @see https://github.com/redis/redis/blob/78960ad57b8a5e6af743d789ed8fd767e37d42b8/src/cluster.c#L4660-L4683
         def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
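
Note: in the rewritten startup path each worker thread rescues StandardError and returns the exception object itself, so Thread#value never raises and the caller can branch on the value's class (case ... when StandardError). This relies on a normal reply never being an exception instance. A standalone sketch, not the gem's code:

    # A worker returns either its result or the exception it rescued.
    def build_worker(should_fail)
      Thread.new(should_fail) do |fail_flag|
        raise 'boom' if fail_flag

        { role: 'master' } # normal result
      rescue StandardError => e
        e # the exception becomes thread.value instead of being re-raised
      end
    end

    [build_worker(false), build_worker(true)].each do |t|
      case v = t.value
      when StandardError then puts "failed: #{v.message}"
      else puts "ok: #{v.inspect}"
      end
    end
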
@@ -331,33 +333,33 @@ class RedisClient
        raise ::RedisClient::Cluster::ErrorCollection, errors
      end
 
-      def try_map(clients) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+      def try_map(clients, &block)
        results = errors = nil
        clients.each_slice(MAX_THREADS) do |chuncked_clients|
-          threads = chuncked_clients.map do |k, v|
-            Thread.new(k, v) do |node_key, client|
-              Thread.current[:node_key] = node_key
-              reply = yield(node_key, client)
-              Thread.current[:result] = reply
-            rescue StandardError => e
-              Thread.current[:error] = e
-            end
-          end
-
-          threads.each do |t|
-            t.join
-            if t.key?(:result)
-              results ||= {}
-              results[t[:node_key]] = t[:result]
-            elsif t.key?(:error)
-              errors ||= {}
-              errors[t[:node_key]] = t[:error]
+          chuncked_clients
+            .map { |node_key, client| [node_key, build_thread_for_command(node_key, client, &block)] }
+            .each do |node_key, thread|
+              case v = thread.value
+              when StandardError
+                errors ||= {}
+                errors[node_key] = v
+              else
+                results ||= {}
+                results[node_key] = v
+              end
            end
-          end
        end
 
        [results, errors]
      end
+
+      def build_thread_for_command(node_key, client)
+        Thread.new(node_key, client) do |nk, cli|
+          yield(nk, cli)
+        rescue StandardError => e
+          e
+        end
+      end
    end
  end
end
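
Note: try_map now captures its block explicitly (&block) and forwards it to the build_thread_for_command helper; yield inside the Thread.new block still calls the block passed to the enclosing method. A standalone sketch of that forwarding, with illustrative names:

    # Forward a caller's block through a helper into a spawned thread.
    def run_all(items, &block)
      items.map { |item| spawn_worker(item, &block) }.map(&:value)
    end

    def spawn_worker(item)
      Thread.new(item) do |i|
        yield(i) # yields to the block forwarded into spawn_worker
      rescue StandardError => e
        e
      end
    end

    p run_all([1, 2, 3]) { |i| i * i } # => [1, 4, 9]
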
@@ -148,38 +148,23 @@ class RedisClient
       def execute # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
         all_replies = errors = nil
         @pipelines&.each_slice(MAX_THREADS) do |chuncked_pipelines|
-          threads = chuncked_pipelines.map do |node_key, pipeline|
-            Thread.new(node_key, pipeline) do |nk, pl|
-              Thread.current[:node_key] = nk
-              replies = do_pipelining(@router.find_node(nk), pl)
-              raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
-
-              Thread.current[:replies] = replies
-            rescue ::RedisClient::Cluster::Pipeline::RedirectionNeeded => e
-              Thread.current[:redirection_needed] = e
-            rescue StandardError => e
-              Thread.current[:error] = e
-            end
-          end
-
-          threads.each(&:join)
-          threads.each do |t|
-            if t.key?(:replies)
-              all_replies ||= Array.new(@size)
-              @pipelines[t[:node_key]]
-                .outer_indices
-                .each_with_index { |outer, inner| all_replies[outer] = t[:replies][inner] }
-            elsif t.key?(:redirection_needed)
-              all_replies ||= Array.new(@size)
-              pipeline = @pipelines[t[:node_key]]
-              err = t[:redirection_needed]
-              err.indices.each { |i| err.replies[i] = handle_redirection(err.replies[i], pipeline, i) }
-              pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = err.replies[inner] }
-            elsif t.key?(:error)
-              errors ||= {}
-              errors[t[:node_key]] = t[:error]
+          chuncked_pipelines
+            .map { |node_key, pipeline| [node_key, build_thread_for_pipeline(@router, node_key, pipeline)] }
+            .each do |node_key, thread|
+              case v = thread.value
+              when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
+                all_replies ||= Array.new(@size)
+                pipeline = @pipelines[node_key]
+                v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
+                pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
+              when StandardError
+                errors ||= {}
+                errors[node_key] = v
+              else
+                all_replies ||= Array.new(@size)
+                @pipelines[node_key].outer_indices.each_with_index { |outer, inner| all_replies[outer] = v[inner] }
+              end
             end
-          end
         end
 
         raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?
@@ -197,6 +182,17 @@ class RedisClient
         @pipelines[node_key]
       end
 
+      def build_thread_for_pipeline(router, node_key, pipeline)
+        Thread.new(router, node_key, pipeline) do |rt, nk, pl|
+          replies = do_pipelining(rt.find_node(nk), pl)
+          raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size
+
+          replies
+        rescue StandardError => e
+          e
+        end
+      end
+
       def do_pipelining(client, pipeline)
         case client
         when ::RedisClient then send_pipeline(client, pipeline)
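
Note: build_thread_for_pipeline receives the router as an explicit Thread.new argument instead of reading @router inside the thread, so the block only works on its own parameters. A standalone sketch of passing dependencies into a thread this way (Router here is made up for illustration):

    Router = Struct.new(:prefix) do
      def find_node(node_key)
        "#{prefix}-#{node_key}"
      end
    end

    thread = Thread.new(Router.new('conn'), 'node-a') do |rt, node_key|
      rt.find_node(node_key) # no instance variables captured from outside
    end

    p thread.value # => "conn-node-a"
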
@@ -24,7 +24,7 @@ class RedisClient
           @worker = subscribe(@client, timeout) if @worker.nil?
           return if @worker.alive?
 
-          message = @worker[:reply]
+          message = @worker.value
           @worker = nil
           message
         end
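
Note: Thread#value waits for the thread to finish before returning its result, so the `return if @worker.alive?` guard above keeps this read non-blocking in practice. A minimal standalone sketch:

    worker = Thread.new { sleep 0.1; :payload }

    worker.alive? # => true while the block is still running
    worker.value  # blocks until the thread finishes, then => :payload
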
@@ -33,9 +33,9 @@ class RedisClient
 
         def subscribe(client, timeout)
           Thread.new(client, timeout) do |pubsub, to|
-            Thread.current[:reply] = pubsub.next_event(to)
+            pubsub.next_event(to)
           rescue StandardError => e
-            Thread.current[:reply] = e
+            e
           end
         end
       end
@@ -64,12 +64,12 @@ class RedisClient
       def next_event(timeout = nil)
         return if @state_list.empty?
 
+        @state_list.shuffle!
         max_duration = calc_max_duration(timeout)
         starting = obtain_current_time
         loop do
           break if max_duration > 0 && obtain_current_time - starting > max_duration
 
-          @state_list.shuffle!
           @state_list.each do |pubsub|
             message = pubsub.take_message(timeout)
             return message if message
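
Note: @state_list.shuffle! now runs once before the polling loop instead of on every pass, so the polling order is still randomized per call without reshuffling inside a hot loop. Roughly:

    states = %i[a b c]
    states.shuffle! # randomize the polling order once per call

    3.times do
      states.each { |s| puts "polling #{s}" } # same order reused on every pass
    end
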
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: redis-cluster-client
 version: !ruby/object:Gem::Version
-  version: 0.4.15
+  version: 0.4.17
 platform: ruby
 authors:
 - Taishi Kasuga
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-08-14 00:00:00.000000000 Z
+date: 2023-09-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis-client
@@ -69,7 +69,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.4.13
+rubygems_version: 3.4.19
 signing_key:
 specification_version: 4
 summary: A Redis cluster client for Ruby