yugabytedb-ysql 0.5 → 0.6
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/README.md +9 -3
- data/lib/ysql/load_balance_service.rb +93 -17
- data/lib/ysql/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 57d0c470a8531967faa0a95cae6b3ab3f9ef67df2da933883036741bf0f0fbbf
|
4
|
+
data.tar.gz: fd221b19bd4493f44dfdbd3c0f256ea5c203b50a334696cf5cef2de9fb3a0d87
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: da29a7ac8db9a3b4153cb2f14cc743f60927c6b3598ab736daa93afa7daa23e0d32260736e45fcad0b522c4e9816972f1769be342a40a66409f3b30c70e1c583
|
7
|
+
data.tar.gz: 2d24cf62881403505a94cf17e80e85bdb274eebf3fa08424467edb9fad37fb6d553372273810da6469ce2c5f44011299e95f53d56c5da66e85b7f2854271e5e2
|
data/README.md
CHANGED
@@ -16,7 +16,13 @@ This is similar to 'Cluster Awareness' but uses those servers which are part of
|
|
16
16
|
|
17
17
|
### Connection Properties added for load balancing
|
18
18
|
|
19
|
-
- _load_balance_
|
19
|
+
- _load_balance_ - Starting with version 0.6, it expects one of **false, any (same as true), only-primary, only-rr, prefer-primary and prefer-rr** as its possible values. The default value for _load_balance_ property is `false`.
|
20
|
+
- _false_ - No connection load balancing. Behaviour is similar to vanilla ruby-pg driver
|
21
|
+
- _any_ - Same as value _true_. Distribute connections equally across all nodes in the cluster, irrespective of its type (`primary` or `read-replica`)
|
22
|
+
- _only-primary_ - Create connections equally across only the primary nodes of the cluster
|
23
|
+
- _only-rr_ - Create connections equally across only the read-replica nodes of the cluster
|
24
|
+
- _prefer-primary_ - Create connections equally across primary cluster nodes. If none available, on any available read replica node in the cluster
|
25
|
+
- _prefer-rr_ - Create connections equally across read replica nodes of the cluster. If none available, on any available primary cluster node
|
20
26
|
- _topology_keys_ - It takes a comma separated geo-location values. A single geo-location can be given as 'cloud.region.zone'. Multiple geo-locations too can be specified, separated by comma (`,`). Optionally, you can also register your preference for particular geo-locations by appending the preference value with prefix `:`. For example, `cloud.regionA.zoneA:1,cloud.regionA.zoneB:2`.
|
21
27
|
- _yb_servers_refresh_interval_ - Minimum time interval, in seconds, between two attempts to refresh the information about cluster nodes. This is checked only when a new connection is requested. Default is 300. Valid values are integers between 0 and 600. Value 0 means refresh for each connection request. Any value outside this range is ignored and the default is used.
|
22
28
|
- _fallback_to_topology_keys_only_ - When set to true, the driver does not attempt to connect to nodes outside of the geo-locations specified via _topology_keys_. Default value is false.
|
@@ -83,9 +89,9 @@ The driver attempts connection to servers in the first fallback placement(s) if
|
|
83
89
|
then it attempts to connect to servers in the second fallback placement(s), if specified. This continues until the driver finds a server to connect to, else an error is returned to the application.
|
84
90
|
And this repeats for each connection request.
|
85
91
|
|
86
|
-
###
|
92
|
+
### Using with ActiveRecord
|
87
93
|
|
88
|
-
- The load balancing feature of the Ruby Smart driver for YugabyteDB
|
94
|
+
- The load balancing feature of the Ruby Smart driver for YugabyteDB can be used with ActiveRecord - the ORM tool for Ruby apps - via its [adapter for YugabyteDB](https://github.com/yugabyte/activerecord-yugabytedb-adapter).
|
89
95
|
|
90
96
|
Rest of the README is from upstream repository.
|
91
97
|
|
@@ -4,8 +4,16 @@ require 'concurrent'
|
|
4
4
|
|
5
5
|
class YSQL::LoadBalanceService
|
6
6
|
|
7
|
-
|
8
|
-
|
7
|
+
class << self
|
8
|
+
attr_accessor :logger
|
9
|
+
end
|
10
|
+
|
11
|
+
# Set up a default logger
|
12
|
+
self.logger = Logger.new(STDOUT)
|
13
|
+
self.logger.level = Logger::WARN
|
14
|
+
|
15
|
+
LBProperties = Struct.new(:lb_value, :placements_info, :refresh_interval, :fallback_to_tk_only, :failed_host_reconnect_delay)
|
16
|
+
Node = Struct.new(:host, :port, :cloud, :region, :zone, :public_ip, :count, :is_down, :down_since, :node_type)
|
9
17
|
CloudPlacement = Struct.new(:cloud, :region, :zone)
|
10
18
|
@@mutex = Concurrent::ReentrantReadWriteLock.new
|
11
19
|
@@last_refresh_time = -1
|
@@ -27,6 +35,7 @@ class YSQL::LoadBalanceService
|
|
27
35
|
info = @@cluster_info[host]
|
28
36
|
unless info.nil?
|
29
37
|
info.count -= 1
|
38
|
+
logger.debug "decrement_connection_count(): count for #{host} updated to #{info.count}"
|
30
39
|
if info.count < 0
|
31
40
|
# Can go negative if we are here because of a connection that was created in a non-LB fashion
|
32
41
|
info.count = 0
|
@@ -40,6 +49,7 @@ class YSQL::LoadBalanceService
|
|
40
49
|
end
|
41
50
|
|
42
51
|
def self.connect_to_lb_hosts(lb_props, iopts)
|
52
|
+
logger.debug "connect_to_lb_hosts(): lb_props = #{lb_props}"
|
43
53
|
refresh_done = false
|
44
54
|
@@mutex.acquire_write_lock
|
45
55
|
begin
|
@@ -48,16 +58,21 @@ class YSQL::LoadBalanceService
|
|
48
58
|
if @@control_connection == nil
|
49
59
|
begin
|
50
60
|
@@control_connection = create_control_connection(iopts)
|
61
|
+
logger.debug "connect_to_lb_hosts(): created control connection to #{@@control_connection.host}"
|
51
62
|
rescue
|
52
63
|
return nil
|
53
64
|
end
|
54
65
|
end
|
55
66
|
begin
|
56
67
|
refresh_yb_servers(lb_props.failed_host_reconnect_delay, @@control_connection)
|
68
|
+
logger.debug "connect_to_lb_hosts(): refreshed yb_servers metadata"
|
57
69
|
refresh_done = true
|
58
70
|
rescue => err
|
59
71
|
if iopts[:host] == @@control_connection.host
|
60
72
|
if @@cluster_info[iopts[:host]]
|
73
|
+
if @@cluster_info[iopts[:host]].is_down
|
74
|
+
logger.debug "connect_to_lb_hosts(): Marking #{@@control_connection.host} as DOWN"
|
75
|
+
end
|
61
76
|
@@cluster_info[iopts[:host]].is_down = true
|
62
77
|
@@cluster_info[iopts[:host]].down_since = Time.now.to_i
|
63
78
|
end
|
@@ -73,6 +88,7 @@ class YSQL::LoadBalanceService
|
|
73
88
|
end
|
74
89
|
end
|
75
90
|
@@control_connection = create_control_connection(iopts)
|
91
|
+
logger.debug "connect_to_lb_hosts(): created control connection to #{@@control_connection.host} in rescue"
|
76
92
|
end
|
77
93
|
end
|
78
94
|
end
|
@@ -81,20 +97,43 @@ class YSQL::LoadBalanceService
|
|
81
97
|
end
|
82
98
|
success = false
|
83
99
|
new_request = true
|
100
|
+
strict_preference = true
|
84
101
|
placement_index = 1
|
85
102
|
until success
|
86
103
|
@@mutex.acquire_write_lock
|
87
104
|
begin
|
88
|
-
|
105
|
+
if strict_preference
|
106
|
+
host_port = get_least_loaded_server(lb_props.placements_info, lb_props.fallback_to_tk_only, new_request, placement_index, lb_props.lb_value, strict_preference)
|
107
|
+
else
|
108
|
+
host_port = get_least_loaded_server(nil, lb_props.fallback_to_tk_only, new_request, placement_index, lb_props.lb_value, strict_preference)
|
109
|
+
end
|
89
110
|
new_request = false
|
90
111
|
ensure
|
91
112
|
@@mutex.release_write_lock
|
92
113
|
end
|
93
114
|
unless host_port
|
94
|
-
|
115
|
+
if (lb_props.lb_value == "only-primary" || lb_props.lb_value == "only-rr" )
|
116
|
+
raise(YSQL::Error, "No node found for load_balance=#{lb_props.lb_value}")
|
117
|
+
elsif strict_preference && (lb_props.lb_value == "prefer-primary" || lb_props.lb_value == "prefer-rr")
|
118
|
+
@@mutex.acquire_write_lock
|
119
|
+
begin
|
120
|
+
host_port = get_least_loaded_server(nil, lb_props.fallback_to_tk_only, new_request, placement_index, lb_props.lb_value, strict_preference)
|
121
|
+
ensure
|
122
|
+
@@mutex.release_write_lock
|
123
|
+
end
|
124
|
+
unless host_port
|
125
|
+
strict_preference = false
|
126
|
+
placement_index = 1
|
127
|
+
next
|
128
|
+
end
|
129
|
+
else
|
130
|
+
logger.debug "connect_to_lb_hosts(): lb_host not found for load_balance=#{lb_props.lb_value}"
|
131
|
+
break
|
132
|
+
end
|
95
133
|
end
|
96
134
|
lb_host = host_port[0]
|
97
135
|
lb_port = host_port[1]
|
136
|
+
logger.debug "connect_to_lb_hosts(): lb_host #{lb_host}"
|
98
137
|
placement_index = host_port[2]
|
99
138
|
if lb_host.empty?
|
100
139
|
break
|
@@ -109,6 +148,9 @@ class YSQL::LoadBalanceService
|
|
109
148
|
rescue => e
|
110
149
|
@@mutex.acquire_write_lock
|
111
150
|
begin
|
151
|
+
if @@cluster_info[lb_host].is_down
|
152
|
+
logger.debug "connect_to_lb_hosts(): Marking #{lb_host} as DOWN"
|
153
|
+
end
|
112
154
|
@@cluster_info[lb_host].is_down = true
|
113
155
|
@@cluster_info[lb_host].down_since = Time.now.to_i
|
114
156
|
@@cluster_info[lb_host].count -= 1
|
@@ -133,6 +175,9 @@ class YSQL::LoadBalanceService
|
|
133
175
|
success = true
|
134
176
|
rescue => e
|
135
177
|
if @@cluster_info[iopts[:host]]
|
178
|
+
if @@cluster_info[iopts[:host]].is_down
|
179
|
+
logger.debug "create_control_connection(): Marking #{iopts[:host]} as DOWN"
|
180
|
+
end
|
136
181
|
@@cluster_info[iopts[:host]].is_down = true
|
137
182
|
@@cluster_info[iopts[:host]].down_since = Time.now.to_i
|
138
183
|
end
|
@@ -161,6 +206,7 @@ class YSQL::LoadBalanceService
|
|
161
206
|
region = row['region']
|
162
207
|
zone = row['zone']
|
163
208
|
public_ip = row['public_ip']
|
209
|
+
node_type = row['node_type']
|
164
210
|
public_ip = resolve_host(public_ip)[0][0] if public_ip
|
165
211
|
if not public_ip.nil? and not public_ip.empty?
|
166
212
|
found_public_ip = true
|
@@ -179,12 +225,15 @@ class YSQL::LoadBalanceService
|
|
179
225
|
if old
|
180
226
|
if old.is_down
|
181
227
|
if Time.now.to_i - old.down_since > failed_host_reconnect_delay_secs
|
228
|
+
unless old.is_down
|
229
|
+
logger.debug "refresh_yb_servers(): Marking #{host} as UP"
|
230
|
+
end
|
182
231
|
old.is_down = false
|
183
232
|
end
|
184
233
|
@@cluster_info[host] = old
|
185
234
|
end
|
186
235
|
else
|
187
|
-
node = Node.new(host, port, cloud, region, zone, public_ip, 0, false, 0)
|
236
|
+
node = Node.new(host, port, cloud, region, zone, public_ip, 0, false, 0, node_type)
|
188
237
|
@@cluster_info[host] = node
|
189
238
|
end
|
190
239
|
end
|
@@ -196,21 +245,37 @@ class YSQL::LoadBalanceService
|
|
196
245
|
@@last_refresh_time = Time.now.to_i
|
197
246
|
end
|
198
247
|
|
199
|
-
def self.
|
248
|
+
def self.is_node_type_acceptable(node_type, lb_value, strict_preference)
|
249
|
+
case lb_value
|
250
|
+
when "true", "any"
|
251
|
+
true
|
252
|
+
when "only-primary"
|
253
|
+
node_type == "primary"
|
254
|
+
when "only-rr"
|
255
|
+
node_type == "read_replica"
|
256
|
+
when "prefer-primary"
|
257
|
+
node_type == "primary" || (!strict_preference && node_type == "read_replica")
|
258
|
+
when "prefer-rr"
|
259
|
+
node_type == "read_replica" || (!strict_preference && node_type == "primary")
|
260
|
+
else
|
261
|
+
false
|
262
|
+
end
|
263
|
+
end
|
264
|
+
|
265
|
+
def self.get_least_loaded_server(allowed_placements, fallback_to_tk_only, new_request, placement_index, lb_value, strict_preference)
|
200
266
|
current_index = 1
|
201
267
|
selected = Array.new
|
202
268
|
unless allowed_placements.nil? # topology-aware
|
203
|
-
|
269
|
+
logger.debug "get_least_loaded_server(): topology_keys given = #{allowed_placements}"
|
204
270
|
(placement_index..10).each { |idx|
|
205
271
|
current_index = idx
|
206
272
|
selected.clear
|
207
273
|
min_connections = 1000000 # Using some really high value
|
208
274
|
@@cluster_info.each do |host, node_info|
|
209
|
-
|
210
|
-
|
275
|
+
if !node_info.is_down && !allowed_placements[idx].nil?
|
276
|
+
if is_node_type_acceptable(node_info.node_type, lb_value, strict_preference)
|
211
277
|
allowed_placements[idx].each do |cp|
|
212
278
|
if cp[0] == node_info.cloud && cp[1] == node_info.region && (cp[2] == node_info.zone || cp[2] == "*")
|
213
|
-
eligible_hosts << host
|
214
279
|
if node_info.count < min_connections
|
215
280
|
min_connections = node_info.count
|
216
281
|
selected.clear
|
@@ -231,12 +296,11 @@ class YSQL::LoadBalanceService
|
|
231
296
|
end
|
232
297
|
|
233
298
|
if allowed_placements.nil? || (selected.empty? && !fallback_to_tk_only) # cluster-aware || fallback_to_tk_only = false
|
234
|
-
|
235
|
-
end
|
299
|
+
logger.debug "get_least_loaded_server(): topology_keys not given or no nodes found for given topology_keys"
|
236
300
|
min_connections = 1000000 # Using some really high value
|
237
301
|
selected = Array.new
|
238
302
|
@@cluster_info.each do |host, node_info|
|
239
|
-
|
303
|
+
if !node_info.is_down && is_node_type_acceptable(node_info.node_type, lb_value, strict_preference)
|
240
304
|
if node_info.count < min_connections
|
241
305
|
min_connections = node_info.count
|
242
306
|
selected.clear
|
@@ -254,14 +318,16 @@ class YSQL::LoadBalanceService
|
|
254
318
|
index = rand(selected.size)
|
255
319
|
selected_node = selected[index]
|
256
320
|
@@cluster_info[selected_node].count += 1
|
321
|
+
selected_port = @@cluster_info[selected_node].port
|
257
322
|
if !@@useHostColumn.nil? && !@@useHostColumn
|
258
323
|
selected_node = @@cluster_info[selected_node].public_ip
|
259
324
|
end
|
260
|
-
Array[selected_node,
|
325
|
+
Array[selected_node, selected_port, current_index]
|
261
326
|
end
|
262
327
|
end
|
263
328
|
|
264
329
|
def self.parse_lb_args_from_url(conn_string)
|
330
|
+
logger.debug "parse_lb_args_from_url(): conn_string = #{conn_string}"
|
265
331
|
string_parts = conn_string.split('?', -1)
|
266
332
|
if string_parts.length != 2
|
267
333
|
return conn_string, nil
|
@@ -293,7 +359,7 @@ class YSQL::LoadBalanceService
|
|
293
359
|
|
294
360
|
base_string = base_string.chop if base_string[-1] == "&"
|
295
361
|
base_string = base_string.chop if base_string[-1] == "?"
|
296
|
-
if not lb_props.empty? and lb_props[:load_balance].to_s.downcase
|
362
|
+
if not lb_props.empty? and is_lb_enabled(lb_props[:load_balance].to_s.downcase)
|
297
363
|
return base_string, parse_connect_lb_args(lb_props)
|
298
364
|
else
|
299
365
|
return base_string, nil
|
@@ -301,15 +367,25 @@ class YSQL::LoadBalanceService
|
|
301
367
|
end
|
302
368
|
end
|
303
369
|
|
370
|
+
def self.is_lb_enabled(lb)
|
371
|
+
case lb
|
372
|
+
when "true", "any", "only-primary", "prefer-primary", "only-rr", "prefer-rr"
|
373
|
+
true
|
374
|
+
else
|
375
|
+
false
|
376
|
+
end
|
377
|
+
end
|
378
|
+
|
304
379
|
def self.parse_connect_lb_args(hash_arg)
|
380
|
+
logger.debug "parse_connect_lb_args(): hash_arg = #{hash_arg}"
|
305
381
|
lb = hash_arg.delete(:load_balance)
|
306
382
|
tk = hash_arg.delete(:topology_keys)
|
307
383
|
ri = hash_arg.delete(:yb_servers_refresh_interval)
|
308
384
|
ttl = hash_arg.delete(:failed_host_reconnect_delay_secs)
|
309
385
|
fb = hash_arg.delete(:fallback_to_topology_keys_only)
|
310
386
|
|
311
|
-
if lb
|
312
|
-
lb_properties = LBProperties.new(nil, 300, false, 5)
|
387
|
+
if is_lb_enabled(lb.to_s.downcase)
|
388
|
+
lb_properties = LBProperties.new(lb.to_s.downcase, nil, 300, false, 5)
|
313
389
|
if tk
|
314
390
|
lb_properties.placements_info = Hash.new
|
315
391
|
tk_parts = tk.split(',', -1)
|
data/lib/ysql/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: yugabytedb-ysql
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: '0.5'
|
4
|
+
version: '0.6'
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Michael Granger
|
@@ -10,7 +10,7 @@ authors:
|
|
10
10
|
autorequire:
|
11
11
|
bindir: bin
|
12
12
|
cert_chain: []
|
13
|
-
date:
|
13
|
+
date: 2025-01-16 00:00:00.000000000 Z
|
14
14
|
dependencies: []
|
15
15
|
description: Pg_YugabyteDB is the Ruby interface to the PostgreSQL-compatible YugabyteDB.
|
16
16
|
It works with YugabyteDB 2.20 and later.
|