switchman 2.0.13 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Rakefile +10 -2
- data/app/models/switchman/shard.rb +234 -271
- data/app/models/switchman/unsharded_record.rb +7 -0
- data/db/migrate/20130328212039_create_switchman_shards.rb +1 -1
- data/db/migrate/20130328224244_create_default_shard.rb +5 -5
- data/db/migrate/20161206323434_add_back_default_string_limits_switchman.rb +1 -0
- data/db/migrate/20180828183945_add_default_shard_index.rb +2 -2
- data/db/migrate/20180828192111_add_timestamps_to_shards.rb +7 -5
- data/db/migrate/20190114212900_add_unique_name_indexes.rb +5 -3
- data/lib/switchman.rb +3 -5
- data/lib/switchman/action_controller/caching.rb +2 -2
- data/lib/switchman/active_record/abstract_adapter.rb +1 -0
- data/lib/switchman/active_record/association.rb +78 -89
- data/lib/switchman/active_record/attribute_methods.rb +58 -52
- data/lib/switchman/active_record/base.rb +58 -59
- data/lib/switchman/active_record/calculations.rb +74 -67
- data/lib/switchman/active_record/connection_pool.rb +14 -41
- data/lib/switchman/active_record/database_configurations.rb +34 -0
- data/lib/switchman/active_record/database_configurations/database_config.rb +13 -0
- data/lib/switchman/active_record/finder_methods.rb +11 -16
- data/lib/switchman/active_record/log_subscriber.rb +4 -8
- data/lib/switchman/active_record/migration.rb +6 -47
- data/lib/switchman/active_record/model_schema.rb +1 -1
- data/lib/switchman/active_record/persistence.rb +4 -6
- data/lib/switchman/active_record/postgresql_adapter.rb +124 -168
- data/lib/switchman/active_record/predicate_builder.rb +2 -2
- data/lib/switchman/active_record/query_cache.rb +18 -19
- data/lib/switchman/active_record/query_methods.rb +172 -197
- data/lib/switchman/active_record/reflection.rb +6 -10
- data/lib/switchman/active_record/relation.rb +30 -78
- data/lib/switchman/active_record/spawn_methods.rb +27 -29
- data/lib/switchman/active_record/statement_cache.rb +18 -35
- data/lib/switchman/active_record/tasks/database_tasks.rb +16 -0
- data/lib/switchman/active_support/cache.rb +3 -5
- data/lib/switchman/arel.rb +13 -8
- data/lib/switchman/database_server.rb +121 -142
- data/lib/switchman/default_shard.rb +52 -16
- data/lib/switchman/engine.rb +61 -58
- data/lib/switchman/environment.rb +4 -8
- data/lib/switchman/errors.rb +1 -0
- data/lib/switchman/guard_rail.rb +6 -19
- data/lib/switchman/guard_rail/relation.rb +5 -7
- data/lib/switchman/r_spec_helper.rb +29 -37
- data/lib/switchman/rails.rb +14 -12
- data/lib/switchman/schema_cache.rb +1 -9
- data/lib/switchman/sharded_instrumenter.rb +1 -1
- data/lib/switchman/standard_error.rb +15 -3
- data/lib/switchman/test_helper.rb +7 -11
- data/lib/switchman/version.rb +1 -1
- data/lib/tasks/switchman.rake +54 -69
- metadata +87 -45
- data/lib/switchman/active_record/batches.rb +0 -11
- data/lib/switchman/active_record/connection_handler.rb +0 -172
- data/lib/switchman/active_record/where_clause_factory.rb +0 -36
- data/lib/switchman/connection_pool_proxy.rb +0 -173
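The headline change in 3.0.0, visible throughout the shard.rb diff below, is that shard activation is now keyed by ActiveRecord connection classes instead of category symbols: the old `CATEGORIES` constant and `hashify_categories` are replaced by `sharded_models` and `hashify_classes`, and models pinned to the default shard inherit from the new `Switchman::UnshardedRecord`. A minimal sketch of the call-style difference, assuming `shard` is a `Switchman::Shard` and `SomeModel`/`GlobalSetting` are hypothetical application models, not part of the gem:

```ruby
# switchman 2.x: activation was keyed by category symbols
# (:primary was the implicit "everything else" category)
shard.activate(:primary) { SomeModel.count }

# switchman 3.0: activation is keyed by connection classes;
# with no argument, the default bucket is now ::ActiveRecord::Base
shard.activate(::ActiveRecord::Base) { SomeModel.count }
shard.activate { SomeModel.count }

# models that must always live on the default shard (as Shard itself now does)
# inherit from Switchman::UnshardedRecord instead of using the :unsharded category
class GlobalSetting < Switchman::UnshardedRecord; end
```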
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bb89193fead8142e30280a6ab2706f6e9af798ff18820fa144be3dd9ac77591f
+  data.tar.gz: e1157f5656f06ae9bdb7c62a2c9ae1611e2073cf47ca4278bae8605296603d21
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fbb9b4d9385dad87ba827400dc9245085c31e133d2445daf64b3d3e81f3102c189f8feafb328d085649146c8ccd2782fd7ba3cb410c262d06a31e014175cac99
+  data.tar.gz: 55c8010cbc73d112262d674d131b1fe0acbd39b5b482e73b9b7165aac23a25a93d5bbca4320a56b97450f68af8526045aede39c13cfb3426189073acba8f6a3f
data/Rakefile
CHANGED
@@ -1,4 +1,6 @@
 #!/usr/bin/env rake
+# frozen_string_literal: true
+
 begin
   require 'bundler/setup'
 rescue LoadError
@@ -19,7 +21,7 @@ RDoc::Task.new(:rdoc) do |rdoc|
   rdoc.rdoc_files.include('lib/**/*.rb')
 end

-APP_RAKEFILE = File.expand_path(
+APP_RAKEFILE = File.expand_path('spec/dummy/Rakefile', __dir__)
 load 'rails/tasks/engine.rake'

 Bundler::GemHelper.install_tasks
@@ -27,4 +29,10 @@ Bundler::GemHelper.install_tasks
 require 'rspec/core/rake_task'
 RSpec::Core::RakeTask.new

-
+require 'rubocop/rake_task'
+
+RuboCop::RakeTask.new do |task|
+  task.options = ['-S']
+end
+
+task default: %i[spec rubocop]
data/app/models/switchman/shard.rb
CHANGED
@@ -6,44 +6,24 @@ require 'switchman/environment'
require 'switchman/errors'

module Switchman
-  class Shard <
+  class Shard < UnshardedRecord
    # ten trillion possible ids per shard. yup.
    IDS_PER_SHARD = 10_000_000_000_000

-    CATEGORIES =
-      {
-        # special cased to mean all other models
-        :primary => nil,
-        # special cased to not allow activating a shard other than the default
-        :unsharded => [Shard]
-      }
-    private_constant :CATEGORIES
-    @connection_specification_name = @shard_category = :unsharded
-
-    if defined?(::ProtectedAttributes)
-      attr_accessible :default, :name, :database_server
-    end
-
    # only allow one default
-    validates_uniqueness_of :default, :
+    validates_uniqueness_of :default, if: ->(s) { s.default? }

    after_save :clear_cache
    after_destroy :clear_cache

-    after_rollback :on_rollback
-
    scope :primary, -> { where(name: nil).order(:database_server_id, :id).distinct_on(:database_server_id) }

    class << self
-      def
-
+      def sharded_models
+        @sharded_models ||= [::ActiveRecord::Base, UnshardedRecord].freeze
      end

-      def default(
-        if reload_deprecated
-          reload = reload_deprecated
-          ::ActiveSupport::Deprecation.warn("positional reload parameter to Switchman::Shard.default is deprecated; use `reload: true`")
-        end
+      def default(reload: false, with_fallback: false)
        if !@default || reload
          # Have to create a dummy object so that several key methods still work
          # (it's easier to do this in one place here, and just assume that sharding
@@ -54,74 +34,68 @@ module Switchman

          # if we already have a default shard in place, and the caller wants
          # to use it as a fallback, use that instead of the dummy instance
-          if with_fallback && @default
-            default = @default
-          end
+          default = @default if with_fallback && @default

          # the first time we need a dummy dummy for re-entrancy to avoid looping on ourselves
          @default ||= default

          # Now find the actual record, if it exists; rescue the fake default if the table doesn't exist
          @default = begin
-            find_cached(
+            find_cached('default_shard') { Shard.where(default: true).take } || default
          rescue
            default
          end

-          # rebuild current shard activations - it might have "another" default shard serialized there
-          active_shards.replace(active_shards.dup.map do |category, shard|
-            shard = Shard.lookup((!shard || shard.default?) ? 'default' : shard.id)
-            [category, shard]
-          end.to_h)
-
-          activate!(primary: @default) if active_shards.empty?
-
          # make sure this is not erroneously cached
-          if @default.database_server.instance_variable_defined?(:@primary_shard)
-            @default.database_server.remove_instance_variable(:@primary_shard)
-          end
+          @default.database_server.remove_instance_variable(:@primary_shard) if @default.database_server.instance_variable_defined?(:@primary_shard)

          # and finally, check for cached references to the default shard on the existing connection
-
-
+          sharded_models.each do |klass|
+            klass.connection.shard = @default if klass.connected? && klass.connection.shard.default?
          end
        end
        @default
      end

-      def current(
-
+      def current(klass = ::ActiveRecord::Base)
+        klass ||= ::ActiveRecord::Base
+        klass.connection_pool.shard
      end

      def activate(shards)
-
+        activated_classes = activate!(shards)
        yield
      ensure
-
+        activated_classes.each do |klass|
+          klass.connection_pool.shard_stack.pop
+          klass.connected_to_stack.pop
+        end
      end

      def activate!(shards)
-
-
-
-
-        unless
-
-
-
-
+        activated_classes = []
+        shards.each do |klass, shard|
+          next if klass == UnshardedRecord
+
+          next unless klass.current_shard != shard.database_server.id.to_sym ||
+                      klass.connection_pool.shard != shard
+
+          activated_classes << klass
+          klass.connected_to_stack << { shard: shard.database_server.id.to_sym, klasses: [klass] }
+          klass.connection_pool.shard_stack << shard
        end
-
+        activated_classes
      end

      def lookup(id)
        id_i = id.to_i
        return current if id_i == current.id || id == 'self'
        return default if id_i == default.id || id.nil? || id == 'default'
+
        id = id_i
-        raise ArgumentError if id
+        raise ArgumentError if id.zero?

-        unless cached_shards.
+        unless cached_shards.key?(id)
          cached_shards[id] = Shard.default.activate do
            find_cached(['shard', id]) { find_by(id: id) }
          end
@@ -136,59 +110,47 @@ module Switchman
      # ==== Parameters
      #
      # * +shards+ - an array or relation of Shards to iterate over
-      # * +
-      #
-      #   :parallel - true/false to execute in parallel, or a integer of how many
+      # * +classes+ - an array of classes to activate
+      #   parallel: - true/false to execute in parallel, or a integer of how many
      #               sub-processes per database server. Note that parallel
      #               invocation currently uses forking, so should be used sparingly
      #               because errors are not raised, and you cannot get results back
-      #   :
-      #   :
+      #   max_procs: - only run this many parallel processes at a time
+      #   exception: - :ignore, :raise, :defer (wait until the end and raise the first
      #               error), or a proc
-      def with_each_shard(*args)
-        raise ArgumentError, "wrong number of arguments (#{args.length} for 0...
+      def with_each_shard(*args, parallel: false, max_procs: nil, exception: :raise, &block)
+        raise ArgumentError, "wrong number of arguments (#{args.length} for 0...2)" if args.length > 2

-        unless default.is_a?(Shard)
-          return Array.wrap(yield)
-        end
+        return Array.wrap(yield) unless default.is_a?(Shard)

-        options = args.extract_options!
        if args.length == 1
-          if Array === args.first && args.first.first.is_a?(
-
+          if Array === args.first && args.first.first.is_a?(Class)
+            classes = args.first
          else
            scope = args.first
          end
        else
-          scope,
+          scope, classes = args
        end

-        parallel =
-
-          1
-        when false, nil
-          0
-        else
-          options[:parallel]
-        end
-        options.delete(:parallel)
+        parallel = 1 if parallel == true
+        parallel = 0 if parallel == false || parallel.nil?

        scope ||= Shard.all
-        if ::ActiveRecord::Relation === scope && scope.order_values.empty?
-          scope = scope.order(::Arel.sql("database_server_id IS NOT NULL, database_server_id, id"))
-        end
+        scope = scope.order(::Arel.sql('database_server_id IS NOT NULL, database_server_id, id')) if ::ActiveRecord::Relation === scope && scope.order_values.empty?

-        if parallel
-          max_procs = determine_max_procs(
+        if parallel.positive?
+          max_procs = determine_max_procs(max_procs, parallel)
          if ::ActiveRecord::Relation === scope
            # still need a post-uniq, cause the default database server could be NULL or Rails.env in the db
            database_servers = scope.reorder('database_server_id').select(:database_server_id).distinct.
-
+                               map(&:database_server).compact.uniq
            # nothing to do
-            return if database_servers.count
+            return if database_servers.count.zero?
+
            parallel = [(max_procs.to_f / database_servers.count).ceil, parallel].min if max_procs

-            scopes =
+            scopes = database_servers.map do |server|
              server_scope = server.shards.merge(scope)
              if parallel == 1
                subscopes = [server_scope]
@@ -196,28 +158,28 @@ module Switchman
                subscopes = []
                total = server_scope.count
                ranges = []
-                server_scope.find_ids_in_ranges(:
+                server_scope.find_ids_in_ranges(batch_size: (total.to_f / parallel).ceil) do |min, max|
                  ranges << [min, max]
                end
                # create a half-open range on the last one
                ranges.last[1] = nil
                ranges.each do |min, max|
-                  subscope = server_scope.where(
-                  subscope = subscope.where(
+                  subscope = server_scope.where('id>=?', min)
+                  subscope = subscope.where('id<=?', max) if max
                  subscopes << subscope
                end
              end
              [server, subscopes]
-            end
+            end.to_h
          else
            scopes = scope.group_by(&:database_server)
            if parallel > 1
              parallel = [(max_procs.to_f / scopes.count).ceil, parallel].min if max_procs
-              scopes =
+              scopes = scopes.map do |(server, shards)|
                [server, shards.in_groups(parallel, false).compact]
-              end
+              end.to_h
            else
-              scopes =
+              scopes = scopes.map { |(server, shards)| [server, [shards]] }.to_h
            end
          end

@@ -229,8 +191,8 @@ module Switchman
          fd_to_name_map = {}
          errors = []

-          wait_for_output = lambda do
-            ready,
+          wait_for_output = lambda do
+            ready, = IO.select(out_fds + err_fds)
            ready.each do |fd|
              if fd.eof?
                fd.close
@@ -245,7 +207,8 @@ module Switchman

          # only one process; don't bother forking
          if scopes.length == 1 && parallel == 1
-            return with_each_shard(scopes.first.last.first,
+            return with_each_shard(scopes.first.last.first, classes, exception: exception,
+                                   &block)
          end

          # clear connections prior to forking (no more queries will be executed in the parent,
@@ -255,51 +218,47 @@ module Switchman

          scopes.each do |server, subscopes|
            subscopes.each_with_index do |subscope, idx|
-              if subscopes.length > 1
-
-
-
-
+              name = if subscopes.length > 1
+                       "#{server.id} #{idx + 1}"
+                     else
+                       server.id
+                     end

              exception_pipe = IO.pipe
              exception_pipes << exception_pipe
              pid, io_in, io_out, io_err = Open4.pfork4(lambda do
+                Switchman.config[:on_fork_proc]&.call
+
+                # set a pretty name for the process title, up to 128 characters
+                # (we don't actually know the limit, depending on how the process
+                # was started)
+                # first, simplify the binary name by stripping directories,
+                # then truncate arguments as necessary
+                bin = File.basename($0) # Process.argv0 doesn't work on Ruby 2.5 (https://bugs.ruby-lang.org/issues/15887)
+                max_length = 128 - bin.length - name.length - 3
+                args = ARGV.join(' ')
+                args = args[0..max_length] if max_length >= 0
+                new_title = [bin, args, name].join(' ')
+                Process.setproctitle(new_title)
+
+                with_each_shard(subscope, classes, exception: exception, &block)
+                exception_pipe.last.close
+              rescue => e
                begin
-
-
-                  #
-                  #
-
-
-
-
-                  max_length = 128 - bin.length - name.length - 3
-                  args = ARGV.join(" ")
-                  if max_length >= 0
-                    args = args[0..max_length]
-                  end
-                  new_title = [bin, args, name].join(" ")
-                  Process.setproctitle(new_title)
-
-                  with_each_shard(subscope, categories, options) { yield }
-                  exception_pipe.last.close
-                rescue => e
-                  begin
-                    dumped = Marshal.dump(e)
-                  rescue
-                    # couldn't dump the exception; create a copy with just
-                    # the message and the backtrace
-                    e2 = e.class.new(e.message)
-                    e2.set_backtrace(e.backtrace)
-                    e2.instance_variable_set(:@active_shards, e.instance_variable_get(:@active_shards))
-                    dumped = Marshal.dump(e2)
-                  end
-                  exception_pipe.last.set_encoding(dumped.encoding)
-                  exception_pipe.last.write(dumped)
-                  exception_pipe.last.flush
-                  exception_pipe.last.close
-                  exit! 1
+                  dumped = Marshal.dump(e)
+                rescue
+                  # couldn't dump the exception; create a copy with just
+                  # the message and the backtrace
+                  e2 = e.class.new(e.message)
+                  e2.set_backtrace(e.backtrace)
+                  e2.instance_variable_set(:@active_shards, e.instance_variable_get(:@active_shards))
+                  dumped = Marshal.dump(e2)
                end
+                exception_pipe.last.set_encoding(dumped.encoding)
+                exception_pipe.last.write(dumped)
+                exception_pipe.last.flush
+                exception_pipe.last.close
+                exit! 1
              end)
              exception_pipe.last.close
              pids << pid
@@ -313,7 +272,7 @@ module Switchman
            while max_procs && pids.count >= max_procs
              while max_procs && out_fds.count >= max_procs
                # wait for output if we've hit the max_procs limit
-                wait_for_output.call
+                wait_for_output.call
              end
              # we've gotten all the output from one fd so wait for its child process to exit
              found_pid, status = Process.wait2
@@ -323,9 +282,7 @@ module Switchman
            end
          end

-          while out_fds.any? || err_fds.any?
-            wait_for_output.call(out_fds, err_fds, fd_to_name_map)
-          end
+          wait_for_output.call while out_fds.any? || err_fds.any?
          pids.each do |pid|
            _, status = Process.waitpid2(pid)
            errors << pid_to_name_map[pid] if status.exitstatus != 0
@@ -333,67 +290,51 @@ module Switchman

          # check for an exception; we only re-raise the first one
          exception_pipes.each do |exception_pipe|
-
-
-
-
-
-
-
-            end
+            serialized_exception = exception_pipe.first.read
+            next if serialized_exception.empty?
+
+            ex = Marshal.load(serialized_exception) # rubocop:disable Security/MarshalLoad
+            raise ex
+          ensure
+            exception_pipe.first.close
          end

          unless errors.empty?
-            raise ParallelShardExecError
+            raise ParallelShardExecError,
+                  "The following subprocesses did not exit cleanly: #{errors.sort.join(', ')}"
          end
+
          return
        end

-
+        classes ||= []

        previous_shard = nil
-        close_connections_if_needed = lambda do |shard|
-          # prune the prior connection unless it happened to be the same
-          if previous_shard && shard != previous_shard && !previous_shard.database_server.shareable?
-            previous_shard.activate do
-              ::GuardRail.activated_environments.each do |env|
-                ::GuardRail.activate(env) do
-                  if ::ActiveRecord::Base.connected? && ::ActiveRecord::Base.connection.open_transactions == 0
-                    ::ActiveRecord::Base.connection_pool.current_pool.disconnect!
-                  end
-                end
-              end
-            end
-          end
-        end
-
        result = []
-
+        ex = nil
        scope.each do |shard|
          # shard references a database server that isn't configured in this environment
          next unless shard.database_server
-
-          shard.activate(*
-
-
-
-
-
-
-
-
-
-
-
-
-            raise
-          end
+
+          shard.activate(*classes) do
+            result.concat Array.wrap(yield)
+          rescue
+            case exception
+            when :ignore
+              # ignore
+            when :defer
+              ex ||= $!
+            when Proc
+              exception.call
+            # when :raise
+            else
+              raise
            end
          end
          previous_shard = shard
        end
-
-
+        raise ex if ex
+
        result
      end

@@ -402,22 +343,22 @@ module Switchman
        array.each do |object|
          partition_object = partition_proc ? partition_proc.call(object) : object
          case partition_object
-
-
-
-
-
-
-
-              end
-              next
-            else
-              shard = partition_object.shard
+          when Shard
+            shard = partition_object
+          when ::ActiveRecord::Base
+            if partition_object.respond_to?(:associated_shards)
+              partition_object.associated_shards.each do |a_shard|
+                shard_arrays[a_shard] ||= []
+                shard_arrays[a_shard] << object
              end
-
-
-
-
+              next
+            else
+              shard = partition_object.shard
+            end
+          when Integer, /^\d+$/, /^(\d+)~(\d+)$/
+            local_id, shard = Shard.local_id_for(partition_object)
+            local_id ||= partition_object
+            object = local_id unless partition_proc
          end
          shard ||= Shard.current
          shard_arrays[shard] ||= []
@@ -426,7 +367,7 @@ module Switchman
        # TODO: use with_each_shard (or vice versa) to get
        # connection management and parallelism benefits
        shard_arrays.inject([]) do |results, (shard, objects)|
-          results.concat
+          results.concat(shard.activate { Array.wrap(yield objects) })
        end
      end

@@ -438,7 +379,7 @@ module Switchman
      # stay as provided. This assumes no consumer
      # will return a nil value from the block.
      def signed_id_operation(input_id)
-        sign = input_id
+        sign = input_id.negative? ? -1 : 1
        output = yield input_id.abs
        output * sign
      end
@@ -446,9 +387,10 @@ module Switchman
      # converts an AR object, integral id, string id, or string short-global-id to a
      # integral id. nil if it can't be interpreted
      def integral_id_for(any_id)
-
-
-
+        case any_id
+        when ::Arel::Nodes::Casted
+          any_id = any_id.value
+        when ::Arel::Nodes::BindParam
          any_id = any_id.value.value_before_type_cast
        end

@@ -459,12 +401,11 @@ module Switchman
          local_id = $2.to_i
          signed_id_operation(local_id) do |id|
            return nil if id > IDS_PER_SHARD
+
            $1.to_i * IDS_PER_SHARD + id
          end
        when Integer, /^-?\d+$/
          any_id.to_i
-        else
-          nil
        end
      end

@@ -476,11 +417,12 @@ module Switchman
      def local_id_for(any_id)
        id = integral_id_for(any_id)
        return NIL_NIL_ID unless id
+
        return_shard = nil
        local_id = signed_id_operation(id) do |abs_id|
          if abs_id < IDS_PER_SHARD
            abs_id
-          elsif return_shard = lookup(abs_id / IDS_PER_SHARD)
+          elsif (return_shard = lookup(abs_id / IDS_PER_SHARD))
            abs_id % IDS_PER_SHARD
          else
            return NIL_NIL_ID
@@ -497,8 +439,10 @@ module Switchman
        integral_id = integral_id_for(any_id)
        local_id, shard = local_id_for(integral_id)
        return integral_id unless local_id
+
        shard ||= source_shard
        return local_id if shard == target_shard
+
        shard.global_id_for(local_id)
      end

@@ -509,6 +453,7 @@ module Switchman
        local_id, shard = local_id_for(any_id)
        return any_id unless local_id
        return local_id unless shard
+
        "#{shard.id}~#{local_id}"
      end

@@ -517,6 +462,7 @@ module Switchman
      def global_id_for(any_id, source_shard = nil)
        id = integral_id_for(any_id)
        return any_id unless id
+
        signed_id_operation(id) do |abs_id|
          if abs_id >= IDS_PER_SHARD
            abs_id
@@ -529,29 +475,59 @@ module Switchman

      def shard_for(any_id, source_shard = nil)
        return any_id.shard if any_id.is_a?(::ActiveRecord::Base)
+
        _, shard = local_id_for(any_id)
        shard || source_shard || Shard.current
      end

      # given the provided option, determines whether we need to (and whether
      # it's possible) to determine a reasonable default.
-      def determine_max_procs(max_procs_input, parallel_input=2)
+      def determine_max_procs(max_procs_input, parallel_input = 2)
        max_procs = nil
        if max_procs_input
          max_procs = max_procs_input.to_i
-          max_procs = nil if max_procs
+          max_procs = nil if max_procs.zero?
        else
          return 1 if parallel_input.nil? || parallel_input < 1
+
          cpus = Environment.cpu_count
-
-            max_procs = cpus * parallel_input
-          end
+          max_procs = cpus * parallel_input if cpus&.positive?
        end

-
+        max_procs
      end

      private
+
+      def add_sharded_model(klass)
+        @sharded_models = (sharded_models + [klass]).freeze
+        initialize_sharding
+      end
+
+      def initialize_sharding
+        full_connects_to_hash = DatabaseServer.all.map { |db| [db.id.to_sym, db.connects_to_hash] }.to_h
+        sharded_models.each do |klass|
+          connects_to_hash = full_connects_to_hash.deep_dup
+          if klass == UnshardedRecord
+            # no need to mention other databases for the unsharded category
+            connects_to_hash = { ::Rails.env.to_sym => DatabaseServer.find(nil).connects_to_hash }
+          end
+
+          # prune things we're already connected to
+          if klass.connection_specification_name == klass.name
+            connects_to_hash.each do |(db_name, role_hash)|
+              role_hash.each_key do |role|
+                role_hash.delete(role) if klass.connection_handler.retrieve_connection_pool(
+                  klass.connection_specification_name, role: role, shard: db_name
+                )
+              end
+            end
+          end
+
+          klass.connects_to shards: connects_to_hash
+        end
+      end
+
      # in-process caching
      def cached_shards
        @cached_shards ||= {}.compare_by_identity
@@ -582,10 +558,6 @@ module Switchman
        shard = nil unless shard.database_server
        shard
      end
-
-      def active_shards
-        Thread.current[:active_shards] ||= {}.compare_by_identity
-      end
    end

    def name
@@ -599,11 +571,11 @@ module Switchman

    def name=(name)
      write_attribute(:name, @name = name)
-      remove_instance_variable(:@name) if name
+      remove_instance_variable(:@name) if name.nil?
    end

    def database_server
-      @database_server ||= DatabaseServer.find(
+      @database_server ||= DatabaseServer.find(database_server_id)
    end

    def database_server=(database_server)
@@ -624,23 +596,21 @@ module Switchman
      Shard.default
    end

-    def activate(*
-      shards =
-      Shard.activate(shards)
-        yield
-      end
+    def activate(*classes, &block)
+      shards = hashify_classes(classes)
+      Shard.activate(shards, &block)
    end

    # for use from console ONLY
-    def activate!(*
-      shards =
+    def activate!(*classes)
+      shards = hashify_classes(classes)
      Shard.activate!(shards)
      nil
    end

    # custom serialization, since shard is self-referential
-    def _dump(
-
+    def _dump(_depth)
+      id.to_s
    end

    def self._load(str)
@@ -648,45 +618,45 @@ module Switchman
    end

    def drop_database
-      raise(
+      raise('Cannot drop the database of the default shard') if default?
      return unless read_attribute(:name)

      begin
-        adapter =
+        adapter = database_server.config[:adapter]
        sharding_config = Switchman.config || {}
        drop_statement = sharding_config[adapter]&.[](:drop_statement)
        drop_statement ||= sharding_config[:drop_statement]
        if drop_statement
          drop_statement = Array(drop_statement).dup.
-
+                           map { |statement| statement.gsub('%{name}', name) }
        end

        case adapter
-
-
-
-
-
-
-          end
+        when 'mysql', 'mysql2'
+          activate do
+            ::GuardRail.activate(:deploy) do
+              drop_statement ||= "DROP DATABASE #{name}"
+              Array(drop_statement).each do |stmt|
+                ::ActiveRecord::Base.connection.execute(stmt)
              end
            end
-
-
-
-
-
-
-
-
-
-
-
-        ensure
-          conn.raw_connection.set_notice_processor(&old_proc) if old_proc
+          end
+        when 'postgresql'
+          activate do
+            ::GuardRail.activate(:deploy) do
+              # Shut up, Postgres!
+              conn = ::ActiveRecord::Base.connection
+              old_proc = conn.raw_connection.set_notice_processor {}
+              begin
+                drop_statement ||= "DROP SCHEMA #{name} CASCADE"
+                Array(drop_statement).each do |stmt|
+                  ::ActiveRecord::Base.connection.execute(stmt)
                end
+              ensure
+                conn.raw_connection.set_notice_processor(&old_proc) if old_proc
              end
            end
+          end
        end
      rescue
        logger.info "Drop failed: #{$!}"
@@ -696,8 +666,9 @@ module Switchman
    # takes an id local to this shard, and returns a global id
    def global_id_for(local_id)
      return nil unless local_id
+
      self.class.signed_id_operation(local_id) do |abs_id|
-        abs_id +
+        abs_id + id * IDS_PER_SHARD
      end
    end

@@ -707,7 +678,8 @@ module Switchman
    end

    def destroy
-      raise(
+      raise('Cannot destroy the default shard') if default?
+
      super
    end

@@ -716,31 +688,22 @@ module Switchman
    def clear_cache
      Shard.default.activate do
        Switchman.cache.delete(['shard', id].join('/'))
-        Switchman.cache.delete(
+        Switchman.cache.delete('default_shard') if default?
      end
-      self.class.clear_cache
    end

    def default_name
      database_server.shard_name(self)
    end

-    def
-
-
-        next unless pool.is_a?(ConnectionPoolProxy)
-
-        pool.remove_shard!(self)
-      end
-    end
-
-    def hashify_categories(categories)
-      if categories.empty?
-        { :primary => self }
+    def hashify_classes(classes)
+      if classes.empty?
+        { ::ActiveRecord::Base => self }
      else
-
+        classes.each_with_object({}) do |klass, h|
+          h[klass] = self
+        end
      end
    end
-
  end
end