switchman 1.14.10 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/app/models/switchman/shard.rb +718 -11
- data/lib/switchman/active_record/association.rb +7 -10
- data/lib/switchman/active_record/connection_handler.rb +7 -6
- data/lib/switchman/active_record/log_subscriber.rb +8 -12
- data/lib/switchman/active_record/postgresql_adapter.rb +5 -24
- data/lib/switchman/active_record/query_cache.rb +17 -107
- data/lib/switchman/active_record/statement_cache.rb +1 -9
- data/lib/switchman/connection_pool_proxy.rb +1 -3
- data/lib/switchman/engine.rb +7 -6
- data/lib/switchman/r_spec_helper.rb +2 -2
- data/lib/switchman/version.rb +1 -1
- metadata +6 -7
- data/app/models/switchman/shard_internal.rb +0 -718
data/lib/switchman/active_record/association.rb CHANGED
@@ -30,17 +30,14 @@ module Switchman
     end
 
     module CollectionAssociation
-
-
-
-
-
-
-        Shard.with_each_shard(shards, [klass.shard_category, owner.class.shard_category].uniq) do
-          super
-        end
+      def find_target
+        shards = reflection.options[:multishard] && owner.respond_to?(:associated_shards) ? owner.associated_shards : [shard]
+        # activate both the owner and the target's shard category, so that Reflection#join_id_for,
+        # when called for the owner, will be returned relative to shard the query will execute on
+        Shard.with_each_shard(shards, [klass.shard_category, owner.class.shard_category].uniq) do
+          super
         end
-
+      end
     end
 
     module BelongsToAssociation
data/lib/switchman/active_record/connection_handler.rb CHANGED
@@ -24,6 +24,10 @@ module Switchman
       end
 
       def establish_connection(spec)
+        # Just skip establishing a sharded connection if sharding isn't loaded; we'll do it again later
+        # This only can happen when loading ActiveRecord::Base; after everything is loaded Shard will
+        # be defined and this will actually establish a connection
+        return unless defined?(Shard)
         pool = super
 
         # this is the first place that the adapter would have been required; but now we
@@ -39,14 +43,12 @@ module Switchman
         # to sharding will recurse onto itself trying to access column information
         Shard.default
 
+        config = pool.spec.config
         # automatically change config to allow for sharing connections with simple config
-        config = ::Rails.version < '5.1' ? spec.config : pool.spec.config
         ConnectionHandler.make_sharing_automagic(config)
         ConnectionHandler.make_sharing_automagic(Shard.default.database_server.config)
 
-        if ::Rails.version < '5.1'
-          ::ActiveRecord::Base.configurations[::Rails.env] = spec.instance_variable_get(:@config).stringify_keys
-        elsif ::Rails.version < '6.0'
+        if ::Rails.version < '6.0'
           ::ActiveRecord::Base.configurations[::Rails.env] = config.stringify_keys
         else
           # Adopted from the deprecated code that currently lives in rails proper
@@ -132,8 +134,7 @@ module Switchman
           else
             ancestor_pool.spec
           end
-
-          pool = establish_connection spec
+          pool = establish_connection(spec.to_hash)
           pool.instance_variable_set(:@schema_cache, ancestor_pool.schema_cache) if ancestor_pool.schema_cache
           pool
         elsif spec_name != "primary"
data/lib/switchman/active_record/log_subscriber.rb CHANGED
@@ -18,18 +18,14 @@ module Switchman
         shard = " [#{shard[:database_server_id]}:#{shard[:id]} #{shard[:env]}]" if shard
 
         unless (payload[:binds] || []).empty?
-
-
-
-
-
-
-
-
-          binds = " " + payload[:binds].zip(casted_params).map { |attr, value|
-            render_bind(attr, value)
-          }.inspect
-        end
+          use_old_format = (::Rails.version < '5.1.5')
+          args = use_old_format ?
+            [payload[:binds], payload[:type_casted_binds]] :
+            [payload[:type_casted_binds]]
+          casted_params = type_casted_binds(*args)
+          binds = " " + payload[:binds].zip(casted_params).map { |attr, value|
+            render_bind(attr, value)
+          }.inspect
         end
 
         name = colorize_payload_name(name, payload[:name])
data/lib/switchman/active_record/postgresql_adapter.rb CHANGED
@@ -52,31 +52,12 @@ module Switchman
         SQL
       end
 
-
-
-
-
-            name.instance_variable_set(:@schema, shard.name)
-          end
-          [name.schema, name.identifier]
-        end
-      else
-        def data_source_exists?(name)
-          name = ::ActiveRecord::ConnectionAdapters::PostgreSQL::Utils.extract_schema_qualified_name(name.to_s)
-          return false unless name.identifier
-          if !name.schema && use_qualified_names?
-            name.instance_variable_set(:@schema, shard.name)
-          end
-
-          exec_query(<<-SQL, 'SCHEMA').rows.first[0].to_i > 0
-              SELECT COUNT(*)
-              FROM pg_class c
-              LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
-              WHERE c.relkind IN ('r','v','m') -- (r)elation/table, (v)iew, (m)aterialized view
-              AND c.relname = '#{name.identifier}'
-              AND n.nspname = #{name.schema ? "'#{name.schema}'" : 'ANY (current_schemas(false))'}
-          SQL
+      def extract_schema_qualified_name(string)
+        name = ::ActiveRecord::ConnectionAdapters::PostgreSQL::Utils.extract_schema_qualified_name(string.to_s)
+        if string && !name.schema && use_qualified_names?
+          name.instance_variable_set(:@schema, shard.name)
         end
+        [name.schema, name.identifier]
       end
 
       def view_exists?(name)
data/lib/switchman/active_record/query_cache.rb CHANGED
@@ -1,122 +1,32 @@
 module Switchman
   module ActiveRecord
     module QueryCache
-      if ::Rails.version < '5.0.1'
-        # thread local accessors to replace @query_cache_enabled
-        def query_cache
-          thread_cache = Thread.current[:query_cache] ||= {}
-          thread_cache[self.object_id] ||= Hash.new { |h,sql| h[sql] = {} }
-        end
-
-        def query_cache_enabled
-          Thread.current[:query_cache_enabled]
-        end
-
-        def query_cache_enabled=(value)
-          Thread.current[:query_cache_enabled] = value
-        end
-
-        # basically wholesale repeat of the methods from the original (see
-        # https://github.com/rails/rails/blob/master/activerecord/lib/active_record/connection_adapters/abstract/query_cache.rb),
-        # but with self.query_cache_enabled and self.query_cache_enabled= instead
-        # of @query_cache_enabled.
-
-        def enable_query_cache!
-          self.query_cache_enabled = true
-        end
-
-        def disable_query_cache!
-          self.query_cache_enabled = false
-        end
-
-        def cache
-          old, self.query_cache_enabled = query_cache_enabled, true
-          yield
-        ensure
-          self.query_cache_enabled = old
-          clear_query_cache unless self.query_cache_enabled
-        end
-
-        def uncached
-          old, self.query_cache_enabled = query_cache_enabled, false
-          yield
-        ensure
-          self.query_cache_enabled = old
-        end
-
-        def clear_query_cache
-          Thread.current[:query_cache]&.clear
-        end
-
-        def select_all(arel, name = nil, binds = [], preparable: nil)
-          if self.query_cache_enabled && !locked?(arel)
-            arel, binds = binds_from_relation(arel, binds)
-            sql = to_sql(arel, binds)
-            cache_sql(sql, binds) { super(sql, name, binds, preparable: preparable) }
-          else
-            super
-          end
-        end
-
-        # no reason to define these on the including class directly. the super
-        # works just as well from a method on the included module
-        [:insert, :update, :delete].each do |method_name|
-          class_eval <<-end_code, __FILE__, __LINE__ + 1
-            def #{method_name}(*args)
-              clear_query_cache if self.query_cache_enabled
-              super
-            end
-          end_code
-        end
-      end
 
       private
 
-
-
-
-
+      def cache_sql(sql, name, binds)
+        # have to include the shard id in the cache key because of switching dbs on the same connection
+        sql = "#{self.shard.id}::#{sql}"
+        @lock.synchronize do
          result =
            if query_cache[sql].key?(binds)
-              args = {
-
-
+              args = {
+                sql: sql,
+                binds: binds,
+                name: name,
+                connection_id: object_id,
+                cached: true
+              }
+              args[:type_casted_binds] = -> { type_casted_binds(binds) } if ::Rails.version >= '5.1.5'
+              ::ActiveSupport::Notifications.instrument(
+                "sql.active_record",
+                args
+              )
              query_cache[sql][binds]
            else
              query_cache[sql][binds] = yield
            end
-
-          if ::ActiveRecord::Result === result
-            result.dup
-          else
-            result.collect { |row| row.dup }
-          end
-        end
-      else
-        def cache_sql(sql, name, binds)
-          # have to include the shard id in the cache key because of switching dbs on the same connection
-          sql = "#{self.shard.id}::#{sql}"
-          @lock.synchronize do
-            result =
-              if query_cache[sql].key?(binds)
-                args = {
-                  sql: sql,
-                  binds: binds,
-                  name: name,
-                  connection_id: object_id,
-                  cached: true
-                }
-                args[:type_casted_binds] = -> { type_casted_binds(binds) } if ::Rails.version >= '5.1.5'
-                ::ActiveSupport::Notifications.instrument(
-                  "sql.active_record",
-                  args
-                )
-                query_cache[sql][binds]
-              else
-                query_cache[sql][binds] = yield
-              end
-            result.dup
-          end
+          result.dup
        end
      end
    end
data/lib/switchman/active_record/statement_cache.rb CHANGED
@@ -56,15 +56,7 @@ module Switchman
         end
       end
 
-      if ::Rails.version < '5.1'
-        def generic_query_builder(connection)
-          @query_builder ||= connection.cacheable_query(@arel)
-        end
-
-        def qualified_query_builder(shard, klass)
-          @qualified_query_builders[shard.id] ||= klass.connection.cacheable_query(@arel)
-        end
-      elsif ::Rails.version < '5.2'
+      if ::Rails.version < '5.2'
         def generic_query_builder(connection)
           @query_builder ||= connection.cacheable_query(self.class, @arel)
         end
data/lib/switchman/connection_pool_proxy.rb CHANGED
@@ -159,9 +159,7 @@ module Switchman
       ::ActiveRecord::ConnectionAdapters::ConnectionPool.new(spec).tap do |pool|
         pool.shard = shard
         pool.set_schema_cache(@schema_cache) if ::Rails.version >= '6'
-        if
-          pool.enable_query_cache! if !@connection_pools.empty? && @connection_pools.first.last.query_cache_enabled
-        end
+        pool.enable_query_cache! if !@connection_pools.empty? && @connection_pools.first.last.query_cache_enabled
       end
     end
   end
data/lib/switchman/engine.rb CHANGED
@@ -88,7 +88,7 @@ module Switchman
       require "switchman/call_super"
       require "switchman/rails"
       require "switchman/shackles/relation"
-      require_dependency "switchman/shard_internal"
+      require_dependency "switchman/shard"
       require "switchman/standard_error"
 
       ::StandardError.include(StandardError)
@@ -118,11 +118,6 @@ module Switchman
       ::ActiveRecord::ConnectionAdapters::ConnectionHandler.prepend(ActiveRecord::ConnectionHandler)
       ::ActiveRecord::ConnectionAdapters::ConnectionPool.prepend(ActiveRecord::ConnectionPool)
       ::ActiveRecord::ConnectionAdapters::AbstractAdapter.prepend(ActiveRecord::QueryCache)
-      # when we call super in Switchman::ActiveRecord::QueryCache#select_all,
-      # we want it to find the definition from
-      # ActiveRecord::ConnectionAdapters::DatabaseStatements, not
-      # ActiveRecord::ConnectionAdapters::QueryCache
-      ::ActiveRecord::ConnectionAdapters::QueryCache.send(:remove_method, :select_all) if ::Rails.version < '5.0.1'
 
       ::ActiveRecord::LogSubscriber.prepend(ActiveRecord::LogSubscriber)
       ::ActiveRecord::Migration.prepend(ActiveRecord::Migration)
@@ -173,6 +168,12 @@ module Switchman
         require "switchman/active_record/postgresql_adapter"
         ::ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(ActiveRecord::PostgreSQLAdapter)
       end
+
+      # If Switchman::Shard wasn't loaded as of when ActiveRecord::Base initialized
+      # establish a connection here instead
+      if !Shard.instance_variable_get(:@default)
+        ::ActiveRecord::Base.establish_connection
+      end
     end
   end
 
data/lib/switchman/r_spec_helper.rb CHANGED
@@ -121,7 +121,7 @@ module Switchman
       klass.before do
         raise "Sharding did not set up correctly" if @@sharding_failed
         Shard.clear_cache
-        if
+        if use_transactional_tests
           Shard.default(true)
           @shard1 = Shard.find(@shard1.id)
           @shard2 = Shard.find(@shard2.id)
@@ -137,7 +137,7 @@ module Switchman
 
       klass.after do
         next if @@sharding_failed
-        if
+        if use_transactional_tests
           shards = [@shard2]
           shards << @shard1 unless @shard1.database_server == Shard.default.database_server
           shards.each do |shard|
data/lib/switchman/version.rb CHANGED
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: switchman
 version: !ruby/object:Gem::Version
-  version: 1.14.10
+  version: 1.15.0
 platform: ruby
 authors:
 - Cody Cutrer
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-05-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: railties
@@ -18,7 +18,7 @@ dependencies:
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '6.1'
@@ -28,7 +28,7 @@ dependencies:
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '6.1'
@@ -38,7 +38,7 @@ dependencies:
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '6.1'
@@ -48,7 +48,7 @@ dependencies:
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '6.1'
@@ -173,7 +173,6 @@ extra_rdoc_files: []
 files:
 - Rakefile
 - app/models/switchman/shard.rb
-- app/models/switchman/shard_internal.rb
 - db/migrate/20130328212039_create_switchman_shards.rb
 - db/migrate/20130328224244_create_default_shard.rb
 - db/migrate/20161206323434_add_back_default_string_limits_switchman.rb
data/app/models/switchman/shard_internal.rb DELETED
@@ -1,718 +0,0 @@
-require 'switchman/database_server'
-require 'switchman/default_shard'
-require 'switchman/environment'
-require 'switchman/errors'
-
-module Switchman
-  class Shard < ::ActiveRecord::Base
-    # ten trillion possible ids per shard. yup.
-    IDS_PER_SHARD = 10_000_000_000_000
-
-    CATEGORIES =
-      {
-        # special cased to mean all other models
-        :primary => nil,
-        # special cased to not allow activating a shard other than the default
-        :unsharded => [Shard]
-      }
-    private_constant :CATEGORIES
-    @connection_specification_name = @shard_category = :unsharded
-
-    if defined?(::ProtectedAttributes)
-      attr_accessible :default, :name, :database_server
-    end
-
-    # only allow one default
-    validates_uniqueness_of :default, :if => lambda { |s| s.default? }
-
-    after_save :clear_cache
-    after_destroy :clear_cache
-
-    after_rollback :on_rollback
-
-    scope :primary, -> { where(name: nil).order(:database_server_id, :id).distinct_on(:database_server_id) }
-
-    class << self
-      def categories
-        CATEGORIES.keys
-      end
-
-      def default(reload_deprecated = false, reload: false, with_fallback: false)
-        reload = reload_deprecated if reload_deprecated
-        if !@default || reload
-          # Have to create a dummy object so that several key methods still work
-          # (it's easier to do this in one place here, and just assume that sharding
-          # is up and running everywhere else). This includes for looking up the
-          # default shard itself. This also needs to be a local so that this method
-          # can be re-entrant
-          default = DefaultShard.instance
-
-          # if we already have a default shard in place, and the caller wants
-          # to use it as a fallback, use that instead of the dummy instance
-          if with_fallback && @default
-            default = @default
-          end
-
-          # the first time we need a dummy dummy for re-entrancy to avoid looping on ourselves
-          @default ||= default
-
-          # Now find the actual record, if it exists; rescue the fake default if the table doesn't exist
-          @default = begin
-            find_cached("default_shard") { Shard.where(default: true).take } || default
-          rescue
-            default
-          end
-
-          # rebuild current shard activations - it might have "another" default shard serialized there
-          active_shards.replace(active_shards.dup.map do |category, shard|
-            shard = Shard.lookup((!shard || shard.default?) ? 'default' : shard.id)
-            [category, shard]
-          end.to_h)
-
-          activate!(primary: @default) if active_shards.empty?
-
-          # make sure this is not erroneously cached
-          if @default.database_server.instance_variable_defined?(:@primary_shard)
-            @default.database_server.remove_instance_variable(:@primary_shard)
-          end
-
-          # and finally, check for cached references to the default shard on the existing connection
-          if ::ActiveRecord::Base.connected? && ::ActiveRecord::Base.connection.shard.default?
-            ::ActiveRecord::Base.connection.shard = @default
-          end
-        end
-        @default
-      end
-
-      def current(category = :primary)
-        active_shards[category] || Shard.default
-      end
-
-      def activate(shards)
-        old_shards = activate!(shards)
-        yield
-      ensure
-        active_shards.merge!(old_shards) if old_shards
-      end
-
-      def activate!(shards)
-        old_shards = nil
-        currently_active_shards = active_shards
-        shards.each do |category, shard|
-          next if category == :unsharded
-          unless currently_active_shards[category] == shard
-            old_shards ||= {}
-            old_shards[category] = currently_active_shards[category]
-            currently_active_shards[category] = shard
-          end
-        end
-        old_shards
-      end
-
-      def lookup(id)
-        id_i = id.to_i
-        return current if id_i == current.id || id == 'self'
-        return default if id_i == default.id || id.nil? || id == 'default'
-        id = id_i
-        raise ArgumentError if id == 0
-
-        unless cached_shards.has_key?(id)
-          cached_shards[id] = Shard.default.activate do
-            find_cached(['shard', id]) { find_by(id: id) }
-          end
-        end
-        cached_shards[id]
-      end
-
-      def clear_cache
-        cached_shards.clear
-      end
-
-      # ==== Parameters
-      #
-      # * +shards+ - an array or relation of Shards to iterate over
-      # * +categories+ - an array of categories to activate
-      # * +options+ -
-      #     :parallel - true/false to execute in parallel, or a integer of how many
-      #                 sub-processes per database server. Note that parallel
-      #                 invocation currently uses forking, so should be used sparingly
-      #                 because errors are not raised, and you cannot get results back
-      #     :max_procs - only run this many parallel processes at a time
-      #     :exception - :ignore, :raise, :defer (wait until the end and raise the first
-      #                  error), or a proc
-      def with_each_shard(*args)
-        raise ArgumentError, "wrong number of arguments (#{args.length} for 0...3)" if args.length > 3
-
-        unless default.is_a?(Shard)
-          return Array.wrap(yield)
-        end
-
-        options = args.extract_options!
-        if args.length == 1
-          if Array === args.first && args.first.first.is_a?(Symbol)
-            categories = args.first
-          else
-            scope = args.first
-          end
-        else
-          scope, categories = args
-        end
-
-        parallel = case options[:parallel]
-                   when true
-                     1
-                   when false, nil
-                     0
-                   else
-                     options[:parallel]
-                   end
-        options.delete(:parallel)
-
-        scope ||= Shard.all
-        if ::ActiveRecord::Relation === scope && scope.order_values.empty?
-          scope = scope.order(::Arel.sql("database_server_id IS NOT NULL, database_server_id, id"))
-        end
-
-        if parallel > 0
-          max_procs = determine_max_procs(options.delete(:max_procs), parallel)
-          if ::ActiveRecord::Relation === scope
-            # still need a post-uniq, cause the default database server could be NULL or Rails.env in the db
-            database_servers = scope.reorder('database_server_id').select(:database_server_id).distinct.
-              map(&:database_server).compact.uniq
-            # nothing to do
-            return if database_servers.count == 0
-            parallel = [(max_procs.to_f / database_servers.count).ceil, parallel].min if max_procs
-
-            scopes = Hash[database_servers.map do |server|
-              server_scope = server.shards.merge(scope)
-              if parallel == 1
-                subscopes = [server_scope]
-              else
-                subscopes = []
-                total = server_scope.count
-                ranges = []
-                server_scope.find_ids_in_ranges(:batch_size => (total.to_f / parallel).ceil) do |min, max|
-                  ranges << [min, max]
-                end
-                # create a half-open range on the last one
-                ranges.last[1] = nil
-                ranges.each do |min, max|
-                  subscope = server_scope.where("id>=?", min)
-                  subscope = subscope.where("id<=?", max) if max
-                  subscopes << subscope
-                end
-              end
-              [server, subscopes]
-            end]
-          else
-            scopes = scope.group_by(&:database_server)
-            if parallel > 1
-              parallel = [(max_procs.to_f / scopes.count).ceil, parallel].min if max_procs
-              scopes = Hash[scopes.map do |(server, shards)|
-                [server, shards.in_groups(parallel, false).compact]
-              end]
-            else
-              scopes = Hash[scopes.map { |(server, shards)| [server, [shards]] }]
-            end
-          end
-
-          exception_pipes = []
-          pids = []
-          out_fds = []
-          err_fds = []
-          pid_to_name_map = {}
-          fd_to_name_map = {}
-          errors = []
-
-          wait_for_output = lambda do |out_fds, err_fds, fd_to_name_map|
-            ready, _ = IO.select(out_fds + err_fds)
-            ready.each do |fd|
-              if fd.eof?
-                fd.close
-                out_fds.delete(fd)
-                err_fds.delete(fd)
-                next
-              end
-              line = fd.readline
-              puts "#{fd_to_name_map[fd]}: #{line}"
-            end
-          end
-
-          # only one process; don't bother forking
-          if scopes.length == 1 && parallel == 1
-            return with_each_shard(scopes.first.last.first, categories, options) { yield }
-          end
-
-          # clear connections prior to forking (no more queries will be executed in the parent,
-          # and we want them gone so that we don't accidentally use them post-fork doing something
-          # silly like dealloc'ing prepared statements)
-          ::ActiveRecord::Base.clear_all_connections!
-
-          scopes.each do |server, subscopes|
-            subscopes.each_with_index do |subscope, idx|
-              if subscopes.length > 1
-                name = "#{server.id} #{idx + 1}"
-              else
-                name = server.id
-              end
-
-              exception_pipe = IO.pipe
-              exception_pipes << exception_pipe
-              pid, io_in, io_out, io_err = Open4.pfork4(lambda do
-                begin
-                  Switchman.config[:on_fork_proc]&.call
-
-                  # set a pretty name for the process title, up to 128 characters
-                  # (we don't actually know the limit, depending on how the process
-                  # was started)
-                  # first, simplify the binary name by stripping directories,
-                  # then truncate arguments as necessary
-                  bin = File.basename($0) # Process.argv0 doesn't work on Ruby 2.5 (https://bugs.ruby-lang.org/issues/15887)
-                  max_length = 128 - bin.length - name.length - 3
-                  args = ARGV.join(" ")
-                  if max_length >= 0
-                    args = args[0..max_length]
-                  end
-                  new_title = [bin, args, name].join(" ")
-                  Process.setproctitle(new_title)
-
-                  with_each_shard(subscope, categories, options) { yield }
-                  exception_pipe.last.close
-                rescue => e
-                  begin
-                    dumped = Marshal.dump(e)
-                  rescue
-                    # couldn't dump the exception; create a copy with just
-                    # the message and the backtrace
-                    e2 = e.class.new(e.message)
-                    e2.set_backtrace(e.backtrace)
-                    e2.instance_variable_set(:@active_shards, e.instance_variable_get(:@active_shards))
-                    dumped = Marshal.dump(e2)
-                  end
-                  exception_pipe.last.set_encoding(dumped.encoding)
-                  exception_pipe.last.write(dumped)
-                  exception_pipe.last.flush
-                  exception_pipe.last.close
-                  exit! 1
-                end
-              end)
-              exception_pipe.last.close
-              pids << pid
-              io_in.close # don't care about writing to stdin
-              out_fds << io_out
-              err_fds << io_err
-              pid_to_name_map[pid] = name
-              fd_to_name_map[io_out] = name
-              fd_to_name_map[io_err] = name
-
-              while max_procs && pids.count >= max_procs
-                while max_procs && out_fds.count >= max_procs
-                  # wait for output if we've hit the max_procs limit
-                  wait_for_output.call(out_fds, err_fds, fd_to_name_map)
-                end
-                # we've gotten all the output from one fd so wait for its child process to exit
-                found_pid, status = Process.wait2
-                pids.delete(found_pid)
-                errors << pid_to_name_map[found_pid] if status.exitstatus != 0
-              end
-            end
-          end
-
-          while out_fds.any? || err_fds.any?
-            wait_for_output.call(out_fds, err_fds, fd_to_name_map)
-          end
-          pids.each do |pid|
-            _, status = Process.waitpid2(pid)
-            errors << pid_to_name_map[pid] if status.exitstatus != 0
-          end
-
-          # check for an exception; we only re-raise the first one
-          exception_pipes.each do |exception_pipe|
-            begin
-              serialized_exception = exception_pipe.first.read
-              next if serialized_exception.empty?
-              exception = Marshal.load(serialized_exception)
-              raise exception
-            ensure
-              exception_pipe.first.close
-            end
-          end
-
-          unless errors.empty?
-            raise ParallelShardExecError.new("The following subprocesses did not exit cleanly: #{errors.sort.join(", ")}")
-          end
-          return
-        end
-
-        categories ||= []
-
-        previous_shard = nil
-        close_connections_if_needed = lambda do |shard|
-          # prune the prior connection unless it happened to be the same
-          if previous_shard && shard != previous_shard && !previous_shard.database_server.shareable?
-            previous_shard.activate do
-              ::Shackles.activated_environments.each do |env|
-                ::Shackles.activate(env) do
-                  if ::ActiveRecord::Base.connected? && ::ActiveRecord::Base.connection.open_transactions == 0
-                    ::ActiveRecord::Base.connection_pool.current_pool.disconnect!
-                  end
-                end
-              end
-            end
-          end
-        end
-
-        result = []
-        exception = nil
-        scope.each do |shard|
-          # shard references a database server that isn't configured in this environment
-          next unless shard.database_server
-          close_connections_if_needed.call(shard)
-          shard.activate(*categories) do
-            begin
-              result.concat Array.wrap(yield)
-            rescue
-              case options[:exception]
-              when :ignore
-              when :defer
-                exception ||= $!
-              when Proc
-                options[:exception].call
-              when :raise
-                raise
-              else
-                raise
-              end
-            end
-          end
-          previous_shard = shard
-        end
-        close_connections_if_needed.call(Shard.current)
-        raise exception if exception
-        result
-      end
-
-      def partition_by_shard(array, partition_proc = nil)
-        shard_arrays = {}
-        array.each do |object|
-          partition_object = partition_proc ? partition_proc.call(object) : object
-          case partition_object
-          when Shard
-            shard = partition_object
-          when ::ActiveRecord::Base
-            if partition_object.respond_to?(:associated_shards)
-              partition_object.associated_shards.each do |a_shard|
-                shard_arrays[a_shard] ||= []
-                shard_arrays[a_shard] << object
-              end
-              next
-            else
-              shard = partition_object.shard
-            end
-          when Integer, /^\d+$/, /^(\d+)~(\d+)$/
-            local_id, shard = Shard.local_id_for(partition_object)
-            local_id ||= partition_object
-            object = local_id if !partition_proc
-          end
-          shard ||= Shard.current
-          shard_arrays[shard] ||= []
-          shard_arrays[shard] << object
-        end
-        # TODO: use with_each_shard (or vice versa) to get
-        # connection management and parallelism benefits
-        shard_arrays.inject([]) do |results, (shard, objects)|
-          results.concat shard.activate { Array.wrap(yield objects) }
-        end
-      end
-
-      # converts an AR object, integral id, string id, or string short-global-id to a
-      # integral id. nil if it can't be interpreted
-      def integral_id_for(any_id)
-        if any_id.is_a?(::Arel::Nodes::Casted)
-          any_id = any_id.val
-        elsif any_id.is_a?(::Arel::Nodes::BindParam) && ::Rails.version >= "5.2"
-          any_id = any_id.value.value_before_type_cast
-        end
-
-        case any_id
-        when ::ActiveRecord::Base
-          any_id.id
-        when /^(\d+)~(\d+)$/
-          local_id = $2.to_i
-          # doesn't make sense to have a double-global id
-          return nil if local_id > IDS_PER_SHARD
-          $1.to_i * IDS_PER_SHARD + local_id
-        when Integer, /^\d+$/
-          any_id.to_i
-        else
-          nil
-        end
-      end
-
-      # takes an id-ish, and returns a local id and the shard it's
-      # local to. [nil, nil] if it can't be interpreted. [id, nil]
-      # if it's already a local ID. [nil, nil] if it's a well formed
-      # id, but the shard it refers to does not exist
-      NIL_NIL_ID = [nil, nil].freeze
-      def local_id_for(any_id)
-        id = integral_id_for(any_id)
-        return NIL_NIL_ID unless id
-        if id < IDS_PER_SHARD
-          [id, nil]
-        elsif shard = lookup(id / IDS_PER_SHARD)
-          [id % IDS_PER_SHARD, shard]
-        else
-          NIL_NIL_ID
-        end
-      end
-
-      # takes an id-ish, and returns an integral id relative to
-      # target_shard. returns nil if it can't be interpreted,
-      # or the integral version of the id if it refers to a shard
-      # that does not exist
-      def relative_id_for(any_id, source_shard, target_shard)
-        integral_id = integral_id_for(any_id)
-        local_id, shard = local_id_for(integral_id)
-        return integral_id unless local_id
-        shard ||= source_shard
-        return local_id if shard == target_shard
-        shard.global_id_for(local_id)
-      end
-
-      # takes an id-ish, and returns a shortened global
-      # string id if global, and itself if local.
-      # returns any_id itself if it can't be interpreted
-      def short_id_for(any_id)
-        local_id, shard = local_id_for(any_id)
-        return any_id unless local_id
-        return local_id unless shard
-        "#{shard.id}~#{local_id}"
-      end
-
-      # takes an id-ish, and returns an integral global id.
-      # returns nil if it can't be interpreted
-      def global_id_for(any_id, source_shard = nil)
-        id = integral_id_for(any_id)
-        return any_id unless id
-        if id >= IDS_PER_SHARD
-          id
-        else
-          source_shard ||= Shard.current
-          source_shard.global_id_for(id)
-        end
-      end
-
-      def shard_for(any_id, source_shard = nil)
-        return any_id.shard if any_id.is_a?(::ActiveRecord::Base)
-        _, shard = local_id_for(any_id)
-        shard || source_shard || Shard.current
-      end
-
-      # given the provided option, determines whether we need to (and whether
-      # it's possible) to determine a reasonable default.
-      def determine_max_procs(max_procs_input, parallel_input=2)
-        max_procs = nil
-        if max_procs_input
-          max_procs = max_procs_input.to_i
-          max_procs = nil if max_procs == 0
-        else
-          return 1 if parallel_input.nil? || parallel_input < 1
-          cpus = Environment.cpu_count
-          if cpus && cpus > 0
-            max_procs = cpus * parallel_input
-          end
-        end
-
-        return max_procs
-      end
-
-      private
-      # in-process caching
-      def cached_shards
-        @cached_shards ||= {}.compare_by_identity
-      end
-
-      def add_to_cache(shard)
-        cached_shards[shard.id] = shard
-      end
-
-      def remove_from_cache(shard)
-        cached_shards.delete(shard.id)
-      end
-
-      def find_cached(key)
-        # can't simply cache the AR object since Shard has a custom serializer
-        # that calls this method
-        attributes = Switchman.cache.fetch(key) { yield&.attributes }
-        return nil unless attributes
-
-        shard = Shard.new
-        attributes.each do |attr, value|
-          shard.send(:"#{attr}=", value) if shard.respond_to?(:"#{attr}=")
-        end
-        shard.clear_changes_information
-        shard.instance_variable_set(:@new_record, false)
-        # connection info doesn't exist in database.yml;
-        # pretend the shard doesn't exist either
-        shard = nil unless shard.database_server
-        shard
-      end
-
-      def active_shards
-        Thread.current[:active_shards] ||= {}.compare_by_identity
-      end
-    end
-
-    def name
-      unless instance_variable_defined?(:@name)
-        # protect against re-entrancy
-        @name = nil
-        @name = read_attribute(:name) || default_name
-      end
-      @name
-    end
-
-    def name=(name)
-      write_attribute(:name, @name = name)
-      remove_instance_variable(:@name) if name == nil
-    end
-
-    def database_server
-      @database_server ||= DatabaseServer.find(self.database_server_id)
-    end
-
-    def database_server=(database_server)
-      self.database_server_id = database_server.id
-      @database_server = database_server
-    end
-
-    def primary?
-      self == database_server.primary_shard
-    end
-
-    def description
-      [database_server.id, name].compact.join(':')
-    end
-
-    # Shards are always on the default shard
-    def shard
-      Shard.default
-    end
-
-    def activate(*categories)
-      shards = hashify_categories(categories)
-      Shard.activate(shards) do
-        yield
-      end
-    end
-
-    # for use from console ONLY
-    def activate!(*categories)
-      shards = hashify_categories(categories)
-      Shard.activate!(shards)
-      nil
-    end
-
-    # custom serialization, since shard is self-referential
-    def _dump(depth)
-      self.id.to_s
-    end
-
-    def self._load(str)
-      lookup(str.to_i)
-    end
-
-    def drop_database
-      raise("Cannot drop the database of the default shard") if self.default?
-      return unless read_attribute(:name)
-
-      begin
-        adapter = self.database_server.config[:adapter]
-        sharding_config = Switchman.config || {}
-        drop_statement = sharding_config[adapter]&.[](:drop_statement)
-        drop_statement ||= sharding_config[:drop_statement]
-        if drop_statement
-          drop_statement = Array(drop_statement).dup.
-            map { |statement| statement.gsub('%{name}', self.name) }
-        end
-
-        case adapter
-        when 'mysql', 'mysql2'
-          self.activate do
-            ::Shackles.activate(:deploy) do
-              drop_statement ||= "DROP DATABASE #{self.name}"
-              Array(drop_statement).each do |stmt|
-                ::ActiveRecord::Base.connection.execute(stmt)
-              end
-            end
-          end
-        when 'postgresql'
-          self.activate do
-            ::Shackles.activate(:deploy) do
-              # Shut up, Postgres!
-              conn = ::ActiveRecord::Base.connection
-              old_proc = conn.raw_connection.set_notice_processor {}
-              begin
-                drop_statement ||= "DROP SCHEMA #{self.name} CASCADE"
-                Array(drop_statement).each do |stmt|
-                  ::ActiveRecord::Base.connection.execute(stmt)
-                end
-              ensure
-                conn.raw_connection.set_notice_processor(&old_proc) if old_proc
-              end
-            end
-          end
-        end
-      rescue
-        logger.info "Drop failed: #{$!}"
-      end
-    end
-
-    # takes an id local to this shard, and returns a global id
-    def global_id_for(local_id)
-      return nil unless local_id
-      local_id + self.id * IDS_PER_SHARD
-    end
-
-    # skip global_id.hash
-    def hash
-      id.hash
-    end
-
-    def destroy
-      raise("Cannot destroy the default shard") if self.default?
-      super
-    end
-
-    private
-
-    def clear_cache
-      Shard.default.activate do
-        Switchman.cache.delete(['shard', id].join('/'))
-        Switchman.cache.delete("default_shard") if default?
-      end
-    end
-
-    def default_name
-      database_server.shard_name(self)
-    end
-
-    def on_rollback
-      # make sure all connection pool proxies are referencing valid pools
-      ::ActiveRecord::Base.connection_handler.connection_pools.each do |pool|
-        next unless pool.is_a?(ConnectionPoolProxy)
-
-        pool.remove_shard!(self)
-      end
-    end
-
-    def hashify_categories(categories)
-      if categories.empty?
-        { :primary => self }
-      else
-        categories.inject({}) { |h, category| h[category] = self; h }
-      end
-    end
-
-  end
-end