nandi 1.0.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +152 -3
- data/lib/generators/nandi/check_constraint/USAGE +9 -0
- data/lib/generators/nandi/check_constraint/check_constraint_generator.rb +2 -4
- data/lib/generators/nandi/compile/USAGE +14 -2
- data/lib/generators/nandi/compile/compile_generator.rb +27 -24
- data/lib/generators/nandi/foreign_key/USAGE +9 -0
- data/lib/generators/nandi/foreign_key/foreign_key_generator.rb +2 -4
- data/lib/generators/nandi/index/USAGE +9 -0
- data/lib/generators/nandi/index/index_generator.rb +2 -4
- data/lib/generators/nandi/migration/USAGE +12 -2
- data/lib/generators/nandi/migration/migration_generator.rb +3 -6
- data/lib/generators/nandi/not_null_check/USAGE +9 -0
- data/lib/generators/nandi/not_null_check/not_null_check_generator.rb +2 -4
- data/lib/nandi/compiled_migration.rb +17 -9
- data/lib/nandi/config.rb +69 -64
- data/lib/nandi/lockfile.rb +74 -54
- data/lib/nandi/migration_violations.rb +110 -0
- data/lib/nandi/multi_database.rb +178 -0
- data/lib/nandi/multi_db_generator.rb +23 -0
- data/lib/nandi/safe_migration_enforcer.rb +78 -82
- data/lib/nandi/version.rb +1 -1
- data/lib/nandi.rb +3 -6
- metadata +5 -2
data/lib/nandi/config.rb
CHANGED
@@ -2,68 +2,19 @@
|
|
2
2
|
|
3
3
|
require "nandi/renderers"
|
4
4
|
require "nandi/lockfile"
|
5
|
+
require "nandi/multi_database"
|
5
6
|
|
6
7
|
module Nandi
|
7
8
|
class Config
|
8
|
-
# Most DDL changes take a very strict lock, but execute very quickly. For these
|
9
|
-
# the statement timeout should be very tight, so that if there's an unexpected
|
10
|
-
# delay the query queue does not back up.
|
11
|
-
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT = 1_500
|
12
|
-
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT = 5_000
|
13
|
-
|
14
|
-
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT_LIMIT =
|
15
|
-
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT
|
16
|
-
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT_LIMIT =
|
17
|
-
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT
|
18
|
-
DEFAULT_LOCKFILE_DIRECTORY = File.join(Dir.pwd, "db")
|
19
|
-
DEFAULT_CONCURRENT_TIMEOUT_LIMIT = 3_600_000
|
20
9
|
DEFAULT_COMPILE_FILES = "all"
|
10
|
+
DEFAULT_LOCKFILE_DIRECTORY = File.join(Dir.pwd, "db")
|
11
|
+
|
21
12
|
# The rendering backend used to produce output. The only supported option
|
22
13
|
# at current is Nandi::Renderers::ActiveRecord, which produces ActiveRecord
|
23
14
|
# migrations.
|
24
15
|
# @return [Class]
|
25
16
|
attr_accessor :renderer
|
26
17
|
|
27
|
-
# The default lock timeout for migrations that take ACCESS EXCLUSIVE
|
28
|
-
# locks. Can be overridden by way of the `set_lock_timeout` class
|
29
|
-
# method in a given migration. Default: 1500ms.
|
30
|
-
# @return [Integer]
|
31
|
-
attr_accessor :access_exclusive_lock_timeout
|
32
|
-
|
33
|
-
# The default statement timeout for migrations that take ACCESS EXCLUSIVE
|
34
|
-
# locks. Can be overridden by way of the `set_statement_timeout` class
|
35
|
-
# method in a given migration. Default: 1500ms.
|
36
|
-
# @return [Integer]
|
37
|
-
attr_accessor :access_exclusive_statement_timeout
|
38
|
-
|
39
|
-
# The maximum lock timeout for migrations that take an ACCESS EXCLUSIVE
|
40
|
-
# lock and therefore block all reads and writes. Default: 5,000ms.
|
41
|
-
# @return [Integer]
|
42
|
-
attr_accessor :access_exclusive_statement_timeout_limit
|
43
|
-
|
44
|
-
# The maximum statement timeout for migrations that take an ACCESS
|
45
|
-
# EXCLUSIVE lock and therefore block all reads and writes. Default: 1500ms.
|
46
|
-
# @return [Integer]
|
47
|
-
attr_accessor :access_exclusive_lock_timeout_limit
|
48
|
-
|
49
|
-
# The minimum statement timeout for migrations that take place concurrently.
|
50
|
-
# Default: 3,600,000ms (ie, 3 hours).
|
51
|
-
# @return [Integer]
|
52
|
-
attr_accessor :concurrent_statement_timeout_limit
|
53
|
-
|
54
|
-
# The minimum lock timeout for migrations that take place concurrently.
|
55
|
-
# Default: 3,600,000ms (ie, 3 hours).
|
56
|
-
# @return [Integer]
|
57
|
-
attr_accessor :concurrent_lock_timeout_limit
|
58
|
-
|
59
|
-
# The directory for Nandi migrations. Default: `db/safe_migrations`
|
60
|
-
# @return [String]
|
61
|
-
attr_accessor :migration_directory
|
62
|
-
|
63
|
-
# The directory for output files. Default: `db/migrate`
|
64
|
-
# @return [String]
|
65
|
-
attr_accessor :output_directory
|
66
|
-
|
67
18
|
# The files to compile when the compile generator is run. Default: `all`
|
68
19
|
# May be one of the following:
|
69
20
|
# - 'all' compiles all files
|
@@ -72,7 +23,7 @@ module Nandi
|
|
72
23
|
# - a timestamp range, e.g. '>=20190101010101'
|
73
24
|
# @return [String]
|
74
25
|
attr_accessor :compile_files
|
75
|
-
|
26
|
+
|
76
27
|
# Directory where .nandilock.yml will be stored
|
77
28
|
# Defaults to project root
|
78
29
|
# @return [String]
|
@@ -83,18 +34,7 @@ module Nandi
|
|
83
34
|
|
84
35
|
def initialize(renderer: Renderers::ActiveRecord)
|
85
36
|
@renderer = renderer
|
86
|
-
@access_exclusive_statement_timeout = DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT
|
87
|
-
@concurrent_lock_timeout_limit =
|
88
|
-
@concurrent_statement_timeout_limit =
|
89
|
-
DEFAULT_CONCURRENT_TIMEOUT_LIMIT
|
90
37
|
@custom_methods = {}
|
91
|
-
@access_exclusive_lock_timeout =
|
92
|
-
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT
|
93
|
-
@access_exclusive_statement_timeout =
|
94
|
-
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT
|
95
|
-
@access_exclusive_statement_timeout_limit =
|
96
|
-
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT_LIMIT
|
97
|
-
@access_exclusive_lock_timeout_limit = DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT_LIMIT
|
98
38
|
@compile_files = DEFAULT_COMPILE_FILES
|
99
39
|
@lockfile_directory = DEFAULT_LOCKFILE_DIRECTORY
|
100
40
|
end
|
@@ -119,6 +59,71 @@ module Nandi
|
|
119
59
|
custom_methods[name] = klass
|
120
60
|
end
|
121
61
|
|
62
|
+
# Register a database to compile migrations for.
|
63
|
+
def register_database(name, config = {})
|
64
|
+
multi_db_config.register(name, config)
|
65
|
+
end
|
66
|
+
|
67
|
+
def lockfile_path(database_name = nil)
|
68
|
+
File.join(lockfile_directory, databases.config(database_name).lockfile_name)
|
69
|
+
end
|
70
|
+
|
71
|
+
# Explicitly define getters for backwards compatibility when the database isn't specified.
|
72
|
+
# rubocop:disable Layout/LineLength
|
73
|
+
def migration_directory(database_name = nil) = config(database_name).migration_directory
|
74
|
+
def output_directory(database_name = nil) = config(database_name).output_directory
|
75
|
+
def access_exclusive_lock_timeout(database_name = nil) = config(database_name).access_exclusive_lock_timeout
|
76
|
+
def access_exclusive_lock_timeout_limit(database_name = nil) = config(database_name).access_exclusive_lock_timeout_limit
|
77
|
+
def access_exclusive_statement_timeout(database_name = nil) = config(database_name).access_exclusive_statement_timeout
|
78
|
+
def access_exclusive_statement_timeout_limit(database_name = nil) = config(database_name).access_exclusive_statement_timeout_limit
|
79
|
+
def concurrent_lock_timeout_limit(database_name = nil) = config(database_name).concurrent_lock_timeout_limit
|
80
|
+
def concurrent_statement_timeout_limit(database_name = nil) = config(database_name).concurrent_statement_timeout_limit
|
81
|
+
# rubocop:enable Layout/LineLength
|
82
|
+
|
83
|
+
# Delegate setter methods to the default database for backwards compatibility
|
84
|
+
delegate :migration_directory=,
|
85
|
+
:output_directory=,
|
86
|
+
:access_exclusive_lock_timeout=,
|
87
|
+
:access_exclusive_lock_timeout_limit=,
|
88
|
+
:access_exclusive_statement_timeout=,
|
89
|
+
:access_exclusive_statement_timeout_limit=,
|
90
|
+
:concurrent_lock_timeout_limit=,
|
91
|
+
:concurrent_statement_timeout_limit=,
|
92
|
+
to: :default
|
93
|
+
|
94
|
+
delegate :validate!, :default, :config, to: :databases
|
95
|
+
|
96
|
+
alias_method :database, :config
|
97
|
+
|
98
|
+
def databases
|
99
|
+
# If we've never registered any databases, use a single database with
|
100
|
+
# default values for backwards compatibility.
|
101
|
+
@multi_db_config.nil? ? single_db_config : @multi_db_config
|
102
|
+
end
|
103
|
+
|
104
|
+
def validate!
|
105
|
+
if @single_db_config && @multi_db_config
|
106
|
+
raise ArgumentError, "Cannot use multi and single database config. Config setters are now deprecated, " \
|
107
|
+
"use only `register_database(name, config)` to configure Nandi."
|
108
|
+
end
|
109
|
+
databases.validate!
|
110
|
+
end
|
111
|
+
|
112
|
+
private
|
113
|
+
|
114
|
+
def single_db_config
|
115
|
+
# Pre-register the default database to ensure behavior is backwards compatible.
|
116
|
+
@single_db_config ||= begin
|
117
|
+
single_db_config = MultiDatabase.new
|
118
|
+
single_db_config.register(:primary, {})
|
119
|
+
single_db_config
|
120
|
+
end
|
121
|
+
end
|
122
|
+
|
123
|
+
def multi_db_config
|
124
|
+
@multi_db_config ||= MultiDatabase.new
|
125
|
+
end
|
126
|
+
|
122
127
|
def lockfile_directory
|
123
128
|
@lockfile_directory ||= Pathname.new(@lockfile_directory)
|
124
129
|
end
|
data/lib/nandi/lockfile.rb
CHANGED
@@ -5,74 +5,94 @@ require "digest"
|
|
5
5
|
|
6
6
|
module Nandi
|
7
7
|
class Lockfile
|
8
|
+
attr_reader :db_name
|
9
|
+
|
8
10
|
class << self
|
9
|
-
|
10
|
-
|
11
|
+
# Registry pattern using class variables to maintain singleton instances
|
12
|
+
# per database. This ensures that lockfile operations for the same database
|
13
|
+
# always work with the same instance, maintaining consistency.
|
14
|
+
def for(db_name)
|
15
|
+
@instances ||= {}
|
16
|
+
# Handle nil by using :primary as default
|
17
|
+
key = db_name.nil? ? :primary : db_name.to_sym
|
18
|
+
@instances[key] ||= new(key)
|
11
19
|
end
|
12
20
|
|
13
|
-
def
|
14
|
-
|
15
|
-
|
16
|
-
File.write(path, {}.to_yaml)
|
21
|
+
def clear_instances!
|
22
|
+
@instances = {}
|
17
23
|
end
|
18
24
|
|
19
|
-
|
20
|
-
|
25
|
+
private_class_method :new
|
26
|
+
end
|
21
27
|
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
}
|
26
|
-
end
|
28
|
+
def initialize(db_name = nil)
|
29
|
+
@db_name = db_name || Nandi.config.default.name
|
30
|
+
end
|
27
31
|
|
28
|
-
|
29
|
-
|
32
|
+
def file_present?
|
33
|
+
File.exist?(path)
|
34
|
+
end
|
30
35
|
|
31
|
-
|
32
|
-
|
33
|
-
compiled_digest: lockfile.dig(file_name, :compiled_digest),
|
34
|
-
}
|
35
|
-
end
|
36
|
+
def create!
|
37
|
+
return if file_present?
|
36
38
|
|
37
|
-
|
38
|
-
|
39
|
+
File.write(path, {}.to_yaml)
|
40
|
+
end
|
39
41
|
|
40
|
-
|
42
|
+
def add(file_name:, source_digest:, compiled_digest:)
|
43
|
+
load!
|
41
44
|
|
42
|
-
|
43
|
-
|
45
|
+
@lockfile[file_name] = {
|
46
|
+
source_digest: source_digest,
|
47
|
+
compiled_digest: compiled_digest,
|
48
|
+
}
|
49
|
+
end
|
44
50
|
|
45
|
-
|
46
|
-
|
47
|
-
#
|
48
|
-
# Normally, new migrations are added to the bottom of the Nandi lockfile.
|
49
|
-
# This is relatively unfriendly to git's merge algorithm, and means that
|
50
|
-
# if someone merges a pull request with a completely unrelated migration,
|
51
|
-
# you'll have to rebase to get yours merged as the last line of the file
|
52
|
-
# will be seen as a conflict (both branches added content there).
|
53
|
-
#
|
54
|
-
# This is in contrast to something like Gemfile.lock, where changes tend
|
55
|
-
# to be distributed throughout the file. The idea behind sorting by
|
56
|
-
# SHA-256 hash is to distribute new Nandi lockfile entries evenly, but
|
57
|
-
# also stably through the file. It needs to be stable or we'd have even
|
58
|
-
# worse merge conflict problems (e.g. if we randomised the order on
|
59
|
-
# writing the file, the whole thing would conflict pretty much every time
|
60
|
-
# it was regenerated).
|
61
|
-
content = lockfile.to_h.deep_stringify_keys.sort_by do |k, _|
|
62
|
-
Digest::SHA256.hexdigest(k)
|
63
|
-
end.to_h.to_yaml
|
64
|
-
|
65
|
-
File.write(path, content)
|
66
|
-
end
|
51
|
+
def get(file_name)
|
52
|
+
load!
|
67
53
|
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
54
|
+
{
|
55
|
+
source_digest: @lockfile.dig(file_name, :source_digest),
|
56
|
+
compiled_digest: @lockfile.dig(file_name, :compiled_digest),
|
57
|
+
}
|
58
|
+
end
|
59
|
+
|
60
|
+
def load!
|
61
|
+
return @lockfile if @lockfile
|
62
|
+
|
63
|
+
create! unless file_present?
|
64
|
+
|
65
|
+
@lockfile = YAML.safe_load_file(path).with_indifferent_access
|
66
|
+
end
|
67
|
+
|
68
|
+
def persist!
|
69
|
+
load!
|
70
|
+
# This is a somewhat ridiculous trick to avoid merge conflicts in git.
|
71
|
+
#
|
72
|
+
# Normally, new migrations are added to the bottom of the Nandi lockfile.
|
73
|
+
# This is relatively unfriendly to git's merge algorithm, and means that
|
74
|
+
# if someone merges a pull request with a completely unrelated migration,
|
75
|
+
# you'll have to rebase to get yours merged as the last line of the file
|
76
|
+
# will be seen as a conflict (both branches added content there).
|
77
|
+
#
|
78
|
+
# This is in contrast to something like Gemfile.lock, where changes tend
|
79
|
+
# to be distributed throughout the file. The idea behind sorting by
|
80
|
+
# SHA-256 hash is to distribute new Nandi lockfile entries evenly, but
|
81
|
+
# also stably through the file. It needs to be stable or we'd have even
|
82
|
+
# worse merge conflict problems (e.g. if we randomised the order on
|
83
|
+
# writing the file, the whole thing would conflict pretty much every time
|
84
|
+
# it was regenerated).
|
85
|
+
content = @lockfile.to_h.deep_stringify_keys.sort_by do |k, _|
|
86
|
+
Digest::SHA256.hexdigest(k)
|
87
|
+
end.to_h.to_yaml
|
88
|
+
|
89
|
+
File.write(path, content)
|
90
|
+
end
|
91
|
+
|
92
|
+
private
|
74
93
|
|
75
|
-
|
94
|
+
def path
|
95
|
+
Nandi.config.lockfile_path(@db_name)
|
76
96
|
end
|
77
97
|
end
|
78
98
|
end
|
@@ -0,0 +1,110 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Nandi
|
4
|
+
class MigrationViolations
|
5
|
+
def initialize
|
6
|
+
@ungenerated_files = []
|
7
|
+
@handwritten_files = []
|
8
|
+
@out_of_date_files = []
|
9
|
+
@hand_edited_files = []
|
10
|
+
end
|
11
|
+
|
12
|
+
def add_ungenerated(missing_files, directory)
|
13
|
+
return if missing_files.empty?
|
14
|
+
|
15
|
+
full_paths = build_full_paths(missing_files, directory)
|
16
|
+
@ungenerated_files.concat(full_paths)
|
17
|
+
end
|
18
|
+
|
19
|
+
def add_handwritten(handwritten_files, directory)
|
20
|
+
return if handwritten_files.empty?
|
21
|
+
|
22
|
+
full_paths = build_full_paths(handwritten_files, directory)
|
23
|
+
@handwritten_files.concat(full_paths)
|
24
|
+
end
|
25
|
+
|
26
|
+
def add_out_of_date(changed_files, directory)
|
27
|
+
return if changed_files.empty?
|
28
|
+
|
29
|
+
full_paths = build_full_paths(changed_files, directory)
|
30
|
+
@out_of_date_files.concat(full_paths)
|
31
|
+
end
|
32
|
+
|
33
|
+
def add_hand_edited(altered_files, directory)
|
34
|
+
return if altered_files.empty?
|
35
|
+
|
36
|
+
full_paths = build_full_paths(altered_files, directory)
|
37
|
+
@hand_edited_files.concat(full_paths)
|
38
|
+
end
|
39
|
+
|
40
|
+
def any?
|
41
|
+
[@ungenerated_files, @handwritten_files, @out_of_date_files, @hand_edited_files].any?(&:any?)
|
42
|
+
end
|
43
|
+
|
44
|
+
def to_error_message
|
45
|
+
error_messages = []
|
46
|
+
|
47
|
+
error_messages << ungenerated_error if @ungenerated_files.any?
|
48
|
+
error_messages << handwritten_error if @handwritten_files.any?
|
49
|
+
error_messages << out_of_date_error if @out_of_date_files.any?
|
50
|
+
error_messages << hand_edited_error if @hand_edited_files.any?
|
51
|
+
|
52
|
+
error_messages.join("\n\n")
|
53
|
+
end
|
54
|
+
|
55
|
+
private
|
56
|
+
|
57
|
+
def build_full_paths(filenames, directory)
|
58
|
+
filenames.map { |filename| File.join(directory, filename) }
|
59
|
+
end
|
60
|
+
|
61
|
+
def format_file_list(files)
|
62
|
+
" - #{files.sort.join("\n - ")}"
|
63
|
+
end
|
64
|
+
|
65
|
+
def ungenerated_error
|
66
|
+
<<~ERROR.strip
|
67
|
+
The following migrations are pending generation:
|
68
|
+
|
69
|
+
#{format_file_list(@ungenerated_files)}
|
70
|
+
|
71
|
+
Please run `rails generate nandi:compile` to generate your migrations.
|
72
|
+
ERROR
|
73
|
+
end
|
74
|
+
|
75
|
+
def handwritten_error
|
76
|
+
<<~ERROR.strip
|
77
|
+
The following migrations have been written by hand, not generated:
|
78
|
+
|
79
|
+
#{format_file_list(@handwritten_files)}
|
80
|
+
|
81
|
+
Please use Nandi to generate your migrations. In exceptional cases, hand-written
|
82
|
+
ActiveRecord migrations can be added to the .nandiignore file. Doing so will
|
83
|
+
require additional review that will slow your PR down.
|
84
|
+
ERROR
|
85
|
+
end
|
86
|
+
|
87
|
+
def out_of_date_error
|
88
|
+
<<~ERROR.strip
|
89
|
+
The following migrations have changed but not been recompiled:
|
90
|
+
|
91
|
+
#{format_file_list(@out_of_date_files)}
|
92
|
+
|
93
|
+
Please recompile your migrations to make sure that the changes you expect are
|
94
|
+
applied.
|
95
|
+
ERROR
|
96
|
+
end
|
97
|
+
|
98
|
+
def hand_edited_error
|
99
|
+
<<~ERROR.strip
|
100
|
+
The following migrations have had their generated content altered:
|
101
|
+
|
102
|
+
#{format_file_list(@hand_edited_files)}
|
103
|
+
|
104
|
+
Please don't hand-edit generated migrations. If you want to write a regular
|
105
|
+
ActiveRecord::Migration, please do so and add it to .nandiignore. Note that
|
106
|
+
this will require additional review that will slow your PR down.
|
107
|
+
ERROR
|
108
|
+
end
|
109
|
+
end
|
110
|
+
end
|
@@ -0,0 +1,178 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Nandi
|
4
|
+
class MultiDatabase
|
5
|
+
class Database
|
6
|
+
# Most DDL changes take a very strict lock, but execute very quickly. For these
|
7
|
+
# the statement timeout should be very tight, so that if there's an unexpected
|
8
|
+
# delay the query queue does not back up.
|
9
|
+
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT = 1_500
|
10
|
+
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT = 5_000
|
11
|
+
|
12
|
+
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT_LIMIT =
|
13
|
+
DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT
|
14
|
+
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT_LIMIT =
|
15
|
+
DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT
|
16
|
+
DEFAULT_CONCURRENT_TIMEOUT_LIMIT = 3_600_000
|
17
|
+
|
18
|
+
DEFAULT_MIGRATION_DIRECTORY = "db/safe_migrations"
|
19
|
+
DEFAULT_OUTPUT_DIRECTORY = "db/migrate"
|
20
|
+
|
21
|
+
# The default lock timeout for migrations that take ACCESS EXCLUSIVE
|
22
|
+
# locks. Can be overridden by way of the `set_lock_timeout` class
|
23
|
+
# method in a given migration. Default: 1500ms.
|
24
|
+
# @return [Integer]
|
25
|
+
attr_accessor :access_exclusive_lock_timeout
|
26
|
+
|
27
|
+
# The default statement timeout for migrations that take ACCESS EXCLUSIVE
|
28
|
+
# locks. Can be overridden by way of the `set_statement_timeout` class
|
29
|
+
# method in a given migration. Default: 1500ms.
|
30
|
+
# @return [Integer]
|
31
|
+
attr_accessor :access_exclusive_statement_timeout
|
32
|
+
|
33
|
+
# The maximum lock timeout for migrations that take an ACCESS EXCLUSIVE
|
34
|
+
# lock and therefore block all reads and writes. Default: 5,000ms.
|
35
|
+
# @return [Integer]
|
36
|
+
attr_accessor :access_exclusive_statement_timeout_limit
|
37
|
+
|
38
|
+
# The maximum statement timeout for migrations that take an ACCESS
|
39
|
+
# EXCLUSIVE lock and therefore block all reads and writes. Default: 1500ms.
|
40
|
+
# @return [Integer]
|
41
|
+
attr_accessor :access_exclusive_lock_timeout_limit
|
42
|
+
|
43
|
+
# The minimum statement timeout for migrations that take place concurrently.
|
44
|
+
# Default: 3,600,000ms (ie, 3 hours).
|
45
|
+
# @return [Integer]
|
46
|
+
attr_accessor :concurrent_statement_timeout_limit
|
47
|
+
|
48
|
+
# The minimum lock timeout for migrations that take place concurrently.
|
49
|
+
# Default: 3,600,000ms (ie, 3 hours).
|
50
|
+
# @return [Integer]
|
51
|
+
attr_accessor :concurrent_lock_timeout_limit
|
52
|
+
|
53
|
+
# The directory for output files. Default: `db/migrate`
|
54
|
+
# @return [String]
|
55
|
+
attr_accessor :output_directory
|
56
|
+
|
57
|
+
attr_reader :name, :default
|
58
|
+
|
59
|
+
attr_accessor :migration_directory,
|
60
|
+
:lockfile_name
|
61
|
+
|
62
|
+
def initialize(name:, config:)
|
63
|
+
@name = name
|
64
|
+
@default = @name == :primary || config[:default] == true
|
65
|
+
|
66
|
+
# Paths and files
|
67
|
+
@migration_directory = config[:migration_directory] || "db/#{path_prefix(name, default)}safe_migrations"
|
68
|
+
@output_directory = config[:output_directory] || "db/#{path_prefix(name, default)}migrate"
|
69
|
+
@lockfile_name = config[:lockfile_name] || ".#{path_prefix(name, default)}nandilock.yml"
|
70
|
+
|
71
|
+
timeout_limits(config)
|
72
|
+
end
|
73
|
+
|
74
|
+
private
|
75
|
+
|
76
|
+
def timeout_limits(config)
|
77
|
+
@access_exclusive_lock_timeout =
|
78
|
+
config[:access_exclusive_lock_timeout] || DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT
|
79
|
+
@access_exclusive_statement_timeout =
|
80
|
+
config[:access_exclusive_statement_timeout] || DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT
|
81
|
+
@access_exclusive_lock_timeout_limit =
|
82
|
+
config[:access_exclusive_lock_timeout_limit] || DEFAULT_ACCESS_EXCLUSIVE_LOCK_TIMEOUT_LIMIT
|
83
|
+
@access_exclusive_statement_timeout_limit =
|
84
|
+
config[:access_exclusive_statement_timeout_limit] || DEFAULT_ACCESS_EXCLUSIVE_STATEMENT_TIMEOUT_LIMIT
|
85
|
+
@concurrent_lock_timeout_limit =
|
86
|
+
config[:concurrent_lock_timeout_limit] || DEFAULT_CONCURRENT_TIMEOUT_LIMIT
|
87
|
+
@concurrent_statement_timeout_limit =
|
88
|
+
config[:concurrent_statement_timeout_limit] || DEFAULT_CONCURRENT_TIMEOUT_LIMIT
|
89
|
+
end
|
90
|
+
|
91
|
+
def path_prefix(name, default)
|
92
|
+
default ? "" : "#{name}_"
|
93
|
+
end
|
94
|
+
end
|
95
|
+
|
96
|
+
def initialize
|
97
|
+
@databases = {}
|
98
|
+
end
|
99
|
+
|
100
|
+
def config(name = nil)
|
101
|
+
# If name isn't specified, return config for the default database. This mimics behavior
|
102
|
+
# of the rails migration commands.
|
103
|
+
return default if name.nil?
|
104
|
+
|
105
|
+
name = name.to_sym
|
106
|
+
db_config = @databases[name]
|
107
|
+
raise ArgumentError, "Missing database configuration for #{name}" if db_config.nil?
|
108
|
+
|
109
|
+
db_config
|
110
|
+
end
|
111
|
+
|
112
|
+
def default
|
113
|
+
@databases.values.find(&:default)
|
114
|
+
end
|
115
|
+
|
116
|
+
def register(name, config)
|
117
|
+
name = name.to_sym
|
118
|
+
raise ArgumentError, "Database #{name} already registered" if @databases.key?(name)
|
119
|
+
|
120
|
+
@databases[name] = Database.new(name: name, config: config)
|
121
|
+
end
|
122
|
+
|
123
|
+
def names
|
124
|
+
@databases.keys
|
125
|
+
end
|
126
|
+
|
127
|
+
def validate!
|
128
|
+
enforce_default_db_for_multi_database!
|
129
|
+
enforce_names_for_multi_database!
|
130
|
+
validate_unique_migration_directories!
|
131
|
+
validate_unique_output_directories!
|
132
|
+
end
|
133
|
+
|
134
|
+
delegate :each, :map, to: :@databases
|
135
|
+
|
136
|
+
private
|
137
|
+
|
138
|
+
def enforce_default_db_for_multi_database!
|
139
|
+
# If there is a `primary` database, we take that as the default database
|
140
|
+
# following rails behavior. If not, we will validate that there is one specified
|
141
|
+
# default database using the `default: true` option.
|
142
|
+
if @databases.values.none?(&:default)
|
143
|
+
raise ArgumentError, "Missing default database. Specify a default database using the `default: true` option " \
|
144
|
+
"or by registering `primary` as a database name."
|
145
|
+
end
|
146
|
+
if @databases.values.count(&:default) > 1
|
147
|
+
raise ArgumentError, "Multiple default databases specified: " \
|
148
|
+
"#{@databases.values.select(&:default).map(&:name).join(', ')}"
|
149
|
+
end
|
150
|
+
end
|
151
|
+
|
152
|
+
def validate_unique_migration_directories!
|
153
|
+
paths = @databases.values.map(&:migration_directory).uniq.filter(&:present?)
|
154
|
+
if paths.length != @databases.values.length
|
155
|
+
raise ArgumentError,
|
156
|
+
"Unique migration directories must be specified for each database"
|
157
|
+
end
|
158
|
+
end
|
159
|
+
|
160
|
+
def validate_unique_output_directories!
|
161
|
+
paths = @databases.values.map(&:output_directory).uniq.filter(&:present?)
|
162
|
+
if paths.length != @databases.values.length
|
163
|
+
raise ArgumentError,
|
164
|
+
"Unique output directories must be specified for each database"
|
165
|
+
end
|
166
|
+
end
|
167
|
+
|
168
|
+
def enforce_names_for_multi_database!
|
169
|
+
# If we're in multi-db mode, enforce that all databases have a name
|
170
|
+
return if @databases.count <= 1
|
171
|
+
|
172
|
+
unknown_names = @databases.keys.select(&:nil?)
|
173
|
+
if unknown_names.any?
|
174
|
+
raise ArgumentError, "Databases must have a name in multi-db mode"
|
175
|
+
end
|
176
|
+
end
|
177
|
+
end
|
178
|
+
end
|
@@ -0,0 +1,23 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Nandi
|
4
|
+
module MultiDbGenerator
|
5
|
+
def self.included(base)
|
6
|
+
base.class_option :database,
|
7
|
+
default: nil,
|
8
|
+
type: :string,
|
9
|
+
desc: "Database to migrate in multi-database mode. " \
|
10
|
+
"If not specified, uses specified default or primary database"
|
11
|
+
end
|
12
|
+
|
13
|
+
private
|
14
|
+
|
15
|
+
def db_name
|
16
|
+
options["database"]&.to_sym
|
17
|
+
end
|
18
|
+
|
19
|
+
def base_path
|
20
|
+
Nandi.config.migration_directory(db_name)
|
21
|
+
end
|
22
|
+
end
|
23
|
+
end
|