nando 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.env +2 -0
- data/.gitignore +19 -0
- data/.rspec +3 -0
- data/.travis.yml +7 -0
- data/Gemfile +6 -0
- data/Gemfile.lock +47 -0
- data/LICENSE +201 -0
- data/README.md +49 -0
- data/Rakefile +6 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/exe/nando +83 -0
- data/lib/nando/baseline_templates/migration.rb +9 -0
- data/lib/nando/errors.rb +13 -0
- data/lib/nando/generator.rb +86 -0
- data/lib/nando/interface.rb +87 -0
- data/lib/nando/logger.rb +30 -0
- data/lib/nando/migration.rb +347 -0
- data/lib/nando/migrator.rb +369 -0
- data/lib/nando/parser.rb +68 -0
- data/lib/nando/parser_templates/migration.rb +13 -0
- data/lib/nando/schema_diff.rb +805 -0
- data/lib/nando/templates/migration.rb +9 -0
- data/lib/nando/templates/migration_without_transaction.rb +9 -0
- data/lib/nando/updater.rb +372 -0
- data/lib/nando/utils.rb +22 -0
- data/lib/nando/version.rb +3 -0
- data/lib/nando.rb +12 -0
- data/nando.gemspec +44 -0
- data/notes.txt +128 -0
- metadata +200 -0
data/lib/nando/generator.rb
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
require 'erb'

# Renders new migration and baseline files from the gem's ERB templates.
module MigrationGenerator

  # creates the actual migration file
  #
  # filepath       - destination path for the new migration file
  # migration_name - snake_case name; camelized into the class name used by the template
  # migration_type - demodulized class name ('Migration' or 'MigrationWithoutTransaction')
  #
  # Raises Nando::GenericError when the target directory does not exist or the
  # migration type is unknown.
  def self.create_migration_file (filepath, migration_name, migration_type)
    dir = File.dirname(filepath)

    if !File.directory?(dir)
      raise Nando::GenericError.new("No directory '#{dir}' was found")
    end

    case migration_type
    when Nando::Migration.name.demodulize
      template_file_name = 'migration'
    when Nando::MigrationWithoutTransaction.name.demodulize
      template_file_name = 'migration_without_transaction'
    else
      # an unknown type previously fell through with a nil template name and
      # failed later with a confusing ENOENT — fail early with a clear message
      raise Nando::GenericError.new("Unknown migration type '#{migration_type}'")
    end

    migration_class_name = migration_name.camelize()
    # TODO: check if binding logic is correct, and if pathing changes when it's a gem
    # File.open with a block guarantees the handle is closed (File.new leaked it)
    File.open(filepath, 'w') do |file|
      render_to_file(File.join(File.dirname(File.expand_path(__FILE__)), "templates/#{template_file_name}.rb"), binding, file)
    end

    puts "Creating a new migration: #{filepath}"
  end

  # based on the template renderer from the commercial engine
  # Renders +template_file+ against +context+ (a Binding) and writes the result
  # to +output_file+ (anything responding to #write).
  def self.render_to_file (template_file, context, output_file)
    output_file.write render(template_file, context)
  end

  # Renders the ERB template at +template_file+ in the given Binding and
  # returns the resulting String.
  def self.render (template_file, context)
    # ERB.new(src, nil, nil) passed the defaults positionally, which is
    # deprecated on Ruby >= 2.6 — the plain form is equivalent
    renderer = ERB.new(File.read(template_file))
    renderer.result(context)
  end

  # Dumps every non-catalog database function into a single "baseline"
  # migration file at +filepath+. The down method is a comment only.
  def self.create_baseline_file (filepath, migration_name)
    dir = File.dirname(filepath)

    if !File.directory?(dir)
      raise Nando::GenericError.new("No directory '#{dir}' was found")
    end

    @db_connection = NandoMigrator.instance.get_database_connection
    results = @db_connection.exec("
      SELECT n.nspname AS function_schema,
      p.proname AS function_name,
      l.lanname AS function_language,
      CASE WHEN l.lanname = 'internal' THEN p.prosrc ELSE pg_get_functiondef(p.oid) END AS definition,
      pg_get_function_arguments(p.oid) AS function_arguments,
      t.typname AS return_type,
      p.proowner AS p_owner
      FROM pg_proc p
      LEFT JOIN pg_namespace n ON p.pronamespace = n.oid
      LEFT JOIN pg_language l ON p.prolang = l.oid
      LEFT JOIN pg_type t ON t.oid = p.prorettype
      WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
      ORDER BY function_schema, function_name
    ")

    up_method = ''
    number_of_functions = 0
    indent = ' '

    # wrap each function definition in an update_function <<-'SQL' ... SQL call
    results.each do |row|
      up_method << "\n" << indent << "update_function <<-'SQL'\n"
      up_method << "#{row['definition']}"
      up_method << "\n" << indent << "SQL\n"
      number_of_functions += 1
    end

    # locals below are consumed by the baseline template through `binding`
    migration_class_name = migration_name.camelize
    migration_type = Nando::Migration.name.demodulize # TODO: atm all baseline files are create as migrations with transactions, this might change later
    migration_up_code = up_method
    migration_down_code = indent + "# #{number_of_functions} functions have been added to this baseline"

    # block form closes the handle (File.new leaked it)
    File.open(filepath, 'w') do |new_file|
      render_to_file(File.join(File.dirname(File.expand_path(__FILE__)), 'baseline_templates/migration.rb'), binding, new_file)
    end

    puts "Creating a new baseline: #{filepath}"
  end

end
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
# CLI-facing helpers for Nando: the help screen and interactive prompts.
module NandoInterface

  # prints help message
  # Builds and prints the full usage text (commands, flags, gem version).
  def self.print_help_message
    schema_mig_table = "schema_migrations" # left as a variable, might fill this value dynamically later

    message = ''
    message += "Usage: nando <command> [options]\n\n"

    # commands
    commands = [
      ['up', "Executes all the migrations that are not yet on the #{schema_mig_table.white.bold} table"],
      ['down', "Rollbacks the last migration in the #{schema_mig_table.white.bold} table"],
      ['apply <version>', "Applies the migration with the specified version (even if it was already executed). Intended for development, not production"],
      ['new <migration name>', "Creates a new migration with the specified name. Use the #{"-t/--type".white.bold} flag to specify the type of migration: #{"[Migration|MigrationWithoutTransaction]".white.bold}"],
      ['parse <source> <destination>', "Parses all the #{"dbmate".white.bold} migrations in the source folder into #{"Nando".white.bold} migrations in the destination folder"],
      ['baseline', "Creates a baseline Nando migration with all the functions currently in the database"],
      ['update <path to migration>', "Updates the specified migration. Use #{"-f/--function".white.bold} to add one or more functions to the migration file"],
      ['diff <source> <target>', "Compares 2 schemas in the database and suggests SQL commands to fix any changes found"]
    ]

    # pad every description so they all start after the longest "nando <command>"
    largest_command = commands.max_by { |command| command[0].length }
    required_indent = "nando #{largest_command[0]}".length

    message += "Commands:\n"
    commands.each do |command|
      message += build_command_message(command[0], command[1], required_indent)
    end

    # flags
    flags = [
      ['-t/--type', 'Used to specify the migration type'],
      ['-f/--function', 'Used to specify which function files to add to a migration'],
      ['-h/--help', 'Shows the help message']
    ]

    largest_flag = flags.max_by { |flag| flag[0].length }
    required_indent = largest_flag[0].length

    message += "\nFlags:\n"
    flags.each do |flag|
      message += build_flag_message(flag[0], flag[1], required_indent)
    end

    message += "Nando Version (#{Nando::VERSION})"

    puts message
  end

  # Formats one command row of the help output, padded to +required_indent+.
  def self.build_command_message (command, description, required_indent)
    command_message = "nando #{command}"
    indent = " " * (required_indent - command_message.length)
    return "#{command_message.white.bold}#{indent} #{description}\n"
  end

  # Formats one flag row of the help output, padded to +required_indent+.
  def self.build_flag_message (flag, description, required_indent)
    indent = " " * (required_indent - flag.length)
    return "#{flag.white.bold}#{indent} #{description}\n"
  end

  # Prompts for a blank-line-terminated list of function names and returns
  # the stripped, non-empty lines as an Array (possibly empty).
  def self.get_user_function_list
    puts 'Enter the list of functions to add to the current migration: (Enter an empty line to exit)'.bold.magenta
    input = multi_line_gets
    return input.split("\n").map(&:strip).reject(&:empty?)
  end

  # Accumulates input lines until a blank line OR end-of-input.
  # The original looped `until gets == "\n"`, which raised TypeError at EOF
  # (gets returns nil and `all_text << nil` blows up); nil now ends the loop.
  def self.multi_line_gets (all_text = '')
    while (text = gets) && text != "\n"
      all_text << text
    end
    return all_text.chomp
  end

  # if input is Y/y return true, else return false
  def self.get_user_input_boolean (message)
    puts "\n#{message} (Y/N)".magenta.bold
    # TODO: review error when not using $stdin (might need to use it above as well)
    input = $stdin.gets.chomp.downcase.strip
    return input == 'y'
  end

end
|
data/lib/nando/logger.rb
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# logger functions
|
|
2
|
+
require 'colorize'
|
|
3
|
+
|
|
4
|
+
# Prints "HEADER: message" with the header rendered through the given
# colorize method (e.g. :yellow) and bolded; the message itself is plain.
def _print_labelled (message, header, color)
  print "#{header}: ".public_send(color).bold
  puts message
end

# Yellow WARNING-prefixed message.
def _warn (message, header = 'WARNING')
  _print_labelled(message, header, :yellow)
end

# Green SUCCESS-prefixed message.
def _success (message, header = 'SUCCESS')
  _print_labelled(message, header, :green)
end

# Red ERROR-prefixed message.
def _error (message, header = 'ERROR')
  _print_labelled(message, header, :red)
end

# Cyan DEBUG-prefixed message; silent unless ENV['DEBUG'] is exactly 'true'.
def _debug (message, header = 'DEBUG')
  if ENV['DEBUG'] != 'true'
    return
  end

  _print_labelled(message, header, :light_cyan)
end

# Plain, uncolored message.
def _info (message)
  puts message
end
|
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
module Nando
|
|
2
|
+
|
|
3
|
+
# Base class for Nando migrations: holds the database connection plus the
# migration bookkeeping table/field names, and runs the user-defined
# migration method inside a transaction.
class Migration
  def initialize (conn, version)
    @conn = conn
    @version = version
    @migration_table = NandoMigrator.instance.migration_table
    @migration_field = NandoMigrator.instance.migration_field
  end

  # Runs an arbitrary SQL statement on the current connection.
  def execute (sql)
    @conn.exec(sql)
  end

  # Installs or replaces a database function.
  def update_function (sql)
    # TODO: add validations here
    @conn.exec(sql)
  end

  # Invokes +method+ (e.g. :up or :down) wrapped in a transaction. While the
  # transaction is open, @conn points at the transaction connection so that
  # execute/update_function calls participate in it; the original connection
  # is put back afterwards.
  def execute_migration (method)
    # TODO: review this is the best way of creating a transaction (don't know if re-assigning connections has weird behaviours)
    saved_connection = @conn
    @conn.transaction do |txn_conn|
      @conn = txn_conn
      send(method)
    end
    @conn = saved_connection
  end

  # Fires the Rake tasks that flush the PostgreSQL-related Redis caches.
  def invalidate_postgresql_redis_cache!
    # TODO: how to do this?
    Rake::Task['cloudware:toconline:system:redis:postgresql_cache_invalidate'].invoke
    Rake::Task['cloudware:toconline:system:redis:redis_current_company_invalidate'].invoke
  end
end
|
|
36
|
+
|
|
37
|
+
# Migration variant whose execute_migration does NOT wrap the up/down method
# in a transaction (it just sends the method). Also carries the custom
# Cloudware helpers that iterate per-company / accounting / user-template
# schemas and record per-schema migration state.
class MigrationWithoutTransaction < Migration
  def initialize (conn, version)
    super(conn, version)
    # NOTE(review): this unconditionally drops a hard-coded sharding function
    # every time one of these migrations is instantiated — looks like leftover
    # bootstrap/debug code; confirm it is intentional.
    @conn.exec('DROP FUNCTION IF EXISTS sharding.create_company_shard(integer,text)')
  end

  # Runs +method+ directly, with no surrounding transaction (unlike the parent).
  def execute_migration (method)
    self.send(method)
  end


  # custom CW methods

  ###########################################################
  # WORK OVER SHARDED COMPANIES, AND ALREADY SHARDED MODULES
  ###########################################################
  # Runs +block+ on company schemas. With no module_name: sharded companies
  # plus one extra call for 'public' (unsharded). With :all: every company.
  # Otherwise: only companies that have the named module with schema structure.
  def migrate_companies (module_name = nil, options = {}, &block)
    return if block.nil?

    if module_name.nil?
      work_on_schemas(get_sharded_company_schemas, 'global company', :up, options, &block)
      # TODO: splited a "say_with_time", might need to review this
      puts "[PUBLIC] Running migration on unsharded companies"
      block.call('public', nil, nil, 'tablespace_000')
    elsif module_name.to_sym == :all
      work_on_schemas(get_companies_schemas, 'global company', :up, options, &block)
    else
      work_on_schemas(get_companies_schemas_from_module(module_name), 'global company', :up, options, &block)
    end
  end

  # Mirror of migrate_companies with direction :down.
  # NOTE(review): unlike migrate_companies, there is no nil-block guard here.
  def rollback_companies (module_name = nil, options = {}, &block)
    if module_name.nil?
      work_on_schemas(get_sharded_company_schemas, 'global company', :down, options, &block)
      # TODO: splited a "say_with_time", might need to review this
      puts "[PUBLIC] Rolling back migration on unsharded companies"
      block.call('public', nil, nil, 'tablespace_000')
    elsif module_name.to_sym == :all
      work_on_schemas(get_companies_schemas, 'global company', :down, options, &block)
    else
      work_on_schemas(get_companies_schemas_from_module(module_name), 'global company', :down, options, &block)
    end
  end


  #################################
  # WORK OVER ACCOUNTING COMPANIES
  #################################
  def migrate_accounting_companies (options = {}, &block)
    return if block.nil?

    work_on_schemas(get_accounting_companies_schemas, 'accounting company', :up, options, &block)
  end

  def rollback_accounting_companies(options = {}, &block)
    work_on_schemas(get_accounting_companies_schemas, 'accounting company', :down, options, &block)
  end

  # Runs +block+ once per fiscal year of every accounting company schema.
  # Block receives: schema, table_prefix, fiscal_year row, company_id,
  # tablespace_name, company_schema, use_sharded_company.
  def migrate_fiscal_years (options = {}, &block)
    return if block.nil?
    work_on_schemas get_accounting_companies_schemas, 'accounting company', :up, options do |schema, company_id, use_sharded_company, tablespace_name, company_schema|
      each_fiscal_year(schema) do |fiscal_year|
        block.call schema, fiscal_year['table_prefix'], fiscal_year, company_id, tablespace_name, company_schema, use_sharded_company
      end
    end
  end

  def rollback_fiscal_years (options = {}, &block)
    work_on_schemas get_accounting_companies_schemas, 'accounting company', :down, options do |schema, company_id, use_sharded_company, tablespace_name, company_schema|
      each_fiscal_year(schema) do |fiscal_year|
        block.call schema, fiscal_year['table_prefix'], fiscal_year, company_id, tablespace_name, company_schema, use_sharded_company
      end
    end
  end

  # Yields every row of "<schema>"."fiscal_years" to the block.
  # NOTE(review): +schema+ is interpolated straight into the SQL — values come
  # from the database itself, but confirm they can never contain quotes.
  def each_fiscal_year (schema, &block)
    return if block.nil?
    @conn.exec(%Q[SELECT * FROM "#{schema}"."fiscal_years"]).to_a.each(&block)
  end


  ###########################
  # WORK OVER USER TEMPLATES
  ###########################
  def migrate_user_schemas (options = {}, &block)
    return if block.nil?

    work_on_schemas(get_user_schemas, 'user template', :up, options, &block)
  end

  def rollback_user_schemas (options = {}, &block)
    work_on_schemas(get_user_schemas, 'user template', :down, options, &block)
  end

  # Runs +block+ once per user template of every user schema.
  # Block receives: schema, table_prefix, user_template row, tablespace_name.
  def migrate_user_templates (options = {}, &block)
    return if block.nil?

    work_on_schemas get_user_schemas, 'user template', :up, options do |schema, id, use_sharded_company, tablespace_name|
      each_user_template(schema) do |user_template|
        block.call(schema, user_template['table_prefix'], user_template, tablespace_name)
      end
    end
  end

  def rollback_user_templates (options = {}, &block)
    work_on_schemas get_user_schemas, 'user template', :down, options do |schema, id, use_sharded_company, tablespace_name|
      each_user_template(schema) do |user_template|
        block.call(schema, user_template['table_prefix'], user_template, tablespace_name)
      end
    end
  end

  # Yields every user_templates row whose schema_name matches, ordered by id.
  # NOTE(review): schema_name is interpolated directly into the SQL string.
  def each_user_template (schema_name, &block)
    return if block.nil?
    @conn.exec(%Q[SELECT * FROM "accounting"."user_templates" WHERE schema_name='#{schema_name}' ORDER BY "id"]).to_a.each(&block)
  end

  # True when information_schema reports the table in the given schema.
  def table_exists? (schema_name, table_name)
    @conn.exec(%Q[SELECT 1 FROM "information_schema"."tables" WHERE "table_schema" = '#{schema_name}' AND "table_name" = '#{table_name}']).any?
  end


  # helper methods

  # Drives the per-schema loop: normalizes +options+ (defaults shown below),
  # prints progress, and dispatches each schema row to up_on_schema /
  # down_on_schema depending on +direction+ (:up or :down).
  # NOTE(review): options[:use_public_schema] is set here but never read in
  # this class — confirm a caller relies on it.
  def work_on_schemas (schemas, schema_type_description, direction, options, &block)
    options ||= {}
    options[:record_on_schema_migrations] = true unless options.has_key?(:record_on_schema_migrations)
    options[:use_public_schema] = true unless options.has_key?(:use_public_schema)

    schema_count = schemas.count.to_s
    options[:max_schemas_per_conn] ||= 200 # min_queries_per_conn / max_queries_per_conn
    options[:_internal_reset_counter] = 0

    running_migration_version = get_migration_version

    puts "#{direction == :up ? 'Migrating' : 'Rolling back'} on #{schema_count} #{schema_type_description} schema(s)"

    schemas.each_with_index do |schema, index|
      # create_schema_migrations_table_on_schema(schema) unless schema_migration_table_exists?(schema)
      # "[  3/120] "-style progress prefix, right-justified to the total width
      send :"#{direction}_on_schema", schema, running_migration_version, options, "[#{(index + 1).to_s.rjust(schema_count.length)}/#{schema_count}] ", &block
    end
  end

  # Applies the migration block to one schema inside its own transaction,
  # skipping schemas that already have this version recorded; records the
  # version afterwards unless options[:record_on_schema_migrations] is false.
  # NOTE(review): `.to_b` is not core Ruby — presumably provided by the host
  # app; verify it is loaded wherever this gem runs.
  def up_on_schema (schema, running_migration_version, options, progress_feedback, &block)
    with_new_connection(options) do
      @conn.transaction do |conn|
        # TODO: ensure this has the same behavior as "ActiveRecord::Base.transaction(requires_new: true)"
        if migration_ran_on_schema?(schema, running_migration_version, conn)
          puts "#{progress_feedback}Migration already ran on schema #{schema['schema_name']}, skipping"
        else
          # TODO: splited a "say_with_time", might need to review this
          puts "#{progress_feedback}Running migration on schema #{schema['schema_name']}"
          block.call(schema['schema_name'], schema['id'], schema['use_sharded_company'].to_b, schema['tablespace_name'], schema['company_schema']) unless block.nil?
          migration_ran_on_schema!(schema, running_migration_version, conn) if options[:record_on_schema_migrations]
        end
      end
    end
  end

  # Inverse of up_on_schema: only runs on schemas where this version WAS
  # recorded, and deletes the record after the block succeeds.
  def down_on_schema (schema, running_migration_version, options, progress_feedback, &block)
    with_new_connection(options) do
      @conn.transaction do |conn|
        # TODO: ensure this has the same behavior as "ActiveRecord::Base.transaction(requires_new: true)"
        if migration_ran_on_schema?(schema, running_migration_version, conn)
          # TODO: splited a "say_with_time", might need to review this
          puts "#{progress_feedback}Rolling back migration on schema #{schema['schema_name']}"
          block.call(schema['schema_name'], schema['id'], schema['use_sharded_company'].to_b, schema['tablespace_name'], schema['company_schema']) unless block.nil?
          migration_rolled_back_on_schema!(schema, running_migration_version, conn) if options[:record_on_schema_migrations]
        else
          puts "#{progress_feedback}Migration didn't run on schema #{schema['schema_name']}, skipping"
        end
      end
    end
  end

  # Resets @conn every options[:max_schemas_per_conn] calls (or immediately on
  # the first call when a statement_timeout is requested), re-applying the
  # statement_timeout after each reset, then yields.
  def with_new_connection (options = {}, &block)
    options[:_internal_reset_counter] ||= 0
    if options[:_internal_reset_counter] == options[:max_schemas_per_conn] || ( 0 == options[:_internal_reset_counter] && !options[:statement_timeout].nil? )
      # TODO: what was the objective of this? Is a reset enough?
      # ActiveRecord::Base.connection.reset!()
      # ActiveRecord::Base.connection.raw_connection.reset
      # ActiveRecord::Base.connection.raw_connection.exec("SET statement_timeout TO #{0 == options[:statement_timeout]? "'48h'" : options[:statement_timeout]}") if options[:statement_timeout]
      @conn.reset()
      # a timeout of 0 is treated as "use the 48h ceiling"
      @conn.exec("SET statement_timeout TO #{0 == options[:statement_timeout]? "'48h'" : options[:statement_timeout]}") if options[:statement_timeout]
      options[:_internal_reset_counter] = 0
    end
    options[:_internal_reset_counter] +=1
    block.call unless block.nil?
  end


  # schema queries

  # Sharded, non-deleted companies on this cluster.
  def get_sharded_company_schemas
    get_schemas %Q[
      SELECT "id",
             "schema_name",
             "use_sharded_company",
             common.get_tablespace_name("schema_name") AS "tablespace_name"
        FROM "public"."companies"
       WHERE "schema_name" IS NOT NULL
         AND "use_sharded_company"
         AND "is_deleted" IS DISTINCT FROM true
         AND "companies"."cluster" = #{database_cluster}
       ORDER BY "id"
    ]
  end

  # Non-deleted companies on this cluster that have the named module with
  # schema structure.
  # NOTE(review): module_name is interpolated directly into the SQL string.
  def get_companies_schemas_from_module (module_name)
    get_schemas %Q[
      SELECT "companies"."id",
             "companies"."schema_name",
             "companies"."use_sharded_company",
             common.get_tablespace_name("companies"."schema_name") AS "tablespace_name"
        FROM "public"."company_modules"
        JOIN "public"."companies" ON "companies"."id" = "company_modules"."company_id"
       WHERE "companies"."schema_name" IS NOT NULL
         AND "company_modules"."name" = '#{module_name}'
         AND "company_modules"."has_schema_structure"
         AND "companies"."is_deleted" IS DISTINCT FROM true
         AND "companies"."cluster" = #{database_cluster}
       ORDER BY "id"
    ]
  end

  # All non-deleted companies on this cluster (sharded or not).
  def get_companies_schemas
    get_schemas %Q[
      SELECT "id",
             "schema_name",
             "use_sharded_company",
             common.get_tablespace_name("schema_name") AS "tablespace_name"
        FROM "public"."companies"
       WHERE "schema_name" IS NOT NULL
         AND "is_deleted" IS DISTINCT FROM true
         AND "companies"."cluster" = #{database_cluster}
       ORDER BY "id"
    ]
  end

  # Accounting companies joined to their parent (non-deleted) company.
  def get_accounting_companies_schemas
    get_schemas %Q[
      SELECT "accounting_companies"."company_id" AS "id",
             "accounting_companies"."schema_name",
             common.get_tablespace_name("accounting_companies"."schema_name") AS "tablespace_name",
             "companies"."schema_name" AS company_schema,
             "companies"."use_sharded_company"
        FROM "accounting"."accounting_companies"
        JOIN "public"."companies" ON "companies"."id" = "accounting_companies"."company_id"
       WHERE "companies"."is_deleted" IS DISTINCT FROM true
         AND "companies"."cluster" = #{database_cluster}
       ORDER BY "accounting_companies"."id"
    ]
  end

  # Distinct user-template schemas (use_sharded_company hard-coded to false).
  def get_user_schemas
    get_schemas %Q[
      SELECT DISTINCT "user_id" AS "id",
             "schema_name",
             false AS "use_sharded_company",
             common.get_tablespace_name("schema_name") AS "tablespace_name"
        FROM "accounting"."user_templates"
       ORDER BY "id"
    ]
  end


  # utils

  # Reads the cluster id from the cloudware.cluster server setting.
  def database_cluster
    @conn.exec("SHOW cloudware.cluster").to_a[0]["cloudware.cluster"].to_i
  end

  # Runs +query+ and returns the rows as an Array of Hashes.
  def get_schemas (query)
    schemas_rows = @conn.exec(query).to_a
    return schemas_rows
  end

  # Version string of the migration currently being executed.
  def get_migration_version
    return @version
  end

  # True when +migration_version+ is recorded in the schema's migration table.
  # Uses +conn+ (the per-schema transaction connection) when given, @conn otherwise.
  def migration_ran_on_schema? (schema, migration_version, conn = nil)
    query = %Q[SELECT 1 FROM "#{schema['schema_name']}"."#{@migration_table}" WHERE "#{@migration_field}" = '#{migration_version}']
    if conn.nil?
      @conn.exec(query).any?
    else
      conn.exec(query).any?
    end
  end

  # Records +migration_version+ in the schema's migration table.
  def migration_ran_on_schema! (schema, migration_version, conn = nil)
    query = %Q[INSERT INTO "#{schema['schema_name']}"."#{@migration_table}" ("#{@migration_field}") VALUES ('#{migration_version}')]
    if conn.nil?
      @conn.exec(query)
    else
      conn.exec(query)
    end
  end

  # Removes +migration_version+ from the schema's migration table.
  def migration_rolled_back_on_schema! (schema, migration_version, conn = nil)
    query = %Q[DELETE FROM "#{schema['schema_name']}"."#{@migration_table}" WHERE "#{@migration_field}" = '#{migration_version}']
    if conn.nil?
      @conn.exec(query)
    else
      conn.exec(query)
    end
  end

end
|
|
346
|
+
|
|
347
|
+
end
|