xmigra 1.5.1 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/xmigra/db_support/mssql.rb +43 -0
- data/lib/xmigra/db_support/psql.rb +45 -0
- data/lib/xmigra/declarative_migration.rb +160 -0
- data/lib/xmigra/declarative_support/table.rb +590 -0
- data/lib/xmigra/declarative_support.rb +158 -0
- data/lib/xmigra/impdecl_migration_adder.rb +249 -0
- data/lib/xmigra/migration.rb +10 -3
- data/lib/xmigra/migration_chain.rb +22 -8
- data/lib/xmigra/migration_conflict.rb +27 -0
- data/lib/xmigra/new_access_artifact_adder.rb +44 -0
- data/lib/xmigra/new_index_adder.rb +33 -0
- data/lib/xmigra/new_migration_adder.rb +10 -6
- data/lib/xmigra/permission_script_writer.rb +11 -5
- data/lib/xmigra/program.rb +231 -23
- data/lib/xmigra/schema_updater.rb +28 -5
- data/lib/xmigra/vcs_support/git.rb +189 -8
- data/lib/xmigra/vcs_support/svn.rb +107 -1
- data/lib/xmigra/version.rb +1 -1
- data/lib/xmigra.rb +47 -2
- data/test/git_vcs.rb +64 -4
- data/test/new_files.rb +14 -0
- data/test/runner.rb +49 -4
- data/test/structure_declarative.rb +811 -0
- data/test/utils.rb +17 -2
- metadata +10 -2
@@ -0,0 +1,158 @@
|
|
1
|
+
module XMigra
|
2
|
+
module DeclarativeSupport
|
3
|
+
class SpecificationError < Error; end
|
4
|
+
|
5
|
+
class StructureReader
  # Read-only facade over a parsed-YAML Hash/Array structure.
  #
  # Tracks which keys have been read (via #[], #fetch, #array_fetch, or
  # #raw_item) so that, after processing, callers can enumerate
  # "X-"-prefixed extension keys with #each_extension and unrecognized
  # keys with #each_unused_standard_key.
  EXTENSION_PREFIX = 'X-'

  # +data+ is the underlying Hash or Array; +keypath+ is the key-path
  # prefix (an Array of key segments) locating +data+ in the document.
  def initialize(data, keypath=[])
    @keypath = keypath
    @data = data
    @children = []
    @used_keys = Set.new
  end

  # Fetch the value for +key+, marking the key as used.
  #
  # Hash values are wrapped in a child StructureReader (linked via
  # #parent= so extension/unused-key enumeration can recurse into them).
  # Fetching a key that holds an Array is an error -- use #array_fetch.
  def [](key)
    result, result_key = item_and_keypath(key)

    case result
    when Hash
      result = StructureReader.new(result, result_key).tap do |r|
        r.parent = self
      end
    when Array
      raise "Invalid to fetch an array via [] -- use array_fetch"
    end

    @used_keys << key if @data.kind_of? Hash
    return result
  end

  # Hash#fetch analog: returns self[key] when present, otherwise the one
  # optional default argument, otherwise raises KeyError.
  def fetch(key, *args)
    if args.length > 1
      raise ArgumentError, "fetch takes 1 or 2 arguments"
    end

    if @data.has_key?(key)
      return self[key]
    elsif args.length == 1
      return args[0]
    else
      raise KeyError, "#{key.inspect} not present"
    end
  end

  # Fetch the Array stored at +key+ as a child StructureReader.
  # +key_finder_proc+ maps each element to the key-path segment reported
  # for that element (see #item_and_keypath) instead of its numeric index.
  def array_fetch(key, key_finder_proc)
    result, result_key = item_and_keypath(key)
    unless result.kind_of? Array
      raise ::TypeError, "Expected key for array"
    end

    @used_keys << key if @data.kind_of? Hash
    return StructureReader.new(result, result_key).tap do |r|
      r.parent = self
      r.key_finder_proc = key_finder_proc
    end
  end

  # Fetch the raw (unwrapped) value for +key+, still marking it used.
  def raw_item(key)
    @used_keys << key if @data.kind_of? Hash
    return @data[key]
  end

  # Yields (key, value) pairs for Hash data, or each element for Array
  # data.  Returns an Enumerator when called without a block.
  def each
    # BUG FIX: "return" was missing, so the enumerator was discarded and
    # execution fell through to a bare yield (LocalJumpError).
    return to_enum(:each) unless block_given?

    if @data.kind_of? Hash
      @data.each_key {|k| yield k, self[k]}
    else
      (0...@data.length).each {|i| yield self[i]}
    end
  end
  include Enumerable

  # Delegates type tests to the wrapped data, so a reader over a Hash
  # also answers true to kind_of?(Hash).
  def kind_of?(klass)
    return super(klass) || @data.kind_of?(klass)
  end

  def hash
    @data.hash
  end

  # NOTE(review): compares the wrapped data against +other+ directly, so
  # a reader is eql? to its raw data but not to another reader over eql?
  # data.  Preserved as-is.
  def eql?(other)
    @data.eql?(other)
  end

  def keys
    @data.keys
  end

  def values
    @data.values
  end

  def uniq
    collect {|o| o}.uniq
  end

  def length
    @data.length
  end

  def join(sep=$,)
    @data.join(sep)
  end

  # Yields ("dotted.key.path", value) for every "X-"-prefixed key in this
  # node and, recursively, in every child reader created so far.
  def each_extension(&blk)
    return to_enum(:each_extension) if blk.nil?

    if @data.kind_of? Hash
      @data.each_pair do |k, val|
        next unless k.kind_of?(String) and k.start_with?(EXTENSION_PREFIX)
        blk.call((@keypath + [k]).join('.'), val)
      end
    end

    children.each do |child|
      child.each_extension(&blk)
    end
  end

  # Yields the dotted key path of every non-extension key that was never
  # read, recursing into child readers.
  def each_unused_standard_key(&blk)
    return to_enum(:each_unused_standard_key) if blk.nil?

    if @data.kind_of? Hash
      @data.each_key do |k|
        next if @used_keys.include?(k)
        next if k.kind_of?(String) && k.start_with?(EXTENSION_PREFIX)
        # BUG FIX: .join('.') was applied to the block's return value
        # (blk.call(...).join) instead of to the key path, unlike the
        # correct pattern in #each_extension above.
        blk.call((@keypath + [k]).join('.'))
      end
    end
    children.each {|child| child.each_unused_standard_key(&blk)}
  end

  protected
  attr_accessor :key_finder_proc
  attr_reader :parent, :children

  # Re-parent this reader, keeping both parents' children lists coherent.
  def parent=(new_val)
    @parent.children.delete(self) if @parent
    @parent = new_val
    @parent.children << self if @parent
    new_val
  end

  # Returns [value, keypath-for-value].  When a key_finder_proc is set
  # (array readers), the path segment is derived from the element itself
  # rather than its index.
  def item_and_keypath(key)
    item = @data[key]
    subkey = begin
      if @key_finder_proc
        @keypath + [@key_finder_proc.call(item)]
      else
        @keypath + [key]
      end
    end
    return item, subkey
  end
end
|
157
|
+
end
|
158
|
+
end
|
@@ -0,0 +1,249 @@
|
|
1
|
+
require 'xmigra/declarative_migration'
|
2
|
+
require 'xmigra/new_migration_adder'
|
3
|
+
|
4
|
+
module XMigra
  # Generates "implementing declarative" (impdecl) migrations: migration
  # files that realize changes made to declarative object description
  # files kept under the structure directory.
  class ImpdeclMigrationAdder < NewMigrationAdder
    # Raised when the declarative file has not changed relative to its
    # latest implementing migration, so no new migration is warranted.
    class NoChangesError < Error; end

    # Registry mapping YAML tags to handler classes (class-level ivar,
    # not a class variable, so it is not shared down the hierarchy).
    @support_types = {}

    # Register +klass+ as the handler for YAML tag +tag+.  With a block,
    # the registration is temporary: it is removed when the block exits
    # (useful for tests).  Raises Error on a duplicate registration.
    def self.register_support_type(tag, klass)
      if @support_types.has_key? tag
        raise Error, "#{@support_types[tag]} already registered to handle #{tag}"
      end
      @support_types[tag] = klass
      if block_given?
        begin
          yield
        ensure
          @support_types.delete(tag)
        end
      end
    end

    # Look up the handler class registered for +tag+ (nil if none).
    def self.support_type(tag)
      @support_types[tag]
    end

    # Yields each (tag, handler class) pair in the registry.
    def self.each_support_type(&blk)
      @support_types.each_pair(&blk)
    end

    # Mixin for classes representing database objects that can be managed
    # declaratively.  Including it gains the .for_declarative_tagged
    # registration macro.
    module SupportedDatabaseObject
      module ClassMethods
        # Register the including class to handle declarative files whose
        # top-level YAML node carries +tag+.
        def for_declarative_tagged(tag)
          XMigra::ImpdeclMigrationAdder.register_support_type(tag, self)
        end
      end

      def self.included(mod)
        mod.extend(ClassMethods)
      end

      # Classes including this Module should define:
      #   #creation_sql
      #   #sql_to_effect_from(old_state)
      #   #destruction_sql
      #
      # and expect to receive as arguments to their constructor the name of
      # the object and the Ruby-ized data present at the top level of the
      # declarative file.
    end

    def initialize(path)
      super(path)
      # Chain of existing migrations; used to find the latest migration
      # implementing each declarative file.
      @migrations = MigrationChain.new(
        self.path.join(STRUCTURE_SUBDIR),
        :db_specifics=>@db_specifics,
        :vcs_specifics=>@vcs_specifics,
      )
    end

    # When truthy, errors encountered while building suggested SQL are
    # raised instead of silently producing no suggestion.
    attr_accessor :strict

    # Create a new migration implementing the pending changes to the
    # declarative file at +file_path+.  Recognized +options+ include
    # :adopt and :renounce (see goal selection below).  Raises
    # NoChangesError when the declarative file is not newer than its
    # latest implementing migration.
    def add_migration_implementing_changes(file_path, options={})
      file_path = Pathname(file_path)
      prev_impl = @migrations.latest_declarative_implementations[file_path]
      decl_stat = prev_impl.declarative_status

      # Declarative doesn't make any sense without version control
      unless VersionControlSupportModules.find {|m| self.kind_of? m}
        raise Error, "#{self.path} is not under version control (required for declarative)"
      end

      # Check if an implementation is needed/allowed
      if bad_rel = {
        :equal=>"the same revision as",
        :older=>"an older revision than",
      }[decl_stat]
        raise NoChangesError, "#{file_path} changed in #{bad_rel} the latest implementing migration #{prev_impl.file_path}"
      end

      # This should require the same user to generate a migration on the same
      # day starting from the same committed version working on the same
      # branch to cause a collision of migration file names:
      file_hash = begin
        file_base = begin
          [
            SchemaUpdater.new(path).branch_identifier,
            vcs_latest_revision(file_path),
          ].join("\x00")
        rescue VersionControlError
          # Fall back to an empty basis when VCS info is unavailable.
          ''
        end
        XMigra.secure_digest(
          [(ENV['USER'] || ENV['USERNAME']).to_s, file_base.to_s].join("\x00"),
          :encoding=>:base32
        )[0,12]
      end
      summary = "#{file_path.basename('.yaml')}-#{file_hash}.decl"

      add_migration_options = {
        :file_path=>file_path,
      }

      # Figure out the goal of the change to the declarative
      fail_options = []
      case decl_stat
      when :unimplemented
        fail_options << :renounce
        add_migration_options[:goal] = options[:adopt] ? 'adoption' : 'creation'
      when :newer
        fail_options.concat [:adopt, :renounce]
        add_migration_options[:goal] = 'revision'
      when :missing
        fail_options << :adopt
        add_migration_options[:goal] = options[:renounce] ? 'renunciation' : 'destruction'
      end

      # Reject flags that contradict the declarative file's status.
      if opt = fail_options.find {|o| options[o]}
        raise Program::ArgumentError, "--#{opt} flag is invalid when declarative file is #{decl_stat}"
      end

      # gsub gets rid of trailing whitespace on a line (which would force double-quote syntax)
      add_migration_options[:delta] = prev_impl.delta(file_path).gsub(/\s+$/, '').extend(LiteralYamlStyle)
      unless options[:adopt] || options[:renounce]
        begin
          if suggested_sql = build_suggested_sql(decl_stat, file_path, prev_impl)
            add_migration_options[:sql] = suggested_sql
            add_migration_options[:sql_suggested] = true
          end
        rescue DeclarativeSupport::SpecificationError
          # Record the problem in the migration rather than aborting.
          add_migration_options[:spec_error] = $!.to_s
        end
      end

      add_migration(summary, add_migration_options)
    end

    # Build the data hash written into the new migration's YAML file,
    # extending the superclass's data with declarative bookkeeping keys
    # and reordering keys for readability.
    def migration_data(head_info, options)
      target_object = options[:file_path].basename('.yaml')
      goal = options[:goal].to_sym
      super(head_info, options).tap do |data|
        # The "changes" key is not used by declarative implementation
        #migrations -- the "of object" (TARGET_KEY) is used instead
        data.delete(Migration::CHANGES)

        data[DeclarativeMigration::GOAL_KEY] = options[:goal].to_s
        data[DeclarativeMigration::TARGET_KEY] = target_object.to_s
        data[DeclarativeMigration::DECLARATION_VERSION_KEY] = begin
          if [:renunciation, :destruction].include?(goal)
            'DELETED'
          else
            # Digest of the declarative file's current content.
            XMigra.secure_digest(options[:file_path].read)
          end
        end
        data['delta'] = options[:delta]
        options[:spec_error].tap do |message|
          data['specification error'] = message if message
        end

        # Reorder "sql" key to here (unless adopting or renouncing, then
        # remove "sql" completely)
        provided_sql = data.delete('sql')
        unless [:adoption, :renunciation].include? goal
          data['sql'] = provided_sql
          data[DeclarativeMigration::QUALIFICATION_KEY] = begin
            if options[:sql_suggested]
              'suggested command sequence'
            else
              'unimplemented'
            end
          end
        end

        # Reorder "description" key to here with
        data.delete('description')
        data['description'] = "Declarative #{goal} of #{target_object}"
      end
    end

    # Produce suggested SQL for the state transition implied by
    # +decl_stat+, consulting the registered handler class for the file's
    # YAML tag.  Returns nil when no suggestion can be made; unexpected
    # errors are swallowed unless #strict is set.
    def build_suggested_sql(decl_stat, file_path, prev_impl)
      d = SupportedObjectDeserializer.new(
        file_path.basename('.yaml').to_s,
        @db_specifics
      )
      case decl_stat
      when :unimplemented
        # Object is new: suggest its creation SQL.
        initial_state = YAML.parse_file(file_path)
        initial_state = d.deserialize(initial_state.children[0])

        if initial_state.kind_of?(SupportedDatabaseObject)
          initial_state.creation_sql
        end
      when :newer
        # Object changed: diff the committed state against the working copy.
        old_state = YAML.parse(
          vcs_contents(file_path, :revision=>prev_impl.vcs_latest_revision),
          file_path
        )
        old_state = d.deserialize(old_state.children[0])
        new_state = YAML.parse_file(file_path)
        new_state = d.deserialize(new_state.children[0])

        if new_state.kind_of?(SupportedDatabaseObject) && old_state.class == new_state.class
          new_state.sql_to_effect_from old_state
        end
      when :missing
        # Declarative file deleted: suggest destruction of the last
        # committed state.
        penultimate_state = YAML.parse(
          vcs_contents(file_path, :revision=>prev_impl.vcs_latest_revision),
          file_path
        )
        penultimate_state = d.deserialize(penultimate_state.children[0])

        if penultimate_state.kind_of?(SupportedDatabaseObject)
          penultimate_state.destruction_sql
        end
      end
    rescue DeclarativeSupport::SpecificationError
      raise
    rescue StandardError
      # Best-effort: suggestion failures are fatal only in strict mode.
      raise if strict
      nil
    end

    # Turns the top-level YAML node of a declarative file into a Ruby
    # object, dispatching on the node's tag via the support-type registry.
    class SupportedObjectDeserializer
      def initialize(object_name, db_specifics)
        @object_name = object_name
        @db_specifics = db_specifics
      end

      attr_reader :object_name, :db_specifics

      # Returns a handler-class instance (extended with the db-specifics
      # module) when the node's tag is registered; otherwise returns the
      # plain Ruby data with the object name attached where possible.
      def deserialize(yaml_node)
        data = yaml_node.to_ruby
        if klass = ImpdeclMigrationAdder.support_type(yaml_node.tag)
          klass.new(@object_name, data).extend(@db_specifics)
        else
          if data.respond_to? :name=
            data.name = @object_name
          elsif data.kind_of? Hash
            data['name'] = @object_name
          end
          data
        end
      end
    end
  end
end
|
247
|
+
|
248
|
+
require 'xmigra/declarative_support'
|
249
|
+
require 'xmigra/declarative_support/table'
|
data/lib/xmigra/migration.rb
CHANGED
@@ -12,22 +12,29 @@ module XMigra
|
|
12
12
|
@id = info['id'].dup.freeze
|
13
13
|
_follows = info[FOLLOWS]
|
14
14
|
@follows = (_follows.dup.freeze unless _follows == EMPTY_DB)
|
15
|
-
@sql = info["sql"].dup.freeze
|
15
|
+
@sql = info.has_key?('sql') ? info["sql"].dup.freeze : nil
|
16
16
|
@description = info["description"].dup.freeze
|
17
17
|
@changes = (info[CHANGES] || []).dup.freeze
|
18
18
|
@changes.each {|c| c.freeze}
|
19
|
+
@all_info = Marshal.load(Marshal.dump(info))
|
19
20
|
end
|
20
21
|
|
21
22
|
attr_reader :id, :follows, :description, :changes
|
22
23
|
attr_accessor :file_path
|
23
24
|
|
24
25
|
def schema_dir
|
25
|
-
|
26
|
+
@schema_dir ||= begin
|
27
|
+
result = Pathname(file_path).dirname
|
28
|
+
while result.basename.to_s != SchemaManipulator::STRUCTURE_SUBDIR
|
29
|
+
result = result.dirname
|
30
|
+
end
|
31
|
+
result.join('..')
|
32
|
+
end
|
26
33
|
end
|
27
34
|
|
28
35
|
def sql
|
29
36
|
if Plugin.active
|
30
|
-
@sql.dup.tap do |result|
|
37
|
+
(@sql || "").dup.tap do |result|
|
31
38
|
Plugin.active.amend_source_sql(result)
|
32
39
|
end
|
33
40
|
else
|
@@ -1,4 +1,5 @@
|
|
1
1
|
|
2
|
+
require 'xmigra/declarative_migration'
|
2
3
|
require 'xmigra/migration'
|
3
4
|
|
4
5
|
module XMigra
|
@@ -7,13 +8,16 @@ module XMigra
|
|
7
8
|
LATEST_CHANGE = 'latest change'
|
8
9
|
MIGRATION_FILE_PATTERN = /^\d{4}-\d\d-\d\d.*\.yaml$/i
|
9
10
|
|
11
|
+
include DeclarativeMigration::ChainSupport
|
12
|
+
|
10
13
|
def initialize(path, options={})
|
11
14
|
super()
|
15
|
+
@path = Pathname(path)
|
12
16
|
|
13
17
|
db_specifics = options[:db_specifics]
|
14
18
|
vcs_specifics = options[:vcs_specifics]
|
15
19
|
|
16
|
-
head_info =
|
20
|
+
head_info = yaml_of_file(File.join(path, HEAD_FILE)) || {}
|
17
21
|
file = head_info[LATEST_CHANGE]
|
18
22
|
prev_file = HEAD_FILE
|
19
23
|
files_loaded = []
|
@@ -21,18 +25,16 @@ module XMigra
|
|
21
25
|
until file.nil?
|
22
26
|
file = XMigra.yaml_path(file)
|
23
27
|
fpath = File.join(path, file)
|
24
|
-
break
|
25
|
-
begin
|
26
|
-
mig_info = YAML.load_file(fpath)
|
27
|
-
rescue
|
28
|
-
raise XMigra::Error, "Error loading/parsing #{fpath}"
|
29
|
-
end
|
28
|
+
break if (mig_info = yaml_of_file(fpath)).nil?
|
30
29
|
files_loaded << file
|
31
30
|
mig_info["id"] = Migration::id_from_filename(file)
|
32
31
|
migration = Migration.new(mig_info)
|
33
32
|
migration.file_path = File.expand_path(fpath)
|
34
33
|
migration.extend(db_specifics) if db_specifics
|
35
34
|
migration.extend(vcs_specifics) if vcs_specifics
|
35
|
+
if migration.file_path.end_with? ".decl.yaml"
|
36
|
+
migration.extend(DeclarativeMigration)
|
37
|
+
end
|
36
38
|
unshift(migration)
|
37
39
|
prev_file = file
|
38
40
|
file = migration.follows
|
@@ -50,14 +52,26 @@ module XMigra
|
|
50
52
|
@other_migrations.freeze
|
51
53
|
end
|
52
54
|
|
55
|
+
attr_reader :path
|
56
|
+
|
53
57
|
# Test if the chain reaches back to the empty database
|
54
58
|
def complete?
|
55
|
-
length
|
59
|
+
length == 0 || self[0].follows.nil?
|
56
60
|
end
|
57
61
|
|
58
62
|
# Test if the chain encompasses all migration-like filenames in the path
|
59
63
|
def includes_all?
|
60
64
|
@other_migrations.empty?
|
61
65
|
end
|
66
|
+
|
67
|
+
protected
|
68
|
+
# Load and parse the YAML file at +fpath+.
#
# Returns nil when +fpath+ does not exist (or is not a regular file),
# allowing callers to treat a missing file as end-of-chain.  Any
# load/parse failure is re-raised as XMigra::Error with the offending
# path in the message.
def yaml_of_file(fpath)
  return nil unless File.file?(fpath)
  begin
    return YAML.load_file(fpath)
  rescue
    # Normalize Psych/IO errors into the project's error type.
    raise XMigra::Error, "Error loading/parsing #{fpath}"
  end
end
|
62
76
|
end
|
63
77
|
end
|
@@ -63,6 +63,33 @@ module XMigra
|
|
63
63
|
if @after_fix
|
64
64
|
@after_fix.call
|
65
65
|
end
|
66
|
+
|
67
|
+
fix_merged_declarative_relations!
|
68
|
+
end
|
69
|
+
|
70
|
+
# After a branch merge is fixed up, re-point the newest implementing
# migration of every declarative file that carries working-copy
# modifications (see #each_decohered_implementing_migration).
def fix_merged_declarative_relations!
  each_decohered_implementing_migration do |migration_path, last_commit|
    fix_decohered_implmeneting_migration!(migration_path, last_commit)
  end
end
|
77
|
+
|
78
|
+
# Yields (migration_file_path, latest_vcs_revision) for each implementing
# migration whose declarative file has uncommitted working-copy changes.
# Builds a SchemaUpdater rooted one level above @path to query the
# migration chain and VCS state.
def each_decohered_implementing_migration
  updater = SchemaUpdater.new(@path.join(".."))
  updater.migrations.latest_declarative_implementations.each_pair do |decl_file, impl_migration|
    next unless updater.vcs_file_modified?(decl_file)
    yield impl_migration.file_path, updater.vcs_latest_revision(impl_migration.file_path)
  end
end
|
86
|
+
|
87
|
+
# Record +last_commit+ as the 'pre-unbranch' revision in the migration
# file at +file_path+ (a Pathname), rewriting the file in place via the
# global $xmigra_yamler.
#
# NOTE(review): method name contains a typo ("implmeneting"); preserved
# because callers reference it by this exact name.
def fix_decohered_implmeneting_migration!(file_path, last_commit)
  info = YAML.load_file(file_path)
  info['pre-unbranch'] = last_commit
  file_path.open('w') {|f| $xmigra_yamler.dump(info, f)}
end
|
67
94
|
end
|
68
95
|
end
|
@@ -0,0 +1,44 @@
|
|
1
|
+
|
2
|
+
require 'xmigra/schema_manipulator'
|
3
|
+
|
4
|
+
module XMigra
|
5
|
+
class NewAccessArtifactAdder < SchemaManipulator
|
6
|
+
class UnsupportedArtifactType < XMigra::Error
|
7
|
+
def initialize(artifact_type, system_name)
|
8
|
+
super("#{system_name} does not support #{artifact_type} artifacts")
|
9
|
+
@artifact_type = artifact_type
|
10
|
+
@system_name = system_name
|
11
|
+
end
|
12
|
+
|
13
|
+
attr_reader :artifact_type, :system_name
|
14
|
+
end
|
15
|
+
|
16
|
+
def initialize(path)
|
17
|
+
super(path)
|
18
|
+
end
|
19
|
+
|
20
|
+
def add_artifact(type, name)
|
21
|
+
access_dir = @path.join(ACCESS_SUBDIR)
|
22
|
+
FileUtils.mkdir_p(access_dir) unless access_dir.exist?
|
23
|
+
|
24
|
+
new_fpath = access_dir.join(name + '.yaml')
|
25
|
+
raise(XMigra::Error, "Access object \"#{new_fpath.basename}\" already exists") if new_fpath.exist?
|
26
|
+
|
27
|
+
template_method = begin
|
28
|
+
method("#{type}_definition_template_sql".to_sym)
|
29
|
+
rescue NameError
|
30
|
+
proc {''}
|
31
|
+
end
|
32
|
+
new_data = {
|
33
|
+
'define'=>type.to_s,
|
34
|
+
'sql'=>template_method.call.dup.extend(LiteralYamlStyle),
|
35
|
+
}
|
36
|
+
|
37
|
+
File.open(new_fpath, "w") do |f|
|
38
|
+
$xmigra_yamler.dump(new_data, f)
|
39
|
+
end
|
40
|
+
|
41
|
+
return new_fpath
|
42
|
+
end
|
43
|
+
end
|
44
|
+
end
|
@@ -0,0 +1,33 @@
|
|
1
|
+
|
2
|
+
require 'xmigra/schema_manipulator'
|
3
|
+
|
4
|
+
module XMigra
  # Scaffolds a new index definition file under the schema's index
  # directory.
  class NewIndexAdder < SchemaManipulator
    def initialize(path)
      super(path)
    end

    # Create <name>.yaml under INDEXES_SUBDIR, seeded with the
    # db-specific index creation template when one is defined.  Returns
    # the Pathname of the new file; raises XMigra::Error if it already
    # exists.
    def add_index(name)
      indexes_dir = @path.join(INDEXES_SUBDIR)
      FileUtils.mkdir_p(indexes_dir) unless indexes_dir.exist?

      index_path = indexes_dir.join(name + '.yaml')
      raise(XMigra::Error, "Index \"#{index_path.basename}\" already exists") if index_path.exist?

      # Fall back to an empty template when the db-specifics module does
      # not define index_template_sql.
      creation_template =
        begin
          index_template_sql
        rescue NameError
          ''
        end

      index_content = {
        'sql' => creation_template.dup.extend(LiteralYamlStyle),
      }

      File.open(index_path, "w") {|f| $xmigra_yamler.dump(index_content, f)}

      index_path
    end
  end
end
|
@@ -40,12 +40,7 @@ module XMigra
|
|
40
40
|
)
|
41
41
|
raise(XMigra::Error, "Migration file\"#{new_fpath.basename}\" already exists") if new_fpath.exist?
|
42
42
|
|
43
|
-
new_data =
|
44
|
-
Migration::FOLLOWS=>head_info.fetch(MigrationChain::LATEST_CHANGE, Migration::EMPTY_DB),
|
45
|
-
'sql'=>options.fetch(:sql, "<<<<< INSERT SQL HERE >>>>>\n").dup.extend(LiteralYamlStyle),
|
46
|
-
'description'=>options.fetch(:description, "<<<<< DESCRIPTION OF MIGRATION >>>>>").dup.extend(FoldedYamlStyle),
|
47
|
-
Migration::CHANGES=>options.fetch(:changes, ["<<<<< WHAT THIS MIGRATION CHANGES >>>>>"]),
|
48
|
-
}
|
43
|
+
new_data = migration_data(head_info, options)
|
49
44
|
|
50
45
|
# Write the head file first, in case a lock is required
|
51
46
|
old_head_info = head_info.dup
|
@@ -85,6 +80,15 @@ module XMigra
|
|
85
80
|
return new_fpath
|
86
81
|
end
|
87
82
|
|
83
|
+
# Build the hash serialized into a new migration's YAML file.
#
# head_info:: current head-file data; its LATEST_CHANGE entry (or
#             Migration::EMPTY_DB when absent) becomes the new
#             migration's FOLLOWS link.
# options::   may override :sql, :description, and :changes; placeholder
#             markers are used otherwise.
def migration_data(head_info, options={})
  data = {}
  data[Migration::FOLLOWS] = head_info.fetch(MigrationChain::LATEST_CHANGE, Migration::EMPTY_DB)
  # Literal style keeps SQL formatting intact; folded style wraps prose.
  data['sql'] = options.fetch(:sql, "<<<<< INSERT SQL HERE >>>>>\n").dup.extend(LiteralYamlStyle)
  data['description'] = options.fetch(:description, "<<<<< DESCRIPTION OF MIGRATION >>>>>").dup.extend(FoldedYamlStyle)
  data[Migration::CHANGES] = options.fetch(:changes, ["<<<<< WHAT THIS MIGRATION CHANGES >>>>>"])
  data
end
|
91
|
+
|
88
92
|
# Called when the chain of migrations in the production/master branch is
|
89
93
|
# extended with a new migration.
|
90
94
|
#
|
@@ -17,11 +17,11 @@ module XMigra
|
|
17
17
|
|
18
18
|
def ddl_block_separator; "\n"; end
|
19
19
|
|
20
|
-
def permissions_sql
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
[
|
20
|
+
def permissions_sql(options = {})
|
21
|
+
"".tap do |result|
|
22
|
+
result << @db_info.fetch('script comment', '') + "\n\n"
|
23
|
+
|
24
|
+
transaction_sql = [
|
25
25
|
# Check for blatantly incorrect application of script, e.g. running
|
26
26
|
# on master or template database.
|
27
27
|
check_execution_environment_sql,
|
@@ -39,6 +39,12 @@ module XMigra
|
|
39
39
|
].flatten.compact.join(ddl_block_separator).tap do |result|
|
40
40
|
Plugin.active.amend_composed_sql(result) if Plugin.active
|
41
41
|
end
|
42
|
+
|
43
|
+
if options.fetch(:transactional, true)
|
44
|
+
result << in_ddl_transaction {transaction_sql}
|
45
|
+
else
|
46
|
+
result << transaction_sql
|
47
|
+
end
|
42
48
|
end
|
43
49
|
end
|
44
50
|
|