flydata 0.7.8 → 0.7.9
- checksums.yaml +4 -4
- data/VERSION +1 -1
- data/bin/split_sync_ddl.rb +53 -0
- data/flydata-core/lib/flydata-core/errors.rb +17 -0
- data/flydata-core/lib/flydata-core/mysql/compatibility_checker.rb +34 -3
- data/flydata-core/spec/mysql/compatibility_checker_spec.rb +120 -3
- data/flydata.gemspec +0 -0
- data/lib/flydata/api/data_entry.rb +1 -1
- data/lib/flydata/command/sync.rb +76 -34
- data/lib/flydata/source_mysql/mysql_compatibility_check.rb +3 -2
- data/lib/flydata/source_mysql/plugin_support/binlog_query_dispatcher.rb +4 -0
- data/lib/flydata/source_mysql/plugin_support/create_table_query_handler.rb +28 -0
- data/lib/flydata/source_mysql/plugin_support/drop_table_query_handler.rb +28 -0
- data/lib/flydata/source_postgresql/table_meta.rb +5 -1
- data/lib/flydata/sync_file_manager.rb +15 -3
- data/spec/flydata/api/data_entry_spec.rb +4 -4
- data/spec/flydata/command/sync_spec.rb +108 -1
- data/spec/flydata/source_mysql/mysql_compatibility_check_spec.rb +1 -0
- data/spec/flydata/source_mysql/plugin_support/binlog_query_dispatcher_spec.rb +12 -0
- metadata +6 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b6544ac842d41d5b8482f1d8996e8a853efda2b9
+  data.tar.gz: 63e29a5dc775e88134129621791733b2a5ef301c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 714ba6763f79f026d4dce4cd19b0fc6d9b83114292b1bde90bc30426d8f4133c86e1e9e2a36d7e4200efd3bed12bc12cc472fa8e806f45b6bc5fe0a0e70e9065
+  data.tar.gz: 1fe8b84fe9a71066df153046685ed8bc543f99fc6b32e9d8a36474e4b5e703a33c4647180ddc5a17213f6b15c97f2788ee04de97fec0da81464ca0d8c45a28c7
data/VERSION
CHANGED

@@ -1 +1 @@
-0.7.8
+0.7.9
data/bin/split_sync_ddl.rb
ADDED

@@ -0,0 +1,53 @@
+#!/usr/bin/env ruby
+
+def usage
+  puts <<EOT
+Split sync DDL file into multiple files
+
+Usage: split_sync_ddl.rb <ddl-file-path> [<num-tables-per-file(default:50)>]
+
+ex) split_sync_ddl ct20160722_01.sql 10
+    This command generates
+      ct20160722_01.sql_0
+      ct20160722_01.sql_1
+      ct20160722_01.sql_2
+      ....
+EOT
+  exit 1
+end
+
+@original_ddl = ARGV[0]
+unless @original_ddl
+  usage
+end
+unless File.exists?(@original_ddl)
+  $stderr.puts "Error! File doesn't exist - #{@original_ddl}"
+  exit 1
+end
+@num_tables_per_file = ARGV[1] || 50
+
+@file_no = 0
+
+def out_file_name(file_no)
+  "#{@original_ddl}_#{file_no}"
+end
+
+def save_file(lines)
+  fn = out_file_name(@file_no)
+  content = lines.kind_of?(Array) ? lines.join : lines.to_s
+  IO.write(fn, content)
+  @file_no += 1
+end
+
+SEPARATOR = "BEGIN;\n"
+def main
+  all_blocks = IO.read(@original_ddl).split(SEPARATOR)
+  first_block = all_blocks.shift
+  save_file(first_block)
+  all_blocks.each_slice(@num_tables_per_file) do |blocks|
+    content = blocks.collect{|block| "#{SEPARATOR}#{block}"}.join
+    save_file(content)
+  end
+end
+
+main
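The script above splits a generated sync DDL file on each "BEGIN;\n" marker: whatever precedes the first marker goes to <file>_0, and each group of N per-table blocks goes to <file>_1, <file>_2, and so on. A minimal standalone sketch of that slicing behavior, using an invented three-table DDL string and a group size of 2 (the table names and output labels are illustrative only):

# Illustrative only: mirrors the split/shift/each_slice logic of split_sync_ddl.rb.
ddl = <<EOT
SET search_path TO public;
BEGIN;
CREATE TABLE table_1 (id int);
COMMIT;
BEGIN;
CREATE TABLE table_2 (id int);
COMMIT;
BEGIN;
CREATE TABLE table_3 (id int);
COMMIT;
EOT

separator = "BEGIN;\n"
blocks = ddl.split(separator)
header = blocks.shift               # content before the first BEGIN; -> would become <file>_0
groups = blocks.each_slice(2).to_a  # two tables per output file -> <file>_1, <file>_2

puts "file _0:\n#{header}"
groups.each_with_index do |group, i|
  puts "file _#{i + 1}:\n#{group.map { |b| separator + b }.join}"
end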
data/flydata-core/lib/flydata-core/errors.rb
CHANGED

@@ -32,6 +32,7 @@ module ErrorCode
   S3_ACCESS_ERROR = 10008
   REDSHIFT_ACCESS_ERROR = 10009
   BREAKING_ALTER_TABLE_ERROR = 10010
+  BREAKING_INFORMATION_SCHEMA_ERROR = 10011
   INTERNAL_ERROR = 1200
 end
 
@@ -66,6 +67,10 @@ end
 class UserMaintenanceError < StandardError
 end
 
+# Errors used by including
+module BreakingSyncError
+end
+
 # An error that indicates that sync record was received for a table whose sync is broken
 # These records cannot be retried, should be ignored
 class BrokenTableSyncError < StandardError
@@ -203,9 +208,21 @@ end
 
 # Unsupported ALTER TABLE which breaks the tables sync consistency.
 class BreakingAlterTableError < RecordDeliveryError
+  include BreakingSyncError
   def err_code; ErrorCode::BREAKING_ALTER_TABLE_ERROR; end
 end
 
+# Unrecoverable information schema error which requires re-synchronizing a table
+class BreakingInformationSchemaError < InformationSchemaError
+  include BreakingSyncError
+  def initialize(*args)
+    super
+    @error_content[:err_reason] = "Sync is broken. The table needs to be re-synchronized. (#{@error_content[:err_reason]})"
+  end
+  def err_code; ErrorCode::BREAKING_INFORMATION_SCHEMA_ERROR; end
+end
+
+
 # + BadValueError
 #   data_node_id, data_entry_id, chunk_identifier, table_name, err_code, err_reason, err_level, record_no, raw_record, colname, type, raw_value, modified_value
 
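BreakingSyncError is an empty module mixed into the error classes above so that callers can treat every "sync is broken" error uniformly, whatever its concrete superclass. Ruby's rescue matches exceptions with Module#===, so an included module works as a rescue target. A small self-contained sketch of that idiom (the class name below is invented for illustration and is not part of the gem):

# Tag-module rescue idiom, illustrative only.
module BreakingSyncError
end

class FakeBreakingAlterTableError < StandardError
  include BreakingSyncError
end

begin
  raise FakeBreakingAlterTableError, "unsupported ALTER TABLE"
rescue BreakingSyncError => e
  # Matches because e.is_a?(BreakingSyncError) is true.
  puts "sync is broken: #{e.message}"
end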
data/flydata-core/lib/flydata-core/mysql/compatibility_checker.rb
CHANGED

@@ -43,6 +43,26 @@ module FlydataCore
         client.close rescue nil if client
       end
     end
+
+    # This command works only on RDS MySQL
+    RDS_CHECK_QUERY = "call mysql.rds_show_configuration;"
+    def rds?(hostname = @option[:host])
+      return true if hostname.match(/rds.amazonaws.com$/) != nil
+
+      # To distinguish RDS customers using Secure Tunnel
+      begin
+        exec_query(RDS_CHECK_QUERY)
+        return true
+      rescue Mysql2::Error => e
+        if e.message =~ /command denied to user/
+          return true
+        elsif e.message =~ /PROCEDURE mysql.rds_show_configuration does not exist/
+          return false
+        else
+          raise e
+        end
+      end
+    end
   end
 
   class SyncPermissionChecker < MysqlCompatibilityChecker
@@ -281,19 +301,30 @@ EOT
     PK_CHECK_QUERY_TMPLT = <<EOT
 SELECT
   t.table_name,
-  SUM(IF(tc.constraint_type='PRIMARY KEY', 1, 0)) as num_pk
+  SUM(IF(tc.constraint_type='PRIMARY KEY' AND col.is_nullable='NO', 1, 0)) as num_pk,
+  SUM(IF(tc.constraint_type='UNIQUE' AND col.is_nullable='NO', 1, 0)) as num_uk
 FROM
   (select * from information_schema.tables where table_schema = '%{database}' AND table_name in (%{table_names})) t
   LEFT OUTER JOIN
   (select * from information_schema.table_constraints where table_schema = '%{database}' AND table_name in (%{table_names})) tc
   USING (table_schema, table_name)
+  LEFT JOIN
+  (select * from information_schema.key_column_usage where constraint_schema = '%{database}') kc
+  USING (table_schema, table_name, constraint_name)
+  LEFT JOIN
+  (select * from information_schema.columns where table_schema = '%{database}' AND table_name in (%{table_names})) col
+  ON t.table_schema = col.table_schema AND t.table_name = col.table_name AND kc.column_name = col.column_name
 GROUP BY
   t.table_schema, t.table_name
 HAVING
-  num_pk = 0;
+  num_pk = 0 and num_uk = 0;
 EOT
+
     def create_query(option = @option)
-
+      pk_override = option[:pk_override] || {}
+      tables = option[:tables].select{|tbl| pk_override[tbl.to_s].nil? } # exclude tables having pk_override
+      return nil if tables.empty?
+      PK_CHECK_QUERY_TMPLT % {database: option[:database], table_names: tables.collect{|tn| "'#{tn}'"}.join(',')}
     end
 
     def check_result(result, option = @option)
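create_query fills the %{database} and %{table_names} placeholders with Ruby's String#% named substitution after dropping any table that has a pk_override entry. A short standalone sketch of that substitution step (database and table names invented for illustration):

# Illustrative only: String#% with a hash replaces %{...} placeholders by name.
template = "select * from information_schema.tables " \
           "where table_schema = '%{database}' AND table_name in (%{table_names})"

tables = %w(table_1 table_3)
sql = template % {
  database: 'test_db',
  table_names: tables.collect { |tn| "'#{tn}'" }.join(',')
}
puts sql
# => select * from information_schema.tables where table_schema = 'test_db' AND table_name in ('table_1','table_3')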
data/flydata-core/spec/mysql/compatibility_checker_spec.rb
CHANGED

@@ -5,9 +5,79 @@ module FlydataCore
 module Mysql
   describe CompatibilityChecker do
     let(:database) { 'test_db' }
+    let(:mysql_host) { hostname + '.' + domain_name }
+    let(:hostname) { 'test-hostname' }
+    let(:domain_name) { 'flydata.com' }
     let(:tables) { %w(table_1 table_2 table_3) }
     let(:option) do
-      { database: database,
+      { database: database,
+        host: mysql_host,
+        tables: tables
+      }
+    end
+
+    describe MysqlCompatibilityChecker do
+      let(:subject_object) { described_class.new(option) }
+
+      describe '#rds?' do
+        subject { subject_object.rds? }
+
+        context 'When called for non-rds host' do
+          let(:mysql2_error_msg) { "PROCEDURE mysql.rds_show_configuration does not exist" }
+          before do
+            allow(subject_object).to receive(:exec_query).with(MysqlCompatibilityChecker::RDS_CHECK_QUERY).and_raise(Mysql2::Error, mysql2_error_msg)
+          end
+
+          context 'When called for on-premise host' do
+            let(:hostname) { 'test-hostname' }
+            let(:domain_name) { 'flydata.com' }
+            it { is_expected.to be_falsy }
+          end
+
+          context 'When called for ec2 host' do
+            let(:hostname) { 'ec2-12-345-678-90.us-west-1' }
+            let(:domain_name) { 'compute.amazonaws.com' }
+            it { is_expected.to be_falsy }
+          end
+        end
+
+        context 'When called for RDS host' do
+          let(:hostname) { 'test-read-replica.stuvwxyz.us-west-1' }
+          let(:domain_name) { 'rds.amazonaws.com' }
+          it { is_expected.to be_truthy }
+        end
+
+        context 'when called for secure tunnel user' do
+          let(:hostname) { 'mysql.dse-9999-zzz8xy765' }
+          let(:domain_name) { 'flydata.com' }
+
+          context 'When called for non-rds host' do
+            let(:mysql2_error_msg) { "PROCEDURE mysql.rds_show_configuration does not exist" }
+            it do
+              expect(subject_object).to receive(:exec_query).with(MysqlCompatibilityChecker::RDS_CHECK_QUERY).and_raise(Mysql2::Error, mysql2_error_msg)
+              is_expected.to be_falsy
+            end
+          end
+
+          context 'When called for RDS host' do
+            let(:result) { double 'rds query result' }
+            it do
+              expect(subject_object).to receive(:exec_query).with(MysqlCompatibilityChecker::RDS_CHECK_QUERY).and_return(result)
+              is_expected.to be_truthy
+            end
+          end
+        end
+
+        context 'When unexpected error returned' do
+          let(:mysql2_error_msg) { "Unknown MySQL server host '#{mysql_host}'" }
+          before do
+            allow(subject_object).to receive(:exec_query).with(MysqlCompatibilityChecker::RDS_CHECK_QUERY).and_raise(Mysql2::Error, mysql2_error_msg)
+          end
+          it do
+            expect { subject }.to raise_error(Mysql2::Error, mysql2_error_msg)
+          end
+        end
+      end
     end
 
     describe TableExistenceChecker do
@@ -64,18 +134,65 @@ EOT
       it { is_expected.to eq <<EOT
 SELECT
   t.table_name,
-  SUM(IF(tc.constraint_type='PRIMARY KEY', 1, 0)) as num_pk
+  SUM(IF(tc.constraint_type='PRIMARY KEY' AND col.is_nullable='NO', 1, 0)) as num_pk,
+  SUM(IF(tc.constraint_type='UNIQUE' AND col.is_nullable='NO', 1, 0)) as num_uk
 FROM
   (select * from information_schema.tables where table_schema = 'test_db' AND table_name in ('table_1','table_2','table_3')) t
   LEFT OUTER JOIN
   (select * from information_schema.table_constraints where table_schema = 'test_db' AND table_name in ('table_1','table_2','table_3')) tc
   USING (table_schema, table_name)
+  LEFT JOIN
+  (select * from information_schema.key_column_usage where constraint_schema = 'test_db') kc
+  USING (table_schema, table_name, constraint_name)
+  LEFT JOIN
+  (select * from information_schema.columns where table_schema = 'test_db' AND table_name in ('table_1','table_2','table_3')) col
+  ON t.table_schema = col.table_schema AND t.table_name = col.table_name AND kc.column_name = col.column_name
 GROUP BY
   t.table_schema, t.table_name
 HAVING
-  num_pk = 0;
+  num_pk = 0 and num_uk = 0;
 EOT
       }
+
+      context 'when pk_override is set' do
+        before do
+          option[:pk_override] = {
+            'table_2' => ['id']
+          }
+        end
+        it { is_expected.to eq <<EOT
+SELECT
+  t.table_name,
+  SUM(IF(tc.constraint_type='PRIMARY KEY' AND col.is_nullable='NO', 1, 0)) as num_pk,
+  SUM(IF(tc.constraint_type='UNIQUE' AND col.is_nullable='NO', 1, 0)) as num_uk
+FROM
+  (select * from information_schema.tables where table_schema = 'test_db' AND table_name in ('table_1','table_3')) t
+  LEFT OUTER JOIN
+  (select * from information_schema.table_constraints where table_schema = 'test_db' AND table_name in ('table_1','table_3')) tc
+  USING (table_schema, table_name)
+  LEFT JOIN
+  (select * from information_schema.key_column_usage where constraint_schema = 'test_db') kc
+  USING (table_schema, table_name, constraint_name)
+  LEFT JOIN
+  (select * from information_schema.columns where table_schema = 'test_db' AND table_name in ('table_1','table_3')) col
+  ON t.table_schema = col.table_schema AND t.table_name = col.table_name AND kc.column_name = col.column_name
+GROUP BY
+  t.table_schema, t.table_name
+HAVING
+  num_pk = 0 and num_uk = 0;
+EOT
+        }
+      end
+
+      context 'when no tables need to be validated' do
+        let(:tables) { %w(table_2) }
+        before do
+          option[:pk_override] = {
+            'table_2' => ['id']
+          }
+        end
+        it { is_expected.to be_nil }
+      end
     end
 
     describe '#check_result' do
data/flydata.gemspec
CHANGED

Binary file
data/lib/flydata/command/sync.rb
CHANGED

@@ -74,7 +74,7 @@ module Flydata
     # Public method
     # - Called from Sender#start/restart
     def try_initial_sync(options)
-      handle_initial_sync(
+      handle_initial_sync(options) if source.sync.supported?
     rescue Source::UnsupportedSourceError
       return
     end
@@ -123,21 +123,10 @@ EOS
     # Command: flydata sync:reset
     # - Entry method
     def reset(*tables)
-
-      if tables.empty? && !opts[:all] && !opts[:init]
-        log_info_stdout <<EOS
-ERROR! Argument or option must be specified.
-
-To reset all tables: flydata sync:reset --all
-To reset specific tables: flydata sync:reset table1 table2 ...
-To reset unfinished initial sync: flydata sync:reset --init
-EOS
-        return
-      end
-
       # Set instance variables
       reset_init = opts[:init]
       if opts[:all]
+        # Reset all tables regardless of sync_resumed
         tables = []
         reset_init = false
       end
@@ -148,7 +137,30 @@ EOS
 
       return if !sync_resumed && reset_init
 
-
+      # Suggest to add option/arguments
+      if tables.empty? && !opts[:all] && !opts[:init]
+        if sync_resumed
+          log_info_stdout <<EOS
+ERROR! Argument or option must be specified.
+
+To reset unfinished initial sync, run the following command:
+
+  flydata sync:reset --init
+
+EOS
+        else
+          log_info_stdout <<EOS
+ERROR! Argument or option must be specified.
+
+To reset all tables: flydata sync:reset --all
+To reset specific tables: flydata sync:reset table1 table2 ...
+
+EOS
+        end
+        return
+      end
+
+      # Suggest to use --init
       if sync_resumed && !tables.empty?
         log_info_stdout <<EOS
 ERROR! You cannot reset tables because the previous initial sync has not been completed. Reset the unfinished initial sync first with the following command:
@@ -159,17 +171,18 @@ EOS
         return
       end
 
+      # Suggest to use --force
       if target_append_only_tables.size > 0 && !opts[:force]
         log_info_stdout <<EOS
 ERROR! Reset failed because it includes append only table(s). Sync can no longer continue if you reset an append only table.
 
 Append only table(s): #{target_append_only_tables.join(", ")}
 
-If you really want to reset append-only tables,
+If you really want to reset append-only tables, run the command with '--force' option.
 
 EOS
 
-        log_info_stdout <<EOS
+        log_info_stdout <<EOS if !target_full_sync_tables.empty? && !sync_resumed
 To reset all tables except for append only tables, run the following command.
 
   flydata sync:reset #{target_full_sync_tables.join(" ")}
@@ -178,11 +191,23 @@ EOS
         return
       end
 
-      # Flush client buffer
       msg_tables = @input_tables.empty? ? '' : " for these tables : #{@input_tables.join(" ")}"
       msg_sync_type = sync_resumed ? "the current initial sync" : "the current sync"
       show_purpose_name
       return unless ask_yes_no("This resets #{msg_sync_type}#{msg_tables}. Are you sure?")
+
+      recover_cmd = "flydata sync:reset #{tables.empty? ? '' : tables.join(" ")}"
+
+      _reset(recover_cmd, reset_client_only: opts.client?, delete_tbl_ddl: true)
+
+    end
+    run_exclusive :reset
+
+    def _reset(recover_cmd, options)
+      reset_client_only = options[:reset_client_only]
+      delete_tbl_ddl = options[:delete_tbl_ddl]
+
+      # Flush client buffer
       sender = Flydata::Command::Sender.new
       sender.flush_client_buffer # TODO We should rather delete buffer files
       sender.stop
@@ -193,9 +218,9 @@ EOS
       ee = ServerDataProcessingTimeout.new("Delayed Data Processing")
       ee.description = <<EOS
 Data processing is taking more than expected. Please contact support@flydata.com to check the system status.
-Once checked, you can continue
+Once checked, you can continue your operation with the following command
 
-
+  #{recover_cmd}
 
 EOS
       ee.set_backtrace e.backtrace
@@ -204,21 +229,23 @@ EOS
 
       # Cleanup tables on server
       de = data_entry
-      cleanup_sync_server(de, @input_tables) unless
+      cleanup_sync_server(de, @input_tables) unless reset_client_only
       sync_fm = create_sync_file_manager(de)
 
       # Delete local files
       sync_fm.delete_dump_files
-      sync_fm.
+      sync_fm.delete_table_position_files(*@input_tables)
+      sync_fm.delete_table_rev_files(*@input_tables)
+      sync_fm.delete_table_ddl_files(*@input_tables) if delete_tbl_ddl
 
       new_tables_after_reset = @unsynced_tables + @input_tables
       if @input_tables.empty? or @full_tables.empty? or @full_tables.all?{|ft| new_tables_after_reset.include?(ft)}
         sync_fm.delete_master_position_files
       end
       sync_fm.close
+
       log_info_stdout("Reset completed successfully.")
     end
-    run_exclusive :reset
 
     # Deprecated Command: flydata sync:skip
     # skip initial sync
@@ -554,13 +581,13 @@ EOS
 
     # Initial sync
 
-    def handle_initial_sync(
+    def handle_initial_sync(options = {})
       unless Flydata::Command::Sender.new.wait_until_server_ready
         raise Timeout::Error, "Timed out to wait for the server side to become active. Please try again later."
       end
 
       # Setup instance variables
-      sync_resumed = set_current_tables(
+      sync_resumed = set_current_tables(nil, resume: true)
 
       if sync_resumed
         # skip confirmation prompts and resume sync right away.
@@ -595,7 +622,22 @@ EOS
     def initial_sync(opt)
       de = data_entry
       # Load sync information from file
-      validate_initial_sync_status
+      aborted_during_dump = validate_initial_sync_status
+      if aborted_during_dump
+        # The previous init-sync failed during dump.
+        # - Discard remaining continuous-sync data by automatically running `sync:reset --init`
+        #   and run init-sync from scratch.
+        # - Will keep ./positions/*.generated_ddl files.
+        #   No need to re-run sync:generate_table_ddl to drop tables on Redshift because
+        #   no data was written to Redshift during the previous init-sync.
+        $log.info "Resetting the initial sync..."
+        recover_cmd = "flydata restart"
+        _reset(recover_cmd, reset_client_only: false, delete_tbl_ddl: false)
+
+        # Setup instance variables again
+        sync_resumed = set_current_tables(nil, resume: true)
+      end
+
       begin
         if opt[:sync_resumed]
           # parallel cont sync has sent buffer data by now so server buffer
@@ -732,12 +774,7 @@ EOM
         #Catch all exceptions including SystemExit and Interrupt.
         log_info_stdout "Quit while running dump, deleting dump file..."
         sync_fm.delete_dump_file
-        log_info_stdout "Dump file deleted. To restart the FlyData Agent,
-        if (target_tables.nil? or target_tables.empty?)
-          log_info_stdout "To do full reset sync use command: flydata sync:reset"
-        else
-          log_info_stdout "To reset these tables, use command: flydata sync:reset #{target_tables.join(' ')}"
-        end
+        log_info_stdout "Dump file deleted. To restart the FlyData Agent, run the following command: flydata restart"
         raise e
       end
       call_block_or_return_io(fp, source_pos, &dump_ready_callback)
@@ -1294,9 +1331,10 @@ Thank you for using FlyData!
 
       @input_tables = sync_resumed ? sync_info[:tables] : input_tables
       @input_tables ||= []
+
+      # true if all tables in the data entry haven't been init-synced
       @full_initial_sync = sync_resumed ? sync_info[:initial_sync] :
         (@unsynced_tables == @full_tables)
-
      sync_fm.close
 
       verify_input_tables(@input_tables, @full_tables)
@@ -1307,13 +1345,17 @@ Thank you for using FlyData!
     def validate_initial_sync_status
       sync_fm = create_sync_file_manager
       dump_pos_info = sync_fm.load_dump_pos
-
+      sync_info_exists = !!sync_fm.load_sync_info
+      dump_file_deleted = !File.exists?(sync_fm.dump_file_path)
       sync_fm.close
 
       # status is parsing but dumpfile doesn't exist due to streaming -> raise error
-      if dump_pos_info[:status] == STATUS_PARSING &&
+      if dump_pos_info[:status] == STATUS_PARSING && dump_file_deleted
         raise "FlyData Sync was interrupted with invalid state. Run 'flydata sync:reset#{@input_tables.join(',')}' first."
       end
+
+      # check if the previous initial sync was aborted during dump.
+      sync_info_exists && dump_file_deleted
     end
 
     def target_tables
data/lib/flydata/source_mysql/mysql_compatibility_check.rb
CHANGED

@@ -17,6 +17,7 @@ module SourceMysql
       @dump_dir = options[:dump_dir] || nil
       @backup_dir = options[:backup_dir] || nil
       @tables = de_hash['tables']
+      @rds = FlydataCore::Mysql::MysqlCompatibilityChecker.new(@db_opts).rds?
     end
 
     def print_errors
@@ -99,8 +100,8 @@ module SourceMysql
       end
     end
 
-    def is_rds?
-
+    def is_rds?
+      @rds
     end
   end
 
data/lib/flydata/source_mysql/plugin_support/binlog_query_dispatcher.rb
CHANGED

@@ -1,4 +1,6 @@
 require 'flydata/source_mysql/plugin_support/alter_table_query_handler'
+require 'flydata/source_mysql/plugin_support/create_table_query_handler'
+require 'flydata/source_mysql/plugin_support/drop_table_query_handler'
 require 'flydata/source_mysql/plugin_support/truncate_table_query_handler'
 require 'flydata/source_mysql/plugin_support/drop_database_query_handler'
 require 'flydata/source_mysql/plugin_support/unsupported_query_handler'
@@ -63,6 +65,8 @@ module PluginSupport
       @handlers = [
         AlterTableQueryHandler.new(context),
         TruncateTableQueryHandler.new(context),
+        CreateTableQueryHandler.new(context),
+        DropTableQueryHandler.new(context),
         DropDatabaseQueryHandler.new(context),
         UnsupportedQueryHandler.new(context), # This must be the last element
       ]
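The dispatcher registers the two new handlers (CREATE TABLE and DROP TABLE, defined below) ahead of the catch-all UnsupportedQueryHandler. A rough, self-contained sketch of this kind of first-match dispatch over per-query-type handlers (class and method names here are simplified stand-ins, not the gem's actual API):

# Illustrative only: the first handler whose pattern matches the query processes it.
class FakeHandler
  attr_reader :pattern

  def initialize(pattern, name)
    @pattern = pattern
    @name = name
  end

  def process(record)
    puts "#{@name} handles: #{record["query"]}"
  end
end

handlers = [
  FakeHandler.new(/^ALTER TABLE/i, "alter"),
  FakeHandler.new(/^CREATE TABLE/i, "create"),
  FakeHandler.new(/^DROP TABLE/i, "drop"),
  FakeHandler.new(/./, "unsupported"), # catch-all must stay last
]

record = { "query" => "CREATE TABLE users (id int)" }
handlers.find { |h| h.pattern =~ record["query"] }.process(record)
# => create handles: CREATE TABLE users (id int)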
data/lib/flydata/source_mysql/plugin_support/create_table_query_handler.rb
ADDED

@@ -0,0 +1,28 @@
+require 'flydata/source_mysql/plugin_support/ddl_query_handler'
+
+module Flydata
+module SourceMysql
+
+module PluginSupport
+  class CreateTableQueryHandler < TableDdlQueryHandler
+    PATTERN = /^CREATE TABLE/i
+
+    def initialize(context)
+      super
+    end
+
+    def pattern
+      PATTERN
+    end
+
+    def process(record)
+      if acceptable_db?(record)
+        $log.error "CREATE TABLE detected. Please reset and resync the following table:'#{table_info(record)[:table_name]}'. - query:'#{record["query"]}' binlog_pos:'#{binlog_pos(record)}'"
+      end
+      #NOTE: No emit_record here because this record should not be sent to data servers for now
+    end
+  end
+end
+
+end
+end
data/lib/flydata/source_mysql/plugin_support/drop_table_query_handler.rb
ADDED

@@ -0,0 +1,28 @@
+require 'flydata/source_mysql/plugin_support/ddl_query_handler'
+
+module Flydata
+module SourceMysql
+
+module PluginSupport
+  class DropTableQueryHandler < TableDdlQueryHandler
+    PATTERN = /^DROP TABLE/i
+
+    def initialize(context)
+      super
+    end
+
+    def pattern
+      PATTERN
+    end
+
+    def process(record)
+      if acceptable_db?(record)
+        $log.error "DROP TABLE detected for table:'#{table_info(record)[:table_name]}'. - query:'#{record["query"]}' binlog_pos:'#{binlog_pos(record)}'"
+      end
+      #NOTE: No emit_record here because this record should not be sent to data servers for now
+    end
+  end
+end
+
+end
+end
data/lib/flydata/source_postgresql/table_meta.rb
CHANGED

@@ -60,9 +60,13 @@ FROM
   JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey)
   RIGHT JOIN
     (SELECT
-      (
+      (table_schema ||'.'|| table_name)::regclass AS regid,
       *
      FROM information_schema.columns
+     WHERE
+       table_catalog = '%{database}'
+       AND table_schema IN (%{schema})
+       AND table_name IN (%{tables})
     ) c
   ON i.indrelid = c.regid AND a.attname = c.column_name
 WHERE
data/lib/flydata/sync_file_manager.rb
CHANGED

@@ -324,19 +324,31 @@ module Flydata
       new_rev
     end
 
-    def
+    def delete_table_position_files(*tables)
       files_to_delete = [
         table_position_file_paths(*tables),
         table_source_pos_paths(*tables),
         table_source_pos_init_paths(*tables),
-        table_rev_file_paths(*tables),
-        table_ddl_file_paths(*tables)
       ]
       files_to_delete.flatten.each do |path|
         FileUtils.rm(path) if File.exists?(path)
       end
     end
 
+    def delete_table_rev_files(*tables)
+      files_to_delete = table_rev_file_paths(*tables)
+      files_to_delete.each do |path|
+        FileUtils.rm(path) if File.exists?(path)
+      end
+    end
+
+    def delete_table_ddl_files(*tables)
+      files_to_delete = table_ddl_file_paths(*tables)
+      files_to_delete.each do |path|
+        FileUtils.rm(path) if File.exists?(path)
+      end
+    end
+
     def tables_from_positions_dir
       all_table_control_files = Dir.glob(File.join(table_positions_dir_path, '*.{pos,generated_ddl,init,rev}'))
       tables = Set.new
data/spec/flydata/api/data_entry_spec.rb
CHANGED

@@ -177,8 +177,8 @@ describe DataEntry
     end
     context "with more than 100 tables" do
       let(:tables) { 180.times.collect{|i| "tbl#{i}"} }
-      let(:expected_tables1) { tables[0...
-      let(:expected_tables2) { tables[
+      let(:expected_tables1) { tables[0...110].join(",") }
+      let(:expected_tables2) { tables[110..-1].join(",") }
 
       let(:expected_message) { double('expected_message') }
 
@@ -320,8 +320,8 @@ describe DataEntry
 
     context "with more than 100 tables" do
       let(:tables) { 180.times.collect {|i| "tbl#{i}"} }
-      let(:expected_tables1) { tables[0...
-      let(:expected_tables2) { tables[
+      let(:expected_tables1) { tables[0...110].join(",") }
+      let(:expected_tables2) { tables[110..-1].join(",") }
 
       it do
         expect(api_client).to receive(:post).
data/spec/flydata/command/sync_spec.rb
CHANGED

@@ -33,7 +33,8 @@ module Flydata
         "data_port_key"=>"a458c641",
         "mysql_data_entry_preference" =>
         { "host"=>"localhost", "port"=>3306, "username"=>"masashi",
-          "password"=>"welcome", "database"=>"sync_test",
+          "password"=>"welcome", "database"=>"sync_test",
+          "tables"=>["table1", "table2", "table4"],
           "invalid_tables"=>["table3"],
           "new_tables"=>["table4"],
           "dump_dir"=>default_dump_dir, "forwarder" => "tcpforwarder",
@@ -419,6 +420,112 @@ module Flydata
       end
     end
   end
+
+  describe '#reset' do
+    let(:subject_object) { described_class.new(reset_opts) }
+    subject { subject_object.reset(*arg_tables) }
+
+    let(:command_options) { ["-y"] }
+    let(:reset_opts) { Sync.slop_reset }
+    let(:timeout_value) { Sync::SERVER_DATA_PROCESSING_TIMEOUT } #3600
+
+    let(:test_data_entry) { default_data_entry }
+    let(:valid_tables_append_only) { nil }
+    let(:invalid_tables_append_only) { nil }
+    let(:full_tables) { ["table1", "table2"] } #not include invalid_tables and real_new_tables
+    let(:new_tables) { ["table4"] }
+
+    before do
+      command_options.each {|opt_str| reset_opts.parse!([opt_str]) }
+      allow(subject_object).to receive(:data_entry).and_return(test_data_entry)
+      allow(subject_object).to receive(:check_server_status).and_return( { "complete" => true, "state" => "complete" } )
+      allow_any_instance_of(Api::DataEntry).to receive(:cleanup_sync)
+
+      allow_any_instance_of(SyncFileManager).to receive(:load_sync_info).and_return(sync_info)
+      allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "generated_ddl").and_return(full_tables)
+      allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(new_tables, "pos").and_return(new_tables)
+      allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "pos").and_return([])
+    end
+
+    context 'when called with --all' do
+      let(:arg_tables) { [] }
+      let(:command_options) { ["-y", "--all"] }
+      let(:sync_info) { nil }
+      it do
+        expect(subject_object).to receive(:set_current_tables).with([], resume: false).and_call_original
+        expect(subject_object).to receive(:wait_for_server_buffer).with( {:timeout=>timeout_value, :tables=>arg_tables} )
+        expect(subject_object).to receive(:cleanup_sync_server).with( default_data_entry, arg_tables )
+        subject
+      end
+    end
+
+    context 'when called with --init' do
+      let(:arg_tables) { [] }
+      let(:command_options) { ["-y", "--init"] }
+      let(:sync_info) { {initial_sync: is_full_init_sync, tables: sync_info_tables} }
+      let(:full_tables) { ["table1", "table2", "table4"] }
+      let(:new_tables) { [] }
+      before do
+        # no new tables.
+        test_data_entry["mysql_data_entry_preference"]["new_tables"] = []
+        # continuous sync has started behind the aborted init-sync and all tables have .pos file
+        allow_any_instance_of(SyncFileManager).to receive(:load_sync_info).and_return(sync_info)
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "generated_ddl").and_return([])
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(new_tables, "pos").and_return([])
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "pos").and_return([])
+      end
+
+      context 'when the aborted init-sync was for all tables in the application' do
+        let(:is_full_init_sync) { true }
+        let(:sync_info_tables) { ["table1", "table2", "table4"] }
+        let(:target_tables_for_api) { [] }
+        it do
+          expect(subject_object).to receive(:set_current_tables).with([], resume: true).and_call_original
+          expect(subject_object).to receive(:wait_for_server_buffer).with(
+            {:timeout=>timeout_value, :tables=>target_tables_for_api} )
+          expect(subject_object).to receive(:cleanup_sync_server).with(
+            default_data_entry, sync_info_tables )
+          subject
+        end
+      end
+
+      context 'when the aborted init-sync was for a part of tables in the application' do
+        let(:is_full_init_sync) { false }
+        let(:sync_info_tables) { ["table2"] }
+        let(:target_tables_for_api) { sync_info_tables }
+        it do
+          expect(subject_object).to receive(:set_current_tables).with([], resume: true).and_call_original
+          expect(subject_object).to receive(:wait_for_server_buffer).with(
+            {:timeout=>timeout_value, :tables=>target_tables_for_api} )
+          expect(subject_object).to receive(:cleanup_sync_server).with(
+            default_data_entry, sync_info_tables )
+          subject
+        end
+      end
+    end
+
+    context 'when called with a table as an argument' do
+      let(:arg_tables) { ["table2"] }
+      let(:command_options) { ["-y"] }
+      let(:sync_info) { nil }
+      let(:target_tables_for_api) { arg_tables }
+      before do
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(
+          full_tables, "pos").and_return([])
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "pos").and_return([])
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(new_tables, "pos").and_return(new_tables)
+        allow_any_instance_of(SyncFileManager).to receive(:get_new_table_list).with(full_tables, "generated_ddl").and_return([])
+      end
+      it do
+        expect(subject_object).to receive(:set_current_tables).with(arg_tables, resume: true).and_call_original
+        expect(subject_object).to receive(:wait_for_server_buffer).with(
+          {:timeout=>timeout_value, :tables=>arg_tables} )
+        expect(subject_object).to receive(:cleanup_sync_server).with(
+          default_data_entry, arg_tables )
+        subject
+      end
+    end
+  end
   end
 end
 end
data/spec/flydata/source_mysql/plugin_support/binlog_query_dispatcher_spec.rb
CHANGED

@@ -59,6 +59,16 @@ EOT
       allow(r).to receive(:pattern).and_return(Flydata::SourceMysql::PluginSupport::TruncateTableQueryHandler::PATTERN)
       r
     end
+    let(:create_table_query_handler) do
+      r = double('create_table_query_handler')
+      allow(r).to receive(:pattern).and_return(Flydata::SourceMysql::PluginSupport::CreateTableQueryHandler::PATTERN)
+      r
+    end
+    let(:drop_table_query_handler) do
+      r = double('drop_table_query_handler')
+      allow(r).to receive(:pattern).and_return(Flydata::SourceMysql::PluginSupport::DropTableQueryHandler::PATTERN)
+      r
+    end
     let(:drop_database_query_handler) do
       r = double('drop_database_query_handler')
       allow(r).to receive(:pattern).and_return(Flydata::SourceMysql::PluginSupport::DropDatabaseQueryHandler::PATTERN)
@@ -83,6 +93,8 @@ EOT
     before do
       expect(AlterTableQueryHandler).to receive(:new).with(context).and_return(alter_query_handler)
      expect(TruncateTableQueryHandler).to receive(:new).with(context).and_return(truncate_query_handler)
+      expect(CreateTableQueryHandler).to receive(:new).with(context).and_return(create_table_query_handler)
+      expect(DropTableQueryHandler).to receive(:new).with(context).and_return(drop_table_query_handler)
       expect(DropDatabaseQueryHandler).to receive(:new).with(context).and_return(drop_database_query_handler)
       allow(UnsupportedQueryHandler).to receive(:new).with(context).and_return(unsupported_query_handler)
     end
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: flydata
 version: !ruby/object:Gem::Version
-  version: 0.7.8
+  version: 0.7.9
 platform: ruby
 authors:
 - Koichi Fujikawa
@@ -12,7 +12,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-
+date: 2016-08-05 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rest-client
@@ -484,6 +484,7 @@ executables:
 - fdredshift
 - flydata
 - serverinfo
+- split_sync_ddl.rb
 extensions:
 - ext/flydata/json/extconf.rb
 - ext/flydata/source_mysql/parser/extconf.rb
@@ -502,6 +503,7 @@ files:
 - bin/fdredshift
 - bin/flydata
 - bin/serverinfo
+- bin/split_sync_ddl.rb
 - circle.yml
 - ext/flydata/flydata.h
 - ext/flydata/json/extconf.rb
@@ -732,9 +734,11 @@ files:
 - lib/flydata/source_mysql/plugin_support/binlog_record_dispatcher.rb
 - lib/flydata/source_mysql/plugin_support/binlog_record_handler.rb
 - lib/flydata/source_mysql/plugin_support/context.rb
+- lib/flydata/source_mysql/plugin_support/create_table_query_handler.rb
 - lib/flydata/source_mysql/plugin_support/ddl_query_handler.rb
 - lib/flydata/source_mysql/plugin_support/dml_record_handler.rb
 - lib/flydata/source_mysql/plugin_support/drop_database_query_handler.rb
+- lib/flydata/source_mysql/plugin_support/drop_table_query_handler.rb
 - lib/flydata/source_mysql/plugin_support/source_position_file.rb
 - lib/flydata/source_mysql/plugin_support/truncate_table_query_handler.rb
 - lib/flydata/source_mysql/plugin_support/unsupported_query_handler.rb