flydata 0.3.5 → 0.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/VERSION +1 -1
- data/bin/flydata +1 -0
- data/flydata-core/lib/flydata-core/core_ext/module.rb +1 -1
- data/flydata-core/lib/flydata-core/core_ext/object.rb +1 -1
- data/flydata.gemspec +21 -5
- data/lib/flydata.rb +5 -8
- data/lib/flydata/api/data_entry.rb +2 -0
- data/lib/flydata/api/data_port.rb +2 -0
- data/lib/flydata/api/redshift_cluster.rb +2 -0
- data/lib/flydata/api_client.rb +3 -0
- data/lib/flydata/cli.rb +13 -2
- data/lib/flydata/command/base.rb +6 -0
- data/lib/flydata/command/conf.rb +3 -0
- data/lib/flydata/command/crontab.rb +3 -0
- data/lib/flydata/command/encrypt.rb +3 -0
- data/lib/flydata/command/kill_all.rb +3 -0
- data/lib/flydata/command/login.rb +2 -0
- data/lib/flydata/command/restart.rb +3 -0
- data/lib/flydata/command/routine.rb +3 -0
- data/lib/flydata/command/sender.rb +2 -0
- data/lib/flydata/command/setlogdel.rb +4 -1
- data/lib/flydata/command/setup.rb +7 -2
- data/lib/flydata/command/start.rb +3 -0
- data/lib/flydata/command/status.rb +3 -0
- data/lib/flydata/command/stop.rb +3 -0
- data/lib/flydata/command/sync.rb +10 -3
- data/lib/flydata/command/version.rb +2 -0
- data/lib/flydata/{command_logger.rb → command_loggable.rb} +0 -0
- data/lib/flydata/compatibility_check.rb +1 -1
- data/lib/flydata/credentials.rb +2 -0
- data/lib/flydata/fluent-plugins/in_mysql_binlog_flydata.rb +8 -9
- data/lib/flydata/fluent-plugins/mysql/alter_table_query_handler.rb +1 -1
- data/lib/flydata/fluent-plugins/mysql/binlog_query_dispatcher.rb +1 -1
- data/lib/flydata/fluent-plugins/mysql/binlog_query_handler.rb +1 -1
- data/lib/flydata/fluent-plugins/mysql/binlog_record_dispatcher.rb +2 -2
- data/lib/flydata/fluent-plugins/mysql/binlog_record_handler.rb +1 -1
- data/lib/flydata/fluent-plugins/mysql/ddl_query_handler.rb +1 -1
- data/lib/flydata/fluent-plugins/mysql/dml_record_handler.rb +1 -1
- data/lib/flydata/helpers.rb +0 -10
- data/lib/flydata/heroku.rb +3 -0
- data/lib/flydata/output/forwarder.rb +1 -1
- data/lib/flydata/parser/mysql/dump_parser.rb +29 -31
- data/lib/flydata/sync_file_manager.rb +230 -232
- data/spec/fly_data_model_spec.rb +1 -0
- data/spec/flydata/api/data_entry_spec.rb +1 -0
- data/spec/flydata/api_client_spec.rb +18 -0
- data/spec/flydata/cli_spec.rb +1 -0
- data/spec/flydata/command/base_spec.rb +44 -0
- data/spec/flydata/command/conf_spec.rb +21 -0
- data/spec/flydata/command/crontab_spec.rb +17 -0
- data/spec/flydata/command/encrypt_spec.rb +28 -0
- data/spec/flydata/command/kill_all_spec.rb +17 -0
- data/spec/flydata/command/login_spec.rb +21 -0
- data/spec/flydata/command/restart_spec.rb +17 -0
- data/spec/flydata/command/routine_spec.rb +29 -0
- data/spec/flydata/command/sender_spec.rb +7 -2
- data/spec/flydata/command/setlogdel_spec.rb +18 -0
- data/spec/flydata/command/setup_spec.rb +44 -0
- data/spec/flydata/command/start_spec.rb +17 -0
- data/spec/flydata/command/status_spec.rb +17 -0
- data/spec/flydata/command/stop_spec.rb +17 -0
- data/spec/flydata/command/sync_spec.rb +1 -0
- data/spec/flydata/command/version_spec.rb +14 -0
- data/spec/flydata/fluent-plugins/in_mysql_binlog_flydata_spec.rb +1 -1
- data/spec/flydata/parser/mysql/dump_parser_spec.rb +23 -73
- data/spec/flydata/sync_file_manager_spec.rb +150 -152
- metadata +19 -4
data/lib/flydata/fluent-plugins/mysql/binlog_record_dispatcher.rb
CHANGED
@@ -1,7 +1,7 @@
 require 'fluent/plugin/in_mysql_binlog'
 require 'binlog'
-
-
+require 'flydata/fluent-plugins/mysql/dml_record_handler'
+require 'flydata/fluent-plugins/mysql/binlog_query_dispatcher'
 
 module Mysql
   class BinlogRecordDispatcher
data/lib/flydata/helpers.rb
CHANGED
@@ -2,16 +2,6 @@ module Flydata
   module Helpers
     @@development_mode = nil
     module_function
-    def parse_command(cmd)
-      klass = Flydata::Command::Base
-      method = cmd
-      if cmd.include?(':')
-        class_name, method = cmd.split(':')
-        klass = to_command_class(class_name)
-      end
-      [klass, method]
-    end
-
     def usage_text
       text = ""
 
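The removed Flydata::Helpers.parse_command was the piece that turned a CLI argument such as "command:method" into a command class and a method name. A minimal sketch of how a caller used it, based only on the deleted code above; the argument string and the concrete command class named in the comments are illustrative assumptions, not taken from this diff:

    # Hypothetical call: a "xxx:yyy" argument splits on ':' and is resolved via
    # to_command_class, while a bare "yyy" falls back to Flydata::Command::Base.
    klass, method = Flydata::Helpers.parse_command("sync:flush")
    # klass  => e.g. Flydata::Command::Sync (assumed class name)
    # method => "flush"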
data/lib/flydata/parser/mysql/dump_parser.rb
CHANGED
@@ -12,7 +12,7 @@ module Flydata
 
     def mysql_cli(conf = nil)
       mysql_conf(conf) if conf
-      return
+      return FdMysqlClient.new(@mysql_conf) if @mysql_conf
       nil
     end
   end
@@ -53,32 +53,6 @@ module Flydata
     end
   end
 
-  class MysqlDumpGeneratorMasterData < MysqlDumpGenerator
-    def dump(file_path)
-      cmd = "#{@dump_cmd} -r #{file_path}"
-      o, e, s = Open3.capture3(cmd)
-      e.to_s.each_line {|l| puts l unless /^Warning:/ =~ l } unless e.to_s.empty?
-      unless s.exitstatus == 0
-        if File.exists?(file_path)
-          File.open(file_path, 'r') {|f| f.each_line{|l| puts l}}
-          FileUtils.rm(file_path)
-        end
-        raise "Failed to run mysqldump command."
-      end
-      unless File.exists?(file_path)
-        raise "mysqldump file does not exist. Something wrong..."
-      end
-      if File.size(file_path) == 0
-        raise "mysqldump file is empty. Something wrong..."
-      end
-      true
-    end
-
-    def generate_dump_cmd(conf)
-      Util::MysqlUtil.generate_mysqldump_with_master_data_cmd(conf)
-    end
-  end
-
   class MysqlDumpGeneratorNoMasterData < MysqlDumpGenerator
     CHANGE_MASTER_TEMPLATE = <<EOS
 --
@@ -100,7 +74,7 @@ EOS
 
       begin
         # create pipe for callback function
-        rd_io, wr_io = IO.pipe
+        rd_io, wr_io = IO.pipe("utf-8")
         wr_io.sync = true
         wr_io.set_encoding("utf-8")
         rd_io.extend(DumpStreamIO)
@@ -119,7 +93,7 @@ EOS
         threads << Thread.new do
           begin
             wr_io.print(first_line) # write a first line
-
+            filter_dump_stream(cmd_out, wr_io, binlog_file, binlog_pos)
           ensure
             wr_io.close rescue nil
           end
@@ -187,7 +161,7 @@ EOS
 
     def create_table_locker
       Fiber.new do
-        client =
+        client = FdMysqlClient.new(@db_opts)
         # Lock tables
         client.query "FLUSH LOCAL TABLES;"
         q = flush_tables_with_read_lock_query(client)
@@ -227,7 +201,7 @@ EOS
       result.first['Value']
     end
 
-    def
+    def filter_dump_stream(cmd_out, w_io, binlog_file, binlog_pos)
       find_insert_pos = :not_started
       cmd_out.each_line do |line|
         if find_insert_pos == :not_started && /^-- Server version/ === line
@@ -248,6 +222,30 @@ EOS
 
   end
 
+  # Custom mysql client that sets config params (eg:-read_timeout) uniformly for all
+  # mysql access. Also, gives access to the last query that was executed using the client
+  # which can be helpful when handling exceptions
+  class FdMysqlClient < Mysql2::Client
+
+    attr_accessor :last_query
+
+    def initialize(db_opts)
+      super(db_opts.merge(read_timeout: 600))
+    end
+
+    def query(sql, options = {})
+      @last_query = sql
+      begin
+        super(sql, options)
+      rescue Mysql2::Error => e
+        if /^Timeout waiting for a response/ === e.to_s
+          raise "The below query timed out when running. Please check long running processes and locks in your database.\n#{last_query}"
+        end
+        raise e
+      end
+    end
+  end
+
   class MysqlDumpParser
 
     module State
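The new FdMysqlClient above wraps Mysql2::Client so that every connection opened by the dump parser gets a 600-second read_timeout and remembers the SQL it last ran; a query that times out is re-raised with that SQL in the message. A minimal sketch of that behaviour, with placeholder connection options (the constant is shown unqualified because the hunk only reveals the enclosing module Flydata):

    # Placeholder options; only the read_timeout/last_query behaviour is taken
    # from the diff above.
    db_opts = { host: '127.0.0.1', username: 'repl', password: 'secret', database: 'app' }
    client = FdMysqlClient.new(db_opts)   # super(db_opts.merge(read_timeout: 600))
    client.query("SHOW MASTER STATUS")
    client.last_query                     # => "SHOW MASTER STATUS"
    # A Mysql2::Error beginning "Timeout waiting for a response" is re-raised
    # with the offending SQL appended to the error message.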
data/lib/flydata/sync_file_manager.rb
CHANGED
@@ -1,290 +1,288 @@
 module Flydata
-
-
-
-
-
-
-
-
-  end
+  class SyncFileManager
+    DUMP_DIR = ENV['FLYDATA_DUMP'] || File.join(FLYDATA_HOME, 'dump')
+    BACKUP_DIR = ENV['FLYDATA_BACKUP'] || File.join(FLYDATA_HOME, 'backup')
+    TABLE_POSITIONS_DIR = ENV['FLYDATA_TABLE_POSITIONS'] || File.join(FLYDATA_HOME, 'positions')
+    def initialize(data_entry)
+      @data_entry = data_entry
+      @table_position_files = {} # File objects keyed by table name
+    end
 
-
-
-
-
+    def close
+      @table_position_files.values.each {|f| f.close }
+      @table_position_files = {}
+    end
 
-
-
-
+    def dump_file_path
+      File.join(dump_dir, @data_entry['name']) + ".dump"
+    end
 
-
-
-
-
+    # dump pos file for resume
+    def dump_pos_path
+      dump_file_path + ".pos"
+    end
 
-
-
-
-    end
+    def save_dump_pos(status, table_name, last_pos, binlog_pos, state = nil, substate = nil)
+      File.open(dump_pos_path, 'w') do |f|
+        f.write(dump_pos_content(status, table_name, last_pos, binlog_pos, state, substate))
       end
+    end
 
-
-
-
-
-
-
-
-
-
-
+    def load_dump_pos
+      path = dump_pos_path
+      return {} unless File.exists?(path)
+      items = File.open(path, 'r').readline.split("\t")
+      raise "Invalid dump.pos file: #{path}" unless items.length >= 5 && items.length <= 7
+      mysql_table = load_mysql_table_marshal_dump
+      { status: items[0], table_name: items[1], last_pos: items[2].to_i,
+        binlog_pos: {binfile: items[3], pos: items[4].to_i},
+        state: items[5], substate: items[6], mysql_table: mysql_table}
+    end
 
-
-
-
-
-
-    end
-    tables.each do |tab|
-      File.open(File.join(table_positions_dir_path, "#{tab}.generated_ddl"), 'w') {|f| f.write("1") }
-    end
+    def mark_generated_tables(tables)
+      table_positions_dir_path = ENV['FLYDATA_TABLE_POSITIONS'] || File.join(FLYDATA_HOME, 'positions')
+      #Create positions if dir does not exist
+      unless File.directory?(table_positions_dir_path)
+        FileUtils.mkdir_p(table_positions_dir_path)
       end
-
-
-    table_positions_dir_path = ENV['FLYDATA_TABLE_POSITIONS'] || File.join(FLYDATA_HOME, 'positions')
-    new_tables = []
-    tables.each do |table|
-      new_tables << table unless File.exists?(File.join(table_positions_dir_path, "#{table}.#{file_type}"))
-    end
-    new_tables
+      tables.each do |tab|
+        File.open(File.join(table_positions_dir_path, "#{tab}.generated_ddl"), 'w') {|f| f.write("1") }
       end
+    end
 
-
-
-
+    def get_new_table_list(tables, file_type)
+      table_positions_dir_path = ENV['FLYDATA_TABLE_POSITIONS'] || File.join(FLYDATA_HOME, 'positions')
+      new_tables = []
+      tables.each do |table|
+        new_tables << table unless File.exists?(File.join(table_positions_dir_path, "#{table}.#{file_type}"))
       end
+      new_tables
+    end
 
-
-
-
-
-    end
+    # MysqlTable marshal file
+    def mysql_table_marshal_dump_path
+      dump_file_path + ".mysql_table"
+    end
 
-
-
-
-      File.open(path, 'w') do |f|
-        f.write(binlog_content(binlog_pos))
-      end
+    def save_mysql_table_marshal_dump(mysql_table)
+      File.open(mysql_table_marshal_dump_path, 'w') do |f|
+        f.write Marshal.dump(mysql_table)
       end
+    end
 
-
-
+    # binlog.pos file
+    def save_binlog(binlog_pos)
+      path = binlog_path
+      File.open(path, 'w') do |f|
+        f.write(binlog_content(binlog_pos))
       end
+    end
 
-
-
-
-      File.open(file, "w") {|f| f.write('0') }
-    end
-  end
+    def binlog_path
+      File.join(FLYDATA_HOME, @data_entry['name'] + ".binlog.pos")
+    end
 
-
-
+    def reset_table_position_files(tables)
+      tables.each do |table_name|
+        file = File.join(table_positions_dir_path, table_name + ".pos")
+        File.open(file, "w") {|f| f.write('0') }
      end
+    end
 
-
-
-
-    end
+    def table_positions_dir_path
+      TABLE_POSITIONS_DIR
+    end
 
-
-
-
-
+    def table_position_file_paths(*tables)
+      tables.empty? ? Dir.glob(File.join(table_positions_dir_path, '*.pos')) :
+        tables.map{|table| File.join(table_positions_dir_path, table + '.pos')}
+    end
 
-
-
-
-
+    def table_ddl_file_paths(*tables)
+      tables.empty? ? Dir.glob(File.join(table_positions_dir_path, '*.generated_ddl')) :
+        tables.map{|table| File.join(table_positions_dir_path, table + '.generated_ddl')}
+    end
 
-
-
-
-
+    def table_binlog_pos_paths(*tables)
+      tables.empty? ? Dir.glob(File.join(table_positions_dir_path, '*.binlog.pos')) :
+        tables.map{|table| File.join(table_positions_dir_path, table + '.binlog.pos')}
+    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def table_binlog_pos_init_paths(*tables)
+      tables.empty? ? Dir.glob(File.join(table_positions_dir_path, '*.binlog.pos.init')) :
+        tables.map{|table| File.join(table_positions_dir_path, table + '.binlog.pos.init')}
+    end
+
+    # Read a sequence number from the table's position file,
+    # increment the number and pass the number to a block.
+    # After executing the block, saves the value to the position
+    # file.
+    def increment_and_save_table_position(table_name)
+      file = File.join(table_positions_dir_path, table_name + ".pos")
+      retry_count = 0
+      begin
+        @table_position_files[table_name] ||= File.open(file, "r+")
+      rescue Errno::ENOENT
+        raise if retry_count > 0 # Already retried. Must be a differentfile causing the error
+        # File not exist. Create one with initial value of '0'
+        File.open(file, "w") {|f| f.write('0') }
+        retry_count += 1
+        retry
+      end
+      f = @table_position_files[table_name]
+      seq = f.read
+      seq = seq.to_i + 1
+      begin
+        yield(seq)
+      ensure
+        # when an error happened in yield, the sequence number should remain
+        # as is. For the next call to read the value correctly, the position
+        # must be rewound.
         f.rewind
       end
+      f.truncate(0)
+      f.write(seq)
+      f.flush
+      f.rewind
+    end
 
-
-
-
+    def sync_info_file
+      File.join(dump_dir, "sync.info")
+    end
 
-
-
-
-    end
+    def save_sync_info(initial_sync, tables)
+      File.open(sync_info_file, "w") do |f|
+        f.write([initial_sync, tables.join(" ")].join("\t"))
       end
+    end
 
-
-
-
-
-
-
+    def load_sync_info
+      return nil unless File.exists?(sync_info_file)
+      items = File.open(sync_info_file, 'r').readline.split("\t")
+      { initial_sync: (items[0] == 'true'),
+        tables: items[1].split(" ") }
+    end
 
-
-
-
-
-
+    def get_table_binlog_pos(table_name)
+      file = File.join(table_positions_dir_path, table_name + ".binlog.pos")
+      return nil unless File.exists?(file)
+      File.open(file, 'r').readline
+    end
 
-
-
-
+    def table_rev_file_path(table_name)
+      File.join(table_positions_dir_path, table_name + ".rev")
+    end
 
-
-
-
-
+    def table_rev_file_paths(*tables)
+      tables.empty? ? Dir.glob(File.join(table_positions_dir_path, "*.rev")) :
+        tables.map{|table| table_rev_file_path(table)}
+    end
 
-
-
-
-
-
-
-
-
-
-    end
+    def table_rev(table_name)
+      file = table_rev_file_path(table_name)
+      return 1 unless File.exists?(file) #default revision is 1
+      File.open(file, "r+") do |f|
+        seq = f.read
+        if seq.empty?
+          return 1
+        else
+          return seq.to_i
         end
       end
+    end
 
-
-
-
-
-
-    end
-    new_rev
+    def increment_table_rev(table_name, base_rev)
+      file = table_rev_file_path(table_name)
+      new_rev = base_rev + 1
+      File.open(file, "w") do |f|
+        f.write(new_rev)
      end
+      new_rev
+    end
 
-
-
-
-
-
-
-    end
+    def delete_table_binlog_pos(table_name)
+      file = File.join(table_positions_dir_path, table_name + ".binlog.pos")
+      if File.exists?(file)
+        FileUtils.rm(file, :force => true)
+      else
+        puts "#{file} does not exist. Something is wrong. Did you delete the file manually when flydata was running?"
       end
+    end
 
-
-
-
-
-
-    end
+    def save_table_binlog_pos(tables, binlog_pos)
+      tables.each do |table_name|
+        file = File.join(dump_dir, table_name + ".binlog.pos")
+        File.open(file, "w") do |f|
+          f.write(binlog_content(binlog_pos))
        end
      end
+    end
 
-
-
-
-
-
-
-
-    end
-      FileUtils.mv(src_file, table_positions_dir_path)
-      # save the position at initial sync. this is used for repair if
-      # necessary.
-      FileUtils.cp(File.join(table_positions_dir_path, file_name), File.join(table_positions_dir_path, file_name + ".init"))
+    def install_table_binlog_files(tables)
+      FileUtils.mkdir_p(table_positions_dir_path) unless Dir.exists?(table_positions_dir_path)
+      tables.each do |table_name|
+        file_name = table_name + ".binlog.pos"
+        src_file = File.join(dump_dir, file_name)
+        if ! File.exists?(src_file)
+          raise "#{src_file} does not exist. Error!!"
        end
+        FileUtils.mv(src_file, table_positions_dir_path)
+        # save the position at initial sync. this is used for repair if
+        # necessary.
+        FileUtils.cp(File.join(table_positions_dir_path, file_name), File.join(table_positions_dir_path, file_name + ".init"))
      end
+    end
 
-
-
-
+    def delete_dump_file
+      FileUtils.rm(dump_file_path) if File.exists?(dump_file_path)
+    end
 
-
-
-
-
-
-
-
+    def backup_dump_dir
+      backup_dir = BACKUP_DIR.dup
+      FileUtils.mkdir_p(backup_dir) unless Dir.exists?(backup_dir)
+      dest_dir = File.join(backup_dir, Time.now.strftime("%Y%m%d%H%M%S"))
+      FileUtils.mkdir(dest_dir)
+      FileUtils.mv(Dir.glob("#{dump_dir}/*"), dest_dir)
+    end
 
-
-
-
+    def backup_dir
+      BACKUP_DIR
+    end
 
-
+    private
 
-
-
-
+    def dump_pos_content(status, table_name, last_pos, binlog_pos, state = nil, substate = nil)
+      [status, table_name, last_pos, binlog_content(binlog_pos), state, substate].join("\t")
+    end
 
-
-
-
+    def binlog_content(binlog_pos)
+      [binlog_pos[:binfile], binlog_pos[:pos]].join("\t")
+    end
 
-
-
-
-
-
+    def load_mysql_table_marshal_dump
+      path = mysql_table_marshal_dump_path
+      return nil unless File.exists?(path)
+      Marshal.load(File.open(path, 'r'))
+    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    end
+    def dump_dir
+      pref = @data_entry['mysql_data_entry_preference']
+      dump_dir = if pref and pref['mysqldump_dir']
+        pref['mysqldump_dir']
+      else
+        nil
+      end
+      if dump_dir
+        dump_dir = dump_dir.dup
+        dump_dir[0] = ENV['HOME'] if dump_dir.match(/^~$|^~\//)
+      else
+        dump_dir = DUMP_DIR.dup
+      end
+      if File.exists?(dump_dir) and not Dir.exists?(dump_dir)
+        raise "'mysqldump_dir'(#{dump_dir}) must be a directory."
+      end
+      FileUtils.mkdir_p(dump_dir) unless Dir.exists?(dump_dir)
+      dump_dir
    end
  end
 end
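The rewritten SyncFileManager keeps dump, backup and per-table position files under FLYDATA_HOME (overridable through FLYDATA_DUMP, FLYDATA_BACKUP and FLYDATA_TABLE_POSITIONS). A minimal usage sketch of the sequence-number helper shown in the diff, with a placeholder data entry and table name:

    # Placeholder data entry; only the 'name' key is used by the code above.
    manager = Flydata::SyncFileManager.new('name' => 'orders_entry')
    manager.increment_and_save_table_position('orders') do |seq|
      # emit records tagged with `seq`; if this block raises, the .pos file is
      # left unchanged because truncate/write only run after the block returns
    end
    manager.close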