tiny_backup 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/tiny_backup.rb +613 -0
- metadata +47 -0
data/lib/tiny_backup.rb
ADDED
@@ -0,0 +1,613 @@
|
|
1
|
+
begin
  # rubyzip >= 1.0 is loaded via 'zip' and exposes Zip::File
  require 'zip'
  ZIPLIB = Zip::File
  ZIPOLD = false
rescue LoadError
  # rubyzip 0.9.x only provides 'zip/zip' and the Zip::ZipFile class;
  # without this require, Zip would be undefined here and the constant
  # lookup below would raise NameError instead of loading the old gem
  require 'zip/zip'
  ZIPOLD = true
  ZIPLIB = Zip::ZipFile
end
|
10
|
+
require 'rake'
|
11
|
+
require 'csv'
|
12
|
+
|
13
|
+
module TinyBackup
|
14
|
+
# Get configuration settings
#
# Returns a *hash* with the following keys backup_folder, date_format, version_prefix, zip_prefix, max_versions, per_page
def config
  settings = [:backup_folder, :date_format, :version_prefix, :zip_prefix, :max_versions, :per_page]
  settings.map { |setting| [setting, send(setting)] }.to_h
end
|
27
|
+
|
28
|
+
# Set configuration settings
#
# +options+ is a *hash* with the following keys backup_folder, date_format, version_prefix, zip_prefix, max_versions, per_page
def config= options
  if options[:date_format].present?
    # probe the format once so an invalid date_format fails fast
    begin
      Time.now.strftime(options[:date_format]).to_datetime
    rescue
      raise ArgumentError, "invalid date format"
    end
  end

  integer_settings = [:max_versions, :per_page]
  options.slice(*config.keys).each do |key, value|
    coerced = integer_settings.include?(key) ? value.to_i : value.to_s
    instance_variable_set "@#{key}", coerced
  end
end
|
45
|
+
|
46
|
+
# Returns a *string* of the folder that is used to keep the backup files and temporary files
def backup_folder
  this_backup_folder = (@backup_folder || "db/backup").to_s

  # the backup_folder will not have any slash at the end;
  # chomp replaces the ActiveSupport-only String#last check
  this_backup_folder = this_backup_folder.chomp("/")

  # mkdir_p creates intermediate directories too; plain mkdir raised
  # Errno::ENOENT for the default "db/backup" when "db" itself was missing
  FileUtils.mkdir_p(this_backup_folder) unless File.directory?(this_backup_folder)

  this_backup_folder
end
|
58
|
+
|
59
|
+
# Returns a *string* of the date format that will be appended and used to time-stamp the backup files. Can be any valid date format similar with the ISO C and POSIX directives
#
# *Default* *value*: %d-%m-%Y_%H:%M
def date_format
  return @date_format if @date_format

  "%d-%m-%Y_%H:%M"
end
|
65
|
+
|
66
|
+
# Returns a *string* of the prefix used to name each .diff file that contains changes from the last version. Automatically generated +version_number+ will be appended to the +version_prefix+
#
# *Default* *value*: +ver_+
def version_prefix
  return @version_prefix if @version_prefix

  "ver_"
end
|
72
|
+
|
73
|
+
# Returns a *string* of the prefix used to name the starting .zip file that contains a schema.rb file and a .csv file for each non-empty table
#
# *Default* *value*: +origin+
def zip_prefix
  return @zip_prefix if @zip_prefix

  "origin"
end
|
79
|
+
|
80
|
+
# Returns an *integer* of maximum number of versions that can exist. If this number is exceeded, the first .diff version is merged in the starting .zip file and cannot be reversed
#
# *Default* *value*: +32+ (if backup is done daily, keep the last month versions)
def max_versions
  return @max_versions if @max_versions

  32
end
|
86
|
+
|
87
|
+
# Returns an *integer* of maximum number of rows fetched by a single query to avoid memory crashes. If your table has more rows it will be used multiple query to fetch all the data
#
# *Default* *value*: +100000+
def per_page
  return @per_page if @per_page

  100000
end
|
93
|
+
|
94
|
+
# Create a backup of the current database and choose automatically to create a .zip or .diff file:
# the first backup produces a full .zip (schema.rb + one .csv per non-empty table); every later
# backup produces a .diff against the merged state of the origin zip plus all existing versions.
#
# Returns +true+; re-raises (from the ensure block) any error captured during the run.
#
# *Lock* *until* *is* *done*
def backup_now
  # if the resource is locked, we skip to ensure block
  lock
  locked_by_this_method = true

  # files scheduled for deletion in the ensure block
  @tmp_files = []
  nvf = new_version_filename

  # dump the current schema once; it is used by both branches below
  stream = StringIO.new
  ActiveRecord::SchemaDumper.dump(ActiveRecord::Base.connection, stream)
  schema_rb = stream.string

  # new_version_filename returns a ".zip" name only when no origin zip exists yet
  if nvf.split(".").last == "zip"
    # there is no .zip file so we must create one and add schema.rb and .csv file for each table
    # TODO: use a much better compression like Zlib::BEST_COMPRESSION to reduce the zip size, but this will consume processing power
    ZIPLIB.open("#{backup_folder}/#{nvf}", ZIPLIB::CREATE) do |f|
      puts "-- backup_schema\n"
      t_benchmark = Benchmark.ms do
        f.get_output_stream("schema.rb") do |ff|
          ff.write schema_rb
          # old rubyzip leaves the stream object as a file on disk; clean it up later
          @tmp_files << ff if ZIPOLD
        end
      end
      puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"

      ActiveRecord::Base.connection.tables.each do |table|
        next if table == "schema_migrations"
        query_count = ActiveRecord::Base.connection.execute("SELECT COUNT(*) FROM #{table}").first.first

        puts "-- backup_table(\"#{table}\")\n"
        t_benchmark = Benchmark.ms do
          if query_count > 0
            rows = []
            query_index = 0

            # page through the table per_page rows at a time to bound memory use
            loop do
              break if query_index >= query_count
              query = ActiveRecord::Base.connection.execute("SELECT * FROM #{table} LIMIT #{per_page} OFFSET #{query_index}")
              # first page also emits the CSV header (column names)
              rows << add_query(query.fields) if query_index == 0
              query.each { |row| rows << add_query(row) }
              query_index += per_page
            end

            f.get_output_stream("#{table}.csv") do |ff|
              ff.write rows.join
              @tmp_files << ff if ZIPOLD
            end
          end
        end
        puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"
      end
    end

  else
    # a new .diff file is created with the diff between origin_zip and tmp_origin_zip(made by merging all the versions into origin)
    tmp_origin_zip = "#{backup_folder}/#{compact_original(:all)}"
    tables = ActiveRecord::Base.connection.tables
    is_empty = true

    File.open("#{backup_folder}/#{nvf}", "wb") do |f|
      ZIPLIB.open(tmp_origin_zip) do |zf|
        zf.entries.each do |zf_entry|

          if zf_entry.name == "schema.rb"
            puts "-- backup_schema\n"
            t_benchmark = Benchmark.ms do
              tables.delete "schema_migrations"
              this_diff = diff_files zf.read(zf_entry.name), schema_rb

              # only write a hunk when something actually changed
              if this_diff.present?
                is_empty = false
                f.write "***************\n"
                f.write "*** schema.rb \n"
                f.write "\n"
                this_diff.each { |i| f.write i }
                f.write "\n\n"
              end
            end
            puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"
          else

            table = zf_entry.name.split(".").first
            tables.delete table
            begin
              query_count = ActiveRecord::Base.connection.execute("SELECT COUNT(*) FROM #{table}").first.first
            rescue ActiveRecord::StatementInvalid
              # the table existed in the backup but was dropped from the database
              next
            end

            rows = []
            query_index = 0

            puts "-- backup_table(\"#{table}\")\n"
            t_benchmark = Benchmark.ms do
              loop do
                break if query_index >= query_count
                query = ActiveRecord::Base.connection.execute("SELECT * FROM #{table} LIMIT #{per_page} OFFSET #{query_index}")
                rows << add_query(query.fields) if query_index == 0
                query.each { |row| rows << add_query(row) }
                query_index += per_page
              end

              this_diff = diff_files zf.read(zf_entry.name), rows.join

              if this_diff.present?
                is_empty = false
                f.write "***************\n"
                f.write "*** #{zf_entry.name} \n"
                f.write "\n"
                this_diff.each { |i| f.write i }
                f.write "\n\n"
              end
            end
            puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"
          end

        end
      end

      # tables that are created recently and doesn't have a .csv file in the tmp_origin_zip
      tables.each do |table|
        begin
          query_count = ActiveRecord::Base.connection.execute("SELECT COUNT(*) FROM #{table}").first.first
        rescue ActiveRecord::StatementInvalid
          next
        end

        rows = []
        query_index = 0

        puts "-- backup_table(\"#{table}\")\n"
        t_benchmark = Benchmark.ms do
          loop do
            break if query_index >= query_count
            query = ActiveRecord::Base.connection.execute("SELECT * FROM #{table} LIMIT #{per_page} OFFSET #{query_index}")

            rows << add_query(query.fields) if query_index == 0
            query.each { |row| rows << add_query(row) }
            query_index += per_page
          end

          # diff against an empty file: the whole table content becomes additions
          this_diff = diff_files "", rows.join

          if this_diff.present?
            is_empty = false
            f.write "***************\n"
            f.write "*** #{table}.csv \n"
            f.write "\n"
            this_diff.each { |i| f.write i }
            f.write "\n\n"
          end
        end
        puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"
      end
    end

    File.delete tmp_origin_zip
    # nothing changed since the last backup: drop the empty diff
    File.delete("#{backup_folder}/#{nvf}") if is_empty
  end

  # keep max versions
  # NOTE(review): this sort is lexicographic, so "ver_10..." orders before
  # "ver_2..."; past 9 versions the "first" version picked here may not be
  # the numerically oldest one — verify against new_version_filename's numbering
  version_files = Dir.glob("#{backup_folder}/#{version_prefix}*").sort
  if max_versions < version_files.length
    # throw files to garbage
    @tmp_files << Dir.glob("#{backup_folder}/#{zip_prefix}*").first
    @tmp_files << version_files.first

    # fold the oldest version into the origin zip and time-stamp the result
    File.rename "#{backup_folder}/#{compact_original(1)}", dup_file("#{backup_folder}/#{zip_prefix}_#{Time.now.strftime(date_format)}.zip")
  end

  # delete temporary files before method exit
rescue => e
  # remember the error; it is re-raised after cleanup in the ensure block.
  # NOTE(review): @method_error is never reset on entry, so a later successful
  # call could re-raise a stale error from a previous failed call — verify
  @method_error = e
ensure
  unlock if locked_by_this_method
  @tmp_files.each { |i| File.delete(i) rescue nil } if @tmp_files.present?
  raise @method_error if @method_error.present?
  return true
end
|
276
|
+
|
277
|
+
# Merge into the .zip file and delete all the .diff files to clear the space
#
# Returns +true+; re-raises (from the ensure block) any error captured during the run.
#
# *Lock* *until* *is* *done*
def compact_all
  lock # if the resource is locked, we skip to ensure block
  locked_by_this_method = true
  # reset any error left over from a previous failed call; without this the
  # ensure block below would re-raise a stale exception on a successful run
  @method_error = nil

  # the old origin zip and every version file become garbage once merged
  @tmp_files = []
  @tmp_files += Dir.glob("#{backup_folder}/#{zip_prefix}*")
  @tmp_files += Dir.glob("#{backup_folder}/#{version_prefix}*")

  # make the temporary zip be the original and apply the updated_at time-stamp
  File.rename "#{backup_folder}/#{compact_original(:all)}", dup_file("#{backup_folder}/#{zip_prefix}_#{Time.now.strftime(date_format)}.zip")

  # delete temporary files before method exit
rescue => e
  @method_error = e
ensure
  unlock if locked_by_this_method
  @tmp_files.each { |i| File.delete(i) rescue nil } if @tmp_files.present?
  raise @method_error if @method_error.present?
  return true
end
|
300
|
+
|
301
|
+
# Change the database to match the selected integer +version_number+.
# If the +just_temporary+ is false, the unused version files will be deleted and the latest backup version will be synchronized with the database data.
# It's dangerous to start a new backup if the database is not synchronized with the latest backup version(call restore_db(:all) to restore all data)
#
# +version_number+ — an Integer version to roll back to, or :all for the latest state.
# +just_temporary+ — when true (default) no backup files are deleted.
#
# WARNING: drops and recreates the whole database before reloading it.
#
# Returns +true+; re-raises (from the ensure block) any error captured during the run.
#
# *Lock* *until* *is* *done*
def restore_db version_number, just_temporary=true
  lock # if the resource is locked, we skip to ensure block
  locked_by_this_method = true

  @tmp_files = []

  if just_temporary && version_number != :all
    puts "you want to restore just temporary: DO NOT start a backup BEFORE calling TinyBackup.restore_db(:all)\n"
  end

  # decide how many version files to merge on top of the origin zip
  version_files = Dir.glob("#{backup_folder}/#{version_prefix}*")
  if version_number == :all
    version_count = version_files.length
  else
    # keep only versions whose leading number is <= the requested version
    good_versions = version_files.find_all { |i| i.gsub("#{backup_folder}/#{version_prefix}", "").split("_").first.to_i <= version_number.to_i }
    version_count = good_versions.length
    # permanent restore: the newer versions are scheduled for deletion
    @tmp_files += version_files - good_versions if !just_temporary
  end

  tmp_origin_zip = compact_original version_count
  @tmp_files << "#{backup_folder}/#{tmp_origin_zip}"
  @tmp_files << "#{backup_folder}/schema_tmp.rb"

  # recreate an empty database with the same collation
  db_name = Rails.configuration.database_configuration[Rails.env]["database"]
  db_collation = ActiveRecord::Base.connection.collation
  ActiveRecord::Base.connection.drop_database db_name
  ActiveRecord::Base.connection.create_database db_name, collation: db_collation
  ActiveRecord::Base.connection.reconnect!

  # prepare the structure
  ZIPLIB.open("#{backup_folder}/#{tmp_origin_zip}") do |zf|
    zf.entries.each do |zf_entry|
      if zf_entry.name == "schema.rb"
        File.open("#{backup_folder}/schema_tmp.rb", "wb") { |f| f.write zf.read(zf_entry.name) }
        break
      end
    end
  end
  ActiveRecord::Schema.load("#{backup_folder}/schema_tmp.rb")

  # add the data
  ZIPLIB.open("#{backup_folder}/#{tmp_origin_zip}") do |zf|
    zf.entries.each do |zf_entry|
      next if zf_entry.name == "schema.rb"
      # each .csv entry: first line is the column header, the rest are rows
      table_rows = zf.read(zf_entry.name).split("\n")
      table_header = table_rows.shift
      table_name = zf_entry.name.split(".").first

      puts "-- insert_data(\"#{table_name}\")\n"
      t_benchmark = Benchmark.ms do
        # batch the INSERTs per_page rows at a time
        table_rows.in_groups_of(per_page, false) do |tr_group|
          ActiveRecord::Base.connection.execute insert_row(table_name, table_header, tr_group)
        end
      end
      puts " -> #{'%.4f' % (t_benchmark/1000)}s\n"
    end
  end

  # delete temporary files before method exit
rescue => e
  # remember the error; it is re-raised after cleanup in the ensure block.
  # NOTE(review): @method_error is never reset on entry, so a later successful
  # call could re-raise a stale error from a previous failed call — verify
  @method_error = e
ensure
  unlock if locked_by_this_method
  @tmp_files.each { |i| File.delete(i) rescue nil } if @tmp_files.present?
  raise @method_error if @method_error.present?
  return true
end
|
373
|
+
|
374
|
+
# YOU SHALL NOT PASS -----------------------------------------------------------------
|
375
|
+
private
|
376
|
+
|
377
|
+
# Returns a *string* of the query used to insert a row of data.
# +table_header+ is the CSV header line; its double quotes are converted to
# backticks so the column list is valid SQL.
def insert_row(table_name, table_header, table_rows)
  columns = table_header.gsub("\"", "`")
  values = table_rows.map { |row| "(#{row})" }.join(",")
  "INSERT INTO #{table_name} (#{columns}) VALUES #{values}"
end
|
382
|
+
|
383
|
+
# Merge X versions starting with the lowest +version_number+ into a temporary .zip file that looks like the original zip
#
# +versions+ — how many of the oldest .diff versions to merge, or :all.
#
# Returns a *string* that is the path of the temporary zip file
def compact_original versions
  # :all — any count >= the number of version files works, first(versions) caps it
  versions = Dir.glob("#{backup_folder}/*").length if versions == :all
  @tmp_files_compact ||= []
  tmp_filename = "tmp_#{zip_prefix}_#{Time.now.to_i}.zip"
  origin_zip = Dir.glob("#{backup_folder}/#{zip_prefix}*").first

  # add all files from original zip to a big hash
  zip_files = {}
  ZIPLIB.open(origin_zip) do |zf|
    zf.entries.each { |zf_entry| zip_files[zf_entry.name] = zf.read(zf_entry.name) }
  end

  # modify the hash on every version
  # NOTE(review): lexicographic sort — "ver_10..." orders before "ver_2...",
  # so the replay order may be wrong past 9 versions; verify
  Dir.glob("#{backup_folder}/#{version_prefix}*").sort.first(versions).each do |version_file|
    diff_hash = prepare_diff version_file

    zip_files.each do |k, v|
      # this entry is untouched by the current version
      next if diff_hash[k].nil?

      # entry was nil-ed out (table dropped) by a previous version: skip its hunk
      if zip_files[k].nil?
        diff_hash.delete k
        next
      end

      # a "< ...create_table..." line means the table was dropped; nil out its csv
      diff_hash[k].find_all { |i| i[0] == "<" && i.include?("create_table") }.each do |deleted_table|
        if zip_files["#{deleted_table.split("\"")[1]}.csv"].present?
          zip_files["#{deleted_table.split("\"")[1]}.csv"] = nil # using delete will result in a stack level too deep
        end
      end if k == "schema.rb"

      # apply this version's hunk to the entry content
      zip_files[k] = update_file v, diff_hash[k]

      diff_hash.delete k
    end

    # remaining hunks belong to files new in this version: build them from scratch
    diff_hash.each { |k, v| zip_files[k] = update_file("", v) }
  end

  # save the big hash
  ZIPLIB.open("#{backup_folder}/#{tmp_filename}", ZIPLIB::CREATE) do |f|
    zip_files.each do |k, v|
      f.get_output_stream(k) do |ff|
        ff.write v
        # old rubyzip leaves the stream object as a file on disk; clean it up below
        @tmp_files_compact << ff if ZIPOLD
      end if v != "\n"
    end
  end

  @tmp_files_compact.each { |i| File.delete i } if ZIPOLD
  tmp_filename
end
|
437
|
+
|
438
|
+
# Returns a *hash* of the parsed .diff file: keys are the file names each hunk
# applies to ("schema.rb", "users.csv", ...), values are arrays of raw diff lines.
#
# +diff_path+ — path of a .diff version file written by backup_now.
def prepare_diff diff_path
  diff_hash = {}

  current_key = nil
  File.read(diff_path).split("\n").each do |diff_line|
    # stdlib start_with? replaces the ActiveSupport-only starts_with? alias
    if diff_line.start_with? "***************"
      # hunk separator: close the current section
      current_key = nil
    elsif diff_line.start_with? "*** "
      # "*** <filename> " header opens a new section
      current_key = diff_line.gsub("*", "").strip
    elsif current_key && !current_key.empty?
      diff_hash[current_key] ||= []
      diff_hash[current_key] << diff_line
    end
  end

  diff_hash
end
|
456
|
+
|
457
|
+
# It happens if there are two files with the same date(after using the date_format)
#
# Returns a *string* that will fix the duplicated filename issue by appending an index
def dup_file filename
  return filename unless File.exist? filename

  # File.extname handles names with several dots ("a.b.zip") and paths whose
  # directories contain dots; the previous split(".") dropped everything
  # after the first dot and produced a broken extension
  ext = File.extname(filename)
  base = filename.chomp(ext)
  index = 1

  loop do
    candidate = "#{base}_#{index}#{ext}"
    return candidate unless File.exist? candidate
    index += 1
  end
end
|
471
|
+
|
472
|
+
# Returns a *string* of the updated file after applying the diff
#
# +file+ — the original file content as one string.
# +diff_lines+ — diff(1) "normal format" lines (e.g. "3a4", "> new line").
def update_file file, diff_lines
  file = file.split "\n"
  current_operation = nil
  # running correction: insertions/deletions already applied shift later line numbers
  offset = 0

  diff_lines.each_with_index do |diff_line, index|
    # an operation line is anything that is not a "<"/">" content line and
    # contains one of the a/c/d operation letters
    if diff_line[0] != "<" && diff_line[0] != ">" && (diff_line.include?("a") || diff_line.include?("c") || diff_line.include?("d"))
      # check linux diff output to understand this
      current_operation = diff_line

      if current_operation.include? "a"
        # Addition operation
        l, r = diff_operation current_operation, "a"

        # collect the following "> " lines; the local index copy is advanced,
        # the outer each_with_index still visits those lines but the guard
        # above skips them
        insert_lines = []
        loop do
          break if diff_lines[index].nil? || (diff_lines[index][0] != ">" && insert_lines.present?)
          insert_lines << diff_lines[index][2..-1] if diff_lines[index][0] == ">"
          index += 1
        end

        file.insert offset + l.first + 1, *insert_lines
        offset += [*insert_lines].length

      elsif current_operation.include? "c"
        # Changing operation
        # NOTE(review): t (the right-hand range) is never used
        f, t = diff_operation current_operation, "c"

        insert_lines = []
        loop do
          break if diff_lines[index].nil? || (diff_lines[index][0] != ">" && insert_lines.present?)
          insert_lines << diff_lines[index][2..-1] if diff_lines[index][0] == ">"
          index += 1
        end

        # replace the old range with the collected replacement lines
        f.last.times { file.delete_at offset + f.first }
        file.insert offset + f.first, *insert_lines
        offset += [*insert_lines].length
        offset -= f.last

      elsif current_operation.include? "d"
        # Deletion operation
        # NOTE(review): r is the LEFT range here despite the l/r naming; l is unused
        r, l = diff_operation current_operation, "d"

        r.last.times { file.delete_at offset + r.first }
        offset -= r.last
      end
    end
  end

  file = file.map { |i| i.to_s.force_encoding("ASCII-8BIT") } # I HATE ENCODING
  file.join("\n") + "\n"
end
|
526
|
+
|
527
|
+
# Returns a *pair* *of* *integer* ranges parsed from a diff operation line
# (e.g. "3,5c7" with sign "c" -> [[2, 3], [6, 1]]).
def diff_operation operation, sign
  left, right = operation.split(sign)
  [diff_range(left), diff_range(right)]
end
|
532
|
+
|
533
|
+
|
534
|
+
# Returns an *array* that contains the starting position of the change
# (zero-based) and how many operations it will take.
# "7" -> [6, 1]; "3,5" -> [2, 3].
def diff_range value
  return [value.to_i - 1, 1] unless value.include? ","

  low, high = value.split(",")
  [low.to_i - 1, high.to_i - low.to_i + 1]
end
|
543
|
+
|
544
|
+
# Two temporary files will be created, but deleted shortly after the operation is finished
#
# Returns an *array* that contains lines of the diff between +file1+ and +file2+
def diff_files file1, file2
  diff_filename = "#{backup_folder}/diff"

  File.open("#{diff_filename}1", "wb") { |f| f.write file1 }
  File.open("#{diff_filename}2", "wb") { |f| f.write file2 }

  diff_lines = []
  # block form closes the pipe and reaps the child process (the previous
  # non-block popen leaked the pipe); the array argument form bypasses the
  # shell, so folder names with spaces or metacharacters are safe
  IO.popen(["diff", "#{diff_filename}1", "#{diff_filename}2"]) do |pipe|
    pipe.each { |diff_line| diff_lines << diff_line }
  end
  # TODO: use something that doesn't depend on the operating system(that can work on Windows too)

  diff_lines
ensure
  # delete the scratch files even when the writes or the diff command fail
  File.delete("#{diff_filename}1") if File.exist?("#{diff_filename}1")
  File.delete("#{diff_filename}2") if File.exist?("#{diff_filename}2")
end
|
562
|
+
|
563
|
+
# Returns a *string* of the current file created by backup_now function:
# the origin .zip name when no zip exists yet, otherwise the next .diff
# version name, time-stamped with date_format.
def new_version_filename
  backup_folder_files = Dir.glob("#{backup_folder}/*").map { |i| i.gsub("#{backup_folder}/", "") }

  # no starting zip yet -> the next backup must create it
  # (start_with?/end_with? replace the ActiveSupport-only starts_with?/ends_with?)
  zip_file = backup_folder_files.find { |i| i.start_with?(zip_prefix) && i.end_with?("zip") }
  return "#{zip_prefix}_#{Time.now.strftime(date_format)}.zip" if zip_file.nil?

  version_files = backup_folder_files.find_all { |i| i.start_with?(version_prefix) && i.end_with?("diff") }
  return "#{version_prefix}1_#{Time.now.strftime(date_format)}.diff" if version_files.empty?

  # pick the highest version NUMERICALLY: the previous lexicographic sort put
  # "ver_10..." before "ver_2...", restarting the numbering after 9 versions
  last_version = version_files.map { |i| i.gsub(version_prefix, "").split("_").first.to_i }.max
  "#{version_prefix}#{last_version + 1}_#{Time.now.strftime(date_format)}.diff"
end
|
576
|
+
|
577
|
+
# Returns a *string* of parsed csv row to be inserted into database:
# NULL for nil, a quoted DB-recognized timestamp for date/time values,
# numbers as-is and everything else via inspect (quoted/escaped).
def add_query values
  formatted = values.map do |val|
    case val
    when nil
      "NULL"
    when Date, DateTime, Time
      "\"#{val.strftime('%Y-%m-%d %H:%M:%S')}\"" # this should be a DB recognized format
    when Integer, Float, BigDecimal
      val
    else
      val.inspect
    end
  end
  formatted.join(",") + "\n"
end
|
591
|
+
|
592
|
+
# Lock the library operations and stop the ActiveRecord logger.
# Raises when another operation already holds the lock; otherwise writes the
# caller backtrace into the lock file for debugging.
def lock
  @logger = ActiveRecord::Base.logger
  ActiveRecord::Base.logger = nil

  lock_path = "#{backup_folder}/.lock"
  if File.exist?(lock_path)
    raise "Another operation is running. More info you can find in the '#{lock_path}' file"
  end

  File.open(lock_path, "wb") do |f|
    f.write caller.join("\n") + "\n"
  end
end
|
605
|
+
|
606
|
+
# Unlock the library operations and restart ActiveRecord logger
def unlock
  ActiveRecord::Base.logger = @logger

  lock_path = "#{backup_folder}/.lock"
  File.delete(lock_path) if File.exist?(lock_path)
end
|
612
|
+
end
|
613
|
+
include TinyBackup
|
metadata
ADDED
@@ -0,0 +1,47 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: tiny_backup
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.0.0
|
5
|
+
prerelease:
|
6
|
+
platform: ruby
|
7
|
+
authors:
|
8
|
+
- Razvan Pavel
|
9
|
+
autorequire:
|
10
|
+
bindir: bin
|
11
|
+
cert_chain: []
|
12
|
+
date: 2015-12-02 00:00:00.000000000 Z
|
13
|
+
dependencies: []
|
14
|
+
description: Backup your database on S3 or local, optimized for small disk space.
|
15
|
+
The backup will have a starting zip with your data and diff files for each new backup
|
16
|
+
email: pavelrazvan92@gmail.com
|
17
|
+
executables: []
|
18
|
+
extensions: []
|
19
|
+
extra_rdoc_files: []
|
20
|
+
files:
|
21
|
+
- lib/tiny_backup.rb
|
22
|
+
homepage: http://rubygems.org/gems/tiny_backup
|
23
|
+
licenses:
|
24
|
+
- MIT
|
25
|
+
post_install_message:
|
26
|
+
rdoc_options: []
|
27
|
+
require_paths:
|
28
|
+
- lib
|
29
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
30
|
+
none: false
|
31
|
+
requirements:
|
32
|
+
- - ! '>='
|
33
|
+
- !ruby/object:Gem::Version
|
34
|
+
version: '0'
|
35
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
36
|
+
none: false
|
37
|
+
requirements:
|
38
|
+
- - ! '>='
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
version: '0'
|
41
|
+
requirements: []
|
42
|
+
rubyforge_project:
|
43
|
+
rubygems_version: 1.8.23
|
44
|
+
signing_key:
|
45
|
+
specification_version: 3
|
46
|
+
summary: Backup your database on S3 or local, optimized for small disk space
|
47
|
+
test_files: []
|