s3backup 0.7.3 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/History.txt +6 -0
- data/README.rdoc +2 -1
- data/Rakefile +1 -1
- data/backup.yml +1 -1
- data/lib/s3backup/manager.rb +115 -92
- data/lib/s3backup/restore.rb +1 -1
- data/lib/s3backup/s3wrapper.rb +1 -0
- data/lib/s3backup/tree_info.rb +213 -129
- data/lib/s3backup.rb +1 -1
- metadata +12 -2
data/History.txt
CHANGED
@@ -41,3 +41,9 @@
 === 0.7.2 2010-01-29
 * mod bug if directory name is started '-'
 * dereese use memory
+
+=== 0.8.1 2010-02-11
+* mod bug if directory name is same , that can't restore(because it was not backuped).
+* change memory to sqlite3.
+* add config temporary. temporary is directory for temporary file
+* resume is mandatory.
data/README.rdoc
CHANGED
@@ -28,7 +28,8 @@ To use remotebackup,you should prepare backup configuretion file by yaml such be
 proxy_user: login name for proxy server if you use proxy.
 proxy_password: login password for proxy server if you use proxy.
 log_level: 'output log level. value is debug or info or warn or error(optional default info)'
-
+temporary: 'temporary directory path. default(/tmp)
+
 *If directories isn't specified when restore, it restores all directories in bucket.
 
 == COMMAND:
data/Rakefile
CHANGED
@@ -14,7 +14,7 @@ $hoe = Hoe.spec 's3backup' do
   self.developer 'Takeshi Morita', 'morita@ibrains.co.jp'
   self.post_install_message = 'PostInstall.txt' # TODO remove if post-install message not required
   self.rubyforge_name = self.name # TODO this is default value
-  self.extra_deps = [['aws-s3','>= 0.6.2']]
+  self.extra_deps = [['aws-s3','>= 0.6.2'],['sqlite3',">= 0.0.7"]]
 
 end
 
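
Hoe expands each extra_deps pair into a runtime dependency of the generated gemspec, which is what the metadata diff at the bottom of this page records. As a rough sketch of a hand-written equivalent (illustration only; the package actually generates its gemspec through Hoe):

    Gem::Specification.new do |s|
      s.name    = 's3backup'
      s.version = '0.8.1'
      # The two runtime dependencies declared via self.extra_deps above.
      s.add_runtime_dependency 'aws-s3',  '>= 0.6.2'
      s.add_runtime_dependency 'sqlite3', '>= 0.0.7'
    end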
data/backup.yml
CHANGED
@@ -13,4 +13,4 @@ proxy_port: 'port of proxy server(optional)'
 proxy_user: 'user name of proxy server(optional)'
 proxy_password: 'password of proxy server(optional)'
 log_level: 'output log level. value is debug or info or warn or error(optional default info)'
-
+temporary: 'temporary directory path. default(/tmp)
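
For illustration, a filled-in configuration using the new key could look like the sketch below. Only keys visible in this diff are used, and every value is a placeholder rather than a shipped default (except /tmp, the fallback manager.rb applies when temporary is unset):

    log_level: 'info'
    proxy_user: 'proxyuser'
    proxy_password: 'proxypass'
    temporary: '/var/tmp/s3backup'
    resume: true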
data/lib/s3backup/manager.rb
CHANGED
@@ -1,5 +1,5 @@
 require 'cgi'
-require '
+require 'time'
 require 'fileutils'
 require 's3backup/s3log'
 require 's3backup/tree_info'
@@ -22,6 +22,7 @@ module S3backup
     def initialize(target,config)
       @target = target
       @resume = false
+      @temporary = "/tmp"
       set_config(config)
     end
     def set_config(config)
@@ -49,23 +50,36 @@ module S3backup
       else
         @buf_size = DEFAULT_BUF_READ_SIZE
       end
-      if config["
-
-        @buf_size = config["buffer_size"].to_i
-      else
-        @buf_size = config["buffer_size"]
-      end
-      if @buf_size > 1000*1000*1000*5
-        S3log.error("buffer_size must be less than 5G\n")
-        exit(-1)
-      end
-      else
-        @buf_size = DEFAULT_BUF_READ_SIZE
+      if config["temporary"]
+        @temporary = config["temporary"]
       end
       if config["resume"] == true
         @resume = true
       end
     end
+    def to_gz(file,remain=false)
+      if remain
+        cmd = "(cd #{shell_name(File.dirname(file))};gzip -c #{shell_name(file)} > #{shell_name(file)}.gz " +
+              "2>/dev/null)"
+      else
+        cmd = "(cd #{shell_name(File.dirname(file))};gzip #{shell_name(file)} > /dev/null 2>&1)"
+      end
+      S3log.debug(cmd)
+      system(cmd)
+      unless $?.success?
+        S3log.error("feiled #{cmd} execute. #{$?.inspect}")
+        exit(-1)
+      end
+    end
+    def from_gz(file)
+      cmd = "(cd #{shell_name(File.dirname(file))};gunzip #{shell_name(file)} > /dev/null 2>&1)"
+      S3log.debug(cmd)
+      system(cmd)
+      unless $?.success?
+        S3log.error("feiled #{cmd} execute. #{$?.inspect}")
+        exit(-1)
+      end
+    end
     # Compress the given directory into tar-gzip format
     def to_tgz(path,dir)
       # Exclude subdirectories from compression.
@@ -75,9 +89,12 @@ module S3backup
         sub_dir.push(file) if File.directory?(dir+"/"+file)
       end
       exclude = ""
-
-
-
+      if sub_dir.length != 0
+        exclude = " --exclude=#{shell_name(File.basename(dir))}/" + sub_dir.map{|d| shell_name(d)}.join(
+                  " --exclude=#{shell_name(File.basename(dir))}/")
+      end
+      cmd = "(cd #{shell_name(File.dirname(dir))};tar -czvf #{shell_name(path)} #{exclude} -- " +
+            "#{shell_name(File.basename(dir))} > /dev/null 2>&1)"
       S3log.debug(cmd)
       system(cmd)
       unless $?.success?
@@ -94,38 +111,33 @@ module S3backup
         exit(-1)
       end
     end
-    def get_chain(key)
+    def get_chain(key,path)
       data = nil
-      data_set = nil
       i=1
       if @aes
         key = @aes.encrypt(key)
       end
-
-
-
-
-
-
-
-
-
-
-
+      File.open(path,"w") do |f|
+        while 1
+          key_name = i.to_s()+"_"+key
+          data = @target.get(key_name)
+          if data == nil
+            break
+          end
+          if @aes
+            data = @aes.decrypt(data)
+          end
+          f.write(data)
+          i+=1
         end
-        data_set += data
-        i+=1
       end
-      return data_set
     end
     def get_directory(dir,out_dir)
-
-
-      tmp.write(data)
-      tmp.close
+      file_name = @temporary + "/"+ CGI.escape(dir)
+      get_chain(dir,file_name + ".tgz")
       # Expand the tgz file into cur_dir
-      from_tgz(
-
+      from_tgz(file_name + ".tgz",out_dir)
+      #File.unlink(file_name)
     end
     def get_directories(dirs,prefix,output_dir)
       prefix_len = prefix.length
@@ -138,10 +150,9 @@ module S3backup
       end
     end
     def store_directory(dir)
-
-      tmp.close
+      tmp_file = @temporary + "/bk_" + CGI.escape(dir)
       # Create the tgz file at tmp.path
-      to_tgz(
+      to_tgz(tmp_file,dir)
       # Store the compressed data in S3, keyed by the directory's absolute path
       i=1
       key = nil
@@ -156,20 +167,21 @@ module S3backup
         @target.delete(cnt.to_s() + "_" + key)
         cnt+=1
       end
-
-
-
-
-
-
-
+      File.open(tmp_file,"r") do |f|
+        begin
+          while 1
+            key_name = i.to_s()+"_"+key
+            data = f.readpartial(@buf_size)
+            if @aes
+              data = @aes.encrypt(data)
+            end
+            @target.post(key_name,data)
+            i+=1
           end
-
-          i+=1
+        rescue EOFError
         end
-      rescue EOFError
       end
-
+      File.unlink(tmp_file)
     end
     def delete_direcory(dir)
       if @aes
@@ -182,41 +194,56 @@ module S3backup
     end
     def differential_copy(dir)
       # Compare against the current file tree
-      tree_info = TreeInfo.new(dir
-
-
-      tree_data = nil
+      tree_info = TreeInfo.new({:format=>:directory,:directory=>dir,:db=>@temporary + "/new_" +
+        Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
+      target_db_name = dir+".gz"
       # Fetch the previous file tree
-
-
-
-
-
-
-
-
-
+      data = @target.get(target_db_name)
+      old_tree = nil
+      if data
+        db_name = @temporary + "/old_" + Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"
+        File.open(db_name + ".gz","w") do |f|
+          f.write(data)
+        end
+        from_gz(db_name + ".gz")
+        old_tree = TreeInfo.new({:format=>:database,:db=>db_name})
       else
-
-
+        target_tree_name = "tree_"+dir+".yml"
+        # Convert if the stored tree is in the previous format
+        data = @target.get(target_tree_name)
+        if data
+          old_tree = TreeInfo.new({:format=>:yaml,:data=>data,:db=>@temporary + "/old_" +
+            Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
+        else
+          old_tree = TreeInfo.new({:db=>@temporary + "/old_" +
+            Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
+        end
       end
-
-
-
-
-
-
-
-
+      data = nil;
+      GC.start
+      cnt=0
+      # Compare the previous and current file trees
+      tree_info.modify(old_tree) do |dir_info|
+        cnt+=1
+        S3log.debug("diff_info=#{dir_info[:name]}")
+        # Upload each updated directory
+        store_directory(dir_info[:name])
+        # Refresh this directory's file entries in the previous tree info
+        old_dir_map = old_tree.update_dir(dir_info)
+        if cnt != 0 and cnt % 10 == 0
           # Upload the updated tree info (so the run can be resumed even if it fails midway.)
-
+          to_gz(old_tree.db_name,true)
+          @target.post(target_db_name,File.read(old_tree.db_name + ".gz"))
         end
       end
-
-      delete_direcory(
+      tree_info.remove(old_tree) do |dir_info|
+        delete_direcory(dir_info[:name])
       end
       # Register the current file tree in AWS S3
-
+      to_gz(tree_info.db_name)
+      @target.post(target_db_name,File.read(tree_info.db_name + ".gz"))
+      tree_info.close(true)
+      old_tree.close(true)
     end
     def get_target_tree(dir)
       base_dir = dir
@@ -228,8 +255,8 @@ module S3backup
         if base == before_base
           break
         end
-
-        tree_data = @target.get(
+        tree_db_name = base+".gz"
+        tree_data = @target.get(tree_db_name)
         if tree_data
           break
         end
@@ -239,16 +266,12 @@ module S3backup
       unless tree_data
         return nil
       end
-
-
-
-      files = @target.find(/^tree_.*\.yml/)
-      dirs = files.map do |d|
-        m=/tree_(.*)\.yml/.match(d)
-        next nil unless m
-        m[1]
+      db_name = @temporary + "/" + Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"
+      File.open(db_name + ".gz","w") do |f|
+        f.write(tree_data)
       end
-
+      from_gz(db_name + ".gz")
+      return TreeInfo.new({:format=>:database,:db=>db_name})
     end
     def expand_tree(dir,tree_info,output_dir)
       now = Time.new
@@ -267,7 +290,7 @@ module S3backup
           dir_len = k.length
           relative_path = k.slice(top_dir_len,dir_len - top_dir_len)
           dir = output_dir + relative_path
-          File.utime(now,v[:mtime],dir)
+          File.utime(now,Time.parse(v[:mtime]),dir)
         end
       }
     end
@@ -278,7 +301,7 @@ module S3backup
         return
       end
       expand_tree(dir,tree,output_dir)
-
+      tree.close(true)
     end
   end
 end
data/lib/s3backup/restore.rb
CHANGED
data/lib/s3backup/s3wrapper.rb
CHANGED
data/lib/s3backup/tree_info.rb
CHANGED
@@ -1,22 +1,112 @@
 require 'yaml'
+require 'sqlite3'
 module S3backup
   class TreeInfo
-    attr_reader :
-    def
-
-
-
-
-
-
-
-
-
-
-
+    attr_reader :db_name,:db
+    def make_table
+      sql = "create table directory ( id INTEGER PRIMARY KEY ,name varchar(2048), mtime integer, parent_directory_id integer)"
+      @db.execute(sql)
+      sql = "create table file ( name varchar(2048), size integer, mtime integer,directory_id integer)"
+      @db.execute(sql)
+      sql = "create table symlink ( name varchar(2048), source varchar(2048),directory_id integer)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_directory_name ON directory(name)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_directory_parent_directory_id ON directory(parent_directory_id)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_file_directory_id ON file(directory_id)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_file_name ON file(name)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_symlink_name ON symlink(name)"
+      @db.execute(sql)
+      sql = "CREATE INDEX idx_symlink_directory_id ON symlink(directory_id)"
+      @db.execute(sql)
+    end
+    def check_dirs(p_id,p_name)
+      @db.execute('select id,name from directory where parent_directory_id = ?',p_id) do |row|
+        id = row[0]
+        name = row[1]
+        if File.basename(name) == File.basename(p_name)
+          sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
+          @db.execute(sql,:name=>"zetteiarienainamae#{id}",:size=>0,:mtime =>0,:directory_id=>p_id)
+        end
+        check_dirs(id,name)
+      end
+    end
+    def convert_yaml_to_sqlite3(file_map)
+      file_map[:directory].keys().sort{|a,b| a<=>b}.each do |key|
+        file_at = file_map[:directory][key]
+        sql = "insert into directory(name,mtime) values (:name, :mtime)"
+        @db.execute(sql,:name=>key,:mtime => file_at[:mtime] )
+      end
+      @db.execute('select id,name from directory' ) do |row|
+        dir_id = row[0].to_i
+        parent = File.dirname(row[1])
+        @db.execute('select id from directory where name =?',parent ) do |row|
+          @db.execute("update directory set parent_directory_id = #{row[0]} where id = #{dir_id}")
+        end
+      end
+      #for bug (same name directory was not backuped before)
+      @db.execute('select id,name from directory order by id limit 1') do |row|
+        p_id = row[0]
+        name = row[1]
+        check_dirs(p_id,name)
+      end
+      file_map[:file].each do |key,val|
+        file_at = file_map[:file][key]
+        dir_name = File.dirname(key)
+        dir_id = nil
+        @db.execute('select id from directory where name=?',dir_name ) do |row|
+          # row is the result array
+          dir_id = row[0].to_i
+        end
+        unless dir_id
+          STDERR.print "directory name isn't exist ignore #{dir_name}"
+          next
+        end
+        sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
+        @db.execute(sql,:name=>key,:size=>file_at[:size],:mtime => file_at[:date], :directory_id=>dir_id)
+      end
+      file_map[:symlink].each do |key,val|
+        file_at = file_map[:symlink][key]
+        dir_name = File.dirname(key)
+        sql="select id from directory where name = :name"
+        dir_id = nil
+        @db.execute('select id from directory where name=?',dir_name ) do |row|
+          # row is the result array
+          dir_id = row[0].to_i
+        end
+        unless dir_id
+          STDERR.print "directory name isn't exist ignore #{dir_name}"
+          next
+        end
+        sql = "insert into symlink(name,source,directory_id) values (:name, :source,:directory_id)"
+        @db.execute(sql,:name=>key,:source=>file_at[:source],:directory_id=>dir_id)
+      end
+    end
+    def initialize(opt)
+      @db_name = opt[:db]
+      @db = SQLite3::Database.new(opt[:db])
+      if opt[:format].nil?
+        make_table
+      elsif opt[:format] == :directory
+        make_table
+        stat = File.stat(opt[:directory])
+        sql = "insert into directory(name,mtime) values (:name, :mtime)"
+        @db.execute(sql,:name=>opt[:directory],:mtime =>stat.mtime)
+        dir_id = nil
+        @db.execute('select id from directory where name=?',opt[:directory]) do |row|
+          # row is the result array
+          dir_id = row[0].to_i
+        end
+        makeFileMap(opt[:directory],dir_id)
+      elsif opt[:format] == :yaml
+        make_table
+        convert_yaml_to_sqlite3(YAML.load(opt[:data]))
       end
     end
-    def makeFileMap(dir)
+    def makeFileMap(dir,id)
       Dir.entries(dir).each do |e|
         if e == "." or e == ".."
           next
@@ -24,140 +114,134 @@ module S3backup
         name = dir + "/" + e
         if File.directory?(name)
           stat = File.stat(name)
-
-
+          sql = "insert into directory(name,mtime,parent_directory_id) values (:name, :mtime,:parent_directory_id)"
+          @db.execute(sql,:name=>name,:mtime =>stat.mtime,:parent_directory_id=>id)
+          dir_id = nil
+          @db.execute('select id from directory where name=?',name) do |row|
+            # row is the result array
+            dir_id = row[0].to_i
+          end
+          makeFileMap(name,dir_id)
         elsif File.symlink?(name)
-
+          sql = "insert into symlink(name,source,directory_id) values (:name, :source,:directory_id)"
+          @db.execute(sql,:name=>name,:source=>File.readlink(name),:directory_id=>id)
         else
           stat = File.stat(name)
-
-
-      end
-    end
-    def make_dir_map
-      dir_map = {};
-      @fileMap[:directory].each do |k,v|
-        dir_map[k] = {:mtime => v[:mtime],:file=>{},:symlink=>{}}
-      end
-      @fileMap[:file].each do |k,v|
-        target = dir_map[File.dirname(k)]
-        # Inconsistent, but build one anyway
-        unless target
-          S3log.warn("Tree Data isn't correct.")
-          target = {:mtime => DateTime.now.to_s,:file=>{},:symlink=>{}}
-          dir_map[File.dirname(k)] = target
-        end
-        target[:file][k] = {:size => v[:size], :date => v[:date]}
-      end
-      @fileMap[:symlink].each do |k,v|
-        target = dir_map[File.dirname(k)]
-        # Inconsistent, but build one anyway
-        unless target
-          S3log.warn("Tree Data isn't correct.")
-          target = {:mtime => DateTime.now.to_s,:file=>{},:symlink=>{}}
-          dir_map[File.dirname(k)] = target
+          sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
+          @db.execute(sql,:name=>name,:size=>stat.size,:mtime => stat.mtime, :directory_id=>id)
         end
-        target[:symlink][k] = {:source => v[:source]}
       end
-      return dir_map;
     end
-    def update_dir(
-
-
-
-
-
-
+    def update_dir(dir_info)
+      result = @db.execute("select id from directory where name = ?",dir_info[:name])
+      p_id = nil
+      id = nil
+      @db.execute('select id from directory where name =?',File.dirname(dir_info[:name])) do |row|
+        p_id = row[0]
+      end
+      if result.length != 0
+        id = result[0][0]
+        @db.execute("delete from file where directory_id = #{id}")
+        @db.execute("delete from symlink where directory_id = #{id}")
+
+        @db.execute("update directory set mtime = ?,parent_directory_id = ?" +
+          " where id = ?",dir_info[:mtime],p_id,id)
+      else
+        @db.execute("insert into directory(name,mtime,parent_directory_id) values(?,?,?)",
+          dir_info[:name],dir_info[:mtime],p_id)
+        result = @db.execute("select id from directory where name = ?",dir_info[:name])
+        id = result[0][0]
       end
-
-
-
+      dir_info[:files].each do |f|
+        @db.execute("insert into file(name,mtime,size,directory_id) values(?,?,?,?)",
+          f[:name],f[:mtime],f[:size],id)
       end
-      dir_info[:
-      @
+      dir_info[:links].each do |f|
+        @db.execute("insert into symlink(name,source,directory_id) values(?,?,?)",f[:name],f[:source],id)
       end
-      dir_map[name][:file] = dir_info[:file]
-      dir_map[name][:symlink] = dir_info[:symlink]
-      dir_map[name][:mtime] = dir_info[:mtime]
-      return dir_map
-    end
-    def load_yaml(data)
-      @fileMap = YAML.load(data)
     end
-    def
-
-
-
-
-      tree = []
-      @fileMap[:directory].each do |k,v|
-        if k.index(dir) != 0
-          next
-        end
-        level = k.count("/") - count
+    def get_level_directory(tree,p_id,level)
+      @db.execute('select id,name,mtime from directory where parent_directory_id = ?',p_id) do |row|
+        id = row[0]
+        name = row[1]
+        mtime = row[2]
         tree[level] = {} unless tree[level]
-        tree[level][
+        tree[level][name] = {:mtime=>mtime}
+        get_level_directory(tree,id,level+1)
       end
+    end
+    def hierarchie(dir)
+      tree=[]
+      result = @db.execute('select id,name,mtime from directory where name = ?',dir)
+      if result.length == 0
+        S3log.error("#{dir} is not stored.")
+        exit(-1)
+      end
+      id = result[0][0]
+      name = result[0][1]
+      mtime = result[0][2]
+      tree[0] = {}
+      tree[0][name]={:mtime=>mtime}
+      get_level_directory(tree,id,1)
       return tree
     end
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      new_info.each do |k,v|
-        next unless old_info[k]
-        if old_info[k][:date] != v[:date] or old_info[k][:size] != v[:size]
-          modify_files.push(k)
-          dir = File.dirname(k)
-          modify_dir_map[dir] = true
+    def modify(target)
+      now_id = 0
+      while 1
+        result = @db.execute("select id,name,mtime from directory where id > ? limit 1",now_id)
+        break if result.length == 0
+        now_id = result[0][0].to_i
+        name = result[0][1]
+        mtime = result[0][2]
+        files = []
+        links = []
+        t_result = target.db.execute("select id,name from directory where name = ?",name)
+        if t_result.length != 0
+          t_id = t_result[0][0]
+          t_files = target.db.execute("select name,size,mtime from file where directory_id = ? order by name",t_id)
+          files = @db.execute("select name,size,mtime from file where directory_id = ? order by name",now_id)
+          if t_files == files
+            t_links = target.db.execute("select name,source from symlink where directory_id = ? order by name",t_id)
+            links = @db.execute("select name,source from symlink where directory_id = ? order by name",now_id)
+            if t_links == links
+              next
+            end
+          end
         end
+        file_infos = []
+        files.each do |f|
+          file_infos.push({:name=>f[0],:size=>f[1],:mtime=>f[2]})
+        end
+        sym_infos = []
+        links.each do |l|
+          sym_infos.push({:name=>f[0],:source=>f[1]})
+        end
+        yield({:name => name,:mtime=>mtime,:files => file_infos ,:links => sym_infos})
       end
-
-
-
-
-
-
-
-
-
-
-
-
-          dir = File.dirname(f)
-          modify_dir_map[dir] = true
+    end
+    def remove(target)
+      now_id = 0
+      while 1
+        t_result = target.db.execute("select id,name from directory where id > ? limit 1",now_id)
+        break if t_result.length == 0
+        now_id = t_result[0][0].to_i
+        name = t_result[0][1]
+        result = @db.execute("select id,name from directory where name = ?",name)
+        if result.length == 0
+          yield({:name => name})
+        end
       end
-
-
-
-
-
-
-
+    end
+    def close(delete=false)
+      @db.close
+      if delete
+        if File.exist?(@db_name)
+          File.unlink(@db_name)
+        end
+        if File.exist?(@db_name + ".gz")
+          File.unlink(@db_name+".gz")
         end
       end
-      return {
-        :directory => {:add => add_dirs,:modify => modify_dir_map.keys - add_dirs - remove_dirs,:remove => remove_dirs},
-        :file => {:add => add_files,:modify => modify_files,:remove => remove_files},
-        :symlink => {:add => add_links,:modify => modify_links,:remove => remove_links}}
     end
   end
 end
data/lib/s3backup.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: s3backup
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.8.1
 platform: ruby
 authors:
 - Takeshi Morita
@@ -9,7 +9,7 @@ autorequire:
 bindir: bin
 cert_chain: []
 
-date: 2010-
+date: 2010-02-11 00:00:00 +09:00
 default_executable:
 dependencies:
 - !ruby/object:Gem::Dependency
@@ -22,6 +22,16 @@ dependencies:
       - !ruby/object:Gem::Version
         version: 0.6.2
     version:
+- !ruby/object:Gem::Dependency
+  name: sqlite3
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.0.7
+    version:
 - !ruby/object:Gem::Dependency
   name: rubyforge
   type: :development