s3backup 0.7.3 → 0.8.1

Sign up to get free protection for your applications and to get access to all the features.
data/History.txt CHANGED
@@ -41,3 +41,9 @@
41
41
  === 0.7.2 2010-01-29
42
42
  * fix bug when a directory name starts with '-'
43
43
  * decrease memory usage
44
+
45
+ === 0.8.1 2010-02-11
46
+ * fix bug: directories with the same name could not be restored (because they were never backed up).
47
+ * change memory to sqlite3.
48
+ * add config option 'temporary': the directory used for temporary files
49
+ * resume is mandatory.
data/README.rdoc CHANGED
@@ -28,7 +28,8 @@ To use remotebackup,you should prepare backup configuretion file by yaml such be
28
28
  proxy_user: login name for proxy server if you use proxy.
29
29
  proxy_password: login password for proxy server if you use proxy.
30
30
  log_level: 'output log level. value is debug or info or warn or error(optional default info)'
31
- resume: 'if set true,when Ctrl-c or another program stop, next run program resume before backup. default false. caution!! this option use huge memory'
31
+ temporary: 'temporary directory path. default(/tmp)'
32
+
32
33
  *If directories isn't specified when restore, it restores all directories in bucket.
33
34
 
34
35
  == COMMAND:
data/Rakefile CHANGED
@@ -14,7 +14,7 @@ $hoe = Hoe.spec 's3backup' do
14
14
  self.developer 'Takeshi Morita', 'morita@ibrains.co.jp'
15
15
  self.post_install_message = 'PostInstall.txt' # TODO remove if post-install message not required
16
16
  self.rubyforge_name = self.name # TODO this is default value
17
- self.extra_deps = [['aws-s3','>= 0.6.2']]
17
+ self.extra_deps = [['aws-s3','>= 0.6.2'],['sqlite3',">= 0.0.7"]]
18
18
 
19
19
  end
20
20
 
data/backup.yml CHANGED
@@ -13,4 +13,4 @@ proxy_port: 'port of proxy server(optional)'
13
13
  proxy_user: 'user name of proxy server(optional)'
14
14
  proxy_password: 'password of proxy server(optional)'
15
15
  log_level: 'output log level. value is debug or info or warn or error(optional default info)'
16
- resume: 'if set true,when Ctrl-c or another program stop, next run program resume before backup. default false. caution!! this option use huge memory'
16
+ temporary: 'temporary directory path. default(/tmp)'
@@ -1,5 +1,5 @@
1
1
  require 'cgi'
2
- require 'tempfile'
2
+ require 'time'
3
3
  require 'fileutils'
4
4
  require 's3backup/s3log'
5
5
  require 's3backup/tree_info'
@@ -22,6 +22,7 @@ module S3backup
22
22
  def initialize(target,config)
23
23
  @target = target
24
24
  @resume = false
25
+ @temporary = "/tmp"
25
26
  set_config(config)
26
27
  end
27
28
  def set_config(config)
@@ -49,23 +50,36 @@ module S3backup
49
50
  else
50
51
  @buf_size = DEFAULT_BUF_READ_SIZE
51
52
  end
52
- if config["buffer_size"]
53
- if config["buffer_size"].class == String
54
- @buf_size = config["buffer_size"].to_i
55
- else
56
- @buf_size = config["buffer_size"]
57
- end
58
- if @buf_size > 1000*1000*1000*5
59
- S3log.error("buffer_size must be less than 5G\n")
60
- exit(-1)
61
- end
62
- else
63
- @buf_size = DEFAULT_BUF_READ_SIZE
53
+ if config["temporary"]
54
+ @temporary = config["temporary"]
64
55
  end
65
56
  if config["resume"] == true
66
57
  @resume = true
67
58
  end
68
59
  end
60
+ def to_gz(file,remain=false)
61
+ if remain
62
+ cmd = "(cd #{shell_name(File.dirname(file))};gzip -c #{shell_name(file)} > #{shell_name(file)}.gz " +
63
+ "2>/dev/null)"
64
+ else
65
+ cmd = "(cd #{shell_name(File.dirname(file))};gzip #{shell_name(file)} > /dev/null 2>&1)"
66
+ end
67
+ S3log.debug(cmd)
68
+ system(cmd)
69
+ unless $?.success?
70
+ S3log.error("failed to execute #{cmd}. #{$?.inspect}")
71
+ exit(-1)
72
+ end
73
+ end
74
+ def from_gz(file)
75
+ cmd = "(cd #{shell_name(File.dirname(file))};gunzip #{shell_name(file)} > /dev/null 2>&1)"
76
+ S3log.debug(cmd)
77
+ system(cmd)
78
+ unless $?.success?
79
+ S3log.error("failed to execute #{cmd}. #{$?.inspect}")
80
+ exit(-1)
81
+ end
82
+ end
69
83
  #指定されたディレクトリをtar gzip形式で圧縮する
70
84
  def to_tgz(path,dir)
71
85
  #サブディレクトリを圧縮の対象外にする。
@@ -75,9 +89,12 @@ module S3backup
75
89
  sub_dir.push(file) if File.directory?(dir+"/"+file)
76
90
  end
77
91
  exclude = ""
78
- exclude = exclude + " --exclude=" + sub_dir.map{|d| shell_name(d)}.join(" --exclude=") if sub_dir.length != 0
79
- cmd = "(cd #{shell_name(File.dirname(dir))};tar -czvf #{shell_name(path)} #{exclude} -- #{shell_name(File.basename(dir))}" +
80
- " > /dev/null 2>&1)"
92
+ if sub_dir.length != 0
93
+ exclude = " --exclude=#{shell_name(File.basename(dir))}/" + sub_dir.map{|d| shell_name(d)}.join(
94
+ " --exclude=#{shell_name(File.basename(dir))}/")
95
+ end
96
+ cmd = "(cd #{shell_name(File.dirname(dir))};tar -czvf #{shell_name(path)} #{exclude} -- " +
97
+ "#{shell_name(File.basename(dir))} > /dev/null 2>&1)"
81
98
  S3log.debug(cmd)
82
99
  system(cmd)
83
100
  unless $?.success?
@@ -94,38 +111,33 @@ module S3backup
94
111
  exit(-1)
95
112
  end
96
113
  end
97
- def get_chain(key)
114
+ def get_chain(key,path)
98
115
  data = nil
99
- data_set = nil
100
116
  i=1
101
117
  if @aes
102
118
  key = @aes.encrypt(key)
103
119
  end
104
- while 1
105
- key_name = i.to_s()+"_"+key
106
- data = @target.get(key_name)
107
- if data == nil
108
- break
109
- end
110
- if i==1
111
- data_set = ''
112
- end
113
- if @aes
114
- data = @aes.decrypt(data)
120
+ File.open(path,"w") do |f|
121
+ while 1
122
+ key_name = i.to_s()+"_"+key
123
+ data = @target.get(key_name)
124
+ if data == nil
125
+ break
126
+ end
127
+ if @aes
128
+ data = @aes.decrypt(data)
129
+ end
130
+ f.write(data)
131
+ i+=1
115
132
  end
116
- data_set += data
117
- i+=1
118
133
  end
119
- return data_set
120
134
  end
121
135
  def get_directory(dir,out_dir)
122
- data = get_chain(dir)
123
- tmp = Tempfile.open("s3backup")
124
- tmp.write(data)
125
- tmp.close
136
+ file_name = @temporary + "/"+ CGI.escape(dir)
137
+ get_chain(dir,file_name + ".tgz")
126
138
  #tgzのファイルをcur_dirに展開
127
- from_tgz(tmp.path,out_dir)
128
- tmp.close(true)
139
+ from_tgz(file_name + ".tgz",out_dir)
140
+ #File.unlink(file_name)
129
141
  end
130
142
  def get_directories(dirs,prefix,output_dir)
131
143
  prefix_len = prefix.length
@@ -138,10 +150,9 @@ module S3backup
138
150
  end
139
151
  end
140
152
  def store_directory(dir)
141
- tmp = Tempfile.open("s3backup")
142
- tmp.close
153
+ tmp_file = @temporary + "/bk_" + CGI.escape(dir)
143
154
  #tgzのファイルをtmp.pathに作成
144
- to_tgz(tmp.path,dir)
155
+ to_tgz(tmp_file,dir)
145
156
  #S3にディレクトリの絶対パスをキーにして、圧縮したデータをストア
146
157
  i=1
147
158
  key = nil
@@ -156,20 +167,21 @@ module S3backup
156
167
  @target.delete(cnt.to_s() + "_" + key)
157
168
  cnt+=1
158
169
  end
159
- f = File.open(tmp.path,"r")
160
- begin
161
- while 1
162
- key_name = i.to_s()+"_"+key
163
- data = f.readpartial(@buf_size)
164
- if @aes
165
- data = @aes.encrypt(data)
170
+ File.open(tmp_file,"r") do |f|
171
+ begin
172
+ while 1
173
+ key_name = i.to_s()+"_"+key
174
+ data = f.readpartial(@buf_size)
175
+ if @aes
176
+ data = @aes.encrypt(data)
177
+ end
178
+ @target.post(key_name,data)
179
+ i+=1
166
180
  end
167
- @target.post(key_name,data)
168
- i+=1
181
+ rescue EOFError
169
182
  end
170
- rescue EOFError
171
183
  end
172
- tmp.close(true)
184
+ File.unlink(tmp_file)
173
185
  end
174
186
  def delete_direcory(dir)
175
187
  if @aes
@@ -182,41 +194,56 @@ module S3backup
182
194
  end
183
195
  def differential_copy(dir)
184
196
  #現在のファイル・ツリーを比較
185
- tree_info = TreeInfo.new(dir)
186
-
187
- target_tree_name = "tree_"+dir+".yml"
188
- tree_data = nil
197
+ tree_info = TreeInfo.new({:format=>:directory,:directory=>dir,:db=>@temporary + "/new_" +
198
+ Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
199
+ target_db_name = dir+".gz"
189
200
  #前回のファイル・ツリーを取得
190
- old_tree = TreeInfo.new(@target.get(target_tree_name))
191
-
192
- #前回と今回のファイル・ツリーを比較
193
- diff_info = tree_info.diff(old_tree)
194
- S3log.debug("diff_info=#{diff_info.inspect}")
195
- dir_map = nil
196
- if @resume
197
- new_dir_map = tree_info.make_dir_map
198
- old_dir_map = old_tree.make_dir_map
201
+ data = @target.get(target_db_name)
202
+ old_tree = nil
203
+ if data
204
+ db_name = @temporary + "/old_" + Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"
205
+ File.open(db_name + ".gz","w") do |f|
206
+ f.write(data)
207
+ end
208
+ from_gz(db_name + ".gz")
209
+ old_tree = TreeInfo.new({:format=>:database,:db=>db_name})
199
210
  else
200
- #メモリ節約のため開放
201
- old_tree = nil
211
+ target_tree_name = "tree_"+dir+".yml"
212
+ #以前のフォーマットだった場合は変換
213
+ data = @target.get(target_tree_name)
214
+ if data
215
+ old_tree = TreeInfo.new({:format=>:yaml,:data=>data,:db=>@temporary + "/old_" +
216
+ Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
217
+ else
218
+ old_tree = TreeInfo.new({:db=>@temporary + "/old_" +
219
+ Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"})
220
+ end
202
221
  end
203
- update_dir = diff_info[:directory][:add] + diff_info[:directory][:modify]
204
- #更新されたディレクトリをアップロード
205
- update_dir.each do |udir|
206
- GC.start
207
- store_directory(udir)
208
- if @resume
209
- #前回のファイル・ツリー情報のうち、今回アップデートしたディレクトリ情報ファイル情報を更新
210
- old_dir_map = old_tree.update_dir(udir,old_dir_map,new_dir_map[udir])
222
+ data = nil;
223
+ GC.start
224
+ cnt=0
225
+ #前回と今回のファイル・ツリーを比較
226
+ tree_info.modify(old_tree) do |dir_info|
227
+ cnt+=1
228
+ S3log.debug("diff_info=#{dir_info[:name]}")
229
+ #更新されたディレクトリをアップロード
230
+ store_directory(dir_info[:name])
231
+ #前回のファイル・ツリー情報のうち、今回アップデートしたディレクトリ情報ファイル情報を更新
232
+ old_dir_map = old_tree.update_dir(dir_info)
233
+ if cnt != 0 and cnt % 10 == 0
211
234
  #更新したファイル・ツリー情報をアップロード(途中で失敗しても、resumeできるようにするため。)
212
- @target.post(target_tree_name,old_tree.dump_yaml)
235
+ to_gz(old_tree.db_name,true)
236
+ @target.post(target_db_name,File.read(old_tree.db_name + ".gz"))
213
237
  end
214
238
  end
215
- diff_info[:directory][:remove].each do |rm_dir|
216
- delete_direcory(rm_dir)
239
+ tree_info.remove(old_tree) do |dir_info|
240
+ delete_direcory(dir_info[:name])
217
241
  end
218
242
  #今回のファイル・ツリーをAWS S3に登録
219
- @target.post(target_tree_name,tree_info.dump_yaml)
243
+ to_gz(tree_info.db_name)
244
+ @target.post(target_db_name,File.read(tree_info.db_name + ".gz"))
245
+ tree_info.close(true)
246
+ old_tree.close(true)
220
247
  end
221
248
  def get_target_tree(dir)
222
249
  base_dir = dir
@@ -228,8 +255,8 @@ module S3backup
228
255
  if base == before_base
229
256
  break
230
257
  end
231
- tree_file_name = "tree_"+base+".yml"
232
- tree_data = @target.get(tree_file_name)
258
+ tree_db_name = base+".gz"
259
+ tree_data = @target.get(tree_db_name)
233
260
  if tree_data
234
261
  break
235
262
  end
@@ -239,16 +266,12 @@ module S3backup
239
266
  unless tree_data
240
267
  return nil
241
268
  end
242
- return TreeInfo.new(tree_data)
243
- end
244
- def get_target_bases
245
- files = @target.find(/^tree_.*\.yml/)
246
- dirs = files.map do |d|
247
- m=/tree_(.*)\.yml/.match(d)
248
- next nil unless m
249
- m[1]
269
+ db_name = @temporary + "/" + Time.now.to_i.to_s + "_" + Process.pid.to_s + ".db"
270
+ File.open(db_name + ".gz","w") do |f|
271
+ f.write(tree_data)
250
272
  end
251
- return dirs.compact
273
+ from_gz(db_name + ".gz")
274
+ return TreeInfo.new({:format=>:database,:db=>db_name})
252
275
  end
253
276
  def expand_tree(dir,tree_info,output_dir)
254
277
  now = Time.new
@@ -267,7 +290,7 @@ module S3backup
267
290
  dir_len = k.length
268
291
  relative_path = k.slice(top_dir_len,dir_len - top_dir_len)
269
292
  dir = output_dir + relative_path
270
- File.utime(now,v[:mtime],dir)
293
+ File.utime(now,Time.parse(v[:mtime]),dir)
271
294
  end
272
295
  }
273
296
  end
@@ -278,7 +301,7 @@ module S3backup
278
301
  return
279
302
  end
280
303
  expand_tree(dir,tree,output_dir)
281
- S3log.debug("expand_tree=#{tree.inspect}")
304
+ tree.close(true)
282
305
  end
283
306
  end
284
307
  end
@@ -34,8 +34,8 @@ module S3backup
34
34
  end
35
35
  end
36
36
  def start
37
+ S3log.error("directories is not defined") unless @directories
37
38
  begin
38
- @directories = @manager.get_target_bases unless @directories
39
39
  @directories.each do |dir|
40
40
  @manager.restore(dir,@output_dir)
41
41
  end
@@ -68,6 +68,7 @@ module S3backup
68
68
  while true do
69
69
  begin
70
70
  if AWS::S3::S3Object.exists? key_name,@bucket_name
71
+ S3log.info("AWS::S3::S3Object.value(#{key_name})\n")
71
72
  data = AWS::S3::S3Object.value(key_name,@bucket_name)
72
73
  end
73
74
  break;
@@ -1,22 +1,112 @@
1
1
  require 'yaml'
2
+ require 'sqlite3'
2
3
  module S3backup
3
4
  class TreeInfo
4
- attr_reader :fileMap
5
- def initialize(target)
6
- if target.nil?
7
- @fileMap = {:file => Hash.new,:symlink => Hash.new,:directory => Hash.new}
8
- elsif File.directory?(target)
9
- @fileMap = {:file => Hash.new,:symlink => Hash.new,:directory => Hash.new}
10
- stat = File.stat(target)
11
- @fileMap[:directory][target] = {:mtime => stat.mtime}
12
- makeFileMap(target)
13
- elsif File.file?(target)
14
- load_yaml(File.read(target))
15
- else
16
- load_yaml(target)
5
+ attr_reader :db_name,:db
6
+ def make_table
7
+ sql = "create table directory ( id INTEGER PRIMARY KEY ,name varchar(2048), mtime integer, parent_directory_id integer)"
8
+ @db.execute(sql)
9
+ sql = "create table file ( name varchar(2048), size integer, mtime integer,directory_id integer)"
10
+ @db.execute(sql)
11
+ sql = "create table symlink ( name varchar(2048), source varchar(2048),directory_id integer)"
12
+ @db.execute(sql)
13
+ sql = "CREATE INDEX idx_directory_name ON directory(name)"
14
+ @db.execute(sql)
15
+ sql = "CREATE INDEX idx_directory_parent_directory_id ON directory(parent_directory_id)"
16
+ @db.execute(sql)
17
+ sql = "CREATE INDEX idx_file_directory_id ON file(directory_id)"
18
+ @db.execute(sql)
19
+ sql = "CREATE INDEX idx_file_name ON file(name)"
20
+ @db.execute(sql)
21
+ sql = "CREATE INDEX idx_symlink_name ON symlink(name)"
22
+ @db.execute(sql)
23
+ sql = "CREATE INDEX idx_symlink_directory_id ON symlink(directory_id)"
24
+ @db.execute(sql)
25
+ end
26
+ def check_dirs(p_id,p_name)
27
+ @db.execute('select id,name from directory where parent_directory_id = ?',p_id) do |row|
28
+ id = row[0]
29
+ name = row[1]
30
+ if File.basename(name) == File.basename(p_name)
31
+ sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
32
+ @db.execute(sql,:name=>"zetteiarienainamae#{id}",:size=>0,:mtime =>0,:directory_id=>p_id)
33
+ end
34
+ check_dirs(id,name)
35
+ end
36
+ end
37
+ def convert_yaml_to_sqlite3(file_map)
38
+ file_map[:directory].keys().sort{|a,b| a<=>b}.each do |key|
39
+ file_at = file_map[:directory][key]
40
+ sql = "insert into directory(name,mtime) values (:name, :mtime)"
41
+ @db.execute(sql,:name=>key,:mtime => file_at[:mtime] )
42
+ end
43
+ @db.execute('select id,name from directory' ) do |row|
44
+ dir_id = row[0].to_i
45
+ parent = File.dirname(row[1])
46
+ @db.execute('select id from directory where name =?',parent ) do |row|
47
+ @db.execute("update directory set parent_directory_id = #{row[0]} where id = #{dir_id}")
48
+ end
49
+ end
50
+ #for bug (same name directory was not backuped before)
51
+ @db.execute('select id,name from directory order by id limit 1') do |row|
52
+ p_id = row[0]
53
+ name = row[1]
54
+ check_dirs(p_id,name)
55
+ end
56
+ file_map[:file].each do |key,val|
57
+ file_at = file_map[:file][key]
58
+ dir_name = File.dirname(key)
59
+ dir_id = nil
60
+ @db.execute('select id from directory where name=?',dir_name ) do |row|
61
+ #rowは結果の配列
62
+ dir_id = row[0].to_i
63
+ end
64
+ unless dir_id
65
+ STDERR.print "directory doesn't exist; ignoring #{dir_name}"
66
+ next
67
+ end
68
+ sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
69
+ @db.execute(sql,:name=>key,:size=>file_at[:size],:mtime => file_at[:date], :directory_id=>dir_id)
70
+ end
71
+ file_map[:symlink].each do |key,val|
72
+ file_at = file_map[:symlink][key]
73
+ dir_name = File.dirname(key)
74
+ sql="select id from directory where name = :name"
75
+ dir_id = nil
76
+ @db.execute('select id from directory where name=?',dir_name ) do |row|
77
+ #rowは結果の配列
78
+ dir_id = row[0].to_i
79
+ end
80
+ unless dir_id
81
+ STDERR.print "directory doesn't exist; ignoring #{dir_name}"
82
+ next
83
+ end
84
+ sql = "insert into symlink(name,source,directory_id) values (:name, :source,:directory_id)"
85
+ @db.execute(sql,:name=>key,:source=>file_at[:source],:directory_id=>dir_id)
86
+ end
87
+ end
88
+ def initialize(opt)
89
+ @db_name = opt[:db]
90
+ @db = SQLite3::Database.new(opt[:db])
91
+ if opt[:format].nil?
92
+ make_table
93
+ elsif opt[:format] == :directory
94
+ make_table
95
+ stat = File.stat(opt[:directory])
96
+ sql = "insert into directory(name,mtime) values (:name, :mtime)"
97
+ @db.execute(sql,:name=>opt[:directory],:mtime =>stat.mtime)
98
+ dir_id = nil
99
+ @db.execute('select id from directory where name=?',opt[:directory]) do |row|
100
+ #rowは結果の配列
101
+ dir_id = row[0].to_i
102
+ end
103
+ makeFileMap(opt[:directory],dir_id)
104
+ elsif opt[:format] == :yaml
105
+ make_table
106
+ convert_yaml_to_sqlite3(YAML.load(opt[:data]))
17
107
  end
18
108
  end
19
- def makeFileMap(dir)
109
+ def makeFileMap(dir,id)
20
110
  Dir.entries(dir).each do |e|
21
111
  if e == "." or e == ".."
22
112
  next
@@ -24,140 +114,134 @@ module S3backup
24
114
  name = dir + "/" + e
25
115
  if File.directory?(name)
26
116
  stat = File.stat(name)
27
- @fileMap[:directory][name] = {:mtime => stat.mtime}
28
- makeFileMap(name)
117
+ sql = "insert into directory(name,mtime,parent_directory_id) values (:name, :mtime,:parent_directory_id)"
118
+ @db.execute(sql,:name=>name,:mtime =>stat.mtime,:parent_directory_id=>id)
119
+ dir_id = nil
120
+ @db.execute('select id from directory where name=?',name) do |row|
121
+ #rowは結果の配列
122
+ dir_id = row[0].to_i
123
+ end
124
+ makeFileMap(name,dir_id)
29
125
  elsif File.symlink?(name)
30
- @fileMap[:symlink][name] = {:source => File.readlink(name)}
126
+ sql = "insert into symlink(name,source,directory_id) values (:name, :source,:directory_id)"
127
+ @db.execute(sql,:name=>name,:source=>File.readlink(name),:directory_id=>id)
31
128
  else
32
129
  stat = File.stat(name)
33
- @fileMap[:file][name] = {:size => stat.size,:date => stat.mtime}
34
- end
35
- end
36
- end
37
- def make_dir_map
38
- dir_map = {};
39
- @fileMap[:directory].each do |k,v|
40
- dir_map[k] = {:mtime => v[:mtime],:file=>{},:symlink=>{}}
41
- end
42
- @fileMap[:file].each do |k,v|
43
- target = dir_map[File.dirname(k)]
44
- #不整合だけど適当に作る
45
- unless target
46
- S3log.warn("Tree Data isn't correct.")
47
- target = {:mtime => DateTime.now.to_s,:file=>{},:symlink=>{}}
48
- dir_map[File.dirname(k)] = target
49
- end
50
- target[:file][k] = {:size => v[:size], :date => v[:date]}
51
- end
52
- @fileMap[:symlink].each do |k,v|
53
- target = dir_map[File.dirname(k)]
54
- #不整合だけど適当に作る
55
- unless target
56
- S3log.warn("Tree Data isn't correct.")
57
- target = {:mtime => DateTime.now.to_s,:file=>{},:symlink=>{}}
58
- dir_map[File.dirname(k)] = target
130
+ sql = "insert into file(name,size,mtime,directory_id) values (:name, :size, :mtime,:directory_id)"
131
+ @db.execute(sql,:name=>name,:size=>stat.size,:mtime => stat.mtime, :directory_id=>id)
59
132
  end
60
- target[:symlink][k] = {:source => v[:source]}
61
133
  end
62
- return dir_map;
63
134
  end
64
- def update_dir(name,dir_map,dir_info)
65
- dir_map[name] = {:file => {},:symlink=>{}} unless dir_map[name]
66
- dir_map[name][:file].each do |k,v|
67
- @fileMap[:file].delete(k)
68
- end
69
- dir_map[name][:symlink].each do |k,v|
70
- @fileMap[:symlink].delete(k)
135
+ def update_dir(dir_info)
136
+ result = @db.execute("select id from directory where name = ?",dir_info[:name])
137
+ p_id = nil
138
+ id = nil
139
+ @db.execute('select id from directory where name =?',File.dirname(dir_info[:name])) do |row|
140
+ p_id = row[0]
141
+ end
142
+ if result.length != 0
143
+ id = result[0][0]
144
+ @db.execute("delete from file where directory_id = #{id}")
145
+ @db.execute("delete from symlink where directory_id = #{id}")
146
+
147
+ @db.execute("update directory set mtime = ?,parent_directory_id = ?" +
148
+ " where id = ?",dir_info[:mtime],p_id,id)
149
+ else
150
+ @db.execute("insert into directory(name,mtime,parent_directory_id) values(?,?,?)",
151
+ dir_info[:name],dir_info[:mtime],p_id)
152
+ result = @db.execute("select id from directory where name = ?",dir_info[:name])
153
+ id = result[0][0]
71
154
  end
72
- @fileMap[:directory][name] = {:mtime => dir_info[:mtime]}
73
- dir_info[:file].each do |k,v|
74
- @fileMap[:file][k] = v
155
+ dir_info[:files].each do |f|
156
+ @db.execute("insert into file(name,mtime,size,directory_id) values(?,?,?,?)",
157
+ f[:name],f[:mtime],f[:size],id)
75
158
  end
76
- dir_info[:symlink].each do |k,v|
77
- @fileMap[:symlink][k] = v
159
+ dir_info[:links].each do |f|
160
+ @db.execute("insert into symlink(name,source,directory_id) values(?,?,?)",f[:name],f[:source],id)
78
161
  end
79
- dir_map[name][:file] = dir_info[:file]
80
- dir_map[name][:symlink] = dir_info[:symlink]
81
- dir_map[name][:mtime] = dir_info[:mtime]
82
- return dir_map
83
- end
84
- def load_yaml(data)
85
- @fileMap = YAML.load(data)
86
162
  end
87
- def dump_yaml()
88
- YAML.dump(@fileMap)
89
- end
90
- def hierarchie(dir)
91
- count = dir.count("/")
92
- tree = []
93
- @fileMap[:directory].each do |k,v|
94
- if k.index(dir) != 0
95
- next
96
- end
97
- level = k.count("/") - count
163
+ def get_level_directory(tree,p_id,level)
164
+ @db.execute('select id,name,mtime from directory where parent_directory_id = ?',p_id) do |row|
165
+ id = row[0]
166
+ name = row[1]
167
+ mtime = row[2]
98
168
  tree[level] = {} unless tree[level]
99
- tree[level][k] = v
169
+ tree[level][name] = {:mtime=>mtime}
170
+ get_level_directory(tree,id,level+1)
100
171
  end
172
+ end
173
+ def hierarchie(dir)
174
+ tree=[]
175
+ result = @db.execute('select id,name,mtime from directory where name = ?',dir)
176
+ if result.length == 0
177
+ S3log.error("#{dir} is not stored.")
178
+ exit(-1)
179
+ end
180
+ id = result[0][0]
181
+ name = result[0][1]
182
+ mtime = result[0][2]
183
+ tree[0] = {}
184
+ tree[0][name]={:mtime=>mtime}
185
+ get_level_directory(tree,id,1)
101
186
  return tree
102
187
  end
103
- def diff(target)
104
- modify_dir_map = {}
105
- modify_files = []
106
- modify_links = []
107
-
108
- remove_dirs = target.fileMap[:directory].keys - @fileMap[:directory].keys
109
- add_dirs = @fileMap[:directory].keys - target.fileMap[:directory].keys
110
-
111
- new_info = @fileMap[:file]
112
- old_info = target.fileMap[:file]
113
-
114
- remove_files = old_info.keys - new_info.keys
115
- remove_files.each do |f|
116
- dir = File.dirname(f)
117
- modify_dir_map[dir] = true
118
- end
119
- add_files = new_info.keys - old_info.keys
120
- add_files.each do |f|
121
- dir = File.dirname(f)
122
- modify_dir_map[dir] = true
123
- end
124
-
125
- new_info.each do |k,v|
126
- next unless old_info[k]
127
- if old_info[k][:date] != v[:date] or old_info[k][:size] != v[:size]
128
- modify_files.push(k)
129
- dir = File.dirname(k)
130
- modify_dir_map[dir] = true
188
+ def modify(target)
189
+ now_id = 0
190
+ while 1
191
+ result = @db.execute("select id,name,mtime from directory where id > ? limit 1",now_id)
192
+ break if result.length == 0
193
+ now_id = result[0][0].to_i
194
+ name = result[0][1]
195
+ mtime = result[0][2]
196
+ files = []
197
+ links = []
198
+ t_result = target.db.execute("select id,name from directory where name = ?",name)
199
+ if t_result.length != 0
200
+ t_id = t_result[0][0]
201
+ t_files = target.db.execute("select name,size,mtime from file where directory_id = ? order by name",t_id)
202
+ files = @db.execute("select name,size,mtime from file where directory_id = ? order by name",now_id)
203
+ if t_files == files
204
+ t_links = target.db.execute("select name,source from symlink where directory_id = ? order by name",t_id)
205
+ links = @db.execute("select name,source from symlink where directory_id = ? order by name",now_id)
206
+ if t_links == links
207
+ next
208
+ end
209
+ end
131
210
  end
211
+ file_infos = []
212
+ files.each do |f|
213
+ file_infos.push({:name=>f[0],:size=>f[1],:mtime=>f[2]})
214
+ end
215
+ sym_infos = []
216
+ links.each do |l|
217
+ sym_infos.push({:name=>l[0],:source=>l[1]})
218
+ end
219
+ yield({:name => name,:mtime=>mtime,:files => file_infos ,:links => sym_infos})
132
220
  end
133
-
134
- new_info = @fileMap[:symlink]
135
- old_info = target.fileMap[:symlink]
136
-
137
- remove_links = old_info.keys - new_info.keys
138
- remove_links.each do |f|
139
- dir = File.dirname(f)
140
- modify_dir_map[dir] = true
141
- end
142
-
143
- add_links = new_info.keys - old_info.keys
144
- add_links.each do |f|
145
- dir = File.dirname(f)
146
- modify_dir_map[dir] = true
221
+ end
222
+ def remove(target)
223
+ now_id = 0
224
+ while 1
225
+ t_result = target.db.execute("select id,name from directory where id > ? limit 1",now_id)
226
+ break if t_result.length == 0
227
+ now_id = t_result[0][0].to_i
228
+ name = t_result[0][1]
229
+ result = @db.execute("select id,name from directory where name = ?",name)
230
+ if result.length == 0
231
+ yield({:name => name})
232
+ end
147
233
  end
148
-
149
- new_info.each do |k,v|
150
- next unless old_info[k]
151
- if old_info[k][:source] != v[:source]
152
- modify_links.push(k)
153
- dir = File.dirname(k)
154
- modify_dir_map[dir] = true
234
+ end
235
+ def close(delete=false)
236
+ @db.close
237
+ if delete
238
+ if File.exist?(@db_name)
239
+ File.unlink(@db_name)
240
+ end
241
+ if File.exist?(@db_name + ".gz")
242
+ File.unlink(@db_name+".gz")
155
243
  end
156
244
  end
157
- return {
158
- :directory => {:add => add_dirs,:modify => modify_dir_map.keys - add_dirs - remove_dirs,:remove => remove_dirs},
159
- :file => {:add => add_files,:modify => modify_files,:remove => remove_files},
160
- :symlink => {:add => add_links,:modify => modify_links,:remove => remove_links}}
161
245
  end
162
246
  end
163
247
  end
data/lib/s3backup.rb CHANGED
@@ -2,5 +2,5 @@ $:.unshift(File.dirname(__FILE__)) unless
2
2
  $:.include?(File.dirname(__FILE__)) || $:.include?(File.expand_path(File.dirname(__FILE__)))
3
3
 
4
4
  module S3backup
5
- VERSION = '0.7.3'
5
+ VERSION = '0.8.1'
6
6
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: s3backup
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.7.3
4
+ version: 0.8.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Takeshi Morita
@@ -9,7 +9,7 @@ autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
11
 
12
- date: 2010-01-31 00:00:00 +09:00
12
+ date: 2010-02-11 00:00:00 +09:00
13
13
  default_executable:
14
14
  dependencies:
15
15
  - !ruby/object:Gem::Dependency
@@ -22,6 +22,16 @@ dependencies:
22
22
  - !ruby/object:Gem::Version
23
23
  version: 0.6.2
24
24
  version:
25
+ - !ruby/object:Gem::Dependency
26
+ name: sqlite3
27
+ type: :runtime
28
+ version_requirement:
29
+ version_requirements: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - ">="
32
+ - !ruby/object:Gem::Version
33
+ version: 0.0.7
34
+ version:
25
35
  - !ruby/object:Gem::Dependency
26
36
  name: rubyforge
27
37
  type: :development