mogilefs-client 2.2.0 → 3.0.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. data/.document +11 -0
  2. data/.gemtest +0 -0
  3. data/.gitignore +4 -0
  4. data/.wrongdoc.yml +5 -0
  5. data/GIT-VERSION-GEN +28 -0
  6. data/GNUmakefile +44 -0
  7. data/HACKING +33 -0
  8. data/{History.txt → History} +0 -1
  9. data/{LICENSE.txt → LICENSE} +0 -1
  10. data/Manifest.txt +34 -7
  11. data/README +51 -0
  12. data/Rakefile +11 -11
  13. data/TODO +10 -0
  14. data/bin/mog +109 -68
  15. data/examples/mogstored_rack.rb +189 -0
  16. data/lib/mogilefs.rb +56 -17
  17. data/lib/mogilefs/admin.rb +128 -62
  18. data/lib/mogilefs/backend.rb +205 -95
  19. data/lib/mogilefs/bigfile.rb +54 -70
  20. data/lib/mogilefs/bigfile/filter.rb +58 -0
  21. data/lib/mogilefs/chunker.rb +30 -0
  22. data/lib/mogilefs/client.rb +0 -2
  23. data/lib/mogilefs/copy_stream.rb +30 -0
  24. data/lib/mogilefs/http_file.rb +175 -0
  25. data/lib/mogilefs/http_reader.rb +79 -0
  26. data/lib/mogilefs/mogilefs.rb +242 -148
  27. data/lib/mogilefs/mysql.rb +3 -4
  28. data/lib/mogilefs/paths_size.rb +24 -0
  29. data/lib/mogilefs/pool.rb +0 -1
  30. data/lib/mogilefs/socket.rb +9 -0
  31. data/lib/mogilefs/socket/kgio.rb +55 -0
  32. data/lib/mogilefs/socket/pure_ruby.rb +70 -0
  33. data/lib/mogilefs/socket_common.rb +58 -0
  34. data/lib/mogilefs/util.rb +6 -169
  35. data/test/aggregate.rb +11 -11
  36. data/test/exec.rb +72 -0
  37. data/test/fresh.rb +222 -0
  38. data/test/integration.rb +43 -0
  39. data/test/setup.rb +1 -0
  40. data/test/socket_test.rb +98 -0
  41. data/test/test_admin.rb +14 -37
  42. data/test/test_backend.rb +50 -107
  43. data/test/test_bigfile.rb +2 -2
  44. data/test/test_db_backend.rb +1 -2
  45. data/test/test_fresh.rb +8 -0
  46. data/test/test_http_reader.rb +34 -0
  47. data/test/test_mogilefs.rb +278 -98
  48. data/test/test_mogilefs_integration.rb +174 -0
  49. data/test/test_mogilefs_integration_large_pipe.rb +62 -0
  50. data/test/test_mogilefs_integration_list_keys.rb +40 -0
  51. data/test/test_mogilefs_socket_kgio.rb +11 -0
  52. data/test/test_mogilefs_socket_pure.rb +7 -0
  53. data/test/test_mogstored_rack.rb +89 -0
  54. data/test/test_mogtool_bigfile.rb +116 -0
  55. data/test/test_mysql.rb +1 -2
  56. data/test/test_pool.rb +1 -1
  57. data/test/test_unit_mogstored_rack.rb +72 -0
  58. metadata +76 -54
  59. data/README.txt +0 -80
  60. data/lib/mogilefs/httpfile.rb +0 -157
  61. data/lib/mogilefs/network.rb +0 -107
  62. data/test/test_network.rb +0 -56
  63. data/test/test_util.rb +0 -121
@@ -1,25 +1,31 @@
1
1
  # -*- encoding: binary -*-
2
- require 'mogilefs'
3
- require 'mogilefs/util'
4
2
  require 'thread'
5
3
 
6
- ##
7
- # MogileFS::Backend communicates with the MogileFS trackers.
8
-
4
+ # This class communicates with the MogileFS trackers.
5
+ # You should not have to use this directly unless you are developing
6
+ # support for new commands or plugins for MogileFS
9
7
  class MogileFS::Backend
10
8
 
11
- ##
12
9
  # Adds MogileFS commands +names+.
13
-
14
10
  def self.add_command(*names)
15
11
  names.each do |name|
16
12
  define_method name do |*args|
17
- do_request name, args.first || {}
13
+ do_request(name, args[0] || {}, false)
14
+ end
15
+ end
16
+ end
17
+
18
+ # adds idempotent MogileFS commands +names+, these commands may be retried
19
+ # transparently on a different tracker if there is a network/server error.
20
+ def self.add_idempotent_command(*names)
21
+ names.each do |name|
22
+ define_method name do |*args|
23
+ do_request(name, args[0] || {}, true)
18
24
  end
19
25
  end
20
26
  end
21
27
 
22
- BACKEND_ERRORS = {}
28
+ BACKEND_ERRORS = {} # :nodoc:
23
29
 
24
30
  # this converts an error code from a mogilefsd tracker to an exception:
25
31
  #
@@ -30,10 +36,10 @@ class MogileFS::Backend
30
36
  def self.add_error(err_snake)
31
37
  err_camel = err_snake.gsub(/(?:^|_)([a-z])/) { $1.upcase }
32
38
  err_camel << 'Error' unless /Error\z/ =~ err_camel
33
- unless self.const_defined?(err_camel)
34
- self.class_eval("class #{err_camel} < MogileFS::Error; end")
39
+ unless const_defined?(err_camel)
40
+ const_set(err_camel, Class.new(MogileFS::Error))
35
41
  end
36
- BACKEND_ERRORS[err_snake] = self.const_get(err_camel)
42
+ BACKEND_ERRORS[err_snake] = const_get(err_camel)
37
43
  end
38
44
 
39
45
  ##
@@ -67,6 +73,7 @@ class MogileFS::Backend
67
73
  @socket = nil
68
74
  @lasterr = nil
69
75
  @lasterrstr = nil
76
+ @pending = []
70
77
 
71
78
  @dead = {}
72
79
  end
@@ -82,19 +89,21 @@ class MogileFS::Backend
82
89
 
83
90
  add_command :create_open
84
91
  add_command :create_close
85
- add_command :get_paths
92
+ add_idempotent_command :get_paths
86
93
  add_command :delete
87
- add_command :sleep
94
+ add_idempotent_command :sleep
88
95
  add_command :rename
89
- add_command :list_keys
96
+ add_idempotent_command :list_keys
97
+ add_idempotent_command :file_info
98
+ add_idempotent_command :file_debug
90
99
 
91
100
  # MogileFS::Backend commands
92
101
 
93
- add_command :get_hosts
94
- add_command :get_devices
95
- add_command :list_fids
96
- add_command :stats
97
- add_command :get_domains
102
+ add_idempotent_command :get_hosts
103
+ add_idempotent_command :get_devices
104
+ add_idempotent_command :list_fids
105
+ add_idempotent_command :stats
106
+ add_idempotent_command :get_domains
98
107
  add_command :create_domain
99
108
  add_command :delete_domain
100
109
  add_command :create_class
@@ -104,6 +113,7 @@ class MogileFS::Backend
104
113
  add_command :update_host
105
114
  add_command :delete_host
106
115
  add_command :set_state
116
+ add_command :replicate_now
107
117
 
108
118
  # Errors copied from MogileFS/Worker/Query.pm
109
119
  add_error 'dup'
@@ -143,51 +153,133 @@ class MogileFS::Backend
143
153
  add_error 'unknown_state'
144
154
  add_error 'unreg_domain'
145
155
 
146
- private unless defined? $TESTING
147
-
148
- # record-separator for mogilefsd responses, update this if the protocol
149
- # changes
150
- RS = "\n"
151
-
152
- def shutdown_unlocked # :nodoc:
156
+ def shutdown_unlocked(do_raise = false) # :nodoc:
157
+ @pending = []
153
158
  if @socket
154
159
  @socket.close rescue nil # ignore errors
155
160
  @socket = nil
156
161
  end
162
+ raise if do_raise
157
163
  end
158
164
 
159
- ##
160
- # Performs the +cmd+ request with +args+.
165
+ def dispatch_unlocked(request, timeout = @timeout) # :nodoc:
166
+ begin
167
+ io = socket
168
+ io.timed_write(request, timeout)
169
+ io
170
+ rescue SystemCallError, MogileFS::RequestTruncatedError => err
171
+ @dead[@active_host] = [ Time.now, err ]
172
+ shutdown_unlocked
173
+ retry
174
+ end
175
+ end
176
+
177
+ def pipeline_gets_unlocked(io, timeout) # :nodoc:
178
+ line = io.timed_gets(timeout) or
179
+ raise MogileFS::PipelineError,
180
+ "EOF with #{@pending.size} requests in-flight"
181
+ ready = @pending.shift
182
+ ready[1].call(parse_response(line, ready[0]))
183
+ end
184
+
185
+ def timeout_update(timeout, t0) # :nodoc:
186
+ timeout -= (Time.now - t0)
187
+ timeout < 0 ? 0 : timeout
188
+ end
189
+
190
+ # try to read any responses we have pending already before filling
191
+ # the pipeline more requests. This usually takes very little time,
192
+ # but trackers may return huge responses and we could be on a slow
193
+ # network.
194
+ def pipeline_drain_unlocked(io, timeout) # :nodoc:
195
+ set = [ io ]
196
+ while @pending.size > 0
197
+ t0 = Time.now
198
+ r = IO.select(set, set, nil, timeout)
199
+ timeout = timeout_update(timeout, t0)
200
+
201
+ if r && r[0][0]
202
+ t0 = Time.now
203
+ pipeline_gets_unlocked(io, timeout)
204
+ timeout = timeout_update(timeout, t0)
205
+ else
206
+ return timeout
207
+ end
208
+ end
209
+ timeout
210
+ end
211
+
212
+ # dispatch a request like do_request, but queue +block+ for execution
213
+ # upon receiving a response. It is the users' responsibility to ensure
214
+ # &block is executed in the correct order. Trackers with multiple
215
+ # queryworkers are not guaranteed to return responses in the same
216
+ # order they were requested.
217
+ def pipeline_dispatch(cmd, args, &block) # :nodoc:
218
+ request = make_request(cmd, args)
219
+ timeout = @timeout
161
220
 
162
- def do_request(cmd, args)
163
- response = nil
164
- request = make_request cmd, args
165
221
  @mutex.synchronize do
222
+ io = socket
223
+ timeout = pipeline_drain_unlocked(io, timeout)
224
+
225
+ # send the request out...
166
226
  begin
227
+ io.timed_write(request, timeout)
228
+ @pending << [ request, block ]
229
+ rescue SystemCallError, MogileFS::RequestTruncatedError => err
230
+ @dead[@active_host] = [ Time.now, err ]
231
+ shutdown_unlocked(@pending[0])
167
232
  io = socket
168
- begin
169
- bytes_sent = io.write request
170
- bytes_sent == request.size or
171
- raise MogileFS::RequestTruncatedError,
172
- "request truncated (sent #{bytes_sent} expected #{request.size})"
173
- rescue SystemCallError
174
- raise MogileFS::UnreachableBackendError
175
- end
233
+ retry
234
+ end
176
235
 
177
- readable?(io)
178
- response = io.gets(RS) and return parse_response(response)
179
- ensure
180
- # we DO NOT want the response we timed out waiting for, to crop up later
181
- # on, on the same socket, interspersed with a subsequent request!
182
- # we close the socket if it times out like this
183
- response or shutdown_unlocked
236
+ @pending.size
237
+ end
238
+ end
239
+
240
+ def pipeline_wait(count = nil) # :nodoc:
241
+ @mutex.synchronize do
242
+ io = socket
243
+ count ||= @pending.size
244
+ @pending.size < count and
245
+ raise MogileFS::Error,
246
+ "pending=#{@pending.size} < expected=#{count} failed"
247
+ begin
248
+ count.times { pipeline_gets_unlocked(io, @timeout) }
249
+ rescue
250
+ shutdown_unlocked(true)
184
251
  end
252
+ end
253
+ end
254
+
255
+ # Performs the +cmd+ request with +args+.
256
+ def do_request(cmd, args, idempotent = false)
257
+ request = make_request cmd, args
258
+ @mutex.synchronize do
259
+ begin
260
+ io = dispatch_unlocked(request)
261
+ line = io.timed_gets(@timeout) and return parse_response(line)
262
+
263
+ idempotent or
264
+ raise EOFError, "end of file reached after: #{request.inspect}"
265
+ # fall through to retry in loop
266
+ rescue SystemCallError,
267
+ MogileFS::UnreadableSocketError,
268
+ MogileFS::InvalidResponseError, # truncated response
269
+ MogileFS::Timeout
270
+ # we got a successful timed_write, but not a timed_gets
271
+ retry if idempotent
272
+ shutdown_unlocked(true)
273
+ rescue
274
+ # we DO NOT want the response we timed out waiting for, to crop up later
275
+ # on, on the same socket, interspersed with a subsequent request! we
276
+ # close the socket if there's any error.
277
+ shutdown_unlocked(true)
278
+ end while idempotent
185
279
  end # @mutex.synchronize
186
280
  end
187
281
 
188
- ##
189
282
  # Makes a new request string for +cmd+ and +args+.
190
-
191
283
  def make_request(cmd, args)
192
284
  "#{cmd} #{url_encode args}\r\n"
193
285
  end
@@ -200,102 +292,120 @@ class MogileFS::Backend
200
292
  BACKEND_ERRORS[err_snake] || self.class.add_error(err_snake)
201
293
  end
202
294
 
203
- ##
204
295
  # Turns the +line+ response from the server into a Hash of options, an
205
296
  # error, or raises, as appropriate.
206
-
207
- def parse_response(line)
297
+ def parse_response(line, request = nil)
208
298
  if line =~ /^ERR\s+(\w+)\s*([^\r\n]*)/
209
299
  @lasterr = $1
210
300
  @lasterrstr = $2 ? url_unescape($2) : nil
211
- raise error(@lasterr), @lasterrstr
301
+ if request
302
+ request = " request=#{request.strip}"
303
+ @lasterrstr = @lasterrstr ? (@lasterrstr << request) : request
304
+ return error(@lasterr).new(@lasterrstr)
305
+ end
306
+ raise error(@lasterr).new(@lasterrstr)
212
307
  end
213
308
 
214
- return url_decode($1) if line =~ /^OK\s+\d*\s*(\S*)/
309
+ return url_decode($1) if line =~ /^OK\s+\d*\s*(\S*)\r\n\z/
215
310
 
216
311
  raise MogileFS::InvalidResponseError,
217
312
  "Invalid response from server: #{line.inspect}"
218
313
  end
219
314
 
220
- ##
221
- # Raises if the socket does not become readable in +@timeout+ seconds.
222
-
223
- def readable?(io = @socket)
224
- timeleft = @timeout
225
- peer = nil
226
- loop do
227
- t0 = Time.now
228
- found = IO.select([io], nil, nil, timeleft)
229
- return true if found && found[0]
230
- timeleft -= (Time.now - t0)
231
- timeleft >= 0 and next
232
- peer = io ? "#{io.mogilefs_peername} " : nil
315
+ # this command is special since the cache is per-tracker, so we connect
316
+ # to all backends and not just one
317
+ def clear_cache(types = %w(all))
318
+ opts = {}
319
+ types.each { |type| opts[type] = 1 }
233
320
 
234
- raise MogileFS::UnreadableSocketError, "#{peer}never became readable"
321
+ sockets = @hosts.map do |host|
322
+ MogileFS::Socket.start(*(host.split(/:/))) rescue nil
235
323
  end
236
- false
324
+ sockets.compact!
325
+
326
+ wpending = sockets
327
+ rpending = []
328
+ request = make_request("clear_cache", opts)
329
+ while wpending[0] || rpending[0]
330
+ r = IO.select(rpending, wpending, nil, @timeout) or return
331
+ rpending -= r[0]
332
+ wpending -= r[1]
333
+ r[0].each { |io| io.timed_gets(0) rescue nil }
334
+ r[1].each do |io|
335
+ begin
336
+ io.timed_write(request, 0)
337
+ rpending << io
338
+ rescue
339
+ end
340
+ end
341
+ end
342
+ nil
343
+ ensure
344
+ sockets.each { |io| io.close }
237
345
  end
238
346
 
239
- ##
240
347
  # Returns a socket connected to a MogileFS tracker.
241
-
242
348
  def socket
243
349
  return @socket if @socket and not @socket.closed?
244
350
 
245
351
  now = Time.now
246
352
 
247
- @hosts.sort_by { rand(3) - 1 }.each do |host|
248
- next if @dead.include? host and @dead[host] > now - 5
353
+ @hosts.shuffle.each do |host|
354
+ next if @dead.include?(host) and @dead[host][0] > now - 5
249
355
 
250
356
  begin
251
- @socket = Socket.mogilefs_new(*(host.split(/:/) << @timeout))
252
- rescue SystemCallError, MogileFS::Timeout
253
- @dead[host] = now
357
+ addr, port = host.split(/:/)
358
+ @socket = MogileFS::Socket.tcp(addr, port, @timeout)
359
+ @active_host = host
360
+ rescue SystemCallError, MogileFS::Timeout => err
361
+ @dead[host] = [ now, err ]
254
362
  next
255
363
  end
256
364
 
257
365
  return @socket
258
366
  end
259
367
 
260
- raise MogileFS::UnreachableBackendError
368
+ errors = @dead.map { |host,(_,e)| "#{host} - #{e.message} (#{e.class})" }
369
+ raise MogileFS::UnreachableBackendError,
370
+ "couldn't connect to any tracker: #{errors.join(', ')}"
261
371
  end
262
372
 
263
- ##
264
373
  # Turns a url params string into a Hash.
265
-
266
- def url_decode(str)
267
- Hash[*(str.split(/&/).map { |pair|
268
- pair.split(/=/, 2).map { |x| url_unescape(x) }
269
- } ).flatten]
374
+ def url_decode(str) # :nodoc:
375
+ rv = {}
376
+ str.split(/&/).each do |pair|
377
+ k, v = pair.split(/=/, 2).map! { |x| url_unescape(x) }
378
+ rv[k.freeze] = v
379
+ end
380
+ rv
270
381
  end
271
382
 
272
- ##
273
- # Turns a Hash (or Array of pairs) into a url params string.
383
+ # :stopdoc:
384
+ # TODO: see if we can use existing URL-escape/unescaping routines
385
+ # in the Ruby standard library, Perl MogileFS seems to NIH these
386
+ # routines, too
387
+ # :startdoc:
274
388
 
275
- def url_encode(params)
389
+ # Turns a Hash (or Array of pairs) into a url params string.
390
+ def url_encode(params) # :nodoc:
276
391
  params.map do |k,v|
277
392
  "#{url_escape k.to_s}=#{url_escape v.to_s}"
278
393
  end.join("&")
279
394
  end
280
395
 
281
- ##
282
396
  # Escapes naughty URL characters.
283
397
  if ''.respond_to?(:ord) # Ruby 1.9
284
- def url_escape(str)
398
+ def url_escape(str) # :nodoc:
285
399
  str.gsub(/([^\w\,\-.\/\\\: ])/) { "%%%02x" % $1.ord }.tr(' ', '+')
286
400
  end
287
401
  else # Ruby 1.8
288
- def url_escape(str)
402
+ def url_escape(str) # :nodoc:
289
403
  str.gsub(/([^\w\,\-.\/\\\: ])/) { "%%%02x" % $1[0] }.tr(' ', '+')
290
404
  end
291
405
  end
292
406
 
293
- ##
294
407
  # Unescapes naughty URL characters.
295
-
296
- def url_unescape(str)
297
- str.gsub(/%([a-f0-9][a-f0-9])/i) { [$1.to_i(16)].pack 'C' }.tr('+', ' ')
408
+ def url_unescape(str) # :nodoc:
409
+ str.tr('+', ' ').gsub(/%([a-f0-9][a-f0-9])/i) { [$1.to_i(16)].pack 'C' }
298
410
  end
299
-
300
411
  end
301
-
@@ -1,107 +1,91 @@
1
1
  # -*- encoding: binary -*-
2
- require 'zlib'
3
- require 'digest/md5'
4
2
  require 'uri'
5
- require 'mogilefs/util'
3
+
4
+ # Used for reading deprecated "bigfile" objects generated by the deprecated
5
+ # mogtool(1) utility. This is for reading legacy data and not recommended for
6
+ # new projects. MogileFS itself is capable of storing standalone objects
7
+ # of arbitrary length (as long as the underlying database and underlying
8
+ # filesystem on the DAV devices accept them).
6
9
 
7
10
  module MogileFS::Bigfile
8
- GZIP_HEADER = "\x1f\x8b".freeze # mogtool(1) has this
9
11
  # VALID_TYPES = %w(file tarball partition).map { |x| x.freeze }.freeze
10
12
 
11
13
  # returns a big_info hash if successful
12
14
  def bigfile_stat(key)
13
- parse_info(get_file_data(key))
15
+ bigfile_parse_info(get_file_data(key))
14
16
  end
15
17
 
16
18
  # returns total bytes written and the big_info hash if successful, raises an
17
- # exception if not wr_io is expected to be an IO-like object capable of
18
- # receiving the syswrite method.
19
+ # exception if not. wr_io is expected to be an IO-like object capable of
20
+ # receiving the write method.
19
21
  def bigfile_write(key, wr_io, opts = { :verify => false })
20
22
  info = bigfile_stat(key)
21
- zi = nil
22
- md5 = opts[:verify] ? Digest::MD5.new : nil
23
23
  total = 0
24
+ t = @get_file_data_timeout
24
25
 
25
26
  # we only decode raw zlib deflated streams that mogtool (unfortunately)
26
27
  # generates. tarballs and gzip(1) are up to the application to decrypt.
27
- filter = Proc.new do |buf|
28
- if zi == nil
29
- if info[:compressed] && info[:type] == 'file' &&
30
- buf.length >= 2 && buf[0,2] != GZIP_HEADER
31
- zi = Zlib::Inflate.new
32
-
33
- # mogtool(1) seems to have a bug that causes it to generate bogus
34
- # MD5s if zlib deflate is used. Don't trust those MD5s for now...
35
- md5 = nil
36
- else
37
- zi = false
38
- end
39
- end
40
- buf ||= ''
41
- if zi
42
- zi.inflate(buf)
43
- else
44
- md5 << buf
45
- buf
46
- end
47
- end if (info[:compressed] || md5)
28
+ if info[:compressed] || opts[:verify]
29
+ wr_io = MogileFS::Bigfile::Filter.new(wr_io, info, opts)
30
+ end
48
31
 
49
32
  info[:parts].each_with_index do |part,part_nr|
50
33
  next if part_nr == 0 # info[:parts][0] is always empty
51
- uris = verify_uris(part[:paths].map { |path| URI.parse(path) })
52
- if uris.empty?
34
+
35
+ begin
36
+ sock = MogileFS::HTTPReader.first(part[:paths], t)
37
+ rescue
53
38
  # part[:paths] may not be valid anymore due to rebalancing, however we
54
39
  # can get_keys on key,<part_nr> and retry paths if all paths fail
55
- part[:paths] = get_paths("#{key.gsub(/^big_info:/, '')},#{part_nr}")
56
- uris = verify_uris(part[:paths].map { |path| URI.parse(path) })
57
- raise MogileFS::Backend::NoDevices if uris.empty?
40
+ part_key = "#{key.sub(/^_big_info:/, '')},#{part_nr}"
41
+ paths = get_paths(part_key)
42
+ paths.empty? and
43
+ raise MogileFS::Backend::NoDevices,
44
+ "no device for key=#{part_key.inspect}", []
45
+ sock = MogileFS::HTTPReader.first(paths, t)
58
46
  end
59
47
 
60
- sock = http_read_sock(uris[0])
61
- md5.reset if md5
62
- w = sysrwloop(sock, wr_io, filter)
63
-
64
- if md5 && md5.hexdigest != part[:md5]
65
- raise MogileFS::ChecksumMismatchError, "#{md5} != #{part[:md5]}"
48
+ begin
49
+ w = MogileFS.io.copy_stream(sock, wr_io)
50
+ ensure
51
+ sock.close
66
52
  end
53
+
54
+ wr_io.respond_to?(:md5_check!) and wr_io.md5_check!(part[:md5])
67
55
  total += w
68
56
  end
69
-
70
- syswrite_full(wr_io, zi.finish) if zi
57
+ wr_io.flush
58
+ total += wr_io.flushed_bytes if wr_io.respond_to?(:flushed_bytes)
71
59
 
72
60
  [ total, info ]
73
61
  end
74
62
 
75
- private
76
-
77
- include MogileFS::Util
78
-
79
- ##
80
- # parses the contents of a _big_info: string or IO object
81
- def parse_info(info = '')
82
- rv = { :parts => [] }
83
- info.each_line do |line|
84
- line.chomp!
85
- case line
86
- when /^(des|type|filename)\s+(.+)$/
87
- rv[$1.to_sym] = $2
88
- when /^compressed\s+([01])$/
89
- rv[:compressed] = ($1 == '1')
90
- when /^(chunks|size)\s+(\d+)$/
91
- rv[$1.to_sym] = $2.to_i
92
- when /^part\s+(\d+)\s+bytes=(\d+)\s+md5=(.+)\s+paths:\s+(.+)$/
93
- rv[:parts][$1.to_i] = {
94
- :bytes => $2.to_i,
95
- :md5 => $3.downcase,
96
- :paths => $4.split(/\s*,\s*/),
97
- }
98
- end
63
+ ##
64
+ # parses the contents of a _big_info: string or IO object
65
+ def bigfile_parse_info(info) # :nodoc:
66
+ rv = { :parts => [] }
67
+ info.each_line do |line|
68
+ line.chomp!
69
+ case line
70
+ when /^(des|type|filename)\s+(.+)$/
71
+ rv[$1.to_sym] = $2
72
+ when /^compressed\s+([01])$/
73
+ rv[:compressed] = ($1 == '1')
74
+ when /^(chunks|size)\s+(\d+)$/
75
+ rv[$1.to_sym] = $2.to_i
76
+ when /^part\s+(\d+)\s+bytes=(\d+)\s+md5=(.+)\s+paths:\s+(.+)$/
77
+ rv[:parts][$1.to_i] = {
78
+ :bytes => $2.to_i,
79
+ :md5 => $3.downcase,
80
+ :paths => $4.split(/\s*,\s*/),
81
+ }
99
82
  end
100
-
101
- rv
102
83
  end
103
84
 
104
- end # module MogileFS::Bigfile
85
+ rv
86
+ end
87
+ end
88
+ require "mogilefs/bigfile/filter"
105
89
 
106
90
  __END__
107
91
  # Copied from mogtool: