ruby-ole 1.2.1 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,85 @@
+
+ module Ole
+ module Types
+ # should have a list of the VT_* variant types, and have all the serialization related code
+ # here... implement dump & load functions like marshalling
+ class Guid
+ SIZE = 16
+
+ def self.load str
+ Types.load_guid str
+ end
+ end
+
+ # see http://poi.apache.org/hpsf/internals.html
+ class PropertySet
+ HEADER_SIZE = 28
+ HEADER_UNPACK = "vvVa#{Guid::SIZE}V"
+ OS_MAP = {
+ 0 => :win16,
+ 1 => :mac,
+ 2 => :win32
+ }
+
+ class Section < Struct.new(:guid, :offset)
+ include Enumerable
+
+ SIZE = Guid::SIZE + 4
+ UNPACK_STR = "a#{Guid::SIZE}v"
+
+ attr_reader :length
+ def initialize str, property_set
+ @property_set = property_set
+ super(*str.unpack(UNPACK_STR))
+ self.guid = Guid.load guid
+ load_header
+ end
+
+ def io
+ @property_set.io
+ end
+
+ def load_header
+ io.seek offset
+ @byte_size, @length = io.read(8).unpack 'V2'
+ end
+
+ def each
+ io.seek offset + 8
+ io.read(length * 8).scan(/.{8}/m).each do |str|
+ id, property_offset = str.unpack 'V2'
+ io.seek offset + property_offset
+ type = io.read(4).unpack('V')[0]
+ yield id, type, io.read(10)
+ end
+ self
+ end
+
+ def properties
+ to_a
+ end
+ end
+
+ attr_reader :io, :signature, :unknown, :os, :guid, :sections
+ def initialize io
+ @io = io
+ load_header io.read(HEADER_SIZE)
+ load_section_list io.read(@num_sections * Section::SIZE)
+ # expect no gap between last section and start of data.
+ Log.warn "gap between section list and property data" unless io.pos == @sections.map(&:offset).min
+ end
+
+ def load_header str
+ @signature, @unknown, @os_id, @guid, @num_sections = str.unpack HEADER_UNPACK
+ # should i check that unknown == 0? it usually is. so is the guid actually
+ @guid = Guid.load @guid
+ @os = OS_MAP[@os_id] || Log.warn("unknown operating system id #{@os_id}")
+ end
+
+ def load_section_list str
+ @sections = str.scan(/.{#{Section::SIZE}}/m).map { |str| Section.new str, self }
+ end
+ end
+ end
+ end
+
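
To make the new property set parser above concrete, here is a minimal usage sketch. It is not part of the diff: the require path, the file name and the "\005SummaryInformation" stream name are assumptions; PropertySet itself only needs a seekable IO positioned at the start of a property set stream.

    require 'ole/storage'
    require 'ole/property_set'   # assumed require path for the new file above

    Ole::Storage.open 'example.doc' do |ole|
      stream = ole.root / "\005SummaryInformation"   # conventional property set stream
      next unless stream
      stream.open do |io|
        ps = Ole::Types::PropertySet.new io
        ps.sections.each do |section|
          # each section yields [property id, VT type id, first 10 raw bytes];
          # decoding of the VT_* values is not implemented in this version
          section.each { |id, type, data| puts "#{id} (vt #{type}): #{data.inspect}" }
        end
      end
    end
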
@@ -1,12 +1,35 @@
-
- # move to support?
- class IO # :nodoc:
- def self.copy src, dst
- until src.eof?
- buf = src.read(4096)
- dst.write buf
+ class IOModeString
+ def initialize mode='r'
+ @mode = mode
+ if @mode['b']
+ @binary = true
+ @mode = @mode.sub 'b', ''
+ else
+ @binary = false
+ end
+ if @mode[/\+$/]
+ @plus = true
+ @mode = @mode.sub(/\+$/, '')
+ else
+ @plus = false
  end
  end
+
+ def explicit_binary?
+ @binary
+ end
+
+ def binary?
+ RUBY_PLATFORM !~ /win/ or @binary
+ end
+
+ def to_s
+ @mode
+ end
+
+ def inspect
+ "#<#{self.class}:#{to_s.inspect}>"
+ end
  end
 
  #
@@ -49,13 +72,14 @@ end
  #
  class RangesIO
  attr_reader :io, :ranges, :size, :pos
- # +io+ is the parent io object that we are wrapping.
+ # +io+:: the parent io object that we are wrapping.
  #
- # +ranges+ are byte offsets, either
+ # +ranges+:: byte offsets, either:
  # 1. an array of ranges [1..2, 4..5, 6..8] or
  # 2. an array of arrays, where the second is length [[1, 1], [4, 1], [6, 2]] for the above
  # (think the way String indexing works)
- # The +ranges+ provide sequential slices of the file that will be read. they can overlap.
+ #
+ # NOTE: the +ranges+ can overlap.
  def initialize io, ranges, opts={}
  @opts = {:close_parent => false}.merge opts
  @io = io
@@ -67,10 +91,28 @@ class RangesIO
  @pos = 0
  end
 
+ # add block form. TODO add test for this
+ def self.open(*args, &block)
+ ranges_io = new(*args)
+ if block_given?
+ begin; yield ranges_io
+ ensure; ranges_io.close
+ end
+ else
+ ranges_io
+ end
+ end
+
  def pos= pos, whence=IO::SEEK_SET
- # FIXME support other whence values
- raise NotImplementedError, "#{whence.inspect} not supported" unless whence == IO::SEEK_SET
- # just a simple pos calculation. invalidate buffers if we had them
+ case whence
+ when IO::SEEK_SET
+ when IO::SEEK_CUR
+ pos += @pos
+ when IO::SEEK_END
+ pos = @size + pos
+ else raise Errno::EINVAL
+ end
+ raise Errno::EINVAL unless (0...@size) === pos
  @pos = pos
  end
 
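
A small sketch of the documented ranges format together with the block form added just above. The values and the StringIO backing are invented for illustration; inside the gem a RangesIO normally wraps the OLE file itself, and the require path for the renamed file is an assumption.

    require 'stringio'
    require 'ole/ranges_io'   # assumed require path

    base = StringIO.new '0123456789abcdef'
    # [offset, length] pairs; per the comment above, equivalent to [1..2, 4..5, 6..8]
    io = RangesIO.new base, [[1, 1], [4, 1], [6, 2]]
    io.size    # => 4
    io.read    # => "1467" - the three slices stitched together
    io.close

    # the new block form closes the RangesIO on exit (not the parent io, by default)
    RangesIO.open base, [[1, 1], [4, 1], [6, 2]] do |io|
      io.pos = 2   # positions are logical, i.e. within the stitched-together stream
      io.read      # => "67"
    end
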
@@ -81,19 +123,19 @@ class RangesIO
  @io.close if @opts[:close_parent]
  end
 
- def range_and_offset pos
- off = nil
- r = ranges.inject(0) do |total, r|
- to = total + r[1]
- if pos <= to
- off = pos - total
- break r
+ # returns the [+offset+, +size+], pair inorder to read/write at +pos+
+ # (like a partial range), and its index.
+ def offset_and_size pos
+ total = 0
+ ranges.each_with_index do |(offset, size), i|
+ if pos <= total + size
+ diff = pos - total
+ return [offset + diff, size - diff], i
  end
- to
+ total += size
  end
  # should be impossible for any valid pos, (0...size) === pos
- raise "unable to find range for pos #{pos.inspect}" unless off
- [r, off]
+ raise ArgumentError, "no range for pos #{pos.inspect}"
  end
 
  def eof?
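
For clarity, a worked call of the new offset_and_size, using the same invented ranges as the earlier sketch: the logical position is walked through the ranges until it lands inside one, and the remaining partial range plus that range's index come back.

    require 'stringio'
    require 'ole/ranges_io'   # assumed require path, as above

    io = RangesIO.new StringIO.new('0123456789abcdef'), [[1, 1], [4, 1], [6, 2]]
    io.offset_and_size 3   # => [[7, 1], 2]  (1 byte into the [6, 2] range, which has index 2)
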
@@ -103,26 +145,25 @@ class RangesIO
  # read bytes from file, to a maximum of +limit+, or all available if unspecified.
  def read limit=nil
  data = ''
- limit ||= size
- # special case eof
  return data if eof?
- r, off = range_and_offset @pos
- i = ranges.index r
+ limit ||= size
+ partial_range, i = offset_and_size @pos
  # this may be conceptually nice (create sub-range starting where we are), but
  # for a large range array its pretty wasteful. even the previous way was. but
  # i'm not trying to optimize this atm. it may even go to c later if necessary.
- ([[r[0] + off, r[1] - off]] + ranges[i+1..-1]).each do |pos, len|
+ ([partial_range] + ranges[i+1..-1]).each do |pos, len|
  @io.seek pos
  if limit < len
- # FIXME this += isn't correct if there is a read error
- # or something.
- @pos += limit
- break data << @io.read(limit)
+ # convoluted, to handle read errors. s may be nil
+ s = @io.read limit
+ @pos += s.length if s
+ break data << s
  end
- # this can also stuff up. if the ranges are beyond the size of the file, we can get
- # nil here.
- data << @io.read(len)
- @pos += len
+ # convoluted, to handle ranges beyond the size of the file
+ s = @io.read len
+ @pos += s.length if s
+ data << s
+ break if s.length != len
  limit -= len
  end
  data
@@ -133,12 +174,16 @@ class RangesIO
  def truncate size
  raise NotImplementedError, 'truncate not supported'
  end
- # why not? :)
- alias size= :truncate
+
+ # using explicit forward instead of an alias now for overriding.
+ # should override truncate.
+ def size= size
+ truncate size
+ end
 
  def write data
  # short cut. needed because truncate 0 may return no ranges, instead of empty range,
- # thus range_and_offset fails.
+ # thus offset_and_size fails.
  return 0 if data.empty?
  data_pos = 0
  # if we don't have room, we can use the truncate hook to make more space.
@@ -146,13 +191,11 @@ class RangesIO
  begin
  truncate @pos + data.length
  rescue NotImplementedError
- # FIXME maybe warn instead, then just truncate the data?
- raise "unable to satisfy write of #{data.length} bytes"
+ raise IOError, "unable to grow #{inspect} to write #{data.length} bytes"
  end
  end
- r, off = range_and_offset @pos
- i = ranges.index r
- ([[r[0] + off, r[1] - off]] + ranges[i+1..-1]).each do |pos, len|
+ partial_range, i = offset_and_size @pos
+ ([partial_range] + ranges[i+1..-1]).each do |pos, len|
  @io.seek pos
  if data_pos + len > data.length
  chunk = data[data_pos..-1]
@@ -168,17 +211,24 @@ class RangesIO
  data_pos
  end
 
- # this will be generalised to a module later
- def each_read blocksize=4096
- yield read(blocksize) until eof?
+ # i can wrap it in a buffered io stream that
+ # provides gets, and appropriately handle pos,
+ # truncate. mostly added just to past the tests.
+ # FIXME
+ def gets
+ s = read 1024
+ i = s.index "\n"
+ @pos -= s.length - (i+1)
+ s[0..i]
  end
+ alias readline :gets
 
  def inspect
  # the rescue is for empty files
- pos, len = *(range_and_offset(@pos)[0] rescue [nil, nil])
+ pos, len = (@ranges[offset_and_size(@pos).last] rescue [nil, nil])
  range_str = pos ? "#{pos}..#{pos+len}" : 'nil'
- "#<#{self.class} io=#{io.inspect} size=#@size pos=#@pos "\
- "current_range=#{range_str}>"
+ "#<#{self.class} io=#{io.inspect}, size=#@size, pos=#@pos, "\
+ "range=#{range_str}>"
  end
  end
 
@@ -2,13 +2,11 @@
 
  $: << File.dirname(__FILE__) + '/..'
 
- require 'stringio'
  require 'tempfile'
 
  require 'ole/base'
  require 'ole/types'
- # not strictly ole related
- require 'ole/io_helpers'
+ require 'ole/ranges_io'
 
  module Ole # :nodoc:
  #
@@ -53,27 +51,22 @@ module Ole # :nodoc:
  #
  # = TODO
  #
- # 1. tests. lock down how things work at the moment - mostly good.
- # create from scratch works now, as does copying in a subtree of another doc, so
- # ole embedded attachment serialization works now. i can save embedded xls in an msg
- # into a separate file, and open it. this was a goal. now i would want to implemenet
- # to_mime conversion for embedded attachments, that serializes them to ole, but handles
- # some separately like various meta file types as plain .wmf attachments perhaps. this
- # will give pretty good .eml's from emails with embedded attachments.
- # the other todo is .rtf output, with full support for embedded ole objects...
- # 2. lots of tidying up
- # - main FIXME's in this regard are:
- # * the custom header cruft for Header and Dirent needs some love.
- # * i have a number of classes doing load/save combos: Header, AllocationTable, Dirent,
- # and, in a manner of speaking, but arguably different, Storage itself.
- # they have differing api's which would be nice to clean.
- # AllocationTable::Big must be created aot now, as it is used for all subsequent reads.
- # * ole types need work, can't serialize datetime at the moment.
- # 3. need to fix META_BAT support in #flush.
+ # * the custom header cruft for Header and Dirent needs some love.
+ # * i have a number of classes doing load/save combos: Header, AllocationTable, Dirent,
+ # and, in a manner of speaking, but arguably different, Storage itself.
+ # they have differing api's which would be nice to clean.
+ # AllocationTable::Big must be created aot now, as it is used for all subsequent reads.
+ # * need to fix META_BAT support in #flush.
  #
  class Storage
- VERSION = '1.2.1'
+ # thrown for any bogus OLE file errors.
+ class FormatError < StandardError # :nodoc:
+ end
+
+ VERSION = '1.2.2'
 
+ # options used at creation time
+ attr_reader :opts
  # The top of the ole tree structure
  attr_reader :root
  # The tree structure in its original flattened form. only valid after #load, or #flush.
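
The new FormatError gives callers something specific to rescue when a file is not valid OLE2 (previously a bare RuntimeError string was raised from Header#validate!). A minimal sketch, with an invented file name:

    begin
      Ole::Storage.open 'not-an-ole-file.bin' do |ole|
        # ...
      end
    rescue Ole::Storage::FormatError => e
      warn "not an OLE2 structured storage file: #{e.message}"
    end
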
@@ -86,15 +79,20 @@ module Ole # :nodoc:
 
  # maybe include an option hash, and allow :close_parent => true, to be more general.
  # +arg+ should be either a file, or an +IO+ object, and needs to be seekable.
- def initialize arg, mode=nil
+ def initialize arg, mode=nil, opts={}
+ opts, mode = mode, nil if Hash === mode
+ opts = {:update_timestamps => true}.merge(opts)
+ @opts = opts
+
  # get the io object
  @close_parent, @io = if String === arg
  [true, open(arg, mode || 'rb')]
  else
- raise 'unable to specify mode string with io object' if mode
+ raise ArgumentError, 'unable to specify mode string with io object' if mode
  [false, arg]
  end
  # do we have this file opened for writing? don't know of a better way to tell
+ # (unless we parse the mode string in the open case)
  @writeable = begin
  @io.flush
  true
@@ -104,11 +102,12 @@ module Ole # :nodoc:
  # silence undefined warning in clear
  @sb_file = nil
  # if the io object has data, we should load it, otherwise start afresh
+ # this should be based on the mode string rather.
  @io.size > 0 ? load : clear
  end
 
- def self.new arg, mode=nil
- ole = super
+ def self.open arg, mode=nil, opts={}
+ ole = new arg, mode, opts
  if block_given?
  begin yield ole
  ensure; ole.close
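
This is the headline API change of the release: #initialize (and the reworked Storage.open) now take an options hash, with :update_timestamps => true as the default. A sketch of both call styles; the file names are invented.

    # mode string plus options
    Ole::Storage.open 'test.ole', 'r+', :update_timestamps => false do |ole|
      # edit streams without the stored create/modify times being rewritten on flush
    end

    # options without a mode string also work - a Hash in the mode position is
    # detected and shuffled into opts, and the mode defaults as before
    Ole::Storage.open 'test.ole', :update_timestamps => false do |ole|
      ole.root.children.each { |dirent| puts dirent.name }
    end
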
@@ -117,24 +116,16 @@ module Ole # :nodoc:
  end
  end
 
- class << self
- # encouraged
- alias open :new
- # deprecated
- alias load :new
- end
-
  # load document from file.
  def load
  # we always read 512 for the header block. if the block size ends up being different,
  # what happens to the 109 fat entries. are there more/less entries?
  @io.rewind
  header_block = @io.read 512
- @header = Header.load header_block
+ @header = Header.new header_block
 
- # create an empty bbat
+ # create an empty bbat.
  @bbat = AllocationTable::Big.new self
- # extra mbat blocks
  mbat_blocks = (0...@header.num_mbat).map { |i| i + @header.mbat_start }
  bbat_chain = (header_block[Header::SIZE..-1] + @bbat.read(mbat_blocks)).unpack 'L*'
  # am i using num_bat in the right way?
@@ -143,7 +134,7 @@ module Ole # :nodoc:
  # get block chain for directories, read it, then split it into chunks and load the
  # directory entries. semantics changed - used to cut at first dir where dir.type == 0
  @dirents = @bbat.read(@header.dirent_start).scan(/.{#{Dirent::SIZE}}/mo).
- map { |str| Dirent.load self, str }.reject { |d| d.type_id == 0 }
+ map { |str| Dirent.new self, str }.reject { |d| d.type_id == 0 }
 
  # now reorder from flat into a tree
  # links are stored in some kind of balanced binary tree
@@ -155,7 +146,7 @@ module Ole # :nodoc:
  return [] if idx == Dirent::EOT
  d = self[idx]
  d.children = to_tree d.child
- raise "directory #{d.inspect} used twice" if d.idx
+ raise FormatError, "directory #{d.inspect} used twice" if d.idx
  d.idx = idx
  to_tree(d.prev) + [d] + to_tree(d.next)
  end
@@ -164,7 +155,7 @@ module Ole # :nodoc:
  @root = @dirents.to_tree.first
  Log.warn "root name was #{@root.name.inspect}" unless @root.name == 'Root Entry'
  unused = @dirents.reject(&:idx).length
- Log.warn "* #{unused} unused directories" if unused > 0
+ Log.warn "#{unused} unused directories" if unused > 0
 
  # FIXME i don't currently use @header.num_sbat which i should
  # hmm. nor do i write it. it means what exactly again?
@@ -174,8 +165,8 @@ module Ole # :nodoc:
  end
 
  def close
- flush if @writeable
  @sb_file.close
+ flush if @writeable
  @io.close if @close_parent
  end
 
@@ -193,48 +184,53 @@ this stuff will ensure reliability of input better. otherwise, its actually wort
  directly after read, to ensure the above is probably acounted for, before subsequent writes possibly
  destroy things.
  =end
+
+ # the flush method is the main "save" method. all file contents are always
+ # written directly to the file by the RangesIO objects, all this method does
+ # is write out all the file meta data - dirents, allocation tables, file header
+ # etc.
  def flush
- # recreate dirs from our tree, split into dirs and big and small files
- @root.type = :root
+ # update root dirent, and flatten dirent tree
  @root.name = 'Root Entry'
  @root.first_block = @sb_file.first_block
  @root.size = @sb_file.size
  @dirents = @root.flatten
 
- # maybe i should move the block form up to RangesIO, and get it for free at all levels.
- # Dirent#open gets block form for free then
- io = RangesIOResizeable.new @bbat, @header.dirent_start
- io.truncate 0
- @dirents.each { |dirent| io.write dirent.save }
- padding = (io.size / @bbat.block_size.to_f).ceil * @bbat.block_size - io.size
- io.write 0.chr * padding
- @header.dirent_start = io.first_block
- io.close
-
- # similarly for the sbat data.
- io = RangesIOResizeable.new @bbat, @header.sbat_start
- io.truncate 0
- io.write @sbat.save
- @header.sbat_start = io.first_block
- @header.num_sbat = @bbat.chain(@header.sbat_start).length
- io.close
+ # serialize the dirents using the bbat
+ RangesIOResizeable.open @bbat, @header.dirent_start do |io|
+ io.truncate 0
+ @dirents.each { |dirent| io.write dirent.to_s }
+ padding = (io.size / @bbat.block_size.to_f).ceil * @bbat.block_size - io.size
+ io.write 0.chr * padding
+ @header.dirent_start = io.first_block
+ end
 
- # what follows will be slightly more complex for the bat fiddling.
+ # serialize the sbat
+ # perhaps the blocks used by the sbat should be marked with BAT?
+ RangesIOResizeable.open @bbat, @header.sbat_start do |io|
+ io.truncate 0
+ io.write @sbat.to_s
+ @header.sbat_start = io.first_block
+ @header.num_sbat = @bbat.chain(@header.sbat_start).length
+ end
 
  # create RangesIOResizeable hooked up to the bbat. use that to claim bbat blocks using
  # truncate. then when its time to write, convert that chain and some chunk of blocks at
  # the end, into META_BAT blocks. write out the chain, and those meta bat blocks, and its
  # done.
- @bbat.table.map! do |b|
+ # this is perhaps not good, as we reclaim all bat blocks here, which
+ # may include the sbat we just wrote. FIXME
+ @bbat.map! do |b|
  b == AllocationTable::BAT || b == AllocationTable::META_BAT ?
  AllocationTable::AVAIL : b
  end
  io = RangesIOResizeable.new @bbat, AllocationTable::EOC
 
- # use crappy loop for now:
+ # currently we use a loop. this could be better, but basically,
+ # the act of writing out the bat, itself requires blocks which get
+ # recorded in the bat.
  while true
- bbat_data = @bbat.save
- #mbat_data = bbat_data.length / @bbat.block_size * 4
+ bbat_data = @bbat.to_s
  mbat_chain = @bbat.chain io.first_block
  raise NotImplementedError, "don't handle writing out extra META_BAT blocks yet" if mbat_chain.length > 109
  # so we can ignore meta blocks in this calculation:
@@ -247,27 +243,18 @@ destroy things.
  ranges = io.ranges
  mbat_chain = @bbat.chain io.first_block
  io.close
- mbat_chain.each { |b| @bbat.table[b] = AllocationTable::BAT }
+ mbat_chain.each { |b| @bbat[b] = AllocationTable::BAT }
  @header.num_bat = mbat_chain.length
- #p @bbat.truncated_table
- #p ranges
- #p mbat_chain
- # not resizeable!
- io = RangesIO.new @io, ranges
- io.write @bbat.save
- io.close
+
+ # now finally write the bbat, using a not resizable io.
+ RangesIO.open(@io, ranges) { |io| io.write @bbat.to_s }
+
+ # this is the mbat
  mbat_chain += [AllocationTable::AVAIL] * (109 - mbat_chain.length)
  @header.mbat_start = AllocationTable::EOC
  @header.num_mbat = 0
 
  =begin
- # Old save code. remove shortly
-
- bbat_data = new_bbat.save
- # must exist as linear chain stored in header.
- @header.num_bat = (bbat_data.length / new_bbat.block_size.to_f).ceil
- base = io.pos / new_bbat.block_size - 1
- io.write bbat_data
  # now that spanned a number of blocks:
  mbat = (0...@header.num_bat).map { |i| i + base }
  mbat += [AllocationTable::AVAIL] * (109 - mbat.length) if mbat.length < 109
@@ -278,11 +265,9 @@ destroy things.
  io.write other_mbat_data
  =end
 
- @root.type = :dir
-
  # now seek back and write the header out
  @io.seek 0
- @io.write @header.save + mbat_chain.pack('L*')
+ @io.write @header.to_s + mbat_chain.pack('L*')
  @io.flush
  end
 
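
As the new comment above says, stream contents are written through the RangesIO objects as they change; #flush (called from #close when the io is writeable) only rewrites the metadata. In practice a read-write session looks roughly like this sketch, with invented file and stream names:

    Ole::Storage.open 'scratch.ole', 'r+' do |ole|
      dirent = ole.root / 'Contents'
      dirent.open('w') { |io| io.write 'replacement body' } if dirent
    end   # close -> flush: dirents, allocation tables and the header are written out
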
@@ -291,13 +276,9 @@ destroy things.
  Log.warn 'creating new ole storage object on non-writable io' unless @writeable
  @header = Header.new
  @bbat = AllocationTable::Big.new self
- @root = Dirent.new self, :dir
- @root.name = 'Root Entry'
+ @root = Dirent.new self, :type => :root, :name => 'Root Entry'
  @dirents = [@root]
  @root.idx = 0
- @root.children = []
- # size shouldn't display for non-files
- @root.size = 0
  @sb_file.close if @sb_file
  @sb_file = RangesIOResizeable.new @bbat, AllocationTable::EOC
  @sbat = AllocationTable::Small.new self
@@ -310,7 +291,7 @@ destroy things.
  case temp
  when :file; Tempfile.open 'w+', &method(:repack_using_io)
  when :mem; StringIO.open(&method(:repack_using_io))
- else raise "unknown temp backing #{temp.inspect}"
+ else raise ArgumentError, "unknown temp backing #{temp.inspect}"
  end
  end
 
@@ -318,8 +299,8 @@ destroy things.
  @io.rewind
  IO.copy @io, temp_io
  clear
- Storage.open temp_io do |temp_ole|
- temp_ole.root.type = :dir
+ Storage.open temp_io, nil, @opts do |temp_ole|
+ #temp_ole.root.type = :dir
  Dirent.copy temp_ole.root, root
  end
  end
@@ -333,7 +314,12 @@ destroy things.
  "#<#{self.class} io=#{@io.inspect} root=#{@root.inspect}>"
  end
 
+ #
  # A class which wraps the ole header
+ #
+ # Header.new can be both used to load from a string, or to create from
+ # defaults. Serialization is accomplished with the #to_s method.
+ #
  class Header < Struct.new(
  :magic, :clsid, :minor_ver, :major_ver, :byte_order, :b_shift, :s_shift,
  :reserved, :csectdir, :num_bat, :dirent_start, :transacting_signature, :threshold,
@@ -353,23 +339,18 @@ destroy things.
  4096, EOC, 0, EOC, 0
  ]
 
- # 2 basic initializations, from scratch, or from a data string.
- # from scratch will be geared towards creating a new ole object
- def initialize *values
- super(*(values.empty? ? DEFAULT : values))
+ def initialize values=DEFAULT
+ values = values.unpack(PACK) if String === values
+ super(*values)
  validate!
  end
 
- def self.load str
- Header.new(*str.unpack(PACK))
- end
-
- def save
+ def to_s
  to_a.pack PACK
  end
 
  def validate!
- raise "OLE2 signature is invalid" unless magic == MAGIC
+ raise FormatError, "OLE2 signature is invalid" unless magic == MAGIC
  if num_bat == 0 or # is that valid for a completely empty file?
  # not sure about this one. basically to do max possible bat given size of mbat
  num_bat > 109 && num_bat > 109 + num_mbat * (1 << b_shift - 2) or
@@ -379,7 +360,7 @@ destroy things.
  s_shift > b_shift or b_shift <= 6 or b_shift >= 31 or
  # we only handle little endian
  byte_order != "\xfe\xff"
- raise "not valid OLE2 structured storage file"
+ raise FormatError, "not valid OLE2 structured storage file"
  end
  # relaxed this, due to test-msg/qwerty_[1-3]*.msg they all had
  # 3 for this value.
@@ -414,38 +395,41 @@ destroy things.
  # block, and in extra blocks throughout the file as referenced by the meta
  # bat. That chain is linear, as there is no higher level table.
  #
- class AllocationTable
+ # AllocationTable.new is used to create an empty table. It can parse a string
+ # with the #load method. Serialization is accomplished with the #to_s method.
+ #
+ class AllocationTable < Array
  # a free block (I don't currently leave any blocks free), although I do pad out
  # the allocation table with AVAIL to the block size.
  AVAIL = 0xffffffff
  EOC = 0xfffffffe # end of a chain
- # these blocks correspond to the bat, and aren't part of a file, nor available.
- # (I don't currently output these)
+ # these blocks are used for storing the allocation table chains
  BAT = 0xfffffffd
  META_BAT = 0xfffffffc
 
- attr_reader :ole, :io, :table, :block_size
+ attr_reader :ole, :io, :block_size
  def initialize ole
  @ole = ole
- @table = []
+ @sparse = true
+ super()
  end
 
  def load data
- @table = data.unpack('L*')
+ replace data.unpack('L*')
  end
 
- def truncated_table
+ def truncate
  # this strips trailing AVAILs. come to think of it, this has the potential to break
  # bogus ole. if you terminate using AVAIL instead of EOC, like I did before. but that is
  # very broken. however, if a chain ends with AVAIL, it should probably be fixed to EOC
  # at load time.
- temp = @table.reverse
+ temp = reverse
  not_avail = temp.find { |b| b != AVAIL } and temp = temp[temp.index(not_avail)..-1]
  temp.reverse
  end
 
- def save
- table = truncated_table #@table
+ def to_s
+ table = truncate
  # pad it out some
  num = @ole.bbat.block_size / 4
  # do you really use AVAIL? they probably extend past end of file, and may shortly
@@ -454,101 +438,99 @@ destroy things.
  table.pack 'L*'
  end
 
- # rewriting this to be non-recursive. it broke on a large attachment
- # building up the chain, causing a stack error. need tail-call elimination...
- def chain start
+ # rewrote this to be non-recursive as it broke on a large attachment
+ # chain with a stack error
+ def chain idx
  a = []
- idx = start
  until idx >= META_BAT
- raise "broken allocationtable chain" if idx < 0 || idx > @table.length
+ raise FormatError, "broken allocationtable chain" if idx < 0 || idx > length
  a << idx
- idx = @table[idx]
+ idx = self[idx]
  end
  Log.warn "invalid chain terminator #{idx}" unless idx == EOC
  a
  end
 
- def ranges chain, size=nil
- chain = self.chain(chain) unless Array === chain
- blocks_to_ranges chain, size
- end
-
- # Turn a chain (an array given by +chain+) of big blocks, optionally
- # truncated to +size+, into an array of arrays describing the stretches of
+ # Turn a chain (an array given by +chain+) of blocks (optionally
+ # truncated to +size+) into an array of arrays describing the stretches of
  # bytes in the file that it belongs to.
  #
- # Big blocks are of size Ole::Storage::Header#b_size, and are stored
- # directly in the parent file.
- # truncate the chain if required
- # convert chain to ranges of the block size
- # truncate final range if required
-
+ # The blocks are Big or Small blocks depending on the table type.
  def blocks_to_ranges chain, size=nil
+ # truncate the chain if required
  chain = chain[0...(size.to_f / block_size).ceil] if size
+ # convert chain to ranges of the block size
  ranges = chain.map { |i| [block_size * i, block_size] }
+ # truncate final range if required
  ranges.last[1] -= (ranges.length * block_size - size) if ranges.last and size
  ranges
  end
 
+ def ranges chain, size=nil
+ chain = self.chain(chain) unless Array === chain
+ blocks_to_ranges chain, size
+ end
+
  # quick shortcut. chain can be either a head (in which case the table is used to
  # turn it into a chain), or a chain. it is converted to ranges, then to rangesio.
- # its not resizeable or migrateable. it probably could be resizeable though, using
- # self as the bat. but what would the first_block be?
- def open chain, size=nil
- io = RangesIO.new @io, ranges(chain, size)
- if block_given?
- begin yield io
- ensure; io.close
- end
- else io
- end
+ def open chain, size=nil, &block
+ RangesIO.open @io, ranges(chain, size), &block
  end
 
  def read chain, size=nil
  open chain, size, &:read
  end
 
- # ----------------------
+ # catch any method that may add an AVAIL somewhere in the middle, thus invalidating
+ # the @sparse speedup for free_block. annoying using eval, but define_method won't
+ # work for this.
+ # FIXME
+ [:map!, :collect!].each do |name|
+ eval <<-END
+ def #{name}(*args, &block)
+ @sparse = true
+ super
+ end
+ END
+ end
+
+ def []= idx, val
+ @sparse = true if val == AVAIL
+ super
+ end
 
- def get_free_block
- @table.each_index { |i| return i if @table[i] == AVAIL }
- @table.push AVAIL
- @table.length - 1
+ def free_block
+ if @sparse
+ i = index(AVAIL) and return i
+ end
+ @sparse = false
+ push AVAIL
+ length - 1
  end
 
  # must return first_block
- def resize_chain first_block, size
+ def resize_chain blocks, size
  new_num_blocks = (size / block_size.to_f).ceil
- blocks = chain first_block
  old_num_blocks = blocks.length
  if new_num_blocks < old_num_blocks
  # de-allocate some of our old blocks. TODO maybe zero them out in the file???
- (new_num_blocks...old_num_blocks).each { |i| @table[blocks[i]] = AVAIL }
- # if we have a chain, terminate it and return head, otherwise return EOC
- if new_num_blocks > 0
- @table[blocks[new_num_blocks-1]] = EOC
- first_block
- else EOC
- end
+ (new_num_blocks...old_num_blocks).each { |i| self[blocks[i]] = AVAIL }
+ self[blocks[new_num_blocks-1]] = EOC if new_num_blocks > 0
+ blocks.slice! new_num_blocks..-1
  elsif new_num_blocks > old_num_blocks
  # need some more blocks.
  last_block = blocks.last
  (new_num_blocks - old_num_blocks).times do
- block = get_free_block
+ block = free_block
  # connect the chain. handle corner case of blocks being [] initially
- if last_block
- @table[last_block] = block
- else
- first_block = block
- end
+ self[last_block] = block if last_block
+ blocks << block
  last_block = block
- # this is just to inhibit the problem where it gets picked as being a free block
- # again next time around.
- @table[last_block] = EOC
+ self[last_block] = EOC
  end
- first_block
- else first_block
  end
+ # update ranges, and return that also now
+ blocks
  end
 
  class Big < AllocationTable
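
The chain and blocks_to_ranges pair above is the heart of the table: a chain of block indices becomes [offset, length] pairs into the backing file. Restating that arithmetic for one concrete, invented case, independently of the gem:

    block_size = 512
    chain      = [3, 4]        # e.g. self[3] == 4 and self[4] == EOC
    size       = 700           # bytes actually used by the stream

    ranges = chain.map { |i| [block_size * i, block_size] }  # => [[1536, 512], [2048, 512]]
    ranges.last[1] -= ranges.length * block_size - size      # trim the final block to size
    ranges                                                   # => [[1536, 512], [2048, 188]]
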
@@ -587,15 +569,18 @@ destroy things.
  def initialize bat, first_block, size=nil
  @bat = bat
  self.first_block = first_block
- super @bat.io, @bat.ranges(first_block, size)
+ # we know cache the blocks chain, for faster resizing.
+ @blocks = @bat.chain first_block
+ super @bat.io, @bat.ranges(@blocks, size)
  end
 
  def truncate size
  # note that old_blocks is != @ranges.length necessarily. i'm planning to write a
  # merge_ranges function that merges sequential ranges into one as an optimization.
- self.first_block = @bat.resize_chain first_block, size
- @ranges = @bat.ranges first_block, size
+ @bat.resize_chain @blocks, size
+ @ranges = @bat.ranges @blocks, size
  @pos = @size if @pos > size
+ self.first_block = @blocks.empty? ? AllocationTable::EOC : @blocks.first
 
  # don't know if this is required, but we explicitly request our @io to grow if necessary
  # we never shrink it though. maybe this belongs in allocationtable, where smarter decisions
@@ -609,8 +594,7 @@ destroy things.
  end
 
  # like RangesIOResizeable, but Ole::Storage::Dirent specific. provides for migration
- # between bats based on size, and updating the dirent, instead of the ole copy back
- # on close.
+ # between bats based on size, and updating the dirent.
  class RangesIOMigrateable < RangesIOResizeable
  attr_reader :dirent
  def initialize dirent
@@ -620,7 +604,7 @@ destroy things.
  end
 
  def truncate size
  bat = @dirent.ole.bat_for_size size
- if bat != @bat
+ if bat.class != @bat.class
  # bat migration needed! we need to backup some data. the amount of data
  # should be <= @ole.header.threshold, so we can just hold it all in one buffer.
  # backup this
  # was considering separate classes for dirs and files. some methods/attrs only
668
652
  # applicable to one or the other.
669
653
  #
670
- # Note that Dirent is still using a home grown Struct variant, with explicit
671
- # MEMBERS etc. any reason for that still?
654
+ # As with the other classes, #to_s performs the serialization.
672
655
  #
673
- class Dirent
674
- MEMBERS = [
656
+ class Dirent < Struct.new(
675
657
  :name_utf16, :name_len, :type_id, :colour, :prev, :next, :child,
676
658
  :clsid, :flags, # dirs only
677
659
  :create_time_str, :modify_time_str, # files only
678
660
  :first_block, :size, :reserved
679
- ]
661
+ )
662
+ include RecursivelyEnumerable
663
+
680
664
  PACK = 'a64 S C C L3 a16 L a8 a8 L2 a4'
681
665
  SIZE = 128
682
666
  TYPE_MAP = {
@@ -686,6 +670,7 @@ destroy things.
686
670
  2 => :file,
687
671
  5 => :root
688
672
  }
673
+ # something to do with the fact that the tree is supposed to be red-black
689
674
  COLOUR_MAP = {
690
675
  0 => :red,
691
676
  1 => :black
@@ -693,63 +678,70 @@ destroy things.
693
678
  # used in the next / prev / child stuff to show that the tree ends here.
694
679
  # also used for first_block for directory.
695
680
  EOT = 0xffffffff
681
+ DEFAULT = [
682
+ 0.chr * 2, 2, 0, # will get overwritten
683
+ 1, EOT, EOT, EOT,
684
+ 0.chr * 16, 0, nil, nil,
685
+ AllocationTable::EOC, 0, 0.chr * 4
686
+ ]
696
687
 
697
- include Enumerable
698
-
699
- # Dirent's should be created in 1 of 2 ways, either Dirent.new ole, [:dir/:file/:root],
700
- # or Dirent.load '... dirent data ...'
701
- # its a bit clunky, but thats how it is at the moment. you can assign to type, but
702
- # shouldn't.
703
-
688
+ # i think its just used by the tree building
704
689
  attr_accessor :idx
705
690
  # This returns all the children of this +Dirent+. It is filled in
706
691
  # when the tree structure is recreated.
707
692
  attr_accessor :children
708
- attr_reader :ole, :type, :create_time, :modify_time, :name
709
- def initialize ole, type
710
- @ole = ole
711
- # this isn't really good enough. need default values put in there.
712
- @values = [
713
- 0.chr * 2, 2, 0, # will get overwritten
714
- 1, EOT, EOT, EOT,
715
- 0.chr * 16, 0, nil, nil,
716
- AllocationTable::EOC, 0, 0.chr * 4]
717
- # maybe check types here.
718
- @type = type
719
- @create_time = @modify_time = nil
720
- @children = []
721
- if file?
722
- @create_time = Time.now
723
- @modify_time = Time.now
693
+ attr_accessor :name
694
+ attr_reader :ole, :type, :create_time, :modify_time
695
+ def initialize ole, values=DEFAULT, opts={}
696
+ @ole = ole
697
+ values, opts = DEFAULT, values if Hash === values
698
+ values = values.unpack(PACK) if String === values
699
+ super(*values)
700
+
701
+ # extra parsing from the actual struct values
702
+ @name = opts[:name] || Types::FROM_UTF16.iconv(name_utf16[0...name_len].sub(/\x00\x00$/, ''))
703
+ @type = if opts[:type]
704
+ unless TYPE_MAP.values.include?(opts[:type])
705
+ raise ArgumentError, "unknown type #{opts[:type].inspect}"
706
+ end
707
+ opts[:type]
708
+ else
709
+ TYPE_MAP[type_id] or raise FormatError, "unknown type_id #{type_id.inspect}"
724
710
  end
725
- end
726
-
727
- def self.load ole, str
728
- # load should function without the need for the initializer.
729
- dirent = Dirent.allocate
730
- dirent.load ole, str
731
- dirent
732
- end
733
711
 
734
- def load ole, str
735
- @ole = ole
736
- @values = str.unpack PACK
737
- @name = Types::FROM_UTF16.iconv name_utf16[0...name_len].sub(/\x00\x00$/, '')
738
- @type = TYPE_MAP[type_id] or raise "unknown type #{type_id.inspect}"
712
+ # further extra type specific stuff
739
713
  if file?
740
- @create_time = Types.load_time create_time_str
741
- @modify_time = Types.load_time modify_time_str
714
+ default_time = @ole.opts[:update_timestamps] ? Time.now : nil
715
+ @create_time ||= default_time
716
+ @modify_time ||= default_time
717
+ @create_time = Types.load_time(create_time_str) if create_time_str
718
+ @modify_time = Types.load_time(create_time_str) if modify_time_str
719
+ @children = nil
720
+ else
721
+ @create_time = nil
722
+ @modify_time = nil
723
+ self.size = 0 unless @type == :root
724
+ @children = []
742
725
  end
743
726
  end
744
727
 
745
- # only defined for files really. and the above children stuff is only for children.
746
- # maybe i should have some sort of File and Dir class, that subclass Dirents? a dirent
747
- # is just a data holder.
748
- # this can be used for write support if the underlying io object was opened for writing.
749
- # maybe take a mode string argument, and do truncation, append etc stuff.
750
- def open
751
- return nil unless file?
728
+ def open mode='r'
729
+ raise Errno::EISDIR unless file?
752
730
  io = RangesIOMigrateable.new self
731
+ # TODO work on the mode string stuff a bit more.
732
+ # maybe let the io object know about the mode, so it can refuse
733
+ # to work for read/write appropriately. maybe redefine all unusable
734
+ # methods using singleton class to throw errors.
735
+ # for now, i just want to implement truncation on use of 'w'. later,
736
+ # i need to do 'a' etc.
737
+ case mode
738
+ when 'r', 'r+'
739
+ # as i don't enforce reading/writing, nothing changes here
740
+ when 'w'
741
+ io.truncate 0
742
+ else
743
+ raise NotImplementedError, "unsupported mode - #{mode.inspect}"
744
+ end
753
745
  if block_given?
754
746
  begin yield io
755
747
  ensure; io.close
@@ -762,71 +754,48 @@ destroy things.
762
754
  open { |io| io.read limit }
763
755
  end
764
756
 
765
- def dir?
766
- # to count root as a dir.
767
- type != :file
768
- end
769
-
770
757
  def file?
771
758
  type == :file
772
759
  end
773
760
 
774
- def time
775
- # time is nil for streams, otherwise try to parse either of the time pairse (not
776
- # sure of their meaning - created / modified?)
777
- #@time ||= file? ? nil : (Dirent.parse_time(secs1, days1) || Dirent.parse_time(secs2, days2))
778
- create_time || modify_time
761
+ def dir?
762
+ # to count root as a dir.
763
+ !file?
779
764
  end
780
765
 
781
- def each(&block)
782
- @children.each(&block)
766
+ def / name
767
+ children.find { |child| name === child.name }
783
768
  end
784
-
769
+
785
770
  def [] idx
786
- return children[idx] if Integer === idx
787
- # path style look up.
788
- # maybe take another arg to allow creation? or leave that to the filesystem
789
- # add on.
790
- # not sure if '/' is a valid char in an Dirent#name, so no splitting etc at
791
- # this level.
792
- # also what about warning about multiple hits for the same name?
793
- children.find { |child| idx === child.name }
794
- end
795
-
796
- # solution for the above '/' thing for now.
797
- def / path
798
- self[path]
799
- end
800
-
801
- def to_tree
802
- if children and !children.empty?
803
- str = "- #{inspect}\n"
804
- children.each_with_index do |child, i|
805
- last = i == children.length - 1
806
- child.to_tree.split(/\n/).each_with_index do |line, j|
807
- str << " #{last ? (j == 0 ? "\\" : ' ') : '|'}#{line}\n"
808
- end
809
- end
810
- str
811
- else "- #{inspect}\n"
771
+ if String === idx
772
+ warn 'String form of Dirent#[] is deprecated'
773
+ self / idx
774
+ else
775
+ super
812
776
  end
813
777
  end
814
778
 
815
- MEMBERS.each_with_index do |sym, i|
816
- define_method(sym) { @values[i] }
817
- define_method(sym.to_s + '=') { |val| @values[i] = val }
779
+ # move to ruby-msg. and remove from here
780
+ def time
781
+ warn 'Dirent#time is deprecated'
782
+ create_time || modify_time
818
783
  end
819
784
 
820
- def to_a
821
- @values
785
+ def each_child(&block)
786
+ @children.each(&block)
822
787
  end
823
788
 
824
789
  # flattens the tree starting from here into +dirents+. note it modifies its argument.
825
790
  def flatten dirents=[]
826
791
  @idx = dirents.length
827
792
  dirents << self
828
- children.each { |child| child.flatten dirents }
829
- self.child = Dirent.flatten_helper children
793
+ if file?
794
+ self.prev = self.next = self.child = EOT
795
+ else
796
+ children.each { |child| child.flatten dirents }
797
+ self.child = Dirent.flatten_helper children
798
+ end
830
799
  dirents
831
800
  end
832
801
 
@@ -843,29 +812,27 @@ destroy things.
843
812
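
Child lookup after this change, sketched with invented names: Dirent#/ is the supported name lookup (String or Regexp, matched via ===), the String form of #[] still works but emits a deprecation warning, and the old #each has become #each_child.

    Ole::Storage.open 'example.ole' do |ole|
      ole.root / 'Contents'          # preferred lookup; nil if there is no such child
      ole.root['Contents']           # same result, plus a deprecation warning
      ole.root.each_child { |child| puts child.name }
    end
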
  this.idx
844
813
  end
845
814
 
846
- attr_accessor :name, :type
847
- def save
815
+ def to_s
848
816
  tmp = Types::TO_UTF16.iconv(name)
849
817
  tmp = tmp[0, 62] if tmp.length > 62
850
818
  tmp += 0.chr * 2
851
819
  self.name_len = tmp.length
852
820
  self.name_utf16 = tmp + 0.chr * (64 - tmp.length)
853
- begin
854
- self.type_id = TYPE_MAP.to_a.find { |id, name| @type == name }.first
855
- rescue
856
- raise "unknown type #{type.inspect}"
857
- end
821
+ # type_id can perhaps be set in the initializer, as its read only now.
822
+ self.type_id = TYPE_MAP.to_a.find { |id, name| @type == name }.first
858
823
  # for the case of files, it is assumed that that was handled already
859
824
  # note not dir?, so as not to override root's first_block
860
825
  self.first_block = Dirent::EOT if type == :dir
861
- if 0 #file?
862
- #self.create_time_str = ?? #Types.load_time create_time_str
863
- #self.modify_time_str = ?? #Types.load_time modify_time_str
826
+ if file?
827
+ if @ole.opts[:update_timestamps]
828
+ self.create_time_str = Types.save_time @create_time
829
+ self.modify_time_str = Types.save_time Time.now
830
+ end
864
831
  else
865
832
  self.create_time_str = 0.chr * 8
866
833
  self.modify_time_str = 0.chr * 8
867
834
  end
868
- @values.pack PACK
835
+ to_a.pack PACK
869
836
  end
870
837
 
871
838
  def inspect
@@ -875,7 +842,7 @@ destroy things.
  tmp = read 9
  data = tmp.length == 9 ? tmp[0, 5] + '...' : tmp
  str << " size=#{size}" +
- "#{time ? ' time=' + time.to_s.inspect : nil}" +
+ "#{modify_time ? ' modify_time=' + modify_time.to_s.inspect : nil}" +
  " data=#{data.inspect}"
  else
  # there is some dir specific stuff. like clsid, flags.
@@ -887,7 +854,7 @@ destroy things.
  # and for creation of a dirent. don't like the name. is it a file or a directory?
  # assign to type later? io will be empty.
  def new_child type
- child = Dirent.new ole, type
+ child = Dirent.new ole, :type => type
  children << child
  yield child if block_given?
  child
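
Creating a stream from scratch still goes through new_child, which now builds the Dirent with the :type option form shown above. A sketch of writing a brand new file; the file name, stream name and contents are all invented, and the 'w+' storage mode is only one way to start from an empty io.

    Ole::Storage.open 'new.ole', 'w+' do |ole|
      ole.root.new_child :file do |stream|
        stream.name = 'file1'
        stream.open('w') { |io| io.write 'hello world' }
      end
    end
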
@@ -895,7 +862,7 @@ destroy things.
 
  def delete child
  # remove from our child array, so that on reflatten and re-creation of @dirents, it will be gone
- raise "#{child.inspect} not a child of #{self.inspect}" unless @children.delete child
+ raise ArgumentError, "#{child.inspect} not a child of #{self.inspect}" unless @children.delete child
  # free our blocks
  child.open { |io| io.truncate 0 }
  end
@@ -903,7 +870,7 @@ destroy things.
  def self.copy src, dst
  # copies the contents of src to dst. must be the same type. this will throw an
  # error on copying to root. maybe this will recurse too much for big documents??
- raise 'differing types' if src.type == :file and dst.type != :file
+ raise ArgumentError, 'differing types' if src.file? and !dst.file?
  dst.name = src.name
  if src.dir?
  src.children.each do |src_child|
919
886
  end
920
887
  end
921
888
 
922
- if $0 == __FILE__
923
- puts Ole::Storage.open(ARGV[0]) { |ole| ole.root.to_tree }
924
- end
925
-