WriteExcel 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.document +5 -0
- data/.gitignore +21 -0
- data/LICENSE +20 -0
- data/README.rdoc +17 -0
- data/Rakefile +47 -0
- data/VERSION +1 -0
- data/examples/a_simple.rb +42 -0
- data/examples/autofilters.rb +266 -0
- data/examples/bigfile.rb +30 -0
- data/examples/copyformat.rb +51 -0
- data/examples/data_validate.rb +278 -0
- data/examples/date_time.rb +86 -0
- data/examples/demo.rb +118 -0
- data/examples/diag_border.rb +35 -0
- data/examples/formats.rb +489 -0
- data/examples/header.rb +136 -0
- data/examples/hidden.rb +28 -0
- data/examples/hyperlink.rb +42 -0
- data/examples/images.rb +52 -0
- data/examples/merge1.rb +39 -0
- data/examples/merge2.rb +44 -0
- data/examples/merge3.rb +65 -0
- data/examples/merge4.rb +82 -0
- data/examples/merge5.rb +79 -0
- data/examples/protection.rb +46 -0
- data/examples/regions.rb +52 -0
- data/examples/repeat.rb +42 -0
- data/examples/stats.rb +75 -0
- data/examples/stocks.rb +80 -0
- data/examples/tab_colors.rb +30 -0
- data/lib/WriteExcel.rb +30 -0
- data/lib/WriteExcel/biffwriter.rb +259 -0
- data/lib/WriteExcel/chart.rb +217 -0
- data/lib/WriteExcel/excelformula.y +138 -0
- data/lib/WriteExcel/excelformulaparser.rb +573 -0
- data/lib/WriteExcel/format.rb +1108 -0
- data/lib/WriteExcel/formula.rb +986 -0
- data/lib/WriteExcel/olewriter.rb +322 -0
- data/lib/WriteExcel/properties.rb +250 -0
- data/lib/WriteExcel/storage_lite.rb +590 -0
- data/lib/WriteExcel/workbook.rb +2602 -0
- data/lib/WriteExcel/worksheet.rb +6378 -0
- data/spec/WriteExcel_spec.rb +7 -0
- data/spec/spec.opts +1 -0
- data/spec/spec_helper.rb +9 -0
- data/test/tc_all.rb +31 -0
- data/test/tc_biff.rb +104 -0
- data/test/tc_chart.rb +22 -0
- data/test/tc_example_match.rb +1280 -0
- data/test/tc_format.rb +1264 -0
- data/test/tc_formula.rb +63 -0
- data/test/tc_ole.rb +110 -0
- data/test/tc_storage_lite.rb +102 -0
- data/test/tc_workbook.rb +115 -0
- data/test/tc_worksheet.rb +115 -0
- data/test/test_00_IEEE_double.rb +14 -0
- data/test/test_01_add_worksheet.rb +12 -0
- data/test/test_02_merge_formats.rb +58 -0
- data/test/test_04_dimensions.rb +397 -0
- data/test/test_05_rows.rb +182 -0
- data/test/test_06_extsst.rb +80 -0
- data/test/test_11_date_time.rb +484 -0
- data/test/test_12_date_only.rb +506 -0
- data/test/test_13_date_seconds.rb +486 -0
- data/test/test_21_escher.rb +629 -0
- data/test/test_22_mso_drawing_group.rb +739 -0
- data/test/test_23_note.rb +78 -0
- data/test/test_24_txo.rb +80 -0
- data/test/test_26_autofilter.rb +327 -0
- data/test/test_27_autofilter.rb +144 -0
- data/test/test_28_autofilter.rb +174 -0
- data/test/test_29_process_jpg.rb +131 -0
- data/test/test_30_validation_dval.rb +82 -0
- data/test/test_31_validation_dv_strings.rb +131 -0
- data/test/test_32_validation_dv_formula.rb +211 -0
- data/test/test_40_property_types.rb +191 -0
- data/test/test_41_properties.rb +238 -0
- data/test/test_42_set_properties.rb +430 -0
- data/test/ts_all.rb +34 -0
- metadata +154 -0
data/lib/WriteExcel/workbook.rb
@@ -0,0 +1,2602 @@

#require "WriteExcel/biffwriter"
#require "WriteExcel/olewriter"
#require "WriteExcel/formula"
#require "WriteExcel/format"
#require "WriteExcel/worksheet"
#require "WriteExcel/properties"
#require "digest/md5"
#require "WriteExcel/storage_lite"

class Workbook < BIFFWriter
  BOF = 11
  EOF = 4
  SheetName = "Sheet"
  NonAscii = /[^!"#\$%&'\(\)\*\+,\-\.\/\:\;<=>\?@0-9A-Za-z_\[\\\]^` ~\0\n]/

  attr_accessor :date_system, :str_unique, :biff_only
  attr_reader :encoding, :url_format, :parser, :tempdir, :date_1904, :compatibility
  attr_reader :summary
  attr_accessor :activesheet, :firstsheet, :str_total, :str_unique, :str_table
  attr_reader :formats, :xf_index, :worksheets, :extsst_buckets, :extsst_bucket_size
  attr_reader :data
  attr_writer :mso_size
  attr_accessor :localtime

  ###############################################################################
  #
  # new()
  #
  # Constructor. Creates a new Workbook object from a BIFFwriter object.
  #
  def initialize(filename, default_formats = {})
    super()
    @filename = filename
    @default_formats = default_formats
    @parser = Formula.new(@byte_order)
    @tempdir = nil
    @date_1904 = false
    @sheet =

    @activesheet = 0
    @firstsheet = 0
    @selected = 0
    @xf_index = 0
    @fileclosed = false
    @biffsize = 0
    @sheetname = "Sheet"
    @url_format = ''
    @codepage = 0x04E4
    @worksheets = []
    @sheetnames = []
    @formats = []
    @palette = []
    @biff_only = 0

    @internal_fh = 0
    @fh_out = ""

    @str_total = 0
    @str_unique = 0
    @str_table = {}
    @str_array = []
    @str_block_sizes = []
    @extsst_offsets = []
    @extsst_buckets = 0
    @extsst_bucket_size = 0

    @ext_ref_count = 0
    @ext_refs = {}

    @mso_clusters = []
    @mso_size = 0

    @hideobj = 0
    @compatibility = 0

    @add_doc_properties = 0
    @summary = ''
    @doc_summary = ''
    @localtime = Time.now

    # Add the in-built style formats and the default cell format.
    add_format(:type => 1)                        # 0 Normal
    add_format(:type => 1)                        # 1 RowLevel 1
    add_format(:type => 1)                        # 2 RowLevel 2
    add_format(:type => 1)                        # 3 RowLevel 3
    add_format(:type => 1)                        # 4 RowLevel 4
    add_format(:type => 1)                        # 5 RowLevel 5
    add_format(:type => 1)                        # 6 RowLevel 6
    add_format(:type => 1)                        # 7 RowLevel 7
    add_format(:type => 1)                        # 8 ColLevel 1
    add_format(:type => 1)                        # 9 ColLevel 2
    add_format(:type => 1)                        # 10 ColLevel 3
    add_format(:type => 1)                        # 11 ColLevel 4
    add_format(:type => 1)                        # 12 ColLevel 5
    add_format(:type => 1)                        # 13 ColLevel 6
    add_format(:type => 1)                        # 14 ColLevel 7
    add_format(default_formats)                   # 15 Cell XF
    add_format(:type => 1, :num_format => 0x2B)   # 16 Comma
    add_format(:type => 1, :num_format => 0x29)   # 17 Comma[0]
    add_format(:type => 1, :num_format => 0x2C)   # 18 Currency
    add_format(:type => 1, :num_format => 0x2A)   # 19 Currency[0]
    add_format(:type => 1, :num_format => 0x09)   # 20 Percent

    # Add the default format for hyperlinks
    @url_format = add_format(:color => 'blue', :underline => 1)

    # Convert the filename to a filehandle to pass to the OLE writer when the
    # file is closed. If the filename is a reference it is assumed that it is
    # a valid filehandle.
    #
    if filename.kind_of?(String) && filename != ''
      @fh_out = open(filename, "wb")
      @internal_fh = 1
    else
      print "Workbook#new - filename required."
      exit
    end

    # Set colour palette.
    set_palette_xl97

    get_checksum_method
  end
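
  # Illustrative usage sketch (not part of the gem source): assuming the gem's
  # classes are loaded, e.g. via require 'WriteExcel', the constructor above
  # opens the output file, seeds XF formats 0..20 plus the blue underlined
  # hyperlink format, and installs the Excel 97 palette.
  #
  #   workbook  = Workbook.new("demo.xls")
  #   worksheet = workbook.add_worksheet        # named "Sheet1" by default
  #   worksheet.write(0, 0, "Hello")            # Worksheet API, see worksheet.rb
  #   workbook.close                            # assembles and writes the OLE file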

  ###############################################################################
  #
  # _get_checksum_method.
  #
  # Check for modules available to calculate image checksum. Excel uses MD4 but
  # MD5 will also work.
  #
  # ------- cxn03651 add -------
  # md5 can use in ruby. so, @checksum_method is always 3.

  def get_checksum_method
    @checksum_method = 3
  end

  ###############################################################################
  #
  # close()
  #
  # Calls finalization methods and explicitly close the OLEwriter file
  # handle.
  #
  def close
    return if @fileclosed # Prevent close() from being called twice.

    @fileclosed = true
    return store_workbook
  end

  ###############################################################################
  #
  # sheets(slice,...)
  #
  # An accessor for the _worksheets[] array
  #
  # Returns: an optionally sliced list of the worksheet objects in a workbook.
  #
  def sheets(*args)
    if args.empty?
      @worksheets
    else
      ary = []
      args.each do |i|
        ary << @worksheets[i]
      end
      ary
    end
  end

  ###############################################################################
  #
  # add_worksheet($name, $encoding)
  #
  # Add a new worksheet to the Excel workbook.
  #
  # Returns: reference to a worksheet object
  #
  def add_worksheet(name = '', encoding = 0)
    name, encoding = check_sheetname(name, encoding)

    index = @worksheets.size

    worksheet = Worksheet.new(
      self,
      name,
      index,
      encoding
    )
    @worksheets[index] = worksheet   # Store ref for iterator
    @sheetnames[index] = name        # Store EXTERNSHEET names
    # @parser->set_ext_sheets($name, $index) # Store names in Formula.pm
    return worksheet
  end
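
  # Illustrative sketch (not from workbook.rb): when no name is given,
  # add_worksheet() falls back to SheetName + (index + 1), so sheets are named
  # Sheet1, Sheet2, ... in creation order, and sheets() can slice by index.
  #
  #   sheet_a = workbook.add_worksheet           # "Sheet1"
  #   sheet_b = workbook.add_worksheet('Data')   # explicit name
  #   workbook.sheets(1).first.equal?(sheet_b)   # => true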

  ###############################################################################
  #
  # add_chart_ext($filename, $name)
  #
  # Add an externally created chart.
  #
  #
  def add_chart_ext(filename, name, encoding = nil)
    index = @worksheets.size

    name, encoding = check_sheetname(name, encoding)

    init_data = [
      filename,
      name,
      index,
      encoding,
      @activesheet,
      @firstsheet
    ]

    worksheet = Chart.new(*init_data)
    @worksheets[index] = worksheet      # Store ref for iterator
    @sheetnames[index] = name           # Store EXTERNSHEET names
    @parser.set_ext_sheets(name, index) # Store names in Formula.pm
    return worksheet
  end

  ###############################################################################
  #
  # _check_sheetname($name, $encoding)
  #
  # Check for valid worksheet names. We check the length, if it contains any
  # invalid characters and if the name is unique in the workbook.
  #
  def check_sheetname(name, encoding = 0)
    name = '' if name.nil?
    limit = encoding != 0 ? 62 : 31
    invalid_char = %r![\[\]:*?/\\]!

    # Supply default "Sheet" name if none has been defined.
    index = @worksheets.size
    sheetname = @sheetname

    if name == ""
      name = sheetname + (index+1).to_s
      encoding = 0
    end

    # Check that sheetname is <= 31 (1 or 2 byte chars). Excel limit.
    raise "Sheetname #{name} must be <= 31 chars" if name.length > limit

    # Check that Unicode sheetname has an even number of bytes
    if encoding == 1 and name.length % 2
      raise 'Odd number of bytes in Unicode worksheet name:' + name
    end

    # Check that sheetname doesn't contain any invalid characters
    if encoding != 1 and name =~ invalid_char
      # Check ASCII names
      raise 'Invalid character []:*?/\\ in worksheet name: ' + name
    else
      # Extract any 8bit clean chars from the UTF16 name and validate them.
      str = name.dup
      while str =~ /../m
        hi, lo = $~[0].unpack('aa')
        if hi == "\0" and lo =~ invalid_char
          raise 'Invalid character []:*?/\\ in worksheet name: ' + name
        end
        str = $~.post_match
      end
    end

    # Handle utf8 strings
    if name =~ NonAscii
      name = NKF.nkf('-w16B0 -m0 -W', name)
      encoding = 1
    end

    # Check that the worksheet name doesn't already exist since this is a fatal
    # error in Excel 97. The check must also exclude case insensitive matches
    # since the names 'Sheet1' and 'sheet1' are equivalent. The tests also have
    # to take the encoding into account.
    #
    @worksheets.each do |worksheet|
      name_a = name
      encd_a = encoding
      name_b = worksheet.name
      encd_b = worksheet.encoding
      error = 0;

      if encd_a == 0 and encd_b == 0
        error = 1 if name_a.downcase == name_b.downcase
      elsif encd_a == 0 and encd_b == 1
        name_a = name_a.unpack("C*").pack("n*")
        error = 1 if name_a.downcase == name_b.downcase
      elsif encd_a == 1 and encd_b == 0
        name_b = name_b.unpack("C*").pack("n*")
        error = 1 if name_a.downcase == name_b.downcase
      elsif encd_a == 1 and encd_b == 1
        # # We can do a true case insensitive test with Perl 5.8 and utf8.
        # if ($] >= 5.008) {
        #   $name_a = Encode::decode("UTF-16BE", $name_a);
        #   $name_b = Encode::decode("UTF-16BE", $name_b);
        #   $error = 1 if lc($name_a) eq lc($name_b);
        # }
        # else {
        #   # We can't easily do a case insensitive test of the UTF16 names.
        #   # As a special case we check if all of the high bytes are nulls and
        #   # then do an ASCII style case insensitive test.
        #
        #   # Strip out the high bytes (funkily).
        #   my $hi_a = grep {ord} $name_a =~ /(.)./sg;
        #   my $hi_b = grep {ord} $name_b =~ /(.)./sg;
        #
        #   if ($hi_a or $hi_b) {
        #     $error = 1 if $name_a eq $name_b;
        #   }
        #   else {
        #     $error = 1 if lc($name_a) eq lc($name_b);
        #   }
        # }
        # }
        # # If any of the cases failed we throw the error here.
      end
      if error != 0
        raise "Worksheet name '#{name}', with case ignored, " +
              "is already in use"
      end
    end
    return [name, encoding]
  end
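
  # Illustrative sketch of the rules enforced above (each call shown
  # independently, not part of the gem source): over-long names, names with
  # []:*?/\ characters and case-insensitive duplicates all raise.
  #
  #   workbook.add_worksheet('A' * 32)      # raises: more than 31 chars
  #   workbook.add_worksheet('Bad[name]')   # raises: invalid character
  #   workbook.add_worksheet('Sheet1')
  #   workbook.add_worksheet('sheet1')      # raises: already in use, case ignored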

  ###############################################################################
  #
  # add_format(%properties)
  #
  # Add a new format to the Excel workbook. This adds an XF record and
  # a FONT record. Also, pass any properties to the Format::new().
  #
  def add_format(formats = {})
    format = Format.new(@xf_index, @default_formats.merge(formats))
    @xf_index += 1
    @formats.push format # Store format reference
    return format
  end
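
  # Illustrative sketch (property names assumed from the Format class, not
  # confirmed here): properties passed to add_format() are merged over the
  # default_formats hash given to Workbook.new, and each call consumes the
  # next XF index.
  #
  #   bold  = workbook.add_format(:bold => 1)
  #   money = workbook.add_format(:num_format => '#,##0.00', :color => 'red')
  #   worksheet.write(0, 0, 'Total', bold)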

  ###############################################################################
  #
  # compatibility_mode()
  #
  # Set the compatibility mode.
  #
  # Excel doesn't require every possible Biff record to be present in a file.
  # In particular if the indexing records INDEX, ROW and DBCELL aren't present
  # it just ignores the fact and reads the cells anyway. This is also true of
  # the EXTSST record. Gnumeric and OOo also take this approach. This allows
  # WriteExcel to ignore these records in order to minimise the amount of data
  # stored in memory. However, other third party applications that read Excel
  # files often expect these records to be present. In "compatibility mode"
  # WriteExcel writes these records and tries to be as close to an Excel
  # generated file as possible.
  #
  # This requires additional data to be stored in memory until the file is
  # about to be written. This incurs a memory and speed penalty and may not be
  # suitable for very large files.
  #
  def compatibility_mode(mode = 1)
    unless sheets.empty?
      raise "compatibility_mode() must be called before add_worksheet()"
    end
    @compatibility = mode
  end

  ###############################################################################
  #
  # set_1904()
  #
  # Set the date system: false = 1900 (the default), true = 1904
  #
  def set_1904(mode = true)
    unless sheets.empty?
      raise "set_1904() must be called before add_worksheet()"
    end
    @date_1904 = mode
  end

  ###############################################################################
  #
  # set_custom_color()
  #
  # Change the RGB components of the elements in the colour palette.
  #
  def set_custom_color(index = nil, red = nil, green = nil, blue = nil)
    # Match a HTML #xxyyzz style parameter
    if !red.nil? && red =~ /^#(\w\w)(\w\w)(\w\w)/
      red = $1.hex
      green = $2.hex
      blue = $3.hex
    end

    # Check that the colour index is the right range
    if index < 8 || index > 64
      raise "Color index #{index} outside range: 8 <= index <= 64";
    end

    # Check that the colour components are in the right range
    if (red < 0 || red > 255) ||
       (green < 0 || green > 255) ||
       (blue < 0 || blue > 255)
      raise "Color component outside range: 0 <= color <= 255";
    end

    index -=8 # Adjust colour index (wingless dragonfly)

    # Set the RGB value
    @palette[index] = [red, green, blue, 0]

    return index +8
  end
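
  # Illustrative sketch of the two calling styles accepted above (not part of
  # the gem source):
  #
  #   workbook.set_custom_color(40, 255, 102, 0)   # palette slot 40 := RGB(255, 102, 0)
  #   workbook.set_custom_color(41, '#FF6600')     # same colour via an HTML string
  #   orange = workbook.add_format(:color => 40)   # use the redefined slot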

  ###############################################################################
  #
  # set_palette_xl97()
  #
  # Sets the colour palette to the Excel 97+ default.
  #
  def set_palette_xl97
    @palette = [
      [0x00, 0x00, 0x00, 0x00],   # 8
      [0xff, 0xff, 0xff, 0x00],   # 9
      [0xff, 0x00, 0x00, 0x00],   # 10
      [0x00, 0xff, 0x00, 0x00],   # 11
      [0x00, 0x00, 0xff, 0x00],   # 12
      [0xff, 0xff, 0x00, 0x00],   # 13
      [0xff, 0x00, 0xff, 0x00],   # 14
      [0x00, 0xff, 0xff, 0x00],   # 15
      [0x80, 0x00, 0x00, 0x00],   # 16
      [0x00, 0x80, 0x00, 0x00],   # 17
      [0x00, 0x00, 0x80, 0x00],   # 18
      [0x80, 0x80, 0x00, 0x00],   # 19
      [0x80, 0x00, 0x80, 0x00],   # 20
      [0x00, 0x80, 0x80, 0x00],   # 21
      [0xc0, 0xc0, 0xc0, 0x00],   # 22
      [0x80, 0x80, 0x80, 0x00],   # 23
      [0x99, 0x99, 0xff, 0x00],   # 24
      [0x99, 0x33, 0x66, 0x00],   # 25
      [0xff, 0xff, 0xcc, 0x00],   # 26
      [0xcc, 0xff, 0xff, 0x00],   # 27
      [0x66, 0x00, 0x66, 0x00],   # 28
      [0xff, 0x80, 0x80, 0x00],   # 29
      [0x00, 0x66, 0xcc, 0x00],   # 30
      [0xcc, 0xcc, 0xff, 0x00],   # 31
      [0x00, 0x00, 0x80, 0x00],   # 32
      [0xff, 0x00, 0xff, 0x00],   # 33
      [0xff, 0xff, 0x00, 0x00],   # 34
      [0x00, 0xff, 0xff, 0x00],   # 35
      [0x80, 0x00, 0x80, 0x00],   # 36
      [0x80, 0x00, 0x00, 0x00],   # 37
      [0x00, 0x80, 0x80, 0x00],   # 38
      [0x00, 0x00, 0xff, 0x00],   # 39
      [0x00, 0xcc, 0xff, 0x00],   # 40
      [0xcc, 0xff, 0xff, 0x00],   # 41
      [0xcc, 0xff, 0xcc, 0x00],   # 42
      [0xff, 0xff, 0x99, 0x00],   # 43
      [0x99, 0xcc, 0xff, 0x00],   # 44
      [0xff, 0x99, 0xcc, 0x00],   # 45
      [0xcc, 0x99, 0xff, 0x00],   # 46
      [0xff, 0xcc, 0x99, 0x00],   # 47
      [0x33, 0x66, 0xff, 0x00],   # 48
      [0x33, 0xcc, 0xcc, 0x00],   # 49
      [0x99, 0xcc, 0x00, 0x00],   # 50
      [0xff, 0xcc, 0x00, 0x00],   # 51
      [0xff, 0x99, 0x00, 0x00],   # 52
      [0xff, 0x66, 0x00, 0x00],   # 53
      [0x66, 0x66, 0x99, 0x00],   # 54
      [0x96, 0x96, 0x96, 0x00],   # 55
      [0x00, 0x33, 0x66, 0x00],   # 56
      [0x33, 0x99, 0x66, 0x00],   # 57
      [0x00, 0x33, 0x00, 0x00],   # 58
      [0x33, 0x33, 0x00, 0x00],   # 59
      [0x99, 0x33, 0x00, 0x00],   # 60
      [0x99, 0x33, 0x66, 0x00],   # 61
      [0x33, 0x33, 0x99, 0x00],   # 62
      [0x33, 0x33, 0x33, 0x00]    # 63
    ]
    return 0
  end

  ###############################################################################
  #
  # set_tempdir()
  #
  # Change the default temp directory used by _initialize() in Worksheet.pm.
  #
  def set_tempdir(dir = '')
    raise "#{dir} is not a valid directory" if dir != '' && !FileTest.directory?(dir)
    raise "set_tempdir must be called before add_worksheet" unless sheets.empty?

    @tempdir = dir
  end

  ###############################################################################
  #
  # set_codepage()
  #
  # See also the _store_codepage method. This is used to store the code page, i.e.
  # the character set used in the workbook.
  #
  def set_codepage(type = 1)
    if type == 2
      @codepage = 0x8000
    else
      @codepage = 0x04E4
    end
  end

  ###############################################################################
  #
  # set_properties()
  #
  # Set the document properties such as Title, Author etc. These are written to
  # property sets in the OLE container.
  #
  def set_properties(params)
    # Ignore if no args were passed.
    return -1 if !params.kind_of?(Hash) || params.empty?

    # List of valid input parameters.
    properties = {
      :codepage    => [0x0001, 'VT_I2'      ],
      :title       => [0x0002, 'VT_LPSTR'   ],
      :subject     => [0x0003, 'VT_LPSTR'   ],
      :author      => [0x0004, 'VT_LPSTR'   ],
      :keywords    => [0x0005, 'VT_LPSTR'   ],
      :comments    => [0x0006, 'VT_LPSTR'   ],
      :last_author => [0x0008, 'VT_LPSTR'   ],
      :created     => [0x000C, 'VT_FILETIME'],
      :category    => [0x0002, 'VT_LPSTR'   ],
      :manager     => [0x000E, 'VT_LPSTR'   ],
      :company     => [0x000F, 'VT_LPSTR'   ],
      :utf8        => 1
    }

    # Check for valid input parameters.
    params.each_key do |k|
      unless properties.has_key?(k)
        raise "Unknown parameter '#{k}' in set_properties()";
      end
    end

    # Set the creation time unless specified by the user.
    unless params.has_key?(:created)
      params[:created] = @localtime
    end

    #
    # Create the SummaryInformation property set.
    #

    # Get the codepage of the strings in the property set.
    strings = ["title", "subject", "author", "keywords", "comments", "last_author"]
    params[:codepage] = get_property_set_codepage(params, strings)

    # Create an array of property set values.
    property_sets = []
    strings.unshift("codepage")
    strings.push("created")
    strings.each do |property|
      if params.has_key?(property.to_sym) && !params[property.to_sym].nil?
        property_sets.push(
          [ properties[property.to_sym][0],
            properties[property.to_sym][1],
            params[property.to_sym] ]
        )
      end
    end

    # Pack the property sets.
    @summary = create_summary_property_set(property_sets)

    #
    # Create the DocSummaryInformation property set.
    #

    # Get the codepage of the strings in the property set.
    strings = ["category", "manager", "company"]
    params[:codepage] = get_property_set_codepage(params, strings)

    # Create an array of property set values.
    property_sets = []

    ["codepage", "category", "manager", "company"].each do |property|
      if params.has_key?(property.to_sym) && !params[property.to_sym].nil?
        property_sets.push(
          [ properties[property.to_sym][0],
            properties[property.to_sym][1],
            params[property.to_sym] ]
        )
      end
    end

    # Pack the property sets.
    @doc_summary = create_doc_summary_property_set(property_sets)

    # Set a flag for when the files is written.
    @add_doc_properties = 1
  end
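
  # Illustrative sketch (not part of the gem source): unknown keys raise, and
  # :created defaults to @localtime, so a minimal call looks like
  #
  #   workbook.set_properties(
  #     :title    => 'Monthly report',
  #     :author   => 'J. Bloggs',
  #     :comments => 'Generated with WriteExcel'
  #   )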

  ###############################################################################
  #
  # _get_property_set_codepage()
  #
  # Get the character codepage used by the strings in a property set. If one of
  # the strings used is utf8 then the codepage is marked as utf8. Otherwise
  # Latin 1 is used (although in our case this is limited to 7bit ASCII).
  #
  def get_property_set_codepage(params, strings)
    # Allow for manually marked utf8 strings.
    unless params[:utf8].nil?
      return 0xFDE9
    else
      strings.each do |string|
        next unless params.has_key?(string.to_sym)
        return 0xFDE9 if params[string.to_sym] =~ NonAscii
      end
      return 0x04E4; # Default codepage, Latin 1.
    end
  end

  ###############################################################################
  #
  # _store_workbook()
  #
  # Assemble worksheets into a workbook and send the BIFF data to an OLE
  # storage.
  #
  def store_workbook
    # Add a default worksheet if non have been added.
    add_worksheet if @worksheets.empty?

    # Calculate size required for MSO records and update worksheets.
    calc_mso_sizes

    # Ensure that at least one worksheet has been selected.
    @worksheets[0].select if @activesheet == 0

    # Calculate the number of selected worksheet tabs and call the finalization
    # methods for each worksheet
    @worksheets.each do |sheet|
      @selected += 1 if sheet.selected != 0
      sheet.active = 1 if sheet.index == @activesheet
    end

    # Add Workbook globals
    store_bof(0x0005)
    store_codepage
    store_window1
    store_hideobj
    store_1904
    store_all_fonts
    store_all_num_formats
    store_all_xfs
    store_all_styles
    store_palette

    # Calculate the offsets required by the BOUNDSHEET records
    calc_sheet_offsets

    # Add BOUNDSHEET records.
    @worksheets.each do |sheet|
      store_boundsheet(
        sheet.name,
        sheet.offset,
        sheet.type,
        sheet.hidden,
        sheet.encoding
      )
    end

    # NOTE: If any records are added between here and EOF the
    # _calc_sheet_offsets() should be updated to include the new length.
    store_country
    if @ext_ref_count != 0
      store_supbook
      store_externsheet
      store_names
    end
    add_mso_drawing_group
    store_shared_strings
    store_extsst

    # End Workbook globals
    store_eof

    # Store the workbook in an OLE container
    return store_OLE_file
  end

  ###############################################################################
  #
  # _store_OLE_file()
  #
  # Store the workbook in an OLE container using the default handler or using
  # OLE::Storage_Lite if the workbook data is > ~ 7MB.
  #
  def store_OLE_file
    maxsize = 7_087_104
    # maxsize = 1

    if @add_doc_properties == 0 && @biffsize <= maxsize
      # Write the OLE file using OLEwriter if data <= 7MB
      ole = OLEWriter.new(@fh_out)

      # Write the BIFF data without the OLE container for testing.
      ole.biff_only = @biff_only

      # Indicate that we created the filehandle and want to close it.
      ole.internal_fh = @internal_fh

      ole.set_size(@biffsize)
      ole.write_header

      while tmp = get_data
        ole.write(tmp)
      end

      @worksheets.each do |worksheet|
        while tmp = worksheet.get_data
          ole.write(tmp)
        end
      end

      return ole.close
    else
      # Write the OLE file using ruby-ole if data > 7MB

      # Create the Workbook stream.
      stream = 'Workbook'.unpack('C*').pack('v*')
      workbook = OLEStorageLitePPSFile.new(stream)
      workbook.set_file # use tempfile

      while tmp = get_data
        workbook.append(tmp)
      end

      @worksheets.each do |worksheet|
        while tmp = worksheet.get_data
          workbook.append(tmp)
        end
      end

      streams = []
      streams << workbook

      # Create the properties streams, if any.
      if @add_doc_properties != 0
        stream = "\5SummaryInformation".unpack('C*').pack('v*')
        summary = OLEStorageLitePPSFile.new(stream, @summary)
        streams << summary

        stream = "\5DocumentSummaryInformation".unpack('C*').pack('v*')
        summary = OLEStorageLitePPSFile.new(stream, @doc_summary)
        streams << summary
      end

      # Create the OLE root document and add the substreams.
      localtime = @localtime.to_a[0..5]
      localtime[4] -= 1 # month
      localtime[5] -= 1900

      ole_root = OLEStorageLitePPSRoot.new(
        localtime,
        localtime,
        streams
      )
      ole_root.save(@filename)

      # Close the filehandle if it was created internally.
      return @fh_out.close if @internal_fh != 0
    end
  end

  ###############################################################################
  #
  # _calc_sheet_offsets()
  #
  # Calculate Worksheet BOF offsets records for use in the BOUNDSHEET records.
  #
  def calc_sheet_offsets
    _bof = 12
    _eof = 4

    offset = @datasize

    # Add the length of the COUNTRY record
    offset += 8

    # Add the length of the SST and associated CONTINUEs
    offset += calculate_shared_string_sizes

    # Add the length of the EXTSST record.
    offset += calculate_extsst_size

    # Add the length of the SUPBOOK, EXTERNSHEET and NAME records
    offset += calculate_extern_sizes

    # Add the length of the MSODRAWINGGROUP records including an extra 4 bytes
    # for any CONTINUE headers. See _add_mso_drawing_group_continue().
    mso_size = @mso_size
    mso_size += 4 * Integer((mso_size -1) / Float(@limit))
    offset += mso_size

    @worksheets.each do |sheet|
      offset += _bof + sheet.name.length
    end

    offset += _eof
    @worksheets.each do |sheet|
      sheet.offset = offset
      sheet.close(*@sheetnames)
      offset += sheet.datasize
    end

    @biffsize = offset
  end
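
  # Worked example of the arithmetic above (illustrative only): with a single
  # sheet named "Sheet1", the BOUNDSHEET block contributes _bof + name.length =
  # 12 + 6 bytes, so that sheet's BOF offset is
  #
  #   @datasize + 8 (COUNTRY) + SST/CONTINUE sizes + EXTSST + extern sizes +
  #   MSODRAWINGGROUP (+ CONTINUE padding) + 18 + 4 (_eof)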

  ###############################################################################
  #
  # _calc_mso_sizes()
  #
  # Calculate the MSODRAWINGGROUP sizes and the indexes of the Worksheet
  # MSODRAWING records.
  #
  # In the following SPID is shape id, according to Escher nomenclature.
  #
  def calc_mso_sizes
    mso_size       = 0    # Size of the MSODRAWINGGROUP record
    start_spid     = 1024 # Initial spid for each sheet
    max_spid       = 1024 # spidMax
    num_clusters   = 1    # cidcl
    shapes_saved   = 0    # cspSaved
    drawings_saved = 0    # cdgSaved
    clusters       = []

    process_images

    # Add Bstore container size if there are images.
    mso_size += 8 unless @images_data.empty?

    # Iterate through the worksheets, calculate the MSODRAWINGGROUP parameters
    # and space required to store the record and the MSODRAWING parameters
    # required by each worksheet.
    #
    @worksheets.each do |sheet|
      num_images     = sheet.num_images
      image_mso_size = sheet.image_mso_size
      num_comments   = sheet.prepare_comments
      num_charts     = sheet.prepare_charts
      num_filters    = sheet.filter_count

      next if num_images + num_comments + num_charts + num_filters == 0

      # Include 1 parent MSODRAWING shape, per sheet, in the shape count.
      num_shapes = 1 + num_images + num_comments +
                       num_charts + num_filters
      shapes_saved += num_shapes
      mso_size += image_mso_size

      # Add a drawing object for each sheet with comments.
      drawings_saved += 1

      # For each sheet start the spids at the next 1024 interval.
      max_spid = 1024 * (1 + Integer((max_spid -1)/1024.0))
      start_spid = max_spid

      # Max spid for each sheet and eventually for the workbook.
      max_spid += num_shapes

      # Store the cluster ids
      i = num_shapes
      while i > 0
        num_clusters += 1
        mso_size += 8
        size = i > 1024 ? 1024 : i

        clusters.push([drawings_saved, size])
        i -= 1024
      end

      # Pass calculated values back to the worksheet
      sheet.object_ids = [start_spid, drawings_saved,
                          num_shapes, max_spid -1]
    end


    # Calculate the MSODRAWINGGROUP size if we have stored some shapes.
    mso_size += 86 if mso_size != 0 # Smallest size is 86+8=94

    @mso_size = mso_size
    @mso_clusters = [
      max_spid, num_clusters, shapes_saved,
      drawings_saved, clusters
    ]
  end

  ###############################################################################
  #
  # _process_images()
  #
  # We need to process each image in each worksheet and extract information.
  # Some of this information is stored and used in the Workbook and some is
  # passed back into each Worksheet. The overall size for the image related
  # BIFF structures in the Workbook is calculated here.
  #
  # MSO size = 8 bytes for bstore_container +
  #            44 bytes for blip_store_entry +
  #            25 bytes for blip
  #          = 77 + image size.
  #
  def process_images
    images_seen = {}
    image_data = []
    previous_images = []
    image_id = 1;
    images_size = 0;

    @worksheets.each do |sheet|
      next if sheet.prepare_images == 0

      num_images = 0
      image_mso_size = 0

      sheet.images_array.each do |image|
        filename = image[2]
        num_images += 1

        #
        # For each Worksheet image we get a structure like this
        # [
        #   $row,
        #   $col,
        #   $name,
        #   $x_offset,
        #   $y_offset,
        #   $scale_x,
        #   $scale_y,
        # ]
        #
        # And we add additional information:
        #
        #   $image_id,
        #   $type,
        #   $width,
        #   $height;

        if images_seen[filename].nil?
          # TODO should also match seen images based on checksum.

          # Open the image file and import the data.
          fh = open(filename, "rb")
          raise "Couldn't import #{filename}: #{$!}" unless fh

          # Slurp the file into a string and do some size calcs.
          # my $data = do {local $/; <$fh>};
          data = fh.read
          size = data.length
          checksum1 = image_checksum(data, image_id)
          checksum2 = checksum1
          ref_count = 1

          # Process the image and extract dimensions.
          # Test for PNGs...
          if data.unpack('x A3')[0] == 'PNG'
            type, width, height = process_png(data)
          # Test for JFIF and Exif JPEGs...
          elsif ( data.unpack('n')[0] == 0xFFD8 &&
                  (data.unpack('x6 A4')[0] == 'JFIF' ||
                   data.unpack('x6 A4')[0] == 'Exif')
                )
            type, width, height = process_jpg(data, filename)
          # Test for BMPs...
          elsif data.unpack('A2')[0] == 'BM'
            type, width, height = process_bmp(data, filename)
            # The 14 byte header of the BMP is stripped off.
            data[0, 13] = ''

            # A checksum of the new image data is also required.
            checksum2 = image_checksum(data, image_id, image_id)

            # Adjust size -14 (header) + 16 (extra checksum).
            size += 2
          else
            raise "Unsupported image format for file: #{filename}\n"
          end

          # Push the new data back into the Worksheet array;
          image.push(image_id, type, width, height)

          # Also store new data for use in duplicate images.
          previous_images.push([image_id, type, width, height])

          # Store information required by the Workbook.
          image_data.push([ref_count, type, data, size,
                           checksum1, checksum2])

          # Keep track of overall data size.
          images_size += size +61; # Size for bstore container.
          image_mso_size += size +69; # Size for dgg container.

          images_seen[filename] = image_id
          image_id += 1
          fh.close
        else
          # We've processed this file already.
          index = images_seen[filename] -1

          # Increase image reference count.
          image_data[index][0] += 1

          # Add previously calculated data back onto the Worksheet array.
          # $image_id, $type, $width, $height
          a_ref = sheet.images_array[index]
          image.concat(previous_images[index])
        end
      end

      # Store information required by the Worksheet.
      sheet.num_images = num_images
      sheet.image_mso_size = image_mso_size

    end


    # Store information required by the Workbook.
    @images_size = images_size
    @images_data = image_data # Store the data for MSODRAWINGGROUP.

  end

  ###############################################################################
  #
  # _image_checksum()
  #
  # Generate a checksum for the image using whichever module is available..The
  # available modules are checked in _get_checksum_method(). Excel uses an MD4
  # checksum but any other will do. In the event of no checksum module being
  # available we simulate a checksum using the image index.
  #
  def image_checksum(data, index1, index2 = 0)
    if @checksum_method == 1
      # Digest::MD4
      # return Digest::MD4::md4_hex($data);
    elsif @checksum_method == 2
      # Digest::Perl::MD4
      # return Digest::Perl::MD4::md4_hex($data);
    elsif @checksum_method == 3
      # Digest::MD5
      return Digest::MD5.hexdigest(data)
    else
      # Default
      return sprintf('%016X%016X', index2, index1)
    end
  end

  ###############################################################################
  #
  # _process_png()
  #
  # Extract width and height information from a PNG file.
  #
  def process_png(data)
    type   = 6 # Excel Blip type (MSOBLIPTYPE).
    width  = data[16, 4].unpack("N")[0]
    height = data[20, 4].unpack("N")[0]

    return [type, width, height]
  end
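
  # Illustrative note: the offsets above follow from the fixed PNG layout, an
  # 8 byte signature, a 4 byte IHDR length and the "IHDR" tag, so width and
  # height are the big-endian 32-bit words at bytes 16 and 20. Self-contained
  # check with a hypothetical file name:
  #
  #   png  = File.open('logo.png', 'rb') { |f| f.read }
  #   w, h = png[16, 8].unpack('N2')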

  ###############################################################################
  #
  # _process_bmp()
  #
  # Extract width and height information from a BMP file.
  #
  # Most of these checks came from the old Worksheet::_process_bitmap() method.
  #
  def process_bmp(data, filename)
    type = 7 # Excel Blip type (MSOBLIPTYPE).

    # Check that the file is big enough to be a bitmap.
    if data.length <= 0x36
      raise "#{filename} doesn't contain enough data."
    end

    # Read the bitmap width and height. Verify the sizes.
    width, height = data.unpack("x18 V2")

    if width > 0xFFFF
      raise "#{filename}: largest image width #{width} supported is 65k."
    end

    if height > 0xFFFF
      raise "#{filename}: largest image height supported is 65k."
    end

    # Read the bitmap planes and bpp data. Verify them.
    planes, bitcount = data.unpack("x26 v2")

    if bitcount != 24
      raise "#{filename} isn't a 24bit true color bitmap."
    end

    if planes != 1
      raise "#{filename}: only 1 plane supported in bitmap image."
    end

    # Read the bitmap compression. Verify compression.
    compression = data.unpack("x30 V")

    if compression != 0
      raise "#{filename}: compression not supported in bitmap image."
    end

    return [type, width, height]
  end

  ###############################################################################
  #
  # _process_jpg()
  #
  # Extract width and height information from a JPEG file.
  #
  def process_jpg(data, filename)
    type = 5 # Excel Blip type (MSOBLIPTYPE).

    offset = 2;
    data_length = data.length

    # Search through the image data to find the 0xFFC0 marker. The height and
    # width are contained in the data for that sub element.
    while offset < data_length
      marker = data[offset, 2].unpack("n")
      marker = marker[0]
      length = data[offset+2, 2].unpack("n")
      length = length[0]

      if marker == 0xFFC0
        height = data[offset+5, 2].unpack("n")
        height = height[0]
        width = data[offset+7, 2].unpack("n")
        width = width[0]
        break
      end

      offset = offset + length + 2
      break if marker == 0xFFDA
    end

    if height.nil?
      raise "#{filename}: no size data found in image.\n"
    end

    return [type, width, height]
  end
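
  # Illustrative sketch of the marker walk above: each JPEG segment is a 2 byte
  # marker plus a 2 byte big-endian length, and the SOF0 (0xFFC0) payload holds
  # precision, height, width, which is why the sizes are read at offset+5 and
  # offset+7. Assuming data holds a JPEG string:
  #
  #   marker, seg_len = data[offset, 4].unpack('n2')
  #   height, width   = data[offset + 5, 4].unpack('n2') if marker == 0xFFC0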

  ###############################################################################
  #
  # _store_all_fonts()
  #
  # Store the Excel FONT records.
  #
  def store_all_fonts
    format = @formats[15] # The default cell format.
    font = format.get_font

    # Fonts are 0-indexed. According to the SDK there is no index 4,
    (0..3).each do
      append(font)
    end

    # Add the font for comments. This isn't connected to any XF format.
    tmp = Format.new(nil, :font => 'Tahoma', :size => 8)
    font = tmp.get_font
    append(font)

    # Iterate through the XF objects and write a FONT record if it isn't the
    # same as the default FONT and if it hasn't already been used.
    #
    fonts = {}
    index = 6 # The first user defined FONT

    key = format.get_font_key # The default font for cell formats.
    fonts[key] = 0 # Index of the default font

    # Fonts that are marked as '_font_only' are always stored. These are used
    # mainly for charts and may not have an associated XF record.

    @formats.each do |format|
      key = format.get_font_key
      if format.font_only == 0 and !fonts[key].nil?
        # FONT has already been used
        format.font_index = fonts[key]
      else
        # Add a new FONT record

        if format.font_only == 0
          fonts[key] = index
        end

        format.font_index = index
        index += 1
        font = format.get_font
        append(font)
      end
    end
  end

  ###############################################################################
  #
  # _store_all_num_formats()
  #
  # Store user defined numerical formats i.e. FORMAT records
  #
  def store_all_num_formats
    num_formats = {}
    index = 164 # User defined FORMAT records start from 0xA4

    # Iterate through the XF objects and write a FORMAT record if it isn't a
    # built-in format type and if the FORMAT string hasn't already been used.
    #
    @formats.each do |format|
      num_format = format.num_format
      encoding = format.num_format_enc

      # Check if $num_format is an index to a built-in format.
      # Also check for a string of zeros, which is a valid format string
      # but would evaluate to zero.
      #
      unless num_format.to_s =~ /^0+\d/
        next if num_format.to_s =~ /^\d+$/ # built-in
      end

      if num_formats[num_format]
        # FORMAT has already been used
        format.num_format = num_formats[num_format]
      else
        # Add a new FORMAT
        num_formats[num_format] = index
        format.num_format = index
        store_num_format(num_format, index, encoding)
        index += 1
      end
    end
  end
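
  # Illustrative note (not part of the gem source): custom number formats are
  # deduplicated by string and assigned FORMAT indices from 164 (0xA4) upwards,
  # e.g.
  #
  #   f1 = workbook.add_format(:num_format => 'dd/mm/yyyy')   # gets index 164
  #   f2 = workbook.add_format(:num_format => 'dd/mm/yyyy')   # reuses index 164
  #   f3 = workbook.add_format(:num_format => '0.00%')        # gets index 165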

  ###############################################################################
  #
  # _store_all_xfs()
  #
  # Write all XF records.
  #
  def store_all_xfs
    @formats.each do |format|
      xf = format.get_xf
      append(xf)
    end
  end

  ###############################################################################
  #
  # _store_all_styles()
  #
  # Write all STYLE records.
  #
  def store_all_styles
    # Excel adds the built-in styles in alphabetical order.
    built_ins = [
      [0x03, 16], # Comma
      [0x06, 17], # Comma[0]
      [0x04, 18], # Currency
      [0x07, 19], # Currency[0]
      [0x00, 0],  # Normal
      [0x05, 20]  # Percent

      # We don't deal with these styles yet.
      #[0x08, 21], # Hyperlink
      #[0x02, 8],  # ColLevel_n
      #[0x01, 1],  # RowLevel_n
    ]

    built_ins.each do |aref|
      type = aref[0]
      xf_index = aref[1]

      store_style(type, xf_index)
    end
  end

  ###############################################################################
  #
  # _store_names()
  #
  # Write the NAME record to define the print area and the repeat rows and cols.
  #
  def store_names
    index = 0

    # Create the print area NAME records
    @worksheets.each do |worksheet|

      key = "#{index}:#{index}"
      ref = @ext_refs[key]
      index += 1

      # Write a Name record if Autofilter has been defined
      if worksheet.filter_count != 0
        store_name_short(
          worksheet.index,
          0x0D, # NAME type = Filter Database
          ref,
          worksheet.filter_area[0],
          worksheet.filter_area[1],
          worksheet.filter_area[2],
          worksheet.filter_area[3],
          1 # Hidden
        )
      end

      # Write a Name record if the print area has been defined
      if !worksheet.print_rowmin.nil? && worksheet.print.rowmin != 0
        store_name_short(
          worksheet.index,
          0x06, # NAME type = Print_Area
          ref,
          worksheet.print_rowmin,
          worksheet.print_rowmax,
          worksheet.print_colmin,
          worksheet.print_colmax
        )
      end

    end

    index = 0

    # Create the print title NAME records
    @worksheets.each do |worksheet|

      rowmin = worksheet.title_rowmin
      rowmax = worksheet.title_rowmax
      colmin = worksheet.title_colmin
      colmax = worksheet.title_colmax
      key = "#{index}:#{index}"
      ref = @ext_refs[key]
      index += 1

      # Determine if row + col, row, col or nothing has been defined
      # and write the appropriate record
      #
      if rowmin && colmin
        # Row and column titles have been defined.
        # Row title has been defined.
        store_name_long(
          worksheet.index,
          0x07, # NAME type = Print_Titles
          ref,
          rowmin,
          rowmax,
          colmin,
          colmax
        )
      elsif rowmin
        # Row title has been defined.
        store_name_short(
          worksheet.index,
          0x07, # NAME type = Print_Titles
          ref,
          rowmin,
          rowmax,
          0x00,
          0xff
        )
      elsif colmin
        # Column title has been defined.
        store_name_short(
          worksheet.index,
          0x07, # NAME type = Print_Titles
          ref,
          0x0000,
          0xffff,
          colmin,
          colmax
        )
      else
        # Nothing left to do
      end
    end
  end

  ###############################################################################
  ###############################################################################
  #
  # BIFF RECORDS
  #


  ###############################################################################
  #
  # _store_window1()
  #
  # Write Excel BIFF WINDOW1 record.
  #
  def store_window1
    record = 0x003D # Record identifier
    length = 0x0012 # Number of bytes to follow

    xWn  = 0x0000 # Horizontal position of window
    yWn  = 0x0000 # Vertical position of window
    dxWn = 0x355C # Width of window
    dyWn = 0x30ED # Height of window

    grbit     = 0x0038    # Option flags
    ctabsel   = @selected # Number of workbook tabs selected
    wTabRatio = 0x0258    # Tab to scrollbar ratio

    itabFirst = @firstsheet # 1st displayed worksheet
    itabCur   = @activesheet # Active worksheet

    header = [record, length].pack("vv")
    data = [
      xWn, yWn, dxWn, dyWn,
      grbit,
      itabCur, itabFirst,
      ctabsel, wTabRatio
    ].pack("vvvvvvvvv")

    append(header, data)
  end

  ###############################################################################
  #
  # _store_boundsheet()
  #    my $sheetname = $_[0]; # Worksheet name
  #    my $offset    = $_[1]; # Location of worksheet BOF
  #    my $type      = $_[2]; # Worksheet type
  #    my $hidden    = $_[3]; # Worksheet hidden flag
  #    my $encoding  = $_[4]; # Sheet name encoding
  #
  # Writes Excel BIFF BOUNDSHEET record.
  #
  def store_boundsheet(sheetname, offset, type, hidden, encoding)
    record = 0x0085                  # Record identifier
    length = 0x08 + sheetname.length # Number of bytes to follow

    cch = sheetname.length # Length of sheet name

    grbit = type | hidden

    # Character length is num of chars not num of bytes
    cch /= 2 if encoding != 0

    # Change the UTF-16 name from BE to LE
    sheetname = sheetname.unpack('v*').pack('n*') if encoding != 0

    header = [record, length].pack("vv")
    data = [offset, grbit, cch, encoding].pack("VvCC")

    append(header, data, sheetname)
  end

  ###############################################################################
  #
  # _store_style()
  #    type     = $_[0] # Built-in style
  #    xf_index = $_[1] # Index to style XF
  #
  # Write Excel BIFF STYLE records.
  #
  def store_style(type, xf_index)
    record = 0x0293 # Record identifier
    length = 0x0004 # Bytes to follow

    level = 0xff # Outline style level

    xf_index |= 0x8000 # Add flag to indicate built-in style.

    header = [record, length].pack("vv")
    data = [xf_index, type, level].pack("vCC")

    append(header, data)
  end

  ###############################################################################
  #
  # _store_num_format()
  #    my $format   = $_[0]; # Custom format string
  #    my $ifmt     = $_[1]; # Format index code
  #    my $encoding = $_[2]; # Char encoding for format string
  #
  # Writes Excel FORMAT record for non "built-in" numerical formats.
  #
  def store_num_format(format, ifmt, encoding)
    format = format.to_s unless format.kind_of?(String)
    record = 0x041E # Record identifier
    # length        # Number of bytes to follow
    # Char length of format string
    cch = format.length

    # Handle utf8 strings
    if format =~ NonAscii
      format = NKF.nkf('-w16B0 -m0 -W', format)
      encoding = 1
    end

    # Handle Unicode format strings.
    if encoding == 1
      raise "Uneven number of bytes in Unicode font name" if cch % 2 != 0
      cch /= 2 if encoding != 0
      format = format.unpack('n*').pack('v*')
    end

    # Special case to handle Euro symbol, 0x80, in non-Unicode strings.
    if encoding == 0 and format =~ /\x80/
      format = format.unpack('C*').pack('v*')
      format.gsub!(/\x80\x00/, "\xAC\x20")
      encoding = 1
    end

    length = 0x05 + format.length

    header = [record, length].pack("vv")
    data = [ifmt, cch, encoding].pack("vvC")

    append(header, data, format)
  end

  ###############################################################################
  #
  # _store_1904()
  #
  # Write Excel 1904 record to indicate the date system in use.
  #
  def store_1904
    record = 0x0022 # Record identifier
    length = 0x0002 # Bytes to follow

    f1904 = @date_1904 ? 1 : 0 # Flag for 1904 date system

    header = [record, length].pack("vv")
    data = [f1904].pack("v")

    append(header, data)
  end
|
1553
|
+
|
1554
|
+
  ###############################################################################
  #
  # _store_supbook()
  #
  # Write BIFF record SUPBOOK to indicate that the workbook contains external
  # references, in our case, formula, print area and print title refs.
  #
  def store_supbook
    record     = 0x01AE   # Record identifier
    length     = 0x0004   # Number of bytes to follow

    ctabs      = @worksheets.size   # Number of worksheets
    stVirtPath = 0x0401             # Encoded workbook filename

    header = [record, length].pack("vv")
    data   = [ctabs, stVirtPath].pack("vv")

    append(header, data)
  end

  ###############################################################################
  #
  # _store_externsheet()
  #
  # Writes the Excel BIFF EXTERNSHEET record. These references are used by
  # formulas. TODO NAME record is required to define the print area and the
  # repeat rows and columns.
  #
  def store_externsheet
    record = 0x0017   # Record identifier

    # Get the external refs
    ext_refs = @ext_refs
    ext      = ext_refs.keys.sort

    # Change the external refs from stringified "1:1" to [1, 1]
    ext.map! {|e| e.split(/:/).map! {|e| e.to_i} }

    cxti  = ext.size   # Number of Excel XTI structures
    rgxti = ''         # Array of XTI structures

    # Write the XTI structs
    ext.each do |e|
      rgxti += [0, e[0], e[1]].pack("vvv")
    end

    data   = [cxti].pack("v") + rgxti
    header = [record, data.length].pack("vv")

    append(header, data)
  end

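  # Sketch of the XTI packing above for two hypothetical refs "0:0" and "1:2"
  # (a restatement of the loop, not library code):
  #
  #   ext   = ['0:0', '1:2'].map { |e| e.split(':').map { |i| i.to_i } }
  #   rgxti = ext.map { |first, last| [0, first, last].pack('vvv') }.join
  #   data  = [ext.size].pack('v') + rgxti   # 2 + 2*6 = 14 bytes
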
  ###############################################################################
  #
  # _store_name_short()
  #     index   = shift    # Sheet index
  #     type    = shift
  #     ext_ref = shift    # TODO
  #     rowmin  = $_[0]    # Start row
  #     rowmax  = $_[1]    # End row
  #     colmin  = $_[2]    # Start column
  #     colmax  = $_[3]    # end column
  #     hidden  = $_[4]    # Name is hidden
  #
  #
  # Store the NAME record in the short format that is used for storing the print
  # area, repeat rows only and repeat columns only.
  #
  def store_name_short(index, type, ext_ref, rowmin, rowmax, colmin, colmax, hidden)
    record          = 0x0018    # Record identifier
    length          = 0x001b    # Number of bytes to follow

    grbit           = 0x0020    # Option flags
    chKey           = 0x00      # Keyboard shortcut
    cch             = 0x01      # Length of text name
    cce             = 0x000b    # Length of text definition
    unknown01       = 0x0000    #
    ixals           = index + 1 # Sheet index
    unknown02       = 0x00      #
    cchCustMenu     = 0x00      # Length of cust menu text
    cchDescription  = 0x00      # Length of description text
    cchHelptopic    = 0x00      # Length of help topic text
    cchStatustext   = 0x00      # Length of status bar text
    rgch            = type      # Built-in name type
    unknown03       = 0x3b      #

    grbit = 0x0021 if hidden

    header = [record, length].pack("vv")
    data   = [grbit].pack("v")
    data  += [chKey].pack("C")
    data  += [cch].pack("C")
    data  += [cce].pack("v")
    data  += [unknown01].pack("v")
    data  += [ixals].pack("v")
    data  += [unknown02].pack("C")
    data  += [cchCustMenu].pack("C")
    data  += [cchDescription].pack("C")
    data  += [cchHelptopic].pack("C")
    data  += [cchStatustext].pack("C")
    data  += [rgch].pack("C")
    data  += [unknown03].pack("C")
    data  += [ext_ref].pack("v")

    data  += [rowmin].pack("v")
    data  += [rowmax].pack("v")
    data  += [colmin].pack("v")
    data  += [colmax].pack("v")

    append(header, data)
  end

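  # Hypothetical caller sketch (argument values for illustration only): a print
  # area of A1:C10 on the first sheet, already registered as external ref 0,
  # would be stored with the Print_Area built-in type (0x06):
  #
  #   store_name_short(0, 0x06, 0, 0, 9, 0, 2, false)
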
  ###############################################################################
  #
  # _store_name_long()
  #     my $index   = shift;   # Sheet index
  #     my $type    = shift;
  #     my $ext_ref = shift;   # TODO
  #     my $rowmin  = $_[0];   # Start row
  #     my $rowmax  = $_[1];   # End row
  #     my $colmin  = $_[2];   # Start column
  #     my $colmax  = $_[3];   # end column
  #
  #
  # Store the NAME record in the long format that is used for storing the repeat
  # rows and columns when both are specified. This shares a lot of code with
  # _store_name_short() but we use a separate method to keep the code clean.
  # Code abstraction for reuse can be carried too far, and I should know. ;-)
  #
  def store_name_long(index, type, ext_ref, rowmin, rowmax, colmin, colmax)
    record          = 0x0018    # Record identifier
    length          = 0x002a    # Number of bytes to follow

    grbit           = 0x0020    # Option flags
    chKey           = 0x00      # Keyboard shortcut
    cch             = 0x01      # Length of text name
    cce             = 0x001a    # Length of text definition
    unknown01       = 0x0000    #
    ixals           = index + 1 # Sheet index
    unknown02       = 0x00      #
    cchCustMenu     = 0x00      # Length of cust menu text
    cchDescription  = 0x00      # Length of description text
    cchHelptopic    = 0x00      # Length of help topic text
    cchStatustext   = 0x00      # Length of status bar text
    rgch            = type      # Built-in name type

    unknown03 = 0x29
    unknown04 = 0x0017
    unknown05 = 0x3b

    header = [record, length].pack("vv")
    data   = [grbit].pack("v")
    data  += [chKey].pack("C")
    data  += [cch].pack("C")
    data  += [cce].pack("v")
    data  += [unknown01].pack("v")
    data  += [ixals].pack("v")
    data  += [unknown02].pack("C")
    data  += [cchCustMenu].pack("C")
    data  += [cchDescription].pack("C")
    data  += [cchHelptopic].pack("C")
    data  += [cchStatustext].pack("C")
    data  += [rgch].pack("C")

    # Column definition
    data  += [unknown03].pack("C")
    data  += [unknown04].pack("v")
    data  += [unknown05].pack("C")
    data  += [ext_ref].pack("v")
    data  += [0x0000].pack("v")
    data  += [0xffff].pack("v")
    data  += [colmin].pack("v")
    data  += [colmax].pack("v")

    # Row definition
    data  += [unknown05].pack("C")
    data  += [ext_ref].pack("v")
    data  += [rowmin].pack("v")
    data  += [rowmax].pack("v")
    data  += [0x00].pack("v")
    data  += [0xff].pack("v")
    # End of data
    data  += [0x10].pack("C")

    append(header, data)
  end

  ###############################################################################
  #
  # _store_palette()
  #
  # Stores the PALETTE biff record.
  #
  def store_palette
    record = 0x0092                  # Record identifier
    length = 2 + 4 * @palette.size   # Number of bytes to follow
    ccv    = @palette.size           # Number of RGB values to follow
    data   = ''                      # The RGB data

    # Pack the RGB data
    @palette.each do |p|
      data += p.pack('CCCC')
    end

    header = [record, length, ccv].pack("vvv")

    append(header, data)
  end

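  # Each @palette entry is a four byte [red, green, blue, reserved] quadruplet;
  # packing a single entry looks like this (a sketch, not library code):
  #
  #   [0xFF, 0x99, 0x00, 0x00].pack('CCCC')   # => "\xFF\x99\x00\x00"
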
  ###############################################################################
  #
  # _store_codepage()
  #
  # Stores the CODEPAGE biff record.
  #
  def store_codepage
    record = 0x0042      # Record identifier
    length = 0x0002      # Number of bytes to follow
    cv     = @codepage   # The code page

    header = [record, length].pack("vv")
    data   = [cv].pack("v")

    append(header, data)
  end

  ###############################################################################
  #
  # _store_country()
  #
  # Stores the COUNTRY biff record.
  #
  # Will add setter method for the country codes when/if required.
  #
  def store_country
    record          = 0x008C   # Record identifier
    length          = 0x0004   # Number of bytes to follow
    country_default = 1
    country_win_ini = 1

    header = [record, length].pack("vv")
    data   = [country_default, country_win_ini].pack("vv")
    append(header, data)
  end

  ###############################################################################
  #
  # _store_hideobj()
  #
  # Stores the HIDEOBJ biff record.
  #
  def store_hideobj
    record = 0x008D      # Record identifier
    length = 0x0002      # Number of bytes to follow
    hide   = @hideobj    # Option to hide objects

    header = [record, length].pack("vv")
    data   = [hide].pack("v")

    append(header, data)
  end

  ###############################################################################
  ###############################################################################
  ###############################################################################



  ###############################################################################
  #
  # _calculate_extern_sizes()
  #
  # We need to calculate the space required by the SUPBOOK, EXTERNSHEET and NAME
  # records so that it can be added to the BOUNDSHEET offsets.
  #
  def calculate_extern_sizes
    ext_refs      = @parser.get_ext_sheets
    ext_ref_count = ext_refs.keys.size
    length        = 0
    index         = 0

    @worksheets.each do |worksheet|

      rowmin = worksheet.title_rowmin
      colmin = worksheet.title_colmin
      filter = worksheet.filter_count
      key    = "#{index}:#{index}"
      index += 1

      # Add area NAME records
      #
      if !worksheet.print_rowmin.nil? && worksheet.print_rowmin != 0
        if ext_refs[key].nil?
          ext_refs[key]  = ext_ref_count
          ext_ref_count += 1
        end
        length += 31
      end

      # Add title NAME records
      #
      if rowmin and colmin
        if ext_refs[key].nil?
          ext_refs[key]  = ext_ref_count
          ext_ref_count += 1
        end

        length += 46
      elsif rowmin or colmin
        if ext_refs[key].nil?
          ext_refs[key]  = ext_ref_count
          ext_ref_count += 1
        end
        length += 31
      else
        # TODO, may need this later.
      end

      # Add Autofilter NAME records
      #
      if filter != 0
        if ext_refs[key].nil?
          ext_refs[key]  = ext_ref_count
          ext_ref_count += 1
        end
        length += 31
      end
    end

    # Update the ref counts.
    @ext_ref_count = ext_ref_count
    @ext_refs      = ext_refs

    # If there are no external refs then we don't write SUPBOOK, EXTERNSHEET
    # or NAME records, so the length is 0.

    return 0 if ext_ref_count == 0

    # The SUPBOOK record is 8 bytes
    length += 8

    # The EXTERNSHEET record is 6 bytes + 6 bytes for each external ref
    length += 6 * (1 + ext_ref_count)

    return length
  end

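  # Worked example of the sizing above (values are illustrative, assuming the
  # formula parser contributed no refs of its own): a workbook with 2 sheets,
  # each defining only a print area, yields
  #
  #   name_length = 2 * 31            # two short NAME records
  #   supbook     = 8
  #   externsheet = 6 * (1 + 2)       # header + one XTI per ext ref
  #   total       = name_length + supbook + externsheet   # => 88
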
  ###############################################################################
  #
  # _calculate_shared_string_sizes()
  #
  # Handling of the SST continue blocks is complicated by the need to include an
  # additional continuation byte depending on whether the string is split between
  # blocks or whether it starts at the beginning of the block. (There are also
  # additional complications that will arise later when/if Rich Strings are
  # supported). As such we cannot use the simple CONTINUE mechanism provided by
  # the _add_continue() method in BIFFwriter.pm. Thus we have to make two passes
  # through the strings data. The first is to calculate the required block sizes
  # and the second, in _store_shared_strings(), is to write the actual strings.
  # The first pass through the data is also used to calculate the size of the SST
  # and CONTINUE records for use in setting the BOUNDSHEET record offsets. The
  # downside of this is that the same algorithm is repeated in _store_shared_strings().
  #
  def calculate_shared_string_sizes
    strings = Array.new(@str_unique)

    @str_table.each_key do |key|
      strings[@str_table[key]] = key
    end
    # The SST data could be very large, free some memory (maybe).
    @str_table = nil
    @str_array = strings

    # Iterate through the strings to calculate the CONTINUE block sizes.
    #
    # The SST block requires a specialised CONTINUE block, so we have to
    # ensure that the maximum data block size is less than the limit used by
    # _add_continue() in BIFFwriter.pm. For simplicity we use the same size
    # for the SST and CONTINUE records:
    #   8228 : Maximum Excel97 block size
    #     -4 : Length of block header
    #     -8 : Length of additional SST header information
    #     -8 : Arbitrary number to keep within _add_continue() limit
    # = 8208
    #
    continue_limit = 8208
    block_length   = 0
    written        = 0
    block_sizes    = []
    continue       = 0

    strings.each do |string|

      string_length = string.length
      encoding      = string.unpack("xx C")[0]
      split_string  = 0

      # Block length is the total length of the strings that will be
      # written out in a single SST or CONTINUE block.
      #
      block_length += string_length

      # We can write the string if it doesn't cross a CONTINUE boundary
      if block_length < continue_limit
        written += string_length
        next
      end


      # Deal with the cases where the next string to be written will exceed
      # the CONTINUE boundary. If the string is very long it may need to be
      # written in more than one CONTINUE record.
      #
      while block_length >= continue_limit

        # We need to avoid the case where a string is continued in the first
        # n bytes that contain the string header information.
        #
        header_length   = 3   # Min string + header size -1
        space_remaining = continue_limit - written - continue


        # Unicode data should only be split on char (2 byte) boundaries.
        # Therefore, in some cases we need to reduce the amount of available
        # space by 1 byte to ensure the correct alignment.
        align = 0

        # Only applies to Unicode strings
        if encoding == 1
          # Min string + header size -1
          header_length = 4

          if space_remaining > header_length
            # String contains 3 byte header => split on odd boundary
            if split_string == 0 and space_remaining % 2 != 1
              space_remaining -= 1
              align = 1
            # Split section without header => split on even boundary
            elsif split_string != 0 and space_remaining % 2 == 1
              space_remaining -= 1
              align = 1
            end

            split_string = 1
          end
        end

        if space_remaining > header_length
          # Write as much as possible of the string in the current block
          written += space_remaining

          # Reduce the current block length by the amount written
          block_length -= continue_limit - continue - align

          # Store the max size for this block
          block_sizes.push(continue_limit - align)

          # If the current string was split then the next CONTINUE block
          # should have the string continue flag (grbit) set unless the
          # split string fits exactly into the remaining space.
          #
          if block_length > 0
            continue = 1
          else
            continue = 0
          end
        else
          # Store the max size for this block
          block_sizes.push(written + continue)

          # Not enough space to start the string in the current block
          block_length -= continue_limit - space_remaining - continue
          continue = 0
        end

        # If the string (or substr) is small enough we can write it in the
        # new CONTINUE block. Else, go through the loop again to write it in
        # one or more CONTINUE blocks
        #
        if block_length < continue_limit
          written = block_length
        else
          written = 0
        end
      end
    end

    # Store the max size for the last block unless it is empty
    block_sizes.push(written + continue) if written + continue != 0

    @str_block_sizes = block_sizes.dup

    # Calculate the total length of the SST and associated CONTINUEs (if any).
    # The SST record will have a length even if it contains no strings.
    # This length is required to set the offsets in the BOUNDSHEET records since
    # they must be written before the SST records
    #
    length  = 12
    length += block_sizes.shift unless block_sizes.empty?   # SST
    while !block_sizes.empty? do
      length += 4 + block_sizes.shift                       # CONTINUEs
    end

    return length
  end

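  # Worked example of the final length calculation above (illustrative block
  # sizes): with pre-computed block sizes of [8200, 4000] the SST record
  # accounts for the first block and each remaining block becomes a CONTINUE
  # record with a 4 byte header:
  #
  #   block_sizes = [8200, 4000]
  #   length      = 12 + 8200 + (4 + 4000)   # => 12216
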
  ###############################################################################
  #
  # _store_shared_strings()
  #
  # Write all of the workbook's strings into an indexed array.
  #
  # See the comments in _calculate_shared_string_sizes() for more information.
  #
  # We also use this routine to record the offsets required by the EXTSST table.
  # In order to do this we first identify the first string in an EXTSST bucket
  # and then store its global and local offset within the SST table. The offset
  # occurs wherever the start of the bucket string is written out via append().
  #
  def store_shared_strings
    strings = @str_array

    record  = 0x00FC   # Record identifier
    length  = 0x0008   # Number of bytes to follow
    total   = 0x0000

    # Iterate through the strings to calculate the CONTINUE block sizes
    continue_limit = 8208
    block_length   = 0
    written        = 0
    continue       = 0

    # The SST and CONTINUE block sizes have been pre-calculated by
    # _calculate_shared_string_sizes()
    block_sizes = @str_block_sizes

    # The SST record is required even if it contains no strings. Thus we will
    # always have a length
    #
    if block_sizes.size != 0
      length = 8 + block_sizes.shift
    else
      # No strings
      length = 8
    end

    # Initialise variables used to track EXTSST bucket offsets.
    extsst_str_num  = -1
    sst_block_start = @datasize

    # Write the SST block header information
    header = [record, length].pack("vv")
    data   = [@str_total, @str_unique].pack("VV")
    append(header, data)

    # Iterate through the strings and write them out
    return if strings.empty?
    strings.each do |string|

      string_length = string.length
      encoding      = string.unpack("xx C")[0]
      split_string  = 0
      bucket_string = 0   # Used to track EXTSST bucket offsets.

      # Check if the string is at the start of an EXTSST bucket.
      extsst_str_num += 1
      if extsst_str_num % @extsst_bucket_size == 0
        bucket_string = 1
      end

      # Block length is the total length of the strings that will be
      # written out in a single SST or CONTINUE block.
      #
      block_length += string_length

      # We can write the string if it doesn't cross a CONTINUE boundary
      if block_length < continue_limit

        # Store location of EXTSST bucket string.
        if bucket_string != 0
          global_offset = @datasize
          local_offset  = @datasize - sst_block_start

          @extsst_offsets.push([global_offset, local_offset])
          bucket_string = 0
        end

        append(string)
        written += string_length
        next
      end

      # Deal with the cases where the next string to be written will exceed
      # the CONTINUE boundary. If the string is very long it may need to be
      # written in more than one CONTINUE record.
      #
      while block_length >= continue_limit

        # We need to avoid the case where a string is continued in the first
        # n bytes that contain the string header information.
        #
        header_length   = 3   # Min string + header size -1
        space_remaining = continue_limit - written - continue


        # Unicode data should only be split on char (2 byte) boundaries.
        # Therefore, in some cases we need to reduce the amount of available
        # space by 1 byte to ensure the correct alignment.
        align = 0

        # Only applies to Unicode strings
        if encoding == 1
          # Min string + header size -1
          header_length = 4

          if space_remaining > header_length
            # String contains 3 byte header => split on odd boundary
            if split_string == 0 and space_remaining % 2 != 1
              space_remaining -= 1
              align = 1
            # Split section without header => split on even boundary
            elsif split_string != 0 and space_remaining % 2 == 1
              space_remaining -= 1
              align = 1
            end

            split_string = 1
          end
        end

        if space_remaining > header_length
          # Write as much as possible of the string in the current block
          tmp = string[0, space_remaining]

          # Store location of EXTSST bucket string.
          if bucket_string != 0
            global_offset = @datasize
            local_offset  = @datasize - sst_block_start

            @extsst_offsets.push([global_offset, local_offset])
            bucket_string = 0
          end

          append(tmp)


          # The remainder will be written in the next block(s)
          string = string[space_remaining .. string.length - 1]

          # Reduce the current block length by the amount written
          block_length -= continue_limit - continue - align

          # If the current string was split then the next CONTINUE block
          # should have the string continue flag (grbit) set unless the
          # split string fits exactly into the remaining space.
          #
          if block_length > 0
            continue = 1
          else
            continue = 0
          end
        else
          # Not enough space to start the string in the current block
          block_length -= continue_limit - space_remaining - continue
          continue = 0
        end

        # Write the CONTINUE block header
        if block_sizes.size != 0
          sst_block_start = @datasize   # Reset EXTSST offset.

          record = 0x003C
          length = block_sizes.shift

          header  = [record, length].pack("vv")
          header += [encoding].pack("C") if continue != 0

          append(header)
        end

        # If the string (or substr) is small enough we can write it in the
        # new CONTINUE block. Else, go through the loop again to write it in
        # one or more CONTINUE blocks
        #
        if block_length < continue_limit

          # Store location of EXTSST bucket string.
          if bucket_string != 0
            global_offset = @datasize
            local_offset  = @datasize - sst_block_start

            @extsst_offsets.push([global_offset, local_offset])

            bucket_string = 0
          end
          append(string)

          written = block_length
        else
          written = 0
        end
      end
    end
  end

  ###############################################################################
  #
  # _calculate_extsst_size
  #
  # The number of buckets used in the EXTSST is between 0 and 128. The number of
  # strings per bucket (bucket size) has a minimum value of 8 and a theoretical
  # maximum of 2^16. For "number of strings" < 1024 there is a constant bucket
  # size of 8. The following algorithm generates the same size/bucket ratio
  # as Excel.
  #
  def calculate_extsst_size
    unique_strings = @str_unique

    if unique_strings < 1024
      bucket_size = 8
    else
      bucket_size = 1 + Integer(unique_strings / 128.0)
    end

    buckets = Integer((unique_strings + bucket_size - 1) / Float(bucket_size))

    @extsst_buckets     = buckets
    @extsst_bucket_size = bucket_size

    return 6 + 8 * buckets
  end

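  # For example, with 4000 unique strings the arithmetic above gives:
  #
  #   bucket_size = 1 + Integer(4000 / 128.0)               # => 32
  #   buckets     = Integer((4000 + 32 - 1) / Float(32))    # => 125
  #   record_size = 6 + 8 * 125                             # => 1006
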
  ###############################################################################
  #
  # _store_extsst
  #
  # Write EXTSST table using the offsets calculated in _store_shared_strings().
  #
  def store_extsst
    offsets     = @extsst_offsets
    bucket_size = @extsst_bucket_size

    record = 0x00FF                  # Record identifier
    length = 2 + 8 * offsets.size    # Bytes to follow

    header = [record, length].pack('vv')
    data   = [bucket_size].pack('v')

    offsets.each do |offset|
      data += [offset[0], offset[1], 0].pack('Vvv')
    end

    append(header, data)

  end

  #
  # Methods related to comments and MSO objects.
  #

  ###############################################################################
  #
  # _add_mso_drawing_group()
  #
  # Write the MSODRAWINGGROUP record that keeps track of the Escher drawing
  # objects in the file such as images, comments and filters.
  #
  def add_mso_drawing_group   #:nodoc:
    return unless @mso_size != 0

    record = 0x00EB   # Record identifier
    length = 0x0000   # Number of bytes to follow

    data  = store_mso_dgg_container
    data += store_mso_dgg(*@mso_clusters)
    data += store_mso_bstore_container
    @images_data.each do |image|
      data += store_mso_images(*image)
    end
    data += store_mso_opt
    data += store_mso_split_menu_colors

    length = data.length
    header = [record, length].pack("vv")

    add_mso_drawing_group_continue(header + data)

    return header + data   # For testing only.
  end

  ###############################################################################
  #
  # _add_mso_drawing_group_continue()
  #
  # See first the Spreadsheet::WriteExcel::BIFFwriter::_add_continue() method.
  #
  # Add specialised CONTINUE headers to large MSODRAWINGGROUP data block.
  # We use the Excel 97 max block size of 8228 - 4 bytes for the header = 8224.
  #
  # The structure depends on the size of the data block:
  #
  #     Case 1:  <= 8224 bytes      1 MSODRAWINGGROUP
  #     Case 2:  <= 2*8224 bytes    1 MSODRAWINGGROUP + 1 CONTINUE
  #     Case 3:  >  2*8224 bytes    2 MSODRAWINGGROUP + n CONTINUE
  #
  def add_mso_drawing_group_continue(data)
    limit       = 8228 - 4
    mso_group   = 0x00EB   # Record identifier
    continue    = 0x003C   # Record identifier
    block_count = 1

    # Ignore the base class _add_continue() method.
    @ignore_continue = 1

    # Case 1 above. Just return the data as it is.
    if data.length <= limit
      append(data)
      return
    end

    # Change length field of the first MSODRAWINGGROUP block. Case 2 and 3.
    tmp  = data[0, limit + 4]
    data = data[limit + 4 .. -1]
    tmp[2, 2] = [limit].pack('v')
    append(tmp)

    # Add MSODRAWINGGROUP and CONTINUE blocks for Case 3 above.
    while data.length > limit
      if block_count == 1
        # Add extra MSODRAWINGGROUP block header.
        header = [mso_group, limit].pack("vv")
        block_count += 1
      else
        # Add normal CONTINUE header.
        header = [continue, limit].pack("vv")
      end

      tmp  = data[0, limit]
      data = data[limit .. -1]
      append(header, tmp)
    end

    # Last CONTINUE block for remaining data. Case 2 and 3 above.
    header = [continue, data.length].pack("vv")
    append(header, data)

    # Turn the base class _add_continue() method back on.
    @ignore_continue = 0
  end

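  # Sketch of the block counts produced by the three cases above (illustrative
  # sizes, not library code):
  #
  #   limit = 8228 - 4
  #   [4_000, 10_000, 20_000].map { |size| (size + limit - 1) / limit }
  #   # => [1, 2, 3] physical records. The first record is always the
  #   #    MSODRAWINGGROUP; in Case 2 the second is a CONTINUE, while in
  #   #    Case 3 the second carries an extra MSODRAWINGGROUP header and the
  #   #    remainder are CONTINUE blocks.
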
  ###############################################################################
  #
  # _store_mso_dgg_container()
  #
  # Write the Escher DggContainer record that is part of MSODRAWINGGROUP.
  #
  def store_mso_dgg_container
    type     = 0xF000
    version  = 15
    instance = 0
    data     = ''
    length   = @mso_size - 12   # -4 (biff header) -8 (for this).

    return add_mso_generic(type, version, instance, data, length)
  end


  ###############################################################################
  #
  # _store_mso_dgg()
  #     my $max_spid       = $_[0];
  #     my $num_clusters   = $_[1];
  #     my $shapes_saved   = $_[2];
  #     my $drawings_saved = $_[3];
  #     my $clusters       = $_[4];
  #
  # Write the Escher Dgg record that is part of MSODRAWINGGROUP.
  #
  def store_mso_dgg(max_spid, num_clusters, shapes_saved, drawings_saved, clusters)
    type     = 0xF006
    version  = 0
    instance = 0
    data     = ''
    length   = nil   # Calculate automatically.

    data = [max_spid, num_clusters,
            shapes_saved, drawings_saved].pack("VVVV")

    clusters.each do |aref|
      drawing_id     = aref[0]
      shape_ids_used = aref[1]

      data += [drawing_id, shape_ids_used].pack("VV")
    end

    return add_mso_generic(type, version, instance, data, length)
  end

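  # Sketch of the cluster packing above for two hypothetical
  # [drawing_id, shape_ids_used] pairs (not library code):
  #
  #   clusters = [[1, 2], [2, 1]]
  #   body = ''
  #   clusters.each { |drawing_id, used| body += [drawing_id, used].pack('VV') }
  #   # body is 16 bytes: one 'VV' pair per cluster
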
  ###############################################################################
  #
  # _store_mso_bstore_container()
  #
  # Write the Escher BstoreContainer record that is part of MSODRAWINGGROUP.
  #
  def store_mso_bstore_container
    return '' if @images_size == 0

    type     = 0xF001
    version  = 15
    instance = @images_data.size   # Number of images.
    data     = ''
    length   = @images_size + 8 * instance

    return add_mso_generic(type, version, instance, data, length)
  end

  ###############################################################################
  #
  # _store_mso_images()
  #     ref_count  = $_[0]
  #     image_type = $_[1]
  #     image      = $_[2]
  #     size       = $_[3]
  #     checksum1  = $_[4]
  #     checksum2  = $_[5]
  #
  # Write the Escher BstoreContainer record that is part of MSODRAWINGGROUP.
  #
  def store_mso_images(ref_count, image_type, image, size, checksum1, checksum2)
    blip_store_entry = store_mso_blip_store_entry(
      ref_count,
      image_type,
      size,
      checksum1
    )

    blip = store_mso_blip(
      image_type,
      image,
      size,
      checksum1,
      checksum2
    )

    return blip_store_entry + blip
  end

  ###############################################################################
  #
  # _store_mso_blip_store_entry()
  #     ref_count  = $_[0]
  #     image_type = $_[1]
  #     size       = $_[2]
  #     checksum1  = $_[3]
  #
  # Write the Escher BlipStoreEntry record that is part of MSODRAWINGGROUP.
  #
  def store_mso_blip_store_entry(ref_count, image_type, size, checksum1)
    type     = 0xF007
    version  = 2
    instance = image_type
    length   = size + 61
    data     = [image_type].pack('C')  +   # Win32
               [image_type].pack('C')  +   # Mac
               [checksum1].pack('H*')  +   # Uid checksum
               [0xFF].pack('v')        +   # Tag
               [size + 25].pack('V')   +   # Next Blip size
               [ref_count].pack('V')   +   # Image ref count
               [0x00000000].pack('V')  +   # File offset
               [0x00].pack('C')        +   # Usage
               [0x00].pack('C')        +   # Name length
               [0x00].pack('C')        +   # Unused
               [0x00].pack('C')            # Unused

    return add_mso_generic(type, version, instance, data, length)
  end

  ###############################################################################
  #
  # _store_mso_blip()
  #     image_type = $_[0]
  #     image_data = $_[1]
  #     size       = $_[2]
  #     checksum1  = $_[3]
  #     checksum2  = $_[4]
  #
  # Write the Escher Blip record that is part of MSODRAWINGGROUP.
  #
  def store_mso_blip(image_type, image_data, size, checksum1, checksum2)
    instance = 0x046A if image_type == 5   # JPG
    instance = 0x06E0 if image_type == 6   # PNG
    instance = 0x07A9 if image_type == 7   # BMP

    # BMPs contain an extra checksum for the stripped data.
    if image_type == 7
      checksum1 = checksum2 + checksum1
    end

    type     = 0xF018 + image_type
    version  = 0x0000
    length   = size + 17
    data     = [checksum1].pack('H*')  +   # Uid checksum
               [0xFF].pack('C')        +   # Tag
               image_data                  # Image

    return add_mso_generic(type, version, instance, data, length)
  end

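  # The Escher blip record type is derived from the image type above; a
  # restatement of the code and its comments, not additional library logic:
  #
  #   { 5 => 0xF01D, 6 => 0xF01E, 7 => 0xF01F }   # JPG, PNG, BMP => 0xF018 + type
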
  ###############################################################################
  #
  # _store_mso_opt()
  #
  # Write the Escher Opt record that is part of MSODRAWINGGROUP.
  #
  def store_mso_opt
    type     = 0xF00B
    version  = 3
    instance = 3
    data     = ''
    length   = 18

    data = ['BF0008000800810109000008C0014000'+'0008'].pack("H*")

    return add_mso_generic(type, version, instance, data, length)
  end

  ###############################################################################
  #
  # _store_mso_split_menu_colors()
  #
  # Write the Escher SplitMenuColors record that is part of MSODRAWINGGROUP.
  #
  def store_mso_split_menu_colors
    type     = 0xF11E
    version  = 0
    instance = 4
    data     = ''
    length   = 16

    data = ['0D0000080C00000817000008F7000010'].pack("H*")

    return add_mso_generic(type, version, instance, data, length)
  end

end