woff 1.0.0 → 1.1.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -8,8 +8,8 @@ Gem::Specification.new do |gem|
8
8
  gem.authors = ["Josh Hepworth"]
9
9
  gem.licenses = "MIT"
10
10
  gem.email = ["josh@friendsoftheweb.com"]
11
- gem.description = %q{Handles the management and modification of WOFF formatted files.}
12
- gem.summary = %q{Reading and modifying binary WOFF files.}
11
+ gem.description = %q{Handles the management and modification of WOFF and WOFF2 formatted files.}
12
+ gem.summary = %q{Reading and modifying binary WOFF and WOFF2 files.}
13
13
  gem.homepage = "https://github.com/friendsoftheweb/woff-rb"
14
14
 
15
15
  gem.files = `git ls-files`.split($/)
@@ -17,5 +17,12 @@ Gem::Specification.new do |gem|
17
17
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
18
18
  gem.require_paths = ["lib"]
19
19
 
20
+ gem.required_ruby_version = ">= 2.2.0"
21
+
20
22
  gem.add_dependency "bindata", "~> 2.3"
23
+ gem.add_dependency "brotli", "~> 0.1"
24
+
25
+ gem.add_development_dependency "rake", "~> 11.2"
26
+ gem.add_development_dependency "rspec", "~> 3.5"
27
+ gem.add_development_dependency "pry-byebug", "~> 3.4"
21
28
  end
@@ -0,0 +1,1176 @@
1
+ """
2
+ This implements the WOFF specification dated September 16, 2009.
3
+
4
+ The main object is the WOFFFont. It is a subclass for the FontTools
5
+ TTFont object, so it has very similar functionality. The WOFFReader
6
+ and WOFFWriter are also available for use outside of this module.
7
+ Those objects are much faster than WOFFFont, but they require much
8
+ more care.
9
+ """
10
+
11
+ import zlib
12
+ import struct
13
+ from fontTools.misc import sstruct
14
+ from cStringIO import StringIO
15
+ from xml.etree import ElementTree
16
+ from fontTools.ttLib import TTFont, debugmsg, sortedTagList
17
+ from fontTools.ttLib.sfnt import getSearchRange, calcChecksum, SFNTDirectoryEntry, \
18
+ sfntDirectoryFormat, sfntDirectorySize, sfntDirectoryEntryFormat, sfntDirectoryEntrySize
19
+
20
+
21
+ # -----------
22
+ # Main Object
23
+ # -----------
24
+
25
class WOFFFont(TTFont):

    """
    This object represents a WOFF file. It is a subclass of
    the FontTools TTFont object, so the same API applies.
    For information about the arguments in __init__,
    refer to the TTFont documentation.

    This object has two special attributes: metadata and privateData.
    The metadata attribute returns an ElementTree Element object
    representing the metadata stored in the font. To set new metadata
    in the font, you must use this object. The privateData attribute
    returns the private data stored in the font. To set private data,
    set a string to font.privateData.
    """

    def __init__(self, file=None, flavor="\000\001\000\000",
            checkChecksums=0, verbose=False, recalcBBoxes=True,
            allowVID=False, ignoreDecompileErrors=False):
        # can't use the TTFont __init__ because it goes directly to the SFNTReader.
        # see that method for details about all of this.
        self.verbose = verbose
        self.recalcBBoxes = recalcBBoxes
        self.tables = {}
        self.reader = None

        self.last_vid = 0xFFFE
        self.reverseVIDDict = {}
        self.VIDDict = {}
        self.allowVID = allowVID

        self.ignoreDecompileErrors = ignoreDecompileErrors

        # WOFF header values. These are overwritten from the reader
        # below when an existing file is opened.
        self.flavor = flavor
        self.majorVersion = 0
        self.minorVersion = 0
        self._metadata = None
        self._tableOrder = None

        if file is not None:
            # accept either a path or a readable file object
            if not hasattr(file, "read"):
                file = open(file, "rb")
            self.reader = WOFFReader(file, checkChecksums=checkChecksums)
            self.flavor = self.reader.flavor
            self.majorVersion = self.reader.majorVersion
            self.minorVersion = self.reader.minorVersion
            self._tableOrder = self.reader.keys()
        else:
            # a brand new font: start with empty metadata and no private data
            self._metadata = ElementTree.Element("metadata", version="1.0")
            self.privateData = None

    def __getattr__(self, attr):
        # Lazily materialize "metadata" and "privateData" from the reader
        # on first access. Any other missing attribute is a genuine error.
        if attr not in ("privateData", "metadata"):
            raise AttributeError(attr)
        # metadata
        if attr == "metadata":
            if self._metadata is not None:
                return self._metadata
            if self.reader is not None:
                text = self.reader.metadata
                if text:
                    metadata = ElementTree.fromstring(text)
                else:
                    metadata = ElementTree.Element("metadata", version="1.0")
                # cache so the reader is only consulted once
                self._metadata = metadata
                return self._metadata
            return None
        # private data
        elif attr == "privateData":
            # NOTE(review): hasattr() here re-enters __getattr__ for this
            # same attribute; this appears to rely on hasattr() swallowing
            # the resulting error before self.privateData is assigned --
            # confirm this path before relying on it.
            if not hasattr(self, "privateData"):
                privateData = None
                if self.reader is not None:
                    privateData = self.reader.privateData
                self.privateData = privateData
            return self.privateData
        # fallback to None
        return None

    def keys(self):
        """
        Return a list of all tables in the font. If a table order
        has been set manually or as the result of opening an existing
        WOFF file, the set table order will be in the list first.
        Tables not defined in an existing order will be sorted following
        the suggested ordering in the OTF/OFF specification.

        The first table listed in all cases is the GlyphOrder pseudo table.
        """
        # union of decompiled tables and tables still in the reader
        tags = set(self.tables.keys())
        if self.reader is not None:
            tags = tags | set(self.reader.keys())
        tags = list(tags)
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        return ["GlyphOrder"] + sortedTagList(tags, self._tableOrder)

    def setTableOrder(self, order):
        """
        Set the order in which tables should be written
        into the font. This is required if a DSIG table
        is in the font.
        """
        self._tableOrder = order

    def save(self, file, compressionLevel=9, recompressTables=False, reorderTables=True, recalculateHeadChecksum=True):
        """
        Save a WOFF into the file object specified by the
        file argument. Optionally, file can be a path and a
        new file will be created at that location.

        compressionLevel is the compression level to be
        used with zlib. This must be an int between 1 and 9.
        The default is 9, the highest compression, but slowest
        compression time.

        Set recompressTables to True if you want any already
        compressed tables to be decompressed and then recompressed
        using the level specified by compressionLevel.

        If you want the tables in the WOFF reordered following
        the suggested optimal table orderings described in the
        OTF/OFF specification, set reorderTables to True.
        Tables cannot be reordered if a DSIG table is in the font.

        If you change any of the SFNT data or reorder the tables,
        the head table checkSumAdjustment must be recalculated.
        If you are not changing any of the SFNT data, you can set
        recalculateHeadChecksum to False to prevent the recalculation.
        This must be set to False if the font contains a DSIG table.
        """
        # if DSIG is to be written, the table order
        # must be completely specified. otherwise the
        # DSIG may not be valid after decoding the WOFF.
        tags = self.keys()
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        if "DSIG" in tags:
            if self._tableOrder is None or (set(self._tableOrder) != set(tags)):
                raise WOFFLibError("A complete table order must be supplied when saving a font with a 'DSIG' table.")
            elif reorderTables:
                raise WOFFLibError("Tables can not be reordered when a 'DSIG' table is in the font. Set reorderTables to False.")
            elif recalculateHeadChecksum:
                raise WOFFLibError("The 'head' table checkSumAdjustment can not be recalculated when a 'DSIG' table is in the font.")
        # sort the tags if necessary
        if reorderTables:
            tags = sortedTagList(tags)
        # open a file if necessary
        closeStream = False
        if not hasattr(file, "write"):
            closeStream = True
            file = open(file, "wb")
        # write the table data
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        numTables = len(tags)
        writer = WOFFWriter(file, numTables, flavor=self.flavor,
            majorVersion=self.majorVersion, minorVersion=self.minorVersion,
            compressionLevel=compressionLevel, recalculateHeadChecksum=recalculateHeadChecksum,
            verbose=self.verbose)
        for tag in tags:
            origData = None
            origLength = None
            origChecksum = None
            compLength = None
            # table is loaded
            if self.isLoaded(tag):
                origData = self.getTableData(tag)
            # table is in reader
            elif self.reader is not None:
                if recompressTables:
                    origData = self.getTableData(tag)
                else:
                    # pass the still-compressed bytes straight through
                    if self.verbose:
                        debugmsg("Reading '%s' table from disk" % tag)
                    origData, origLength, origChecksum, compLength = self.reader.getCompressedTableData(tag)
            # add to writer
            writer.setTable(tag, origData, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
        # write the metadata
        metadata = None
        metaOrigLength = None
        metaLength = None
        if hasattr(self, "metadata"):
            declaration = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
            tree = ElementTree.ElementTree(self.metadata)
            f = StringIO()
            tree.write(f, encoding="utf-8")
            metadata = f.getvalue()
            # make sure the metadata starts with the declaration
            if not metadata.startswith(declaration):
                metadata = declaration + metadata
            del f
        elif self.reader is not None:
            if recompressTables:
                metadata = self.reader.metadata
            else:
                metadata, metaOrigLength, metaLength = self.reader.getCompressedMetadata()
        if metadata:
            writer.setMetadata(metadata, metaOrigLength=metaOrigLength, metaLength=metaLength)
        # write the private data
        privData = self.privateData
        if privData:
            writer.setPrivateData(privData)
        # close the writer
        writer.close()
        # close the file
        if closeStream:
            file.close()

    def saveXML(self):
        # XML round-tripping is intentionally unsupported for WOFF.
        raise NotImplementedError

    def importXML(self):
        # XML round-tripping is intentionally unsupported for WOFF.
        raise NotImplementedError
238
+
239
+
240
+ # ------
241
+ # Reader
242
+ # ------
243
+
244
# sstruct layout of the WOFF file header (WOFF 1.0 specification).
# All fields are big endian; woffHeaderSize is the packed byte size.
woffHeaderFormat = """
    > # big endian
    signature: 4s
    flavor: 4s
    length: L
    numTables: H
    reserved: H
    totalSFNTSize: L
    majorVersion: H
    minorVersion: H
    metaOffset: L
    metaLength: L
    metaOrigLength: L
    privOffset: L
    privLength: L
"""
woffHeaderSize = sstruct.calcsize(woffHeaderFormat)
261
+
262
class WOFFReader(object):

    """
    Low-level reader for WOFF files. Unpacks the WOFF header and
    table directory on construction, then exposes tables by tag
    (decompressed via __getitem__, raw via getCompressedTableData)
    plus lazily-read metadata and privateData attributes.
    """

    def __init__(self, file, checkChecksums=1):
        """
        file must be an open, seekable binary file object.
        checkChecksums: 0 = skip, 1 = warn, >1 = assert on mismatch.
        """
        self.file = file
        self.checkChecksums = checkChecksums
        # unpack the header
        self.file.seek(0)
        bytes = self.file.read(woffHeaderSize)
        if len(bytes) != woffHeaderSize:
            raise WOFFLibError("Not a properly formatted WOFF file.")
        # this populates the header fields (flavor, numTables, ...) on self
        sstruct.unpack(woffHeaderFormat, bytes, self)
        if self.signature != "wOFF":
            raise WOFFLibError("Not a properly formatted WOFF file.")
        # unpack the directory
        self.tables = {}
        for i in range(self.numTables):
            entry = WOFFDirectoryEntry()
            entry.fromFile(self.file)
            self.tables[entry.tag] = entry

    def close(self):
        """Close the underlying file object."""
        self.file.close()

    def __contains__(self, tag):
        return tag in self.tables

    has_key = __contains__

    def keys(self):
        """
        This returns a list of all tables in the WOFF
        sorted in ascending order based on the offset
        of each table.
        """
        sorter = []
        for tag, entry in self.tables.items():
            sorter.append((entry.offset, tag))
        order = [tag for offset, tag in sorted(sorter)]
        return order

    def __getitem__(self, tag):
        """Return the decompressed data for the table with the given tag."""
        entry = self.tables[tag]
        self.file.seek(entry.offset)
        data = self.file.read(entry.compLength)
        # decompress if necessary; per spec, compLength < origLength
        # signals a zlib-compressed table.
        if entry.compLength < entry.origLength:
            data = zlib.decompress(data)
        else:
            data = data[:entry.origLength]
        # compare the checksums
        if self.checkChecksums:
            checksum = calcTableChecksum(tag, data)
            if self.checkChecksums > 1:
                assert checksum == entry.origChecksum, "bad checksum for '%s' table" % tag
            elif checksum != entry.origChecksum:
                # warn only (Python 2 print statement)
                print "bad checksum for '%s' table" % tag
                print
        return data

    def getCompressedTableData(self, tag):
        """
        Return (data, origLength, origChecksum, compLength) for the table,
        with data still in its stored (possibly compressed) form.
        """
        entry = self.tables[tag]
        self.file.seek(entry.offset)
        data = self.file.read(entry.compLength)
        return data, entry.origLength, entry.origChecksum, entry.compLength

    def getCompressedMetadata(self):
        """Return (data, metaOrigLength, metaLength) without decompressing."""
        self.file.seek(self.metaOffset)
        data = self.file.read(self.metaLength)
        return data, self.metaOrigLength, self.metaLength

    def __getattr__(self, attr):
        # Lazily read the metadata and private data blocks from the file.
        if attr not in ("privateData", "metadata"):
            raise AttributeError(attr)
        if attr == "privateData":
            self.file.seek(self.privOffset)
            return self.file.read(self.privLength)
        if attr == "metadata":
            self.file.seek(self.metaOffset)
            data = self.file.read(self.metaLength)
            # a zero metaLength means no metadata block is present
            if self.metaLength:
                data = zlib.decompress(data)
                assert len(data) == self.metaOrigLength
            return data

    def __delitem__(self, tag):
        del self.tables[tag]
348
+
349
+
350
+ # ------
351
+ # Writer
352
+ # ------
353
+
354
class WOFFWriter(object):

    """
    Low-level writer for WOFF files. Tables are added with setTable,
    optional metadata and private data with setMetadata/setPrivateData,
    and the file is assembled and finalized by close().
    """

    def __init__(self, file, numTables, flavor="\000\001\000\000",
            majorVersion=0, minorVersion=0, compressionLevel=9,
            recalculateHeadChecksum=True,
            verbose=False):
        # WOFF header fields; length and totalSFNTSize start at the
        # fixed header + directory sizes and grow as tables are written.
        self.signature = "wOFF"
        self.flavor = flavor
        self.length = woffHeaderSize + (numTables * woffDirectoryEntrySize)
        self.totalSFNTSize = sfntDirectorySize + (numTables * sfntDirectoryEntrySize)
        self.numTables = numTables
        self.majorVersion = majorVersion
        self.minorVersion = minorVersion
        self.metaOffset = 0
        self.metaOrigLength = 0
        self.metaLength = 0
        self.privOffset = 0
        self.privLength = 0
        self.reserved = 0

        self.file = file
        self.compressionLevel = compressionLevel
        self.recalculateHeadChecksum = recalculateHeadChecksum
        self.verbose = verbose

        # the data is held to facilitate the
        # head checkSumAdjustment calculation.
        # tables maps tag -> (insertion index, directory entry, table data)
        self.tables = {}
        self.metadata = None
        self.privateData = None
        self.tableDataEnd = 0
        self.metadataEnd = 0

    def _tableOrder(self):
        # tags in insertion order (sorted by the stored insertion index)
        return [entry.tag for index, entry, data in sorted(self.tables.values())]

    def setTable(self, tag, data, origLength=None, origChecksum=None, compLength=None):
        """
        Add a table. If origLength/origChecksum/compLength are given,
        data is assumed to already be in its stored (possibly
        compressed) form; otherwise it is checksummed and compressed here.
        """
        # don't compress the head if the checkSumAdjustment needs to be recalculated
        # the compression will be handled later.
        if self.recalculateHeadChecksum and tag == "head":
            # decompress
            if compLength is not None and compLength < origLength:
                data = zlib.decompress(data)
            entry = self._prepTable(tag, data, origLength=len(data), entryOnly=True)
        # compress
        else:
            entry, data = self._prepTable(tag, data=data, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
        # store
        self.tables[tag] = (len(self.tables), entry, data)

    def setMetadata(self, data, metaOrigLength=None, metaLength=None):
        """
        Set the metadata block. If metaLength is None, data is
        uncompressed XML and will be compressed here; otherwise it is
        assumed to already be compressed.
        """
        if not data:
            return
        if metaLength is None:
            if self.verbose:
                debugmsg("compressing metadata")
            metaOrigLength = len(data)
            data = zlib.compress(data, self.compressionLevel)
            metaLength = len(data)
        # set the header values
        self.metaOrigLength = metaOrigLength
        self.metaLength = metaLength
        # store
        self.metadata = data

    def setPrivateData(self, data):
        """Set the (uncompressed) private data block."""
        if not data:
            return
        privLength = len(data)
        # set the header value
        self.privLength = privLength
        # store
        self.privateData = data

    def close(self):
        """
        Assemble and write the complete WOFF: header, directory,
        table data, metadata and private data. The header is written
        twice -- a preliminary copy up front and, via _writeHeader(),
        a final copy once all lengths and offsets are known.
        """
        if self.numTables != len(self.tables):
            raise WOFFLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
        # first, handle the checkSumAdjustment
        if self.recalculateHeadChecksum and "head" in self.tables:
            self._handleHeadChecksum()
        # check the table directory conformance
        for tag, (index, entry, data) in sorted(self.tables.items()):
            self._checkTableConformance(entry, data)
        # write the header
        header = sstruct.pack(woffHeaderFormat, self)
        self.file.seek(0)
        self.file.write(header)
        # update the directory offsets
        offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
        order = self._tableOrder()
        for tag in order:
            index, entry, data = self.tables[tag]
            entry.offset = offset
            offset += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
        # write the directory
        self._writeTableDirectory()
        # write the table data
        self._writeTableData()
        # write the metadata
        self._writeMetadata()
        # write the private data
        self._writePrivateData()
        # write the header
        self._writeHeader()
        # go to the beginning of the file
        self.file.seek(0)

    # header support

    def _writeHeader(self):
        # write the final header, now that length/offset fields are final
        header = sstruct.pack(woffHeaderFormat, self)
        self.file.seek(0)
        self.file.write(header)

    # sfnt support

    def _prepTable(self, tag, data, origLength=None, origChecksum=None, compLength=None, entryOnly=False):
        """
        Build a WOFFDirectoryEntry for the table and, unless entryOnly,
        compress the data (keeping the uncompressed form when
        compression does not actually shrink it, per the WOFF spec).
        """
        # skip data prep
        if entryOnly:
            origLength = origLength  # (no-op; caller-supplied value used as-is)
            origChecksum = calcTableChecksum(tag, data)
            compLength = 0
        # prep the data
        else:
            # compress
            if compLength is None:
                origData = data
                origLength = len(origData)
                origChecksum = calcTableChecksum(tag, data)
                if self.verbose:
                    debugmsg("compressing '%s' table" % tag)
                compData = zlib.compress(origData, self.compressionLevel)
                compLength = len(compData)
                # keep whichever form is smaller
                if origLength <= compLength:
                    data = origData
                    compLength = origLength
                else:
                    data = compData
        # make the directory entry
        entry = WOFFDirectoryEntry()
        entry.tag = tag
        entry.offset = 0
        entry.origLength = origLength
        entry.origChecksum = origChecksum
        entry.compLength = compLength
        # return
        if entryOnly:
            return entry
        return entry, data

    def _checkTableConformance(self, entry, data):
        """
        Check the conformance of the table directory entries.
        These must be checked because the origChecksum, origLength
        and compLength can be set by an outside caller.
        """
        if self.verbose:
            debugmsg("checking conformance of '%s' table" % entry.tag)
        # origLength must be less than or equal to compLength
        if entry.origLength < entry.compLength:
            raise WOFFLibError("origLength and compLength are not correct in the '%s' table entry." % entry.tag)
        # unpack the data as needed
        if entry.origLength > entry.compLength:
            origData = zlib.decompress(data)
            compData = data
        else:
            origData = data
            compData = data
        # the origLength entry must match the actual length
        if entry.origLength != len(origData):
            raise WOFFLibError("origLength is not correct in the '%s' table entry." % entry.tag)
        # the checksum must be correct
        if entry.origChecksum != calcTableChecksum(entry.tag, origData):
            raise WOFFLibError("origChecksum is not correct in the '%s' table entry." % entry.tag)
        # the compLength must be correct
        if entry.compLength != len(compData):
            raise WOFFLibError("compLength is not correct in the '%s' table entry." % entry.tag)

    def _handleHeadChecksum(self):
        """
        Recalculate checkSumAdjustment as it would be in the decoded
        SFNT, patch it into the (still uncompressed) head table, then
        compress the head table.
        """
        if self.verbose:
            debugmsg("updating head checkSumAdjustment")
        # get the value
        tables = {}
        offset = sfntDirectorySize + (sfntDirectoryEntrySize * self.numTables)
        for (index, entry, data) in sorted(self.tables.values()):
            tables[entry.tag] = dict(offset=offset, length=entry.origLength, checkSum=entry.origChecksum)
            offset += calc4BytePaddedLength(entry.origLength)
        checkSumAdjustment = calcHeadCheckSumAdjustment(self.flavor, tables)
        # set the value in the head table (checkSumAdjustment is bytes 8-11)
        index, entry, data = self.tables["head"]
        data = data[:8] + struct.pack(">L", checkSumAdjustment) + data[12:]
        # compress the data
        newEntry, data = self._prepTable("head", data)
        # update the entry data; the checksum is unchanged because the
        # checkSumAdjustment field is zeroed during checksum calculation
        assert entry.origChecksum == newEntry.origChecksum
        entry.origLength = newEntry.origLength
        entry.compLength = newEntry.compLength
        # store
        self.tables["head"] = (index, entry, data)

    def _writeTableDirectory(self):
        # directory entries are written in ascending tag order
        if self.verbose:
            debugmsg("writing table directory")
        self.file.seek(woffHeaderSize)
        for tag, (index, entry, data) in sorted(self.tables.items()):
            entry = sstruct.pack(woffDirectoryEntryFormat, entry)
            self.file.write(entry)

    def _writeTableData(self):
        # NOTE(review): 'd' is unused.
        d = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
        offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
        self.file.seek(offset)
        for tag in self._tableOrder():
            if self.verbose:
                debugmsg("writing '%s' table" % tag)
            index, entry, data = self.tables[tag]
            data += "\0" * (calc4BytePaddedLength(entry.compLength) - entry.compLength ) # ensure byte alignment
            self.file.write(data)
            self.length += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
            self.totalSFNTSize += calc4BytePaddedLength(entry.origLength) # ensure byte alignment
        # store the end for use by metadata or private data
        self.tableDataEnd = self.length

    # metadata support

    def _writeMetadata(self):
        if self.metadata is None:
            return
        if self.verbose:
            debugmsg("writing metadata")
        self.length += self.metaLength
        self.metaOffset = self.tableDataEnd
        self.file.seek(self.metaOffset)
        self.file.write(self.metadata)
        # store the end for use by private data
        self.metadataEnd = self.metaOffset + self.metaLength
        # if private data exists, pad to a four byte boundary
        if self.privateData is not None:
            padding = calc4BytePaddedLength(self.metaLength) - self.metaLength
            self.metadataEnd += padding
            self.length += padding
            padding = "\0" * padding
            if padding:
                self.file.write(padding)

    # private data support

    def _writePrivateData(self):
        if self.privateData is None:
            return
        if self.verbose:
            debugmsg("writing private data")
        # private data follows the metadata when present,
        # otherwise it follows the table data directly
        if self.metadata is not None:
            self.privOffset = self.metadataEnd
        else:
            self.privOffset = self.tableDataEnd
        self.length += self.privLength
        self.file.seek(self.privOffset)
        self.file.write(self.privateData)
613
+
614
+
615
+ # ---------
616
+ # Directory
617
+ # ---------
618
+
619
# sstruct layout of one WOFF table directory entry (20 bytes, big endian).
woffDirectoryEntryFormat = """
    > # big endian
    tag: 4s
    offset: L
    compLength: L
    origLength: L
    origChecksum: L
"""
woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
628
+
629
class WOFFDirectoryEntry(object):

    """A single entry in the WOFF table directory."""

    def fromFile(self, file):
        """Populate this entry from the current position of an open file."""
        raw = file.read(woffDirectoryEntrySize)
        sstruct.unpack(woffDirectoryEntryFormat, raw, self)

    def fromString(self, str):
        """Populate this entry from a packed directory-entry string."""
        sstruct.unpack(woffDirectoryEntryFormat, str, self)

    def toString(self):
        """Pack this entry back into its binary directory form."""
        return sstruct.pack(woffDirectoryEntryFormat, self)

    def __repr__(self):
        if not hasattr(self, "tag"):
            return "<WOFFDirectoryEntry at %x>" % id(self)
        return "<WOFFDirectoryEntry '%s' at %x>" % (self.tag, id(self))
645
+
646
+
647
+ # -------
648
+ # Helpers
649
+ # -------
650
+
651
class WOFFLibError(Exception):
    """Raised for errors while reading or writing WOFF data."""
652
+
653
def calc4BytePaddedLength(length):
    """Round length up to the next multiple of four bytes."""
    remainder = length % 4
    if remainder:
        length += 4 - remainder
    return length
655
+
656
def calcTableChecksum(tag, data):
    """
    Calculate the checksum for the given table data, masked to 32 bits.
    The 'head' table is special-cased: its checkSumAdjustment field
    (bytes 8-11) is treated as zero, per the SFNT specification.
    """
    if tag == "head":
        data = data[:8] + '\0\0\0\0' + data[12:]
    return calcChecksum(data) & 0xffffffff
663
+
664
def calcHeadCheckSumAdjustment(flavor, tables):
    """
    Compute the head table checkSumAdjustment for a decoded SFNT.

    flavor is the sfntVersion string; tables maps tag -> dict with
    "offset", "length" and "checkSum" keys describing each table as
    it would appear in the decoded SFNT.
    """
    numTables = len(tables)
    # build the sfnt header
    searchRange, entrySelector, rangeShift = getSearchRange(numTables)
    headerFields = dict(
        sfntVersion=flavor,
        numTables=numTables,
        searchRange=searchRange,
        entrySelector=entrySelector,
        rangeShift=rangeShift
    )
    # build the sfnt directory, entries in ascending tag order
    directory = sstruct.pack(sfntDirectoryFormat, headerFields)
    for tag in sorted(tables):
        info = tables[tag]
        sfntEntry = SFNTDirectoryEntry()
        sfntEntry.tag = tag
        sfntEntry.checkSum = info["checkSum"]
        sfntEntry.offset = info["offset"]
        sfntEntry.length = info["length"]
        directory += sfntEntry.toString()
    # sum the table checksums plus the directory checksum,
    # then derive the adjustment per the SFNT specification
    total = calcChecksum(directory)
    for info in tables.values():
        total += info["checkSum"]
    return (0xB1B0AFBA - total) & 0xffffffff
692
+
693
+ # ----------------
694
+ # SFNT Conformance
695
+ # ----------------
696
+
697
def checkSFNTConformance(file):
    """
    This function checks a SFNT file to see if it meets
    the conformance recomendations in the WOFF specification.
    This includes:
    - searchRange must be correct.
    - entrySelector must be correct.
    - rangeShift must be correct.
    - offset to each table must be after the table directory
      and before the end of the file.
    - offset + length of each table must not extend past
      the end of the file.
    - the table directory must be in ascending order.
    - tables must be padded to 4 byte boundaries.
    - the final table must be padded to a 4 byte boundary.
    - the gaps between table data blocks must not be more
      than necessary to pad the table to a 4 byte boundary.
    - the gap between the end of the final table and
      the end of the file must not be more than necessary
      to pad the table to a four byte boundary.
    - the checksums for each table in the table directory
      must be correct.
    - the head checkSumAdjustment must be correct.
    - the padding bytes must be null.

    The returned value of this function will be a list.
    If any errors were found, they will be represented
    as strings in the list.
    """
    # load the data; file may be a path or a readable file object
    closeFile = False
    if not hasattr(file, "read"):
        file = open(file, "rb")
        closeFile = True
    data = file.read()
    if closeFile:
        file.close()
    # storage
    errors = []
    # unpack the header
    headerData = data[:sfntDirectorySize]
    header = sstruct.unpack(sfntDirectoryFormat, headerData)
    # unpack the table directory
    numTables = header["numTables"]
    directoryData = data[sfntDirectorySize : sfntDirectorySize + (sfntDirectoryEntrySize * numTables)]
    tableDirectory = []
    for index in range(numTables):
        entry = sstruct.unpack(sfntDirectoryEntryFormat, directoryData[:sfntDirectoryEntrySize])
        tableDirectory.append(entry)
        directoryData = directoryData[sfntDirectoryEntrySize:]
    # sanity testing
    errors += _testOffsetBoundaryValidity(len(data), tableDirectory)
    errors += _testLengthBoundaryValidity(len(data), tableDirectory)
    # if one or more errors have already been found, something
    # is very wrong and this should come to a screeching halt.
    if errors:
        return errors
    # junk at the beginning of the file
    errors += _testJunkAtTheBeginningOfTheFile(header)
    # test directory order
    errors += _testDirectoryOrder(tableDirectory)
    # load the table data
    for entry in tableDirectory:
        offset = entry["offset"]
        length = entry["length"]
        entry["data"] = data[offset:offset+length]
    # test for overlaps
    errors += _testOverlaps(tableDirectory)
    # test for padding
    errors += _testOffsets(tableDirectory)
    # test the final table padding
    errors += _testFinalTablePadding(len(data), numTables, tableDirectory[-1]["tag"])
    # test for gaps
    errors += _testGaps(tableDirectory)
    # test for a gap at the end of the file
    errors += _testGapAfterFinalTable(len(data), tableDirectory)
    # test padding value
    errors += _testPaddingValue(tableDirectory, data)
    # validate checksums
    errors += _testCheckSums(tableDirectory)
    errors += _testHeadCheckSum(header, tableDirectory)
    # done.
    return errors
780
+
781
def _testOffsetBoundaryValidity(dataLength, tableDirectory):
    """
    Report tables whose offset falls inside the header/directory
    region or past the end of the file. Returns a list of error
    strings (empty when all offsets are valid).

    >>> test = [
    ...     dict(tag="test", offset=44)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    False
    >>> test = [
    ...     dict(tag="test", offset=1)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    True
    >>> test = [
    ...     dict(tag="test", offset=46)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    True
    """
    errors = []
    numTables = len(tableDirectory)
    # tables may not start before the end of the directory
    minOffset = sfntDirectorySize + (sfntDirectoryEntrySize * numTables)
    for entry in tableDirectory:
        offset = entry["offset"]
        tag = entry["tag"]
        if offset < minOffset:
            errors.append("The offset to the %s table is not valid." % tag)
        if offset > dataLength:
            errors.append("The offset to the %s table is not valid." % tag)
    return errors
810
+
811
+ def _testLengthBoundaryValidity(dataLength, tableDirectory):
812
+ """
813
+ >>> test = [
814
+ ... dict(tag="test", offset=44, length=1)
815
+ ... ]
816
+ >>> bool(_testLengthBoundaryValidity(45, test))
817
+ False
818
+ >>> test = [
819
+ ... dict(tag="test", offset=44, length=2)
820
+ ... ]
821
+ >>> bool(_testLengthBoundaryValidity(45, test))
822
+ True
823
+ """
824
+ errors = []
825
+ entries = [(entry["offset"], entry) for entry in tableDirectory]
826
+ for o, entry in sorted(entries):
827
+ offset = entry["offset"]
828
+ length = entry["length"]
829
+ tag = entry["tag"]
830
+ end = offset + length
831
+ if end > dataLength:
832
+ errors.append("The length of the %s table is not valid." % tag)
833
+ return errors
834
+
835
def _testJunkAtTheBeginningOfTheFile(header):
    """
    Check the searchRange, entrySelector and rangeShift header fields
    against the values derived from numTables. Returns a list of
    error strings (empty when all three are correct).

    >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    False
    >>> test = dict(numTables=5, searchRange=0, entrySelector=2, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    >>> test = dict(numTables=5, searchRange=64, entrySelector=0, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=0)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    """
    errors = []
    numTables = header["numTables"]
    # the expected values are fully determined by numTables
    searchRange, entrySelector, rangeShift = getSearchRange(numTables)
    if header["searchRange"] != searchRange:
        errors.append("The searchRange value is incorrect.")
    if header["entrySelector"] != entrySelector:
        errors.append("The entrySelector value is incorrect.")
    if header["rangeShift"] != rangeShift:
        errors.append("The rangeShift value is incorrect.")
    return errors
860
+
861
+ def _testDirectoryOrder(tableDirectory):
862
+ """
863
+ >>> test = [
864
+ ... dict(tag="aaaa"),
865
+ ... dict(tag="bbbb")
866
+ ... ]
867
+ >>> bool(_testDirectoryOrder(test))
868
+ False
869
+ >>> test = [
870
+ ... dict(tag="bbbb"),
871
+ ... dict(tag="aaaa")
872
+ ... ]
873
+ >>> bool(_testDirectoryOrder(test))
874
+ True
875
+ """
876
+ order = [entry["tag"] for entry in tableDirectory]
877
+ if order != list(sorted(order)):
878
+ return ["The table directory is not in ascending order."]
879
+ return []
880
+
881
+ def _testOverlaps(tableDirectory):
882
+ """
883
+ >>> test = [
884
+ ... dict(tag="aaaa", offset=0, length=100),
885
+ ... dict(tag="bbbb", offset=1000, length=100),
886
+ ... ]
887
+ >>> bool(_testOverlaps(test))
888
+ False
889
+ >>> test = [
890
+ ... dict(tag="aaaa", offset=0, length=100),
891
+ ... dict(tag="bbbb", offset=50, length=100),
892
+ ... ]
893
+ >>> bool(_testOverlaps(test))
894
+ True
895
+ >>> test = [
896
+ ... dict(tag="aaaa", offset=0, length=100),
897
+ ... dict(tag="bbbb", offset=0, length=100),
898
+ ... ]
899
+ >>> bool(_testOverlaps(test))
900
+ True
901
+ >>> test = [
902
+ ... dict(tag="aaaa", offset=0, length=100),
903
+ ... dict(tag="bbbb", offset=0, length=150),
904
+ ... ]
905
+ >>> bool(_testOverlaps(test))
906
+ True
907
+ """
908
+ # gather the edges
909
+ edges = {}
910
+ for entry in tableDirectory:
911
+ start = entry["offset"]
912
+ end = start + entry["length"]
913
+ edges[entry["tag"]] = (start, end)
914
+ # look for overlaps
915
+ overlaps = set()
916
+ for tag, (start, end) in edges.items():
917
+ for otherTag, (otherStart, otherEnd) in edges.items():
918
+ tag = tag.strip()
919
+ otherTag = otherTag.strip()
920
+ if tag == otherTag:
921
+ continue
922
+ if start >= otherStart and start < otherEnd:
923
+ l = sorted((tag, otherTag))
924
+ overlaps.add(tuple(l))
925
+ if end > otherStart and end <= otherEnd:
926
+ l = sorted((tag, otherTag))
927
+ overlaps.add(tuple(l))
928
+ # report
929
+ errors = []
930
+ if overlaps:
931
+ for t1, t2 in sorted(overlaps):
932
+ errors.append("The tables %s and %s overlap." % (t1, t2))
933
+ return errors
934
+
935
+ def _testOffsets(tableDirectory):
936
+ """
937
+ >>> test = [
938
+ ... dict(tag="test", offset=1)
939
+ ... ]
940
+ >>> bool(_testOffsets(test))
941
+ True
942
+ >>> test = [
943
+ ... dict(tag="test", offset=2)
944
+ ... ]
945
+ >>> bool(_testOffsets(test))
946
+ True
947
+ >>> test = [
948
+ ... dict(tag="test", offset=3)
949
+ ... ]
950
+ >>> bool(_testOffsets(test))
951
+ True
952
+ >>> test = [
953
+ ... dict(tag="test", offset=4)
954
+ ... ]
955
+ >>> bool(_testOffsets(test))
956
+ False
957
+ """
958
+ errors = []
959
+ # make the entries sortable
960
+ entries = [(entry["offset"], entry) for entry in tableDirectory]
961
+ for o, entry in sorted(entries):
962
+ offset = entry["offset"]
963
+ if offset % 4:
964
+ errors.append("The %s table does not begin on a 4-byte boundary." % entry["tag"].strip())
965
+ return errors
966
+
967
def _testFinalTablePadding(dataLength, numTables, finalTableTag):
    """
    Verify that the final table is padded so the total data length is
    a multiple of four. Returns a list of error strings; empty when the
    padding is correct.

    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 1,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 2,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 3,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 4,
    ...     1,
    ...     "test"
    ... ))
    False
    """
    # everything after the directory (header + entries) is table data;
    # it must end on a 4-byte boundary
    directoryLength = sfntDirectorySize + (sfntDirectoryEntrySize * numTables)
    tableDataLength = dataLength - directoryLength
    if tableDataLength % 4:
        return ["The final table (%s) is not properly padded." % finalTableTag]
    return []
998
+
999
def _testGaps(tableDirectory):
    """
    Check that consecutive tables abut exactly, allowing for 4-byte
    padding of each table's length. Returns a list of error strings,
    one per improperly padded junction.

    >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+4, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    False
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+5, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    True
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+8, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    True
    """
    errors = []
    prevTag = None
    prevEnd = None
    # Sort on the offset alone. Sorting (offset, entry) tuples falls back
    # to comparing the entry dicts when two offsets are equal, which is
    # arbitrary in Python 2 and a TypeError in Python 3.
    for entry in sorted(tableDirectory, key=lambda entry: entry["offset"]):
        offset = entry["offset"]
        tag = entry["tag"]
        # each table occupies its padded length in the file
        paddedLength = calc4BytePaddedLength(entry["length"])
        if prevEnd is not None and offset != prevEnd:
            errors.append("Improper padding between the %s and %s tables." % (prevTag, tag))
        prevEnd = offset + paddedLength
        prevTag = tag
    return errors
1040
+
1041
def _testGapAfterFinalTable(dataLength, tableDirectory):
    """
    Check that no data follows the (padded) end of the final table.
    Returns a list containing a single error string when trailing data
    exists, otherwise an empty list.

    >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 4, test))
    False
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 5, test))
    True
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 8, test))
    True
    """
    # Locate the entry with the highest offset directly. This avoids
    # sorting (offset, entry) tuples, which compares the entry dicts on
    # tied offsets (arbitrary in Python 2, a TypeError in Python 3), and
    # the confusing tuple indexing that followed.
    finalEntry = max(tableDirectory, key=lambda entry: entry["offset"])
    paddedLength = calc4BytePaddedLength(finalEntry["length"])
    lastPosition = finalEntry["offset"] + paddedLength
    if dataLength - lastPosition > 0:
        return ["Improper padding at the end of the file."]
    return []
1072
+
1073
def _testCheckSums(tableDirectory):
    """
    Verify that each directory entry's stored checkSum matches the
    checksum computed from the entry's data. Returns a list of error
    strings, one per mismatching table.

    >>> data = "0" * 44
    >>> checkSum = calcTableChecksum("test", data)
    >>> test = [
    ...     dict(data=data, checkSum=checkSum, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    False
    >>> test = [
    ...     dict(data=data, checkSum=checkSum+1, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    True
    """
    errors = []
    for entry in tableDirectory:
        expected = calcTableChecksum(entry["tag"], entry["data"])
        if entry["checkSum"] != expected:
            errors.append("Invalid checksum for the %s table." % entry["tag"])
    return errors
1097
+
1098
def _testHeadCheckSum(header, tableDirectory):
    """
    Verify the checkSumAdjustment stored in the head table against the
    value recomputed from all tables. Returns a list containing a single
    error string on mismatch, otherwise an empty list.

    >>> header = dict(sfntVersion="OTTO")
    >>> tableDirectory = [
    ...     dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)),
    ...     dict(tag="aaab", offset=200, length=100, checkSum=456),
    ...     dict(tag="aaac", offset=300, length=100, checkSum=789),
    ... ]
    >>> bool(_testHeadCheckSum(header, tableDirectory))
    """
    tables = dict((entry["tag"], entry) for entry in tableDirectory)
    # checkSumAdjustment occupies bytes 8-11 of the head table data
    stored = struct.unpack(">L", tables["head"]["data"][8:12])[0]
    expected = calcHeadCheckSumAdjustment(header["sfntVersion"], tables)
    if stored != expected:
        return ["The head checkSumAdjustment value is incorrect."]
    return []
1118
+
1119
def _testPaddingValue(tableDirectory, data):
    """
    Check that all padding bytes — between the table directory and the
    first table, between consecutive tables, and after the final table —
    are null. Returns a list of error strings.

    # before first table
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=32, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
    True

    # between tables
    >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=48, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 52))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=52, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 56))
    True

    # after final table
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
    True
    """
    errors = []
    # Sort on the offset alone. Sorting (offset, entry) tuples falls back
    # to comparing the entry dicts when two offsets are equal, which is
    # arbitrary in Python 2 and a TypeError in Python 3.
    entries = sorted(tableDirectory, key=lambda entry: entry["offset"])
    # check between directory and first table
    # check between all tables
    prev = "table directory"
    prevEnd = sfntDirectorySize + (sfntDirectoryEntrySize * len(tableDirectory))
    for entry in entries:
        tag = entry["tag"]
        offset = entry["offset"]
        length = entry["length"]
        # slice the bytes between the previous item and the current table;
        # anything that is not a null byte is junk ("padding" in the
        # original renamed so it no longer shadows the builtin bytes)
        if offset > prevEnd:
            padding = data[prevEnd:offset]
            padding = padding.replace("\0", "")
            if padding:
                errors.append("Bytes between %s and %s are not null." % (prev, tag))
        # shift for the next table
        prev = tag
        prevEnd = offset + length
    # check after the last table
    entry = entries[-1]
    end = entry["offset"] + entry["length"]
    padding = data[end:].replace("\0", "")
    if padding:
        errors.append("Bytes after final table (%s) are not null." % entry["tag"])
    return errors
1173
+
1174
# Running this module directly executes the doctests embedded in the
# validation functions above.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=False)