fonttools 4.60.2__cp311-cp311-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fontTools/__init__.py +8 -0
- fontTools/__main__.py +35 -0
- fontTools/afmLib.py +439 -0
- fontTools/agl.py +5233 -0
- fontTools/annotations.py +30 -0
- fontTools/cffLib/CFF2ToCFF.py +258 -0
- fontTools/cffLib/CFFToCFF2.py +305 -0
- fontTools/cffLib/__init__.py +3694 -0
- fontTools/cffLib/specializer.py +927 -0
- fontTools/cffLib/transforms.py +495 -0
- fontTools/cffLib/width.py +210 -0
- fontTools/colorLib/__init__.py +0 -0
- fontTools/colorLib/builder.py +664 -0
- fontTools/colorLib/errors.py +2 -0
- fontTools/colorLib/geometry.py +143 -0
- fontTools/colorLib/table_builder.py +223 -0
- fontTools/colorLib/unbuilder.py +81 -0
- fontTools/config/__init__.py +90 -0
- fontTools/cu2qu/__init__.py +15 -0
- fontTools/cu2qu/__main__.py +6 -0
- fontTools/cu2qu/benchmark.py +54 -0
- fontTools/cu2qu/cli.py +198 -0
- fontTools/cu2qu/cu2qu.c +15817 -0
- fontTools/cu2qu/cu2qu.cp311-win32.pyd +0 -0
- fontTools/cu2qu/cu2qu.py +563 -0
- fontTools/cu2qu/errors.py +77 -0
- fontTools/cu2qu/ufo.py +363 -0
- fontTools/designspaceLib/__init__.py +3343 -0
- fontTools/designspaceLib/__main__.py +6 -0
- fontTools/designspaceLib/split.py +475 -0
- fontTools/designspaceLib/statNames.py +260 -0
- fontTools/designspaceLib/types.py +147 -0
- fontTools/encodings/MacRoman.py +258 -0
- fontTools/encodings/StandardEncoding.py +258 -0
- fontTools/encodings/__init__.py +1 -0
- fontTools/encodings/codecs.py +135 -0
- fontTools/feaLib/__init__.py +4 -0
- fontTools/feaLib/__main__.py +78 -0
- fontTools/feaLib/ast.py +2143 -0
- fontTools/feaLib/builder.py +1814 -0
- fontTools/feaLib/error.py +22 -0
- fontTools/feaLib/lexer.c +17029 -0
- fontTools/feaLib/lexer.cp311-win32.pyd +0 -0
- fontTools/feaLib/lexer.py +287 -0
- fontTools/feaLib/location.py +12 -0
- fontTools/feaLib/lookupDebugInfo.py +12 -0
- fontTools/feaLib/parser.py +2394 -0
- fontTools/feaLib/variableScalar.py +118 -0
- fontTools/fontBuilder.py +1014 -0
- fontTools/help.py +36 -0
- fontTools/merge/__init__.py +248 -0
- fontTools/merge/__main__.py +6 -0
- fontTools/merge/base.py +81 -0
- fontTools/merge/cmap.py +173 -0
- fontTools/merge/layout.py +526 -0
- fontTools/merge/options.py +85 -0
- fontTools/merge/tables.py +352 -0
- fontTools/merge/unicode.py +78 -0
- fontTools/merge/util.py +143 -0
- fontTools/misc/__init__.py +1 -0
- fontTools/misc/arrayTools.py +424 -0
- fontTools/misc/bezierTools.c +39731 -0
- fontTools/misc/bezierTools.cp311-win32.pyd +0 -0
- fontTools/misc/bezierTools.py +1500 -0
- fontTools/misc/classifyTools.py +170 -0
- fontTools/misc/cliTools.py +53 -0
- fontTools/misc/configTools.py +349 -0
- fontTools/misc/cython.py +27 -0
- fontTools/misc/dictTools.py +83 -0
- fontTools/misc/eexec.py +119 -0
- fontTools/misc/encodingTools.py +72 -0
- fontTools/misc/enumTools.py +23 -0
- fontTools/misc/etree.py +456 -0
- fontTools/misc/filenames.py +245 -0
- fontTools/misc/filesystem/__init__.py +68 -0
- fontTools/misc/filesystem/_base.py +134 -0
- fontTools/misc/filesystem/_copy.py +45 -0
- fontTools/misc/filesystem/_errors.py +54 -0
- fontTools/misc/filesystem/_info.py +75 -0
- fontTools/misc/filesystem/_osfs.py +164 -0
- fontTools/misc/filesystem/_path.py +67 -0
- fontTools/misc/filesystem/_subfs.py +92 -0
- fontTools/misc/filesystem/_tempfs.py +34 -0
- fontTools/misc/filesystem/_tools.py +34 -0
- fontTools/misc/filesystem/_walk.py +55 -0
- fontTools/misc/filesystem/_zipfs.py +204 -0
- fontTools/misc/fixedTools.py +253 -0
- fontTools/misc/intTools.py +25 -0
- fontTools/misc/iterTools.py +12 -0
- fontTools/misc/lazyTools.py +42 -0
- fontTools/misc/loggingTools.py +543 -0
- fontTools/misc/macCreatorType.py +56 -0
- fontTools/misc/macRes.py +261 -0
- fontTools/misc/plistlib/__init__.py +681 -0
- fontTools/misc/plistlib/py.typed +0 -0
- fontTools/misc/psCharStrings.py +1511 -0
- fontTools/misc/psLib.py +398 -0
- fontTools/misc/psOperators.py +572 -0
- fontTools/misc/py23.py +96 -0
- fontTools/misc/roundTools.py +110 -0
- fontTools/misc/sstruct.py +227 -0
- fontTools/misc/symfont.py +242 -0
- fontTools/misc/testTools.py +233 -0
- fontTools/misc/textTools.py +156 -0
- fontTools/misc/timeTools.py +88 -0
- fontTools/misc/transform.py +516 -0
- fontTools/misc/treeTools.py +45 -0
- fontTools/misc/vector.py +147 -0
- fontTools/misc/visitor.py +158 -0
- fontTools/misc/xmlReader.py +188 -0
- fontTools/misc/xmlWriter.py +231 -0
- fontTools/mtiLib/__init__.py +1400 -0
- fontTools/mtiLib/__main__.py +5 -0
- fontTools/otlLib/__init__.py +1 -0
- fontTools/otlLib/builder.py +3465 -0
- fontTools/otlLib/error.py +11 -0
- fontTools/otlLib/maxContextCalc.py +96 -0
- fontTools/otlLib/optimize/__init__.py +53 -0
- fontTools/otlLib/optimize/__main__.py +6 -0
- fontTools/otlLib/optimize/gpos.py +439 -0
- fontTools/pens/__init__.py +1 -0
- fontTools/pens/areaPen.py +52 -0
- fontTools/pens/basePen.py +475 -0
- fontTools/pens/boundsPen.py +98 -0
- fontTools/pens/cairoPen.py +26 -0
- fontTools/pens/cocoaPen.py +26 -0
- fontTools/pens/cu2quPen.py +325 -0
- fontTools/pens/explicitClosingLinePen.py +101 -0
- fontTools/pens/filterPen.py +433 -0
- fontTools/pens/freetypePen.py +462 -0
- fontTools/pens/hashPointPen.py +89 -0
- fontTools/pens/momentsPen.c +13378 -0
- fontTools/pens/momentsPen.cp311-win32.pyd +0 -0
- fontTools/pens/momentsPen.py +879 -0
- fontTools/pens/perimeterPen.py +69 -0
- fontTools/pens/pointInsidePen.py +192 -0
- fontTools/pens/pointPen.py +643 -0
- fontTools/pens/qtPen.py +29 -0
- fontTools/pens/qu2cuPen.py +105 -0
- fontTools/pens/quartzPen.py +43 -0
- fontTools/pens/recordingPen.py +335 -0
- fontTools/pens/reportLabPen.py +79 -0
- fontTools/pens/reverseContourPen.py +96 -0
- fontTools/pens/roundingPen.py +130 -0
- fontTools/pens/statisticsPen.py +312 -0
- fontTools/pens/svgPathPen.py +310 -0
- fontTools/pens/t2CharStringPen.py +88 -0
- fontTools/pens/teePen.py +55 -0
- fontTools/pens/transformPen.py +115 -0
- fontTools/pens/ttGlyphPen.py +335 -0
- fontTools/pens/wxPen.py +29 -0
- fontTools/qu2cu/__init__.py +15 -0
- fontTools/qu2cu/__main__.py +7 -0
- fontTools/qu2cu/benchmark.py +56 -0
- fontTools/qu2cu/cli.py +125 -0
- fontTools/qu2cu/qu2cu.c +16682 -0
- fontTools/qu2cu/qu2cu.cp311-win32.pyd +0 -0
- fontTools/qu2cu/qu2cu.py +405 -0
- fontTools/subset/__init__.py +4096 -0
- fontTools/subset/__main__.py +6 -0
- fontTools/subset/cff.py +184 -0
- fontTools/subset/svg.py +253 -0
- fontTools/subset/util.py +25 -0
- fontTools/svgLib/__init__.py +3 -0
- fontTools/svgLib/path/__init__.py +65 -0
- fontTools/svgLib/path/arc.py +154 -0
- fontTools/svgLib/path/parser.py +322 -0
- fontTools/svgLib/path/shapes.py +183 -0
- fontTools/t1Lib/__init__.py +648 -0
- fontTools/tfmLib.py +460 -0
- fontTools/ttLib/__init__.py +30 -0
- fontTools/ttLib/__main__.py +148 -0
- fontTools/ttLib/macUtils.py +54 -0
- fontTools/ttLib/removeOverlaps.py +395 -0
- fontTools/ttLib/reorderGlyphs.py +285 -0
- fontTools/ttLib/scaleUpem.py +436 -0
- fontTools/ttLib/sfnt.py +661 -0
- fontTools/ttLib/standardGlyphOrder.py +271 -0
- fontTools/ttLib/tables/B_A_S_E_.py +14 -0
- fontTools/ttLib/tables/BitmapGlyphMetrics.py +64 -0
- fontTools/ttLib/tables/C_B_D_T_.py +113 -0
- fontTools/ttLib/tables/C_B_L_C_.py +19 -0
- fontTools/ttLib/tables/C_F_F_.py +61 -0
- fontTools/ttLib/tables/C_F_F__2.py +26 -0
- fontTools/ttLib/tables/C_O_L_R_.py +165 -0
- fontTools/ttLib/tables/C_P_A_L_.py +305 -0
- fontTools/ttLib/tables/D_S_I_G_.py +158 -0
- fontTools/ttLib/tables/D__e_b_g.py +35 -0
- fontTools/ttLib/tables/DefaultTable.py +49 -0
- fontTools/ttLib/tables/E_B_D_T_.py +835 -0
- fontTools/ttLib/tables/E_B_L_C_.py +718 -0
- fontTools/ttLib/tables/F_F_T_M_.py +52 -0
- fontTools/ttLib/tables/F__e_a_t.py +149 -0
- fontTools/ttLib/tables/G_D_E_F_.py +13 -0
- fontTools/ttLib/tables/G_M_A_P_.py +148 -0
- fontTools/ttLib/tables/G_P_K_G_.py +133 -0
- fontTools/ttLib/tables/G_P_O_S_.py +14 -0
- fontTools/ttLib/tables/G_S_U_B_.py +13 -0
- fontTools/ttLib/tables/G_V_A_R_.py +5 -0
- fontTools/ttLib/tables/G__l_a_t.py +235 -0
- fontTools/ttLib/tables/G__l_o_c.py +85 -0
- fontTools/ttLib/tables/H_V_A_R_.py +13 -0
- fontTools/ttLib/tables/J_S_T_F_.py +13 -0
- fontTools/ttLib/tables/L_T_S_H_.py +58 -0
- fontTools/ttLib/tables/M_A_T_H_.py +13 -0
- fontTools/ttLib/tables/M_E_T_A_.py +352 -0
- fontTools/ttLib/tables/M_V_A_R_.py +13 -0
- fontTools/ttLib/tables/O_S_2f_2.py +752 -0
- fontTools/ttLib/tables/S_I_N_G_.py +99 -0
- fontTools/ttLib/tables/S_T_A_T_.py +15 -0
- fontTools/ttLib/tables/S_V_G_.py +223 -0
- fontTools/ttLib/tables/S__i_l_f.py +1040 -0
- fontTools/ttLib/tables/S__i_l_l.py +92 -0
- fontTools/ttLib/tables/T_S_I_B_.py +13 -0
- fontTools/ttLib/tables/T_S_I_C_.py +14 -0
- fontTools/ttLib/tables/T_S_I_D_.py +13 -0
- fontTools/ttLib/tables/T_S_I_J_.py +13 -0
- fontTools/ttLib/tables/T_S_I_P_.py +13 -0
- fontTools/ttLib/tables/T_S_I_S_.py +13 -0
- fontTools/ttLib/tables/T_S_I_V_.py +26 -0
- fontTools/ttLib/tables/T_S_I__0.py +70 -0
- fontTools/ttLib/tables/T_S_I__1.py +163 -0
- fontTools/ttLib/tables/T_S_I__2.py +17 -0
- fontTools/ttLib/tables/T_S_I__3.py +22 -0
- fontTools/ttLib/tables/T_S_I__5.py +60 -0
- fontTools/ttLib/tables/T_T_F_A_.py +14 -0
- fontTools/ttLib/tables/TupleVariation.py +884 -0
- fontTools/ttLib/tables/V_A_R_C_.py +12 -0
- fontTools/ttLib/tables/V_D_M_X_.py +249 -0
- fontTools/ttLib/tables/V_O_R_G_.py +165 -0
- fontTools/ttLib/tables/V_V_A_R_.py +13 -0
- fontTools/ttLib/tables/__init__.py +98 -0
- fontTools/ttLib/tables/_a_n_k_r.py +15 -0
- fontTools/ttLib/tables/_a_v_a_r.py +193 -0
- fontTools/ttLib/tables/_b_s_l_n.py +15 -0
- fontTools/ttLib/tables/_c_i_d_g.py +24 -0
- fontTools/ttLib/tables/_c_m_a_p.py +1591 -0
- fontTools/ttLib/tables/_c_v_a_r.py +94 -0
- fontTools/ttLib/tables/_c_v_t.py +56 -0
- fontTools/ttLib/tables/_f_e_a_t.py +15 -0
- fontTools/ttLib/tables/_f_p_g_m.py +62 -0
- fontTools/ttLib/tables/_f_v_a_r.py +261 -0
- fontTools/ttLib/tables/_g_a_s_p.py +63 -0
- fontTools/ttLib/tables/_g_c_i_d.py +13 -0
- fontTools/ttLib/tables/_g_l_y_f.py +2311 -0
- fontTools/ttLib/tables/_g_v_a_r.py +340 -0
- fontTools/ttLib/tables/_h_d_m_x.py +127 -0
- fontTools/ttLib/tables/_h_e_a_d.py +130 -0
- fontTools/ttLib/tables/_h_h_e_a.py +147 -0
- fontTools/ttLib/tables/_h_m_t_x.py +164 -0
- fontTools/ttLib/tables/_k_e_r_n.py +289 -0
- fontTools/ttLib/tables/_l_c_a_r.py +13 -0
- fontTools/ttLib/tables/_l_o_c_a.py +70 -0
- fontTools/ttLib/tables/_l_t_a_g.py +72 -0
- fontTools/ttLib/tables/_m_a_x_p.py +147 -0
- fontTools/ttLib/tables/_m_e_t_a.py +112 -0
- fontTools/ttLib/tables/_m_o_r_t.py +14 -0
- fontTools/ttLib/tables/_m_o_r_x.py +15 -0
- fontTools/ttLib/tables/_n_a_m_e.py +1242 -0
- fontTools/ttLib/tables/_o_p_b_d.py +14 -0
- fontTools/ttLib/tables/_p_o_s_t.py +319 -0
- fontTools/ttLib/tables/_p_r_e_p.py +16 -0
- fontTools/ttLib/tables/_p_r_o_p.py +12 -0
- fontTools/ttLib/tables/_s_b_i_x.py +129 -0
- fontTools/ttLib/tables/_t_r_a_k.py +332 -0
- fontTools/ttLib/tables/_v_h_e_a.py +139 -0
- fontTools/ttLib/tables/_v_m_t_x.py +19 -0
- fontTools/ttLib/tables/asciiTable.py +20 -0
- fontTools/ttLib/tables/grUtils.py +92 -0
- fontTools/ttLib/tables/otBase.py +1458 -0
- fontTools/ttLib/tables/otConverters.py +2068 -0
- fontTools/ttLib/tables/otData.py +6400 -0
- fontTools/ttLib/tables/otTables.py +2703 -0
- fontTools/ttLib/tables/otTraverse.py +163 -0
- fontTools/ttLib/tables/sbixGlyph.py +149 -0
- fontTools/ttLib/tables/sbixStrike.py +177 -0
- fontTools/ttLib/tables/table_API_readme.txt +91 -0
- fontTools/ttLib/tables/ttProgram.py +594 -0
- fontTools/ttLib/ttCollection.py +125 -0
- fontTools/ttLib/ttFont.py +1148 -0
- fontTools/ttLib/ttGlyphSet.py +490 -0
- fontTools/ttLib/ttVisitor.py +32 -0
- fontTools/ttLib/woff2.py +1680 -0
- fontTools/ttx.py +479 -0
- fontTools/ufoLib/__init__.py +2575 -0
- fontTools/ufoLib/converters.py +407 -0
- fontTools/ufoLib/errors.py +30 -0
- fontTools/ufoLib/etree.py +6 -0
- fontTools/ufoLib/filenames.py +356 -0
- fontTools/ufoLib/glifLib.py +2120 -0
- fontTools/ufoLib/kerning.py +141 -0
- fontTools/ufoLib/plistlib.py +47 -0
- fontTools/ufoLib/pointPen.py +6 -0
- fontTools/ufoLib/utils.py +107 -0
- fontTools/ufoLib/validators.py +1208 -0
- fontTools/unicode.py +50 -0
- fontTools/unicodedata/Blocks.py +817 -0
- fontTools/unicodedata/Mirrored.py +446 -0
- fontTools/unicodedata/OTTags.py +50 -0
- fontTools/unicodedata/ScriptExtensions.py +832 -0
- fontTools/unicodedata/Scripts.py +3639 -0
- fontTools/unicodedata/__init__.py +306 -0
- fontTools/varLib/__init__.py +1600 -0
- fontTools/varLib/__main__.py +6 -0
- fontTools/varLib/avar/__init__.py +0 -0
- fontTools/varLib/avar/__main__.py +72 -0
- fontTools/varLib/avar/build.py +79 -0
- fontTools/varLib/avar/map.py +108 -0
- fontTools/varLib/avar/plan.py +1004 -0
- fontTools/varLib/avar/unbuild.py +271 -0
- fontTools/varLib/avarPlanner.py +8 -0
- fontTools/varLib/builder.py +215 -0
- fontTools/varLib/cff.py +631 -0
- fontTools/varLib/errors.py +219 -0
- fontTools/varLib/featureVars.py +703 -0
- fontTools/varLib/hvar.py +113 -0
- fontTools/varLib/instancer/__init__.py +2052 -0
- fontTools/varLib/instancer/__main__.py +5 -0
- fontTools/varLib/instancer/featureVars.py +190 -0
- fontTools/varLib/instancer/names.py +388 -0
- fontTools/varLib/instancer/solver.py +309 -0
- fontTools/varLib/interpolatable.py +1209 -0
- fontTools/varLib/interpolatableHelpers.py +399 -0
- fontTools/varLib/interpolatablePlot.py +1269 -0
- fontTools/varLib/interpolatableTestContourOrder.py +82 -0
- fontTools/varLib/interpolatableTestStartingPoint.py +107 -0
- fontTools/varLib/interpolate_layout.py +124 -0
- fontTools/varLib/iup.c +19815 -0
- fontTools/varLib/iup.cp311-win32.pyd +0 -0
- fontTools/varLib/iup.py +490 -0
- fontTools/varLib/merger.py +1717 -0
- fontTools/varLib/models.py +642 -0
- fontTools/varLib/multiVarStore.py +253 -0
- fontTools/varLib/mutator.py +529 -0
- fontTools/varLib/mvar.py +40 -0
- fontTools/varLib/plot.py +238 -0
- fontTools/varLib/stat.py +149 -0
- fontTools/varLib/varStore.py +739 -0
- fontTools/voltLib/__init__.py +5 -0
- fontTools/voltLib/__main__.py +206 -0
- fontTools/voltLib/ast.py +452 -0
- fontTools/voltLib/error.py +12 -0
- fontTools/voltLib/lexer.py +99 -0
- fontTools/voltLib/parser.py +664 -0
- fontTools/voltLib/voltToFea.py +911 -0
- fonttools-4.60.2.data/data/share/man/man1/ttx.1 +225 -0
- fonttools-4.60.2.dist-info/METADATA +2250 -0
- fonttools-4.60.2.dist-info/RECORD +353 -0
- fonttools-4.60.2.dist-info/WHEEL +5 -0
- fonttools-4.60.2.dist-info/entry_points.txt +5 -0
- fonttools-4.60.2.dist-info/licenses/LICENSE +21 -0
- fonttools-4.60.2.dist-info/licenses/LICENSE.external +388 -0
- fonttools-4.60.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2394 @@
|
|
|
1
|
+
from fontTools.feaLib.error import FeatureLibError
|
|
2
|
+
from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
|
|
3
|
+
from fontTools.feaLib.variableScalar import VariableScalar
|
|
4
|
+
from fontTools.misc.encodingTools import getEncoding
|
|
5
|
+
from fontTools.misc.textTools import bytechr, tobytes, tostr
|
|
6
|
+
import fontTools.feaLib.ast as ast
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
import re
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
log = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class Parser(object):
|
|
16
|
+
"""Initializes a Parser object.
|
|
17
|
+
|
|
18
|
+
Example:
|
|
19
|
+
|
|
20
|
+
.. code:: python
|
|
21
|
+
|
|
22
|
+
from fontTools.feaLib.parser import Parser
|
|
23
|
+
parser = Parser(file, font.getReverseGlyphMap())
|
|
24
|
+
parsetree = parser.parse()
|
|
25
|
+
|
|
26
|
+
Note: the ``glyphNames`` iterable serves a double role to help distinguish
|
|
27
|
+
glyph names from ranges in the presence of hyphens and to ensure that glyph
|
|
28
|
+
names referenced in a feature file are actually part of a font's glyph set.
|
|
29
|
+
If the iterable is left empty, no glyph name in glyph set checking takes
|
|
30
|
+
place, and all glyph tokens containing hyphens are treated as literal glyph
|
|
31
|
+
names, not as ranges. (Adding a space around the hyphen can, in any case,
|
|
32
|
+
help to disambiguate ranges from glyph names containing hyphens.)
|
|
33
|
+
|
|
34
|
+
By default, the parser will follow ``include()`` statements in the feature
|
|
35
|
+
file. To turn this off, pass ``followIncludes=False``. Pass a directory string as
|
|
36
|
+
``includeDir`` to explicitly declare a directory to search included feature files
|
|
37
|
+
in.
|
|
38
|
+
"""
|
|
39
|
+
|
|
40
|
+
# Hook for plugging in custom top-level statement parsers: maps a keyword
# (a Lexer.NAME token) to a callable that receives this Parser and returns
# an ast statement node; consulted in parse().
extensions = {}
# The ast module used to build parse-tree nodes (e.g. self.ast.FeatureFile);
# exposed as a class attribute so it can be substituted.
ast = ast
# Valid stylistic-set feature tags: "ss01" through "ss20".
SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)}
# Valid character-variant feature tags: "cv01" through "cv99".
CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)}
|
|
44
|
+
|
|
45
|
+
def __init__(
    self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs
):
    # Accept the deprecated 'glyphMap' keyword as an alias for 'glyphNames';
    # passing both is an error.
    if "glyphMap" in kwargs:
        from fontTools.misc.loggingTools import deprecateArgument

        deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
        if glyphNames:
            raise TypeError(
                "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive"
            )
        glyphNames = kwargs.pop("glyphMap")
    # Anything left in kwargs at this point is unsupported.
    if kwargs:
        raise TypeError(
            "unsupported keyword argument%s: %s"
            % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs))
        )

    # The font's glyph set; may be empty, which disables glyph-name checking.
    self.glyphNames_ = set(glyphNames)
    # Root node of the abstract syntax tree built by parse().
    self.doc_ = self.ast.FeatureFile()
    # Symbol tables for named anchors, glyph classes, lookups and value
    # records defined in the feature file.
    self.anchors_ = SymbolTable()
    self.glyphclasses_ = SymbolTable()
    self.lookups_ = SymbolTable()
    self.valuerecords_ = SymbolTable()
    self.symbol_tables_ = {self.anchors_, self.valuerecords_}
    # One-token lookahead state; the cur_* counterparts are populated by
    # advance_lexer_().
    self.next_token_type_, self.next_token_ = (None, None)
    self.cur_comments_ = []
    self.next_token_location_ = None
    lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
    self.lexer_ = lexerClass(featurefile, includeDir=includeDir)
    # Glyph names referenced but missing from the glyph set, keyed by name
    # with the first location seen; reported at the end of parse().
    self.missing = {}
    self.advance_lexer_(comments=True)
|
|
77
|
+
|
|
78
|
+
def parse(self):
    """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile`
    object representing the root of the abstract syntax tree containing the
    parsed contents of the file."""
    statements = self.doc_.statements
    # Dispatch on each top-level token until the lexer is exhausted.
    while self.next_token_type_ is not None or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("include"):
            statements.append(self.parse_include_())
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_(("anon", "anonymous")):
            statements.append(self.parse_anonymous_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_("languagesystem"):
            statements.append(self.parse_languagesystem_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical=False))
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_block_())
        elif self.is_cur_keyword_("conditionset"):
            statements.append(self.parse_conditionset_())
        elif self.is_cur_keyword_("variation"):
            # A 'variation' block is parsed like a feature block.
            statements.append(self.parse_feature_block_(variation=True))
        elif self.is_cur_keyword_("table"):
            statements.append(self.parse_table_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical=False))
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            # User-registered extension keyword; the registered callable
            # receives this parser and returns the statement node.
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
            # Stray semicolons between statements are silently skipped.
            continue
        else:
            raise FeatureLibError(
                "Expected feature, languagesystem, lookup, markClass, "
                'table, or glyph class definition, got {} "{}"'.format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    # Report any missing glyphs at the end of parsing
    if self.missing:
        error = [
            " %s (first found at %s)" % (name, loc)
            for name, loc in self.missing.items()
        ]
        raise FeatureLibError(
            "The following glyph names are referenced but are missing from the "
            "glyph set:\n" + ("\n".join(error)),
            None,
        )
    return self.doc_
|
|
140
|
+
|
|
141
|
+
def parse_anchor_(self):
    # Parses an anchor in any of the four formats given in the feature
    # file specification (2.e.vii). Returns None for <anchor NULL>,
    # otherwise an ast.Anchor node.
    self.expect_symbol_("<")
    self.expect_keyword_("anchor")
    location = self.cur_token_location_

    if self.next_token_ == "NULL":  # Format D
        self.expect_keyword_("NULL")
        self.expect_symbol_(">")
        return None

    if self.next_token_type_ == Lexer.NAME:  # Format E
        # Reference to a named anchor previously declared with anchorDef.
        name = self.expect_name_()
        anchordef = self.anchors_.resolve(name)
        if anchordef is None:
            raise FeatureLibError(
                'Unknown anchor "%s"' % name, self.cur_token_location_
            )
        self.expect_symbol_(">")
        return self.ast.Anchor(
            anchordef.x,
            anchordef.y,
            name=name,
            contourpoint=anchordef.contourpoint,
            xDeviceTable=None,
            yDeviceTable=None,
            location=location,
        )

    # Formats A, B and C all start with an x and a y coordinate, either of
    # which may be a variable scalar.
    x, y = self.expect_number_(variable=True), self.expect_number_(variable=True)

    contourpoint = None
    if self.next_token_ == "contourpoint":  # Format B
        self.expect_keyword_("contourpoint")
        contourpoint = self.expect_number_()

    if self.next_token_ == "<":  # Format C
        xDeviceTable = self.parse_device_()
        yDeviceTable = self.parse_device_()
    else:
        xDeviceTable, yDeviceTable = None, None

    self.expect_symbol_(">")
    return self.ast.Anchor(
        x,
        y,
        name=None,
        contourpoint=contourpoint,
        xDeviceTable=xDeviceTable,
        yDeviceTable=yDeviceTable,
        location=location,
    )
|
|
194
|
+
|
|
195
|
+
def parse_anchor_marks_(self):
    # Parses a sequence of ``[<anchor> mark @MARKCLASS]*.`` and returns a
    # list of (anchor, mark class) pairs.
    pairs = []
    while self.next_token_ == "<":
        parsed_anchor = self.parse_anchor_()
        # A bare <anchor NULL> without a trailing "mark" keyword is legal
        # (e.g. in GPOS type 5); it contributes no pair.
        if parsed_anchor is None and self.next_token_ != "mark":
            continue
        self.expect_keyword_("mark")
        mark_class = self.expect_markClass_reference_()
        pairs.append((parsed_anchor, mark_class))
    return pairs
|
|
206
|
+
|
|
207
|
+
def parse_anchordef_(self):
    # Parses a named anchor definition
    #   anchorDef <x> <y> [contourpoint <n>] <name>;
    # (`section 2.e.viii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.vii>`_)
    # and registers it in the anchors symbol table.
    assert self.is_cur_keyword_("anchorDef")
    location = self.cur_token_location_
    x = self.expect_number_()
    y = self.expect_number_()
    point = None
    if self.next_token_ == "contourpoint":
        self.expect_keyword_("contourpoint")
        point = self.expect_number_()
    anchor_name = self.expect_name_()
    self.expect_symbol_(";")
    definition = self.ast.AnchorDefinition(
        anchor_name, x, y, contourpoint=point, location=location
    )
    self.anchors_.define(anchor_name, definition)
    return definition
|
|
223
|
+
|
|
224
|
+
def parse_anonymous_(self):
    # Parses an anonymous data block (`section 10 <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#10>`_).
    assert self.is_cur_keyword_(("anon", "anonymous"))
    tag = self.expect_tag_()
    # The lexer consumes the raw block contents up to the closing "} tag;".
    _, content, location = self.lexer_.scan_anonymous_block(tag)
    self.advance_lexer_()
    self.expect_symbol_("}")
    end_tag = self.expect_tag_()
    # The tag after the closing brace must repeat the opening tag.
    assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()"
    self.expect_symbol_(";")
    return self.ast.AnonymousBlock(tag, content, location=location)
|
|
235
|
+
|
|
236
|
+
def parse_attach_(self):
    # Parses a GDEF Attach statement
    #   Attach <glyphs> <contour point>+ ;
    # (`section 9.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.b>`_)
    assert self.is_cur_keyword_("Attach")
    location = self.cur_token_location_
    targets = self.parse_glyphclass_(accept_glyphname=True)
    # At least one contour point is required; duplicates collapse in a set.
    points = set()
    points.add(self.expect_number_())
    while self.next_token_ != ";":
        points.add(self.expect_number_())
    self.expect_symbol_(";")
    return self.ast.AttachStatement(targets, points, location=location)
|
|
246
|
+
|
|
247
|
+
def parse_enumerate_(self, vertical):
    # Parse an enumerated pair positioning rule (`section 6.b.ii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_):
    # consumes the "enum"/"enumerate" prefix keyword and delegates to
    # parse_position_ with enumerated=True.
    assert self.cur_token_ in {"enumerate", "enum"}
    self.advance_lexer_()
    return self.parse_position_(enumerated=True, vertical=vertical)
|
|
252
|
+
|
|
253
|
+
def parse_GlyphClassDef_(self):
    # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'
    # Any of the four slots may be left empty before its separator.
    assert self.is_cur_keyword_("GlyphClassDef")
    location = self.cur_token_location_
    # The slots appear in source order base, ligature, mark, component;
    # the first three end with a comma, the last with a semicolon.
    slots = []
    for terminator in (",", ",", ",", ";"):
        if self.next_token_ != terminator:
            slots.append(self.parse_glyphclass_(accept_glyphname=False))
        else:
            slots.append(None)
        self.expect_symbol_(terminator)
    base_glyphs, ligature_glyphs, mark_glyphs, component_glyphs = slots
    # Note: the AST constructor takes marks before ligatures.
    return self.ast.GlyphClassDefStatement(
        base_glyphs, mark_glyphs, ligature_glyphs, component_glyphs, location=location
    )
|
|
280
|
+
|
|
281
|
+
def parse_glyphclass_definition_(self):
    # Parses a glyph class definition such as '@UPPERCASE = [A-Z];' and
    # registers it in the glyph class symbol table. The class name is the
    # current (GLYPHCLASS) token.
    location = self.cur_token_location_
    class_name = self.cur_token_
    self.expect_symbol_("=")
    members = self.parse_glyphclass_(accept_glyphname=False)
    self.expect_symbol_(";")
    definition = self.ast.GlyphClassDefinition(class_name, members, location=location)
    self.glyphclasses_.define(class_name, definition)
    return definition
|
|
290
|
+
|
|
291
|
+
def split_glyph_range_(self, name, location):
    # Since v1.20 of the OpenType Feature File specification, glyph names
    # may contain dashes, so a token like "a-b-c-d" is ambiguous: it could
    # be a single glyph whose name happens to be "a-b-c-d", or a range at
    # any split point ("a" to "b-c-d", "a-b" to "c-d", "a-b-c" to "d").
    # Rather than applying fancy disambiguation heuristics (which the
    # specification does not require of compilers), we try every possible
    # split against the glyph set and reject the feature file outright when
    # more than one resolves. A deliberate error — not a warning, which
    # tends to be ignored — pushes authors to disambiguate, e.g. by adding
    # spaces around the intended split point.
    parts = name.split("-")
    candidates = []
    for i in range(len(parts)):
        head = "-".join(parts[:i])
        tail = "-".join(parts[i:])
        if head in self.glyphNames_ and tail in self.glyphNames_:
            candidates.append((head, tail))
    if len(candidates) == 1:
        return candidates[0]
    if not candidates:
        raise FeatureLibError(
            '"%s" is not a glyph in the font, and it can not be split '
            "into a range of known glyphs" % name,
            location,
        )
    ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in candidates])
    raise FeatureLibError(
        'Ambiguous glyph range "%s"; '
        "please use %s to clarify what you mean" % (name, ranges),
        location,
    )
|
|
332
|
+
|
|
333
|
+
def parse_glyphclass_(self, accept_glyphname, accept_null=False):
    # Parses a glyph class, either named or anonymous, or (if
    # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then
    # also accept the special NULL glyph.
    # Returns an AST node (GlyphName, NullGlyph, MarkClassName,
    # GlyphClassName, or GlyphClass).
    if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID):
        if accept_null and self.next_token_ == "NULL":
            # If you want a glyph called NULL, you should escape it.
            self.advance_lexer_()
            return self.ast.NullGlyph(location=self.cur_token_location_)
        glyph = self.expect_glyph_()
        self.check_glyph_name_in_glyph_set(glyph)
        return self.ast.GlyphName(glyph, location=self.cur_token_location_)
    if self.next_token_type_ is Lexer.GLYPHCLASS:
        # Named glyph class reference, e.g. "@LETTERS".
        self.advance_lexer_()
        gc = self.glyphclasses_.resolve(self.cur_token_)
        if gc is None:
            raise FeatureLibError(
                "Unknown glyph class @%s" % self.cur_token_,
                self.cur_token_location_,
            )
        if isinstance(gc, self.ast.MarkClass):
            return self.ast.MarkClassName(gc, location=self.cur_token_location_)
        else:
            return self.ast.GlyphClassName(gc, location=self.cur_token_location_)

    # Anonymous glyph class: "[ ... ]".
    self.expect_symbol_("[")
    location = self.cur_token_location_
    glyphs = self.ast.GlyphClass(location=location)
    while self.next_token_ != "]":
        if self.next_token_type_ is Lexer.NAME:
            glyph = self.expect_glyph_()
            location = self.cur_token_location_
            if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_:
                # A name containing "-" that is not itself a known glyph is
                # treated as a glyph range and split against the glyph set.
                start, limit = self.split_glyph_range_(glyph, location)
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            elif self.next_token_ == "-":
                # Explicit range with spaces around "-": "a - z".
                start = glyph
                self.expect_symbol_("-")
                limit = self.expect_glyph_()
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            else:
                if "-" in glyph and not self.glyphNames_:
                    # Without a glyph set we cannot tell a dashed glyph name
                    # from a range; warn but accept the name as-is.
                    log.warning(
                        str(
                            FeatureLibError(
                                f"Ambiguous glyph name that looks like a range: {glyph!r}",
                                location,
                            )
                        )
                    )
                self.check_glyph_name_in_glyph_set(glyph)
                glyphs.append(glyph)
        elif self.next_token_type_ is Lexer.CID:
            glyph = self.expect_glyph_()
            if self.next_token_ == "-":
                # CID range, e.g. "\1 - \10".
                range_location = self.cur_token_location_
                range_start = self.cur_token_
                self.expect_symbol_("-")
                range_end = self.expect_cid_()
                self.check_glyph_name_in_glyph_set(
                    f"cid{range_start:05d}",
                    f"cid{range_end:05d}",
                )
                glyphs.add_cid_range(
                    range_start,
                    range_end,
                    self.make_cid_range_(range_location, range_start, range_end),
                )
            else:
                # Single CID; canonical glyph name is zero-padded "cidNNNNN".
                glyph_name = f"cid{self.cur_token_:05d}"
                self.check_glyph_name_in_glyph_set(glyph_name)
                glyphs.append(glyph_name)
        elif self.next_token_type_ is Lexer.GLYPHCLASS:
            # Nested named class reference inside the brackets.
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_,
                )
            if isinstance(gc, self.ast.MarkClass):
                gc = self.ast.MarkClassName(gc, location=self.cur_token_location_)
            else:
                gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_)
            glyphs.add_class(gc)
        else:
            raise FeatureLibError(
                "Expected glyph name, glyph range, "
                f"or glyph class reference, found {self.next_token_!r}",
                self.next_token_location_,
            )
    self.expect_symbol_("]")
    return glyphs
|
|
432
|
+
|
|
433
|
+
def parse_glyph_pattern_(self, vertical):
    # Parses a glyph pattern, including lookups and context, e.g.::
    #
    #     a b
    #     a b c' d e
    #     a b c' lookup ChangeC d e
    #
    # Returns a 6-tuple (prefix, glyphs, lookups, values, suffix, hasMarks)
    # where ``glyphs`` is the marked run (or the whole pattern when nothing
    # is marked; in that case it is returned in the second slot with an
    # empty prefix/suffix).
    #
    # BUG FIX: the backtrack-positioning error message previously misspelled
    # "backtrack" as "bactrack".
    prefix, glyphs, lookups, values, suffix = ([], [], [], [], [])
    hasMarks = False
    while self.next_token_ not in {"by", "from", ";", ","}:
        gc = self.parse_glyphclass_(accept_glyphname=True)
        marked = False
        if self.next_token_ == "'":
            self.expect_symbol_("'")
            hasMarks = marked = True
        if marked:
            if suffix:
                # makeotf also reports this as an error, while FontForge
                # silently inserts ' in all the intervening glyphs.
                # https://github.com/fonttools/fonttools/pull/1096
                raise FeatureLibError(
                    "Unsupported contextual target sequence: at most "
                    "one run of marked (') glyph/class names allowed",
                    self.cur_token_location_,
                )
            glyphs.append(gc)
        elif glyphs:
            suffix.append(gc)
        else:
            prefix.append(gc)

        # One value-record slot per glyph node (None when absent).
        if self.is_next_value_():
            values.append(self.parse_valuerecord_(vertical))
        else:
            values.append(None)

        lookuplist = None
        while self.next_token_ == "lookup":
            if lookuplist is None:
                lookuplist = []
            self.expect_keyword_("lookup")
            if not marked:
                raise FeatureLibError(
                    "Lookups can only follow marked glyphs",
                    self.cur_token_location_,
                )
            lookup_name = self.expect_name_()
            lookup = self.lookups_.resolve(lookup_name)
            if lookup is None:
                raise FeatureLibError(
                    'Unknown lookup "%s"' % lookup_name, self.cur_token_location_
                )
            lookuplist.append(lookup)
        if marked:
            lookups.append(lookuplist)

    if not glyphs and not suffix:  # eg., "sub f f i by"
        assert lookups == []
        return ([], prefix, [None] * len(prefix), values, [], hasMarks)
    else:
        if any(values[: len(prefix)]):
            raise FeatureLibError(
                "Positioning cannot be applied in the backtrack glyph sequence, "
                "before the marked glyph sequence.",
                self.cur_token_location_,
            )
        marked_values = values[len(prefix) : len(prefix) + len(glyphs)]
        if any(marked_values):
            # Values on marked glyphs: nothing may follow in the suffix.
            if any(values[len(prefix) + len(glyphs) :]):
                raise FeatureLibError(
                    "Positioning values are allowed only in the marked glyph "
                    "sequence, or after the final glyph node when only one glyph "
                    "node is marked.",
                    self.cur_token_location_,
                )
            values = marked_values
        elif values and values[-1]:
            # Single trailing value is allowed only with one marked glyph.
            if len(glyphs) > 1 or any(values[:-1]):
                raise FeatureLibError(
                    "Positioning values are allowed only in the marked glyph "
                    "sequence, or after the final glyph node when only one glyph "
                    "node is marked.",
                    self.cur_token_location_,
                )
            values = values[-1:]
        elif any(values):
            raise FeatureLibError(
                "Positioning values are allowed only in the marked glyph "
                "sequence, or after the final glyph node when only one glyph "
                "node is marked.",
                self.cur_token_location_,
            )
    return (prefix, glyphs, lookups, values, suffix, hasMarks)
|
|
525
|
+
|
|
526
|
+
def parse_ignore_glyph_pattern_(self, sub):
    # Parses one glyph pattern of an ``ignore sub``/``ignore pos`` rule and
    # returns a (prefix, glyphs, suffix) chain-context triple.
    #
    # BUG FIX: corrected the warning's grammar ("there should be at least
    # one marked glyph"; the word "at" was missing).
    location = self.cur_token_location_
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical=False
    )
    if any(lookups):
        raise FeatureLibError(
            f'No lookups can be specified for "ignore {sub}"', location
        )
    if not hasMarks:
        # Nothing was marked: warn, then treat the first glyph as the
        # target and the rest as lookahead context.
        error = FeatureLibError(
            f'Ambiguous "ignore {sub}", there should be at least one marked glyph',
            location,
        )
        log.warning(str(error))
        suffix, glyphs = glyphs[1:], glyphs[0:1]
    chainContext = (prefix, glyphs, suffix)
    return chainContext
|
|
544
|
+
|
|
545
|
+
def parse_ignore_context_(self, sub):
    """Collect the comma-separated glyph patterns of an ignore rule.

    Consumes the trailing ";" and returns a list of
    (prefix, glyphs, suffix) triples.
    """
    _ = self.cur_token_location_  # location recorded but unused, as before
    contexts = [self.parse_ignore_glyph_pattern_(sub)]
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        contexts.append(self.parse_ignore_glyph_pattern_(sub))
    self.expect_symbol_(";")
    return contexts
|
|
553
|
+
|
|
554
|
+
def parse_ignore_(self):
    """Parse an ``ignore sub ...;`` or ``ignore pos ...;`` rule."""
    assert self.is_cur_keyword_("ignore")
    loc = self.cur_token_location_
    self.advance_lexer_()
    keyword = self.cur_token_
    if keyword in ["substitute", "sub"]:
        return self.ast.IgnoreSubstStatement(
            self.parse_ignore_context_("sub"), location=loc
        )
    if keyword in ["position", "pos"]:
        return self.ast.IgnorePosStatement(
            self.parse_ignore_context_("pos"), location=loc
        )
    raise FeatureLibError(
        'Expected "substitute" or "position"', self.cur_token_location_
    )
|
|
568
|
+
|
|
569
|
+
def parse_include_(self):
    """Parse an ``include(...)`` statement.

    NOTE: this deliberately uses the module-level ``ast`` (not
    ``self.ast``), and the trailing ";" is intentionally NOT consumed
    here, matching the original implementation.
    """
    assert self.cur_token_ == "include"
    loc = self.cur_token_location_
    path = self.expect_filename_()
    return ast.IncludeStatement(path, location=loc)
|
|
575
|
+
|
|
576
|
+
def parse_language_(self):
    """Parse a ``language`` statement inside a feature block."""
    assert self.is_cur_keyword_("language")
    loc = self.cur_token_location_
    tag = self.expect_language_tag_()
    include_default = True
    required = False
    if self.next_token_ in {"exclude_dflt", "include_dflt"}:
        include_default = self.expect_name_() == "include_dflt"
    if self.next_token_ == "required":
        self.expect_keyword_("required")
        required = True
    self.expect_symbol_(";")
    return self.ast.LanguageStatement(
        tag, include_default, required, location=loc
    )
|
|
590
|
+
|
|
591
|
+
def parse_ligatureCaretByIndex_(self):
    """Parse a GDEF ``LigatureCaretByIndex`` statement."""
    assert self.is_cur_keyword_("LigatureCaretByIndex")
    loc = self.cur_token_location_
    target = self.parse_glyphclass_(accept_glyphname=True)
    indices = []
    # At least one caret index is required; read until ";".
    while True:
        indices.append(self.expect_number_())
        if self.next_token_ == ";":
            break
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByIndexStatement(target, indices, location=loc)
|
|
600
|
+
|
|
601
|
+
def parse_ligatureCaretByPos_(self):
    """Parse a GDEF ``LigatureCaretByPos`` statement (values may be variable)."""
    assert self.is_cur_keyword_("LigatureCaretByPos")
    loc = self.cur_token_location_
    target = self.parse_glyphclass_(accept_glyphname=True)
    positions = []
    # At least one caret position is required; read until ";".
    while True:
        positions.append(self.expect_number_(variable=True))
        if self.next_token_ == ";":
            break
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByPosStatement(target, positions, location=loc)
|
|
610
|
+
|
|
611
|
+
def parse_lookup_(self, vertical):
    """Parse a ``lookup`` block, or a lookup reference inside a feature."""
    assert self.is_cur_keyword_("lookup")
    loc = self.cur_token_location_
    name = self.expect_name_()

    # "lookup <name>;" references a previously defined lookup.
    if self.next_token_ == ";":
        resolved = self.lookups_.resolve(name)
        if resolved is None:
            raise FeatureLibError(
                'Unknown lookup "%s"' % name, self.cur_token_location_
            )
        self.expect_symbol_(";")
        return self.ast.LookupReferenceStatement(resolved, location=loc)

    # Otherwise this is a lookup block definition.
    use_extension = self.next_token_ == "useExtension"
    if use_extension:
        self.expect_keyword_("useExtension")

    block = self.ast.LookupBlock(name, use_extension, location=loc)
    self.parse_block_(block, vertical)
    self.lookups_.define(name, block)
    return block
|
|
635
|
+
|
|
636
|
+
def parse_lookupflag_(self):
    """Parse a ``lookupflag`` statement, numeric (format B) or named (format A)."""
    assert self.is_cur_keyword_("lookupflag")
    loc = self.cur_token_location_

    # Format B: "lookupflag 6;"
    if self.next_token_type_ == Lexer.NUMBER:
        numeric = self.expect_number_()
        self.expect_symbol_(";")
        return self.ast.LookupFlagStatement(numeric, location=loc)

    # Format A: "lookupflag RightToLeft MarkAttachmentType @M;"
    simple_flags = {
        "RightToLeft": 1,
        "IgnoreBaseGlyphs": 2,
        "IgnoreLigatures": 4,
        "IgnoreMarks": 8,
    }
    value = 0
    value_seen = False
    markAttachment = None
    markFilteringSet = None
    seen = set()
    while self.next_token_ != ";":
        token = self.next_token_
        if token in seen:
            raise FeatureLibError(
                "%s can be specified only once" % token,
                self.next_token_location_,
            )
        seen.add(token)
        if token == "MarkAttachmentType":
            self.expect_keyword_("MarkAttachmentType")
            markAttachment = self.parse_glyphclass_(accept_glyphname=False)
        elif token == "UseMarkFilteringSet":
            self.expect_keyword_("UseMarkFilteringSet")
            markFilteringSet = self.parse_glyphclass_(accept_glyphname=False)
        elif token in simple_flags:
            value_seen = True
            value |= simple_flags[self.expect_name_()]
        else:
            raise FeatureLibError(
                '"%s" is not a recognized lookupflag' % token,
                self.next_token_location_,
            )
    self.expect_symbol_(";")

    # At least one flag, attachment type, or filtering set is required.
    if not any([value_seen, markAttachment, markFilteringSet]):
        raise FeatureLibError(
            "lookupflag must have a value", self.next_token_location_
        )

    return self.ast.LookupFlagStatement(
        value,
        markAttachment=markAttachment,
        markFilteringSet=markFilteringSet,
        location=loc,
    )
|
|
692
|
+
|
|
693
|
+
def parse_markClass_(self):
    """Parse a ``markClass`` definition and register it with the document."""
    assert self.is_cur_keyword_("markClass")
    loc = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    if not glyphs.glyphSet():
        raise FeatureLibError(
            "Empty glyph class in mark class definition", loc
        )
    anchor = self.parse_anchor_()
    class_name = self.expect_class_name_()
    self.expect_symbol_(";")
    markClass = self.doc_.markClasses.get(class_name)
    if markClass is None:
        # First definition under this name: create and register the class.
        markClass = self.ast.MarkClass(class_name)
        self.doc_.markClasses[class_name] = markClass
        self.glyphclasses_.define(class_name, markClass)
    definition = self.ast.MarkClassDefinition(
        markClass, anchor, glyphs, location=loc
    )
    markClass.addDefinition(definition)
    return definition
|
|
714
|
+
|
|
715
|
+
def parse_position_(self, enumerated, vertical):
    # Parses a ``position``/``pos`` rule. GPOS types 3-6 are dispatched to
    # dedicated parsers; pair, single, and chaining-contextual positioning
    # are handled inline below.
    assert self.cur_token_ in {"position", "pos"}
    if self.next_token_ == "cursive":  # GPOS type 3
        return self.parse_position_cursive_(enumerated, vertical)
    elif self.next_token_ == "base":  # GPOS type 4
        return self.parse_position_base_(enumerated, vertical)
    elif self.next_token_ == "ligature":  # GPOS type 5
        return self.parse_position_ligature_(enumerated, vertical)
    elif self.next_token_ == "mark":  # GPOS type 6
        return self.parse_position_mark_(enumerated, vertical)

    location = self.cur_token_location_
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical
    )
    self.expect_symbol_(";")

    if any(lookups):
        # GPOS type 8: Chaining contextual positioning; explicit lookups
        if any(values):
            raise FeatureLibError(
                'If "lookup" is present, no values must be specified', location
            )
        return self.ast.ChainContextPosStatement(
            prefix, glyphs, suffix, lookups, location=location
        )

    # Pair positioning, format A: "pos V 10 A -10;"
    # Pair positioning, format B: "pos V A -20;"
    if not prefix and not suffix and len(glyphs) == 2 and not hasMarks:
        if values[0] is None:  # Format B: "pos V A -20;"
            # The single value applies to the second glyph; move it there.
            values.reverse()
        return self.ast.PairPosStatement(
            glyphs[0],
            values[0],
            glyphs[1],
            values[1],
            enumerated=enumerated,
            location=location,
        )

    if enumerated:
        raise FeatureLibError(
            '"enumerate" is only allowed with pair positionings', location
        )
    # GPOS type 1: single positioning (chained when marks are present).
    return self.ast.SinglePosStatement(
        list(zip(glyphs, values)),
        prefix,
        suffix,
        forceChain=hasMarks,
        location=location,
    )
|
|
767
|
+
|
|
768
|
+
def parse_position_cursive_(self, enumerated, vertical):
    """Parse GPOS type 3: cursive attachment positioning."""
    loc = self.cur_token_location_
    self.expect_keyword_("cursive")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with cursive attachment positioning',
            loc,
        )
    target = self.parse_glyphclass_(accept_glyphname=True)
    entry_anchor = self.parse_anchor_()
    exit_anchor = self.parse_anchor_()
    self.expect_symbol_(";")
    return self.ast.CursivePosStatement(
        target, entry_anchor, exit_anchor, location=loc
    )
|
|
783
|
+
|
|
784
|
+
def parse_position_base_(self, enumerated, vertical):
    """Parse GPOS type 4: mark-to-base attachment positioning."""
    loc = self.cur_token_location_
    self.expect_keyword_("base")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with mark-to-base attachment positioning',
            loc,
        )
    base_glyphs = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkBasePosStatement(base_glyphs, anchor_marks, location=loc)
|
|
797
|
+
|
|
798
|
+
def parse_position_ligature_(self, enumerated, vertical):
    """Parse GPOS type 5: mark-to-ligature attachment positioning."""
    loc = self.cur_token_location_
    self.expect_keyword_("ligature")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with mark-to-ligature attachment positioning',
            loc,
        )
    ligature_glyphs = self.parse_glyphclass_(accept_glyphname=True)
    # One anchor-marks group per ligature component.
    component_marks = [self.parse_anchor_marks_()]
    while self.next_token_ == "ligComponent":
        self.expect_keyword_("ligComponent")
        component_marks.append(self.parse_anchor_marks_())
    self.expect_symbol_(";")
    return self.ast.MarkLigPosStatement(
        ligature_glyphs, component_marks, location=loc
    )
|
|
814
|
+
|
|
815
|
+
def parse_position_mark_(self, enumerated, vertical):
    """Parse GPOS type 6: mark-to-mark attachment positioning."""
    loc = self.cur_token_location_
    self.expect_keyword_("mark")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with mark-to-mark attachment positioning',
            loc,
        )
    base_marks = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkMarkPosStatement(base_marks, anchor_marks, location=loc)
|
|
828
|
+
|
|
829
|
+
def parse_script_(self):
    """Parse a ``script`` statement inside a feature block."""
    assert self.is_cur_keyword_("script")
    loc = self.cur_token_location_
    tag = self.expect_script_tag_()
    self.expect_symbol_(";")
    return self.ast.ScriptStatement(tag, location=loc)
|
|
834
|
+
|
|
835
|
+
def parse_substitute_(self):
    # Parses a (reverse) substitution rule and returns the AST statement
    # for whichever GSUB lookup type (1, 2, 3, 4, 6, or 8) the rule's
    # shape implies. The branch order below is significant.
    assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
    location = self.cur_token_location_
    reverse = self.cur_token_ in {"reversesub", "rsub"}
    (
        old_prefix,
        old,
        lookups,
        values,
        old_suffix,
        hasMarks,
    ) = self.parse_glyph_pattern_(vertical=False)
    if any(values):
        raise FeatureLibError(
            "Substitution statements cannot contain values", location
        )
    new = []
    if self.next_token_ == "by":
        keyword = self.expect_keyword_("by")
        while self.next_token_ != ";":
            gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True)
            new.append(gc)
    elif self.next_token_ == "from":
        keyword = self.expect_keyword_("from")
        new = [self.parse_glyphclass_(accept_glyphname=False)]
    else:
        keyword = None
    self.expect_symbol_(";")
    if len(new) == 0 and not any(lookups):
        raise FeatureLibError(
            'Expected "by", "from" or explicit lookup references',
            self.cur_token_location_,
        )

    # GSUB lookup type 3: Alternate substitution.
    # Format: "substitute a from [a.1 a.2 a.3];"
    if keyword == "from":
        if reverse:
            raise FeatureLibError(
                'Reverse chaining substitutions do not support "from"', location
            )
        if len(old) != 1 or len(old[0].glyphSet()) != 1:
            raise FeatureLibError('Expected a single glyph before "from"', location)
        if len(new) != 1:
            raise FeatureLibError(
                'Expected a single glyphclass after "from"', location
            )
        return self.ast.AlternateSubstStatement(
            old_prefix, old[0], old_suffix, new[0], location=location
        )

    num_lookups = len([l for l in lookups if l is not None])

    # "sub X by NULL;" deletes X (not available in reverse rules).
    is_deletion = False
    if len(new) == 1 and isinstance(new[0], ast.NullGlyph):
        if reverse:
            raise FeatureLibError(
                "Reverse chaining substitutions do not support glyph deletion",
                location,
            )
        new = []  # Deletion
        is_deletion = True

    # GSUB lookup type 1: Single substitution.
    # Format A: "substitute a by a.sc;"
    # Format B: "substitute [one.fitted one.oldstyle] by one;"
    # Format C: "substitute [a-d] by [A.sc-D.sc];"
    if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0:
        glyphs = list(old[0].glyphSet())
        replacements = list(new[0].glyphSet())
        if len(replacements) == 1:
            # A single replacement glyph applies to every input glyph.
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.SingleSubstStatement(
            old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location
        )

    # Glyph deletion, built as GSUB lookup type 2: Multiple substitution
    # with empty replacement.
    if is_deletion and len(old) == 1 and num_lookups == 0:
        return self.ast.MultipleSubstStatement(
            old_prefix,
            old[0],
            old_suffix,
            (),
            forceChain=hasMarks,
            location=location,
        )

    # GSUB lookup type 2: Multiple substitution.
    # Format: "substitute f_f_i by f f i;"
    #
    # GlyphsApp introduces two additional formats:
    # Format 1: "substitute [f_i f_l] by [f f] [i l];"
    # Format 2: "substitute [f_i f_l] by f [i l];"
    # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/
    if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0:
        count = len(old[0].glyphSet())
        for n in new:
            if not list(n.glyphSet()):
                raise FeatureLibError("Empty class in replacement", location)
            if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count:
                raise FeatureLibError(
                    f'Expected a glyph class with 1 or {count} elements after "by", '
                    f"but found a glyph class with {len(n.glyphSet())} elements",
                    location,
                )
        return self.ast.MultipleSubstStatement(
            old_prefix,
            old[0],
            old_suffix,
            new,
            forceChain=hasMarks,
            location=location,
        )

    # GSUB lookup type 4: Ligature substitution.
    # Format: "substitute f f i by f_f_i;"
    if (
        not reverse
        and len(old) > 1
        and len(new) == 1
        and len(new[0].glyphSet()) == 1
        and num_lookups == 0
    ):
        return self.ast.LigatureSubstStatement(
            old_prefix,
            old,
            old_suffix,
            list(new[0].glyphSet())[0],
            forceChain=hasMarks,
            location=location,
        )

    # GSUB lookup type 8: Reverse chaining substitution.
    if reverse:
        if len(old) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                "only a single glyph or glyph class can be replaced",
                location,
            )
        if len(new) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                'the replacement (after "by") must be a single glyph '
                "or glyph class",
                location,
            )
        if num_lookups != 0:
            raise FeatureLibError(
                "Reverse chaining substitutions cannot call named lookups", location
            )
        # Glyphs are matched to replacements pairwise in sorted order.
        glyphs = sorted(list(old[0].glyphSet()))
        replacements = sorted(list(new[0].glyphSet()))
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.ReverseChainSingleSubstStatement(
            old_prefix, old_suffix, old, new, location=location
        )

    if len(old) > 1 and len(new) > 1:
        raise FeatureLibError(
            "Direct substitution of multiple glyphs by multiple glyphs "
            "is not supported",
            location,
        )

    # If there are remaining glyphs to parse, this is an invalid GSUB statement
    if len(new) != 0 or is_deletion:
        raise FeatureLibError("Invalid substitution statement", location)

    # GSUB lookup type 6: Chaining contextual substitution.
    rule = self.ast.ChainContextSubstStatement(
        old_prefix, old, old_suffix, lookups, location=location
    )
    return rule
|
|
1025
|
+
|
|
1026
|
+
def parse_subtable_(self):
    """Parse a ``subtable;`` statement (a subtable break)."""
    assert self.is_cur_keyword_("subtable")
    loc = self.cur_token_location_
    self.expect_symbol_(";")
    return self.ast.SubtableStatement(location=loc)
|
|
1031
|
+
|
|
1032
|
+
def parse_size_parameters_(self):
    """Parse a ``parameters`` statement in a ``size`` feature.

    See section 8.b of the OpenType Feature File Specification.
    """
    assert self.is_cur_keyword_("parameters")
    loc = self.cur_token_location_
    design_size = self.expect_decipoint_()
    subfamily_id = self.expect_number_()
    range_start, range_end = 0.0, 0.0
    # The size range is present whenever more numbers follow, or when the
    # subfamily identifier is non-zero.
    if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or subfamily_id != 0:
        range_start = self.expect_decipoint_()
        range_end = self.expect_decipoint_()

    self.expect_symbol_(";")
    return self.ast.SizeParameters(
        design_size, subfamily_id, range_start, range_end, location=loc
    )
|
|
1049
|
+
|
|
1050
|
+
def parse_size_menuname_(self):
    """Parse a ``sizemenuname`` statement inside a ``size`` feature."""
    assert self.is_cur_keyword_("sizemenuname")
    loc = self.cur_token_location_
    platform_id, plat_enc_id, lang_id, text = self.parse_name_()
    return self.ast.FeatureNameStatement(
        "size", platform_id, plat_enc_id, lang_id, text, location=loc
    )
|
|
1057
|
+
|
|
1058
|
+
def parse_table_(self):
    """Parse a ``table <tag> { ... } <tag>;`` block for a supported table."""
    assert self.is_cur_keyword_("table")
    loc = self.cur_token_location_
    tag = self.expect_tag_()
    table = self.ast.TableBlock(tag, location=loc)
    self.expect_symbol_("{")
    handlers = {
        "GDEF": self.parse_table_GDEF_,
        "head": self.parse_table_head_,
        "hhea": self.parse_table_hhea_,
        "vhea": self.parse_table_vhea_,
        "name": self.parse_table_name_,
        "BASE": self.parse_table_BASE_,
        "OS/2": self.parse_table_OS_2_,
        "STAT": self.parse_table_STAT_,
    }
    handler = handlers.get(tag)
    if handler is None:
        raise FeatureLibError('"table %s" is not supported' % tag.strip(), loc)
    handler(table)
    self.expect_symbol_("}")
    # The closing tag must repeat the opening tag.
    closing_tag = self.expect_tag_()
    if closing_tag != tag:
        raise FeatureLibError(
            'Expected "%s"' % tag.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
    return table
|
|
1087
|
+
|
|
1088
|
+
def parse_table_GDEF_(self, table):
    """Parse the body of a ``table GDEF`` block.

    Appends Attach, GlyphClassDef, LigatureCaretByIndex and
    LigatureCaretByPos statements (and comments) to ``table.statements``.

    Fix: the error message previously omitted ``GlyphClassDef`` even
    though the parser accepts it; the message now lists all accepted
    keywords.
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("Attach"):
            statements.append(self.parse_attach_())
        elif self.is_cur_keyword_("GlyphClassDef"):
            statements.append(self.parse_GlyphClassDef_())
        elif self.is_cur_keyword_("LigatureCaretByIndex"):
            statements.append(self.parse_ligatureCaretByIndex_())
        elif self.is_cur_keyword_("LigatureCaretByPos"):
            statements.append(self.parse_ligatureCaretByPos_())
        elif self.cur_token_ == ";":
            # Stray semicolons between statements are tolerated.
            continue
        else:
            raise FeatureLibError(
                "Expected Attach, GlyphClassDef, LigatureCaretByIndex, "
                "or LigatureCaretByPos",
                self.cur_token_location_,
            )
|
|
1111
|
+
|
|
1112
|
+
def parse_table_head_(self, table):
    """Parse the body of a ``table head`` block.

    Only ``FontRevision`` statements (plus comments and stray ``;``)
    are accepted; everything else raises FeatureLibError.
    """
    statements = table.statements
    # Loop until the closing brace, still consuming any pending comments.
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("FontRevision"):
            statements.append(self.parse_FontRevision_())
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError("Expected FontRevision", self.cur_token_location_)
|
|
1126
|
+
|
|
1127
|
+
def parse_table_hhea_(self, table):
    """Parse the body of a ``table hhea`` block.

    Accepts the four numeric fields below; each becomes an
    ``ast.HheaField`` with a lower-cased key. Anything else (other than
    comments and stray ``;``) raises FeatureLibError.
    """
    statements = table.statements
    fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
            # Field names are case-sensitive on input but stored lower-cased.
            key = self.cur_token_.lower()
            value = self.expect_number_()
            statements.append(
                self.ast.HheaField(key, value, location=self.cur_token_location_)
            )
            # Each field statement must be terminated by a semicolon.
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected CaretOffset, Ascender, " "Descender or LineGap",
                self.cur_token_location_,
            )
|
|
1153
|
+
|
|
1154
|
+
def parse_table_vhea_(self, table):
    """Parse the body of a ``table vhea`` block.

    Mirrors ``parse_table_hhea_`` but with the vertical typographic
    fields, producing ``ast.VheaField`` statements.
    """
    statements = table.statements
    fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
            # Keys are stored lower-cased, matching the hhea handler.
            key = self.cur_token_.lower()
            value = self.expect_number_()
            statements.append(
                self.ast.VheaField(key, value, location=self.cur_token_location_)
            )
            # A field must be followed directly by its terminating semicolon.
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected VertTypoAscender, "
                "VertTypoDescender or VertTypoLineGap",
                self.cur_token_location_,
            )
|
|
1181
|
+
|
|
1182
|
+
def parse_table_name_(self, table):
    """Parse the body of a ``table name`` block.

    Each ``nameid`` statement is parsed into a NameRecord and appended
    to ``table.statements``; comments and stray ``;`` are tolerated.
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("nameid"):
            statement = self.parse_nameid_()
            # Only append when a record was actually produced.
            if statement:
                statements.append(statement)
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError("Expected nameid", self.cur_token_location_)
|
|
1198
|
+
|
|
1199
|
+
def parse_name_(self):
    """Parses a name record. See `section 9.e <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_.

    Returns ``(platformID, platEncID, langID, unescaped_string)``.
    Platform must be 1 (Macintosh) or 3 (Windows); when the IDs are
    omitted the platform-specific defaults below are filled in. The
    string's hex escapes are decoded according to the resolved encoding.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        # platEncID and langID are either both present or both absent.
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_

    # NOTE: `or` means an explicit 0 is replaced by the default as well.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English

    string = self.expect_string_()
    self.expect_symbol_(";")

    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
|
|
1230
|
+
|
|
1231
|
+
def parse_stat_name_(self):
    """Parse a name record inside a STAT-table statement.

    Identical to ``parse_name_`` except that it does NOT consume a
    trailing ``;`` (the enclosing STAT statement handles termination).
    Returns ``(platformID, platEncID, langID, unescaped_string)``.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        # Either both optional IDs are given or neither is.
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_

    # NOTE: `or` means an explicit 0 is replaced by the default as well.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English

    string = self.expect_string_()
    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
|
|
1259
|
+
|
|
1260
|
+
def parse_nameid_(self):
    """Parse one ``nameid`` statement and return an ast.NameRecord.

    Raises FeatureLibError when the name ID exceeds 32767.
    """
    assert self.cur_token_ == "nameid", self.cur_token_
    stmt_location = self.cur_token_location_
    nameID = self.expect_any_number_()
    # Values above 32767 are rejected outright.
    if nameID > 32767:
        raise FeatureLibError(
            "Name id value cannot be greater than 32767", self.cur_token_location_
        )
    platformID, platEncID, langID, string = self.parse_name_()
    return self.ast.NameRecord(
        nameID,
        platformID,
        platEncID,
        langID,
        string,
        location=stmt_location,
    )
|
|
1271
|
+
|
|
1272
|
+
def unescape_string_(self, string, encoding):
    """Decode backslash-hex escapes in *string* for the given encoding.

    UTF-16BE strings use four-digit escapes; all other encodings use
    two-digit byte escapes. Surrogate pairs left in the intermediate
    result are combined into real characters by round-tripping through
    Python's UTF-16 codec with ``surrogatepass``.
    """
    if encoding == "utf_16_be":
        decoded = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string)
    else:

        def replace_byte(match):
            return self.unescape_byte_(match, encoding)

        decoded = re.sub(r"\\[0-9a-fA-F]{2}", replace_byte, string)
    # Combine any surrogate pairs into actual Unicode characters.
    utf16 = tobytes(decoded, "utf_16_be", "surrogatepass")
    return tostr(utf16, "utf_16_be")
|
|
1283
|
+
|
|
1284
|
+
@staticmethod
|
|
1285
|
+
def unescape_unichr_(match):
|
|
1286
|
+
n = match.group(0)[1:]
|
|
1287
|
+
return chr(int(n, 16))
|
|
1288
|
+
|
|
1289
|
+
@staticmethod
def unescape_byte_(match, encoding):
    """Decode a ``\\XX`` hex-escape regex match as one byte in *encoding*."""
    hex_digits = match.group(0)[1:]  # strip the leading backslash
    raw = bytechr(int(hex_digits, 16))
    return raw.decode(encoding)
|
|
1293
|
+
|
|
1294
|
+
def find_previous(self, statements, class_):
    """Return the nearest preceding non-comment statement if it is an
    instance of *class_*; otherwise return None.

    Comments are skipped transparently; the first non-comment statement
    found (scanning backwards) decides the result.
    """
    for candidate in reversed(statements):
        if isinstance(candidate, self.ast.Comment):
            # Comments never break the search.
            continue
        if isinstance(candidate, class_):
            return candidate
        # Nearest real statement is of the wrong kind: give up.
        return None
    # No non-comment statements at all.
    return None
|
|
1306
|
+
|
|
1307
|
+
def parse_table_BASE_(self, table):
    """Parse the body of a ``table BASE`` block.

    Handles HorizAxis/VertAxis BaseTagList, BaseScriptList and MinMax
    statements. A BaseScriptList produces an ``ast.BaseAxis`` statement
    combining it with the most recent matching BaseTagList; MinMax
    entries are appended to the nearest preceding BaseAxis.
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("HorizAxis.BaseTagList"):
            horiz_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("HorizAxis.BaseScriptList"):
            # NOTE(review): if BaseScriptList appears before its
            # BaseTagList, `horiz_bases` is unbound and this raises
            # NameError rather than FeatureLibError — confirm intended.
            horiz_scripts = self.parse_base_script_list_(len(horiz_bases))
            statements.append(
                self.ast.BaseAxis(
                    horiz_bases,
                    horiz_scripts,
                    False,
                    location=self.cur_token_location_,
                )
            )
        elif self.is_cur_keyword_("HorizAxis.MinMax"):
            # NOTE(review): uses module-level `ast` here, unlike the
            # `self.ast` used elsewhere in this method.
            base_script_list = self.find_previous(statements, ast.BaseAxis)
            if base_script_list is None:
                raise FeatureLibError(
                    "MinMax must be preceded by BaseScriptList",
                    self.cur_token_location_,
                )
            if base_script_list.vertical:
                raise FeatureLibError(
                    "HorizAxis.MinMax must be preceded by HorizAxis statements",
                    self.cur_token_location_,
                )
            base_script_list.minmax.append(self.parse_base_minmax_())
        elif self.is_cur_keyword_("VertAxis.BaseTagList"):
            vert_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("VertAxis.BaseScriptList"):
            vert_scripts = self.parse_base_script_list_(len(vert_bases))
            statements.append(
                self.ast.BaseAxis(
                    vert_bases,
                    vert_scripts,
                    True,
                    location=self.cur_token_location_,
                )
            )
        elif self.is_cur_keyword_("VertAxis.MinMax"):
            base_script_list = self.find_previous(statements, ast.BaseAxis)
            if base_script_list is None:
                raise FeatureLibError(
                    "MinMax must be preceded by BaseScriptList",
                    self.cur_token_location_,
                )
            if not base_script_list.vertical:
                raise FeatureLibError(
                    "VertAxis.MinMax must be preceded by VertAxis statements",
                    self.cur_token_location_,
                )
            base_script_list.minmax.append(self.parse_base_minmax_())
        elif self.cur_token_ == ";":
            # NOTE(review): there is no final `else`, so unrecognized
            # tokens are silently ignored rather than rejected.
            continue
|
|
1367
|
+
|
|
1368
|
+
def parse_table_OS_2_(self, table):
    """Parse the body of a ``table OS/2`` block.

    Numeric fields take one number; ``Panose`` takes exactly ten;
    ``UnicodeRange``/``CodePageRange`` take numbers until ``;``;
    ``Vendor`` takes a string. Each becomes an ``ast.OS2Field`` whose
    key is the lower-cased field name.
    """
    statements = table.statements
    numbers = (
        "FSType",
        "TypoAscender",
        "TypoDescender",
        "TypoLineGap",
        "winAscent",
        "winDescent",
        "XHeight",
        "CapHeight",
        "WeightClass",
        "WidthClass",
        "LowerOpSize",
        "UpperOpSize",
    )
    ranges = ("UnicodeRange", "CodePageRange")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            key = self.cur_token_.lower()
            value = None
            if self.cur_token_ in numbers:
                value = self.expect_number_()
            elif self.is_cur_keyword_("Panose"):
                # Panose is always exactly ten numbers.
                value = []
                for i in range(10):
                    value.append(self.expect_number_())
            elif self.cur_token_ in ranges:
                value = []
                while self.next_token_ != ";":
                    value.append(self.expect_number_())
            elif self.is_cur_keyword_("Vendor"):
                value = self.expect_string_()
            # NOTE(review): an unrecognized NAME token falls through
            # with value=None and still appends an OS2Field — confirm
            # this is intended rather than an error.
            statements.append(
                self.ast.OS2Field(key, value, location=self.cur_token_location_)
            )
        elif self.cur_token_ == ";":
            continue
|
|
1411
|
+
|
|
1412
|
+
def parse_STAT_ElidedFallbackName(self):
    """Parse an ``ElidedFallbackName { name ...; }`` block in STAT.

    Returns the list of STATNameStatement records; raises if the block
    contains no ``name`` statements or any unexpected token.
    """
    assert self.is_cur_keyword_("ElidedFallbackName")
    self.expect_symbol_("{")
    names = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.is_cur_keyword_("name"):
            platformID, platEncID, langID, string = self.parse_stat_name_()
            nameRecord = self.ast.STATNameStatement(
                "stat",
                platformID,
                platEncID,
                langID,
                string,
                location=self.cur_token_location_,
            )
            names.append(nameRecord)
        else:
            # Only semicolons may appear between name statements.
            if self.cur_token_ != ";":
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName",
                    self.cur_token_location_,
                )
    self.expect_symbol_("}")
    if not names:
        raise FeatureLibError('Expected "name"', self.cur_token_location_)
    return names
|
|
1439
|
+
|
|
1440
|
+
def parse_STAT_design_axis(self):
    """Parse a ``DesignAxis <tag> <order> { name ...; }`` statement.

    Warns when an unregistered axis tag is not uppercase; returns an
    ``ast.STATDesignAxisStatement``.
    """
    assert self.is_cur_keyword_("DesignAxis")
    names = []
    axisTag = self.expect_tag_()
    # Registered axes are the five lowercase tags; anything else
    # should be uppercase by convention.
    if (
        axisTag not in ("ital", "opsz", "slnt", "wdth", "wght")
        and not axisTag.isupper()
    ):
        log.warning(f"Unregistered axis tag {axisTag} should be uppercase.")
    axisOrder = self.expect_number_()
    self.expect_symbol_("{")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.cur_token_type_ is Lexer.COMMENT:
            continue
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_stat_name_()
            name = self.ast.STATNameStatement(
                "stat", platformID, platEncID, langID, string, location=location
            )
            names.append(name)
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                f'Expected "name", got {self.cur_token_}', self.cur_token_location_
            )

    self.expect_symbol_("}")
    return self.ast.STATDesignAxisStatement(
        axisTag, axisOrder, names, self.cur_token_location_
    )
|
|
1473
|
+
|
|
1474
|
+
def parse_STAT_axis_value_(self):
    """Parse an ``AxisValue { ... }`` block in a STAT table.

    Collects name, location and flag statements. Requires at least one
    name and one location; with multiple locations (a Format 4 record)
    each location must carry exactly one value and a unique axis tag.
    """
    assert self.is_cur_keyword_("AxisValue")
    self.expect_symbol_("{")
    locations = []
    names = []
    flags = 0
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            continue
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_stat_name_()
            name = self.ast.STATNameStatement(
                "stat", platformID, platEncID, langID, string, location=location
            )
            names.append(name)
        elif self.is_cur_keyword_("location"):
            location = self.parse_STAT_location()
            locations.append(location)
        elif self.is_cur_keyword_("flag"):
            flags = self.expect_stat_flags()
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                f"Unexpected token {self.cur_token_} " f"in AxisValue",
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    if not names:
        raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_)
    if not locations:
        raise FeatureLibError('Expected "Axis location"', self.cur_token_location_)
    if len(locations) > 1:
        # Multiple locations -> Format 4 record: single-value locations
        # with distinct axis tags only.
        for location in locations:
            if len(location.values) > 1:
                raise FeatureLibError(
                    "Only one value is allowed in a "
                    "Format 4 Axis Value Record, but "
                    f"{len(location.values)} were found.",
                    self.cur_token_location_,
                )
        format4_tags = []
        for location in locations:
            tag = location.tag
            if tag in format4_tags:
                raise FeatureLibError(
                    f"Axis tag {tag} already " "defined.", self.cur_token_location_
                )
            format4_tags.append(tag)

    return self.ast.STATAxisValueStatement(
        names, locations, flags, self.cur_token_location_
    )
|
|
1529
|
+
|
|
1530
|
+
def parse_STAT_location(self):
    """Parse a STAT ``location <tag> <values...>`` up to (not including) ``;``.

    Accepts integer and float values. When exactly three values are
    given they are interpreted as (nominal, min, max) and the nominal
    value must lie inside the range. Returns an
    ``ast.AxisValueLocationStatement``.
    """
    values = []
    tag = self.expect_tag_()
    if len(tag.strip()) != 4:
        raise FeatureLibError(
            f"Axis tag {self.cur_token_} must be 4 " "characters",
            self.cur_token_location_,
        )

    while self.next_token_ != ";":
        if self.next_token_type_ is Lexer.FLOAT:
            value = self.expect_float_()
            values.append(value)
        elif self.next_token_type_ is Lexer.NUMBER:
            value = self.expect_number_()
            values.append(value)
        else:
            raise FeatureLibError(
                f'Unexpected value "{self.next_token_}". '
                "Expected integer or float.",
                self.next_token_location_,
            )
    if len(values) == 3:
        # Three values mean nominal plus an allowed [min, max] range.
        nominal, min_val, max_val = values
        if nominal < min_val or nominal > max_val:
            raise FeatureLibError(
                f"Default value {nominal} is outside "
                f"of specified range "
                f"{min_val}-{max_val}.",
                self.next_token_location_,
            )
    return self.ast.AxisValueLocationStatement(tag, values)
|
|
1562
|
+
|
|
1563
|
+
def parse_table_STAT_(self, table):
    """Parse the body of a ``table STAT`` block.

    Accepts ElidedFallbackName, ElidedFallbackNameID, DesignAxis and
    AxisValue statements. AxisValue locations may only reference axis
    tags already declared by a DesignAxis earlier in the block.
    """
    statements = table.statements
    design_axes = []  # axis tags declared so far, for AxisValue checks
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            if self.is_cur_keyword_("ElidedFallbackName"):
                names = self.parse_STAT_ElidedFallbackName()
                statements.append(self.ast.ElidedFallbackName(names))
            elif self.is_cur_keyword_("ElidedFallbackNameID"):
                value = self.expect_number_()
                statements.append(self.ast.ElidedFallbackNameID(value))
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("DesignAxis"):
                designAxis = self.parse_STAT_design_axis()
                design_axes.append(designAxis.tag)
                statements.append(designAxis)
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("AxisValue"):
                axisValueRecord = self.parse_STAT_axis_value_()
                for location in axisValueRecord.locations:
                    if location.tag not in design_axes:
                        # Tag must be defined in a DesignAxis before it
                        # can be referenced
                        raise FeatureLibError(
                            "DesignAxis not defined for " f"{location.tag}.",
                            self.cur_token_location_,
                        )
                statements.append(axisValueRecord)
                self.expect_symbol_(";")
            else:
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_}", self.cur_token_location_
                )
        elif self.cur_token_ == ";":
            continue
|
|
1603
|
+
|
|
1604
|
+
def parse_base_tag_list_(self):
    """Parse the baseline-tag list of a BASE table axis.

    See `section 9.a <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.a>`_.
    Returns the list of tags, consuming the terminating semicolon.
    """
    assert self.cur_token_ in (
        "HorizAxis.BaseTagList",
        "VertAxis.BaseTagList",
    ), self.cur_token_
    tags = []
    while self.next_token_ != ";":
        tags.append(self.expect_script_tag_())
    self.expect_symbol_(";")
    return tags
|
|
1615
|
+
|
|
1616
|
+
def parse_base_script_list_(self, count):
    """Parse a comma-separated BASE script list.

    Each record carries *count* coordinates (one per baseline tag).
    Consumes the terminating semicolon and returns the record list.
    """
    assert self.cur_token_ in (
        "HorizAxis.BaseScriptList",
        "VertAxis.BaseScriptList",
    ), self.cur_token_
    records = [self.parse_base_script_record_(count)]
    # Additional records are separated by commas.
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        records.append(self.parse_base_script_record_(count))
    self.expect_symbol_(";")
    return records
|
|
1627
|
+
|
|
1628
|
+
def parse_base_script_record_(self, count):
    """Parse one ``script base-tag coord...`` record with *count* coords."""
    script_tag = self.expect_script_tag_()
    base_tag = self.expect_script_tag_()
    coords = []
    for _ in range(count):
        coords.append(self.expect_number_())
    return script_tag, base_tag, coords
|
|
1633
|
+
|
|
1634
|
+
def parse_base_minmax_(self):
    """Parse a BASE MinMax entry: ``script language min, max``.

    Returns ``(script_tag, language, min_coord, max_coord)``. A trailing
    comma (which would introduce per-feature MinMax values) is rejected
    as unsupported.
    """
    script_tag = self.expect_script_tag_()
    language = self.expect_language_tag_()
    min_coord = self.expect_number_()
    self.advance_lexer_()
    # Min and max coordinates must be separated by a literal comma.
    if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
        raise FeatureLibError(
            "Expected a comma between min and max coordinates",
            self.cur_token_location_,
        )
    max_coord = self.expect_number_()
    if self.next_token_ == ",":  # feature tag...
        raise FeatureLibError(
            "Feature tags are not yet supported in BASE table",
            self.cur_token_location_,
        )

    return script_tag, language, min_coord, max_coord
|
|
1652
|
+
|
|
1653
|
+
def parse_device_(self):
    """Parse a ``<device ...>`` record.

    Returns None for ``<device NULL>``, otherwise a tuple of
    ``(size, delta)`` pairs. A tuple is used so the result is hashable.
    """
    self.expect_symbol_("<")
    self.expect_keyword_("device")
    if self.next_token_ == "NULL":
        self.expect_keyword_("NULL")
        deltas = None
    else:
        pairs = [(self.expect_number_(), self.expect_number_())]
        while self.next_token_ == ",":
            self.expect_symbol_(",")
            pairs.append((self.expect_number_(), self.expect_number_()))
        deltas = tuple(pairs)  # make it hashable
    self.expect_symbol_(">")
    return deltas
|
|
1667
|
+
|
|
1668
|
+
def is_next_value_(self):
    """Return True if the upcoming token can start a value record:
    a number, the ``<`` of a full record, or the ``(`` of a variable value."""
    if self.next_token_type_ is Lexer.NUMBER:
        return True
    return self.next_token_ == "<" or self.next_token_ == "("
|
|
1674
|
+
|
|
1675
|
+
def parse_valuerecord_(self, vertical):
    """Parse a value record.

    Three input forms are handled:
    * a bare number (or variable value in parentheses) — interpreted as
      the advance, on the y axis when *vertical* else the x axis;
    * ``<NULL>`` — an empty ValueRecord;
    * ``<name>`` referencing a valueRecordDef, or ``<x y xadv yadv>``
      with four numbers, optionally followed by four device records.
    """
    if (
        self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "("
    ) or self.next_token_type_ is Lexer.NUMBER:
        number, location = (
            self.expect_number_(variable=True),
            self.cur_token_location_,
        )
        if vertical:
            val = self.ast.ValueRecord(
                yAdvance=number, vertical=vertical, location=location
            )
        else:
            val = self.ast.ValueRecord(
                xAdvance=number, vertical=vertical, location=location
            )
        return val
    self.expect_symbol_("<")
    location = self.cur_token_location_
    if self.next_token_type_ is Lexer.NAME:
        name = self.expect_name_()
        if name == "NULL":
            self.expect_symbol_(">")
            return self.ast.ValueRecord()
        # Otherwise the name must resolve to an earlier valueRecordDef.
        vrd = self.valuerecords_.resolve(name)
        if vrd is None:
            raise FeatureLibError(
                'Unknown valueRecordDef "%s"' % name, self.cur_token_location_
            )
        value = vrd.value
        xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
        xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
    else:
        xPlacement, yPlacement, xAdvance, yAdvance = (
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
        )

    if self.next_token_ == "<":
        # Four device records in fixed order: xPla, yPla, xAdv, yAdv.
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
        )
        allDeltas = sorted(
            [
                delta
                for size, delta in (xPlaDevice if xPlaDevice else ())
                + (yPlaDevice if yPlaDevice else ())
                + (xAdvDevice if xAdvDevice else ())
                + (yAdvDevice if yAdvDevice else ())
            ]
        )
        # NOTE(review): if all four device records are <device NULL>,
        # allDeltas is empty and allDeltas[0] raises IndexError — confirm
        # whether that input is possible here.
        if allDeltas[0] < -128 or allDeltas[-1] > 127:
            raise FeatureLibError(
                "Device value out of valid range (-128..127)",
                self.cur_token_location_,
            )
    else:
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None)

    self.expect_symbol_(">")
    return self.ast.ValueRecord(
        xPlacement,
        yPlacement,
        xAdvance,
        yAdvance,
        xPlaDevice,
        yPlaDevice,
        xAdvDevice,
        yAdvDevice,
        vertical=vertical,
        location=location,
    )
|
|
1752
|
+
|
|
1753
|
+
def parse_valuerecord_definition_(self, vertical):
    """Parse a named value record definition.

    See section `2.e.v <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.v>`_.
    The definition is registered in the value-record symbol table so
    later records can reference it by name.
    """
    assert self.is_cur_keyword_("valueRecordDef")
    stmt_location = self.cur_token_location_
    value = self.parse_valuerecord_(vertical)
    name = self.expect_name_()
    self.expect_symbol_(";")
    definition = self.ast.ValueRecordDefinition(
        name, value, location=stmt_location
    )
    self.valuerecords_.define(name, definition)
    return definition
|
|
1763
|
+
|
|
1764
|
+
def parse_languagesystem_(self):
    """Parse a ``languagesystem <script> <language>;`` statement."""
    assert self.cur_token_ == "languagesystem"
    stmt_location = self.cur_token_location_
    script_tag = self.expect_script_tag_()
    lang_tag = self.expect_language_tag_()
    self.expect_symbol_(";")
    return self.ast.LanguageSystemStatement(
        script_tag, lang_tag, location=stmt_location
    )
|
|
1771
|
+
|
|
1772
|
+
def parse_feature_block_(self, variation=False):
    """Parse a ``feature`` (or, with *variation*, a ``variation``) block.

    Determines whether the feature is vertical, a stylistic set, a
    character variant or the ``size`` feature, handles an optional
    ``useExtension`` keyword, then delegates the body to ``parse_block_``.
    """
    if variation:
        assert self.cur_token_ == "variation"
    else:
        assert self.cur_token_ == "feature"
    location = self.cur_token_location_
    tag = self.expect_tag_()
    # These four features lay out along the vertical axis.
    vertical = tag in {"vkrn", "vpal", "vhal", "valt"}

    stylisticset = None
    cv_feature = None
    size_feature = False
    if tag in self.SS_FEATURE_TAGS:
        stylisticset = tag
    elif tag in self.CV_FEATURE_TAGS:
        cv_feature = tag
    elif tag == "size":
        size_feature = True

    # Variation blocks name the condition set right after the tag.
    if variation:
        conditionset = self.expect_name_()

    use_extension = False
    if self.next_token_ == "useExtension":
        self.expect_keyword_("useExtension")
        use_extension = True

    if variation:
        block = self.ast.VariationBlock(
            tag, conditionset, use_extension=use_extension, location=location
        )
    else:
        block = self.ast.FeatureBlock(
            tag, use_extension=use_extension, location=location
        )
    self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature)
    return block
|
|
1809
|
+
|
|
1810
|
+
def parse_feature_reference_(self):
    """Parse a ``feature <tag>;`` reference statement."""
    assert self.cur_token_ == "feature", self.cur_token_
    stmt_location = self.cur_token_location_
    referenced_tag = self.expect_tag_()
    self.expect_symbol_(";")
    return self.ast.FeatureReferenceStatement(
        referenced_tag, location=stmt_location
    )
|
|
1816
|
+
|
|
1817
|
+
def parse_featureNames_(self, tag):
    """Parses a ``featureNames`` statement found in stylistic set features.
    See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_.

    Returns an ``ast.NestedBlock`` containing the FeatureNameStatement
    records. A fresh symbol-table scope is opened for the block body and
    closed again before the trailing semicolon is consumed.
    """
    assert self.cur_token_ == "featureNames", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.FeatureNameStatement(
                    tag, platformID, platEncID, langID, string, location=location
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
|
|
1851
|
+
|
|
1852
|
+
def parse_cvParameters_(self, tag):
    # Parses a ``cvParameters`` block found in Character Variant features.
    # See section `8.d <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.d>`_.
    assert self.cur_token_ == "cvParameters", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()

    # The four NameID sub-block keywords permitted by the spec.
    name_id_keywords = {
        "FeatUILabelNameID",
        "FeatUITooltipTextNameID",
        "SampleTextNameID",
        "ParamUILabelNameID",
    }
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            comment = self.ast.Comment(
                self.cur_token_, location=self.cur_token_location_
            )
            statements.append(comment)
        elif self.is_cur_keyword_(name_id_keywords):
            statements.append(self.parse_cvNameIDs_(tag, self.cur_token_))
        elif self.is_cur_keyword_("Character"):
            statements.append(self.parse_cvCharacter_(tag))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )

    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
|
|
1896
|
+
|
|
1897
|
+
def parse_cvNameIDs_(self, tag, block_name):
    # Parses one of the NameID sub-blocks inside ``cvParameters``
    # (e.g. ``FeatUILabelNameID { name ...; }``).
    assert self.cur_token_ == block_name, self.cur_token_
    block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_)
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            comment = self.ast.Comment(
                self.cur_token_, location=self.cur_token_location_
            )
            block.statements.append(comment)
        elif self.is_cur_keyword_("name"):
            name_location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            statement = self.ast.CVParametersNameStatement(
                tag,
                platformID,
                platEncID,
                langID,
                string,
                block_name,
                location=name_location,
            )
            block.statements.append(statement)
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
|
|
1932
|
+
|
|
1933
|
+
def parse_cvCharacter_(self, tag):
    # Parses a single ``Character`` statement inside ``cvParameters``.
    # The value must be a Unicode code point in the range 0..0xFFFFFF.
    assert self.cur_token_ == "Character", self.cur_token_
    location = self.cur_token_location_
    character = self.expect_any_number_()
    self.expect_symbol_(";")
    if character < 0 or character > 0xFFFFFF:
        raise FeatureLibError(
            "Character value must be between "
            "{:#x} and {:#x}".format(0, 0xFFFFFF),
            location,
        )
    return self.ast.CharacterStatement(character, tag, location=location)
|
|
1944
|
+
|
|
1945
|
+
def parse_FontRevision_(self):
    # Parses a ``FontRevision`` statement found in the head table. See
    # `section 9.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.c>`_.
    assert self.cur_token_ == "FontRevision", self.cur_token_
    location = self.cur_token_location_
    version = self.expect_float_()
    self.expect_symbol_(";")
    if version <= 0:
        raise FeatureLibError("Font revision numbers must be positive", location)
    return self.ast.FontRevisionStatement(version, location=location)
|
|
1954
|
+
|
|
1955
|
+
def parse_conditionset_(self):
    """Parses a ``conditionset`` block (feaLib variable-font extension).

    Returns a ConditionsetStatement mapping each axis name to a
    ``(min_value, max_value)`` tuple.

    Raises FeatureLibError on a repeated axis, a non-numeric bound, or a
    mismatched closing name.
    """
    name = self.expect_name_()

    conditions = {}
    self.expect_symbol_("{")

    while self.next_token_ != "}":
        self.advance_lexer_()
        if self.cur_token_type_ is not Lexer.NAME:
            raise FeatureLibError("Expected an axis name", self.cur_token_location_)

        axis = self.cur_token_
        if axis in conditions:
            raise FeatureLibError(
                f"Repeated condition for axis {axis}", self.cur_token_location_
            )

        if self.next_token_type_ is Lexer.FLOAT:
            min_value = self.expect_float_()
        elif self.next_token_type_ is Lexer.NUMBER:
            min_value = self.expect_number_(variable=False)
        else:
            # Bug fix: previously a non-numeric token fell through and
            # crashed later with an UnboundLocalError; report a proper
            # parse error instead.
            raise FeatureLibError("Expected a number", self.next_token_location_)

        if self.next_token_type_ is Lexer.FLOAT:
            max_value = self.expect_float_()
        elif self.next_token_type_ is Lexer.NUMBER:
            max_value = self.expect_number_(variable=False)
        else:
            raise FeatureLibError("Expected a number", self.next_token_location_)
        self.expect_symbol_(";")

        conditions[axis] = (min_value, max_value)

    self.expect_symbol_("}")

    finalname = self.expect_name_()
    if finalname != name:
        raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_)
    return self.ast.ConditionsetStatement(name, conditions)
|
|
1991
|
+
|
|
1992
|
+
def parse_block_(
    self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None
):
    """Parses the brace-delimited body of a feature or lookup block.

    Appends each parsed statement to ``block.statements`` and finally
    checks that the trailing ``} name;`` matches ``block.name``.
    The ``stylisticset``, ``size_feature`` and ``cv_feature`` arguments
    enable the statement kinds that are only legal inside those
    particular feature types.
    """
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()

    statements = block.statements
    # Keep consuming until the closing brace; pending comments are
    # drained first (``cur_comments_``) so they end up inside the block.
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_({"enum", "enumerate"}):
            statements.append(self.parse_enumerate_(vertical=vertical))
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_reference_())
        elif self.is_cur_keyword_("ignore"):
            statements.append(self.parse_ignore_())
        elif self.is_cur_keyword_("language"):
            statements.append(self.parse_language_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical))
        elif self.is_cur_keyword_("lookupflag"):
            statements.append(self.parse_lookupflag_())
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_({"pos", "position"}):
            statements.append(
                self.parse_position_(enumerated=False, vertical=vertical)
            )
        elif self.is_cur_keyword_("script"):
            statements.append(self.parse_script_())
        elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}):
            statements.append(self.parse_substitute_())
        elif self.is_cur_keyword_("subtable"):
            statements.append(self.parse_subtable_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical))
        # The following statements are only legal in specific feature
        # kinds, gated by the corresponding argument.
        elif stylisticset and self.is_cur_keyword_("featureNames"):
            statements.append(self.parse_featureNames_(stylisticset))
        elif cv_feature and self.is_cur_keyword_("cvParameters"):
            statements.append(self.parse_cvParameters_(cv_feature))
        elif size_feature and self.is_cur_keyword_("parameters"):
            statements.append(self.parse_size_parameters_())
        elif size_feature and self.is_cur_keyword_("sizemenuname"):
            statements.append(self.parse_size_menuname_())
        # User-registered extension keywords get a callback with the
        # parser itself; the callback returns the statement to append.
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected glyph class definition or statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )

    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()

    # The closing name must repeat the opening one, e.g. ``feature liga { ... } liga;``.
    name = self.expect_name_()
    if name != block.name.strip():
        raise FeatureLibError(
            'Expected "%s"' % block.name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
|
|
2069
|
+
|
|
2070
|
+
def is_cur_keyword_(self, k):
    """Return True if the current token is the keyword *k*.

    *k* may be a single keyword string or a container of keyword
    strings, in which case membership is tested.
    """
    if self.cur_token_type_ is Lexer.NAME:
        # ``isinstance(k, type(""))`` was a Python-2-era workaround for
        # basestring; ``str`` is the direct, idiomatic check on Python 3.
        if isinstance(k, str):
            return self.cur_token_ == k
        return self.cur_token_ in k
    return False
|
|
2077
|
+
|
|
2078
|
+
def expect_class_name_(self):
    """Advance and return the next token, which must be a glyph class name."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.GLYPHCLASS:
        return self.cur_token_
    raise FeatureLibError("Expected @NAME", self.cur_token_location_)
|
|
2083
|
+
|
|
2084
|
+
def expect_cid_(self):
    """Advance and return the next token, which must be a CID."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.CID:
        raise FeatureLibError("Expected a CID", self.cur_token_location_)
    return self.cur_token_
|
|
2089
|
+
|
|
2090
|
+
def expect_filename_(self):
    """Advance and return the next token, which must be a file name."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.FILENAME:
        return self.cur_token_
    raise FeatureLibError("Expected file name", self.cur_token_location_)
|
|
2095
|
+
|
|
2096
|
+
def expect_glyph_(self):
    """Advance and return the next token as a glyph name.

    A NAME token has any leading backslash stripped; a CID token is
    rendered in ``cidNNNNN`` form.
    """
    self.advance_lexer_()
    token_type = self.cur_token_type_
    if token_type is Lexer.NAME:
        return self.cur_token_.lstrip("\\")
    if token_type is Lexer.CID:
        return "cid%05d" % self.cur_token_
    raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_)
|
|
2103
|
+
|
|
2104
|
+
def check_glyph_name_in_glyph_set(self, *names):
    """Adds a glyph name (just `start`) or glyph names of a
    range (`start` and `end`) which are not in the glyph set
    to the "missing list" for future error reporting.

    If no glyph set is present, does nothing.
    """
    if not self.glyphNames_:
        return
    for name in names:
        # Record only the first location at which a glyph went missing.
        if name not in self.glyphNames_ and name not in self.missing:
            self.missing[name] = self.cur_token_location_
|
|
2117
|
+
|
|
2118
|
+
def expect_markClass_reference_(self):
    """Resolve the next glyph class name to a previously defined markClass."""
    class_name = self.expect_class_name_()
    mark_class = self.glyphclasses_.resolve(class_name)
    if mark_class is None:
        raise FeatureLibError(
            "Unknown markClass @%s" % class_name, self.cur_token_location_
        )
    if not isinstance(mark_class, self.ast.MarkClass):
        raise FeatureLibError(
            "@%s is not a markClass" % class_name, self.cur_token_location_
        )
    return mark_class
|
|
2130
|
+
|
|
2131
|
+
def expect_tag_(self):
    # Advances the lexer and returns the token as an OpenType tag,
    # right-padded with the pad string and truncated to 4 characters.
    # NOTE(review): the pad literal here appears to be a single space;
    # upstream fontTools pads with four spaces so that 1-3 character
    # tags become exactly 4 characters — verify against the original.
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a tag", self.cur_token_location_)
    if len(self.cur_token_) > 4:
        raise FeatureLibError(
            "Tags cannot be longer than 4 characters", self.cur_token_location_
        )
    return (self.cur_token_ + " ")[:4]
|
|
2140
|
+
|
|
2141
|
+
def expect_script_tag_(self):
    """Expect a tag usable as a script tag; lowercase "dflt" is rejected."""
    tag = self.expect_tag_()
    if tag != "dflt":
        return tag
    raise FeatureLibError(
        '"dflt" is not a valid script tag; use "DFLT" instead',
        self.cur_token_location_,
    )
|
|
2149
|
+
|
|
2150
|
+
def expect_language_tag_(self):
    """Expect a tag usable as a language tag; uppercase "DFLT" is rejected."""
    tag = self.expect_tag_()
    if tag != "DFLT":
        return tag
    raise FeatureLibError(
        '"DFLT" is not a valid language tag; use "dflt" instead',
        self.cur_token_location_,
    )
|
|
2158
|
+
|
|
2159
|
+
def expect_symbol_(self, symbol):
    """Advance the lexer; the next token must be exactly *symbol*."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.SYMBOL or self.cur_token_ != symbol:
        raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_)
    return symbol
|
|
2164
|
+
|
|
2165
|
+
def expect_keyword_(self, keyword):
    """Advance the lexer; the next token must be the NAME *keyword*."""
    self.advance_lexer_()
    is_match = self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword
    if not is_match:
        raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_)
    return self.cur_token_
|
|
2170
|
+
|
|
2171
|
+
def expect_name_(self):
    """Advance and return the next token, which must be a NAME."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a name", self.cur_token_location_)
    return self.cur_token_
|
|
2176
|
+
|
|
2177
|
+
def expect_number_(self, variable=False):
    """Advance and return the next token, which must be a NUMBER.

    When *variable* is true, a parenthesized variable scalar is also
    accepted and parsed via ``expect_variable_scalar_``.
    """
    self.advance_lexer_()
    token_type = self.cur_token_type_
    if token_type is Lexer.NUMBER:
        return self.cur_token_
    if variable and token_type is Lexer.SYMBOL and self.cur_token_ == "(":
        return self.expect_variable_scalar_()
    raise FeatureLibError("Expected a number", self.cur_token_location_)
|
|
2184
|
+
|
|
2185
|
+
def expect_variable_scalar_(self):
    """Parse a parenthesized variable scalar: masters until ")"."""
    self.advance_lexer_()  # consume "("
    scalar = VariableScalar()
    # Collect master definitions until the closing parenthesis.
    while not (self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")"):
        location, value = self.expect_master_()
        scalar.add_value(location, value)
    return scalar
|
|
2194
|
+
|
|
2195
|
+
def expect_master_(self):
    """Parse one master of a variable scalar.

    Returns ``(location, value)`` where location maps axis names to
    coordinates, e.g. for ``wght=200, wdth=100: 10`` it returns
    ``({"wght": 200, "wdth": 100}, 10)``.
    """
    location = {}
    while True:
        if self.cur_token_type_ is not Lexer.NAME:
            raise FeatureLibError("Expected an axis name", self.cur_token_location_)
        axis = self.cur_token_
        self.advance_lexer_()
        if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="):
            raise FeatureLibError(
                "Expected an equals sign", self.cur_token_location_
            )
        value = self.expect_integer_or_float_()
        location[axis] = value
        # A NAME starting with ":" means the lexer merged the colon and
        # the master's value into one glyph-name-like token.
        if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":":
            # Lexer has just read the value as a glyph name. We'll correct it later
            break
        self.advance_lexer_()
        if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
            raise FeatureLibError(
                "Expected an comma or an equals sign", self.cur_token_location_
            )
        self.advance_lexer_()
    self.advance_lexer_()
    # Strip the leading ":" from the merged token to recover the value.
    value = int(self.cur_token_[1:])
    self.advance_lexer_()
    return location, value
|
|
2221
|
+
|
|
2222
|
+
def expect_any_number_(self):
    """Advance and return the next token, which must be one of the
    Lexer.NUMBERS kinds (decimal, hexadecimal or octal)."""
    self.advance_lexer_()
    if self.cur_token_type_ not in Lexer.NUMBERS:
        raise FeatureLibError(
            "Expected a decimal, hexadecimal or octal number", self.cur_token_location_
        )
    return self.cur_token_
|
|
2229
|
+
|
|
2230
|
+
def expect_float_(self):
    """Advance and return the next token, which must be a FLOAT."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.FLOAT:
        raise FeatureLibError(
            "Expected a floating-point number", self.cur_token_location_
        )
    return self.cur_token_
|
|
2237
|
+
|
|
2238
|
+
def expect_integer_or_float_(self):
    """Expect either a FLOAT or a NUMBER token and return its value."""
    next_type = self.next_token_type_
    if next_type == Lexer.FLOAT:
        return self.expect_float_()
    if next_type is Lexer.NUMBER:
        return self.expect_number_()
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
|
|
2247
|
+
|
|
2248
|
+
def expect_decipoint_(self):
    """Expect a decipoint value: a FLOAT as-is, or a NUMBER divided by 10."""
    next_type = self.next_token_type_
    if next_type == Lexer.FLOAT:
        return self.expect_float_()
    if next_type is Lexer.NUMBER:
        return self.expect_number_() / 10
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
|
|
2257
|
+
|
|
2258
|
+
def expect_stat_flags(self):
    """Parse STAT AxisValue flag names until ";" and return the combined
    bit mask.

    Recognized flags: OlderSiblingFontAttribute (0x1) and
    ElidableAxisValueName (0x2).
    """
    value = 0
    flags = {
        "OlderSiblingFontAttribute": 1,
        "ElidableAxisValueName": 2,
    }
    while self.next_token_ != ";":
        if self.next_token_ in flags:
            name = self.expect_name_()
            value = value | flags[name]
        else:
            # Bug fix: the offending token is the still-unconsumed
            # *next* token; the old message reported the previously
            # consumed token (and its location) instead.
            raise FeatureLibError(
                f"Unexpected STAT flag {self.next_token_}",
                self.next_token_location_,
            )
    return value
|
|
2273
|
+
|
|
2274
|
+
def expect_stat_values_(self):
    """Expect a FLOAT or NUMBER token (used for STAT AxisValue values)."""
    next_type = self.next_token_type_
    if next_type == Lexer.FLOAT:
        return self.expect_float_()
    if next_type is Lexer.NUMBER:
        return self.expect_number_()
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
|
|
2283
|
+
|
|
2284
|
+
def expect_string_(self):
    """Advance and return the next token, which must be a STRING."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.STRING:
        raise FeatureLibError("Expected a string", self.cur_token_location_)
    return self.cur_token_
|
|
2289
|
+
|
|
2290
|
+
def advance_lexer_(self, comments=False):
    """Shift the one-token lookahead window forward by one token.

    Moves ``next_token_*`` into ``cur_token_*`` and pulls the following
    token from the lexer. Comment tokens are never exposed as
    ``next_token_``; they are buffered in ``cur_comments_``. When
    *comments* is true and buffered comments exist, the oldest buffered
    comment is surfaced as the current token instead of advancing.
    """
    if comments and self.cur_comments_:
        self.cur_token_type_ = Lexer.COMMENT
        self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
        return
    else:
        self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
            self.next_token_type_,
            self.next_token_,
            self.next_token_location_,
        )
    # Pull tokens until the next non-comment token; comments encountered
    # along the way are queued for later retrieval via comments=True.
    while True:
        try:
            (
                self.next_token_type_,
                self.next_token_,
                self.next_token_location_,
            ) = next(self.lexer_)
        except StopIteration:
            # End of input: type and token are cleared.
            # NOTE(review): next_token_location_ is deliberately (?) left
            # at its previous value here — confirm callers rely on that.
            self.next_token_type_, self.next_token_ = (None, None)
        if self.next_token_type_ != Lexer.COMMENT:
            break
        self.cur_comments_.append((self.next_token_, self.next_token_location_))
|
|
2313
|
+
|
|
2314
|
+
@staticmethod
|
|
2315
|
+
def reverse_string_(s):
|
|
2316
|
+
"""'abc' --> 'cba'"""
|
|
2317
|
+
return "".join(reversed(list(s)))
|
|
2318
|
+
|
|
2319
|
+
def make_cid_range_(self, location, start, limit):
    """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]"""
    if start > limit:
        raise FeatureLibError(
            "Bad range: start should be less than limit", location
        )
    # Inclusive range, each CID zero-padded to five digits.
    return ["cid%05d" % cid for cid in range(start, limit + 1)]
|
|
2329
|
+
|
|
2330
|
+
def make_glyph_range_(self, location, start, limit):
    """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]"""
    result = list()
    if len(start) != len(limit):
        raise FeatureLibError(
            'Bad range: "%s" and "%s" should have the same length' % (start, limit),
            location,
        )

    # Split both names into a common prefix, the varying middle part,
    # and a common suffix; only the middle part is iterated.
    rev = self.reverse_string_
    prefix = os.path.commonprefix([start, limit])
    suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
    if len(suffix) > 0:
        start_range = start[len(prefix) : -len(suffix)]
        limit_range = limit[len(prefix) : -len(suffix)]
    else:
        start_range = start[len(prefix) :]
        limit_range = limit[len(prefix) :]

    if start_range >= limit_range:
        raise FeatureLibError(
            "Start of range must be smaller than its end", location
        )

    # Case 1: single uppercase letter varies, e.g. "A.sc"-"D.sc".
    uppercase = re.compile(r"^[A-Z]$")
    if uppercase.match(start_range) and uppercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result

    # Case 2: single lowercase letter varies, e.g. "a.sc"-"d.sc".
    lowercase = re.compile(r"^[a-z]$")
    if lowercase.match(start_range) and lowercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result

    # Case 3: a 1-3 digit number varies, zero-padded to the width of
    # the start value, e.g. "glyph001"-"glyph010".
    digits = re.compile(r"^[0-9]{1,3}$")
    if digits.match(start_range) and digits.match(limit_range):
        for i in range(int(start_range, 10), int(limit_range, 10) + 1):
            number = ("000" + str(i))[-len(start_range) :]
            result.append("%s%s%s" % (prefix, number, suffix))
        return result

    raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location)
|
|
2374
|
+
|
|
2375
|
+
|
|
2376
|
+
class SymbolTable(object):
    """A stack of name scopes used by the parser.

    ``define`` writes into the innermost scope; ``resolve`` walks from
    the innermost scope outward and returns the first match.
    """

    def __init__(self):
        # Start with a single (outermost) scope.
        self.scopes_ = [{}]

    def enter_scope(self):
        self.scopes_.append({})

    def exit_scope(self):
        self.scopes_.pop()

    def define(self, name, item):
        self.scopes_[-1][name] = item

    def resolve(self, name):
        # NOTE(review): a falsy item (e.g. 0 or "") is skipped by the
        # truthiness test; preserved as-is for behavior compatibility.
        for scope in reversed(self.scopes_):
            found = scope.get(name)
            if found:
                return found
        return None
|