FooBarWidget-mizuho 0.9.1 → 0.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.markdown +1 -1
- data/Rakefile +14 -2
- data/asciidoc/BUGS +11 -6
- data/asciidoc/BUGS.txt +7 -3
- data/asciidoc/CHANGELOG +313 -151
- data/asciidoc/CHANGELOG.txt +177 -12
- data/asciidoc/INSTALL +30 -36
- data/asciidoc/INSTALL.txt +20 -20
- data/asciidoc/Makefile.in +145 -0
- data/asciidoc/README +11 -11
- data/asciidoc/README.txt +9 -9
- data/asciidoc/a2x +40 -7
- data/asciidoc/asciidoc.conf +180 -126
- data/asciidoc/asciidoc.py +1667 -977
- data/asciidoc/common.aap +2 -2
- data/asciidoc/configure +2840 -0
- data/asciidoc/configure.ac +11 -0
- data/asciidoc/dblatex/asciidoc-dblatex.sty +2 -0
- data/asciidoc/dblatex/asciidoc-dblatex.xsl +15 -0
- data/asciidoc/dblatex/dblatex-readme.txt +19 -2
- data/asciidoc/doc/a2x.1 +77 -67
- data/asciidoc/doc/a2x.1.txt +11 -2
- data/asciidoc/doc/article.css-embedded.html +85 -63
- data/asciidoc/doc/article.html +644 -62
- data/asciidoc/doc/article.pdf +0 -0
- data/asciidoc/doc/article.txt +15 -17
- data/asciidoc/doc/asciidoc.1 +34 -40
- data/asciidoc/doc/asciidoc.1.css-embedded.html +67 -32
- data/asciidoc/doc/asciidoc.1.css.html +28 -28
- data/asciidoc/doc/asciidoc.1.html +33 -33
- data/asciidoc/doc/asciidoc.css-embedded.html +2234 -2348
- data/asciidoc/doc/asciidoc.css.html +2203 -2352
- data/asciidoc/doc/asciidoc.dict +52 -1
- data/asciidoc/doc/asciidoc.html +980 -1160
- data/asciidoc/doc/asciidoc.txt +941 -738
- data/asciidoc/doc/asciimathml.txt +63 -0
- data/asciidoc/doc/book-multi.html +26 -43
- data/asciidoc/doc/book-multi.txt +19 -23
- data/asciidoc/doc/book.css-embedded.html +92 -71
- data/asciidoc/doc/book.html +24 -41
- data/asciidoc/doc/book.txt +19 -21
- data/asciidoc/doc/docbook-xsl.css +1 -0
- data/asciidoc/doc/faq.txt +288 -60
- data/asciidoc/doc/images +1 -0
- data/asciidoc/doc/latex-backend.html +16 -123
- data/asciidoc/doc/latex-backend.txt +17 -19
- data/asciidoc/doc/latexmath.txt +233 -24
- data/asciidoc/doc/latexmathml.txt +41 -0
- data/asciidoc/doc/main.aap +9 -5
- data/asciidoc/doc/music-filter.pdf +0 -0
- data/asciidoc/doc/music-filter.txt +2 -2
- data/asciidoc/doc/source-highlight-filter.html +476 -105
- data/asciidoc/doc/source-highlight-filter.pdf +0 -0
- data/asciidoc/doc/source-highlight-filter.txt +39 -10
- data/asciidoc/docbook-xsl/asciidoc-docbook-xsl.txt +1 -29
- data/asciidoc/docbook-xsl/fo.xsl +35 -3
- data/asciidoc/docbook-xsl/manpage.xsl +3 -0
- data/asciidoc/docbook-xsl/text.xsl +50 -0
- data/asciidoc/docbook.conf +182 -73
- data/asciidoc/examples/website/ASCIIMathML.js +1 -0
- data/asciidoc/examples/website/CHANGELOG.html +618 -182
- data/asciidoc/examples/website/CHANGELOG.txt +1 -0
- data/asciidoc/examples/website/INSTALL.html +34 -36
- data/asciidoc/examples/website/INSTALL.txt +1 -0
- data/asciidoc/examples/website/LaTeXMathML.js +1 -0
- data/asciidoc/examples/website/README-website.html +26 -37
- data/asciidoc/examples/website/README-website.txt +6 -6
- data/asciidoc/examples/website/README.html +15 -15
- data/asciidoc/examples/website/README.txt +1 -0
- data/asciidoc/examples/website/a2x.1.html +74 -50
- data/asciidoc/examples/website/a2x.1.txt +1 -0
- data/asciidoc/examples/website/asciidoc-docbook-xsl.html +13 -48
- data/asciidoc/examples/website/asciidoc-docbook-xsl.txt +1 -0
- data/asciidoc/examples/website/asciimathml.txt +1 -0
- data/asciidoc/examples/website/customers.csv +1 -0
- data/asciidoc/examples/website/downloads.html +69 -31
- data/asciidoc/examples/website/downloads.txt +28 -5
- data/asciidoc/examples/website/faq.html +370 -124
- data/asciidoc/examples/website/faq.txt +1 -0
- data/asciidoc/examples/website/images +1 -0
- data/asciidoc/examples/website/index.html +64 -64
- data/asciidoc/examples/website/index.txt +22 -15
- data/asciidoc/examples/website/latex-backend.html +152 -257
- data/asciidoc/examples/website/latex-backend.txt +1 -0
- data/asciidoc/examples/website/latexmathml.txt +1 -0
- data/asciidoc/examples/website/manpage.html +27 -27
- data/asciidoc/examples/website/manpage.txt +1 -0
- data/asciidoc/examples/website/music-filter.html +18 -18
- data/asciidoc/examples/website/music-filter.txt +1 -0
- data/asciidoc/examples/website/music1.abc +1 -1
- data/asciidoc/examples/website/music1.png +0 -0
- data/asciidoc/examples/website/music2.ly +1 -1
- data/asciidoc/examples/website/music2.png +0 -0
- data/asciidoc/examples/website/newlists.txt +40 -0
- data/asciidoc/examples/website/newtables.txt +397 -0
- data/asciidoc/examples/website/source-highlight-filter.html +67 -32
- data/asciidoc/examples/website/source-highlight-filter.txt +1 -0
- data/asciidoc/examples/website/support.html +4 -4
- data/asciidoc/examples/website/toc.js +1 -0
- data/asciidoc/examples/website/userguide.html +2190 -2339
- data/asciidoc/examples/website/userguide.txt +1 -0
- data/asciidoc/examples/website/version83.txt +37 -0
- data/asciidoc/examples/website/version9.html +13 -13
- data/asciidoc/examples/website/xhtml11-manpage.css +1 -0
- data/asciidoc/examples/website/xhtml11-quirks.css +1 -0
- data/asciidoc/examples/website/xhtml11.css +1 -0
- data/asciidoc/filters/code-filter-readme.txt +3 -3
- data/asciidoc/filters/code-filter-test.txt +6 -6
- data/asciidoc/filters/source-highlight-filter.conf +12 -5
- data/asciidoc/html4.conf +152 -58
- data/asciidoc/install-sh +201 -0
- data/asciidoc/latex.conf +41 -41
- data/asciidoc/stylesheets/docbook-xsl.css +1 -0
- data/asciidoc/stylesheets/xhtml11.css +39 -4
- data/asciidoc/text.conf +4 -4
- data/asciidoc/vim/syntax/asciidoc.vim +58 -32
- data/asciidoc/wordpress.conf +48 -0
- data/asciidoc/xhtml11-quirks.conf +1 -1
- data/asciidoc/xhtml11.conf +198 -70
- data/bin/mizuho +5 -2
- data/lib/mizuho/generator.rb +48 -19
- data/mizuho.gemspec +16 -6
- metadata +58 -15
- data/asciidoc/doc/asciimath.txt +0 -47
- data/asciidoc/docbook-xsl/shaded-literallayout.patch +0 -32
- data/asciidoc/examples/website/asciimath.html +0 -157
- data/asciidoc/examples/website/latexmath.html +0 -119
- data/asciidoc/filters/code-filter-test-c++.txt +0 -7
- data/asciidoc/install.sh +0 -55
- data/asciidoc/linuxdoc.conf +0 -285
- data/asciidoc/math.conf +0 -50
- data/asciidoc/stylesheets/xhtml-deprecated-manpage.css +0 -21
- data/asciidoc/stylesheets/xhtml-deprecated.css +0 -247
- data/asciidoc/t.conf +0 -20
- data/asciidoc/xhtml-deprecated-css.conf +0 -235
- data/asciidoc/xhtml-deprecated.conf +0 -351
data/asciidoc/asciidoc.py CHANGED
@@ -6,10 +6,10 @@ Copyright (C) 2002-2008 Stuart Rackham. Free use of this software is granted
 under the terms of the GNU General Public License (GPL).
 """

-import sys, os, re, time, traceback, tempfile,
+import sys, os, re, time, traceback, tempfile, subprocess, codecs, locale
 from types import *

-VERSION = '8.
+VERSION = '8.3.1' # See CHANGLOG file for version history.

 #---------------------------------------------------------------------------
 # Program onstants.
@@ -20,10 +20,10 @@ DEFAULT_DOCTYPE = 'article'
 # definition subs entry.
 SUBS_OPTIONS = ('specialcharacters','quotes','specialwords',
 'replacements', 'attributes','macros','callouts','normal','verbatim',
-'none','
+'none','replacements2')
 # Default value for unspecified subs and presubs configuration file entries.
 SUBS_NORMAL = ('specialcharacters','quotes','attributes',
-'specialwords','replacements','macros'
+'specialwords','replacements','macros')
 SUBS_VERBATIM = ('specialcharacters','callouts')

 NAME_RE = r'(?u)[^\W\d][-\w]*' # Valid section or attrbibute name.
@@ -33,32 +33,29 @@ NAME_RE = r'(?u)[^\W\d][-\w]*' # Valid section or attrbibute name.
 # Utility functions and classes.
 #---------------------------------------------------------------------------

-class EAsciiDoc(Exception):
-pass
+class EAsciiDoc(Exception): pass

-
-from UserDict import UserDict
-
-class OrderedDict(UserDict):
+class OrderedDict(dict):
 """
 Dictionary ordered by insertion order.
 Python Cookbook: Ordered Dictionary, Submitter: David Benjamin.
 http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
 """
-def __init__(self, d=None):
+def __init__(self, d=None, **kwargs):
 self._keys = []
-
+if d is None: d = kwargs
+dict.__init__(self, d)
 def __delitem__(self, key):
-
+dict.__delitem__(self, key)
 self._keys.remove(key)
 def __setitem__(self, key, item):
-
+dict.__setitem__(self, key, item)
 if key not in self._keys: self._keys.append(key)
 def clear(self):
-
+dict.clear(self)
 self._keys = []
 def copy(self):
-d =
+d = dict.copy(self)
 d._keys = self._keys[:]
 return d
 def items(self):
@@ -74,17 +71,38 @@ class OrderedDict(UserDict):
 del self[key]
 return (key, val)
 def setdefault(self, key, failobj = None):
-
+dict.setdefault(self, key, failobj)
 if key not in self._keys: self._keys.append(key)
 def update(self, d=None, **kwargs):
 if d is None:
 d = kwargs
-
+dict.update(self, d)
 for key in d.keys():
 if key not in self._keys: self._keys.append(key)
 def values(self):
 return map(self.get, self._keys)

+class AttrDict(dict):
+"""
+Like a dictionary except values can be accessed as attributes i.e. obj.foo
+can be used in addition to obj['foo'].
+If an item is not present None is returned.
+"""
+def __getattr__(self, key):
+try: return self[key]
+except KeyError, k: return None
+def __setattr__(self, key, value):
+self[key] = value
+def __delattr__(self, key):
+try: del self[key]
+except KeyError, k: raise AttributeError, k
+def __repr__(self):
+return '<AttrDict ' + dict.__repr__(self) + '>'
+def __getstate__(self):
+return dict(self)
+def __setstate__(self,value):
+for k,v in value.items(): self[k]=v
+
 def print_stderr(line):
 sys.stderr.write(line+os.linesep)

@@ -96,25 +114,35 @@ def warning(msg,linenos=True):
 console(msg,'WARNING: ',linenos)
 document.has_warnings = True

-def deprecated(
-console(
-
-def error(msg, cursor=None):
-"""Report fatal error but don't exit application, continue in the hope of
-reporting all fatal errors finishing with a non-zero exit code."""
-console(msg,'ERROR: ', cursor=cursor)
-document.has_errors = True
+def deprecated(msg, linenos=True):
+console(msg, 'DEPRECATED: ', linenos)

-def
-"""
-
-
+def message(msg, prefix='', linenos=True, cursor=None):
+"""
+Return formatted message string. 'offset' is added to reported line number
+for warnings emitted when reading ahead.
+"""
 if linenos and reader.cursor:
 if not cursor:
 cursor = reader.cursor
-
-
-
+prefix += '%s: line %d: ' % (os.path.basename(cursor[0]),cursor[1])
+return prefix + msg
+
+def error(msg, cursor=None, halt=False):
+"""
+Report fatal error.
+If halt=True raise EAsciiDoc exception.
+If halt=False don't exit application, continue in the hope of reporting all
+fatal errors finishing with a non-zero exit code.
+"""
+if halt:
+raise EAsciiDoc, message(msg,linenos=False,cursor=cursor)
+else:
+console(msg,'ERROR: ',cursor=cursor)
+document.has_errors = True
+
+def console(msg, prefix='', linenos=True, cursor=None):
+print_stderr(message(msg,prefix,linenos,cursor))

 def file_in(fname, directory):
 """Return True if file fname resides inside directory."""
@@ -124,7 +152,7 @@ def file_in(fname, directory):
 directory = os.getcwd()
 else:
 assert os.path.isdir(directory)
-directory = os.path.
+directory = os.path.realpath(directory)
 fname = os.path.realpath(fname)
 return os.path.commonprefix((directory, fname)) == directory

@@ -142,9 +170,11 @@ def is_safe_file(fname, directory=None):
 directory = '.'
 return not safe() or file_in(fname, directory)

-# Return file name which must reside in the parent file directory.
-# Return None if file is not found or not safe.
 def safe_filename(fname, parentdir):
+"""
+Return file name which must reside in the parent file directory.
+Return None if file is not found or not safe.
+"""
 if not os.path.isabs(fname):
 # Include files are relative to parent document
 # directory.
@@ -160,16 +190,6 @@ def safe_filename(fname, parentdir):
 def unsafe_error(msg):
 error('unsafe: '+msg)

-def syseval(cmd):
-# Run shell command and return stdout.
-child = os.popen(cmd)
-data = child.read()
-err = child.close()
-if not err:
-return data
-else:
-return ''
-
 def assign(dst,src):
 """Assign all attributes from 'src' object to 'dst' object."""
 for a,v in src.__dict__.items():
@@ -212,33 +232,50 @@ def validate(value,rule,errmsg):
 raise EAsciiDoc,errmsg
 return value

-def
-"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def lstrip_list(s):
+"""
+Return list with empty items from start of list removed.
+"""
+for i in range(len(s)):
+if s[i]: break
+else:
+return []
+return s[i:]
+
+def rstrip_list(s):
+"""
+Return list with empty items from end of list removed.
+"""
+for i in range(len(s)-1,-1,-1):
+if s[i]: break
+else:
+return []
+return s[:i+1]
+
+def strip_list(s):
+"""
+Return list with empty items from start and end of list removed.
+"""
+s = lstrip_list(s)
+s = rstrip_list(s)
+return s
+
+def is_array(obj):
+"""
+Return True if object is list or tuple type.
+"""
+return isinstance(obj,list) or isinstance(obj,tuple)

 def dovetail(lines1, lines2):
-"""
-
-
-
-
+"""
+Append list or tuple of strings 'lines2' to list 'lines1'. Join the last
+non-blank item in 'lines1' with the first non-blank item in 'lines2' into a
+single string.
+"""
+assert is_array(lines1)
+assert is_array(lines2)
+lines1 = strip_list(lines1)
+lines2 = strip_list(lines2)
 if not lines1 or not lines2:
 return list(lines1) + list(lines2)
 result = list(lines1[:-1])
@@ -286,10 +323,8 @@ def parse_attributes(attrs,dict):
 for v in d.values():
 if not (isinstance(v,str) or isinstance(v,int) or isinstance(v,float) or v is None):
 raise
-dict.update(d)
 except:
-
-s = s.replace('"',r'\"') # Escape double-quotes.
+s = s.replace('"','\\"')
 s = s.split(',')
 s = map(lambda x: '"' + x.strip() + '"', s)
 s = ','.join(s)
@@ -299,7 +334,7 @@ def parse_attributes(attrs,dict):
 return # If there's a syntax error leave with {0}=attrs.
 for k in d.keys(): # Drop any empty positional arguments.
 if d[k] == '': del d[k]
-
+dict.update(d)
 assert len(d) > 0

 def parse_named_attributes(s,attrs):
@@ -335,7 +370,7 @@ def parse_options(options,allowed,errmsg):
 result = []
 if options:
 for s in re.split(r'\s*,\s*',options):
-if (allowed and s not in allowed) or
+if (allowed and s not in allowed) or not is_name(s):
 raise EAsciiDoc,'%s: %s' % (errmsg,s)
 result.append(s)
 return tuple(result)
@@ -347,16 +382,12 @@ def symbolize(s):
 def is_name(s):
 """Return True if s is valid attribute, macro or tag name
 (starts with alpha containing alphanumeric and dashes only)."""
-return re.match(NAME_RE,s) is not None
+return re.match(r'^'+NAME_RE+r'$',s) is not None

 def subs_quotes(text):
 """Quoted text is marked up and the resulting text is
 returned."""
-# The quote patterns are iterated in reverse sort order to avoid ambiguity.
-# So, for example, __ is processed before _.
 keys = config.quotes.keys()
-keys.sort()
-keys.reverse()
 for q in keys:
 i = q.find('|')
 if i != -1 and q != '|' and q != '||':
@@ -369,41 +400,42 @@ def subs_quotes(text):
 if tag[0] == '#':
 tag = tag[1:]
 # Unconstrained quotes can appear anywhere.
-reo = re.compile(r'(?msu)(^|.)(\[(?P<
+reo = re.compile(r'(?msu)(^|.)(\[(?P<attrlist>[^[]+?)\])?' \
 + r'(?:' + re.escape(lq) + r')' \
 + r'(?P<content>.+?)(?:'+re.escape(rq)+r')')
 else:
 # The text within constrained quotes must be bounded by white space.
 # Non-word (\W) characters are allowed at boundaries to accomodate
 # enveloping quotes.
-reo = re.compile(r'(?msu)(^|\W)(\[(?P<
+reo = re.compile(r'(?msu)(^|\W)(\[(?P<attrlist>[^[]+?)\])?' \
 + r'(?:' + re.escape(lq) + r')' \
-+ r'(?P<content
++ r'(?P<content>.*?\S)(?:'+re.escape(rq)+r')(?=\W|$)')
 pos = 0
 while True:
 mo = reo.search(text,pos)
 if not mo: break
 if text[mo.start()] == '\\':
-
+# Delete leading backslash.
+text = text[:mo.start()] + text[mo.start()+1:]
+# Skip past start of match.
+pos = mo.start() + 1
 else:
-
-parse_attributes(mo.group('
-stag,etag = config.tag(tag,
+attrlist = {}
+parse_attributes(mo.group('attrlist'), attrlist)
+stag,etag = config.tag(tag, attrlist)
 s = mo.group(1) + stag + mo.group('content') + etag
 text = text[:mo.start()] + s + text[mo.end():]
 pos = mo.start() + len(s)
-# Unescape escaped quotes.
-text = text.replace('\\'+lq, lq)
-if lq != rq:
-text = text.replace('\\'+rq, rq)
 return text

 def subs_tag(tag,dict={}):
 """Perform attribute substitution and split tag string returning start, end
 tag tuple (c.f. Config.tag())."""
+if not tag:
+return [None,None]
 s = subs_attrs(tag,dict)
 if not s:
-warning('tag
+warning('tag \'%s\' dropped: contains undefined attribute' % tag)
 return [None,None]
 result = s.split('|')
 if len(result) == 1:
@@ -422,7 +454,7 @@ def parse_entry(entry, dict=None, unquote=False, unique_values=False,
 If name! and allow_name_only=True then value is set to None.
 Leading and trailing white space is striped from 'name' and 'value'.
 'name' can contain any printable characters.
-If the '=' delimiter character is allowed in the 'name' then
+If the '=' delimiter character is allowed in the 'name' then
 it must be escaped with a backslash and escape_delimiter must be True.
 If 'unquote' is True leading and trailing double-quotes are stripped from
 'name' and 'value'.
@@ -533,18 +565,26 @@ def filter_lines(filter_cmd, lines, dict={}):
 Run 'lines' through the 'filter_cmd' shell command and return the result.
 The 'dict' dictionary contains additional filter attributes.
 """
-#
-if not filter_cmd:
+# Return input lines if there's not filter.
+if not filter_cmd or not filter_cmd.strip():
 return lines
 # Perform attributes substitution on the filter command.
 s = subs_attrs(filter_cmd, dict)
 if not s:
-raise EAsciiDoc,'
-filter_cmd = s
+raise EAsciiDoc,'undefined filter attribute in command: %s' % filter_cmd
+filter_cmd = s.strip()
+# Parse for quoted and unquoted command and command tail.
+# Double quoted.
+mo = re.match(r'^"(?P<cmd>[^"]+)"(?P<tail>.*)$', filter_cmd)
+if not mo:
+# Single quoted.
+mo = re.match(r"^'(?P<cmd>[^']+)'(?P<tail>.*)$", filter_cmd)
+if not mo:
+# Unquoted catch all.
+mo = re.match(r'^(?P<cmd>\S+)(?P<tail>.*)$', filter_cmd)
+cmd = mo.group('cmd').strip()
 # Search for the filter command in both user and application 'filters'
 # sub-directories.
-mo = re.match(r'^(?P<cmd>\S+)(?P<tail>.*)$', filter_cmd)
-cmd = mo.group('cmd')
 found = False
 if not os.path.dirname(cmd):
 # Check in asciidoc user and application directories for unqualified
@@ -564,73 +604,36 @@ def filter_lines(filter_cmd, lines, dict={}):
 if found:
 cmd = cmd2
 else:
-if os.__dict__.has_key('uname') and os.uname()[0][:6] == 'CYGWIN':
-# popen2() does not like non-drive letter path names under
-# Cygwin.
-s = syseval('cygpath -m ' + cmd).strip()
-if s:
-cmd = s
 if os.path.isfile(cmd):
 found = True
 else:
 warning('filter not found: %s' % cmd)
 if found:
 filter_cmd = '"' + cmd + '"' + mo.group('tail')
-verbose('filtering: ' + filter_cmd)
 if sys.platform == 'win32':
-#
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-line = lines[i]
-w.write(line + os.linesep)
-i = i + 1
-w.close()
-result = []
-for s in open(tmp, 'rt'):
-result.append(s.rstrip())
-except:
-raise EAsciiDoc,'filter error: %s' % filter_cmd
-finally:
-os.unlink(tmp)
+# Windows doesn't like running scripts directly so explicitly
+# specify interpreter.
+if found:
+if cmd.endswith('.py'):
+filter_cmd = 'python ' + filter_cmd
+elif cmd.endswith('.rb'):
+filter_cmd = 'ruby ' + filter_cmd
+verbose('filtering: ' + filter_cmd)
+input = os.linesep.join(lines)
+try:
+p = subprocess.Popen(filter_cmd, shell=True,
+stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+output = p.communicate(input)[0]
+except:
+raise EAsciiDoc,'filter error: %s: %s' % (filter_cmd, sys.exc_info()[1])
+if output:
+result = [s.rstrip() for s in output.split(os.linesep)]
 else:
-
-
-
-
-
-i = 0
-while i < len(lines):
-line = lines[i]
-if select.select([],[w.fileno()],[],0)[1]:
-w.write(line+os.linesep) # Use platform line terminator.
-i = i+1
-if select.select([r.fileno()],[],[],0)[0]:
-s = r.readline()
-if not s: break # Exit if filter output closes.
-result.append(s.rstrip())
-w.close()
-for s in r:
-result.append(s.rstrip())
-r.close()
-except:
-raise EAsciiDoc,'filter error: %s' % filter_cmd
-# There's no easy way to guage whether popen2() found and executed the
-# filter, so guess that if it produced no output there is probably a
-# problem.
+result = []
+filter_status = p.wait()
+if filter_status:
+warning('filter non-zero exit code: %s: returned %d' %
+(filter_cmd, filter_status))
 if lines and not result:
 warning('no output from filter: %s' % filter_cmd)
 return result
@@ -715,7 +718,7 @@ def system(name, args, is_macro=False):

 def subs_attrs(lines, dictionary=None):
 """Substitute 'lines' of text with attributes from the global
-document.attributes dictionary and from
+document.attributes dictionary and from 'dictionary' ('dictionary'
 entries take precedence). Return a tuple of the substituted lines. 'lines'
 containing undefined attributes are deleted. If 'lines' is a string then
 return a string.
@@ -938,6 +941,12 @@ class Lex:
 if Lex.prev_element and Lex.prev_cursor == reader.cursor:
 return Lex.prev_element
 result = None
+# Check for AttributeEntry.
+if not result and AttributeEntry.isnext():
+result = AttributeEntry
+# Check for AttributeList.
+if not result and AttributeList.isnext():
+result = AttributeList
 # Check for Title.
 if not result and Title.isnext():
 result = Title
@@ -956,14 +965,10 @@ class Lex:
 else:
 result = blocks.current
 # Check for Table.
+if not result and tables_OLD.isnext():
+result = tables_OLD.current
 if not result and tables.isnext():
 result = tables.current
-# Check for AttributeEntry.
-if not result and AttributeEntry.isnext():
-result = AttributeEntry
-# Check for AttributeList.
-if not result and AttributeList.isnext():
-result = AttributeList
 # Check for BlockTitle.
 if not result and BlockTitle.isnext():
 result = BlockTitle
@@ -978,86 +983,33 @@ class Lex:
 return result
 next = staticmethod(next)

-# Extract the passthrough text and replace with temporary placeholders.
-def extract_passthroughs(text, passthroughs):
-# +++ passthrough.
-lq1 = r'(?P<lq>\+{3})'
-rq1 = r'\+{3}'
-reo1 = re.compile(r'(?msu)(^|[^\w+])(' + lq1 + r')' \
-+ r'(?P<content>.+?)(' + rq1 + r')(?=[^\w+]|$)')
-# $$ passthrough.
-lq2 = r'(\[(?P<attrs>[^[]+?)\])?(?P<lq>\${2})'
-rq2 = r'\${2}'
-reo2 = re.compile(r'(?msu)(^|[^\w$\]])(' + lq2 + r')' \
-+ r'(?P<content>.+?)(' + rq2 + r')(?=[^\w$]|$)')
-reo = reo1
-pos = 0
-while True:
-mo = reo.search(text,pos)
-if not mo:
-if reo == reo1:
-reo = reo2
-pos = 0
-continue
-else:
-break
-if text[mo.start()] == '\\':
-pos = mo.end()
-else:
-content = mo.group('content')
-if mo.group('lq') == '$$':
-content = config.subs_specialchars(content)
-attrs = {}
-parse_attributes(mo.group('attrs'), attrs)
-stag,etag = config.tag('$$passthrough', attrs)
-if not stag:
-etag = '' # Drop end tag if start tag has been.
-content = stag + content + etag
-passthroughs.append(content)
-# Tabs are expanded when the source is read so using them here
-# guarantees the placeholders are unambiguous.
-s = mo.group(1) + '\t' + str(len(passthroughs)-1) + '\t'
-text = text[:mo.start()] + s + text[mo.end():]
-pos = mo.start() + len(s)
-# Unescape escaped passthroughs.
-text = text.replace('\\+++', '+++')
-text = text.replace('\\$$', '$$')
-return text
-extract_passthroughs = staticmethod(extract_passthroughs)
-
-# Replace passthough placeholders with the original passthrough text.
-def restore_passthroughs(text, passthroughs):
-for i,v in enumerate(passthroughs):
-text = text.replace('\t'+str(i)+'\t', passthroughs[i], 1)
-return text
-restore_passthroughs = staticmethod(restore_passthroughs)
-
 def subs_1(s,options):
 """Perform substitution specified in 'options' (in 'options' order) on
-
-Does not process 'attributes' or 'passthroughs' substitutions."""
+Does not process 'attributes' substitutions."""
 if not s:
 return s
 result = s
 for o in options:
-if o == '
+if o == 'none':
+return s
+elif o == 'specialcharacters':
 result = config.subs_specialchars(result)
-
+elif o == 'attributes':
+result = subs_attrs(result)
 elif o == 'quotes':
 result = subs_quotes(result)
-# Special words.
 elif o == 'specialwords':
 result = config.subs_specialwords(result)
-# Replacements.
 elif o in ('replacements','replacements2'):
 result = config.subs_replacements(result,o)
-# Inline macros.
 elif o == 'macros':
 result = macros.subs(result)
 elif o == 'callouts':
 result = macros.subs(result,callouts=True)
 else:
 raise EAsciiDoc,'illegal substitution option: %s' % o
+if not result:
+break
 return result
 subs_1 = staticmethod(subs_1)

@@ -1075,19 +1027,18 @@ class Lex:
 return lines
 # Join lines so quoting can span multiple lines.
 para = '\n'.join(lines)
-if '
-
-para = Lex.extract_passthroughs(para,passthroughs)
+if 'macros' in options:
+para = macros.extract_passthroughs(para)
 for o in options:
 if o == 'attributes':
 # If we don't substitute attributes line-by-line then a single
 # undefined attribute will drop the entire paragraph.
 lines = subs_attrs(para.split('\n'))
 para = '\n'.join(lines)
-
+else:
 para = Lex.subs_1(para,(o,))
-if '
-para =
+if 'macros' in options:
+para = macros.restore_passthroughs(para)
 return para.splitlines()
 subs = staticmethod(subs)

@@ -1145,6 +1096,7 @@ class Document:
 self.attributes['backend-'+document.backend] = ''
 self.attributes['doctype-'+document.doctype] = ''
 self.attributes[document.backend+'-'+document.doctype] = ''
+self.attributes['asciidoc-file'] = APP_FILE
 self.attributes['asciidoc-dir'] = APP_DIR
 self.attributes['user-dir'] = USER_DIR
 if self.infile != '<stdin>':
@@ -1215,12 +1167,12 @@ class Document:
 error('SYNOPSIS section expected')
 else:
 Title.translate()
-if Title.
+if Title.attributes['title'].upper() <> 'SYNOPSIS':
 error('second section must be named SYNOPSIS')
 if Title.level != 1:
 error('SYNOPSIS section title must be at level 1')
 d = {}
-d.update(Title.
+d.update(Title.attributes)
 AttributeList.consume(d)
 stag,etag = config.section2tags('sect-synopsis',d)
 writer.write(stag)
@@ -1324,7 +1276,7 @@ class Header:
 assert Lex.next() is Title and Title.level == 0
 Title.translate()
 attrs = document.attributes # Alias for readability.
-attrs['doctitle'] = Title.
+attrs['doctitle'] = Title.attributes['title']
 if document.doctype == 'manpage':
 # manpage title formatted like mantitle(manvolnum).
 mo = re.match(r'^(?P<mantitle>.*)\((?P<manvolnum>.*)\)$',
@@ -1372,7 +1324,7 @@ class Header:
 error('NAME section expected')
 else:
 Title.translate()
-if Title.
+if Title.attributes['title'].upper() <> 'NAME':
 error('first section must be named NAME')
 if Title.level != 1:
 error('NAME section title must be at level 1')
@@ -1395,6 +1347,7 @@ class AttributeEntry:
 pattern = None
 subs = None
 name = None
+name2 = None
 value = None
 def __init__(self):
 raise AssertionError,'no class instances allowed'
@@ -1415,36 +1368,49 @@ class AttributeEntry:
 AttributeEntry.subs = subs
 line = reader.read_next()
 if line:
+# Attribute entry formatted like :<name>[.<name2>]:[ <value>]
 mo = re.match(AttributeEntry.pattern,line)
 if mo:
-name = mo.group('attrname')
-
-
-
-else:
-value = mo.group('attrvalue').strip()
-# Strip white space and illegal name chars.
-name = re.sub(r'(?u)[^\w\-_]', '', name).lower()
-AttributeEntry.name = name
-AttributeEntry.value = value
+AttributeEntry.name = mo.group('attrname')
+AttributeEntry.name2 = mo.group('attrname2')
+AttributeEntry.value = mo.group('attrvalue') or ''
+AttributeEntry.value = AttributeEntry.value.strip()
 result = True
 return result
 isnext = staticmethod(isnext)
 def translate():
 assert Lex.next() is AttributeEntry
 attr = AttributeEntry # Alias for brevity.
-reader.read() # Discard attribute from reader.
-#
-
-
-
-
-
-
-
-
-
+reader.read() # Discard attribute entry from reader.
+if AttributeEntry.name2: # The entry is a conf file entry.
+section = {}
+# [attributes] and [miscellaneous] entries can have name! syntax.
+if attr.name in ('attributes','miscellaneous') and attr.name2[-1] == '!':
+section[attr.name] = [attr.name2]
+else:
+section[attr.name] = ['%s=%s' % (attr.name2,attr.value)]
+config.load_sections(section)
+config.validate()
+else: # The entry is an attribute.
+if attr.name[-1] == '!':
+# Names like name! undefine the attribute.
+attr.name = attr.name[:-1]
+attr.value = None
+# Strip white space and illegal name chars.
+attr.name = re.sub(r'(?u)[^\w\-_]', '', attr.name).lower()
+# Don't override command-line attributes.
+if attr.name in config.cmd_attrs:
+return
+# Update document.attributes from previously parsed attribute.
+if attr.name == 'attributeentry-subs':
+AttributeEntry.subs = None # Force update in isnext().
+elif attr.value:
+attr.value = Lex.subs((attr.value,), attr.subs)
+attr.value = writer.newline.join(attr.value)
+if attr.value is not None:
+document.attributes[attr.name] = attr.value
+elif attr.name in document.attributes:
+del document.attributes[attr.name]
 translate = staticmethod(translate)
 def translate_all():
 """ Process all contiguous attribute lines on reader."""
@@ -1492,6 +1458,11 @@ class AttributeList:
 if AttributeList.attrs:
 d.update(AttributeList.attrs)
 AttributeList.attrs = {}
+# Generate option attributes.
+if 'options' in d:
+options = parse_options(d['options'], (), 'illegal option name')
+for option in options:
+d[option+'-option'] = ''
 consume = staticmethod(consume)

 class BlockTitle:
@@ -1537,7 +1508,7 @@ class Title:
 subs = ()
 pattern = None
 level = 0
-
+attributes = {}
 sectname = None
 section_numbers = [0]*len(underlines)
 dump_dict = {}
@@ -1545,7 +1516,7 @@ class Title:
 def __init__(self):
 raise AssertionError,'no class instances allowed'
 def translate():
-"""Parse the Title.
+"""Parse the Title.attributes and Title.level from the reader. The
 real work has already been done by parse()."""
 assert Lex.next() is Title
 # Discard title from reader.
@@ -1555,11 +1526,11 @@ class Title:
 # Perform title substitutions.
 if not Title.subs:
 Title.subs = config.subsnormal
-s = Lex.subs((Title.
+s = Lex.subs((Title.attributes['title'],), Title.subs)
 s = writer.newline.join(s)
 if not s:
 warning('blank section title')
-Title.
+Title.attributes['title'] = s
 translate = staticmethod(translate)
 def isnext():
 lines = reader.read_ahead(2)
@@ -1576,7 +1547,7 @@ class Title:
 if Title.dump_dict.has_key(k):
 mo = re.match(Title.dump_dict[k], lines[0])
 if mo:
-Title.
+Title.attributes = mo.groupdict()
 Title.level = level
 Title.linecount = 1
 result = True
@@ -1601,25 +1572,25 @@ class Title:
 if not re.search(r'(?u)\w',title): return False
 mo = re.match(Title.pattern, title)
 if mo:
-Title.
+Title.attributes = mo.groupdict()
 Title.level = list(Title.underlines).index(ul[:2])
 Title.linecount = 2
 result = True
 # Check for expected pattern match groups.
 if result:
-if not Title.
+if not Title.attributes.has_key('title'):
 warning('[titles] entry has no <title> group')
-Title.
-for k,v in Title.
-if v is None: del Title.
+Title.attributes['title'] = lines[0]
+for k,v in Title.attributes.items():
+if v is None: del Title.attributes[k]
 return result
 parse = staticmethod(parse)
-def load(
-"""Load and validate [titles] section entries
-if
+def load(entries):
+"""Load and validate [titles] section entries dictionary."""
+if entries.has_key('underlines'):
 errmsg = 'malformed [titles] underlines entry'
 try:
-underlines = parse_list(
+underlines = parse_list(entries['underlines'])
 except:
 raise EAsciiDoc,errmsg
 if len(underlines) != len(Title.underlines):
@@ -1628,27 +1599,27 @@ class Title:
 if len(s) !=2:
 raise EAsciiDoc,errmsg
 Title.underlines = tuple(underlines)
-Title.dump_dict['underlines'] =
-if
-Title.subs = parse_options(
+Title.dump_dict['underlines'] = entries['underlines']
+if entries.has_key('subs'):
+Title.subs = parse_options(entries['subs'], SUBS_OPTIONS,
 'illegal [titles] subs entry')
-Title.dump_dict['subs'] =
-if
-pat =
+Title.dump_dict['subs'] = entries['subs']
+if entries.has_key('sectiontitle'):
+pat = entries['sectiontitle']
 if not pat or not is_regexp(pat):
 raise EAsciiDoc,'malformed [titles] sectiontitle entry'
 Title.pattern = pat
 Title.dump_dict['sectiontitle'] = pat
-if
-pat =
+if entries.has_key('blocktitle'):
+pat = entries['blocktitle']
 if not pat or not is_regexp(pat):
 raise EAsciiDoc,'malformed [titles] blocktitle entry'
 BlockTitle.pattern = pat
 Title.dump_dict['blocktitle'] = pat
 # Load single-line title patterns.
 for k in ('sect0','sect1','sect2','sect3','sect4'):
-if
-pat =
+if entries.has_key(k):
+pat = entries[k]
 if not pat or not is_regexp(pat):
 raise EAsciiDoc,'malformed [titles] %s entry' % k
 Title.dump_dict[k] = pat
@@ -1663,13 +1634,13 @@ class Title:
 """Set Title section name. First search for section title in
 [specialsections], if not found use default 'sect<level>' name."""
 for pat,sect in config.specialsections.items():
-mo = re.match(pat,Title.
+mo = re.match(pat,Title.attributes['title'])
 if mo:
 title = mo.groupdict().get('title')
 if title is not None:
-Title.
+Title.attributes['title'] = title.strip()
 else:
-Title.
+Title.attributes['title'] = mo.group().strip()
 Title.sectname = sect
 break
 else:
@@ -1721,9 +1692,10 @@ class Section:
 NameChar ::= Letter | Digit | '.' | '-' | '_' | ':'
 """
 base_ident = re.sub(r'[^a-zA-Z0-9]+', '_', title).strip('_').lower()
-# Prefix
-#
-
+# Prefix the ID name with idprefix attribute or underscore if not
+# defined. Prefix ensures the ID does not clash with existing IDs.
+idprefix = document.attributes.get('idprefix','_')
+base_ident = idprefix + base_ident
 i = 1
 while True:
 if i == 1:
@@ -1763,11 +1735,11 @@ class Section:
 if not document.attributes.get('sectids') is None \
 and 'id' not in AttributeList.attrs:
 # Generate ids for sections.
-AttributeList.attrs['id'] = Section.gen_id(Title.
+AttributeList.attrs['id'] = Section.gen_id(Title.attributes['title'])
 Section.setlevel(Title.level)
-Title.
-AttributeList.consume(Title.
-stag,etag = config.section2tags(Title.sectname,Title.
+Title.attributes['sectnum'] = Title.getnumber(document.level)
+AttributeList.consume(Title.attributes)
+stag,etag = config.section2tags(Title.sectname,Title.attributes)
 Section.savetag(Title.level,etag)
 writer.write(stag)
 Section.translate_body()
@@ -1796,12 +1768,12 @@ class Section:

 class AbstractBlock:
 def __init__(self):
-self.OPTIONS = () # The set of allowed options values
 # Configuration parameter names common to all blocks.
-self.CONF_ENTRIES = ('options','subs','presubs','postsubs',
-'posattrs','style','.*-style')
-#
+self.CONF_ENTRIES = ('delimiter','options','subs','presubs','postsubs',
+'posattrs','style','.*-style','template','filter')
+self.start = None # File reader cursor at start delimiter.
 self.name=None # Configuration file section name.
+# Configuration parameters.
 self.delimiter=None # Regular expression matching block delimiter.
 self.template=None # template section entry.
 self.options=() # options entry list.
@@ -1810,18 +1782,27 @@ class AbstractBlock:
 self.filter=None # filter entry.
 self.posattrs=() # posattrs entry list.
 self.style=None # Default style.
-self.styles=OrderedDict()
+self.styles=OrderedDict() # Each entry is a styles dictionary.
 # Before a block is processed it's attributes (from it's
 # attributes list) are merged with the block configuration parameters
-# (by self.
+# (by self.merge_attributes()) resulting in the template substitution
 # dictionary (self.attributes) and the block's procssing parameters
 # (self.parameters).
 self.attributes={}
 # The names of block parameters.
 self.PARAM_NAMES=('template','options','presubs','postsubs','filter')
-self.parameters=
+self.parameters=None
 # Leading delimiter match object.
 self.mo=None
+def short_name(self):
+""" Return the text following the last dash in the section namem """
+i = self.name.rfind('-')
+if i == -1:
+return self.name
+else:
+return self.name[i+1:]
+def error(self, msg, cursor=None, halt=False):
+error('[%s] %s' % (self.name,msg), cursor, halt)
 def is_conf_entry(self,param):
 """Return True if param matches an allowed configuration file entry
 name."""
@@ -1831,53 +1812,89 @@ class AbstractBlock:
 return False
 def load(self,name,entries):
 """Update block definition from section 'entries' dictionary."""
-for k in entries.keys():
-if not self.is_conf_entry(k):
-raise EAsciiDoc,'illegal [%s] entry name: %s' % (name,k)
 self.name = name
-
-
-
-
-
+self.update_parameters(entries, self, all=True)
+def update_parameters(self, src, dst=None, all=False):
+"""
+Parse processing parameters from src dictionary to dst object.
+dst defaults to self.parameters.
+If all is True then copy src entries that aren't parameter names.
+"""
+dst = dst or self.parameters
+msg = '[%s] malformed entry %%s: %%s' % self.name
+def copy(obj,k,v):
+if isinstance(obj,dict):
+obj[k] = v
+else:
+setattr(obj,k,v)
+for k,v in src.items():
+if not re.match(r'\d+',k) and not is_name(k):
+raise EAsciiDoc, msg % (k,v)
+if k == 'template':
+if not is_name(v):
+raise EAsciiDoc, msg % (k,v)
+copy(dst,k,v)
+elif k == 'filter':
+copy(dst,k,v)
+elif k == 'options':
+if isinstance(v,str):
+v = parse_options(v, (), msg % (k,v))
+copy(dst,k,v)
+elif k in ('subs','presubs','postsubs'):
+# Subs is an alias for presubs.
+if k == 'subs': k = 'presubs'
+if isinstance(v,str):
+v = parse_options(v, SUBS_OPTIONS, msg % (k,v))
+copy(dst,k,v)
+elif k == 'delimiter':
 if v and is_regexp(v):
-
+copy(dst,k,v)
 else:
-raise EAsciiDoc,
-elif k == 'template':
-if not is_name(v):
-raise EAsciiDoc, \
-'malformed [%s] template name: %s' % (name,v)
-self.template = v
+raise EAsciiDoc, msg % (k,v)
 elif k == 'style':
-if
-
-
-
+if is_name(v):
+copy(dst,k,v)
+else:
+raise EAsciiDoc, msg % (k,v)
 elif k == 'posattrs':
-
-
-elif k == 'options':
-self.options = parse_options(v,self.OPTIONS,
-'illegal [%s] %s: %s' % (name,k,v))
-elif k == 'presubs' or k == 'subs':
-self.presubs = parse_options(v,SUBS_OPTIONS,
-'illegal [%s] %s: %s' % (name,k,v))
-elif k == 'postsubs':
-self.postsubs = parse_options(v,SUBS_OPTIONS,
-'illegal [%s] %s: %s' % (name,k,v))
-elif k == 'filter':
-self.filter = v
+v = parse_options(v, (), msg % (k,v))
+copy(dst,k,v)
 else:
 mo = re.match(r'^(?P<style>.*)-style$',k)
 if mo:
 if not v:
-raise EAsciiDoc,
+raise EAsciiDoc, msg % (k,v)
 style = mo.group('style')
+if not is_name(style):
+raise EAsciiDoc, msg % (k,v)
 d = {}
 if not parse_named_attributes(v,d):
-raise EAsciiDoc,
+raise EAsciiDoc, msg % (k,v)
+if 'subs' in d:
+# Subs is an alias for presubs.
+d['presubs'] = d['subs']
+del d['subs']
 self.styles[style] = d
+elif all or k in self.PARAM_NAMES:
+copy(dst,k,v) # Derived class specific entries.
+def get_param(self,name,params=None):
+"""
+Return named processing parameter from params dictionary.
+If the parameter is not in params look in self.parameters.
+"""
+if params and name in params:
+return params[name]
+elif name in self.parameters:
+return self.parameters[name]
+else:
+return None
+def get_subs(self,params=None):
+"""
+Return (presubs,postsubs) tuple.
+"""
+presubs = self.get_param('presubs',params)
+postsubs = self.get_param('postsubs',params)
+return (presubs,postsubs)
 def dump(self):
 """Write block definition to stdout."""
 write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
@@ -1911,8 +1928,12 @@ class AbstractBlock:
|
|
1911
1928
|
if self.is_conf_entry('delimiter') and not self.delimiter:
|
1912
1929
|
raise EAsciiDoc,'[%s] missing delimiter' % self.name
|
1913
1930
|
if self.style:
|
1931
|
+
if not is_name(self.style):
|
1932
|
+
raise EAsciiDoc, 'illegal style name: %s' % self.style
|
1914
1933
|
if not self.styles.has_key(self.style):
|
1915
|
-
|
1934
|
+
if not isinstance(self,List): # Lists don't have templates.
|
1935
|
+
warning('[%s] \'%s\' style not in %s' % (
|
1936
|
+
self.name,self.style,self.styles.keys()))
|
1916
1937
|
# Check all styles for missing templates.
|
1917
1938
|
all_styles_have_template = True
|
1918
1939
|
for k,v in self.styles.items():
|
@@ -1928,7 +1949,8 @@ class AbstractBlock:
|
|
1928
1949
|
if not config.sections.has_key(self.template):
|
1929
1950
|
warning('[%s] missing template section' % self.template)
|
1930
1951
|
elif not all_styles_have_template:
|
1931
|
-
|
1952
|
+
if not isinstance(self,List): # Lists don't have templates.
|
1953
|
+
warning('[%s] styles missing templates' % self.name)
|
1932
1954
|
def isnext(self):
|
1933
1955
|
"""Check if this block is next in document reader."""
|
1934
1956
|
result = False
|
@@ -1943,34 +1965,50 @@ class AbstractBlock:
|
|
1943
1965
|
"""Translate block from document reader."""
|
1944
1966
|
if not self.presubs:
|
1945
1967
|
self.presubs = config.subsnormal
|
1946
|
-
|
1947
|
-
|
1948
|
-
|
1949
|
-
|
1950
|
-
|
1951
|
-
|
1952
|
-
|
1953
|
-
|
1954
|
-
|
1955
|
-
|
1956
|
-
|
1957
|
-
|
1958
|
-
|
1959
|
-
|
1960
|
-
|
1961
|
-
|
1962
|
-
|
1963
|
-
|
1964
|
-
|
1968
|
+
if reader.cursor:
|
1969
|
+
self.start = reader.cursor[:]
|
1970
|
+
def merge_attributes(self,attrs,params=[]):
|
1971
|
+
"""
|
1972
|
+
Use the current blocks attribute list (attrs dictionary) to build a
|
1973
|
+
dictionary of block processing parameters (self.parameters) and tag
|
1974
|
+
substitution attributes (self.attributes).
|
1975
|
+
|
1976
|
+
1. Copy the default parameters (self.*) to self.parameters.
|
1977
|
+
self.parameters are used internally to render the current block.
|
1978
|
+
Optional params array of addtional parameters.
|
1979
|
+
|
1980
|
+
2. Copy attrs to self.attributes. self.attributes are used for template
|
1981
|
+
and tag substitution in the current block.
|
1982
|
+
|
1983
|
+
3. If a style attribute was specified update self.parameters with the
|
1984
|
+
corresponding style parameters; if there are any style parameters
|
1985
|
+
remaining add them to self.attributes (existing attribute list entries
|
1986
|
+
take precedence).
|
1987
|
+
|
1988
|
+
4. Set named positional attributes in self.attributes if self.posattrs
|
1989
|
+
was specified.
|
1990
|
+
|
1991
|
+
5. Finally self.parameters is updated with any corresponding parameters
|
1992
|
+
specified in attrs.
|
1993
|
+
|
1994
|
+
"""
|
1995
|
+
|
1996
|
+
def check_array_parameter(param):
|
1997
|
+
# Check the parameter is a sequence type.
|
1998
|
+
if not is_array(self.parameters[param]):
|
1999
|
+
error('malformed presubs attribute: %s' %
|
2000
|
+
self.parameters[param])
|
2001
|
+
# Revert to default value.
|
2002
|
+
self.parameters[param] = getattr(self,param)
|
2003
|
+
|
2004
|
+
params = list(self.PARAM_NAMES) + params
|
1965
2005
|
self.attributes = {}
|
1966
2006
|
self.attributes.update(attrs)
|
1967
2007
|
# Calculate dynamic block parameters.
|
1968
2008
|
# Start with configuration file defaults.
|
1969
|
-
self.parameters
|
1970
|
-
|
1971
|
-
|
1972
|
-
self.parameters['postsubs'] = self.postsubs
|
1973
|
-
self.parameters['filter'] = self.filter
|
2009
|
+
self.parameters = AttrDict()
|
2010
|
+
for name in params:
|
2011
|
+
self.parameters[name] = getattr(self,name)
|
1974
2012
|
# Load the selected style attributes.
|
1975
2013
|
posattrs = self.posattrs
|
1976
2014
|
if posattrs and posattrs[0] == 'style':
|
@@ -1979,15 +2017,15 @@ class AbstractBlock:
|
|
1979
2017
|
style = None
|
1980
2018
|
if not style:
|
1981
2019
|
style = self.attributes.get('style',self.style)
|
1982
|
-
if style
|
1983
|
-
if not
|
1984
|
-
|
1985
|
-
|
2020
|
+
if style:
|
2021
|
+
if not is_name(style):
|
2022
|
+
raise EAsciiDoc, 'illegal style name: %s' % style
|
2023
|
+
if self.styles.has_key(style):
|
1986
2024
|
self.attributes['style'] = style
|
1987
2025
|
for k,v in self.styles[style].items():
|
1988
2026
|
if k == 'posattrs':
|
1989
2027
|
posattrs = v
|
1990
|
-
elif k in
|
2028
|
+
elif k in params:
|
1991
2029
|
self.parameters[k] = v
|
1992
2030
|
elif not self.attributes.has_key(k):
|
1993
2031
|
# Style attributes don't take precedence over explicit.
|
@@ -1996,19 +2034,11 @@ class AbstractBlock:
|
|
1996
2034
|
for i,v in enumerate(posattrs):
|
1997
2035
|
if self.attributes.has_key(str(i+1)):
|
1998
2036
|
self.attributes[v] = self.attributes[str(i+1)]
|
1999
|
-
# Override config and style attributes with
|
2000
|
-
self.
|
2001
|
-
|
2002
|
-
|
2003
|
-
|
2004
|
-
def get_options(self):
|
2005
|
-
return self.parameters['options']
|
2006
|
-
def get_subs(self):
|
2007
|
-
return (self.parameters['presubs'], self.parameters['postsubs'])
|
2008
|
-
def get_template(self):
|
2009
|
-
return self.parameters['template']
|
2010
|
-
def get_filter(self):
|
2011
|
-
return self.parameters['filter']
|
2037
|
+
# Override config and style attributes with attribute list attributes.
|
2038
|
+
self.update_parameters(attrs)
|
2039
|
+
check_array_parameter('options')
|
2040
|
+
check_array_parameter('presubs')
|
2041
|
+
check_array_parameter('postsubs')
|
2012
2042
|
|
2013
2043
|
class AbstractBlocks:
|
2014
2044
|
"""List of block definitions."""
|
@@ -2058,8 +2088,6 @@ class AbstractBlocks:
|
|
2058
2088
|
class Paragraph(AbstractBlock):
|
2059
2089
|
def __init__(self):
|
2060
2090
|
AbstractBlock.__init__(self)
|
2061
|
-
self.CONF_ENTRIES += ('delimiter','template','filter')
|
2062
|
-
self.OPTIONS = ('listelement',)
|
2063
2091
|
self.text=None # Text in first line of paragraph.
|
2064
2092
|
def load(self,name,entries):
|
2065
2093
|
AbstractBlock.load(self,name,entries)
|
@@ -2080,18 +2108,20 @@ class Paragraph(AbstractBlock):
|
|
2080
2108
|
AttributeList.consume(attrs)
|
2081
2109
|
self.merge_attributes(attrs)
|
2082
2110
|
reader.read() # Discard (already parsed item first line).
|
2083
|
-
body = reader.read_until(r'^\+$|^$|'+blocks.delimiter
|
2111
|
+
body = reader.read_until(r'^\+$|^$|' + blocks.delimiter
|
2112
|
+
+ r'|' + tables.delimiter
|
2113
|
+
+ r'|' + tables_OLD.delimiter
|
2114
|
+
+ r'|' + AttributeList.pattern
|
2115
|
+
)
|
2084
2116
|
body = [self.text] + list(body)
|
2085
|
-
presubs
|
2086
|
-
|
2087
|
-
if 'verbatim' not in (presubs + postsubs):
|
2088
|
-
body = join_lines(body)
|
2117
|
+
presubs = self.parameters.presubs
|
2118
|
+
postsubs = self.parameters.postsubs
|
2089
2119
|
body = Lex.set_margin(body) # Move body to left margin.
|
2090
2120
|
body = Lex.subs(body,presubs)
|
2091
|
-
if self.
|
2092
|
-
body = filter_lines(self.
|
2121
|
+
if self.parameters.filter:
|
2122
|
+
body = filter_lines(self.parameters.filter,body,self.attributes)
|
2093
2123
|
body = Lex.subs(body,postsubs)
|
2094
|
-
template = self.
|
2124
|
+
template = self.parameters.template
|
2095
2125
|
stag,etag = config.section2tags(template, self.attributes)
|
2096
2126
|
# Write start tag, content, end tag.
|
2097
2127
|
writer.write(dovetail_tags(stag,body,etag))
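The Paragraph.translate hunk above now stops the paragraph body at any of several block delimiters by joining their regular expressions with '|'. A rough standalone illustration of that read-until-delimiter idea, using simplified example patterns rather than the package's real delimiters:

    import re

    def read_until(lines, *patterns):
        # Collect lines until one matches any of the given regexps.
        reo = re.compile('|'.join(patterns))
        body = []
        for line in lines:
            if reo.match(line):
                break
            body.append(line)
        return body

    source = ['first line', 'second line', '----', 'ignored']
    print(read_until(source, r'^\+$', r'^$', r'^-{4,}$'))
    # ['first line', 'second line']
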
|
@@ -2117,16 +2147,15 @@ class Paragraphs(AbstractBlocks):
|
|
2117
2147
|
raise EAsciiDoc,'missing [paradef-default] section'
|
2118
2148
|
|
2119
2149
|
class List(AbstractBlock):
|
2120
|
-
TAGS = ('listtag','itemtag','texttag','entrytag','labeltag')
|
2121
|
-
TYPES = ('bulleted','numbered','labeled','callout')
|
2122
2150
|
def __init__(self):
|
2123
2151
|
AbstractBlock.__init__(self)
|
2124
|
-
self.CONF_ENTRIES += ('
|
2125
|
-
self.
|
2126
|
-
|
2127
|
-
self.
|
2128
|
-
self.
|
2129
|
-
|
2152
|
+
self.CONF_ENTRIES += ('type','tags')
|
2153
|
+
self.PARAM_NAMES += ('tags',)
|
2154
|
+
# tabledef conf file parameters.
|
2155
|
+
self.type=None
|
2156
|
+
self.tags=None # Name of listtags-<tags> conf section.
|
2157
|
+
# Calculated parameters.
|
2158
|
+
self.tag=None # Current tags AttrDict.
|
2130
2159
|
self.label=None # List item label (labeled lists).
|
2131
2160
|
self.text=None # Text in first line of list item.
|
2132
2161
|
self.index=None # Matched delimiter 'index' group (numbered lists).
|
@@ -2134,28 +2163,19 @@ class List(AbstractBlock):
|
|
2134
2163
|
self.listindex=None # Current list index (1..)
|
2135
2164
|
def load(self,name,entries):
|
2136
2165
|
AbstractBlock.load(self,name,entries)
|
2137
|
-
for k,v in entries.items():
|
2138
|
-
if k == 'type':
|
2139
|
-
if v in self.TYPES:
|
2140
|
-
self.type = v
|
2141
|
-
else:
|
2142
|
-
raise EAsciiDoc,'illegal list type: %s' % v
|
2143
|
-
elif k in self.TAGS:
|
2144
|
-
if is_name(v):
|
2145
|
-
setattr(self,k,v)
|
2146
|
-
else:
|
2147
|
-
raise EAsciiDoc,'illegal list %s name: %s' % (k,v)
|
2148
2166
|
def dump(self):
|
2149
2167
|
AbstractBlock.dump(self)
|
2150
2168
|
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
|
2151
2169
|
write('type='+self.type)
|
2152
|
-
write('
|
2153
|
-
write('itemtag='+self.itemtag)
|
2154
|
-
write('texttag='+self.texttag)
|
2155
|
-
if self.type == 'labeled':
|
2156
|
-
write('entrytag='+self.entrytag)
|
2157
|
-
write('labeltag='+self.labeltag)
|
2170
|
+
write('tags='+self.tags)
|
2158
2171
|
write('')
|
2172
|
+
def validate(self):
|
2173
|
+
AbstractBlock.validate(self)
|
2174
|
+
tags = [self.tags]
|
2175
|
+
tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
|
2176
|
+
for t in tags:
|
2177
|
+
if t not in lists.tags:
|
2178
|
+
self.error('missing section: [listtags-%s]' % t,halt=True)
|
2159
2179
|
def isnext(self):
|
2160
2180
|
result = AbstractBlock.isnext(self)
|
2161
2181
|
if result:
|
@@ -2165,24 +2185,20 @@ class List(AbstractBlock):
|
|
2165
2185
|
return result
|
2166
2186
|
def translate_entry(self):
|
2167
2187
|
assert self.type == 'labeled'
|
2168
|
-
|
2169
|
-
|
2170
|
-
|
2171
|
-
|
2172
|
-
|
2188
|
+
entrytag = subs_tag(self.tag.entry, self.attributes)
|
2189
|
+
labeltag = subs_tag(self.tag.label, self.attributes)
|
2190
|
+
writer.write(entrytag[0])
|
2191
|
+
writer.write(labeltag[0])
|
2192
|
+
# Write labels.
|
2193
|
+
while Lex.next() is self:
|
2173
2194
|
reader.read() # Discard (already parsed item first line).
|
2174
|
-
writer.write_tag(self.
|
2195
|
+
writer.write_tag(self.tag.term, [self.label],
|
2175
2196
|
self.presubs, self.attributes)
|
2176
|
-
|
2177
|
-
|
2178
|
-
while Lex.next() is self:
|
2179
|
-
reader.read() # Discard (already parsed item first line).
|
2180
|
-
writer.write_tag(self.labeltag, [self.label],
|
2181
|
-
self.presubs, self.attributes)
|
2197
|
+
if self.text: break
|
2198
|
+
writer.write(labeltag[1])
|
2182
2199
|
# Write item text.
|
2183
2200
|
self.translate_item()
|
2184
|
-
|
2185
|
-
writer.write(etag)
|
2201
|
+
writer.write(entrytag[1])
|
2186
2202
|
def iscontinued(self):
|
2187
2203
|
if reader.read_next() == '+':
|
2188
2204
|
reader.read() # Discard.
|
@@ -2201,26 +2217,27 @@ class List(AbstractBlock):
|
|
2201
2217
|
"""Translation for '+' style list continuation."""
|
2202
2218
|
if self.type == 'callout':
|
2203
2219
|
self.attributes['coids'] = calloutmap.calloutids(self.listindex)
|
2204
|
-
|
2205
|
-
|
2206
|
-
writer.write(stag)
|
2220
|
+
itemtag = subs_tag(self.tag.item, self.attributes)
|
2221
|
+
writer.write(itemtag[0])
|
2207
2222
|
if self.text and self.text == '+':
|
2208
|
-
#
|
2223
|
+
# Pathological case: continued Horizontal Labeled List with no
|
2209
2224
|
# item text.
|
2210
2225
|
continued = True
|
2211
2226
|
elif not self.text and self.iscontinued():
|
2212
|
-
#
|
2227
|
+
# Pathological case: continued Vertical Labeled List with no
|
2213
2228
|
# item text.
|
2214
2229
|
continued = True
|
2215
2230
|
else:
|
2216
2231
|
# Write ItemText.
|
2217
|
-
text = reader.read_until(
|
2218
|
-
|
2232
|
+
text = reader.read_until(
|
2233
|
+
lists.delimiter + r'|^\+$|^$|' + blocks.delimiter
|
2234
|
+
+ r'|' + tables.delimiter
|
2235
|
+
+ r'|' + tables_OLD.delimiter
|
2236
|
+
)
|
2219
2237
|
if self.text is not None:
|
2220
2238
|
text = [self.text] + list(text)
|
2221
|
-
text = join_lines(text)
|
2222
2239
|
if text:
|
2223
|
-
writer.write_tag(self.
|
2240
|
+
writer.write_tag(self.tag.text, text, self.presubs, self.attributes)
|
2224
2241
|
continued = self.iscontinued()
|
2225
2242
|
while True:
|
2226
2243
|
next = Lex.next()
|
@@ -2237,23 +2254,23 @@ class List(AbstractBlock):
|
|
2237
2254
|
else:
|
2238
2255
|
break
|
2239
2256
|
continued = self.iscontinued()
|
2240
|
-
|
2241
|
-
writer.write(etag)
|
2257
|
+
writer.write(itemtag[1])
|
2242
2258
|
def translate_item_2(self):
|
2243
2259
|
"""Translation for List block style lists."""
|
2244
2260
|
if self.type == 'callout':
|
2245
2261
|
self.attributes['coids'] = calloutmap.calloutids(self.listindex)
|
2246
|
-
|
2247
|
-
|
2248
|
-
writer.write(stag)
|
2262
|
+
itemtag = subs_tag(self.tag.item, self.attributes)
|
2263
|
+
writer.write(itemtag[0])
|
2249
2264
|
if self.text or reader.read_next():
|
2250
2265
|
# Write ItemText.
|
2251
|
-
text = reader.read_until(
|
2252
|
-
|
2266
|
+
text = reader.read_until(
|
2267
|
+
lists.delimiter + r'|^$|' + blocks.delimiter
|
2268
|
+
+ r'|' + tables.delimiter
|
2269
|
+
+ r'|' + tables_OLD.delimiter
|
2270
|
+
)
|
2253
2271
|
if self.text is not None:
|
2254
2272
|
text = [self.text] + list(text)
|
2255
|
-
text
|
2256
|
-
writer.write_tag(self.texttag, text, self.presubs, self.attributes)
|
2273
|
+
writer.write_tag(self.tag.text, text, self.presubs, self.attributes)
|
2257
2274
|
while True:
|
2258
2275
|
next = Lex.next()
|
2259
2276
|
if next in lists.open:
|
@@ -2270,8 +2287,7 @@ class List(AbstractBlock):
|
|
2270
2287
|
next.translate()
|
2271
2288
|
else:
|
2272
2289
|
break
|
2273
|
-
|
2274
|
-
writer.write(etag)
|
2290
|
+
writer.write(itemtag[1])
|
2275
2291
|
def check_index(self):
|
2276
2292
|
""" Check calculated listindex (1,2,...) against the item index in the
|
2277
2293
|
document (self.index)."""
|
@@ -2287,15 +2303,37 @@ class List(AbstractBlock):
|
|
2287
2303
|
if matched and i != self.listindex:
|
2288
2304
|
print 'type: ',self.type,': expected ',self.listindex,' got ',i
|
2289
2305
|
warning('list item %s out of sequence' % self.index)
|
2306
|
+
def check_tags(self):
|
2307
|
+
""" Check that all necessary tags are present. """
|
2308
|
+
tags = set(Lists.TAGS)
|
2309
|
+
if self.type != 'labeled':
|
2310
|
+
tags = tags.difference(['entry','label','term'])
|
2311
|
+
missing = tags.difference(self.tag.keys())
|
2312
|
+
if missing:
|
2313
|
+
self.error('missing tag(s): %s' % ','.join(missing), halt=True)
|
2290
2314
|
def translate(self):
|
2291
2315
|
AbstractBlock.translate(self)
|
2316
|
+
if self.short_name() in ('bibliography','glossary','qanda'):
|
2317
|
+
deprecated('old %s list syntax' % self.short_name())
|
2292
2318
|
lists.open.append(self)
|
2293
2319
|
attrs = {}
|
2294
2320
|
attrs.update(self.mo.groupdict())
|
2295
2321
|
BlockTitle.consume(attrs)
|
2296
2322
|
AttributeList.consume(attrs)
|
2297
|
-
self.merge_attributes(attrs)
|
2298
|
-
|
2323
|
+
self.merge_attributes(attrs,['tags'])
|
2324
|
+
self.tag = lists.tags[self.parameters.tags]
|
2325
|
+
self.check_tags()
|
2326
|
+
if 'width' in self.attributes:
|
2327
|
+
# Set horizontal list 'labelwidth' and 'itemwidth' attributes.
|
2328
|
+
v = str(self.attributes['width'])
|
2329
|
+
mo = re.match(r'^(\d{1,2})%?$',v)
|
2330
|
+
if mo:
|
2331
|
+
labelwidth = int(mo.group(1))
|
2332
|
+
self.attributes['labelwidth'] = str(labelwidth)
|
2333
|
+
self.attributes['itemwidth'] = str(100-labelwidth)
|
2334
|
+
else:
|
2335
|
+
self.error('illegal attribute value: label="%s"' % v)
|
2336
|
+
stag,etag = subs_tag(self.tag.list, self.attributes)
|
2299
2337
|
if stag:
|
2300
2338
|
writer.write(stag)
|
2301
2339
|
self.listindex = 0
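The width handling added in this hunk splits a horizontal labeled list into label and item percentages. A small sketch of the same calculation, shown on its own (illustrative only):

    import re

    def split_label_width(width):
        # 'width' is a one- or two-digit percentage such as '15' or '15%'.
        mo = re.match(r'^(\d{1,2})%?$', str(width))
        if not mo:
            raise ValueError('illegal width value: %s' % width)
        labelwidth = int(mo.group(1))
        return {'labelwidth': str(labelwidth), 'itemwidth': str(100 - labelwidth)}

    print(split_label_width('15%'))   # {'labelwidth': '15', 'itemwidth': '85'}
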
|
@@ -2324,37 +2362,50 @@ class Lists(AbstractBlocks):
|
|
2324
2362
|
"""List of List objects."""
|
2325
2363
|
BLOCK_TYPE = List
|
2326
2364
|
PREFIX = 'listdef-'
|
2365
|
+
TYPES = ('bulleted','numbered','labeled','callout')
|
2366
|
+
TAGS = ('list', 'entry','item','text', 'label','term')
|
2327
2367
|
def __init__(self):
|
2328
2368
|
AbstractBlocks.__init__(self)
|
2329
2369
|
self.open = [] # A stack of the current and parent lists.
|
2330
2370
|
self.listblock = None # Current list is in list block.
|
2371
|
+
self.tags={} # List tags dictionary. Each entry is a tags AttrDict.
|
2331
2372
|
def load(self,sections):
|
2332
2373
|
AbstractBlocks.load(self,sections)
|
2374
|
+
self.load_tags(sections)
|
2375
|
+
def load_tags(self,sections):
|
2376
|
+
"""
|
2377
|
+
Load listtags-* conf file sections to self.tags.
|
2378
|
+
"""
|
2379
|
+
for section in sections.keys():
|
2380
|
+
mo = re.match(r'^listtags-(?P<name>\w+)$',section)
|
2381
|
+
if mo:
|
2382
|
+
name = mo.group('name')
|
2383
|
+
if self.tags.has_key(name):
|
2384
|
+
d = self.tags[name]
|
2385
|
+
else:
|
2386
|
+
d = AttrDict()
|
2387
|
+
parse_entries(sections.get(section,()),d)
|
2388
|
+
for k in d.keys():
|
2389
|
+
if k not in self.TAGS:
|
2390
|
+
warning('[%s] contains illegal list tag: %s' %
|
2391
|
+
(section,k))
|
2392
|
+
self.tags[name] = d
|
2333
2393
|
def validate(self):
|
2334
2394
|
AbstractBlocks.validate(self)
|
2335
2395
|
for b in self.blocks:
|
2336
2396
|
# Check list has valid type.
|
2337
|
-
if not b.type in
|
2397
|
+
if not b.type in Lists.TYPES:
|
2338
2398
|
raise EAsciiDoc,'[%s] illegal type' % b.name
|
2339
|
-
|
2340
|
-
|
2341
|
-
|
2342
|
-
|
2343
|
-
|
2344
|
-
|
2345
|
-
warning('[%s] missing tag texttag' % b.name)
|
2346
|
-
if b.type == 'labeled':
|
2347
|
-
if not b.entrytag or not config.tags.has_key(b.entrytag):
|
2348
|
-
warning('[%s] missing entrytag' % b.name)
|
2349
|
-
if not b.labeltag or not config.tags.has_key(b.labeltag):
|
2350
|
-
warning('[%s] missing labeltag' % b.name)
|
2399
|
+
b.validate()
|
2400
|
+
def dump(self):
|
2401
|
+
AbstractBlocks.dump(self)
|
2402
|
+
for k,v in self.tags.items():
|
2403
|
+
dump_section('listtags-'+k, v)
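The load_tags code added to Lists above collects every [listtags-<name>] section into a per-name tag dictionary and warns about unknown entries. A simplified sketch of that lookup, with parse_entries and AttrDict replaced by plain dict handling and the section data invented for the example:

    import re

    TAGS = ('list', 'entry', 'item', 'text', 'label', 'term')

    def load_tag_sections(sections, prefix='listtags-'):
        # Map 'listtags-bulleted' -> {'list': ..., 'item': ...}, and so on.
        tags = {}
        for section, entries in sections.items():
            mo = re.match(r'^%s(?P<name>\w+)$' % prefix, section)
            if not mo:
                continue
            d = dict(entries)
            for k in d:
                if k not in TAGS:
                    print('warning: [%s] contains illegal list tag: %s' % (section, k))
            tags[mo.group('name')] = d
        return tags

    sections = {'listtags-bulleted': {'list': 'ul|/ul', 'item': 'li|/li'},
                'miscellaneous': {'newline': r'\r\n'}}
    print(load_tag_sections(sections))
    # {'bulleted': {'list': 'ul|/ul', 'item': 'li|/li'}}
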
|
2404
|
+
|
2351
2405
|
|
2352
2406
|
class DelimitedBlock(AbstractBlock):
|
2353
2407
|
def __init__(self):
|
2354
2408
|
AbstractBlock.__init__(self)
|
2355
|
-
self.CONF_ENTRIES += ('delimiter','template','filter')
|
2356
|
-
self.OPTIONS = ('skip','sectionbody','list')
|
2357
|
-
self.start = None # File reader cursor at start delimiter.
|
2358
2409
|
def load(self,name,entries):
|
2359
2410
|
AbstractBlock.load(self,name,entries)
|
2360
2411
|
def dump(self):
|
@@ -2368,14 +2419,13 @@ class DelimitedBlock(AbstractBlock):
|
|
2368
2419
|
if 'list' in self.options:
|
2369
2420
|
lists.listblock = self
|
2370
2421
|
reader.read() # Discard delimiter.
|
2371
|
-
self.start = reader.cursor[:]
|
2372
2422
|
attrs = {}
|
2373
2423
|
# Leave list block attributes for the list element.
|
2374
2424
|
if lists.listblock is not self:
|
2375
2425
|
BlockTitle.consume(attrs)
|
2376
2426
|
AttributeList.consume(attrs)
|
2377
2427
|
self.merge_attributes(attrs)
|
2378
|
-
options = self.
|
2428
|
+
options = self.parameters.options
|
2379
2429
|
if safe() and self.name == 'blockdef-backend':
|
2380
2430
|
unsafe_error('Backend Block')
|
2381
2431
|
# Discard block body.
|
@@ -2384,7 +2434,7 @@ class DelimitedBlock(AbstractBlock):
|
|
2384
2434
|
# Discard block body.
|
2385
2435
|
reader.read_until(self.delimiter,same_file=True)
|
2386
2436
|
else:
|
2387
|
-
template = self.
|
2437
|
+
template = self.parameters.template
|
2388
2438
|
stag,etag = config.section2tags(template,self.attributes)
|
2389
2439
|
if 'sectionbody' in options or 'list' in options:
|
2390
2440
|
# The body is treated like a SimpleSection.
|
@@ -2393,18 +2443,18 @@ class DelimitedBlock(AbstractBlock):
|
|
2393
2443
|
writer.write(etag)
|
2394
2444
|
else:
|
2395
2445
|
body = reader.read_until(self.delimiter,same_file=True)
|
2396
|
-
presubs
|
2446
|
+
presubs = self.parameters.presubs
|
2447
|
+
postsubs = self.parameters.postsubs
|
2397
2448
|
body = Lex.subs(body,presubs)
|
2398
|
-
if self.
|
2399
|
-
body = filter_lines(self.
|
2449
|
+
if self.parameters.filter:
|
2450
|
+
body = filter_lines(self.parameters.filter,body,self.attributes)
|
2400
2451
|
body = Lex.subs(body,postsubs)
|
2401
2452
|
# Write start tag, content, end tag.
|
2402
2453
|
writer.write(dovetail_tags(stag,body,etag))
|
2403
2454
|
if 'list' in options:
|
2404
2455
|
lists.listblock = None
|
2405
2456
|
if reader.eof():
|
2406
|
-
error('missing
|
2407
|
-
cursor=self.start)
|
2457
|
+
self.error('missing closing delimiter',self.start)
|
2408
2458
|
else:
|
2409
2459
|
delimiter = reader.read() # Discard delimiter line.
|
2410
2460
|
assert re.match(self.delimiter,delimiter)
|
@@ -2423,372 +2473,384 @@ class DelimitedBlocks(AbstractBlocks):
|
|
2423
2473
|
|
2424
2474
|
class Column:
|
2425
2475
|
"""Table column."""
|
2426
|
-
def __init__(self):
|
2427
|
-
self.
|
2428
|
-
self.
|
2429
|
-
self.
|
2476
|
+
def __init__(self, width=None, align=None, style=None):
|
2477
|
+
self.width=width or '1'
|
2478
|
+
self.align=align or '<'
|
2479
|
+
self.style=style # Style name or None.
|
2480
|
+
# Calculated attribute values.
|
2481
|
+
self.colalign=None # 'left','center','right'.
|
2482
|
+
self.abswidth=None # 1.. (page units).
|
2483
|
+
self.pcwidth=None # 1..99 (percentage).
|
2430
2484
|
|
2431
2485
|
class Table(AbstractBlock):
|
2432
|
-
|
2433
|
-
|
2434
|
-
|
2486
|
+
ALIGNMENTS = {'<':'left', '>':'right', '^':'center'}
|
2487
|
+
FORMATS = ('psv','csv','dsv')
|
2488
|
+
SEPARATORS = dict(
|
2489
|
+
csv=',',
|
2490
|
+
dsv=r':|\n',
|
2491
|
+
psv=r'((?P<cellcount>\d+)\*)?\|',
|
2492
|
+
)
|
2435
2493
|
def __init__(self):
|
2436
2494
|
AbstractBlock.__init__(self)
|
2437
|
-
self.CONF_ENTRIES += ('
|
2438
|
-
|
2439
|
-
|
2440
|
-
|
2441
|
-
self.
|
2442
|
-
self.format=None # 'fixed','csv','dsv'
|
2443
|
-
self.colspec=None
|
2444
|
-
self.headrow=None
|
2445
|
-
self.footrow=None
|
2446
|
-
self.bodyrow=None
|
2447
|
-
self.headdata=None
|
2448
|
-
self.footdata=None
|
2449
|
-
self.bodydata=None
|
2495
|
+
self.CONF_ENTRIES += ('format','tags','separator')
|
2496
|
+
# tabledef conf file parameters.
|
2497
|
+
self.format='psv'
|
2498
|
+
self.separator=None
|
2499
|
+
self.tags=None # Name of tabletags-<tags> conf section.
|
2450
2500
|
# Calculated parameters.
|
2451
|
-
self.
|
2452
|
-
self.
|
2453
|
-
self.
|
2501
|
+
self.abswidth=None # 1.. (page units).
|
2502
|
+
self.pcwidth = None # 1..99 (percentage).
|
2503
|
+
self.rows=[] # Parsed rows, each row is a list of cell data.
|
2454
2504
|
self.columns=[] # List of Columns.
|
2455
|
-
# Other.
|
2456
|
-
self.check_msg='' # Message set by previous self.validate() call.
|
2457
2505
|
def load(self,name,entries):
|
2458
2506
|
AbstractBlock.load(self,name,entries)
|
2459
|
-
"""Update table definition from section entries in 'entries'."""
|
2460
|
-
for k,v in entries.items():
|
2461
|
-
if k == 'fillchar':
|
2462
|
-
if v and len(v) == 1:
|
2463
|
-
self.fillchar = v
|
2464
|
-
else:
|
2465
|
-
raise EAsciiDoc,'malformed table fillchar: %s' % v
|
2466
|
-
elif k == 'format':
|
2467
|
-
if v in Table.FORMATS:
|
2468
|
-
self.format = v
|
2469
|
-
else:
|
2470
|
-
raise EAsciiDoc,'illegal table format: %s' % v
|
2471
|
-
elif k == 'colspec':
|
2472
|
-
self.colspec = v
|
2473
|
-
elif k == 'headrow':
|
2474
|
-
self.headrow = v
|
2475
|
-
elif k == 'footrow':
|
2476
|
-
self.footrow = v
|
2477
|
-
elif k == 'bodyrow':
|
2478
|
-
self.bodyrow = v
|
2479
|
-
elif k == 'headdata':
|
2480
|
-
self.headdata = v
|
2481
|
-
elif k == 'footdata':
|
2482
|
-
self.footdata = v
|
2483
|
-
elif k == 'bodydata':
|
2484
|
-
self.bodydata = v
|
2485
2507
|
def dump(self):
|
2486
2508
|
AbstractBlock.dump(self)
|
2487
2509
|
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
|
2488
|
-
write('fillchar='+self.fillchar)
|
2489
2510
|
write('format='+self.format)
|
2490
|
-
if self.colspec:
|
2491
|
-
write('colspec='+self.colspec)
|
2492
|
-
if self.headrow:
|
2493
|
-
write('headrow='+self.headrow)
|
2494
|
-
if self.footrow:
|
2495
|
-
write('footrow='+self.footrow)
|
2496
|
-
write('bodyrow='+self.bodyrow)
|
2497
|
-
if self.headdata:
|
2498
|
-
write('headdata='+self.headdata)
|
2499
|
-
if self.footdata:
|
2500
|
-
write('footdata='+self.footdata)
|
2501
|
-
write('bodydata='+self.bodydata)
|
2502
2511
|
write('')
|
2503
2512
|
def validate(self):
|
2504
2513
|
AbstractBlock.validate(self)
|
2505
|
-
|
2506
|
-
|
2514
|
+
if self.format not in Table.FORMATS:
|
2515
|
+
self.error('illegal format=%s' % self.format,halt=True)
|
2516
|
+
self.tags = self.tags or 'default'
|
2517
|
+
tags = [self.tags]
|
2518
|
+
tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
|
2519
|
+
for t in tags:
|
2520
|
+
if t not in tables.tags:
|
2521
|
+
self.error('missing section: [tabletags-%s]' % t,halt=True)
|
2522
|
+
if self.separator:
|
2523
|
+
# Evaluate escape characters.
|
2524
|
+
self.separator = eval('"'+self.separator+'"')
|
2525
|
+
#TODO: Move to class Tables
|
2507
2526
|
# Check global table parameters.
|
2508
|
-
if config.textwidth is None:
|
2509
|
-
self.check_msg = 'missing [miscellaneous] textwidth entry'
|
2510
2527
|
elif config.pagewidth is None:
|
2511
|
-
self.
|
2528
|
+
self.error('missing [miscellaneous] entry: pagewidth')
|
2512
2529
|
elif config.pageunits is None:
|
2513
|
-
self.
-   old lines 2514-2525 deleted (content not rendered in this view)
|
2530
|
+
self.error('missing [miscellaneous] entry: pageunits')
|
2531
|
+
def validate_attributes(self):
|
2532
|
+
"""Validate and parse table attributes."""
|
2533
|
+
# Set defaults.
|
2534
|
+
format = self.format
|
2535
|
+
tags = self.tags
|
2536
|
+
separator = self.separator
|
2537
|
+
abswidth = float(config.pagewidth)
|
2538
|
+
pcwidth = 100.0
|
2539
|
+
for k,v in self.attributes.items():
|
2540
|
+
if k == 'format':
|
2541
|
+
if v not in self.FORMATS:
|
2542
|
+
self.error('illegal %s=%s' % (k,v))
|
2543
|
+
else:
|
2544
|
+
format = v
|
2545
|
+
elif k == 'tags':
|
2546
|
+
if v not in tables.tags:
|
2547
|
+
self.error('illegal %s=%s' % (k,v))
|
2548
|
+
else:
|
2549
|
+
tags = v
|
2550
|
+
elif k == 'separator':
|
2551
|
+
separator = v
|
2552
|
+
elif k == 'width':
|
2553
|
+
if not re.match(r'^\d{1,3}%$',v) or int(v[:-1]) > 100:
|
2554
|
+
self.error('illegal %s=%s' % (k,v))
|
2555
|
+
else:
|
2556
|
+
abswidth = float(v[:-1])/100 * config.pagewidth
|
2557
|
+
pcwidth = float(v[:-1])
|
2558
|
+
# Calculate separator if it has not been specified.
|
2559
|
+
if not separator:
|
2560
|
+
separator = Table.SEPARATORS[format]
|
2561
|
+
if format == 'csv':
|
2562
|
+
if len(separator) > 1:
|
2563
|
+
self.error('illegal csv separator=%s' % separator)
|
2564
|
+
separator = ','
|
2526
2565
|
else:
|
2527
|
-
|
2528
|
-
|
2529
|
-
|
2530
|
-
|
2531
|
-
|
2532
|
-
|
2533
|
-
|
2534
|
-
|
2535
|
-
|
2536
|
-
|
2537
|
-
|
2538
|
-
|
2539
|
-
|
2566
|
+
if not is_regexp(separator):
|
2567
|
+
self.error('illegal regular expression: separator=%s' %
|
2568
|
+
separator)
|
2569
|
+
self.parameters.format = format
|
2570
|
+
self.parameters.tags = tags
|
2571
|
+
self.parameters.separator = separator
|
2572
|
+
self.abswidth = abswidth
|
2573
|
+
self.pcwidth = pcwidth
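validate_attributes above turns a 'width=NN%' table attribute into an absolute width in page units plus a percentage. The arithmetic, pulled out as a standalone sketch (the pagewidth value is just an example, not a package default):

    import re

    def table_widths(width, pagewidth):
        # width is e.g. '80%'; returns (absolute width in page units, percent).
        if not re.match(r'^\d{1,3}%$', width) or int(width[:-1]) > 100:
            raise ValueError('illegal width=%s' % width)
        pcwidth = float(width[:-1])
        abswidth = pcwidth / 100 * pagewidth
        return abswidth, pcwidth

    print(table_widths('80%', 425))   # (340.0, 80.0)
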
|
2574
|
+
def get_tags(self,params):
|
2575
|
+
tags = self.get_param('tags',params)
|
2576
|
+
assert(tags and tags in tables.tags)
|
2577
|
+
return tables.tags[tags]
|
2578
|
+
def get_style(self,prefix):
|
2579
|
+
"""
|
2580
|
+
Return the style dictionary whose name starts with 'prefix'.
|
2581
|
+
"""
|
2582
|
+
if prefix is None:
|
2583
|
+
return None
|
2584
|
+
names = self.styles.keys()
|
2585
|
+
names.sort()
|
2586
|
+
for name in names:
|
2587
|
+
if name.startswith(prefix):
|
2588
|
+
return self.styles[name]
|
2540
2589
|
else:
|
2541
|
-
self.
|
2542
|
-
|
2543
|
-
|
2544
|
-
|
2545
|
-
|
2546
|
-
|
2547
|
-
|
2548
|
-
|
2549
|
-
|
2590
|
+
self.error('missing style: %s*' % prefix)
|
2591
|
+
return None
|
2592
|
+
def parse_cols(self,cols):
|
2593
|
+
"""
|
2594
|
+
Build list of column objects from table 'cols' attribute.
|
2595
|
+
"""
|
2596
|
+
# [<multiplier>*][<align>][<width>][<style>]
|
2597
|
+
COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[<\^>])?(?P<width>\d+%?)?(?P<style>[a-zA-Z]\w*)?$'
|
2598
|
+
# [<multiplier>*][<width>][<align>][<style>]
|
2599
|
+
COLS_RE2 = r'^((?P<count>\d+)\*)?(?P<width>\d+%?)?(?P<align>[<\^>])?(?P<style>[a-zA-Z]\w*)?$'
|
2600
|
+
reo1 = re.compile(COLS_RE1)
|
2601
|
+
reo2 = re.compile(COLS_RE2)
|
2602
|
+
cols = str(cols)
|
2603
|
+
if re.match(r'^\d+$',cols):
|
2604
|
+
for i in range(int(cols)):
|
2605
|
+
self.columns.append(Column())
|
2550
2606
|
else:
|
2551
|
-
|
2552
|
-
|
2553
|
-
|
2554
|
-
|
2555
|
-
|
2556
|
-
|
2557
|
-
|
2558
|
-
|
2559
|
-
|
2560
|
-
|
2561
|
-
|
2562
|
-
s = re.sub(fc+r'+$','',s)
|
2563
|
-
if s == '':
|
2564
|
-
c.rulerwidth = None
|
2565
|
-
else:
|
2566
|
-
c.rulerwidth = int(validate(s,'int($)>0',
|
2567
|
-
'malformed ruler: bad width'))
|
2568
|
-
else: # Calculate column width from inter-fillchar intervals.
|
2569
|
-
if not re.match(r'^'+fc+r'+$',s):
|
2570
|
-
raise EAsciiDoc,'malformed ruler: illegal fillchars'
|
2571
|
-
c.rulerwidth = len(s)+1
|
2572
|
-
self.columns.append(c)
|
2573
|
-
# Fill in unspecified ruler widths.
|
2574
|
-
if self.isnumeric:
|
2575
|
-
if self.columns[0].rulerwidth is None:
|
2576
|
-
prevwidth = 1
|
2577
|
-
for c in self.columns:
|
2578
|
-
if c.rulerwidth is None:
|
2579
|
-
c.rulerwidth = prevwidth
|
2580
|
-
prevwidth = c.rulerwidth
|
2581
|
-
def build_colspecs(self):
|
2582
|
-
"""Generate colwidths and colspecs. This can only be done after the
|
2583
|
-
table arguments have been parsed since we use the table format."""
|
2584
|
-
self.attributes['cols'] = len(self.columns)
|
2585
|
-
# Calculate total ruler width.
|
2586
|
-
totalwidth = 0
|
2587
|
-
for c in self.columns:
|
2588
|
-
totalwidth = totalwidth + c.rulerwidth
|
2589
|
-
if totalwidth <= 0:
|
2590
|
-
raise EAsciiDoc,'zero width table'
|
2591
|
-
# Calculate marked up colwidths from rulerwidths.
|
2592
|
-
for c in self.columns:
|
2593
|
-
# Convert ruler width to output page width.
|
2594
|
-
width = float(c.rulerwidth)
|
2595
|
-
if self.format == 'fixed':
|
2596
|
-
if self.tablewidth is None:
|
2597
|
-
# Size proportional to ruler width.
|
2598
|
-
colfraction = width/config.textwidth
|
2607
|
+
for col in re.split(r'\s*,\s*',cols):
|
2608
|
+
mo = reo1.match(col)
|
2609
|
+
if not mo:
|
2610
|
+
mo = reo2.match(col)
|
2611
|
+
if mo:
|
2612
|
+
count = int(mo.groupdict().get('count') or 1)
|
2613
|
+
for i in range(count):
|
2614
|
+
self.columns.append(
|
2615
|
+
Column(mo.group('width'), mo.group('align'),
|
2616
|
+
self.get_style(mo.group('style')))
|
2617
|
+
)
|
2599
2618
|
else:
|
2600
|
-
|
2601
|
-
|
2619
|
+
self.error('illegal column spec: %s' % col,self.start)
|
2620
|
+
# Validate widths and calculate missing widths.
|
2621
|
+
n = 0; percents = 0; props = 0
|
2622
|
+
for col in self.columns:
|
2623
|
+
if col.width:
|
2624
|
+
if col.width[-1] == '%': percents += int(col.width[:-1])
|
2625
|
+
else: props += int(col.width)
|
2626
|
+
n += 1
|
2627
|
+
if percents > 0 and props > 0:
|
2628
|
+
self.error('mixed percent and proportional widths: %s'
|
2629
|
+
% cols,self.start)
|
2630
|
+
pcunits = percents > 0
|
2631
|
+
# Fill in missing widths.
|
2632
|
+
if n < len(self.columns) and percents < 100:
|
2633
|
+
if pcunits:
|
2634
|
+
width = float(100 - percents)/float(len(self.columns) - n)
|
2602
2635
|
else:
-   old lines 2603-2619 deleted (content not rendered in this view)
|
2636
|
+
width = 1
|
2637
|
+
for col in self.columns:
|
2638
|
+
if not col.width:
|
2639
|
+
if pcunits:
|
2640
|
+
col.width = str(int(width))+'%'
|
2641
|
+
percents += width
|
2642
|
+
else:
|
2643
|
+
col.width = str(width)
|
2644
|
+
props += width
|
2645
|
+
# Calculate column alignment and absolute and percent width values.
|
2646
|
+
percents = 0
|
2647
|
+
for col in self.columns:
|
2648
|
+
col.colalign = Table.ALIGNMENTS[col.align]
|
2649
|
+
if pcunits:
|
2650
|
+
col.pcwidth = float(col.width[:-1])
|
2651
|
+
else:
|
2652
|
+
col.pcwidth = (float(col.width)/props)*100
|
2653
|
+
col.abswidth = int(self.abswidth * (col.pcwidth/100))
|
2654
|
+
percents += col.pcwidth
|
2655
|
+
col.pcwidth = int(col.pcwidth)
|
2656
|
+
if round(percents) > 100:
|
2657
|
+
self.error('total width exceeds 100%%: %s' % cols,self.start)
|
2658
|
+
elif round(percents) < 100:
|
2659
|
+
self.error('total width less than 100%%: %s' % cols,self.start)
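parse_cols above accepts specs like '4' or '2*<3,^25%strong' via the two COLS_RE patterns. A quick standalone check of how the first pattern decomposes a spec; the example specs are mine, while the regular expression is the one shown in the hunk:

    import re

    COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[<\^>])?(?P<width>\d+%?)?(?P<style>[a-zA-Z]\w*)?$'

    for spec in ('3', '2*<3', '^25%strong'):
        mo = re.match(COLS_RE1, spec)
        print('%s -> %s' % (spec, mo.groupdict()))
    # '2*<3'       -> count '2', align '<', width '3'
    # '^25%strong' -> align '^', width '25%', style 'strong'
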
|
2660
|
+
def build_colspecs(self):
|
2661
|
+
"""
|
2662
|
+
Generate column related substitution attributes.
|
2663
|
+
"""
|
2664
|
+
cols = []
|
2665
|
+
i = 0
|
2666
|
+
for col in self.columns:
|
2667
|
+
i += 1
|
2668
|
+
colspec = self.get_tags(col.style).colspec
|
2669
|
+
if colspec:
|
2670
|
+
self.attributes['colalign'] = col.colalign
|
2671
|
+
self.attributes['colabswidth'] = col.abswidth
|
2672
|
+
self.attributes['colpcwidth'] = col.pcwidth
|
2673
|
+
self.attributes['colnumber'] = str(i+1)
|
2674
|
+
s = subs_attrs(colspec, self.attributes)
|
2620
2675
|
if not s:
|
2621
2676
|
warning('colspec dropped: contains undefined attribute')
|
2622
2677
|
else:
|
2623
2678
|
cols.append(s)
|
2679
|
+
if cols:
|
2624
2680
|
self.attributes['colspecs'] = writer.newline.join(cols)
|
2625
|
-
def
|
2626
|
-
"""
|
2627
|
-
|
2628
|
-
|
2629
|
-
|
2630
|
-
|
2631
|
-
|
2632
|
-
|
2633
|
-
|
2634
|
-
|
2635
|
-
|
2636
|
-
|
2637
|
-
return (join_lines(rows[:i]), rows[i+1:])
|
2638
|
-
def parse_rows(self, rows, rtag, dtag):
|
2639
|
-
"""Parse rows list using the row and data tags. Returns a substituted
|
2640
|
-
list of output lines."""
|
2641
|
-
result = []
|
2642
|
-
# Source rows are parsed as single block, rather than line by line, to
|
2643
|
-
# allow the CSV reader to handle multi-line rows.
|
2644
|
-
if self.format == 'fixed':
|
2645
|
-
rows = self.parse_fixed(rows)
|
2646
|
-
elif self.format == 'csv':
|
2647
|
-
rows = self.parse_csv(rows)
|
2648
|
-
elif self.format == 'dsv':
|
2649
|
-
rows = self.parse_dsv(rows)
|
2681
|
+
def parse_rows(self, text):
|
2682
|
+
"""
|
2683
|
+
Parse the table source text into self.rows (a list of rows, each row
|
2684
|
+
is a list of raw cell text.
|
2685
|
+
"""
|
2686
|
+
if self.parameters.format in ('psv','dsv'):
|
2687
|
+
cells = self.parse_psv_dsv(text)
|
2688
|
+
colcount = len(self.columns)
|
2689
|
+
for i in range(0, len(cells), colcount):
|
2690
|
+
self.rows.append(cells[i:i+colcount])
|
2691
|
+
elif self.parameters.format == 'csv':
|
2692
|
+
self.parse_csv(text)
|
2650
2693
|
else:
|
2651
2694
|
assert True,'illegal table format'
|
2652
|
-
|
2695
|
+
def subs_rows(self, rows, rowtype='body'):
|
2696
|
+
"""
|
2697
|
+
Return a string of output markup from a list of rows, each row
|
2698
|
+
is a list of raw cell text.
|
2699
|
+
"""
|
2700
|
+
tags = tables.tags[self.parameters.tags]
|
2701
|
+
if rowtype == 'header':
|
2702
|
+
rtag = tags.headrow
|
2703
|
+
elif rowtype == 'footer':
|
2704
|
+
rtag = tags.footrow
|
2705
|
+
else:
|
2706
|
+
rtag = tags.bodyrow
|
2707
|
+
result = []
|
2653
2708
|
stag,etag = subs_tag(rtag,self.attributes)
|
2654
2709
|
for row in rows:
|
2655
|
-
result.append(
|
2656
|
-
|
2657
|
-
|
2658
|
-
|
2659
|
-
|
2660
|
-
|
2661
|
-
|
2662
|
-
Returns a
|
2663
|
-
|
2664
|
-
if len(
|
2665
|
-
warning('fewer row data items
|
2666
|
-
if len(
|
2710
|
+
result.append(stag)
|
2711
|
+
result += self.subs_row(row,rowtype)
|
2712
|
+
result.append(etag)
|
2713
|
+
return writer.newline.join(result)
|
2714
|
+
def subs_row(self, row, rowtype):
|
2715
|
+
"""
|
2716
|
+
Substitute the list of cells using the cell data tag.
|
2717
|
+
Returns a list of marked up table cell elements.
|
2718
|
+
"""
|
2719
|
+
if len(row) < len(self.columns):
|
2720
|
+
warning('fewer row data items than table columns')
|
2721
|
+
if len(row) > len(self.columns):
|
2667
2722
|
warning('more row data items than table columns')
|
2723
|
+
result = []
|
2668
2724
|
for i in range(len(self.columns)):
|
2669
|
-
|
2670
|
-
|
2725
|
+
col = self.columns[i]
|
2726
|
+
tags = self.get_tags(col.style)
|
2727
|
+
self.attributes['colalign'] = col.colalign
|
2728
|
+
self.attributes['colabswidth'] = col.abswidth
|
2729
|
+
self.attributes['colpcwidth'] = col.pcwidth
|
2730
|
+
self.attributes['colnumber'] = str(i+1)
|
2731
|
+
if rowtype == 'header':
|
2732
|
+
dtag = tags.headdata
|
2733
|
+
elif rowtype == 'footer':
|
2734
|
+
dtag = tags.footdata
|
2671
2735
|
else:
|
2672
|
-
|
2673
|
-
|
2674
|
-
|
2675
|
-
|
2676
|
-
self.attributes['colnumber'] = str(i + 1)
|
2677
|
-
stag,etag = subs_tag(dtag,self.attributes)
|
2678
|
-
# Insert AsciiDoc line break (' +') where row data has newlines
|
2679
|
-
# ('\n'). This is really only useful when the table format is csv
|
2680
|
-
# and the output markup is HTML. It's also a bit dubious in that it
|
2681
|
-
# assumes the user has not modified the shipped line break pattern.
|
2682
|
-
subs = self.get_subs()[0]
|
2683
|
-
if 'replacements' in subs:
|
2684
|
-
# Insert line breaks in cell data.
|
2685
|
-
d = re.sub(r'(?m)\n',r' +\n',d)
|
2686
|
-
d = d.split('\n') # So writer.newline is written.
|
2736
|
+
dtag = tags.bodydata
|
2737
|
+
# Fill missing column data with blanks.
|
2738
|
+
if i > len(row) - 1:
|
2739
|
+
data = ''
|
2687
2740
|
else:
|
2688
|
-
|
2689
|
-
|
2690
|
-
|
2691
|
-
|
2692
|
-
|
2693
|
-
|
2694
|
-
|
2695
|
-
|
2696
|
-
data =
|
2697
|
-
|
2698
|
-
|
2699
|
-
|
2700
|
-
|
2701
|
-
|
2702
|
-
if
|
2703
|
-
|
2704
|
-
|
2705
|
-
|
2706
|
-
|
2707
|
-
|
2708
|
-
|
2709
|
-
|
2710
|
-
result.append(data)
|
2741
|
+
data = row[i]
|
2742
|
+
# Format header cells with the table style not column style.
|
2743
|
+
if rowtype == 'header':
|
2744
|
+
colstyle = None
|
2745
|
+
else:
|
2746
|
+
colstyle = col.style
|
2747
|
+
presubs,postsubs = self.get_subs(colstyle)
|
2748
|
+
data = [data]
|
2749
|
+
data = Lex.subs(data, presubs)
|
2750
|
+
data = filter_lines(self.get_param('filter',colstyle),
|
2751
|
+
data, self.attributes)
|
2752
|
+
data = Lex.subs(data, postsubs)
|
2753
|
+
if rowtype != 'header':
|
2754
|
+
ptag = tags.paragraph
|
2755
|
+
if ptag:
|
2756
|
+
stag,etag = subs_tag(ptag,self.attributes)
|
2757
|
+
text = '\n'.join(data).strip()
|
2758
|
+
data = []
|
2759
|
+
for para in re.split(r'\n{2,}',text):
|
2760
|
+
data += dovetail_tags([stag],para.split('\n'),[etag])
|
2761
|
+
stag,etag = subs_tag(dtag,self.attributes)
|
2762
|
+
result = result + dovetail_tags([stag],data,[etag])
|
2711
2763
|
return result
|
2712
|
-
def parse_csv(self,
|
2713
|
-
"""
|
2714
|
-
|
2764
|
+
def parse_csv(self,text):
|
2765
|
+
"""
|
2766
|
+
Parse the table source text and return a list of rows, each row
|
2767
|
+
is a list of raw cell text.
|
2768
|
+
"""
|
2715
2769
|
import StringIO
|
2716
2770
|
import csv
|
2717
|
-
|
2718
|
-
rdr = csv.reader(StringIO.StringIO('\r\n'.join(
|
2719
|
-
|
2771
|
+
self.rows = []
|
2772
|
+
rdr = csv.reader(StringIO.StringIO('\r\n'.join(text)),
|
2773
|
+
delimiter=self.parameters.separator, skipinitialspace=True)
|
2720
2774
|
try:
|
2721
2775
|
for row in rdr:
|
2722
|
-
|
2776
|
+
self.rows.append(row)
|
2723
2777
|
except:
-   old lines 2724-2744 deleted (content not rendered in this view)
|
2778
|
+
self.error('csv parse error: %s' % row)
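parse_csv above feeds the joined table text through the standard csv module using the table's separator. The same approach in a few self-contained lines, written against io.StringIO for brevity (the package itself uses the Python 2 StringIO module, as the hunk shows):

    import csv
    import io

    def parse_csv_rows(lines, separator=','):
        # csv.reader copes with quoted, multi-line cells that a plain split would break.
        rdr = csv.reader(io.StringIO(u'\r\n'.join(lines)),
                         delimiter=separator, skipinitialspace=True)
        return [row for row in rdr]

    print(parse_csv_rows(['a, "b, with comma", c', '1,2,3']))
    # [['a', 'b, with comma', 'c'], ['1', '2', '3']]
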
|
2779
|
+
def parse_psv_dsv(self,text):
|
2780
|
+
"""
|
2781
|
+
Parse list of PSV or DSV table source text lines and return a list of
|
2782
|
+
cells.
|
2783
|
+
"""
|
2784
|
+
text = '\n'.join(text)
|
2785
|
+
separator = '(?msu)'+self.parameters.separator
|
2786
|
+
format = self.parameters.format
|
2787
|
+
start = 0
|
2788
|
+
cellcount = 1
|
2789
|
+
cells = []
|
2790
|
+
cell = ''
|
2791
|
+
for mo in re.finditer(separator,text):
|
2792
|
+
cell += text[start:mo.start()]
|
2793
|
+
if cell.endswith('\\'):
|
2794
|
+
cell = cell[:-1]+mo.group() # Reinstate escaped separators.
|
2795
|
+
else:
|
2796
|
+
for i in range(cellcount):
|
2797
|
+
cells.append(cell)
|
2798
|
+
cellcount = int(mo.groupdict().get('cellcount') or '1')
|
2799
|
+
cell = ''
|
2800
|
+
start = mo.end()
|
2801
|
+
# Last cell follows final separator.
|
2802
|
+
cell += text[start:]
|
2803
|
+
for i in range(cellcount):
|
2804
|
+
cells.append(cell)
|
2805
|
+
# We expect a dummy blank item preceding first PSV cell.
|
2806
|
+
if format == 'psv':
|
2807
|
+
if cells[0] != '':
|
2808
|
+
self.error('missing leading separator: %s' % separator,
|
2809
|
+
self.start)
|
2810
|
+
else:
|
2811
|
+
cells.pop(0)
|
2812
|
+
return cells
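parse_psv_dsv above walks the separator matches, re-attaching backslash-escaped separators and repeating a cell when the '<n>*|' multiplier is present. A condensed sketch of the same loop; the separator pattern is the one from SEPARATORS['psv'], while the sample row is mine:

    import re

    def split_psv_cells(text, separator=r'((?P<cellcount>\d+)\*)?\|'):
        cells, cell, start, cellcount = [], '', 0, 1
        for mo in re.finditer(separator, text):
            cell += text[start:mo.start()]
            if cell.endswith('\\'):
                cell = cell[:-1] + mo.group()   # reinstate escaped separator
            else:
                cells.extend([cell] * cellcount)
                cellcount = int(mo.groupdict().get('cellcount') or '1')
                cell = ''
            start = mo.end()
        cells.extend([cell + text[start:]] * cellcount)
        # Drop the dummy blank item that precedes the first PSV cell.
        return cells[1:] if cells and cells[0] == '' else cells

    print(split_psv_cells('|one|two\\|2a 2*|repeated'))
    # ['one', 'two|2a ', 'repeated', 'repeated']
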
|
2745
2813
|
def translate(self):
|
2746
2814
|
AbstractBlock.translate(self)
|
2815
|
+
reader.read() # Discard delimiter.
|
2747
2816
|
# Reset instance specific properties.
|
2748
|
-
self.underline = None
|
2749
2817
|
self.columns = []
|
2818
|
+
self.rows = []
|
2750
2819
|
attrs = {}
|
2751
2820
|
BlockTitle.consume(attrs)
|
2752
|
-
# Add relevant globals to table substitutions.
|
2753
|
-
attrs['pagewidth'] = str(config.pagewidth)
|
2754
|
-
attrs['pageunits'] = config.pageunits
|
2755
2821
|
# Mix in document attribute list.
|
2756
2822
|
AttributeList.consume(attrs)
|
2757
|
-
# Validate overridable attributes.
|
2758
|
-
for k,v in attrs.items():
|
2759
|
-
if k == 'format':
|
2760
|
-
if v not in self.FORMATS:
|
2761
|
-
raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
|
2762
|
-
self.format = v
|
2763
|
-
elif k == 'tablewidth':
|
2764
|
-
try:
|
2765
|
-
self.tablewidth = float(attrs['tablewidth'])
|
2766
|
-
except:
|
2767
|
-
raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
|
2768
2823
|
self.merge_attributes(attrs)
|
2769
|
-
|
2770
|
-
|
2771
|
-
|
2772
|
-
self.
|
2824
|
+
self.validate_attributes()
|
2825
|
+
# Add global and calculated configuration parameters.
|
2826
|
+
self.attributes['pagewidth'] = config.pagewidth
|
2827
|
+
self.attributes['pageunits'] = config.pageunits
|
2828
|
+
self.attributes['tableabswidth'] = int(self.abswidth)
|
2829
|
+
self.attributes['tablepcwidth'] = int(self.pcwidth)
|
2773
2830
|
# Read the entire table.
|
2774
|
-
|
2775
|
-
|
2776
|
-
|
2777
|
-
|
2778
|
-
|
2779
|
-
|
2780
|
-
|
2781
|
-
|
2782
|
-
raise EAsciiDoc,'closing [%s] underline expected' % self.name
|
2783
|
-
table.append(reader.read())
|
2784
|
-
# EXPERIMENTAL: The number of lines in the table, requested by Benjamin Klum.
|
2785
|
-
self.attributes['rows'] = str(len(table))
|
2786
|
-
#TODO: Inherited validate() doesn't set check_msg, needs checking.
|
2787
|
-
if self.check_msg: # Skip if table definition was marked invalid.
|
2788
|
-
warning('skipping %s table: %s' % (self.name,self.check_msg))
|
2831
|
+
text = reader.read_until(self.delimiter)
|
2832
|
+
if reader.eof():
|
2833
|
+
self.error('missing closing delimiter',self.start)
|
2834
|
+
else:
|
2835
|
+
delimiter = reader.read() # Discard closing delimiter.
|
2836
|
+
assert re.match(self.delimiter,delimiter)
|
2837
|
+
if len(text) == 0:
|
2838
|
+
warning('[%s] table is empty' % self.name)
|
2789
2839
|
return
|
2790
|
-
|
2840
|
+
cols = attrs.get('cols')
|
2841
|
+
if not cols:
|
2842
|
+
# Calculate column count from number of items in first line.
|
2843
|
+
if self.parameters.format == 'csv':
|
2844
|
+
cols = text[0].count(self.parameters.separator)
|
2845
|
+
else:
|
2846
|
+
cols = len(self.parse_psv_dsv(text[:1]))
|
2847
|
+
self.parse_cols(cols)
|
2848
|
+
# Set calculated attributes.
|
2849
|
+
self.attributes['colcount'] = len(self.columns)
|
2791
2850
|
self.build_colspecs()
|
2851
|
+
self.parse_rows(text)
|
2852
|
+
# The 'rowcount' attribute is used by the experimental LaTeX backend.
|
2853
|
+
self.attributes['rowcount'] = str(len(self.rows))
|
2792
2854
|
# Generate headrows, footrows, bodyrows.
|
2793
2855
|
# Headrow, footrow and bodyrow data replaces same named attributes in
|
2794
2856
|
# the table markup template. In order to ensure this data does not get
|
@@ -2796,25 +2858,20 @@ class Table(AbstractBlock):
|
|
2796
2858
|
# already substituted inline passthroughs) unique placeholders are used
|
2797
2859
|
# (the tab character does not appear elsewhere since it is expanded on
|
2798
2860
|
# input) which are replaced after template attribute substitution.
|
2799
|
-
headrows = footrows =
|
2800
|
-
|
2801
|
-
|
2802
|
-
headrows = bodyrows
|
2803
|
-
bodyrows,table = self.split_rows(table)
|
2804
|
-
if table:
|
2805
|
-
footrows,table = self.split_rows(table)
|
2806
|
-
if headrows:
|
2807
|
-
headrows = self.parse_rows(headrows, self.headrow, self.headdata)
|
2808
|
-
headrows = writer.newline.join(headrows)
|
2861
|
+
headrows = footrows = bodyrows = None
|
2862
|
+
if self.rows and 'header' in self.parameters.options:
|
2863
|
+
headrows = self.subs_rows(self.rows[0:1],'header')
|
2809
2864
|
self.attributes['headrows'] = '\theadrows\t'
|
2810
|
-
|
2811
|
-
|
2812
|
-
footrows =
|
2865
|
+
self.rows = self.rows[1:]
|
2866
|
+
if self.rows and 'footer' in self.parameters.options:
|
2867
|
+
footrows = self.subs_rows( self.rows[-1:], 'footer')
|
2813
2868
|
self.attributes['footrows'] = '\tfootrows\t'
|
2814
|
-
|
2815
|
-
|
2816
|
-
|
2817
|
-
|
2869
|
+
self.rows = self.rows[:-1]
|
2870
|
+
if self.rows:
|
2871
|
+
bodyrows = self.subs_rows(self.rows)
|
2872
|
+
self.attributes['bodyrows'] = '\tbodyrows\t'
|
2873
|
+
table = subs_attrs(config.sections[self.parameters.template],
|
2874
|
+
self.attributes)
|
2818
2875
|
table = writer.newline.join(table)
|
2819
2876
|
# Before we finish replace the table head, foot and body place holders
|
2820
2877
|
# with the real data.
|
@@ -2822,72 +2879,85 @@ class Table(AbstractBlock):
|
|
2822
2879
|
table = table.replace('\theadrows\t', headrows, 1)
|
2823
2880
|
if footrows:
|
2824
2881
|
table = table.replace('\tfootrows\t', footrows, 1)
|
2825
|
-
|
2882
|
+
if bodyrows:
|
2883
|
+
table = table.replace('\tbodyrows\t', bodyrows, 1)
|
2826
2884
|
writer.write(table)
|
2827
2885
|
|
2828
2886
|
class Tables(AbstractBlocks):
|
2829
2887
|
"""List of tables."""
|
2830
2888
|
BLOCK_TYPE = Table
|
2831
2889
|
PREFIX = 'tabledef-'
|
2890
|
+
TAGS = ('colspec', 'headrow','footrow','bodyrow',
|
2891
|
+
'headdata','footdata', 'bodydata','paragraph')
|
2832
2892
|
def __init__(self):
|
2833
2893
|
AbstractBlocks.__init__(self)
|
2894
|
+
# Table tags dictionary. Each entry is a tags dictionary.
|
2895
|
+
self.tags={}
|
2834
2896
|
def load(self,sections):
|
2835
2897
|
AbstractBlocks.load(self,sections)
|
2836
|
-
|
2898
|
+
self.load_tags(sections)
|
2899
|
+
def load_tags(self,sections):
|
2900
|
+
"""
|
2901
|
+
Load tabletags-* conf file sections to self.tags.
|
2902
|
+
"""
|
2903
|
+
for section in sections.keys():
|
2904
|
+
mo = re.match(r'^tabletags-(?P<name>\w+)$',section)
|
2905
|
+
if mo:
|
2906
|
+
name = mo.group('name')
|
2907
|
+
if self.tags.has_key(name):
|
2908
|
+
d = self.tags[name]
|
2909
|
+
else:
|
2910
|
+
d = AttrDict()
|
2911
|
+
parse_entries(sections.get(section,()),d)
|
2912
|
+
for k in d.keys():
|
2913
|
+
if k not in self.TAGS:
|
2914
|
+
warning('[%s] contains illegal table tag: %s' %
|
2915
|
+
(section,k))
|
2916
|
+
self.tags[name] = d
|
2837
2917
|
def validate(self):
|
2838
|
-
|
2918
|
+
AbstractBlocks.validate(self)
|
2839
2919
|
# Check we have a default table definition,
|
2840
2920
|
for i in range(len(self.blocks)):
|
2841
2921
|
if self.blocks[i].name == 'tabledef-default':
|
2842
2922
|
default = self.blocks[i]
|
2843
2923
|
break
|
2844
2924
|
else:
|
2845
|
-
raise EAsciiDoc,'missing [
|
2846
|
-
# Set default table defaults.
|
2847
|
-
if default.format is None: default.subs = 'fixed'
|
2925
|
+
raise EAsciiDoc,'missing [tabledef-default] section'
|
2848
2926
|
# Propagate defaults to unspecified table parameters.
|
2849
2927
|
for b in self.blocks:
|
2850
2928
|
if b is not default:
|
2851
|
-
if b.fillchar is None: b.fillchar = default.fillchar
|
2852
2929
|
if b.format is None: b.format = default.format
|
2853
2930
|
if b.template is None: b.template = default.template
-   old lines 2854-2875 deleted (content not rendered in this view)
|
2876
|
-
if not
|
2877
|
-
b.headrow = b.bodyrow
|
2878
|
-
if not b.footrow:
|
2879
|
-
b.footrow = b.bodyrow
|
2880
|
-
if not b.headdata:
|
2881
|
-
b.headdata = b.bodydata
|
2882
|
-
if not b.footdata:
|
2883
|
-
b.footdata = b.bodydata
|
2884
|
-
self.delimiter = join_regexp(delimiters)
|
2931
|
+
# Check tags and propagate default tags.
|
2932
|
+
if not 'default' in self.tags:
|
2933
|
+
raise EAsciiDoc,'missing [tabletags-default] section'
|
2934
|
+
default = self.tags['default']
|
2935
|
+
for tag in ('bodyrow','bodydata','paragraph'): # Mandatory default tags.
|
2936
|
+
if tag not in default:
|
2937
|
+
raise EAsciiDoc,'missing [tabletags-default] entry: %s' % tag
|
2938
|
+
for t in self.tags.values():
|
2939
|
+
if t is not default:
|
2940
|
+
if t.colspec is None: t.colspec = default.colspec
|
2941
|
+
if t.headrow is None: t.headrow = default.headrow
|
2942
|
+
if t.footrow is None: t.footrow = default.footrow
|
2943
|
+
if t.bodyrow is None: t.bodyrow = default.bodyrow
|
2944
|
+
if t.headdata is None: t.headdata = default.headdata
|
2945
|
+
if t.footdata is None: t.footdata = default.footdata
|
2946
|
+
if t.bodydata is None: t.bodydata = default.bodydata
|
2947
|
+
if t.paragraph is None: t.paragraph = default.paragraph
|
2948
|
+
# Use body tags if header and footer tags are not specified.
|
2949
|
+
for t in self.tags.values():
|
2950
|
+
if not t.headrow: t.headrow = t.bodyrow
|
2951
|
+
if not t.footrow: t.footrow = t.bodyrow
|
2952
|
+
if not t.headdata: t.headdata = t.bodydata
|
2953
|
+
if not t.footdata: t.footdata = t.bodydata
|
2885
2954
|
# Check table definitions are valid.
|
2886
2955
|
for b in self.blocks:
|
2887
2956
|
b.validate()
|
2888
|
-
|
2889
|
-
|
2890
|
-
|
2957
|
+
def dump(self):
|
2958
|
+
AbstractBlocks.dump(self)
|
2959
|
+
for k,v in self.tags.items():
|
2960
|
+
dump_section('tabletags-'+k, v)
|
2891
2961
|
|
2892
2962
|
class Macros:
|
2893
2963
|
# Default system macro syntax.
|
@@ -2896,6 +2966,7 @@ class Macros:
|
|
2896
2966
|
def __init__(self):
|
2897
2967
|
self.macros = [] # List of Macros.
|
2898
2968
|
self.current = None # The last matched block macro.
|
2969
|
+
self.passthroughs = []
|
2899
2970
|
# Initialize default system macro.
|
2900
2971
|
m = Macro()
|
2901
2972
|
m.pattern = self.SYS_DEFAULT
|
@@ -2924,7 +2995,11 @@ class Macros:
|
|
2924
2995
|
write('[macros]')
|
2925
2996
|
# Dump all macros except the first (built-in system) macro.
|
2926
2997
|
for m in self.macros[1:]:
|
2927
|
-
|
2998
|
+
# Escape = in pattern.
|
2999
|
+
macro = '%s=%s%s' % (m.pattern.replace('=',r'\='), m.prefix, m.name)
|
3000
|
+
if m.subslist is not None:
|
3001
|
+
macro += '[' + ','.join(m.subslist) + ']'
|
3002
|
+
write(macro)
|
2928
3003
|
write('')
|
2929
3004
|
def validate(self):
|
2930
3005
|
# Check all named sections exist.
|
@@ -2964,72 +3039,20 @@ class Macros:
|
|
2964
3039
|
if re.match(name,mo.group('name')):
|
2965
3040
|
return mo
|
2966
3041
|
return None
|
2967
|
-
|
2968
|
-
|
2969
|
-
|
2970
|
-
|
2971
|
-
|
2972
|
-
|
2973
|
-
|
2974
|
-
|
2975
|
-
|
2976
|
-
|
2977
|
-
|
2978
|
-
|
2979
|
-
|
2980
|
-
|
2981
|
-
for k,v in d.items():
|
2982
|
-
if v is None: del d[k]
|
2983
|
-
if _macro.name:
|
2984
|
-
name = _macro.name
|
2985
|
-
else:
|
2986
|
-
if not d.has_key('name'):
|
2987
|
-
warning('missing macro name group: %s' % mo.re.pattern)
|
2988
|
-
return ''
|
2989
|
-
name = d['name']
|
2990
|
-
section_name = _macro.section_name(name)
|
2991
|
-
if not section_name:
|
2992
|
-
return ''
|
2993
|
-
# If we're dealing with a block macro get optional block ID and block title.
|
2994
|
-
if _macro.prefix == '#':
|
2995
|
-
AttributeList.consume(d)
|
2996
|
-
BlockTitle.consume(d)
|
2997
|
-
# Parse macro attributes.
|
2998
|
-
if d.has_key('attrlist'):
|
2999
|
-
if d['attrlist'] in (None,''):
|
3000
|
-
del d['attrlist']
|
3001
|
-
else:
|
3002
|
-
parse_attributes(d['attrlist'],d)
|
3003
|
-
if name == 'callout':
|
3004
|
-
listindex =int(d['index'])
|
3005
|
-
d['coid'] = calloutmap.add(listindex)
|
3006
|
-
# Unescape special characters in LaTeX target file names.
|
3007
|
-
if document.backend == 'latex' and d.has_key('target') and d['target']:
|
3008
|
-
if not d.has_key('0'):
|
3009
|
-
d['0'] = d['target']
|
3010
|
-
d['target']= config.subs_specialchars_reverse(d['target'])
|
3011
|
-
# BUG: We've already done attribute substitution on the macro which means
|
3012
|
-
# that any escaped attribute references are now unescaped and will be
|
3013
|
-
# substituted by config.subs_section() below. As a partial fix have withheld
|
3014
|
-
# {0} from substitution but this kludge doesn't fix it for other attributes
|
3015
|
-
# containing unescaped references.
|
3016
|
-
a0 = d.get('0')
|
3017
|
-
if a0:
|
3018
|
-
d['0'] = chr(0) # Replace temporarily with unused character.
|
3019
|
-
body = config.subs_section(section_name,d)
|
3020
|
-
if len(body) == 0:
|
3021
|
-
result = ''
|
3022
|
-
elif len(body) == 1:
|
3023
|
-
result = body[0]
|
3024
|
-
else:
|
3025
|
-
if _macro.prefix == '#':
|
3026
|
-
result = writer.newline.join(body)
|
3027
|
-
else:
|
3028
|
-
# Internally processed inline macros use UNIX line separator.
|
3029
|
-
result = '\n'.join(body)
|
3030
|
-
if a0:
|
3031
|
-
result = result.replace(chr(0), a0)
|
3032
|
-
return result
|
3042
|
+
def extract_passthroughs(self,text,prefix=''):
|
3043
|
+
""" Extract the passthrough text and replace with temporary
|
3044
|
+
placeholders."""
|
3045
|
+
self.passthroughs = []
|
3046
|
+
for m in self.macros:
|
3047
|
+
if m.has_passthrough() and m.prefix == prefix:
|
3048
|
+
text = m.subs_passthroughs(text, self.passthroughs)
|
3049
|
+
return text
|
3050
|
+
def restore_passthroughs(self,text):
|
3051
|
+
""" Replace passthough placeholders with the original passthrough
|
3052
|
+
text."""
|
3053
|
+
for i,v in enumerate(self.passthroughs):
|
3054
|
+
text = text.replace('\t'+str(i)+'\t', self.passthroughs[i], 1)
|
3055
|
+
return text
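The new passthrough machinery above swaps protected text for tab-delimited index placeholders before substitution and swaps it back afterwards. The round trip, reduced to a standalone sketch in which the pattern and the upper-casing step stand in for the real macro regexps and substitutions:

    import re

    PASS_RE = re.compile(r'pass:\[(?P<passtext>.*?)\]')

    def extract_passthroughs(text, store):
        # Replace each protected span with a '\t<index>\t' placeholder.
        def repl(mo):
            store.append(mo.group('passtext'))
            return '\t%d\t' % (len(store) - 1)
        return PASS_RE.sub(repl, text)

    def restore_passthroughs(text, store):
        for i, passtext in enumerate(store):
            text = text.replace('\t%d\t' % i, passtext, 1)
        return text

    store = []
    text = extract_passthroughs('keep pass:[*raw*] safe', store)
    text = text.upper()                         # any substitutions happen here
    print(restore_passthroughs(text, store))    # KEEP *raw* SAFE
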
|
3033
3056
|
|
3034
3057
|
class Macro:
|
3035
3058
|
def __init__(self):
|
@@ -3037,6 +3060,9 @@ class Macro:
|
|
3037
3060
|
self.name = '' # Conf file macro name (None if implicit).
|
3038
3061
|
self.prefix = '' # '' if inline, '+' if system, '#' if block.
|
3039
3062
|
self.reo = None # Compiled pattern re object.
|
3063
|
+
self.subslist = None # Default subs for macros passtext group.
|
3064
|
+
def has_passthrough(self):
|
3065
|
+
return self.pattern.find(r'(?P<passtext>') >= 0
|
3040
3066
|
def section_name(self,name=None):
|
3041
3067
|
"""Return macro markup template section name based on macro name and
|
3042
3068
|
prefix. Return None section not found."""
|
@@ -3051,7 +3077,7 @@ class Macro:
|
|
3051
3077
|
if config.sections.has_key(name+suffix):
|
3052
3078
|
return name+suffix
|
3053
3079
|
else:
|
3054
|
-
warning('missing macro section: [%s]' % name+suffix)
|
3080
|
+
warning('missing macro section: [%s]' % (name+suffix))
|
3055
3081
|
return None
|
3056
3082
|
def equals(self,m):
|
3057
3083
|
if self.pattern != m.pattern:
|
@@ -3066,29 +3092,161 @@ class Macro:
|
|
3066
3092
|
if not e:
|
3067
3093
|
raise EAsciiDoc,'malformed macro entry: %s' % entry
|
3068
3094
|
if not is_regexp(e[0]):
|
3069
|
-
raise EAsciiDoc,'illegal
|
3070
|
-
|
3071
|
-
|
3072
|
-
|
3073
|
-
|
3074
|
-
|
3075
|
-
|
3076
|
-
|
3095
|
+
raise EAsciiDoc,'illegal macro regular expression: %s' % e[0]
|
3096
|
+
pattern, name = e
|
3097
|
+
if name and name[0] in ('+','#'):
|
3098
|
+
prefix, name = name[0], name[1:]
|
3099
|
+
else:
|
3100
|
+
prefix = ''
|
3101
|
+
# Parse passthrough subslist.
|
3102
|
+
mo = re.match(r'^(?P<name>[^[]*)(\[(?P<subslist>.*)\])?$', name)
|
3103
|
+
name = mo.group('name')
|
3104
|
+
if name and not is_name(name):
|
3077
3105
|
raise EAsciiDoc,'illegal section name in macro entry: %s' % entry
|
3106
|
+
subslist = mo.group('subslist')
|
3107
|
+
if subslist is not None:
|
3108
|
+
# Parse and validate passthrough subs.
|
3109
|
+
subslist = parse_options(subslist, SUBS_OPTIONS,
|
3110
|
+
'illegal subs in macro entry: %s' % entry)
|
3111
|
+
self.pattern = pattern
|
3112
|
+
self.reo = re.compile(pattern)
|
3113
|
+
self.prefix = prefix
|
3114
|
+
self.name = name
|
3115
|
+
self.subslist = subslist
|
3116
|
+
|
3078
3117
|
def subs(self,text):
|
3079
|
-
|
3080
|
-
|
3081
|
-
|
3118
|
+
def subs_func(mo):
|
3119
|
+
"""Function called to perform inline macro substitution.
|
3120
|
+
Uses matched macro regular expression object and returns string
|
3121
|
+
containing the substituted macro body."""
|
3122
|
+
# Check if macro reference is escaped.
|
3123
|
+
if mo.group()[0] == '\\':
|
3124
|
+
return mo.group()[1:] # Strip leading backslash.
|
3125
|
+
d = mo.groupdict()
|
3126
|
+
# Delete groups that didn't participate in match.
|
3127
|
+
for k,v in d.items():
|
3128
|
+
if v is None: del d[k]
|
3129
|
+
if self.name:
|
3130
|
+
name = self.name
|
3131
|
+
else:
|
3132
|
+
if not d.has_key('name'):
|
3133
|
+
warning('missing macro name group: %s' % mo.re.pattern)
|
3134
|
+
return ''
|
3135
|
+
name = d['name']
|
3136
|
+
section_name = self.section_name(name)
|
3137
|
+
if not section_name:
|
3138
|
+
return ''
|
3139
|
+
# If we're dealing with a block macro get optional block ID and
|
3140
|
+
# block title.
|
3141
|
+
if self.prefix == '#':
|
3142
|
+
AttributeList.consume(d)
|
3143
|
+
BlockTitle.consume(d)
|
3144
|
+
# Parse macro attributes.
|
3145
|
+
if d.has_key('attrlist'):
|
3146
|
+
if d['attrlist'] in (None,''):
|
3147
|
+
del d['attrlist']
|
3148
|
+
else:
|
3149
|
+
if self.prefix == '':
|
3150
|
+
# Unescape ] characters in inline macros.
|
3151
|
+
d['attrlist'] = d['attrlist'].replace('\\]',']')
|
3152
|
+
parse_attributes(d['attrlist'],d)
|
3153
|
+
# Generate option attributes.
|
3154
|
+
if 'options' in d:
|
3155
|
+
options = parse_options(d['options'], (),
|
3156
|
+
'%s: illegal option name' % name)
|
3157
|
+
for option in options:
|
3158
|
+
d[option+'-option'] = ''
|
3159
|
+
if name == 'callout':
|
3160
|
+
listindex =int(d['index'])
|
3161
|
+
d['coid'] = calloutmap.add(listindex)
|
3162
|
+
# Unescape special characters in LaTeX target file names.
|
3163
|
+
if document.backend == 'latex' and d.has_key('target') and d['target']:
|
3164
|
+
if not d.has_key('0'):
|
3165
|
+
d['0'] = d['target']
|
3166
|
+
d['target']= config.subs_specialchars_reverse(d['target'])
|
3167
|
+
# BUG: We've already done attribute substitution on the macro which
|
3168
|
+
# means that any escaped attribute references are now unescaped and
|
3169
|
+
# will be substituted by config.subs_section() below. As a partial
|
3170
|
+
# fix have withheld {0} from substitution but this kludge doesn't
|
3171
|
+
# fix it for other attributes containing unescaped references.
|
3172
|
+
# Passthrough macros don't have this problem.
|
3173
|
+
a0 = d.get('0')
|
3174
|
+
if a0:
|
3175
|
+
d['0'] = chr(0) # Replace temporarily with unused character.
|
3176
|
+
body = config.subs_section(section_name,d)
|
3177
|
+
if len(body) == 0:
|
3178
|
+
result = ''
|
3179
|
+
elif len(body) == 1:
|
3180
|
+
result = body[0]
|
3181
|
+
else:
|
3182
|
+
if self.prefix == '#':
|
3183
|
+
result = writer.newline.join(body)
|
3184
|
+
else:
|
3185
|
+
# Internally processed inline macros use UNIX line
|
3186
|
+
# separator.
|
3187
|
+
result = '\n'.join(body)
|
3188
|
+
if a0:
|
3189
|
+
result = result.replace(chr(0), a0)
|
3190
|
+
return result
|
3191
|
+
|
3192
|
+
return self.reo.sub(subs_func, text)
|
3193
|
+
|
3082
3194
|
def translate(self):
|
3083
3195
|
""" Block macro translation."""
|
3084
3196
|
assert self.prefix == '#'
|
3085
3197
|
s = reader.read()
|
3086
|
-
|
3198
|
+
if self.has_passthrough():
|
3199
|
+
s = macros.extract_passthroughs(s,'#')
|
3200
|
+
s = subs_attrs(s)
|
3087
3201
|
if s:
|
3088
3202
|
s = self.subs(s)
|
3203
|
+
if self.has_passthrough():
|
3204
|
+
s = macros.restore_passthroughs(s)
|
3089
3205
|
if s:
|
3090
3206
|
writer.write(s)
|
3091
3207
|
|
3208
|
+
def subs_passthroughs(self, text, passthroughs):
|
3209
|
+
""" Replace macro attribute lists in text with placeholders.
|
3210
|
+
Substitute and append the passthrough attribute lists to the
|
3211
|
+
passthroughs list."""
|
3212
|
+
def subs_func(mo):
|
3213
|
+
"""Function called to perform inline macro substitution.
|
3214
|
+
Uses matched macro regular expression object and returns string
|
3215
|
+
containing the substituted macro body."""
|
3216
|
+
# Don't process escaped macro references.
|
3217
|
+
if mo.group()[0] == '\\':
|
3218
|
+
return mo.group()
|
3219
|
+
d = mo.groupdict()
|
3220
|
+
if not d.has_key('passtext'):
|
3221
|
+
warning('passthrough macro %s: missing passtext group' %
|
3222
|
+
d.get('name',''))
|
3223
|
+
return mo.group()
|
3224
|
+
passtext = d['passtext']
|
3225
|
+
if d.get('subslist'):
|
3226
|
+
if d['subslist'].startswith(':'):
|
3227
|
+
error('block macro cannot occur here: %s' % mo.group(),
|
3228
|
+
halt=True)
|
3229
|
+
subslist = parse_options(d['subslist'], SUBS_OPTIONS,
|
3230
|
+
'illegal passthrough macro subs option')
|
3231
|
+
else:
|
3232
|
+
subslist = self.subslist
|
3233
|
+
passtext = Lex.subs_1(passtext,subslist)
|
3234
|
+
if passtext is None: passtext = ''
|
3235
|
+
if self.prefix == '':
|
3236
|
+
# Unescape ] characters in inline macros.
|
3237
|
+
passtext = passtext.replace('\\]',']')
|
3238
|
+
passthroughs.append(passtext)
|
3239
|
+
# Tabs guarantee the placeholders are unambiguous.
|
3240
|
+
result = (
|
3241
|
+
text[mo.start():mo.start('passtext')] +
|
3242
|
+
'\t' + str(len(passthroughs)-1) + '\t' +
|
3243
|
+
text[mo.end('passtext'):mo.end()]
|
3244
|
+
)
|
3245
|
+
return result
|
3246
|
+
|
3247
|
+
return self.reo.sub(subs_func, text)
|
3248
|
+
|
3249
|
+
|
3092
3250
|
class CalloutMap:
|
3093
3251
|
def __init__(self):
|
3094
3252
|
self.comap = {} # key = list index, value = callouts list.
|
@@ -3119,7 +3277,7 @@ class CalloutMap:
|
|
3119
3277
|
result += ' ' + self.calloutid(self.listnumber,coindex)
|
3120
3278
|
return result.strip()
|
3121
3279
|
else:
|
3122
|
-
|
3280
|
+
warning('no callouts refer to list item '+str(listindex))
|
3123
3281
|
return ''
|
3124
3282
|
def validate(self,maxlistindex):
|
3125
3283
|
# Check that all list indexes referenced by callouts exist.
|
@@ -3147,8 +3305,8 @@ class Reader1:
|
|
3147
3305
|
self.tabsize = 8 # Tab expansion number of spaces.
|
3148
3306
|
self.parent = None # Included reader's parent reader.
|
3149
3307
|
self._lineno = 0 # The last line read from file object f.
- self.
- self.
+ self.current_depth = 0 # Current include depth.
+ self.max_depth = 5 # Initial maximum allowed include depth.
|
3152
3310
|
def open(self,fname):
|
3153
3311
|
self.fname = fname
|
3154
3312
|
verbose('reading: '+fname)
|
@@ -3196,12 +3354,13 @@ class Reader1:
|
|
3196
3354
|
# Check for include macro.
|
3197
3355
|
mo = macros.match('+',r'include[1]?',result)
|
3198
3356
|
if mo and not skip:
- #
+ # Don't process include macro once the maximum depth is reached.
|
3358
|
+
if self.current_depth >= self.max_depth:
|
3359
|
+
return result
|
3360
|
+
# Perform attribute substitution on include macro file name.
|
3200
3361
|
fname = subs_attrs(mo.group('target'))
|
3201
3362
|
if not fname:
|
3202
3363
|
return Reader1.read(self) # Return next input line.
- if self.include_depth >= self.include_max:
- raise EAsciiDoc,'maxiumum inlcude depth exceeded'
|
3205
3364
|
if self.fname != '<stdin>':
|
3206
3365
|
fname = os.path.expandvars(os.path.expanduser(fname))
|
3207
3366
|
fname = safe_filename(fname, os.path.dirname(self.fname))
|
@@ -3211,8 +3370,8 @@ class Reader1:
|
|
3211
3370
|
if not config.dumping:
|
3212
3371
|
# Store the include file in memory for later
|
3213
3372
|
# retrieval by the {include1:} system attribute.
- config.include1[fname] =
-
+ config.include1[fname] = [
+ s.rstrip() for s in open(fname)]
|
3216
3375
|
return '{include1:%s}' % fname
|
3217
3376
|
else:
|
3218
3377
|
# This is a configuration dump, just pass the macro
|
@@ -3225,11 +3384,21 @@ class Reader1:
|
|
3225
3384
|
parent = Reader1()
|
3226
3385
|
assign(parent,self)
|
3227
3386
|
self.parent = parent
|
3387
|
+
# Set attributes in child.
|
3228
3388
|
if attrs.has_key('tabsize'):
|
3229
|
-
self.tabsize = int(validate(attrs['tabsize'],
|
3389
|
+
self.tabsize = int(validate(attrs['tabsize'],
|
3390
|
+
'int($)>=0',
|
3230
3391
|
'illegal include macro tabsize argument'))
|
3392
|
+
else:
|
3393
|
+
self.tabsize = config.tabsize
|
3394
|
+
if attrs.has_key('depth'):
|
3395
|
+
attrs['depth'] = int(validate(attrs['depth'],
|
3396
|
+
'int($)>=1',
|
3397
|
+
'illegal include macro depth argument'))
|
3398
|
+
self.max_depth = self.current_depth + attrs['depth']
|
3399
|
+
# Process included file.
|
3231
3400
|
self.open(fname)
- self.
+ self.current_depth = self.current_depth + 1
|
3233
3402
|
result = Reader1.read(self)
|
3234
3403
|
else:
|
3235
3404
|
if not Reader1.eof(self):
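
The new current_depth/max_depth pair changes the failure mode for nested includes: instead of raising an error at a fixed limit, the reader simply stops expanding include macros once the ceiling is reached, and include::target[depth=N] raises the ceiling relative to the file doing the including. A toy model of just that bookkeeping (the reader itself is omitted):

    class IncludeDepth(object):
        """Illustrative stand-in for Reader1's depth fields."""
        def __init__(self, max_depth=5):
            self.current_depth = 0
            self.max_depth = max_depth
        def may_include(self):
            return self.current_depth < self.max_depth
        def enter(self, depth_attr=None):
            if depth_attr is not None:            # include::file[depth=N]
                self.max_depth = self.current_depth + depth_attr
            self.current_depth += 1

    d = IncludeDepth()
    d.enter(depth_attr=1)     # the top-level file allows one level of includes
    print(d.may_include())    # False: deeper include macros are left unexpanded
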
|
@@ -3453,18 +3622,18 @@ class Writer:
|
|
3453
3622
|
self.lines_out = self.lines_out + 1
|
3454
3623
|
else:
|
3455
3624
|
for arg in args:
- if
+ if is_array(arg):
|
3457
3626
|
for s in arg:
|
3458
3627
|
self.write_line(s)
|
3459
3628
|
elif arg is not None:
|
3460
3629
|
self.write_line(arg)
- def write_tag(self,
- """Write content enveloped by
+ def write_tag(self,tag,content,subs=None,d=None):
+ """Write content enveloped by tag.
|
3463
3632
|
Substitutions specified in the 'subs' list are performed on the
|
3464
3633
|
'content'."""
|
3465
3634
|
if subs is None:
|
3466
3635
|
subs = config.subsnormal
- stag,etag =
+ stag,etag = subs_tag(tag,d)
|
3468
3637
|
if stag:
|
3469
3638
|
self.write(stag)
|
3470
3639
|
if content:
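
The tag passed to write_tag() is a 'starttag|endtag' template (see the [tags] parsing further down in this class); subs_tag() substitutes attributes and splits it into the two halves. A simplified illustration, with str.format standing in for AsciiDoc's {attribute} substitution:

    def subs_tag_sketch(tag, d=None):
        if d:
            tag = tag.format(**d)          # stand-in for attribute substitution
        stag, _, etag = tag.partition('|')
        return stag, etag

    print(subs_tag_sketch('<em>|</em>'))
    # ('<em>', '</em>')
    print(subs_tag_sketch('<div class="{role}">|</div>', {'role': 'note'}))
    # ('<div class="note">', '</div>')
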
|
@@ -3501,11 +3670,12 @@ def _subs_specialwords(mo):
|
|
3501
3670
|
|
3502
3671
|
class Config:
|
3503
3672
|
"""Methods to process configuration files."""
|
3504
|
-
#
|
3505
|
-
|
3673
|
+
# Non-template section name regexp's.
|
3674
|
+
ENTRIES_SECTIONS= ('tags','miscellaneous','attributes','specialcharacters',
|
3506
3675
|
'specialwords','macros','replacements','quotes','titles',
|
3507
|
-
r'paradef
|
3508
|
-
'replacements2'
|
3676
|
+
r'paradef-.+',r'listdef-.+',r'blockdef-.+',r'tabledef-.+',
|
3677
|
+
r'tabletags-.+',r'listtags-.+','replacements2',
|
3678
|
+
r'old_tabledef-.+')
|
3509
3679
|
def __init__(self):
|
3510
3680
|
self.sections = OrderedDict() # Keyed by section name containing
|
3511
3681
|
# lists of section lines.
|
@@ -3514,7 +3684,7 @@ class Config:
|
|
3514
3684
|
self.header_footer = True # -s, --no-header-footer option.
|
3515
3685
|
# [miscellaneous] section.
|
3516
3686
|
self.tabsize = 8
|
3517
|
-
self.textwidth = 70
|
3687
|
+
self.textwidth = 70 # DEPRECATED: Old tables only.
|
3518
3688
|
self.newline = '\r\n'
|
3519
3689
|
self.pagewidth = None
|
3520
3690
|
self.pageunits = None
|
@@ -3530,7 +3700,7 @@ class Config:
|
|
3530
3700
|
self.replacements2 = OrderedDict()
|
3531
3701
|
self.specialsections = {} # Name is special section name pattern, value
|
3532
3702
|
# is corresponding section name.
- self.quotes =
+ self.quotes = OrderedDict() # Values contain corresponding tag name.
|
3534
3704
|
self.fname = '' # Most recently loaded configuration file name.
|
3535
3705
|
self.conf_attrs = {} # Glossary entries from conf files.
|
3536
3706
|
self.cmd_attrs = {} # Attributes from command-line -a options.
|
@@ -3538,7 +3708,7 @@ class Config:
|
|
3538
3708
|
self.include1 = {} # Holds include1::[] files for {include1:}.
|
3539
3709
|
self.dumping = False # True if asciidoc -c option specified.

- def
+ def load_file(self,fname,dir=None):
|
3542
3712
|
"""Loads sections dictionary with sections from file fname.
|
3543
3713
|
Existing sections are overlaid. Silently skips missing configuration
|
3544
3714
|
files."""
|
@@ -3568,12 +3738,11 @@ class Config:
|
|
3568
3738
|
if found:
|
3569
3739
|
if section: # Store previous section.
|
3570
3740
|
if sections.has_key(section) \
- and self.
+ and self.entries_section(section):
|
3572
3742
|
if ''.join(contents):
- # Merge
+ # Merge entries.
sections[section] = sections[section] + contents
else:
- print 'blank section'
|
3577
3746
|
del sections[section]
|
3578
3747
|
else:
|
3579
3748
|
sections[section] = contents
|
@@ -3583,22 +3752,29 @@ class Config:
|
|
3583
3752
|
contents.append(s)
|
3584
3753
|
if section and contents: # Store last section.
|
3585
3754
|
if sections.has_key(section) \
- and self.
+ and self.entries_section(section):
|
3587
3756
|
if ''.join(contents):
- # Merge
+ # Merge entries.
|
3589
3758
|
sections[section] = sections[section] + contents
|
3590
3759
|
else:
|
3591
3760
|
del sections[section]
|
3592
3761
|
else:
|
3593
3762
|
sections[section] = contents
|
3594
3763
|
rdr.close()
|
3595
|
-
|
3764
|
+
self.load_sections(sections)
|
3765
|
+
self.loaded.append(os.path.realpath(fname))
|
3766
|
+
|
3767
|
+
def load_sections(self,sections):
|
3768
|
+
'''Loads sections dictionary. Each dictionary entry contains a
|
3769
|
+
list of lines.
|
3770
|
+
'''
|
3771
|
+
# Delete trailing blank lines from sections.
|
3596
3772
|
for k in sections.keys():
|
3597
3773
|
for i in range(len(sections[k])-1,-1,-1):
|
3598
3774
|
if not sections[k][i]:
|
3599
3775
|
del sections[k][i]
- elif not self.
- break
+ elif not self.entries_section(k):
+ break
|
3602
3778
|
# Add/overwrite new sections.
|
3603
3779
|
self.sections.update(sections)
|
3604
3780
|
self.parse_tags()
|
@@ -3625,27 +3801,27 @@ class Config:
|
|
3625
3801
|
paragraphs.load(sections)
|
3626
3802
|
lists.load(sections)
|
3627
3803
|
blocks.load(sections)
|
3804
|
+
tables_OLD.load(sections)
|
3628
3805
|
tables.load(sections)
|
3629
3806
|
macros.load(sections.get('macros',()))
|
3630
|
-
self.loaded.append(os.path.realpath(fname))
|
3631
3807
|
|
3632
3808
|
def load_all(self,dir):
|
3633
3809
|
"""Load the standard configuration files from directory 'dir'."""
|
3634
|
-
self.
|
3810
|
+
self.load_file('asciidoc.conf',dir)
|
3635
3811
|
conf = document.backend + '.conf'
- self.
+ self.load_file(conf,dir)
|
3637
3813
|
conf = document.backend + '-' + document.doctype + '.conf'
- self.
+ self.load_file(conf,dir)
|
3639
3815
|
lang = document.attributes.get('lang')
|
3640
3816
|
if lang:
|
3641
3817
|
conf = 'lang-' + lang + '.conf'
- self.
+ self.load_file(conf,dir)
|
3643
3819
|
# Load ./filters/*.conf files if they exist.
|
3644
3820
|
filters = os.path.join(dir,'filters')
|
3645
3821
|
if os.path.isdir(filters):
|
3646
3822
|
for f in os.listdir(filters):
|
3647
3823
|
if re.match(r'^.+\.conf$',f):
- self.
+ self.load_file(f,filters)
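
Taken together, load_all() now overlays configuration files in this order, silently skipping any that are missing:

    conf_load_order = [
        'asciidoc.conf',
        '<backend>.conf',
        '<backend>-<doctype>.conf',
        'lang-<lang>.conf',      # only when a lang attribute is set
        'filters/*.conf',        # every *.conf below the filters directory
    ]
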
|
3649
3825
|
|
3650
3826
|
def load_miscellaneous(self,d):
|
3651
3827
|
"""Set miscellaneous configuration entries from dictionary 'd'."""
|
@@ -3657,7 +3833,7 @@ class Config:
|
|
3657
3833
|
else:
|
3658
3834
|
setattr(self, name, validate(d[name],rule,errmsg))
|
3659
3835
|
set_misc('tabsize','int($)>0',intval=True)
|
3660
|
-
set_misc('textwidth','int($)>0',intval=True)
|
3836
|
+
set_misc('textwidth','int($)>0',intval=True) # DEPRECATED: Old tables only.
|
3661
3837
|
set_misc('pagewidth','int($)>0',intval=True)
|
3662
3838
|
set_misc('pageunits')
|
3663
3839
|
set_misc('outfilesuffix')
|
@@ -3707,11 +3883,16 @@ class Config:
|
|
3707
3883
|
paragraphs.validate()
|
3708
3884
|
lists.validate()
|
3709
3885
|
blocks.validate()
|
3886
|
+
tables_OLD.validate()
|
3710
3887
|
tables.validate()
|
3711
3888
|
macros.validate()

- def
-
+ def entries_section(self,section_name):
|
3891
|
+
"""
|
3892
|
+
Return True if conf file section contains entries, not a markup
|
3893
|
+
template.
|
3894
|
+
"""
|
3895
|
+
for name in self.ENTRIES_SECTIONS:
|
3715
3896
|
if re.match(name,section_name):
|
3716
3897
|
return True
|
3717
3898
|
return False
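
entries_section() is a plain prefix-regexp match against the ENTRIES_SECTIONS tuple defined at the top of the class. A self-contained illustration with an abbreviated pattern list:

    import re

    ENTRIES = ('tags', 'miscellaneous', 'attributes', r'paradef-.+',
               r'blockdef-.+', r'old_tabledef-.+')       # abbreviated

    def entries_section(section_name):
        return any(re.match(name, section_name) for name in ENTRIES)

    print(entries_section('blockdef-listing'))   # True: a configuration-entries section
    print(entries_section('listingblock'))       # False: a markup template section
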
|
@@ -3754,11 +3935,12 @@ class Config:
|
|
3754
3935
|
paragraphs.dump()
|
3755
3936
|
lists.dump()
|
3756
3937
|
blocks.dump()
|
3938
|
+
tables_OLD.dump()
|
3757
3939
|
tables.dump()
|
3758
3940
|
macros.dump()
|
3759
3941
|
# Dump remaining sections.
|
3760
3942
|
for k in self.sections.keys():
- if not self.
+ if not self.entries_section(k):
|
3762
3944
|
sys.stdout.write('[%s]%s' % (k,writer.newline))
|
3763
3945
|
for line in self.sections[k]:
|
3764
3946
|
sys.stdout.write('%s%s' % (line,writer.newline))
|
@@ -3782,7 +3964,7 @@ class Config:
|
|
3782
3964
|
if v is None:
|
3783
3965
|
if self.tags.has_key(k):
|
3784
3966
|
del self.tags[k]
- elif v == '
+ elif v == '':
|
3786
3968
|
self.tags[k] = (None,None)
|
3787
3969
|
else:
|
3788
3970
|
mo = re.match(r'(?P<stag>.*)\|(?P<etag>.*)',v)
|
@@ -3836,19 +4018,25 @@ class Config:
|
|
3836
4018
|
|
3837
4019
|
def parse_replacements(self,sect='replacements'):
|
3838
4020
|
"""Parse replacements section into self.replacements dictionary."""
|
3839
|
-
replacements = getattr(self,sect)
|
3840
4021
|
d = OrderedDict()
|
3841
4022
|
parse_entries(self.sections.get(sect,()), d, unquote=True)
|
3842
4023
|
for pat,rep in d.items():
- pat
- if not is_regexp(pat):
+ if not self.set_replacement(pat, rep, getattr(self,sect)):
|
3845
4025
|
raise EAsciiDoc,'[%s] entry in %s is not a valid' \
|
3846
4026
|
' regular expression: %s' % (sect,self.fname,pat)
|
3847
|
-
|
3848
|
-
|
3849
|
-
|
3850
|
-
|
3851
|
-
|
4027
|
+
|
4028
|
+
def set_replacement(pat, rep, replacements):
|
4029
|
+
"""Add pattern and replacement to replacements dictionary."""
|
4030
|
+
pat = strip_quotes(pat)
|
4031
|
+
if not is_regexp(pat):
|
4032
|
+
return False
|
4033
|
+
if rep is None:
|
4034
|
+
if replacements.has_key(pat):
|
4035
|
+
del replacements[pat]
|
4036
|
+
else:
|
4037
|
+
replacements[pat] = strip_quotes(rep)
|
4038
|
+
return True
|
4039
|
+
set_replacement = staticmethod(set_replacement)
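
A condensed model of the set_replacement() contract above (quote stripping and AsciiDoc's OrderedDict omitted): an invalid regexp is rejected, and a None replacement deletes an existing rule.

    import re

    def set_replacement(pat, rep, replacements):
        try:
            re.compile(pat)
        except re.error:
            return False                     # caller reports the bad entry
        if rep is None:
            replacements.pop(pat, None)      # undefining a pattern deletes the rule
        else:
            replacements[pat] = rep
        return True

    rules = {}
    print(set_replacement(r'\(C\)', '&#169;', rules), rules)
    print(set_replacement('(unbalanced', 'x', rules))    # False: not a valid regexp
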
|
3852
4040
|
|
3853
4041
|
def subs_replacements(self,s,sect='replacements'):
|
3854
4042
|
"""Substitute patterns from self.replacements in 's'."""
|
@@ -3970,14 +4158,514 @@ class Config:
|
|
3970
4158
|
return (stag,etag)
|
3971
4159
|
|
3972
4160
|
|
4161
|
+
#---------------------------------------------------------------------------
|
4162
|
+
# Deprecated old table classes follow.
|
4163
|
+
# Naming convention is an _OLD name suffix.
|
4164
|
+
# These will be removed from future versions of AsciiDoc
|
4165
|
+
#
|
4166
|
+
|
4167
|
+
def join_lines_OLD(lines):
|
4168
|
+
"""Return a list in which lines terminated with the backslash line
|
4169
|
+
continuation character are joined."""
|
4170
|
+
result = []
|
4171
|
+
s = ''
|
4172
|
+
continuation = False
|
4173
|
+
for line in lines:
|
4174
|
+
if line and line[-1] == '\\':
|
4175
|
+
s = s + line[:-1]
|
4176
|
+
continuation = True
|
4177
|
+
continue
|
4178
|
+
if continuation:
|
4179
|
+
result.append(s+line)
|
4180
|
+
s = ''
|
4181
|
+
continuation = False
|
4182
|
+
else:
|
4183
|
+
result.append(line)
|
4184
|
+
if continuation:
|
4185
|
+
result.append(s)
|
4186
|
+
return result
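
For example, join_lines_OLD(['first \\', 'second', 'third']) returns ['first second', 'third']; a dangling continuation on the final line is kept as the last element.
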
|
4187
|
+
|
4188
|
+
class Column_OLD:
|
4189
|
+
"""Table column."""
|
4190
|
+
def __init__(self):
|
4191
|
+
self.colalign = None # 'left','right','center'
|
4192
|
+
self.rulerwidth = None
|
4193
|
+
self.colwidth = None # Output width in page units.
|
4194
|
+
|
4195
|
+
class Table_OLD(AbstractBlock):
|
4196
|
+
COL_STOP = r"(`|'|\.)" # RE.
|
4197
|
+
ALIGNMENTS = {'`':'left', "'":'right', '.':'center'}
|
4198
|
+
FORMATS = ('fixed','csv','dsv')
|
4199
|
+
def __init__(self):
|
4200
|
+
AbstractBlock.__init__(self)
|
4201
|
+
self.CONF_ENTRIES += ('template','fillchar','format','colspec',
|
4202
|
+
'headrow','footrow','bodyrow','headdata',
|
4203
|
+
'footdata', 'bodydata')
|
4204
|
+
# Configuration parameters.
|
4205
|
+
self.fillchar=None
|
4206
|
+
self.format=None # 'fixed','csv','dsv'
|
4207
|
+
self.colspec=None
|
4208
|
+
self.headrow=None
|
4209
|
+
self.footrow=None
|
4210
|
+
self.bodyrow=None
|
4211
|
+
self.headdata=None
|
4212
|
+
self.footdata=None
|
4213
|
+
self.bodydata=None
|
4214
|
+
# Calculated parameters.
|
4215
|
+
self.underline=None # RE matching current table underline.
|
4216
|
+
self.isnumeric=False # True if numeric ruler.
|
4217
|
+
self.tablewidth=None # Optional table width scale factor.
|
4218
|
+
self.columns=[] # List of Columns.
|
4219
|
+
# Other.
|
4220
|
+
self.check_msg='' # Message set by previous self.validate() call.
|
4221
|
+
def load(self,name,entries):
|
4222
|
+
AbstractBlock.load(self,name,entries)
|
4223
|
+
"""Update table definition from section entries in 'entries'."""
|
4224
|
+
for k,v in entries.items():
|
4225
|
+
if k == 'fillchar':
|
4226
|
+
if v and len(v) == 1:
|
4227
|
+
self.fillchar = v
|
4228
|
+
else:
|
4229
|
+
raise EAsciiDoc,'malformed table fillchar: %s' % v
|
4230
|
+
elif k == 'format':
|
4231
|
+
if v in Table_OLD.FORMATS:
|
4232
|
+
self.format = v
|
4233
|
+
else:
|
4234
|
+
raise EAsciiDoc,'illegal table format: %s' % v
|
4235
|
+
elif k == 'colspec':
|
4236
|
+
self.colspec = v
|
4237
|
+
elif k == 'headrow':
|
4238
|
+
self.headrow = v
|
4239
|
+
elif k == 'footrow':
|
4240
|
+
self.footrow = v
|
4241
|
+
elif k == 'bodyrow':
|
4242
|
+
self.bodyrow = v
|
4243
|
+
elif k == 'headdata':
|
4244
|
+
self.headdata = v
|
4245
|
+
elif k == 'footdata':
|
4246
|
+
self.footdata = v
|
4247
|
+
elif k == 'bodydata':
|
4248
|
+
self.bodydata = v
|
4249
|
+
def dump(self):
|
4250
|
+
AbstractBlock.dump(self)
|
4251
|
+
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
|
4252
|
+
write('fillchar='+self.fillchar)
|
4253
|
+
write('format='+self.format)
|
4254
|
+
if self.colspec:
|
4255
|
+
write('colspec='+self.colspec)
|
4256
|
+
if self.headrow:
|
4257
|
+
write('headrow='+self.headrow)
|
4258
|
+
if self.footrow:
|
4259
|
+
write('footrow='+self.footrow)
|
4260
|
+
write('bodyrow='+self.bodyrow)
|
4261
|
+
if self.headdata:
|
4262
|
+
write('headdata='+self.headdata)
|
4263
|
+
if self.footdata:
|
4264
|
+
write('footdata='+self.footdata)
|
4265
|
+
write('bodydata='+self.bodydata)
|
4266
|
+
write('')
|
4267
|
+
def validate(self):
|
4268
|
+
AbstractBlock.validate(self)
|
4269
|
+
"""Check table definition and set self.check_msg if invalid else set
|
4270
|
+
self.check_msg to blank string."""
|
4271
|
+
# Check global table parameters.
|
4272
|
+
if config.textwidth is None:
|
4273
|
+
self.check_msg = 'missing [miscellaneous] textwidth entry'
|
4274
|
+
elif config.pagewidth is None:
|
4275
|
+
self.check_msg = 'missing [miscellaneous] pagewidth entry'
|
4276
|
+
elif config.pageunits is None:
|
4277
|
+
self.check_msg = 'missing [miscellaneous] pageunits entry'
|
4278
|
+
elif self.headrow is None:
|
4279
|
+
self.check_msg = 'missing headrow entry'
|
4280
|
+
elif self.footrow is None:
|
4281
|
+
self.check_msg = 'missing footrow entry'
|
4282
|
+
elif self.bodyrow is None:
|
4283
|
+
self.check_msg = 'missing bodyrow entry'
|
4284
|
+
elif self.headdata is None:
|
4285
|
+
self.check_msg = 'missing headdata entry'
|
4286
|
+
elif self.footdata is None:
|
4287
|
+
self.check_msg = 'missing footdata entry'
|
4288
|
+
elif self.bodydata is None:
|
4289
|
+
self.check_msg = 'missing bodydata entry'
|
4290
|
+
else:
|
4291
|
+
# No errors.
|
4292
|
+
self.check_msg = ''
|
4293
|
+
def isnext(self):
|
4294
|
+
return AbstractBlock.isnext(self)
|
4295
|
+
def parse_ruler(self,ruler):
|
4296
|
+
"""Parse ruler calculating underline and ruler column widths."""
|
4297
|
+
fc = re.escape(self.fillchar)
|
4298
|
+
# Strip and save optional tablewidth from end of ruler.
|
4299
|
+
mo = re.match(r'^(.*'+fc+r'+)([\d\.]+)$',ruler)
|
4300
|
+
if mo:
|
4301
|
+
ruler = mo.group(1)
|
4302
|
+
self.tablewidth = float(mo.group(2))
|
4303
|
+
self.attributes['tablewidth'] = str(float(self.tablewidth))
|
4304
|
+
else:
|
4305
|
+
self.tablewidth = None
|
4306
|
+
self.attributes['tablewidth'] = '100.0'
|
4307
|
+
# Guess whether column widths are specified numerically or not.
|
4308
|
+
if ruler[1] != self.fillchar:
|
4309
|
+
# If the first column does not start with a fillchar then numeric.
|
4310
|
+
self.isnumeric = True
|
4311
|
+
elif ruler[1:] == self.fillchar*len(ruler[1:]):
|
4312
|
+
# The case of one column followed by fillchars is numeric.
|
4313
|
+
self.isnumeric = True
|
4314
|
+
else:
|
4315
|
+
self.isnumeric = False
|
4316
|
+
# Underlines must be 3 or more fillchars.
|
4317
|
+
self.underline = r'^' + fc + r'{3,}$'
|
4318
|
+
splits = re.split(self.COL_STOP,ruler)[1:]
|
4319
|
+
# Build self.columns.
|
4320
|
+
for i in range(0,len(splits),2):
|
4321
|
+
c = Column_OLD()
|
4322
|
+
c.colalign = self.ALIGNMENTS[splits[i]]
|
4323
|
+
s = splits[i+1]
|
4324
|
+
if self.isnumeric:
|
4325
|
+
# Strip trailing fillchars.
|
4326
|
+
s = re.sub(fc+r'+$','',s)
|
4327
|
+
if s == '':
|
4328
|
+
c.rulerwidth = None
|
4329
|
+
else:
|
4330
|
+
c.rulerwidth = int(validate(s,'int($)>0',
|
4331
|
+
'malformed ruler: bad width'))
|
4332
|
+
else: # Calculate column width from inter-fillchar intervals.
|
4333
|
+
if not re.match(r'^'+fc+r'+$',s):
|
4334
|
+
raise EAsciiDoc,'malformed ruler: illegal fillchars'
|
4335
|
+
c.rulerwidth = len(s)+1
|
4336
|
+
self.columns.append(c)
|
4337
|
+
# Fill in unspecified ruler widths.
|
4338
|
+
if self.isnumeric:
|
4339
|
+
if self.columns[0].rulerwidth is None:
|
4340
|
+
prevwidth = 1
|
4341
|
+
for c in self.columns:
|
4342
|
+
if c.rulerwidth is None:
|
4343
|
+
c.rulerwidth = prevwidth
|
4344
|
+
prevwidth = c.rulerwidth
|
4345
|
+
def build_colspecs(self):
|
4346
|
+
"""Generate colwidths and colspecs. This can only be done after the
|
4347
|
+
table arguments have been parsed since we use the table format."""
|
4348
|
+
self.attributes['cols'] = len(self.columns)
|
4349
|
+
# Calculate total ruler width.
|
4350
|
+
totalwidth = 0
|
4351
|
+
for c in self.columns:
|
4352
|
+
totalwidth = totalwidth + c.rulerwidth
|
4353
|
+
if totalwidth <= 0:
|
4354
|
+
raise EAsciiDoc,'zero width table'
|
4355
|
+
# Calculate marked up colwidths from rulerwidths.
|
4356
|
+
for c in self.columns:
|
4357
|
+
# Convert ruler width to output page width.
|
4358
|
+
width = float(c.rulerwidth)
|
4359
|
+
if self.format == 'fixed':
|
4360
|
+
if self.tablewidth is None:
|
4361
|
+
# Size proportional to ruler width.
|
4362
|
+
colfraction = width/config.textwidth
|
4363
|
+
else:
|
4364
|
+
# Size proportional to page width.
|
4365
|
+
colfraction = width/totalwidth
|
4366
|
+
else:
|
4367
|
+
# Size proportional to page width.
|
4368
|
+
colfraction = width/totalwidth
|
4369
|
+
c.colwidth = colfraction * config.pagewidth # To page units.
|
4370
|
+
if self.tablewidth is not None:
|
4371
|
+
c.colwidth = c.colwidth * self.tablewidth # Scale factor.
|
4372
|
+
if self.tablewidth > 1:
|
4373
|
+
c.colwidth = c.colwidth/100 # tablewidth is in percent.
|
4374
|
+
# Build colspecs.
|
4375
|
+
if self.colspec:
|
4376
|
+
cols = []
|
4377
|
+
i = 0
|
4378
|
+
for c in self.columns:
|
4379
|
+
i += 1
|
4380
|
+
self.attributes['colalign'] = c.colalign
|
4381
|
+
self.attributes['colwidth'] = str(int(c.colwidth))
|
4382
|
+
self.attributes['colnumber'] = str(i + 1)
|
4383
|
+
s = subs_attrs(self.colspec,self.attributes)
|
4384
|
+
if not s:
|
4385
|
+
warning('colspec dropped: contains undefined attribute')
|
4386
|
+
else:
|
4387
|
+
cols.append(s)
|
4388
|
+
self.attributes['colspecs'] = writer.newline.join(cols)
|
4389
|
+
def split_rows(self,rows):
|
4390
|
+
"""Return a two item tuple containing a list of lines up to but not
|
4391
|
+
including the next underline (continued lines are joined ) and the
|
4392
|
+
tuple of all lines after the underline."""
|
4393
|
+
reo = re.compile(self.underline)
|
4394
|
+
i = 0
|
4395
|
+
while not reo.match(rows[i]):
|
4396
|
+
i = i+1
|
4397
|
+
if i == 0:
|
4398
|
+
raise EAsciiDoc,'missing table rows'
|
4399
|
+
if i >= len(rows):
|
4400
|
+
raise EAsciiDoc,'closing [%s] underline expected' % self.name
|
4401
|
+
return (join_lines_OLD(rows[:i]), rows[i+1:])
|
4402
|
+
def parse_rows(self, rows, rtag, dtag):
|
4403
|
+
"""Parse rows list using the row and data tags. Returns a substituted
|
4404
|
+
list of output lines."""
|
4405
|
+
result = []
|
4406
|
+
# Source rows are parsed as single block, rather than line by line, to
|
4407
|
+
# allow the CSV reader to handle multi-line rows.
|
4408
|
+
if self.format == 'fixed':
|
4409
|
+
rows = self.parse_fixed(rows)
|
4410
|
+
elif self.format == 'csv':
|
4411
|
+
rows = self.parse_csv(rows)
|
4412
|
+
elif self.format == 'dsv':
|
4413
|
+
rows = self.parse_dsv(rows)
|
4414
|
+
else:
|
4415
|
+
assert True,'illegal table format'
|
4416
|
+
# Substitute and indent all data in all rows.
|
4417
|
+
stag,etag = subs_tag(rtag,self.attributes)
|
4418
|
+
for row in rows:
|
4419
|
+
result.append(' '+stag)
|
4420
|
+
for data in self.subs_row(row,dtag):
|
4421
|
+
result.append(' '+data)
|
4422
|
+
result.append(' '+etag)
|
4423
|
+
return result
|
4424
|
+
def subs_row(self, data, dtag):
|
4425
|
+
"""Substitute the list of source row data elements using the data tag.
|
4426
|
+
Returns a substituted list of output table data items."""
|
4427
|
+
result = []
|
4428
|
+
if len(data) < len(self.columns):
|
4429
|
+
warning('fewer row data items than table columns')
|
4430
|
+
if len(data) > len(self.columns):
|
4431
|
+
warning('more row data items than table columns')
|
4432
|
+
for i in range(len(self.columns)):
|
4433
|
+
if i > len(data) - 1:
|
4434
|
+
d = '' # Fill missing column data with blanks.
|
4435
|
+
else:
|
4436
|
+
d = data[i]
|
4437
|
+
c = self.columns[i]
|
4438
|
+
self.attributes['colalign'] = c.colalign
|
4439
|
+
self.attributes['colwidth'] = str(int(c.colwidth))
|
4440
|
+
self.attributes['colnumber'] = str(i + 1)
|
4441
|
+
stag,etag = subs_tag(dtag,self.attributes)
|
4442
|
+
# Insert AsciiDoc line break (' +') where row data has newlines
|
4443
|
+
# ('\n'). This is really only useful when the table format is csv
|
4444
|
+
# and the output markup is HTML. It's also a bit dubious in that it
|
4445
|
+
# assumes the user has not modified the shipped line break pattern.
|
4446
|
+
subs = self.get_subs()[0]
|
4447
|
+
if 'replacements' in subs:
|
4448
|
+
# Insert line breaks in cell data.
|
4449
|
+
d = re.sub(r'(?m)\n',r' +\n',d)
|
4450
|
+
d = d.split('\n') # So writer.newline is written.
|
4451
|
+
else:
|
4452
|
+
d = [d]
|
4453
|
+
result = result + [stag] + Lex.subs(d,subs) + [etag]
|
4454
|
+
return result
|
4455
|
+
def parse_fixed(self,rows):
|
4456
|
+
"""Parse the list of source table rows. Each row item in the returned
|
4457
|
+
list contains a list of cell data elements."""
|
4458
|
+
result = []
|
4459
|
+
for row in rows:
|
4460
|
+
data = []
|
4461
|
+
start = 0
|
4462
|
+
# build an encoded representation
|
4463
|
+
row = char_decode(row)
|
4464
|
+
for c in self.columns:
|
4465
|
+
end = start + c.rulerwidth
|
4466
|
+
if c is self.columns[-1]:
|
4467
|
+
# Text in last column can continue forever.
|
4468
|
+
# Use the encoded string to slice, but convert back
|
4469
|
+
# to plain string before further processing
|
4470
|
+
data.append(char_encode(row[start:]).strip())
|
4471
|
+
else:
|
4472
|
+
data.append(char_encode(row[start:end]).strip())
|
4473
|
+
start = end
|
4474
|
+
result.append(data)
|
4475
|
+
return result
|
4476
|
+
def parse_csv(self,rows):
|
4477
|
+
"""Parse the list of source table rows. Each row item in the returned
|
4478
|
+
list contains a list of cell data elements."""
|
4479
|
+
import StringIO
|
4480
|
+
import csv
|
4481
|
+
result = []
|
4482
|
+
rdr = csv.reader(StringIO.StringIO('\r\n'.join(rows)),
|
4483
|
+
skipinitialspace=True)
|
4484
|
+
try:
|
4485
|
+
for row in rdr:
|
4486
|
+
result.append(row)
|
4487
|
+
except:
|
4488
|
+
raise EAsciiDoc,'csv parse error: %s' % row
|
4489
|
+
return result
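
Joining the rows and handing them to the csv module in one block (rather than line by line) is what lets quoted multi-line cells survive. The same idea in Python 3 terms, with io.StringIO in place of the StringIO module used above:

    import csv
    import io

    def parse_csv_sketch(rows):
        data = io.StringIO('\r\n'.join(rows))
        return [row for row in csv.reader(data, skipinitialspace=True)]

    print(parse_csv_sketch(['a, "multi', 'line", c']))
    # [['a', 'multi\r\nline', 'c']]
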
|
4490
|
+
def parse_dsv(self,rows):
|
4491
|
+
"""Parse the list of source table rows. Each row item in the returned
|
4492
|
+
list contains a list of cell data elements."""
|
4493
|
+
separator = self.attributes.get('separator',':')
|
4494
|
+
separator = eval('"'+separator+'"')
|
4495
|
+
if len(separator) != 1:
|
4496
|
+
raise EAsciiDoc,'malformed dsv separator: %s' % separator
|
4497
|
+
# TODO If separator is preceded by an odd number of backslashes then
|
4498
|
+
# it is escaped and should not delimit.
|
4499
|
+
result = []
|
4500
|
+
for row in rows:
|
4501
|
+
# Skip blank lines
|
4502
|
+
if row == '': continue
|
4503
|
+
# Unescape escaped characters.
|
4504
|
+
row = eval('"'+row.replace('"','\\"')+'"')
|
4505
|
+
data = row.split(separator)
|
4506
|
+
data = [s.strip() for s in data]
|
4507
|
+
result.append(data)
|
4508
|
+
return result
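
Stripped of the eval()-based unescaping, DSV parsing reduces to a one-character split per row; a minimal model:

    def parse_dsv_sketch(rows, separator=':'):
        result = []
        for row in rows:
            if row == '':
                continue                     # skip blank lines
            result.append([cell.strip() for cell in row.split(separator)])
        return result

    print(parse_dsv_sketch(['a: b :c', '', '1:2:3']))
    # [['a', 'b', 'c'], ['1', '2', '3']]
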
|
4509
|
+
def translate(self):
|
4510
|
+
deprecated('old tables syntax')
|
4511
|
+
AbstractBlock.translate(self)
|
4512
|
+
# Reset instance specific properties.
|
4513
|
+
self.underline = None
|
4514
|
+
self.columns = []
|
4515
|
+
attrs = {}
|
4516
|
+
BlockTitle.consume(attrs)
|
4517
|
+
# Add relevant globals to table substitutions.
|
4518
|
+
attrs['pagewidth'] = str(config.pagewidth)
|
4519
|
+
attrs['pageunits'] = config.pageunits
|
4520
|
+
# Mix in document attribute list.
|
4521
|
+
AttributeList.consume(attrs)
|
4522
|
+
# Validate overridable attributes.
|
4523
|
+
for k,v in attrs.items():
|
4524
|
+
if k == 'format':
|
4525
|
+
if v not in self.FORMATS:
|
4526
|
+
raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
|
4527
|
+
self.format = v
|
4528
|
+
elif k == 'tablewidth':
|
4529
|
+
try:
|
4530
|
+
self.tablewidth = float(attrs['tablewidth'])
|
4531
|
+
except:
|
4532
|
+
raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
|
4533
|
+
self.merge_attributes(attrs)
|
4534
|
+
# Parse table ruler.
|
4535
|
+
ruler = reader.read()
|
4536
|
+
assert re.match(self.delimiter,ruler)
|
4537
|
+
self.parse_ruler(ruler)
|
4538
|
+
# Read the entire table.
|
4539
|
+
table = []
|
4540
|
+
while True:
|
4541
|
+
line = reader.read_next()
|
4542
|
+
# Table terminated by underline followed by a blank line or EOF.
|
4543
|
+
if len(table) > 0 and re.match(self.underline,table[-1]):
|
4544
|
+
if line in ('',None):
|
4545
|
+
break;
|
4546
|
+
if line is None:
|
4547
|
+
raise EAsciiDoc,'closing [%s] underline expected' % self.name
|
4548
|
+
table.append(reader.read())
|
4549
|
+
# EXPERIMENTAL: The number of lines in the table, requested by Benjamin Klum.
|
4550
|
+
self.attributes['rows'] = str(len(table))
|
4551
|
+
#TODO: Inherited validate() doesn't set check_msg, needs checking.
|
4552
|
+
if self.check_msg: # Skip if table definition was marked invalid.
|
4553
|
+
warning('skipping %s table: %s' % (self.name,self.check_msg))
|
4554
|
+
return
|
4555
|
+
# Generate colwidths and colspecs.
|
4556
|
+
self.build_colspecs()
|
4557
|
+
# Generate headrows, footrows, bodyrows.
|
4558
|
+
# Headrow, footrow and bodyrow data replaces same named attributes in
|
4559
|
+
# the table markup template. In order to ensure this data does not get
|
4560
|
+
# a second attribute substitution (which would interfere with any
|
4561
|
+
# already substituted inline passthroughs) unique placeholders are used
|
4562
|
+
# (the tab character does not appear elsewhere since it is expanded on
|
4563
|
+
# input) which are replaced after template attribute substitution.
|
4564
|
+
headrows = footrows = []
|
4565
|
+
bodyrows,table = self.split_rows(table)
|
4566
|
+
if table:
|
4567
|
+
headrows = bodyrows
|
4568
|
+
bodyrows,table = self.split_rows(table)
|
4569
|
+
if table:
|
4570
|
+
footrows,table = self.split_rows(table)
|
4571
|
+
if headrows:
|
4572
|
+
headrows = self.parse_rows(headrows, self.headrow, self.headdata)
|
4573
|
+
headrows = writer.newline.join(headrows)
|
4574
|
+
self.attributes['headrows'] = '\theadrows\t'
|
4575
|
+
if footrows:
|
4576
|
+
footrows = self.parse_rows(footrows, self.footrow, self.footdata)
|
4577
|
+
footrows = writer.newline.join(footrows)
|
4578
|
+
self.attributes['footrows'] = '\tfootrows\t'
|
4579
|
+
bodyrows = self.parse_rows(bodyrows, self.bodyrow, self.bodydata)
|
4580
|
+
bodyrows = writer.newline.join(bodyrows)
|
4581
|
+
self.attributes['bodyrows'] = '\tbodyrows\t'
|
4582
|
+
table = subs_attrs(config.sections[self.template],self.attributes)
|
4583
|
+
table = writer.newline.join(table)
|
4584
|
+
# Before we finish replace the table head, foot and body place holders
|
4585
|
+
# with the real data.
|
4586
|
+
if headrows:
|
4587
|
+
table = table.replace('\theadrows\t', headrows, 1)
|
4588
|
+
if footrows:
|
4589
|
+
table = table.replace('\tfootrows\t', footrows, 1)
|
4590
|
+
table = table.replace('\tbodyrows\t', bodyrows, 1)
|
4591
|
+
writer.write(table)
|
4592
|
+
|
4593
|
+
class Tables_OLD(AbstractBlocks):
|
4594
|
+
"""List of tables."""
|
4595
|
+
BLOCK_TYPE = Table_OLD
|
4596
|
+
PREFIX = 'old_tabledef-'
|
4597
|
+
def __init__(self):
|
4598
|
+
AbstractBlocks.__init__(self)
|
4599
|
+
def load(self,sections):
|
4600
|
+
AbstractBlocks.load(self,sections)
|
4601
|
+
def validate(self):
|
4602
|
+
# Does not call AbstractBlocks.validate().
|
4603
|
+
# Check we have a default table definition,
|
4604
|
+
for i in range(len(self.blocks)):
|
4605
|
+
if self.blocks[i].name == 'old_tabledef-default':
|
4606
|
+
default = self.blocks[i]
|
4607
|
+
break
|
4608
|
+
else:
|
4609
|
+
raise EAsciiDoc,'missing [OLD_tabledef-default] section'
|
4610
|
+
# Set default table defaults.
|
4611
|
+
if default.format is None: default.subs = 'fixed'
|
4612
|
+
# Propagate defaults to unspecified table parameters.
|
4613
|
+
for b in self.blocks:
|
4614
|
+
if b is not default:
|
4615
|
+
if b.fillchar is None: b.fillchar = default.fillchar
|
4616
|
+
if b.format is None: b.format = default.format
|
4617
|
+
if b.template is None: b.template = default.template
|
4618
|
+
if b.colspec is None: b.colspec = default.colspec
|
4619
|
+
if b.headrow is None: b.headrow = default.headrow
|
4620
|
+
if b.footrow is None: b.footrow = default.footrow
|
4621
|
+
if b.bodyrow is None: b.bodyrow = default.bodyrow
|
4622
|
+
if b.headdata is None: b.headdata = default.headdata
|
4623
|
+
if b.footdata is None: b.footdata = default.footdata
|
4624
|
+
if b.bodydata is None: b.bodydata = default.bodydata
|
4625
|
+
# Check all tables have valid fill character.
|
4626
|
+
for b in self.blocks:
|
4627
|
+
if not b.fillchar or len(b.fillchar) != 1:
|
4628
|
+
raise EAsciiDoc,'[%s] missing or illegal fillchar' % b.name
|
4629
|
+
# Build combined tables delimiter patterns and assign defaults.
|
4630
|
+
delimiters = []
|
4631
|
+
for b in self.blocks:
|
4632
|
+
# Ruler is:
|
4633
|
+
# (ColStop,(ColWidth,FillChar+)?)+, FillChar+, TableWidth?
|
4634
|
+
b.delimiter = r'^(' + Table_OLD.COL_STOP \
|
4635
|
+
+ r'(\d*|' + re.escape(b.fillchar) + r'*)' \
|
4636
|
+
+ r')+' \
|
4637
|
+
+ re.escape(b.fillchar) + r'+' \
|
4638
|
+
+ '([\d\.]*)$'
|
4639
|
+
delimiters.append(b.delimiter)
|
4640
|
+
if not b.headrow:
|
4641
|
+
b.headrow = b.bodyrow
|
4642
|
+
if not b.footrow:
|
4643
|
+
b.footrow = b.bodyrow
|
4644
|
+
if not b.headdata:
|
4645
|
+
b.headdata = b.bodydata
|
4646
|
+
if not b.footdata:
|
4647
|
+
b.footdata = b.bodydata
|
4648
|
+
self.delimiter = join_regexp(delimiters)
|
4649
|
+
# Check table definitions are valid.
|
4650
|
+
for b in self.blocks:
|
4651
|
+
b.validate()
|
4652
|
+
if config.verbose:
|
4653
|
+
if b.check_msg:
|
4654
|
+
warning('[%s] table definition: %s' % (b.name,b.check_msg))
|
4655
|
+
|
4656
|
+
# End of deprecated old table classes.
|
4657
|
+
#---------------------------------------------------------------------------
|
4658
|
+
|
3973
4659
|
#---------------------------------------------------------------------------
|
3974
4660
|
# Application code.
|
3975
4661
|
#---------------------------------------------------------------------------
|
3976
4662
|
# Constants
|
3977
4663
|
# ---------
|
4664
|
+
APP_FILE = None # This file's full path.
|
3978
4665
|
APP_DIR = None # This file's directory.
|
3979
4666
|
USER_DIR = None # ~/.asciidoc
-
+ # Global configuration files directory (set by Makefile build target).
+ CONF_DIR = '/etc/asciidoc'
|
3981
4669
|
HELP_FILE = 'help.conf' # Default (English) help file.
|
3982
4670
|
|
3983
4671
|
# Globals
|
@@ -3989,6 +4677,7 @@ writer = Writer() # Output stream line writer.
|
|
3989
4677
|
paragraphs = Paragraphs() # Paragraph definitions.
|
3990
4678
|
lists = Lists() # List definitions.
|
3991
4679
|
blocks = DelimitedBlocks() # DelimitedBlock definitions.
|
4680
|
+
tables_OLD = Tables_OLD() # Table_OLD definitions.
|
3992
4681
|
tables = Tables() # Table definitions.
|
3993
4682
|
macros = Macros() # Macro definitions.
|
3994
4683
|
calloutmap = CalloutMap() # Coordinates callouts and callout list.
|
@@ -4031,13 +4720,13 @@ def asciidoc(backend, doctype, confiles, infile, outfile, options):
|
|
4031
4720
|
config.load_all(os.path.dirname(infile))
|
4032
4721
|
if infile != '<stdin>':
|
4033
4722
|
# Load implicit document specific configuration files if they exist.
- config.
- config.
+ config.load_file(os.path.splitext(infile)[0] + '.conf')
+ config.load_file(os.path.splitext(infile)[0] + '-' + backend + '.conf')
|
4036
4725
|
# If user specified configuration file(s) overlay the defaults.
|
4037
4726
|
if confiles:
|
4038
4727
|
for conf in confiles:
|
4039
4728
|
if os.path.isfile(conf):
- config.
+ config.load_file(conf)
|
4041
4730
|
else:
|
4042
4731
|
raise EAsciiDoc,'configuration file %s missing' % conf
|
4043
4732
|
document.init_attrs() # Add conf files.
|
@@ -4131,8 +4820,9 @@ def main():
|
|
4131
4820
|
print_stderr('FAILED: Python 2.3 or better required.')
|
4132
4821
|
sys.exit(1)
|
4133
4822
|
# Locate the executable and configuration files directory.
- global APP_DIR,USER_DIR
-
+ global APP_FILE,APP_DIR,USER_DIR
+ APP_FILE = os.path.realpath(sys.argv[0])
+ APP_DIR = os.path.dirname(APP_FILE)
|
4136
4826
|
USER_DIR = os.environ.get('HOME')
|
4137
4827
|
if USER_DIR is not None:
|
4138
4828
|
USER_DIR = os.path.join(USER_DIR,'.asciidoc')
|