mizuho 0.9.6 → 0.9.8
- data/README.markdown +4 -2
- data/Rakefile +0 -37
- data/asciidoc/BUGS +9 -18
- data/asciidoc/BUGS.txt +5 -14
- data/asciidoc/CHANGELOG +1433 -266
- data/asciidoc/CHANGELOG.txt +2821 -1810
- data/asciidoc/COPYING +339 -339
- data/asciidoc/COPYRIGHT +18 -18
- data/asciidoc/INSTALL +189 -30
- data/asciidoc/INSTALL.txt +175 -19
- data/asciidoc/MANIFEST +88 -0
- data/asciidoc/Makefile.in +29 -13
- data/asciidoc/README +16 -17
- data/asciidoc/README.txt +35 -36
- data/asciidoc/a2x.py +902 -0
- data/asciidoc/asciidoc.conf +610 -455
- data/asciidoc/asciidoc.py +1965 -1061
- data/asciidoc/asciidocapi.py +257 -0
- data/asciidoc/common.aap +2 -2
- data/asciidoc/configure +1170 -1023
- data/asciidoc/configure.ac +1 -1
- data/asciidoc/dblatex/asciidoc-dblatex.xsl +42 -0
- data/asciidoc/doc/a2x.1 +559 -102
- data/asciidoc/doc/a2x.1.txt +233 -91
- data/asciidoc/doc/article-docinfo.xml +87 -0
- data/asciidoc/doc/article.pdf +0 -0
- data/asciidoc/doc/article.txt +139 -122
- data/asciidoc/doc/asciidoc.1 +157 -46
- data/asciidoc/doc/asciidoc.1.txt +171 -118
- data/asciidoc/doc/asciidoc.conf +7 -8
- data/asciidoc/doc/asciidoc.dict +960 -635
- data/asciidoc/doc/asciidoc.txt +2335 -1464
- data/asciidoc/doc/asciidocapi.txt +189 -0
- data/asciidoc/doc/asciimathml.txt +14 -17
- data/asciidoc/doc/book-multi.txt +181 -155
- data/asciidoc/doc/book.epub +0 -0
- data/asciidoc/doc/book.txt +156 -131
- data/asciidoc/doc/customers.csv +18 -18
- data/asciidoc/doc/epub-notes.txt +210 -0
- data/asciidoc/doc/faq.txt +1122 -547
- data/asciidoc/doc/latex-backend.txt +192 -191
- data/asciidoc/doc/latex-bugs.txt +134 -0
- data/asciidoc/doc/latex-filter.pdf +0 -0
- data/asciidoc/doc/latex-filter.txt +196 -0
- data/asciidoc/doc/latexmath.txt +13 -136
- data/asciidoc/doc/latexmathml.txt +2 -2
- data/asciidoc/doc/main.aap +233 -297
- data/asciidoc/doc/music-filter.pdf +0 -0
- data/asciidoc/doc/music-filter.txt +55 -65
- data/asciidoc/doc/publishing-ebooks-with-asciidoc.txt +398 -0
- data/asciidoc/doc/slidy-example.txt +167 -0
- data/asciidoc/doc/slidy.txt +113 -0
- data/asciidoc/doc/source-highlight-filter.pdf +0 -0
- data/asciidoc/doc/source-highlight-filter.txt +45 -20
- data/asciidoc/doc/testasciidoc.txt +231 -0
- data/asciidoc/docbook-xsl/asciidoc-docbook-xsl.txt +30 -8
- data/asciidoc/docbook-xsl/chunked.xsl +17 -19
- data/asciidoc/docbook-xsl/common.xsl +106 -67
- data/asciidoc/docbook-xsl/epub.xsl +35 -0
- data/asciidoc/docbook-xsl/fo.xsl +3 -0
- data/asciidoc/docbook-xsl/htmlhelp.xsl +16 -17
- data/asciidoc/docbook-xsl/manpage.xsl +31 -31
- data/asciidoc/docbook-xsl/text.xsl +6 -1
- data/asciidoc/docbook-xsl/xhtml.xsl +14 -14
- data/asciidoc/docbook45.conf +759 -0
- data/asciidoc/filters/code/code-filter-readme.txt +37 -37
- data/asciidoc/filters/code/code-filter-test.txt +15 -15
- data/asciidoc/filters/code/code-filter.conf +8 -8
- data/asciidoc/filters/graphviz/asciidoc-graphviz-sample.txt +104 -64
- data/asciidoc/filters/graphviz/graphviz-filter.conf +20 -16
- data/asciidoc/filters/graphviz/graphviz2png.py +54 -39
- data/asciidoc/filters/latex/latex-filter.conf +18 -0
- data/asciidoc/filters/latex/latex2png.py +216 -0
- data/asciidoc/filters/music/music-filter.conf +8 -17
- data/asciidoc/filters/music/music2png.py +40 -32
- data/asciidoc/filters/source/source-highlight-filter.conf +40 -34
- data/asciidoc/help.conf +261 -96
- data/asciidoc/html4.conf +505 -460
- data/asciidoc/html5.conf +686 -0
- data/asciidoc/images/highlighter.png +0 -0
- data/asciidoc/images/icons/caution.png +0 -0
- data/asciidoc/images/icons/example.png +0 -0
- data/asciidoc/images/icons/important.png +0 -0
- data/asciidoc/images/icons/note.png +0 -0
- data/asciidoc/images/icons/tip.png +0 -0
- data/asciidoc/images/icons/warning.png +0 -0
- data/asciidoc/images/smallnew.png +0 -0
- data/asciidoc/javascripts/asciidoc.js +189 -0
- data/asciidoc/javascripts/slidy.js +2845 -0
- data/asciidoc/javascripts/toc.js +8 -0
- data/asciidoc/lang-de.conf +57 -0
- data/asciidoc/lang-en.conf +54 -0
- data/asciidoc/lang-es.conf +49 -9
- data/asciidoc/lang-fr.conf +59 -0
- data/asciidoc/lang-hu.conf +55 -0
- data/asciidoc/lang-it.conf +55 -0
- data/asciidoc/lang-nl.conf +57 -0
- data/asciidoc/lang-pt-BR.conf +56 -0
- data/asciidoc/lang-ru.conf +60 -0
- data/asciidoc/lang-uk.conf +60 -0
- data/asciidoc/latex.conf +700 -663
- data/asciidoc/main.aap +77 -0
- data/asciidoc/slidy.conf +136 -0
- data/asciidoc/{examples/website/xhtml11-manpage.css → stylesheets/asciidoc-manpage.css} +1 -1
- data/asciidoc/stylesheets/asciidoc.css +508 -0
- data/asciidoc/stylesheets/docbook-xsl.css +322 -272
- data/asciidoc/stylesheets/flask-manpage.css +1 -0
- data/asciidoc/stylesheets/flask.css +584 -0
- data/asciidoc/stylesheets/pygments.css +66 -0
- data/asciidoc/stylesheets/slidy.css +445 -0
- data/asciidoc/stylesheets/toc2.css +33 -0
- data/asciidoc/stylesheets/volnitsky-manpage.css +1 -0
- data/asciidoc/stylesheets/volnitsky.css +435 -0
- data/asciidoc/stylesheets/xhtml11-quirks.css +5 -3
- data/asciidoc/tests/asciidocapi.py +257 -0
- data/asciidoc/tests/data/deprecated-quotes.txt +12 -0
- data/asciidoc/tests/data/filters-test.txt +90 -0
- data/asciidoc/tests/data/lang-de-test.txt +106 -0
- data/asciidoc/tests/data/lang-en-test.txt +114 -0
- data/asciidoc/tests/data/lang-es-test.txt +106 -0
- data/asciidoc/tests/data/lang-fr-test.txt +106 -0
- data/asciidoc/tests/data/lang-hu-test.txt +106 -0
- data/asciidoc/tests/data/lang-nl-test.txt +94 -0
- data/asciidoc/tests/data/lang-pt-BR-test.txt +106 -0
- data/asciidoc/tests/data/lang-ru-test.txt +106 -0
- data/asciidoc/tests/data/lang-uk-test.txt +106 -0
- data/asciidoc/tests/data/oldtables.txt +64 -0
- data/asciidoc/tests/data/rcs-id-marker-test.txt +6 -0
- data/asciidoc/tests/data/testcases.conf +2 -0
- data/asciidoc/tests/data/testcases.txt +740 -0
- data/asciidoc/tests/data/utf8-bom-test.txt +9 -0
- data/asciidoc/tests/data/utf8-examples.txt +217 -0
- data/asciidoc/tests/testasciidoc.conf +520 -0
- data/asciidoc/tests/testasciidoc.py +411 -0
- data/asciidoc/text.conf +16 -16
- data/asciidoc/vim/syntax/asciidoc.vim +99 -91
- data/asciidoc/wordpress.conf +43 -3
- data/asciidoc/xhtml11-quirks.conf +61 -57
- data/asciidoc/xhtml11.conf +684 -645
- data/lib/mizuho.rb +2 -0
- data/lib/mizuho/generator.rb +2 -2
- data/test/generator_spec.rb +1 -1
- data/test/parser_spec.rb +1 -1
- data/test/spec_helper.rb +3 -4
- metadata +87 -88
- data/asciidoc/a2x +0 -674
- data/asciidoc/doc/article.css-embedded.html +0 -602
- data/asciidoc/doc/article.html +0 -46
- data/asciidoc/doc/asciidoc-revhistory.xml +0 -27
- data/asciidoc/doc/asciidoc.1.css-embedded.html +0 -598
- data/asciidoc/doc/asciidoc.1.css.html +0 -212
- data/asciidoc/doc/asciidoc.1.html +0 -190
- data/asciidoc/doc/asciidoc.css-embedded.html +0 -7853
- data/asciidoc/doc/asciidoc.css.html +0 -7416
- data/asciidoc/doc/asciidoc.html +0 -3339
- data/asciidoc/doc/book-multi.css-embedded.html +0 -575
- data/asciidoc/doc/book-multi.html +0 -55
- data/asciidoc/doc/book.css-embedded.html +0 -607
- data/asciidoc/doc/book.html +0 -43
- data/asciidoc/doc/docbook-xsl.css +0 -272
- data/asciidoc/doc/latex-backend.html +0 -117
- data/asciidoc/doc/music-filter.html +0 -566
- data/asciidoc/doc/source-highlight-filter.html +0 -214
- data/asciidoc/docbook.conf +0 -721
- data/asciidoc/examples/website/ASCIIMathML.js +0 -938
- data/asciidoc/examples/website/CHANGELOG.html +0 -4389
- data/asciidoc/examples/website/CHANGELOG.txt +0 -1810
- data/asciidoc/examples/website/INSTALL.html +0 -161
- data/asciidoc/examples/website/INSTALL.txt +0 -71
- data/asciidoc/examples/website/LaTeXMathML.js +0 -1223
- data/asciidoc/examples/website/README-website.html +0 -118
- data/asciidoc/examples/website/README-website.txt +0 -29
- data/asciidoc/examples/website/README.html +0 -125
- data/asciidoc/examples/website/README.txt +0 -36
- data/asciidoc/examples/website/a2x.1.html +0 -419
- data/asciidoc/examples/website/a2x.1.txt +0 -204
- data/asciidoc/examples/website/asciidoc-docbook-xsl.html +0 -130
- data/asciidoc/examples/website/asciidoc-docbook-xsl.txt +0 -43
- data/asciidoc/examples/website/asciidoc-graphviz-sample.txt +0 -130
- data/asciidoc/examples/website/asciimathml.txt +0 -64
- data/asciidoc/examples/website/build-website.sh +0 -25
- data/asciidoc/examples/website/customers.csv +0 -18
- data/asciidoc/examples/website/downloads.html +0 -257
- data/asciidoc/examples/website/downloads.txt +0 -121
- data/asciidoc/examples/website/faq.html +0 -673
- data/asciidoc/examples/website/faq.txt +0 -547
- data/asciidoc/examples/website/index.html +0 -419
- data/asciidoc/examples/website/index.txt +0 -245
- data/asciidoc/examples/website/latex-backend.html +0 -535
- data/asciidoc/examples/website/latex-backend.txt +0 -191
- data/asciidoc/examples/website/latexmathml.txt +0 -41
- data/asciidoc/examples/website/layout1.conf +0 -161
- data/asciidoc/examples/website/layout1.css +0 -65
- data/asciidoc/examples/website/layout2.conf +0 -158
- data/asciidoc/examples/website/layout2.css +0 -93
- data/asciidoc/examples/website/manpage.html +0 -266
- data/asciidoc/examples/website/manpage.txt +0 -118
- data/asciidoc/examples/website/music-filter.html +0 -242
- data/asciidoc/examples/website/music-filter.txt +0 -158
- data/asciidoc/examples/website/music1.abc +0 -12
- data/asciidoc/examples/website/music1.png +0 -0
- data/asciidoc/examples/website/music2.ly +0 -9
- data/asciidoc/examples/website/music2.png +0 -0
- data/asciidoc/examples/website/newlists.txt +0 -40
- data/asciidoc/examples/website/newtables.txt +0 -397
- data/asciidoc/examples/website/sample1.png +0 -0
- data/asciidoc/examples/website/sample3.png +0 -0
- data/asciidoc/examples/website/sample4.png +0 -0
- data/asciidoc/examples/website/source-highlight-filter.html +0 -286
- data/asciidoc/examples/website/source-highlight-filter.txt +0 -203
- data/asciidoc/examples/website/support.html +0 -78
- data/asciidoc/examples/website/support.txt +0 -5
- data/asciidoc/examples/website/toc.js +0 -69
- data/asciidoc/examples/website/userguide.html +0 -7460
- data/asciidoc/examples/website/userguide.txt +0 -4979
- data/asciidoc/examples/website/version83.txt +0 -37
- data/asciidoc/examples/website/version9.html +0 -143
- data/asciidoc/examples/website/version9.txt +0 -48
- data/asciidoc/examples/website/xhtml11-quirks.css +0 -41
- data/asciidoc/examples/website/xhtml11.css +0 -328
- data/asciidoc/stylesheets/xhtml11-manpage.css +0 -18
- data/asciidoc/stylesheets/xhtml11.css +0 -328
data/asciidoc/asciidoc.py (CHANGED)

The bundled AsciiDoc processor was upgraded; the new file identifies itself as VERSION = '8.6.5'. The main changes, in source order:
- Module header: the docstring now reads "converts an AsciiDoc text file to HTML or DocBook", the copyright runs 2002-2010 (Stuart Rackham), and `unicodedata` joins the imports.
- New module constants: `VERSION = '8.6.5'` (read by asciidocapi.py), `MIN_PYTHON_VERSION = 2.4`, `DEFAULT_BACKEND = 'html'`, and `OR, AND = ',', '+'` as attribute-list separators; `SUBS_NORMAL` gains `'replacements2'`.
- `AttrDict.__getattr__` now returns `None` for a missing key instead of raising `KeyError`.
- New `InsensitiveDict` class: a dictionary with case-insensitive key access, keys stored in lower case (see the sketch below).
- New `Trace` class: prints before/after diagnostic output for substitutions whose name matches the document's `trace` attribute; `subs` is an alias for all inline substitutions.
- New `Message` class replaces the old module-level `message()`, `error()` and `unsafe_error()` helpers: it collects messages, writes them to stderr with the program name, and provides `verbose()`, `warning()`, `deprecated()`, `format()`, `error()` (optionally halting via `EAsciiDoc`) and `unsafe()`; warnings and errors set `document.has_warnings`/`document.has_errors`.
- New helpers `userdir()` (the user's home directory, or `None`) and `localapp()` (`True` when `asciidoc.conf` sits next to the executable, i.e. a non-system install).
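The case-insensitive dictionary is a small, self-contained idea. A minimal Python 3 sketch of the same behaviour (the diff's version targets Python 2 and additionally overrides `has_key`, `update` and `setdefault`):

```python
class InsensitiveDict(dict):
    """Dictionary whose key lookups ignore case; keys are stored lower-cased."""
    def __setitem__(self, key, value):
        dict.__setitem__(self, key.lower(), value)
    def __getitem__(self, key):
        return dict.__getitem__(self, key.lower())
    def get(self, key, default=None):
        return dict.get(self, key.lower(), default)

attrs = InsensitiveDict()
attrs['DocTitle'] = 'Hello'
assert attrs['doctitle'] == 'Hello'      # lookup is case insensitive
assert attrs.get('DOCTITLE') == 'Hello'
```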
- `is_safe_file()` now also accepts files under `APP_DIR` and `CONF_DIR`, not just the document directory.
- `safe_filename()` normalizes include paths with `os.path.normpath()` and reports missing or unsafe include files through `message.warning()`/`message.unsafe()`; the separate `unsafe_error()` helper is gone.
- Bare `except:` clauses in `validate()`, `parse_attributes()`, `parse_named_attributes()` and `parse_list()` were narrowed to `except Exception`.
- `subs_quotes()` skips quote tags with an empty definition, excludes `]` from the attribute-list match, and the constrained-quote pattern now tolerates enveloping punctuation such as `a='x'`, `('x')` and `['x']`.
- `subs_tag()` reports a dropped tag through `message.warning()`.
- `parse_entries()` first expands templates via `config.expand_templates()`; the old `load_sections()` helper was removed.
- New `is_attr_defined(attrs, dic)` tests whether a single attribute, any of a comma-separated list (OR), or all of a plus-separated list (AND) are defined (see the sketch below).
- `filter_lines()` now reports an undefined attribute in the filter command as an error, and searches for the filter in the document's `docdir`, then `USER_DIR`, then the application directory (for local installs) or `CONF_DIR`; all diagnostics go through `message.*`.
- `system()` takes a new `attrs` argument, substitutes attributes in block-macro arguments before evaluating them, and accepts the new names `eval3`, `sys3`, `counter`, `counter2`, `set`, `set2` and `template` alongside `eval`, `sys`, `sys2`, `include` and `include1`; evaluation errors are reported against the offending `{name:args}` or `name::[args]` syntax.
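The OR/AND attribute test added in `is_attr_defined()` is easy to show in isolation. A standalone sketch of the same rule (separators as defined at the top of the new file; the real function is handed document attributes):

```python
OR, AND = ',', '+'   # attribute-list separators

def is_attr_defined(attrs, dic):
    """True if the attribute expression is satisfied by dictionary 'dic'.

    'a'      -> a is defined
    'a,b,c'  -> at least one of a, b, c is defined (OR)
    'a+b+c'  -> all of a, b, c are defined (AND)
    """
    if OR in attrs:
        return any(dic.get(a.strip()) is not None for a in attrs.split(OR))
    if AND in attrs:
        return all(dic.get(a.strip()) is not None for a in attrs.split(AND))
    return dic.get(attrs.strip()) is not None

d = {'icons': '', 'toc': None, 'backend': 'html'}
assert is_attr_defined('icons', d)
assert is_attr_defined('toc,backend', d)      # OR: backend is defined
assert not is_attr_defined('toc+backend', d)  # AND: toc is undefined
```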
- New `counter`/`counter2` system attributes create or increment an attribute: numeric values count up, a single character advances alphabetically, an optional seed sets the starting value, and `counter2` produces no output; the result is also written into the caller's attribute dictionary (see the sketch below).
- New `set`/`set2` system attributes assign an attribute (a trailing `!` on the name undefines it); `set2` updates only the local attribute scope.
- A new `template` system attribute expands a named configuration-file section with attribute substitution.
- `eval3` and `sys3` results are appended to `macros.passthroughs` so they bypass further inline substitution.
- `sys`/`sys2` execution now reports a non-zero exit status and temp-file read errors through `message.warning()`/`EAsciiDoc`.
- In `subs_attrs()`, the redundant `lines = list(lines)` copy and `dictionary.copy()` were dropped.
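The counter behaviour is easiest to see on its own. This sketch covers only the increment rule; the real code lives inside `system()`, validates attribute names, honours an explicit seed and distinguishes the silent `counter2` variant:

```python
import re

def next_counter(value):
    """Increment a counter the way the new {counter:...} attribute does:
    integers count up numerically, a single letter advances alphabetically."""
    if re.match(r'^\d+$', value):
        return str(int(value) + 1)
    return chr(ord(value) + 1)

attrs = {}
for _ in range(3):
    attrs['figure'] = next_counter(attrs.get('figure', '0'))
assert attrs['figure'] == '3'
assert next_counter('a') == 'b'   # single-character counters step through letters
```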
- The body of `subs_attrs()` was rewritten to build a new `result` list line by line instead of mutating the input sequence.
- Conditional attribute references gained the operators `=`, `?`, `!`, `#`, `%`, `@` and `$`, and the name part may be a comma-separated (OR) or plus-separated (AND) list of attribute names; `@` and `$` match the attribute value against a regular expression and pick one of two substitution values, and several outcomes substitute an undefined marker so the whole line is dropped (the simplest operators are sketched below).
- Lines still containing unresolved `{name}` references are dropped and reported via `trace('dropped line', ...)`.
- System attribute references are expanded with `{eval:...}` given precedence over other actions, and the `counter`/`set` actions propagate new values back into the caller's attribute dictionary.
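A toy resolver makes the conditional operators concrete. This sketch covers only the `=`, `?` and `!` forms on a plain dictionary; the real implementation also handles `#`, `%`, the regexp operators `@` and `$`, OR/AND name lists, escaping and line dropping:

```python
import re

def resolve_conditional(text, attrs):
    """Expand {name=default}, {name?set-value} and {name!unset-value} references."""
    pattern = re.compile(r'\{(?P<name>[-\w]+)(?P<op>[=?!])(?P<value>[^}]*)\}')
    def repl(mo):
        name, op, value = mo.group('name', 'op', 'value')
        defined = attrs.get(name) is not None
        if op == '=':                        # attribute value, else the default
            return attrs[name] if defined else value
        if op == '?':                        # value only when the attribute is defined
            return value if defined else ''
        return '' if defined else value      # '!': value only when undefined
    return pattern.sub(repl, text)

attrs = {'author': 'Stuart', 'toc': None}
assert resolve_conditional('{author=anonymous}', attrs) == 'Stuart'
assert resolve_conditional('{toc?with TOC}', attrs) == ''
assert resolve_conditional('{toc!no TOC}', attrs) == 'no TOC'
```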
- A new `east_asian_widths` table maps `unicodedata.east_asian_width()` codes to column widths, and a new `column_width()` function uses it so wide (CJK) characters count as two columns (see the sketch below).
- New `time_str()` and `date_str()` helpers convert seconds-since-the-epoch values into a local `HH:MM:SS` time with timezone name (re-encoded to the output encoding when possible) and a `YYYY-MM-DD` date.
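The width calculation is a direct use of the standard library and carries over almost unchanged to Python 3 (the bundled code first decodes byte strings according to the document's encoding attribute):

```python
import unicodedata

# Mapping of unicodedata.east_asian_width() codes to terminal column widths,
# mirroring the table introduced in the new file.
EAST_ASIAN_WIDTHS = {'W': 2, 'F': 2, 'Na': 1, 'H': 1, 'N': 1, 'A': 1}

def column_width(text):
    """Display width of 'text': wide/full-width CJK characters count as two columns."""
    return sum(EAST_ASIAN_WIDTHS[unicodedata.east_asian_width(c)] for c in text)

assert column_width('abc') == 3
assert column_width('日本語') == 6   # three wide characters
```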
- `Lex.next()` was restructured into a single `if/elif` chain; a level-0 title whose attribute list carries the `float` style now yields the new `FloatingTitle` element, block titles are checked before old-style tables, and the special handling of `skip`-option blocks moved out of the lexer.
- New `Lex.canonical_subs()` expands the composite substitution options `none`, `normal` and `verbatim`; it is shared by `subs_1()` and `subs()`.
- `Lex.subs_1()` honours a `plaintext` document attribute (only special characters are substituted) and calls `trace()` after every substitution step.
- `Document` became a new-style class with `doctype` and `backend` properties stored in the attribute dictionary; assigning a backend resolves `backend-alias-*` entries (see the sketch below), `self.attributes` is now an `InsensitiveDict`, and safe mode defaults to `False`.
- The old inline localtime code was replaced by `update_attributes(attrs=None)`: it sets `localtime`/`localdate` via the new time helpers, adds `asciidoc-confdir`, and merges an optional `attrs` dictionary plus the command-line attributes.
- File-related attributes (`infile`, `indir`, `docfile`, `docdir`, `docname`, `doctime`, `docdate`) are derived from the input file and can no longer be overridden; when reading from `<stdin>`, `docname` falls back to the output file name.
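The backend property's alias lookup is worth isolating. A sketch assuming only that `backend-alias-*` entries live in the same attribute dictionary (the two alias values below are illustrative; the real entries are loaded from the configuration files):

```python
class Document:
    def __init__(self):
        # 'backend-alias-*' entries normally come from asciidoc.conf;
        # these two values are just examples.
        self.attributes = {'backend-alias-html': 'xhtml11',
                           'backend-alias-docbook': 'docbook45'}

    @property
    def backend(self):
        return self.attributes.get('backend')

    @backend.setter
    def backend(self, backend):
        if backend:
            # Resolve an alias such as 'html' to the real backend name.
            backend = self.attributes.get('backend-alias-' + backend, backend)
        self.attributes['backend'] = backend

doc = Document()
doc.backend = 'html'
assert doc.backend == 'xhtml11'   # alias resolved via backend-alias-html
```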
- New `Document.load_lang()` loads `lang-<lang>.conf` (defaulting to `lang-en.conf`); a missing default language file is fatal, a missing translation only warns.
- New `set_deprecated_attribute(old, new)` keeps renamed attributes working in both directions; it is used for `revision` → `revnumber` and `date` → `revdate`.
- New `consume_attributes_and_comments()` swallows leading comments, skip blocks, attribute entries and attribute lists ahead of the document header.
- `parse_header(doctype, backend)` now finalizes the doctype and backend: command-line values override the header, otherwise `DEFAULT_DOCTYPE`/`DEFAULT_BACKEND` apply; a manpage source without a document title is a fatal error. (The rendered diff is cut off at this point.)
|
+
elif self.backend is None:
|
1494
|
+
# Was not set on command-line or in document header.
|
1495
|
+
self.backend = DEFAULT_BACKEND
|
1496
|
+
else:
|
1497
|
+
# Has been set in document header.
|
1498
|
+
self.backend = self.backend # Translate alias in header.
|
1499
|
+
assert self.doctype in ('article','manpage','book'), 'illegal document type'
|
1500
|
+
return has_header
|
1501
|
+
def translate(self,has_header):
|
1502
|
+
if self.doctype == 'manpage':
|
1503
|
+
# Translate mandatory NAME section.
|
1504
|
+
if Lex.next() is not Title:
|
1505
|
+
message.error('name section expected')
|
1506
|
+
else:
|
1507
|
+
Title.translate()
|
1508
|
+
if Title.level != 1:
|
1509
|
+
message.error('name section title must be at level 1')
|
1510
|
+
if not isinstance(Lex.next(),Paragraph):
|
1511
|
+
message.error('malformed name section body')
|
1512
|
+
lines = reader.read_until(r'^$')
|
1513
|
+
s = ' '.join(lines)
|
1514
|
+
mo = re.match(r'^(?P<manname>.*?)\s+-\s+(?P<manpurpose>.*)$',s)
|
1515
|
+
if not mo:
|
1516
|
+
message.error('malformed name section body')
|
1517
|
+
self.attributes['manname'] = mo.group('manname').strip()
|
1518
|
+
self.attributes['manpurpose'] = mo.group('manpurpose').strip()
|
1519
|
+
names = [s.strip() for s in self.attributes['manname'].split(',')]
|
1520
|
+
if len(names) > 9:
|
1521
|
+
message.warning('to many manpage names')
|
1522
|
+
for i,name in enumerate(names):
|
1523
|
+
self.attributes['manname%d' % (i+1)] = name
|
1524
|
+
if has_header:
|
1525
|
+
# Do postponed substitutions (backend confs have been loaded).
|
1526
|
+
self.attributes['doctitle'] = Title.dosubs(self.attributes['doctitle'])
|
1151
1527
|
if config.header_footer:
|
1152
1528
|
hdr = config.subs_section('header',{})
|
1153
|
-
writer.write(hdr)
|
1529
|
+
writer.write(hdr,trace='header')
|
1530
|
+
if 'title' in self.attributes:
|
1531
|
+
del self.attributes['title']
|
1532
|
+
self.consume_attributes_and_comments()
|
1154
1533
|
if self.doctype in ('article','book'):
|
1155
1534
|
# Translate 'preamble' (untitled elements between header
|
1156
1535
|
# and first section title).
|
1157
1536
|
if Lex.next() is not Title:
|
1158
1537
|
stag,etag = config.section2tags('preamble')
|
1159
|
-
writer.write(stag)
|
1538
|
+
writer.write(stag,trace='preamble open')
|
1160
1539
|
Section.translate_body()
|
1161
|
-
writer.write(etag)
|
1162
|
-
|
1163
|
-
|
1164
|
-
if Lex.next() is not Title:
|
1165
|
-
error('SYNOPSIS section expected')
|
1166
|
-
else:
|
1167
|
-
Title.translate()
|
1168
|
-
if Title.attributes['title'].upper() != 'SYNOPSIS':
|
1169
|
-
error('second section must be named SYNOPSIS')
|
1170
|
-
if Title.level != 1:
|
1171
|
-
error('SYNOPSIS section title must be at level 1')
|
1172
|
-
d = {}
|
1173
|
-
d.update(Title.attributes)
|
1174
|
-
AttributeList.consume(d)
|
1175
|
-
stag,etag = config.section2tags('sect-synopsis',d)
|
1176
|
-
writer.write(stag)
|
1177
|
-
Section.translate_body()
|
1178
|
-
writer.write(etag)
|
1540
|
+
writer.write(etag,trace='preamble close')
|
1541
|
+
elif self.doctype == 'manpage' and 'name' in config.sections:
|
1542
|
+
writer.write(config.subs_section('name',{}), trace='name')
|
1179
1543
|
else:
|
1544
|
+
self.process_author_names()
|
1180
1545
|
if config.header_footer:
|
1181
1546
|
hdr = config.subs_section('header',{})
|
1182
|
-
writer.write(hdr)
|
1547
|
+
writer.write(hdr,trace='header')
|
1183
1548
|
if Lex.next() is not Title:
|
1184
1549
|
Section.translate_body()
|
1185
1550
|
# Process remaining sections.
|
@@ -1191,7 +1556,7 @@ class Document:
|
|
1191
1556
|
# Substitute document parameters and write document footer.
|
1192
1557
|
if config.header_footer:
|
1193
1558
|
ftr = config.subs_section('footer',{})
|
1194
|
-
writer.write(ftr)
|
1559
|
+
writer.write(ftr,trace='footer')
|
1195
1560
|
def parse_author(self,s):
|
1196
1561
|
""" Return False if the author is malformed."""
|
1197
1562
|
attrs = self.attributes # Alias for readability.
|
@@ -1201,8 +1566,10 @@ class Document:
|
|
1201
1566
|
'(\s+(?P<name3>[^<>\s]+))?'
|
1202
1567
|
'(\s+<(?P<email>\S+)>)?$',s)
|
1203
1568
|
if not mo:
|
1204
|
-
|
1205
|
-
|
1569
|
+
# Names that don't match the formal specification.
|
1570
|
+
if s:
|
1571
|
+
attrs['firstname'] = s
|
1572
|
+
return
|
1206
1573
|
firstname = mo.group('name1')
|
1207
1574
|
if mo.group('name3'):
|
1208
1575
|
middlename = mo.group('name2')
|
@@ -1224,7 +1591,7 @@ class Document:
|
|
1224
1591
|
attrs['lastname'] = lastname
|
1225
1592
|
if email:
|
1226
1593
|
attrs['email'] = email
|
1227
|
-
return
|
1594
|
+
return
|
1228
1595
|
def process_author_names(self):
|
1229
1596
|
""" Calculate any missing author related attributes."""
|
1230
1597
|
attrs = self.attributes # Alias for readability.
|
@@ -1234,8 +1601,7 @@ class Document:
|
|
1234
1601
|
author = attrs.get('author')
|
1235
1602
|
initials = attrs.get('authorinitials')
|
1236
1603
|
if author and not (firstname or middlename or lastname):
|
1237
|
-
|
1238
|
-
return
|
1604
|
+
self.parse_author(author)
|
1239
1605
|
attrs['author'] = author.replace('_',' ')
|
1240
1606
|
self.process_author_names()
|
1241
1607
|
return
|
@@ -1244,8 +1610,9 @@ class Document:
|
|
1244
1610
|
author = author.strip()
|
1245
1611
|
author = re.sub(r'\s+',' ', author)
|
1246
1612
|
if not initials:
|
1247
|
-
initials = firstname[:1] +
|
1248
|
-
|
1613
|
+
initials = (char_decode(firstname)[:1] +
|
1614
|
+
char_decode(middlename)[:1] + char_decode(lastname)[:1])
|
1615
|
+
initials = char_encode(initials).upper()
|
1249
1616
|
names = [firstname,middlename,lastname,author,initials]
|
1250
1617
|
for i,v in enumerate(names):
|
1251
1618
|
v = config.subs_specialchars(v)
|
@@ -1268,75 +1635,82 @@ class Document:
|
|
1268
1635
|
|
1269
1636
|
class Header:
|
1270
1637
|
"""Static methods and attributes only."""
|
1638
|
+
REV_LINE_RE = r'^(\D*(?P<revnumber>.*?),)?(?P<revdate>.*?)(:\s*(?P<revremark>.*))?$'
|
1639
|
+
RCS_ID_RE = r'^\$Id: \S+ (?P<revnumber>\S+) (?P<revdate>\S+) \S+ (?P<author>\S+) (\S+ )?\$$'
|
1271
1640
|
def __init__(self):
|
1272
1641
|
raise AssertionError,'no class instances allowed'
|
1273
1642
|
@staticmethod
|
1274
|
-
def
|
1643
|
+
def parse():
|
1275
1644
|
assert Lex.next() is Title and Title.level == 0
|
1276
|
-
Title.translate()
|
1277
1645
|
attrs = document.attributes # Alias for readability.
|
1646
|
+
# Postpone title subs until backend conf files have been loaded.
|
1647
|
+
Title.translate(skipsubs=True)
|
1278
1648
|
attrs['doctitle'] = Title.attributes['title']
|
1649
|
+
document.consume_attributes_and_comments(noblanks=True)
|
1650
|
+
s = reader.read_next()
|
1651
|
+
mo = None
|
1652
|
+
if s:
|
1653
|
+
# Process first header line after the title that is not a comment
|
1654
|
+
# or an attribute entry.
|
1655
|
+
s = reader.read()
|
1656
|
+
mo = re.match(Header.RCS_ID_RE,s)
|
1657
|
+
if not mo:
|
1658
|
+
document.parse_author(s)
|
1659
|
+
document.consume_attributes_and_comments(noblanks=True)
|
1660
|
+
if reader.read_next():
|
1661
|
+
# Process second header line after the title that is not a
|
1662
|
+
# comment or an attribute entry.
|
1663
|
+
s = reader.read()
|
1664
|
+
s = subs_attrs(s)
|
1665
|
+
if s:
|
1666
|
+
mo = re.match(Header.RCS_ID_RE,s)
|
1667
|
+
if not mo:
|
1668
|
+
mo = re.match(Header.REV_LINE_RE,s)
|
1669
|
+
document.consume_attributes_and_comments(noblanks=True)
|
1670
|
+
s = attrs.get('revnumber')
|
1671
|
+
if s:
|
1672
|
+
mo = re.match(Header.RCS_ID_RE,s)
|
1673
|
+
if mo:
|
1674
|
+
revnumber = mo.group('revnumber')
|
1675
|
+
if revnumber:
|
1676
|
+
attrs['revnumber'] = revnumber.strip()
|
1677
|
+
author = mo.groupdict().get('author')
|
1678
|
+
if author and 'firstname' not in attrs:
|
1679
|
+
document.parse_author(author)
|
1680
|
+
revremark = mo.groupdict().get('revremark')
|
1681
|
+
if revremark is not None:
|
1682
|
+
revremark = [revremark]
|
1683
|
+
# Revision remarks can continue on following lines.
|
1684
|
+
while reader.read_next():
|
1685
|
+
if document.consume_attributes_and_comments(noblanks=True):
|
1686
|
+
break
|
1687
|
+
revremark.append(reader.read())
|
1688
|
+
revremark = Lex.subs(revremark,['normal'])
|
1689
|
+
revremark = '\n'.join(revremark).strip()
|
1690
|
+
attrs['revremark'] = revremark
|
1691
|
+
revdate = mo.group('revdate')
|
1692
|
+
if revdate:
|
1693
|
+
attrs['revdate'] = revdate.strip()
|
1694
|
+
elif revnumber or revremark:
|
1695
|
+
# Set revision date to ensure valid DocBook revision.
|
1696
|
+
attrs['revdate'] = attrs['docdate']
|
1697
|
+
document.process_author_names()
|
1279
1698
|
if document.doctype == 'manpage':
|
1280
1699
|
# manpage title formatted like mantitle(manvolnum).
|
1281
1700
|
mo = re.match(r'^(?P<mantitle>.*)\((?P<manvolnum>.*)\)$',
|
1282
1701
|
attrs['doctitle'])
|
1283
1702
|
if not mo:
|
1284
|
-
error('malformed manpage title')
|
1703
|
+
message.error('malformed manpage title')
|
1285
1704
|
else:
|
1286
1705
|
mantitle = mo.group('mantitle').strip()
|
1706
|
+
mantitle = subs_attrs(mantitle)
|
1707
|
+
if mantitle is None:
|
1708
|
+
message.error('undefined attribute in manpage title')
|
1287
1709
|
# mantitle is lowered only if in ALL CAPS
|
1288
1710
|
if mantitle == mantitle.upper():
|
1289
1711
|
mantitle = mantitle.lower()
|
1290
1712
|
attrs['mantitle'] = mantitle;
|
1291
1713
|
attrs['manvolnum'] = mo.group('manvolnum').strip()
|
1292
|
-
AttributeEntry.translate_all()
|
1293
|
-
s = reader.read_next()
|
1294
|
-
if s:
|
1295
|
-
s = reader.read()
|
1296
|
-
document.parse_author(s)
|
1297
|
-
AttributeEntry.translate_all()
|
1298
|
-
if reader.read_next():
|
1299
|
-
# Parse revision line.
|
1300
|
-
s = reader.read()
|
1301
|
-
s = subs_attrs(s)
|
1302
|
-
if s:
|
1303
|
-
# Match RCS/CVS/SVN $Id$ marker format.
|
1304
|
-
mo = re.match(r'^\$Id: \S+ (?P<revision>\S+)'
|
1305
|
-
' (?P<date>\S+) \S+ \S+ (\S+ )?\$$',s)
|
1306
|
-
if not mo:
|
1307
|
-
# Match AsciiDoc revision,date format.
|
1308
|
-
mo = re.match(r'^\D*(?P<revision>.*?),(?P<date>.+)$',s)
|
1309
|
-
if mo:
|
1310
|
-
revision = mo.group('revision').strip()
|
1311
|
-
date = mo.group('date').strip()
|
1312
|
-
else:
|
1313
|
-
revision = None
|
1314
|
-
date = s.strip()
|
1315
|
-
if revision:
|
1316
|
-
attrs['revision'] = config.subs_specialchars(revision)
|
1317
|
-
if date:
|
1318
|
-
attrs['date'] = config.subs_specialchars(date)
|
1319
|
-
AttributeEntry.translate_all()
|
1320
|
-
if document.doctype == 'manpage':
|
1321
|
-
# Translate mandatory NAME section.
|
1322
|
-
if Lex.next() is not Title:
|
1323
|
-
error('NAME section expected')
|
1324
|
-
else:
|
1325
|
-
Title.translate()
|
1326
|
-
if Title.attributes['title'].upper() != 'NAME':
|
1327
|
-
error('first section must be named NAME')
|
1328
|
-
if Title.level != 1:
|
1329
|
-
error('NAME section title must be at level 1')
|
1330
|
-
if not isinstance(Lex.next(),Paragraph):
|
1331
|
-
error('malformed NAME section body')
|
1332
|
-
lines = reader.read_until(r'^$')
|
1333
|
-
s = ' '.join(lines)
|
1334
|
-
mo = re.match(r'^(?P<manname>.*?)\s+-\s+(?P<manpurpose>.*)$',s)
|
1335
|
-
if not mo:
|
1336
|
-
error('malformed NAME section body')
|
1337
|
-
attrs['manname'] = mo.group('manname').strip()
|
1338
|
-
attrs['manpurpose'] = mo.group('manpurpose').strip()
|
1339
|
-
document.process_author_names()
|
1340
1714
|
|
1341
1715
|
class AttributeEntry:
|
1342
1716
|
"""Static methods and attributes only."""
|
@@ -1345,6 +1719,7 @@ class AttributeEntry:
|
|
1345
1719
|
name = None
|
1346
1720
|
name2 = None
|
1347
1721
|
value = None
|
1722
|
+
attributes = {} # Accumulates all the parsed attribute entries.
|
1348
1723
|
def __init__(self):
|
1349
1724
|
raise AssertionError,'no class instances allowed'
|
1350
1725
|
@staticmethod
|
@@ -1353,16 +1728,8 @@ class AttributeEntry:
|
|
1353
1728
|
if not AttributeEntry.pattern:
|
1354
1729
|
pat = document.attributes.get('attributeentry-pattern')
|
1355
1730
|
if not pat:
|
1356
|
-
error("[attributes] missing 'attributeentry-pattern' entry")
|
1731
|
+
message.error("[attributes] missing 'attributeentry-pattern' entry")
|
1357
1732
|
AttributeEntry.pattern = pat
|
1358
|
-
if not AttributeEntry.subs:
|
1359
|
-
subs = document.attributes.get('attributeentry-subs')
|
1360
|
-
if subs:
|
1361
|
-
subs = parse_options(subs,SUBS_OPTIONS,
|
1362
|
-
'illegal [%s] %s: %s' % ('attributes','attributeentry-subs',subs))
|
1363
|
-
else:
|
1364
|
-
subs = ('specialcharacters','attributes')
|
1365
|
-
AttributeEntry.subs = subs
|
1366
1733
|
line = reader.read_next()
|
1367
1734
|
if line:
|
1368
1735
|
# Attribute entry formatted like :<name>[.<name2>]:[ <value>]
|
@@ -1377,42 +1744,61 @@ class AttributeEntry:
|
|
1377
1744
|
@staticmethod
|
1378
1745
|
def translate():
|
1379
1746
|
assert Lex.next() is AttributeEntry
|
1380
|
-
attr = AttributeEntry
|
1381
|
-
reader.read()
|
1382
|
-
|
1383
|
-
|
1384
|
-
|
1385
|
-
|
1386
|
-
|
1747
|
+
attr = AttributeEntry # Alias for brevity.
|
1748
|
+
reader.read() # Discard attribute entry from reader.
|
1749
|
+
while attr.value.endswith(' +'):
|
1750
|
+
if not reader.read_next(): break
|
1751
|
+
attr.value = attr.value[:-1] + reader.read().strip()
|
1752
|
+
if attr.name2 is not None:
|
1753
|
+
# Configuration file attribute.
|
1754
|
+
if attr.name2 != '':
|
1755
|
+
# Section entry attribute.
|
1756
|
+
section = {}
|
1757
|
+
# Some sections can have name! syntax.
|
1758
|
+
if attr.name in ('attributes','miscellaneous') and attr.name2[-1] == '!':
|
1759
|
+
section[attr.name] = [attr.name2]
|
1760
|
+
else:
|
1761
|
+
section[attr.name] = ['%s=%s' % (attr.name2,attr.value)]
|
1762
|
+
config.load_sections(section)
|
1763
|
+
config.load_miscellaneous(config.conf_attrs)
|
1387
1764
|
else:
|
1388
|
-
|
1389
|
-
|
1390
|
-
|
1391
|
-
|
1765
|
+
# Markup template section attribute.
|
1766
|
+
if attr.name in config.sections:
|
1767
|
+
config.sections[attr.name] = [attr.value]
|
1768
|
+
else:
|
1769
|
+
message.warning('missing configuration section: %s' % attr.name)
|
1770
|
+
else:
|
1771
|
+
# Normal attribute.
|
1392
1772
|
if attr.name[-1] == '!':
|
1393
1773
|
# Names like name! undefine the attribute.
|
1394
1774
|
attr.name = attr.name[:-1]
|
1395
1775
|
attr.value = None
|
1396
1776
|
# Strip white space and illegal name chars.
|
1397
1777
|
attr.name = re.sub(r'(?u)[^\w\-_]', '', attr.name).lower()
|
1398
|
-
# Don't override command-line attributes.
|
1399
|
-
if attr.name in config.cmd_attrs
|
1778
|
+
# Don't override most command-line attributes.
|
1779
|
+
if attr.name in config.cmd_attrs \
|
1780
|
+
and attr.name not in ('trace','numbered'):
|
1400
1781
|
return
|
1401
|
-
# Update document
|
1402
|
-
if attr.
|
1403
|
-
|
1404
|
-
|
1782
|
+
# Update document attributes with attribute value.
|
1783
|
+
if attr.value is not None:
|
1784
|
+
mo = re.match(r'^pass:(?P<attrs>.*)\[(?P<value>.*)\]$', attr.value)
|
1785
|
+
if mo:
|
1786
|
+
# Inline passthrough syntax.
|
1787
|
+
attr.subs = mo.group('attrs')
|
1788
|
+
attr.value = mo.group('value') # Passthrough.
|
1789
|
+
else:
|
1790
|
+
# Default substitution.
|
1791
|
+
# DEPRECATED: attributeentry-subs
|
1792
|
+
attr.subs = document.attributes.get('attributeentry-subs',
|
1793
|
+
'specialcharacters,attributes')
|
1794
|
+
attr.subs = parse_options(attr.subs, SUBS_OPTIONS,
|
1795
|
+
'illegal substitution option')
|
1405
1796
|
attr.value = Lex.subs((attr.value,), attr.subs)
|
1406
1797
|
attr.value = writer.newline.join(attr.value)
|
1407
|
-
if attr.value is not None:
|
1408
1798
|
document.attributes[attr.name] = attr.value
|
1409
1799
|
elif attr.name in document.attributes:
|
1410
1800
|
del document.attributes[attr.name]
|
1411
|
-
|
1412
|
-
def translate_all():
|
1413
|
-
""" Process all contiguous attribute lines on reader."""
|
1414
|
-
while AttributeEntry.isnext():
|
1415
|
-
AttributeEntry.translate()
|
1801
|
+
attr.attributes[attr.name] = attr.value
|
1416
1802
|
|
1417
1803
|
class AttributeList:
|
1418
1804
|
"""Static methods and attributes only."""
|
@@ -1422,12 +1808,13 @@ class AttributeList:
|
|
1422
1808
|
def __init__(self):
|
1423
1809
|
raise AssertionError,'no class instances allowed'
|
1424
1810
|
@staticmethod
|
1811
|
+
def initialize():
|
1812
|
+
if not 'attributelist-pattern' in document.attributes:
|
1813
|
+
message.error("[attributes] missing 'attributelist-pattern' entry")
|
1814
|
+
AttributeList.pattern = document.attributes['attributelist-pattern']
|
1815
|
+
@staticmethod
|
1425
1816
|
def isnext():
|
1426
1817
|
result = False # Assume not next.
|
1427
|
-
if not AttributeList.pattern:
|
1428
|
-
if not 'attributelist-pattern' in document.attributes:
|
1429
|
-
error("[attributes] missing 'attributelist-pattern' entry")
|
1430
|
-
AttributeList.pattern = document.attributes['attributelist-pattern']
|
1431
1818
|
line = reader.read_next()
|
1432
1819
|
if line:
|
1433
1820
|
mo = re.match(AttributeList.pattern, line)
|
@@ -1439,15 +1826,28 @@ class AttributeList:
|
|
1439
1826
|
def translate():
|
1440
1827
|
assert Lex.next() is AttributeList
|
1441
1828
|
reader.read() # Discard attribute list from reader.
|
1829
|
+
attrs = {}
|
1442
1830
|
d = AttributeList.match.groupdict()
|
1443
1831
|
for k,v in d.items():
|
1444
1832
|
if v is not None:
|
1445
1833
|
if k == 'attrlist':
|
1446
1834
|
v = subs_attrs(v)
|
1447
1835
|
if v:
|
1448
|
-
parse_attributes(v,
|
1836
|
+
parse_attributes(v, attrs)
|
1449
1837
|
else:
|
1450
1838
|
AttributeList.attrs[k] = v
|
1839
|
+
AttributeList.subs(attrs)
|
1840
|
+
AttributeList.attrs.update(attrs)
|
1841
|
+
@staticmethod
|
1842
|
+
def subs(attrs):
|
1843
|
+
'''Substitute single quoted attribute values normally.'''
|
1844
|
+
reo = re.compile(r"^'.*'$")
|
1845
|
+
for k,v in attrs.items():
|
1846
|
+
if reo.match(str(v)):
|
1847
|
+
attrs[k] = Lex.subs_1(v[1:-1],SUBS_NORMAL)
|
1848
|
+
@staticmethod
|
1849
|
+
def style():
|
1850
|
+
return AttributeList.attrs.get('style') or AttributeList.attrs.get('1')
|
1451
1851
|
@staticmethod
|
1452
1852
|
def consume(d):
|
1453
1853
|
"""Add attribute list to the dictionary 'd' and reset the
|
@@ -1487,7 +1887,7 @@ class BlockTitle:
|
|
1487
1887
|
s = Lex.subs((BlockTitle.title,), Title.subs)
|
1488
1888
|
s = writer.newline.join(s)
|
1489
1889
|
if not s:
|
1490
|
-
warning('blank block title')
|
1890
|
+
message.warning('blank block title')
|
1491
1891
|
BlockTitle.title = s
|
1492
1892
|
@staticmethod
|
1493
1893
|
def consume(d):
|
@@ -1512,22 +1912,28 @@ class Title:
|
|
1512
1912
|
def __init__(self):
|
1513
1913
|
raise AssertionError,'no class instances allowed'
|
1514
1914
|
@staticmethod
|
1515
|
-
def translate():
|
1915
|
+
def translate(skipsubs=False):
|
1516
1916
|
"""Parse the Title.attributes and Title.level from the reader. The
|
1517
1917
|
real work has already been done by parse()."""
|
1518
|
-
assert Lex.next()
|
1918
|
+
assert Lex.next() in (Title,FloatingTitle)
|
1519
1919
|
# Discard title from reader.
|
1520
1920
|
for i in range(Title.linecount):
|
1521
1921
|
reader.read()
|
1522
1922
|
Title.setsectname()
|
1523
|
-
|
1923
|
+
if not skipsubs:
|
1924
|
+
Title.attributes['title'] = Title.dosubs(Title.attributes['title'])
|
1925
|
+
@staticmethod
|
1926
|
+
def dosubs(title):
|
1927
|
+
"""
|
1928
|
+
Perform title substitutions.
|
1929
|
+
"""
|
1524
1930
|
if not Title.subs:
|
1525
1931
|
Title.subs = config.subsnormal
|
1526
|
-
|
1527
|
-
|
1528
|
-
if not
|
1529
|
-
warning('blank section title')
|
1530
|
-
|
1932
|
+
title = Lex.subs((title,), Title.subs)
|
1933
|
+
title = writer.newline.join(title)
|
1934
|
+
if not title:
|
1935
|
+
message.warning('blank section title')
|
1936
|
+
return title
|
1531
1937
|
@staticmethod
|
1532
1938
|
def isnext():
|
1533
1939
|
lines = reader.read_ahead(2)
|
@@ -1554,13 +1960,16 @@ class Title:
|
|
1554
1960
|
if not Title.pattern: return False # Single-line titles only.
|
1555
1961
|
if len(lines) < 2: return False
|
1556
1962
|
title,ul = lines[:2]
|
1557
|
-
title_len =
|
1963
|
+
title_len = column_width(title)
|
1558
1964
|
ul_len = char_len(ul)
|
1559
1965
|
if ul_len < 2: return False
|
1560
1966
|
# Fast elimination check.
|
1561
1967
|
if ul[:2] not in Title.underlines: return False
|
1562
1968
|
# Length of underline must be within +-3 of title.
|
1563
|
-
if not (ul_len-3 < title_len < ul_len+3)
|
1969
|
+
if not ((ul_len-3 < title_len < ul_len+3)
|
1970
|
+
# Next test for backward compatibility.
|
1971
|
+
or (ul_len-3 < char_len(title) < ul_len+3)):
|
1972
|
+
return False
|
1564
1973
|
# Check for valid repetition of underline character pairs.
|
1565
1974
|
s = ul[:2]*((ul_len+1)/2)
|
1566
1975
|
if ul != s[:ul_len]: return False
|
@@ -1576,10 +1985,15 @@ class Title:
|
|
1576
1985
|
# Check for expected pattern match groups.
|
1577
1986
|
if result:
|
1578
1987
|
if not 'title' in Title.attributes:
|
1579
|
-
warning('[titles] entry has no <title> group')
|
1988
|
+
message.warning('[titles] entry has no <title> group')
|
1580
1989
|
Title.attributes['title'] = lines[0]
|
1581
1990
|
for k,v in Title.attributes.items():
|
1582
1991
|
if v is None: del Title.attributes[k]
|
1992
|
+
try:
|
1993
|
+
Title.level += int(document.attributes.get('leveloffset','0'))
|
1994
|
+
except:
|
1995
|
+
pass
|
1996
|
+
Title.attributes['level'] = str(Title.level)
|
1583
1997
|
return result
|
1584
1998
|
@staticmethod
|
1585
1999
|
def load(entries):
|
@@ -1588,7 +2002,7 @@ class Title:
|
|
1588
2002
|
errmsg = 'malformed [titles] underlines entry'
|
1589
2003
|
try:
|
1590
2004
|
underlines = parse_list(entries['underlines'])
|
1591
|
-
except:
|
2005
|
+
except Exception:
|
1592
2006
|
raise EAsciiDoc,errmsg
|
1593
2007
|
if len(underlines) != len(Title.underlines):
|
1594
2008
|
raise EAsciiDoc,errmsg
|
@@ -1603,13 +2017,13 @@ class Title:
|
|
1603
2017
|
Title.dump_dict['subs'] = entries['subs']
|
1604
2018
|
if 'sectiontitle' in entries:
|
1605
2019
|
pat = entries['sectiontitle']
|
1606
|
-
if not pat or not
|
2020
|
+
if not pat or not is_re(pat):
|
1607
2021
|
raise EAsciiDoc,'malformed [titles] sectiontitle entry'
|
1608
2022
|
Title.pattern = pat
|
1609
2023
|
Title.dump_dict['sectiontitle'] = pat
|
1610
2024
|
if 'blocktitle' in entries:
|
1611
2025
|
pat = entries['blocktitle']
|
1612
|
-
if not pat or not
|
2026
|
+
if not pat or not is_re(pat):
|
1613
2027
|
raise EAsciiDoc,'malformed [titles] blocktitle entry'
|
1614
2028
|
BlockTitle.pattern = pat
|
1615
2029
|
Title.dump_dict['blocktitle'] = pat
|
@@ -1617,7 +2031,7 @@ class Title:
|
|
1617
2031
|
for k in ('sect0','sect1','sect2','sect3','sect4'):
|
1618
2032
|
if k in entries:
|
1619
2033
|
pat = entries[k]
|
1620
|
-
if not pat or not
|
2034
|
+
if not pat or not is_re(pat):
|
1621
2035
|
raise EAsciiDoc,'malformed [titles] %s entry' % k
|
1622
2036
|
Title.dump_dict[k] = pat
|
1623
2037
|
# TODO: Check we have either a Title.pattern or at least one
|
@@ -1628,20 +2042,30 @@ class Title:
|
|
1628
2042
|
dump_section('titles',Title.dump_dict)
|
1629
2043
|
@staticmethod
|
1630
2044
|
def setsectname():
|
1631
|
-
"""
|
1632
|
-
|
1633
|
-
|
1634
|
-
|
1635
|
-
|
1636
|
-
|
1637
|
-
|
1638
|
-
|
1639
|
-
|
1640
|
-
|
1641
|
-
|
1642
|
-
break
|
2045
|
+
"""
|
2046
|
+
Set Title section name:
|
2047
|
+
If the first positional or 'template' attribute is set use it,
|
2048
|
+
next search for section title in [specialsections],
|
2049
|
+
if not found use default 'sect<level>' name.
|
2050
|
+
"""
|
2051
|
+
sectname = AttributeList.attrs.get('1')
|
2052
|
+
if sectname and sectname != 'float':
|
2053
|
+
Title.sectname = sectname
|
2054
|
+
elif 'template' in AttributeList.attrs:
|
2055
|
+
Title.sectname = AttributeList.attrs['template']
|
1643
2056
|
else:
|
1644
|
-
|
2057
|
+
for pat,sect in config.specialsections.items():
|
2058
|
+
mo = re.match(pat,Title.attributes['title'])
|
2059
|
+
if mo:
|
2060
|
+
title = mo.groupdict().get('title')
|
2061
|
+
if title is not None:
|
2062
|
+
Title.attributes['title'] = title.strip()
|
2063
|
+
else:
|
2064
|
+
Title.attributes['title'] = mo.group().strip()
|
2065
|
+
Title.sectname = sect
|
2066
|
+
break
|
2067
|
+
else:
|
2068
|
+
Title.sectname = 'sect%d' % Title.level
|
1645
2069
|
@staticmethod
|
1646
2070
|
def getnumber(level):
|
1647
2071
|
"""Return next section number at section 'level' formatted like
|
@@ -1662,6 +2086,25 @@ class Title:
|
|
1662
2086
|
return number
|
1663
2087
|
|
1664
2088
|
|
2089
|
+
class FloatingTitle(Title):
|
2090
|
+
'''Floated titles are translated differently.'''
|
2091
|
+
@staticmethod
|
2092
|
+
def isnext():
|
2093
|
+
return Title.isnext() and AttributeList.style() == 'float'
|
2094
|
+
@staticmethod
|
2095
|
+
def translate():
|
2096
|
+
assert Lex.next() is FloatingTitle
|
2097
|
+
Title.translate()
|
2098
|
+
Section.set_id()
|
2099
|
+
AttributeList.consume(Title.attributes)
|
2100
|
+
template = 'floatingtitle'
|
2101
|
+
if template in config.sections:
|
2102
|
+
stag,etag = config.section2tags(template,Title.attributes)
|
2103
|
+
writer.write(stag,trace='floating title')
|
2104
|
+
else:
|
2105
|
+
message.warning('missing template section: [%s]' % template)
|
2106
|
+
|
2107
|
+
|
1665
2108
|
class Section:
|
1666
2109
|
"""Static methods and attributes only."""
|
1667
2110
|
endtags = [] # Stack of currently open section (level,endtag) tuples.
|
@@ -1676,7 +2119,7 @@ class Section:
|
|
1676
2119
|
def setlevel(level):
|
1677
2120
|
"""Set document level and write open section close tags up to level."""
|
1678
2121
|
while Section.endtags and Section.endtags[-1][0] >= level:
|
1679
|
-
writer.write(Section.endtags.pop()[1])
|
2122
|
+
writer.write(Section.endtags.pop()[1],trace='section close')
|
1680
2123
|
document.level = level
|
1681
2124
|
@staticmethod
|
1682
2125
|
def gen_id(title):
|
@@ -1688,7 +2131,10 @@ class Section:
|
|
1688
2131
|
NCNameStartChar ::= Letter | '_'
|
1689
2132
|
NameChar ::= Letter | Digit | '.' | '-' | '_' | ':'
|
1690
2133
|
"""
|
1691
|
-
|
2134
|
+
# Replace non-alpha numeric characters in title with underscores and
|
2135
|
+
# convert to lower case.
|
2136
|
+
base_ident = char_encode(re.sub(r'(?u)\W+', '_',
|
2137
|
+
char_decode(title)).strip('_').lower())
|
1692
2138
|
# Prefix the ID name with idprefix attribute or underscore if not
|
1693
2139
|
# defined. Prefix ensures the ID does not clash with existing IDs.
|
1694
2140
|
idprefix = document.attributes.get('idprefix','_')
|
@@ -1706,47 +2152,54 @@ class Section:
|
|
1706
2152
|
ident = base_ident
|
1707
2153
|
i += 1
|
1708
2154
|
@staticmethod
|
2155
|
+
def set_id():
|
2156
|
+
if not document.attributes.get('sectids') is None \
|
2157
|
+
and 'id' not in AttributeList.attrs:
|
2158
|
+
# Generate ids for sections.
|
2159
|
+
AttributeList.attrs['id'] = Section.gen_id(Title.attributes['title'])
|
2160
|
+
@staticmethod
|
1709
2161
|
def translate():
|
1710
2162
|
assert Lex.next() is Title
|
1711
2163
|
prev_sectname = Title.sectname
|
1712
2164
|
Title.translate()
|
1713
2165
|
if Title.level == 0 and document.doctype != 'book':
|
1714
|
-
error('only book doctypes can contain level 0 sections')
|
2166
|
+
message.error('only book doctypes can contain level 0 sections')
|
1715
2167
|
if Title.level > document.level \
|
1716
|
-
and
|
1717
|
-
and prev_sectname in ('
|
1718
|
-
'
|
1719
|
-
error('%s section cannot contain sub-sections' % prev_sectname)
|
2168
|
+
and 'basebackend-docbook' in document.attributes \
|
2169
|
+
and prev_sectname in ('colophon','abstract', \
|
2170
|
+
'dedication','glossary','bibliography'):
|
2171
|
+
message.error('%s section cannot contain sub-sections' % prev_sectname)
|
1720
2172
|
if Title.level > document.level+1:
|
1721
2173
|
# Sub-sections of multi-part book level zero Preface and Appendices
|
1722
2174
|
# are meant to be out of sequence.
|
1723
2175
|
if document.doctype == 'book' \
|
1724
2176
|
and document.level == 0 \
|
1725
2177
|
and Title.level == 2 \
|
1726
|
-
and prev_sectname in ('
|
2178
|
+
and prev_sectname in ('preface','appendix'):
|
1727
2179
|
pass
|
1728
2180
|
else:
|
1729
|
-
warning('section title out of sequence: '
|
2181
|
+
message.warning('section title out of sequence: '
|
1730
2182
|
'expected level %d, got level %d'
|
1731
2183
|
% (document.level+1, Title.level))
|
1732
|
-
|
1733
|
-
and 'id' not in AttributeList.attrs:
|
1734
|
-
# Generate ids for sections.
|
1735
|
-
AttributeList.attrs['id'] = Section.gen_id(Title.attributes['title'])
|
2184
|
+
Section.set_id()
|
1736
2185
|
Section.setlevel(Title.level)
|
1737
|
-
|
2186
|
+
if 'numbered' in document.attributes:
|
2187
|
+
Title.attributes['sectnum'] = Title.getnumber(document.level)
|
2188
|
+
else:
|
2189
|
+
Title.attributes['sectnum'] = ''
|
1738
2190
|
AttributeList.consume(Title.attributes)
|
1739
2191
|
stag,etag = config.section2tags(Title.sectname,Title.attributes)
|
1740
2192
|
Section.savetag(Title.level,etag)
|
1741
|
-
writer.write(stag
|
2193
|
+
writer.write(stag,trace='section open: level %d: %s' %
|
2194
|
+
(Title.level, Title.attributes['title']))
|
1742
2195
|
Section.translate_body()
|
1743
2196
|
@staticmethod
|
1744
2197
|
def translate_body(terminator=Title):
|
1745
2198
|
isempty = True
|
1746
2199
|
next = Lex.next()
|
1747
2200
|
while next and next is not terminator:
|
1748
|
-
if
|
1749
|
-
error('title not permitted in
|
2201
|
+
if isinstance(terminator,DelimitedBlock) and next is Title:
|
2202
|
+
message.error('section title not permitted in delimited block')
|
1750
2203
|
next.translate()
|
1751
2204
|
next = Lex.next()
|
1752
2205
|
isempty = False
|
@@ -1755,8 +2208,8 @@ class Section:
|
|
1755
2208
|
isempty = False
|
1756
2209
|
# Report empty sections if invalid markup will result.
|
1757
2210
|
if isempty:
|
1758
|
-
if document.backend == 'docbook' and Title.sectname != '
|
1759
|
-
error('empty section is not valid')
|
2211
|
+
if document.backend == 'docbook' and Title.sectname != 'index':
|
2212
|
+
message.error('empty section is not valid')
|
1760
2213
|
|
1761
2214
|
class AbstractBlock:
|
1762
2215
|
def __init__(self):
|
@@ -1767,6 +2220,7 @@ class AbstractBlock:
|
|
1767
2220
|
self.name=None # Configuration file section name.
|
1768
2221
|
# Configuration parameters.
|
1769
2222
|
self.delimiter=None # Regular expression matching block delimiter.
|
2223
|
+
self.delimiter_reo=None # Compiled delimiter.
|
1770
2224
|
self.template=None # template section entry.
|
1771
2225
|
self.options=() # options entry list.
|
1772
2226
|
self.presubs=None # presubs/subs entry list.
|
@@ -1778,7 +2232,7 @@ class AbstractBlock:
|
|
1778
2232
|
# Before a block is processed it's attributes (from it's
|
1779
2233
|
# attributes list) are merged with the block configuration parameters
|
1780
2234
|
# (by self.merge_attributes()) resulting in the template substitution
|
1781
|
-
# dictionary (self.attributes) and the block's
|
2235
|
+
# dictionary (self.attributes) and the block's processing parameters
|
1782
2236
|
# (self.parameters).
|
1783
2237
|
self.attributes={}
|
1784
2238
|
# The names of block parameters.
|
@@ -1787,14 +2241,14 @@ class AbstractBlock:
|
|
1787
2241
|
# Leading delimiter match object.
|
1788
2242
|
self.mo=None
|
1789
2243
|
def short_name(self):
|
1790
|
-
""" Return the text following the last dash in the section
|
2244
|
+
""" Return the text following the last dash in the section name."""
|
1791
2245
|
i = self.name.rfind('-')
|
1792
2246
|
if i == -1:
|
1793
2247
|
return self.name
|
1794
2248
|
else:
|
1795
2249
|
return self.name[i+1:]
|
1796
2250
|
def error(self, msg, cursor=None, halt=False):
|
1797
|
-
error('[%s] %s' % (self.name,msg), cursor, halt)
|
2251
|
+
message.error('[%s] %s' % (self.name,msg), cursor, halt)
|
1798
2252
|
def is_conf_entry(self,param):
|
1799
2253
|
"""Return True if param matches an allowed configuration file entry
|
1800
2254
|
name."""
|
@@ -1831,6 +2285,8 @@ class AbstractBlock:
|
|
1831
2285
|
elif k == 'options':
|
1832
2286
|
if isinstance(v,str):
|
1833
2287
|
v = parse_options(v, (), msg % (k,v))
|
2288
|
+
# Merge with existing options.
|
2289
|
+
v = tuple(set(dst.options).union(set(v)))
|
1834
2290
|
copy(dst,k,v)
|
1835
2291
|
elif k in ('subs','presubs','postsubs'):
|
1836
2292
|
# Subs is an alias for presubs.
|
@@ -1839,7 +2295,7 @@ class AbstractBlock:
|
|
1839
2295
|
v = parse_options(v, SUBS_OPTIONS, msg % (k,v))
|
1840
2296
|
copy(dst,k,v)
|
1841
2297
|
elif k == 'delimiter':
|
1842
|
-
if v and
|
2298
|
+
if v and is_re(v):
|
1843
2299
|
copy(dst,k,v)
|
1844
2300
|
else:
|
1845
2301
|
raise EAsciiDoc, msg % (k,v)
|
@@ -1924,14 +2380,16 @@ class AbstractBlock:
|
|
1924
2380
|
raise EAsciiDoc, 'illegal style name: %s' % self.style
|
1925
2381
|
if not self.style in self.styles:
|
1926
2382
|
if not isinstance(self,List): # Lists don't have templates.
|
1927
|
-
warning('[%s] \'%s\' style not in %s' % (
|
2383
|
+
message.warning('[%s] \'%s\' style not in %s' % (
|
1928
2384
|
self.name,self.style,self.styles.keys()))
|
1929
2385
|
# Check all styles for missing templates.
|
1930
2386
|
all_styles_have_template = True
|
1931
2387
|
for k,v in self.styles.items():
|
1932
2388
|
t = v.get('template')
|
1933
2389
|
if t and not t in config.sections:
|
1934
|
-
|
2390
|
+
# Defer check if template name contains attributes.
|
2391
|
+
if not re.search(r'{.+}',t):
|
2392
|
+
message.warning('missing template section: [%s]' % t)
|
1935
2393
|
if not t:
|
1936
2394
|
all_styles_have_template = False
|
1937
2395
|
# Check we have a valid template entry or alternatively that all the
|
@@ -1939,16 +2397,22 @@ class AbstractBlock:
|
|
1939
2397
|
if self.is_conf_entry('template') and not 'skip' in self.options:
|
1940
2398
|
if self.template:
|
1941
2399
|
if not self.template in config.sections:
|
1942
|
-
|
2400
|
+
# Defer check if template name contains attributes.
|
2401
|
+
if not re.search(r'{.+}',self.template):
|
2402
|
+
message.warning('missing template section: [%s]'
|
2403
|
+
% self.template)
|
1943
2404
|
elif not all_styles_have_template:
|
1944
2405
|
if not isinstance(self,List): # Lists don't have templates.
|
1945
|
-
warning('[%s]
|
2406
|
+
message.warning('missing styles templates: [%s]' % self.name)
|
1946
2407
|
def isnext(self):
|
1947
2408
|
"""Check if this block is next in document reader."""
|
1948
2409
|
result = False
|
1949
2410
|
reader.skip_blank_lines()
|
1950
2411
|
if reader.read_next():
|
1951
|
-
|
2412
|
+
if not self.delimiter_reo:
|
2413
|
+
# Cache compiled delimiter optimization.
|
2414
|
+
self.delimiter_reo = re.compile(self.delimiter)
|
2415
|
+
mo = self.delimiter_reo.match(reader.read_next())
|
1952
2416
|
if mo:
|
1953
2417
|
self.mo = mo
|
1954
2418
|
result = True
|
@@ -1967,7 +2431,7 @@ class AbstractBlock:
|
|
1967
2431
|
|
1968
2432
|
1. Copy the default parameters (self.*) to self.parameters.
|
1969
2433
|
self.parameters are used internally to render the current block.
|
1970
|
-
Optional params array of
|
2434
|
+
Optional params array of additional parameters.
|
1971
2435
|
|
1972
2436
|
2. Copy attrs to self.attributes. self.attributes are used for template
|
1973
2437
|
and tag substitution in the current block.
|
@@ -1988,7 +2452,7 @@ class AbstractBlock:
|
|
1988
2452
|
def check_array_parameter(param):
|
1989
2453
|
# Check the parameter is a sequence type.
|
1990
2454
|
if not is_array(self.parameters[param]):
|
1991
|
-
error('malformed presubs attribute: %s' %
|
2455
|
+
message.error('malformed presubs attribute: %s' %
|
1992
2456
|
self.parameters[param])
|
1993
2457
|
# Revert to default value.
|
1994
2458
|
self.parameters[param] = getattr(self,param)
|
@@ -2007,14 +2471,21 @@ class AbstractBlock:
|
|
2007
2471
|
# Load the selected style attributes.
|
2008
2472
|
posattrs = self.posattrs
|
2009
2473
|
if posattrs and posattrs[0] == 'style':
|
2474
|
+
# Positional attribute style has highest precedence.
|
2010
2475
|
style = self.attributes.get('1')
|
2011
2476
|
else:
|
2012
2477
|
style = None
|
2013
2478
|
if not style:
|
2479
|
+
# Use explicit style attribute, fall back to default style.
|
2014
2480
|
style = self.attributes.get('style',self.style)
|
2015
2481
|
if style:
|
2016
2482
|
if not is_name(style):
|
2017
|
-
|
2483
|
+
message.error('illegal style name: %s' % style)
|
2484
|
+
style = self.style
|
2485
|
+
# Lists have implicit styles and do their own style checks.
|
2486
|
+
elif style not in self.styles and not isinstance(self,List):
|
2487
|
+
message.warning('missing style: [%s]: %s' % (self.name,style))
|
2488
|
+
style = self.style
|
2018
2489
|
if style in self.styles:
|
2019
2490
|
self.attributes['style'] = style
|
2020
2491
|
for k,v in self.styles[style].items():
|
@@ -2043,7 +2514,7 @@ class AbstractBlocks:
|
|
2043
2514
|
self.current=None
|
2044
2515
|
self.blocks = [] # List of Block objects.
|
2045
2516
|
self.default = None # Default Block.
|
2046
|
-
self.
|
2517
|
+
self.delimiters = None # Combined delimiters regular expression.
|
2047
2518
|
def load(self,sections):
|
2048
2519
|
"""Load block definition from 'sections' dictionary."""
|
2049
2520
|
for k in sections.keys():
|
@@ -2078,12 +2549,12 @@ class AbstractBlocks:
|
|
2078
2549
|
b.validate()
|
2079
2550
|
if b.delimiter:
|
2080
2551
|
delimiters.append(b.delimiter)
|
2081
|
-
self.
|
2552
|
+
self.delimiters = re_join(delimiters)
|
2082
2553
|
|
2083
2554
|
class Paragraph(AbstractBlock):
|
2084
2555
|
def __init__(self):
|
2085
2556
|
AbstractBlock.__init__(self)
|
2086
|
-
self.text=None
|
2557
|
+
self.text=None # Text in first line of paragraph.
|
2087
2558
|
def load(self,name,entries):
|
2088
2559
|
AbstractBlock.load(self,name,entries)
|
2089
2560
|
def dump(self):
|
@@ -2103,23 +2574,22 @@ class Paragraph(AbstractBlock):
|
|
2103
2574
|
AttributeList.consume(attrs)
|
2104
2575
|
self.merge_attributes(attrs)
|
2105
2576
|
reader.read() # Discard (already parsed item first line).
|
2106
|
-
body = reader.read_until(
|
2107
|
-
+ r'|' + tables.delimiter
|
2108
|
-
+ r'|' + tables_OLD.delimiter
|
2109
|
-
+ r'|' + AttributeList.pattern
|
2110
|
-
)
|
2577
|
+
body = reader.read_until(paragraphs.terminators)
|
2111
2578
|
body = [self.text] + list(body)
|
2112
2579
|
presubs = self.parameters.presubs
|
2113
2580
|
postsubs = self.parameters.postsubs
|
2114
|
-
|
2581
|
+
if document.attributes.get('plaintext') is None:
|
2582
|
+
body = Lex.set_margin(body) # Move body to left margin.
|
2115
2583
|
body = Lex.subs(body,presubs)
|
2584
|
+
template = self.parameters.template
|
2585
|
+
template = subs_attrs(template,attrs)
|
2586
|
+
stag = config.section2tags(template, self.attributes,skipend=True)[0]
|
2116
2587
|
if self.parameters.filter:
|
2117
2588
|
body = filter_lines(self.parameters.filter,body,self.attributes)
|
2118
2589
|
body = Lex.subs(body,postsubs)
|
2119
|
-
|
2120
|
-
stag,etag = config.section2tags(template, self.attributes)
|
2590
|
+
etag = config.section2tags(template, self.attributes,skipstart=True)[1]
|
2121
2591
|
# Write start tag, content, end tag.
|
2122
|
-
writer.write(dovetail_tags(stag,body,etag))
|
2592
|
+
writer.write(dovetail_tags(stag,body,etag),trace='paragraph')
|
2123
2593
|
|
2124
2594
|
class Paragraphs(AbstractBlocks):
|
2125
2595
|
"""List of paragraph definitions."""
|
@@ -2127,6 +2597,15 @@ class Paragraphs(AbstractBlocks):
|
|
2127
2597
|
PREFIX = 'paradef-'
|
2128
2598
|
def __init__(self):
|
2129
2599
|
AbstractBlocks.__init__(self)
|
2600
|
+
self.terminators=None # List of compiled re's.
|
2601
|
+
def initialize(self):
|
2602
|
+
self.terminators = [
|
2603
|
+
re.compile(r'^\+$|^$'),
|
2604
|
+
re.compile(AttributeList.pattern),
|
2605
|
+
re.compile(blocks.delimiters),
|
2606
|
+
re.compile(tables.delimiters),
|
2607
|
+
re.compile(tables_OLD.delimiters),
|
2608
|
+
]
|
2130
2609
|
def load(self,sections):
|
2131
2610
|
AbstractBlocks.load(self,sections)
|
2132
2611
|
def validate(self):
|
@@ -2139,9 +2618,11 @@ class Paragraphs(AbstractBlocks):
|
|
2139
2618
|
self.blocks.remove(b)
|
2140
2619
|
break
|
2141
2620
|
else:
|
2142
|
-
raise EAsciiDoc,'missing [paradef-default]
|
2621
|
+
raise EAsciiDoc,'missing section: [paradef-default]'
|
2143
2622
|
|
2144
2623
|
class List(AbstractBlock):
|
2624
|
+
NUMBER_STYLES= ('arabic','loweralpha','upperalpha','lowerroman',
|
2625
|
+
'upperroman')
|
2145
2626
|
def __init__(self):
|
2146
2627
|
AbstractBlock.__init__(self)
|
2147
2628
|
self.CONF_ENTRIES += ('type','tags')
|
@@ -2155,8 +2636,8 @@ class List(AbstractBlock):
|
|
2155
2636
|
self.text=None # Text in first line of list item.
|
2156
2637
|
self.index=None # Matched delimiter 'index' group (numbered lists).
|
2157
2638
|
self.type=None # List type ('numbered','bulleted','labeled').
|
2158
|
-
self.
|
2159
|
-
self.number_style=None
|
2639
|
+
self.ordinal=None # Current list item ordinal number (1..)
|
2640
|
+
self.number_style=None # Current numbered list style ('arabic'..)
|
2160
2641
|
def load(self,name,entries):
|
2161
2642
|
AbstractBlock.load(self,name,entries)
|
2162
2643
|
def dump(self):
|
@@ -2183,119 +2664,77 @@ class List(AbstractBlock):
|
|
2183
2664
|
assert self.type == 'labeled'
|
2184
2665
|
entrytag = subs_tag(self.tag.entry, self.attributes)
|
2185
2666
|
labeltag = subs_tag(self.tag.label, self.attributes)
|
2186
|
-
writer.write(entrytag[0])
|
2187
|
-
writer.write(labeltag[0])
|
2667
|
+
writer.write(entrytag[0],trace='list entry open')
|
2668
|
+
writer.write(labeltag[0],trace='list label open')
|
2188
2669
|
# Write labels.
|
2189
2670
|
while Lex.next() is self:
|
2190
2671
|
reader.read() # Discard (already parsed item first line).
|
2191
2672
|
writer.write_tag(self.tag.term, [self.label],
|
2192
|
-
self.presubs, self.attributes)
|
2673
|
+
self.presubs, self.attributes,trace='list term')
|
2193
2674
|
if self.text: break
|
2194
|
-
writer.write(labeltag[1])
|
2675
|
+
writer.write(labeltag[1],trace='list label close')
|
2195
2676
|
# Write item text.
|
2196
2677
|
self.translate_item()
|
2197
|
-
writer.write(entrytag[1])
|
2198
|
-
def iscontinued(self):
|
2199
|
-
if reader.read_next() == '+':
|
2200
|
-
reader.read() # Discard.
|
2201
|
-
return True
|
2202
|
-
else:
|
2203
|
-
return False
|
2678
|
+
writer.write(entrytag[1],trace='list entry close')
|
2204
2679
|
def translate_item(self):
|
2205
|
-
if lists.listblock:
|
2206
|
-
self.translate_item_2()
|
2207
|
-
else:
|
2208
|
-
self.translate_item_1()
|
2209
|
-
def translate_item_1(self):
|
2210
|
-
"""Translation for '+' style list continuation."""
|
2211
2680
|
if self.type == 'callout':
|
2212
|
-
self.attributes['coids'] = calloutmap.calloutids(self.
|
2681
|
+
self.attributes['coids'] = calloutmap.calloutids(self.ordinal)
|
2213
2682
|
itemtag = subs_tag(self.tag.item, self.attributes)
|
2214
|
-
writer.write(itemtag[0])
|
2215
|
-
|
2216
|
-
|
2217
|
-
|
2218
|
-
|
2219
|
-
|
2220
|
-
|
2221
|
-
|
2222
|
-
continued = True
|
2223
|
-
else:
|
2224
|
-
# Write ItemText.
|
2225
|
-
text = reader.read_until(
|
2226
|
-
lists.delimiter + r'|^\+$|^$|' + blocks.delimiter
|
2227
|
-
+ r'|' + tables.delimiter
|
2228
|
-
+ r'|' + tables_OLD.delimiter
|
2229
|
-
+ r'|' + AttributeList.pattern
|
2230
|
-
)
|
2231
|
-
if self.text is not None:
|
2232
|
-
text = [self.text] + list(text)
|
2233
|
-
if text:
|
2234
|
-
writer.write_tag(self.tag.text, text, self.presubs, self.attributes)
|
2235
|
-
continued = self.iscontinued()
|
2683
|
+
writer.write(itemtag[0],trace='list item open')
|
2684
|
+
# Write ItemText.
|
2685
|
+
text = reader.read_until(lists.terminators)
|
2686
|
+
if self.text:
|
2687
|
+
text = [self.text] + list(text)
|
2688
|
+
if text:
|
2689
|
+
writer.write_tag(self.tag.text, text, self.presubs, self.attributes,trace='list text')
|
2690
|
+
# Process explicit and implicit list item continuations.
|
2236
2691
|
while True:
|
2237
|
-
|
2238
|
-
|
2692
|
+
continuation = reader.read_next() == '+'
|
2693
|
+
if continuation: reader.read() # Discard continuation line.
|
2694
|
+
while Lex.next() in (BlockTitle,AttributeList):
|
2695
|
+
# Consume continued element title and attributes.
|
2239
2696
|
Lex.next().translate()
|
2240
|
-
|
2241
|
-
|
2242
|
-
break
|
2243
|
-
elif isinstance(next,List):
|
2244
|
-
next.translate()
|
2245
|
-
elif isinstance(next,Paragraph) and 'listelement' in next.options:
|
2246
|
-
next.translate()
|
2247
|
-
elif continued:
|
2248
|
-
if next is Title or next is BlockTitle:
|
2249
|
-
error('title not allowed in list item continuation')
|
2250
|
-
next.translate()
|
2251
|
-
else:
|
2697
|
+
if not continuation and BlockTitle.title:
|
2698
|
+
# Titled elements terminate the list.
|
2252
2699
|
break
|
2253
|
-
continued = self.iscontinued()
|
2254
|
-
writer.write(itemtag[1])
|
2255
|
-
def translate_item_2(self):
|
2256
|
-
"""Translation for List block style lists."""
|
2257
|
-
if self.type == 'callout':
|
2258
|
-
self.attributes['coids'] = calloutmap.calloutids(self.listindex)
|
2259
|
-
itemtag = subs_tag(self.tag.item, self.attributes)
|
2260
|
-
writer.write(itemtag[0])
|
2261
|
-
if self.text or reader.read_next():
|
2262
|
-
# Write ItemText.
|
2263
|
-
text = reader.read_until(
|
2264
|
-
lists.delimiter + r'|^$|' + blocks.delimiter
|
2265
|
-
+ r'|' + tables.delimiter
|
2266
|
-
+ r'|' + tables_OLD.delimiter
|
2267
|
-
+ r'|' + AttributeList.pattern
|
2268
|
-
)
|
2269
|
-
if self.text is not None:
|
2270
|
-
text = [self.text] + list(text)
|
2271
|
-
if text:
|
2272
|
-
writer.write_tag(self.tag.text, text, self.presubs, self.attributes)
|
2273
|
-
while True:
|
2274
|
-
# Allow attribute list to precede continued list item element.
|
2275
|
-
while Lex.next() is AttributeList:
|
2276
|
-
Lex.next().translate()
|
2277
2700
|
next = Lex.next()
|
2278
2701
|
if next in lists.open:
|
2279
2702
|
break
|
2280
|
-
elif next is lists.listblock:
|
2281
|
-
break
|
2282
2703
|
elif isinstance(next,List):
|
2283
2704
|
next.translate()
|
2284
2705
|
elif isinstance(next,Paragraph) and 'listelement' in next.options:
|
2285
2706
|
next.translate()
|
2286
|
-
elif
|
2287
|
-
|
2288
|
-
|
2707
|
+
elif continuation:
|
2708
|
+
# This is where continued elements are processed.
|
2709
|
+
if next is Title:
|
2710
|
+
message.error('section title not allowed in list item',halt=True)
|
2289
2711
|
next.translate()
|
2290
2712
|
else:
|
2291
2713
|
break
|
2292
|
-
writer.write(itemtag[1])
|
2714
|
+
writer.write(itemtag[1],trace='list item close')
|
2715
|
+
|
2716
|
+
@staticmethod
|
2717
|
+
def calc_style(index):
|
2718
|
+
"""Return the numbered list style ('arabic'...) of the list item index.
|
2719
|
+
Return None if unrecognized style."""
|
2720
|
+
if re.match(r'^\d+[\.>]$', index):
|
2721
|
+
style = 'arabic'
|
2722
|
+
elif re.match(r'^[ivx]+\)$', index):
|
2723
|
+
style = 'lowerroman'
|
2724
|
+
elif re.match(r'^[IVX]+\)$', index):
|
2725
|
+
style = 'upperroman'
|
2726
|
+
elif re.match(r'^[a-z]\.$', index):
|
2727
|
+
style = 'loweralpha'
|
2728
|
+
elif re.match(r'^[A-Z]\.$', index):
|
2729
|
+
style = 'upperalpha'
|
2730
|
+
else:
|
2731
|
+
assert False
|
2732
|
+
return style
|
2293
2733
|
|
2294
2734
|
@staticmethod
|
2295
|
-
def
|
2296
|
-
"""
|
2297
|
-
|
2298
|
-
NOTE: 'i' and 'I' return (1,'lowerroman') and (1,'upperroman')."""
|
2735
|
+
def calc_index(index,style):
|
2736
|
+
"""Return the ordinal number of (1...) of the list item index
|
2737
|
+
for the given list style."""
|
2299
2738
|
def roman_to_int(roman):
|
2300
2739
|
roman = roman.lower()
|
2301
2740
|
digits = {'i':1,'v':5,'x':10}
|
@@ -2308,44 +2747,35 @@ class List(AbstractBlock):
|
|
2308
2747
|
else:
|
2309
2748
|
result += digit
|
2310
2749
|
return result
|
2311
|
-
|
2312
|
-
|
2750
|
+
index = index[:-1]
|
2751
|
+
if style == 'arabic':
|
2313
2752
|
ordinal = int(index)
|
2314
|
-
elif
|
2315
|
-
style = 'lowerroman'
|
2753
|
+
elif style == 'lowerroman':
|
2316
2754
|
ordinal = roman_to_int(index)
|
2317
|
-
elif
|
2318
|
-
style = 'upperroman'
|
2755
|
+
elif style == 'upperroman':
|
2319
2756
|
ordinal = roman_to_int(index)
|
2320
|
-
elif
|
2321
|
-
style = 'loweralpha'
|
2757
|
+
elif style == 'loweralpha':
|
2322
2758
|
ordinal = ord(index) - ord('a') + 1
|
2323
|
-
elif
|
2324
|
-
style = 'upperalpha'
|
2759
|
+
elif style == 'upperalpha':
|
2325
2760
|
ordinal = ord(index) - ord('A') + 1
|
2326
2761
|
else:
|
2327
|
-
|
2328
|
-
|
2329
|
-
return (style,ordinal)
|
2762
|
+
assert False
|
2763
|
+
return ordinal
|
2330
2764
|
|
2331
2765
|
def check_index(self):
|
2332
|
-
"""
|
2333
|
-
document (self.index) and check the number style is the same as
|
2766
|
+
"""Check calculated self.ordinal (1,2,...) against the item number
|
2767
|
+
in the document (self.index) and check the number style is the same as
|
2334
2768
|
the first item (self.number_style)."""
|
2335
2769
|
assert self.type in ('numbered','callout')
|
2336
2770
|
if self.index:
|
2337
|
-
style
|
2338
|
-
if style
|
2339
|
-
|
2340
|
-
|
2341
|
-
|
2342
|
-
|
2343
|
-
|
2344
|
-
|
2345
|
-
(self.number_style,style), offset=1)
|
2346
|
-
if ordinal != self.listindex:
|
2347
|
-
warning('list item index: expected %s got %s' %
|
2348
|
-
(self.listindex,ordinal), offset=1)
|
2771
|
+
style = self.calc_style(self.index)
|
2772
|
+
if style != self.number_style:
|
2773
|
+
message.warning('list item style: expected %s got %s' %
|
2774
|
+
(self.number_style,style), offset=1)
|
2775
|
+
ordinal = self.calc_index(self.index,style)
|
2776
|
+
if ordinal != self.ordinal:
|
2777
|
+
message.warning('list item index: expected %s got %s' %
|
2778
|
+
(self.ordinal,ordinal), offset=1)
|
2349
2779
|
|
2350
2780
|
def check_tags(self):
|
2351
2781
|
""" Check that all necessary tags are present. """
|
@@ -2358,20 +2788,23 @@ class List(AbstractBlock):
|
|
2358
2788
|
def translate(self):
|
2359
2789
|
AbstractBlock.translate(self)
|
2360
2790
|
if self.short_name() in ('bibliography','glossary','qanda'):
|
2361
|
-
-            deprecated('old %s list syntax' % self.short_name())
+            message.deprecated('old %s list syntax' % self.short_name())
         lists.open.append(self)
         attrs = self.mo.groupdict().copy()
         for k in ('label','text','index'):
             if k in attrs: del attrs[k]
         if self.index:
             # Set the numbering style from first list item.
-            style = self.
-            if style:
-                attrs['style'] = style
+            attrs['style'] = self.calc_style(self.index)
         BlockTitle.consume(attrs)
         AttributeList.consume(attrs)
-        self.number_style = attrs.get('style')
         self.merge_attributes(attrs,['tags'])
+        if self.type in ('numbered','callout'):
+            self.number_style = self.attributes.get('style')
+            if self.number_style not in self.NUMBER_STYLES:
+                message.error('illegal numbered list style: %s' % self.number_style)
+                # Fall back to default style.
+                self.attributes['style'] = self.number_style = self.style
         self.tag = lists.tags[self.parameters.tags]
         self.check_tags()
         if 'width' in self.attributes:
@@ -2383,14 +2816,15 @@ class List(AbstractBlock):
                 self.attributes['labelwidth'] = str(labelwidth)
                 self.attributes['itemwidth'] = str(100-labelwidth)
             else:
-                self.error('illegal attribute value:
+                self.error('illegal attribute value: width="%s"' % v)
         stag,etag = subs_tag(self.tag.list, self.attributes)
         if stag:
-            writer.write(stag)
-        self.
-
-
-
+            writer.write(stag,trace='list open')
+        self.ordinal = 0
+        # Process list till list syntax changes or there is a new title.
+        while Lex.next() is self and not BlockTitle.title:
+            self.ordinal += 1
+            document.attributes['listindex'] = str(self.ordinal)
            if self.type in ('numbered','callout'):
                self.check_index()
            if self.type in ('bulleted','numbered','callout'):
@@ -2401,13 +2835,13 @@ class List(AbstractBlock):
            else:
                raise AssertionError,'illegal [%s] list type' % self.name
        if etag:
-            writer.write(etag)
+            writer.write(etag,trace='list close')
        if self.type == 'callout':
-            calloutmap.validate(self.
+            calloutmap.validate(self.ordinal)
            calloutmap.listclose()
        lists.open.pop()
        if len(lists.open):
-            document.attributes['listindex'] = str(lists.open[-1].
+            document.attributes['listindex'] = str(lists.open[-1].ordinal)

 class Lists(AbstractBlocks):
     """List of List objects."""
@@ -2417,9 +2851,18 @@ class Lists(AbstractBlocks):
     TAGS = ('list', 'entry','item','text', 'label','term')
     def __init__(self):
         AbstractBlocks.__init__(self)
-        self.open = []
-        self.listblock = None # Current list is in list block.
+        self.open = []  # A stack of the current and parent lists.
         self.tags={}    # List tags dictionary. Each entry is a tags AttrDict.
+        self.terminators=None    # List of compiled re's.
+    def initialize(self):
+        self.terminators = [
+                re.compile(r'^\+$|^$'),
+                re.compile(AttributeList.pattern),
+                re.compile(lists.delimiters),
+                re.compile(blocks.delimiters),
+                re.compile(tables.delimiters),
+                re.compile(tables_OLD.delimiters),
+            ]
     def load(self,sections):
         AbstractBlocks.load(self,sections)
         self.load_tags(sections)
@@ -2438,7 +2881,7 @@ class Lists(AbstractBlocks):
         parse_entries(sections.get(section,()),d)
         for k in d.keys():
             if k not in self.TAGS:
-                warning('[%s] contains illegal list tag: %s' %
+                message.warning('[%s] contains illegal list tag: %s' %
                         (section,k))
         self.tags[name] = d
     def validate(self):
@@ -2467,32 +2910,30 @@ class DelimitedBlock(AbstractBlock):
         return AbstractBlock.isnext(self)
     def translate(self):
         AbstractBlock.translate(self)
-        if 'list' in self.options:
-            lists.listblock = self
         reader.read()   # Discard delimiter.
         attrs = {}
-
-        if lists.listblock is not self:
+        if self.short_name() != 'comment':
             BlockTitle.consume(attrs)
             AttributeList.consume(attrs)
         self.merge_attributes(attrs)
         options = self.parameters.options
-        if
-            unsafe_error('Backend Block')
-            # Discard block body.
+        if 'skip' in options:
             reader.read_until(self.delimiter,same_file=True)
-        elif '
-
+        elif safe() and self.name == 'blockdef-backend':
+            message.unsafe('Backend Block')
             reader.read_until(self.delimiter,same_file=True)
         else:
             template = self.parameters.template
-
-
-
-
+            template = subs_attrs(template,attrs)
+            name = self.short_name()+' block'
+            if 'sectionbody' in options:
+                # The body is treated like a section body.
+                stag,etag = config.section2tags(template,self.attributes)
+                writer.write(stag,trace=name+' open')
                 Section.translate_body(self)
-                writer.write(etag)
+                writer.write(etag,trace=name+' close')
             else:
+                stag = config.section2tags(template,self.attributes,skipend=True)[0]
                 body = reader.read_until(self.delimiter,same_file=True)
                 presubs = self.parameters.presubs
                 postsubs = self.parameters.postsubs
@@ -2501,9 +2942,9 @@ class DelimitedBlock(AbstractBlock):
                 body = filter_lines(self.parameters.filter,body,self.attributes)
                 body = Lex.subs(body,postsubs)
                 # Write start tag, content, end tag.
-
-
-
+                etag = config.section2tags(template,self.attributes,skipstart=True)[1]
+                writer.write(dovetail_tags(stag,body,etag),trace=name)
+                trace(self.short_name()+' block close',etag)
         if reader.eof():
             self.error('missing closing delimiter',self.start)
         else:
@@ -2524,23 +2965,37 @@ class DelimitedBlocks(AbstractBlocks):

 class Column:
     """Table column."""
-    def __init__(self, width=None,
-        self.width=width or '1'
-        self.
-        self.style=style    # Style name or None.
+    def __init__(self, width=None, align_spec=None, style=None):
+        self.width = width or '1'
+        self.halign, self.valign = Table.parse_align_spec(align_spec)
+        self.style = style      # Style name or None.
         # Calculated attribute values.
-        self.
-        self.
-
+        self.abswidth = None    # 1..   (page units).
+        self.pcwidth = None     # 1..99 (percentage).
+
+class Cell:
+    def __init__(self, data, span_spec=None, align_spec=None, style=None):
+        self.data = data
+        self.span, self.vspan = Table.parse_span_spec(span_spec)
+        self.halign, self.valign = Table.parse_align_spec(align_spec)
+        self.style = style
+    def __repr__(self):
+        return '<Cell: %d.%d %s.%s %s "%s">' % (
+                self.span, self.vspan,
+                self.halign, self.valign,
+                self.style or '',
+                self.data)

 class Table(AbstractBlock):
-
+    ALIGN = {'<':'left', '>':'right', '^':'center'}
+    VALIGN = {'<':'top', '>':'bottom', '^':'middle'}
     FORMATS = ('psv','csv','dsv')
     SEPARATORS = dict(
-
-
-
-
+        csv=',',
+        dsv=r':|\n',
+        # The count and align group matches are not exact.
+        psv=r'((?<!\S)((?P<span>[\d.]+)(?P<op>[*+]))?(?P<align>[<\^>.]{,3})?(?P<style>[a-z])?)?\|'
+    )
     def __init__(self):
         AbstractBlock.__init__(self)
         self.CONF_ENTRIES += ('format','tags','separator')
@@ -2551,8 +3006,36 @@ class Table(AbstractBlock):
         # Calculated parameters.
         self.abswidth=None   # 1..   (page units).
         self.pcwidth = None  # 1..99 (percentage).
-        self.rows=[]    # Parsed rows, each row is a list of
+        self.rows=[]         # Parsed rows, each row is a list of Cells.
         self.columns=[]      # List of Columns.
+    @staticmethod
+    def parse_align_spec(align_spec):
+        """
+        Parse AsciiDoc cell alignment specifier and return 2-tuple with
+        horizontal and vertical alignment names. Unspecified alignments
+        are set to None.
+        """
+        result = (None, None)
+        if align_spec:
+            mo = re.match(r'^([<\^>])?(\.([<\^>]))?$', align_spec)
+            if mo:
+                result = (Table.ALIGN.get(mo.group(1)),
+                          Table.VALIGN.get(mo.group(3)))
+        return result
+    @staticmethod
+    def parse_span_spec(span_spec):
+        """
+        Parse AsciiDoc cell span specifier and return 2-tuple with horizontal
+        and vertical span counts. Set default values (1,1) if not
+        specified.
+        """
+        result = (None, None)
+        if span_spec:
+            mo = re.match(r'^(\d+)?(\.(\d+))?$', span_spec)
+            if mo:
+                result = (mo.group(1) and int(mo.group(1)),
+                          mo.group(3) and int(mo.group(3)))
+        return (result[0] or 1, result[1] or 1)
     def load(self,name,entries):
         AbstractBlock.load(self,name,entries)
     def dump(self):
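The two static methods added above drive the new cell and column alignment/span syntax. Purely as an illustration (not part of the diff), here is the same parsing logic as a standalone sketch, showing what a couple of specifiers resolve to:

# Illustrative sketch (not part of the diff): standalone version of the
# align/span specifier parsing introduced above.
import re

ALIGN = {'<': 'left', '>': 'right', '^': 'center'}
VALIGN = {'<': 'top', '>': 'bottom', '^': 'middle'}

def parse_align_spec(align_spec):
    # '<', '^', '>' pick horizontal alignment; a '.'-prefixed second
    # character picks vertical alignment.
    result = (None, None)
    if align_spec:
        mo = re.match(r'^([<\^>])?(\.([<\^>]))?$', align_spec)
        if mo:
            result = (ALIGN.get(mo.group(1)), VALIGN.get(mo.group(3)))
    return result

def parse_span_spec(span_spec):
    # '<cols>.<rows>' span counts, defaulting to (1, 1).
    result = (None, None)
    if span_spec:
        mo = re.match(r'^(\d+)?(\.(\d+))?$', span_spec)
        if mo:
            result = (mo.group(1) and int(mo.group(1)),
                      mo.group(3) and int(mo.group(3)))
    return (result[0] or 1, result[1] or 1)

print parse_align_spec('^.>')   # ('center', 'bottom')
print parse_span_spec('2.3')    # (2, 3)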
@@ -2614,7 +3097,7 @@ class Table(AbstractBlock):
                 self.error('illegal csv separator=%s' % separator)
                 separator = ','
         else:
-            if not
+            if not is_re(separator):
                 self.error('illegal regular expression: separator=%s' %
                         separator)
         self.parameters.format = format
@@ -2640,14 +3123,15 @@ class Table(AbstractBlock):
         else:
             self.error('missing style: %s*' % prefix)
             return None
-    def parse_cols(self,cols):
+    def parse_cols(self, cols, halign, valign):
         """
-        Build list of column objects from table 'cols'
+        Build list of column objects from table 'cols', 'halign' and 'valign'
+        attributes.
         """
         # [<multiplier>*][<align>][<width>][<style>]
-        COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[
+        COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[<\^>.]{,3})?(?P<width>\d+%?)?(?P<style>[a-z]\w*)?$'
         # [<multiplier>*][<width>][<align>][<style>]
-        COLS_RE2 = r'^((?P<count>\d+)\*)?(?P<width>\d+%?)?(?P<align>[
+        COLS_RE2 = r'^((?P<count>\d+)\*)?(?P<width>\d+%?)?(?P<align>[<\^>.]{,3})?(?P<style>[a-z]\w*)?$'
         reo1 = re.compile(COLS_RE1)
         reo2 = re.compile(COLS_RE2)
         cols = str(cols)
@@ -2668,6 +3152,10 @@ class Table(AbstractBlock):
                 )
             else:
                 self.error('illegal column spec: %s' % col,self.start)
+        # Set column (and indirectly cell) default alignments.
+        for col in self.columns:
+            col.halign = col.halign or halign or document.attributes.get('halign') or 'left'
+            col.valign = col.valign or valign or document.attributes.get('valign') or 'top'
         # Validate widths and calculate missing widths.
         n = 0; percents = 0; props = 0
         for col in self.columns:
@@ -2696,12 +3184,15 @@ class Table(AbstractBlock):
         # Calculate column alignment and absolute and percent width values.
         percents = 0
         for col in self.columns:
-            col.colalign = Table.ALIGNMENTS[col.align]
             if pcunits:
                 col.pcwidth = float(col.width[:-1])
             else:
                 col.pcwidth = (float(col.width)/props)*100
-            col.abswidth =
+            col.abswidth = self.abswidth * (col.pcwidth/100)
+            if config.pageunits in ('cm','mm','in','em'):
+                col.abswidth = '%.2f' % round(col.abswidth,2)
+            else:
+                col.abswidth = '%d' % round(col.abswidth)
             percents += col.pcwidth
             col.pcwidth = int(col.pcwidth)
         if round(percents) > 100:
@@ -2713,40 +3204,74 @@ class Table(AbstractBlock):
         Generate column related substitution attributes.
         """
         cols = []
-        i =
+        i = 1
         for col in self.columns:
-            i += 1
             colspec = self.get_tags(col.style).colspec
             if colspec:
-                self.attributes['
+                self.attributes['halign'] = col.halign
+                self.attributes['valign'] = col.valign
                 self.attributes['colabswidth'] = col.abswidth
                 self.attributes['colpcwidth'] = col.pcwidth
-                self.attributes['colnumber'] = str(i
+                self.attributes['colnumber'] = str(i)
                 s = subs_attrs(colspec, self.attributes)
                 if not s:
-                    warning('colspec dropped: contains undefined attribute')
+                    message.warning('colspec dropped: contains undefined attribute')
                 else:
                     cols.append(s)
+            i += 1
         if cols:
             self.attributes['colspecs'] = writer.newline.join(cols)
     def parse_rows(self, text):
         """
         Parse the table source text into self.rows (a list of rows, each row
-        is a list of
+        is a list of Cells.
         """
+        reserved = {}  # Cols reserved by rowspans (indexed by row number).
         if self.parameters.format in ('psv','dsv'):
+            ri = 0  # Current row index 0..
             cells = self.parse_psv_dsv(text)
-
-
-
+            row = []
+            ci = 0  # Column counter 0..colcount
+            for cell in cells:
+                colcount = len(self.columns) - reserved.get(ri,0)
+                if cell.vspan > 1:
+                    # Reserve spanned columns from ensuing rows.
+                    for i in range(1, cell.vspan):
+                        reserved[ri+i] = reserved.get(ri+i, 0) + cell.span
+                ci += cell.span
+                if ci <= colcount:
+                    row.append(cell)
+                if ci >= colcount:
+                    self.rows.append(row)
+                    ri += 1
+                    row = []
+                    ci = 0
+                if ci > colcount:
+                    message.warning('table row %d: span exceeds number of columns'
+                            % ri)
        elif self.parameters.format == 'csv':
-            self.parse_csv(text)
+            self.rows = self.parse_csv(text)
        else:
            assert True,'illegal table format'
+        # Check that all row spans match.
+        for ri,row in enumerate(self.rows):
+            row_span = 0
+            for cell in row:
+                row_span += cell.span
+            row_span += reserved.get(ri,0)
+            if ri == 0:
+                header_span = row_span
+            if row_span < header_span:
+                message.warning('table row %d: does not span all columns' % (ri+1))
+            if row_span > header_span:
+                message.warning('table row %d: exceeds columns span' % (ri+1))
+        # Check that no row spans exceed the number of rows.
+        if len([x for x in reserved.keys() if x >= len(self.rows)]) > 0:
+            message.warning('one or more cell spans exceed the available rows')
    def subs_rows(self, rows, rowtype='body'):
        """
        Return a string of output markup from a list of rows, each row
-        is a list of raw
+        is a list of raw data text.
        """
        tags = tables.tags[self.parameters.tags]
        if rowtype == 'header':
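The new `parse_rows()` loop above reserves columns in later rows for cells whose `vspan` is greater than one. As an illustration only (not part of the diff, with `Cell` reduced to `(span, vspan)` pairs), the bookkeeping looks roughly like this:

# Illustrative sketch (not part of the diff): rowspan reservation only,
# without the warning and style handling of the real method.
def build_rows(cells, colcount_total):
    reserved = {}           # Columns already claimed by earlier rowspans.
    rows, row = [], []
    ri = ci = 0
    for span, vspan in cells:
        colcount = colcount_total - reserved.get(ri, 0)
        if vspan > 1:
            # Reserve spanned columns from ensuing rows.
            for i in range(1, vspan):
                reserved[ri+i] = reserved.get(ri+i, 0) + span
        ci += span
        row.append((span, vspan))
        if ci >= colcount:
            rows.append(row)
            ri += 1
            row, ci = [], 0
    return rows

# A 3-column table whose first cell spans 2 rows: row 2 needs only 2 cells.
print build_rows([(1,2),(1,1),(1,1), (1,1),(1,1)], 3)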
@@ -2764,37 +3289,37 @@ class Table(AbstractBlock):
         return writer.newline.join(result)
     def subs_row(self, row, rowtype):
         """
-        Substitute the list of
+        Substitute the list of Cells using the data tag.
         Returns a list of marked up table cell elements.
         """
-        if len(row) < len(self.columns):
-            warning('fewer row data items than table columns')
-        if len(row) > len(self.columns):
-            warning('more row data items than table columns')
         result = []
-
+        i = 0
+        for cell in row:
+            if i >= len(self.columns):
+                break   # Skip cells outside the header width.
            col = self.columns[i]
-
-            self.attributes['
+            self.attributes['halign'] = cell.halign or col.halign
+            self.attributes['valign'] = cell.valign or col.valign
            self.attributes['colabswidth'] = col.abswidth
            self.attributes['colpcwidth'] = col.pcwidth
            self.attributes['colnumber'] = str(i+1)
-
-
-
-
-
-            dtag = tags.bodydata
+            self.attributes['colspan'] = str(cell.span)
+            self.attributes['colstart'] = self.attributes['colnumber']
+            self.attributes['colend'] = str(i+cell.span)
+            self.attributes['rowspan'] = str(cell.vspan)
+            self.attributes['morerows'] = str(cell.vspan-1)
            # Fill missing column data with blanks.
-            if i > len(
+            if i > len(self.columns) - 1:
                data = ''
            else:
-                data =
-            # Format header cells with the table style not column style.
+                data = cell.data
            if rowtype == 'header':
-
+                # Use table style unless overridden by cell style.
+                colstyle = cell.style
            else:
-
+                # If the cell style is not defined use the column style.
+                colstyle = cell.style or col.style
+            tags = self.get_tags(colstyle)
            presubs,postsubs = self.get_subs(colstyle)
            data = [data]
            data = Lex.subs(data, presubs)
@@ -2809,53 +3334,77 @@ class Table(AbstractBlock):
                 data = []
                 for para in re.split(r'\n{2,}',text):
                     data += dovetail_tags([stag],para.split('\n'),[etag])
+            if rowtype == 'header':
+                dtag = tags.headdata
+            elif rowtype == 'footer':
+                dtag = tags.footdata
+            else:
+                dtag = tags.bodydata
             stag,etag = subs_tag(dtag,self.attributes)
             result = result + dovetail_tags([stag],data,[etag])
+            i += cell.span
         return result
     def parse_csv(self,text):
         """
         Parse the table source text and return a list of rows, each row
-        is a list of
+        is a list of Cells.
         """
         import StringIO
         import csv
-
+        rows = []
         rdr = csv.reader(StringIO.StringIO('\r\n'.join(text)),
             delimiter=self.parameters.separator, skipinitialspace=True)
         try:
             for row in rdr:
-
-        except:
+                rows.append([Cell(data) for data in row])
+        except Exception:
             self.error('csv parse error: %s' % row)
+        return rows
     def parse_psv_dsv(self,text):
         """
         Parse list of PSV or DSV table source text lines and return a list of
-
+        Cells.
         """
+        def append_cell(data, span_spec, op, align_spec, style):
+            op = op or '+'
+            if op == '*':   # Cell multiplier.
+                span = Table.parse_span_spec(span_spec)[0]
+                for i in range(span):
+                    cells.append(Cell(data, '1', align_spec, style))
+            elif op == '+': # Column spanner.
+                cells.append(Cell(data, span_spec, align_spec, style))
+            else:
+                self.error('illegal table cell operator')
         text = '\n'.join(text)
         separator = '(?msu)'+self.parameters.separator
         format = self.parameters.format
         start = 0
-
+        span = None
+        op = None
+        align = None
+        style = None
         cells = []
-
+        data = ''
         for mo in re.finditer(separator,text):
-
-            if
-
+            data += text[start:mo.start()]
+            if data.endswith('\\'):
+                data = data[:-1]+mo.group() # Reinstate escaped separators.
             else:
-
-
-
-
+                append_cell(data, span, op, align, style)
+                span = mo.groupdict().get('span')
+                op = mo.groupdict().get('op')
+                align = mo.groupdict().get('align')
+                style = mo.groupdict().get('style')
+                if style:
+                    style = self.get_style(style)
+                data = ''
             start = mo.end()
         # Last cell follows final separator.
-
-
-        cells.append(cell)
+        data += text[start:]
+        append_cell(data, span, op, align, style)
         # We expect a dummy blank item preceding first PSV cell.
         if format == 'psv':
-            if cells[0] != '':
+            if cells[0].data.strip() != '':
                 self.error('missing leading separator: %s' % separator,
                         self.start)
             else:
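For readers unfamiliar with the new PSV cell prefixes, here is an illustration only (not part of the diff) of what the new `psv` separator pattern captures for a prefixed separator such as `3*^.>s|`; the single style letter is later resolved by `get_style()`:

# Illustrative sketch (not part of the diff): groups captured by the new
# PSV separator regular expression for a prefixed cell separator.
import re

PSV = r'((?<!\S)((?P<span>[\d.]+)(?P<op>[*+]))?(?P<align>[<\^>.]{,3})?(?P<style>[a-z])?)?\|'
mo = re.search(PSV, '3*^.>s|cell text')
print mo.group('span'), mo.group('op'), mo.group('align'), mo.group('style')
# -> 3 * ^.> s  (multiply the cell 3 times, center/bottom aligned, style letter 's')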
@@ -2886,16 +3435,18 @@ class Table(AbstractBlock):
         delimiter = reader.read()   # Discard closing delimiter.
         assert re.match(self.delimiter,delimiter)
         if len(text) == 0:
-            warning('[%s] table is empty' % self.name)
+            message.warning('[%s] table is empty' % self.name)
             return
         cols = attrs.get('cols')
         if not cols:
             # Calculate column count from number of items in first line.
             if self.parameters.format == 'csv':
-                cols = text[0].count(self.parameters.separator)
+                cols = text[0].count(self.parameters.separator) + 1
             else:
-                cols =
-
+                cols = 0
+                for cell in self.parse_psv_dsv(text[:1]):
+                    cols += cell.span
+        self.parse_cols(cols, attrs.get('halign'), attrs.get('valign'))
         # Set calculated attributes.
         self.attributes['colcount'] = len(self.columns)
         self.build_colspecs()
@@ -2912,27 +3463,27 @@ class Table(AbstractBlock):
         headrows = footrows = bodyrows = None
         if self.rows and 'header' in self.parameters.options:
             headrows = self.subs_rows(self.rows[0:1],'header')
-            self.attributes['headrows'] = '\
+            self.attributes['headrows'] = '\x07headrows\x07'
             self.rows = self.rows[1:]
         if self.rows and 'footer' in self.parameters.options:
             footrows = self.subs_rows( self.rows[-1:], 'footer')
-            self.attributes['footrows'] = '\
+            self.attributes['footrows'] = '\x07footrows\x07'
             self.rows = self.rows[:-1]
         if self.rows:
             bodyrows = self.subs_rows(self.rows)
-            self.attributes['bodyrows'] = '\
+            self.attributes['bodyrows'] = '\x07bodyrows\x07'
         table = subs_attrs(config.sections[self.parameters.template],
                            self.attributes)
         table = writer.newline.join(table)
         # Before we finish replace the table head, foot and body place holders
         # with the real data.
         if headrows:
-            table = table.replace('\
+            table = table.replace('\x07headrows\x07', headrows, 1)
         if footrows:
-            table = table.replace('\
+            table = table.replace('\x07footrows\x07', footrows, 1)
         if bodyrows:
-            table = table.replace('\
-        writer.write(table)
+            table = table.replace('\x07bodyrows\x07', bodyrows, 1)
+        writer.write(table,trace='table')

 class Tables(AbstractBlocks):
     """List of tables."""
@@ -2962,7 +3513,7 @@ class Tables(AbstractBlocks):
         parse_entries(sections.get(section,()),d)
         for k in d.keys():
             if k not in self.TAGS:
-                warning('[%s] contains illegal table tag: %s' %
+                message.warning('[%s] contains illegal table tag: %s' %
                         (section,k))
         self.tags[name] = d
     def validate(self):
@@ -2973,7 +3524,7 @@ class Tables(AbstractBlocks):
                 default = self.blocks[i]
                 break
         else:
-            raise EAsciiDoc,'missing [tabledef-default]
+            raise EAsciiDoc,'missing section: [tabledef-default]'
         # Propagate defaults to unspecified table parameters.
         for b in self.blocks:
             if b is not default:
@@ -2981,7 +3532,7 @@ class Tables(AbstractBlocks):
                 if b.template is None: b.template = default.template
         # Check tags and propagate default tags.
         if not 'default' in self.tags:
-            raise EAsciiDoc,'missing [tabletags-default]
+            raise EAsciiDoc,'missing section: [tabletags-default]'
         default = self.tags['default']
         for tag in ('bodyrow','bodydata','paragraph'): # Mandatory default tags.
             if tag not in default:
@@ -3030,14 +3581,17 @@ class Macros:
             m.load(entry)
             if m.name is None:
                 # Delete undefined macro.
-                for i in
-                    if
+                for i,m2 in enumerate(self.macros):
+                    if m2.pattern == m.pattern:
                         del self.macros[i]
+                        break
+                else:
+                    message.warning('unable to delete missing macro: %s' % m.pattern)
             else:
                 # Check for duplicates.
                 for m2 in self.macros:
-                    if m.
-                        verbose('macro redefinition: %s%s' % (m.prefix,m.name))
+                    if m2.pattern == m.pattern:
+                        message.verbose('macro redefinition: %s%s' % (m.prefix,m.name))
                         break
                 else:
                     self.macros.append(m)
@@ -3102,7 +3656,7 @@ class Macros:
         """ Replace passthrough placeholders with the original passthrough
         text."""
         for i,v in enumerate(self.passthroughs):
-            text = text.replace('\
+            text = text.replace('\x07'+str(i)+'\x07', self.passthroughs[i])
         return text

 class Macro:
@@ -3128,21 +3682,16 @@ class Macro:
         if name+suffix in config.sections:
             return name+suffix
         else:
-            warning('missing macro section: [%s]' % (name+suffix))
+            message.warning('missing macro section: [%s]' % (name+suffix))
             return None
-    def equals(self,m):
-        if self.pattern != m.pattern:
-            return False
-        if self.name != m.name:
-            return False
-        if self.prefix != m.prefix:
-            return False
-        return True
     def load(self,entry):
         e = parse_entry(entry)
-        if
-
-
+        if e is None:
+            # Only the macro pattern was specified, mark for deletion.
+            self.name = None
+            self.pattern = entry
+            return
+        if not is_re(e[0]):
             raise EAsciiDoc,'illegal macro regular expression: %s' % e[0]
         pattern, name = e
         if name and name[0] in ('+','#'):
@@ -3167,7 +3716,7 @@ class Macro:

     def subs(self,text):
         def subs_func(mo):
-            """Function called to perform
+            """Function called to perform macro substitution.
             Uses matched macro regular expression object and returns string
             containing the substituted macro body."""
             # Check if macro reference is escaped.
@@ -3181,7 +3730,7 @@ class Macro:
                 name = self.name
             else:
                 if not 'name' in d:
-                    warning('missing macro name group: %s' % mo.re.pattern)
+                    message.warning('missing macro name group: %s' % mo.re.pattern)
                     return ''
                 name = d['name']
             section_name = self.section_name(name)
@@ -3189,7 +3738,7 @@ class Macro:
                 return ''
             # If we're dealing with a block macro get optional block ID and
            # block title.
-            if self.prefix == '#':
+            if self.prefix == '#' and self.name != 'comment':
                AttributeList.consume(d)
                BlockTitle.consume(d)
            # Parse macro attributes.
@@ -3207,9 +3756,15 @@ class Macro:
                        '%s: illegal option name' % name)
                for option in options:
                    d[option+'-option'] = ''
+            # Substitute single quoted attribute values in block macros.
+            if self.prefix == '#':
+                AttributeList.subs(d)
            if name == 'callout':
                listindex =int(d['index'])
                d['coid'] = calloutmap.add(listindex)
+            # The alt attribute is the first image macro positional attribute.
+            if name == 'image' and '1' in d:
+                d['alt'] = d['1']
            # Unescape special characters in LaTeX target file names.
            if document.backend == 'latex' and 'target' in d and d['target']:
                if not '0' in d:
@@ -3246,6 +3801,7 @@ class Macro:
        """ Block macro translation."""
        assert self.prefix == '#'
        s = reader.read()
+        before = s
        if self.has_passthrough():
            s = macros.extract_passthroughs(s,'#')
        s = subs_attrs(s)
@@ -3254,6 +3810,7 @@ class Macro:
        if self.has_passthrough():
            s = macros.restore_passthroughs(s)
        if s:
+            trace('macro block',before,s)
            writer.write(s)

    def subs_passthroughs(self, text, passthroughs):
@@ -3269,13 +3826,16 @@ class Macro:
                return mo.group()
            d = mo.groupdict()
            if not 'passtext' in d:
-                warning('passthrough macro %s: missing passtext group' %
+                message.warning('passthrough macro %s: missing passtext group' %
                        d.get('name',''))
                return mo.group()
            passtext = d['passtext']
+            if re.search('\x07\\d+\x07', passtext):
+                message.warning('nested inline passthrough')
+                return mo.group()
            if d.get('subslist'):
                if d['subslist'].startswith(':'):
-                    error('block macro cannot occur here: %s' % mo.group(),
+                    message.error('block macro cannot occur here: %s' % mo.group(),
                            halt=True)
                subslist = parse_options(d['subslist'], SUBS_OPTIONS,
                        'illegal passthrough macro subs option')
@@ -3290,7 +3850,7 @@ class Macro:
            # Tabs guarantee the placeholders are unambiguous.
            result = (
                text[mo.start():mo.start('passtext')] +
-                '\
+                '\x07' + str(len(passthroughs)-1) + '\x07' +
                text[mo.end('passtext'):mo.end()]
            )
            return result
@@ -3328,25 +3888,27 @@ class CalloutMap:
             result += ' ' + self.calloutid(self.listnumber,coindex)
             return result.strip()
         else:
-            warning('no callouts refer to list item '+str(listindex))
+            message.warning('no callouts refer to list item '+str(listindex))
             return ''
     def validate(self,maxlistindex):
         # Check that all list indexes referenced by callouts exist.
         for listindex in self.comap.keys():
             if listindex > maxlistindex:
-                warning('callout refers to non-existent list item '
+                message.warning('callout refers to non-existent list item '
                         + str(listindex))

 #---------------------------------------------------------------------------
 # Input stream Reader and output stream writer classes.
 #---------------------------------------------------------------------------

+UTF8_BOM = '\xef\xbb\xbf'
+
 class Reader1:
     """Line oriented AsciiDoc input file reader. Processes include and
     conditional inclusion system macros. Tabs are expanded and lines are right
     trimmed."""
     # This class is not used directly, use Reader class instead.
-    READ_BUFFER_MIN = 10
+    READ_BUFFER_MIN = 10            # Read buffer low level.
     def __init__(self):
         self.f = None           # Input file object.
         self.fname = None       # Input file name.
@@ -3358,17 +3920,29 @@ class Reader1:
         self._lineno = 0        # The last line read from file object f.
         self.current_depth = 0  # Current include depth.
         self.max_depth = 5      # Initial maximum allowed include depth.
+        self.bom = None         # Byte order mark (BOM).
+        self.infile = None      # Saved document 'infile' attribute.
+        self.indir = None       # Saved document 'indir' attribute.
     def open(self,fname):
         self.fname = fname
-        verbose('reading: '+fname)
+        message.verbose('reading: '+fname)
         if fname == '<stdin>':
             self.f = sys.stdin
+            self.infile = None
+            self.indir = None
         else:
             self.f = open(fname,'rb')
+            self.infile = fname
+            self.indir = os.path.dirname(fname)
+        document.attributes['infile'] = self.infile
+        document.attributes['indir'] = self.indir
         self._lineno = 0            # The last line read from file object f.
         self.next = []
         # Prefill buffer by reading the first line and then pushing it back.
         if Reader1.read(self):
+            if self.cursor[2].startswith(UTF8_BOM):
+                self.cursor[2] = self.cursor[2][len(UTF8_BOM):]
+                self.bom = UTF8_BOM
             self.unread(self.cursor)
             self.cursor = None
     def closefile(self):
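The BOM handling added to `Reader1.open()` above strips a UTF-8 byte order mark from the first input line and remembers it so it can be re-emitted on output. As an illustration only (not part of the diff), the core of that check is:

# Illustrative sketch (not part of the diff): strip a UTF-8 BOM from the
# first line and remember it for the output writer.
UTF8_BOM = '\xef\xbb\xbf'

def strip_bom(first_line):
    # Returns (line_without_bom, bom_or_None).
    if first_line.startswith(UTF8_BOM):
        return first_line[len(UTF8_BOM):], UTF8_BOM
    return first_line, None

print strip_bom(UTF8_BOM + 'Document Title')   # ('Document Title', '\xef\xbb\xbf')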
@@ -3464,6 +4038,8 @@ class Reader1:
         if self.parent:
             self.closefile()
             assign(self,self.parent)    # Restore parent reader.
+            document.attributes['infile'] = self.infile
+            document.attributes['indir'] = self.indir
             return Reader1.eof(self)
         else:
             return True
@@ -3501,43 +4077,69 @@ class Reader(Reader1):
         if result is None:
             return None
         while self.skip:
-            mo = macros.match('+',r'ifdef|ifndef|endif',result)
+            mo = macros.match('+',r'ifdef|ifndef|ifeval|endif',result)
             if mo:
                 name = mo.group('name')
                 target = mo.group('target')
+                attrlist = mo.group('attrlist')
                 if name == 'endif':
-                    self.depth
+                    self.depth -= 1
                     if self.depth < 0:
                         raise EAsciiDoc,'mismatched macro: %s' % result
                     if self.depth == self.skipto:
                         self.skip = False
                         if target and self.skipname != target:
                             raise EAsciiDoc,'mismatched macro: %s' % result
-                else:
-                    if
-
-
+                else:
+                    if name in ('ifdef','ifndef'):
+                        if not target:
+                            raise EAsciiDoc,'missing macro target: %s' % result
+                        if not attrlist:
+                            self.depth += 1
+                    elif name == 'ifeval':
+                        if not attrlist:
+                            raise EAsciiDoc,'missing ifeval condition: %s' % result
+                        self.depth += 1
             result = self.read_super()
             if result is None:
                 return None
-        mo = macros.match('+',r'ifdef|ifndef|endif',result)
+        mo = macros.match('+',r'ifdef|ifndef|ifeval|endif',result)
         if mo:
             name = mo.group('name')
             target = mo.group('target')
+            attrlist = mo.group('attrlist')
             if name == 'endif':
                 self.depth = self.depth-1
-            else:
-                if not target:
+            else:
+                if not target and name in ('ifdef','ifndef'):
                     raise EAsciiDoc,'missing macro target: %s' % result
-                defined = document.attributes
+                defined = is_attr_defined(target, document.attributes)
                 if name == 'ifdef':
-
-
-
-
-
-
-
+                    if attrlist:
+                        if defined: return attrlist
+                    else:
+                        self.skip = not defined
+                elif name == 'ifndef':
+                    if attrlist:
+                        if not defined: return attrlist
+                    else:
+                        self.skip = defined
+                elif name == 'ifeval':
+                    if not attrlist:
+                        raise EAsciiDoc,'missing ifeval condition: %s' % result
+                    cond = False
+                    attrlist = subs_attrs(attrlist)
+                    if attrlist:
+                        try:
+                            cond = eval(attrlist)
+                        except Exception,e:
+                            raise EAsciiDoc,'error evaluating ifeval condition: %s: %s' % (result, str(e))
+                    self.skip = not cond
+                if not attrlist or name == 'ifeval':
+                    if self.skip:
+                        self.skipto = self.depth
+                        self.skipname = target
+                    self.depth = self.depth+1
         result = self.read()
         if result:
             # Expand executable block macros.
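The hunk above adds `ifeval::[]` support: the condition is attribute-substituted and then passed to `eval()`, and the enclosed lines are skipped when it is false. As an illustration only (not part of the diff, with a plain string replacement standing in for `subs_attrs()`), the essence is:

# Illustrative sketch (not part of the diff): how an ifeval condition is
# evaluated after attribute substitution.
def ifeval_skip(attrlist, attributes):
    # Hypothetical stand-in for subs_attrs(): simple {name} replacement.
    for name, value in attributes.items():
        attrlist = attrlist.replace('{%s}' % name, str(value))
    try:
        cond = eval(attrlist)
    except Exception, e:
        raise RuntimeError('error evaluating ifeval condition: %s' % e)
    return not cond   # True means: skip until the matching endif::[].

print ifeval_skip('"{backend}" == "docbook"', {'backend': 'html4'})  # True (skip)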
@@ -3563,17 +4165,6 @@ class Reader(Reader1):
             self.unread(self.cursor)
             self.cursor = save_cursor
         return result
-    def read_all(self,fname):
-        """Read all lines from file fname and return as list. Use like class
-        method: Reader().read_all(fname)"""
-        result = []
-        self.open(fname)
-        try:
-            while not self.eof():
-                result.append(self.read())
-        finally:
-            self.close()
-        return result
     def read_lines(self,count=1):
         """Return tuple containing count lines."""
         result = []
@@ -3599,79 +4190,68 @@ class Reader(Reader1):
         return tuple(result)
     def skip_blank_lines(self):
         reader.read_until(r'\s*\S+')
-    def read_until(self,
+    def read_until(self,terminators,same_file=False):
         """Like read() but reads lines up to (but not including) the first line
-        that matches the
-
-
+        that matches the terminator regular expression, regular expression
+        object or list of regular expression objects. If same_file is True then
+        the terminating pattern must occur in the file that was being read when
+        the routine was called."""
         if same_file:
             fname = self.cursor[0]
         result = []
-
+        if not isinstance(terminators,list):
+            if isinstance(terminators,basestring):
+                terminators = [re.compile(terminators)]
+            else:
+                terminators = [terminators]
         while not self.eof():
             save_cursor = self.cursor
             s = self.read()
-            if
-
-
-
+            if not same_file or fname == self.cursor[0]:
+                for reo in terminators:
+                    if reo.match(s):
+                        self.unread(self.cursor)
+                        self.cursor = save_cursor
+                        return tuple(result)
             result.append(s)
         return tuple(result)
-    # NOT USED -- part of unimplemented attempt a generalised line continuation.
-    def read_continuation(self):
-        """Like read() but treats trailing backslash as line continuation
-        character."""
-        s = self.read()
-        if s is None:
-            return None
-        result = ''
-        while s is not None and len(s) > 0 and s[-1] == '\\':
-            result = result + s[:-1]
-            s = self.read()
-        if s is not None:
-            result = result + s
-        return result
-    # NOT USED -- part of unimplemented attempt a generalised line continuation.
-    def read_next_continuation(self):
-        """Like read_next() but treats trailing backslash as line continuation
-        character."""
-        save_cursor = self.cursor
-        result = self.read_continuation()
-        if result is not None:
-            self.unread(self.cursor)
-            self.cursor = save_cursor
-        return result

 class Writer:
     """Writes lines to output file."""
-
-
-
-
-
-
-
-
+    def __init__(self):
+        self.newline = '\r\n'            # End of line terminator.
+        self.f = None                    # Output file object.
+        self.fname = None                # Output file name.
+        self.lines_out = 0               # Number of lines written.
+        self.skip_blank_lines = False    # If True don't output blank lines.
+    def open(self,fname,bom=None):
+        '''
+        bom is optional byte order mark.
+        http://en.wikipedia.org/wiki/Byte-order_mark
+        '''
+        self.fname = fname
         if fname == '<stdout>':
             self.f = sys.stdout
         else:
             self.f = open(fname,'wb+')
+        message.verbose('writing: '+writer.fname,False)
+        if bom:
+            self.f.write(bom)
         self.lines_out = 0
     def close(self):
         if self.fname != '<stdout>':
             self.f.close()
     def write_line(self, line=None):
         if not (self.skip_blank_lines and (not line or not line.strip())):
-
-            self.f.write(line + self.newline)
-        else:
-            self.f.write(self.newline)
+            self.f.write((line or '') + self.newline)
             self.lines_out = self.lines_out + 1
-    def write(self,*args):
+    def write(self,*args,**kwargs):
         """Iterates arguments, writes tuple and list arguments one line per
         element, else writes argument as single line. If no arguments writes
         blank line. If argument is None nothing is written. self.newline is
         appended to each line."""
+        if 'trace' in kwargs and len(args) > 0:
+            trace(kwargs['trace'],args[0])
         if len(args) == 0:
             self.write_line()
             self.lines_out = self.lines_out + 1
@@ -3682,17 +4262,20 @@ class Writer:
                     self.write_line(s)
             elif arg is not None:
                 self.write_line(arg)
-    def write_tag(self,tag,content,subs=None,d=None):
+    def write_tag(self,tag,content,subs=None,d=None,**kwargs):
         """Write content enveloped by tag.
         Substitutions specified in the 'subs' list are performed on the
         'content'."""
         if subs is None:
             subs = config.subsnormal
         stag,etag = subs_tag(tag,d)
+        content = Lex.subs(content,subs)
+        if 'trace' in kwargs:
+            trace(kwargs['trace'],[stag]+content+[etag])
         if stag:
             self.write(stag)
         if content:
-            self.write(
+            self.write(content)
         if etag:
             self.write(etag)
@@ -3757,27 +4340,56 @@ class Config:
                            # is corresponding section name.
         self.quotes = OrderedDict()    # Values contain corresponding tag name.
         self.fname = ''         # Most recently loaded configuration file name.
-        self.conf_attrs = {}    #
+        self.conf_attrs = {}    # Attributes entries from conf files.
         self.cmd_attrs = {}     # Attributes from command-line -a options.
         self.loaded = []        # Loaded conf files.
         self.include1 = {}      # Holds include1::[] files for {include1:}.
         self.dumping = False    # True if asciidoc -c option specified.

-    def
-        """
-
-
+    def init(self, cmd):
+        """
+        Check Python version and locate the executable and configuration files
+        directory.
+        cmd is the asciidoc command or asciidoc.py path.
+        """
+        if float(sys.version[:3]) < MIN_PYTHON_VERSION:
+            message.stderr('FAILED: Python 2.3 or better required')
+            sys.exit(1)
+        if not os.path.exists(cmd):
+            message.stderr('FAILED: Missing asciidoc command: %s' % cmd)
+            sys.exit(1)
+        global APP_FILE
+        APP_FILE = os.path.realpath(cmd)
+        global APP_DIR
+        APP_DIR = os.path.dirname(APP_FILE)
+        global USER_DIR
+        USER_DIR = userdir()
+        if USER_DIR is not None:
+            USER_DIR = os.path.join(USER_DIR,'.asciidoc')
+            if not os.path.isdir(USER_DIR):
+                USER_DIR = None
+
+    def load_file(self, fname, dir=None, include=[], exclude=[]):
+        """
+        Loads sections dictionary with sections from file fname.
+        Existing sections are overlaid.
+        The 'include' list contains the section names to be loaded.
+        The 'exclude' list contains section names not to be loaded.
+        Return False if no file was found in any of the locations.
+        """
         if dir:
             fname = os.path.join(dir, fname)
         # Silently skip missing configuration file.
         if not os.path.isfile(fname):
-            return
+            return False
         # Don't load conf files twice (local and application conf files are the
         # same if the source file is in the application directory).
         if os.path.realpath(fname) in self.loaded:
-            return
+            return True
         rdr = Reader()  # Reader processes system macros.
+        message.linenos = False         # Disable document line numbers.
         rdr.open(fname)
+        message.linenos = None
         self.fname = fname
         reo = re.compile(r'(?u)^\[(?P<section>[^\W\d][\w-]*)\]\s*$')
         sections = OrderedDict()
@@ -3816,13 +4428,26 @@ class Config:
             else:
                 sections[section] = contents
         rdr.close()
-
-
+        if include:
+            for s in set(sections) - set(include):
+                del sections[s]
+        if exclude:
+            for s in set(sections) & set(exclude):
+                del sections[s]
+        attrs = {}
+        self.load_sections(sections,attrs)
+        if not include:
+            # If all sections are loaded mark this file as loaded.
+            self.loaded.append(os.path.realpath(fname))
+        document.update_attributes(attrs) # So they are available immediately.
+        return True

-    def load_sections(self,sections):
-
+    def load_sections(self,sections,attrs=None):
+        """
+        Loads sections dictionary. Each dictionary entry contains a
         list of lines.
-        ''
+        Updates 'attrs' with parsed [attributes] section entries.
+        """
         # Delete trailing blank lines from sections.
         for k in sections.keys():
             for i in range(len(sections[k])-1,-1,-1):
@@ -3837,13 +4462,11 @@ class Config:
         d = {}
         parse_entries(sections.get('miscellaneous',()), d, unquote=True,
                 allow_name_only=True)
-        update_attrs(self.conf_attrs,d)
-        d = {}
         parse_entries(sections.get('attributes',()), d, unquote=True,
                 allow_name_only=True)
         update_attrs(self.conf_attrs,d)
-
-
+        if attrs is not None:
+            attrs.update(d)
         d = {}
         parse_entries(sections.get('titles',()),d)
         Title.load(d)
@@ -3860,23 +4483,76 @@ class Config:
         tables.load(sections)
         macros.load(sections.get('macros',()))

-    def
-        """
-
-
-
-
-
-
-
-
-
-        # Load
-
-
-
-
-
+    def get_load_dirs(self):
+        """
+        Return list of well known paths with conf files.
+        """
+        result = []
+        if localapp():
+            # Load from folders in asciidoc executable directory.
+            result.append(APP_DIR)
+        else:
+            # Load from global configuration directory.
+            result.append(CONF_DIR)
+        # Load configuration files from ~/.asciidoc if it exists.
+        if USER_DIR is not None:
+            result.append(USER_DIR)
+        return result
+
+    def find_in_dirs(self, filename, dirs=None):
+        """
+        Find conf files from dirs list.
+        Return list of found file paths.
+        Return empty list if not found in any of the locations.
+        """
+        result = []
+        if dirs is None:
+            dirs = self.get_load_dirs()
+        for d in dirs:
+            f = os.path.join(d,filename)
+            if os.path.isfile(f):
+                result.append(f)
+        return result
+
+    def load_from_dirs(self, filename, dirs=None, include=[]):
+        """
+        Load conf file from dirs list.
+        If dirs not specified try all the well known locations.
+        Return False if no file was successfully loaded.
+        """
+        count = 0
+        for f in self.find_in_dirs(filename,dirs):
+            if self.load_file(f, include=include):
+                count += 1
+        return count != 0
+
+    def load_backend(self, dirs=None):
+        """
+        Load the backend configuration files from dirs list.
+        If dirs not specified try all the well known locations.
+        """
+        if dirs is None:
+            dirs = self.get_load_dirs()
+        for d in dirs:
+            conf = document.backend + '.conf'
+            self.load_file(conf,d)
+            conf = document.backend + '-' + document.doctype + '.conf'
+            self.load_file(conf,d)
+
+    def load_filters(self, dirs=None):
+        """
+        Load filter configuration files from 'filters' directory in dirs list.
+        If dirs not specified try all the well known locations.
+        """
+        if dirs is None:
+            dirs = self.get_load_dirs()
+        for d in dirs:
+            # Load filter .conf files.
+            filtersdir = os.path.join(d,'filters')
+            for dirpath,dirnames,filenames in os.walk(filtersdir):
+                for f in filenames:
+                    if re.match(r'^.+\.conf$',f):
+                        self.load_file(f,dirpath)

     def load_miscellaneous(self,d):
         """Set miscellaneous configuration entries from dictionary 'd'."""
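The new `get_load_dirs()`/`find_in_dirs()` helpers above establish a single search order for configuration files: the global (or application) conf directory, then the user's `~/.asciidoc`. As an illustration only (not part of the diff; the directory names are assumed placeholders for the module-level `CONF_DIR`/`USER_DIR` globals), the lookup amounts to:

# Illustrative sketch (not part of the diff): find a conf file in an
# ordered list of well known directories.
import os

def find_in_dirs(filename, dirs):
    return [os.path.join(d, filename)
            for d in dirs if os.path.isfile(os.path.join(d, filename))]

dirs = ['/etc/asciidoc', os.path.expanduser('~/.asciidoc')]  # assumed locations
print find_in_dirs('html5.conf', dirs)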
@@ -3889,7 +4565,9 @@ class Config:
             setattr(self, name, validate(d[name],rule,errmsg))
         set_misc('tabsize','int($)>0',intval=True)
         set_misc('textwidth','int($)>0',intval=True) # DEPRECATED: Old tables only.
-        set_misc('pagewidth','
+        set_misc('pagewidth','"%f" % $')
+        if 'pagewidth' in d:
+            self.pagewidth = float(self.pagewidth)
         set_misc('pageunits')
         set_misc('outfilesuffix')
         if 'newline' in d:
@@ -3907,7 +4585,8 @@ class Config:
     def validate(self):
         """Check the configuration for internal consistency. Called after all
         configuration files have been loaded."""
-
+        message.linenos = False # Disable document line numbers.
+        # Heuristic to validate that at least one configuration file was loaded.
         if not self.specialchars or not self.tags or not lists:
             raise EAsciiDoc,'incomplete configuration files'
         # Check special characters are only one character long.
@@ -3920,9 +4599,9 @@ class Config:
             if not is_name(macro):
                 raise EAsciiDoc,'illegal special word name: %s' % macro
             if not macro in self.sections:
-                warning('missing special word macro: [%s]' % macro)
+                message.warning('missing special word macro: [%s]' % macro)
         # Check all text quotes have a corresponding tag.
-        for q in self.quotes.keys():
+        for q in self.quotes.keys()[:]:
             tag = self.quotes[q]
             if not tag:
                 del self.quotes[q] # Undefine quote.
@@ -3930,17 +4609,20 @@ class Config:
                 if tag[0] == '#':
                     tag = tag[1:]
                 if not tag in self.tags:
-                    warning('[quotes] %s missing tag definition: %s' % (q,tag))
+                    message.warning('[quotes] %s missing tag definition: %s' % (q,tag))
         # Check all specialsections section names exist.
         for k,v in self.specialsections.items():
-            if not v
-
+            if not v:
+                del self.specialsections[k]
+            elif not v in self.sections:
+                message.warning('missing specialsections section: [%s]' % v)
         paragraphs.validate()
         lists.validate()
         blocks.validate()
         tables_OLD.validate()
         tables.validate()
         macros.validate()
+        message.linenos = None

     def entries_section(self,section_name):
         """
@@ -4008,7 +4690,7 @@ class Config:
         if section in self.sections:
             return subs_attrs(self.sections[section],d)
         else:
-            warning('missing [%s]
+            message.warning('missing section: [%s]' % section)
             return ()

     def parse_tags(self):
@@ -4033,14 +4715,6 @@ class Config:
         [tags] section. Raise error if not found. If a dictionary 'd' is
         passed then merge with document attributes and perform attribute
         substitution on tags."""
-
-        # TODO: Tags should be stored a single string, not split into start
-        # and end tags since most are going to be substituted anyway (see
-        # subs_tag() for how we should process them. parse_tags() (above)
-        # should only validate i.e. parse_check(). This routine should be renamed
-        # split_tag() and would call subs_tag(). self.tags dictionary values
-        # would be strings not tuples.
-
         if not name in self.tags:
             raise EAsciiDoc, 'missing tag: %s' % name
         stag,etag = self.tags[name]
@@ -4062,7 +4736,7 @@ class Config:
         parse_entries(self.sections.get('specialsections',()),d,unquote=True)
         for pat,sectname in d.items():
             pat = strip_quotes(pat)
-            if not
+            if not is_re(pat):
                 raise EAsciiDoc,'[specialsections] entry ' \
                         'is not a valid regular expression: %s' % pat
             if sectname is None:
@@ -4084,7 +4758,7 @@ class Config:
         def set_replacement(pat, rep, replacements):
             """Add pattern and replacement to replacements dictionary."""
             pat = strip_quotes(pat)
-            if not
+            if not is_re(pat):
                 return False
             if rep is None:
                 if pat in replacements:
@@ -4121,7 +4795,7 @@ class Config:
             words = reo.findall(wordlist)
             for word in words:
                 word = strip_quotes(word)
-                if not
+                if not is_re(word):
                     raise EAsciiDoc,'[specialwords] entry in %s ' \
                             'is not a valid regular expression: %s' \
                             % (self.fname,word)
@@ -4153,33 +4827,36 @@ class Config:
             result = re.sub(word, _subs_specialwords, result)
         return result

-    def expand_templates(self,
+    def expand_templates(self,entries):
+        """Expand any template::[] macros in a list of section entries."""
         result = []
-        for line in
+        for line in entries:
             mo = macros.match('+',r'template',line)
             if mo:
                 s = mo.group('attrlist')
                 if s in self.sections:
-                    result += self.sections[s]
+                    result += self.expand_templates(self.sections[s])
                 else:
-                    warning('missing [%s]
+                    message.warning('missing section: [%s]' % s)
+                    result.append(line)
             else:
                 result.append(line)
         return result

     def expand_all_templates(self):
-        for k in self.sections.
-            self.sections[k] = self.expand_templates(
+        for k,v in self.sections.items():
+            self.sections[k] = self.expand_templates(v)

-    def section2tags(self, section, d={}):
+    def section2tags(self, section, d={}, skipstart=False, skipend=False):
         """Perform attribute substitution on 'section' using document
         attributes plus 'd' attributes. Return tuple (stag,etag) containing
-        pre and post | placeholder tags.
+        pre and post | placeholder tags. 'skipstart' and 'skipend' are
+        used to suppress substitution."""
         assert section is not None
         if section in self.sections:
             body = self.sections[section]
         else:
-            warning('missing [%s]
+            message.warning('missing section: [%s]' % section)
             body = ()
         # Split macro body into start and end tag lists.
         stag = []
@@ -4203,8 +4880,10 @@ class Config:
         title = d.get('title')
         if title:
             d['title'] = chr(0)  # Replace with unused character.
-
-
+        if not skipstart:
+            stag = subs_attrs(stag, d)
+        if not skipend:
+            etag = subs_attrs(etag, d)
         # Put the {title} back.
         if title:
             stag = map(lambda x: x.replace(chr(0), title), stag)
@@ -4217,7 +4896,6 @@ class Config:
 # Deprecated old table classes follow.
 # Naming convention is an _OLD name suffix.
 # These will be removed from future versions of AsciiDoc
-#

 def join_lines_OLD(lines):
     """Return a list in which lines terminated with the backslash line
@@ -4437,7 +5115,7 @@ class Table_OLD(AbstractBlock):
|
|
4437
5115
|
self.attributes['colnumber'] = str(i + 1)
|
4438
5116
|
s = subs_attrs(self.colspec,self.attributes)
|
4439
5117
|
if not s:
|
4440
|
-
warning('colspec dropped: contains undefined attribute')
|
5118
|
+
message.warning('colspec dropped: contains undefined attribute')
|
4441
5119
|
else:
|
4442
5120
|
cols.append(s)
|
4443
5121
|
self.attributes['colspecs'] = writer.newline.join(cols)
|
@@ -4481,9 +5159,9 @@ class Table_OLD(AbstractBlock):
         Returns a substituted list of output table data items."""
         result = []
         if len(data) < len(self.columns):
-            warning('fewer row data items then table columns')
+            message.warning('fewer row data items then table columns')
         if len(data) > len(self.columns):
-            warning('more row data items than table columns')
+            message.warning('more row data items than table columns')
         for i in range(len(self.columns)):
             if i > len(data) - 1:
                 d = '' # Fill missing column data with blanks.
@@ -4539,7 +5217,7 @@ class Table_OLD(AbstractBlock):
         try:
             for row in rdr:
                 result.append(row)
-        except:
+        except Exception:
             raise EAsciiDoc,'csv parse error: %s' % row
         return result
     def parse_dsv(self,rows):
@@ -4562,7 +5240,7 @@ class Table_OLD(AbstractBlock):
             result.append(data)
         return result
     def translate(self):
-        deprecated('old tables syntax')
+        message.deprecated('old tables syntax')
         AbstractBlock.translate(self)
         # Reset instance specific properties.
         self.underline = None
@@ -4583,7 +5261,7 @@ class Table_OLD(AbstractBlock):
             elif k == 'tablewidth':
                 try:
                     self.tablewidth = float(attrs['tablewidth'])
-                except:
+                except Exception:
                     raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
         self.merge_attributes(attrs)
         # Parse table ruler.
@@ -4603,9 +5281,8 @@ class Table_OLD(AbstractBlock):
             table.append(reader.read())
         # EXPERIMENTAL: The number of lines in the table, requested by Benjamin Klum.
         self.attributes['rows'] = str(len(table))
-        #TODO: Inherited validate() doesn't set check_msg, needs checking.
         if self.check_msg: # Skip if table definition was marked invalid.
-            warning('skipping %s table: %s' % (self.name,self.check_msg))
+            message.warning('skipping %s table: %s' % (self.name,self.check_msg))
             return
         # Generate colwidths and colspecs.
         self.build_colspecs()
@@ -4626,24 +5303,24 @@ class Table_OLD(AbstractBlock):
         if headrows:
             headrows = self.parse_rows(headrows, self.headrow, self.headdata)
             headrows = writer.newline.join(headrows)
-            self.attributes['headrows'] = '\
+            self.attributes['headrows'] = '\x07headrows\x07'
         if footrows:
             footrows = self.parse_rows(footrows, self.footrow, self.footdata)
             footrows = writer.newline.join(footrows)
-            self.attributes['footrows'] = '\
+            self.attributes['footrows'] = '\x07footrows\x07'
         bodyrows = self.parse_rows(bodyrows, self.bodyrow, self.bodydata)
         bodyrows = writer.newline.join(bodyrows)
-        self.attributes['bodyrows'] = '\
+        self.attributes['bodyrows'] = '\x07bodyrows\x07'
         table = subs_attrs(config.sections[self.template],self.attributes)
         table = writer.newline.join(table)
         # Before we finish replace the table head, foot and body place holders
         # with the real data.
         if headrows:
-            table = table.replace('\
+            table = table.replace('\x07headrows\x07', headrows, 1)
         if footrows:
-            table = table.replace('\
-        table = table.replace('\
-        writer.write(table)
+            table = table.replace('\x07footrows\x07', footrows, 1)
+        table = table.replace('\x07bodyrows\x07', bodyrows, 1)
+        writer.write(table,trace='table')

 class Tables_OLD(AbstractBlocks):
     """List of tables."""
@@ -4661,7 +5338,7 @@ class Tables_OLD(AbstractBlocks):
                 default = self.blocks[i]
                 break
         else:
-            raise EAsciiDoc,'missing [OLD_tabledef-default]
+            raise EAsciiDoc,'missing section: [OLD_tabledef-default]'
         # Set default table defaults.
         if default.format is None: default.subs = 'fixed'
         # Propagate defaults to unspecified table parameters.
@@ -4700,17 +5377,146 @@ class Tables_OLD(AbstractBlocks):
                 b.headdata = b.bodydata
             if not b.footdata:
                 b.footdata = b.bodydata
-        self.
+        self.delimiters = re_join(delimiters)
         # Check table definitions are valid.
         for b in self.blocks:
             b.validate()
             if config.verbose:
                 if b.check_msg:
-                    warning('[%s] table definition: %s' % (b.name,b.check_msg))
+                    message.warning('[%s] table definition: %s' % (b.name,b.check_msg))

 # End of deprecated old table classes.
 #---------------------------------------------------------------------------

+#---------------------------------------------------------------------------
+# Filter commands.
+#---------------------------------------------------------------------------
+import shutil, zipfile
+
+def die(msg):
+    message.stderr(msg)
+    sys.exit(1)
+
+def unzip(zip_file, destdir):
+    """
+    Unzip Zip file to destination directory.
+    Throws exception if error occurs.
+    """
+    zipo = zipfile.ZipFile(zip_file, 'r')
+    try:
+        for zi in zipo.infolist():
+            outfile = zi.filename
+            if not outfile.endswith('/'):
+                d, outfile = os.path.split(outfile)
+                directory = os.path.normpath(os.path.join(destdir, d))
+                if not os.path.isdir(directory):
+                    os.makedirs(directory)
+                outfile = os.path.join(directory, outfile)
+                perms = (zi.external_attr >> 16) & 0777
+                message.verbose('extracting: %s' % outfile)
+                fh = os.open(outfile, os.O_CREAT | os.O_WRONLY, perms)
+                try:
+                    os.write(fh, zipo.read(zi.filename))
+                finally:
+                    os.close(fh)
+    finally:
+        zipo.close()
+
+class Filter:
+    """
+    --filter option commands.
+    """
+
+    @staticmethod
+    def get_filters_dir():
+        """
+        Return path of .asciidoc/filters in user's home direcory or None if
+        user home not defined.
+        """
+        result = userdir()
+        if result:
+            result = os.path.join(result,'.asciidoc','filters')
+        return result
+
+    @staticmethod
+    def install(args):
+        """
+        Install filter Zip file.
+        args[0] is filter zip file path.
+        args[1] is optional destination filters directory.
+        """
+        if len(args) not in (1,2):
+            die('invalid number of arguments: --filter install %s'
+                    % ' '.join(args))
+        zip_file = args[0]
+        if not os.path.isfile(zip_file):
+            die('file not found: %s' % zip_file)
+        reo = re.match(r'^\w+',os.path.split(zip_file)[1])
+        if not reo:
+            die('filter file name does not start with legal filter name: %s'
+                    % zip_file)
+        filter_name = reo.group()
+        if len(args) == 2:
+            filters_dir = args[1]
+            if not os.path.isdir(filters_dir):
+                die('directory not found: %s' % filters_dir)
+        else:
+            filters_dir = Filter.get_filters_dir()
+            if not filters_dir:
+                die('user home directory is not defined')
+        filter_dir = os.path.join(filters_dir, filter_name)
+        if os.path.exists(filter_dir):
+            die('filter is already installed: %s' % filter_dir)
+        try:
+            os.makedirs(filter_dir)
+        except Exception,e:
+            die('failed to create filter directory: %s' % str(e))
+        try:
+            unzip(zip_file, filter_dir)
+        except Exception,e:
+            die('failed to extract filter: %s' % str(e))
+
+    @staticmethod
+    def remove(args):
+        """
+        Delete filter from .asciidoc/filters/ in user's home directory.
+        args[0] is filter name.
+        args[1] is optional filters directory.
+        """
+        if len(args) not in (1,2):
+            die('invalid number of arguments: --filter remove %s'
+                    % ' '.join(args))
+        filter_name = args[0]
+        if not re.match(r'^\w+$',filter_name):
+            die('illegal filter name: %s' % filter_name)
+        if len(args) == 2:
+            d = args[1]
+            if not os.path.isdir(d):
+                die('directory not found: %s' % d)
+        else:
+            d = Filter.get_filters_dir()
+            if not d:
+                die('user directory is not defined')
+        filter_dir = os.path.join(d, filter_name)
+        if not os.path.isdir(filter_dir):
+            die('cannot find filter: %s' % filter_dir)
+        try:
+            message.verbose('removing: %s' % filter_dir)
+            shutil.rmtree(filter_dir)
+        except Exception,e:
+            die('failed to delete filter: %s' % str(e))
+
+    @staticmethod
+    def list():
+        """
+        List all filter directories (global and local).
+        """
+        for d in [os.path.join(d,'filters') for d in config.get_load_dirs()]:
+            if os.path.isdir(d):
+                for f in os.walk(d).next()[1]:
+                    message.stdout(os.path.join(d,f))
+
+
 #---------------------------------------------------------------------------
 # Application code.
 #---------------------------------------------------------------------------
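Note: the Filter class above, together with the --filter dispatch added at the end of this diff, gives asciidoc a simple plugin install mechanism. A hedged usage sketch (the asciidoc executable name and the music-filter.zip file are assumptions; the sub-command names and the ~/.asciidoc/filters/<name> destination follow from the code above):

    import subprocess

    # Unpack music-filter.zip into ~/.asciidoc/filters/music
    # (the filter name is the leading word portion of the zip file name).
    subprocess.call(['asciidoc', '--filter', 'install', 'music-filter.zip'])

    # Print every global and local filter directory.
    subprocess.call(['asciidoc', '--filter', 'list'])

    # Delete ~/.asciidoc/filters/music again.
    subprocess.call(['asciidoc', '--filter', 'remove', 'music'])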
@@ -4729,6 +5535,7 @@ document = Document() # The document being processed.
 config = Config() # Configuration file reader.
 reader = Reader() # Input stream line reader.
 writer = Writer() # Output stream line writer.
+message = Message() # Message functions.
 paragraphs = Paragraphs() # Paragraph definitions.
 lists = Lists() # List definitions.
 blocks = DelimitedBlocks() # DelimitedBlock definitions.
@@ -4736,78 +5543,139 @@ tables_OLD = Tables_OLD() # Table_OLD definitions.
 tables = Tables() # Table definitions.
 macros = Macros() # Macro definitions.
 calloutmap = CalloutMap() # Coordinates callouts and callout list.
+trace = Trace() # Implements trace attribute processing.
+
+### Used by asciidocapi.py ###
+# List of message strings written to stderr.
+messages = message.messages
+

 def asciidoc(backend, doctype, confiles, infile, outfile, options):
     """Convert AsciiDoc document to DocBook document of type doctype
     The AsciiDoc document is read from file object src the translated
     DocBook file written to file object dst."""
+    def load_conffiles(include=[], exclude=[]):
+        # Load conf files specified on the command-line and by the conf-files attribute.
+        files = document.attributes.get('conf-files','')
+        files = [f.strip() for f in files.split('|') if f.strip()]
+        files += confiles
+        if files:
+            for f in files:
+                if os.path.isfile(f):
+                    config.load_file(f, include=include, exclude=exclude)
+                else:
+                    raise EAsciiDoc,'configuration file %s missing' % f
+
     try:
-        if doctype not in ('article','manpage','book'):
+        if doctype not in (None,'article','manpage','book'):
             raise EAsciiDoc,'illegal document type'
-        document.backend = backend
-        if not os.path.exists(os.path.join(APP_DIR, backend+'.conf')) and not \
-            os.path.exists(os.path.join(CONF_DIR, backend+'.conf')):
-            warning('non-standard %s backend' % backend, linenos=False)
-        document.doctype = doctype
-        document.infile = infile
-        document.init_attrs()
         # Set processing options.
         for o in options:
             if o == '-c': config.dumping = True
             if o == '-s': config.header_footer = False
             if o == '-v': config.verbose = True
-...
-        if infile != '<stdin>' and not os.path.isfile(infile):
-            raise EAsciiDoc,'input file %s missing' % infile
+        document.update_attributes()
         if '-e' not in options:
-            # Load
-...
-            #
-            config.
-...
-...
-...
-            # Load configuration files from document directory.
+            # Load asciidoc.conf files in two passes: the first for attributes
+            # the second for everything. This is so that locally set attributes
+            # available are in the global asciidoc.conf
+            if not config.load_from_dirs('asciidoc.conf',include=['attributes']):
+                raise EAsciiDoc,'configuration file asciidoc.conf missing'
+            load_conffiles(include=['attributes'])
+            config.load_from_dirs('asciidoc.conf')
             if infile != '<stdin>':
-...
+                indir = os.path.dirname(infile)
+                config.load_file('asciidoc.conf', indir,
+                        include=['attributes','titles','specialchars'])
+        else:
+            load_conffiles(include=['attributes','titles','specialchars'])
+        document.update_attributes()
+        # Check the infile exists.
         if infile != '<stdin>':
-... (14 removed lines truncated)
+            if not os.path.isfile(infile):
+                raise EAsciiDoc,'input file %s missing' % infile
+        document.infile = infile
+        AttributeList.initialize()
+        # Open input file and parse document header.
+        reader.tabsize = config.tabsize
+        reader.open(infile)
+        has_header = document.parse_header(doctype,backend)
+        # doctype is now finalized.
+        document.attributes['doctype-'+document.doctype] = ''
+        # Load backend configuration files.
+        if '-e' not in options:
+            f = document.backend + '.conf'
+            if not config.find_in_dirs(f):
+                message.warning('missing backend conf file: %s' % f, linenos=False)
+            config.load_backend()
+        # backend is now known.
+        document.attributes['backend-'+document.backend] = ''
+        document.attributes[document.backend+'-'+document.doctype] = ''
+        doc_conffiles = []
+        if '-e' not in options:
+            # Load filters and language file.
+            config.load_filters()
+            document.load_lang()
+            if infile != '<stdin>':
+                # Load local conf files (files in the source file directory).
+                config.load_file('asciidoc.conf', indir)
+                config.load_backend([indir])
+                config.load_filters([indir])
+                # Load document specific configuration files.
+                f = os.path.splitext(infile)[0]
+                doc_conffiles = [
+                        f for f in (f+'.conf', f+'-'+document.backend+'.conf')
+                        if os.path.isfile(f) ]
+                for f in doc_conffiles:
+                    config.load_file(f)
+            load_conffiles()
+        # Build asciidoc-args attribute.
+        args = ''
+        # Add custom conf file arguments.
+        for f in doc_conffiles + confiles:
+            args += ' --conf-file "%s"' % f
+        # Add command-line and header attributes.
+        attrs = {}
+        attrs.update(AttributeEntry.attributes)
+        attrs.update(config.cmd_attrs)
+        if 'title' in attrs: # Don't pass the header title.
+            del attrs['title']
+        for k,v in attrs.items():
+            if v:
+                args += ' --attribute "%s=%s"' % (k,v)
+            else:
+                args += ' --attribute "%s"' % k
+        document.attributes['asciidoc-args'] = args
+        # Build outfile name.
         if outfile is None:
-            outfile = os.path.splitext(infile)[0] + '.' + backend
+            outfile = os.path.splitext(infile)[0] + '.' + document.backend
             if config.outfilesuffix:
                 # Change file extension.
                 outfile = os.path.splitext(outfile)[0] + config.outfilesuffix
         document.outfile = outfile
+        # Document header attributes override conf file attributes.
+        document.attributes.update(AttributeEntry.attributes)
+        document.update_attributes()
+        # Configuration is fully loaded so can expand templates.
+        config.expand_all_templates()
+        # Check configuration for consistency.
+        config.validate()
+        paragraphs.initialize()
+        lists.initialize()
         if config.dumping:
             config.dump()
         else:
-...
-            reader.open(infile)
+            writer.newline = config.newline
             try:
-                writer.
-                writer.open(outfile)
+                writer.open(outfile, reader.bom)
                 try:
-                    document.
-                    document.translate()
+                    document.translate(has_header) # Generate the output.
                 finally:
                     writer.close()
             finally:
-                reader.closefile()
-    except
-...
+                reader.closefile()
+    except KeyboardInterrupt:
+        raise
     except Exception,e:
         # Cleanup.
         if outfile and outfile != '<stdout>' and os.path.isfile(outfile):
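Note: the rewritten asciidoc() body above reorders configuration loading: asciidoc.conf is read in two passes (attributes first, then everything), the backend conf file is loaded only after the document header has been parsed, and two document-specific conf files are picked up next to the source file. A standalone sketch of that last naming rule (the /docs/mydoc.txt path and html4 backend are invented for illustration):

    import os

    infile, backend = '/docs/mydoc.txt', 'html4'
    base = os.path.splitext(infile)[0]
    # Candidate conf files; asciidoc() keeps only the ones that exist on disk.
    doc_conffiles = [base + '.conf', base + '-' + backend + '.conf']
    print(doc_conffiles)  # ['/docs/mydoc.conf', '/docs/mydoc-html4.conf']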
@@ -4815,23 +5683,28 @@ def asciidoc(backend, doctype, confiles, infile, outfile, options):
         # Build and print error description.
         msg = 'FAILED: '
         if reader.cursor:
-            msg =
-        if isinstance(e,EAsciiDoc):
-...
+            msg = message.format('', msg)
+        if isinstance(e, EAsciiDoc):
+            message.stderr('%s%s' % (msg,str(e)))
         else:
-...
-...
-...
-...
+            if __name__ == '__main__':
+                message.stderr(msg+'unexpected error:')
+                message.stderr('-'*60)
+                traceback.print_exc(file=sys.stderr)
+                message.stderr('-'*60)
+            else:
+                message.stderr('%sunexpected error: %s' % (msg,str(e)))
         sys.exit(1)

 def usage(msg=''):
     if msg:
-...
+        message.stderr(msg)
     show_help('default', sys.stderr)

-def show_help(topic,
-    """Print help topic to
+def show_help(topic, f=None):
+    """Print help topic to file object f."""
+    if f is None:
+        f = sys.stdout
     # Select help file.
     lang = config.cmd_attrs.get('lang')
     if lang and lang != 'en':
@@ -4839,86 +5712,86 @@ def show_help(topic, stream=sys.stdout):
     else:
         help_file = HELP_FILE
     # Print [topic] section from help file.
-...
-...
-    load_sections(topics, help_file, APP_DIR)
-    if USER_DIR is not None:
-        load_sections(topics, help_file, USER_DIR)
-    if len(topics) == 0:
+    config.load_from_dirs(help_file)
+    if len(config.sections) == 0:
         # Default to English if specified language help files not found.
         help_file = HELP_FILE
-...
-...
-...
-            print_stderr('no help topics found')
+        config.load_from_dirs(help_file)
+        if len(config.sections) == 0:
+            message.stderr('no help topics found')
             sys.exit(1)
     n = 0
-    for k in
+    for k in config.sections:
         if re.match(re.escape(topic), k):
             n += 1
-            lines =
+            lines = config.sections[k]
     if n == 0:
-...
-...
+        if topic != 'topics':
+            message.stderr('help topic not found: [%s] in %s' % (topic, help_file))
+        message.stderr('available help topics: %s' % ', '.join(config.sections.keys()))
         sys.exit(1)
     elif n > 1:
-...
+        message.stderr('ambiguous help topic: %s' % topic)
     else:
         for line in lines:
-            print >>
+            print >>f, line

-... (26 removed lines truncated)
+### Used by asciidocapi.py ###
+def execute(cmd,opts,args):
+    """
+    Execute asciidoc with command-line options and arguments.
+    cmd is asciidoc command or asciidoc.py path.
+    opts and args conform to values returned by getopt.getopt().
+    Raises SystemExit if an error occurs.
+
+    Doctests:
+
+    1. Check execution:
+
+       >>> import StringIO
+       >>> infile = StringIO.StringIO('Hello *{author}*')
+       >>> outfile = StringIO.StringIO()
+       >>> opts = []
+       >>> opts.append(('--backend','html4'))
+       >>> opts.append(('--no-header-footer',None))
+       >>> opts.append(('--attribute','author=Joe Bloggs'))
+       >>> opts.append(('--out-file',outfile))
+       >>> execute(__file__, opts, [infile])
+       >>> print outfile.getvalue()
+       <p>Hello <strong>Joe Bloggs</strong></p>
+
+       >>>
+
+    """
+    config.init(cmd)
     if len(args) > 1:
-        usage()
+        usage('To many arguments')
         sys.exit(1)
-    backend =
-    doctype =
+    backend = None
+    doctype = None
     confiles = []
     outfile = None
     options = []
-    prof = False
     help_option = False
     for o,v in opts:
         if o in ('--help','-h'):
             help_option = True
-...
-            prof = True
+        #DEPRECATED: --unsafe option.
         if o == '--unsafe':
             document.safe = False
+        if o == '--safe':
+            document.safe = True
         if o == '--version':
             print('asciidoc %s' % VERSION)
             sys.exit(0)
         if o in ('-b','--backend'):
             backend = v
+            # config.cmd_attrs['backend'] = v
         if o in ('-c','--dump-conf'):
             options.append('-c')
         if o in ('-d','--doctype'):
             doctype = v
+            # config.cmd_attrs['doctype'] = v
         if o in ('-e','--no-conf'):
             options.append('-e')
         if o in ('-f','--conf-file'):
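Note: execute() above is the entry point driven by the bundled asciidocapi.py, which is why the '### Used by asciidocapi.py ###' markers and the file-object handling in the __main__ block appear in this diff. A hedged sketch of the client side, assuming asciidocapi.py and asciidoc.py sit together on the Python path (the AsciiDocAPI names come from asciidocapi.py, not from this diff):

    import StringIO
    from asciidocapi import AsciiDocAPI

    asciidoc = AsciiDocAPI()              # locates asciidoc.py next to asciidocapi.py
    asciidoc.options('--no-header-footer')
    asciidoc.attributes['author'] = 'Joe Bloggs'
    infile = StringIO.StringIO('Hello *{author}*')
    outfile = StringIO.StringIO()
    asciidoc.execute(infile, outfile, backend='html4')
    print outfile.getvalue()              # <p>Hello <strong>Joe Bloggs</strong></p>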
@@ -4938,10 +5811,7 @@ def main():
             else:
                 config.cmd_attrs[k] = v
         if o in ('-o','--out-file'):
-...
-                outfile = '<stdout>'
-            else:
-                outfile = v
+            outfile = v
         if o in ('-s','--no-header-footer'):
             options.append('-s')
         if o in ('-v','--verbose'):
@@ -4954,45 +5824,79 @@ def main():
         sys.exit(0)
     if len(args) == 0 and len(opts) == 0:
         usage()
-        sys.exit(
+        sys.exit(0)
     if len(args) == 0:
         usage('No source file specified')
         sys.exit(1)
-    if not backend:
-        usage('No --backend option specified')
-        sys.exit(1)
-...
-...
-    else:
+    # if not backend:
+    #     usage('No --backend option specified')
+    #     sys.exit(1)
+    stdin,stdout = sys.stdin,sys.stdout
+    try:
         infile = args[0]
-... (13 removed lines truncated)
+        if infile == '-':
+            infile = '<stdin>'
+        elif isinstance(infile, str):
+            infile = os.path.abspath(infile)
+        else: # Input file is file object from API call.
+            sys.stdin = infile
+            infile = '<stdin>'
+        if outfile == '-':
+            outfile = '<stdout>'
+        elif isinstance(outfile, str):
+            outfile = os.path.abspath(outfile)
+        elif outfile is None:
+            if infile == '<stdin>':
+                outfile = '<stdout>'
+        else: # Output file is file object from API call.
+            sys.stdout = outfile
+            outfile = '<stdout>'
+        # Do the work.
         asciidoc(backend, doctype, confiles, infile, outfile, options)
-...
-...
+        if document.has_errors:
+            sys.exit(1)
+    finally:
+        sys.stdin,sys.stdout = stdin,stdout

 if __name__ == '__main__':
+    # Process command line options.
+    import getopt
+    try:
+        #DEPRECATED: --unsafe option.
+        opts,args = getopt.getopt(sys.argv[1:],
+            'a:b:cd:ef:hno:svw:',
+            ['attribute=','backend=','conf-file=','doctype=','dump-conf',
+            'help','no-conf','no-header-footer','out-file=',
+            'section-numbers','verbose','version','safe','unsafe',
+            'doctest','filter'])
+    except getopt.GetoptError:
+        message.stderr('illegal command options')
+        sys.exit(1)
+    if '--doctest' in [opt[0] for opt in opts]:
+        # Run module doctests.
+        import doctest
+        options = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS
+        failures,tries = doctest.testmod(optionflags=options)
+        if failures == 0:
+            message.stderr('All doctests passed')
+            sys.exit(0)
+        else:
+            sys.exit(1)
+    if '--filter' in [opt[0] for opt in opts]:
+        config.init(sys.argv[0])
+        config.verbose = bool(set(['-v','--verbose']) & set([opt[0] for opt in opts]))
+        if not args:
+            die('missing --filter command')
+        elif args[0] == 'install':
+            Filter.install(args[1:])
+        elif args[0] == 'remove':
+            Filter.remove(args[1:])
+        elif args[0] == 'list':
+            Filter.list()
+        else:
+            die('illegal --filter command: %s' % args[0])
+        sys.exit(0)
     try:
-...
+        execute(sys.argv[0],opts,args)
     except KeyboardInterrupt:
-        pass
-    except SystemExit:
-        raise
-    except:
-        print_stderr('%s: unexpected error: %s' %
-            (os.path.basename(sys.argv[0]), sys.exc_info()[1]))
-        print_stderr('-'*60)
-        traceback.print_exc(file=sys.stderr)
-        print_stderr('-'*60)
         sys.exit(1)