pygmentize 0.0.1
- data/LICENSE +19 -0
- data/lib/pygments.rb +23 -0
- data/pygmentize.gemspec +11 -0
- data/test/pygments.rb +19 -0
- data/vendor/pygmentize.py +7 -0
- data/vendor/pygments/AUTHORS +73 -0
- data/vendor/pygments/LICENSE +25 -0
- data/vendor/pygments/__init__.py +91 -0
- data/vendor/pygments/__init__.pyc +0 -0
- data/vendor/pygments/cmdline.py +430 -0
- data/vendor/pygments/cmdline.pyc +0 -0
- data/vendor/pygments/console.py +74 -0
- data/vendor/pygments/console.pyc +0 -0
- data/vendor/pygments/filter.py +74 -0
- data/vendor/pygments/filter.pyc +0 -0
- data/vendor/pygments/filters/__init__.py +357 -0
- data/vendor/pygments/filters/__init__.pyc +0 -0
- data/vendor/pygments/formatter.py +92 -0
- data/vendor/pygments/formatter.pyc +0 -0
- data/vendor/pygments/formatters/__init__.py +68 -0
- data/vendor/pygments/formatters/__init__.pyc +0 -0
- data/vendor/pygments/formatters/_mapping.py +92 -0
- data/vendor/pygments/formatters/_mapping.pyc +0 -0
- data/vendor/pygments/formatters/bbcode.py +109 -0
- data/vendor/pygments/formatters/bbcode.pyc +0 -0
- data/vendor/pygments/formatters/html.py +723 -0
- data/vendor/pygments/formatters/html.pyc +0 -0
- data/vendor/pygments/formatters/img.py +553 -0
- data/vendor/pygments/formatters/img.pyc +0 -0
- data/vendor/pygments/formatters/latex.py +354 -0
- data/vendor/pygments/formatters/latex.pyc +0 -0
- data/vendor/pygments/formatters/other.py +117 -0
- data/vendor/pygments/formatters/other.pyc +0 -0
- data/vendor/pygments/formatters/rtf.py +136 -0
- data/vendor/pygments/formatters/rtf.pyc +0 -0
- data/vendor/pygments/formatters/svg.py +154 -0
- data/vendor/pygments/formatters/svg.pyc +0 -0
- data/vendor/pygments/formatters/terminal.py +109 -0
- data/vendor/pygments/formatters/terminal.pyc +0 -0
- data/vendor/pygments/formatters/terminal256.py +219 -0
- data/vendor/pygments/formatters/terminal256.pyc +0 -0
- data/vendor/pygments/lexer.py +660 -0
- data/vendor/pygments/lexer.pyc +0 -0
- data/vendor/pygments/lexers/__init__.py +226 -0
- data/vendor/pygments/lexers/__init__.pyc +0 -0
- data/vendor/pygments/lexers/_asybuiltins.py +1645 -0
- data/vendor/pygments/lexers/_clbuiltins.py +232 -0
- data/vendor/pygments/lexers/_luabuiltins.py +256 -0
- data/vendor/pygments/lexers/_mapping.py +234 -0
- data/vendor/pygments/lexers/_mapping.pyc +0 -0
- data/vendor/pygments/lexers/_phpbuiltins.py +3389 -0
- data/vendor/pygments/lexers/_vimbuiltins.py +3 -0
- data/vendor/pygments/lexers/agile.py +1485 -0
- data/vendor/pygments/lexers/agile.pyc +0 -0
- data/vendor/pygments/lexers/asm.py +353 -0
- data/vendor/pygments/lexers/compiled.py +2365 -0
- data/vendor/pygments/lexers/dotnet.py +355 -0
- data/vendor/pygments/lexers/functional.py +756 -0
- data/vendor/pygments/lexers/functional.pyc +0 -0
- data/vendor/pygments/lexers/math.py +461 -0
- data/vendor/pygments/lexers/other.py +2297 -0
- data/vendor/pygments/lexers/parsers.py +695 -0
- data/vendor/pygments/lexers/special.py +100 -0
- data/vendor/pygments/lexers/special.pyc +0 -0
- data/vendor/pygments/lexers/templates.py +1387 -0
- data/vendor/pygments/lexers/text.py +1586 -0
- data/vendor/pygments/lexers/web.py +1619 -0
- data/vendor/pygments/lexers/web.pyc +0 -0
- data/vendor/pygments/plugin.py +74 -0
- data/vendor/pygments/plugin.pyc +0 -0
- data/vendor/pygments/scanner.py +104 -0
- data/vendor/pygments/style.py +117 -0
- data/vendor/pygments/style.pyc +0 -0
- data/vendor/pygments/styles/__init__.py +68 -0
- data/vendor/pygments/styles/__init__.pyc +0 -0
- data/vendor/pygments/styles/autumn.py +65 -0
- data/vendor/pygments/styles/borland.py +51 -0
- data/vendor/pygments/styles/bw.py +49 -0
- data/vendor/pygments/styles/colorful.py +81 -0
- data/vendor/pygments/styles/default.py +73 -0
- data/vendor/pygments/styles/default.pyc +0 -0
- data/vendor/pygments/styles/emacs.py +72 -0
- data/vendor/pygments/styles/friendly.py +72 -0
- data/vendor/pygments/styles/fruity.py +43 -0
- data/vendor/pygments/styles/manni.py +75 -0
- data/vendor/pygments/styles/monokai.py +106 -0
- data/vendor/pygments/styles/murphy.py +80 -0
- data/vendor/pygments/styles/native.py +65 -0
- data/vendor/pygments/styles/pastie.py +75 -0
- data/vendor/pygments/styles/perldoc.py +69 -0
- data/vendor/pygments/styles/tango.py +141 -0
- data/vendor/pygments/styles/trac.py +63 -0
- data/vendor/pygments/styles/vim.py +63 -0
- data/vendor/pygments/styles/vs.py +38 -0
- data/vendor/pygments/token.py +198 -0
- data/vendor/pygments/token.pyc +0 -0
- data/vendor/pygments/unistring.py +130 -0
- data/vendor/pygments/unistring.pyc +0 -0
- data/vendor/pygments/util.py +226 -0
- data/vendor/pygments/util.pyc +0 -0
- metadata +166 -0
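The bulk of the gem is the vendored Pygments package itself; the diff shown below is the new lexer module data/vendor/pygments/lexers/text.py (+1586 lines). As a quick orientation, here is a minimal sketch, not part of the gem, of how the lexers defined in that module are driven through the standard Pygments API (the INI sample is invented for illustration):

    from pygments import highlight
    from pygments.lexers.text import IniLexer
    from pygments.formatters import HtmlFormatter

    # Invented sample input; IniLexer tokenizes sections, comments and
    # key = value assignments as defined in the module below.
    sample = "[server]\n; listen address\nhost = 0.0.0.0\n"

    # highlight() runs the lexer over the text and renders the token
    # stream with the chosen formatter (HTML here).
    print(highlight(sample, IniLexer(), HtmlFormatter()))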
data/vendor/pygments/lexers/text.py
@@ -0,0 +1,1586 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers.text
    ~~~~~~~~~~~~~~~~~~~~

    Lexers for non-source code file types.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
from bisect import bisect

from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
     bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
     Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer

__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
           'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
           'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
           'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
           'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
           'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']


class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """

    name = 'INI'
    aliases = ['ini', 'cfg']
    filenames = ['*.ini', '*.cfg', '*.properties']
    mimetypes = ['text/x-ini']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[;#].*?$', Comment),
            (r'\[.*?\]$', Keyword),
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
             bygroups(Name.Attribute, Text, Operator, Text, String))
        ]
    }

    def analyse_text(text):
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'


class SourcesListLexer(RegexLexer):
    """
    Lexer that highlights debian sources.list files.

    *New in Pygments 0.7.*
    """

    name = 'Debian Sourcelist'
    aliases = ['sourceslist', 'sources.list']
    filenames = ['sources.list']
    mimetypes = ['application/x-debian-sourceslist']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment),
            (r'^(deb(?:-src)?)(\s+)',
             bygroups(Keyword, Text), 'distribution')
        ],
        'distribution': [
            (r'#.*?$', Comment, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\s$[]+', String),
            (r'\[', String.Other, 'escaped-distribution'),
            (r'\$', String),
            (r'\s+', Text, 'components')
        ],
        'escaped-distribution': [
            (r'\]', String.Other, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\]$]+', String.Other),
            (r'\$', String.Other)
        ],
        'components': [
            (r'#.*?$', Comment, '#pop:2'),
            (r'$', Text, '#pop:2'),
            (r'\s+', Text),
            (r'\S+', Keyword.Pseudo),
        ]
    }

    def analyse_text(text):
        for line in text.split('\n'):
            line = line.strip()
            if not (line.startswith('#') or line.startswith('deb ') or
                    line.startswith('deb-src ') or not line):
                return False
        return True


class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).

    *Rewritten in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']

    r_special = re.compile(r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
    r_comment = re.compile(r'^\s*@?#')

    def get_tokens_unprocessed(self, text):
        ins = []
        lines = text.splitlines(True)
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in lines:
            if self.r_special.match(line) or backslashflag:
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                done += line
        for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
            yield item


class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).

    *New in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['basemake']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
            (r'\s+', Text),
            (r'#.*?\n', Comment),
            (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
             bygroups(Keyword, Text), 'export'),
            (r'export\s+', Keyword),
            # assignment
            (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
             bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
            # strings
            (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
             'block-header'),
            # TODO: add paren handling (grr)
        ],
        'export': [
            (r'[a-zA-Z0-9_${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
        ],
        'block-header': [
            (r'[^,\\\n#]+', Number),
            (r',', Punctuation),
            (r'#.*?\n', Comment),
            (r'\\\n', Text), # line continuation
            (r'\\.', Text),
            (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
        ],
    }


class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']

    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'!.*\n', Generic.Strong),
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ]
    }

    def analyse_text(text):
        if text[:7] == 'Index: ':
            return True
        if text[:5] == 'diff ':
            return True
        if text[:4] == '--- ':
            return 0.9


DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
    'replace']

class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format.  Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.

    *New in Pygments 0.10.*
    """
    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']

    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'{', Operator),
            (r'}', Operator),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text, Operator)),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
             bygroups(Text, Keyword, Text)),
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'.*\n', Text),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [ # darcs add [_CODE_] special operators for clarity
            (r'\n', Text, "#pop"), # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]*', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]*', Generic.Deleted),
        ],
    }


class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    flags = re.VERBOSE | re.MULTILINE
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.]?)+      # Time as :/.-separated groups of 1 or 2 digits
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """
    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                ([^\s]+\s+.*?\n)       # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                ([^\s]+\s+)                      # Nick + Space
                (.*?\n)                          # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            (r"^.*?\n", Text),
        ],
        'msg': [
            (r"[^\s]+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }


class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    *New in Pygments 0.6.*
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }


class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            (r'', Text, '#pop'),
        ],
    }

    def analyse_text(text):
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True


class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    *New in Pygments 0.6.*
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]*', Text, 'textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(..', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        if text[1:3].isalnum() and text[3].isspace():
            return 0.9


class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    *New in Pygments 0.6.*
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#.*?)$', Comment),
            (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            (r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
             bygroups(Name.Builtin, Text), 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'$', Text, '#pop'),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'[^\s"]+', Text)
        ]
    }


class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    *New in Pygments 0.7.*
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword), # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)), # Link
            (r'^----+$', Keyword), # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'}}}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'{{{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc), # slurp boring text
            (r'.', Comment.Preproc), # allow loose { or }
        ],
    }


class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.

    *New in Pygments 0.7.*

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language`` and
        ``.. code:: language`` directives with a lexer for the given
        language (default: ``True``). *New in Pygments 0.8.*
    """
    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        from pygments.lexers import get_lexer_by_name
        from pygments.util import ClassNotFound

        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item

    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution def
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list
            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
             bygroups(Text, Name.Class, Text, Name.Function)),
            # Definition list
            (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text), # escape
            (r'``', String, 'literal'), # code
            (r'(`.+?)(<.+?>)(`__?)', # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String), # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)), # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)), # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
            (r'\*.+?\*', Generic.Emph), # Emphasis
            (r'\[.*?\]_', String), # Footnote or citation
            (r'<.+?>', Name.Tag), # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`\\]+', String),
            (r'\\.', String),
            (r'``', String, '#pop'),
            (r'[`\\]', String),
        ]
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and                # has two lines
            p1 * 2 + 1 == p2 and       # they are the same length
            text[p1+1] in '-=' and     # the next line both starts and ends with
            text[p1+1] == text[p2-1]): # ...a sufficiently high header
            return 0.5


class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.

    *New in Pygments 0.8.*
    """
    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc']
    mimetypes = ['text/x-vim']
    flags = re.MULTILINE

    tokens = {
        'root': [
            # Who decided that doublequote was a good comment character??
            (r'^\s*".*', Comment),
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),

            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/(\\\\|\\/|[^\n/])*/', String.Regex),
            (r'"(\\\\|\\"|[^\n"])*"', String.Double),
            (r"'(\\\\|\\'|[^\n'])*'", String.Single),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation), # Inexact list.  Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other), # These are postprocessed below
            (r'.', Text),
        ],
    }
    def __init__(self, **options):
        from pygments.lexers._vimbuiltins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto

        RegexLexer.__init__(self, **options)

    def is_in(self, w, mapping):
        r"""
        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them.  In fact,
        'ab[breviate]' is a good example.  :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::

            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b

        we match `\b\w+\b` and then call is_in() on those tokens.  See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        p = bisect(mapping, (w,))
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
               mapping[p-1][1][:len(w)] == w: return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                   mapping[p][1][:len(w)] == w
        return False

    def get_tokens_unprocessed(self, text):
        # TODO: builtins are only subsequent tokens on lines
        #       and 'keywords' only happen at the beginning except
        #       for :au ones
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                     self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value


class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    *New in Pygments 0.9.*
    """
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            #(r'^#$', Comment),
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            (r'^(")([\w-]*:)(.*")$',
             bygroups(String, Name.Property, String)),
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }


class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.

    *New in Pygments 0.9.*
    """

    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    keywords = [ "acl", "always_direct", "announce_host",
                 "announce_period", "announce_port", "announce_to",
                 "anonymize_headers", "append_domain", "as_whois_server",
                 "auth_param_basic", "authenticate_children",
                 "authenticate_program", "authenticate_ttl", "broken_posts",
                 "buffered_logs", "cache_access_log", "cache_announce",
                 "cache_dir", "cache_dns_program", "cache_effective_group",
                 "cache_effective_user", "cache_host", "cache_host_acl",
                 "cache_host_domain", "cache_log", "cache_mem",
                 "cache_mem_high", "cache_mem_low", "cache_mgr",
                 "cachemgr_passwd", "cache_peer", "cache_peer_access",
                 "cahce_replacement_policy", "cache_stoplist",
                 "cache_stoplist_pattern", "cache_store_log", "cache_swap",
                 "cache_swap_high", "cache_swap_log", "cache_swap_low",
                 "client_db", "client_lifetime", "client_netmask",
                 "connect_timeout", "coredump_dir", "dead_peer_timeout",
                 "debug_options", "delay_access", "delay_class",
                 "delay_initial_bucket_level", "delay_parameters",
                 "delay_pools", "deny_info", "dns_children", "dns_defnames",
                 "dns_nameservers", "dns_testnames", "emulate_httpd_log",
                 "err_html_text", "fake_user_agent", "firewall_ip",
                 "forwarded_for", "forward_snmpd_port", "fqdncache_size",
                 "ftpget_options", "ftpget_program", "ftp_list_width",
                 "ftp_passive", "ftp_user", "half_closed_clients",
                 "header_access", "header_replace", "hierarchy_stoplist",
                 "high_response_time_warning", "high_page_fault_warning",
                 "htcp_port", "http_access", "http_anonymizer", "httpd_accel",
                 "httpd_accel_host", "httpd_accel_port",
                 "httpd_accel_uses_host_header", "httpd_accel_with_proxy",
                 "http_port", "http_reply_access", "icp_access",
                 "icp_hit_stale", "icp_port", "icp_query_timeout",
                 "ident_lookup", "ident_lookup_access", "ident_timeout",
                 "incoming_http_average", "incoming_icp_average",
                 "inside_firewall", "ipcache_high", "ipcache_low",
                 "ipcache_size", "local_domain", "local_ip", "logfile_rotate",
                 "log_fqdn", "log_icp_queries", "log_mime_hdrs",
                 "maximum_object_size", "maximum_single_addr_tries",
                 "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
                 "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
                 "memory_pools_limit", "memory_replacement_policy",
                 "mime_table", "min_http_poll_cnt", "min_icp_poll_cnt",
                 "minimum_direct_hops", "minimum_object_size",
                 "minimum_retry_timeout", "miss_access", "negative_dns_ttl",
                 "negative_ttl", "neighbor_timeout", "neighbor_type_domain",
                 "netdb_high", "netdb_low", "netdb_ping_period",
                 "netdb_ping_rate", "never_direct", "no_cache",
                 "passthrough_proxy", "pconn_timeout", "pid_filename",
                 "pinger_program", "positive_dns_ttl", "prefer_direct",
                 "proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
                 "quick_abort", "quick_abort_max", "quick_abort_min",
                 "quick_abort_pct", "range_offset_limit", "read_timeout",
                 "redirect_children", "redirect_program",
                 "redirect_rewrites_host_header", "reference_age",
                 "reference_age", "refresh_pattern", "reload_into_ims",
                 "request_body_max_size", "request_size", "request_timeout",
                 "shutdown_lifetime", "single_parent_bypass",
                 "siteselect_timeout", "snmp_access", "snmp_incoming_address",
                 "snmp_port", "source_ping", "ssl_proxy",
                 "store_avg_object_size", "store_objects_per_bucket",
                 "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
                 "tcp_incoming_address", "tcp_outgoing_address",
                 "tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
                 "udp_hit_obj_size", "udp_incoming_address",
                 "udp_outgoing_address", "unique_hostname", "unlinkd_program",
                 "uri_whitespace", "useragent_log", "visible_hostname",
                 "wais_relay", "wais_relay_host", "wais_relay_port",
                 ]

    opts = [ "proxy-only", "weight", "ttl", "no-query", "default",
             "round-robin", "multicast-responder", "on", "off", "all",
             "deny", "allow", "via", "parent", "no-digest", "heap", "lru",
             "realm", "children", "credentialsttl", "none", "disable",
             "offline_toggle", "diskd", "q1", "q2",
             ]

    actions = [ "shutdown", "info", "parameter", "server_list",
                "client_list", r'squid\.conf',
                ]

    actions_stats = [ "objects", "vm_objects", "utilization",
                      "ipcache", "fqdncache", "dns", "redirector", "io",
                      "reply_headers", "filedescriptors", "netdb",
                      ]

    actions_log = [ "status", "enable", "disable", "clear"]

    acls = [ "url_regex", "urlpath_regex", "referer_regex", "port",
             "proto", "req_mime_type", "rep_mime_type", "method",
             "browser", "user", "src", "dst", "time", "dstdomain", "ident",
             "snmp_community",
             ]

    ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'

    def makelistre(list):
        return r'\b(?:'+'|'.join(list)+r')\b'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#', Comment, 'comment'),
            (makelistre(keywords), Keyword),
            (makelistre(opts), Name.Constant),
            # Actions
            (makelistre(actions), String),
            (r'stats/'+makelistre(actions), String),
            (r'log/'+makelistre(actions)+r'=', String),
            (makelistre(acls), Keyword),
            (ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
            (r'\b\d+\b', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.*', Comment, '#pop'),
        ],
    }


class DebianControlLexer(RegexLexer):
    """
    Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.

    *New in Pygments 0.9.*
    """
    name = 'Debian Control file'
    aliases = ['control']
    filenames = ['control']

    tokens = {
        'root': [
            (r'^(Description)', Keyword, 'description'),
            (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
            (r'^((Build-)?Depends)', Keyword, 'depends'),
            (r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
             bygroups(Keyword, Whitespace, String)),
        ],
        'maintainer': [
            (r'<[^>]+>', Generic.Strong),
            (r'<[^>]+>$', Generic.Strong, '#pop'),
            (r',\n?', Text),
            (r'.', Text),
        ],
        'description': [
            (r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
            (r':.*\n', Generic.Strong),
            (r' .*\n', Text),
            ('', Text, '#pop'),
        ],
        'depends': [
            (r':\s*', Text),
            (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
            (r'\(', Text, 'depend_vers'),
            (r',', Text),
            (r'\|', Operator),
            (r'[\s]+', Text),
            (r'[}\)]\s*$', Text, '#pop'),
            (r'[}]', Text),
            (r'[^,]$', Name.Function, '#pop'),
            (r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
            (r'\[.*?\]', Name.Entity),
        ],
        'depend_vers': [
            (r'\),', Text, '#pop'),
            (r'\)[^,]', Text, '#pop:2'),
            (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
        ]
    }


class YamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer."""

    def __init__(self, *args, **kwds):
        super(YamlLexerContext, self).__init__(*args, **kwds)
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        self.block_scalar_indent = None


class YamlLexer(ExtendedRegexLexer):
|
1045
|
+
"""
|
1046
|
+
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
|
1047
|
+
language.
|
1048
|
+
|
1049
|
+
*New in Pygments 0.11.*
|
1050
|
+
"""
|
1051
|
+
|
1052
|
+
name = 'YAML'
|
1053
|
+
aliases = ['yaml']
|
1054
|
+
filenames = ['*.yaml', '*.yml']
|
1055
|
+
mimetypes = ['text/x-yaml']
|
1056
|
+
|
1057
|
+
|
1058
|
+
def something(token_class):
|
1059
|
+
"""Do not produce empty tokens."""
|
1060
|
+
def callback(lexer, match, context):
|
1061
|
+
text = match.group()
|
1062
|
+
if not text:
|
1063
|
+
return
|
1064
|
+
yield match.start(), token_class, text
|
1065
|
+
context.pos = match.end()
|
1066
|
+
return callback
|
1067
|
+
|
1068
|
+
def reset_indent(token_class):
|
1069
|
+
"""Reset the indentation levels."""
|
1070
|
+
def callback(lexer, match, context):
|
1071
|
+
text = match.group()
|
1072
|
+
context.indent_stack = []
|
1073
|
+
context.indent = -1
|
1074
|
+
context.next_indent = 0
|
1075
|
+
context.block_scalar_indent = None
|
1076
|
+
yield match.start(), token_class, text
|
1077
|
+
context.pos = match.end()
|
1078
|
+
return callback
|
1079
|
+
|
1080
|
+
def save_indent(token_class, start=False):
|
1081
|
+
"""Save a possible indentation level."""
|
1082
|
+
def callback(lexer, match, context):
|
1083
|
+
text = match.group()
|
1084
|
+
extra = ''
|
1085
|
+
if start:
|
1086
|
+
context.next_indent = len(text)
|
1087
|
+
if context.next_indent < context.indent:
|
1088
|
+
while context.next_indent < context.indent:
|
1089
|
+
context.indent = context.indent_stack.pop()
|
1090
|
+
if context.next_indent > context.indent:
|
1091
|
+
extra = text[context.indent:]
|
1092
|
+
text = text[:context.indent]
|
1093
|
+
else:
|
1094
|
+
context.next_indent += len(text)
|
1095
|
+
if text:
|
1096
|
+
yield match.start(), token_class, text
|
1097
|
+
if extra:
|
1098
|
+
yield match.start()+len(text), token_class.Error, extra
|
1099
|
+
context.pos = match.end()
|
1100
|
+
return callback
|
1101
|
+
|
1102
|
+
def set_indent(token_class, implicit=False):
|
1103
|
+
"""Set the previously saved indentation level."""
|
1104
|
+
def callback(lexer, match, context):
|
1105
|
+
text = match.group()
|
1106
|
+
if context.indent < context.next_indent:
|
1107
|
+
context.indent_stack.append(context.indent)
|
1108
|
+
context.indent = context.next_indent
|
1109
|
+
if not implicit:
|
1110
|
+
context.next_indent += len(text)
|
1111
|
+
yield match.start(), token_class, text
|
1112
|
+
context.pos = match.end()
|
1113
|
+
return callback
|
1114
|
+
|
1115
|
+
def set_block_scalar_indent(token_class):
|
1116
|
+
"""Set an explicit indentation level for a block scalar."""
|
1117
|
+
def callback(lexer, match, context):
|
1118
|
+
text = match.group()
|
1119
|
+
context.block_scalar_indent = None
|
1120
|
+
if not text:
|
1121
|
+
return
|
1122
|
+
increment = match.group(1)
|
1123
|
+
if increment:
|
1124
|
+
current_indent = max(context.indent, 0)
|
1125
|
+
increment = int(increment)
|
1126
|
+
context.block_scalar_indent = current_indent + increment
|
1127
|
+
if text:
|
1128
|
+
yield match.start(), token_class, text
|
1129
|
+
context.pos = match.end()
|
1130
|
+
return callback
|
1131
|
+
|
1132
|
+
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
|
1133
|
+
"""Process an empty line in a block scalar."""
|
1134
|
+
def callback(lexer, match, context):
|
1135
|
+
text = match.group()
|
1136
|
+
if (context.block_scalar_indent is None or
|
1137
|
+
len(text) <= context.block_scalar_indent):
|
1138
|
+
if text:
|
1139
|
+
yield match.start(), indent_token_class, text
|
1140
|
+
else:
|
1141
|
+
indentation = text[:context.block_scalar_indent]
|
1142
|
+
content = text[context.block_scalar_indent:]
|
1143
|
+
yield match.start(), indent_token_class, indentation
|
1144
|
+
yield (match.start()+context.block_scalar_indent,
|
1145
|
+
content_token_class, content)
|
1146
|
+
context.pos = match.end()
|
1147
|
+
return callback
|
1148
|
+
|
1149
|
+
def parse_block_scalar_indent(token_class):
|
1150
|
+
"""Process indentation spaces in a block scalar."""
|
1151
|
+
def callback(lexer, match, context):
|
1152
|
+
text = match.group()
|
1153
|
+
if context.block_scalar_indent is None:
|
1154
|
+
if len(text) <= max(context.indent, 0):
|
1155
|
+
context.stack.pop()
|
1156
|
+
context.stack.pop()
|
1157
|
+
return
|
1158
|
+
context.block_scalar_indent = len(text)
|
1159
|
+
else:
|
1160
|
+
if len(text) < context.block_scalar_indent:
|
1161
|
+
context.stack.pop()
|
1162
|
+
context.stack.pop()
|
1163
|
+
return
|
1164
|
+
if text:
|
1165
|
+
yield match.start(), token_class, text
|
1166
|
+
context.pos = match.end()
|
1167
|
+
return callback
|
1168
|
+
|
1169
|
+
def parse_plain_scalar_indent(token_class):
|
1170
|
+
"""Process indentation spaces in a plain scalar."""
|
1171
|
+
def callback(lexer, match, context):
|
1172
|
+
text = match.group()
|
1173
|
+
if len(text) <= context.indent:
|
1174
|
+
context.stack.pop()
|
1175
|
+
context.stack.pop()
|
1176
|
+
return
|
1177
|
+
if text:
|
1178
|
+
yield match.start(), token_class, text
|
1179
|
+
context.pos = match.end()
|
1180
|
+
return callback
|
1181
|
+
|
1182
|
+
|
1183
|
+
|
1184
|
+
tokens = {
|
1185
|
+
# the root rules
|
1186
|
+
'root': [
|
1187
|
+
# ignored whitespaces
|
1188
|
+
(r'[ ]+(?=#|$)', Text),
|
1189
|
+
# line breaks
|
1190
|
+
(r'\n+', Text),
|
1191
|
+
# a comment
|
1192
|
+
(r'#[^\n]*', Comment.Single),
|
1193
|
+
# the '%YAML' directive
|
1194
|
+
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
|
1195
|
+
# the %TAG directive
|
1196
|
+
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
|
1197
|
+
# document start and document end indicators
|
1198
|
+
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
|
1199
|
+
'block-line'),
|
1200
|
+
# indentation spaces
|
1201
|
+
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
|
1202
|
+
('block-line', 'indentation')),
|
1203
|
+
],
|
1204
|
+
|
1205
|
+
# trailing whitespaces after directives or a block scalar indicator
|
1206
|
+
'ignored-line': [
|
1207
|
+
# ignored whitespaces
|
1208
|
+
(r'[ ]+(?=#|$)', Text),
|
1209
|
+
# a comment
|
1210
|
+
(r'#[^\n]*', Comment.Single),
|
1211
|
+
# line break
|
1212
|
+
(r'\n', Text, '#pop:2'),
|
1213
|
+
],
|
1214
|
+
|
1215
|
+
# the %YAML directive
|
1216
|
+
'yaml-directive': [
|
1217
|
+
# the version number
|
1218
|
+
(r'([ ]+)([0-9]+\.[0-9]+)',
|
1219
|
+
bygroups(Text, Number), 'ignored-line'),
|
1220
|
+
],
|
1221
|
+
|
1222
|
+
# the %YAG directive
|
1223
|
+
'tag-directive': [
|
1224
|
+
# a tag handle and the corresponding prefix
|
1225
|
+
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
|
1226
|
+
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
|
1227
|
+
bygroups(Text, Keyword.Type, Text, Keyword.Type),
|
1228
|
+
'ignored-line'),
|
1229
|
+
],
|
1230
|
+
|
1231
|
+
# block scalar indicators and indentation spaces
|
1232
|
+
'indentation': [
|
1233
|
+
# trailing whitespaces are ignored
|
1234
|
+
(r'[ ]*$', something(Text), '#pop:2'),
|
1235
|
+
# whitespaces preceeding block collection indicators
|
1236
|
+
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
|
1237
|
+
# block collection indicators
|
1238
|
+
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
|
1239
|
+
# the beginning a block line
|
1240
|
+
(r'[ ]*', save_indent(Text), '#pop'),
|
1241
|
+
],
|
1242
|
+
|
1243
|
+
# an indented line in the block context
|
1244
|
+
'block-line': [
|
1245
|
+
# the line end
|
1246
|
+
(r'[ ]*(?=#|$)', something(Text), '#pop'),
|
1247
|
+
# whitespaces separating tokens
|
1248
|
+
(r'[ ]+', Text),
|
1249
|
+
# tags, anchors and aliases,
|
1250
|
+
include('descriptors'),
|
1251
|
+
# block collections and scalars
|
1252
|
+
include('block-nodes'),
|
1253
|
+
# flow collections and quoted scalars
|
1254
|
+
include('flow-nodes'),
|
1255
|
+
# a plain scalar
|
1256
|
+
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
|
1257
|
+
something(Name.Variable),
|
1258
|
+
'plain-scalar-in-block-context'),
|
1259
|
+
],
|
1260
|
+
|
1261
|
+
# tags, anchors, aliases
|
1262
|
+
'descriptors' : [
|
1263
|
+
# a full-form tag
|
1264
|
+
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
|
1265
|
+
# a tag in the form '!', '!suffix' or '!handle!suffix'
|
1266
|
+
(r'!(?:[0-9A-Za-z_-]+)?'
|
1267
|
+
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
|
1268
|
+
# an anchor
|
1269
|
+
(r'&[0-9A-Za-z_-]+', Name.Label),
|
1270
|
+
# an alias
|
1271
|
+
(r'\*[0-9A-Za-z_-]+', Name.Variable),
|
1272
|
+
],
|
1273
|
+
|
1274
|
+
# block collections and scalars
|
1275
|
+
'block-nodes': [
|
1276
|
+
# implicit key
|
1277
|
+
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
|
1278
|
+
# literal and folded scalars
|
1279
|
+
(r'[|>]', Punctuation.Indicator,
|
1280
|
+
('block-scalar-content', 'block-scalar-header')),
|
1281
|
+
],
|
1282
|
+
|
1283
|
+
# flow collections and quoted scalars
|
1284
|
+
'flow-nodes': [
|
1285
|
+
# a flow sequence
|
1286
|
+
(r'\[', Punctuation.Indicator, 'flow-sequence'),
|
1287
|
+
# a flow mapping
|
1288
|
+
(r'\{', Punctuation.Indicator, 'flow-mapping'),
|
1289
|
+
# a single-quoted scalar
|
1290
|
+
(r'\'', String, 'single-quoted-scalar'),
|
1291
|
+
# a double-quoted scalar
|
1292
|
+
(r'\"', String, 'double-quoted-scalar'),
|
1293
|
+
],
|
1294
|
+
|
1295
|
+
# the content of a flow collection
|
1296
|
+
'flow-collection': [
|
1297
|
+
# whitespaces
|
1298
|
+
(r'[ ]+', Text),
|
1299
|
+
# line breaks
|
1300
|
+
(r'\n+', Text),
|
1301
|
+
# a comment
|
1302
|
+
(r'#[^\n]*', Comment.Single),
|
1303
|
+
# simple indicators
|
1304
|
+
(r'[?:,]', Punctuation.Indicator),
|
1305
|
+
# tags, anchors and aliases
|
1306
|
+
include('descriptors'),
|
1307
|
+
+            # nested collections and quoted scalars
+            include('flow-nodes'),
+            # a plain scalar
+            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
+             something(Name.Variable),
+             'plain-scalar-in-flow-context'),
+        ],
+
+        # a flow sequence indicated by '[' and ']'
+        'flow-sequence': [
+            # include flow collection rules
+            include('flow-collection'),
+            # the closing indicator
+            (r'\]', Punctuation.Indicator, '#pop'),
+        ],
+
+        # a flow mapping indicated by '{' and '}'
+        'flow-mapping': [
+            # include flow collection rules
+            include('flow-collection'),
+            # the closing indicator
+            (r'\}', Punctuation.Indicator, '#pop'),
+        ],
+
+        # block scalar lines
+        'block-scalar-content': [
+            # line break
+            (r'\n', Text),
+            # empty line
+            (r'^[ ]+$',
+             parse_block_scalar_empty_line(Text, Name.Constant)),
+            # indentation spaces (we may leave the state here)
+            (r'^[ ]*', parse_block_scalar_indent(Text)),
+            # line content
+            (r'[^\n\r\f\v]+', Name.Constant),
+        ],
+
+        # the content of a literal or folded scalar
+        'block-scalar-header': [
+            # indentation indicator followed by chomping flag
+            (r'([1-9])?[+-]?(?=[ ]|$)',
+             set_block_scalar_indent(Punctuation.Indicator),
+             'ignored-line'),
+            # chomping flag followed by indentation indicator
+            (r'[+-]?([1-9])?(?=[ ]|$)',
+             set_block_scalar_indent(Punctuation.Indicator),
+             'ignored-line'),
+        ],
+
+        # ignored and regular whitespaces in quoted scalars
+        'quoted-scalar-whitespaces': [
+            # leading and trailing whitespaces are ignored
+            (r'^[ ]+|[ ]+$', Text),
+            # line breaks are ignored
+            (r'\n+', Text),
+            # other whitespaces are a part of the value
+            (r'[ ]+', Name.Variable),
+        ],
+
+        # single-quoted scalars
+        'single-quoted-scalar': [
+            # include whitespace and line break rules
+            include('quoted-scalar-whitespaces'),
+            # escaping of the quote character
+            (r'\'\'', String.Escape),
+            # regular non-whitespace characters
+            (r'[^ \t\n\r\f\v\']+', String),
+            # the closing quote
+            (r'\'', String, '#pop'),
+        ],
+
+        # double-quoted scalars
+        'double-quoted-scalar': [
+            # include whitespace and line break rules
+            include('quoted-scalar-whitespaces'),
+            # escaping of special characters
+            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
+            # escape codes
+            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
+             String.Escape),
+            # regular non-whitespace characters
+            (r'[^ \t\n\r\f\v\"\\]+', String),
+            # the closing quote
+            (r'"', String, '#pop'),
+        ],
+
+        # the beginning of a new line while scanning a plain scalar
+        'plain-scalar-in-block-context-new-line': [
+            # empty lines
+            (r'^[ ]+$', Text),
+            # line breaks
+            (r'\n+', Text),
+            # document start and document end indicators
+            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
+            # indentation spaces (we may leave the block line state here)
+            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
+        ],
+
+        # a plain scalar in the block context
+        'plain-scalar-in-block-context': [
+            # the scalar ends with the ':' indicator
+            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
+            # the scalar ends with whitespaces followed by a comment
+            (r'[ ]+(?=#)', Text, '#pop'),
+            # trailing whitespaces are ignored
+            (r'[ ]+$', Text),
+            # line breaks are ignored
+            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
+            # other whitespaces are a part of the value
+            (r'[ ]+', Literal.Scalar.Plain),
+            # regular non-whitespace characters
+            (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
+        ],
+
+        # a plain scalar in the flow context
+        'plain-scalar-in-flow-context': [
+            # the scalar ends with an indicator character
+            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
+            # the scalar ends with a comment
+            (r'[ ]+(?=#)', Text, '#pop'),
+            # leading and trailing whitespaces are ignored
+            (r'^[ ]+|[ ]+$', Text),
+            # line breaks are ignored
+            (r'\n+', Text),
+            # other whitespaces are a part of the value
+            (r'[ ]+', Name.Variable),
+            # regular non-whitespace characters
+            (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
+        ],
+
+    }
+
+    def get_tokens_unprocessed(self, text=None, context=None):
+        if context is None:
+            context = YamlLexerContext(text, 0)
+        return super(YamlLexer, self).get_tokens_unprocessed(text, context)
+
+
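The YAML states above are context-sensitive, which is why YamlLexer overrides
get_tokens_unprocessed to seed a YamlLexerContext carrying the current
indentation. A minimal usage sketch, assuming a standard `pygments` install
equivalent to this vendored copy (the sample document is invented):

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import TerminalFormatter

    # Invented sample: a block mapping plus a flow sequence, exercising the
    # 'flow-sequence' and plain/quoted scalar states defined above.
    yaml_source = 'server:\n  ports: [80, 443]\n  name: "front-end"\n'

    # highlight() drives the overridden get_tokens_unprocessed, which
    # creates a fresh YamlLexerContext when none is passed in.
    print(highlight(yaml_source, get_lexer_by_name('yaml'), TerminalFormatter()))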
+class LighttpdConfLexer(RegexLexer):
+    """
+    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
+
+    *New in Pygments 0.11.*
+    """
+    name = 'Lighttpd configuration file'
+    aliases = ['lighty', 'lighttpd']
+    filenames = []
+    mimetypes = ['text/x-lighttpd-conf']
+
+    tokens = {
+        'root': [
+            (r'#.*\n', Comment.Single),
+            (r'/\S*', Name), # pathname
+            (r'[a-zA-Z._-]+', Keyword),
+            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
+            (r'[0-9]+', Number),
+            (r'=>|=~|\+=|==|=|\+', Operator),
+            (r'\$[A-Z]+', Name.Builtin),
+            (r'[(){}\[\],]', Punctuation),
+            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
+            (r'\s+', Text),
+        ],
+
+    }
+
+
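Since `filenames` is empty, this lexer is never picked by file extension; it
has to be requested by alias or mimetype. A minimal sketch, assuming a
standard `pygments` install (the config fragment is invented):

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import HtmlFormatter

    # Invented lighttpd fragment; 'lighty' is one of the aliases above.
    conf = 'server.port = 80\nserver.modules = ( "mod_rewrite" )\n'
    print(highlight(conf, get_lexer_by_name('lighty'), HtmlFormatter()))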
+class NginxConfLexer(RegexLexer):
+    """
+    Lexer for `Nginx <http://nginx.net/>`_ configuration files.
+
+    *New in Pygments 0.11.*
+    """
+    name = 'Nginx configuration file'
+    aliases = ['nginx']
+    filenames = []
+    mimetypes = ['text/x-nginx-conf']
+
+    tokens = {
+        'root': [
+            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
+            (r'[^\s;#]+', Keyword, 'stmt'),
+            include('base'),
+        ],
+        'block': [
+            (r'}', Punctuation, '#pop:2'),
+            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
+            include('base'),
+        ],
+        'stmt': [
+            (r'{', Punctuation, 'block'),
+            (r';', Punctuation, '#pop'),
+            include('base'),
+        ],
+        'base': [
+            (r'#.*\n', Comment.Single),
+            (r'on|off', Name.Constant),
+            (r'\$[^\s;#()]+', Name.Variable),
+            (r'([a-z0-9.-]+)(:)([0-9]+)',
+             bygroups(Name, Punctuation, Number.Integer)),
+            (r'[a-z-]+/[a-z-+]+', String), # mimetype
+            #(r'[a-zA-Z._-]+', Keyword),
+            (r'[0-9]+[km]?\b', Number.Integer),
+            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
+            (r'[:=~]', Punctuation),
+            (r'[^\s;#{}$]+', String), # catch all
+            (r'/[^\s;#]*', Name), # pathname
+            (r'\s+', Text),
+            (r'[$;]', Text), # leftover characters
+        ],
+    }
+
+
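The nginx rules are stateful: a directive name in 'root' pushes 'stmt', '{'
pushes 'block', ';' pops back out, and '}' uses '#pop:2' to leave both
'block' and the enclosing 'stmt' in one step. A small sketch of the
resulting token stream, assuming a standard `pygments` install (the config
is invented):

    from pygments.lexers import get_lexer_by_name

    # Invented nginx fragment with one nested block.
    conf = 'server {\n    listen 80;\n    root /var/www;\n}\n'

    # get_tokens() yields (token_type, value) pairs; whitespace-only values
    # are skipped here just to keep the output short.
    for ttype, value in get_lexer_by_name('nginx').get_tokens(conf):
        if value.strip():
            print(ttype, repr(value))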
+class CMakeLexer(RegexLexer):
+    """
+    Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
+
+    *New in Pygments 1.2.*
+    """
+    name = 'CMake'
+    aliases = ['cmake']
+    filenames = ['*.cmake']
+    mimetypes = ['text/x-cmake']
+
+    tokens = {
+        'root': [
+            #(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
+            # r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
+            # r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
+            # r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
+            # r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
+            # r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
+            # r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
+            # r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
+            # r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
+            # r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
+            # r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
+            # r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
+            # r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
+            # r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
+            # r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
+            # r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
+            # r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
+            # r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
+            # r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
+            # r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
+            # r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
+            # r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
+            # r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
+            # r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
+            # r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
+            # r'COUNTARGS)\b', Name.Builtin, 'args'),
+            (r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
+             Punctuation), 'args'),
+            include('keywords'),
+            include('ws')
+        ],
+        'args': [
+            (r'\(', Punctuation, '#push'),
+            (r'\)', Punctuation, '#pop'),
+            (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
+            (r'(?s)".*?"', String.Double),
+            (r'\\\S+', String),
+            (r'[^\)$"# \t\n]+', String),
+            (r'\n', Text), # explicitly legal
+            include('keywords'),
+            include('ws')
+        ],
+        'string': [
+
+        ],
+        'keywords': [
+            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
+             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
+        ],
+        'ws': [
+            (r'[ \t]+', Text),
+            (r'#.+\n', Comment),
+        ]
+    }
+
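Because `filenames` lists only '*.cmake', a CMakeLists.txt is not matched by
extension in this version and must be requested via the 'cmake' alias. A
minimal sketch, assuming a standard `pygments` install (the CMake snippet is
invented):

    from pygments import highlight
    from pygments.lexers import get_lexer_for_filename
    from pygments.formatters import TerminalFormatter

    # Invented CMake source; IF(WIN32) exercises the 'keywords' state above.
    cmake_src = 'PROJECT(demo)\nIF(WIN32)\n  SET(OS "win")\nENDIF(WIN32)\n'

    # Filename lookup works here because 'build.cmake' matches '*.cmake'.
    lexer = get_lexer_for_filename('build.cmake')
    print(highlight(cmake_src, lexer, TerminalFormatter()))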