pygmentize 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. data/LICENSE +19 -0
  2. data/lib/pygments.rb +23 -0
  3. data/pygmentize.gemspec +11 -0
  4. data/test/pygments.rb +19 -0
  5. data/vendor/pygmentize.py +7 -0
  6. data/vendor/pygments/AUTHORS +73 -0
  7. data/vendor/pygments/LICENSE +25 -0
  8. data/vendor/pygments/__init__.py +91 -0
  9. data/vendor/pygments/__init__.pyc +0 -0
  10. data/vendor/pygments/cmdline.py +430 -0
  11. data/vendor/pygments/cmdline.pyc +0 -0
  12. data/vendor/pygments/console.py +74 -0
  13. data/vendor/pygments/console.pyc +0 -0
  14. data/vendor/pygments/filter.py +74 -0
  15. data/vendor/pygments/filter.pyc +0 -0
  16. data/vendor/pygments/filters/__init__.py +357 -0
  17. data/vendor/pygments/filters/__init__.pyc +0 -0
  18. data/vendor/pygments/formatter.py +92 -0
  19. data/vendor/pygments/formatter.pyc +0 -0
  20. data/vendor/pygments/formatters/__init__.py +68 -0
  21. data/vendor/pygments/formatters/__init__.pyc +0 -0
  22. data/vendor/pygments/formatters/_mapping.py +92 -0
  23. data/vendor/pygments/formatters/_mapping.pyc +0 -0
  24. data/vendor/pygments/formatters/bbcode.py +109 -0
  25. data/vendor/pygments/formatters/bbcode.pyc +0 -0
  26. data/vendor/pygments/formatters/html.py +723 -0
  27. data/vendor/pygments/formatters/html.pyc +0 -0
  28. data/vendor/pygments/formatters/img.py +553 -0
  29. data/vendor/pygments/formatters/img.pyc +0 -0
  30. data/vendor/pygments/formatters/latex.py +354 -0
  31. data/vendor/pygments/formatters/latex.pyc +0 -0
  32. data/vendor/pygments/formatters/other.py +117 -0
  33. data/vendor/pygments/formatters/other.pyc +0 -0
  34. data/vendor/pygments/formatters/rtf.py +136 -0
  35. data/vendor/pygments/formatters/rtf.pyc +0 -0
  36. data/vendor/pygments/formatters/svg.py +154 -0
  37. data/vendor/pygments/formatters/svg.pyc +0 -0
  38. data/vendor/pygments/formatters/terminal.py +109 -0
  39. data/vendor/pygments/formatters/terminal.pyc +0 -0
  40. data/vendor/pygments/formatters/terminal256.py +219 -0
  41. data/vendor/pygments/formatters/terminal256.pyc +0 -0
  42. data/vendor/pygments/lexer.py +660 -0
  43. data/vendor/pygments/lexer.pyc +0 -0
  44. data/vendor/pygments/lexers/__init__.py +226 -0
  45. data/vendor/pygments/lexers/__init__.pyc +0 -0
  46. data/vendor/pygments/lexers/_asybuiltins.py +1645 -0
  47. data/vendor/pygments/lexers/_clbuiltins.py +232 -0
  48. data/vendor/pygments/lexers/_luabuiltins.py +256 -0
  49. data/vendor/pygments/lexers/_mapping.py +234 -0
  50. data/vendor/pygments/lexers/_mapping.pyc +0 -0
  51. data/vendor/pygments/lexers/_phpbuiltins.py +3389 -0
  52. data/vendor/pygments/lexers/_vimbuiltins.py +3 -0
  53. data/vendor/pygments/lexers/agile.py +1485 -0
  54. data/vendor/pygments/lexers/agile.pyc +0 -0
  55. data/vendor/pygments/lexers/asm.py +353 -0
  56. data/vendor/pygments/lexers/compiled.py +2365 -0
  57. data/vendor/pygments/lexers/dotnet.py +355 -0
  58. data/vendor/pygments/lexers/functional.py +756 -0
  59. data/vendor/pygments/lexers/functional.pyc +0 -0
  60. data/vendor/pygments/lexers/math.py +461 -0
  61. data/vendor/pygments/lexers/other.py +2297 -0
  62. data/vendor/pygments/lexers/parsers.py +695 -0
  63. data/vendor/pygments/lexers/special.py +100 -0
  64. data/vendor/pygments/lexers/special.pyc +0 -0
  65. data/vendor/pygments/lexers/templates.py +1387 -0
  66. data/vendor/pygments/lexers/text.py +1586 -0
  67. data/vendor/pygments/lexers/web.py +1619 -0
  68. data/vendor/pygments/lexers/web.pyc +0 -0
  69. data/vendor/pygments/plugin.py +74 -0
  70. data/vendor/pygments/plugin.pyc +0 -0
  71. data/vendor/pygments/scanner.py +104 -0
  72. data/vendor/pygments/style.py +117 -0
  73. data/vendor/pygments/style.pyc +0 -0
  74. data/vendor/pygments/styles/__init__.py +68 -0
  75. data/vendor/pygments/styles/__init__.pyc +0 -0
  76. data/vendor/pygments/styles/autumn.py +65 -0
  77. data/vendor/pygments/styles/borland.py +51 -0
  78. data/vendor/pygments/styles/bw.py +49 -0
  79. data/vendor/pygments/styles/colorful.py +81 -0
  80. data/vendor/pygments/styles/default.py +73 -0
  81. data/vendor/pygments/styles/default.pyc +0 -0
  82. data/vendor/pygments/styles/emacs.py +72 -0
  83. data/vendor/pygments/styles/friendly.py +72 -0
  84. data/vendor/pygments/styles/fruity.py +43 -0
  85. data/vendor/pygments/styles/manni.py +75 -0
  86. data/vendor/pygments/styles/monokai.py +106 -0
  87. data/vendor/pygments/styles/murphy.py +80 -0
  88. data/vendor/pygments/styles/native.py +65 -0
  89. data/vendor/pygments/styles/pastie.py +75 -0
  90. data/vendor/pygments/styles/perldoc.py +69 -0
  91. data/vendor/pygments/styles/tango.py +141 -0
  92. data/vendor/pygments/styles/trac.py +63 -0
  93. data/vendor/pygments/styles/vim.py +63 -0
  94. data/vendor/pygments/styles/vs.py +38 -0
  95. data/vendor/pygments/token.py +198 -0
  96. data/vendor/pygments/token.pyc +0 -0
  97. data/vendor/pygments/unistring.py +130 -0
  98. data/vendor/pygments/unistring.pyc +0 -0
  99. data/vendor/pygments/util.py +226 -0
  100. data/vendor/pygments/util.pyc +0 -0
  101. metadata +166 -0
data/vendor/pygments/lexers/special.py
@@ -0,0 +1,100 @@
+ # -*- coding: utf-8 -*-
+ """
+     pygments.lexers.special
+     ~~~~~~~~~~~~~~~~~~~~~~~
+
+     Special lexers.
+
+     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
+     :license: BSD, see LICENSE for details.
+ """
+
+ import re
+ import cStringIO
+
+ from pygments.lexer import Lexer
+ from pygments.token import Token, Error, Text
+ from pygments.util import get_choice_opt, b
+
+
+ __all__ = ['TextLexer', 'RawTokenLexer']
+
+
+ class TextLexer(Lexer):
+     """
+     "Null" lexer, doesn't highlight anything.
+     """
+     name = 'Text only'
+     aliases = ['text']
+     filenames = ['*.txt']
+     mimetypes = ['text/plain']
+
+     def get_tokens_unprocessed(self, text):
+         yield 0, Text, text
+
+
+ _ttype_cache = {}
+
+ line_re = re.compile(b('.*?\n'))
+
+ class RawTokenLexer(Lexer):
+     """
+     Recreate a token stream formatted with the `RawTokenFormatter`. This
+     lexer raises exceptions during parsing if the token stream in the
+     file is malformed.
+
+     Additional options accepted:
+
+     `compress`
+         If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
+         the given compression algorithm before lexing (default: ``""``).
+     """
+     name = 'Raw token data'
+     aliases = ['raw']
+     filenames = []
+     mimetypes = ['application/x-pygments-tokens']
+
+     def __init__(self, **options):
+         self.compress = get_choice_opt(options, 'compress',
+                                        ['', 'none', 'gz', 'bz2'], '')
+         Lexer.__init__(self, **options)
+
+     def get_tokens(self, text):
+         if isinstance(text, unicode):
+             # raw token stream never has any non-ASCII characters
+             text = text.encode('ascii')
+         if self.compress == 'gz':
+             import gzip
+             gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text))
+             text = gzipfile.read()
+         elif self.compress == 'bz2':
+             import bz2
+             text = bz2.decompress(text)
+
+         # do not call Lexer.get_tokens() because we do not want Unicode
+         # decoding to occur, and stripping is not optional.
+         text = text.strip(b('\n')) + b('\n')
+         for i, t, v in self.get_tokens_unprocessed(text):
+             yield t, v
+
+     def get_tokens_unprocessed(self, text):
+         length = 0
+         for match in line_re.finditer(text):
+             try:
+                 ttypestr, val = match.group().split(b('\t'), 1)
+             except ValueError:
+                 val = match.group().decode(self.encoding)
+                 ttype = Error
+             else:
+                 ttype = _ttype_cache.get(ttypestr)
+                 if not ttype:
+                     ttype = Token
+                     ttypes = ttypestr.split('.')[1:]
+                     for ttype_ in ttypes:
+                         if not ttype_ or not ttype_[0].isupper():
+                             raise ValueError('malformed token name')
+                         ttype = getattr(ttype, ttype_)
+                     _ttype_cache[ttypestr] = ttype
+                 val = val[2:-2].decode('unicode-escape')
+             yield length, ttype, val
+             length += len(val)
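Editor's note: the `RawTokenLexer` above is the inverse of Pygments' `RawTokenFormatter` — the formatter dumps one `Token.Type<TAB>repr(value)` pair per line, and the `raw` alias parses that dump back into a token stream. A minimal round-trip sketch, assuming any working Pygments install on the import path (not part of this package's diff):

    from pygments import highlight
    from pygments.lexers import PythonLexer, get_lexer_by_name
    from pygments.formatters import RawTokenFormatter

    # Serialize a token stream: one "Token.Type\t'value'" pair per line.
    raw = highlight('print(1)', PythonLexer(), RawTokenFormatter())
    # RawTokenFormatter emits bytes in newer Pygments; normalize to str.
    text = raw.decode('latin1') if isinstance(raw, bytes) else raw

    # Parse the dump back into (token type, value) pairs.
    for ttype, value in get_lexer_by_name('raw').get_tokens(text):
        print(ttype, repr(value))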
data/vendor/pygments/lexers/templates.py
@@ -0,0 +1,1387 @@
+ # -*- coding: utf-8 -*-
+ """
+     pygments.lexers.templates
+     ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+     Lexers for various template engines' markup.
+
+     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
+     :license: BSD, see LICENSE for details.
+ """
+
+ import re
+
+ from pygments.lexers.web import \
+      PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
+ from pygments.lexers.agile import PythonLexer
+ from pygments.lexers.compiled import JavaLexer
+ from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
+      include, using, this
+ from pygments.token import Error, Punctuation, \
+      Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
+ from pygments.util import html_doctype_matches, looks_like_xml
+
+ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
+            'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
+            'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
+            'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
+            'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
+            'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
+            'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
+            'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
+            'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
+            'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MakoLexer',
+            'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
+            'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
+            'CheetahXmlLexer', 'CheetahJavascriptLexer',
+            'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer',
+            'ColdfusionLexer', 'ColdfusionHtmlLexer']
+
+
+ class ErbLexer(Lexer):
+     """
+     Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
+     lexer.
+
+     Just highlights ruby code between the preprocessor directives, other data
+     is left untouched by the lexer.
+
+     All options are also forwarded to the `RubyLexer`.
+     """
+
+     name = 'ERB'
+     aliases = ['erb']
+     mimetypes = ['application/x-ruby-templating']
+
+     _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
+
+     def __init__(self, **options):
+         from pygments.lexers.agile import RubyLexer
+         self.ruby_lexer = RubyLexer(**options)
+         Lexer.__init__(self, **options)
+
+     def get_tokens_unprocessed(self, text):
+         """
+         Since ERB doesn't allow "<%" and other tags inside of ruby
+         blocks we have to use a split approach here that fails for
+         that too.
+         """
+         tokens = self._block_re.split(text)
+         tokens.reverse()
+         state = idx = 0
+         try:
+             while True:
+                 # text
+                 if state == 0:
+                     val = tokens.pop()
+                     yield idx, Other, val
+                     idx += len(val)
+                     state = 1
+                 # block starts
+                 elif state == 1:
+                     tag = tokens.pop()
+                     # literals
+                     if tag in ('<%%', '%%>'):
+                         yield idx, Other, tag
+                         idx += 3
+                         state = 0
+                     # comment
+                     elif tag == '<%#':
+                         yield idx, Comment.Preproc, tag
+                         val = tokens.pop()
+                         yield idx + 3, Comment, val
+                         idx += 3 + len(val)
+                         state = 2
+                     # blocks or output
+                     elif tag in ('<%', '<%=', '<%-'):
+                         yield idx, Comment.Preproc, tag
+                         idx += len(tag)
+                         data = tokens.pop()
+                         r_idx = 0
+                         for r_idx, r_token, r_value in \
+                                 self.ruby_lexer.get_tokens_unprocessed(data):
+                             yield r_idx + idx, r_token, r_value
+                         idx += len(data)
+                         state = 2
+                     elif tag in ('%>', '-%>'):
+                         yield idx, Error, tag
+                         idx += len(tag)
+                         state = 0
+                     # % raw ruby statements
+                     else:
+                         yield idx, Comment.Preproc, tag[0]
+                         r_idx = 0
+                         for r_idx, r_token, r_value in \
+                                 self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
+                             yield idx + 1 + r_idx, r_token, r_value
+                         idx += len(tag)
+                         state = 0
+                 # block ends
+                 elif state == 2:
+                     tag = tokens.pop()
+                     if tag not in ('%>', '-%>'):
+                         yield idx, Other, tag
+                     else:
+                         yield idx, Comment.Preproc, tag
+                     idx += len(tag)
+                     state = 0
+         except IndexError:
+             return
+
+     def analyse_text(text):
+         if '<%' in text and '%>' in text:
+             return 0.4
+
+
+ class SmartyLexer(RegexLexer):
+     """
+     Generic `Smarty <http://smarty.php.net/>`_ template lexer.
+
+     Just highlights smarty code between the preprocessor directives, other
+     data is left untouched by the lexer.
+     """
+
+     name = 'Smarty'
+     aliases = ['smarty']
+     filenames = ['*.tpl']
+     mimetypes = ['application/x-smarty']
+
+     flags = re.MULTILINE | re.DOTALL
+
+     tokens = {
+         'root': [
+             (r'[^{]+', Other),
+             (r'(\{)(\*.*?\*)(\})',
+              bygroups(Comment.Preproc, Comment, Comment.Preproc)),
+             (r'(\{php\})(.*?)(\{/php\})',
+              bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
+                       Comment.Preproc)),
+             (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
+              bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
+             (r'\{', Comment.Preproc, 'smarty')
+         ],
+         'smarty': [
+             (r'\s+', Text),
+             (r'\}', Comment.Preproc, '#pop'),
+             (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
+             (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
+             (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
+             (r'(true|false|null)\b', Keyword.Constant),
+             (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
+              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+             (r'"(\\\\|\\"|[^"])*"', String.Double),
+             (r"'(\\\\|\\'|[^'])*'", String.Single),
+             (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
+         ]
+     }
+
+     def analyse_text(text):
+         rv = 0.0
+         if re.search('\{if\s+.*?\}.*?\{/if\}', text):
+             rv += 0.15
+         if re.search('\{include\s+file=.*?\}', text):
+             rv += 0.15
+         if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
+             rv += 0.15
+         if re.search('\{\$.*?\}', text):
+             rv += 0.01
+         return rv
+
+
+ class DjangoLexer(RegexLexer):
+     """
+     Generic `django <http://www.djangoproject.com/documentation/templates/>`_
+     and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.
+
+     It just highlights django/jinja code between the preprocessor directives,
+     other data is left untouched by the lexer.
+     """
+
+     name = 'Django/Jinja'
+     aliases = ['django', 'jinja']
+     mimetypes = ['application/x-django-templating', 'application/x-jinja']
+
+     flags = re.M | re.S
+
+     tokens = {
+         'root': [
+             (r'[^{]+', Other),
+             (r'\{\{', Comment.Preproc, 'var'),
+             # jinja/django comments
+             (r'\{[*#].*?[*#]\}', Comment),
+             # django comments
+             (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
+              r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
+              bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
+                       Comment, Comment.Preproc, Text, Keyword, Text,
+                       Comment.Preproc)),
+             # raw jinja blocks
+             (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
+              r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
+              bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
+                       Text, Comment.Preproc, Text, Keyword, Text,
+                       Comment.Preproc)),
+             # filter blocks
+             (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
+              bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
+              'block'),
+             (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
+              bygroups(Comment.Preproc, Text, Keyword), 'block'),
+             (r'\{', Other)
+         ],
+         'varnames': [
+             (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
+              bygroups(Operator, Text, Name.Function)),
+             (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
+              bygroups(Keyword, Text, Keyword, Text, Name.Function)),
+             (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
+             (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
+              r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
+              Keyword),
+             (r'(loop|block|super|forloop)\b', Name.Builtin),
+             (r'[a-zA-Z][a-zA-Z0-9_]*', Name.Variable),
+             (r'\.[a-zA-Z0-9_]+', Name.Variable),
+             (r':?"(\\\\|\\"|[^"])*"', String.Double),
+             (r":?'(\\\\|\\'|[^'])*'", String.Single),
+             (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
+             (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
+              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+         ],
+         'var': [
+             (r'\s+', Text),
+             (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
+             include('varnames')
+         ],
+         'block': [
+             (r'\s+', Text),
+             (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
+             include('varnames'),
+             (r'.', Punctuation)
+         ]
+     }
+
+     def analyse_text(text):
+         rv = 0.0
+         if re.search(r'\{%\s*(block|extends)', text) is not None:
+             rv += 0.4
+         if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
+             rv += 0.1
+         if re.search(r'\{\{.*?\}\}', text) is not None:
+             rv += 0.1
+         return rv
+
+
+ class MyghtyLexer(RegexLexer):
+     """
+     Generic `myghty templates`_ lexer. Code that isn't Myghty
+     markup is yielded as `Token.Other`.
+
+     *New in Pygments 0.6.*
+
+     .. _myghty templates: http://www.myghty.org/
+     """
+
+     name = 'Myghty'
+     aliases = ['myghty']
+     filenames = ['*.myt', 'autodelegate']
+     mimetypes = ['application/x-myghty']
+
+     tokens = {
+         'root': [
+             (r'\s+', Text),
+             (r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
+              bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
+                       using(this), Name.Tag)),
+             (r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
+              bygroups(Name.Tag, None, Name.Function, Name.Tag,
+                       using(PythonLexer), Name.Tag)),
+             (r'(<&[^|])(.*?)(,.*?)?(&>)',
+              bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
+             (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
+              bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
+             (r'</&>', Name.Tag),
+             (r'(<%!?)(.*?)(%>)(?s)',
+              bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
+             (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
+             (r'(?<=^)(%)([^\n]*)(\n|\Z)',
+              bygroups(Name.Tag, using(PythonLexer), Other)),
+             (r"""(?sx)
+                 (.+?)                # anything, followed by:
+                 (?:
+                  (?<=\n)(?=[%#]) |   # an eval or comment line
+                  (?=</?[%&]) |       # a substitution or block or
+                                      # call start or end
+                                      # - don't consume
+                  (\\\n) |            # an escaped newline
+                  \Z                  # end of string
+                 )""", bygroups(Other, Operator)),
+         ]
+     }
+
+
+ class MyghtyHtmlLexer(DelegatingLexer):
+     """
+     Subclass of the `MyghtyLexer` that highlights unlexed data
+     with the `HtmlLexer`.
+
+     *New in Pygments 0.6.*
+     """
+
+     name = 'HTML+Myghty'
+     aliases = ['html+myghty']
+     mimetypes = ['text/html+myghty']
+
+     def __init__(self, **options):
+         super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
+                                               **options)
+
+
+ class MyghtyXmlLexer(DelegatingLexer):
+     """
+     Subclass of the `MyghtyLexer` that highlights unlexed data
+     with the `XmlLexer`.
+
+     *New in Pygments 0.6.*
+     """
+
+     name = 'XML+Myghty'
+     aliases = ['xml+myghty']
+     mimetypes = ['application/xml+myghty']
+
+     def __init__(self, **options):
+         super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
+                                              **options)
+
+
+ class MyghtyJavascriptLexer(DelegatingLexer):
+     """
+     Subclass of the `MyghtyLexer` that highlights unlexed data
+     with the `JavascriptLexer`.
+
+     *New in Pygments 0.6.*
+     """
+
+     name = 'JavaScript+Myghty'
+     aliases = ['js+myghty', 'javascript+myghty']
+     mimetypes = ['application/x-javascript+myghty',
+                  'text/x-javascript+myghty',
+                  'text/javascript+mygthy']
+
+     def __init__(self, **options):
+         super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
+                                                     MyghtyLexer, **options)
+
+
+ class MyghtyCssLexer(DelegatingLexer):
+     """
+     Subclass of the `MyghtyLexer` that highlights unlexed data
+     with the `CssLexer`.
+
+     *New in Pygments 0.6.*
+     """
+
+     name = 'CSS+Myghty'
+     aliases = ['css+myghty']
+     mimetypes = ['text/css+myghty']
+
+     def __init__(self, **options):
+         super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
+                                              **options)
+
+
+ class MakoLexer(RegexLexer):
+     """
+     Generic `mako templates`_ lexer. Code that isn't Mako
+     markup is yielded as `Token.Other`.
+
+     *New in Pygments 0.7.*
+
+     .. _mako templates: http://www.makotemplates.org/
+     """
+
+     name = 'Mako'
+     aliases = ['mako']
+     filenames = ['*.mao']
+     mimetypes = ['application/x-mako']
+
+     tokens = {
+         'root': [
+             (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
+              bygroups(Text, Comment.Preproc, Keyword, Other)),
+             (r'(\s*)(%)([^\n]*)(\n|\Z)',
+              bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
+             (r'(\s*)(##[^\n]*)(\n|\Z)',
+              bygroups(Text, Comment.Preproc, Other)),
+             (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
+             (r'(<%)([\w\.\:]+)',
+              bygroups(Comment.Preproc, Name.Builtin), 'tag'),
+             (r'(</%)([\w\.\:]+)(>)',
+              bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
+             (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
+             (r'(<%(?:!?))(.*?)(%>)(?s)',
+              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
+             (r'(\$\{)(.*?)(\})',
+              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
+             (r'''(?sx)
+                 (.+?)                # anything, followed by:
+                 (?:
+                  (?<=\n)(?=%|\#\#) | # an eval or comment line
+                  (?=\#\*) |          # multiline comment
+                  (?=</?%) |          # a python block
+                                      # call start or end
+                  (?=\$\{) |          # a substitution
+                  (?<=\n)(?=\s*%) |
+                                      # - don't consume
+                  (\\\n) |            # an escaped newline
+                  \Z                  # end of string
+                 )
+             ''', bygroups(Other, Operator)),
+             (r'\s+', Text),
+         ],
+         'ondeftags': [
+             (r'<%', Comment.Preproc),
+             (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
+             include('tag'),
+         ],
+         'tag': [
+             (r'((?:\w+)\s*=)\s*(".*?")',
+              bygroups(Name.Attribute, String)),
+             (r'/?\s*>', Comment.Preproc, '#pop'),
+             (r'\s+', Text),
+         ],
+         'attr': [
+             ('".*?"', String, '#pop'),
+             ("'.*?'", String, '#pop'),
+             (r'[^\s>]+', String, '#pop'),
+         ],
+     }
+
+
+ class MakoHtmlLexer(DelegatingLexer):
+     """
+     Subclass of the `MakoLexer` that highlights unlexed data
+     with the `HtmlLexer`.
+
+     *New in Pygments 0.7.*
+     """
+
+     name = 'HTML+Mako'
+     aliases = ['html+mako']
+     mimetypes = ['text/html+mako']
+
+     def __init__(self, **options):
+         super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
+                                             **options)
+
+ class MakoXmlLexer(DelegatingLexer):
+     """
+     Subclass of the `MakoLexer` that highlights unlexed data
+     with the `XmlLexer`.
+
+     *New in Pygments 0.7.*
+     """
+
+     name = 'XML+Mako'
+     aliases = ['xml+mako']
+     mimetypes = ['application/xml+mako']
+
+     def __init__(self, **options):
+         super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
+                                            **options)
+
+ class MakoJavascriptLexer(DelegatingLexer):
+     """
+     Subclass of the `MakoLexer` that highlights unlexed data
+     with the `JavascriptLexer`.
+
+     *New in Pygments 0.7.*
+     """
+
+     name = 'JavaScript+Mako'
+     aliases = ['js+mako', 'javascript+mako']
+     mimetypes = ['application/x-javascript+mako',
+                  'text/x-javascript+mako',
+                  'text/javascript+mako']
+
+     def __init__(self, **options):
+         super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
+                                                   MakoLexer, **options)
+
+ class MakoCssLexer(DelegatingLexer):
+     """
+     Subclass of the `MakoLexer` that highlights unlexed data
+     with the `CssLexer`.
+
+     *New in Pygments 0.7.*
+     """
+
+     name = 'CSS+Mako'
+     aliases = ['css+mako']
+     mimetypes = ['text/css+mako']
+
+     def __init__(self, **options):
+         super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
+                                            **options)
+
+
+ # Genshi and Cheetah lexers courtesy of Matt Good.
+
+ class CheetahPythonLexer(Lexer):
+     """
+     Lexer for handling Cheetah's special $ tokens in Python syntax.
+     """
+
+     def get_tokens_unprocessed(self, text):
+         pylexer = PythonLexer(**self.options)
+         for pos, type_, value in pylexer.get_tokens_unprocessed(text):
+             if type_ == Token.Error and value == '$':
+                 type_ = Comment.Preproc
+             yield pos, type_, value
+
+
+ class CheetahLexer(RegexLexer):
+     """
+     Generic `cheetah templates`_ lexer. Code that isn't Cheetah
+     markup is yielded as `Token.Other`. This also works for
+     `spitfire templates`_ which use the same syntax.
+
+     .. _cheetah templates: http://www.cheetahtemplate.org/
+     .. _spitfire templates: http://code.google.com/p/spitfire/
+     """
+
+     name = 'Cheetah'
+     aliases = ['cheetah', 'spitfire']
+     filenames = ['*.tmpl', '*.spt']
+     mimetypes = ['application/x-cheetah', 'application/x-spitfire']
+
+     tokens = {
+         'root': [
+             (r'(##[^\n]*)$',
+              (bygroups(Comment))),
+             (r'#[*](.|\n)*?[*]#', Comment),
+             (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
+             (r'#slurp$', Comment.Preproc),
+             (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
+              (bygroups(Comment.Preproc, using(CheetahPythonLexer),
+                        Comment.Preproc))),
+             # TODO support other Python syntax like $foo['bar']
+             (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
+              bygroups(Comment.Preproc, using(CheetahPythonLexer))),
+             (r'(\$\{!?)(.*?)(\})(?s)',
+              bygroups(Comment.Preproc, using(CheetahPythonLexer),
+                       Comment.Preproc)),
+             (r'''(?sx)
+                 (.+?)                # anything, followed by:
+                 (?:
+                  (?=[#][#a-zA-Z]*) | # an eval comment
+                  (?=\$[a-zA-Z_{]) |  # a substitution
+                  \Z                  # end of string
+                 )
+             ''', Other),
+             (r'\s+', Text),
+         ],
+     }
+
+
+ class CheetahHtmlLexer(DelegatingLexer):
+     """
+     Subclass of the `CheetahLexer` that highlights unlexed data
+     with the `HtmlLexer`.
+     """
+
+     name = 'HTML+Cheetah'
+     aliases = ['html+cheetah', 'html+spitfire']
+     mimetypes = ['text/html+cheetah', 'text/html+spitfire']
+
+     def __init__(self, **options):
+         super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
+                                                **options)
+
+
+ class CheetahXmlLexer(DelegatingLexer):
+     """
+     Subclass of the `CheetahLexer` that highlights unlexed data
+     with the `XmlLexer`.
+     """
+
+     name = 'XML+Cheetah'
+     aliases = ['xml+cheetah', 'xml+spitfire']
+     mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
+
+     def __init__(self, **options):
+         super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
+                                               **options)
+
+
+ class CheetahJavascriptLexer(DelegatingLexer):
+     """
+     Subclass of the `CheetahLexer` that highlights unlexed data
+     with the `JavascriptLexer`.
+     """
+
+     name = 'JavaScript+Cheetah'
+     aliases = ['js+cheetah', 'javascript+cheetah',
+                'js+spitfire', 'javascript+spitfire']
+     mimetypes = ['application/x-javascript+cheetah',
+                  'text/x-javascript+cheetah',
+                  'text/javascript+cheetah',
+                  'application/x-javascript+spitfire',
+                  'text/x-javascript+spitfire',
+                  'text/javascript+spitfire']
+
+     def __init__(self, **options):
+         super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
+                                                      CheetahLexer, **options)
+
+
+ class GenshiTextLexer(RegexLexer):
+     """
+     A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
+     templates.
+     """
+
+     name = 'Genshi Text'
+     aliases = ['genshitext']
+     mimetypes = ['application/x-genshi-text', 'text/x-genshi']
+
+     tokens = {
+         'root': [
+             (r'[^#\$\s]+', Other),
+             (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
+             (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
+             include('variable'),
+             (r'[#\$\s]', Other),
+         ],
+         'directive': [
+             (r'\n', Text, '#pop'),
+             (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
+             (r'(choose|when|with)([^\S\n]+)(.*)',
+              bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
+             (r'(choose|otherwise)\b', Keyword, '#pop'),
+             (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
+         ],
+         'variable': [
+             (r'(?<!\$)(\$\{)(.+?)(\})',
+              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
+             (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
+              Name.Variable),
+         ]
+     }
+
+
+ class GenshiMarkupLexer(RegexLexer):
+     """
+     Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
+     `GenshiLexer`.
+     """
+
+     flags = re.DOTALL
+
+     tokens = {
+         'root': [
+             (r'[^<\$]+', Other),
+             (r'(<\?python)(.*?)(\?>)',
+              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
+             # yield style and script blocks as Other
+             (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
+             (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
+             (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+             include('variable'),
+             (r'[<\$]', Other),
+         ],
+         'pytag': [
+             (r'\s+', Text),
+             (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
+             (r'/?\s*>', Name.Tag, '#pop'),
+         ],
+         'pyattr': [
+             ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
+             ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
+             (r'[^\s>]+', String, '#pop'),
+         ],
+         'tag': [
+             (r'\s+', Text),
+             (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
+             (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
+             (r'/?\s*>', Name.Tag, '#pop'),
+         ],
+         'attr': [
+             ('"', String, 'attr-dstring'),
+             ("'", String, 'attr-sstring'),
+             (r'[^\s>]*', String, '#pop')
+         ],
+         'attr-dstring': [
+             ('"', String, '#pop'),
+             include('strings'),
+             ("'", String)
+         ],
+         'attr-sstring': [
+             ("'", String, '#pop'),
+             include('strings'),
+             ("'", String)
+         ],
+         'strings': [
+             ('[^"\'$]+', String),
+             include('variable')
+         ],
+         'variable': [
+             (r'(?<!\$)(\$\{)(.+?)(\})',
+              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
+             (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
+              Name.Variable),
+         ]
+     }
+
+
+ class HtmlGenshiLexer(DelegatingLexer):
+     """
+     A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
+     `kid <http://kid-templating.org/>`_ kid HTML templates.
+     """
+
+     name = 'HTML+Genshi'
+     aliases = ['html+genshi', 'html+kid']
+     alias_filenames = ['*.html', '*.htm', '*.xhtml']
+     mimetypes = ['text/html+genshi']
+
+     def __init__(self, **options):
+         super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
+                                               **options)
+
+     def analyse_text(text):
+         rv = 0.0
+         if re.search('\$\{.*?\}', text) is not None:
+             rv += 0.2
+         if re.search('py:(.*?)=["\']', text) is not None:
+             rv += 0.2
+         return rv + HtmlLexer.analyse_text(text) - 0.01
+
+
+ class GenshiLexer(DelegatingLexer):
+     """
+     A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
+     `kid <http://kid-templating.org/>`_ kid XML templates.
+     """
+
+     name = 'Genshi'
+     aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
+     filenames = ['*.kid']
+     alias_filenames = ['*.xml']
+     mimetypes = ['application/x-genshi', 'application/x-kid']
+
+     def __init__(self, **options):
+         super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
+                                           **options)
+
+     def analyse_text(text):
+         rv = 0.0
+         if re.search('\$\{.*?\}', text) is not None:
+             rv += 0.2
+         if re.search('py:(.*?)=["\']', text) is not None:
+             rv += 0.2
+         return rv + XmlLexer.analyse_text(text) - 0.01
+
+
+ class JavascriptGenshiLexer(DelegatingLexer):
+     """
+     A lexer that highlights javascript code in genshi text templates.
+     """
+
+     name = 'JavaScript+Genshi Text'
+     aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
+                'javascript+genshi']
+     alias_filenames = ['*.js']
+     mimetypes = ['application/x-javascript+genshi',
+                  'text/x-javascript+genshi',
+                  'text/javascript+genshi']
+
+     def __init__(self, **options):
+         super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
+                                                     GenshiTextLexer,
+                                                     **options)
+
+     def analyse_text(text):
+         return GenshiLexer.analyse_text(text) - 0.05
+
+
+ class CssGenshiLexer(DelegatingLexer):
+     """
+     A lexer that highlights CSS definitions in genshi text templates.
+     """
+
+     name = 'CSS+Genshi Text'
+     aliases = ['css+genshitext', 'css+genshi']
+     alias_filenames = ['*.css']
+     mimetypes = ['text/css+genshi']
+
+     def __init__(self, **options):
+         super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
+                                              **options)
+
+     def analyse_text(text):
+         return GenshiLexer.analyse_text(text) - 0.05
+
+
+ class RhtmlLexer(DelegatingLexer):
+     """
+     Subclass of the ERB lexer that highlights the unlexed data with the
+     html lexer.
+
+     Nested Javascript and CSS is highlighted too.
+     """
+
+     name = 'RHTML'
+     aliases = ['rhtml', 'html+erb', 'html+ruby']
+     filenames = ['*.rhtml']
+     alias_filenames = ['*.html', '*.htm', '*.xhtml']
+     mimetypes = ['text/html+ruby']
+
+     def __init__(self, **options):
+         super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)
+
+     def analyse_text(text):
+         rv = ErbLexer.analyse_text(text) - 0.01
+         if html_doctype_matches(text):
+             # one more than the XmlErbLexer returns
+             rv += 0.5
+         return rv
+
+
+ class XmlErbLexer(DelegatingLexer):
+     """
+     Subclass of `ErbLexer` which highlights data outside preprocessor
+     directives with the `XmlLexer`.
+     """
+
+     name = 'XML+Ruby'
+     aliases = ['xml+erb', 'xml+ruby']
+     alias_filenames = ['*.xml']
+     mimetypes = ['application/xml+ruby']
+
+     def __init__(self, **options):
+         super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)
+
+     def analyse_text(text):
+         rv = ErbLexer.analyse_text(text) - 0.01
+         if looks_like_xml(text):
+             rv += 0.4
+         return rv
+
+
+ class CssErbLexer(DelegatingLexer):
+     """
+     Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
+     """
+
+     name = 'CSS+Ruby'
+     aliases = ['css+erb', 'css+ruby']
+     alias_filenames = ['*.css']
+     mimetypes = ['text/css+ruby']
+
+     def __init__(self, **options):
+         super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)
+
+     def analyse_text(text):
+         return ErbLexer.analyse_text(text) - 0.05
+
+
+ class JavascriptErbLexer(DelegatingLexer):
+     """
+     Subclass of `ErbLexer` which highlights unlexed data with the
+     `JavascriptLexer`.
+     """
+
+     name = 'JavaScript+Ruby'
+     aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
+     alias_filenames = ['*.js']
+     mimetypes = ['application/x-javascript+ruby',
+                  'text/x-javascript+ruby',
+                  'text/javascript+ruby']
+
+     def __init__(self, **options):
+         super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
+                                                  **options)
+
+     def analyse_text(text):
+         return ErbLexer.analyse_text(text) - 0.05
+
+
+ class HtmlPhpLexer(DelegatingLexer):
+     """
+     Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
+
+     Nested Javascript and CSS is highlighted too.
+     """
+
+     name = 'HTML+PHP'
+     aliases = ['html+php']
+     filenames = ['*.phtml']
+     alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
+                        '*.php[345]']
+     mimetypes = ['application/x-php',
+                  'application/x-httpd-php', 'application/x-httpd-php3',
+                  'application/x-httpd-php4', 'application/x-httpd-php5']
+
+     def __init__(self, **options):
+         super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
+
+     def analyse_text(text):
+         rv = PhpLexer.analyse_text(text) - 0.01
+         if html_doctype_matches(text):
+             rv += 0.5
+         return rv
+
+
+ class XmlPhpLexer(DelegatingLexer):
+     """
+     Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
+     """
+
+     name = 'XML+PHP'
+     aliases = ['xml+php']
+     alias_filenames = ['*.xml', '*.php', '*.php[345]']
+     mimetypes = ['application/xml+php']
+
+     def __init__(self, **options):
+         super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)
+
+     def analyse_text(text):
+         rv = PhpLexer.analyse_text(text) - 0.01
+         if looks_like_xml(text):
+             rv += 0.4
+         return rv
+
+
+ class CssPhpLexer(DelegatingLexer):
+     """
+     Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
+     """
+
+     name = 'CSS+PHP'
+     aliases = ['css+php']
+     alias_filenames = ['*.css']
+     mimetypes = ['text/css+php']
+
+     def __init__(self, **options):
+         super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)
+
+     def analyse_text(text):
+         return PhpLexer.analyse_text(text) - 0.05
+
+
+ class JavascriptPhpLexer(DelegatingLexer):
+     """
+     Subclass of `PhpLexer` which highlights unmatched data with the
+     `JavascriptLexer`.
+     """
+
+     name = 'JavaScript+PHP'
+     aliases = ['js+php', 'javascript+php']
+     alias_filenames = ['*.js']
+     mimetypes = ['application/x-javascript+php',
+                  'text/x-javascript+php',
+                  'text/javascript+php']
+
+     def __init__(self, **options):
+         super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
+                                                  **options)
+
+     def analyse_text(text):
+         return PhpLexer.analyse_text(text)
+
+
+ class HtmlSmartyLexer(DelegatingLexer):
+     """
+     Subclass of the `SmartyLexer` that highlights unlexed data with the
+     `HtmlLexer`.
+
+     Nested Javascript and CSS is highlighted too.
+     """
+
+     name = 'HTML+Smarty'
+     aliases = ['html+smarty']
+     alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
+     mimetypes = ['text/html+smarty']
+
+     def __init__(self, **options):
+         super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)
+
+     def analyse_text(text):
+         rv = SmartyLexer.analyse_text(text) - 0.01
+         if html_doctype_matches(text):
+             rv += 0.5
+         return rv
+
+
+ class XmlSmartyLexer(DelegatingLexer):
+     """
+     Subclass of the `SmartyLexer` that highlights unlexed data with the
+     `XmlLexer`.
+     """
+
+     name = 'XML+Smarty'
+     aliases = ['xml+smarty']
+     alias_filenames = ['*.xml', '*.tpl']
+     mimetypes = ['application/xml+smarty']
+
+     def __init__(self, **options):
+         super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)
+
+     def analyse_text(text):
+         rv = SmartyLexer.analyse_text(text) - 0.01
+         if looks_like_xml(text):
+             rv += 0.4
+         return rv
+
+
+ class CssSmartyLexer(DelegatingLexer):
+     """
+     Subclass of the `SmartyLexer` that highlights unlexed data with the
+     `CssLexer`.
+     """
+
+     name = 'CSS+Smarty'
+     aliases = ['css+smarty']
+     alias_filenames = ['*.css', '*.tpl']
+     mimetypes = ['text/css+smarty']
+
+     def __init__(self, **options):
+         super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)
+
+     def analyse_text(text):
+         return SmartyLexer.analyse_text(text) - 0.05
+
+
+ class JavascriptSmartyLexer(DelegatingLexer):
+     """
+     Subclass of the `SmartyLexer` that highlights unlexed data with the
+     `JavascriptLexer`.
+     """
+
+     name = 'JavaScript+Smarty'
+     aliases = ['js+smarty', 'javascript+smarty']
+     alias_filenames = ['*.js', '*.tpl']
+     mimetypes = ['application/x-javascript+smarty',
+                  'text/x-javascript+smarty',
+                  'text/javascript+smarty']
+
+     def __init__(self, **options):
+         super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
+                                                     **options)
+
+     def analyse_text(text):
+         return SmartyLexer.analyse_text(text) - 0.05
+
+
+ class HtmlDjangoLexer(DelegatingLexer):
+     """
+     Subclass of the `DjangoLexer` that highlights unlexed data with the
+     `HtmlLexer`.
+
+     Nested Javascript and CSS is highlighted too.
+     """
+
+     name = 'HTML+Django/Jinja'
+     aliases = ['html+django', 'html+jinja']
+     alias_filenames = ['*.html', '*.htm', '*.xhtml']
+     mimetypes = ['text/html+django', 'text/html+jinja']
+
+     def __init__(self, **options):
+         super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)
+
+     def analyse_text(text):
+         rv = DjangoLexer.analyse_text(text) - 0.01
+         if html_doctype_matches(text):
+             rv += 0.5
+         return rv
+
+
+ class XmlDjangoLexer(DelegatingLexer):
+     """
+     Subclass of the `DjangoLexer` that highlights unlexed data with the
+     `XmlLexer`.
+     """
+
+     name = 'XML+Django/Jinja'
+     aliases = ['xml+django', 'xml+jinja']
+     alias_filenames = ['*.xml']
+     mimetypes = ['application/xml+django', 'application/xml+jinja']
+
+     def __init__(self, **options):
+         super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)
+
+     def analyse_text(text):
+         rv = DjangoLexer.analyse_text(text) - 0.01
+         if looks_like_xml(text):
+             rv += 0.4
+         return rv
+
+
+ class CssDjangoLexer(DelegatingLexer):
+     """
+     Subclass of the `DjangoLexer` that highlights unlexed data with the
+     `CssLexer`.
+     """
+
+     name = 'CSS+Django/Jinja'
+     aliases = ['css+django', 'css+jinja']
+     alias_filenames = ['*.css']
+     mimetypes = ['text/css+django', 'text/css+jinja']
+
+     def __init__(self, **options):
+         super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)
+
+     def analyse_text(text):
+         return DjangoLexer.analyse_text(text) - 0.05
+
+
+ class JavascriptDjangoLexer(DelegatingLexer):
+     """
+     Subclass of the `DjangoLexer` that highlights unlexed data with the
+     `JavascriptLexer`.
+     """
+
+     name = 'JavaScript+Django/Jinja'
+     aliases = ['js+django', 'javascript+django',
+                'js+jinja', 'javascript+jinja']
+     alias_filenames = ['*.js']
+     mimetypes = ['application/x-javascript+django',
+                  'application/x-javascript+jinja',
+                  'text/x-javascript+django',
+                  'text/x-javascript+jinja',
+                  'text/javascript+django',
+                  'text/javascript+jinja']
+
+     def __init__(self, **options):
+         super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
+                                                     **options)
+
+     def analyse_text(text):
+         return DjangoLexer.analyse_text(text) - 0.05
+
+
+ class JspRootLexer(RegexLexer):
+     """
+     Base for the `JspLexer`. Yields `Token.Other` for area outside of
+     JSP tags.
+
+     *New in Pygments 0.7.*
+     """
+
+     tokens = {
+         'root': [
+             (r'<%\S?', Keyword, 'sec'),
+             # FIXME: I want to make these keywords but still parse attributes.
+             (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
+              Keyword),
+             (r'[^<]+', Other),
+             (r'<', Other),
+         ],
+         'sec': [
+             (r'%>', Keyword, '#pop'),
+             # note: '\w\W' != '.' without DOTALL.
+             (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
+         ],
+     }
+
+
+ class JspLexer(DelegatingLexer):
+     """
+     Lexer for Java Server Pages.
+
+     *New in Pygments 0.7.*
+     """
+     name = 'Java Server Page'
+     aliases = ['jsp']
+     filenames = ['*.jsp']
+     mimetypes = ['application/x-jsp']
+
+     def __init__(self, **options):
+         super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
+
+     def analyse_text(text):
+         rv = JavaLexer.analyse_text(text) - 0.01
+         if looks_like_xml(text):
+             rv += 0.4
+         if '<%' in text and '%>' in text:
+             rv += 0.1
+         return rv
+
+
+ class EvoqueLexer(RegexLexer):
+     """
+     For files using the Evoque templating system.
+
+     *New in Pygments 1.1.*
+     """
+     name = 'Evoque'
+     aliases = ['evoque']
+     filenames = ['*.evoque']
+     mimetypes = ['application/x-evoque']
+
+     flags = re.DOTALL
+
+     tokens = {
+         'root': [
+             (r'[^#$]+', Other),
+             (r'#\[', Comment.Multiline, 'comment'),
+             (r'\$\$', Other),
+             # svn keywords
+             (r'\$\w+:[^$\n]*\$', Comment.Multiline),
+             # directives: begin, end
+             (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
+              bygroups(Punctuation, Name.Builtin, Punctuation, None,
+                       String, Punctuation, None)),
+             # directives: evoque, overlay
+             # see doc for handling first name arg: /directives/evoque/
+             #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
+             # should be using(PythonLexer), not passed out as String
+             (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
+              r'(.*?)((?(4)%)\})',
+              bygroups(Punctuation, Name.Builtin, Punctuation, None,
+                       String, using(PythonLexer), Punctuation, None)),
+             # directives: if, for, prefer, test
+             (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
+              bygroups(Punctuation, Name.Builtin, Punctuation, None,
+                       using(PythonLexer), Punctuation, None)),
+             # directive clauses (no {} expression)
+             (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
+             # expressions
+             (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
+              bygroups(Punctuation, None, using(PythonLexer),
+                       Name.Builtin, None, None, Punctuation, None)),
+             (r'#', Other),
+         ],
+         'comment': [
+             (r'[^\]#]', Comment.Multiline),
+             (r'#\[', Comment.Multiline, '#push'),
+             (r'\]#', Comment.Multiline, '#pop'),
+             (r'[\]#]', Comment.Multiline)
+         ],
+     }
+
+ class EvoqueHtmlLexer(DelegatingLexer):
+     """
+     Subclass of the `EvoqueLexer` that highlights unlexed data with the
+     `HtmlLexer`.
+
+     *New in Pygments 1.1.*
+     """
+     name = 'HTML+Evoque'
+     aliases = ['html+evoque']
+     filenames = ['*.html']
+     mimetypes = ['text/html+evoque']
+
+     def __init__(self, **options):
+         super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
+                                               **options)
+
+ class EvoqueXmlLexer(DelegatingLexer):
+     """
+     Subclass of the `EvoqueLexer` that highlights unlexed data with the
+     `XmlLexer`.
+
+     *New in Pygments 1.1.*
+     """
+     name = 'XML+Evoque'
+     aliases = ['xml+evoque']
+     filenames = ['*.xml']
+     mimetypes = ['application/xml+evoque']
+
+     def __init__(self, **options):
+         super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
+                                              **options)
+
+ class ColdfusionLexer(RegexLexer):
+     """
+     Coldfusion statements
+     """
+     name = 'cfstatement'
+     aliases = ['cfs']
+     filenames = []
+     mimetypes = []
+     flags = re.IGNORECASE | re.MULTILINE
+
+     tokens = {
+         'root': [
+             (r'//.*', Comment),
+             (r'\+\+|--', Operator),
+             (r'[-+*/^&=!]', Operator),
+             (r'<=|>=|<|>', Operator),
+             (r'mod\b', Operator),
+             (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
+             (r'\|\||&&', Operator),
+             (r'"', String.Double, 'string'),
+             # There is a special rule for allowing html in single quoted
+             # strings, evidently.
+             (r"'.*?'", String.Single),
+             (r'\d+', Number),
+             (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
+             (r'([A-Za-z_$][A-Za-z0-9_.]*)\s*(\()', bygroups(Name.Function, Punctuation)),
+             (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
+             (r'[()\[\]{};:,.\\]', Punctuation),
+             (r'\s+', Text),
+         ],
+         'string': [
+             (r'""', String.Double),
+             (r'#.+?#', String.Interp),
+             (r'[^"#]+', String.Double),
+             (r'#', String.Double),
+             (r'"', String.Double, '#pop'),
+         ],
+     }
+
+ class ColdfusionMarkupLexer(RegexLexer):
+     """
+     Coldfusion markup only
+     """
+     name = 'Coldfusion'
+     aliases = ['cf']
+     filenames = []
+     mimetypes = []
+
+     tokens = {
+         'root': [
+             (r'[^<]+', Other),
+             include('tags'),
+             (r'<[^<>]*', Other),
+         ],
+         'tags': [
+             (r'(?s)<!---.*?--->', Comment.Multiline),
+             (r'(?s)<!--.*?-->', Comment),
+             (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
+             (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
+              bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
+             # negative lookbehind is for strings with embedded >
+             (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
+              r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
+              r'mailpart|mail|header|content|zip|image|lock|argument|try|'
+              r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
+              bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
+         ],
+         'cfoutput': [
+             (r'[^#<]+', Other),
+             (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
+                                       Punctuation)),
+             #(r'<cfoutput.*?>', Name.Builtin, '#push'),
+             (r'</cfoutput.*?>', Name.Builtin, '#pop'),
+             include('tags'),
+             (r'(?s)<[^<>]*', Other),
+             (r'#', Other),
+         ],
+     }
+
+
+ class ColdfusionHtmlLexer(DelegatingLexer):
+     """
+     Coldfusion markup in html
+     """
+     name = 'Coldfusion HTML'
+     aliases = ['cfm']
+     filenames = ['*.cfm', '*.cfml', '*.cfc']
+     mimetypes = ['application/x-coldfusion']
+
+     def __init__(self, **options):
+         super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
+                                                   **options)
+
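Editor's note: the template lexers in this file register the names, aliases, and mimetypes shown above, so callers normally select one by alias rather than importing the class. A minimal usage sketch against a standard Pygments install (the 'rhtml' alias and HtmlFormatter are standard Pygments API, not specific to this gem):

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import HtmlFormatter

    # Highlight an ERB-in-HTML snippet with the RhtmlLexer defined above.
    source = '<p><%= @user.name %></p>'
    print(highlight(source, get_lexer_by_name('rhtml'), HtmlFormatter()))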