pygments.rb 0.3.2 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. data/README.md +6 -1
  2. data/lexers +0 -0
  3. data/lib/pygments/version.rb +1 -1
  4. data/vendor/pygments-main/AUTHORS +15 -0
  5. data/vendor/pygments-main/CHANGES +28 -1
  6. data/vendor/pygments-main/LICENSE +1 -1
  7. data/vendor/pygments-main/external/lasso-builtins-generator-9.lasso +121 -0
  8. data/vendor/pygments-main/pygments/cmdline.py +1 -1
  9. data/vendor/pygments-main/pygments/filters/__init__.py +0 -1
  10. data/vendor/pygments-main/pygments/formatters/_mapping.py +2 -2
  11. data/vendor/pygments-main/pygments/formatters/img.py +1 -1
  12. data/vendor/pygments-main/pygments/formatters/latex.py +8 -8
  13. data/vendor/pygments-main/pygments/formatters/other.py +0 -2
  14. data/vendor/pygments-main/pygments/lexers/_lassobuiltins.py +5413 -0
  15. data/vendor/pygments-main/pygments/lexers/_mapping.py +36 -11
  16. data/vendor/pygments-main/pygments/lexers/_openedgebuiltins.py +551 -0
  17. data/vendor/pygments-main/pygments/lexers/_postgres_builtins.py +0 -1
  18. data/vendor/pygments-main/pygments/lexers/_robotframeworklexer.py +546 -0
  19. data/vendor/pygments-main/pygments/lexers/_sourcemodbuiltins.py +1072 -0
  20. data/vendor/pygments-main/pygments/lexers/_stan_builtins.py +174 -0
  21. data/vendor/pygments-main/pygments/lexers/_vimbuiltins.py +13 -3
  22. data/vendor/pygments-main/pygments/lexers/agile.py +145 -33
  23. data/vendor/pygments-main/pygments/lexers/asm.py +2 -2
  24. data/vendor/pygments-main/pygments/lexers/compiled.py +328 -36
  25. data/vendor/pygments-main/pygments/lexers/dalvik.py +104 -0
  26. data/vendor/pygments-main/pygments/lexers/dotnet.py +8 -14
  27. data/vendor/pygments-main/pygments/lexers/functional.py +773 -8
  28. data/vendor/pygments-main/pygments/lexers/jvm.py +184 -36
  29. data/vendor/pygments-main/pygments/lexers/math.py +349 -23
  30. data/vendor/pygments-main/pygments/lexers/other.py +315 -492
  31. data/vendor/pygments-main/pygments/lexers/parsers.py +83 -1
  32. data/vendor/pygments-main/pygments/lexers/shell.py +4 -1
  33. data/vendor/pygments-main/pygments/lexers/templates.py +112 -2
  34. data/vendor/pygments-main/pygments/lexers/text.py +52 -3
  35. data/vendor/pygments-main/pygments/lexers/web.py +382 -36
  36. data/vendor/pygments-main/pygments/unistring.py +35 -25
  37. data/vendor/pygments-main/pygments/util.py +45 -0
  38. data/vendor/pygments-main/tests/examplefiles/Config.in.cache +1973 -0
  39. data/vendor/pygments-main/tests/examplefiles/example.Rd +78 -0
  40. data/vendor/pygments-main/tests/examplefiles/example.bug +54 -0
  41. data/vendor/pygments-main/tests/examplefiles/example.ceylon +33 -0
  42. data/vendor/pygments-main/tests/examplefiles/example.jag +48 -0
  43. data/vendor/pygments-main/tests/examplefiles/example.monkey +152 -0
  44. data/vendor/pygments-main/tests/examplefiles/example.msc +43 -0
  45. data/vendor/pygments-main/tests/examplefiles/example.reg +19 -0
  46. data/vendor/pygments-main/tests/examplefiles/example.rkt +95 -0
  47. data/vendor/pygments-main/tests/examplefiles/example.rpf +4 -0
  48. data/vendor/pygments-main/tests/examplefiles/example.stan +97 -0
  49. data/vendor/pygments-main/tests/examplefiles/example.xtend +34 -0
  50. data/vendor/pygments-main/tests/examplefiles/example2.msc +79 -0
  51. data/vendor/pygments-main/tests/examplefiles/garcia-wachs.kk +123 -0
  52. data/vendor/pygments-main/tests/examplefiles/hello.smali +40 -0
  53. data/vendor/pygments-main/tests/examplefiles/hello.sp +9 -0
  54. data/vendor/pygments-main/tests/examplefiles/http_request_example +2 -1
  55. data/vendor/pygments-main/tests/examplefiles/http_response_example +4 -2
  56. data/vendor/pygments-main/tests/examplefiles/inet_pton6.dg +71 -0
  57. data/vendor/pygments-main/tests/examplefiles/json.lasso +301 -0
  58. data/vendor/pygments-main/tests/examplefiles/json.lasso9 +213 -0
  59. data/vendor/pygments-main/tests/examplefiles/livescript-demo.ls +41 -0
  60. data/vendor/pygments-main/tests/examplefiles/matlab_sample +5 -2
  61. data/vendor/pygments-main/tests/examplefiles/metagrammar.treetop +455 -0
  62. data/vendor/pygments-main/tests/examplefiles/pytb_test3.pytb +4 -0
  63. data/vendor/pygments-main/tests/examplefiles/robotframework.txt +39 -0
  64. data/vendor/pygments-main/tests/examplefiles/rust_example.rs +743 -0
  65. data/vendor/pygments-main/tests/examplefiles/test.R +149 -115
  66. data/vendor/pygments-main/tests/examplefiles/test.cu +36 -0
  67. data/vendor/pygments-main/tests/test_basic_api.py +1 -1
  68. data/vendor/pygments-main/tests/test_util.py +18 -0
  69. metadata +34 -3
  70. data/vendor/pygments-main/REVISION +0 -1
@@ -41,7 +41,6 @@ def parse_keywords(f):
41
41
 
42
42
  def parse_datatypes(f):
43
43
  dt = set()
44
- re_entry = re.compile('\s*<entry><type>([^<]+)</type></entry>')
45
44
  for line in f:
46
45
  if '<sect1' in line:
47
46
  break
@@ -0,0 +1,546 @@
1
+ # Copyright 2012 Nokia Siemens Networks Oyj
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+
17
+ from pygments.lexer import Lexer
18
+ from pygments.token import Token
19
+
20
+
21
# Token types used by the lexer, aliased to standard Pygments tokens.
HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error
33
+
34
+
35
def normalize(string, remove=''):
    """Lower-case *string* and strip spaces plus every character in *remove*."""
    result = string.lower()
    for unwanted in remove + ' ':
        result = result.replace(unwanted, '')
    return result
41
+
42
+
43
class RobotFrameworkLexer(Lexer):
    """
    For `Robot Framework <http://robotframework.org>`_ test data.

    Supports both space and pipe separated plain text formats.

    *New in Pygments 1.6.*
    """
    name = 'RobotFramework'
    aliases = ['RobotFramework', 'robotframework']
    filenames = ['*.txt']
    mimetypes = ['text/x-robotframework']

    def __init__(self, **options):
        # Robot Framework data uses two-space tab stops and UTF-8; force
        # these regardless of what the caller passed.
        options['tabsize'] = 2
        options['encoding'] = 'UTF-8'
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        row_tokenizer = RowTokenizer()
        var_tokenizer = VariableTokenizer()
        position = 0
        for row in text.splitlines():
            # Rows are first split into cells/separators, then each cell
            # is scanned for ${variable} markup.
            for cell, cell_token in row_tokenizer.tokenize(row):
                for piece, token in var_tokenizer.tokenize(cell, cell_token):
                    if piece:
                        yield position, token, unicode(piece)
                        position += len(piece)
71
+
72
+
73
class VariableTokenizer(object):
    """Splits ${variable} markup inside a single cell into tokens."""

    def tokenize(self, string, token):
        # Comments and errors are never split; nor are cells without a
        # complete variable.
        var = VariableSplitter(string, identifiers='$@%')
        if var.start < 0 or token in (COMMENT, ERROR):
            yield string, token
            return
        for value, token in self._tokenize(var, string, token):
            if value:
                yield value, token

    def _tokenize(self, var, string, orig_token):
        # Text before the variable keeps the original token.
        before = string[:var.start]
        yield before, orig_token
        yield var.identifier + '{', SYNTAX
        # The base name may itself contain nested variables.
        for value, token in self.tokenize(var.base, VARIABLE):
            yield value, token
        yield '}', SYNTAX
        if var.index:
            # List variable subscript, e.g. @{list}[0].
            yield '[', SYNTAX
            for value, token in self.tokenize(var.index, VARIABLE):
                yield value, token
            yield ']', SYNTAX
        # Recurse into the remainder of the cell after the variable.
        for value, token in self.tokenize(string[var.end:], orig_token):
            yield value, token
98
+
99
+
100
class RowTokenizer(object):
    """Splits rows into cells and dispatches them to the active table."""

    def __init__(self):
        self._table = UnknownTable()
        self._splitter = RowSplitter()
        testcases = TestCaseTable()
        settings = SettingTable(testcases.set_default_template)
        variables = VariableTable()
        keywords = KeywordTable()
        # Accept both singular and plural header spellings.
        self._tables = {'settings': settings, 'setting': settings,
                        'metadata': settings,
                        'variables': variables, 'variable': variables,
                        'testcases': testcases, 'testcase': testcases,
                        'keywords': keywords, 'keyword': keywords,
                        'userkeywords': keywords, 'userkeyword': keywords}

    def tokenize(self, row):
        commented = False
        heading = False
        for index, value in enumerate(self._splitter.split(row)):
            # First value, and every second after that, is a separator.
            index, separator = divmod(index-1, 2)
            if value.startswith('#'):
                # Everything from '#' to the end of the row is a comment.
                commented = True
            elif index == 0 and value.startswith('*'):
                # A '*'-prefixed first cell starts a new table section.
                self._table = self._start_table(value)
                heading = True
            for value, token in self._tokenize(value, index, commented,
                                               separator, heading):
                yield value, token
        self._table.end_row()

    def _start_table(self, header):
        name = normalize(header, remove='*')
        return self._tables.get(name, UnknownTable())

    def _tokenize(self, value, index, commented, separator, heading):
        if commented:
            yield value, COMMENT
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            yield value, token for value, token in self._table.tokenize(value, index)
146
+
147
+
148
class RowSplitter(object):
    """Splits one row into an alternating stream of separators and cells."""

    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')

    def split(self, row):
        if row.startswith('| '):
            splitter = self._split_from_pipes
        else:
            splitter = self._split_from_spaces
        for value in splitter(row.rstrip()):
            yield value
        yield '\n'

    def _split_from_spaces(self, row):
        # Start with a (pseudo)separator so the output shape matches the
        # pipe-separated format.
        yield ''
        for value in self._space_splitter.split(row):
            yield value

    def _split_from_pipes(self, row):
        _, separator, rest = self._pipe_splitter.split(row, 1)
        yield separator
        while self._pipe_splitter.search(rest):
            cell, separator, rest = self._pipe_splitter.split(rest, 1)
            yield cell
            yield separator
        yield rest
172
+
173
+
174
class Tokenizer(object):
    """Base class assigning one token type per cell, based on cell position."""

    # Tuple of token types indexed by cell position; subclasses override.
    _tokens = None

    def __init__(self):
        self._index = 0

    def tokenize(self, value):
        values_and_tokens = self._tokenize(value, self._index)
        self._index += 1
        # _tokenize may return either a bare token type or a list of
        # (value, token) pairs; normalize to the latter.
        if isinstance(values_and_tokens, type(Token)):
            values_and_tokens = [(value, values_and_tokens)]
        return values_and_tokens

    def _tokenize(self, value, index):
        # Cells beyond the configured positions reuse the last token type.
        index = min(index, len(self._tokens) - 1)
        return self._tokens[index]

    def _is_assign(self, value):
        # True when the whole cell is a single variable, optionally
        # followed by '='.
        if value.endswith('='):
            value = value[:-1].strip()
        var = VariableSplitter(value, identifiers='$@')
        return var.start == 0 and var.end == len(value)
196
+
197
+
198
class Comment(Tokenizer):
    """Tokenizer that marks every cell as a comment."""

    _tokens = (COMMENT,)
200
+
201
+
202
class Setting(Tokenizer):
    """Tokenizer for rows in the setting table."""

    _tokens = (SETTING, ARGUMENT)
    # Settings whose arguments form a keyword call.
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
                         'suitepostcondition', 'testsetup', 'testprecondition',
                         'testteardown', 'testpostcondition', 'testtemplate')
    # Settings that import libraries, resources, or variable files.
    _import_settings = ('library', 'resource', 'variables')
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout')
    _custom_tokenizer = None

    def __init__(self, template_setter=None):
        Tokenizer.__init__(self)
        # Optional callback used to record a 'Test Template' value.
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 1 and self._template_setter:
            self._template_setter(value)
        if index == 0:
            # The first cell is the setting name; it decides how the rest
            # of the row is tokenized.
            normalized = normalize(value)
            if normalized in self._keyword_settings:
                self._custom_tokenizer = KeywordCall(support_assign=False)
            elif normalized in self._import_settings:
                self._custom_tokenizer = ImportSetting()
            elif normalized not in self._other_settings:
                return ERROR
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Tokenizer._tokenize(self, value, index)
230
+
231
+
232
class ImportSetting(Tokenizer):
    """Tokenizer for import settings: imported name, then plain arguments."""

    _tokens = (IMPORT, ARGUMENT)
234
+
235
+
236
class TestCaseSetting(Setting):
    """Setting inside a test case, written in brackets like ``[Setup]``."""

    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
                         'template')
    _import_settings = ()
    _other_settings = ('documentation', 'tags', 'timeout')

    def _tokenize(self, value, index):
        if index != 0:
            return Setting._tokenize(self, value, index)
        # Classify the name without its surrounding brackets, and emit
        # the brackets themselves as syntax markers.
        name_token = Setting._tokenize(self, value[1:-1], index)
        return [('[', SYNTAX), (value[1:-1], name_token), (']', SYNTAX)]
247
+
248
+
249
class KeywordSetting(TestCaseSetting):
    """Setting inside a user keyword, e.g. ``[Arguments]`` or ``[Return]``."""

    _keyword_settings = ('teardown',)
    _other_settings = ('documentation', 'arguments', 'return', 'timeout')
252
+
253
+
254
class Variable(Tokenizer):
    """Tokenizer for variable table rows: variable name, then its values."""

    _tokens = (SYNTAX, ARGUMENT)

    def _tokenize(self, value, index):
        # The first cell must look like a valid variable assignment.
        is_name_cell = index == 0
        if is_name_cell and not self._is_assign(value):
            return ERROR
        return Tokenizer._tokenize(self, value, index)
261
+
262
+
263
class KeywordCall(Tokenizer):
    """Tokenizer for keyword-call rows: optional assignments, keyword name,
    then arguments."""

    _tokens = (KEYWORD, ARGUMENT)

    def __init__(self, support_assign=True):
        Tokenizer.__init__(self)
        # When assignment is unsupported, the first cell is the keyword.
        self._keyword_found = not support_assign
        self._assigns = 0

    def _tokenize(self, value, index):
        if not self._keyword_found and self._is_assign(value):
            self._assigns += 1
            return SYNTAX  # VariableTokenizer tokenizes this later.
        if self._keyword_found:
            # Shift the index so arguments line up after any assignments.
            return Tokenizer._tokenize(self, value, index - self._assigns)
        self._keyword_found = True
        return GherkinTokenizer().tokenize(value, KEYWORD)
279
+
280
+
281
class GherkinTokenizer(object):
    """Highlight a leading Given/When/Then/And prefix as Gherkin markup."""

    _gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)

    def tokenize(self, value, token):
        match = self._gherkin_prefix.match(value)
        if match:
            split_at = match.end()
            return [(value[:split_at], GHERKIN), (value[split_at:], token)]
        return [(value, token)]
290
+
291
+
292
class TemplatedKeywordCall(Tokenizer):
    """Tokenizer for rows of templated tests: every cell is an argument."""

    _tokens = (ARGUMENT,)
294
+
295
+
296
class ForLoop(Tokenizer):
    """Tokenizer for :FOR loop rows; cells after IN / IN RANGE are arguments."""

    def __init__(self):
        Tokenizer.__init__(self)
        self._in_arguments = False

    def _tokenize(self, value, index):
        # Decide the token before possibly flipping the flag, so the
        # IN / IN RANGE marker itself is still tokenized as syntax.
        if self._in_arguments:
            token = ARGUMENT
        else:
            token = SYNTAX
        if value.upper() in ('IN', 'IN RANGE'):
            self._in_arguments = True
        return token
307
+
308
+
309
class _Table(object):
    """Base class for tokenizing one table; tracks row-continuation state."""

    _tokenizer_class = None

    def __init__(self, prev_tokenizer=None):
        self._tokenizer = self._tokenizer_class()
        # Tokenizer of the previous row, restored when a row continues
        # with the '...' marker.
        self._prev_tokenizer = prev_tokenizer
        self._prev_values_on_row = []

    def tokenize(self, value, index):
        if self._continues(value, index):
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            for value_and_token in self._tokenize(value, index):
                yield value_and_token
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        # '...' continues the previous row only when nothing substantial
        # has appeared earlier on this row.
        return value == '...' and all(self._is_empty(t)
                                      for t in self._prev_values_on_row)

    def _is_empty(self, value):
        return value in ('', '\\')

    def _tokenize(self, value, index):
        return self._tokenizer.tokenize(value)

    def end_row(self):
        # Reset per-row state while remembering the current tokenizer so
        # a following '...' row can resume it.
        self.__init__(prev_tokenizer=self._tokenizer)
338
+
339
+
340
class UnknownTable(_Table):
    """Fallback table for unrecognized headers; everything is a comment."""

    _tokenizer_class = Comment

    def _continues(self, value, index):
        # Row continuation is meaningless inside an unknown table.
        return False
345
+
346
+
347
class VariableTable(_Table):
    """Table tokenizer for the variable table."""

    _tokenizer_class = Variable
349
+
350
+
351
class SettingTable(_Table):
    """Table tokenizer for the setting table."""

    _tokenizer_class = Setting

    def __init__(self, template_setter, prev_tokenizer=None):
        _Table.__init__(self, prev_tokenizer)
        # Callback that records the suite-wide 'Test Template' value.
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 0 and normalize(value) == 'testtemplate':
            # Route this row through a Setting that reports the template
            # value back via the setter.
            self._tokenizer = Setting(self._template_setter)
        return _Table._tokenize(self, value, index)

    def end_row(self):
        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
365
+
366
+
367
class TestCaseTable(_Table):
    """Table tokenizer for test case tables; handles templates and for loops."""

    _setting_class = TestCaseSetting
    # None: no per-test template info; False: a [Template] setting was just
    # seen; True/False otherwise set via set_test_template().
    _test_template = None
    _default_template = None

    @property
    def _tokenizer_class(self):
        # Templated tests have only arguments on their rows.
        if self._test_template or (self._default_template and
                                   self._test_template is not False):
            return TemplatedKeywordCall
        return KeywordCall

    def _continues(self, value, index):
        # The test name column never continues a previous row.
        return index > 0 and _Table._continues(self, value, index)

    def _tokenize(self, value, index):
        if index == 0:
            if value:
                # A new test starts; forget the previous test's template.
                self._test_template = None
            return GherkinTokenizer().tokenize(value, TC_KW_NAME)
        if index == 1 and self._is_setting(value):
            if self._is_template(value):
                self._test_template = False
                self._tokenizer = self._setting_class(self.set_test_template)
            else:
                self._tokenizer = self._setting_class()
        if index == 1 and self._is_for_loop(value):
            self._tokenizer = ForLoop()
        if index == 1 and self._is_empty(value):
            return [(value, SYNTAX)]
        return _Table._tokenize(self, value, index)

    def _is_setting(self, value):
        return value.startswith('[') and value.endswith(']')

    def _is_template(self, value):
        return normalize(value) == '[template]'

    def _is_for_loop(self, value):
        return value.startswith(':') and normalize(value, remove=':') == 'for'

    def set_test_template(self, template):
        self._test_template = self._is_template_set(template)

    def set_default_template(self, template):
        self._default_template = self._is_template_set(template)

    def _is_template_set(self, template):
        # Values like 'NONE' or '${EMPTY}' disable the template.
        return normalize(template) not in ('', '\\', 'none', '${empty}')
416
+
417
+
418
class KeywordTable(TestCaseTable):
    """Table tokenizer for user keyword tables.

    Behaves like the test case table, except keywords cannot be templated.
    """

    _tokenizer_class = KeywordCall
    _setting_class = KeywordSetting

    def _is_template(self, value):
        # User keywords never have a [Template] setting.
        return False
424
+
425
+
426
+ # Following code copied directly from Robot Framework 2.7.5.
427
+
428
class VariableSplitter:
    """Locate and decompose the first variable (e.g. ``${var}`` or
    ``@{list}[0]``) in *string*.

    After construction ``start``/``end`` give the variable's span in the
    string (both -1 when none was found), ``identifier`` is the marker
    character ('$', '@', ...), ``base`` the variable name, and ``index``
    the optional list-variable subscript.

    Copied from Robot Framework 2.7.5, with a fix: ``_find_variable``
    used ``return ValueError(...)`` instead of ``raise``, so the tuple
    unpacking in ``_split`` raised an uncaught TypeError for any string
    containing no variable, instead of being handled by the
    ``except ValueError`` below.
    """

    def __init__(self, string, identifiers):
        self.identifier = None
        self.base = None
        self.index = None
        self.start = -1
        self.end = -1
        self._identifiers = identifiers
        self._may_have_internal_variables = False
        try:
            self._split(string)
        except ValueError:
            # No (complete) variable found; attributes keep their
            # "not found" defaults.
            pass
        else:
            self._finalize()

    def get_replaced_base(self, variables):
        # Resolve nested variables in the base name with the given
        # *variables* mapping, when any were seen during parsing.
        if self._may_have_internal_variables:
            return variables.replace_string(self.base)
        return self.base

    def _finalize(self):
        self.identifier = self._variable_chars[0]
        self.base = ''.join(self._variable_chars[2:-1])
        self.end = self.start + len(self._variable_chars)
        if self._has_list_variable_index():
            self.index = ''.join(self._list_variable_index_chars[1:-1])
            self.end += len(self._list_variable_index_chars)

    def _has_list_variable_index(self):
        return self._list_variable_index_chars\
               and self._list_variable_index_chars[-1] == ']'

    def _split(self, string):
        start_index, max_index = self._find_variable(string)
        self.start = start_index
        self._open_curly = 1
        self._state = self._variable_state
        self._variable_chars = [string[start_index], '{']
        self._list_variable_index_chars = []
        self._string = string
        start_index += 2
        # Character-by-character state machine; state handlers raise
        # StopIteration once the variable is complete.
        for index, char in enumerate(string[start_index:]):
            index += start_index  # Giving start to enumerate only in Py 2.6+
            try:
                self._state(char, index)
            except StopIteration:
                return
            if index == max_index and not self._scanning_list_variable_index():
                return

    def _scanning_list_variable_index(self):
        return self._state in [self._waiting_list_variable_index_state,
                               self._list_variable_index_state]

    def _find_variable(self, string):
        max_end_index = string.rfind('}')
        if max_end_index == -1:
            # FIX: was `return ValueError(...)`.
            raise ValueError('No variable end found')
        if self._is_escaped(string, max_end_index):
            return self._find_variable(string[:max_end_index])
        start_index = self._find_start_index(string, 1, max_end_index)
        if start_index == -1:
            # FIX: was `return ValueError(...)`.
            raise ValueError('No variable start found')
        return start_index, max_end_index

    def _find_start_index(self, string, start, end):
        index = string.find('{', start, end) - 1
        if index < 0:
            return -1
        if self._start_index_is_ok(string, index):
            return index
        return self._find_start_index(string, index+2, end)

    def _start_index_is_ok(self, string, index):
        return string[index] in self._identifiers\
               and not self._is_escaped(string, index)

    def _is_escaped(self, string, index):
        # Count preceding backslashes; an odd number means escaped.
        escaped = False
        while index > 0 and string[index-1] == '\\':
            index -= 1
            escaped = not escaped
        return escaped

    def _variable_state(self, char, index):
        self._variable_chars.append(char)
        if char == '}' and not self._is_escaped(self._string, index):
            self._open_curly -= 1
            if self._open_curly == 0:
                if not self._is_list_variable():
                    raise StopIteration
                # A list variable may be followed by an [index] part.
                self._state = self._waiting_list_variable_index_state
        elif char in self._identifiers:
            self._state = self._internal_variable_start_state

    def _is_list_variable(self):
        return self._variable_chars[0] == '@'

    def _internal_variable_start_state(self, char, index):
        self._state = self._variable_state
        if char == '{':
            # A nested variable opens; it must be resolved later.
            self._variable_chars.append(char)
            self._open_curly += 1
            self._may_have_internal_variables = True
        else:
            self._variable_state(char, index)

    def _waiting_list_variable_index_state(self, char, index):
        if char != '[':
            raise StopIteration
        self._list_variable_index_chars.append(char)
        self._state = self._list_variable_index_state

    def _list_variable_index_state(self, char, index):
        self._list_variable_index_chars.append(char)
        if char == ']':
            raise StopIteration