gherkin 1.0.2-java

Files changed (89)
  1. data/.gitignore +7 -0
  2. data/.mailmap +2 -0
  3. data/History.txt +9 -0
  4. data/LICENSE +20 -0
  5. data/README.rdoc +38 -0
  6. data/Rakefile +48 -0
  7. data/VERSION.yml +4 -0
  8. data/bin/gherkin +5 -0
  9. data/cucumber.yml +3 -0
  10. data/features/feature_parser.feature +206 -0
  11. data/features/native_lexer.feature +19 -0
  12. data/features/parser_with_native_lexer.feature +205 -0
  13. data/features/pretty_printer.feature +14 -0
  14. data/features/step_definitions/gherkin_steps.rb +34 -0
  15. data/features/step_definitions/pretty_printer_steps.rb +56 -0
  16. data/features/steps_parser.feature +46 -0
  17. data/features/support/env.rb +33 -0
  18. data/gherkin.gemspec +155 -0
  19. data/java/.gitignore +2 -0
  20. data/java/Gherkin.iml +24 -0
  21. data/java/build.xml +13 -0
  22. data/java/src/gherkin/FixJava.java +34 -0
  23. data/java/src/gherkin/Lexer.java +5 -0
  24. data/java/src/gherkin/LexingError.java +7 -0
  25. data/java/src/gherkin/Listener.java +27 -0
  26. data/java/src/gherkin/ParseError.java +22 -0
  27. data/java/src/gherkin/Parser.java +185 -0
  28. data/java/src/gherkin/lexer/.gitignore +1 -0
  29. data/java/src/gherkin/parser/StateMachineReader.java +62 -0
  30. data/lib/.gitignore +4 -0
  31. data/lib/gherkin.rb +2 -0
  32. data/lib/gherkin/c_lexer.rb +10 -0
  33. data/lib/gherkin/cli/main.rb +34 -0
  34. data/lib/gherkin/core_ext/array.rb +5 -0
  35. data/lib/gherkin/i18n.rb +87 -0
  36. data/lib/gherkin/i18n.yml +535 -0
  37. data/lib/gherkin/i18n_lexer.rb +29 -0
  38. data/lib/gherkin/java_lexer.rb +10 -0
  39. data/lib/gherkin/lexer.rb +44 -0
  40. data/lib/gherkin/parser.rb +19 -0
  41. data/lib/gherkin/parser/meta.txt +4 -0
  42. data/lib/gherkin/parser/root.txt +9 -0
  43. data/lib/gherkin/parser/steps.txt +3 -0
  44. data/lib/gherkin/rb_lexer.rb +10 -0
  45. data/lib/gherkin/rb_lexer/.gitignore +1 -0
  46. data/lib/gherkin/rb_lexer/README.rdoc +8 -0
  47. data/lib/gherkin/rb_parser.rb +117 -0
  48. data/lib/gherkin/tools.rb +8 -0
  49. data/lib/gherkin/tools/files.rb +30 -0
  50. data/lib/gherkin/tools/pretty_listener.rb +84 -0
  51. data/lib/gherkin/tools/reformat.rb +19 -0
  52. data/lib/gherkin/tools/stats.rb +21 -0
  53. data/lib/gherkin/tools/stats_listener.rb +50 -0
  54. data/nativegems.sh +5 -0
  55. data/ragel/i18n/.gitignore +1 -0
  56. data/ragel/lexer.c.rl.erb +403 -0
  57. data/ragel/lexer.java.rl.erb +200 -0
  58. data/ragel/lexer.rb.rl.erb +171 -0
  59. data/ragel/lexer_common.rl.erb +46 -0
  60. data/spec/gherkin/c_lexer_spec.rb +21 -0
  61. data/spec/gherkin/fixtures/1.feature +8 -0
  62. data/spec/gherkin/fixtures/complex.feature +43 -0
  63. data/spec/gherkin/fixtures/i18n_fr.feature +13 -0
  64. data/spec/gherkin/fixtures/i18n_no.feature +6 -0
  65. data/spec/gherkin/fixtures/i18n_zh-CN.feature +8 -0
  66. data/spec/gherkin/fixtures/simple.feature +3 -0
  67. data/spec/gherkin/fixtures/simple_with_comments.feature +7 -0
  68. data/spec/gherkin/fixtures/simple_with_tags.feature +11 -0
  69. data/spec/gherkin/i18n_lexer_spec.rb +22 -0
  70. data/spec/gherkin/i18n_spec.rb +57 -0
  71. data/spec/gherkin/java_lexer_spec.rb +20 -0
  72. data/spec/gherkin/parser_spec.rb +28 -0
  73. data/spec/gherkin/rb_lexer_spec.rb +18 -0
  74. data/spec/gherkin/sexp_recorder.rb +29 -0
  75. data/spec/gherkin/shared/lexer_spec.rb +433 -0
  76. data/spec/gherkin/shared/py_string_spec.rb +124 -0
  77. data/spec/gherkin/shared/table_spec.rb +97 -0
  78. data/spec/gherkin/shared/tags_spec.rb +50 -0
  79. data/spec/spec_helper.rb +53 -0
  80. data/tasks/bench.rake +186 -0
  81. data/tasks/bench/feature_builder.rb +49 -0
  82. data/tasks/bench/generated/.gitignore +1 -0
  83. data/tasks/bench/null_listener.rb +4 -0
  84. data/tasks/compile.rake +70 -0
  85. data/tasks/cucumber.rake +20 -0
  86. data/tasks/ragel_task.rb +70 -0
  87. data/tasks/rdoc.rake +12 -0
  88. data/tasks/rspec.rake +15 -0
  89. metadata +196 -0

data/nativegems.sh
@@ -0,0 +1,5 @@
+ #!/bin/sh
+ # Builds gems for all supported platforms
+ rake gemspec build PLATFORM=java
+ rake cross compile gemspec build PLATFORM=i386-mswin32 RUBY_CC_VERSION=1.8.6
+ rake gemspec build PLATFORM=i386-mingw32 RUBY_CC_VERSION=1.8.6

data/ragel/i18n/.gitignore
@@ -0,0 +1 @@
+ *.rl

data/ragel/lexer.c.rl.erb
@@ -0,0 +1,403 @@
+ #include <assert.h>
+ #include <ruby.h>
+
+ #if defined(_WIN32)
+ #include <stddef.h>
+ #endif
+
+ #ifdef HAVE_RUBY_RE_H
+ #include <ruby/re.h>
+ #else
+ #include <re.h>
+ #endif
+
+ #ifdef HAVE_RUBY_ENCODING_H
+ #include <ruby/encoding.h>
+ #define ENCODED_STR_NEW(ptr, len) \
+   rb_enc_str_new(ptr, len, rb_utf8_encoding());
+ #else
+ #define ENCODED_STR_NEW(ptr, len) \
+   rb_str_new(ptr, len);
+ #endif
+
+ #ifndef RSTRING_PTR
+ #define RSTRING_PTR(s) (RSTRING(s)->ptr)
+ #endif
+
+ #ifndef RSTRING_LEN
+ #define RSTRING_LEN(s) (RSTRING(s)->len)
+ #endif
+
+ #define DATA_GET(FROM, TYPE, NAME) \
+   Data_Get_Struct(FROM, TYPE, NAME); \
+   if (NAME == NULL) { \
+     rb_raise(rb_eArgError, "NULL found for " # NAME " when it shouldn't be."); \
+   }
+
+ typedef struct lexer_state {
+   int content_len;
+   int line_number;
+   int current_line;
+   int start_col;
+   size_t mark;
+   size_t keyword_start;
+   size_t keyword_end;
+   size_t next_keyword_start;
+   size_t content_start;
+   size_t content_end;
+   size_t field_len;
+   size_t query_start;
+   size_t last_newline;
+   size_t final_newline;
+ } lexer_state;
+
+ static VALUE mGherkin;
+ static VALUE mLexer;
+ static VALUE mCLexer;
+ static VALUE cI18nLexer;
+ static VALUE rb_eGherkinLexerError;
+
+ #define LEN(AT, P) (P - data - lexer->AT)
+ #define MARK(M, P) (lexer->M = (P) - data)
+ #define PTR_TO(P) (data + lexer->P)
+
+ #define STORE_KW_END_CON(EVENT) \
+   store_kw_con(listener, # EVENT, \
+     PTR_TO(keyword_start), LEN(keyword_start, PTR_TO(keyword_end - 1)), \
+     PTR_TO(content_start), LEN(content_start, PTR_TO(content_end)), \
+     lexer->current_line); \
+   if (lexer->content_end != 0) { \
+     p = PTR_TO(content_end - 1); \
+   } \
+   lexer->content_end = 0;
+
+ #define STORE_ATTR(ATTR) \
+   store_attr(listener, # ATTR, \
+     PTR_TO(content_start), LEN(content_start, p), \
+     lexer->line_number);
+
+ %%{
+   machine lexer;
+
+   action begin_content {
+     MARK(content_start, p);
+     lexer->current_line = lexer->line_number;
+   }
+
+   action begin_pystring_content {
+     MARK(content_start, p);
+   }
+
+   action start_pystring {
+     lexer->current_line = lexer->line_number;
+     lexer->start_col = p - data - lexer->last_newline;
+   }
+
+   action store_pystring_content {
+     int len = LEN(content_start, PTR_TO(final_newline));
+
+     if (len < 0) len = 0;
+
+     store_pystring_content(listener, lexer->start_col, PTR_TO(content_start), len, lexer->current_line);
+   }
+
+   action store_feature_content {
+     STORE_KW_END_CON(feature)
+   }
+
+   action store_background_content {
+     STORE_KW_END_CON(background)
+   }
+
+   action store_scenario_content {
+     STORE_KW_END_CON(scenario)
+   }
+
+   action store_scenario_outline_content {
+     STORE_KW_END_CON(scenario_outline)
+   }
+
+   action store_examples_content {
+     STORE_KW_END_CON(examples)
+   }
+
+   action store_step_content {
+     store_kw_con(listener, "step",
+       PTR_TO(keyword_start), LEN(keyword_start, PTR_TO(keyword_end)),
+       PTR_TO(content_start), LEN(content_start, p),
+       lexer->current_line);
+   }
+
+   action store_comment_content {
+     STORE_ATTR(comment)
+     lexer->mark = 0;
+   }
+
+   action store_tag_content {
+     STORE_ATTR(tag)
+     lexer->mark = 0;
+   }
+
+   action inc_line_number {
+     lexer->line_number += 1;
+     MARK(final_newline, p);
+   }
+
+   action last_newline {
+     MARK(last_newline, p + 1);
+   }
+
+   action start_keyword {
+     if (lexer->mark == 0) {
+       MARK(mark, p);
+     }
+   }
+
+   action end_keyword {
+     MARK(keyword_end, p);
+     MARK(keyword_start, PTR_TO(mark));
+     MARK(content_start, p + 1);
+     lexer->mark = 0;
+   }
+
+   action next_keyword_start {
+     MARK(content_end, p);
+   }
+
+   action start_table {
+     p = p - 1;
+     lexer->current_line = lexer->line_number;
+     rb_ary_clear(rows);
+     rb_ary_clear(current_row);
+   }
+
+   action begin_cell_content {
+     MARK(content_start, p);
+   }
+
+   action store_cell_content {
+     VALUE con = Qnil;
+     con = ENCODED_STR_NEW(PTR_TO(content_start), LEN(content_start, p));
+     rb_funcall(con, rb_intern("strip!"), 0);
+
+     rb_ary_push(current_row, con);
+   }
+
+   action start_row {
+     current_row = rb_ary_new();
+   }
+
+   action store_row {
+     rb_ary_push(rows, current_row);
+   }
+
+   action store_table {
+     rb_funcall(listener, rb_intern("table"), 2, rows, INT2FIX(lexer->current_line));
+   }
+
+   action end_feature {
+     if (cs < lexer_first_final) {
+       if (raise_lexer_error != NULL) {
+         int count = 0;
+         int newstr_count = 0;
+         size_t len;
+         const char *buff;
+         if (lexer->last_newline != 0) {
+           len = LEN(last_newline, eof);
+           buff = PTR_TO(last_newline);
+         } else {
+           len = strlen(data);
+           buff = data;
+         }
+
+         char newstr[len];
+
+         for (count = 0; count < len; count++) {
+           if(buff[count] == 10) {
+             newstr[newstr_count] = '\0'; // terminate new string at first newline found
+             break;
+           } else {
+             if (buff[count] == '%') {
+               newstr[newstr_count++] = buff[count];
+               newstr[newstr_count] = buff[count];
+             } else {
+               newstr[newstr_count] = buff[count];
+             }
+           }
+           newstr_count++;
+         }
+
+         int line = lexer->line_number;
+         lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+         raise_lexer_error(listener, newstr, line);
+       }
+     }
+   }
+
+   include lexer_common "lexer_common.<%= @i18n.sanitized_key %>.rl";
+
+ }%%
+
+ /** Data **/
+ %% write data;
+
+ static VALUE
+ strip_i(VALUE str, VALUE ary)
+ {
+   rb_funcall(str, rb_intern("strip!"), 0);
+   rb_ary_push(ary, str);
+
+   return Qnil;
+ }
+
+ static VALUE
+ multiline_strip(VALUE text)
+ {
+   VALUE map = rb_ary_new();
+   VALUE split = rb_str_split(text, "\n");
+
+   rb_iterate(rb_each, split, strip_i, map);
+
+   return rb_ary_join(split, rb_str_new2("\n"));
+ }
+
+ static void
+ store_kw_con(VALUE listener, const char * event_name,
+              const char * keyword_at, size_t keyword_length,
+              const char * at, size_t length,
+              int current_line)
+ {
+   VALUE con = Qnil, kw = Qnil;
+   kw = ENCODED_STR_NEW(keyword_at, keyword_length);
+   con = ENCODED_STR_NEW(at, length);
+   con = multiline_strip(con);
+   rb_funcall(con, rb_intern("strip!"), 0);
+   rb_funcall(kw, rb_intern("strip!"), 0);
+   rb_funcall(listener, rb_intern(event_name), 3, kw, con, INT2FIX(current_line));
+ }
+
+ static void
+ store_attr(VALUE listener, const char * attr_type,
+            const char * at, size_t length,
+            int line)
+ {
+   VALUE val = ENCODED_STR_NEW(at, length);
+   rb_funcall(listener, rb_intern(attr_type), 2, val, INT2FIX(line));
+ }
+
+ static void
+ store_pystring_content(VALUE listener,
+                        int start_col,
+                        const char *at, size_t length,
+                        int current_line)
+ {
+   VALUE con = ENCODED_STR_NEW(at, length);
+   // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+   char pat[32];
+   snprintf(pat, 32, "^ {0,%d}", start_col);
+   VALUE re = rb_reg_regcomp(rb_str_new2(pat));
+   rb_funcall(con, rb_intern("gsub!"), 2, re, rb_str_new2(""));
+   rb_funcall(listener, rb_intern("py_string"), 2, con, INT2FIX(current_line));
+ }
+
+ static void
+ raise_lexer_error(VALUE listener, const char * at, int line)
+ {
+   rb_raise(rb_eGherkinLexerError, "Lexing error on line %d: '%s'.", line, at);
+ }
+
+ static void lexer_init(lexer_state *lexer) {
+   lexer->content_start = 0;
+   lexer->content_end = 0;
+   lexer->content_len = 0;
+   lexer->mark = 0;
+   lexer->field_len = 0;
+   lexer->keyword_start = 0;
+   lexer->keyword_end = 0;
+   lexer->next_keyword_start = 0;
+   lexer->line_number = 1;
+   lexer->last_newline = 0;
+   lexer->final_newline = 0;
+   lexer->start_col = 0;
+ }
+
+ static VALUE CLexer_alloc(VALUE klass)
+ {
+   VALUE obj;
+   lexer_state *lxr = ALLOC(lexer_state);
+   lexer_init(lxr);
+
+   obj = Data_Wrap_Struct(klass, NULL, -1, lxr);
+
+   return obj;
+ }
+
+ static VALUE CLexer_init(VALUE self, VALUE listener)
+ {
+   rb_iv_set(self, "@listener", listener);
+
+   lexer_state *lxr = NULL;
+   DATA_GET(self, lexer_state, lxr);
+   lexer_init(lxr);
+
+   return self;
+ }
+
+ static VALUE CLexer_scan(VALUE self, VALUE input)
+ {
+   lexer_state *lexer = NULL;
+   DATA_GET(self, lexer_state, lexer);
+
+   VALUE input_copy = rb_str_dup(input);
+   rb_str_append(input_copy, rb_str_new2("\n%_FEATURE_END_%"));
+   char *data = RSTRING_PTR(input_copy);
+   long len = RSTRING_LEN(input_copy);
+
+   if (len == 0) {
+     rb_raise(rb_eGherkinLexerError, "No content to lex.");
+   } else {
+     const char *p, *pe, *eof;
+     int cs = 0;
+
+     VALUE listener = rb_iv_get(self, "@listener");
+     VALUE rows = rb_ary_new();
+     VALUE current_row = rb_ary_new();
+
+     p = data;
+     pe = data + len;
+     eof = pe;
+
+     assert(*pe == '\0' && "pointer does not end on NULL");
+     assert(pe - p == len && "pointers aren't same distance");
+
+     %% write init;
+     %% write exec;
+
+     assert(p <= pe && "data overflow after parsing execute");
+     assert(lexer->content_start <= len && "content starts after data end");
+     assert(lexer->mark < len && "mark is after data end");
+     assert(lexer->field_len <= len && "field has length longer than the whole data");
+
+     // Reset lexer by re-initializing the whole thing
+     lexer_init(lexer);
+
+     if (cs == lexer_error) {
+       rb_raise(rb_eGherkinLexerError, "Invalid format, lexing fails.");
+     } else {
+       return Qtrue;
+     }
+   }
+ }
+
+ void Init_gherkin_lexer_<%= @i18n.sanitized_key %>()
+ {
+   mGherkin = rb_define_module("Gherkin");
+   mLexer = rb_const_get(mGherkin, rb_intern("Lexer"));
+   rb_eGherkinLexerError = rb_const_get(mLexer, rb_intern("LexingError"));
+
+   mCLexer = rb_define_module_under(mGherkin, "CLexer");
+   cI18nLexer = rb_define_class_under(mCLexer, "<%= @i18n.sanitized_key.capitalize %>", rb_cObject);
+   rb_define_alloc_func(cI18nLexer, CLexer_alloc);
+   rb_define_method(cI18nLexer, "initialize", CLexer_init, 1);
+   rb_define_method(cI18nLexer, "scan", CLexer_scan, 1);
+ }

data/ragel/lexer.java.rl.erb
@@ -0,0 +1,200 @@
+ package gherkin.lexer;
+
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.regex.Pattern;
+ import gherkin.Lexer;
+ import gherkin.Listener;
+ import gherkin.LexingError;
+
+ public class <%= @i18n.sanitized_key.capitalize %> implements Lexer {
+   %%{
+     machine lexer;
+     alphtype byte;
+
+     action begin_content {
+       contentStart = p;
+       currentLine = lineNumber;
+     }
+
+     action start_pystring {
+       currentLine = lineNumber;
+       startCol = p - lastNewline;
+     }
+
+     action begin_pystring_content {
+       contentStart = p;
+     }
+
+     action store_pystring_content {
+       String con = unindent(startCol, substring(data, contentStart, nextKeywordStart-1).replaceFirst("(\\r?\\n)?( )*\\Z", ""));
+       listener.py_string(con, currentLine);
+     }
+
+     action store_feature_content {
+       String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart).trim());
+       listener.feature(keyword, con, currentLine);
+       if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+       nextKeywordStart = -1;
+     }
+
+     action store_background_content {
+       String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+       listener.background(keyword, con, currentLine);
+       if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+       nextKeywordStart = -1;
+     }
+
+     action store_scenario_content {
+       String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+       listener.scenario(keyword, con, currentLine);
+       if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+       nextKeywordStart = -1;
+     }
+
+     action store_scenario_outline_content {
+       String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+       listener.scenario_outline(keyword, con, currentLine);
+       if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+       nextKeywordStart = -1;
+     }
+
+     action store_examples_content {
+       String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+       listener.examples(keyword, con, currentLine);
+       if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+       nextKeywordStart = -1;
+     }
+
+     action store_step_content {
+       listener.step(keyword, substring(data, contentStart, p).trim(), currentLine);
+     }
+
+     action store_comment_content {
+       listener.comment(substring(data, contentStart, p).trim(), lineNumber);
+       keywordStart = -1;
+     }
+
+     action store_tag_content {
+       listener.tag(substring(data, contentStart, p).trim(), currentLine);
+       keywordStart = -1;
+     }
+
+     action inc_line_number {
+       lineNumber++;
+     }
+
+     action last_newline {
+       lastNewline = p + 1;
+     }
+
+     action start_keyword {
+       if(keywordStart == -1) keywordStart = p;
+     }
+
+     action end_keyword {
+       keyword = substring(data, keywordStart, p).replaceFirst(":$","").trim();
+       keywordStart = -1;
+     }
+
+     action next_keyword_start {
+       nextKeywordStart = p;
+     }
+
+     action start_table {
+       p = p - 1;
+       rows = new ArrayList<List<String>>();
+       currentLine = lineNumber;
+     }
+
+     action start_row {
+       currentRow = new ArrayList<String>();
+     }
+
+     action begin_cell_content {
+       contentStart = p;
+     }
+
+     action store_cell_content {
+       currentRow.add(substring(data, contentStart, p).trim());
+     }
+
+     action store_row {
+       rows.add(currentRow);
+     }
+
+     action store_table {
+       if(!rows.isEmpty()) {
+         listener.table(rows, currentLine);
+       }
+     }
+
+     action end_feature {
+       if(cs < lexer_first_final) {
+         String content = currentLineContent(data, lastNewline);
+         throw new LexingError("Lexing error on line " + lineNumber);
+       }
+     }
+
+     include lexer_common "lexer_common.<%= @i18n.sanitized_key %>.rl";
+   }%%
+
+   private final Listener listener;
+
+   public <%= @i18n.sanitized_key.capitalize %>(Listener listener) {
+     this.listener = listener;
+   }
+
+   %% write data noerror;
+
+   public void scan(CharSequence inputSequence) {
+     String input = inputSequence.toString() + "\n%_FEATURE_END_%";
+     byte[] data = input.getBytes();
+     int cs, p = 0, pe = data.length;
+     int eof = pe;
+
+     int lineNumber = 1;
+     int lastNewline = 0;
+
+     int contentStart = -1;
+     int currentLine = -1;
+     int startCol = -1;
+     int nextKeywordStart = -1;
+     int keywordStart = -1;
+     String keyword = null;
+     List<List<String>> rows = null;
+     List<String> currentRow = null;
+
+     %% write init;
+     %% write exec;
+   }
+
+   private String keywordContent(byte[] data, int p, int eof, int nextKeywordStart, int contentStart) {
+     int endPoint = (nextKeywordStart == -1 || (p == eof)) ? p : nextKeywordStart;
+     return substring(data, contentStart, endPoint);
+   }
+
+   private String multilineStrip(String text) {
+     StringBuffer result = new StringBuffer();
+     for(String s : text.split("\n")) {
+       result.append(s.trim()).append("\n");
+     }
+     return result.toString().trim();
+   }
+
+   private String unindent(int startCol, String text) {
+     return Pattern.compile("^ {0," + startCol + "}", Pattern.MULTILINE).matcher(text).replaceAll("");
+   }
+
+   private String currentLineContent(byte[] data, int lastNewline) {
+     return substring(data, lastNewline, data.length).trim();
+   }
+
+   private String substring(byte[] data, int start, int end) {
+     try {
+       return new String(data, start, end-start, "utf-8");
+     } catch(java.io.UnsupportedEncodingException e) {
+       throw new RuntimeException("Internal error", e);
+     }
+   }
+ }
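
For reference, the following standalone sketch (not part of the gem; the class name and sample strings are invented for illustration) reproduces the unindent and multilineStrip helpers from lexer.java.rl.erb above, to show how py_string (docstring) content is de-indented relative to the column where it was opened and how multi-line keyword descriptions are trimmed line by line.

// Standalone illustration only: copies of the unindent/multilineStrip logic
// from the generated Java lexer, runnable without the rest of the gem.
import java.util.regex.Pattern;

public class UnindentDemo {
  // Strip up to startCol leading spaces from every line (same regex as the template).
  static String unindent(int startCol, String text) {
    return Pattern.compile("^ {0," + startCol + "}", Pattern.MULTILINE).matcher(text).replaceAll("");
  }

  // Trim each line, then trim the whole block (same idea as multilineStrip).
  static String multilineStrip(String text) {
    StringBuilder result = new StringBuilder();
    for (String s : text.split("\n")) {
      result.append(s.trim()).append("\n");
    }
    return result.toString().trim();
  }

  public static void main(String[] args) {
    // A docstring whose opening """ sat at column 4: only up to 4 leading spaces are removed,
    // so the relative indentation of the second line survives.
    String pyString = "    first line\n      indented line\n    last line";
    System.out.println(unindent(4, pyString));
    // prints: first line\n  indented line\nlast line

    System.out.println(multilineStrip("  Feature title  \n   with a description   "));
    // prints: Feature title\nwith a description
  }
}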