gherkin 1.0.30-universal-dotnet

Files changed (104)
  1. data/.gitattributes +2 -0
  2. data/.gitignore +9 -0
  3. data/.mailmap +2 -0
  4. data/History.txt +187 -0
  5. data/LICENSE +20 -0
  6. data/README.rdoc +59 -0
  7. data/Rakefile +58 -0
  8. data/VERSION.yml +5 -0
  9. data/bin/gherkin +5 -0
  10. data/cucumber.yml +3 -0
  11. data/features/escaped_pipes.feature +8 -0
  12. data/features/feature_parser.feature +226 -0
  13. data/features/native_lexer.feature +19 -0
  14. data/features/parser_with_native_lexer.feature +205 -0
  15. data/features/pretty_printer.feature +14 -0
  16. data/features/step_definitions/eyeball_steps.rb +3 -0
  17. data/features/step_definitions/gherkin_steps.rb +30 -0
  18. data/features/step_definitions/pretty_formatter_steps.rb +55 -0
  19. data/features/steps_parser.feature +46 -0
  20. data/features/support/env.rb +33 -0
  21. data/ikvm/.gitignore +3 -0
  22. data/java/.gitignore +2 -0
  23. data/java/src/main/java/gherkin/lexer/.gitignore +1 -0
  24. data/java/src/main/resources/gherkin/.gitignore +1 -0
  25. data/lib/.gitignore +4 -0
  26. data/lib/gherkin.rb +2 -0
  27. data/lib/gherkin/c_lexer.rb +17 -0
  28. data/lib/gherkin/cli/main.rb +33 -0
  29. data/lib/gherkin/formatter/argument.rb +27 -0
  30. data/lib/gherkin/formatter/colors.rb +119 -0
  31. data/lib/gherkin/formatter/escaping.rb +15 -0
  32. data/lib/gherkin/formatter/monochrome_format.rb +9 -0
  33. data/lib/gherkin/formatter/pretty_formatter.rb +168 -0
  34. data/lib/gherkin/i18n.rb +176 -0
  35. data/lib/gherkin/i18n.yml +588 -0
  36. data/lib/gherkin/i18n_lexer.rb +38 -0
  37. data/lib/gherkin/native.rb +7 -0
  38. data/lib/gherkin/native/ikvm.rb +55 -0
  39. data/lib/gherkin/native/java.rb +47 -0
  40. data/lib/gherkin/native/null.rb +9 -0
  41. data/lib/gherkin/parser/event.rb +45 -0
  42. data/lib/gherkin/parser/filter_listener.rb +199 -0
  43. data/lib/gherkin/parser/meta.txt +5 -0
  44. data/lib/gherkin/parser/parser.rb +142 -0
  45. data/lib/gherkin/parser/root.txt +11 -0
  46. data/lib/gherkin/parser/steps.txt +4 -0
  47. data/lib/gherkin/parser/tag_expression.rb +50 -0
  48. data/lib/gherkin/rb_lexer.rb +8 -0
  49. data/lib/gherkin/rb_lexer/.gitignore +1 -0
  50. data/lib/gherkin/rb_lexer/README.rdoc +8 -0
  51. data/lib/gherkin/rubify.rb +18 -0
  52. data/lib/gherkin/tools.rb +8 -0
  53. data/lib/gherkin/tools/files.rb +35 -0
  54. data/lib/gherkin/tools/reformat.rb +19 -0
  55. data/lib/gherkin/tools/stats.rb +21 -0
  56. data/lib/gherkin/tools/stats_listener.rb +57 -0
  57. data/ragel/i18n/.gitignore +1 -0
  58. data/ragel/lexer.c.rl.erb +425 -0
  59. data/ragel/lexer.java.rl.erb +216 -0
  60. data/ragel/lexer.rb.rl.erb +173 -0
  61. data/ragel/lexer_common.rl.erb +50 -0
  62. data/spec/gherkin/c_lexer_spec.rb +21 -0
  63. data/spec/gherkin/csharp_lexer_spec.rb +20 -0
  64. data/spec/gherkin/fixtures/1.feature +8 -0
  65. data/spec/gherkin/fixtures/comments_in_table.feature +9 -0
  66. data/spec/gherkin/fixtures/complex.feature +45 -0
  67. data/spec/gherkin/fixtures/dos_line_endings.feature +45 -0
  68. data/spec/gherkin/fixtures/i18n_fr.feature +14 -0
  69. data/spec/gherkin/fixtures/i18n_no.feature +7 -0
  70. data/spec/gherkin/fixtures/i18n_zh-CN.feature +9 -0
  71. data/spec/gherkin/fixtures/simple_with_comments.feature +7 -0
  72. data/spec/gherkin/fixtures/simple_with_tags.feature +11 -0
  73. data/spec/gherkin/fixtures/with_bom.feature +3 -0
  74. data/spec/gherkin/formatter/argument_spec.rb +28 -0
  75. data/spec/gherkin/formatter/colors_spec.rb +19 -0
  76. data/spec/gherkin/formatter/pretty_formatter_spec.rb +162 -0
  77. data/spec/gherkin/formatter/spaces.feature +9 -0
  78. data/spec/gherkin/formatter/tabs.feature +9 -0
  79. data/spec/gherkin/i18n_lexer_spec.rb +26 -0
  80. data/spec/gherkin/i18n_spec.rb +144 -0
  81. data/spec/gherkin/java_lexer_spec.rb +21 -0
  82. data/spec/gherkin/parser/filter_listener_spec.rb +390 -0
  83. data/spec/gherkin/parser/parser_spec.rb +50 -0
  84. data/spec/gherkin/parser/tag_expression_spec.rb +116 -0
  85. data/spec/gherkin/rb_lexer_spec.rb +19 -0
  86. data/spec/gherkin/sexp_recorder.rb +32 -0
  87. data/spec/gherkin/shared/lexer_spec.rb +550 -0
  88. data/spec/gherkin/shared/py_string_spec.rb +150 -0
  89. data/spec/gherkin/shared/row_spec.rb +104 -0
  90. data/spec/gherkin/shared/tags_spec.rb +50 -0
  91. data/spec/spec_helper.rb +87 -0
  92. data/tasks/bench.rake +188 -0
  93. data/tasks/bench/feature_builder.rb +49 -0
  94. data/tasks/bench/generated/.gitignore +1 -0
  95. data/tasks/bench/null_listener.rb +4 -0
  96. data/tasks/compile.rake +89 -0
  97. data/tasks/cucumber.rake +26 -0
  98. data/tasks/gems.rake +45 -0
  99. data/tasks/ikvm.rake +47 -0
  100. data/tasks/ragel_task.rb +70 -0
  101. data/tasks/rdoc.rake +12 -0
  102. data/tasks/release.rake +26 -0
  103. data/tasks/rspec.rake +15 -0
  104. metadata +257 -0
data/ragel/lexer.java.rl.erb
@@ -0,0 +1,216 @@
+ package gherkin.lexer;
+
+ import java.io.UnsupportedEncodingException;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.regex.Pattern;
+ import java.util.regex.Matcher;
+ import gherkin.Lexer;
+ import gherkin.Listener;
+ import gherkin.LexingError;
+
+ public class <%= @i18n.underscored_iso_code.upcase %> implements Lexer {
+ %%{
+ machine lexer;
+ alphtype byte;
+
+ action begin_content {
+ contentStart = p;
+ currentLine = lineNumber;
+ }
+
+ action start_pystring {
+ currentLine = lineNumber;
+ startCol = p - lastNewline;
+ }
+
+ action begin_pystring_content {
+ contentStart = p;
+ }
+
+ action store_pystring_content {
+ String con = unindent(startCol, substring(data, contentStart, nextKeywordStart-1).replaceFirst("(\\r?\\n)?([\\t ])*\\Z", "").replaceAll("\\\\\"\\\\\"\\\\\"", "\"\"\""));
+ listener.pyString(con, currentLine);
+ }
+
+ action store_feature_content {
+ String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart).trim());
+ listener.feature(keyword, con, currentLine);
+ if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+ nextKeywordStart = -1;
+ }
+
+ action store_background_content {
+ String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+ listener.background(keyword, con, currentLine);
+ if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+ nextKeywordStart = -1;
+ }
+
+ action store_scenario_content {
+ String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+ listener.scenario(keyword, con, currentLine);
+ if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+ nextKeywordStart = -1;
+ }
+
+ action store_scenario_outline_content {
+ String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+ listener.scenarioOutline(keyword, con, currentLine);
+ if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+ nextKeywordStart = -1;
+ }
+
+ action store_examples_content {
+ String con = multilineStrip(keywordContent(data, p, eof, nextKeywordStart, contentStart));
+ listener.examples(keyword, con, currentLine);
+ if(nextKeywordStart != -1) p = nextKeywordStart - 1;
+ nextKeywordStart = -1;
+ }
+
+ action store_step_content {
+ listener.step(keyword, substring(data, contentStart, p).trim(), currentLine);
+ }
+
+ action store_comment_content {
+ listener.comment(substring(data, contentStart, p).trim(), lineNumber);
+ keywordStart = -1;
+ }
+
+ action store_tag_content {
+ listener.tag(substring(data, contentStart, p).trim(), currentLine);
+ keywordStart = -1;
+ }
+
+ action inc_line_number {
+ lineNumber++;
+ }
+
+ action last_newline {
+ lastNewline = p + 1;
+ }
+
+ action start_keyword {
+ if(keywordStart == -1) keywordStart = p;
+ }
+
+ action end_keyword {
+ keyword = substring(data, keywordStart, p).replaceFirst(":$","");
+ keywordStart = -1;
+ }
+
+ action next_keyword_start {
+ nextKeywordStart = p;
+ }
+
+ action start_row {
+ p = p - 1;
+ currentRow = new ArrayList<String>();
+ currentLine = lineNumber;
+ }
+
+ action begin_cell_content {
+ contentStart = p;
+ }
+
+ action store_cell_content {
+ String con = substring(data, contentStart, p).trim();
+ currentRow.add(con.replaceAll("\\\\\\|", "|").replaceAll("\\\\\\\\", "\\\\"));
+ }
+
+ action store_row {
+ listener.row(currentRow, currentLine);
+ }
+
+ action end_feature {
+ if(cs < lexer_first_final) {
+ String content = currentLineContent(data, lastNewline);
+ throw new LexingError("Lexing error on line " + lineNumber + ": '" + content + "'");
+ } else {
+ listener.eof();
+ }
+ }
+
+ include lexer_common "lexer_common.<%= @i18n.underscored_iso_code %>.rl";
+ }%%
+
+ private final Listener listener;
+
+ public <%= @i18n.underscored_iso_code.upcase %>(Listener listener) {
+ this.listener = listener;
+ }
+
+ %% write data noerror;
+
+ public void scan(String inputSequence) {
+ String input = inputSequence.toString() + "\n%_FEATURE_END_%";
+ byte[] data = null;
+ try {
+ data = input.getBytes("UTF-8");
+ } catch(UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ int cs, p = 0, pe = data.length;
+ int eof = pe;
+
+ int lineNumber = 1;
+ int lastNewline = 0;
+
+ int contentStart = -1;
+ int currentLine = -1;
+ int startCol = -1;
+ int nextKeywordStart = -1;
+ int keywordStart = -1;
+ String keyword = null;
+ List<String> currentRow = null;
+
+ %% write init;
+ %% write exec;
+ }
+
+ private String keywordContent(byte[] data, int p, int eof, int nextKeywordStart, int contentStart) {
+ int endPoint = (nextKeywordStart == -1 || (p == eof)) ? p : nextKeywordStart;
+ return substring(data, contentStart, endPoint);
+ }
+
+ private static final Pattern CRLF_RE = Pattern.compile("\r\n");
+ private static final Pattern LF_RE = Pattern.compile("[^\r]\n");
+ private static final String CRLF = "\r\n";
+ private static final String LF = "\n";
+
+ private String multilineStrip(String text) {
+ int crlfCount = matchCount(CRLF_RE.matcher(text));
+ int lfCount = matchCount(LF_RE.matcher(text));
+ String eol = crlfCount > lfCount ? CRLF : LF;
+
+ StringBuffer result = new StringBuffer();
+ for(String s : text.split("\r?\n")) {
+ result.append(s.trim()).append(eol);
+ }
+ return result.toString().trim();
+ }
+
+ private int matchCount(Matcher m) {
+ int count = 0;
+ while(m.find()) {
+ count++;
+ }
+ return count;
+ }
+
+ private String unindent(int startCol, String text) {
+ return Pattern.compile("^[\t ]{0," + startCol + "}", Pattern.MULTILINE).matcher(text).replaceAll("");
+ }
+
+ private String currentLineContent(byte[] data, int lastNewline) {
+ return substring(data, lastNewline, data.length).trim();
+ }
+
+ private String substring(byte[] data, int start, int end) {
+ try {
+ return new String(data, start, end-start, "utf-8");
+ } catch(java.io.UnsupportedEncodingException e) {
+ throw new RuntimeException("Internal error", e);
+ }
+ }
+ }
data/ragel/lexer.rb.rl.erb
@@ -0,0 +1,173 @@
+ module Gherkin
+ module RbLexer
+ class <%= @i18n.underscored_iso_code.capitalize %> #:nodoc:
+ %%{
+ machine lexer;
+
+ action begin_content {
+ @content_start = p
+ @current_line = @line_number
+ }
+
+ action start_pystring {
+ @current_line = @line_number
+ @start_col = p - @last_newline
+ }
+
+ action begin_pystring_content {
+ @content_start = p
+ }
+
+ action store_pystring_content {
+ con = unindent(@start_col, utf8_pack(data[@content_start...@next_keyword_start-1]).sub(/(\r?\n)?([\t ])*\Z/, '').gsub(/\\"\\"\\"/, '"""'))
+ @listener.py_string(con, @current_line)
+ }
+
+ action store_feature_content {
+ store_keyword_content(:feature, data, p, eof) { |con| multiline_strip(con) }
+ p = @next_keyword_start - 1 if @next_keyword_start
+ @next_keyword_start = nil
+ }
+
+ action store_background_content {
+ store_keyword_content(:background, data, p, eof) { |con| multiline_strip(con) }
+ p = @next_keyword_start - 1 if @next_keyword_start
+ @next_keyword_start = nil
+ }
+
+ action store_scenario_content {
+ store_keyword_content(:scenario, data, p, eof) { |con| multiline_strip(con) }
+ p = @next_keyword_start - 1 if @next_keyword_start
+ @next_keyword_start = nil
+ }
+
+ action store_scenario_outline_content {
+ store_keyword_content(:scenario_outline, data, p, eof) { |con| multiline_strip(con) }
+ p = @next_keyword_start - 1 if @next_keyword_start
+ @next_keyword_start = nil
+ }
+
+ action store_examples_content {
+ store_keyword_content(:examples, data, p, eof) { |con| multiline_strip(con) }
+ p = @next_keyword_start - 1 if @next_keyword_start
+ @next_keyword_start = nil
+ }
+
+ action store_step_content {
+ con = utf8_pack(data[@content_start...p]).strip
+ @listener.step(@keyword, con, @current_line)
+ }
+
+ action store_comment_content {
+ con = utf8_pack(data[@content_start...p]).strip
+ @listener.comment(con, @line_number)
+ @keyword_start = nil
+ }
+
+ action store_tag_content {
+ con = utf8_pack(data[@content_start...p]).strip
+ @listener.tag(con, @current_line)
+ @keyword_start = nil
+ }
+
+ action inc_line_number {
+ @line_number += 1
+ }
+
+ action last_newline {
+ @last_newline = p + 1
+ }
+
+ action start_keyword {
+ @keyword_start ||= p
+ }
+
+ action end_keyword {
+ @keyword = utf8_pack(data[@keyword_start...p]).sub(/:$/,'')
+ @keyword_start = nil
+ }
+
+ action next_keyword_start {
+ @next_keyword_start = p
+ }
+
+ action start_row {
+ p = p - 1
+ current_row = []
+ @current_line = @line_number
+ }
+
+ action begin_cell_content {
+ @content_start = p
+ }
+
+ action store_cell_content {
+ con = utf8_pack(data[@content_start...p]).strip
+ current_row << con.gsub(/\\\|/, "|").gsub(/\\\\/, "\\")
+ }
+
+ action store_row {
+ @listener.row(current_row, @current_line)
+ }
+
+ action end_feature {
+ if cs < lexer_first_final
+ content = current_line_content(data, p)
+ raise LexingError.new("Lexing error on line %d: '%s'." % [@line_number, content])
+ else
+ @listener.eof
+ end
+ }
+
+ include lexer_common "lexer_common.<%= @i18n.underscored_iso_code %>.rl";
+ }%%
+
+ def initialize(listener)
+ @listener = listener
+ %% write data;
+ end
+
+ def scan(data)
+ data = (data + "\n%_FEATURE_END_%").unpack("c*") # Explicit EOF simplifies things considerably
+ eof = pe = data.length
+
+ @line_number = 1
+ @last_newline = 0
+
+ %% write init;
+ %% write exec;
+ end
+
+ CRLF_RE = /\r\n/
+ LF_RE = /[^\r]\n/
+ CRLF = "\r\n"
+ LF = "\n"
+
+ def multiline_strip(text)
+ crlf_count = text.scan(CRLF_RE).size
+ lf_count = text.scan(LF_RE).size
+ eol = crlf_count > lf_count ? CRLF : LF
+ text.split(/\r?\n/).map{|s| s.strip}.join(eol).strip
+ end
+
+ def unindent(startcol, text)
+ text.gsub(/^[\t ]{0,#{startcol}}/, "")
+ end
+
+ def store_keyword_content(event, data, p, eof)
+ end_point = (!@next_keyword_start or (p == eof)) ? p : @next_keyword_start
+ con = yield utf8_pack(data[@content_start...end_point])
+ @listener.send(event, @keyword, con, @current_line)
+ end
+
+ def current_line_content(data, p)
+ rest = data[@last_newline..-1]
+ utf8_pack(rest[0..rest.index(10)||-1]).strip
+ end
+
+ def utf8_pack(array)
+ (RUBY_VERSION =~ /^1\.9/) ? array.pack("c*").force_encoding("UTF-8") : array.pack("c*")
+ end
+ end
+ end
+ end
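
Editorial note (not part of the diff): the generated Ruby lexer above is driven by handing it any listener object and calling scan; the listener only needs to respond to the events named in the actions. A minimal sketch, assuming the generated English lexer is available as Gherkin::RbLexer::En and that the require path below is where the generated file lands (both are assumptions, and RecordingListener is purely illustrative):

    require 'gherkin/rb_lexer/en'  # assumed path to the generated English lexer

    # Illustrative listener that records every event the lexer emits.
    class RecordingListener
      EVENTS = [:feature, :background, :scenario, :scenario_outline, :examples,
                :step, :comment, :tag, :py_string, :row, :eof]

      attr_reader :events

      def initialize
        @events = []
      end

      # One recording method per event named in the actions above.
      EVENTS.each do |event|
        define_method(event) { |*args| @events << [event, *args] }
      end
    end

    listener = RecordingListener.new
    lexer = Gherkin::RbLexer::En.new(listener)
    lexer.scan("Feature: Minimal\n  Scenario: One step\n    Given something\n")
    listener.events.each { |e| p e }
    # Roughly: [:feature, "Feature", "Minimal", 1], [:scenario, "Scenario", "One step", 2],
    #          [:step, "Given ", "something", 3], [:eof]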
data/ragel/lexer_common.rl.erb
@@ -0,0 +1,50 @@
+ %%{
+ machine lexer_common;
+
+ # Language specific
+ I18N_Feature = (<%= ragel_list(@i18n.keywords('feature')) %> ':') >start_keyword %end_keyword;
+ I18N_Background = (<%= ragel_list(@i18n.keywords('background')) %> ':') >start_keyword %end_keyword;
+ I18N_ScenarioOutline = (<%= ragel_list(@i18n.keywords('scenario_outline')) %> ':') >start_keyword %end_keyword;
+ I18N_Scenario = (<%= ragel_list(@i18n.keywords('scenario')) %> ':') >start_keyword %end_keyword;
+ I18N_Step = <%= ragel_list(@i18n.step_keywords) %> >start_keyword %end_keyword;
+ I18N_Examples = (<%= ragel_list(@i18n.keywords('examples')) %> ':') >start_keyword %end_keyword;
+
+ EOF = '%_FEATURE_END_%'; # Explicit EOF added before scanning begins
+ EOL = ('\n' | '\r\n') @inc_line_number @last_newline;
+ BOM = 0xEF 0xBB 0xBF; # http://en.wikipedia.org/wiki/Byte_order_mark
+
+ PIPE = '|';
+ ESCAPED_PIPE = '\\|';
+
+ FeatureHeadingEnd = EOL+ space* (I18N_Background | I18N_Scenario | I18N_ScenarioOutline | '@' | '#' | EOF) >next_keyword_start;
+ ScenarioHeadingEnd = EOL+ space* ( I18N_Scenario | I18N_ScenarioOutline | I18N_Step | '@' | '#' | EOF ) >next_keyword_start;
+ BackgroundHeadingEnd = EOL+ space* ( I18N_Scenario | I18N_ScenarioOutline | I18N_Step | '@' | '#'| EOF ) >next_keyword_start;
+ ScenarioOutlineHeadingEnd = EOL+ space* ( I18N_Scenario | I18N_Step | '@' | '#' | EOF ) >next_keyword_start;
+ ExamplesHeadingEnd = EOL+ space* '|' >next_keyword_start;
+
+ FeatureHeading = space* I18N_Feature %begin_content ^FeatureHeadingEnd* :>> FeatureHeadingEnd @store_feature_content;
+ BackgroundHeading = space* I18N_Background %begin_content ^BackgroundHeadingEnd* :>> BackgroundHeadingEnd @store_background_content;
+ ScenarioHeading = space* I18N_Scenario %begin_content ^ScenarioHeadingEnd* :>> ScenarioHeadingEnd @store_scenario_content;
+ ScenarioOutlineHeading = space* I18N_ScenarioOutline %begin_content ^ScenarioOutlineHeadingEnd* :>> ScenarioOutlineHeadingEnd @store_scenario_outline_content;
+ ExamplesHeading = space* I18N_Examples %begin_content ^ExamplesHeadingEnd* :>> ExamplesHeadingEnd @store_examples_content;
+
+ Step = space* I18N_Step %begin_content ^EOL+ %store_step_content :> EOL+;
+ Comment = space* '#' >begin_content ^EOL* %store_comment_content :> EOL+;
+
+ Tag = ( ('@' [^@\r\n\t ]+) >begin_content ) %store_tag_content;
+ Tags = space* (Tag space*)+ EOL+;
+
+ StartRow = space* PIPE >start_row;
+ EndRow = EOL space* ^PIPE >next_keyword_start;
+ Cell = PIPE (ESCAPED_PIPE | (any - PIPE)+ )* >begin_cell_content %store_cell_content;
+ RowBody = space* Cell** PIPE :>> (space* EOL+ space*) %store_row;
+ Row = StartRow :>> RowBody <: EndRow?;
+
+ StartPyString = '"""' >start_pystring space* :>> EOL;
+ EndPyString = (space* '"""') >next_keyword_start;
+ PyString = space* StartPyString %begin_pystring_content (^EOL | EOL)* :>> EndPyString %store_pystring_content space* EOL+;
+
+ Tokens = BOM? (space | EOL)* (Tags | Comment | FeatureHeading | BackgroundHeading | ScenarioHeading | ScenarioOutlineHeading | ExamplesHeading | Step | Row | PyString)* (space | EOL)* EOF;
+
+ main := Tokens %end_feature @!end_feature;
+ }%%
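
Editorial note (not part of the diff): the grammar above admits escaped pipes inside table cells (ESCAPED_PIPE), and both lexer templates undo that escaping in store_cell_content with the same two substitutions. A standalone Ruby sketch of just that rule, using a made-up raw cell value:

    # Cell text as captured between pipes, before unescaping.
    raw_cell = 'a\|b and a backslash: \\\\'

    # Same substitutions as store_cell_content: "\|" -> "|", "\\" -> "\".
    cell = raw_cell.gsub(/\\\|/, "|").gsub(/\\\\/, "\\")

    puts cell  # => a|b and a backslash: \

The analogous rule in store_pystring_content turns an escaped \"\"\" inside a py_string back into a literal triple quote.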
data/spec/gherkin/c_lexer_spec.rb
@@ -0,0 +1,21 @@
+ #encoding: utf-8
+ unless defined?(JRUBY_VERSION)
+ require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
+ require 'gherkin_lexer_en'
+
+ module Gherkin
+ module Lexer
+ describe "C Lexer" do
+ before do
+ @listener = Gherkin::SexpRecorder.new
+ @lexer = Gherkin::CLexer::En.new(@listener)
+ end
+
+ it_should_behave_like "a Gherkin lexer"
+ it_should_behave_like "a Gherkin lexer lexing tags"
+ it_should_behave_like "a Gherkin lexer lexing py_strings"
+ it_should_behave_like "a Gherkin lexer lexing rows"
+ end
+ end
+ end
+ end