gherkin 2.11.2 → 2.11.3

Files changed (59)
  1. data/.travis.yml +1 -0
  2. data/History.md +9 -0
  3. data/README.md +2 -1
  4. data/ext/gherkin_lexer_ar/gherkin_lexer_ar.c +8 -8
  5. data/ext/gherkin_lexer_bg/gherkin_lexer_bg.c +8 -8
  6. data/ext/gherkin_lexer_bm/gherkin_lexer_bm.c +8 -8
  7. data/ext/gherkin_lexer_ca/gherkin_lexer_ca.c +8 -8
  8. data/ext/gherkin_lexer_cs/gherkin_lexer_cs.c +8 -8
  9. data/ext/gherkin_lexer_cy_gb/gherkin_lexer_cy_gb.c +8 -8
  10. data/ext/gherkin_lexer_da/gherkin_lexer_da.c +8 -8
  11. data/ext/gherkin_lexer_de/gherkin_lexer_de.c +8 -8
  12. data/ext/gherkin_lexer_en/gherkin_lexer_en.c +8 -8
  13. data/ext/gherkin_lexer_en_au/gherkin_lexer_en_au.c +915 -373
  14. data/ext/gherkin_lexer_en_lol/gherkin_lexer_en_lol.c +8 -8
  15. data/ext/gherkin_lexer_en_pirate/gherkin_lexer_en_pirate.c +8 -8
  16. data/ext/gherkin_lexer_en_scouse/gherkin_lexer_en_scouse.c +8 -8
  17. data/ext/gherkin_lexer_en_tx/gherkin_lexer_en_tx.c +8 -8
  18. data/ext/gherkin_lexer_eo/gherkin_lexer_eo.c +8 -8
  19. data/ext/gherkin_lexer_es/gherkin_lexer_es.c +8 -8
  20. data/ext/gherkin_lexer_et/gherkin_lexer_et.c +8 -8
  21. data/ext/gherkin_lexer_fi/gherkin_lexer_fi.c +8 -8
  22. data/ext/gherkin_lexer_fr/gherkin_lexer_fr.c +8 -8
  23. data/ext/gherkin_lexer_he/gherkin_lexer_he.c +8 -8
  24. data/ext/gherkin_lexer_hr/gherkin_lexer_hr.c +8 -8
  25. data/ext/gherkin_lexer_hu/gherkin_lexer_hu.c +8 -8
  26. data/ext/gherkin_lexer_id/gherkin_lexer_id.c +8 -8
  27. data/ext/gherkin_lexer_is/gherkin_lexer_is.c +8 -8
  28. data/ext/gherkin_lexer_it/gherkin_lexer_it.c +8 -8
  29. data/ext/gherkin_lexer_ja/gherkin_lexer_ja.c +8 -8
  30. data/ext/gherkin_lexer_ko/gherkin_lexer_ko.c +8 -8
  31. data/ext/gherkin_lexer_lt/gherkin_lexer_lt.c +8 -8
  32. data/ext/gherkin_lexer_lu/gherkin_lexer_lu.c +8 -8
  33. data/ext/gherkin_lexer_lv/gherkin_lexer_lv.c +8 -8
  34. data/ext/gherkin_lexer_nl/gherkin_lexer_nl.c +8 -8
  35. data/ext/gherkin_lexer_no/gherkin_lexer_no.c +8 -8
  36. data/ext/gherkin_lexer_pl/gherkin_lexer_pl.c +8 -8
  37. data/ext/gherkin_lexer_pt/gherkin_lexer_pt.c +8 -8
  38. data/ext/gherkin_lexer_ro/gherkin_lexer_ro.c +8 -8
  39. data/ext/gherkin_lexer_ru/gherkin_lexer_ru.c +8 -8
  40. data/ext/gherkin_lexer_sk/gherkin_lexer_sk.c +8 -8
  41. data/ext/gherkin_lexer_sr_cyrl/gherkin_lexer_sr_cyrl.c +8 -8
  42. data/ext/gherkin_lexer_sr_latn/gherkin_lexer_sr_latn.c +8 -8
  43. data/ext/gherkin_lexer_sv/gherkin_lexer_sv.c +8 -8
  44. data/ext/gherkin_lexer_tr/gherkin_lexer_tr.c +8 -8
  45. data/ext/gherkin_lexer_uk/gherkin_lexer_uk.c +8 -8
  46. data/ext/gherkin_lexer_uz/gherkin_lexer_uz.c +8 -8
  47. data/ext/gherkin_lexer_vi/gherkin_lexer_vi.c +8 -8
  48. data/ext/gherkin_lexer_zh_cn/gherkin_lexer_zh_cn.c +8 -8
  49. data/ext/gherkin_lexer_zh_tw/gherkin_lexer_zh_tw.c +8 -8
  50. data/gherkin.gemspec +4 -4
  51. data/lib/gherkin/i18n.json +10 -10
  52. data/lib/gherkin/i18n.rb +1 -1
  53. data/ragel/lexer.c.rl.erb +5 -5
  54. data/tasks/apidoc.rake +32 -0
  55. data/tasks/ikvm.rake +1 -1
  56. data/tasks/ragel_task.rb +1 -0
  57. data/tasks/release.rake +1 -19
  58. metadata +10 -10
  59. data/tasks/yard.rake +0 -7
data/.travis.yml CHANGED
@@ -1,3 +1,4 @@
+ before_install: gem install bundler -v 1.2.1
  rvm:
  - 1.9.3
  - 1.9.2
data/History.md CHANGED
@@ -1,3 +1,12 @@
+ ## [2.11.3](https://github.com/cucumber/gherkin/compare/v2.11.2...v2.11.3)
+
+ * [.NET] Upgraded IKVM from 0.46.0.1 to 7.1.4532.2 - quite a version bump! (Aslak Hellesøy)
+ * [JavaScript] Added a README to prevent npm warnings. (Aslak Hellesøy)
+ * [Ruby] Don't use C++ style comments. ([#191](https://github.com/cucumber/gherkin/pull/191) Sam Goldman)
+ * [Core] Fix for Australian language support ([#196](https://github.com/cucumber/gherkin/pull/196) hogfish)
+ * [Ruby] Add encoding option to IO.read ([#190](https://github.com/cucumber/gherkin/pull/190), [#192](https://github.com/cucumber/gherkin/issues/192) [#194](https://github.com/cucumber/gherkin/pull/194) HUANG Wei, Levin Alexander)
+ * [JavaScript] Can't run on IE because of `const` keyword ([#186](https://github.com/cucumber/gherkin/issues/186) Aslak Hellesøy)
+
  ## [2.11.2](https://github.com/cucumber/gherkin/compare/v2.11.1...v2.11.2)

  * [Java] Depend on an external gherkin-jvm-deps jar with repackaged dependencies (Aslak Hellesøy, Rex Hoffman)
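Note on the Ruby entry above about adding an encoding option to IO.read (#190, #192, #194): as a rough, hedged sketch only — the path and option below are illustrative assumptions, not the exact code from those pull requests — passing an encoding through IO.read in Ruby 1.9+ looks like this:

    # Hedged illustration: read a feature file with an explicit encoding so that
    # non-ASCII Gherkin keywords and step text are not mis-decoded.
    path = "features/example.feature"           # hypothetical path, for illustration only
    source = IO.read(path, encoding: "UTF-8")   # IO.read accepts open() options (e.g. :encoding) since Ruby 1.9
    puts source.encoding                        # => UTF-8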
data/README.md CHANGED
@@ -54,7 +54,7 @@ The jar file is in the central Maven repo.
  <dependency>
  <groupId>info.cukes</groupId>
  <artifactId>gherkin</artifactId>
- <version>2.11.2</version>
+ <version>2.11.3</version>
  </dependency>

  You can get it manually from [Maven Central](http://search.maven.org/#browse%7C-2073395818)
@@ -251,6 +251,7 @@ Now we can release:
  * gherkin.gemspec
  * java/pom.xml
  * js/package.json
+ * History.md
  * Run `bundle update`, so Gemfile.lock gets updated with the changes.
  * Commit changes, otherwise you will get an error at the end when a tag is made.
  * Run `bundle exec rake gems:prepare && ./build_native_gems.sh && bundle exec rake release:ALL`
@@ -870,7 +870,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1301,14 +1301,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1322,7 +1322,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1364,14 +1364,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1385,7 +1385,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1406,7 +1406,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
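A side note on the pat[32] buffer in the unindent hunks above (the same comment-style change repeats in each generated lexer below): the fixed part of the pattern "^[\t ]{0,%d}" is 9 characters (the tab escape is a single byte), so with the terminating NUL the 32-byte buffer leaves room for up to 22 digits of start_col before snprintf would truncate the pattern — the "crash gracefully" remark covers that truncation case.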
@@ -1093,7 +1093,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1524,14 +1524,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1545,7 +1545,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1587,14 +1587,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1608,7 +1608,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1629,7 +1629,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
@@ -814,7 +814,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1245,14 +1245,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1266,7 +1266,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1308,14 +1308,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1329,7 +1329,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1350,7 +1350,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
@@ -1019,7 +1019,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1450,14 +1450,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1471,7 +1471,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1513,14 +1513,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1534,7 +1534,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1555,7 +1555,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
@@ -865,7 +865,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1296,14 +1296,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1317,7 +1317,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1359,14 +1359,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1380,7 +1380,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1401,7 +1401,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
@@ -720,7 +720,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1151,14 +1151,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1172,7 +1172,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1214,14 +1214,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1235,7 +1235,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1256,7 +1256,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {
@@ -736,7 +736,7 @@ static VALUE
  unindent(VALUE con, int start_col)
  {
  VALUE re;
- // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+ /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
  char pat[32];
  snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
  re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1167,14 +1167,14 @@ _match:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1188,7 +1188,7 @@ _match:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1230,14 +1230,14 @@ _again:
  buff = data;
  }

- // Allocate as a ruby string so that it gets cleaned up by GC
+ /* Allocate as a ruby string so that it gets cleaned up by GC */
  newstr_val = rb_str_new(buff, len);
  newstr = RSTRING_PTR(newstr_val);


  for (count = 0; count < len; count++) {
  if(buff[count] == 10) {
- newstr[newstr_count] = '\0'; // terminate new string at first newline found
+ newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
  break;
  } else {
  if (buff[count] == '%') {
@@ -1251,7 +1251,7 @@ _again:
  }

  line = lexer->line_number;
- lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+ lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
  raise_lexer_error(newstr, line);
  } else {
  rb_funcall(listener, rb_intern("eof"), 0);
@@ -1272,7 +1272,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

- // Reset lexer by re-initializing the whole thing
+ /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {