gherkin 2.11.2 → 2.11.3

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (59)
  1. data/.travis.yml +1 -0
  2. data/History.md +9 -0
  3. data/README.md +2 -1
  4. data/ext/gherkin_lexer_ar/gherkin_lexer_ar.c +8 -8
  5. data/ext/gherkin_lexer_bg/gherkin_lexer_bg.c +8 -8
  6. data/ext/gherkin_lexer_bm/gherkin_lexer_bm.c +8 -8
  7. data/ext/gherkin_lexer_ca/gherkin_lexer_ca.c +8 -8
  8. data/ext/gherkin_lexer_cs/gherkin_lexer_cs.c +8 -8
  9. data/ext/gherkin_lexer_cy_gb/gherkin_lexer_cy_gb.c +8 -8
  10. data/ext/gherkin_lexer_da/gherkin_lexer_da.c +8 -8
  11. data/ext/gherkin_lexer_de/gherkin_lexer_de.c +8 -8
  12. data/ext/gherkin_lexer_en/gherkin_lexer_en.c +8 -8
  13. data/ext/gherkin_lexer_en_au/gherkin_lexer_en_au.c +915 -373
  14. data/ext/gherkin_lexer_en_lol/gherkin_lexer_en_lol.c +8 -8
  15. data/ext/gherkin_lexer_en_pirate/gherkin_lexer_en_pirate.c +8 -8
  16. data/ext/gherkin_lexer_en_scouse/gherkin_lexer_en_scouse.c +8 -8
  17. data/ext/gherkin_lexer_en_tx/gherkin_lexer_en_tx.c +8 -8
  18. data/ext/gherkin_lexer_eo/gherkin_lexer_eo.c +8 -8
  19. data/ext/gherkin_lexer_es/gherkin_lexer_es.c +8 -8
  20. data/ext/gherkin_lexer_et/gherkin_lexer_et.c +8 -8
  21. data/ext/gherkin_lexer_fi/gherkin_lexer_fi.c +8 -8
  22. data/ext/gherkin_lexer_fr/gherkin_lexer_fr.c +8 -8
  23. data/ext/gherkin_lexer_he/gherkin_lexer_he.c +8 -8
  24. data/ext/gherkin_lexer_hr/gherkin_lexer_hr.c +8 -8
  25. data/ext/gherkin_lexer_hu/gherkin_lexer_hu.c +8 -8
  26. data/ext/gherkin_lexer_id/gherkin_lexer_id.c +8 -8
  27. data/ext/gherkin_lexer_is/gherkin_lexer_is.c +8 -8
  28. data/ext/gherkin_lexer_it/gherkin_lexer_it.c +8 -8
  29. data/ext/gherkin_lexer_ja/gherkin_lexer_ja.c +8 -8
  30. data/ext/gherkin_lexer_ko/gherkin_lexer_ko.c +8 -8
  31. data/ext/gherkin_lexer_lt/gherkin_lexer_lt.c +8 -8
  32. data/ext/gherkin_lexer_lu/gherkin_lexer_lu.c +8 -8
  33. data/ext/gherkin_lexer_lv/gherkin_lexer_lv.c +8 -8
  34. data/ext/gherkin_lexer_nl/gherkin_lexer_nl.c +8 -8
  35. data/ext/gherkin_lexer_no/gherkin_lexer_no.c +8 -8
  36. data/ext/gherkin_lexer_pl/gherkin_lexer_pl.c +8 -8
  37. data/ext/gherkin_lexer_pt/gherkin_lexer_pt.c +8 -8
  38. data/ext/gherkin_lexer_ro/gherkin_lexer_ro.c +8 -8
  39. data/ext/gherkin_lexer_ru/gherkin_lexer_ru.c +8 -8
  40. data/ext/gherkin_lexer_sk/gherkin_lexer_sk.c +8 -8
  41. data/ext/gherkin_lexer_sr_cyrl/gherkin_lexer_sr_cyrl.c +8 -8
  42. data/ext/gherkin_lexer_sr_latn/gherkin_lexer_sr_latn.c +8 -8
  43. data/ext/gherkin_lexer_sv/gherkin_lexer_sv.c +8 -8
  44. data/ext/gherkin_lexer_tr/gherkin_lexer_tr.c +8 -8
  45. data/ext/gherkin_lexer_uk/gherkin_lexer_uk.c +8 -8
  46. data/ext/gherkin_lexer_uz/gherkin_lexer_uz.c +8 -8
  47. data/ext/gherkin_lexer_vi/gherkin_lexer_vi.c +8 -8
  48. data/ext/gherkin_lexer_zh_cn/gherkin_lexer_zh_cn.c +8 -8
  49. data/ext/gherkin_lexer_zh_tw/gherkin_lexer_zh_tw.c +8 -8
  50. data/gherkin.gemspec +4 -4
  51. data/lib/gherkin/i18n.json +10 -10
  52. data/lib/gherkin/i18n.rb +1 -1
  53. data/ragel/lexer.c.rl.erb +5 -5
  54. data/tasks/apidoc.rake +32 -0
  55. data/tasks/ikvm.rake +1 -1
  56. data/tasks/ragel_task.rb +1 -0
  57. data/tasks/release.rake +1 -19
  58. metadata +10 -10
  59. data/tasks/yard.rake +0 -7
@@ -1005,7 +1005,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
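(Context for the hunk above: unindent() formats its regexp pattern into a fixed 32-byte buffer, which is what the rewritten comment refers to. The following standalone C sketch is not the gem's code; only the pattern string is taken from the hunk. It shows how snprintf bounds the write to the buffer and how its return value reveals whether the pattern was truncated.)

    #include <stdio.h>

    int main(void)
    {
        char pat[32];
        int start_col = 12;  /* example value, not taken from the gem */

        /* snprintf never writes more than sizeof(pat) bytes, including the
           terminating NUL; its return value is the length the full pattern
           would have had, so needed >= sizeof(pat) signals truncation. */
        int needed = snprintf(pat, sizeof(pat), "^[\t ]{0,%d}", start_col);

        if (needed < 0 || (size_t)needed >= sizeof(pat)) {
            fprintf(stderr, "pattern truncated\n");
            return 1;
        }
        printf("pattern: %s\n", pat);
        return 0;
    }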
@@ -1436,14 +1436,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
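(The _match/_again hunks repeat the same comment-style change around the lexer's error path. The idea the comments describe: the raw scanner buffer is copied into a Ruby String with rb_str_new so the GC owns the memory, RSTRING_PTR exposes the bytes, and the message is cut at the first newline before the error is raised. Below is a simplified, self-contained sketch of that pattern, not the gem's actual raise_lexer_error; the error class and message text are placeholders.)

    #include <ruby.h>

    /* Build an error message from a raw scanner buffer without leaking memory.
       rb_str_new() copies the bytes into a Ruby String, so the copy is owned
       by Ruby's GC; RSTRING_PTR() gives a writable pointer into that String,
       which can safely be truncated in place. */
    static void
    raise_first_line(const char *buff, long len, int line)
    {
        long i;
        VALUE newstr_val = rb_str_new(buff, len);  /* GC-managed copy */
        char *newstr = RSTRING_PTR(newstr_val);

        for (i = 0; i < len; i++) {
            if (newstr[i] == '\n') {
                newstr[i] = '\0';                  /* keep only the first line */
                break;
            }
        }
        /* Passing the message through "%s" keeps any '%' in it from being
           treated as a format directive. */
        rb_raise(rb_eRuntimeError, "Lexing error on line %d: '%s'.", line, newstr);
    }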
@@ -1457,7 +1457,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1499,14 +1499,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1520,7 +1520,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1541,7 +1541,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -762,7 +762,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1193,14 +1193,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1214,7 +1214,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1256,14 +1256,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1277,7 +1277,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1298,7 +1298,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -785,7 +785,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1216,14 +1216,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1237,7 +1237,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1279,14 +1279,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1300,7 +1300,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1321,7 +1321,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -1379,7 +1379,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1810,14 +1810,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1831,7 +1831,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1873,14 +1873,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1894,7 +1894,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1915,7 +1915,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -1089,7 +1089,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1520,14 +1520,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1541,7 +1541,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1583,14 +1583,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1604,7 +1604,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1625,7 +1625,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -896,7 +896,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1327,14 +1327,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1348,7 +1348,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1390,14 +1390,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1411,7 +1411,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1432,7 +1432,7 @@ _again:
   assert(lexer->content_start <= len && "content starts after data end");
   assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
   lexer_init(lexer);

   if (cs == lexer_error) {
@@ -752,7 +752,7 @@ static VALUE
 unindent(VALUE con, int start_col)
 {
   VALUE re;
-  // Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters
+  /* Gherkin will crash gracefully if the string representation of start_col pushes the pattern past 32 characters */
   char pat[32];
   snprintf(pat, 32, "^[\t ]{0,%d}", start_col);
   re = rb_reg_regcomp(rb_str_new2(pat));
@@ -1183,14 +1183,14 @@ _match:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1204,7 +1204,7 @@ _match:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1246,14 +1246,14 @@ _again:
     buff = data;
   }

-  // Allocate as a ruby string so that it gets cleaned up by GC
+  /* Allocate as a ruby string so that it gets cleaned up by GC */
   newstr_val = rb_str_new(buff, len);
   newstr = RSTRING_PTR(newstr_val);


   for (count = 0; count < len; count++) {
     if(buff[count] == 10) {
-      newstr[newstr_count] = '\0'; // terminate new string at first newline found
+      newstr[newstr_count] = '\0'; /* terminate new string at first newline found */
       break;
     } else {
       if (buff[count] == '%') {
@@ -1267,7 +1267,7 @@ _again:
   }

   line = lexer->line_number;
-  lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
+  lexer_init(lexer); /* Re-initialize so we can scan again with the same lexer */
   raise_lexer_error(newstr, line);
 } else {
   rb_funcall(listener, rb_intern("eof"), 0);
@@ -1288,7 +1288,7 @@ _again:
  assert(lexer->content_start <= len && "content starts after data end");
  assert(lexer->mark < len && "mark is after data end");

-  // Reset lexer by re-initializing the whole thing
+  /* Reset lexer by re-initializing the whole thing */
  lexer_init(lexer);

  if (cs == lexer_error) {