comment-parser 1.0.0 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106) hide show
  1. package/CHANGELOG.md +13 -0
  2. package/README.md +26 -19
  3. package/browser/index.js +331 -272
  4. package/es6/index.d.ts +21 -4
  5. package/es6/index.js +21 -5
  6. package/es6/parser/block-parser.d.ts +18 -2
  7. package/es6/parser/block-parser.js +12 -12
  8. package/es6/parser/index.d.ts +4 -4
  9. package/es6/parser/index.js +25 -26
  10. package/es6/parser/source-parser.js +13 -15
  11. package/es6/parser/spec-parser.d.ts +1 -6
  12. package/es6/parser/spec-parser.js +4 -151
  13. package/es6/parser/tokenizers/description.d.ts +19 -0
  14. package/es6/parser/tokenizers/description.js +46 -0
  15. package/es6/parser/tokenizers/index.d.ts +7 -0
  16. package/es6/parser/tokenizers/index.js +1 -0
  17. package/es6/parser/tokenizers/name.d.ts +6 -0
  18. package/es6/parser/tokenizers/name.js +91 -0
  19. package/es6/parser/tokenizers/tag.d.ts +6 -0
  20. package/es6/parser/tokenizers/tag.js +24 -0
  21. package/es6/parser/tokenizers/type.d.ts +27 -0
  22. package/es6/parser/tokenizers/type.js +67 -0
  23. package/es6/stringifier/index.d.ts +2 -1
  24. package/es6/stringifier/index.js +1 -6
  25. package/es6/stringifier/inspect.d.ts +2 -0
  26. package/es6/stringifier/inspect.js +41 -0
  27. package/es6/transforms/align.js +48 -43
  28. package/es6/transforms/indent.js +11 -22
  29. package/es6/transforms/index.d.ts +0 -2
  30. package/es6/transforms/index.js +2 -10
  31. package/es6/util.d.ts +11 -0
  32. package/es6/util.js +25 -25
  33. package/jest.config.js +12 -11
  34. package/lib/index.d.ts +21 -4
  35. package/lib/index.js +24 -6
  36. package/lib/parser/block-parser.d.ts +18 -2
  37. package/lib/parser/block-parser.js +12 -12
  38. package/lib/parser/index.d.ts +4 -4
  39. package/lib/parser/index.js +26 -27
  40. package/lib/parser/source-parser.js +14 -16
  41. package/lib/parser/spec-parser.d.ts +1 -6
  42. package/lib/parser/spec-parser.js +4 -156
  43. package/lib/parser/tokenizers/description.d.ts +19 -0
  44. package/lib/parser/tokenizers/description.js +51 -0
  45. package/lib/parser/tokenizers/index.d.ts +7 -0
  46. package/lib/parser/tokenizers/index.js +2 -0
  47. package/lib/parser/tokenizers/name.d.ts +6 -0
  48. package/lib/parser/tokenizers/name.js +94 -0
  49. package/lib/parser/tokenizers/tag.d.ts +6 -0
  50. package/lib/parser/tokenizers/tag.js +27 -0
  51. package/lib/parser/tokenizers/type.d.ts +27 -0
  52. package/lib/parser/tokenizers/type.js +70 -0
  53. package/lib/stringifier/index.d.ts +2 -1
  54. package/lib/stringifier/index.js +1 -6
  55. package/lib/stringifier/inspect.d.ts +2 -0
  56. package/lib/stringifier/inspect.js +44 -0
  57. package/lib/transforms/align.js +50 -45
  58. package/lib/transforms/indent.js +12 -23
  59. package/lib/transforms/index.d.ts +0 -2
  60. package/lib/transforms/index.js +3 -13
  61. package/lib/util.d.ts +11 -0
  62. package/lib/util.js +27 -26
  63. package/migrate-1.0.md +7 -7
  64. package/package.json +1 -1
  65. package/src/index.ts +20 -2
  66. package/src/parser/block-parser.ts +19 -1
  67. package/src/parser/index.ts +22 -21
  68. package/src/parser/source-parser.ts +2 -2
  69. package/src/parser/spec-parser.ts +2 -170
  70. package/src/parser/tokenizers/description.ts +75 -0
  71. package/src/parser/tokenizers/index.ts +8 -0
  72. package/src/parser/tokenizers/name.ts +112 -0
  73. package/src/parser/tokenizers/tag.ts +30 -0
  74. package/src/parser/tokenizers/type.ts +92 -0
  75. package/src/stringifier/index.ts +3 -1
  76. package/src/stringifier/inspect.ts +64 -0
  77. package/src/transforms/align.ts +37 -12
  78. package/src/transforms/index.ts +0 -3
  79. package/src/util.ts +20 -0
  80. package/tests/e2e/examples.js +39 -2
  81. package/tests/e2e/examples.spec.js +4 -2
  82. package/tests/e2e/issue-109.spec.js +49 -0
  83. package/tests/e2e/issue-112.spec.js +20 -0
  84. package/tests/e2e/issue-113.spec.js +23 -0
  85. package/tests/e2e/issue-119.spec.js +29 -0
  86. package/tests/e2e/issue-120.spec.js +29 -0
  87. package/tests/e2e/issue-121.spec.js +102 -0
  88. package/tests/e2e/transforms.spec.js +5 -2
  89. package/tests/unit/inspect.spec.ts +36 -0
  90. package/tests/unit/{spacer.spec.ts → spacer-description-joiner.spec.ts} +6 -6
  91. package/tests/unit/spec-description-tokenizer.spec.ts +100 -7
  92. package/tests/unit/spec-name-tokenizer.spec.ts +113 -1
  93. package/tests/unit/spec-parser.spec.ts +6 -9
  94. package/tests/unit/spec-tag-tokenizer.spec.ts +1 -1
  95. package/tests/unit/spec-type-tokenizer.spec.ts +121 -1
  96. package/tests/unit/stringifier.spec.ts +0 -1
  97. package/tests/unit/transforms-align.spec.ts +80 -16
  98. package/tests/unit/util-rewire.spec.ts +107 -0
  99. package/tests/unit/util.spec.ts +0 -48
  100. package/tsconfig.es6.json +1 -1
  101. package/tsconfig.node.json +1 -1
  102. package/es6/parser/spacer.d.ts +0 -3
  103. package/es6/parser/spacer.js +0 -37
  104. package/lib/parser/spacer.d.ts +0 -3
  105. package/lib/parser/spacer.js +0 -40
  106. package/src/parser/spacer.ts +0 -45
package/es6/index.d.ts CHANGED
@@ -1,5 +1,22 @@
1
1
  import { Options as ParserOptions } from './parser/index';
2
- import { Block } from './primitives';
3
- export declare function parse(source: string, options?: Partial<ParserOptions>): Block[];
4
- export declare const stringify: (block: Block) => string;
5
- export * as transforms from './transforms/index';
2
+ import descriptionTokenizer from './parser/tokenizers/description';
3
+ import nameTokenizer from './parser/tokenizers/name';
4
+ import tagTokenizer from './parser/tokenizers/tag';
5
+ import typeTokenizer from './parser/tokenizers/type';
6
+ import alignTransform from './transforms/align';
7
+ import indentTransform from './transforms/indent';
8
+ import { flow as flowTransform } from './transforms/index';
9
+ export declare function parse(source: string, options?: Partial<ParserOptions>): import("./primitives").Block[];
10
+ export declare const stringify: import("./stringifier").Stringifier;
11
+ export { default as inspect } from './stringifier/inspect';
12
+ export declare const transforms: {
13
+ flow: typeof flowTransform;
14
+ align: typeof alignTransform;
15
+ indent: typeof indentTransform;
16
+ };
17
+ export declare const tokenizers: {
18
+ tag: typeof tagTokenizer;
19
+ type: typeof typeTokenizer;
20
+ name: typeof nameTokenizer;
21
+ description: typeof descriptionTokenizer;
22
+ };
package/es6/index.js CHANGED
@@ -1,9 +1,25 @@
1
1
  import getParser from './parser/index';
2
+ import descriptionTokenizer from './parser/tokenizers/description';
3
+ import nameTokenizer from './parser/tokenizers/name';
4
+ import tagTokenizer from './parser/tokenizers/tag';
5
+ import typeTokenizer from './parser/tokenizers/type';
2
6
  import getStringifier from './stringifier/index';
3
- export function parse(source, options) {
4
- if (options === void 0) { options = {}; }
7
+ import alignTransform from './transforms/align';
8
+ import indentTransform from './transforms/indent';
9
+ import { flow as flowTransform } from './transforms/index';
10
+ export function parse(source, options = {}) {
5
11
  return getParser(options)(source);
6
12
  }
7
- export var stringify = getStringifier();
8
- import * as transforms_1 from './transforms/index';
9
- export { transforms_1 as transforms };
13
+ export const stringify = getStringifier();
14
+ export { default as inspect } from './stringifier/inspect';
15
+ export const transforms = {
16
+ flow: flowTransform,
17
+ align: alignTransform,
18
+ indent: indentTransform,
19
+ };
20
+ export const tokenizers = {
21
+ tag: tagTokenizer,
22
+ type: typeTokenizer,
23
+ name: nameTokenizer,
24
+ description: descriptionTokenizer,
25
+ };
@@ -1,8 +1,24 @@
1
1
  import { Line } from '../primitives';
2
+ /**
3
+ * Groups source lines in sections representing tags.
4
+ * First section is a block description if present. Last section captures lines starting with
5
+ * the last tag to the end of the block, including dangling closing marker.
6
+ * @param {Line[]} block souce lines making a single comment block
7
+ */
2
8
  export declare type Parser = (block: Line[]) => Line[][];
3
- declare type Fencer = (source: string) => boolean;
9
+ /**
10
+ * Predicate telling if string contains opening/closing escaping sequence
11
+ * @param {string} source raw source line
12
+ */
13
+ export declare type Fencer = (source: string) => boolean;
14
+ /**
15
+ * `Parser` configuration options
16
+ */
4
17
  export interface Options {
5
18
  fence: string | Fencer;
6
19
  }
20
+ /**
21
+ * Creates configured `Parser`
22
+ * @param {Partial<Options>} options
23
+ */
7
24
  export default function getParser({ fence, }?: Partial<Options>): Parser;
8
- export {};
@@ -1,16 +1,16 @@
1
- var reTag = /^@\S+/;
2
- export default function getParser(_a) {
3
- var _b = (_a === void 0 ? {} : _a).fence, fence = _b === void 0 ? '```' : _b;
4
- var fencer = getFencer(fence);
5
- var toggleFence = function (source, isFenced) {
6
- return fencer(source) ? !isFenced : isFenced;
7
- };
1
+ const reTag = /^@\S+/;
2
+ /**
3
+ * Creates configured `Parser`
4
+ * @param {Partial<Options>} options
5
+ */
6
+ export default function getParser({ fence = '```', } = {}) {
7
+ const fencer = getFencer(fence);
8
+ const toggleFence = (source, isFenced) => fencer(source) ? !isFenced : isFenced;
8
9
  return function parseBlock(source) {
9
10
  // start with description section
10
- var sections = [[]];
11
- var isFenced = false;
12
- for (var _i = 0, source_1 = source; _i < source_1.length; _i++) {
13
- var line = source_1[_i];
11
+ const sections = [[]];
12
+ let isFenced = false;
13
+ for (const line of source) {
14
14
  if (reTag.test(line.tokens.description) && !isFenced) {
15
15
  sections.push([line]);
16
16
  }
@@ -24,6 +24,6 @@ export default function getParser(_a) {
24
24
  }
25
25
  function getFencer(fence) {
26
26
  if (typeof fence === 'string')
27
- return function (source) { return source.split(fence).length % 2 === 0; };
27
+ return (source) => source.split(fence).length % 2 === 0;
28
28
  return fence;
29
29
  }
@@ -1,10 +1,10 @@
1
- import { Tokenizer } from './spec-parser';
2
1
  import { Block } from '../primitives';
3
- import { Spacer } from './spacer';
2
+ import { Tokenizer } from './tokenizers/index';
4
3
  export interface Options {
5
4
  startLine: number;
6
5
  fence: string;
7
- spacing: 'compact' | 'preserve' | Spacer;
6
+ spacing: 'compact' | 'preserve';
8
7
  tokenizers: Tokenizer[];
9
8
  }
10
- export default function getParser({ startLine, fence, spacing, tokenizers, }?: Partial<Options>): (source: string) => Block[];
9
+ export declare type Parser = (source: string) => Block[];
10
+ export default function getParser({ startLine, fence, spacing, tokenizers, }?: Partial<Options>): Parser;
@@ -1,40 +1,39 @@
1
- import sourceParser from './source-parser';
2
- import blockParser from './block-parser';
3
- import specParser, { tagTokenizer, nameTokenizer, typeTokenizer, descriptionTokenizer, } from './spec-parser';
4
- import getSpacer from './spacer';
5
1
  import { splitLines } from '../util';
6
- export default function getParser(_a) {
7
- var _b = _a === void 0 ? {} : _a, _c = _b.startLine, startLine = _c === void 0 ? 0 : _c, _d = _b.fence, fence = _d === void 0 ? '```' : _d, _e = _b.spacing, spacing = _e === void 0 ? 'compact' : _e, _f = _b.tokenizers, tokenizers = _f === void 0 ? [
8
- tagTokenizer(),
9
- typeTokenizer(),
10
- nameTokenizer(),
11
- descriptionTokenizer(getSpacer(spacing)),
12
- ] : _f;
2
+ import blockParser from './block-parser';
3
+ import sourceParser from './source-parser';
4
+ import specParser from './spec-parser';
5
+ import tokenizeTag from './tokenizers/tag';
6
+ import tokenizeType from './tokenizers/type';
7
+ import tokenizeName from './tokenizers/name';
8
+ import tokenizeDescription, { getJoiner as getDescriptionJoiner, } from './tokenizers/description';
9
+ export default function getParser({ startLine = 0, fence = '```', spacing = 'compact', tokenizers = [
10
+ tokenizeTag(),
11
+ tokenizeType(spacing),
12
+ tokenizeName(),
13
+ tokenizeDescription(spacing),
14
+ ], } = {}) {
13
15
  if (startLine < 0 || startLine % 1 > 0)
14
16
  throw new Error('Invalid startLine');
15
- var parseSource = sourceParser({ startLine: startLine });
16
- var parseBlock = blockParser({ fence: fence });
17
- var parseSpec = specParser({ tokenizers: tokenizers });
18
- var join = getSpacer(spacing);
19
- var notEmpty = function (line) {
20
- return line.tokens.description.trim() != '';
21
- };
17
+ const parseSource = sourceParser({ startLine });
18
+ const parseBlock = blockParser({ fence });
19
+ const parseSpec = specParser({ tokenizers });
20
+ const joinDescription = getDescriptionJoiner(spacing);
21
+ const notEmpty = (line) => line.tokens.description.trim() != '';
22
22
  return function (source) {
23
- var blocks = [];
24
- for (var _i = 0, _a = splitLines(source); _i < _a.length; _i++) {
25
- var line = _a[_i];
26
- var lines = parseSource(line);
23
+ const blocks = [];
24
+ for (const line of splitLines(source)) {
25
+ const lines = parseSource(line);
27
26
  if (lines === null)
28
27
  continue;
29
28
  if (lines.find(notEmpty) === undefined)
30
29
  continue;
31
- var sections = parseBlock(lines);
32
- var specs = sections.slice(1).map(parseSpec);
30
+ const sections = parseBlock(lines);
31
+ const specs = sections.slice(1).map(parseSpec);
33
32
  blocks.push({
34
- description: join(sections[0]),
33
+ description: joinDescription(sections[0]),
35
34
  tags: specs,
36
35
  source: lines,
37
- problems: specs.reduce(function (acc, spec) { return acc.concat(spec.problems); }, []),
36
+ problems: specs.reduce((acc, spec) => acc.concat(spec.problems), []),
38
37
  });
39
38
  }
40
39
  return blocks;
@@ -1,44 +1,42 @@
1
- import { splitSpace, seedTokens } from '../util';
2
1
  import { Markers } from '../primitives';
3
- export default function getParser(_a) {
4
- var _b = (_a === void 0 ? {} : _a).startLine, startLine = _b === void 0 ? 0 : _b;
5
- var block = null;
6
- var num = startLine;
2
+ import { seedTokens, splitSpace } from '../util';
3
+ export default function getParser({ startLine = 0, } = {}) {
4
+ let block = null;
5
+ let num = startLine;
7
6
  return function parseSource(source) {
8
- var _a, _b, _c;
9
- var rest = source;
10
- var tokens = seedTokens();
11
- _a = splitSpace(rest), tokens.start = _a[0], rest = _a[1];
7
+ let rest = source;
8
+ const tokens = seedTokens();
9
+ [tokens.start, rest] = splitSpace(rest);
12
10
  if (block === null &&
13
11
  rest.startsWith(Markers.start) &&
14
12
  !rest.startsWith(Markers.nostart)) {
15
13
  block = [];
16
14
  tokens.delimiter = rest.slice(0, Markers.start.length);
17
15
  rest = rest.slice(Markers.start.length);
18
- _b = splitSpace(rest), tokens.postDelimiter = _b[0], rest = _b[1];
16
+ [tokens.postDelimiter, rest] = splitSpace(rest);
19
17
  }
20
18
  if (block === null) {
21
19
  num++;
22
20
  return null;
23
21
  }
24
- var isClosed = rest.trimRight().endsWith(Markers.end);
22
+ const isClosed = rest.trimRight().endsWith(Markers.end);
25
23
  if (tokens.delimiter === '' &&
26
24
  rest.startsWith(Markers.delim) &&
27
25
  !rest.startsWith(Markers.end)) {
28
26
  tokens.delimiter = Markers.delim;
29
27
  rest = rest.slice(Markers.delim.length);
30
- _c = splitSpace(rest), tokens.postDelimiter = _c[0], rest = _c[1];
28
+ [tokens.postDelimiter, rest] = splitSpace(rest);
31
29
  }
32
30
  if (isClosed) {
33
- var trimmed = rest.trimRight();
31
+ const trimmed = rest.trimRight();
34
32
  tokens.end = rest.slice(trimmed.length - Markers.end.length);
35
33
  rest = trimmed.slice(0, -Markers.end.length);
36
34
  }
37
35
  tokens.description = rest;
38
- block.push({ number: num, source: source, tokens: tokens });
36
+ block.push({ number: num, source, tokens });
39
37
  num++;
40
38
  if (isClosed) {
41
- var result = block.slice();
39
+ const result = block.slice();
42
40
  block = null;
43
41
  return result;
44
42
  }
@@ -1,12 +1,7 @@
1
1
  import { Line, Spec } from '../primitives';
2
- import { Spacer } from './spacer';
2
+ import { Tokenizer } from './tokenizers/index';
3
3
  export declare type Parser = (source: Line[]) => Spec;
4
- export declare type Tokenizer = (spec: Spec) => Spec;
5
4
  export interface Options {
6
5
  tokenizers: Tokenizer[];
7
6
  }
8
7
  export default function getParser({ tokenizers }: Options): Parser;
9
- export declare function tagTokenizer(): Tokenizer;
10
- export declare function typeTokenizer(): Tokenizer;
11
- export declare function nameTokenizer(): Tokenizer;
12
- export declare function descriptionTokenizer(join: Spacer): Tokenizer;
@@ -1,11 +1,9 @@
1
- import { splitSpace, isSpace, seedSpec } from '../util';
2
- export default function getParser(_a) {
3
- var tokenizers = _a.tokenizers;
1
+ import { seedSpec } from '../util';
2
+ export default function getParser({ tokenizers }) {
4
3
  return function parseSpec(source) {
5
4
  var _a;
6
- var spec = seedSpec({ source: source });
7
- for (var _i = 0, tokenizers_1 = tokenizers; _i < tokenizers_1.length; _i++) {
8
- var tokenize = tokenizers_1[_i];
5
+ let spec = seedSpec({ source });
6
+ for (const tokenize of tokenizers) {
9
7
  spec = tokenize(spec);
10
8
  if ((_a = spec.problems[spec.problems.length - 1]) === null || _a === void 0 ? void 0 : _a.critical)
11
9
  break;
@@ -13,148 +11,3 @@ export default function getParser(_a) {
13
11
  return spec;
14
12
  };
15
13
  }
16
- export function tagTokenizer() {
17
- return function (spec) {
18
- var tokens = spec.source[0].tokens;
19
- var match = tokens.description.match(/\s*(@(\S+))(\s*)/);
20
- if (match === null) {
21
- spec.problems.push({
22
- code: 'spec:tag:prefix',
23
- message: 'tag should start with "@" symbol',
24
- line: spec.source[0].number,
25
- critical: true,
26
- });
27
- return spec;
28
- }
29
- tokens.tag = match[1];
30
- tokens.postTag = match[3];
31
- tokens.description = tokens.description.slice(match[0].length);
32
- spec.tag = match[2];
33
- return spec;
34
- };
35
- }
36
- export function typeTokenizer() {
37
- return function (spec) {
38
- var _a;
39
- var res = '';
40
- var curlies = 0;
41
- var tokens = spec.source[0].tokens;
42
- var source = tokens.description.trimLeft();
43
- if (source[0] !== '{')
44
- return spec;
45
- for (var _i = 0, source_1 = source; _i < source_1.length; _i++) {
46
- var ch = source_1[_i];
47
- if (ch === '{')
48
- curlies++;
49
- if (ch === '}')
50
- curlies--;
51
- res += ch;
52
- if (curlies === 0) {
53
- break;
54
- }
55
- }
56
- if (curlies !== 0) {
57
- spec.problems.push({
58
- code: 'spec:type:unpaired-curlies',
59
- message: 'unpaired curlies',
60
- line: spec.source[0].number,
61
- critical: true,
62
- });
63
- return spec;
64
- }
65
- spec.type = res.slice(1, -1);
66
- tokens.type = res;
67
- _a = splitSpace(source.slice(tokens.type.length)), tokens.postType = _a[0], tokens.description = _a[1];
68
- return spec;
69
- };
70
- }
71
- export function nameTokenizer() {
72
- return function (spec) {
73
- var _a, _b;
74
- var _c;
75
- var tokens = spec.source[0].tokens;
76
- var source = tokens.description.trimLeft();
77
- var quotedGroups = source.split('"');
78
- // if it starts with quoted group, assume it is a literal
79
- if (quotedGroups.length > 1 &&
80
- quotedGroups[0] === '' &&
81
- quotedGroups.length % 2 === 1) {
82
- spec.name = quotedGroups[1];
83
- tokens.name = "\"" + quotedGroups[1] + "\"";
84
- _a = splitSpace(source.slice(tokens.name.length)), tokens.postName = _a[0], tokens.description = _a[1];
85
- return spec;
86
- }
87
- var brackets = 0;
88
- var name = '';
89
- var optional = false;
90
- var defaultValue;
91
- // assume name is non-space string or anything wrapped into brackets
92
- for (var _i = 0, source_2 = source; _i < source_2.length; _i++) {
93
- var ch = source_2[_i];
94
- if (brackets === 0 && isSpace(ch))
95
- break;
96
- if (ch === '[')
97
- brackets++;
98
- if (ch === ']')
99
- brackets--;
100
- name += ch;
101
- }
102
- if (brackets !== 0) {
103
- spec.problems.push({
104
- code: 'spec:name:unpaired-brackets',
105
- message: 'unpaired brackets',
106
- line: spec.source[0].number,
107
- critical: true,
108
- });
109
- return spec;
110
- }
111
- var nameToken = name;
112
- if (name[0] === '[' && name[name.length - 1] === ']') {
113
- optional = true;
114
- name = name.slice(1, -1);
115
- var parts = name.split('=');
116
- name = parts[0].trim();
117
- defaultValue = (_c = parts[1]) === null || _c === void 0 ? void 0 : _c.trim();
118
- if (name === '') {
119
- spec.problems.push({
120
- code: 'spec:name:empty-name',
121
- message: 'empty name',
122
- line: spec.source[0].number,
123
- critical: true,
124
- });
125
- return spec;
126
- }
127
- if (parts.length > 2) {
128
- spec.problems.push({
129
- code: 'spec:name:invalid-default',
130
- message: 'invalid default value syntax',
131
- line: spec.source[0].number,
132
- critical: true,
133
- });
134
- return spec;
135
- }
136
- if (defaultValue === '') {
137
- spec.problems.push({
138
- code: 'spec:name:empty-default',
139
- message: 'empty default value',
140
- line: spec.source[0].number,
141
- critical: true,
142
- });
143
- return spec;
144
- }
145
- }
146
- spec.optional = optional;
147
- spec.name = name;
148
- tokens.name = nameToken;
149
- if (defaultValue !== undefined)
150
- spec.default = defaultValue;
151
- _b = splitSpace(source.slice(tokens.name.length)), tokens.postName = _b[0], tokens.description = _b[1];
152
- return spec;
153
- };
154
- }
155
- export function descriptionTokenizer(join) {
156
- return function (spec) {
157
- spec.description = join(spec.source);
158
- return spec;
159
- };
160
- }
@@ -0,0 +1,19 @@
1
+ import { Line } from '../../primitives';
2
+ import { Tokenizer } from './index';
3
+ /**
4
+ * Walks over provided lines joining description token into a single string.
5
+ * */
6
+ export declare type Joiner = (lines: Line[]) => string;
7
+ /**
8
+ * Shortcut for standard Joiners
9
+ * compact - strip surrounding whitespace and concat lines using a single string
10
+ * preserve - preserves original whitespace and line breaks as is
11
+ */
12
+ export declare type Spacing = 'compact' | 'preserve' | Joiner;
13
+ /**
14
+ * Makes no changes to `spec.lines[].tokens` but joins them into `spec.description`
15
+ * following given spacing srtategy
16
+ * @param {Spacing} spacing tells how to handle the whitespace
17
+ */
18
+ export default function descriptionTokenizer(spacing?: Spacing): Tokenizer;
19
+ export declare function getJoiner(spacing: Spacing): Joiner;
@@ -0,0 +1,46 @@
1
+ import { Markers } from '../../primitives';
2
+ /**
3
+ * Makes no changes to `spec.lines[].tokens` but joins them into `spec.description`
4
+ * following given spacing strategy
5
+ * @param {Spacing} spacing tells how to handle the whitespace
6
+ */
7
+ export default function descriptionTokenizer(spacing = 'compact') {
8
+ const join = getJoiner(spacing);
9
+ return (spec) => {
10
+ spec.description = join(spec.source);
11
+ return spec;
12
+ };
13
+ }
14
+ export function getJoiner(spacing) {
15
+ if (spacing === 'compact')
16
+ return compactJoiner;
17
+ if (spacing === 'preserve')
18
+ return preserveJoiner;
19
+ return spacing;
20
+ }
21
+ function compactJoiner(lines) {
22
+ return lines
23
+ .map(({ tokens: { description } }) => description.trim())
24
+ .filter((description) => description !== '')
25
+ .join(' ');
26
+ }
27
+ const lineNo = (num, { tokens }, i) => tokens.type === '' ? num : i;
28
+ const getDescription = ({ tokens }) => (tokens.delimiter === '' ? tokens.start : tokens.postDelimiter.slice(1)) +
29
+ tokens.description;
30
+ function preserveJoiner(lines) {
31
+ if (lines.length === 0)
32
+ return '';
33
+ // skip the opening line with no description
34
+ if (lines[0].tokens.description === '' &&
35
+ lines[0].tokens.delimiter === Markers.start)
36
+ lines = lines.slice(1);
37
+ // skip the closing line with no description
38
+ const lastLine = lines[lines.length - 1];
39
+ if (lastLine !== undefined &&
40
+ lastLine.tokens.description === '' &&
41
+ lastLine.tokens.end.endsWith(Markers.end))
42
+ lines = lines.slice(0, -1);
43
+ // description starts at the last line of type definition
44
+ lines = lines.slice(lines.reduce(lineNo, 0));
45
+ return lines.map(getDescription).join('\n');
46
+ }
@@ -0,0 +1,7 @@
1
+ import { Spec } from '../../primitives';
2
+ /**
3
+ * Splits `spec.lines[].tokens.description` into other tokens,
4
+ * and populates the spec.{tag, name, type, description}. Invoked in a chain
5
+ * with other tokens, operations listed above can be moved to separate tokenizers
6
+ */
7
+ export declare type Tokenizer = (spec: Spec) => Spec;
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,6 @@
1
+ import { Tokenizer } from './index';
2
+ /**
3
+ * Splits remaining `spec.lines[].tokens.description` into `name` and `description` tokens,
4
+ * and populates the `spec.name`
5
+ */
6
+ export default function nameTokenizer(): Tokenizer;
@@ -0,0 +1,91 @@
1
+ import { splitSpace, isSpace } from '../../util';
2
+ const isQuoted = (s) => s && s.startsWith('"') && s.endsWith('"');
3
+ /**
4
+ * Splits remaining `spec.lines[].tokens.description` into `name` and `description` tokens,
5
+ * and populates the `spec.name`
6
+ */
7
+ export default function nameTokenizer() {
8
+ const typeEnd = (num, { tokens }, i) => tokens.type === '' ? num : i;
9
+ return (spec) => {
10
+ // look for the name in the line where {type} ends
11
+ const { tokens } = spec.source[spec.source.reduce(typeEnd, 0)];
12
+ const source = tokens.description.trimLeft();
13
+ const quotedGroups = source.split('"');
14
+ // if it starts with quoted group, assume it is a literal
15
+ if (quotedGroups.length > 1 &&
16
+ quotedGroups[0] === '' &&
17
+ quotedGroups.length % 2 === 1) {
18
+ spec.name = quotedGroups[1];
19
+ tokens.name = `"${quotedGroups[1]}"`;
20
+ [tokens.postName, tokens.description] = splitSpace(source.slice(tokens.name.length));
21
+ return spec;
22
+ }
23
+ let brackets = 0;
24
+ let name = '';
25
+ let optional = false;
26
+ let defaultValue;
27
+ // assume name is non-space string or anything wrapped into brackets
28
+ for (const ch of source) {
29
+ if (brackets === 0 && isSpace(ch))
30
+ break;
31
+ if (ch === '[')
32
+ brackets++;
33
+ if (ch === ']')
34
+ brackets--;
35
+ name += ch;
36
+ }
37
+ if (brackets !== 0) {
38
+ spec.problems.push({
39
+ code: 'spec:name:unpaired-brackets',
40
+ message: 'unpaired brackets',
41
+ line: spec.source[0].number,
42
+ critical: true,
43
+ });
44
+ return spec;
45
+ }
46
+ const nameToken = name;
47
+ if (name[0] === '[' && name[name.length - 1] === ']') {
48
+ optional = true;
49
+ name = name.slice(1, -1);
50
+ const parts = name.split('=');
51
+ name = parts[0].trim();
52
+ if (parts[1] !== undefined)
53
+ defaultValue = parts.slice(1).join('=').trim();
54
+ if (name === '') {
55
+ spec.problems.push({
56
+ code: 'spec:name:empty-name',
57
+ message: 'empty name',
58
+ line: spec.source[0].number,
59
+ critical: true,
60
+ });
61
+ return spec;
62
+ }
63
+ if (defaultValue === '') {
64
+ spec.problems.push({
65
+ code: 'spec:name:empty-default',
66
+ message: 'empty default value',
67
+ line: spec.source[0].number,
68
+ critical: true,
69
+ });
70
+ return spec;
71
+ }
72
+ // has "=" and is not a string, except for "=>"
73
+ if (!isQuoted(defaultValue) && /=(?!>)/.test(defaultValue)) {
74
+ spec.problems.push({
75
+ code: 'spec:name:invalid-default',
76
+ message: 'invalid default value syntax',
77
+ line: spec.source[0].number,
78
+ critical: true,
79
+ });
80
+ return spec;
81
+ }
82
+ }
83
+ spec.optional = optional;
84
+ spec.name = name;
85
+ tokens.name = nameToken;
86
+ if (defaultValue !== undefined)
87
+ spec.default = defaultValue;
88
+ [tokens.postName, tokens.description] = splitSpace(source.slice(tokens.name.length));
89
+ return spec;
90
+ };
91
+ }
@@ -0,0 +1,6 @@
1
+ import { Tokenizer } from './index';
2
+ /**
3
+ * Splits the `@prefix` from remaining `Spec.lines[].tokens.description` into the `tag` token,
4
+ * and populates `spec.tag`
5
+ */
6
+ export default function tagTokenizer(): Tokenizer;
@@ -0,0 +1,24 @@
1
+ /**
2
+ * Splits the `@prefix` from remaining `Spec.lines[].tokens.description` into the `tag` token,
3
+ * and populates `spec.tag`
4
+ */
5
+ export default function tagTokenizer() {
6
+ return (spec) => {
7
+ const { tokens } = spec.source[0];
8
+ const match = tokens.description.match(/\s*(@(\S+))(\s*)/);
9
+ if (match === null) {
10
+ spec.problems.push({
11
+ code: 'spec:tag:prefix',
12
+ message: 'tag should start with "@" symbol',
13
+ line: spec.source[0].number,
14
+ critical: true,
15
+ });
16
+ return spec;
17
+ }
18
+ tokens.tag = match[1];
19
+ tokens.postTag = match[3];
20
+ tokens.description = tokens.description.slice(match[0].length);
21
+ spec.tag = match[2];
22
+ return spec;
23
+ };
24
+ }