comment-parser 1.4.1 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/CHANGELOG.md +50 -2
  2. package/es6/parser/tokenizers/name.js +11 -3
  3. package/es6/parser/tokenizers/tag.js +3 -0
  4. package/es6/parser/tokenizers/type.d.ts +1 -1
  5. package/es6/parser/tokenizers/type.js +15 -3
  6. package/es6/primitives.js +1 -1
  7. package/jest.config.cjs +5 -7
  8. package/package.json +14 -10
  9. package/src/parser/tokenizers/name.ts +13 -3
  10. package/src/parser/tokenizers/tag.ts +4 -0
  11. package/src/parser/tokenizers/type.ts +16 -3
  12. package/tests/unit/parser.spec.ts +234 -0
  13. package/tests/unit/spec-tag-tokenizer.spec.ts +30 -0
  14. package/tsconfig.node.json +0 -1
  15. package/browser/index.js +0 -650
  16. package/lib/index.cjs +0 -88
  17. package/lib/index.cjs.map +0 -1
  18. package/lib/index.d.ts +0 -32
  19. package/lib/parser/block-parser.cjs +0 -44
  20. package/lib/parser/block-parser.cjs.map +0 -1
  21. package/lib/parser/block-parser.d.ts +0 -24
  22. package/lib/parser/index.cjs +0 -65
  23. package/lib/parser/index.cjs.map +0 -1
  24. package/lib/parser/index.d.ts +0 -11
  25. package/lib/parser/source-parser.cjs +0 -68
  26. package/lib/parser/source-parser.cjs.map +0 -1
  27. package/lib/parser/source-parser.d.ts +0 -7
  28. package/lib/parser/spec-parser.cjs +0 -29
  29. package/lib/parser/spec-parser.cjs.map +0 -1
  30. package/lib/parser/spec-parser.d.ts +0 -7
  31. package/lib/parser/tokenizers/description.cjs +0 -62
  32. package/lib/parser/tokenizers/description.cjs.map +0 -1
  33. package/lib/parser/tokenizers/description.d.ts +0 -20
  34. package/lib/parser/tokenizers/index.cjs +0 -6
  35. package/lib/parser/tokenizers/index.cjs.map +0 -1
  36. package/lib/parser/tokenizers/index.d.ts +0 -7
  37. package/lib/parser/tokenizers/name.cjs +0 -109
  38. package/lib/parser/tokenizers/name.cjs.map +0 -1
  39. package/lib/parser/tokenizers/name.d.ts +0 -6
  40. package/lib/parser/tokenizers/tag.cjs +0 -37
  41. package/lib/parser/tokenizers/tag.cjs.map +0 -1
  42. package/lib/parser/tokenizers/tag.d.ts +0 -6
  43. package/lib/parser/tokenizers/type.cjs +0 -79
  44. package/lib/parser/tokenizers/type.cjs.map +0 -1
  45. package/lib/parser/tokenizers/type.d.ts +0 -27
  46. package/lib/primitives.cjs +0 -17
  47. package/lib/primitives.cjs.map +0 -1
  48. package/lib/primitives.d.ts +0 -54
  49. package/lib/stringifier/index.cjs +0 -18
  50. package/lib/stringifier/index.cjs.map +0 -1
  51. package/lib/stringifier/index.d.ts +0 -3
  52. package/lib/stringifier/inspect.cjs +0 -72
  53. package/lib/stringifier/inspect.cjs.map +0 -1
  54. package/lib/stringifier/inspect.d.ts +0 -2
  55. package/lib/transforms/align.cjs +0 -127
  56. package/lib/transforms/align.cjs.map +0 -1
  57. package/lib/transforms/align.d.ts +0 -3
  58. package/lib/transforms/crlf.cjs +0 -44
  59. package/lib/transforms/crlf.cjs.map +0 -1
  60. package/lib/transforms/crlf.d.ts +0 -3
  61. package/lib/transforms/indent.cjs +0 -58
  62. package/lib/transforms/indent.cjs.map +0 -1
  63. package/lib/transforms/indent.d.ts +0 -2
  64. package/lib/transforms/index.cjs +0 -13
  65. package/lib/transforms/index.cjs.map +0 -1
  66. package/lib/transforms/index.d.ts +0 -3
  67. package/lib/util.cjs +0 -113
  68. package/lib/util.cjs.map +0 -1
  69. package/lib/util.d.ts +0 -21
package/CHANGELOG.md CHANGED
@@ -1,63 +1,89 @@
1
1
  # v1.4.1
2
+
3
+ ## 1.4.2
4
+
5
+ ### Patch Changes
6
+
7
+ - npm audit
8
+ - ensure type, name and description on subsequent lines are properly parsed. @brettz9
9
+ - ensure type, name and description on subsequent lines are properly parsed. @brettz9
2
10
  - fix .prettierignore
3
11
  - add source trasformation example
4
12
 
5
13
  # v1.4.0
14
+
6
15
  - ESM compatibility improvements; fixes #159, #161
7
16
 
8
17
  # v1.3.1
18
+
9
19
  - allow for valid empty jsdoc; fixes #128
10
20
 
11
21
  # v1.3.0
22
+
12
23
  - add support for custom block markers
13
24
 
14
25
  # v1.2.4
26
+
15
27
  - reverting engine constraint back to ^12.0.0
16
28
 
17
29
  # v1.2.3
30
+
18
31
  - publishing missing fix: point package's main to .cjs file
19
32
 
20
33
  # v1.2.2
34
+
21
35
  - re-export ./util on the top-level for compatibility with older Node
22
36
  - point package's main to .cjs file
23
37
 
24
38
  # v1.2.1
39
+
25
40
  - bump `engines` per `exports` issues in earlier Node versions
26
41
 
27
42
  # v1.2.0
43
+
28
44
  - keep and handle appropriately CR line endings
29
45
 
30
46
  # v1.1.6-beta.3
47
+
31
48
  - process CRs as a separate .lineEnd toke
32
49
 
33
50
  # v1.1.6-beta.2
51
+
34
52
  - ESM/CJS compatibility fixes
35
53
 
36
54
  # v1.1.6-beta.1
55
+
37
56
  - support native ESM
38
57
 
39
58
  # v1.1.6-beta.0
59
+
40
60
  - keep original CR line endings
41
61
  - allow to normalize line endings with `crlf` transform
42
62
 
43
63
  # v1.1.5
64
+
44
65
  - drop unused variables
45
66
  - add .editorconfig
46
67
 
47
68
  # v1.1.4
69
+
48
70
  - `bugfix` fix unsynced lib/
49
71
 
50
72
  # v1.1.3
73
+
51
74
  - export primitive type on the top level: Markers, Block, Spec, Line, Tokens, Problem
52
75
 
53
76
  # v1.1.2
77
+
54
78
  - `bugfix` Allow to build nested tags from `name.subname` even if `name` wasn't d
55
79
  - `bugfix` Preserve indentation when extracting comments
56
80
 
57
81
  # v1.1.1
82
+
58
83
  - add helpers for rewiring Spec.source <-> Spec.tags.source
59
84
 
60
85
  # v1.1.0
86
+
61
87
  - split tokenizers into separate modules
62
88
  - allow multiline {type} definitions - issue #109
63
89
  - allow using "=>" in [name=default] defaults – issue #112
@@ -65,75 +91,97 @@
65
91
  - add tokenizers usage example - issue #111
66
92
 
67
93
  # v1.0.0
94
+
68
95
  - complete rewrite in TS with more flexible API
69
96
 
70
97
  # v0.7.6
98
+
71
99
  - distinct non-critical errors by providing `err.warning`
72
100
 
73
101
  # v0.7.5
102
+
74
103
  - name parsing fixes
75
104
 
76
105
  # v0.7.4
106
+
77
107
  - node 8 backward compatibility fixes
78
108
 
79
109
  # v0.7.3
110
+
80
111
  - make stringify result more close to the source
81
112
 
82
113
  # v0.7.2
83
- - make stringify to start each line with * in multiline comments
114
+
115
+ - make stringify to start each line with \* in multiline comments
84
116
 
85
117
  # v0.7.1
118
+
86
119
  - ensure non-space characters after asterisk are included in source
87
120
 
88
121
  # v0.7.0
122
+
89
123
  - allow fenced blocks in tag description, see opts.fence
90
124
 
91
125
  # v0.6.2
126
+
92
127
  - document TypeScript definitions
93
128
 
94
129
  # v0.6.1
130
+
95
131
  - adjust strigifier indentation
96
132
 
97
133
  # v0.6.0
134
+
98
135
  - soft-drop node@6 support
99
136
  - migrate to ES6 syntax
100
137
  - allow to generate comments out of parsed data
101
138
 
102
139
  # v0.5.5
140
+
103
141
  - allow loose tag names, e.g. @.tag, @-tag
104
142
 
105
143
  # v0.5.4
144
+
106
145
  - allow quoted literal names, e.g. `@tag "My Var" description`
107
146
 
108
147
  # v0.5.3
148
+
109
149
  - corrected TypeScript definitions
110
150
 
111
151
  # v0.5.2
152
+
112
153
  - added TypeScript definitions
113
154
  - removed `readable-stream` dependency
114
155
 
115
156
  # v0.5.1
157
+
116
158
  - Support for tab as separator between tag components.
117
159
  - Docs: Indicate when `optional` is `true`; `default` property
118
160
 
119
161
  # v0.5.0
162
+
120
163
  - line wrapping control with `opts.join`
121
164
 
122
165
  # v0.4.2
166
+
123
167
  - tolerate inconsistent lines alignment within block
124
168
 
125
169
  # v0.4.1
126
- - refactored parsing, allow to not start lines with "* " inside block
170
+
171
+ - refactored parsing, allow to not start lines with "\* " inside block
127
172
 
128
173
  # v0.3.2
174
+
129
175
  - fix RegExp for `description` extraction to allow $ char
130
176
 
131
177
  # v0.3.1
178
+
132
179
  - use `readable-stream` fro Node 0.8 comatibility
133
180
  - allow to pass optional parameters to `parse.file(path [,opts], done)`
134
181
  - allow `parse.stream` to work with Buffers in addition to strings
135
182
 
136
183
  # v0.3.0
184
+
137
185
  - `feature` allow to use custom parsers
138
186
  - `feature` always include source, no `raw_value` option needed
139
187
  - `bugfix` always provide `optional` tag property
@@ -7,9 +7,17 @@ const isQuoted = (s) => s && s.startsWith('"') && s.endsWith('"');
7
7
  export default function nameTokenizer() {
8
8
  const typeEnd = (num, { tokens }, i) => tokens.type === '' ? num : i;
9
9
  return (spec) => {
10
- // look for the name in the line where {type} ends
11
- const { tokens } = spec.source[spec.source.reduce(typeEnd, 0)];
12
- const source = tokens.description.trimLeft();
10
+ // look for the name starting in the line where {type} ends
11
+ let finalTypeLine = spec.source.reduce(typeEnd, 0);
12
+ let tokens;
13
+ do {
14
+ ({ tokens } = spec.source[finalTypeLine]);
15
+ if (tokens.description.trim()) {
16
+ break;
17
+ }
18
+ finalTypeLine++;
19
+ } while (spec.source[finalTypeLine]);
20
+ const source = tokens.description.trimStart();
13
21
  const quotedGroups = source.split('"');
14
22
  // if it starts with quoted group, assume it is a literal
15
23
  if (quotedGroups.length > 1 &&
@@ -15,6 +15,9 @@ export default function tagTokenizer() {
15
15
  });
16
16
  return spec;
17
17
  }
18
+ if (match[1].includes('/')) {
19
+ return spec;
20
+ }
18
21
  tokens.tag = match[1];
19
22
  tokens.postTag = match[3];
20
23
  tokens.description = tokens.description.slice(match[0].length);
@@ -18,7 +18,7 @@ export type Joiner = (parts: string[]) => string;
18
18
  */
19
19
  export type Spacing = 'compact' | 'preserve' | Joiner;
20
20
  /**
21
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
21
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
22
22
  * tokens and populates Spec.type`
23
23
  *
24
24
  * @param {Spacing} spacing tells how to deal with a whitespace
@@ -1,6 +1,6 @@
1
1
  import { splitSpace } from '../../util.js';
2
2
  /**
3
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
3
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
4
4
  * tokens and populates Spec.type`
5
5
  *
6
6
  * @param {Spacing} spacing tells how to deal with a whitespace
@@ -11,10 +11,19 @@ export default function typeTokenizer(spacing = 'compact') {
11
11
  return (spec) => {
12
12
  let curlies = 0;
13
13
  let lines = [];
14
- for (const [i, { tokens }] of spec.source.entries()) {
14
+ let descriptionBegun = false;
15
+ let firstTypeIteration = true;
16
+ for (const { tokens } of spec.source.values()) {
15
17
  let type = '';
16
- if (i === 0 && tokens.description[0] !== '{')
18
+ if (!descriptionBegun && tokens.description.trim()) {
19
+ descriptionBegun = true;
20
+ }
21
+ else if (!descriptionBegun) {
22
+ continue;
23
+ }
24
+ if (firstTypeIteration && tokens.description[0] !== '{')
17
25
  return spec;
26
+ firstTypeIteration = false;
18
27
  for (const ch of tokens.description) {
19
28
  if (ch === '{')
20
29
  curlies++;
@@ -28,6 +37,9 @@ export default function typeTokenizer(spacing = 'compact') {
28
37
  if (curlies === 0)
29
38
  break;
30
39
  }
40
+ if (!descriptionBegun) {
41
+ return spec;
42
+ }
31
43
  if (curlies !== 0) {
32
44
  spec.problems.push({
33
45
  code: 'spec:type:unpaired-curlies',
package/es6/primitives.js CHANGED
@@ -5,4 +5,4 @@ export var Markers;
5
5
  Markers["nostart"] = "/***";
6
6
  Markers["delim"] = "*";
7
7
  Markers["end"] = "*/";
8
- })(Markers = Markers || (Markers = {}));
8
+ })(Markers || (Markers = {}));
package/jest.config.cjs CHANGED
@@ -1,3 +1,5 @@
1
+ 'use strict';
2
+
1
3
  // For a detailed explanation regarding each configuration property, visit:
2
4
  // https://jestjs.io/docs/en/configuration.html
3
5
 
@@ -6,12 +8,6 @@ const { compilerOptions: tsconfig } = JSON.parse(
6
8
  );
7
9
 
8
10
  module.exports = {
9
- globals: {
10
- 'ts-jest': {
11
- tsconfig,
12
- },
13
- },
14
-
15
11
  // All imported modules in your tests should be mocked automatically
16
12
  // automock: false,
17
13
 
@@ -186,7 +182,9 @@ module.exports = {
186
182
 
187
183
  // A map from regular expressions to paths to transformers
188
184
  transform: {
189
- '^.+\\.ts$': 'ts-jest',
185
+ '^.+\\.ts$': ['ts-jest', {
186
+ tsconfig
187
+ }],
190
188
  },
191
189
 
192
190
  // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
package/package.json CHANGED
@@ -1,9 +1,12 @@
1
1
  {
2
2
  "name": "comment-parser",
3
- "version": "1.4.1",
3
+ "version": "1.4.2",
4
4
  "description": "Generic JSDoc-like comment parser",
5
5
  "type": "module",
6
6
  "main": "lib/index.cjs",
7
+ "publishConfig": {
8
+ "access": "public"
9
+ },
7
10
  "exports": {
8
11
  ".": {
9
12
  "import": "./es6/index.js",
@@ -35,14 +38,15 @@
35
38
  "test": "tests"
36
39
  },
37
40
  "devDependencies": {
38
- "@types/jest": "^26.0.23",
41
+ "@changesets/cli": "^2.29.8",
42
+ "@types/jest": "^30.0.0",
39
43
  "convert-extension": "^0.3.0",
40
- "jest": "^27.0.5",
41
- "prettier": "2.3.1",
42
- "rimraf": "^3.0.2",
43
- "rollup": "^2.52.2",
44
- "ts-jest": "^27.0.3",
45
- "typescript": "^4.9.5"
44
+ "jest": "^30.1.3",
45
+ "prettier": "3.6.2",
46
+ "rimraf": "^6.0.1",
47
+ "rollup": "^4.52.0",
48
+ "ts-jest": "^29.4.4",
49
+ "typescript": "^5.9.2"
46
50
  },
47
51
  "engines": {
48
52
  "node": ">= 12.0.0"
@@ -56,7 +60,7 @@
56
60
  },
57
61
  "repository": {
58
62
  "type": "git",
59
- "url": "git@github.com:yavorskiy/comment-parser.git"
63
+ "url": "git+ssh://git@github.com/yavorskiy/comment-parser.git"
60
64
  },
61
65
  "keywords": [
62
66
  "jsdoc",
@@ -81,4 +85,4 @@
81
85
  "url": "https://github.com/syavorsky/comment-parser/issues"
82
86
  },
83
87
  "homepage": "https://github.com/syavorsky/comment-parser"
84
- }
88
+ }
@@ -13,9 +13,19 @@ export default function nameTokenizer(): Tokenizer {
13
13
  tokens.type === '' ? num : i;
14
14
 
15
15
  return (spec: Spec): Spec => {
16
- // look for the name in the line where {type} ends
17
- const { tokens } = spec.source[spec.source.reduce(typeEnd, 0)];
18
- const source = tokens.description.trimLeft();
16
+ // look for the name starting in the line where {type} ends
17
+ let finalTypeLine = spec.source.reduce(typeEnd, 0);
18
+
19
+ let tokens;
20
+ do {
21
+ ({ tokens } = spec.source[finalTypeLine]);
22
+ if (tokens.description.trim()) {
23
+ break;
24
+ }
25
+ finalTypeLine++;
26
+ } while (spec.source[finalTypeLine]);
27
+
28
+ const source = tokens.description.trimStart();
19
29
 
20
30
  const quotedGroups = source.split('"');
21
31
 
@@ -20,6 +20,10 @@ export default function tagTokenizer(): Tokenizer {
20
20
  return spec;
21
21
  }
22
22
 
23
+ if (match[1].includes('/')) {
24
+ return spec;
25
+ }
26
+
23
27
  tokens.tag = match[1];
24
28
  tokens.postTag = match[3];
25
29
  tokens.description = tokens.description.slice(match[0].length);
@@ -23,7 +23,7 @@ export type Joiner = (parts: string[]) => string;
23
23
  export type Spacing = 'compact' | 'preserve' | Joiner;
24
24
 
25
25
  /**
26
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
26
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
27
27
  * tokens and populates Spec.type`
28
28
  *
29
29
  * @param {Spacing} spacing tells how to deal with a whitespace
@@ -35,9 +35,18 @@ export default function typeTokenizer(spacing: Spacing = 'compact'): Tokenizer {
35
35
  let curlies = 0;
36
36
  let lines: [Tokens, string][] = [];
37
37
 
38
- for (const [i, { tokens }] of spec.source.entries()) {
38
+ let descriptionBegun = false;
39
+ let firstTypeIteration = true;
40
+ for (const { tokens } of spec.source.values()) {
39
41
  let type = '';
40
- if (i === 0 && tokens.description[0] !== '{') return spec;
42
+ if (!descriptionBegun && tokens.description.trim()) {
43
+ descriptionBegun = true;
44
+ } else if (!descriptionBegun) {
45
+ continue;
46
+ }
47
+
48
+ if (firstTypeIteration && tokens.description[0] !== '{') return spec;
49
+ firstTypeIteration = false;
41
50
 
42
51
  for (const ch of tokens.description) {
43
52
  if (ch === '{') curlies++;
@@ -50,6 +59,10 @@ export default function typeTokenizer(spacing: Spacing = 'compact'): Tokenizer {
50
59
  if (curlies === 0) break;
51
60
  }
52
61
 
62
+ if (!descriptionBegun) {
63
+ return spec;
64
+ }
65
+
53
66
  if (curlies !== 0) {
54
67
  spec.problems.push({
55
68
  code: 'spec:type:unpaired-curlies',
@@ -167,6 +167,240 @@ test('block with tags', () => {
167
167
  ]);
168
168
  });
169
169
 
170
+ test('tag with name and description on second line', () => {
171
+ const parsed = getParser()(`
172
+ /**
173
+ * @param {string}
174
+ * foo The foo.
175
+ */`);
176
+ expect(parsed).toEqual([
177
+ {
178
+ description: '',
179
+ tags: [
180
+ {
181
+ tag: 'param',
182
+ name: 'foo',
183
+ type: 'string',
184
+ optional: false,
185
+ description: 'The foo.',
186
+ problems: [],
187
+ source: [
188
+ {
189
+ number: 2,
190
+ source: ' * @param {string}',
191
+ tokens: seedTokens({
192
+ start: ' ',
193
+ delimiter: '*',
194
+ postDelimiter: ' ',
195
+ tag: '@param',
196
+ postTag: ' ',
197
+ type: '{string}',
198
+ }),
199
+ },
200
+ {
201
+ number: 3,
202
+ source: ' * foo The foo.',
203
+ tokens: seedTokens({
204
+ start: ' ',
205
+ delimiter: '*',
206
+ postDelimiter: ' ',
207
+ name: 'foo',
208
+ postName: ' ',
209
+ description: 'The foo.',
210
+ }),
211
+ },
212
+ {
213
+ number: 4,
214
+ source: ' */',
215
+ tokens: seedTokens({
216
+ start: ' ',
217
+ end: '*/',
218
+ }),
219
+ },
220
+ ],
221
+ },
222
+ ],
223
+ source: [
224
+ {
225
+ number: 1,
226
+ source: ' /**',
227
+ tokens: seedTokens({
228
+ delimiter: '/**',
229
+ start: ' ',
230
+ }),
231
+ },
232
+ {
233
+ number: 2,
234
+ source: ' * @param {string}',
235
+ tokens: seedTokens({
236
+ start: ' ',
237
+ delimiter: '*',
238
+ postDelimiter: ' ',
239
+ tag: '@param',
240
+ postTag: ' ',
241
+ type: '{string}',
242
+ }),
243
+ },
244
+ {
245
+ number: 3,
246
+ source: ' * foo The foo.',
247
+ tokens: seedTokens({
248
+ start: ' ',
249
+ delimiter: '*',
250
+ postDelimiter: ' ',
251
+ name: 'foo',
252
+ postName: ' ',
253
+ description: 'The foo.',
254
+ }),
255
+ },
256
+ {
257
+ number: 4,
258
+ source: ' */',
259
+ tokens: seedTokens({
260
+ start: ' ',
261
+ end: '*/',
262
+ }),
263
+ },
264
+ ],
265
+ problems: [],
266
+ },
267
+ ]);
268
+ });
269
+
270
+ test('tag with type, name and description on subsequent lines', () => {
271
+ const parsed = getParser()(`
272
+ /**
273
+ * @param
274
+ * {string}
275
+ * foo
276
+ * The foo.
277
+ */`);
278
+ expect(parsed).toEqual([
279
+ {
280
+ description: '',
281
+ tags: [
282
+ {
283
+ tag: 'param',
284
+ name: 'foo',
285
+ type: 'string',
286
+ optional: false,
287
+ description: 'The foo.',
288
+ problems: [],
289
+ source: [
290
+ {
291
+ number: 2,
292
+ source: ' * @param',
293
+ tokens: seedTokens({
294
+ start: ' ',
295
+ delimiter: '*',
296
+ postDelimiter: ' ',
297
+ tag: '@param',
298
+ }),
299
+ },
300
+ {
301
+ number: 3,
302
+ source: ' * {string}',
303
+ tokens: seedTokens({
304
+ start: ' ',
305
+ delimiter: '*',
306
+ postDelimiter: ' ',
307
+ type: '{string}',
308
+ }),
309
+ },
310
+ {
311
+ number: 4,
312
+ source: ' * foo',
313
+ tokens: seedTokens({
314
+ start: ' ',
315
+ delimiter: '*',
316
+ postDelimiter: ' ',
317
+ name: 'foo',
318
+ }),
319
+ },
320
+ {
321
+ number: 5,
322
+ source: ' * The foo.',
323
+ tokens: seedTokens({
324
+ start: ' ',
325
+ delimiter: '*',
326
+ postDelimiter: ' ',
327
+ description: 'The foo.',
328
+ }),
329
+ },
330
+ {
331
+ number: 6,
332
+ source: ' */',
333
+ tokens: seedTokens({
334
+ start: ' ',
335
+ end: '*/',
336
+ }),
337
+ },
338
+ ],
339
+ },
340
+ ],
341
+ source: [
342
+ {
343
+ number: 1,
344
+ source: ' /**',
345
+ tokens: seedTokens({
346
+ delimiter: '/**',
347
+ start: ' ',
348
+ }),
349
+ },
350
+ {
351
+ number: 2,
352
+ source: ' * @param',
353
+ tokens: seedTokens({
354
+ start: ' ',
355
+ delimiter: '*',
356
+ postDelimiter: ' ',
357
+ tag: '@param',
358
+ }),
359
+ },
360
+ {
361
+ number: 3,
362
+ source: ' * {string}',
363
+ tokens: seedTokens({
364
+ start: ' ',
365
+ delimiter: '*',
366
+ postDelimiter: ' ',
367
+ type: '{string}',
368
+ }),
369
+ },
370
+ {
371
+ number: 4,
372
+ source: ' * foo',
373
+ tokens: seedTokens({
374
+ start: ' ',
375
+ delimiter: '*',
376
+ postDelimiter: ' ',
377
+ name: 'foo',
378
+ }),
379
+ },
380
+ {
381
+ number: 5,
382
+ source: ' * The foo.',
383
+ tokens: seedTokens({
384
+ start: ' ',
385
+ delimiter: '*',
386
+ postDelimiter: ' ',
387
+ description: 'The foo.',
388
+ }),
389
+ },
390
+ {
391
+ number: 6,
392
+ source: ' */',
393
+ tokens: seedTokens({
394
+ start: ' ',
395
+ end: '*/',
396
+ }),
397
+ },
398
+ ],
399
+ problems: [],
400
+ },
401
+ ]);
402
+ });
403
+
170
404
  test('no source cloning', () => {
171
405
  const parsed = getParser()(`
172
406
  /**