comment-parser 1.3.0 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +3 -0
- package/browser/index.js +1 -4
- package/es6/parser/index.js +0 -3
- package/es6/parser/tokenizers/tag.d.ts +1 -1
- package/es6/parser/tokenizers/tag.js +1 -1
- package/lib/parser/index.cjs +0 -4
- package/lib/parser/index.cjs.map +1 -1
- package/lib/parser/tokenizers/tag.cjs +1 -1
- package/lib/parser/tokenizers/tag.cjs.map +1 -1
- package/lib/parser/tokenizers/tag.d.ts +1 -1
- package/package.json +2 -3
- package/src/parser/index.ts +0 -4
- package/src/parser/tokenizers/tag.ts +1 -1
- package/tests/e2e/parse.spec.js +0 -9
- package/tests/unit/parser.spec.ts +105 -5
- package/tests/unit/util.spec.ts +10 -0
package/CHANGELOG.md
CHANGED
package/browser/index.js
CHANGED
|
@@ -149,7 +149,7 @@ var CommentParser = (function (exports) {
|
|
|
149
149
|
}
|
|
150
150
|
|
|
151
151
|
/**
|
|
152
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
152
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
153
153
|
* and populates `spec.tag`
|
|
154
154
|
*/
|
|
155
155
|
function tagTokenizer() {
|
|
@@ -388,15 +388,12 @@ var CommentParser = (function (exports) {
|
|
|
388
388
|
const parseBlock = getParser$3({ fence });
|
|
389
389
|
const parseSpec = getParser$1({ tokenizers });
|
|
390
390
|
const joinDescription = getJoiner(spacing);
|
|
391
|
-
const notEmpty = (line) => line.tokens.description.trim() != '';
|
|
392
391
|
return function (source) {
|
|
393
392
|
const blocks = [];
|
|
394
393
|
for (const line of splitLines(source)) {
|
|
395
394
|
const lines = parseSource(line);
|
|
396
395
|
if (lines === null)
|
|
397
396
|
continue;
|
|
398
|
-
if (lines.find(notEmpty) === undefined)
|
|
399
|
-
continue;
|
|
400
397
|
const sections = parseBlock(lines);
|
|
401
398
|
const specs = sections.slice(1).map(parseSpec);
|
|
402
399
|
blocks.push({
|
package/es6/parser/index.js
CHANGED
|
@@ -19,15 +19,12 @@ export default function getParser({ startLine = 0, fence = '```', spacing = 'com
|
|
|
19
19
|
const parseBlock = blockParser({ fence });
|
|
20
20
|
const parseSpec = specParser({ tokenizers });
|
|
21
21
|
const joinDescription = getDescriptionJoiner(spacing);
|
|
22
|
-
const notEmpty = (line) => line.tokens.description.trim() != '';
|
|
23
22
|
return function (source) {
|
|
24
23
|
const blocks = [];
|
|
25
24
|
for (const line of splitLines(source)) {
|
|
26
25
|
const lines = parseSource(line);
|
|
27
26
|
if (lines === null)
|
|
28
27
|
continue;
|
|
29
|
-
if (lines.find(notEmpty) === undefined)
|
|
30
|
-
continue;
|
|
31
28
|
const sections = parseBlock(lines);
|
|
32
29
|
const specs = sections.slice(1).map(parseSpec);
|
|
33
30
|
blocks.push({
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { Tokenizer } from './index';
|
|
2
2
|
/**
|
|
3
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
3
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
4
4
|
* and populates `spec.tag`
|
|
5
5
|
*/
|
|
6
6
|
export default function tagTokenizer(): Tokenizer;
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
2
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
3
3
|
* and populates `spec.tag`
|
|
4
4
|
*/
|
|
5
5
|
export default function tagTokenizer() {
|
package/lib/parser/index.cjs
CHANGED
|
@@ -41,16 +41,12 @@ function getParser({
|
|
|
41
41
|
tokenizers
|
|
42
42
|
});
|
|
43
43
|
const joinDescription = description_1.getJoiner(spacing);
|
|
44
|
-
|
|
45
|
-
const notEmpty = line => line.tokens.description.trim() != '';
|
|
46
|
-
|
|
47
44
|
return function (source) {
|
|
48
45
|
const blocks = [];
|
|
49
46
|
|
|
50
47
|
for (const line of util_1.splitLines(source)) {
|
|
51
48
|
const lines = parseSource(line);
|
|
52
49
|
if (lines === null) continue;
|
|
53
|
-
if (lines.find(notEmpty) === undefined) continue;
|
|
54
50
|
const sections = parseBlock(lines);
|
|
55
51
|
const specs = sections.slice(1).map(parseSpec);
|
|
56
52
|
blocks.push({
|
package/lib/parser/index.cjs.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["index.js"],"names":["Object","defineProperty","exports","value","primitives_1","require","util_1","block_parser_1","source_parser_1","spec_parser_1","tag_1","type_1","name_1","description_1","getParser","startLine","fence","spacing","markers","Markers","tokenizers","default","Error","parseSource","parseBlock","parseSpec","joinDescription","getJoiner","
|
|
1
|
+
{"version":3,"sources":["index.js"],"names":["Object","defineProperty","exports","value","primitives_1","require","util_1","block_parser_1","source_parser_1","spec_parser_1","tag_1","type_1","name_1","description_1","getParser","startLine","fence","spacing","markers","Markers","tokenizers","default","Error","parseSource","parseBlock","parseSpec","joinDescription","getJoiner","source","blocks","line","splitLines","lines","sections","specs","slice","map","push","description","tags","problems","reduce","acc","spec","concat"],"mappings":"AAAA;;AACAA,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,YAA/B,EAA6C;AAAEC,EAAAA,KAAK,EAAE;AAAT,CAA7C;;AACA,MAAMC,YAAY,GAAGC,OAAH,qBAAlB;;AACA,MAAMC,MAAM,GAAGD,OAAH,eAAZ;;AACA,MAAME,cAAc,GAAGF,OAAH,sBAApB;;AACA,MAAMG,eAAe,GAAGH,OAAH,uBAArB;;AACA,MAAMI,aAAa,GAAGJ,OAAH,qBAAnB;;AACA,MAAMK,KAAK,GAAGL,OAAH,wBAAX;;AACA,MAAMM,MAAM,GAAGN,OAAH,yBAAZ;;AACA,MAAMO,MAAM,GAAGP,OAAH,yBAAZ;;AACA,MAAMQ,aAAa,GAAGR,OAAH,gCAAnB;;AACA,SAASS,SAAT,CAAmB;AAAEC,EAAAA,SAAS,GAAG,CAAd;AAAiBC,EAAAA,KAAK,GAAG,KAAzB;AAAgCC,EAAAA,OAAO,GAAG,SAA1C;AAAqDC,EAAAA,OAAO,GAAGd,YAAY,CAACe,OAA5E;AAAqFC,EAAAA,UAAU,GAAG,CACjHV,KAAK,CAACW,OAAN,EADiH,EAEjHV,MAAM,CAACU,OAAP,CAAeJ,OAAf,CAFiH,EAGjHL,MAAM,CAACS,OAAP,EAHiH,EAIjHR,aAAa,CAACQ,OAAd,CAAsBJ,OAAtB,CAJiH;AAAlG,IAKZ,EALP,EAKW;AACP,MAAIF,SAAS,GAAG,CAAZ,IAAiBA,SAAS,GAAG,CAAZ,GAAgB,CAArC,EACI,MAAM,IAAIO,KAAJ,CAAU,mBAAV,CAAN;AACJ,QAAMC,WAAW,GAAGf,eAAe,CAACa,OAAhB,CAAwB;AAAEN,IAAAA,SAAF;AAAaG,IAAAA;AAAb,GAAxB,CAApB;AACA,QAAMM,UAAU,GAAGjB,cAAc,CAACc,OAAf,CAAuB;AAAEL,IAAAA;AAAF,GAAvB,CAAnB;AACA,QAAMS,SAAS,GAAGhB,aAAa,CAACY,OAAd,CAAsB;AAAED,IAAAA;AAAF,GAAtB,CAAlB;AACA,QAAMM,eAAe,GAAGb,aAAa,CAACc,SAAd,CAAwBV,OAAxB,CAAxB;AACA,SAAO,UAAUW,MAAV,EAAkB;AACrB,UAAMC,MAAM,GAAG,EAAf;;AACA,SAAK,MAAMC,IAAX,IAAmBxB,MAAM,CAACyB,UAAP,CAAkBH,MAAlB,CAAnB,EAA8C;AAC1C,YAAMI,KAAK,GAAGT,WAAW,CAACO,IAAD,CAAzB;AACA,UAAIE,KAAK,KAAK,IAAd,EACI;AACJ,YAAMC,QAAQ,GAAGT,UAAU,CAACQ,KAAD,CAA3B;AACA,YAAME,KAAK,GAAGD,QAAQ,CAACE,KAAT,CAAe,CAAf,EAAkBC,GAAlB,CAAsBX,SAAtB,CAAd;AACAI,M
AAAA,MAAM,CAACQ,IAAP,CAAY;AACRC,QAAAA,WAAW,EAAEZ,eAAe,CAACO,QAAQ,CAAC,CAAD,CAAT,EAAcf,OAAd,CADpB;AAERqB,QAAAA,IAAI,EAAEL,KAFE;AAGRN,QAAAA,MAAM,EAAEI,KAHA;AAIRQ,QAAAA,QAAQ,EAAEN,KAAK,CAACO,MAAN,CAAa,CAACC,GAAD,EAAMC,IAAN,KAAeD,GAAG,CAACE,MAAJ,CAAWD,IAAI,CAACH,QAAhB,CAA5B,EAAuD,EAAvD;AAJF,OAAZ;AAMH;;AACD,WAAOX,MAAP;AACH,GAhBD;AAiBH;;AACD3B,OAAO,CAACmB,OAAR,GAAkBP,SAAlB","sourcesContent":["\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nconst primitives_1 = require(\"../primitives\");\nconst util_1 = require(\"../util\");\nconst block_parser_1 = require(\"./block-parser\");\nconst source_parser_1 = require(\"./source-parser\");\nconst spec_parser_1 = require(\"./spec-parser\");\nconst tag_1 = require(\"./tokenizers/tag\");\nconst type_1 = require(\"./tokenizers/type\");\nconst name_1 = require(\"./tokenizers/name\");\nconst description_1 = require(\"./tokenizers/description\");\nfunction getParser({ startLine = 0, fence = '```', spacing = 'compact', markers = primitives_1.Markers, tokenizers = [\n tag_1.default(),\n type_1.default(spacing),\n name_1.default(),\n description_1.default(spacing),\n], } = {}) {\n if (startLine < 0 || startLine % 1 > 0)\n throw new Error('Invalid startLine');\n const parseSource = source_parser_1.default({ startLine, markers });\n const parseBlock = block_parser_1.default({ fence });\n const parseSpec = spec_parser_1.default({ tokenizers });\n const joinDescription = description_1.getJoiner(spacing);\n return function (source) {\n const blocks = [];\n for (const line of util_1.splitLines(source)) {\n const lines = parseSource(line);\n if (lines === null)\n continue;\n const sections = parseBlock(lines);\n const specs = sections.slice(1).map(parseSpec);\n blocks.push({\n description: joinDescription(sections[0], markers),\n tags: specs,\n source: lines,\n problems: specs.reduce((acc, spec) => acc.concat(spec.problems), []),\n });\n }\n return blocks;\n };\n}\nexports.default = 
getParser;\n"],"file":"index.cjs"}
|
|
@@ -4,7 +4,7 @@ Object.defineProperty(exports, "__esModule", {
|
|
|
4
4
|
value: true
|
|
5
5
|
});
|
|
6
6
|
/**
|
|
7
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
7
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
8
8
|
* and populates `spec.tag`
|
|
9
9
|
*/
|
|
10
10
|
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["tag.js"],"names":["Object","defineProperty","exports","value","tagTokenizer","spec","tokens","source","match","description","problems","push","code","message","line","number","critical","tag","postTag","slice","length","default"],"mappings":"AAAA;;AACAA,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,YAA/B,EAA6C;AAAEC,EAAAA,KAAK,EAAE;AAAT,CAA7C;AACA;AACA;AACA;AACA;;AACA,SAASC,YAAT,GAAwB;AACpB,SAAQC,IAAD,IAAU;AACb,UAAM;AAAEC,MAAAA;AAAF,QAAaD,IAAI,CAACE,MAAL,CAAY,CAAZ,CAAnB;AACA,UAAMC,KAAK,GAAGF,MAAM,CAACG,WAAP,CAAmBD,KAAnB,CAAyB,kBAAzB,CAAd;;AACA,QAAIA,KAAK,KAAK,IAAd,EAAoB;AAChBH,MAAAA,IAAI,CAACK,QAAL,CAAcC,IAAd,CAAmB;AACfC,QAAAA,IAAI,EAAE,iBADS;AAEfC,QAAAA,OAAO,EAAE,kCAFM;AAGfC,QAAAA,IAAI,EAAET,IAAI,CAACE,MAAL,CAAY,CAAZ,EAAeQ,MAHN;AAIfC,QAAAA,QAAQ,EAAE;AAJK,OAAnB;AAMA,aAAOX,IAAP;AACH;;AACDC,IAAAA,MAAM,CAACW,GAAP,GAAaT,KAAK,CAAC,CAAD,CAAlB;AACAF,IAAAA,MAAM,CAACY,OAAP,GAAiBV,KAAK,CAAC,CAAD,CAAtB;AACAF,IAAAA,MAAM,CAACG,WAAP,GAAqBH,MAAM,CAACG,WAAP,CAAmBU,KAAnB,CAAyBX,KAAK,CAAC,CAAD,CAAL,CAASY,MAAlC,CAArB;AACAf,IAAAA,IAAI,CAACY,GAAL,GAAWT,KAAK,CAAC,CAAD,CAAhB;AACA,WAAOH,IAAP;AACH,GAjBD;AAkBH;;AACDH,OAAO,CAACmB,OAAR,GAAkBjB,YAAlB","sourcesContent":["\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\n/**\n * Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
1
|
+
{"version":3,"sources":["tag.js"],"names":["Object","defineProperty","exports","value","tagTokenizer","spec","tokens","source","match","description","problems","push","code","message","line","number","critical","tag","postTag","slice","length","default"],"mappings":"AAAA;;AACAA,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,YAA/B,EAA6C;AAAEC,EAAAA,KAAK,EAAE;AAAT,CAA7C;AACA;AACA;AACA;AACA;;AACA,SAASC,YAAT,GAAwB;AACpB,SAAQC,IAAD,IAAU;AACb,UAAM;AAAEC,MAAAA;AAAF,QAAaD,IAAI,CAACE,MAAL,CAAY,CAAZ,CAAnB;AACA,UAAMC,KAAK,GAAGF,MAAM,CAACG,WAAP,CAAmBD,KAAnB,CAAyB,kBAAzB,CAAd;;AACA,QAAIA,KAAK,KAAK,IAAd,EAAoB;AAChBH,MAAAA,IAAI,CAACK,QAAL,CAAcC,IAAd,CAAmB;AACfC,QAAAA,IAAI,EAAE,iBADS;AAEfC,QAAAA,OAAO,EAAE,kCAFM;AAGfC,QAAAA,IAAI,EAAET,IAAI,CAACE,MAAL,CAAY,CAAZ,EAAeQ,MAHN;AAIfC,QAAAA,QAAQ,EAAE;AAJK,OAAnB;AAMA,aAAOX,IAAP;AACH;;AACDC,IAAAA,MAAM,CAACW,GAAP,GAAaT,KAAK,CAAC,CAAD,CAAlB;AACAF,IAAAA,MAAM,CAACY,OAAP,GAAiBV,KAAK,CAAC,CAAD,CAAtB;AACAF,IAAAA,MAAM,CAACG,WAAP,GAAqBH,MAAM,CAACG,WAAP,CAAmBU,KAAnB,CAAyBX,KAAK,CAAC,CAAD,CAAL,CAASY,MAAlC,CAArB;AACAf,IAAAA,IAAI,CAACY,GAAL,GAAWT,KAAK,CAAC,CAAD,CAAhB;AACA,WAAOH,IAAP;AACH,GAjBD;AAkBH;;AACDH,OAAO,CAACmB,OAAR,GAAkBjB,YAAlB","sourcesContent":["\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\n/**\n * Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,\n * and populates `spec.tag`\n */\nfunction tagTokenizer() {\n return (spec) => {\n const { tokens } = spec.source[0];\n const match = tokens.description.match(/\\s*(@(\\S+))(\\s*)/);\n if (match === null) {\n spec.problems.push({\n code: 'spec:tag:prefix',\n message: 'tag should start with \"@\" symbol',\n line: spec.source[0].number,\n critical: true,\n });\n return spec;\n }\n tokens.tag = match[1];\n tokens.postTag = match[3];\n tokens.description = tokens.description.slice(match[0].length);\n spec.tag = match[2];\n return spec;\n };\n}\nexports.default = tagTokenizer;\n"],"file":"tag.cjs"}
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { Tokenizer } from './index';
|
|
2
2
|
/**
|
|
3
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
3
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
4
4
|
* and populates `spec.tag`
|
|
5
5
|
*/
|
|
6
6
|
export default function tagTokenizer(): Tokenizer;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "comment-parser",
|
|
3
|
-
"version": "1.3.0",
|
|
3
|
+
"version": "1.3.1",
|
|
4
4
|
"description": "Generic JSDoc-like comment parser",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "lib/index.cjs",
|
|
@@ -81,6 +81,5 @@
|
|
|
81
81
|
"bugs": {
|
|
82
82
|
"url": "https://github.com/syavorsky/comment-parser/issues"
|
|
83
83
|
},
|
|
84
|
-
"homepage": "https://github.com/syavorsky/comment-parser"
|
|
85
|
-
"dependencies": {}
|
|
84
|
+
"homepage": "https://github.com/syavorsky/comment-parser"
|
|
86
85
|
}
|
package/src/parser/index.ts
CHANGED
|
@@ -45,16 +45,12 @@ export default function getParser({
|
|
|
45
45
|
const parseSpec = specParser({ tokenizers });
|
|
46
46
|
const joinDescription = getDescriptionJoiner(spacing);
|
|
47
47
|
|
|
48
|
-
const notEmpty = (line: Line): boolean =>
|
|
49
|
-
line.tokens.description.trim() != '';
|
|
50
|
-
|
|
51
48
|
return function (source: string): Block[] {
|
|
52
49
|
const blocks: Block[] = [];
|
|
53
50
|
for (const line of splitLines(source)) {
|
|
54
51
|
const lines = parseSource(line);
|
|
55
52
|
|
|
56
53
|
if (lines === null) continue;
|
|
57
|
-
if (lines.find(notEmpty) === undefined) continue;
|
|
58
54
|
|
|
59
55
|
const sections = parseBlock(lines);
|
|
60
56
|
const specs = sections.slice(1).map(parseSpec);
|
|
@@ -2,7 +2,7 @@ import { Spec } from '../../primitives';
|
|
|
2
2
|
import { Tokenizer } from './index';
|
|
3
3
|
|
|
4
4
|
/**
|
|
5
|
-
* Splits the `@prefix` from remaining `Spec.lines[].token.
|
|
5
|
+
* Splits the `@prefix` from remaining `Spec.lines[].token.description` into the `tag` token,
|
|
6
6
|
* and populates `spec.tag`
|
|
7
7
|
*/
|
|
8
8
|
export default function tagTokenizer(): Tokenizer {
|
package/tests/e2e/parse.spec.js
CHANGED
|
@@ -480,15 +480,6 @@ test('description on the first line', () => {
|
|
|
480
480
|
]);
|
|
481
481
|
});
|
|
482
482
|
|
|
483
|
-
test('skip empty blocks', () => {
|
|
484
|
-
const parsed = parse(`
|
|
485
|
-
/**
|
|
486
|
-
*
|
|
487
|
-
*/
|
|
488
|
-
var a`);
|
|
489
|
-
expect(parsed).toHaveLength(0);
|
|
490
|
-
});
|
|
491
|
-
|
|
492
483
|
test('multiple blocks', () => {
|
|
493
484
|
const parsed = parse(`
|
|
494
485
|
/**
|
|
@@ -4,7 +4,7 @@ import { seedTokens } from '../../src/util';
|
|
|
4
4
|
test('block with tags', () => {
|
|
5
5
|
const parsed = getParser()(`
|
|
6
6
|
/**
|
|
7
|
-
* Description may go
|
|
7
|
+
* Description may go\x20
|
|
8
8
|
* over few lines followed by @tags
|
|
9
9
|
* @param {string} name name parameter
|
|
10
10
|
*
|
|
@@ -167,10 +167,10 @@ test('block with tags', () => {
|
|
|
167
167
|
]);
|
|
168
168
|
});
|
|
169
169
|
|
|
170
|
-
test('no source clonning', () => {
|
|
170
|
+
test('no source cloning', () => {
|
|
171
171
|
const parsed = getParser()(`
|
|
172
172
|
/**
|
|
173
|
-
* Description may go
|
|
173
|
+
* Description may go\x20
|
|
174
174
|
* over few lines followed by @tags
|
|
175
175
|
* @param {string} name name parameter
|
|
176
176
|
*
|
|
@@ -179,11 +179,111 @@ test('no source clonning', () => {
|
|
|
179
179
|
expect(parsed[0].tags[0].source[0] === parsed[0].source[3]).toBe(true);
|
|
180
180
|
});
|
|
181
181
|
|
|
182
|
+
test('empty multi-line block', () => {
|
|
183
|
+
const parsed = getParser()(`
|
|
184
|
+
/**
|
|
185
|
+
*
|
|
186
|
+
*/`);
|
|
187
|
+
expect(parsed).toEqual([
|
|
188
|
+
{
|
|
189
|
+
description: '',
|
|
190
|
+
tags: [],
|
|
191
|
+
source: [
|
|
192
|
+
{
|
|
193
|
+
number: 1,
|
|
194
|
+
source: ' /**',
|
|
195
|
+
tokens: {
|
|
196
|
+
delimiter: '/**',
|
|
197
|
+
description: '',
|
|
198
|
+
end: '',
|
|
199
|
+
lineEnd: '',
|
|
200
|
+
name: '',
|
|
201
|
+
postDelimiter: '',
|
|
202
|
+
postName: '',
|
|
203
|
+
postTag: '',
|
|
204
|
+
postType: '',
|
|
205
|
+
start: ' ',
|
|
206
|
+
tag: '',
|
|
207
|
+
type: '',
|
|
208
|
+
},
|
|
209
|
+
},
|
|
210
|
+
{
|
|
211
|
+
number: 2,
|
|
212
|
+
source: ' *',
|
|
213
|
+
tokens: {
|
|
214
|
+
delimiter: '*',
|
|
215
|
+
description: '',
|
|
216
|
+
end: '',
|
|
217
|
+
lineEnd: '',
|
|
218
|
+
name: '',
|
|
219
|
+
postDelimiter: '',
|
|
220
|
+
postName: '',
|
|
221
|
+
postTag: '',
|
|
222
|
+
postType: '',
|
|
223
|
+
start: ' ',
|
|
224
|
+
tag: '',
|
|
225
|
+
type: '',
|
|
226
|
+
},
|
|
227
|
+
},
|
|
228
|
+
{
|
|
229
|
+
number: 3,
|
|
230
|
+
source: ' */',
|
|
231
|
+
tokens: {
|
|
232
|
+
delimiter: '',
|
|
233
|
+
description: '',
|
|
234
|
+
end: '*/',
|
|
235
|
+
lineEnd: '',
|
|
236
|
+
name: '',
|
|
237
|
+
postDelimiter: '',
|
|
238
|
+
postName: '',
|
|
239
|
+
postTag: '',
|
|
240
|
+
postType: '',
|
|
241
|
+
start: ' ',
|
|
242
|
+
tag: '',
|
|
243
|
+
type: '',
|
|
244
|
+
},
|
|
245
|
+
},
|
|
246
|
+
],
|
|
247
|
+
problems: [],
|
|
248
|
+
},
|
|
249
|
+
]);
|
|
250
|
+
});
|
|
251
|
+
|
|
252
|
+
test('empty one-line block', () => {
|
|
253
|
+
const parsed = getParser()(`
|
|
254
|
+
/** */`);
|
|
255
|
+
expect(parsed).toEqual([
|
|
256
|
+
{
|
|
257
|
+
description: '',
|
|
258
|
+
tags: [],
|
|
259
|
+
source: [
|
|
260
|
+
{
|
|
261
|
+
number: 1,
|
|
262
|
+
source: ' /** */',
|
|
263
|
+
tokens: {
|
|
264
|
+
delimiter: '/**',
|
|
265
|
+
description: '',
|
|
266
|
+
end: '*/',
|
|
267
|
+
lineEnd: '',
|
|
268
|
+
name: '',
|
|
269
|
+
postDelimiter: ' ',
|
|
270
|
+
postName: '',
|
|
271
|
+
postTag: '',
|
|
272
|
+
postType: '',
|
|
273
|
+
start: ' ',
|
|
274
|
+
tag: '',
|
|
275
|
+
type: '',
|
|
276
|
+
},
|
|
277
|
+
},
|
|
278
|
+
],
|
|
279
|
+
problems: [],
|
|
280
|
+
},
|
|
281
|
+
]);
|
|
282
|
+
});
|
|
283
|
+
|
|
182
284
|
test.each([
|
|
183
|
-
['empty', '/**\n*\n*/'],
|
|
184
285
|
['one-star', '/*\n*\n*/'],
|
|
185
286
|
['three-star', '/***\n*\n*/'],
|
|
186
|
-
['empty one-liner', '/** */'],
|
|
187
287
|
['one-star oneliner', '/* */'],
|
|
188
288
|
['three-star oneliner', '/*** */'],
|
|
189
289
|
])('skip block - %s', (name, source) => {
|
package/tests/unit/util.spec.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import {
|
|
2
|
+
hasCR,
|
|
2
3
|
isSpace,
|
|
3
4
|
seedTokens,
|
|
4
5
|
seedBlock,
|
|
@@ -7,6 +8,15 @@ import {
|
|
|
7
8
|
seedSpec,
|
|
8
9
|
} from '../../src/util';
|
|
9
10
|
|
|
11
|
+
test.each([
|
|
12
|
+
['beginning', '\r to end', false],
|
|
13
|
+
['middle', 'has \r in middle', false],
|
|
14
|
+
['ending', 'only at end \r', true],
|
|
15
|
+
['none', 'no carriage returns', false],
|
|
16
|
+
])('carriage returns - %s', (name, source, boolResult) => {
|
|
17
|
+
expect(hasCR(source)).toEqual(boolResult);
|
|
18
|
+
});
|
|
19
|
+
|
|
10
20
|
test.each([
|
|
11
21
|
['win', 'a\r\nb\r\nc', ['a\r', 'b\r', 'c']],
|
|
12
22
|
['unix', 'a\nb\nc', ['a', 'b', 'c']],
|