comment-parser 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/CHANGELOG.md +54 -2
  2. package/es6/parser/tokenizers/name.js +11 -3
  3. package/es6/parser/tokenizers/tag.js +3 -0
  4. package/es6/parser/tokenizers/type.d.ts +1 -1
  5. package/es6/parser/tokenizers/type.js +15 -3
  6. package/es6/primitives.js +1 -1
  7. package/jest.config.cjs +5 -7
  8. package/package.json +14 -10
  9. package/src/parser/tokenizers/name.ts +13 -3
  10. package/src/parser/tokenizers/tag.ts +4 -0
  11. package/src/parser/tokenizers/type.ts +16 -3
  12. package/tests/e2e/examples.js +41 -0
  13. package/tests/unit/parser.spec.ts +234 -0
  14. package/tests/unit/spec-tag-tokenizer.spec.ts +30 -0
  15. package/tsconfig.node.json +0 -1
  16. package/browser/index.js +0 -650
  17. package/lib/index.cjs +0 -88
  18. package/lib/index.cjs.map +0 -1
  19. package/lib/index.d.ts +0 -32
  20. package/lib/parser/block-parser.cjs +0 -44
  21. package/lib/parser/block-parser.cjs.map +0 -1
  22. package/lib/parser/block-parser.d.ts +0 -24
  23. package/lib/parser/index.cjs +0 -65
  24. package/lib/parser/index.cjs.map +0 -1
  25. package/lib/parser/index.d.ts +0 -11
  26. package/lib/parser/source-parser.cjs +0 -68
  27. package/lib/parser/source-parser.cjs.map +0 -1
  28. package/lib/parser/source-parser.d.ts +0 -7
  29. package/lib/parser/spec-parser.cjs +0 -29
  30. package/lib/parser/spec-parser.cjs.map +0 -1
  31. package/lib/parser/spec-parser.d.ts +0 -7
  32. package/lib/parser/tokenizers/description.cjs +0 -62
  33. package/lib/parser/tokenizers/description.cjs.map +0 -1
  34. package/lib/parser/tokenizers/description.d.ts +0 -20
  35. package/lib/parser/tokenizers/index.cjs +0 -6
  36. package/lib/parser/tokenizers/index.cjs.map +0 -1
  37. package/lib/parser/tokenizers/index.d.ts +0 -7
  38. package/lib/parser/tokenizers/name.cjs +0 -109
  39. package/lib/parser/tokenizers/name.cjs.map +0 -1
  40. package/lib/parser/tokenizers/name.d.ts +0 -6
  41. package/lib/parser/tokenizers/tag.cjs +0 -37
  42. package/lib/parser/tokenizers/tag.cjs.map +0 -1
  43. package/lib/parser/tokenizers/tag.d.ts +0 -6
  44. package/lib/parser/tokenizers/type.cjs +0 -79
  45. package/lib/parser/tokenizers/type.cjs.map +0 -1
  46. package/lib/parser/tokenizers/type.d.ts +0 -27
  47. package/lib/primitives.cjs +0 -17
  48. package/lib/primitives.cjs.map +0 -1
  49. package/lib/primitives.d.ts +0 -54
  50. package/lib/stringifier/index.cjs +0 -18
  51. package/lib/stringifier/index.cjs.map +0 -1
  52. package/lib/stringifier/index.d.ts +0 -3
  53. package/lib/stringifier/inspect.cjs +0 -72
  54. package/lib/stringifier/inspect.cjs.map +0 -1
  55. package/lib/stringifier/inspect.d.ts +0 -2
  56. package/lib/transforms/align.cjs +0 -127
  57. package/lib/transforms/align.cjs.map +0 -1
  58. package/lib/transforms/align.d.ts +0 -3
  59. package/lib/transforms/crlf.cjs +0 -44
  60. package/lib/transforms/crlf.cjs.map +0 -1
  61. package/lib/transforms/crlf.d.ts +0 -3
  62. package/lib/transforms/indent.cjs +0 -58
  63. package/lib/transforms/indent.cjs.map +0 -1
  64. package/lib/transforms/indent.d.ts +0 -2
  65. package/lib/transforms/index.cjs +0 -13
  66. package/lib/transforms/index.cjs.map +0 -1
  67. package/lib/transforms/index.d.ts +0 -3
  68. package/lib/util.cjs +0 -113
  69. package/lib/util.cjs.map +0 -1
  70. package/lib/util.d.ts +0 -21
package/CHANGELOG.md CHANGED
@@ -1,59 +1,89 @@
+# v1.4.1
+
+## 1.4.2
+
+### Patch Changes
+
+- npm audit
+- ensure type, name and description on subsequent lines are properly parsed. @brettz9
+- ensure type, name and description on subsequent lines are properly parsed. @brettz9
+- fix .prettierignore
+- add source trasformation example
+
 # v1.4.0
+
 - ESM compatibility improvements; fixes #159, #161

 # v1.3.1
+
 - allow for valid empty jsdoc; fixes #128

 # v1.3.0
+
 - add support for custom block markers

 # v1.2.4
+
 - reverting engine constraint back to ^12.0.0

 # v1.2.3
+
 - publishing missing fix: point package's main to .cjs file

 # v1.2.2
+
 - re-export ./util on the top-level for compatibility with older Node
 - point package's main to .cjs file

 # v1.2.1
+
 - bump `engines` per `exports` issues in earlier Node versions

 # v1.2.0
+
 - keep and handle appropriately CR line endings

 # v1.1.6-beta.3
+
 - process CRs as a separate .lineEnd toke

 # v1.1.6-beta.2
+
 - ESM/CJS compatibility fixes

 # v1.1.6-beta.1
+
 - support native ESM

 # v1.1.6-beta.0
+
 - keep original CR line endings
 - allow to normalize line endings with `crlf` transform

 # v1.1.5
+
 - drop unused variables
 - add .editorconfig

 # v1.1.4
+
 - `bugfix` fix unsynced lib/

 # v1.1.3
+
 - export primitive type on the top level: Markers, Block, Spec, Line, Tokens, Problem

 # v1.1.2
+
 - `bugfix` Allow to build nested tags from `name.subname` even if `name` wasn't d
 - `bugfix` Preserve indentation when extracting comments

 # v1.1.1
+
 - add helpers for rewiring Spec.source <-> Spec.tags.source

 # v1.1.0
+
 - split tokenizers into separate modules
 - allow multiline {type} definitions - issue #109
 - allow using "=>" in [name=default] defaults – issue #112
@@ -61,75 +91,97 @@
 - add tokenizers usage example - issue #111

 # v1.0.0
+
 - complete rewrite in TS with more flexible API

 # v0.7.6
+
 - distinct non-critical errors by providing `err.warning`

 # v0.7.5
+
 - name parsing fixes

 # v0.7.4
+
 - node 8 backward compatibility fixes

 # v0.7.3
+
 - make stringify result more close to the source

 # v0.7.2
-- make stringify to start each line with * in multiline comments
+
+- make stringify to start each line with \* in multiline comments

 # v0.7.1
+
 - ensure non-space characters after asterisk are included in source

 # v0.7.0
+
 - allow fenced blocks in tag description, see opts.fence

 # v0.6.2
+
 - document TypeScript definitions

 # v0.6.1
+
 - adjust strigifier indentation

 # v0.6.0
+
 - soft-drop node@6 support
 - migrate to ES6 syntax
 - allow to generate comments out of parsed data

 # v0.5.5
+
 - allow loose tag names, e.g. @.tag, @-tag

 # v0.5.4
+
 - allow quoted literal names, e.g. `@tag "My Var" description`

 # v0.5.3
+
 - corrected TypeScript definitions

 # v0.5.2
+
 - added TypeScript definitions
 - removed `readable-stream` dependency

 # v0.5.1
+
 - Support for tab as separator between tag components.
 - Docs: Indicate when `optional` is `true`; `default` property

 # v0.5.0
+
 - line wrapping control with `opts.join`

 # v0.4.2
+
 - tolerate inconsistent lines alignment within block

 # v0.4.1
-- refactored parsing, allow to not start lines with "* " inside block
+
+- refactored parsing, allow to not start lines with "\* " inside block

 # v0.3.2
+
 - fix RegExp for `description` extraction to allow $ char

 # v0.3.1
+
 - use `readable-stream` fro Node 0.8 comatibility
 - allow to pass optional parameters to `parse.file(path [,opts], done)`
 - allow `parse.stream` to work with Buffers in addition to strings

 # v0.3.0
+
 - `feature` allow to use custom parsers
 - `feature` always include source, no `raw_value` option needed
 - `bugfix` always provide `optional` tag property
package/es6/parser/tokenizers/name.js CHANGED
@@ -7,9 +7,17 @@ const isQuoted = (s) => s && s.startsWith('"') && s.endsWith('"');
 export default function nameTokenizer() {
     const typeEnd = (num, { tokens }, i) => tokens.type === '' ? num : i;
     return (spec) => {
-        // look for the name in the line where {type} ends
-        const { tokens } = spec.source[spec.source.reduce(typeEnd, 0)];
-        const source = tokens.description.trimLeft();
+        // look for the name starting in the line where {type} ends
+        let finalTypeLine = spec.source.reduce(typeEnd, 0);
+        let tokens;
+        do {
+            ({ tokens } = spec.source[finalTypeLine]);
+            if (tokens.description.trim()) {
+                break;
+            }
+            finalTypeLine++;
+        } while (spec.source[finalTypeLine]);
+        const source = tokens.description.trimStart();
         const quotedGroups = source.split('"');
         // if it starts with quoted group, assume it is a literal
         if (quotedGroups.length > 1 &&
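The change above makes the name tokenizer keep scanning past the line where the `{type}` ends whenever that line has no remaining description, so a name placed on a following line is still picked up. A minimal sketch of the kind of comment this addresses, assuming the default `parse` export and default tokenizers (the input is hypothetical and the expected value is hedged):

```js
import { parse } from 'comment-parser';

// Hypothetical input: the {type} sits on the @param line, while the name
// and description continue on the next line of the same spec.
const [block] = parse(`
  /**
   * @param {string}
   * username the login name
   */
`);

// With the updated name tokenizer, the name should be read from the first
// subsequent line whose description is non-blank.
console.log(block.tags[0].name); // expected: "username"
```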
package/es6/parser/tokenizers/tag.js CHANGED
@@ -15,6 +15,9 @@ export default function tagTokenizer() {
            });
            return spec;
        }
+        if (match[1].includes('/')) {
+            return spec;
+        }
        tokens.tag = match[1];
        tokens.postTag = match[3];
        tokens.description = tokens.description.slice(match[0].length);
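The added guard returns the spec untouched when the captured tag name contains a slash, instead of claiming it as a tag. A small sketch of the check in isolation (this is not the library's code verbatim, and the slashed name below is just a hypothetical example):

```js
// Mirrors the intent of the new `match[1].includes('/')` check:
// a candidate tag name containing '/' is not accepted as a tag.
function isAcceptableTagName(candidate) {
  return !candidate.includes('/');
}

console.log(isAcceptableTagName('param'));     // true  -> tokenized as @param
console.log(isAcceptableTagName('unit/test')); // false -> spec returned unchanged
```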
package/es6/parser/tokenizers/type.d.ts CHANGED
@@ -18,7 +18,7 @@ export type Joiner = (parts: string[]) => string;
  */
 export type Spacing = 'compact' | 'preserve' | Joiner;
 /**
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
  * tokens and populates Spec.type`
  *
  * @param {Spacing} spacing tells how to deal with a whitespace
package/es6/parser/tokenizers/type.js CHANGED
@@ -1,6 +1,6 @@
 import { splitSpace } from '../../util.js';
 /**
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
  * tokens and populates Spec.type`
  *
  * @param {Spacing} spacing tells how to deal with a whitespace
@@ -11,10 +11,19 @@ export default function typeTokenizer(spacing = 'compact') {
     return (spec) => {
         let curlies = 0;
         let lines = [];
-        for (const [i, { tokens }] of spec.source.entries()) {
+        let descriptionBegun = false;
+        let firstTypeIteration = true;
+        for (const { tokens } of spec.source.values()) {
             let type = '';
-            if (i === 0 && tokens.description[0] !== '{')
+            if (!descriptionBegun && tokens.description.trim()) {
+                descriptionBegun = true;
+            }
+            else if (!descriptionBegun) {
+                continue;
+            }
+            if (firstTypeIteration && tokens.description[0] !== '{')
                 return spec;
+            firstTypeIteration = false;
             for (const ch of tokens.description) {
                 if (ch === '{')
                     curlies++;
@@ -28,6 +37,9 @@ export default function typeTokenizer(spacing = 'compact') {
            if (curlies === 0)
                break;
        }
+        if (!descriptionBegun) {
+            return spec;
+        }
        if (curlies !== 0) {
            spec.problems.push({
                code: 'spec:type:unpaired-curlies',
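This is the companion of the name tokenizer change: lines whose description is still blank are skipped before the "must start with `{`" check, so a `{type}` that begins on the line after the tag is still recognized. A hedged example, assuming the default tokenizers (input is hypothetical, expected values are not guaranteed output):

```js
import { parse } from 'comment-parser';

// Hypothetical input: type, name and description all sit on the line
// after the bare @param tag.
const [block] = parse(`
  /**
   * @param
   * {string} username the login name
   */
`);

// Previously the blank first description line made the type tokenizer
// bail out; with 1.4.1+ the subsequent line should be parsed.
console.log(block.tags[0].type); // expected: "string"
console.log(block.tags[0].name); // expected: "username"
```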
package/es6/primitives.js CHANGED
@@ -5,4 +5,4 @@ export var Markers;
     Markers["nostart"] = "/***";
     Markers["delim"] = "*";
     Markers["end"] = "*/";
-})(Markers = Markers || (Markers = {}));
+})(Markers || (Markers = {}));
package/jest.config.cjs CHANGED
@@ -1,3 +1,5 @@
+'use strict';
+
 // For a detailed explanation regarding each configuration property, visit:
 // https://jestjs.io/docs/en/configuration.html
 
@@ -6,12 +8,6 @@ const { compilerOptions: tsconfig } = JSON.parse(
 );
 
 module.exports = {
-  globals: {
-    'ts-jest': {
-      tsconfig,
-    },
-  },
-
   // All imported modules in your tests should be mocked automatically
   // automock: false,
 
@@ -186,7 +182,9 @@ module.exports = {
 
   // A map from regular expressions to paths to transformers
   transform: {
-    '^.+\\.ts$': 'ts-jest',
+    '^.+\\.ts$': ['ts-jest', {
+      tsconfig
+    }],
  },
 
  // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
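For context on the jest.config.cjs change: newer ts-jest releases deprecated the `globals['ts-jest']` configuration block in favor of passing options inline with the transform entry, which is what the diff switches to. A minimal sketch of the resulting shape, assuming the compiler options are read from a plain-JSON tsconfig file (the package itself may read a different path):

```js
'use strict';
const fs = require('fs');

// Read compilerOptions the same way the package's config does (sketch only).
const { compilerOptions: tsconfig } = JSON.parse(
  fs.readFileSync('./tsconfig.json', 'utf8')
);

module.exports = {
  // ts-jest 29+: transformer options ride along with the transform entry
  // instead of the deprecated `globals['ts-jest']` block.
  transform: {
    '^.+\\.ts$': ['ts-jest', { tsconfig }],
  },
};
```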
package/package.json CHANGED
@@ -1,9 +1,12 @@
 {
   "name": "comment-parser",
-  "version": "1.4.0",
+  "version": "1.4.2",
   "description": "Generic JSDoc-like comment parser",
   "type": "module",
   "main": "lib/index.cjs",
+  "publishConfig": {
+    "access": "public"
+  },
   "exports": {
     ".": {
       "import": "./es6/index.js",
@@ -35,14 +38,15 @@
     "test": "tests"
   },
   "devDependencies": {
-    "@types/jest": "^26.0.23",
+    "@changesets/cli": "^2.29.8",
+    "@types/jest": "^30.0.0",
     "convert-extension": "^0.3.0",
-    "jest": "^27.0.5",
-    "prettier": "2.3.1",
-    "rimraf": "^3.0.2",
-    "rollup": "^2.52.2",
-    "ts-jest": "^27.0.3",
-    "typescript": "^4.9.5"
+    "jest": "^30.1.3",
+    "prettier": "3.6.2",
+    "rimraf": "^6.0.1",
+    "rollup": "^4.52.0",
+    "ts-jest": "^29.4.4",
+    "typescript": "^5.9.2"
   },
   "engines": {
     "node": ">= 12.0.0"
@@ -56,7 +60,7 @@
   },
   "repository": {
     "type": "git",
-    "url": "git@github.com:yavorskiy/comment-parser.git"
+    "url": "git+ssh://git@github.com/yavorskiy/comment-parser.git"
   },
   "keywords": [
     "jsdoc",
@@ -81,4 +85,4 @@
     "url": "https://github.com/syavorsky/comment-parser/issues"
   },
   "homepage": "https://github.com/syavorsky/comment-parser"
-}
+}
package/src/parser/tokenizers/name.ts CHANGED
@@ -13,9 +13,19 @@ export default function nameTokenizer(): Tokenizer {
     tokens.type === '' ? num : i;
 
   return (spec: Spec): Spec => {
-    // look for the name in the line where {type} ends
-    const { tokens } = spec.source[spec.source.reduce(typeEnd, 0)];
-    const source = tokens.description.trimLeft();
+    // look for the name starting in the line where {type} ends
+    let finalTypeLine = spec.source.reduce(typeEnd, 0);
+
+    let tokens;
+    do {
+      ({ tokens } = spec.source[finalTypeLine]);
+      if (tokens.description.trim()) {
+        break;
+      }
+      finalTypeLine++;
+    } while (spec.source[finalTypeLine]);
+
+    const source = tokens.description.trimStart();
 
     const quotedGroups = source.split('"');
 
package/src/parser/tokenizers/tag.ts CHANGED
@@ -20,6 +20,10 @@ export default function tagTokenizer(): Tokenizer {
       return spec;
     }
 
+    if (match[1].includes('/')) {
+      return spec;
+    }
+
     tokens.tag = match[1];
     tokens.postTag = match[3];
     tokens.description = tokens.description.slice(match[0].length);
package/src/parser/tokenizers/type.ts CHANGED
@@ -23,7 +23,7 @@ export type Joiner = (parts: string[]) => string;
 export type Spacing = 'compact' | 'preserve' | Joiner;
 
 /**
- * Sets splits remaining `Spec.lines[].tokes.description` into `type` and `description`
+ * Sets splits remaining `Spec.lines[].tokens.description` into `type` and `description`
  * tokens and populates Spec.type`
  *
  * @param {Spacing} spacing tells how to deal with a whitespace
@@ -35,9 +35,18 @@ export default function typeTokenizer(spacing: Spacing = 'compact'): Tokenizer {
     let curlies = 0;
     let lines: [Tokens, string][] = [];
 
-    for (const [i, { tokens }] of spec.source.entries()) {
+    let descriptionBegun = false;
+    let firstTypeIteration = true;
+    for (const { tokens } of spec.source.values()) {
       let type = '';
-      if (i === 0 && tokens.description[0] !== '{') return spec;
+      if (!descriptionBegun && tokens.description.trim()) {
+        descriptionBegun = true;
+      } else if (!descriptionBegun) {
+        continue;
+      }
+
+      if (firstTypeIteration && tokens.description[0] !== '{') return spec;
+      firstTypeIteration = false;
 
       for (const ch of tokens.description) {
         if (ch === '{') curlies++;
@@ -50,6 +59,10 @@ export default function typeTokenizer(spacing: Spacing = 'compact'): Tokenizer {
       if (curlies === 0) break;
     }
 
+    if (!descriptionBegun) {
+      return spec;
+    }
+
     if (curlies !== 0) {
       spec.problems.push({
        code: 'spec:type:unpaired-curlies',
package/tests/e2e/examples.js CHANGED
@@ -192,6 +192,46 @@ function parse_advanced_parsing(source, parse, _, _, tokenizers) {
     .join('\n');
 }
 
+function stringify_rename(source, parse, stringify, transforms) {
+  // You can do any manipulations with the parsed result
+  // See how each block is being mapped. If you are updating a Block.source
+  // then rewireSource(block) should be called on each changed block.
+  // If changes were made to Block.tags[].source then call rewireSpecs(block).
+  // This example shows how you can "rename" @param tags: value1 -> value11, value2 -> value22
+
+  /**
+   * Description may go
+   * over multiple lines followed by @tags
+   * @param {string} name the name parameter
+   * @param {any} value1 first value parameter
+   * with a multipline description
+   * @param {any} value2 second value parameter
+   */
+
+  function renameParam(from, to) {
+    return (block) => {
+      for (const tag of block.tags) {
+        if (tag.tag === 'param' && tag.name === from) {
+          tag.name = to;
+          for (const line of tag.source) {
+            if (line.tokens.name === from) line.tokens.name = to;
+          }
+        }
+      }
+      return block;
+    };
+  }
+
+  const transform = transforms.flow(
+    renameParam('value1', 'value11'),
+    renameParam('value2', 'value22'),
+    stringify
+  );
+
+  const parsed = parse(source);
+  const stringified = parsed.map(transform);
+}
+
 (typeof window === 'undefined' ? module.exports : window).examples = [
   parse_defaults,
   parse_line_numbering,
@@ -200,4 +240,5 @@ function parse_advanced_parsing(source, parse, _, _, tokenizers) {
   parse_source_exploration,
   parse_advanced_parsing,
   stringify_formatting,
+  stringify_rename,
 ];
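The new `stringify_rename` example above rewrites `tag.name` together with the matching `tokens.name` in each tag's source lines before stringifying, so the printed comment stays in sync with the parsed data. A hedged standalone sketch of the same idea using the top-level exports; the `value1`/`value11` names simply mirror the example:

```js
import { parse, stringify } from 'comment-parser';

const source = `
/**
 * @param {any} value1 first value parameter
 */`;

const [block] = parse(source);

// Rename the @param both in the parsed tag and in its source tokens,
// then re-print the comment.
for (const tag of block.tags) {
  if (tag.tag === 'param' && tag.name === 'value1') {
    tag.name = 'value11';
    for (const line of tag.source) {
      if (line.tokens.name === 'value1') line.tokens.name = 'value11';
    }
  }
}

console.log(stringify(block)); // the @param line should now read "value11"
```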