@yozora/tokenizer-link 1.3.0 → 2.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -84,14 +84,14 @@ so you can use `YozoraParser` / `GfmExParser` / `GfmParser` directly.
84
84
  registered in *YastParser* as a plug-in before it can be used.
85
85
 
86
86
  ```typescript {4,9}
87
- import { DefaultYastParser } from '@yozora/core-parser'
87
+ import { DefaultParser } from '@yozora/core-parser'
88
88
  import ParagraphTokenizer from '@yozora/tokenizer-paragraph'
89
89
  import TextTokenizer from '@yozora/tokenizer-text'
90
90
  import LinkTokenizer from '@yozora/tokenizer-link'
91
91
 
92
- const parser = new DefaultYastParser()
93
- .useBlockFallbackTokenizer(new ParagraphTokenizer())
94
- .useInlineFallbackTokenizer(new TextTokenizer())
92
+ const parser = new DefaultParser()
93
+ .useFallbackTokenizer(new ParagraphTokenizer())
94
+ .useFallbackTokenizer(new TextTokenizer())
95
95
  .useTokenizer(new LinkTokenizer())
96
96
 
97
97
  // parse source markdown content
@@ -229,7 +229,6 @@ Name | Type | Required | Default
229
229
  [@yozora/tokenizer-link]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link#readme
230
230
  [@yozora/tokenizer-link-reference]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link-reference#readme
231
231
  [@yozora/tokenizer-list]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list#readme
232
- [@yozora/tokenizer-list-item]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list-item#readme
233
232
  [@yozora/tokenizer-math]: https://github.com/yozorajs/yozora/tree/main/tokenizers/math#readme
234
233
  [@yozora/tokenizer-paragraph]: https://github.com/yozorajs/yozora/tree/main/tokenizers/paragraph#readme
235
234
  [@yozora/tokenizer-setext-heading]: https://github.com/yozorajs/yozora/tree/main/tokenizers/setext-heading#readme
@@ -289,7 +288,6 @@ Name | Type | Required | Default
289
288
  [doc-@yozora/tokenizer-definition]: https://yozora.guanghechen.com/docs/package/tokenizer-definition
290
289
  [doc-@yozora/tokenizer-link-reference]: https://yozora.guanghechen.com/docs/package/tokenizer-link-reference
291
290
  [doc-@yozora/tokenizer-list]: https://yozora.guanghechen.com/docs/package/tokenizer-list
292
- [doc-@yozora/tokenizer-list-item]: https://yozora.guanghechen.com/docs/package/tokenizer-list-item
293
291
  [doc-@yozora/tokenizer-math]: https://yozora.guanghechen.com/docs/package/tokenizer-math
294
292
  [doc-@yozora/tokenizer-paragraph]: https://yozora.guanghechen.com/docs/package/tokenizer-paragraph
295
293
  [doc-@yozora/tokenizer-setext-heading]: https://yozora.guanghechen.com/docs/package/tokenizer-setext-heading
package/lib/cjs/index.js CHANGED
@@ -2,11 +2,9 @@
2
2
 
3
3
  Object.defineProperty(exports, '__esModule', { value: true });
4
4
 
5
- var ast = require('@yozora/ast');
6
5
  var character = require('@yozora/character');
7
6
  var coreTokenizer = require('@yozora/core-tokenizer');
8
-
9
- const uniqueName = '@yozora/tokenizer-link';
7
+ var ast = require('@yozora/ast');
10
8
 
11
9
  const checkBalancedBracketsStatus = (startIndex, endIndex, internalTokens, nodePoints) => {
12
10
  let i = startIndex;
@@ -153,15 +151,13 @@ function eatLinkTitle(nodePoints, startIndex, endIndex) {
153
151
  return -1;
154
152
  }
155
153
 
156
- class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
157
- constructor(props = {}) {
158
- var _a, _b;
159
- super({
160
- name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
161
- priority: (_b = props.priority) !== null && _b !== void 0 ? _b : coreTokenizer.TokenizerPriority.LINKS,
162
- });
163
- }
164
- _findDelimiter(startIndex, endIndex, api) {
154
+ const match = function (api) {
155
+ return {
156
+ findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
157
+ isDelimiterPair,
158
+ processDelimiterPair,
159
+ };
160
+ function _findDelimiter(startIndex, endIndex) {
165
161
  const nodePoints = api.getNodePoints();
166
162
  const blockEndIndex = api.getBlockEndIndex();
167
163
  for (let i = startIndex; i < endIndex; ++i) {
@@ -179,8 +175,7 @@ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
179
175
  return delimiter;
180
176
  }
181
177
  case character.AsciiCodePoint.CLOSE_BRACKET: {
182
- if (i + 1 >= endIndex ||
183
- nodePoints[i + 1].codePoint !== character.AsciiCodePoint.OPEN_PARENTHESIS)
178
+ if (i + 1 >= endIndex || nodePoints[i + 1].codePoint !== character.AsciiCodePoint.OPEN_PARENTHESIS)
184
179
  break;
185
180
  const destinationStartIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
186
181
  const destinationEndIndex = eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
@@ -193,8 +188,7 @@ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
193
188
  const _startIndex = i;
194
189
  const _endIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
195
190
  if (_endIndex > blockEndIndex ||
196
- nodePoints[_endIndex - 1].codePoint !==
197
- character.AsciiCodePoint.CLOSE_PARENTHESIS)
191
+ nodePoints[_endIndex - 1].codePoint !== character.AsciiCodePoint.CLOSE_PARENTHESIS)
198
192
  break;
199
193
  return {
200
194
  type: 'closer',
@@ -215,7 +209,7 @@ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
215
209
  }
216
210
  return null;
217
211
  }
218
- isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens, api) {
212
+ function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
219
213
  const nodePoints = api.getNodePoints();
220
214
  const hasInternalLinkToken = internalTokens.find(coreTokenizer.isLinkToken) != null;
221
215
  if (hasInternalLinkToken) {
@@ -231,7 +225,7 @@ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
231
225
  return { paired: false, opener: true, closer: false };
232
226
  }
233
227
  }
234
- processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens, api) {
228
+ function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
235
229
  const children = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
236
230
  const token = {
237
231
  nodeType: ast.LinkType,
@@ -243,36 +237,53 @@ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
243
237
  };
244
238
  return { tokens: [token] };
245
239
  }
246
- parseInline(token, children, api) {
247
- const nodePoints = api.getNodePoints();
248
- let url = '';
249
- if (token.destinationContent != null) {
250
- let { startIndex, endIndex } = token.destinationContent;
251
- if (nodePoints[startIndex].codePoint === character.AsciiCodePoint.OPEN_ANGLE) {
252
- startIndex += 1;
253
- endIndex -= 1;
240
+ };
241
+
242
+ const parse = function (api) {
243
+ return {
244
+ parse: tokens => tokens.map(token => {
245
+ const nodePoints = api.getNodePoints();
246
+ let url = '';
247
+ if (token.destinationContent != null) {
248
+ let { startIndex, endIndex } = token.destinationContent;
249
+ if (nodePoints[startIndex].codePoint === character.AsciiCodePoint.OPEN_ANGLE) {
250
+ startIndex += 1;
251
+ endIndex -= 1;
252
+ }
253
+ const destination = character.calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
254
+ url = coreTokenizer.encodeLinkDestination(destination);
254
255
  }
255
- const destination = character.calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
256
- url = coreTokenizer.encodeLinkDestination(destination);
257
- }
258
- let title;
259
- if (token.titleContent != null) {
260
- const { startIndex, endIndex } = token.titleContent;
261
- title = character.calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
262
- }
263
- const result = {
264
- type: ast.LinkType,
265
- url,
266
- title,
267
- children,
268
- };
269
- return result;
256
+ let title;
257
+ if (token.titleContent != null) {
258
+ const { startIndex, endIndex } = token.titleContent;
259
+ title = character.calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
260
+ }
261
+ const children = api.parseInlineTokens(token.children);
262
+ const node = api.shouldReservePosition
263
+ ? { type: ast.LinkType, position: api.calcPosition(token), url, title, children }
264
+ : { type: ast.LinkType, url, title, children };
265
+ return node;
266
+ }),
267
+ };
268
+ };
269
+
270
+ const uniqueName = '@yozora/tokenizer-link';
271
+
272
+ class LinkTokenizer extends coreTokenizer.BaseInlineTokenizer {
273
+ constructor(props = {}) {
274
+ var _a, _b;
275
+ super({
276
+ name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
277
+ priority: (_b = props.priority) !== null && _b !== void 0 ? _b : coreTokenizer.TokenizerPriority.LINKS,
278
+ });
279
+ this.match = match;
280
+ this.parse = parse;
270
281
  }
271
282
  }
272
283
 
273
284
  exports.LinkTokenizer = LinkTokenizer;
274
285
  exports.LinkTokenizerName = uniqueName;
275
286
  exports.checkBalancedBracketsStatus = checkBalancedBracketsStatus;
276
- exports['default'] = LinkTokenizer;
287
+ exports["default"] = LinkTokenizer;
277
288
  exports.eatLinkDestination = eatLinkDestination;
278
289
  exports.eatLinkTitle = eatLinkTitle;
package/lib/esm/index.js CHANGED
@@ -1,8 +1,6 @@
1
- import { LinkType } from '@yozora/ast';
2
1
  import { AsciiCodePoint, isWhitespaceCharacter, isAsciiControlCharacter, VirtualCodePoint, calcEscapedStringFromNodePoints } from '@yozora/character';
3
- import { eatOptionalBlankLines, BaseInlineTokenizer, TokenizerPriority, eatOptionalWhitespaces, isLinkToken, encodeLinkDestination } from '@yozora/core-tokenizer';
4
-
5
- const uniqueName = '@yozora/tokenizer-link';
2
+ import { eatOptionalBlankLines, genFindDelimiter, eatOptionalWhitespaces, isLinkToken, encodeLinkDestination, BaseInlineTokenizer, TokenizerPriority } from '@yozora/core-tokenizer';
3
+ import { LinkType } from '@yozora/ast';
6
4
 
7
5
  const checkBalancedBracketsStatus = (startIndex, endIndex, internalTokens, nodePoints) => {
8
6
  let i = startIndex;
@@ -149,15 +147,13 @@ function eatLinkTitle(nodePoints, startIndex, endIndex) {
149
147
  return -1;
150
148
  }
151
149
 
152
- class LinkTokenizer extends BaseInlineTokenizer {
153
- constructor(props = {}) {
154
- var _a, _b;
155
- super({
156
- name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
157
- priority: (_b = props.priority) !== null && _b !== void 0 ? _b : TokenizerPriority.LINKS,
158
- });
159
- }
160
- _findDelimiter(startIndex, endIndex, api) {
150
+ const match = function (api) {
151
+ return {
152
+ findDelimiter: () => genFindDelimiter(_findDelimiter),
153
+ isDelimiterPair,
154
+ processDelimiterPair,
155
+ };
156
+ function _findDelimiter(startIndex, endIndex) {
161
157
  const nodePoints = api.getNodePoints();
162
158
  const blockEndIndex = api.getBlockEndIndex();
163
159
  for (let i = startIndex; i < endIndex; ++i) {
@@ -175,8 +171,7 @@ class LinkTokenizer extends BaseInlineTokenizer {
175
171
  return delimiter;
176
172
  }
177
173
  case AsciiCodePoint.CLOSE_BRACKET: {
178
- if (i + 1 >= endIndex ||
179
- nodePoints[i + 1].codePoint !== AsciiCodePoint.OPEN_PARENTHESIS)
174
+ if (i + 1 >= endIndex || nodePoints[i + 1].codePoint !== AsciiCodePoint.OPEN_PARENTHESIS)
180
175
  break;
181
176
  const destinationStartIndex = eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
182
177
  const destinationEndIndex = eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
@@ -189,8 +184,7 @@ class LinkTokenizer extends BaseInlineTokenizer {
189
184
  const _startIndex = i;
190
185
  const _endIndex = eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
191
186
  if (_endIndex > blockEndIndex ||
192
- nodePoints[_endIndex - 1].codePoint !==
193
- AsciiCodePoint.CLOSE_PARENTHESIS)
187
+ nodePoints[_endIndex - 1].codePoint !== AsciiCodePoint.CLOSE_PARENTHESIS)
194
188
  break;
195
189
  return {
196
190
  type: 'closer',
@@ -211,7 +205,7 @@ class LinkTokenizer extends BaseInlineTokenizer {
211
205
  }
212
206
  return null;
213
207
  }
214
- isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens, api) {
208
+ function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
215
209
  const nodePoints = api.getNodePoints();
216
210
  const hasInternalLinkToken = internalTokens.find(isLinkToken) != null;
217
211
  if (hasInternalLinkToken) {
@@ -227,7 +221,7 @@ class LinkTokenizer extends BaseInlineTokenizer {
227
221
  return { paired: false, opener: true, closer: false };
228
222
  }
229
223
  }
230
- processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens, api) {
224
+ function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
231
225
  const children = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
232
226
  const token = {
233
227
  nodeType: LinkType,
@@ -239,30 +233,47 @@ class LinkTokenizer extends BaseInlineTokenizer {
239
233
  };
240
234
  return { tokens: [token] };
241
235
  }
242
- parseInline(token, children, api) {
243
- const nodePoints = api.getNodePoints();
244
- let url = '';
245
- if (token.destinationContent != null) {
246
- let { startIndex, endIndex } = token.destinationContent;
247
- if (nodePoints[startIndex].codePoint === AsciiCodePoint.OPEN_ANGLE) {
248
- startIndex += 1;
249
- endIndex -= 1;
236
+ };
237
+
238
+ const parse = function (api) {
239
+ return {
240
+ parse: tokens => tokens.map(token => {
241
+ const nodePoints = api.getNodePoints();
242
+ let url = '';
243
+ if (token.destinationContent != null) {
244
+ let { startIndex, endIndex } = token.destinationContent;
245
+ if (nodePoints[startIndex].codePoint === AsciiCodePoint.OPEN_ANGLE) {
246
+ startIndex += 1;
247
+ endIndex -= 1;
248
+ }
249
+ const destination = calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
250
+ url = encodeLinkDestination(destination);
250
251
  }
251
- const destination = calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
252
- url = encodeLinkDestination(destination);
253
- }
254
- let title;
255
- if (token.titleContent != null) {
256
- const { startIndex, endIndex } = token.titleContent;
257
- title = calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
258
- }
259
- const result = {
260
- type: LinkType,
261
- url,
262
- title,
263
- children,
264
- };
265
- return result;
252
+ let title;
253
+ if (token.titleContent != null) {
254
+ const { startIndex, endIndex } = token.titleContent;
255
+ title = calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
256
+ }
257
+ const children = api.parseInlineTokens(token.children);
258
+ const node = api.shouldReservePosition
259
+ ? { type: LinkType, position: api.calcPosition(token), url, title, children }
260
+ : { type: LinkType, url, title, children };
261
+ return node;
262
+ }),
263
+ };
264
+ };
265
+
266
+ const uniqueName = '@yozora/tokenizer-link';
267
+
268
+ class LinkTokenizer extends BaseInlineTokenizer {
269
+ constructor(props = {}) {
270
+ var _a, _b;
271
+ super({
272
+ name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
273
+ priority: (_b = props.priority) !== null && _b !== void 0 ? _b : TokenizerPriority.LINKS,
274
+ });
275
+ this.match = match;
276
+ this.parse = parse;
266
277
  }
267
278
  }
268
279
 
@@ -1,8 +1,6 @@
1
- import { LinkTokenizer } from './tokenizer';
2
1
  export * from './util/check-brackets';
3
2
  export * from './util/link-destination';
4
3
  export * from './util/link-title';
5
- export { LinkTokenizer } from './tokenizer';
4
+ export { LinkTokenizer, LinkTokenizer as default } from './tokenizer';
6
5
  export { uniqueName as LinkTokenizerName } from './types';
7
- export type { Token as LinkToken, TokenizerProps as LinkTokenizerProps, } from './types';
8
- export default LinkTokenizer;
6
+ export type { IThis as ILinkHookContext, IToken as ILinkToken, ITokenizerProps as ILinkTokenizerProps, } from './types';
@@ -0,0 +1,31 @@
1
+ import type { IMatchInlineHookCreator } from '@yozora/core-tokenizer';
2
+ import type { IDelimiter, IThis, IToken, T } from './types';
3
+ /**
4
+ * An inline link consists of a link text followed immediately by a left
5
+ * parenthesis '(', optional whitespace, an optional link destination, an
6
+ * optional link title separated from the link destination by whitespace,
7
+ * optional whitespace, and a right parenthesis ')'. The link’s text consists
8
+ * of the inlines contained in the link text (excluding the enclosing square
9
+ * brackets).
10
+ * The link’s URI consists of the link destination, excluding enclosing '<...>'
11
+ * if present, with backslash-escapes in effect as described above. The link’s
12
+ * title consists of the link title, excluding its enclosing delimiters, with
13
+ * backslash-escapes in effect as described above.
14
+ *
15
+ * ------
16
+ *
17
+ * An 'opener' type delimiter is one of the following forms:
18
+ *
19
+ * - '['
20
+ *
21
+ * A 'closer' type delimiter is one of the following forms:
22
+ *
23
+ * - '](url)'
24
+ * - '](url "title")'
25
+ * - '](<url>)'
26
+ * - '](<url> "title")'
27
+ *
28
+ * @see https://github.com/syntax-tree/mdast#link
29
+ * @see https://github.github.com/gfm/#links
30
+ */
31
+ export declare const match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
@@ -0,0 +1,3 @@
1
+ import type { IParseInlineHookCreator } from '@yozora/core-tokenizer';
2
+ import type { INode, IThis, IToken, T } from './types';
3
+ export declare const parse: IParseInlineHookCreator<T, IToken, INode, IThis>;
@@ -1,63 +1,13 @@
1
- import type { YastNode } from '@yozora/ast';
2
- import type { MatchInlinePhaseApi, ParseInlinePhaseApi, ResultOfIsDelimiterPair, ResultOfProcessDelimiterPair, Tokenizer, TokenizerMatchInlineHook, TokenizerParseInlineHook, YastInlineToken } from '@yozora/core-tokenizer';
1
+ import type { IInlineTokenizer, IMatchInlineHookCreator, IParseInlineHookCreator } from '@yozora/core-tokenizer';
3
2
  import { BaseInlineTokenizer } from '@yozora/core-tokenizer';
4
- import type { Delimiter, Node, T, Token, TokenizerProps } from './types';
3
+ import type { IDelimiter, INode, IThis, IToken, ITokenizerProps, T } from './types';
5
4
  /**
6
5
  * Lexical Analyzer for InlineLink.
7
- *
8
- * An inline link consists of a link text followed immediately by a left
9
- * parenthesis '(', optional whitespace, an optional link destination, an
10
- * optional link title separated from the link destination by whitespace,
11
- * optional whitespace, and a right parenthesis ')'. The link’s text consists
12
- * of the inlines contained in the link text (excluding the enclosing square
13
- * brackets).
14
- * The link’s URI consists of the link destination, excluding enclosing '<...>'
15
- * if present, with backslash-escapes in effect as described above. The link’s
16
- * title consists of the link title, excluding its enclosing delimiters, with
17
- * backslash-escapes in effect as described above.
18
- *
19
- * ------
20
- *
21
- * A 'opener' type delimiter is one of the following forms:
22
- *
23
- * - '['
24
- *
25
- * A 'closer' type delimiter is one of the following forms:
26
- *
27
- * - '](url)'
28
- * - '](url "title")'
29
- * - '](<url>)'
30
- * - '](<url> "title")'
31
- *
32
6
  * @see https://github.com/syntax-tree/mdast#link
33
7
  * @see https://github.github.com/gfm/#links
34
8
  */
35
- export declare class LinkTokenizer extends BaseInlineTokenizer<Delimiter> implements Tokenizer, TokenizerMatchInlineHook<T, Delimiter, Token>, TokenizerParseInlineHook<T, Token, Node> {
36
- constructor(props?: TokenizerProps);
37
- /**
38
- * An inline link consists of a link text followed immediately by a left
39
- * parenthesis '(', optional whitespace, an optional link destination, an
40
- * optional link title separated from the link destination by whitespace,
41
- * optional whitespace, and a right parenthesis ')'
42
- * @see https://github.github.com/gfm/#inline-link
43
- *
44
- * @override
45
- * @see TokenizerMatchInlineHook
46
- */
47
- protected _findDelimiter(startIndex: number, endIndex: number, api: Readonly<MatchInlinePhaseApi>): Delimiter | null;
48
- /**
49
- * @override
50
- * @see TokenizerMatchInlineHook
51
- */
52
- isDelimiterPair(openerDelimiter: Delimiter, closerDelimiter: Delimiter, internalTokens: ReadonlyArray<YastInlineToken>, api: Readonly<MatchInlinePhaseApi>): ResultOfIsDelimiterPair;
53
- /**
54
- * @override
55
- * @see TokenizerMatchInlineHook
56
- */
57
- processDelimiterPair(openerDelimiter: Delimiter, closerDelimiter: Delimiter, internalTokens: ReadonlyArray<YastInlineToken>, api: Readonly<MatchInlinePhaseApi>): ResultOfProcessDelimiterPair<T, Token, Delimiter>;
58
- /**
59
- * @override
60
- * @see TokenizerParseInlineHook
61
- */
62
- parseInline(token: Token, children: YastNode[], api: Readonly<ParseInlinePhaseApi>): Node;
9
+ export declare class LinkTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode, IThis> implements IInlineTokenizer<T, IDelimiter, IToken, INode, IThis> {
10
+ constructor(props?: ITokenizerProps);
11
+ readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
12
+ readonly parse: IParseInlineHookCreator<T, IToken, INode, IThis>;
63
13
  }
@@ -1,31 +1,32 @@
1
1
  import type { Link, LinkType } from '@yozora/ast';
2
- import type { NodeInterval } from '@yozora/character';
3
- import type { BaseInlineTokenizerProps, PartialYastInlineToken, YastTokenDelimiter } from '@yozora/core-tokenizer';
2
+ import type { INodeInterval } from '@yozora/character';
3
+ import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, ITokenizer, IYastTokenDelimiter } from '@yozora/core-tokenizer';
4
4
  export declare type T = LinkType;
5
- export declare type Node = Link;
5
+ export declare type INode = Link;
6
6
  export declare const uniqueName = "@yozora/tokenizer-link";
7
- export interface Token extends PartialYastInlineToken<T> {
7
+ export interface IToken extends IPartialYastInlineToken<T> {
8
8
  /**
9
9
  * Link destination interval.
10
10
  */
11
- destinationContent?: NodeInterval;
11
+ destinationContent?: INodeInterval;
12
12
  /**
13
13
  * Link title interval.
14
14
  */
15
- titleContent?: NodeInterval;
15
+ titleContent?: INodeInterval;
16
16
  }
17
- export interface Delimiter extends YastTokenDelimiter {
17
+ export interface IDelimiter extends IYastTokenDelimiter {
18
18
  /**
19
- * Delimiter type.
19
+ * IDelimiter type.
20
20
  */
21
21
  type: 'opener' | 'closer';
22
22
  /**
23
23
  * Link destination interval.
24
24
  */
25
- destinationContent?: NodeInterval;
25
+ destinationContent?: INodeInterval;
26
26
  /**
27
27
  * Link title interval.
28
28
  */
29
- titleContent?: NodeInterval;
29
+ titleContent?: INodeInterval;
30
30
  }
31
- export declare type TokenizerProps = Partial<BaseInlineTokenizerProps>;
31
+ export declare type IThis = ITokenizer;
32
+ export declare type ITokenizerProps = Partial<IBaseInlineTokenizerProps>;
@@ -1,5 +1,5 @@
1
- import type { NodePoint } from '@yozora/character';
2
- import type { YastInlineToken } from '@yozora/core-tokenizer';
1
+ import type { INodePoint } from '@yozora/character';
2
+ import type { IYastInlineToken } from '@yozora/core-tokenizer';
3
3
  /**
4
4
  * The link text may contain balanced brackets, but not unbalanced ones,
5
5
  * unless they are escaped
@@ -9,4 +9,4 @@ import type { YastInlineToken } from '@yozora/core-tokenizer';
9
9
  * @see https://github.github.com/gfm/#example-522
10
10
  * @see https://github.github.com/gfm/#example-523
11
11
  */
12
- export declare const checkBalancedBracketsStatus: (startIndex: number, endIndex: number, internalTokens: ReadonlyArray<YastInlineToken>, nodePoints: ReadonlyArray<NodePoint>) => -1 | 0 | 1;
12
+ export declare const checkBalancedBracketsStatus: (startIndex: number, endIndex: number, internalTokens: ReadonlyArray<IYastInlineToken>, nodePoints: ReadonlyArray<INodePoint>) => -1 | 0 | 1;
@@ -1,4 +1,4 @@
1
- import type { NodePoint } from '@yozora/character';
1
+ import type { INodePoint } from '@yozora/character';
2
2
  /**
3
3
  * A link destination consists of either
4
4
  * - a sequence of zero or more characters between an opening '<' and a closing '>'
@@ -12,4 +12,4 @@ import type { NodePoint } from '@yozora/character';
12
12
  * @see https://github.github.com/gfm/#link-destination
13
13
  * @return position at next iteration
14
14
  */
15
- export declare function eatLinkDestination(nodePoints: ReadonlyArray<NodePoint>, startIndex: number, endIndex: number): number;
15
+ export declare function eatLinkDestination(nodePoints: ReadonlyArray<INodePoint>, startIndex: number, endIndex: number): number;
@@ -1,4 +1,4 @@
1
- import type { NodePoint } from '@yozora/character';
1
+ import type { INodePoint } from '@yozora/character';
2
2
  /**
3
3
  * A link title consists of either
4
4
  *
@@ -11,4 +11,4 @@ import type { NodePoint } from '@yozora/character';
11
11
  * - a sequence of zero or more characters between matching parentheses '(...)',
12
12
  * including a '(' or ')' character only if it is backslash-escaped.
13
13
  */
14
- export declare function eatLinkTitle(nodePoints: ReadonlyArray<NodePoint>, startIndex: number, endIndex: number): number;
14
+ export declare function eatLinkTitle(nodePoints: ReadonlyArray<INodePoint>, startIndex: number, endIndex: number): number;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@yozora/tokenizer-link",
3
- "version": "1.3.0",
3
+ "version": "2.0.0-alpha.3",
4
4
  "author": {
5
5
  "name": "guanghechen",
6
6
  "url": "https://github.com/guanghechen/"
@@ -35,9 +35,9 @@
35
35
  "test": "cross-env TS_NODE_FILES=true jest --config ../../jest.config.js --rootDir ."
36
36
  },
37
37
  "dependencies": {
38
- "@yozora/ast": "^1.3.0",
39
- "@yozora/character": "^1.3.0",
40
- "@yozora/core-tokenizer": "^1.3.0"
38
+ "@yozora/ast": "^2.0.0-alpha.3",
39
+ "@yozora/character": "^2.0.0-alpha.3",
40
+ "@yozora/core-tokenizer": "^2.0.0-alpha.3"
41
41
  },
42
- "gitHead": "18c9b167004ad97718b2f94f25139f80598cbf7a"
42
+ "gitHead": "9f274fc7487a8c1dd213405d92508f9a7621f730"
43
43
  }