@yozora/tokenizer-html-inline 2.0.0-alpha.0 → 2.0.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -6
- package/lib/cjs/index.js +56 -48
- package/lib/esm/index.js +56 -50
- package/lib/types/index.d.ts +3 -1
- package/lib/types/match.d.ts +11 -0
- package/lib/types/parse.d.ts +3 -0
- package/lib/types/tokenizer.d.ts +4 -4
- package/lib/types/types.d.ts +2 -1
- package/package.json +6 -6
package/README.md
CHANGED
|
@@ -84,14 +84,14 @@ so you can use `YozoraParser` / `GfmExParser` / `GfmParser` directly.
|
|
|
84
84
|
registered in *YastParser* as a plugin-in before it can be used.
|
|
85
85
|
|
|
86
86
|
```typescript {4,9}
|
|
87
|
-
import {
|
|
87
|
+
import { DefaultParser } from '@yozora/core-parser'
|
|
88
88
|
import ParagraphTokenizer from '@yozora/tokenizer-paragraph'
|
|
89
89
|
import TextTokenizer from '@yozora/tokenizer-text'
|
|
90
90
|
import HtmlInlineTokenizer from '@yozora/tokenizer-html-inline'
|
|
91
91
|
|
|
92
|
-
const parser = new
|
|
93
|
-
.
|
|
94
|
-
.
|
|
92
|
+
const parser = new DefaultParser()
|
|
93
|
+
.useFallbackTokenizer(new ParagraphTokenizer())
|
|
94
|
+
.useFallbackTokenizer(new TextTokenizer())
|
|
95
95
|
.useTokenizer(new HtmlInlineTokenizer())
|
|
96
96
|
|
|
97
97
|
// parse source markdown content
|
|
@@ -229,7 +229,6 @@ Name | Type | Required | Default
|
|
|
229
229
|
[@yozora/tokenizer-link]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link#readme
|
|
230
230
|
[@yozora/tokenizer-link-reference]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link-reference#readme
|
|
231
231
|
[@yozora/tokenizer-list]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list#readme
|
|
232
|
-
[@yozora/tokenizer-list-item]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list-item#readme
|
|
233
232
|
[@yozora/tokenizer-math]: https://github.com/yozorajs/yozora/tree/main/tokenizers/math#readme
|
|
234
233
|
[@yozora/tokenizer-paragraph]: https://github.com/yozorajs/yozora/tree/main/tokenizers/paragraph#readme
|
|
235
234
|
[@yozora/tokenizer-setext-heading]: https://github.com/yozorajs/yozora/tree/main/tokenizers/setext-heading#readme
|
|
@@ -289,7 +288,6 @@ Name | Type | Required | Default
|
|
|
289
288
|
[doc-@yozora/tokenizer-definition]: https://yozora.guanghechen.com/docs/package/tokenizer-definition
|
|
290
289
|
[doc-@yozora/tokenizer-link-reference]: https://yozora.guanghechen.com/docs/package/tokenizer-link-reference
|
|
291
290
|
[doc-@yozora/tokenizer-list]: https://yozora.guanghechen.com/docs/package/tokenizer-list
|
|
292
|
-
[doc-@yozora/tokenizer-list-item]: https://yozora.guanghechen.com/docs/package/tokenizer-list-item
|
|
293
291
|
[doc-@yozora/tokenizer-math]: https://yozora.guanghechen.com/docs/package/tokenizer-math
|
|
294
292
|
[doc-@yozora/tokenizer-paragraph]: https://yozora.guanghechen.com/docs/package/tokenizer-paragraph
|
|
295
293
|
[doc-@yozora/tokenizer-setext-heading]: https://yozora.guanghechen.com/docs/package/tokenizer-setext-heading
|
package/lib/cjs/index.js
CHANGED
|
@@ -203,57 +203,37 @@ function eatHtmlInlineTokenOpenDelimiter(nodePoints, startIndex, endIndex) {
|
|
|
203
203
|
return delimiter;
|
|
204
204
|
}
|
|
205
205
|
|
|
206
|
-
const
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
if (
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
switch (c) {
|
|
228
|
-
case character.AsciiCodePoint.BACKSLASH:
|
|
229
|
-
i += 1;
|
|
230
|
-
break;
|
|
231
|
-
case character.AsciiCodePoint.OPEN_ANGLE: {
|
|
232
|
-
const delimiter = tryToEatDelimiter(nodePoints, i, endIndex);
|
|
233
|
-
if (delimiter != null)
|
|
234
|
-
return delimiter;
|
|
235
|
-
break;
|
|
236
|
-
}
|
|
237
|
-
}
|
|
206
|
+
const match = function (api) {
|
|
207
|
+
return {
|
|
208
|
+
findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
|
|
209
|
+
processSingleDelimiter,
|
|
210
|
+
};
|
|
211
|
+
function _findDelimiter(startIndex, endIndex) {
|
|
212
|
+
const nodePoints = api.getNodePoints();
|
|
213
|
+
for (let i = startIndex; i < endIndex; ++i) {
|
|
214
|
+
i = coreTokenizer.eatOptionalWhitespaces(nodePoints, i, endIndex);
|
|
215
|
+
if (i >= endIndex)
|
|
216
|
+
break;
|
|
217
|
+
const c = nodePoints[i].codePoint;
|
|
218
|
+
switch (c) {
|
|
219
|
+
case character.AsciiCodePoint.BACKSLASH:
|
|
220
|
+
i += 1;
|
|
221
|
+
break;
|
|
222
|
+
case character.AsciiCodePoint.OPEN_ANGLE: {
|
|
223
|
+
const delimiter = tryToEatDelimiter(nodePoints, i, endIndex);
|
|
224
|
+
if (delimiter != null)
|
|
225
|
+
return delimiter;
|
|
226
|
+
break;
|
|
238
227
|
}
|
|
239
|
-
return null;
|
|
240
|
-
}
|
|
241
|
-
function processSingleDelimiter(delimiter) {
|
|
242
|
-
const token = Object.assign(Object.assign({}, delimiter), { nodeType: ast.HtmlType });
|
|
243
|
-
return [token];
|
|
244
228
|
}
|
|
245
|
-
}
|
|
246
|
-
|
|
247
|
-
parse: token => {
|
|
248
|
-
const { startIndex, endIndex } = token;
|
|
249
|
-
const nodePoints = api.getNodePoints();
|
|
250
|
-
const value = character.calcStringFromNodePoints(nodePoints, startIndex, endIndex);
|
|
251
|
-
const result = { type: ast.HtmlType, value };
|
|
252
|
-
return result;
|
|
253
|
-
},
|
|
254
|
-
});
|
|
229
|
+
}
|
|
230
|
+
return null;
|
|
255
231
|
}
|
|
256
|
-
|
|
232
|
+
function processSingleDelimiter(delimiter) {
|
|
233
|
+
const token = Object.assign(Object.assign({}, delimiter), { nodeType: ast.HtmlType });
|
|
234
|
+
return [token];
|
|
235
|
+
}
|
|
236
|
+
};
|
|
257
237
|
function tryToEatDelimiter(nodePoints, startIndex, endIndex) {
|
|
258
238
|
let delimiter = null;
|
|
259
239
|
delimiter = eatHtmlInlineTokenOpenDelimiter(nodePoints, startIndex, endIndex);
|
|
@@ -275,6 +255,32 @@ function tryToEatDelimiter(nodePoints, startIndex, endIndex) {
|
|
|
275
255
|
return delimiter;
|
|
276
256
|
}
|
|
277
257
|
|
|
258
|
+
const parse = function (api) {
|
|
259
|
+
return {
|
|
260
|
+
parse: token => {
|
|
261
|
+
const { startIndex, endIndex } = token;
|
|
262
|
+
const nodePoints = api.getNodePoints();
|
|
263
|
+
const value = character.calcStringFromNodePoints(nodePoints, startIndex, endIndex);
|
|
264
|
+
const result = { type: ast.HtmlType, value };
|
|
265
|
+
return result;
|
|
266
|
+
},
|
|
267
|
+
};
|
|
268
|
+
};
|
|
269
|
+
|
|
270
|
+
const uniqueName = '@yozora/tokenizer-html-inline';
|
|
271
|
+
|
|
272
|
+
class HtmlInlineTokenizer extends coreTokenizer.BaseInlineTokenizer {
|
|
273
|
+
constructor(props = {}) {
|
|
274
|
+
var _a, _b;
|
|
275
|
+
super({
|
|
276
|
+
name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
|
|
277
|
+
priority: (_b = props.priority) !== null && _b !== void 0 ? _b : coreTokenizer.TokenizerPriority.ATOMIC,
|
|
278
|
+
});
|
|
279
|
+
this.match = match;
|
|
280
|
+
this.parse = parse;
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
|
|
278
284
|
exports.HtmlInlineTokenizer = HtmlInlineTokenizer;
|
|
279
285
|
exports.HtmlInlineTokenizerName = uniqueName;
|
|
280
286
|
exports["default"] = HtmlInlineTokenizer;
|
|
@@ -284,3 +290,5 @@ exports.eatHtmlInlineCommentDelimiter = eatHtmlInlineCommentDelimiter;
|
|
|
284
290
|
exports.eatHtmlInlineDeclarationDelimiter = eatHtmlInlineDeclarationDelimiter;
|
|
285
291
|
exports.eatHtmlInlineInstructionDelimiter = eatHtmlInlineInstructionDelimiter;
|
|
286
292
|
exports.eatHtmlInlineTokenOpenDelimiter = eatHtmlInlineTokenOpenDelimiter;
|
|
293
|
+
exports.htmlInlineMatch = match;
|
|
294
|
+
exports.htmlInlineParse = parse;
|
package/lib/esm/index.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { AsciiCodePoint, isAsciiUpperLetter, isWhitespaceCharacter, calcStringFromNodePoints } from '@yozora/character';
|
|
2
|
-
import { eatOptionalWhitespaces, BaseInlineTokenizer, TokenizerPriority } from '@yozora/core-tokenizer';
|
|
2
|
+
import { eatOptionalWhitespaces, genFindDelimiter, BaseInlineTokenizer, TokenizerPriority } from '@yozora/core-tokenizer';
|
|
3
3
|
import { eatHTMLTagName, eatHTMLAttribute } from '@yozora/tokenizer-html-block';
|
|
4
4
|
import { HtmlType } from '@yozora/ast';
|
|
5
5
|
|
|
@@ -199,57 +199,37 @@ function eatHtmlInlineTokenOpenDelimiter(nodePoints, startIndex, endIndex) {
|
|
|
199
199
|
return delimiter;
|
|
200
200
|
}
|
|
201
201
|
|
|
202
|
-
const
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
if (
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
switch (c) {
|
|
224
|
-
case AsciiCodePoint.BACKSLASH:
|
|
225
|
-
i += 1;
|
|
226
|
-
break;
|
|
227
|
-
case AsciiCodePoint.OPEN_ANGLE: {
|
|
228
|
-
const delimiter = tryToEatDelimiter(nodePoints, i, endIndex);
|
|
229
|
-
if (delimiter != null)
|
|
230
|
-
return delimiter;
|
|
231
|
-
break;
|
|
232
|
-
}
|
|
233
|
-
}
|
|
202
|
+
const match = function (api) {
|
|
203
|
+
return {
|
|
204
|
+
findDelimiter: () => genFindDelimiter(_findDelimiter),
|
|
205
|
+
processSingleDelimiter,
|
|
206
|
+
};
|
|
207
|
+
function _findDelimiter(startIndex, endIndex) {
|
|
208
|
+
const nodePoints = api.getNodePoints();
|
|
209
|
+
for (let i = startIndex; i < endIndex; ++i) {
|
|
210
|
+
i = eatOptionalWhitespaces(nodePoints, i, endIndex);
|
|
211
|
+
if (i >= endIndex)
|
|
212
|
+
break;
|
|
213
|
+
const c = nodePoints[i].codePoint;
|
|
214
|
+
switch (c) {
|
|
215
|
+
case AsciiCodePoint.BACKSLASH:
|
|
216
|
+
i += 1;
|
|
217
|
+
break;
|
|
218
|
+
case AsciiCodePoint.OPEN_ANGLE: {
|
|
219
|
+
const delimiter = tryToEatDelimiter(nodePoints, i, endIndex);
|
|
220
|
+
if (delimiter != null)
|
|
221
|
+
return delimiter;
|
|
222
|
+
break;
|
|
234
223
|
}
|
|
235
|
-
return null;
|
|
236
|
-
}
|
|
237
|
-
function processSingleDelimiter(delimiter) {
|
|
238
|
-
const token = Object.assign(Object.assign({}, delimiter), { nodeType: HtmlType });
|
|
239
|
-
return [token];
|
|
240
224
|
}
|
|
241
|
-
}
|
|
242
|
-
|
|
243
|
-
parse: token => {
|
|
244
|
-
const { startIndex, endIndex } = token;
|
|
245
|
-
const nodePoints = api.getNodePoints();
|
|
246
|
-
const value = calcStringFromNodePoints(nodePoints, startIndex, endIndex);
|
|
247
|
-
const result = { type: HtmlType, value };
|
|
248
|
-
return result;
|
|
249
|
-
},
|
|
250
|
-
});
|
|
225
|
+
}
|
|
226
|
+
return null;
|
|
251
227
|
}
|
|
252
|
-
|
|
228
|
+
function processSingleDelimiter(delimiter) {
|
|
229
|
+
const token = Object.assign(Object.assign({}, delimiter), { nodeType: HtmlType });
|
|
230
|
+
return [token];
|
|
231
|
+
}
|
|
232
|
+
};
|
|
253
233
|
function tryToEatDelimiter(nodePoints, startIndex, endIndex) {
|
|
254
234
|
let delimiter = null;
|
|
255
235
|
delimiter = eatHtmlInlineTokenOpenDelimiter(nodePoints, startIndex, endIndex);
|
|
@@ -271,4 +251,30 @@ function tryToEatDelimiter(nodePoints, startIndex, endIndex) {
|
|
|
271
251
|
return delimiter;
|
|
272
252
|
}
|
|
273
253
|
|
|
274
|
-
|
|
254
|
+
const parse = function (api) {
|
|
255
|
+
return {
|
|
256
|
+
parse: token => {
|
|
257
|
+
const { startIndex, endIndex } = token;
|
|
258
|
+
const nodePoints = api.getNodePoints();
|
|
259
|
+
const value = calcStringFromNodePoints(nodePoints, startIndex, endIndex);
|
|
260
|
+
const result = { type: HtmlType, value };
|
|
261
|
+
return result;
|
|
262
|
+
},
|
|
263
|
+
};
|
|
264
|
+
};
|
|
265
|
+
|
|
266
|
+
const uniqueName = '@yozora/tokenizer-html-inline';
|
|
267
|
+
|
|
268
|
+
class HtmlInlineTokenizer extends BaseInlineTokenizer {
|
|
269
|
+
constructor(props = {}) {
|
|
270
|
+
var _a, _b;
|
|
271
|
+
super({
|
|
272
|
+
name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
|
|
273
|
+
priority: (_b = props.priority) !== null && _b !== void 0 ? _b : TokenizerPriority.ATOMIC,
|
|
274
|
+
});
|
|
275
|
+
this.match = match;
|
|
276
|
+
this.parse = parse;
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
export { HtmlInlineTokenizer, uniqueName as HtmlInlineTokenizerName, HtmlInlineTokenizer as default, eatHtmlInlineCDataDelimiter, eatHtmlInlineClosingDelimiter, eatHtmlInlineCommentDelimiter, eatHtmlInlineDeclarationDelimiter, eatHtmlInlineInstructionDelimiter, eatHtmlInlineTokenOpenDelimiter, match as htmlInlineMatch, parse as htmlInlineParse };
|
package/lib/types/index.d.ts
CHANGED
|
@@ -4,6 +4,8 @@ export * from './util/comment';
|
|
|
4
4
|
export * from './util/declaration';
|
|
5
5
|
export * from './util/instruction';
|
|
6
6
|
export * from './util/open';
|
|
7
|
+
export { match as htmlInlineMatch } from './match';
|
|
8
|
+
export { parse as htmlInlineParse } from './parse';
|
|
7
9
|
export { HtmlInlineTokenizer, HtmlInlineTokenizer as default } from './tokenizer';
|
|
8
10
|
export { uniqueName as HtmlInlineTokenizerName } from './types';
|
|
9
|
-
export type { IToken as IHtmlInlineToken, ITokenizerProps as IHtmlInlineTokenizerProps, } from './types';
|
|
11
|
+
export type { IThis as IHtmlInlineHookContext, IToken as IHtmlInlineToken, ITokenizerProps as IHtmlInlineTokenizerProps, } from './types';
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { IMatchInlineHookCreator } from '@yozora/core-tokenizer';
|
|
2
|
+
import type { IDelimiter, IThis, IToken, T } from './types';
|
|
3
|
+
/**
|
|
4
|
+
* Text between '<' and '>' that looks like an HTML tag is parsed as a raw HTML
|
|
5
|
+
* tag and will be rendered in HTML without escaping. Tag and attribute names
|
|
6
|
+
* are not limited to current HTML tags, so custom tags (and even, say, DocBook
|
|
7
|
+
* tags) may be used.
|
|
8
|
+
*
|
|
9
|
+
* @see https://github.github.com/gfm/#raw-html
|
|
10
|
+
*/
|
|
11
|
+
export declare const match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
|
package/lib/types/tokenizer.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import type { IInlineTokenizer, IMatchInlineHookCreator, IParseInlineHookCreator } from '@yozora/core-tokenizer';
|
|
2
2
|
import { BaseInlineTokenizer } from '@yozora/core-tokenizer';
|
|
3
|
-
import type { IDelimiter, INode, IToken, ITokenizerProps, T } from './types';
|
|
3
|
+
import type { IDelimiter, INode, IThis, IToken, ITokenizerProps, T } from './types';
|
|
4
4
|
/**
|
|
5
5
|
* Lexical Analyzer for HtmlInline.
|
|
6
6
|
*
|
|
@@ -11,8 +11,8 @@ import type { IDelimiter, INode, IToken, ITokenizerProps, T } from './types';
|
|
|
11
11
|
*
|
|
12
12
|
* @see https://github.github.com/gfm/#raw-html
|
|
13
13
|
*/
|
|
14
|
-
export declare class HtmlInlineTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode> implements IInlineTokenizer<T, IDelimiter, IToken, INode> {
|
|
14
|
+
export declare class HtmlInlineTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode, IThis> implements IInlineTokenizer<T, IDelimiter, IToken, INode, IThis> {
|
|
15
15
|
constructor(props?: ITokenizerProps);
|
|
16
|
-
readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken>;
|
|
17
|
-
readonly parse: IParseInlineHookCreator<T, IToken, INode>;
|
|
16
|
+
readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
|
|
17
|
+
readonly parse: IParseInlineHookCreator<T, IToken, INode, IThis>;
|
|
18
18
|
}
|
package/lib/types/types.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import type { HtmlType, IHtml } from '@yozora/ast';
|
|
2
|
-
import type { IBaseInlineTokenizerProps, IPartialYastInlineToken } from '@yozora/core-tokenizer';
|
|
2
|
+
import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, ITokenizer } from '@yozora/core-tokenizer';
|
|
3
3
|
import type { IHtmlInlineCDataDelimiter, IHtmlInlineCDataTokenData } from './util/cdata';
|
|
4
4
|
import type { IHtmlInlineClosingDelimiter, IHtmlInlineClosingTokenData } from './util/closing';
|
|
5
5
|
import type { IHtmlInlineCommentDelimiter, IHtmlInlineCommentTokenData } from './util/comment';
|
|
@@ -19,4 +19,5 @@ export declare const uniqueName = "@yozora/tokenizer-html-inline";
|
|
|
19
19
|
*/
|
|
20
20
|
export declare type IToken = IPartialYastInlineToken<T> & (IHtmlInlineOpenTokenData | IHtmlInlineClosingTokenData | IHtmlInlineCommentTokenData | IHtmlInlineInstructionTokenData | IHtmlInlineDeclarationTokenData | IHtmlInlineCDataTokenData);
|
|
21
21
|
export declare type IDelimiter = IHtmlInlineOpenDelimiter | IHtmlInlineClosingDelimiter | IHtmlInlineCommentDelimiter | IHtmlInlineInstructionDelimiter | IHtmlInlineDeclarationDelimiter | IHtmlInlineCDataDelimiter;
|
|
22
|
+
export declare type IThis = ITokenizer;
|
|
22
23
|
export declare type ITokenizerProps = Partial<IBaseInlineTokenizerProps>;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@yozora/tokenizer-html-inline",
|
|
3
|
-
"version": "2.0.0-alpha.0",
|
|
3
|
+
"version": "2.0.0-alpha.1",
|
|
4
4
|
"author": {
|
|
5
5
|
"name": "guanghechen",
|
|
6
6
|
"url": "https://github.com/guanghechen/"
|
|
@@ -35,10 +35,10 @@
|
|
|
35
35
|
"test": "cross-env TS_NODE_FILES=true jest --config ../../jest.config.js --rootDir ."
|
|
36
36
|
},
|
|
37
37
|
"dependencies": {
|
|
38
|
-
"@yozora/ast": "^2.0.0-alpha.0",
|
|
39
|
-
"@yozora/character": "^2.0.0-alpha.0",
|
|
40
|
-
"@yozora/core-tokenizer": "^2.0.0-alpha.0",
|
|
41
|
-
"@yozora/tokenizer-html-block": "^2.0.0-alpha.0"
|
|
38
|
+
"@yozora/ast": "^2.0.0-alpha.1",
|
|
39
|
+
"@yozora/character": "^2.0.0-alpha.1",
|
|
40
|
+
"@yozora/core-tokenizer": "^2.0.0-alpha.1",
|
|
41
|
+
"@yozora/tokenizer-html-block": "^2.0.0-alpha.1"
|
|
42
42
|
},
|
|
43
|
-
"gitHead": "
|
|
43
|
+
"gitHead": "86202e1d2b03ccfc2ab030517d9d314f7aee7666"
|
|
44
44
|
}
|