@yozora/tokenizer-image 2.0.0-alpha.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -6
- package/lib/cjs/index.js +121 -110
- package/lib/esm/index.js +121 -112
- package/lib/types/index.d.ts +3 -1
- package/lib/types/match.d.ts +30 -0
- package/lib/types/parse.d.ts +3 -0
- package/lib/types/tokenizer.d.ts +4 -28
- package/lib/types/types.d.ts +4 -3
- package/lib/types/util.d.ts +2 -2
- package/package.json +6 -6
package/README.md
CHANGED
|
@@ -84,14 +84,14 @@ so you can use `YozoraParser` / `GfmExParser` / `GfmParser` directly.
|
|
|
84
84
|
registered in *YastParser* as a plugin-in before it can be used.
|
|
85
85
|
|
|
86
86
|
```typescript {4,9}
|
|
87
|
-
import {
|
|
87
|
+
import { DefaultParser } from '@yozora/core-parser'
|
|
88
88
|
import ParagraphTokenizer from '@yozora/tokenizer-paragraph'
|
|
89
89
|
import TextTokenizer from '@yozora/tokenizer-text'
|
|
90
90
|
import ImageTokenizer from '@yozora/tokenizer-image'
|
|
91
91
|
|
|
92
|
-
const parser = new
|
|
93
|
-
.
|
|
94
|
-
.
|
|
92
|
+
const parser = new DefaultParser()
|
|
93
|
+
.useFallbackTokenizer(new ParagraphTokenizer())
|
|
94
|
+
.useFallbackTokenizer(new TextTokenizer())
|
|
95
95
|
.useTokenizer(new ImageTokenizer())
|
|
96
96
|
|
|
97
97
|
// parse source markdown content
|
|
@@ -223,7 +223,6 @@ Name | Type | Required | Default
|
|
|
223
223
|
[@yozora/tokenizer-link]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link#readme
|
|
224
224
|
[@yozora/tokenizer-link-reference]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link-reference#readme
|
|
225
225
|
[@yozora/tokenizer-list]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list#readme
|
|
226
|
-
[@yozora/tokenizer-list-item]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list-item#readme
|
|
227
226
|
[@yozora/tokenizer-math]: https://github.com/yozorajs/yozora/tree/main/tokenizers/math#readme
|
|
228
227
|
[@yozora/tokenizer-paragraph]: https://github.com/yozorajs/yozora/tree/main/tokenizers/paragraph#readme
|
|
229
228
|
[@yozora/tokenizer-setext-heading]: https://github.com/yozorajs/yozora/tree/main/tokenizers/setext-heading#readme
|
|
@@ -283,7 +282,6 @@ Name | Type | Required | Default
|
|
|
283
282
|
[doc-@yozora/tokenizer-definition]: https://yozora.guanghechen.com/docs/package/tokenizer-definition
|
|
284
283
|
[doc-@yozora/tokenizer-link-reference]: https://yozora.guanghechen.com/docs/package/tokenizer-link-reference
|
|
285
284
|
[doc-@yozora/tokenizer-list]: https://yozora.guanghechen.com/docs/package/tokenizer-list
|
|
286
|
-
[doc-@yozora/tokenizer-list-item]: https://yozora.guanghechen.com/docs/package/tokenizer-list-item
|
|
287
285
|
[doc-@yozora/tokenizer-math]: https://yozora.guanghechen.com/docs/package/tokenizer-math
|
|
288
286
|
[doc-@yozora/tokenizer-paragraph]: https://yozora.guanghechen.com/docs/package/tokenizer-paragraph
|
|
289
287
|
[doc-@yozora/tokenizer-setext-heading]: https://yozora.guanghechen.com/docs/package/tokenizer-setext-heading
|
package/lib/cjs/index.js
CHANGED
|
@@ -21,6 +21,123 @@ function calcImageAlt(nodes) {
|
|
|
21
21
|
.join('');
|
|
22
22
|
}
|
|
23
23
|
|
|
24
|
+
const match = function (api) {
|
|
25
|
+
return {
|
|
26
|
+
findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
|
|
27
|
+
isDelimiterPair,
|
|
28
|
+
processDelimiterPair,
|
|
29
|
+
};
|
|
30
|
+
function _findDelimiter(startIndex, endIndex) {
|
|
31
|
+
const nodePoints = api.getNodePoints();
|
|
32
|
+
const blockEndIndex = api.getBlockEndIndex();
|
|
33
|
+
for (let i = startIndex; i < endIndex; ++i) {
|
|
34
|
+
const c = nodePoints[i].codePoint;
|
|
35
|
+
switch (c) {
|
|
36
|
+
case character.AsciiCodePoint.BACKSLASH:
|
|
37
|
+
i += 1;
|
|
38
|
+
break;
|
|
39
|
+
case character.AsciiCodePoint.EXCLAMATION_MARK: {
|
|
40
|
+
if (i + 1 < endIndex && nodePoints[i + 1].codePoint === character.AsciiCodePoint.OPEN_BRACKET) {
|
|
41
|
+
return {
|
|
42
|
+
type: 'opener',
|
|
43
|
+
startIndex: i,
|
|
44
|
+
endIndex: i + 2,
|
|
45
|
+
};
|
|
46
|
+
}
|
|
47
|
+
break;
|
|
48
|
+
}
|
|
49
|
+
case character.AsciiCodePoint.CLOSE_BRACKET: {
|
|
50
|
+
if (i + 1 >= endIndex ||
|
|
51
|
+
nodePoints[i + 1].codePoint !== character.AsciiCodePoint.OPEN_PARENTHESIS) {
|
|
52
|
+
break;
|
|
53
|
+
}
|
|
54
|
+
const destinationStartIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
|
|
55
|
+
const destinationEndIndex = tokenizerLink.eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
|
|
56
|
+
if (destinationEndIndex < 0)
|
|
57
|
+
break;
|
|
58
|
+
const titleStartIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, destinationEndIndex, blockEndIndex);
|
|
59
|
+
const titleEndIndex = tokenizerLink.eatLinkTitle(nodePoints, titleStartIndex, blockEndIndex);
|
|
60
|
+
if (titleEndIndex < 0)
|
|
61
|
+
break;
|
|
62
|
+
const _startIndex = i;
|
|
63
|
+
const _endIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
|
|
64
|
+
if (_endIndex > blockEndIndex ||
|
|
65
|
+
nodePoints[_endIndex - 1].codePoint !== character.AsciiCodePoint.CLOSE_PARENTHESIS) {
|
|
66
|
+
break;
|
|
67
|
+
}
|
|
68
|
+
return {
|
|
69
|
+
type: 'closer',
|
|
70
|
+
startIndex: _startIndex,
|
|
71
|
+
endIndex: _endIndex,
|
|
72
|
+
destinationContent: destinationStartIndex < destinationEndIndex
|
|
73
|
+
? {
|
|
74
|
+
startIndex: destinationStartIndex,
|
|
75
|
+
endIndex: destinationEndIndex,
|
|
76
|
+
}
|
|
77
|
+
: undefined,
|
|
78
|
+
titleContent: titleStartIndex < titleEndIndex
|
|
79
|
+
? { startIndex: titleStartIndex, endIndex: titleEndIndex }
|
|
80
|
+
: undefined,
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
return null;
|
|
86
|
+
}
|
|
87
|
+
function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
88
|
+
const nodePoints = api.getNodePoints();
|
|
89
|
+
const balancedBracketsStatus = tokenizerLink.checkBalancedBracketsStatus(openerDelimiter.endIndex, closerDelimiter.startIndex, internalTokens, nodePoints);
|
|
90
|
+
switch (balancedBracketsStatus) {
|
|
91
|
+
case -1:
|
|
92
|
+
return { paired: false, opener: false, closer: true };
|
|
93
|
+
case 0:
|
|
94
|
+
return { paired: true };
|
|
95
|
+
case 1:
|
|
96
|
+
return { paired: false, opener: true, closer: false };
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
100
|
+
const token = {
|
|
101
|
+
nodeType: ast.ImageType,
|
|
102
|
+
startIndex: openerDelimiter.startIndex,
|
|
103
|
+
endIndex: closerDelimiter.endIndex,
|
|
104
|
+
destinationContent: closerDelimiter.destinationContent,
|
|
105
|
+
titleContent: closerDelimiter.titleContent,
|
|
106
|
+
children: api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex),
|
|
107
|
+
};
|
|
108
|
+
return { tokens: [token] };
|
|
109
|
+
}
|
|
110
|
+
};
|
|
111
|
+
|
|
112
|
+
const parse = function (api) {
|
|
113
|
+
return {
|
|
114
|
+
parse: tokens => tokens.map(token => {
|
|
115
|
+
const nodePoints = api.getNodePoints();
|
|
116
|
+
let url = '';
|
|
117
|
+
if (token.destinationContent != null) {
|
|
118
|
+
let { startIndex, endIndex } = token.destinationContent;
|
|
119
|
+
if (nodePoints[startIndex].codePoint === character.AsciiCodePoint.OPEN_ANGLE) {
|
|
120
|
+
startIndex += 1;
|
|
121
|
+
endIndex -= 1;
|
|
122
|
+
}
|
|
123
|
+
const destination = character.calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
|
|
124
|
+
url = coreTokenizer.encodeLinkDestination(destination);
|
|
125
|
+
}
|
|
126
|
+
const children = api.parseInlineTokens(token.children);
|
|
127
|
+
const alt = calcImageAlt(children);
|
|
128
|
+
let title;
|
|
129
|
+
if (token.titleContent != null) {
|
|
130
|
+
const { startIndex, endIndex } = token.titleContent;
|
|
131
|
+
title = character.calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
|
|
132
|
+
}
|
|
133
|
+
const node = api.shouldReservePosition
|
|
134
|
+
? { type: ast.ImageType, position: api.calcPosition(token), url, alt, title }
|
|
135
|
+
: { type: ast.ImageType, url, alt, title };
|
|
136
|
+
return node;
|
|
137
|
+
}),
|
|
138
|
+
};
|
|
139
|
+
};
|
|
140
|
+
|
|
24
141
|
const uniqueName = '@yozora/tokenizer-image';
|
|
25
142
|
|
|
26
143
|
class ImageTokenizer extends coreTokenizer.BaseInlineTokenizer {
|
|
@@ -30,116 +147,8 @@ class ImageTokenizer extends coreTokenizer.BaseInlineTokenizer {
|
|
|
30
147
|
name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
|
|
31
148
|
priority: (_b = props.priority) !== null && _b !== void 0 ? _b : coreTokenizer.TokenizerPriority.LINKS,
|
|
32
149
|
});
|
|
33
|
-
this.match =
|
|
34
|
-
|
|
35
|
-
findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
|
|
36
|
-
isDelimiterPair,
|
|
37
|
-
processDelimiterPair,
|
|
38
|
-
};
|
|
39
|
-
function _findDelimiter(startIndex, endIndex) {
|
|
40
|
-
const nodePoints = api.getNodePoints();
|
|
41
|
-
const blockEndIndex = api.getBlockEndIndex();
|
|
42
|
-
for (let i = startIndex; i < endIndex; ++i) {
|
|
43
|
-
const c = nodePoints[i].codePoint;
|
|
44
|
-
switch (c) {
|
|
45
|
-
case character.AsciiCodePoint.BACKSLASH:
|
|
46
|
-
i += 1;
|
|
47
|
-
break;
|
|
48
|
-
case character.AsciiCodePoint.EXCLAMATION_MARK: {
|
|
49
|
-
if (i + 1 < endIndex && nodePoints[i + 1].codePoint === character.AsciiCodePoint.OPEN_BRACKET) {
|
|
50
|
-
return {
|
|
51
|
-
type: 'opener',
|
|
52
|
-
startIndex: i,
|
|
53
|
-
endIndex: i + 2,
|
|
54
|
-
};
|
|
55
|
-
}
|
|
56
|
-
break;
|
|
57
|
-
}
|
|
58
|
-
case character.AsciiCodePoint.CLOSE_BRACKET: {
|
|
59
|
-
if (i + 1 >= endIndex ||
|
|
60
|
-
nodePoints[i + 1].codePoint !== character.AsciiCodePoint.OPEN_PARENTHESIS) {
|
|
61
|
-
break;
|
|
62
|
-
}
|
|
63
|
-
const destinationStartIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
|
|
64
|
-
const destinationEndIndex = tokenizerLink.eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
|
|
65
|
-
if (destinationEndIndex < 0)
|
|
66
|
-
break;
|
|
67
|
-
const titleStartIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, destinationEndIndex, blockEndIndex);
|
|
68
|
-
const titleEndIndex = tokenizerLink.eatLinkTitle(nodePoints, titleStartIndex, blockEndIndex);
|
|
69
|
-
if (titleEndIndex < 0)
|
|
70
|
-
break;
|
|
71
|
-
const _startIndex = i;
|
|
72
|
-
const _endIndex = coreTokenizer.eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
|
|
73
|
-
if (_endIndex > blockEndIndex ||
|
|
74
|
-
nodePoints[_endIndex - 1].codePoint !== character.AsciiCodePoint.CLOSE_PARENTHESIS) {
|
|
75
|
-
break;
|
|
76
|
-
}
|
|
77
|
-
return {
|
|
78
|
-
type: 'closer',
|
|
79
|
-
startIndex: _startIndex,
|
|
80
|
-
endIndex: _endIndex,
|
|
81
|
-
destinationContent: destinationStartIndex < destinationEndIndex
|
|
82
|
-
? {
|
|
83
|
-
startIndex: destinationStartIndex,
|
|
84
|
-
endIndex: destinationEndIndex,
|
|
85
|
-
}
|
|
86
|
-
: undefined,
|
|
87
|
-
titleContent: titleStartIndex < titleEndIndex
|
|
88
|
-
? { startIndex: titleStartIndex, endIndex: titleEndIndex }
|
|
89
|
-
: undefined,
|
|
90
|
-
};
|
|
91
|
-
}
|
|
92
|
-
}
|
|
93
|
-
}
|
|
94
|
-
return null;
|
|
95
|
-
}
|
|
96
|
-
function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
97
|
-
const nodePoints = api.getNodePoints();
|
|
98
|
-
const balancedBracketsStatus = tokenizerLink.checkBalancedBracketsStatus(openerDelimiter.endIndex, closerDelimiter.startIndex, internalTokens, nodePoints);
|
|
99
|
-
switch (balancedBracketsStatus) {
|
|
100
|
-
case -1:
|
|
101
|
-
return { paired: false, opener: false, closer: true };
|
|
102
|
-
case 0:
|
|
103
|
-
return { paired: true };
|
|
104
|
-
case 1:
|
|
105
|
-
return { paired: false, opener: true, closer: false };
|
|
106
|
-
}
|
|
107
|
-
}
|
|
108
|
-
function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
109
|
-
const token = {
|
|
110
|
-
nodeType: ast.ImageType,
|
|
111
|
-
startIndex: openerDelimiter.startIndex,
|
|
112
|
-
endIndex: closerDelimiter.endIndex,
|
|
113
|
-
destinationContent: closerDelimiter.destinationContent,
|
|
114
|
-
titleContent: closerDelimiter.titleContent,
|
|
115
|
-
children: api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex),
|
|
116
|
-
};
|
|
117
|
-
return { tokens: [token] };
|
|
118
|
-
}
|
|
119
|
-
};
|
|
120
|
-
this.parse = api => ({
|
|
121
|
-
parse: (token, children) => {
|
|
122
|
-
const nodePoints = api.getNodePoints();
|
|
123
|
-
let url = '';
|
|
124
|
-
if (token.destinationContent != null) {
|
|
125
|
-
let { startIndex, endIndex } = token.destinationContent;
|
|
126
|
-
if (nodePoints[startIndex].codePoint === character.AsciiCodePoint.OPEN_ANGLE) {
|
|
127
|
-
startIndex += 1;
|
|
128
|
-
endIndex -= 1;
|
|
129
|
-
}
|
|
130
|
-
const destination = character.calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
|
|
131
|
-
url = coreTokenizer.encodeLinkDestination(destination);
|
|
132
|
-
}
|
|
133
|
-
const alt = calcImageAlt(children);
|
|
134
|
-
let title;
|
|
135
|
-
if (token.titleContent != null) {
|
|
136
|
-
const { startIndex, endIndex } = token.titleContent;
|
|
137
|
-
title = character.calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
|
|
138
|
-
}
|
|
139
|
-
const result = { type: ast.ImageType, url, alt, title };
|
|
140
|
-
return result;
|
|
141
|
-
},
|
|
142
|
-
});
|
|
150
|
+
this.match = match;
|
|
151
|
+
this.parse = parse;
|
|
143
152
|
}
|
|
144
153
|
}
|
|
145
154
|
|
|
@@ -147,3 +156,5 @@ exports.ImageTokenizer = ImageTokenizer;
|
|
|
147
156
|
exports.ImageTokenizerName = uniqueName;
|
|
148
157
|
exports.calcImageAlt = calcImageAlt;
|
|
149
158
|
exports["default"] = ImageTokenizer;
|
|
159
|
+
exports.imageMatch = match;
|
|
160
|
+
exports.imageParse = parse;
|
package/lib/esm/index.js
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { ImageType } from '@yozora/ast';
|
|
2
2
|
import { AsciiCodePoint, calcEscapedStringFromNodePoints } from '@yozora/character';
|
|
3
|
-
import {
|
|
3
|
+
import { genFindDelimiter, eatOptionalWhitespaces, encodeLinkDestination, BaseInlineTokenizer, TokenizerPriority } from '@yozora/core-tokenizer';
|
|
4
4
|
import { eatLinkDestination, eatLinkTitle, checkBalancedBracketsStatus } from '@yozora/tokenizer-link';
|
|
5
5
|
|
|
6
6
|
function calcImageAlt(nodes) {
|
|
@@ -17,6 +17,123 @@ function calcImageAlt(nodes) {
|
|
|
17
17
|
.join('');
|
|
18
18
|
}
|
|
19
19
|
|
|
20
|
+
const match = function (api) {
|
|
21
|
+
return {
|
|
22
|
+
findDelimiter: () => genFindDelimiter(_findDelimiter),
|
|
23
|
+
isDelimiterPair,
|
|
24
|
+
processDelimiterPair,
|
|
25
|
+
};
|
|
26
|
+
function _findDelimiter(startIndex, endIndex) {
|
|
27
|
+
const nodePoints = api.getNodePoints();
|
|
28
|
+
const blockEndIndex = api.getBlockEndIndex();
|
|
29
|
+
for (let i = startIndex; i < endIndex; ++i) {
|
|
30
|
+
const c = nodePoints[i].codePoint;
|
|
31
|
+
switch (c) {
|
|
32
|
+
case AsciiCodePoint.BACKSLASH:
|
|
33
|
+
i += 1;
|
|
34
|
+
break;
|
|
35
|
+
case AsciiCodePoint.EXCLAMATION_MARK: {
|
|
36
|
+
if (i + 1 < endIndex && nodePoints[i + 1].codePoint === AsciiCodePoint.OPEN_BRACKET) {
|
|
37
|
+
return {
|
|
38
|
+
type: 'opener',
|
|
39
|
+
startIndex: i,
|
|
40
|
+
endIndex: i + 2,
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
break;
|
|
44
|
+
}
|
|
45
|
+
case AsciiCodePoint.CLOSE_BRACKET: {
|
|
46
|
+
if (i + 1 >= endIndex ||
|
|
47
|
+
nodePoints[i + 1].codePoint !== AsciiCodePoint.OPEN_PARENTHESIS) {
|
|
48
|
+
break;
|
|
49
|
+
}
|
|
50
|
+
const destinationStartIndex = eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
|
|
51
|
+
const destinationEndIndex = eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
|
|
52
|
+
if (destinationEndIndex < 0)
|
|
53
|
+
break;
|
|
54
|
+
const titleStartIndex = eatOptionalWhitespaces(nodePoints, destinationEndIndex, blockEndIndex);
|
|
55
|
+
const titleEndIndex = eatLinkTitle(nodePoints, titleStartIndex, blockEndIndex);
|
|
56
|
+
if (titleEndIndex < 0)
|
|
57
|
+
break;
|
|
58
|
+
const _startIndex = i;
|
|
59
|
+
const _endIndex = eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
|
|
60
|
+
if (_endIndex > blockEndIndex ||
|
|
61
|
+
nodePoints[_endIndex - 1].codePoint !== AsciiCodePoint.CLOSE_PARENTHESIS) {
|
|
62
|
+
break;
|
|
63
|
+
}
|
|
64
|
+
return {
|
|
65
|
+
type: 'closer',
|
|
66
|
+
startIndex: _startIndex,
|
|
67
|
+
endIndex: _endIndex,
|
|
68
|
+
destinationContent: destinationStartIndex < destinationEndIndex
|
|
69
|
+
? {
|
|
70
|
+
startIndex: destinationStartIndex,
|
|
71
|
+
endIndex: destinationEndIndex,
|
|
72
|
+
}
|
|
73
|
+
: undefined,
|
|
74
|
+
titleContent: titleStartIndex < titleEndIndex
|
|
75
|
+
? { startIndex: titleStartIndex, endIndex: titleEndIndex }
|
|
76
|
+
: undefined,
|
|
77
|
+
};
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
return null;
|
|
82
|
+
}
|
|
83
|
+
function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
84
|
+
const nodePoints = api.getNodePoints();
|
|
85
|
+
const balancedBracketsStatus = checkBalancedBracketsStatus(openerDelimiter.endIndex, closerDelimiter.startIndex, internalTokens, nodePoints);
|
|
86
|
+
switch (balancedBracketsStatus) {
|
|
87
|
+
case -1:
|
|
88
|
+
return { paired: false, opener: false, closer: true };
|
|
89
|
+
case 0:
|
|
90
|
+
return { paired: true };
|
|
91
|
+
case 1:
|
|
92
|
+
return { paired: false, opener: true, closer: false };
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
96
|
+
const token = {
|
|
97
|
+
nodeType: ImageType,
|
|
98
|
+
startIndex: openerDelimiter.startIndex,
|
|
99
|
+
endIndex: closerDelimiter.endIndex,
|
|
100
|
+
destinationContent: closerDelimiter.destinationContent,
|
|
101
|
+
titleContent: closerDelimiter.titleContent,
|
|
102
|
+
children: api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex),
|
|
103
|
+
};
|
|
104
|
+
return { tokens: [token] };
|
|
105
|
+
}
|
|
106
|
+
};
|
|
107
|
+
|
|
108
|
+
const parse = function (api) {
|
|
109
|
+
return {
|
|
110
|
+
parse: tokens => tokens.map(token => {
|
|
111
|
+
const nodePoints = api.getNodePoints();
|
|
112
|
+
let url = '';
|
|
113
|
+
if (token.destinationContent != null) {
|
|
114
|
+
let { startIndex, endIndex } = token.destinationContent;
|
|
115
|
+
if (nodePoints[startIndex].codePoint === AsciiCodePoint.OPEN_ANGLE) {
|
|
116
|
+
startIndex += 1;
|
|
117
|
+
endIndex -= 1;
|
|
118
|
+
}
|
|
119
|
+
const destination = calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
|
|
120
|
+
url = encodeLinkDestination(destination);
|
|
121
|
+
}
|
|
122
|
+
const children = api.parseInlineTokens(token.children);
|
|
123
|
+
const alt = calcImageAlt(children);
|
|
124
|
+
let title;
|
|
125
|
+
if (token.titleContent != null) {
|
|
126
|
+
const { startIndex, endIndex } = token.titleContent;
|
|
127
|
+
title = calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
|
|
128
|
+
}
|
|
129
|
+
const node = api.shouldReservePosition
|
|
130
|
+
? { type: ImageType, position: api.calcPosition(token), url, alt, title }
|
|
131
|
+
: { type: ImageType, url, alt, title };
|
|
132
|
+
return node;
|
|
133
|
+
}),
|
|
134
|
+
};
|
|
135
|
+
};
|
|
136
|
+
|
|
20
137
|
const uniqueName = '@yozora/tokenizer-image';
|
|
21
138
|
|
|
22
139
|
class ImageTokenizer extends BaseInlineTokenizer {
|
|
@@ -26,117 +143,9 @@ class ImageTokenizer extends BaseInlineTokenizer {
|
|
|
26
143
|
name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
|
|
27
144
|
priority: (_b = props.priority) !== null && _b !== void 0 ? _b : TokenizerPriority.LINKS,
|
|
28
145
|
});
|
|
29
|
-
this.match =
|
|
30
|
-
|
|
31
|
-
findDelimiter: () => genFindDelimiter(_findDelimiter),
|
|
32
|
-
isDelimiterPair,
|
|
33
|
-
processDelimiterPair,
|
|
34
|
-
};
|
|
35
|
-
function _findDelimiter(startIndex, endIndex) {
|
|
36
|
-
const nodePoints = api.getNodePoints();
|
|
37
|
-
const blockEndIndex = api.getBlockEndIndex();
|
|
38
|
-
for (let i = startIndex; i < endIndex; ++i) {
|
|
39
|
-
const c = nodePoints[i].codePoint;
|
|
40
|
-
switch (c) {
|
|
41
|
-
case AsciiCodePoint.BACKSLASH:
|
|
42
|
-
i += 1;
|
|
43
|
-
break;
|
|
44
|
-
case AsciiCodePoint.EXCLAMATION_MARK: {
|
|
45
|
-
if (i + 1 < endIndex && nodePoints[i + 1].codePoint === AsciiCodePoint.OPEN_BRACKET) {
|
|
46
|
-
return {
|
|
47
|
-
type: 'opener',
|
|
48
|
-
startIndex: i,
|
|
49
|
-
endIndex: i + 2,
|
|
50
|
-
};
|
|
51
|
-
}
|
|
52
|
-
break;
|
|
53
|
-
}
|
|
54
|
-
case AsciiCodePoint.CLOSE_BRACKET: {
|
|
55
|
-
if (i + 1 >= endIndex ||
|
|
56
|
-
nodePoints[i + 1].codePoint !== AsciiCodePoint.OPEN_PARENTHESIS) {
|
|
57
|
-
break;
|
|
58
|
-
}
|
|
59
|
-
const destinationStartIndex = eatOptionalWhitespaces(nodePoints, i + 2, blockEndIndex);
|
|
60
|
-
const destinationEndIndex = eatLinkDestination(nodePoints, destinationStartIndex, blockEndIndex);
|
|
61
|
-
if (destinationEndIndex < 0)
|
|
62
|
-
break;
|
|
63
|
-
const titleStartIndex = eatOptionalWhitespaces(nodePoints, destinationEndIndex, blockEndIndex);
|
|
64
|
-
const titleEndIndex = eatLinkTitle(nodePoints, titleStartIndex, blockEndIndex);
|
|
65
|
-
if (titleEndIndex < 0)
|
|
66
|
-
break;
|
|
67
|
-
const _startIndex = i;
|
|
68
|
-
const _endIndex = eatOptionalWhitespaces(nodePoints, titleEndIndex, blockEndIndex) + 1;
|
|
69
|
-
if (_endIndex > blockEndIndex ||
|
|
70
|
-
nodePoints[_endIndex - 1].codePoint !== AsciiCodePoint.CLOSE_PARENTHESIS) {
|
|
71
|
-
break;
|
|
72
|
-
}
|
|
73
|
-
return {
|
|
74
|
-
type: 'closer',
|
|
75
|
-
startIndex: _startIndex,
|
|
76
|
-
endIndex: _endIndex,
|
|
77
|
-
destinationContent: destinationStartIndex < destinationEndIndex
|
|
78
|
-
? {
|
|
79
|
-
startIndex: destinationStartIndex,
|
|
80
|
-
endIndex: destinationEndIndex,
|
|
81
|
-
}
|
|
82
|
-
: undefined,
|
|
83
|
-
titleContent: titleStartIndex < titleEndIndex
|
|
84
|
-
? { startIndex: titleStartIndex, endIndex: titleEndIndex }
|
|
85
|
-
: undefined,
|
|
86
|
-
};
|
|
87
|
-
}
|
|
88
|
-
}
|
|
89
|
-
}
|
|
90
|
-
return null;
|
|
91
|
-
}
|
|
92
|
-
function isDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
93
|
-
const nodePoints = api.getNodePoints();
|
|
94
|
-
const balancedBracketsStatus = checkBalancedBracketsStatus(openerDelimiter.endIndex, closerDelimiter.startIndex, internalTokens, nodePoints);
|
|
95
|
-
switch (balancedBracketsStatus) {
|
|
96
|
-
case -1:
|
|
97
|
-
return { paired: false, opener: false, closer: true };
|
|
98
|
-
case 0:
|
|
99
|
-
return { paired: true };
|
|
100
|
-
case 1:
|
|
101
|
-
return { paired: false, opener: true, closer: false };
|
|
102
|
-
}
|
|
103
|
-
}
|
|
104
|
-
function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
|
|
105
|
-
const token = {
|
|
106
|
-
nodeType: ImageType,
|
|
107
|
-
startIndex: openerDelimiter.startIndex,
|
|
108
|
-
endIndex: closerDelimiter.endIndex,
|
|
109
|
-
destinationContent: closerDelimiter.destinationContent,
|
|
110
|
-
titleContent: closerDelimiter.titleContent,
|
|
111
|
-
children: api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex),
|
|
112
|
-
};
|
|
113
|
-
return { tokens: [token] };
|
|
114
|
-
}
|
|
115
|
-
};
|
|
116
|
-
this.parse = api => ({
|
|
117
|
-
parse: (token, children) => {
|
|
118
|
-
const nodePoints = api.getNodePoints();
|
|
119
|
-
let url = '';
|
|
120
|
-
if (token.destinationContent != null) {
|
|
121
|
-
let { startIndex, endIndex } = token.destinationContent;
|
|
122
|
-
if (nodePoints[startIndex].codePoint === AsciiCodePoint.OPEN_ANGLE) {
|
|
123
|
-
startIndex += 1;
|
|
124
|
-
endIndex -= 1;
|
|
125
|
-
}
|
|
126
|
-
const destination = calcEscapedStringFromNodePoints(nodePoints, startIndex, endIndex, true);
|
|
127
|
-
url = encodeLinkDestination(destination);
|
|
128
|
-
}
|
|
129
|
-
const alt = calcImageAlt(children);
|
|
130
|
-
let title;
|
|
131
|
-
if (token.titleContent != null) {
|
|
132
|
-
const { startIndex, endIndex } = token.titleContent;
|
|
133
|
-
title = calcEscapedStringFromNodePoints(nodePoints, startIndex + 1, endIndex - 1);
|
|
134
|
-
}
|
|
135
|
-
const result = { type: ImageType, url, alt, title };
|
|
136
|
-
return result;
|
|
137
|
-
},
|
|
138
|
-
});
|
|
146
|
+
this.match = match;
|
|
147
|
+
this.parse = parse;
|
|
139
148
|
}
|
|
140
149
|
}
|
|
141
150
|
|
|
142
|
-
export { ImageTokenizer, uniqueName as ImageTokenizerName, calcImageAlt, ImageTokenizer as default };
|
|
151
|
+
export { ImageTokenizer, uniqueName as ImageTokenizerName, calcImageAlt, ImageTokenizer as default, match as imageMatch, parse as imageParse };
|
package/lib/types/index.d.ts
CHANGED
|
@@ -1,4 +1,6 @@
|
|
|
1
1
|
export * from './util';
|
|
2
|
+
export { match as imageMatch } from './match';
|
|
3
|
+
export { parse as imageParse } from './parse';
|
|
2
4
|
export { ImageTokenizer, ImageTokenizer as default } from './tokenizer';
|
|
3
5
|
export { uniqueName as ImageTokenizerName } from './types';
|
|
4
|
-
export type { IToken as IImageToken, ITokenizerProps as IImageTokenizerProps } from './types';
|
|
6
|
+
export type { IThis as IImageHookContext, IToken as IImageToken, ITokenizerProps as IImageTokenizerProps, } from './types';
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import type { IMatchInlineHookCreator } from '@yozora/core-tokenizer';
|
|
2
|
+
import type { IDelimiter, IThis, IToken, T } from './types';
|
|
3
|
+
/**
|
|
4
|
+
* Syntax for images is like the syntax for links, with one difference.
|
|
5
|
+
* Instead of link text, we have an image description.
|
|
6
|
+
* The rules for this are the same as for link text, except that
|
|
7
|
+
*
|
|
8
|
+
* a) an image description starts with '![' rather than '[', and
|
|
23
|
+
* - '](url "title")'
|
|
24
|
+
* - '](<url>)'
|
|
25
|
+
* - '](<url> "title")'
|
|
26
|
+
*
|
|
27
|
+
* @see https://github.com/syntax-tree/mdast#image
|
|
28
|
+
* @see https://github.github.com/gfm/#images
|
|
29
|
+
*/
|
|
30
|
+
export declare const match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
|
package/lib/types/tokenizer.d.ts
CHANGED
|
@@ -1,37 +1,13 @@
|
|
|
1
1
|
import type { IInlineTokenizer, IMatchInlineHookCreator, IParseInlineHookCreator } from '@yozora/core-tokenizer';
|
|
2
2
|
import { BaseInlineTokenizer } from '@yozora/core-tokenizer';
|
|
3
|
-
import type { IDelimiter, INode, IToken, ITokenizerProps, T } from './types';
|
|
3
|
+
import type { IDelimiter, INode, IThis, IToken, ITokenizerProps, T } from './types';
|
|
4
4
|
/**
|
|
5
5
|
* Lexical Analyzer for InlineImage.
|
|
6
|
-
*
|
|
7
|
-
* Syntax for images is like the syntax for links, with one difference.
|
|
8
|
-
* Instead of link text, we have an image description.
|
|
9
|
-
* The rules for this are the same as for link text, except that
|
|
10
|
-
*
|
|
11
|
-
* a) an image description starts with '![' rather than '[', and
|
|
26
|
-
* - '](url "title")'
|
|
27
|
-
* - '](<url>)'
|
|
28
|
-
* - '](<url> "title")'
|
|
29
|
-
*
|
|
30
6
|
* @see https://github.com/syntax-tree/mdast#image
|
|
31
7
|
* @see https://github.github.com/gfm/#images
|
|
32
8
|
*/
|
|
33
|
-
export declare class ImageTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode> implements IInlineTokenizer<T, IDelimiter, IToken, INode> {
|
|
9
|
+
export declare class ImageTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode, IThis> implements IInlineTokenizer<T, IDelimiter, IToken, INode, IThis> {
|
|
34
10
|
constructor(props?: ITokenizerProps);
|
|
35
|
-
readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken>;
|
|
36
|
-
readonly parse: IParseInlineHookCreator<T, IToken, INode>;
|
|
11
|
+
readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
|
|
12
|
+
readonly parse: IParseInlineHookCreator<T, IToken, INode, IThis>;
|
|
37
13
|
}
|
package/lib/types/types.d.ts
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
import type {
|
|
1
|
+
import type { Image, ImageType } from '@yozora/ast';
|
|
2
2
|
import type { INodeInterval } from '@yozora/character';
|
|
3
|
-
import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, IYastTokenDelimiter } from '@yozora/core-tokenizer';
|
|
3
|
+
import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, ITokenizer, IYastTokenDelimiter } from '@yozora/core-tokenizer';
|
|
4
4
|
export declare type T = ImageType;
|
|
5
|
-
export declare type INode =
|
|
5
|
+
export declare type INode = Image;
|
|
6
6
|
export declare const uniqueName = "@yozora/tokenizer-image";
|
|
7
7
|
/**
|
|
8
8
|
* An image token.
|
|
@@ -31,4 +31,5 @@ export interface IDelimiter extends IYastTokenDelimiter {
|
|
|
31
31
|
*/
|
|
32
32
|
titleContent?: INodeInterval;
|
|
33
33
|
}
|
|
34
|
+
export declare type IThis = ITokenizer;
|
|
34
35
|
export declare type ITokenizerProps = Partial<IBaseInlineTokenizerProps>;
|
package/lib/types/util.d.ts
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
import type {
|
|
1
|
+
import type { Node } from '@yozora/ast';
|
|
2
2
|
/**
|
|
3
3
|
* calc alt
|
|
4
4
|
* An image description has inline elements as its contents. When an image
|
|
5
5
|
* is rendered to HTML, this is standardly used as the image’s alt attribute
|
|
6
6
|
* @see https://github.github.com/gfm/#example-582
|
|
7
7
|
*/
|
|
8
|
-
export declare function calcImageAlt(nodes: ReadonlyArray<
|
|
8
|
+
export declare function calcImageAlt(nodes: ReadonlyArray<Node>): string;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@yozora/tokenizer-image",
|
|
3
|
-
"version": "2.0.0-alpha.0",
|
|
3
|
+
"version": "2.0.0",
|
|
4
4
|
"author": {
|
|
5
5
|
"name": "guanghechen",
|
|
6
6
|
"url": "https://github.com/guanghechen/"
|
|
@@ -35,10 +35,10 @@
|
|
|
35
35
|
"test": "cross-env TS_NODE_FILES=true jest --config ../../jest.config.js --rootDir ."
|
|
36
36
|
},
|
|
37
37
|
"dependencies": {
|
|
38
|
-
"@yozora/ast": "^2.0.0-alpha.0",
|
|
39
|
-
"@yozora/character": "^2.0.0-alpha.0",
|
|
40
|
-
"@yozora/core-tokenizer": "^2.0.0-alpha.0",
|
|
41
|
-
"@yozora/tokenizer-link": "^2.0.0-alpha.0"
|
|
38
|
+
"@yozora/ast": "^2.0.0",
|
|
39
|
+
"@yozora/character": "^2.0.0",
|
|
40
|
+
"@yozora/core-tokenizer": "^2.0.0",
|
|
41
|
+
"@yozora/tokenizer-link": "^2.0.0"
|
|
42
42
|
},
|
|
43
|
-
"gitHead": "
|
|
43
|
+
"gitHead": "65e99d1709fdd1c918465dce6b1e91de96bdab5e"
|
|
44
44
|
}
|