@yozora/tokenizer-delete 2.0.0-alpha.0 → 2.0.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -6
- package/lib/cjs/index.js +67 -59
- package/lib/esm/index.js +67 -61
- package/lib/types/index.d.ts +3 -1
- package/lib/types/match.d.ts +9 -0
- package/lib/types/parse.d.ts +3 -0
- package/lib/types/tokenizer.d.ts +4 -7
- package/lib/types/types.d.ts +2 -1
- package/package.json +5 -5
package/README.md
CHANGED
@@ -84,14 +84,14 @@ so you can use `YozoraParser` / `GfmExParser` directly.
 registered in *YastParser* as a plugin-in before it can be used.
 
 ```typescript {4,9}
-import {
+import { DefaultParser } from '@yozora/core-parser'
 import ParagraphTokenizer from '@yozora/tokenizer-paragraph'
 import TextTokenizer from '@yozora/tokenizer-text'
 import DeleteTokenizer from '@yozora/tokenizer-delete'
 
-const parser = new
-.
-.
+const parser = new DefaultParser()
+  .useFallbackTokenizer(new ParagraphTokenizer())
+  .useFallbackTokenizer(new TextTokenizer())
   .useTokenizer(new DeleteTokenizer())
 
 // parse source markdown content
@@ -232,7 +232,6 @@ Name | Type | Required | Default
 [@yozora/tokenizer-link]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link#readme
 [@yozora/tokenizer-link-reference]: https://github.com/yozorajs/yozora/tree/main/tokenizers/link-reference#readme
 [@yozora/tokenizer-list]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list#readme
-[@yozora/tokenizer-list-item]: https://github.com/yozorajs/yozora/tree/main/tokenizers/list-item#readme
 [@yozora/tokenizer-math]: https://github.com/yozorajs/yozora/tree/main/tokenizers/math#readme
 [@yozora/tokenizer-paragraph]: https://github.com/yozorajs/yozora/tree/main/tokenizers/paragraph#readme
 [@yozora/tokenizer-setext-heading]: https://github.com/yozorajs/yozora/tree/main/tokenizers/setext-heading#readme
@@ -292,7 +291,6 @@ Name | Type | Required | Default
 [doc-@yozora/tokenizer-definition]: https://yozora.guanghechen.com/docs/package/tokenizer-definition
 [doc-@yozora/tokenizer-link-reference]: https://yozora.guanghechen.com/docs/package/tokenizer-link-reference
 [doc-@yozora/tokenizer-list]: https://yozora.guanghechen.com/docs/package/tokenizer-list
-[doc-@yozora/tokenizer-list-item]: https://yozora.guanghechen.com/docs/package/tokenizer-list-item
 [doc-@yozora/tokenizer-math]: https://yozora.guanghechen.com/docs/package/tokenizer-math
 [doc-@yozora/tokenizer-paragraph]: https://yozora.guanghechen.com/docs/package/tokenizer-paragraph
 [doc-@yozora/tokenizer-setext-heading]: https://yozora.guanghechen.com/docs/package/tokenizer-setext-heading
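The updated README snippet ends at the `// parse source markdown content` comment. For context, a hedged continuation (the `parse` call and the described output shape are assumptions based on typical yozora usage; they are not part of this diff):

```typescript
// Continuing the README example above (assumption: DefaultParser#parse
// accepts a markdown string and returns the syntax-tree root).
const ast = parser.parse('Some ~~deleted~~ text.')
// Expected: a paragraph whose children include a `delete` node wrapping a text child.
```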
package/lib/cjs/index.js
CHANGED
@@ -6,6 +6,69 @@ var ast = require('@yozora/ast');
 var character = require('@yozora/character');
 var coreTokenizer = require('@yozora/core-tokenizer');
 
+const match = function (api) {
+    return {
+        findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
+        processDelimiterPair,
+    };
+    function _findDelimiter(startIndex, endIndex) {
+        const nodePoints = api.getNodePoints();
+        for (let i = startIndex; i < endIndex; ++i) {
+            const c = nodePoints[i].codePoint;
+            switch (c) {
+                case character.AsciiCodePoint.BACKSLASH:
+                    i += 1;
+                    break;
+                case character.AsciiCodePoint.TILDE: {
+                    const _startIndex = i;
+                    i = coreTokenizer.eatOptionalCharacters(nodePoints, i + 1, endIndex, c) - 1;
+                    if (i - _startIndex !== 1)
+                        break;
+                    let delimiterType = 'both';
+                    const preceding = _startIndex === startIndex ? null : nodePoints[_startIndex - 1];
+                    if (preceding != null && character.isWhitespaceCharacter(preceding.codePoint)) {
+                        delimiterType = 'opener';
+                    }
+                    const following = i + 1 === endIndex ? null : nodePoints[i + 1];
+                    if (following != null && character.isWhitespaceCharacter(following.codePoint)) {
+                        if (delimiterType !== 'both')
+                            break;
+                        delimiterType = 'closer';
+                    }
+                    return {
+                        type: delimiterType,
+                        startIndex: _startIndex,
+                        endIndex: i + 1,
+                    };
+                }
+            }
+        }
+        return null;
+    }
+    function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
+        internalTokens = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
+        const token = {
+            nodeType: ast.DeleteType,
+            startIndex: openerDelimiter.startIndex,
+            endIndex: closerDelimiter.endIndex,
+            children: internalTokens,
+        };
+        return { tokens: [token] };
+    }
+};
+
+const parse = function () {
+    return {
+        parse: (_token, children) => {
+            const result = {
+                type: ast.DeleteType,
+                children,
+            };
+            return result;
+        },
+    };
+};
+
 const uniqueName = '@yozora/tokenizer-delete';
 
 class DeleteTokenizer extends coreTokenizer.BaseInlineTokenizer {
@@ -15,68 +78,13 @@ class DeleteTokenizer extends coreTokenizer.BaseInlineTokenizer {
             name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
             priority: (_b = props.priority) !== null && _b !== void 0 ? _b : coreTokenizer.TokenizerPriority.CONTAINING_INLINE,
         });
-        this.match =
-
-            findDelimiter: () => coreTokenizer.genFindDelimiter(_findDelimiter),
-            processDelimiterPair,
-        };
-        function _findDelimiter(startIndex, endIndex) {
-            const nodePoints = api.getNodePoints();
-            for (let i = startIndex; i < endIndex; ++i) {
-                const c = nodePoints[i].codePoint;
-                switch (c) {
-                    case character.AsciiCodePoint.BACKSLASH:
-                        i += 1;
-                        break;
-                    case character.AsciiCodePoint.TILDE: {
-                        const _startIndex = i;
-                        i = coreTokenizer.eatOptionalCharacters(nodePoints, i + 1, endIndex, c) - 1;
-                        if (i - _startIndex !== 1)
-                            break;
-                        let delimiterType = 'both';
-                        const preceding = _startIndex === startIndex ? null : nodePoints[_startIndex - 1];
-                        if (preceding != null && character.isWhitespaceCharacter(preceding.codePoint)) {
-                            delimiterType = 'opener';
-                        }
-                        const following = i + 1 === endIndex ? null : nodePoints[i + 1];
-                        if (following != null && character.isWhitespaceCharacter(following.codePoint)) {
-                            if (delimiterType !== 'both')
-                                break;
-                            delimiterType = 'closer';
-                        }
-                        return {
-                            type: delimiterType,
-                            startIndex: _startIndex,
-                            endIndex: i + 1,
-                        };
-                    }
-                }
-            }
-            return null;
-        }
-        function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
-            internalTokens = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
-            const token = {
-                nodeType: ast.DeleteType,
-                startIndex: openerDelimiter.startIndex,
-                endIndex: closerDelimiter.endIndex,
-                children: internalTokens,
-            };
-            return { tokens: [token] };
-        }
-        };
-        this.parse = () => ({
-            parse: (token, children) => {
-                const result = {
-                    type: ast.DeleteType,
-                    children,
-                };
-                return result;
-            },
-        });
+        this.match = match;
+        this.parse = parse;
     }
 }
 
 exports.DeleteTokenizer = DeleteTokenizer;
 exports.DeleteTokenizerName = uniqueName;
 exports["default"] = DeleteTokenizer;
+exports.deleteMatch = match;
+exports.deleteParse = parse;
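The newly hoisted `match` hook scans for runs of exactly two tildes and classifies each run by its surrounding whitespace. An illustrative, simplified restatement of that classification (operating on a plain string instead of yozora node points; not package code):

```typescript
// Simplified restatement of the delimiter classification in the hunk above.
type DelimiterType = 'opener' | 'closer' | 'both' | null

function classifyTildePair(text: string, index: number): DelimiterType {
  // Only runs of exactly two tildes qualify.
  if (text.slice(index, index + 2) !== '~~' || text[index + 2] === '~') return null
  const precededByWs = index > 0 && /\s/.test(text[index - 1])
  const followedByWs = index + 2 < text.length && /\s/.test(text[index + 2])
  if (precededByWs && followedByWs) return null // whitespace on both sides: not a delimiter
  if (precededByWs) return 'opener'
  if (followedByWs) return 'closer'
  return 'both'
}

classifyTildePair('a ~~b~~ c', 2) // 'opener'
classifyTildePair('a ~~b~~ c', 5) // 'closer'
classifyTildePair('a~~b~~c', 1)   // 'both'
```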
package/lib/esm/index.js
CHANGED
@@ -1,6 +1,69 @@
 import { DeleteType } from '@yozora/ast';
 import { AsciiCodePoint, isWhitespaceCharacter } from '@yozora/character';
-import {
+import { genFindDelimiter, eatOptionalCharacters, BaseInlineTokenizer, TokenizerPriority } from '@yozora/core-tokenizer';
+
+const match = function (api) {
+    return {
+        findDelimiter: () => genFindDelimiter(_findDelimiter),
+        processDelimiterPair,
+    };
+    function _findDelimiter(startIndex, endIndex) {
+        const nodePoints = api.getNodePoints();
+        for (let i = startIndex; i < endIndex; ++i) {
+            const c = nodePoints[i].codePoint;
+            switch (c) {
+                case AsciiCodePoint.BACKSLASH:
+                    i += 1;
+                    break;
+                case AsciiCodePoint.TILDE: {
+                    const _startIndex = i;
+                    i = eatOptionalCharacters(nodePoints, i + 1, endIndex, c) - 1;
+                    if (i - _startIndex !== 1)
+                        break;
+                    let delimiterType = 'both';
+                    const preceding = _startIndex === startIndex ? null : nodePoints[_startIndex - 1];
+                    if (preceding != null && isWhitespaceCharacter(preceding.codePoint)) {
+                        delimiterType = 'opener';
+                    }
+                    const following = i + 1 === endIndex ? null : nodePoints[i + 1];
+                    if (following != null && isWhitespaceCharacter(following.codePoint)) {
+                        if (delimiterType !== 'both')
+                            break;
+                        delimiterType = 'closer';
+                    }
+                    return {
+                        type: delimiterType,
+                        startIndex: _startIndex,
+                        endIndex: i + 1,
+                    };
+                }
+            }
+        }
+        return null;
+    }
+    function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
+        internalTokens = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
+        const token = {
+            nodeType: DeleteType,
+            startIndex: openerDelimiter.startIndex,
+            endIndex: closerDelimiter.endIndex,
+            children: internalTokens,
+        };
+        return { tokens: [token] };
+    }
+};
+
+const parse = function () {
+    return {
+        parse: (_token, children) => {
+            const result = {
+                type: DeleteType,
+                children,
+            };
+            return result;
+        },
+    };
+};
 
 const uniqueName = '@yozora/tokenizer-delete';
 
@@ -11,66 +74,9 @@ class DeleteTokenizer extends BaseInlineTokenizer {
             name: (_a = props.name) !== null && _a !== void 0 ? _a : uniqueName,
             priority: (_b = props.priority) !== null && _b !== void 0 ? _b : TokenizerPriority.CONTAINING_INLINE,
         });
-        this.match =
-
-            findDelimiter: () => genFindDelimiter(_findDelimiter),
-            processDelimiterPair,
-        };
-        function _findDelimiter(startIndex, endIndex) {
-            const nodePoints = api.getNodePoints();
-            for (let i = startIndex; i < endIndex; ++i) {
-                const c = nodePoints[i].codePoint;
-                switch (c) {
-                    case AsciiCodePoint.BACKSLASH:
-                        i += 1;
-                        break;
-                    case AsciiCodePoint.TILDE: {
-                        const _startIndex = i;
-                        i = eatOptionalCharacters(nodePoints, i + 1, endIndex, c) - 1;
-                        if (i - _startIndex !== 1)
-                            break;
-                        let delimiterType = 'both';
-                        const preceding = _startIndex === startIndex ? null : nodePoints[_startIndex - 1];
-                        if (preceding != null && isWhitespaceCharacter(preceding.codePoint)) {
-                            delimiterType = 'opener';
-                        }
-                        const following = i + 1 === endIndex ? null : nodePoints[i + 1];
-                        if (following != null && isWhitespaceCharacter(following.codePoint)) {
-                            if (delimiterType !== 'both')
-                                break;
-                            delimiterType = 'closer';
-                        }
-                        return {
-                            type: delimiterType,
-                            startIndex: _startIndex,
-                            endIndex: i + 1,
-                        };
-                    }
-                }
-            }
-            return null;
-        }
-        function processDelimiterPair(openerDelimiter, closerDelimiter, internalTokens) {
-            internalTokens = api.resolveInternalTokens(internalTokens, openerDelimiter.endIndex, closerDelimiter.startIndex);
-            const token = {
-                nodeType: DeleteType,
-                startIndex: openerDelimiter.startIndex,
-                endIndex: closerDelimiter.endIndex,
-                children: internalTokens,
-            };
-            return { tokens: [token] };
-        }
-        };
-        this.parse = () => ({
-            parse: (token, children) => {
-                const result = {
-                    type: DeleteType,
-                    children,
-                };
-                return result;
-            },
-        });
+        this.match = match;
+        this.parse = parse;
     }
 }
 
-export { DeleteTokenizer, uniqueName as DeleteTokenizerName, DeleteTokenizer as default };
+export { DeleteTokenizer, uniqueName as DeleteTokenizerName, DeleteTokenizer as default, match as deleteMatch, parse as deleteParse };
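Because both builds now export the hook creators by name, downstream code can confirm, or reuse, the exact hooks the tokenizer installs on itself. A minimal sketch against the ESM build (the custom `name` value is illustrative, not a package default):

```typescript
import { TokenizerPriority } from '@yozora/core-tokenizer'
import DeleteTokenizer, { deleteMatch, deleteParse } from '@yozora/tokenizer-delete'

// The instance hooks are the same module-level creators that are exported.
const tokenizer = new DeleteTokenizer({
  name: 'my-strikethrough', // illustrative override; defaults to '@yozora/tokenizer-delete'
  priority: TokenizerPriority.CONTAINING_INLINE,
})
console.log(tokenizer.match === deleteMatch) // true
console.log(tokenizer.parse === deleteParse) // true
```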
package/lib/types/index.d.ts
CHANGED
@@ -1,3 +1,5 @@
+export { match as deleteMatch } from './match';
+export { parse as deleteParse } from './parse';
 export { DeleteTokenizer, DeleteTokenizer as default } from './tokenizer';
 export { uniqueName as DeleteTokenizerName } from './types';
-export type { IToken as IDeleteToken, ITokenizerProps as IDeleteTokenizerProps } from './types';
+export type { IThis as IDeleteHookContext, IToken as IDeleteToken, ITokenizerProps as IDeleteTokenizerProps, } from './types';
package/lib/types/match.d.ts
ADDED
@@ -0,0 +1,9 @@
+import type { IMatchInlineHookCreator } from '@yozora/core-tokenizer';
+import type { IDelimiter, IThis, IToken, T } from './types';
+/**
+ * Strikethrough text is any text wrapped in two tildes (~).
+ *
+ * @see https://github.com/syntax-tree/mdast#delete
+ * @see https://github.github.com/gfm/#strikethrough-extension-
+ */
+export declare const match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
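`match.d.ts` pins the creator to `IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>`. A type-level sketch of what that signature expands to for consumers, using only types visible in this diff (the variable name is illustrative):

```typescript
import type { DeleteType } from '@yozora/ast'
import type { IMatchInlineHookCreator, IYastTokenDelimiter } from '@yozora/core-tokenizer'
import { deleteMatch } from '@yozora/tokenizer-delete'
import type { IDeleteHookContext, IDeleteToken } from '@yozora/tokenizer-delete'

// T = DeleteType, IDelimiter = IYastTokenDelimiter, IToken = IDeleteToken,
// IThis = IDeleteHookContext (per index.d.ts above and types.d.ts below).
const myMatch: IMatchInlineHookCreator<
  DeleteType,
  IYastTokenDelimiter,
  IDeleteToken,
  IDeleteHookContext
> = deleteMatch
```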
package/lib/types/tokenizer.d.ts
CHANGED
@@ -1,16 +1,13 @@
 import type { IInlineTokenizer, IMatchInlineHookCreator, IParseInlineHookCreator } from '@yozora/core-tokenizer';
 import { BaseInlineTokenizer } from '@yozora/core-tokenizer';
-import type { IDelimiter, INode, IToken, ITokenizerProps, T } from './types';
+import type { IDelimiter, INode, IThis, IToken, ITokenizerProps, T } from './types';
 /**
  * Lexical Analyzer for Delete.
- *
- * Strikethrough text is any text wrapped in two tildes (~).
- *
  * @see https://github.com/syntax-tree/mdast#delete
  * @see https://github.github.com/gfm/#strikethrough-extension-
  */
-export declare class DeleteTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode> implements IInlineTokenizer<T, IDelimiter, IToken, INode> {
+export declare class DeleteTokenizer extends BaseInlineTokenizer<T, IDelimiter, IToken, INode, IThis> implements IInlineTokenizer<T, IDelimiter, IToken, INode, IThis> {
     constructor(props?: ITokenizerProps);
-    readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken>;
-    readonly parse: IParseInlineHookCreator<T, IToken, INode>;
+    readonly match: IMatchInlineHookCreator<T, IDelimiter, IToken, IThis>;
+    readonly parse: IParseInlineHookCreator<T, IToken, INode, IThis>;
 }
package/lib/types/types.d.ts
CHANGED
@@ -1,8 +1,9 @@
 import type { DeleteType, IDelete } from '@yozora/ast';
-import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, IYastTokenDelimiter } from '@yozora/core-tokenizer';
+import type { IBaseInlineTokenizerProps, IPartialYastInlineToken, ITokenizer, IYastTokenDelimiter } from '@yozora/core-tokenizer';
 export declare type T = DeleteType;
 export declare type INode = IDelete;
 export declare const uniqueName = "@yozora/tokenizer-delete";
 export declare type IToken = IPartialYastInlineToken<T>;
 export declare type IDelimiter = IYastTokenDelimiter;
+export declare type IThis = ITokenizer;
 export declare type ITokenizerProps = Partial<IBaseInlineTokenizerProps>;
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@yozora/tokenizer-delete",
-  "version": "2.0.0-alpha.
+  "version": "2.0.0-alpha.1",
   "author": {
     "name": "guanghechen",
     "url": "https://github.com/guanghechen/"
@@ -35,9 +35,9 @@
     "test": "cross-env TS_NODE_FILES=true jest --config ../../jest.config.js --rootDir ."
   },
   "dependencies": {
-    "@yozora/ast": "^2.0.0-alpha.
-    "@yozora/character": "^2.0.0-alpha.
-    "@yozora/core-tokenizer": "^2.0.0-alpha.
+    "@yozora/ast": "^2.0.0-alpha.1",
+    "@yozora/character": "^2.0.0-alpha.1",
+    "@yozora/core-tokenizer": "^2.0.0-alpha.1"
   },
-  "gitHead": "
+  "gitHead": "86202e1d2b03ccfc2ab030517d9d314f7aee7666"
 }