amateras 0.5.0 → 0.6.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- package/README.md +23 -26
- package/ext/html/node/$Anchor.ts +2 -2
- package/ext/html/node/$Canvas.ts +2 -2
- package/ext/html/node/$Dialog.ts +2 -2
- package/ext/html/node/$Form.ts +2 -2
- package/ext/html/node/$Image.ts +2 -2
- package/ext/html/node/$Input.ts +2 -2
- package/ext/html/node/$Label.ts +2 -2
- package/ext/html/node/$Media.ts +2 -2
- package/ext/html/node/$OptGroup.ts +2 -2
- package/ext/html/node/$Option.ts +2 -2
- package/ext/html/node/$Select.ts +2 -2
- package/ext/html/node/$TextArea.ts +2 -2
- package/ext/i18n/README.md +20 -0
- package/ext/i18n/src/index.ts +106 -12
- package/ext/i18n/src/structure/I18n.ts +12 -8
- package/ext/i18n/src/structure/I18nTranslation.ts +35 -0
- package/ext/idb/src/structure/builder/$IDBBuilder.ts +8 -8
- package/ext/markdown/README.md +53 -0
- package/ext/markdown/package.json +7 -0
- package/ext/markdown/src/index.ts +3 -0
- package/ext/markdown/src/lib/type.ts +26 -0
- package/ext/markdown/src/lib/util.ts +21 -0
- package/ext/markdown/src/structure/Markdown.ts +54 -0
- package/ext/markdown/src/structure/MarkdownLexer.ts +111 -0
- package/ext/markdown/src/structure/MarkdownParser.ts +33 -0
- package/ext/markdown/src/syntax/alert.ts +46 -0
- package/ext/markdown/src/syntax/blockquote.ts +35 -0
- package/ext/markdown/src/syntax/bold.ts +11 -0
- package/ext/markdown/src/syntax/code.ts +11 -0
- package/ext/markdown/src/syntax/codeblock.ts +44 -0
- package/ext/markdown/src/syntax/heading.ts +14 -0
- package/ext/markdown/src/syntax/horizontalRule.ts +11 -0
- package/ext/markdown/src/syntax/image.ts +23 -0
- package/ext/markdown/src/syntax/italic.ts +11 -0
- package/ext/markdown/src/syntax/link.ts +46 -0
- package/ext/markdown/src/syntax/list.ts +121 -0
- package/ext/markdown/src/syntax/table.ts +67 -0
- package/ext/markdown/src/syntax/text.ts +19 -0
- package/ext/router/README.md +111 -17
- package/ext/router/package.json +10 -0
- package/ext/router/src/index.ts +69 -0
- package/ext/router/src/node/Page.ts +34 -0
- package/ext/router/src/node/Router.ts +191 -0
- package/ext/router/{node → src/node}/RouterAnchor.ts +13 -2
- package/ext/router/src/structure/PageBuilder.ts +24 -0
- package/ext/router/src/structure/Route.ts +105 -0
- package/ext/signal/README.md +93 -0
- package/ext/signal/package.json +9 -0
- package/ext/signal/src/index.ts +128 -0
- package/{src → ext/signal/src}/structure/Signal.ts +6 -10
- package/ext/ssr/index.ts +4 -4
- package/ext/ui/lib/VirtualScroll.ts +25 -0
- package/ext/ui/node/Accordian.ts +97 -0
- package/ext/ui/node/Form.ts +53 -0
- package/ext/ui/node/Grid.ts +0 -0
- package/ext/ui/node/Table.ts +43 -0
- package/ext/ui/node/Tabs.ts +114 -0
- package/ext/ui/node/Toast.ts +16 -0
- package/ext/ui/node/Waterfall.ts +72 -0
- package/ext/ui/package.json +11 -0
- package/package.json +6 -3
- package/src/core.ts +30 -60
- package/src/global.ts +9 -2
- package/src/index.ts +1 -2
- package/src/lib/assignProperties.ts +57 -0
- package/src/lib/native.ts +25 -8
- package/src/lib/uppercase.ts +3 -0
- package/src/node/$Element.ts +7 -41
- package/src/node/$EventTarget.ts +45 -0
- package/src/node/$Node.ts +60 -65
- package/src/node/$Virtual.ts +65 -0
- package/src/node.ts +7 -6
- package/ext/i18n/src/node/I18nText.ts +0 -35
- package/ext/markdown/index.ts +0 -121
- package/ext/router/index.ts +0 -73
- package/ext/router/node/Page.ts +0 -27
- package/ext/router/node/Route.ts +0 -54
- package/ext/router/node/Router.ts +0 -149
- package/src/lib/assign.ts +0 -38
- package/src/lib/assignHelper.ts +0 -18
package/ext/markdown/src/lib/type.ts

@@ -0,0 +1,26 @@
+export const INLINE = 'INLINE';
+export const BLOCK = 'BLOCK';
+export const TEXT = 'TEXT';
+export const IMAGE = 'IMAGE';
+export const LINK = 'LINK';
+export const QUICK_LINK = 'QUICK_LINK';
+export const CODE = 'CODE';
+export const ITALIC = 'ITALIC';
+export const BOLD = 'BOLD';
+export const TEXT_LINE = 'TEXT_LINE';
+export const HEADING = 'HEADING';
+export const CODE_START = 'CODE_START';
+export const CODE_LINE = 'CODE_LINE';
+export const CODE_END = 'CODE_END';
+export const UNORDERED_LIST_ITEM = 'UNORDERED_LIST_ITEM';
+export const ORDERED_LIST_ITEM = 'ORDERED_LIST_ITEM';
+export const BLOCKQUOTE = 'BLOCKQUOTE';
+export const ALERT = 'ALERT';
+export const ALERT_LINE = 'ALERT_LINE';
+export const HORIZONTAL_RULE = 'HORIZONTAL_RULE';
+export const TABLE = 'TABLE';
+export const TABLE_ROW = 'TABLE_ROW';
+export const TABLE_COLUMN = 'TABLE_COLUMN';
+export const EMPTY_LINE = 'EMPTY_LINE'
+export const INLINE_TEXT = "INLINE_TEXT";
+export const INLINE_CONTENT = "INLINE_CONTENT"
package/ext/markdown/src/lib/util.ts

@@ -0,0 +1,21 @@
+import type { BlockTokenizer, InlineTokenizer, MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParseProcessor, MarkdownParser } from "#structure/MarkdownParser";
+import { forEach } from "amateras/lib/native";
+
+export const setBlockTokenizer = (lexer: MarkdownLexer, type: string, tokenizer: BlockTokenizer) => lexer.blockTokenizers.set(type, tokenizer);
+export const setInlineTokenizer = (lexer: MarkdownLexer, type: string, tokenizer: InlineTokenizer) => lexer.inlineTokenizers.set(type, tokenizer);
+
+export const setProcessor = (parser: MarkdownParser, type: string, processor: MarkdownParseProcessor) => parser.processors.set(type, processor);
+
+export const htmltag = (tagname: string, content: string) => `<${tagname}>${content}</${tagname}>`
+
+export const htmlEscapeChar = (str: string) => {
+    forEach([
+        ['&', '&amp;'],
+        ['<', '&lt;'],
+        ['>', '&gt;'],
+        ['"', '&quot;'],
+        ["'", '&#39;']
+    ] as [string, string][], group => str = str.replaceAll(...group))
+    return str;
+}
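For orientation, a rough sketch of what the two HTML helpers above return (behavior read directly off the diff; `#lib/util` is the package's own import alias):

```ts
import { htmltag, htmlEscapeChar } from "#lib/util";

htmltag('p', 'hello');
// → "<p>hello</p>"

// '&' is escaped first, so entities produced by the later
// replacements are not escaped a second time
htmlEscapeChar('<b> & "quotes"');
// → "&lt;b&gt; &amp; &quot;quotes&quot;"
```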
package/ext/markdown/src/structure/Markdown.ts

@@ -0,0 +1,54 @@
+import { alertProcessor, alertTokenizer } from "../syntax/alert";
+import { blockquoteProcessor, blockquoteTokenizer } from "../syntax/blockquote";
+import { boldProcessor, boldTokenizer } from "../syntax/bold";
+import { codeProcessor, codeTokenizer } from "../syntax/code";
+import { codeblockProcessor, codeblockTokenizer } from "../syntax/codeblock";
+import { headingProcessor, headingTokenizer } from "../syntax/heading";
+import { horizontalRuleProcessor, horizontalRuleTokenizer } from "../syntax/horizontalRule";
+import { imageProcessor, imageTokenizer } from "../syntax/image";
+import { italicProcessor, italicTokenizer } from "../syntax/italic";
+import { linkProcessor, linkTokenizer } from "../syntax/link";
+import { listProcessor, listTokenizer } from "../syntax/list";
+import { textLineProcessor, textProcessor } from "../syntax/text";
+import { MarkdownLexer } from "./MarkdownLexer";
+import { MarkdownParser } from "./MarkdownParser";
+
+export class Markdown {
+    lexer = new MarkdownLexer();
+    parser = new MarkdownParser();
+    constructor() {
+        this.lexer.use(
+            headingTokenizer,
+            codeblockTokenizer,
+            listTokenizer,
+            alertTokenizer,
+            blockquoteTokenizer,
+            horizontalRuleTokenizer,
+            imageTokenizer, // the image tokenizer must run before link
+            linkTokenizer, // the link tokenizer must run before code, bold, and italic
+            codeTokenizer,
+            boldTokenizer,
+            italicTokenizer,
+        )
+
+        this.parser.use(
+            textProcessor,
+            imageProcessor,
+            linkProcessor,
+            codeProcessor,
+            italicProcessor,
+            boldProcessor,
+            textLineProcessor,
+            headingProcessor,
+            codeblockProcessor,
+            listProcessor,
+            alertProcessor,
+            blockquoteProcessor,
+            horizontalRuleProcessor
+        )
+    }
+
+    parseHTML(str: string) {
+        return this.parser.parse(this.lexer.blockTokenize(str));
+    }
+}
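A minimal usage sketch of this facade; the `amateras/markdown` import specifier is an assumption based on the new `ext/markdown/package.json`, not something the diff spells out:

```ts
import { Markdown } from "amateras/markdown"; // assumed entry point

const md = new Markdown(); // default tokenizers and processors register in the constructor
const html = md.parseHTML("# Title\n\nSome **bold** text.");
// html is the concatenation of whatever the registered processors emit,
// e.g. "<h1>Title</h1>…" for the heading line
```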
package/ext/markdown/src/structure/MarkdownLexer.ts

@@ -0,0 +1,111 @@
+import { BLOCK, EMPTY_LINE, INLINE_CONTENT, INLINE_TEXT, TEXT_LINE } from "#lib/type";
+import { forEach, isString } from "amateras/lib/native";
+
+export class MarkdownLexer {
+    blockTokenizers = new Map<string, BlockTokenizer>();
+    inlineTokenizers = new Map<string, InlineTokenizer>();
+
+    blockTokenize(str: string) {
+        const lines = str.split(/\r?\n/);
+        const tokens: BlockToken[] = [];
+        let lineIndex = 0;
+        lineLoop: while (lineIndex < lines.length) {
+            let line = lines[lineIndex];
+            if (line === undefined) throw 'LINE ERROR';
+            let token: BlockToken | undefined;
+            for (const [type, tokenizer] of this.blockTokenizers) {
+                const matched = line.match(tokenizer.regex);
+                if (matched) {
+                    const {content, multiLine, data} = tokenizer.handle(matched, lineIndex, lines);
+                    token = { layout: BLOCK, type, content, data }
+                    if (multiLine) {
+                        tokens.push(token);
+                        tokens.push(...multiLine.tokens)
+                        lineIndex = multiLine.skip;
+                        continue lineLoop;
+                    }
+                    break;
+                }
+            }
+            if (!token) token = {
+                layout: BLOCK,
+                ...(
+                    line.length
+                    ? { type: TEXT_LINE, content: this.inlineTokenize(line) }
+                    : { type: EMPTY_LINE, content: [] }
+                )
+            };
+            tokens.push(token);
+            lineIndex++;
+        }
+        return tokens;
+    }
+
+    inlineTokenize(str: string): InlineToken[] {
+        const tokens: InlineToken[] = [];
+        let remainStr = str;
+        while (remainStr.length) {
+            let token: InlineToken | undefined;
+            for (const [type, tokenizer] of this.inlineTokenizers) {
+                const matched = remainStr.match(tokenizer.regex);
+                if (matched) {
+                    const {index, 0: matchStr} = matched;
+                    // tokenize the text before the match
+                    if (index != 0) tokens.push(...this.inlineTokenize(remainStr.substring(0, index)));
+                    // then handle the matched string itself
+                    const {content, data} = tokenizer.handle(matched);
+                    token = { type, ...(isString(content) ? { layout: INLINE_TEXT, text: content } : { layout: INLINE_CONTENT, content })};
+                    if (data) token.data = data;
+                    remainStr = remainStr.substring(index! + matchStr.length);
+                    break;
+                }
+            }
+            if (!token) {
+                token = { type: 'TEXT', layout: INLINE_TEXT, text: remainStr };
+                remainStr = '';
+            }
+            tokens.push(token);
+        }
+        return tokens;
+    }
+
+    use(...handle: ((parser: this) => void)[]) {
+        forEach(handle, fn => fn(this));
+        return this;
+    }
+}
+
+export type BlockTokenizer = {
+    regex: RegExp;
+    handle: (matches: RegExpMatchArray, position: number, lines: string[]) => { content: (InlineToken | BlockToken)[], multiLine?: BlockTokenizerMultiLine, data?: {[key: string]: any} };
+}
+export type BlockTokenizerMultiLine = {
+    skip: number;
+    tokens: BlockToken[];
+}
+export type InlineTokenizer = {
+    regex: RegExp;
+    handle: (matches: RegExpMatchArray) => { content: InlineToken[] | string, data?: {[key: string]: any} }
+}
+
+export interface TokenBase {
+    type: string;
+    layout: 'BLOCK' | 'INLINE_CONTENT' | 'INLINE_TEXT';
+    content?: Token[];
+    text?: string;
+    data?: {[key: string]: any};
+}
+export interface BlockToken extends TokenBase {
+    layout: 'BLOCK'
+    content: Token[];
+}
+export interface InlineTextToken extends TokenBase {
+    layout: 'INLINE_TEXT'
+    text: string;
+}
+export interface InlineContentToken extends TokenBase {
+    layout: 'INLINE_CONTENT'
+    content: (InlineToken)[];
+}
+export type InlineToken = InlineTextToken | InlineContentToken;
+export type Token = BlockToken | InlineToken;
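The tokenizer maps make the lexer open for extension. A sketch of a custom `~~strikethrough~~` inline rule in the same shape as the built-in syntax modules (the `STRIKE` token name and the `<s>` output are hypothetical, not part of this release):

```ts
import { htmltag, setInlineTokenizer, setProcessor } from "#lib/util";
import type { MarkdownLexer } from "#structure/MarkdownLexer";
import type { MarkdownParser } from "#structure/MarkdownParser";

const STRIKE = 'STRIKE'; // hypothetical token type

export const strikeTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, STRIKE, {
    regex: /~~(.+?)~~/,
    handle: matches => ({ content: lexer.inlineTokenize(matches[1]!) })
})

export const strikeProcessor = (parser: MarkdownParser) => setProcessor(parser, STRIKE, token => htmltag('s', parser.parse(token.content!)))

// registered exactly like the built-ins:
// markdown.lexer.use(strikeTokenizer); markdown.parser.use(strikeProcessor);
```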
package/ext/markdown/src/structure/MarkdownParser.ts

@@ -0,0 +1,33 @@
+import { forEach, isString } from "../../../../src/lib/native";
+import { type Token } from "./MarkdownLexer";
+
+export class MarkdownParser {
+    processors = new Map<string, MarkdownParseProcessor>();
+
+    parse(tokens: (Token)[]) {
+        let html = '';
+        let i = 0;
+        while (i < tokens.length) {
+            const token = tokens[i]!;
+            const processor = this.processors.get(token.type);
+            if (processor) {
+                const result = processor(token, tokens.slice(i));
+                if (isString(result)) {
+                    html += result;
+                } else {
+                    html += result.html;
+                    i += result.skipTokens;
+                }
+            }
+            i++;
+        }
+        return html;
+    }
+
+    use(...handle: ((parser: this) => void)[]) {
+        forEach(handle, fn => fn(this));
+        return this;
+    }
+}
+
+export type MarkdownParseProcessor = (token: Token, tokens: Token[]) => (string | { html: string, skipTokens: number })
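To make the two-stage contract concrete, a hypothetical trace of the token shapes flowing between the classes (assuming the default rules from `Markdown` are registered):

```ts
const md = new Markdown(); // see the facade sketch above

const tokens = md.lexer.blockTokenize("**hi**");
// ≈ [{ layout: 'BLOCK', type: 'TEXT_LINE', content: [
//      { layout: 'INLINE_CONTENT', type: 'BOLD', content: [
//        { layout: 'INLINE_TEXT', type: 'TEXT', text: 'hi' } ] } ] }]

md.parser.parse(tokens); // each processor is looked up by token.type
```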
package/ext/markdown/src/syntax/alert.ts

@@ -0,0 +1,46 @@
+import { ALERT, ALERT_LINE, BLOCK } from "#lib/type";
+import { setBlockTokenizer, setProcessor } from "#lib/util";
+import type { BlockToken, MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+import { uppercase } from "amateras/lib/uppercase";
+
+export const alertProcessor = (parser: MarkdownParser) => setProcessor(parser, ALERT, (token, tokens) => {
+    let html = '';
+    let i = 1;
+    while (i < tokens.length) {
+        const token = tokens[i]!;
+        if (token.type !== ALERT_LINE) break;
+        html += parser.parse(token.content![0]!.content!);
+        i++;
+    }
+    const alertType = token.data?.alertType as string;
+    return {
+        html: `<blockquote class="alert alert-${alertType}"><p class="alert-title">${uppercase(alertType, 0, 1)}</p>${html}</blockquote>`,
+        skipTokens: i
+    }
+})
+
+export const alertTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, ALERT, {
+    regex: /^> ?\[!(?:(?:NOTE)|(?:TIP)|(?:IMPORTANT)|(?:WARNING)|(?:CAUTION))\]/,
+    handle(_, position, lines) {
+        const tokens: BlockToken[] = [];
+        const match = lines[position]!.match(/> ?\[!(.+?)\]/);
+        const alertType = match?.[1]?.toLowerCase();
+        position++
+        while (position < lines.length) {
+            const line = lines[position]!;
+            const match = line.match(/^> ?(.+)/);
+            if (match) tokens.push({ layout: BLOCK, type: ALERT_LINE, content: lexer.blockTokenize(match[1]!) });
+            else break;
+            position++;
+        }
+        return {
+            content: [],
+            data: { alertType },
+            multiLine: {
+                skip: position,
+                tokens
+            }
+        }
+    },
+})
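A sketch of the alert round trip implied by the code above, assuming `uppercase(alertType, 0, 1)` capitalizes the first character (its implementation is not in this diff):

```ts
const md = new Markdown(); // see the facade sketch above

md.parseHTML("> [!NOTE]\n> Remember this.");
// ≈ <blockquote class="alert alert-note">
//     <p class="alert-title">Note</p>…</blockquote>
// where … is the parsed content of the collected ALERT_LINE tokens
```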
package/ext/markdown/src/syntax/blockquote.ts

@@ -0,0 +1,35 @@
+import { BLOCKQUOTE } from "#lib/type";
+import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const blockquoteProcessor = (parser: MarkdownParser) => setProcessor(parser, BLOCKQUOTE, (token, tokens) => {
+    let i = 0;
+    const blockquote = (deep: number) => {
+        let html = '';
+        while (i < tokens.length) {
+            const {type, content, data} = tokens[i]!;
+            if (type !== BLOCKQUOTE) break;
+            if (data!.deep > deep) html += blockquote(data!.deep);
+            else if (data!.deep < deep) break;
+            else { html += parser.parse(content!); i++ }
+        }
+        return htmltag('blockquote', html)
+    }
+    return {
+        html: blockquote(token.data!.deep),
+        skipTokens: i
+    }
+})
+
+export const blockquoteTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, BLOCKQUOTE, {
+    regex: /^(>+) ?(.+)?/,
+    handle(matches) {
+        return {
+            content: lexer.blockTokenize(matches[2] ?? ''),
+            data: {
+                deep: (matches[1]!.length - 1)
+            }
+        }
+    }
+})
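The `deep` field drives the nesting: a sketch, with depth equal to the count of leading `>` minus one:

```ts
const md = new Markdown(); // see the facade sketch above

md.parseHTML("> outer\n>> inner");
// ≈ <blockquote>outer…<blockquote>inner…</blockquote></blockquote>
// (the exact inner markup depends on the TEXT_LINE processor in syntax/text.ts)
```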
package/ext/markdown/src/syntax/bold.ts

@@ -0,0 +1,11 @@
+import { BOLD } from "#lib/type";
+import { htmltag, setInlineTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const boldProcessor = (parser: MarkdownParser) => setProcessor(parser, BOLD, token => htmltag('b', parser.parse(token.content!)))
+
+export const boldTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, BOLD, {
+    regex: /\*\*(.+?\*?)\*\*/,
+    handle: matches => ({ content: lexer.inlineTokenize(matches[1]!) })
+})
package/ext/markdown/src/syntax/code.ts

@@ -0,0 +1,11 @@
+import { CODE } from "#lib/type";
+import { htmlEscapeChar, htmltag, setInlineTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const codeProcessor = (parser: MarkdownParser) => setProcessor(parser, CODE, token => htmltag('code', htmlEscapeChar(token.text!)))
+
+export const codeTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, CODE, {
+    regex: /`(.+?)`/,
+    handle: matches => ({ content: matches[1]! })
+})
package/ext/markdown/src/syntax/codeblock.ts

@@ -0,0 +1,44 @@
+import { BLOCK, CODE_END, CODE_LINE, CODE_START } from "#lib/type";
+import { htmlEscapeChar, setBlockTokenizer, setProcessor } from "#lib/util";
+import type { BlockToken, MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const codeblockProcessor = (parser: MarkdownParser) => setProcessor(parser, CODE_START, (token, tokens) => {
+    let html = '';
+    let i = 1;
+    while (i < tokens.length) {
+        const token = tokens[i]!;
+        if (token.type === CODE_END) break;
+        html += token.content![0]!.text;
+        i++;
+    }
+    return {
+        html: `<pre><code${token.data?.lang ? ` lang="${token.data.lang}"` : ''}>${htmlEscapeChar(html)}</code></pre>`,
+        skipTokens: i
+    }
+})
+
+export const codeblockTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, CODE_START, {
+    regex: /^```(\w+)/,
+    handle: (matches, position, lines) => {
+        const tokens: BlockToken[] = [];
+        position++;
+        while (position < lines.length) {
+            const line = lines[position]!;
+            position++;
+            if (line.includes('```')) {
+                tokens.push({ layout: BLOCK, type: CODE_END, content: [] })
+                break;
+            }
+            tokens.push({ layout: BLOCK, type: CODE_LINE, content: [{ layout: "INLINE_TEXT", type: 'CODE_TEXT', text: `${line}\n` }] });
+        }
+        return {
+            content: [],
+            data: { lang: matches[1] },
+            multiLine: {
+                skip: position,
+                tokens
+            }
+        }
+    }
+})
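A sketch of a fenced block round trip (note the tokenizer's regex requires a language word after the opening fence):

````ts
const md = new Markdown(); // see the facade sketch above

md.parseHTML("```ts\nconst x = 1;\n```");
// → <pre><code lang="ts">const x = 1;\n</code></pre>
// CODE_LINE text is concatenated first, then HTML-escaped in one pass
````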
package/ext/markdown/src/syntax/heading.ts

@@ -0,0 +1,14 @@
+import { HEADING } from "#lib/type";
+import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const headingProcessor = (parser: MarkdownParser) => setProcessor(parser, HEADING, token => {
+    const tagname = `h${token.data!.level}`;
+    return htmltag(tagname, parser.parse(token.content!))
+})
+
+export const headingTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, HEADING, {
+    regex: /^(#+) (.+)/,
+    handle: matches => ({ content: lexer.inlineTokenize(matches[2]!), data: { level: matches[1]!.length } })
+})
package/ext/markdown/src/syntax/horizontalRule.ts

@@ -0,0 +1,11 @@
+import { HORIZONTAL_RULE } from "#lib/type";
+import { setBlockTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const horizontalRuleProcessor = (parser: MarkdownParser) => setProcessor(parser, HORIZONTAL_RULE, _ => `<hr>`)
+
+export const horizontalRuleTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, HORIZONTAL_RULE, {
+    regex: /^---/,
+    handle: _ => ({ content: [] })
+})
package/ext/markdown/src/syntax/image.ts

@@ -0,0 +1,23 @@
+import { IMAGE } from "#lib/type";
+import { setInlineTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const imageProcessor = (parser: MarkdownParser) => setProcessor(parser, IMAGE, token => {
+    const { url, title } = token.data!;
+    return `<img alt="${parser.parse(token.content!)}" src="${url}"${title ? ` title="${title}"` : ''}>`
+})
+
+export const imageTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, IMAGE, {
+    regex: /^!\[(.+?)\]\((.+?)\)/,
+    handle: matches => {
+        const [_, alt, detail] = matches as [string, string, string];
+        const [__, url, title] = detail.match(/(\w\w+?:\/\/[^\s]+)(?: "(.+?)")?/) as [string, string, string];
+        return {
+            content: lexer.inlineTokenize(alt),
+            data: {
+                url, title
+            }
+        }
+    }
+})
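One worked example of the image rule above (the URL is illustrative; the alt text is assumed to pass through the TEXT processor unchanged):

```ts
const md = new Markdown(); // see the facade sketch above

md.parseHTML('![logo](https://example.com/a.png "Logo")');
// the inline token carries { url, title }, so the processor emits
// → <img alt="logo" src="https://example.com/a.png" title="Logo">
```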
package/ext/markdown/src/syntax/italic.ts

@@ -0,0 +1,11 @@
+import { ITALIC } from "#lib/type";
+import { setInlineTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+
+export const italicProcessor = (parser: MarkdownParser) => setProcessor(parser, ITALIC, token => `<i>${parser.parse(token.content!)}</i>`)
+
+export const italicTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, ITALIC, {
+    regex: /\*(.+?)\*/,
+    handle: matches => ({ content: lexer.inlineTokenize(matches[1]!) })
+})
package/ext/markdown/src/syntax/link.ts

@@ -0,0 +1,46 @@
+import { LINK, QUICK_LINK } from "#lib/type";
+import { setInlineTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer, Token } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+import { isUndefined } from "amateras/lib/native";
+
+export const linkProcessor = (parser: MarkdownParser) => {
+    const linkProcessor = (token: Token) => {
+        const {href, email, title} = token.data!;
+        return `<a href="${isUndefined(href) ? `mailto:${email}` : href}"${title ? ` title="${title}"` : ''}>${parser.parse(token.content!)}</a>`
+    }
+    setProcessor(parser, QUICK_LINK, linkProcessor)
+    setProcessor(parser, LINK, linkProcessor)
+}
+
+export const linkTokenizer = (lexer: MarkdownLexer) => {
+
+    setInlineTokenizer(lexer, LINK, {
+        regex: /\[(.+?)\]\(((?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:[^\s)]+))(?: "(.+)?")?\)/,
+        handle: matches => {
+            const [_, alt, detail, title] = matches as [string, string, string, string];
+            const match = detail.match(/(?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:\/\/[^\s]+)/);
+            const [resolver] = match!;
+            const email_or_href = resolver.includes('@') ? { email: resolver }: { href: resolver };
+            return {
+                content: lexer.inlineTokenize(alt),
+                data: {
+                    title, ...email_or_href
+                }
+            }
+        }
+    })
+    setInlineTokenizer(lexer, QUICK_LINK, {
+        regex: /<((?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:[^\s>]+))>/,
+        handle: matches => {
+            const [_, detail] = matches as [string, string];
+            const match = detail.match(/(?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:\/\/[^\s]+)/);
+            const [resolver] = match!;
+            const email_or_href = resolver.includes('@') ? { email: resolver }: { href: resolver };
+            return {
+                content: resolver,
+                data: email_or_href
+            }
+        }
+    })
+}
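A sketch of the standard link form (the tokenizer decides between `href` and `mailto:` by checking the resolved target for an `@`; link text is assumed to pass through the TEXT processor unchanged):

```ts
const md = new Markdown(); // see the facade sketch above

md.parseHTML('[docs](https://example.com "Docs")');
// → <a href="https://example.com" title="Docs">docs</a>

md.parseHTML('[mail](someone@example.com)');
// → <a href="mailto:someone@example.com">mail</a>
```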
package/ext/markdown/src/syntax/list.ts

@@ -0,0 +1,121 @@
+import { EMPTY_LINE, ORDERED_LIST_ITEM, TEXT_LINE, UNORDERED_LIST_ITEM } from "#lib/type";
+import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
+import type { MarkdownLexer, Token } from "#structure/MarkdownLexer";
+import type { MarkdownParser } from "#structure/MarkdownParser";
+import { equal, isString } from "amateras/lib/native";
+
+export const listProcessor = (parser: MarkdownParser) => {
+    const listType = (type: string) => type === ORDERED_LIST_ITEM ? 'ol' : 'ul'
+    const listProcessor = (token: Token, tokens: Token[]) => {
+        let i = 0;
+        // cache lists by nesting depth
+        const deepListMap = new Map<number, List>();
+
+        const listGenerator = (type: string, deep: number) => {
+            const getList = deepListMap.get(deep)
+            const list = getList && listType(type) === getList.tagname ? getList : List(listType(type), []);
+            deepListMap.set(deep, list);
+
+            while (i < tokens.length) {
+                const token = tokens[i]!;
+                const tokenType = token.type;
+                // if the token is not a list item, an empty line, or a text line, finish the loop
+                if (!equal(tokenType, ORDERED_LIST_ITEM, UNORDERED_LIST_ITEM, EMPTY_LINE, TEXT_LINE)) { i--; break};
+                // if the token is a text line
+                if (tokenType === TEXT_LINE) {
+                    const text = token.content![0]?.text;
+                    // if the text starts with two spaces
+                    if (text?.match(/^\s\s/)) {
+                        const match = text.match(/^(\s+)(.+)?/)!;
+                        // if there is no content, move to the next token
+                        if (!match[2]) { i++; continue }
+                        token.data = { deep: Math.trunc(match[1]!.length / 2) - 1 }
+                    }
+                    // if the text starts with a tab
+                    else if (text?.match(/^\t/)) {
+                        const match = text.match(/^(\t+)(.+)?/)!;
+                        // if there is no content, move to the next token
+                        if (!match[2]) { i++; continue }
+                        token.data = { deep: match[1]!.length - 1 }
+                    }
+                    // otherwise break
+                    else {i--; break};
+                }
+                // if the token is an empty line, jump to the next token
+                if (tokenType === EMPTY_LINE) i++;
+                // if the token's depth differs from this list's depth
+                else if (token.data!.deep !== deep) {
+                    // if deeper, push a nested list into the current list item
+                    if (token.data!.deep > deep) deepListMap.get(deep)?.items.at(-1)?.content.push(listGenerator(tokenType, token.data!.deep))
+                    // otherwise drop this depth's cache and return to the outer list
+                    else { deepListMap.delete(deep); break; }
+                }
+                // if the token is a text line, switch this list to paragraph mode
+                else if (tokenType === TEXT_LINE) {
+                    list.paragraph = true;
+                    list.items.at(-1)?.content.push(parser.parse(token.content!))
+                    i++
+                }
+                // if the list type changed, finish the loop
+                else if (tokenType !== type) {
+                    deepListMap.delete(deep);
+                    break;
+                }
+                // push a list item
+                else {
+                    list.items.push(ListItem([parser.parse(token.content!)]));
+                    i++
+                }
+            }
+            return list;
+        }
+
+        return {
+            html: `${listGenerator(token.type, token.data!.deep)}`,
+            skipTokens: i
+        }
+    }
+
+    interface ListItem { content: (string | List)[], toString(): string }
+    const ListItem = (content: (string | List)[]): ListItem => ({
+        content: content,
+        toString() { return htmltag('li', this.content.join('')) }
+    })
+
+    interface List { tagname: string, items: ListItem[], paragraph: boolean, toString(): string }
+    const List = (tagname: 'ul' | 'ol', items: ListItem[]): List => ({
+        tagname: tagname,
+        items: items,
+        paragraph: false,
+        toString() {
+            if (this.paragraph) this.items.forEach(item => item.content.forEach((text, i) => isString(text) && (item.content[i] = htmltag('p', text))))
+            return htmltag(this.tagname, this.items.join(''))
+        }
+    })
+
+    setProcessor(parser, UNORDERED_LIST_ITEM, listProcessor);
+    setProcessor(parser, ORDERED_LIST_ITEM, listProcessor);
+}
+
+export const listTokenizer = (lexer: MarkdownLexer) => {
+    const listHandle = (matches: RegExpMatchArray) => {
+        const prefix = matches[0].split(/[-*]/)[0]!;
+        const spaces = prefix.match(/\s/)?.length ?? 0;
+        const tabs = prefix.match(/\t/)?.length ?? 0;
+        return ({
+            content: lexer.inlineTokenize(matches[1]!),
+            data: {
+                deep: Math.trunc(tabs + spaces / 2)
+            }
+        })
+    }
+    setBlockTokenizer(lexer, UNORDERED_LIST_ITEM, {
+        regex: /^(?:[\s\t]+)?[-*] (.+)/,
+        handle: listHandle
+    })
+
+    setBlockTokenizer(lexer, ORDERED_LIST_ITEM, {
+        regex: /^(?:[\s\t]+)?\d+\. (.+)/,
+        handle: listHandle
+    })
+}