amateras 0.4.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97) hide show
  1. package/README.md +24 -25
  2. package/ext/html/node/$Anchor.ts +3 -3
  3. package/ext/html/node/$Canvas.ts +2 -2
  4. package/ext/html/node/$Dialog.ts +2 -2
  5. package/ext/html/node/$Form.ts +2 -2
  6. package/ext/html/node/$Image.ts +2 -2
  7. package/ext/html/node/$Input.ts +28 -4
  8. package/ext/html/node/$Label.ts +12 -3
  9. package/ext/html/node/$Media.ts +2 -2
  10. package/ext/html/node/$OptGroup.ts +2 -2
  11. package/ext/html/node/$Option.ts +2 -2
  12. package/ext/html/node/$Select.ts +2 -2
  13. package/ext/html/node/$TextArea.ts +2 -2
  14. package/ext/i18n/README.md +20 -0
  15. package/ext/i18n/src/index.ts +106 -12
  16. package/ext/i18n/src/structure/I18n.ts +12 -8
  17. package/ext/i18n/src/structure/I18nDictionary.ts +2 -2
  18. package/ext/i18n/src/structure/I18nTranslation.ts +35 -0
  19. package/ext/idb/README.md +127 -0
  20. package/ext/idb/package.json +13 -0
  21. package/ext/idb/src/core.ts +6 -0
  22. package/ext/idb/src/index.ts +17 -0
  23. package/ext/idb/src/lib/$IDBRequest.ts +8 -0
  24. package/ext/idb/src/structure/$IDB.ts +63 -0
  25. package/ext/idb/src/structure/$IDBCursor.ts +34 -0
  26. package/ext/idb/src/structure/$IDBIndex.ts +48 -0
  27. package/ext/idb/src/structure/$IDBStore.ts +103 -0
  28. package/ext/idb/src/structure/$IDBStoreBase.ts +30 -0
  29. package/ext/idb/src/structure/$IDBTransaction.ts +38 -0
  30. package/ext/idb/src/structure/builder/$IDBBuilder.ts +230 -0
  31. package/ext/idb/src/structure/builder/$IDBStoreBuilder.ts +100 -0
  32. package/ext/markdown/README.md +53 -0
  33. package/ext/markdown/package.json +15 -0
  34. package/ext/markdown/src/index.ts +3 -0
  35. package/ext/markdown/src/lib/type.ts +26 -0
  36. package/ext/markdown/src/lib/util.ts +21 -0
  37. package/ext/markdown/src/structure/Markdown.ts +54 -0
  38. package/ext/markdown/src/structure/MarkdownLexer.ts +111 -0
  39. package/ext/markdown/src/structure/MarkdownParser.ts +33 -0
  40. package/ext/markdown/src/syntax/alert.ts +46 -0
  41. package/ext/markdown/src/syntax/blockquote.ts +35 -0
  42. package/ext/markdown/src/syntax/bold.ts +11 -0
  43. package/ext/markdown/src/syntax/code.ts +11 -0
  44. package/ext/markdown/src/syntax/codeblock.ts +44 -0
  45. package/ext/markdown/src/syntax/heading.ts +14 -0
  46. package/ext/markdown/src/syntax/horizontalRule.ts +11 -0
  47. package/ext/markdown/src/syntax/image.ts +23 -0
  48. package/ext/markdown/src/syntax/italic.ts +11 -0
  49. package/ext/markdown/src/syntax/link.ts +46 -0
  50. package/ext/markdown/src/syntax/list.ts +121 -0
  51. package/ext/markdown/src/syntax/table.ts +67 -0
  52. package/ext/markdown/src/syntax/text.ts +19 -0
  53. package/ext/router/README.md +111 -17
  54. package/ext/router/package.json +10 -0
  55. package/ext/router/src/index.ts +69 -0
  56. package/ext/router/src/node/Page.ts +34 -0
  57. package/ext/router/src/node/Router.ts +191 -0
  58. package/ext/router/src/node/RouterAnchor.ts +24 -0
  59. package/ext/router/src/structure/PageBuilder.ts +24 -0
  60. package/ext/router/src/structure/Route.ts +105 -0
  61. package/ext/signal/README.md +93 -0
  62. package/ext/signal/package.json +9 -0
  63. package/ext/signal/src/index.ts +128 -0
  64. package/{src → ext/signal/src}/structure/Signal.ts +7 -11
  65. package/ext/ssr/index.ts +4 -4
  66. package/ext/ui/lib/VirtualScroll.ts +25 -0
  67. package/ext/ui/node/Accordian.ts +97 -0
  68. package/ext/ui/node/Form.ts +53 -0
  69. package/ext/ui/node/Grid.ts +0 -0
  70. package/ext/ui/node/Table.ts +43 -0
  71. package/ext/ui/node/Tabs.ts +114 -0
  72. package/ext/ui/node/Toast.ts +16 -0
  73. package/ext/ui/node/Waterfall.ts +72 -0
  74. package/ext/ui/package.json +11 -0
  75. package/package.json +9 -3
  76. package/src/core.ts +31 -59
  77. package/src/global.ts +12 -2
  78. package/src/index.ts +1 -2
  79. package/src/lib/assignProperties.ts +57 -0
  80. package/src/lib/native.ts +33 -11
  81. package/src/lib/sleep.ts +3 -1
  82. package/src/lib/toArray.ts +9 -0
  83. package/src/lib/trycatch.ts +17 -0
  84. package/src/lib/uppercase.ts +3 -0
  85. package/src/node/$Element.ts +7 -53
  86. package/src/node/$EventTarget.ts +45 -0
  87. package/src/node/$Node.ts +63 -55
  88. package/src/node/$Virtual.ts +65 -0
  89. package/src/node.ts +7 -6
  90. package/ext/i18n/src/node/I18nText.ts +0 -35
  91. package/ext/router/index.ts +0 -73
  92. package/ext/router/node/Page.ts +0 -27
  93. package/ext/router/node/Route.ts +0 -54
  94. package/ext/router/node/Router.ts +0 -149
  95. package/ext/router/node/RouterAnchor.ts +0 -8
  96. package/src/lib/assign.ts +0 -38
  97. package/src/lib/assignHelper.ts +0 -18
@@ -0,0 +1,46 @@
1
+ import { ALERT, ALERT_LINE, BLOCK } from "#lib/type";
2
+ import { setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { BlockToken, MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+ import { uppercase } from "amateras/lib/uppercase";
6
+
7
+ export const alertProcessor = (parser: MarkdownParser) => setProcessor(parser, ALERT, (token, tokens) => {
8
+ let html = '';
9
+ let i = 1;
10
+ while (i < tokens.length) {
11
+ const token = tokens[i]!;
12
+ if (token.type !== ALERT_LINE) break;
13
+ html += parser.parse(token.content![0]!.content!);
14
+ i++;
15
+ }
16
+ const alertType = token.data?.alertType as string;
17
+ return {
18
+ html: `<blockquote class="alert alert-${alertType}"><p class="alert-title">${uppercase(alertType, 0, 1)}</p>${html}</blockquote>`,
19
+ skipTokens: i
20
+ }
21
+ })
22
+
23
+ export const alertTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, ALERT, {
24
+ regex: /^> ?\[!(?:(?:NOTE)|(?:TIP)|(?:IMPORTANT)|(?:WARNING)|(?:CAUTION))\]/,
25
+ handle(_, position, lines) {
26
+ const tokens: BlockToken[] = [];
27
+ const match = lines[position]!.match(/> ?\[!(.+?)\]/);
28
+ const alertType = match?.[1]?.toLowerCase();
29
+ position++
30
+ while (position < lines.length) {
31
+ const line = lines[position]!;
32
+ const match = line.match(/^> ?(.+)/);
33
+ if (match) tokens.push({ layout: BLOCK, type: ALERT_LINE, content: lexer.blockTokenize(match[1]!) });
34
+ else break;
35
+ position++;
36
+ }
37
+ return {
38
+ content: [],
39
+ data: { alertType },
40
+ multiLine: {
41
+ skip: position,
42
+ tokens
43
+ }
44
+ }
45
+ },
46
+ })
@@ -0,0 +1,35 @@
1
+ import { BLOCKQUOTE } from "#lib/type";
2
+ import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const blockquoteProcessor = (parser: MarkdownParser) => setProcessor(parser, BLOCKQUOTE, (token, tokens) => {
7
+ let i = 0;
8
+ const blockquote = (deep: number) => {
9
+ let html = '';
10
+ while (i < tokens.length) {
11
+ const {type, content, data} = tokens[i]!;
12
+ if (type !== BLOCKQUOTE) break;
13
+ if (data!.deep > deep) html += blockquote(data!.deep);
14
+ else if (data!.deep < deep) break;
15
+ else { html += parser.parse(content!); i++ }
16
+ }
17
+ return htmltag('blockquote', html)
18
+ }
19
+ return {
20
+ html: blockquote(token.data!.deep),
21
+ skipTokens: i
22
+ }
23
+ })
24
+
25
+ export const blockquoteTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, BLOCKQUOTE, {
26
+ regex: /^(>+) ?(.+)?/,
27
+ handle(matches) {
28
+ return {
29
+ content: lexer.blockTokenize(matches[2] ?? ''),
30
+ data: {
31
+ deep: (matches[1]!.length - 1)
32
+ }
33
+ }
34
+ }
35
+ })
@@ -0,0 +1,11 @@
1
+ import { BOLD } from "#lib/type";
2
+ import { htmltag, setInlineTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const boldProcessor = (parser: MarkdownParser) => setProcessor(parser, BOLD, token => htmltag('b', parser.parse(token.content!)))
7
+
8
+ export const boldTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, BOLD, {
9
+ regex: /\*\*(.+?\*?)\*\*/,
10
+ handle: matches => ({ content: lexer.inlineTokenize(matches[1]!) })
11
+ })
@@ -0,0 +1,11 @@
1
+ import { CODE } from "#lib/type";
2
+ import { htmlEscapeChar, htmltag, setInlineTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const codeProcessor = (parser: MarkdownParser) => setProcessor(parser, CODE, token => htmltag('code', htmlEscapeChar(token.text!)))
7
+
8
+ export const codeTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, CODE, {
9
+ regex: /`(.+?)`/,
10
+ handle: matches => ({ content: matches[1]! })
11
+ })
@@ -0,0 +1,44 @@
1
+ import { BLOCK, CODE_END, CODE_LINE, CODE_START } from "#lib/type";
2
+ import { htmlEscapeChar, setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { BlockToken, MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const codeblockProcessor = (parser: MarkdownParser) => setProcessor(parser, CODE_START, (token, tokens) => {
7
+ let html = '';
8
+ let i = 1;
9
+ while (i < tokens.length) {
10
+ const token = tokens[i]!;
11
+ if (token.type === CODE_END) break;
12
+ html += token.content![0]!.text;
13
+ i++;
14
+ }
15
+ return {
16
+ html: `<pre><code${token.data?.lang ? ` lang="${token.data.lang}"` : ''}>${htmlEscapeChar(html)}</code></pre>`,
17
+ skipTokens: i
18
+ }
19
+ })
20
+
21
+ export const codeblockTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, CODE_START, {
22
+ regex: /^```(\w+)/,
23
+ handle: (matches, position, lines) => {
24
+ const tokens: BlockToken[] = [];
25
+ position++;
26
+ while (position < lines.length) {
27
+ const line = lines[position]!;
28
+ position++;
29
+ if (line.includes('```')) {
30
+ tokens.push({ layout: BLOCK, type: CODE_END, content: [] })
31
+ break;
32
+ }
33
+ tokens.push({ layout: BLOCK, type: CODE_LINE, content: [{ layout: "INLINE_TEXT", type: 'CODE_TEXT', text: `${line}\n` }] });
34
+ }
35
+ return {
36
+ content: [],
37
+ data: { lang: matches[1] },
38
+ multiLine: {
39
+ skip: position,
40
+ tokens
41
+ }
42
+ }
43
+ }
44
+ })
@@ -0,0 +1,14 @@
1
+ import { HEADING } from "#lib/type";
2
+ import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const headingProcessor = (parser: MarkdownParser) => setProcessor(parser, HEADING, token => {
7
+ const tagname = `h${token.data!.level}`;
8
+ return htmltag(tagname, parser.parse(token.content!))
9
+ })
10
+
11
+ export const headingTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, HEADING, {
12
+ regex: /^(#+) (.+)/,
13
+ handle: matches => ({ content: lexer.inlineTokenize(matches[2]!), data: { level: matches[1]!.length } })
14
+ })
@@ -0,0 +1,11 @@
1
+ import { HORIZONTAL_RULE } from "#lib/type";
2
+ import { setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const horizontalRuleProcessor = (parser: MarkdownParser) => setProcessor(parser, HORIZONTAL_RULE, _ => `<hr>`)
7
+
8
+ export const horizontalRuleTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, HORIZONTAL_RULE, {
9
+ regex: /^---/,
10
+ handle: _ => ({ content: [] })
11
+ })
@@ -0,0 +1,23 @@
1
+ import { IMAGE } from "#lib/type";
2
+ import { setInlineTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const imageProcessor = (parser: MarkdownParser) => setProcessor(parser, IMAGE, token => {
7
+ const { url, title } = token.data!;
8
+ return `<img alt="${parser.parse(token.content!)}" src="${url}"${title ? ` title="${title}"` : ''}>`
9
+ })
10
+
11
+ export const imageTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, IMAGE, {
12
+ regex: /^!\[(.+?)\]\((.+?)\)/,
13
+ handle: matches => {
14
+ const [_, alt, detail] = matches as [string, string, string];
15
+ const [__, url, title] = detail.match(/(\w\w+?:\/\/[^\s]+)(?: "(.+?)")?/) as [string, string, string];
16
+ return {
17
+ content: lexer.inlineTokenize(alt),
18
+ data: {
19
+ url, title
20
+ }
21
+ }
22
+ }
23
+ })
@@ -0,0 +1,11 @@
1
+ import { ITALIC } from "#lib/type";
2
+ import { setInlineTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+
6
+ export const italicProcessor = (parser: MarkdownParser) => setProcessor(parser, ITALIC, token => `<i>${parser.parse(token.content!)}</i>`)
7
+
8
+ export const italicTokenizer = (lexer: MarkdownLexer) => setInlineTokenizer(lexer, ITALIC, {
9
+ regex: /\*(.+?)\*/,
10
+ handle: matches => ({ content: lexer.inlineTokenize(matches[1]!) })
11
+ })
@@ -0,0 +1,46 @@
1
+ import { LINK, QUICK_LINK } from "#lib/type";
2
+ import { setInlineTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer, Token } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+ import { isUndefined } from "amateras/lib/native";
6
+
7
+ export const linkProcessor = (parser: MarkdownParser) => {
8
+ const linkProcessor = (token: Token) => {
9
+ const {href, email, title} = token.data!;
10
+ return `<a href="${isUndefined(href) ? `mailto:${email}` : href}"${title ? ` title="${title}"` : ''}>${parser.parse(token.content!)}</a>`
11
+ }
12
+ setProcessor(parser, QUICK_LINK, linkProcessor)
13
+ setProcessor(parser, LINK, linkProcessor)
14
+ }
15
+
16
+ export const linkTokenizer = (lexer: MarkdownLexer) => {
17
+
18
+ setInlineTokenizer(lexer, LINK, {
19
+ regex: /\[(.+?)\]\(((?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:[^\s)]+))(?: "(.+)?")?\)/,
20
+ handle: matches => {
21
+ const [_, alt, detail, title] = matches as [string, string, string, string];
22
+ const match = detail.match(/(?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:\/\/[^\s]+)/);
23
+ const [resolver] = match!;
24
+ const email_or_href = resolver.includes('@') ? { email: resolver }: { href: resolver };
25
+ return {
26
+ content: lexer.inlineTokenize(alt),
27
+ data: {
28
+ title, ...email_or_href
29
+ }
30
+ }
31
+ }
32
+ })
33
+ setInlineTokenizer(lexer, QUICK_LINK, {
34
+ regex: /<((?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:[^\s>]+))>/,
35
+ handle: matches => {
36
+ const [_, detail] = matches as [string, string];
37
+ const match = detail.match(/(?:\w+?@(?:\w|\.\w)+)|(?:\w\w+?:\/\/[^\s]+)/);
38
+ const [resolver] = match!;
39
+ const email_or_href = resolver.includes('@') ? { email: resolver }: { href: resolver };
40
+ return {
41
+ content: resolver,
42
+ data: email_or_href
43
+ }
44
+ }
45
+ })
46
+ }
@@ -0,0 +1,121 @@
1
+ import { EMPTY_LINE, ORDERED_LIST_ITEM, TEXT_LINE, UNORDERED_LIST_ITEM } from "#lib/type";
2
+ import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { MarkdownLexer, Token } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+ import { equal, isString } from "amateras/lib/native";
6
+
7
+ export const listProcessor = (parser: MarkdownParser) => {
8
+ const listType = (type: string) => type === ORDERED_LIST_ITEM ? 'ol' : 'ul'
9
+ const listProcessor = (token: Token, tokens: Token[]) => {
10
+ let i = 0;
11
+ // cache the list by deep number
12
+ const deepListMap = new Map<number, List>();
13
+
14
+ const listGenerator = (type: string, deep: number) => {
15
+ const getList = deepListMap.get(deep)
16
+ const list = getList && listType(type) === getList.tagname ? getList : List(listType(type), []);
17
+ deepListMap.set(deep, list);
18
+
19
+ while (i < tokens.length) {
20
+ const token = tokens[i]!;
21
+ const tokenType = token.type;
22
+ // if token type not equal list item / empty line / text line, then finish loop
23
+ if (!equal(tokenType, ORDERED_LIST_ITEM, UNORDERED_LIST_ITEM, EMPTY_LINE, TEXT_LINE)) { i--; break};
24
+ // if token type equal text line
25
+ if (tokenType === TEXT_LINE) {
26
+ const text = token.content![0]?.text;
27
+ // if text start with double space
28
+ if (text?.match(/^\s\s/)) {
29
+ const match = text.match(/^(\s+)(.+)?/)!;
30
+ // if no content, then next token
31
+ if (!match[2]) { i++; continue }
32
+ token.data = { deep: Math.trunc(match[1]!.length / 2) - 1 }
33
+ }
34
+ // if text start with tab
35
+ else if (text?.match(/^\t/)) {
36
+ const match = text.match(/^(\t+)(.+)?/)!;
37
+ // if no content, then next token
38
+ if (!match[2]) { i++; continue }
39
+ token.data = { deep: match[1]!.length - 1 }
40
+ }
41
+ // else break
42
+ else {i--; break};
43
+ }
44
+ // if token type equal empty line, jump to next token
45
+ if (tokenType === EMPTY_LINE) i++;
46
+ // if token deep number not equal latest deep of list
47
+ else if (token.data!.deep !== deep) {
48
+ // if bigger, push deeper list into current list item
49
+ if (token.data!.deep > deep) deepListMap.get(deep)?.items.at(-1)?.content.push(listGenerator(tokenType, token.data!.deep))
50
+ // else delete current deep cache and return to upper list
51
+ else { deepListMap.delete(deep); break; }
52
+ }
53
+ // if token type equal text line, convert this list to paragraph mode
54
+ else if (tokenType === TEXT_LINE) {
55
+ list.paragraph = true;
56
+ list.items.at(-1)?.content.push(parser.parse(token.content!))
57
+ i++
58
+ }
59
+ // if list type not equal, then finish loop
60
+ else if (tokenType !== type) {
61
+ deepListMap.delete(deep);
62
+ break;
63
+ }
64
+ // push list item
65
+ else {
66
+ list.items.push(ListItem([parser.parse(token.content!)]));
67
+ i++
68
+ }
69
+ }
70
+ return list;
71
+ }
72
+
73
+ return {
74
+ html: `${listGenerator(token.type, token.data!.deep)}`,
75
+ skipTokens: i
76
+ }
77
+ }
78
+
79
+ interface ListItem { content: (string | List)[], toString(): string }
80
+ const ListItem = (content: (string | List)[]): ListItem => ({
81
+ content: content,
82
+ toString() { return htmltag('li', this.content.join('')) }
83
+ })
84
+
85
+ interface List { tagname: string, items: ListItem[], paragraph: boolean, toString(): string }
86
+ const List = (tagname: 'ul' | 'ol', items: ListItem[]): List => ({
87
+ tagname: tagname,
88
+ items: items,
89
+ paragraph: false,
90
+ toString() {
91
+ if (this.paragraph) this.items.forEach(item => item.content.forEach((text, i) => isString(text) && (item.content[i] = htmltag('p', text))))
92
+ return htmltag(this.tagname, this.items.join(''))
93
+ }
94
+ })
95
+
96
+ setProcessor(parser, UNORDERED_LIST_ITEM, listProcessor);
97
+ setProcessor(parser, ORDERED_LIST_ITEM, listProcessor);
98
+ }
99
+
100
+ export const listTokenizer = (lexer: MarkdownLexer) => {
101
+ const listHandle = (matches: RegExpMatchArray) => {
102
+ const prefix = matches[0].split(/[-*]/)[0]!;
103
+ const spaces = prefix.match(/\s/)?.length ?? 0;
104
+ const tabs = prefix.match(/\t/)?.length ?? 0;
105
+ return ({
106
+ content: lexer.inlineTokenize(matches[1]!),
107
+ data: {
108
+ deep: Math.trunc(tabs + spaces / 2)
109
+ }
110
+ })
111
+ }
112
+ setBlockTokenizer(lexer, UNORDERED_LIST_ITEM, {
113
+ regex: /^(?:[\s\t]+)?[-*] (.+)/,
114
+ handle: listHandle
115
+ })
116
+
117
+ setBlockTokenizer(lexer, ORDERED_LIST_ITEM, {
118
+ regex: /^(?:[\s\t]+)?\d+\. (.+)/,
119
+ handle: listHandle
120
+ })
121
+ }
@@ -0,0 +1,67 @@
1
+ import { BLOCK, TABLE, TABLE_COLUMN, TABLE_ROW } from "#lib/type";
2
+ import { htmltag, setBlockTokenizer, setProcessor } from "#lib/util";
3
+ import type { BlockToken, MarkdownLexer } from "#structure/MarkdownLexer";
4
+ import type { MarkdownParser } from "#structure/MarkdownParser";
5
+ import { _Array_from } from "amateras/lib/native";
6
+
7
+ export const tableProcessor = (parser: MarkdownParser) => setProcessor(parser, TABLE, (token) => {
8
+ let thead = '';
9
+ let tbody = '';
10
+ let rowIndex = 0;
11
+ for (const row of token.content!) {
12
+ let rowHTML = '';
13
+ for (let i = 0; i < row.content!.length; i++) {
14
+ const col = row.content![i]!;
15
+ const align = token.data!.align[i];
16
+ const tagname = rowIndex === 0 ? 'th' : 'td';
17
+ rowHTML += `<${tagname} align="${align ?? 'left'}">${parser.parse(col.content!)}</${tagname}>`
18
+ }
19
+ if (rowIndex === 0) thead += htmltag('thead', htmltag('tr', rowHTML));
20
+ else tbody += htmltag('tr', rowHTML);
21
+ rowIndex++
22
+ }
23
+ tbody = htmltag('tbody', tbody);
24
+ return htmltag('table', thead + tbody)
25
+ })
26
+
27
+ export const tableTokenizer = (lexer: MarkdownLexer) => setBlockTokenizer(lexer, TABLE, {
28
+ regex: /\|(?:.+\|)+/,
29
+ handle(_, position, lines) {
30
+ const tokens: BlockToken[] = [];
31
+ const align = []
32
+ while (position < lines.length) {
33
+ const row: BlockToken = {
34
+ type: TABLE_ROW,
35
+ layout: BLOCK,
36
+ content: []
37
+ }
38
+ const line = lines[position]!;
39
+ const matches = _Array_from(line.matchAll(/\| ([^|]+)/g));
40
+ if (!matches.length) break;
41
+ for (const match of matches) {
42
+ const text = match[1]!;
43
+ const separator = text.match(/(:)?---+(:)?/);
44
+ if (separator) {
45
+ const [_, LEFT, RIGHT] = separator;
46
+ align.push(RIGHT ? LEFT ? 'center' : 'right' : 'left');
47
+ continue;
48
+ }
49
+ row.content.push({
50
+ type: TABLE_COLUMN,
51
+ content: lexer.inlineTokenize(text.trim()),
52
+ layout: BLOCK
53
+ })
54
+ }
55
+ if (row.content.length) tokens.push(row);
56
+ position++
57
+ }
58
+ return {
59
+ content: tokens,
60
+ data: { align },
61
+ multiLine: {
62
+ skip: position,
63
+ tokens: []
64
+ }
65
+ }
66
+ },
67
+ })
@@ -0,0 +1,19 @@
1
+ import { EMPTY_LINE, TEXT, TEXT_LINE } from "#lib/type";
2
+ import { htmltag, setProcessor } from "#lib/util";
3
+ import type { MarkdownParser } from "#structure/MarkdownParser";
4
+
5
+ export const textProcessor = (parser: MarkdownParser) => setProcessor(parser, TEXT, token => token.text!);
6
+
7
+ export const textLineProcessor = (parser: MarkdownParser) => setProcessor(parser, TEXT_LINE, (_, tokens) => {
8
+ let html = '';
9
+ let i = 0;
10
+ for (const token of tokens) {
11
+ if (token.type === EMPTY_LINE) break;
12
+ html += parser.parse(token.content!);
13
+ i++;
14
+ }
15
+ return {
16
+ html: htmltag('p', html),
17
+ skipTokens: i
18
+ };
19
+ })
@@ -9,8 +9,8 @@ import 'amateras/router';
9
9
  ## Create Route Map
10
10
  ```ts
11
11
  // create home page route
12
- const HomePage = $('route', '/', page => page
13
- .pageTitle('Home')
12
+ const HomePage = $.route(page => page
13
+ .pageTitle('Home | My Site') // set window title
14
14
  .content([
15
15
  $('h1').content('Home')
16
16
  ])
@@ -24,6 +24,9 @@ $(document.body).content([
24
24
  ])
25
25
  ```
26
26
 
27
+ > [!NOTE]
28
+ > Don't forget to `.listen()` the path change!
29
+
27
30
  ## Router Anchor
28
31
  Use `RouterAnchor` to prevent load page when open link by default `HTMLAnchorElement` element.
29
32
  ```ts
@@ -36,46 +39,137 @@ $('ra').content('Contact').href('/contact');
36
39
  - `$.forward()`: Forward page.
37
40
  - `$.back()`: Back page.
38
41
 
39
- ## Path Parameter and Query
42
+ ## Async Route
43
+ ```ts
44
+ // ./page/home_page.ts
45
+ export default $.route(page => {
46
+ return page
47
+ .content([
48
+ $('h1').content('Home Page')
49
+ ])
50
+ })
51
+
52
+ // ./router.ts
53
+ $('router')
54
+ .route('/about', () => import('./page/home_page'))
55
+ ```
56
+
57
+ ## Path Parameter
58
+ TypeScript will parse the parameters in the path. A parameter always starts with `:`; after the colon comes the name of the parameter. You can access these parameters using `Page.params`.
59
+
40
60
  ```ts
41
61
  $('router')
42
62
  .route('/user/@:username', page => {
43
- console.log(page.params)
63
+ console.log(page.params.username);
64
+ return page;
44
65
  })
45
- .route('/posts?search'), page => {
46
- console.log(page.query)
47
- }
48
66
  .listen()
49
67
  // simulate page open
50
- .resolve('/user/@amateras') // { username: 'amateras' }
51
- .resolve('/posts"') // { }
52
- .resolve('/posts?search=tsukimi&user') // { search: 'tsukimi', user: '' }
68
+ .resolve('/user/@amateras') // 'amateras'
69
+ ```
70
+
71
+ If you want separate router and route builder to different file, use generic parameter to define parameter name on `$.route` method.
72
+
73
+ ```ts
74
+ // greating_page.ts
75
+ export default $.route<['name']>(page => {
76
+ return page
77
+ .content(`Hello, ${name}`)
78
+ })
79
+
80
+ // router.ts
81
+ $('router')
82
+ .route('/greating', () => import('./greating_page'))
83
+ // ^ typescript will report an error, the route builder requires the 'name' parameter
84
+
85
+ .route('/greating/:name', () => import('./greating_page'))
86
+ // ^ pass
87
+ ```
88
+
89
+ ### Optional Parameter
90
+ Sometimes a parameter can be optional. You can define an optional parameter by adding a `?` sign after the name of the parameter.
91
+
92
+ ```ts
93
+ const userPage = $.route<'photoId', 'postId?'>(page => {
94
+ return page
95
+ .content([
96
+ `Photo ID: ${page.params.photoId}`, // photoId: string
97
+ `Post ID: ${page.params.postId}` // postId: string | undefined
98
+ ])
99
+ })
100
+
101
+ $('router')
102
+ .route('/photos/:photoId', userPage)
103
+ // ^ pass
104
+ .route('/posts/:postId/photos/:photoId', userPage)
105
+ // ^ pass
53
106
  ```
54
107
 
55
108
  ## Nesting Route
109
+ The `Router` element is the container of `Page` elements; we can achieve nested routes by creating a `Router` and appending it inside a `Page`.
56
110
  ```ts
57
- const ContactPage = $('route', '/contact', page => page
111
+ const ContactPage = $.route(page => page
58
112
  .pageTitle('Home')
59
113
  .content([
60
114
  $('h1').content('Contact'),
61
- // append router with page, nested routes will show in this router
62
- $('router', page)
115
+ $('router', page)
116
+ // here is where the magic happens,
117
+ // pass the Page into router arguments
63
118
  ])
64
119
  )
120
+ ```
65
121
 
66
- const ContactEmailPage = $('route', '/contact/email', () => 'amateras@example.com')
122
+ Then, we need to declare the router map like this:
67
123
 
124
+ ```ts
68
125
  $('router')
69
126
  .route('/', HomePage)
70
127
  .route('/contact', ContactPage, route => route
128
+ // we can define more child routes inside this '/contact' route!
71
129
  .route('/', () => 'My name is Amateras.')
72
130
  .route('/phone', () => '0123456789')
73
- .route('/email', ContactEmailPage)
74
131
  )
75
132
  ```
76
133
 
77
- ## Async Route
134
+ ### Alias Path
135
+
136
+ Sometimes a page doesn't have just one path. We can declare alias paths for one route!
137
+
78
138
  ```ts
79
139
  $('router')
80
- .route('/about', () => import('./pages/about.ts'))
140
+ .route('/', HomePage, route => route
141
+ .alias('/home')
142
+ .alias('/the/another/way/to/home')
143
+ // ... more alias path
144
+ )
145
+ ```
146
+
147
+ What if the main path includes parameters? Here is how to deal with it:
148
+
149
+ ```ts
150
+ $('router')
151
+ .route('/users/:username', UserPage, route => route
152
+ .alias('/u/:username')
153
+ .alias('/profile')
154
+ // ^ typescript will report an error
155
+
156
+ .alias('/profile', { username: 'amateras' })
157
+ // ^ pass, the params required is fulfilled
158
+
159
+ .alias('/profile', () => { return { username: getUsername() } })
160
+ // ^ you can even pass an arrow function!
161
+ )
162
+ ```
163
+
164
+ ### Group Path
165
+
166
+ Do a lot of paths share the same prefix? We provide the solution:
167
+
168
+ ```ts
169
+ $('router')
170
+ .route('/', HomePage)
171
+ .group('/search', route => route
172
+ .route('/', SearchPage)
173
+ .route('/users', SearchUserPage)
174
+ )
81
175
  ```
@@ -0,0 +1,10 @@
1
+ {
2
+ "name": "@amateras/router",
3
+ "peerDependencies": {
4
+ "amateras": "../../"
5
+ },
6
+ "imports": {
7
+ "#node/*": "./src/node/*.ts",
8
+ "#structure/*": "./src/structure/*.ts"
9
+ }
10
+ }