bbcode-compiler 0.1.8 → 0.1.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/generator/Generator.d.ts +0 -1
- package/dist/generator/Generator.d.ts.map +1 -1
- package/dist/generator/transforms/Transform.d.ts +0 -1
- package/dist/generator/transforms/htmlTransforms.d.ts +0 -1
- package/dist/generator/utils/getTagImmediateAttrVal.d.ts +0 -1
- package/dist/generator/utils/getTagImmediateText.d.ts +0 -1
- package/dist/generator/utils/getTagImmediateText.d.ts.map +1 -1
- package/dist/generator/utils/getWidthHeightAttr.d.ts +0 -1
- package/dist/generator/utils/isOrderedList.d.ts +0 -1
- package/dist/index.js +104 -165
- package/dist/index.js.map +1 -1
- package/dist/index.umd.cjs +107 -168
- package/dist/index.umd.cjs.map +1 -1
- package/dist/lexer/Lexer.d.ts +0 -1
- package/dist/lexer/Token.d.ts +0 -1
- package/dist/lexer/Token.d.ts.map +1 -1
- package/dist/lexer/TokenType.d.ts +1 -13
- package/dist/lexer/TokenType.d.ts.map +1 -1
- package/dist/parser/AstNode.d.ts +10 -18
- package/dist/parser/AstNode.d.ts.map +1 -1
- package/dist/parser/Parser.d.ts +0 -1
- package/dist/parser/Parser.d.ts.map +1 -1
- package/dist/parser/nodeIsType.d.ts +8 -9
- package/dist/parser/nodeIsType.d.ts.map +1 -1
- package/package.json +83 -83
- package/src/generator/Generator.ts +4 -4
- package/src/generator/utils/getTagImmediateText.ts +3 -3
- package/src/lexer/Lexer.ts +8 -8
- package/src/lexer/Token.ts +12 -12
- package/src/lexer/TokenType.ts +39 -40
- package/src/parser/AstNode.ts +29 -28
- package/src/parser/Parser.ts +26 -26
- package/src/parser/nodeIsType.ts +8 -8
package/src/parser/Parser.ts
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { htmlTransforms } from '../generator/transforms/htmlTransforms.js'
|
|
2
|
-
import { stringifyTokens, Token } from '../lexer/Token.js'
|
|
3
|
-
import { isStringToken, TokenType } from '../lexer/TokenType.js'
|
|
4
|
-
import { RootNode, AttrNode, TextNode, LinebreakNode, StartTagNode, EndTagNode, TagNode, AstNode, AstNodeType } from './AstNode.js'
|
|
2
|
+
import { stringifyTokens, type Token } from '../lexer/Token.js'
|
|
3
|
+
import { isStringToken } from '../lexer/TokenType.js'
|
|
4
|
+
import { RootNode, AttrNode, TextNode, LinebreakNode, StartTagNode, EndTagNode, TagNode, AstNode } from './AstNode.js'
|
|
5
5
|
import { nodeIsType } from './nodeIsType.js'
|
|
6
6
|
|
|
7
7
|
export class Parser {
|
|
@@ -33,7 +33,7 @@ export class Parser {
|
|
|
33
33
|
break
|
|
34
34
|
}
|
|
35
35
|
|
|
36
|
-
if (endOnQuotes && (tokens[idx].type === TokenType.XSS_S_QUOTE || tokens[idx].type === TokenType.XSS_D_QUOTE)) {
|
|
36
|
+
if (endOnQuotes && (tokens[idx].type === 'XSS_S_QUOTE' || tokens[idx].type === 'XSS_D_QUOTE')) {
|
|
37
37
|
break
|
|
38
38
|
}
|
|
39
39
|
|
|
@@ -56,13 +56,13 @@ export class Parser {
|
|
|
56
56
|
|
|
57
57
|
if (spaceIdx >= 0) {
|
|
58
58
|
const oldToken: Token = {
|
|
59
|
-
type: TokenType.STR,
|
|
59
|
+
type: 'STR',
|
|
60
60
|
offset: tokens[idx].offset,
|
|
61
61
|
length: spaceIdx,
|
|
62
62
|
}
|
|
63
63
|
|
|
64
64
|
const newToken: Token = {
|
|
65
|
-
type: TokenType.STR,
|
|
65
|
+
type: 'STR',
|
|
66
66
|
offset: tokens[idx].offset + spaceIdx,
|
|
67
67
|
length: tokens[idx].length - spaceIdx,
|
|
68
68
|
}
|
|
@@ -90,10 +90,10 @@ export class Parser {
|
|
|
90
90
|
|
|
91
91
|
const attrNode = new AttrNode()
|
|
92
92
|
|
|
93
|
-
if (tokens[idx].type === TokenType.EQUALS && isStringToken(tokens[idx + 1].type)) { // [Tag = VAL ...] or [Tag = "VAL"]
|
|
93
|
+
if (tokens[idx].type === 'EQUALS' && isStringToken(tokens[idx + 1].type)) { // [Tag = VAL ...] or [Tag = "VAL"]
|
|
94
94
|
idx += 1 // Consume EQUALS
|
|
95
95
|
|
|
96
|
-
const openedWithQuotes = tokens[idx].type === TokenType.XSS_S_QUOTE || tokens[idx].type === TokenType.XSS_D_QUOTE
|
|
96
|
+
const openedWithQuotes = tokens[idx].type === 'XSS_S_QUOTE' || tokens[idx].type === 'XSS_D_QUOTE'
|
|
97
97
|
if (openedWithQuotes) {
|
|
98
98
|
idx += 1
|
|
99
99
|
}
|
|
@@ -102,19 +102,19 @@ export class Parser {
|
|
|
102
102
|
attrNode.addChild(valNode)
|
|
103
103
|
|
|
104
104
|
if (openedWithQuotes) {
|
|
105
|
-
if (tokens[idx].type !== TokenType.XSS_S_QUOTE && tokens[idx].type !== TokenType.XSS_D_QUOTE) {
|
|
105
|
+
if (tokens[idx].type !== 'XSS_S_QUOTE' && tokens[idx].type !== 'XSS_D_QUOTE') {
|
|
106
106
|
return null
|
|
107
107
|
}
|
|
108
108
|
|
|
109
109
|
idx += 1
|
|
110
110
|
}
|
|
111
|
-
} else if (isStringToken(tokens[idx].type) && tokens[idx + 1].type === TokenType.EQUALS && isStringToken(tokens[idx + 2].type)) { // [Tag KEY = VAL ...] or [Tag KEY = "VAL" ...]
|
|
111
|
+
} else if (isStringToken(tokens[idx].type) && tokens[idx + 1].type === 'EQUALS' && (idx + 2 < tokens.length && isStringToken(tokens[idx + 2].type))) { // [Tag KEY = VAL ...] or [Tag KEY = "VAL" ...]
|
|
112
112
|
const keyNode = parseText()
|
|
113
113
|
attrNode.addChild(keyNode)
|
|
114
114
|
|
|
115
115
|
idx += 1 // Consume EQUALS
|
|
116
116
|
|
|
117
|
-
const openedWithQuotes = tokens[idx].type === TokenType.XSS_S_QUOTE || tokens[idx].type === TokenType.XSS_D_QUOTE
|
|
117
|
+
const openedWithQuotes = tokens[idx].type === 'XSS_S_QUOTE' || tokens[idx].type === 'XSS_D_QUOTE'
|
|
118
118
|
if (openedWithQuotes) {
|
|
119
119
|
idx += 1
|
|
120
120
|
}
|
|
@@ -122,7 +122,7 @@ export class Parser {
|
|
|
122
122
|
const valNode = parseText(openedWithQuotes, true)
|
|
123
123
|
|
|
124
124
|
if (openedWithQuotes) {
|
|
125
|
-
if (tokens[idx].type !== TokenType.XSS_S_QUOTE && tokens[idx].type !== TokenType.XSS_D_QUOTE) {
|
|
125
|
+
if (tokens[idx].type !== 'XSS_S_QUOTE' && tokens[idx].type !== 'XSS_D_QUOTE') {
|
|
126
126
|
return null
|
|
127
127
|
}
|
|
128
128
|
|
|
@@ -130,7 +130,7 @@ export class Parser {
|
|
|
130
130
|
}
|
|
131
131
|
|
|
132
132
|
attrNode.addChild(valNode)
|
|
133
|
-
} else if (isStringToken(tokens[idx].type) && tokens[idx + 1].type !== TokenType.EQUALS) { // [Tag VAL ...]
|
|
133
|
+
} else if (isStringToken(tokens[idx].type) && tokens[idx + 1].type !== 'EQUALS') { // [Tag VAL ...]
|
|
134
134
|
const valNode = parseText()
|
|
135
135
|
attrNode.addChild(valNode)
|
|
136
136
|
} else {
|
|
@@ -145,7 +145,7 @@ export class Parser {
|
|
|
145
145
|
return null
|
|
146
146
|
}
|
|
147
147
|
|
|
148
|
-
if (tokens[idx].type !== TokenType.L_BRACKET) {
|
|
148
|
+
if (tokens[idx].type !== 'L_BRACKET') {
|
|
149
149
|
return null
|
|
150
150
|
}
|
|
151
151
|
|
|
@@ -169,7 +169,7 @@ export class Parser {
|
|
|
169
169
|
attrNodes.push(attrNode)
|
|
170
170
|
}
|
|
171
171
|
|
|
172
|
-
if (tokens[idx].type !== TokenType.R_BRACKET) {
|
|
172
|
+
if (tokens[idx].type !== 'R_BRACKET') {
|
|
173
173
|
return null
|
|
174
174
|
}
|
|
175
175
|
|
|
@@ -182,7 +182,7 @@ export class Parser {
|
|
|
182
182
|
}
|
|
183
183
|
|
|
184
184
|
// If L_BRACKET is followed by BACKSLASH, then it must be EndTag or is invalid
|
|
185
|
-
if (tokens[idx + 1].type === TokenType.BACKSLASH) {
|
|
185
|
+
if (tokens[idx + 1].type === 'BACKSLASH') {
|
|
186
186
|
const startIdx = idx
|
|
187
187
|
idx += 1 // Consume L_BRACKET
|
|
188
188
|
idx += 1 // Consume BACKSLASH
|
|
@@ -192,7 +192,7 @@ export class Parser {
|
|
|
192
192
|
return null
|
|
193
193
|
}
|
|
194
194
|
|
|
195
|
-
if (tokens[idx].type !== TokenType.R_BRACKET) {
|
|
195
|
+
if (tokens[idx].type !== 'R_BRACKET') {
|
|
196
196
|
return null
|
|
197
197
|
}
|
|
198
198
|
|
|
@@ -211,7 +211,7 @@ export class Parser {
|
|
|
211
211
|
const root = new RootNode()
|
|
212
212
|
|
|
213
213
|
while (idx < tokens.length) {
|
|
214
|
-
if (tokens[idx].type === TokenType.L_BRACKET) {
|
|
214
|
+
if (tokens[idx].type === 'L_BRACKET') {
|
|
215
215
|
const startIdx = idx
|
|
216
216
|
const tagNode = parseTag()
|
|
217
217
|
|
|
@@ -223,14 +223,14 @@ export class Parser {
|
|
|
223
223
|
const textNode = new TextNode(str)
|
|
224
224
|
root.addChild(textNode)
|
|
225
225
|
}
|
|
226
|
-
} else if (tokens[idx].type === TokenType.LINEBREAK) {
|
|
226
|
+
} else if (tokens[idx].type === 'LINEBREAK') {
|
|
227
227
|
idx += 1 // Consume LINEBREAK
|
|
228
228
|
root.addChild(new LinebreakNode())
|
|
229
229
|
} else {
|
|
230
230
|
const startIdx = idx
|
|
231
231
|
|
|
232
232
|
// Advance until we see the start of another RootNode's child (TagNode or LinebreakNode)
|
|
233
|
-
while (idx < tokens.length && tokens[idx].type !== TokenType.L_BRACKET && tokens[idx].type !== TokenType.LINEBREAK) {
|
|
233
|
+
while (idx < tokens.length && tokens[idx].type !== 'L_BRACKET' && tokens[idx].type !== 'LINEBREAK') {
|
|
234
234
|
idx += 1
|
|
235
235
|
}
|
|
236
236
|
|
|
@@ -258,7 +258,7 @@ export class Parser {
|
|
|
258
258
|
for (let i = 0; i < rootNode.children.length; i++) {
|
|
259
259
|
const child = rootNode.children[i]
|
|
260
260
|
|
|
261
|
-
if (nodeIsType(child, AstNodeType.StartTagNode)) {
|
|
261
|
+
if (nodeIsType(child, 'StartTagNode')) {
|
|
262
262
|
const endTag = this.findMatchingEndTag(rootNode.children, i, child.tagName)
|
|
263
263
|
const isStandalone = this.standaloneTags.has(child.tagName)
|
|
264
264
|
|
|
@@ -278,13 +278,13 @@ export class Parser {
|
|
|
278
278
|
// If no end tag exists, then treat tag as string literal
|
|
279
279
|
transformedRoot.addChild(new TextNode(child.ogTag))
|
|
280
280
|
}
|
|
281
|
-
} else if (nodeIsType(child, AstNodeType.EndTagNode)) {
|
|
281
|
+
} else if (nodeIsType(child, 'EndTagNode')) {
|
|
282
282
|
// Encountered end tag when we're not expecting an end tag so we treat it as a string literal
|
|
283
283
|
transformedRoot.addChild(new TextNode(child.ogTag))
|
|
284
|
-
} else if (nodeIsType(child, AstNodeType.TextNode)) {
|
|
284
|
+
} else if (nodeIsType(child, 'TextNode')) {
|
|
285
285
|
// Normal text nodes get copied
|
|
286
286
|
transformedRoot.addChild(child)
|
|
287
|
-
} else if (nodeIsType(child, AstNodeType.LinebreakNode)) {
|
|
287
|
+
} else if (nodeIsType(child, 'LinebreakNode')) {
|
|
288
288
|
// Linebreak nodes get copied
|
|
289
289
|
transformedRoot.addChild(child)
|
|
290
290
|
} else {
|
|
@@ -303,8 +303,8 @@ export class Parser {
|
|
|
303
303
|
for (let i = startIdx; i < siblings.length; i++) {
|
|
304
304
|
const sibling = siblings[i]
|
|
305
305
|
const isEndTag =
|
|
306
|
-
(nodeIsType(sibling, AstNodeType.LinebreakNode) && this.linebreakTerminatedTags.has(tagName)) ||
|
|
307
|
-
(nodeIsType(sibling, AstNodeType.EndTagNode) && sibling.tagName === tagName)
|
|
306
|
+
(nodeIsType(sibling, 'LinebreakNode') && this.linebreakTerminatedTags.has(tagName)) ||
|
|
307
|
+
(nodeIsType(sibling, 'EndTagNode') && sibling.tagName === tagName)
|
|
308
308
|
|
|
309
309
|
if (isEndTag) {
|
|
310
310
|
return {
|
package/src/parser/nodeIsType.ts
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
|
-
import { AstNodeType, AstNode, AttrNode, RootNode, TagNode, TextNode, LinebreakNode, EndTagNode, StartTagNode } from './AstNode.js'
|
|
1
|
+
import type { AstNode, AttrNode, RootNode, TagNode, TextNode, LinebreakNode, EndTagNode, StartTagNode } from './AstNode.js'
|
|
2
2
|
|
|
3
3
|
type AstMap = {
|
|
4
|
-
[AstNodeType.RootNode]: RootNode
|
|
5
|
-
[AstNodeType.LinebreakNode]: LinebreakNode
|
|
6
|
-
[AstNodeType.TextNode]: TextNode
|
|
7
|
-
[AstNodeType.TagNode]: TagNode
|
|
8
|
-
[AstNodeType.StartTagNode]: StartTagNode
|
|
9
|
-
[AstNodeType.EndTagNode]: EndTagNode
|
|
10
|
-
[AstNodeType.AttrNode]: AttrNode
|
|
4
|
+
['RootNode']: RootNode
|
|
5
|
+
['LinebreakNode']: LinebreakNode
|
|
6
|
+
['TextNode']: TextNode
|
|
7
|
+
['TagNode']: TagNode
|
|
8
|
+
['StartTagNode']: StartTagNode
|
|
9
|
+
['EndTagNode']: EndTagNode
|
|
10
|
+
['AttrNode']: AttrNode
|
|
11
11
|
}
|
|
12
12
|
|
|
13
13
|
export function nodeIsType<T extends keyof AstMap>(node: AstNode, nodeType: T): node is AstMap[T] {
|