@truto/truto-jsonata 1.0.13 → 1.0.14
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- package/dist/main.cjs +34 -18
- package/dist/main.cjs.map +1 -1
- package/dist/module.js +35 -19
- package/dist/module.js.map +1 -1
- package/package.json +1 -1
package/dist/main.cjs
CHANGED
@@ -458,6 +458,7 @@ var $90c566adb85cb52e$export$2e2bcd8739ae039 = $90c566adb85cb52e$var$convertMark
 
 
 
+
 const $bc8e7b2fdb012b3a$var$parseMarkedTokenToNotionRequest = (tokens, acc = [])=>{
     return (0, $dxT2C$lodashes.reduce)(tokens, (acc, token)=>{
         const childTokens = token.type === "blockquote" ? (0, $dxT2C$lodashes.get)(token, "tokens[0].tokens") : (0, $dxT2C$lodashes.get)(token, "tokens");
@@ -519,17 +520,25 @@ const $bc8e7b2fdb012b3a$var$parseMarkedTokenToNotionRequest = (tokens, acc = [])
             return acc;
         }
         if (token.type === "code") {
+            const textSplitByNewLine = token.text.split("\n");
+            const chunksOfChunks = (0, $dxT2C$lodashes.compact)((0, $dxT2C$lodashes.map)(textSplitByNewLine, (chunk)=>$bc8e7b2fdb012b3a$var$chunkText(chunk)));
+            const chunksWithNewLines = (0, $dxT2C$lodashes.flattenDeep)((0, $233848446ca33fe6$export$2e2bcd8739ae039)((0, $dxT2C$lodashes.map)(chunksOfChunks, (chunk)=>{
+                return (0, $dxT2C$lodashes.map)(chunk, (_chunk)=>({
+                    type: "text",
+                    text: {
+                        content: _chunk
+                    }
+                }));
+            }), {
+                type: "text",
+                text: {
+                    content: "\n\n"
+                }
+            }));
             acc.push({
                 type: "code",
                 code: {
-                    rich_text: [
-                        {
-                            type: "text",
-                            text: {
-                                content: token.text
-                            }
-                        }
-                    ],
+                    rich_text: chunksWithNewLines,
                     language: token.lang || "plain text"
                 }
             });
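
In plainer terms: a code block's text is no longer pushed to Notion as a single rich_text object. The new code splits the block on "\n", chunks each line to at most 2,000 characters (the Notion API's limit for a rich_text text.content value), drops empty lines (the chunker returns null for them, which compact removes), and flattens everything back into one rich_text array, with what appears to be a "\n\n" text object interleaved between lines. A de-minified sketch of the same logic follows; the bundler-mangled $233848446ca33fe6$export$2e2bcd8739ae039 helper is assumed here to intersperse a separator between array items, and the intersperse name is a hypothetical stand-in, not the package's actual export.

import { compact, map, flattenDeep } from 'lodash-es';

// Hypothetical stand-in for the mangled helper: puts `separator`
// between consecutive items of `items`.
const intersperse = (items, separator) =>
    items.flatMap((item, i) => (i < items.length - 1 ? [item, separator] : [item]));

// String.prototype.match returns null for an empty line,
// which compact() then removes.
const chunkText = (text, numChars = 2000) =>
    text.match(new RegExp(`.{1,${numChars}}`, 'g'));

const codeBlockToRichText = (codeText) => {
    const lines = codeText.split('\n');
    const chunksPerLine = compact(map(lines, (line) => chunkText(line)));
    // One { type: "text" } object per chunk, with a "\n\n" text object
    // between the chunks of consecutive lines.
    return flattenDeep(
        intersperse(
            map(chunksPerLine, (chunks) =>
                map(chunks, (content) => ({ type: 'text', text: { content } }))
            ),
            { type: 'text', text: { content: '\n\n' } }
        )
    );
};
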
@@ -632,20 +641,27 @@ const $bc8e7b2fdb012b3a$var$parseMarkedTokenToNotionRequest = (tokens, acc = [])
         const text = (0, $dxT2C$lodashes.get)(token, "text");
         if (!text) return acc;
         const textToInsert = (0, $5a7cb266718aeaae$export$2e2bcd8739ae039)(text);
-        acc.push({
-            type: "text",
-            text: {
-                content: textToInsert
-            },
-            ...token.type === "codespan" ? {
-                annotations: {
-                    code: true
-                }
-            } : {}
+        // chunk the text into 2000 character chunks, should handle emojis and multi-byte characters
+        const chunks = $bc8e7b2fdb012b3a$var$chunkText(textToInsert);
+        (0, $dxT2C$lodashes.each)(chunks, (chunk)=>{
+            acc.push({
+                type: "text",
+                text: {
+                    content: chunk
+                },
+                ...token.type === "codespan" ? {
+                    annotations: {
+                        code: true
+                    }
+                } : {}
+            });
         });
         return acc;
     }, acc);
 };
+const $bc8e7b2fdb012b3a$var$chunkText = (text, numChars = 2000)=>{
+    return text.match(new RegExp(`.{1,${numChars}}`, "g"));
+};
 const $bc8e7b2fdb012b3a$var$convertMarkdownToNotion = (text)=>{
     const tokens = (0, $dxT2C$marked.Lexer).lex(text);
     const parsedTokens = $bc8e7b2fdb012b3a$var$parseMarkedTokenToNotionRequest(tokens);