@ekz/lexical-markdown 0.40.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/EkzLexicalMarkdown.dev.js +1672 -0
- package/EkzLexicalMarkdown.dev.mjs +1647 -0
- package/EkzLexicalMarkdown.js +11 -0
- package/EkzLexicalMarkdown.mjs +35 -0
- package/EkzLexicalMarkdown.node.mjs +33 -0
- package/EkzLexicalMarkdown.prod.js +9 -0
- package/EkzLexicalMarkdown.prod.mjs +9 -0
- package/LICENSE +21 -0
- package/LexicalMarkdown.js.flow +135 -0
- package/MarkdownExport.d.ts +13 -0
- package/MarkdownImport.d.ts +18 -0
- package/MarkdownShortcuts.d.ts +10 -0
- package/MarkdownTransformers.d.ts +155 -0
- package/README.md +96 -0
- package/importTextFormatTransformer.d.ts +21 -0
- package/importTextMatchTransformer.d.ts +20 -0
- package/importTextTransformers.d.ts +24 -0
- package/index.d.ts +23 -0
- package/package.json +47 -0
- package/utils.d.ts +35 -0
package/EkzLexicalMarkdown.dev.js
@@ -0,0 +1,1672 @@
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

'use strict';

var lexical = require('@ekz/lexical');
var lexicalList = require('@ekz/lexical-list');
var lexicalRichText = require('@ekz/lexical-rich-text');
var lexicalUtils = require('@ekz/lexical-utils');
var lexicalCode = require('@ekz/lexical-code');
var lexicalLink = require('@ekz/lexical-link');

/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

function indexBy(list, callback) {
  const index = {};
  for (const item of list) {
    const key = callback(item);
    if (!key) {
      continue;
    }
    if (index[key]) {
      index[key].push(item);
    } else {
      index[key] = [item];
    }
  }
  return index;
}
function transformersByType(transformers) {
  const byType = indexBy(transformers, t => t.type);
  return {
    element: byType.element || [],
    multilineElement: byType['multiline-element'] || [],
    textFormat: byType['text-format'] || [],
    textMatch: byType['text-match'] || []
  };
}
const PUNCTUATION_OR_SPACE = /[!-/:-@[-`{-~\s]/;
const WHITESPACE = /[ \t\n\r\f]/;
const PUNCTUATION = /[!"#$%&'()*+,\-./:;<=>?@[\]^_`{|}~]/;
const MARKDOWN_EMPTY_LINE_REG_EXP = /^\s{0,3}$/;
function isEmptyParagraph(node) {
  if (!lexical.$isParagraphNode(node)) {
    return false;
  }
  const firstChild = node.getFirstChild();
  return firstChild == null || node.getChildrenSize() === 1 && lexical.$isTextNode(firstChild) && MARKDOWN_EMPTY_LINE_REG_EXP.test(firstChild.getTextContent());
}

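// Illustrative sketch of how the grouping above behaves (assumes the
// TRANSFORMERS array that this file defines further down):
//
//   const byType = transformersByType(TRANSFORMERS);
//   // byType.element          -> [HEADING, QUOTE, UNORDERED_LIST, ORDERED_LIST]
//   // byType.multilineElement -> [CODE]
//   // byType.textFormat       -> [INLINE_CODE, BOLD_ITALIC_STAR, ..., STRIKETHROUGH]
//   // byType.textMatch        -> [LINK]
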
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */


/**
 * Renders string from markdown. The selection is moved to the start after the operation.
 */
function createMarkdownExport(transformers, shouldPreserveNewLines = false) {
  const byType = transformersByType(transformers);
  const elementTransformers = [...byType.multilineElement, ...byType.element];
  const isNewlineDelimited = !shouldPreserveNewLines;

  // Export only uses text formats that are responsible for single format
  // e.g. it will filter out *** (bold, italic) and instead use separate ** and *
  const textFormatTransformers = byType.textFormat.filter(transformer => transformer.format.length === 1)
  // Make sure all text transformers that contain 'code' in their format are at the end of the array. Otherwise, formatted code like
  // <strong><code>code</code></strong> will be exported as `**Bold Code**`, as the code format will be applied first, and the bold format
  // will be applied second and thus skipped entirely, as the code format will prevent any further formatting.
  .sort((a, b) => {
    return Number(a.format.includes('code')) - Number(b.format.includes('code'));
  });
  return node => {
    const output = [];
    const children = (node || lexical.$getRoot()).getChildren();
    for (let i = 0; i < children.length; i++) {
      const child = children[i];
      const result = exportTopLevelElements(child, elementTransformers, textFormatTransformers, byType.textMatch);
      if (result != null) {
        output.push(
        // separate consecutive group of texts with a line break: eg. ["hello", "world"] -> ["hello", "/nworld"]
        isNewlineDelimited && i > 0 && !isEmptyParagraph(child) && !isEmptyParagraph(children[i - 1]) ? '\n'.concat(result) : result);
      }
    }
    // Ensure consecutive groups of texts are at least \n\n apart while each empty paragraph render as a newline.
    // Eg. ["hello", "", "", "hi", "\nworld"] -> "hello\n\n\nhi\n\nworld"
    return output.join('\n');
  };
}
function exportTopLevelElements(node, elementTransformers, textTransformersIndex, textMatchTransformers) {
  for (const transformer of elementTransformers) {
    if (!transformer.export) {
      continue;
    }
    const result = transformer.export(node, _node => exportChildren(_node, textTransformersIndex, textMatchTransformers));
    if (result != null) {
      return result;
    }
  }
  if (lexical.$isElementNode(node)) {
    return exportChildren(node, textTransformersIndex, textMatchTransformers);
  } else if (lexical.$isDecoratorNode(node)) {
    return node.getTextContent();
  } else {
    return null;
  }
}
function exportChildren(node, textTransformersIndex, textMatchTransformers, unclosedTags, unclosableTags) {
  const output = [];
  const children = node.getChildren();
  // keep track of unclosed tags from the very beginning
  if (!unclosedTags) {
    unclosedTags = [];
  }
  if (!unclosableTags) {
    unclosableTags = [];
  }
  mainLoop: for (const child of children) {
    for (const transformer of textMatchTransformers) {
      if (!transformer.export) {
        continue;
      }
      const result = transformer.export(child, parentNode => exportChildren(parentNode, textTransformersIndex, textMatchTransformers, unclosedTags,
      // Add current unclosed tags to the list of unclosable tags - we don't want nested tags from
      // textmatch transformers to close the outer ones, as that may result in invalid markdown.
      // E.g. **text [text**](https://lexical.io)
      // is invalid markdown, as the closing ** is inside the link.
      //
      [...unclosableTags, ...unclosedTags]), (textNode, textContent) => exportTextFormat(textNode, textContent, textTransformersIndex, unclosedTags, unclosableTags));
      if (result != null) {
        output.push(result);
        continue mainLoop;
      }
    }
    if (lexical.$isLineBreakNode(child)) {
      output.push('\n');
    } else if (lexical.$isTextNode(child)) {
      output.push(exportTextFormat(child, child.getTextContent(), textTransformersIndex, unclosedTags, unclosableTags));
    } else if (lexical.$isElementNode(child)) {
      // empty paragraph returns ""
      output.push(exportChildren(child, textTransformersIndex, textMatchTransformers, unclosedTags, unclosableTags));
    } else if (lexical.$isDecoratorNode(child)) {
      output.push(child.getTextContent());
    }
  }
  return output.join('');
}
function exportTextFormat(node, textContent, textTransformers,
// unclosed tags include the markdown tags that haven't been closed yet, and their associated formats
unclosedTags, unclosableTags) {
  // This function handles the case of a string looking like this: "   foo   "
  // Where it would be invalid markdown to generate: "**   foo   **"
  // If the node has no format, we use the original text.
  // Otherwise, we escape leading and trailing whitespaces to their corresponding code points,
  // ensuring the returned string maintains its original formatting, e.g., "**   foo   **".
  let output = node.getFormat() === 0 ? textContent : escapeLeadingAndTrailingWhitespaces(textContent);
  if (!node.hasFormat('code')) {
    // Escape any markdown characters in the text content
    output = output.replace(/([*_`~\\])/g, '\\$1');
  }

  // the opening tags to be added to the result
  let openingTags = '';
  // the closing tags to be added to the result
  let closingTagsBefore = '';
  let closingTagsAfter = '';
  const prevNode = getTextSibling(node, true);
  const nextNode = getTextSibling(node, false);
  const applied = new Set();
  for (const transformer of textTransformers) {
    const format = transformer.format[0];
    const tag = transformer.tag;

    // dedup applied formats
    if (hasFormat(node, format) && !applied.has(format)) {
      // Multiple tags might be used for the same format (*, _)
      applied.add(format);

      // append the tag to openingTags, if it's not applied to the previous nodes,
      // or the nodes before that (which would result in an unclosed tag)
      if (!hasFormat(prevNode, format) || !unclosedTags.find(element => element.tag === tag)) {
        unclosedTags.push({
          format,
          tag
        });
        openingTags += tag;
      }
    }
  }

  // close any tags in the same order they were applied, if necessary
  for (let i = 0; i < unclosedTags.length; i++) {
    const nodeHasFormat = hasFormat(node, unclosedTags[i].format);
    const nextNodeHasFormat = hasFormat(nextNode, unclosedTags[i].format);

    // prevent adding closing tag if next sibling will do it
    if (nodeHasFormat && nextNodeHasFormat) {
      continue;
    }
    const unhandledUnclosedTags = [...unclosedTags]; // Shallow copy to avoid modifying the original array

    while (unhandledUnclosedTags.length > i) {
      const unclosedTag = unhandledUnclosedTags.pop();

      // If tag is unclosable, don't close it and leave it in the original array,
      // So that it can be closed when it's no longer unclosable
      if (unclosableTags && unclosedTag && unclosableTags.find(element => element.tag === unclosedTag.tag)) {
        continue;
      }
      if (unclosedTag && typeof unclosedTag.tag === 'string') {
        if (!nodeHasFormat) {
          // Handles cases where the tag has not been closed before, e.g. if the previous node
          // was a text match transformer that did not account for closing tags of the next node (e.g. a link)
          closingTagsBefore += unclosedTag.tag;
        } else if (!nextNodeHasFormat) {
          closingTagsAfter += unclosedTag.tag;
        }
      }
      // Mutate the original array to remove the closed tag
      unclosedTags.pop();
    }
    break;
  }
  output = openingTags + output + closingTagsAfter;
  // Replace trimmed version of textContent ensuring surrounding whitespace is not modified
  return closingTagsBefore + output;
}
function getTextSibling(node, backward) {
  const sibling = backward ? node.getPreviousSibling() : node.getNextSibling();
  if (lexical.$isTextNode(sibling)) {
    return sibling;
  }
  return null;
}
function hasFormat(node, format) {
  return lexical.$isTextNode(node) && node.hasFormat(format);
}
function escapeLeadingAndTrailingWhitespaces(textContent) {
  return textContent.replace(/^\s+|\s+$/g, match => {
    return [...match].map(char => '&#' + char.codePointAt(0) + ';').join('');
  });
}

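// Minimal usage sketch for the export factory above (illustrative only;
// assumes a LexicalEditor instance `editor` created elsewhere and uses the
// TRANSFORMERS array defined further down in this file):
//
//   const $exportToMarkdown = createMarkdownExport(TRANSFORMERS);
//   editor.getEditorState().read(() => {
//     const markdown = $exportToMarkdown(); // exports $getRoot() by default
//   });
//
// With shouldPreserveNewLines = true the extra blank-line separator between
// consecutive blocks is not inserted, so empty paragraphs round-trip as-is.
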
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

function findOutermostTextFormatTransformer(textNode, textFormatTransformersIndex) {
  const textContent = textNode.getTextContent();

  // Find code span first. Emphasis delimiters inside inline elements (e.g., code spans)
  // should not be processed. Currently only code spans are handled; other inline elements
  // (e.g., links, raw HTML) may need similar treatment in the future.
  const codeRegex = textFormatTransformersIndex.fullMatchRegExpByTag['`'];
  const codeTransformer = textFormatTransformersIndex.transformersByTag['`'];
  const excludeRanges = [];
  let codeMatch = null;
  if (codeRegex && codeTransformer) {
    const globalRegex = new RegExp(codeRegex.source, 'g');
    const matches = Array.from(textContent.matchAll(globalRegex));
    for (const match of matches) {
      const startIndex = match.index;
      const endIndex = startIndex + match[0].length;
      if (!codeMatch) {
        codeMatch = {
          content: match[2],
          endIndex,
          startIndex,
          tag: '`'
        };
      }
      excludeRanges.push({
        end: endIndex,
        start: startIndex
      });
    }
  }
  const delimiters = scanDelimiters(textContent, textFormatTransformersIndex, excludeRanges);
  const emphasisMatch = delimiters.length > 0 ? processEmphasis(textContent, delimiters, textFormatTransformersIndex) : null;
  let resultMatch = null;
  let resultTransformer = null;
  if (codeMatch && emphasisMatch) {
    if (emphasisMatch.startIndex <= codeMatch.startIndex && emphasisMatch.endIndex >= codeMatch.endIndex) {
      resultMatch = emphasisMatch;
      resultTransformer = textFormatTransformersIndex.transformersByTag[emphasisMatch.tag];
    } else {
      resultMatch = codeMatch;
      resultTransformer = codeTransformer;
    }
  } else if (codeMatch) {
    resultMatch = codeMatch;
    resultTransformer = codeTransformer;
  } else if (emphasisMatch) {
    resultMatch = emphasisMatch;
    resultTransformer = textFormatTransformersIndex.transformersByTag[emphasisMatch.tag];
  }
  if (!resultMatch || !resultTransformer) {
    return null;
  }
  const regexMatch = [textContent.slice(resultMatch.startIndex, resultMatch.endIndex), resultMatch.tag, resultMatch.content];
  regexMatch.index = resultMatch.startIndex;
  regexMatch.input = textContent;
  return {
    endIndex: resultMatch.endIndex,
    match: regexMatch,
    startIndex: resultMatch.startIndex,
    transformer: resultTransformer
  };
}
function scanDelimiters(text, transformersIndex, excludeRanges = []) {
  const delimiters = [];
  const delimiterChars = new Set(Object.keys(transformersIndex.transformersByTag).filter(tag => tag[0] !== '`').map(tag => tag[0]));
  const isEscaped = index => {
    let count = 0;
    for (let i = index - 1; i >= 0 && text[i] === '\\'; i--) {
      count++;
    }
    return count % 2 === 1;
  };
  const isInExcludedRange = index => {
    return excludeRanges.some(range => index >= range.start && index < range.end);
  };
  let i = 0;
  while (i < text.length) {
    const char = text[i];
    if (!delimiterChars.has(char) || isEscaped(i) || isInExcludedRange(i)) {
      i++;
      continue;
    }
    let len = 1;
    while (i + len < text.length && text[i + len] === char) {
      len++;
    }
    const canOpen = canEmphasis(char, text, i, len, true);
    const canClose = canEmphasis(char, text, i, len, false);
    if (canOpen || canClose) {
      delimiters.push({
        active: true,
        canClose,
        canOpen,
        char,
        index: i,
        length: len,
        originalLength: len
      });
    }
    i += len;
  }
  return delimiters;
}
function processEmphasis(text, delimiters, transformersIndex) {
  const openersBottom = {};
  let currentPos = 0;
  let result = null;
  while (currentPos < delimiters.length) {
    const closer = delimiters[currentPos];
    if (!closer.active || !closer.canClose || closer.length === 0) {
      currentPos++;
      continue;
    }
    const bottomKey = `${closer.char}${closer.canOpen}`;
    const bottom = openersBottom[bottomKey] ?? -1;
    let foundOpener = false;
    for (let openIdx = currentPos - 1; openIdx > bottom; openIdx--) {
      const opener = delimiters[openIdx];
      if (!opener.active || !opener.canOpen || opener.length === 0 || opener.char !== closer.char) {
        continue;
      }

      // Rule of 3
      if (opener.canClose || closer.canOpen) {
        const sum = opener.originalLength + closer.originalLength;
        if (sum % 3 === 0 && opener.originalLength % 3 !== 0 && closer.originalLength % 3 !== 0) {
          continue;
        }
      }
      const maxLen = Math.min(opener.length, closer.length);
      const matchedTag = Object.keys(transformersIndex.transformersByTag).filter(t => t[0] === opener.char && t.length <= maxLen).sort((a, b) => b.length - a.length)[0];
      if (!matchedTag) {
        continue;
      }
      foundOpener = true;
      const matchLen = matchedTag.length;
      const match = {
        content: text.slice(opener.index + opener.length, closer.index),
        endIndex: closer.index + matchLen,
        startIndex: opener.index + (opener.length - matchLen),
        tag: matchedTag
      };
      if (!result || match.startIndex < result.startIndex || match.startIndex === result.startIndex && match.endIndex > result.endIndex) {
        result = match;
      }
      for (let j = openIdx + 1; j < currentPos; j++) {
        delimiters[j].active = false;
      }
      opener.length -= matchLen;
      closer.length -= matchLen;
      opener.active = opener.length > 0;
      if (closer.length > 0) {
        closer.index += matchLen;
      } else {
        closer.active = false;
        currentPos++;
      }
      break;
    }
    if (!foundOpener) {
      openersBottom[bottomKey] = currentPos - 1;
      if (!closer.canOpen) {
        closer.active = false;
      }
      currentPos++;
    }
  }
  return result;
}
function canEmphasis(char, text, index, length, isOpen) {
  if (!isFlanking(text, index, length, isOpen)) {
    return false;
  }
  if (char === '*') {
    return true;
  }
  if (char === '_') {
    if (!isFlanking(text, index, length, !isOpen)) {
      return true;
    }
    const adjacentChar = isOpen ? text[index - 1] : text[index + length];
    return adjacentChar !== undefined && PUNCTUATION.test(adjacentChar);
  }
  return true;
}
function isFlanking(text, index, length, isLeft) {
  const charBefore = text[index - 1];
  const charAfter = text[index + length];
  const [primary, secondary] = isLeft ? [charAfter, charBefore] : [charBefore, charAfter];
  if (primary === undefined || WHITESPACE.test(primary)) {
    return false;
  }
  if (!PUNCTUATION.test(primary)) {
    return true;
  }
  return secondary === undefined || WHITESPACE.test(secondary) || PUNCTUATION.test(secondary);
}
function importTextFormatTransformer(textNode, startIndex, endIndex, transformer, match) {
  const textContent = textNode.getTextContent();

  // No text matches - we can safely process the text format match
  let transformedNode, nodeAfter, nodeBefore;

  // If matching full content there's no need to run splitText and can reuse existing textNode
  // to update its content and apply format. E.g. for **_Hello_** string after applying bold
  // format (**) it will reuse the same text node to apply italic (_)
  if (match[0] === textContent) {
    transformedNode = textNode;
  } else {
    if (startIndex === 0) {
      [transformedNode, nodeAfter] = textNode.splitText(endIndex);
    } else {
      [nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex);
    }
  }
  transformedNode.setTextContent(match[2]);
  if (transformer) {
    for (const format of transformer.format) {
      if (!transformedNode.hasFormat(format)) {
        transformedNode.toggleFormat(format);
      }
    }
  }
  return {
    nodeAfter: nodeAfter,
    nodeBefore: nodeBefore,
    transformedNode: transformedNode
  };
}

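// Worked example for the delimiter scan above (a sketch; assumes the
// ITALIC_STAR transformer with tag '*' is registered in the index):
//
//   findOutermostTextFormatTransformer on a text node "*foo* bar"
//     -> scanDelimiters finds '*' runs at index 0 (can open) and index 4 (can close)
//     -> processEmphasis pairs them and returns
//        { startIndex: 0, endIndex: 5, tag: '*', content: 'foo' }
//   so the italic format is applied to "foo" and " bar" is left untouched.
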
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

function findOutermostTextMatchTransformer(textNode_, textMatchTransformers) {
  const textNode = textNode_;
  let foundMatchStartIndex = undefined;
  let foundMatchEndIndex = undefined;
  let foundMatchTransformer = undefined;
  let foundMatch = undefined;
  for (const transformer of textMatchTransformers) {
    if (!transformer.replace || !transformer.importRegExp) {
      continue;
    }
    const match = textNode.getTextContent().match(transformer.importRegExp);
    if (!match) {
      continue;
    }
    const startIndex = match.index || 0;
    const endIndex = transformer.getEndIndex ? transformer.getEndIndex(textNode, match) : startIndex + match[0].length;
    if (endIndex === false) {
      continue;
    }
    if (foundMatchStartIndex === undefined || foundMatchEndIndex === undefined ||
    // Wraps previous match or is strictly before it.
    startIndex < foundMatchStartIndex && (endIndex > foundMatchEndIndex || endIndex <= foundMatchStartIndex)) {
      foundMatchStartIndex = startIndex;
      foundMatchEndIndex = endIndex;
      foundMatchTransformer = transformer;
      foundMatch = match;
    }
  }
  if (foundMatchStartIndex === undefined || foundMatchEndIndex === undefined || foundMatchTransformer === undefined || foundMatch === undefined) {
    return null;
  }
  return {
    endIndex: foundMatchEndIndex,
    match: foundMatch,
    startIndex: foundMatchStartIndex,
    transformer: foundMatchTransformer
  };
}
function importFoundTextMatchTransformer(textNode, startIndex, endIndex, transformer, match) {
  let transformedNode, nodeAfter, nodeBefore;
  if (startIndex === 0) {
    [transformedNode, nodeAfter] = textNode.splitText(endIndex);
  } else {
    [nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex);
  }
  if (!transformer.replace) {
    return null;
  }
  const potentialTransformedNode = transformer.replace(transformedNode, match);
  return {
    nodeAfter,
    nodeBefore,
    transformedNode: potentialTransformedNode || undefined
  };
}

/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */


/**
 * Returns true if the node can contain transformable markdown.
 * Code nodes cannot contain transformable markdown.
 * For example, `code **bold**` should not be transformed to
 * <code>code <strong>bold</strong></code>.
 */
function canContainTransformableMarkdown(node) {
  return lexical.$isTextNode(node) && !node.hasFormat('code');
}

/**
 * Handles applying both text format and text match transformers.
 * It finds the outermost text format or text match and applies it,
 * then recursively calls itself to apply the next outermost transformer,
 * until there are no more transformers to apply.
 */
function importTextTransformers(textNode, textFormatTransformersIndex, textMatchTransformers) {
  let foundTextFormat = findOutermostTextFormatTransformer(textNode, textFormatTransformersIndex);
  let foundTextMatch = findOutermostTextMatchTransformer(textNode, textMatchTransformers);
  if (foundTextFormat && foundTextMatch) {
    // Find the outermost transformer
    if (foundTextFormat.startIndex <= foundTextMatch.startIndex && foundTextFormat.endIndex >= foundTextMatch.endIndex ||
    // foundTextMatch is not contained within foundTextFormat
    foundTextMatch.startIndex > foundTextFormat.endIndex) {
      // foundTextFormat wraps foundTextMatch - apply foundTextFormat by setting foundTextMatch to null
      foundTextMatch = null;
    } else {
      // foundTextMatch wraps foundTextFormat - apply foundTextMatch by setting foundTextFormat to null
      foundTextFormat = null;
    }
  }
  if (foundTextFormat) {
    const result = importTextFormatTransformer(textNode, foundTextFormat.startIndex, foundTextFormat.endIndex, foundTextFormat.transformer, foundTextFormat.match);
    if (canContainTransformableMarkdown(result.nodeAfter)) {
      importTextTransformers(result.nodeAfter, textFormatTransformersIndex, textMatchTransformers);
    }
    if (canContainTransformableMarkdown(result.nodeBefore)) {
      importTextTransformers(result.nodeBefore, textFormatTransformersIndex, textMatchTransformers);
    }
    if (canContainTransformableMarkdown(result.transformedNode)) {
      importTextTransformers(result.transformedNode, textFormatTransformersIndex, textMatchTransformers);
    }
  } else if (foundTextMatch) {
    const result = importFoundTextMatchTransformer(textNode, foundTextMatch.startIndex, foundTextMatch.endIndex, foundTextMatch.transformer, foundTextMatch.match);
    if (!result) {
      return;
    }
    if (canContainTransformableMarkdown(result.nodeAfter)) {
      importTextTransformers(result.nodeAfter, textFormatTransformersIndex, textMatchTransformers);
    }
    if (canContainTransformableMarkdown(result.nodeBefore)) {
      importTextTransformers(result.nodeBefore, textFormatTransformersIndex, textMatchTransformers);
    }
    if (canContainTransformableMarkdown(result.transformedNode)) {
      importTextTransformers(result.transformedNode, textFormatTransformersIndex, textMatchTransformers);
    }
  }

  // Handle escape characters
  const textContent = textNode.getTextContent();
  const escapedText = textContent.replace(/\\([*_`~\\])/g, '$1').replace(/&#(\d+);/g, (_, codePoint) => {
    return String.fromCodePoint(codePoint);
  });
  textNode.setTextContent(escapedText);
}

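// Escape-handling example for the final step above (sketch): a text node whose
// content is "\*not italic\*" produces no delimiters (the backslashes mark the
// asterisks as escaped), so no format is applied and the trailing replace()
// calls unescape it to "*not italic*"; numeric references such as "&#32;"
// written by the exporter are decoded back to the original whitespace.
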
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

/**
 * Renders markdown from a string. The selection is moved to the start after the operation.
 */
function createMarkdownImport(transformers, shouldPreserveNewLines = false) {
  const byType = transformersByType(transformers);
  const textFormatTransformersIndex = createTextFormatTransformersIndex(byType.textFormat);
  return (markdownString, node) => {
    const lines = markdownString.split('\n');
    const linesLength = lines.length;
    const root = node || lexical.$getRoot();
    root.clear();
    for (let i = 0; i < linesLength; i++) {
      const lineText = lines[i];
      const [imported, shiftedIndex] = $importMultiline(lines, i, byType.multilineElement, root);
      if (imported) {
        // If a multiline markdown element was imported, we don't want to process the lines that were part of it anymore.
        // There could be other sub-markdown elements (both multiline and normal ones) matching within this matched multiline element's children.
        // However, it would be the responsibility of the matched multiline transformer to decide how it wants to handle them.
        // We cannot handle those, as there is no way for us to know how to maintain the correct order of generated lexical nodes for possible children.
        i = shiftedIndex; // Next loop will start from the line after the last line of the multiline element
        continue;
      }
      $importBlocks(lineText, root, byType.element, textFormatTransformersIndex, byType.textMatch, shouldPreserveNewLines);
    }

    // By default, removing empty paragraphs as md does not really
    // allow empty lines and uses them as delimiter.
    // If you need empty lines set shouldPreserveNewLines = true.
    const children = root.getChildren();
    for (const child of children) {
      if (!shouldPreserveNewLines && isEmptyParagraph(child) && root.getChildrenSize() > 1) {
        child.remove();
      }
    }
    if (lexical.$getSelection() !== null) {
      root.selectStart();
    }
  };
}

/**
 *
 * @returns first element of the returned tuple is a boolean indicating if a multiline element was imported. The second element is the index of the last line that was processed.
 */
function $importMultiline(lines, startLineIndex, multilineElementTransformers, rootNode) {
  for (const transformer of multilineElementTransformers) {
    const {
      handleImportAfterStartMatch,
      regExpEnd,
      regExpStart,
      replace
    } = transformer;
    const startMatch = lines[startLineIndex].match(regExpStart);
    if (!startMatch) {
      continue; // Try next transformer
    }
    if (handleImportAfterStartMatch) {
      const result = handleImportAfterStartMatch({
        lines,
        rootNode,
        startLineIndex,
        startMatch,
        transformer
      });
      if (result === null) {
        continue;
      } else if (result) {
        return result;
      }
    }
    const regexpEndRegex = typeof regExpEnd === 'object' && 'regExp' in regExpEnd ? regExpEnd.regExp : regExpEnd;
    const isEndOptional = regExpEnd && typeof regExpEnd === 'object' && 'optional' in regExpEnd ? regExpEnd.optional : !regExpEnd;
    let endLineIndex = startLineIndex;
    const linesLength = lines.length;

    // check every single line for the closing match. It could also be on the same line as the opening match.
    while (endLineIndex < linesLength) {
      const endMatch = regexpEndRegex ? lines[endLineIndex].match(regexpEndRegex) : null;
      if (!endMatch) {
        if (!isEndOptional || isEndOptional && endLineIndex < linesLength - 1 // Optional end, but didn't reach the end of the document yet => continue searching for potential closing match
        ) {
          endLineIndex++;
          continue; // Search next line for closing match
        }
      }

      // Now, check if the closing match matched is the same as the opening match.
      // If it is, we need to continue searching for the actual closing match.
      if (endMatch && startLineIndex === endLineIndex && endMatch.index === startMatch.index) {
        endLineIndex++;
        continue; // Search next line for closing match
      }

      // At this point, we have found the closing match. Next: calculate the lines in between open and closing match
      // This should not include the matches themselves, and be split up by lines
      const linesInBetween = [];
      if (endMatch && startLineIndex === endLineIndex) {
        linesInBetween.push(lines[startLineIndex].slice(startMatch[0].length, -endMatch[0].length));
      } else {
        for (let i = startLineIndex; i <= endLineIndex; i++) {
          if (i === startLineIndex) {
            const text = lines[i].slice(startMatch[0].length);
            linesInBetween.push(text); // Also include empty text
          } else if (i === endLineIndex && endMatch) {
            const text = lines[i].slice(0, -endMatch[0].length);
            linesInBetween.push(text); // Also include empty text
          } else {
            linesInBetween.push(lines[i]);
          }
        }
      }
      if (replace(rootNode, null, startMatch, endMatch, linesInBetween, true) !== false) {
        // Return here. This $importMultiline function is run line by line and should only process a single multiline element at a time.
        return [true, endLineIndex];
      }

      // The replace function returned false, despite finding the matching open and close tags => this transformer does not want to handle it.
      // Thus, we continue letting the remaining transformers handle the passed lines of text from the beginning
      break;
    }
  }

  // No multiline transformer handled this line successfully
  return [false, startLineIndex];
}
function $importBlocks(lineText, rootNode, elementTransformers, textFormatTransformersIndex, textMatchTransformers, shouldPreserveNewLines) {
  const textNode = lexical.$createTextNode(lineText);
  const elementNode = lexical.$createParagraphNode();
  elementNode.append(textNode);
  rootNode.append(elementNode);
  for (const {
    regExp,
    replace
  } of elementTransformers) {
    const match = lineText.match(regExp);
    if (match) {
      textNode.setTextContent(lineText.slice(match[0].length));
      if (replace(elementNode, [textNode], match, true) !== false) {
        break;
      }
    }
  }
  importTextTransformers(textNode, textFormatTransformersIndex, textMatchTransformers);

  // If no transformer found and we left with original paragraph node
  // can check if its content can be appended to the previous node
  // if it's a paragraph, quote or list
  if (elementNode.isAttached() && lineText.length > 0) {
    const previousNode = elementNode.getPreviousSibling();
    if (!shouldPreserveNewLines && (
    // Only append if we're not preserving newlines
    lexical.$isParagraphNode(previousNode) || lexicalRichText.$isQuoteNode(previousNode) || lexicalList.$isListNode(previousNode))) {
      let targetNode = previousNode;
      if (lexicalList.$isListNode(previousNode)) {
        const lastDescendant = previousNode.getLastDescendant();
        if (lastDescendant == null) {
          targetNode = null;
        } else {
          targetNode = lexicalUtils.$findMatchingParent(lastDescendant, lexicalList.$isListItemNode);
        }
      }
      if (targetNode != null && targetNode.getTextContentSize() > 0) {
        targetNode.splice(targetNode.getChildrenSize(), 0, [lexical.$createLineBreakNode(), ...elementNode.getChildren()]);
        elementNode.remove();
      }
    }
  }
}
function createTextFormatTransformersIndex(textTransformers) {
  const transformersByTag = {};
  const fullMatchRegExpByTag = {};
  const openTagsRegExp = [];
  const escapeRegExp = `(?<![\\\\])`;
  for (const transformer of textTransformers) {
    const {
      tag
    } = transformer;
    transformersByTag[tag] = transformer;
    const tagRegExp = tag.replace(/(\*|\^|\+)/g, '\\$1');
    openTagsRegExp.push(tagRegExp);

    // Single-char tag (e.g. "*"),
    if (tag.length === 1) {
      if (tag === '`') {
        // Special handling for backticks - match content with escaped backticks
        fullMatchRegExpByTag[tag] = new RegExp(`(?<![\\\\\`])(\`)((?:\\\\\`|[^\`])+?)(\`)(?!\`)`);
      } else {
        fullMatchRegExpByTag[tag] = new RegExp(`(?<![\\\\${tagRegExp}])(${tagRegExp})((\\\\${tagRegExp})?.*?[^${tagRegExp}\\s](\\\\${tagRegExp})?)((?<!\\\\)|(?<=\\\\\\\\))(${tagRegExp})(?![\\\\${tagRegExp}])`);
      }
    } else {
      // Multi-char tags (e.g. "**")
      fullMatchRegExpByTag[tag] = new RegExp(`(?<!\\\\)(${tagRegExp})((\\\\${tagRegExp})?.*?[^\\s](\\\\${tagRegExp})?)((?<!\\\\)|(?<=\\\\\\\\))(${tagRegExp})(?!\\\\)`);
    }
  }
  return {
    // Reg exp to find open tag + content + close tag
    fullMatchRegExpByTag,
    // Regexp to locate *any* potential opening tag (longest first).
    openTagsRegExp: new RegExp(`${escapeRegExp}(${openTagsRegExp.join('|')})`, 'g'),
    transformersByTag
  };
}

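// Minimal usage sketch for the import factory above (illustrative only;
// assumes a LexicalEditor instance `editor` created elsewhere and the
// TRANSFORMERS array defined further down in this file):
//
//   const $importMarkdown = createMarkdownImport(TRANSFORMERS);
//   editor.update(() => {
//     // Clears and repopulates $getRoot(), or the optional second argument.
//     $importMarkdown('# Title\n\nSome **bold** text');
//   });
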
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

// Do not require this module directly! Use normal `invariant` calls.

function formatDevErrorMessage(message) {
  throw new Error(message);
}

/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

const ORDERED_LIST_REGEX = /^(\s*)(\d{1,})\.\s/;
const UNORDERED_LIST_REGEX = /^(\s*)[-*+]\s/;
const CHECK_LIST_REGEX = /^(\s*)(?:[-*+]\s)?\s?(\[(\s|x)?\])\s/i;
const HEADING_REGEX = /^(#{1,6})\s/;
const QUOTE_REGEX = /^>\s/;
const CODE_START_REGEX = /^([ \t]*`{3,})([\w-]+)?[ \t]?/;
const CODE_END_REGEX = /^[ \t]*`{3,}$/;
const CODE_SINGLE_LINE_REGEX = /^[ \t]*```[^`]+(?:(?:`{1,2}|`{4,})[^`]+)*```(?:[^`]|$)/;
const TABLE_ROW_REG_EXP = /^(?:\|)(.+)(?:\|)\s?$/;
const TABLE_ROW_DIVIDER_REG_EXP = /^(\| ?:?-*:? ?)+\|\s?$/;
const TAG_START_REGEX = /^<[a-z_][\w-]*(?:\s[^<>]*)?\/?>/i;
const TAG_END_REGEX = /^<\/[a-z_][\w-]*\s*>/i;
const ENDS_WITH = regex => new RegExp(`(?:${regex.source})$`, regex.flags);
const listMarkerState = lexical.createState('mdListMarker', {
  parse: v => typeof v === 'string' && /^[-*+]$/.test(v) ? v : '-'
});
const codeFenceState = lexical.createState('mdCodeFence', {
  parse: val => {
    if (typeof val === 'string' && /^`{3,}$/.test(val)) {
      return val;
    }
    return '```';
  }
});
const createBlockNode = createNode => {
  return (parentNode, children, match, isImport) => {
    const node = createNode(match);
    node.append(...children);
    parentNode.replace(node);
    if (!isImport) {
      node.select(0, 0);
    }
  };
};

// Amount of spaces that define indentation level
// TODO: should be an option
const LIST_INDENT_SIZE = 4;
function getIndent(whitespaces) {
  const tabs = whitespaces.match(/\t/g);
  const spaces = whitespaces.match(/ /g);
  let indent = 0;
  if (tabs) {
    indent += tabs.length;
  }
  if (spaces) {
    indent += Math.floor(spaces.length / LIST_INDENT_SIZE);
  }
  return indent;
}
const listReplace = listType => {
  return (parentNode, children, match, isImport) => {
    const previousNode = parentNode.getPreviousSibling();
    const nextNode = parentNode.getNextSibling();
    const listItem = lexicalList.$createListItemNode(listType === 'check' ? match[3] === 'x' : undefined);
    const firstMatchChar = match[0].trim()[0];
    const listMarker = (listType === 'bullet' || listType === 'check') && firstMatchChar === listMarkerState.parse(firstMatchChar) ? firstMatchChar : undefined;
    if (lexicalList.$isListNode(nextNode) && nextNode.getListType() === listType) {
      if (listMarker) {
        lexical.$setState(nextNode, listMarkerState, listMarker);
      }
      const firstChild = nextNode.getFirstChild();
      if (firstChild !== null) {
        firstChild.insertBefore(listItem);
      } else {
        // should never happen, but let's handle gracefully, just in case.
        nextNode.append(listItem);
      }
      parentNode.remove();
    } else if (lexicalList.$isListNode(previousNode) && previousNode.getListType() === listType) {
      if (listMarker) {
        lexical.$setState(previousNode, listMarkerState, listMarker);
      }
      previousNode.append(listItem);
      parentNode.remove();
    } else {
      const list = lexicalList.$createListNode(listType, listType === 'number' ? Number(match[2]) : undefined);
      if (listMarker) {
        lexical.$setState(list, listMarkerState, listMarker);
      }
      list.append(listItem);
      parentNode.replace(list);
    }
    listItem.append(...children);
    if (!isImport) {
      listItem.select(0, 0);
    }
    const indent = getIndent(match[1]);
    if (indent) {
      listItem.setIndent(indent);
    }
  };
};
const $listExport = (listNode, exportChildren, depth) => {
  const output = [];
  const children = listNode.getChildren();
  let index = 0;
  for (const listItemNode of children) {
    if (lexicalList.$isListItemNode(listItemNode)) {
      if (listItemNode.getChildrenSize() === 1) {
        const firstChild = listItemNode.getFirstChild();
        if (lexicalList.$isListNode(firstChild)) {
          output.push($listExport(firstChild, exportChildren, depth + 1));
          continue;
        }
      }
      const indent = ' '.repeat(depth * LIST_INDENT_SIZE);
      const listType = listNode.getListType();
      const listMarker = lexical.$getState(listNode, listMarkerState);
      const prefix = listType === 'number' ? `${listNode.getStart() + index}. ` : listType === 'check' ? `${listMarker} [${listItemNode.getChecked() ? 'x' : ' '}] ` : listMarker + ' ';
      output.push(indent + prefix + exportChildren(listItemNode));
      index++;
    }
  }
  return output.join('\n');
};
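// Sketch of the markdown produced by $listExport for a nested bullet list
// (assumes the default '-' marker from listMarkerState and LIST_INDENT_SIZE = 4):
//
//   - item one
//   - item two
//       - nested item
//
// A ListItemNode whose only child is another ListNode is treated as a wrapper
// and recursed into at depth + 1, which is why it gets indentation instead of
// its own "- " prefix.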
const HEADING = {
|
|
986
|
+
dependencies: [lexicalRichText.HeadingNode],
|
|
987
|
+
export: (node, exportChildren) => {
|
|
988
|
+
if (!lexicalRichText.$isHeadingNode(node)) {
|
|
989
|
+
return null;
|
|
990
|
+
}
|
|
991
|
+
const level = Number(node.getTag().slice(1));
|
|
992
|
+
return '#'.repeat(level) + ' ' + exportChildren(node);
|
|
993
|
+
},
|
|
994
|
+
regExp: HEADING_REGEX,
|
|
995
|
+
replace: createBlockNode(match => {
|
|
996
|
+
const tag = 'h' + match[1].length;
|
|
997
|
+
return lexicalRichText.$createHeadingNode(tag);
|
|
998
|
+
}),
|
|
999
|
+
type: 'element'
|
|
1000
|
+
};
|
|
1001
|
+
const QUOTE = {
|
|
1002
|
+
dependencies: [lexicalRichText.QuoteNode],
|
|
1003
|
+
export: (node, exportChildren) => {
|
|
1004
|
+
if (!lexicalRichText.$isQuoteNode(node)) {
|
|
1005
|
+
return null;
|
|
1006
|
+
}
|
|
1007
|
+
const lines = exportChildren(node).split('\n');
|
|
1008
|
+
const output = [];
|
|
1009
|
+
for (const line of lines) {
|
|
1010
|
+
output.push('> ' + line);
|
|
1011
|
+
}
|
|
1012
|
+
return output.join('\n');
|
|
1013
|
+
},
|
|
1014
|
+
regExp: QUOTE_REGEX,
|
|
1015
|
+
replace: (parentNode, children, _match, isImport) => {
|
|
1016
|
+
if (isImport) {
|
|
1017
|
+
const previousNode = parentNode.getPreviousSibling();
|
|
1018
|
+
if (lexicalRichText.$isQuoteNode(previousNode)) {
|
|
1019
|
+
previousNode.splice(previousNode.getChildrenSize(), 0, [lexical.$createLineBreakNode(), ...children]);
|
|
1020
|
+
parentNode.remove();
|
|
1021
|
+
return;
|
|
1022
|
+
}
|
|
1023
|
+
}
|
|
1024
|
+
const node = lexicalRichText.$createQuoteNode();
|
|
1025
|
+
node.append(...children);
|
|
1026
|
+
parentNode.replace(node);
|
|
1027
|
+
if (!isImport) {
|
|
1028
|
+
node.select(0, 0);
|
|
1029
|
+
}
|
|
1030
|
+
},
|
|
1031
|
+
type: 'element'
|
|
1032
|
+
};
|
|
1033
|
+
const CODE = {
|
|
1034
|
+
dependencies: [lexicalCode.CodeNode],
|
|
1035
|
+
export: node => {
|
|
1036
|
+
if (!lexicalCode.$isCodeNode(node)) {
|
|
1037
|
+
return null;
|
|
1038
|
+
}
|
|
1039
|
+
const textContent = node.getTextContent();
|
|
1040
|
+
let fence = lexical.$getState(node, codeFenceState);
|
|
1041
|
+
if (textContent.indexOf(fence) > -1) {
|
|
1042
|
+
const backticks = textContent.match(/`{3,}/g);
|
|
1043
|
+
if (backticks) {
|
|
1044
|
+
const maxLength = Math.max(...backticks.map(b => b.length));
|
|
1045
|
+
fence = '`'.repeat(maxLength + 1);
|
|
1046
|
+
}
|
|
1047
|
+
}
|
|
1048
|
+
return fence + (node.getLanguage() || '') + (textContent ? '\n' + textContent : '') + '\n' + fence;
|
|
1049
|
+
},
|
|
1050
|
+
handleImportAfterStartMatch: ({
|
|
1051
|
+
lines,
|
|
1052
|
+
rootNode,
|
|
1053
|
+
startLineIndex,
|
|
1054
|
+
startMatch
|
|
1055
|
+
}) => {
|
|
1056
|
+
const fence = startMatch[1];
|
|
1057
|
+
const fenceLength = fence.trim().length;
|
|
1058
|
+
const currentLine = lines[startLineIndex];
|
|
1059
|
+
const afterFenceIndex = startMatch.index + fence.length;
|
|
1060
|
+
const afterFence = currentLine.slice(afterFenceIndex);
|
|
1061
|
+
const singleLineEndRegex = new RegExp(`\`{${fenceLength},}$`);
|
|
1062
|
+
if (singleLineEndRegex.test(afterFence)) {
|
|
1063
|
+
const endMatch = afterFence.match(singleLineEndRegex);
|
|
1064
|
+
const content = afterFence.slice(0, afterFence.lastIndexOf(endMatch[0]));
|
|
1065
|
+
const fakeStartMatch = [...startMatch];
|
|
1066
|
+
fakeStartMatch[2] = '';
|
|
1067
|
+
CODE.replace(rootNode, null, fakeStartMatch, endMatch, [content], true);
|
|
1068
|
+
return [true, startLineIndex];
|
|
1069
|
+
}
|
|
1070
|
+
const multilineEndRegex = new RegExp(`^[ \\t]*\`{${fenceLength},}$`);
|
|
1071
|
+
for (let i = startLineIndex + 1; i < lines.length; i++) {
|
|
1072
|
+
const line = lines[i];
|
|
1073
|
+
if (multilineEndRegex.test(line)) {
|
|
1074
|
+
const endMatch = line.match(multilineEndRegex);
|
|
1075
|
+
const linesInBetween = lines.slice(startLineIndex + 1, i);
|
|
1076
|
+
const afterFullMatch = currentLine.slice(startMatch[0].length);
|
|
1077
|
+
if (afterFullMatch.length > 0) {
|
|
1078
|
+
linesInBetween.unshift(afterFullMatch);
|
|
1079
|
+
}
|
|
1080
|
+
CODE.replace(rootNode, null, startMatch, endMatch, linesInBetween, true);
|
|
1081
|
+
return [true, i];
|
|
1082
|
+
}
|
|
1083
|
+
}
|
|
1084
|
+
const linesInBetween = lines.slice(startLineIndex + 1);
|
|
1085
|
+
const afterFullMatch = currentLine.slice(startMatch[0].length);
|
|
1086
|
+
if (afterFullMatch.length > 0) {
|
|
1087
|
+
linesInBetween.unshift(afterFullMatch);
|
|
1088
|
+
}
|
|
1089
|
+
CODE.replace(rootNode, null, startMatch, null, linesInBetween, true);
|
|
1090
|
+
return [true, lines.length - 1];
|
|
1091
|
+
},
|
|
1092
|
+
regExpEnd: {
|
|
1093
|
+
optional: true,
|
|
1094
|
+
regExp: CODE_END_REGEX
|
|
1095
|
+
},
|
|
1096
|
+
regExpStart: CODE_START_REGEX,
|
|
1097
|
+
replace: (rootNode, children, startMatch, endMatch, linesInBetween, isImport) => {
|
|
1098
|
+
let codeBlockNode;
|
|
1099
|
+
let code;
|
|
1100
|
+
const fence = startMatch[1] ? startMatch[1].trim() : '```';
|
|
1101
|
+
const language = startMatch[2] || undefined;
|
|
1102
|
+
if (!children && linesInBetween) {
|
|
1103
|
+
if (linesInBetween.length === 1) {
|
|
1104
|
+
if (endMatch) {
|
|
1105
|
+
codeBlockNode = lexicalCode.$createCodeNode(language);
|
|
1106
|
+
code = linesInBetween[0];
|
|
1107
|
+
} else {
|
|
1108
|
+
codeBlockNode = lexicalCode.$createCodeNode(language);
|
|
1109
|
+
code = linesInBetween[0].startsWith(' ') ? linesInBetween[0].slice(1) : linesInBetween[0];
|
|
1110
|
+
}
|
|
1111
|
+
} else {
|
|
1112
|
+
codeBlockNode = lexicalCode.$createCodeNode(language);
|
|
1113
|
+
if (linesInBetween.length > 0) {
|
|
1114
|
+
if (linesInBetween[0].trim().length === 0) {
|
|
1115
|
+
linesInBetween.shift();
|
|
1116
|
+
} else if (linesInBetween[0].startsWith(' ')) {
|
|
1117
|
+
linesInBetween[0] = linesInBetween[0].slice(1);
|
|
1118
|
+
}
|
|
1119
|
+
}
|
|
1120
|
+
while (linesInBetween.length > 0 && !linesInBetween[linesInBetween.length - 1].length) {
|
|
1121
|
+
linesInBetween.pop();
|
|
1122
|
+
}
|
|
1123
|
+
code = linesInBetween.join('\n');
|
|
1124
|
+
}
|
|
1125
|
+
lexical.$setState(codeBlockNode, codeFenceState, fence);
|
|
1126
|
+
const textNode = lexical.$createTextNode(code);
|
|
1127
|
+
codeBlockNode.append(textNode);
|
|
1128
|
+
rootNode.append(codeBlockNode);
|
|
1129
|
+
} else if (children) {
|
|
1130
|
+
createBlockNode(match => {
|
|
1131
|
+
return lexicalCode.$createCodeNode(match ? match[2] : undefined);
|
|
1132
|
+
})(rootNode, children, startMatch, isImport);
|
|
1133
|
+
}
|
|
1134
|
+
},
|
|
1135
|
+
type: 'multiline-element'
|
|
1136
|
+
};
|
|
1137
|
+
const UNORDERED_LIST = {
|
|
1138
|
+
dependencies: [lexicalList.ListNode, lexicalList.ListItemNode],
|
|
1139
|
+
export: (node, exportChildren) => {
|
|
1140
|
+
return lexicalList.$isListNode(node) ? $listExport(node, exportChildren, 0) : null;
|
|
1141
|
+
},
|
|
1142
|
+
regExp: UNORDERED_LIST_REGEX,
|
|
1143
|
+
replace: listReplace('bullet'),
|
|
1144
|
+
type: 'element'
|
|
1145
|
+
};
|
|
1146
|
+
const CHECK_LIST = {
|
|
1147
|
+
dependencies: [lexicalList.ListNode, lexicalList.ListItemNode],
|
|
1148
|
+
export: (node, exportChildren) => {
|
|
1149
|
+
return lexicalList.$isListNode(node) ? $listExport(node, exportChildren, 0) : null;
|
|
1150
|
+
},
|
|
1151
|
+
regExp: CHECK_LIST_REGEX,
|
|
1152
|
+
replace: listReplace('check'),
|
|
1153
|
+
type: 'element'
|
|
1154
|
+
};
|
|
1155
|
+
const ORDERED_LIST = {
|
|
1156
|
+
dependencies: [lexicalList.ListNode, lexicalList.ListItemNode],
|
|
1157
|
+
export: (node, exportChildren) => {
|
|
1158
|
+
return lexicalList.$isListNode(node) ? $listExport(node, exportChildren, 0) : null;
|
|
1159
|
+
},
|
|
1160
|
+
regExp: ORDERED_LIST_REGEX,
|
|
1161
|
+
replace: listReplace('number'),
|
|
1162
|
+
type: 'element'
|
|
1163
|
+
};
|
|
1164
|
+
const INLINE_CODE = {
|
|
1165
|
+
format: ['code'],
|
|
1166
|
+
tag: '`',
|
|
1167
|
+
type: 'text-format'
|
|
1168
|
+
};
|
|
1169
|
+
const HIGHLIGHT = {
|
|
1170
|
+
format: ['highlight'],
|
|
1171
|
+
tag: '==',
|
|
1172
|
+
type: 'text-format'
|
|
1173
|
+
};
|
|
1174
|
+
const BOLD_ITALIC_STAR = {
|
|
1175
|
+
format: ['bold', 'italic'],
|
|
1176
|
+
tag: '***',
|
|
1177
|
+
type: 'text-format'
|
|
1178
|
+
};
|
|
1179
|
+
const BOLD_ITALIC_UNDERSCORE = {
|
|
1180
|
+
format: ['bold', 'italic'],
|
|
1181
|
+
intraword: false,
|
|
1182
|
+
tag: '___',
|
|
1183
|
+
type: 'text-format'
|
|
1184
|
+
};
|
|
1185
|
+
const BOLD_STAR = {
|
|
1186
|
+
format: ['bold'],
|
|
1187
|
+
tag: '**',
|
|
1188
|
+
type: 'text-format'
|
|
1189
|
+
};
|
|
1190
|
+
const BOLD_UNDERSCORE = {
|
|
1191
|
+
format: ['bold'],
|
|
1192
|
+
intraword: false,
|
|
1193
|
+
tag: '__',
|
|
1194
|
+
type: 'text-format'
|
|
1195
|
+
};
|
|
1196
|
+
const STRIKETHROUGH = {
|
|
1197
|
+
format: ['strikethrough'],
|
|
1198
|
+
tag: '~~',
|
|
1199
|
+
type: 'text-format'
|
|
1200
|
+
};
|
|
1201
|
+
const ITALIC_STAR = {
|
|
1202
|
+
format: ['italic'],
|
|
1203
|
+
tag: '*',
|
|
1204
|
+
type: 'text-format'
|
|
1205
|
+
};
|
|
1206
|
+
const ITALIC_UNDERSCORE = {
|
|
1207
|
+
format: ['italic'],
|
|
1208
|
+
intraword: false,
|
|
1209
|
+
tag: '_',
|
|
1210
|
+
type: 'text-format'
|
|
1211
|
+
};

// Order of text transformers matters:
//
// - code should go first as it prevents any transformations inside
// - then longer tags match (e.g. ** or __ should go before * or _)
const LINK = {
  dependencies: [lexicalLink.LinkNode],
  export: (node, exportChildren, exportFormat) => {
    if (!lexicalLink.$isLinkNode(node) || lexicalLink.$isAutoLinkNode(node)) {
      return null;
    }
    const title = node.getTitle();
    const textContent = exportChildren(node);
    const linkContent = title ? `[${textContent}](${node.getURL()} "${title}")` : `[${textContent}](${node.getURL()})`;
    return linkContent;
  },
  importRegExp: /(?:\[(.+?)\])(?:\((?:([^()\s]+)(?:\s"((?:[^"]*\\")*[^"]*)"\s*)?)\))/,
  regExp: /(?:\[(.+?)\])(?:\((?:([^()\s]+)(?:\s"((?:[^"]*\\")*[^"]*)"\s*)?)\))$/,
  replace: (textNode, match) => {
    const [, linkText, linkUrl, linkTitle] = match;
    const linkNode = lexicalLink.$createLinkNode(linkUrl, {
      title: linkTitle
    });
    const openBracketAmount = linkText.split('[').length - 1;
    const closeBracketAmount = linkText.split(']').length - 1;
    let parsedLinkText = linkText;
    let outsideLinkText = '';
    if (openBracketAmount < closeBracketAmount) {
      return;
    } else if (openBracketAmount > closeBracketAmount) {
      const linkTextParts = linkText.split('[');
      outsideLinkText = '[' + linkTextParts[0];
      parsedLinkText = linkTextParts.slice(1).join('[');
    }
    const linkTextNode = lexical.$createTextNode(parsedLinkText);
    linkTextNode.setFormat(textNode.getFormat());
    linkNode.append(linkTextNode);
    textNode.replace(linkNode);
    if (outsideLinkText) {
      linkNode.insertBefore(lexical.$createTextNode(outsideLinkText));
    }
    return linkTextNode;
  },
  trigger: ')',
  type: 'text-match'
};
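/**
 * Illustrative sketch (not part of the shipped file): inputs of the following
 * shape are what the LINK importRegExp/regExp above are written to capture: a
 * URL without spaces or parentheses, plus an optional quoted title. The URLs
 * are placeholders.
 *
 * @example
 * // '[Lexical](https://lexical.dev)'           captures linkText and linkUrl
 * // '[Lexical](https://lexical.dev "Editor")'  also captures linkTitle
 */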
const ELEMENT_TRANSFORMERS = [HEADING, QUOTE, UNORDERED_LIST, ORDERED_LIST];
const MULTILINE_ELEMENT_TRANSFORMERS = [CODE];

// Order of text format transformers matters:
//
// - code should go first as it prevents any transformations inside
// - then longer tags match (e.g. ** or __ should go before * or _)
const TEXT_FORMAT_TRANSFORMERS = [INLINE_CODE, BOLD_ITALIC_STAR, BOLD_ITALIC_UNDERSCORE, BOLD_STAR, BOLD_UNDERSCORE, HIGHLIGHT, ITALIC_STAR, ITALIC_UNDERSCORE, STRIKETHROUGH];
const TEXT_MATCH_TRANSFORMERS = [LINK];
const TRANSFORMERS = [...ELEMENT_TRANSFORMERS, ...MULTILINE_ELEMENT_TRANSFORMERS, ...TEXT_FORMAT_TRANSFORMERS, ...TEXT_MATCH_TRANSFORMERS];
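/**
 * Illustrative sketch (not part of the shipped file): registerMarkdownShortcuts
 * and the $convert* helpers below accept a custom transformer array in place of
 * TRANSFORMERS. Per the ordering comments above, more specific tags should come
 * before shorter ones. CHECK_LIST is a real export of this module; including it
 * in the list is the caller's choice.
 *
 * @example
 * const MY_TRANSFORMERS = [CHECK_LIST, ...TRANSFORMERS];
 */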
function normalizeMarkdown(input, shouldMergeAdjacentLines = false) {
  const lines = input.split('\n');
  let inCodeBlock = false;
  const sanitizedLines = [];
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i].trimEnd();
    const lastLine = sanitizedLines[sanitizedLines.length - 1];

    // Code blocks of ```single line``` don't toggle the inCodeBlock flag
    if (CODE_SINGLE_LINE_REGEX.test(line)) {
      sanitizedLines.push(line);
      continue;
    }

    // Detect the start or end of a code block
    if (CODE_START_REGEX.test(line) || CODE_END_REGEX.test(line)) {
      inCodeBlock = !inCodeBlock;
      sanitizedLines.push(line);
      continue;
    }

    // If we are inside a code block, keep the line unchanged
    if (inCodeBlock) {
      sanitizedLines.push(line);
      continue;
    }

    // In markdown the concept of "empty paragraphs" does not exist.
    // Blocks must be separated by an empty line. Non-empty adjacent lines must be merged.
    if (line === '' || lastLine === '' || !lastLine || HEADING_REGEX.test(lastLine) || HEADING_REGEX.test(line) || QUOTE_REGEX.test(line) || ORDERED_LIST_REGEX.test(line) || UNORDERED_LIST_REGEX.test(line) || CHECK_LIST_REGEX.test(line) || TABLE_ROW_REG_EXP.test(line) || TABLE_ROW_DIVIDER_REG_EXP.test(line) || !shouldMergeAdjacentLines || TAG_START_REGEX.test(line) || TAG_END_REGEX.test(line) || ENDS_WITH(TAG_END_REGEX).test(lastLine) || ENDS_WITH(TAG_START_REGEX).test(lastLine) || CODE_END_REGEX.test(lastLine)) {
      sanitizedLines.push(line);
    } else {
      sanitizedLines[sanitizedLines.length - 1] = lastLine + ' ' + line.trimStart();
    }
  }
  return sanitizedLines.join('\n');
}
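/**
 * Illustrative sketch (not part of the shipped file): normalizeMarkdown is
 * internal to this module, so the example below only describes the intended
 * merge behaviour for a made-up input when shouldMergeAdjacentLines is true.
 *
 * @example
 * // normalizeMarkdown('line one\nline two\n\n# Title', true)
 * // returns 'line one line two\n\n# Title'
 */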

function runElementTransformers(parentNode, anchorNode, anchorOffset, elementTransformers) {
  const grandParentNode = parentNode.getParent();
  if (!lexical.$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {
    return false;
  }
  const textContent = anchorNode.getTextContent();

  // Checking for anchorOffset position to prevent any checks for cases when caret is too far
  // from a line start to be a part of block-level markdown trigger.
  //
  // TODO:
  // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)
  // since otherwise it won't be a markdown shortcut, but tables are exception
  if (textContent[anchorOffset - 1] !== ' ') {
    return false;
  }
  for (const {
    regExp,
    replace
  } of elementTransformers) {
    const match = textContent.match(regExp);
    if (match && match[0].length === (match[0].endsWith(' ') ? anchorOffset : anchorOffset - 1)) {
      const nextSiblings = anchorNode.getNextSiblings();
      const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset);
      const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings;
      if (replace(parentNode, siblings, match, false) !== false) {
        leadingNode.remove();
        return true;
      }
    }
  }
  return false;
}
function runMultilineElementTransformers(parentNode, anchorNode, anchorOffset, elementTransformers) {
  const grandParentNode = parentNode.getParent();
  if (!lexical.$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {
    return false;
  }
  const textContent = anchorNode.getTextContent();

  // Checking for anchorOffset position to prevent any checks for cases when caret is too far
  // from a line start to be a part of block-level markdown trigger.
  //
  // TODO:
  // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)
  // since otherwise it won't be a markdown shortcut, but tables are exception
  if (textContent[anchorOffset - 1] !== ' ') {
    return false;
  }
  for (const {
    regExpStart,
    replace,
    regExpEnd
  } of elementTransformers) {
    if (regExpEnd && !('optional' in regExpEnd) || regExpEnd && 'optional' in regExpEnd && !regExpEnd.optional) {
      continue;
    }
    const match = textContent.match(regExpStart);
    if (match && match[0].length === (match[0].endsWith(' ') ? anchorOffset : anchorOffset - 1)) {
      const nextSiblings = anchorNode.getNextSiblings();
      const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset);
      const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings;
      if (replace(parentNode, siblings, match, null, null, false) !== false) {
        leadingNode.remove();
        return true;
      }
    }
  }
  return false;
}
function runTextMatchTransformers(anchorNode, anchorOffset, transformersByTrigger) {
  let textContent = anchorNode.getTextContent();
  const lastChar = textContent[anchorOffset - 1];
  const transformers = transformersByTrigger[lastChar];
  if (transformers == null) {
    return false;
  }

  // If typing in the middle of content, remove the tail to do
  // reg exp match up to a string end (caret position)
  if (anchorOffset < textContent.length) {
    textContent = textContent.slice(0, anchorOffset);
  }
  for (const transformer of transformers) {
    if (!transformer.replace || !transformer.regExp) {
      continue;
    }
    const match = textContent.match(transformer.regExp);
    if (match === null) {
      continue;
    }
    const startIndex = match.index || 0;
    const endIndex = startIndex + match[0].length;
    let replaceNode;
    if (startIndex === 0) {
      [replaceNode] = anchorNode.splitText(endIndex);
    } else {
      [, replaceNode] = anchorNode.splitText(startIndex, endIndex);
    }
    replaceNode.selectNext(0, 0);
    transformer.replace(replaceNode, match);
    return true;
  }
  return false;
}
function $runTextFormatTransformers(anchorNode, anchorOffset, textFormatTransformers) {
  const textContent = anchorNode.getTextContent();
  const closeTagEndIndex = anchorOffset - 1;
  const closeChar = textContent[closeTagEndIndex];
  // Quick check if we're possibly at the end of inline markdown style
  const matchers = textFormatTransformers[closeChar];
  if (!matchers) {
    return false;
  }
  for (const matcher of matchers) {
    const {
      tag
    } = matcher;
    const tagLength = tag.length;
    const closeTagStartIndex = closeTagEndIndex - tagLength + 1;

    // If tag is not single char check if rest of it matches with text content
    if (tagLength > 1) {
      if (!isEqualSubString(textContent, closeTagStartIndex, tag, 0, tagLength)) {
        continue;
      }
    }

    // Space before closing tag cancels inline markdown
    if (textContent[closeTagStartIndex - 1] === ' ') {
      continue;
    }

    // Some tags can not be used within words, hence should have newline/space/punctuation after it
    const afterCloseTagChar = textContent[closeTagEndIndex + 1];
    if (matcher.intraword === false && afterCloseTagChar && !PUNCTUATION_OR_SPACE.test(afterCloseTagChar)) {
      continue;
    }
    const closeNode = anchorNode;
    let openNode = closeNode;
    let openTagStartIndex = getOpenTagStartIndex(textContent, closeTagStartIndex, tag);

    // Go through text node siblings and search for opening tag
    // if haven't found it within the same text node as closing tag
    let sibling = openNode;
    while (openTagStartIndex < 0 && (sibling = sibling.getPreviousSibling())) {
      if (lexical.$isLineBreakNode(sibling)) {
        break;
      }
      if (lexical.$isTextNode(sibling)) {
        if (sibling.hasFormat('code')) {
          continue;
        }
        const siblingTextContent = sibling.getTextContent();
        openNode = sibling;
        openTagStartIndex = getOpenTagStartIndex(siblingTextContent, siblingTextContent.length, tag);
      }
    }

    // Opening tag is not found
    if (openTagStartIndex < 0) {
      continue;
    }

    // No content between opening and closing tag
    if (openNode === closeNode && openTagStartIndex + tagLength === closeTagStartIndex) {
      continue;
    }

    // Checking longer tags for repeating chars (e.g. *** vs **)
    const prevOpenNodeText = openNode.getTextContent();
    if (openTagStartIndex > 0 && prevOpenNodeText[openTagStartIndex - 1] === closeChar) {
      continue;
    }

    // Some tags can not be used within words, hence should have newline/space/punctuation before it
    const beforeOpenTagChar = prevOpenNodeText[openTagStartIndex - 1];
    if (matcher.intraword === false && beforeOpenTagChar && !PUNCTUATION_OR_SPACE.test(beforeOpenTagChar)) {
      continue;
    }

    // Clean text from opening and closing tags (starting from closing tag
    // to prevent any offset shifts if we start from opening one)
    const prevCloseNodeText = closeNode.getTextContent();
    const closeNodeText = prevCloseNodeText.slice(0, closeTagStartIndex) + prevCloseNodeText.slice(closeTagEndIndex + 1);
    closeNode.setTextContent(closeNodeText);
    const openNodeText = openNode === closeNode ? closeNodeText : prevOpenNodeText;
    openNode.setTextContent(openNodeText.slice(0, openTagStartIndex) + openNodeText.slice(openTagStartIndex + tagLength));
    const selection = lexical.$getSelection();
    const nextSelection = lexical.$createRangeSelection();
    lexical.$setSelection(nextSelection);
    // Adjust offset based on deleted chars
    const newOffset = closeTagEndIndex - tagLength * (openNode === closeNode ? 2 : 1) + 1;
    nextSelection.anchor.set(openNode.__key, openTagStartIndex, 'text');
    nextSelection.focus.set(closeNode.__key, newOffset, 'text');

    // Apply formatting to selected text
    for (const format of matcher.format) {
      if (!nextSelection.hasFormat(format)) {
        nextSelection.formatText(format);
      }
    }

    // Collapse selection up to the focus point
    nextSelection.anchor.set(nextSelection.focus.key, nextSelection.focus.offset, nextSelection.focus.type);

    // Remove formatting from collapsed selection
    for (const format of matcher.format) {
      if (nextSelection.hasFormat(format)) {
        nextSelection.toggleFormat(format);
      }
    }
    if (lexical.$isRangeSelection(selection)) {
      nextSelection.format = selection.format;
    }
    return true;
  }
  return false;
}
function getOpenTagStartIndex(string, maxIndex, tag) {
  const tagLength = tag.length;
  for (let i = maxIndex; i >= tagLength; i--) {
    const startIndex = i - tagLength;
    if (isEqualSubString(string, startIndex, tag, 0, tagLength) &&
    // Space after opening tag cancels transformation
    string[startIndex + tagLength] !== ' ') {
      return startIndex;
    }
  }
  return -1;
}
function isEqualSubString(stringA, aStart, stringB, bStart, length) {
  for (let i = 0; i < length; i++) {
    if (stringA[aStart + i] !== stringB[bStart + i]) {
      return false;
    }
  }
  return true;
}
function registerMarkdownShortcuts(editor, transformers = TRANSFORMERS) {
  const byType = transformersByType(transformers);
  const textFormatTransformersByTrigger = indexBy(byType.textFormat, ({
    tag
  }) => tag[tag.length - 1]);
  const textMatchTransformersByTrigger = indexBy(byType.textMatch, ({
    trigger
  }) => trigger);
  for (const transformer of transformers) {
    const type = transformer.type;
    if (type === 'element' || type === 'text-match' || type === 'multiline-element') {
      const dependencies = transformer.dependencies;
      for (const node of dependencies) {
        if (!editor.hasNode(node)) {
          {
            formatDevErrorMessage(`MarkdownShortcuts: missing dependency ${node.getType()} for transformer. Ensure node dependency is included in editor initial config.`);
          }
        }
      }
    }
  }
  const $transform = (parentNode, anchorNode, anchorOffset) => {
    if (runElementTransformers(parentNode, anchorNode, anchorOffset, byType.element)) {
      return;
    }
    if (runMultilineElementTransformers(parentNode, anchorNode, anchorOffset, byType.multilineElement)) {
      return;
    }
    if (runTextMatchTransformers(anchorNode, anchorOffset, textMatchTransformersByTrigger)) {
      return;
    }
    $runTextFormatTransformers(anchorNode, anchorOffset, textFormatTransformersByTrigger);
  };
  return editor.registerUpdateListener(({
    tags,
    dirtyLeaves,
    editorState,
    prevEditorState
  }) => {
    // Ignore updates from collaboration and undo/redo (as changes already calculated)
    if (tags.has(lexical.COLLABORATION_TAG) || tags.has(lexical.HISTORIC_TAG)) {
      return;
    }

    // If editor is still composing (i.e. backticks) we must wait before the user confirms the key
    if (editor.isComposing()) {
      return;
    }
    const selection = editorState.read(lexical.$getSelection);
    const prevSelection = prevEditorState.read(lexical.$getSelection);

    // We expect selection to be a collapsed range and not match previous one (as we want
    // to trigger transforms only as user types)
    if (!lexical.$isRangeSelection(prevSelection) || !lexical.$isRangeSelection(selection) || !selection.isCollapsed() || selection.is(prevSelection)) {
      return;
    }
    const anchorKey = selection.anchor.key;
    const anchorOffset = selection.anchor.offset;
    const anchorNode = editorState._nodeMap.get(anchorKey);
    if (!lexical.$isTextNode(anchorNode) || !dirtyLeaves.has(anchorKey) || anchorOffset !== 1 && anchorOffset > prevSelection.anchor.offset + 1) {
      return;
    }
    editor.update(() => {
      if (!canContainTransformableMarkdown(anchorNode)) {
        return;
      }
      const parentNode = anchorNode.getParent();
      if (parentNode === null || lexicalCode.$isCodeNode(parentNode)) {
        return;
      }
      $transform(parentNode, anchorNode, selection.anchor.offset);
    });
  });
}
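/**
 * Illustrative sketch (not part of the shipped file): registerMarkdownShortcuts
 * returns the teardown function from editor.registerUpdateListener. `editor` is
 * assumed to be an existing LexicalEditor whose initial config registers the
 * nodes listed in each transformer's dependencies (e.g. ListNode, ListItemNode,
 * CodeNode, LinkNode).
 *
 * @example
 * const unregister = registerMarkdownShortcuts(editor, TRANSFORMERS);
 * // ...later, when tearing the editor down:
 * unregister();
 */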

/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */


/**
 * Renders markdown from a string. The selection is moved to the start after the operation.
 *
 * @param {boolean} [shouldPreserveNewLines] By setting this to true, new lines will be preserved between conversions
 * @param {boolean} [shouldMergeAdjacentLines] By setting this to true, adjacent non empty lines will be merged according to commonmark spec: https://spec.commonmark.org/0.24/#example-177. Not applicable if shouldPreserveNewLines = true.
 */
function $convertFromMarkdownString(markdown, transformers = TRANSFORMERS, node, shouldPreserveNewLines = false, shouldMergeAdjacentLines = false) {
  const sanitizedMarkdown = shouldPreserveNewLines ? markdown : normalizeMarkdown(markdown, shouldMergeAdjacentLines);
  const importMarkdown = createMarkdownImport(transformers, shouldPreserveNewLines);
  return importMarkdown(sanitizedMarkdown, node);
}

/**
 * Renders string from markdown. The selection is moved to the start after the operation.
 */
function $convertToMarkdownString(transformers = TRANSFORMERS, node, shouldPreserveNewLines = false) {
  const exportMarkdown = createMarkdownExport(transformers, shouldPreserveNewLines);
  return exportMarkdown(node);
}
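/**
 * Illustrative sketch (not part of the shipped file): both $-prefixed helpers
 * are expected to run inside an editor update or read context; `editor` is an
 * assumed existing LexicalEditor instance and the markdown string is a placeholder.
 *
 * @example
 * editor.update(() => {
 *   $convertFromMarkdownString('# Hello **world**', TRANSFORMERS);
 * });
 * const md = editor.getEditorState().read(() => $convertToMarkdownString(TRANSFORMERS));
 */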

exports.$convertFromMarkdownString = $convertFromMarkdownString;
exports.$convertToMarkdownString = $convertToMarkdownString;
exports.BOLD_ITALIC_STAR = BOLD_ITALIC_STAR;
exports.BOLD_ITALIC_UNDERSCORE = BOLD_ITALIC_UNDERSCORE;
exports.BOLD_STAR = BOLD_STAR;
exports.BOLD_UNDERSCORE = BOLD_UNDERSCORE;
exports.CHECK_LIST = CHECK_LIST;
exports.CODE = CODE;
exports.ELEMENT_TRANSFORMERS = ELEMENT_TRANSFORMERS;
exports.HEADING = HEADING;
exports.HIGHLIGHT = HIGHLIGHT;
exports.INLINE_CODE = INLINE_CODE;
exports.ITALIC_STAR = ITALIC_STAR;
exports.ITALIC_UNDERSCORE = ITALIC_UNDERSCORE;
exports.LINK = LINK;
exports.MULTILINE_ELEMENT_TRANSFORMERS = MULTILINE_ELEMENT_TRANSFORMERS;
exports.ORDERED_LIST = ORDERED_LIST;
exports.QUOTE = QUOTE;
exports.STRIKETHROUGH = STRIKETHROUGH;
exports.TEXT_FORMAT_TRANSFORMERS = TEXT_FORMAT_TRANSFORMERS;
exports.TEXT_MATCH_TRANSFORMERS = TEXT_MATCH_TRANSFORMERS;
exports.TRANSFORMERS = TRANSFORMERS;
exports.UNORDERED_LIST = UNORDERED_LIST;
exports.registerMarkdownShortcuts = registerMarkdownShortcuts;