@payloadcms/richtext-lexical 3.68.0-internal-debug.2eb12b9 → 3.68.0-internal-debug.185cc5f
This diff shows the published contents of the two package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/dist/features/blocks/client/component/index.js +1 -0
- package/dist/features/blocks/client/component/index.js.map +1 -1
- package/dist/features/blocks/client/componentInline/index.js +1 -0
- package/dist/features/blocks/client/componentInline/index.js.map +1 -1
- package/dist/features/blocks/client/markdown/markdownTransformer.js +1 -0
- package/dist/features/blocks/client/markdown/markdownTransformer.js.map +1 -1
- package/dist/features/blocks/premade/CodeBlock/Component/Block.js +1 -0
- package/dist/features/blocks/premade/CodeBlock/Component/Block.js.map +1 -1
- package/dist/features/blocks/premade/CodeBlock/Component/Collapse/index.js +1 -0
- package/dist/features/blocks/premade/CodeBlock/Component/Collapse/index.js.map +1 -1
- package/dist/features/blocks/premade/CodeBlock/Component/FloatingCollapse/index.js +1 -0
- package/dist/features/blocks/premade/CodeBlock/Component/FloatingCollapse/index.js.map +1 -1
- package/dist/features/blocks/server/markdown/markdownTransformer.js +1 -0
- package/dist/features/blocks/server/markdown/markdownTransformer.js.map +1 -1
- package/dist/features/debug/jsxConverter/client/plugin/index.js +1 -0
- package/dist/features/debug/jsxConverter/client/plugin/index.js.map +1 -1
- package/dist/features/debug/testRecorder/client/plugin/index.js +1 -0
- package/dist/features/debug/testRecorder/client/plugin/index.js.map +1 -1
- package/dist/features/debug/treeView/client/plugin/index.js +1 -0
- package/dist/features/debug/treeView/client/plugin/index.js.map +1 -1
- package/dist/features/experimental_table/client/plugins/TableActionMenuPlugin/index.js +1 -0
- package/dist/features/experimental_table/client/plugins/TableActionMenuPlugin/index.js.map +1 -1
- package/dist/features/experimental_table/client/plugins/TableCellResizerPlugin/index.js +1 -0
- package/dist/features/experimental_table/client/plugins/TableCellResizerPlugin/index.js.map +1 -1
- package/dist/features/experimental_table/client/plugins/TablePlugin/index.js +1 -0
- package/dist/features/experimental_table/client/plugins/TablePlugin/index.js.map +1 -1
- package/dist/features/horizontalRule/client/plugin/index.js +1 -0
- package/dist/features/horizontalRule/client/plugin/index.js.map +1 -1
- package/dist/features/link/client/plugins/autoLink/index.js +3 -0
- package/dist/features/link/client/plugins/autoLink/index.js.map +1 -1
- package/dist/features/link/client/plugins/floatingLinkEditor/index.js +1 -0
- package/dist/features/link/client/plugins/floatingLinkEditor/index.js.map +1 -1
- package/dist/features/link/server/baseFields.js +1 -0
- package/dist/features/link/server/baseFields.js.map +1 -1
- package/dist/features/migrations/lexicalPluginToLexical/nodes/unknownConvertedNode/Component.js +1 -0
- package/dist/features/migrations/lexicalPluginToLexical/nodes/unknownConvertedNode/Component.js.map +1 -1
- package/dist/features/migrations/slateToLexical/nodes/unknownConvertedNode/Component.js +1 -0
- package/dist/features/migrations/slateToLexical/nodes/unknownConvertedNode/Component.js.map +1 -1
- package/dist/features/relationship/client/components/RelationshipComponent.js +1 -0
- package/dist/features/relationship/client/components/RelationshipComponent.js.map +1 -1
- package/dist/features/toolbars/fixed/client/Toolbar/index.js +1 -0
- package/dist/features/toolbars/fixed/client/Toolbar/index.js.map +1 -1
- package/dist/features/toolbars/inline/client/Toolbar/index.js +1 -0
- package/dist/features/toolbars/inline/client/Toolbar/index.js.map +1 -1
- package/dist/features/toolbars/shared/ToolbarButton/index.js +1 -0
- package/dist/features/toolbars/shared/ToolbarButton/index.js.map +1 -1
- package/dist/features/toolbars/shared/ToolbarDropdown/index.js +1 -0
- package/dist/features/toolbars/shared/ToolbarDropdown/index.js.map +1 -1
- package/dist/features/upload/client/component/index.js +1 -0
- package/dist/features/upload/client/component/index.js.map +1 -1
- package/dist/features/upload/client/component/pending/index.js +1 -0
- package/dist/features/upload/client/component/pending/index.js.map +1 -1
- package/dist/field/Diff/converters/listitem/index.js +1 -0
- package/dist/field/Diff/converters/listitem/index.js.map +1 -1
- package/dist/field/Diff/converters/relationship/index.js +1 -0
- package/dist/field/Diff/converters/relationship/index.js.map +1 -1
- package/dist/field/Diff/converters/unknown/index.js +1 -0
- package/dist/field/Diff/converters/unknown/index.js.map +1 -1
- package/dist/field/Diff/converters/upload/index.js +1 -0
- package/dist/field/Diff/converters/upload/index.js.map +1 -1
- package/dist/field/Diff/index.js +1 -0
- package/dist/field/Diff/index.js.map +1 -1
- package/dist/field/Field.js +2 -0
- package/dist/field/Field.js.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/dist/lexical/LexicalEditor.js +1 -0
- package/dist/lexical/LexicalEditor.js.map +1 -1
- package/dist/lexical/plugins/DecoratorPlugin/index.js +1 -0
- package/dist/lexical/plugins/DecoratorPlugin/index.js.map +1 -1
- package/dist/lexical/plugins/InsertParagraphAtEnd/index.js +1 -0
- package/dist/lexical/plugins/InsertParagraphAtEnd/index.js.map +1 -1
- package/dist/lexical/plugins/SlashMenu/LexicalTypeaheadMenuPlugin/LexicalMenu.js +2 -0
- package/dist/lexical/plugins/SlashMenu/LexicalTypeaheadMenuPlugin/LexicalMenu.js.map +1 -1
- package/dist/lexical/plugins/SlashMenu/index.js +2 -0
- package/dist/lexical/plugins/SlashMenu/index.js.map +1 -1
- package/dist/lexical/plugins/handles/AddBlockHandlePlugin/index.js +1 -0
- package/dist/lexical/plugins/handles/AddBlockHandlePlugin/index.js.map +1 -1
- package/dist/lexical/plugins/handles/DraggableBlockPlugin/index.js +1 -0
- package/dist/lexical/plugins/handles/DraggableBlockPlugin/index.js.map +1 -1
- package/dist/lexical/ui/ContentEditable.js +1 -0
- package/dist/lexical/ui/ContentEditable.js.map +1 -1
- package/dist/packages/@lexical/markdown/MarkdownShortcuts.js +2 -0
- package/dist/packages/@lexical/markdown/MarkdownShortcuts.js.map +1 -1
- package/dist/packages/@lexical/markdown/importTextFormatTransformer.js +2 -0
- package/dist/packages/@lexical/markdown/importTextFormatTransformer.js.map +1 -1
- package/dist/packages/@lexical/markdown/importTextMatchTransformer.js +2 -0
- package/dist/packages/@lexical/markdown/importTextMatchTransformer.js.map +1 -1
- package/dist/utilities/migrateSlateToLexical/migrateDocumentFieldsRecursively.js +2 -0
- package/dist/utilities/migrateSlateToLexical/migrateDocumentFieldsRecursively.js.map +1 -1
- package/dist/utilities/upgradeLexicalData/upgradeDocumentFieldsRecursively.js +2 -0
- package/dist/utilities/upgradeLexicalData/upgradeDocumentFieldsRecursively.js.map +1 -1
- package/package.json +7 -7
package/dist/lexical/ui/ContentEditable.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../src/lexical/ui/ContentEditable.tsx"],"sourcesContent":["'use client'\nimport type { JSX } from 'react'\n\nimport { useLexicalComposerContext } from '@lexical/react/LexicalComposerContext'\nimport { ContentEditable } from '@lexical/react/LexicalContentEditable.js'\nimport { useTranslation } from '@payloadcms/ui'\n\nimport './ContentEditable.scss'\n\nimport * as React from 'react'\n\nimport type { SanitizedClientEditorConfig } from '../config/types.js'\n\nexport function LexicalContentEditable({\n className,\n editorConfig,\n}: {\n className?: string\n editorConfig: SanitizedClientEditorConfig\n}): JSX.Element {\n const { t } = useTranslation<{}, string>()\n const [_, { getTheme }] = useLexicalComposerContext()\n const theme = getTheme()\n\n return (\n <ContentEditable\n aria-placeholder={t('lexical:general:placeholder')}\n className={className ?? 'ContentEditable__root'}\n placeholder={\n <p className={theme?.placeholder}>\n {editorConfig?.admin?.placeholder ?? t('lexical:general:placeholder')}\n </p>\n }\n />\n )\n}\n"],"names":["useLexicalComposerContext","ContentEditable","useTranslation","React","LexicalContentEditable","className","editorConfig","t","_","getTheme","theme","aria-placeholder","placeholder","p","admin"],"mappings":"AAAA;;AAGA,SAASA,yBAAyB,QAAQ,wCAAuC;AACjF,SAASC,eAAe,QAAQ,2CAA0C;AAC1E,SAASC,cAAc,QAAQ,iBAAgB;
+
{"version":3,"sources":["../../../src/lexical/ui/ContentEditable.tsx"],"sourcesContent":["'use client'\nimport type { JSX } from 'react'\n\nimport { useLexicalComposerContext } from '@lexical/react/LexicalComposerContext'\nimport { ContentEditable } from '@lexical/react/LexicalContentEditable.js'\nimport { useTranslation } from '@payloadcms/ui'\n\nimport './ContentEditable.scss'\n\nimport * as React from 'react'\n\nimport type { SanitizedClientEditorConfig } from '../config/types.js'\n\nexport function LexicalContentEditable({\n className,\n editorConfig,\n}: {\n className?: string\n editorConfig: SanitizedClientEditorConfig\n}): JSX.Element {\n const { t } = useTranslation<{}, string>()\n const [_, { getTheme }] = useLexicalComposerContext()\n const theme = getTheme()\n\n return (\n <ContentEditable\n aria-placeholder={t('lexical:general:placeholder')}\n className={className ?? 'ContentEditable__root'}\n placeholder={\n <p className={theme?.placeholder}>\n {editorConfig?.admin?.placeholder ?? t('lexical:general:placeholder')}\n </p>\n }\n />\n )\n}\n"],"names":["useLexicalComposerContext","ContentEditable","useTranslation","React","LexicalContentEditable","className","editorConfig","t","_","getTheme","theme","aria-placeholder","placeholder","p","admin"],"mappings":"AAAA;;AAGA,SAASA,yBAAyB,QAAQ,wCAAuC;AACjF,SAASC,eAAe,QAAQ,2CAA0C;AAC1E,SAASC,cAAc,QAAQ,iBAAgB;AAE/C,OAAO,yBAAwB;AAE/B,YAAYC,WAAW,QAAO;AAI9B,OAAO,SAASC,uBAAuB,EACrCC,SAAS,EACTC,YAAY,EAIb;IACC,MAAM,EAAEC,CAAC,EAAE,GAAGL;IACd,MAAM,CAACM,GAAG,EAAEC,QAAQ,EAAE,CAAC,GAAGT;IAC1B,MAAMU,QAAQD;IAEd,qBACE,KAACR;QACCU,oBAAkBJ,EAAE;QACpBF,WAAWA,aAAa;QACxBO,2BACE,KAACC;YAAER,WAAWK,OAAOE;sBAClBN,cAAcQ,OAAOF,eAAeL,EAAE;;;AAKjD"}
package/dist/packages/@lexical/markdown/MarkdownShortcuts.js
@@ -98,8 +98,10 @@ function runTextMatchTransformers(anchorNode, anchorOffset, transformersByTrigger) {
         const endIndex = startIndex + match[0].length;
         let replaceNode;
         if (startIndex === 0) {
+            ;
             [replaceNode] = anchorNode.splitText(endIndex);
         } else {
+            ;
             [, replaceNode] = anchorNode.splitText(startIndex, endIndex);
         }
         if (replaceNode) {
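Note on the hunk above: the only JavaScript-level change in this diff is that bare `;` statements are now emitted on their own line ahead of destructuring assignments that begin with `[`; the same guard appears again in the importTextFormatTransformer.js hunk below. The TypeScript sources (visible in the embedded sourcesContent of the maps) already write these statements as `;[replaceNode] = anchorNode.splitText(endIndex)`, so the newer build output apparently preserves that ASI guard as its own empty statement where the previous build dropped it; right after a `{` the extra `;` is a no-op. A minimal, self-contained sketch of the hazard the leading semicolon protects against — a standalone illustration, not code from this package, using a hypothetical `splitText` stand-in:

```ts
// Hypothetical stand-in for TextNode.splitText(): returns up to two pieces.
function splitText(_index: number): [string, string?] {
  return ['head', 'tail']
}

let replaceNode: string | undefined

// In semicolon-free style, a statement that starts with `[` can be parsed as
// an index access on the previous line's expression, e.g.
//   doSomething()
//   [replaceNode] = splitText(3)
// is read as `doSomething()[replaceNode] = splitText(3)`.
// A leading `;` terminates the previous statement, so the destructuring
// assignment stands on its own:
;[replaceNode] = splitText(3)

console.log(replaceNode) // "head"
```

Since the compiled files only gain these empty statements, runtime behaviour should be unchanged; the corresponding .js.map files change accordingly to account for the extra lines.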
package/dist/packages/@lexical/markdown/MarkdownShortcuts.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/MarkdownShortcuts.ts"],"sourcesContent":["/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\n\nimport type { ElementNode, LexicalEditor, TextNode } from 'lexical'\n\nimport {\n $createRangeSelection,\n $getSelection,\n $isLineBreakNode,\n $isRangeSelection,\n $isRootOrShadowRoot,\n $isTextNode,\n $setSelection,\n} from 'lexical'\n\nimport type {\n ElementTransformer,\n MultilineElementTransformer,\n TextFormatTransformer,\n TextMatchTransformer,\n Transformer,\n} from './MarkdownTransformers.js'\n\nimport { TRANSFORMERS } from './index.js'\nimport { indexBy, PUNCTUATION_OR_SPACE, transformersByType } from './utils.js'\n\nfunction runElementTransformers(\n parentNode: ElementNode,\n anchorNode: TextNode,\n anchorOffset: number,\n elementTransformers: ReadonlyArray<ElementTransformer>,\n): boolean {\n const grandParentNode = parentNode.getParent()\n\n if (!$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {\n return false\n }\n\n const textContent = anchorNode.getTextContent()\n\n // Checking for anchorOffset position to prevent any checks for cases when caret is too far\n // from a line start to be a part of block-level markdown trigger.\n //\n // TODO:\n // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)\n // since otherwise it won't be a markdown shortcut, but tables are exception\n if (textContent[anchorOffset - 1] !== ' ') {\n return false\n }\n\n for (const { regExp, replace } of elementTransformers) {\n const match = textContent.match(regExp)\n\n if (match && match[0].length === (match[0].endsWith(' ') ? anchorOffset : anchorOffset - 1)) {\n const nextSiblings = anchorNode.getNextSiblings()\n const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset)\n leadingNode?.remove()\n const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings\n if (replace(parentNode, siblings, match, false) !== false) {\n return true\n }\n }\n }\n\n return false\n}\n\nfunction runMultilineElementTransformers(\n parentNode: ElementNode,\n anchorNode: TextNode,\n anchorOffset: number,\n elementTransformers: ReadonlyArray<MultilineElementTransformer>,\n): boolean {\n const grandParentNode = parentNode.getParent()\n\n if (!$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {\n return false\n }\n\n const textContent = anchorNode.getTextContent()\n\n // Checking for anchorOffset position to prevent any checks for cases when caret is too far\n // from a line start to be a part of block-level markdown trigger.\n //\n // TODO:\n // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)\n // since otherwise it won't be a markdown shortcut, but tables are exception\n if (textContent[anchorOffset - 1] !== ' ') {\n return false\n }\n\n for (const { regExpEnd, regExpStart, replace } of elementTransformers) {\n if (\n (regExpEnd && !('optional' in regExpEnd)) ||\n (regExpEnd && 'optional' in regExpEnd && !regExpEnd.optional)\n ) {\n continue\n }\n\n const match = textContent.match(regExpStart)\n\n if (match && match[0].length === (match[0].endsWith(' ') ? 
anchorOffset : anchorOffset - 1)) {\n const nextSiblings = anchorNode.getNextSiblings()\n const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset)\n leadingNode?.remove()\n const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings\n\n if (replace(parentNode, siblings, match, null, null, false) !== false) {\n return true\n }\n }\n }\n\n return false\n}\n\nfunction runTextMatchTransformers(\n anchorNode: TextNode,\n anchorOffset: number,\n transformersByTrigger: Readonly<Record<string, Array<TextMatchTransformer>>>,\n): boolean {\n let textContent = anchorNode.getTextContent()\n const lastChar = textContent[anchorOffset - 1]!\n const transformers = transformersByTrigger[lastChar]\n\n if (transformers == null) {\n return false\n }\n\n // If typing in the middle of content, remove the tail to do\n // reg exp match up to a string end (caret position)\n if (anchorOffset < textContent.length) {\n textContent = textContent.slice(0, anchorOffset)\n }\n\n for (const transformer of transformers) {\n if (!transformer.replace || !transformer.regExp) {\n continue\n }\n const match = textContent.match(transformer.regExp)\n\n if (match === null) {\n continue\n }\n\n const startIndex = match.index || 0\n const endIndex = startIndex + match[0].length\n let replaceNode\n\n if (startIndex === 0) {\n ;[replaceNode] = anchorNode.splitText(endIndex)\n } else {\n ;[, replaceNode] = anchorNode.splitText(startIndex, endIndex)\n }\n if (replaceNode) {\n replaceNode.selectNext(0, 0)\n transformer.replace(replaceNode, match)\n }\n return true\n }\n\n return false\n}\n\nfunction $runTextFormatTransformers(\n anchorNode: TextNode,\n anchorOffset: number,\n textFormatTransformers: Readonly<Record<string, ReadonlyArray<TextFormatTransformer>>>,\n): boolean {\n const textContent = anchorNode.getTextContent()\n const closeTagEndIndex = anchorOffset - 1\n const closeChar = textContent[closeTagEndIndex]!\n // Quick check if we're possibly at the end of inline markdown style\n const matchers = textFormatTransformers[closeChar]\n\n if (!matchers) {\n return false\n }\n\n for (const matcher of matchers) {\n const { tag } = matcher\n const tagLength = tag.length\n const closeTagStartIndex = closeTagEndIndex - tagLength + 1\n\n // If tag is not single char check if rest of it matches with text content\n if (tagLength > 1) {\n if (!isEqualSubString(textContent, closeTagStartIndex, tag, 0, tagLength)) {\n continue\n }\n }\n\n // Space before closing tag cancels inline markdown\n if (textContent[closeTagStartIndex - 1] === ' ') {\n continue\n }\n\n // Some tags can not be used within words, hence should have newline/space/punctuation after it\n const afterCloseTagChar = textContent[closeTagEndIndex + 1]\n\n if (\n matcher.intraword === false &&\n afterCloseTagChar &&\n !PUNCTUATION_OR_SPACE.test(afterCloseTagChar)\n ) {\n continue\n }\n\n const closeNode = anchorNode\n let openNode = closeNode\n let openTagStartIndex = getOpenTagStartIndex(textContent, closeTagStartIndex, tag)\n\n // Go through text node siblings and search for opening tag\n // if haven't found it within the same text node as closing tag\n let sibling: null | TextNode = openNode\n\n while (openTagStartIndex < 0 && (sibling = sibling.getPreviousSibling<TextNode>())) {\n if ($isLineBreakNode(sibling)) {\n break\n }\n\n if ($isTextNode(sibling)) {\n const siblingTextContent = sibling.getTextContent()\n openNode = sibling\n openTagStartIndex = getOpenTagStartIndex(siblingTextContent, siblingTextContent.length, tag)\n }\n }\n\n // 
Opening tag is not found\n if (openTagStartIndex < 0) {\n continue\n }\n\n // No content between opening and closing tag\n if (openNode === closeNode && openTagStartIndex + tagLength === closeTagStartIndex) {\n continue\n }\n\n // Checking longer tags for repeating chars (e.g. *** vs **)\n const prevOpenNodeText = openNode.getTextContent()\n\n if (openTagStartIndex > 0 && prevOpenNodeText[openTagStartIndex - 1] === closeChar) {\n continue\n }\n\n // Some tags can not be used within words, hence should have newline/space/punctuation before it\n const beforeOpenTagChar = prevOpenNodeText[openTagStartIndex - 1]\n\n if (\n matcher.intraword === false &&\n beforeOpenTagChar &&\n !PUNCTUATION_OR_SPACE.test(beforeOpenTagChar)\n ) {\n continue\n }\n\n // Clean text from opening and closing tags (starting from closing tag\n // to prevent any offset shifts if we start from opening one)\n const prevCloseNodeText = closeNode.getTextContent()\n const closeNodeText =\n prevCloseNodeText.slice(0, closeTagStartIndex) + prevCloseNodeText.slice(closeTagEndIndex + 1)\n closeNode.setTextContent(closeNodeText)\n const openNodeText = openNode === closeNode ? closeNodeText : prevOpenNodeText\n openNode.setTextContent(\n openNodeText.slice(0, openTagStartIndex) + openNodeText.slice(openTagStartIndex + tagLength),\n )\n const selection = $getSelection()\n const nextSelection = $createRangeSelection()\n $setSelection(nextSelection)\n // Adjust offset based on deleted chars\n const newOffset = closeTagEndIndex - tagLength * (openNode === closeNode ? 2 : 1) + 1\n nextSelection.anchor.set(openNode.__key, openTagStartIndex, 'text')\n nextSelection.focus.set(closeNode.__key, newOffset, 'text')\n\n // Apply formatting to selected text\n for (const format of matcher.format) {\n if (!nextSelection.hasFormat(format)) {\n nextSelection.formatText(format)\n }\n }\n\n // Collapse selection up to the focus point\n nextSelection.anchor.set(\n nextSelection.focus.key,\n nextSelection.focus.offset,\n nextSelection.focus.type,\n )\n\n // Remove formatting from collapsed selection\n for (const format of matcher.format) {\n if (nextSelection.hasFormat(format)) {\n nextSelection.toggleFormat(format)\n }\n }\n\n if ($isRangeSelection(selection)) {\n nextSelection.format = selection.format\n }\n\n return true\n }\n\n return false\n}\n\nfunction getOpenTagStartIndex(string: string, maxIndex: number, tag: string): number {\n const tagLength = tag.length\n\n for (let i = maxIndex; i >= tagLength; i--) {\n const startIndex = i - tagLength\n\n if (\n isEqualSubString(string, startIndex, tag, 0, tagLength) && // Space after opening tag cancels transformation\n string[startIndex + tagLength] !== ' '\n ) {\n return startIndex\n }\n }\n\n return -1\n}\n\nfunction isEqualSubString(\n stringA: string,\n aStart: number,\n stringB: string,\n bStart: number,\n length: number,\n): boolean {\n for (let i = 0; i < length; i++) {\n if (stringA[aStart + i] !== stringB[bStart + i]) {\n return false\n }\n }\n\n return true\n}\n\nexport function registerMarkdownShortcuts(\n editor: LexicalEditor,\n transformers: Array<Transformer> = TRANSFORMERS,\n): () => void {\n const byType = transformersByType(transformers)\n const textFormatTransformersByTrigger = indexBy(\n byType.textFormat,\n ({ tag }) => tag[tag.length - 1],\n )\n const textMatchTransformersByTrigger = indexBy(byType.textMatch, ({ trigger }) => trigger)\n\n for (const transformer of transformers) {\n const type = transformer.type\n if (type === 'element' || type === 'text-match' || type === 
'multiline-element') {\n const dependencies = transformer.dependencies\n for (const node of dependencies) {\n if (!editor.hasNode(node)) {\n throw new Error(\n 'MarkdownShortcuts: missing dependency %s for transformer. Ensure node dependency is included in editor initial config.' +\n node.getType(),\n )\n }\n }\n }\n }\n\n const $transform = (parentNode: ElementNode, anchorNode: TextNode, anchorOffset: number) => {\n if (runElementTransformers(parentNode, anchorNode, anchorOffset, byType.element)) {\n return\n }\n\n if (\n runMultilineElementTransformers(parentNode, anchorNode, anchorOffset, byType.multilineElement)\n ) {\n return\n }\n\n if (runTextMatchTransformers(anchorNode, anchorOffset, textMatchTransformersByTrigger)) {\n return\n }\n\n $runTextFormatTransformers(anchorNode, anchorOffset, textFormatTransformersByTrigger)\n }\n\n return editor.registerUpdateListener(({ dirtyLeaves, editorState, prevEditorState, tags }) => {\n // Ignore updates from collaboration and undo/redo (as changes already calculated)\n if (tags.has('collaboration') || tags.has('historic')) {\n return\n }\n\n // If editor is still composing (i.e. backticks) we must wait before the user confirms the key\n if (editor.isComposing()) {\n return\n }\n\n const selection = editorState.read($getSelection)\n const prevSelection = prevEditorState.read($getSelection)\n\n // We expect selection to be a collapsed range and not match previous one (as we want\n // to trigger transforms only as user types)\n if (\n !$isRangeSelection(prevSelection) ||\n !$isRangeSelection(selection) ||\n !selection.isCollapsed() ||\n selection.is(prevSelection)\n ) {\n return\n }\n\n const anchorKey = selection.anchor.key\n const anchorOffset = selection.anchor.offset\n\n const anchorNode = editorState._nodeMap.get(anchorKey)\n\n if (\n !$isTextNode(anchorNode) ||\n !dirtyLeaves.has(anchorKey) ||\n (anchorOffset !== 1 && anchorOffset > prevSelection.anchor.offset + 1)\n ) {\n return\n }\n\n editor.update(() => {\n // Markdown is not available inside code\n if (anchorNode.hasFormat('code')) {\n return\n }\n\n const parentNode = anchorNode.getParent()\n\n if (parentNode === null) {\n return\n }\n\n $transform(parentNode, anchorNode, selection.anchor.offset)\n })\n 
})\n}\n"],"names":["$createRangeSelection","$getSelection","$isLineBreakNode","$isRangeSelection","$isRootOrShadowRoot","$isTextNode","$setSelection","TRANSFORMERS","indexBy","PUNCTUATION_OR_SPACE","transformersByType","runElementTransformers","parentNode","anchorNode","anchorOffset","elementTransformers","grandParentNode","getParent","getFirstChild","textContent","getTextContent","regExp","replace","match","length","endsWith","nextSiblings","getNextSiblings","leadingNode","remainderNode","splitText","remove","siblings","runMultilineElementTransformers","regExpEnd","regExpStart","optional","runTextMatchTransformers","transformersByTrigger","lastChar","transformers","slice","transformer","startIndex","index","endIndex","replaceNode","selectNext","$runTextFormatTransformers","textFormatTransformers","closeTagEndIndex","closeChar","matchers","matcher","tag","tagLength","closeTagStartIndex","isEqualSubString","afterCloseTagChar","intraword","test","closeNode","openNode","openTagStartIndex","getOpenTagStartIndex","sibling","getPreviousSibling","siblingTextContent","prevOpenNodeText","beforeOpenTagChar","prevCloseNodeText","closeNodeText","setTextContent","openNodeText","selection","nextSelection","newOffset","anchor","set","__key","focus","format","hasFormat","formatText","key","offset","type","toggleFormat","string","maxIndex","i","stringA","aStart","stringB","bStart","registerMarkdownShortcuts","editor","byType","textFormatTransformersByTrigger","textFormat","textMatchTransformersByTrigger","textMatch","trigger","dependencies","node","hasNode","Error","getType","$transform","element","multilineElement","registerUpdateListener","dirtyLeaves","editorState","prevEditorState","tags","has","isComposing","read","prevSelection","isCollapsed","is","anchorKey","_nodeMap","get","update"],"mappings":"AAAA;;;;;;CAMC,GAID,SACEA,qBAAqB,EACrBC,aAAa,EACbC,gBAAgB,EAChBC,iBAAiB,EACjBC,mBAAmB,EACnBC,WAAW,EACXC,aAAa,QACR,UAAS;AAUhB,SAASC,YAAY,QAAQ,aAAY;AACzC,SAASC,OAAO,EAAEC,oBAAoB,EAAEC,kBAAkB,QAAQ,aAAY;AAE9E,SAASC,uBACPC,UAAuB,EACvBC,UAAoB,EACpBC,YAAoB,EACpBC,mBAAsD;IAEtD,MAAMC,kBAAkBJ,WAAWK,SAAS;IAE5C,IAAI,CAACb,oBAAoBY,oBAAoBJ,WAAWM,aAAa,OAAOL,YAAY;QACtF,OAAO;IACT;IAEA,MAAMM,cAAcN,WAAWO,cAAc;IAE7C,2FAA2F;IAC3F,kEAAkE;IAClE,EAAE;IACF,QAAQ;IACR,+GAA+G;IAC/G,4EAA4E;IAC5E,IAAID,WAAW,CAACL,eAAe,EAAE,KAAK,KAAK;QACzC,OAAO;IACT;IAEA,KAAK,MAAM,EAAEO,MAAM,EAAEC,OAAO,EAAE,IAAIP,oBAAqB;QACrD,MAAMQ,QAAQJ,YAAYI,KAAK,CAACF;QAEhC,IAAIE,SAASA,KAAK,CAAC,EAAE,CAACC,MAAM,KAAMD,CAAAA,KAAK,CAAC,EAAE,CAACE,QAAQ,CAAC,OAAOX,eAAeA,eAAe,CAAA,GAAI;YAC3F,MAAMY,eAAeb,WAAWc,eAAe;YAC/C,MAAM,CAACC,aAAaC,cAAc,GAAGhB,WAAWiB,SAAS,CAAChB;YAC1Dc,aAAaG;YACb,MAAMC,WAAWH,gBAAgB;gBAACA;mBAAkBH;aAAa,GAAGA;YACpE,IAAIJ,QAAQV,YAAYoB,UAAUT,OAAO,WAAW,OAAO;gBACzD,OAAO;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,SAASU,gCACPrB,UAAuB,EACvBC,UAAoB,EACpBC,YAAoB,EACpBC,mBAA+D;IAE/D,MAAMC,kBAAkBJ,WAAWK,SAAS;IAE5C,IAAI,CAACb,oBAAoBY,oBAAoBJ,WAAWM,aAAa,OAAOL,YAAY;QACtF,OAAO;IACT;IAEA,MAAMM,cAAcN,WAAWO,cAAc;IAE7C,2FAA2F;IAC3F,kEAAkE;IAClE,EAAE;IACF,QAAQ;IACR,+GAA+G;IAC/G,4EAA4E;IAC5E,IAAID,WAAW,CAACL,eAAe,EAAE,KAAK,KAAK;QACzC,OAAO;IACT;IAEA,KAAK,MAAM,EAAEoB,SAAS,EAAEC,WAAW,EAAEb,OAAO,EAAE,IAAIP,oBAAqB;QACrE,IACE,AAACmB,aAAa,CAAE,CAAA,cAAcA,SAAQ,KACrCA,aAAa,cAAcA,aAAa,CAACA,UAAUE,QAAQ,EAC5D;YACA;QACF;QAEA,MAAMb,QAAQJ,YAAYI,KAAK,CAACY;QAEhC,IAAIZ,SAASA,KAAK,CAAC,EAAE,CAACC,MAAM,KAAMD,CAAAA,KAAK,CAAC,EAAE,CAACE,QAAQ,CAAC,OAAOX,eAAeA,eAAe,CAAA,GAAI;YAC3F,MAAMY,eAAeb,WAAWc,eAAe;YAC/C,MAAM,CAACC,aAAaC,cAAc,GAAGhB,WAAWiB,SAAS,CAAChB;YAC1Dc,aAAaG;YACb,MAAMC,WAAWH,gBAAgB;gBAACA;mBAAkB
H;aAAa,GAAGA;YAEpE,IAAIJ,QAAQV,YAAYoB,UAAUT,OAAO,MAAM,MAAM,WAAW,OAAO;gBACrE,OAAO;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,SAASc,yBACPxB,UAAoB,EACpBC,YAAoB,EACpBwB,qBAA4E;IAE5E,IAAInB,cAAcN,WAAWO,cAAc;IAC3C,MAAMmB,WAAWpB,WAAW,CAACL,eAAe,EAAE;IAC9C,MAAM0B,eAAeF,qBAAqB,CAACC,SAAS;IAEpD,IAAIC,gBAAgB,MAAM;QACxB,OAAO;IACT;IAEA,4DAA4D;IAC5D,oDAAoD;IACpD,IAAI1B,eAAeK,YAAYK,MAAM,EAAE;QACrCL,cAAcA,YAAYsB,KAAK,CAAC,GAAG3B;IACrC;IAEA,KAAK,MAAM4B,eAAeF,aAAc;QACtC,IAAI,CAACE,YAAYpB,OAAO,IAAI,CAACoB,YAAYrB,MAAM,EAAE;YAC/C;QACF;QACA,MAAME,QAAQJ,YAAYI,KAAK,CAACmB,YAAYrB,MAAM;QAElD,IAAIE,UAAU,MAAM;YAClB;QACF;QAEA,MAAMoB,aAAapB,MAAMqB,KAAK,IAAI;QAClC,MAAMC,WAAWF,aAAapB,KAAK,CAAC,EAAE,CAACC,MAAM;QAC7C,IAAIsB;QAEJ,IAAIH,eAAe,GAAG;YACnB,CAACG,YAAY,GAAGjC,WAAWiB,SAAS,CAACe;QACxC,OAAO;YACJ,GAAGC,YAAY,GAAGjC,WAAWiB,SAAS,CAACa,YAAYE;QACtD;QACA,IAAIC,aAAa;YACfA,YAAYC,UAAU,CAAC,GAAG;YAC1BL,YAAYpB,OAAO,CAACwB,aAAavB;QACnC;QACA,OAAO;IACT;IAEA,OAAO;AACT;AAEA,SAASyB,2BACPnC,UAAoB,EACpBC,YAAoB,EACpBmC,sBAAsF;IAEtF,MAAM9B,cAAcN,WAAWO,cAAc;IAC7C,MAAM8B,mBAAmBpC,eAAe;IACxC,MAAMqC,YAAYhC,WAAW,CAAC+B,iBAAiB;IAC/C,oEAAoE;IACpE,MAAME,WAAWH,sBAAsB,CAACE,UAAU;IAElD,IAAI,CAACC,UAAU;QACb,OAAO;IACT;IAEA,KAAK,MAAMC,WAAWD,SAAU;QAC9B,MAAM,EAAEE,GAAG,EAAE,GAAGD;QAChB,MAAME,YAAYD,IAAI9B,MAAM;QAC5B,MAAMgC,qBAAqBN,mBAAmBK,YAAY;QAE1D,0EAA0E;QAC1E,IAAIA,YAAY,GAAG;YACjB,IAAI,CAACE,iBAAiBtC,aAAaqC,oBAAoBF,KAAK,GAAGC,YAAY;gBACzE;YACF;QACF;QAEA,mDAAmD;QACnD,IAAIpC,WAAW,CAACqC,qBAAqB,EAAE,KAAK,KAAK;YAC/C;QACF;QAEA,+FAA+F;QAC/F,MAAME,oBAAoBvC,WAAW,CAAC+B,mBAAmB,EAAE;QAE3D,IACEG,QAAQM,SAAS,KAAK,SACtBD,qBACA,CAACjD,qBAAqBmD,IAAI,CAACF,oBAC3B;YACA;QACF;QAEA,MAAMG,YAAYhD;QAClB,IAAIiD,WAAWD;QACf,IAAIE,oBAAoBC,qBAAqB7C,aAAaqC,oBAAoBF;QAE9E,2DAA2D;QAC3D,+DAA+D;QAC/D,IAAIW,UAA2BH;QAE/B,MAAOC,oBAAoB,KAAME,CAAAA,UAAUA,QAAQC,kBAAkB,EAAW,EAAI;YAClF,IAAIhE,iBAAiB+D,UAAU;gBAC7B;YACF;YAEA,IAAI5D,YAAY4D,UAAU;gBACxB,MAAME,qBAAqBF,QAAQ7C,cAAc;gBACjD0C,WAAWG;gBACXF,oBAAoBC,qBAAqBG,oBAAoBA,mBAAmB3C,MAAM,EAAE8B;YAC1F;QACF;QAEA,2BAA2B;QAC3B,IAAIS,oBAAoB,GAAG;YACzB;QACF;QAEA,6CAA6C;QAC7C,IAAID,aAAaD,aAAaE,oBAAoBR,cAAcC,oBAAoB;YAClF;QACF;QAEA,4DAA4D;QAC5D,MAAMY,mBAAmBN,SAAS1C,cAAc;QAEhD,IAAI2C,oBAAoB,KAAKK,gBAAgB,CAACL,oBAAoB,EAAE,KAAKZ,WAAW;YAClF;QACF;QAEA,gGAAgG;QAChG,MAAMkB,oBAAoBD,gBAAgB,CAACL,oBAAoB,EAAE;QAEjE,IACEV,QAAQM,SAAS,KAAK,SACtBU,qBACA,CAAC5D,qBAAqBmD,IAAI,CAACS,oBAC3B;YACA;QACF;QAEA,sEAAsE;QACtE,6DAA6D;QAC7D,MAAMC,oBAAoBT,UAAUzC,cAAc;QAClD,MAAMmD,gBACJD,kBAAkB7B,KAAK,CAAC,GAAGe,sBAAsBc,kBAAkB7B,KAAK,CAACS,mBAAmB;QAC9FW,UAAUW,cAAc,CAACD;QACzB,MAAME,eAAeX,aAAaD,YAAYU,gBAAgBH;QAC9DN,SAASU,cAAc,CACrBC,aAAahC,KAAK,CAAC,GAAGsB,qBAAqBU,aAAahC,KAAK,CAACsB,oBAAoBR;QAEpF,MAAMmB,YAAYzE;QAClB,MAAM0E,gBAAgB3E;QACtBM,cAAcqE;QACd,uCAAuC;QACvC,MAAMC,YAAY1B,mBAAmBK,YAAaO,CAAAA,aAAaD,YAAY,IAAI,CAAA,IAAK;QACpFc,cAAcE,MAAM,CAACC,GAAG,CAAChB,SAASiB,KAAK,EAAEhB,mBAAmB;QAC5DY,cAAcK,KAAK,CAACF,GAAG,CAACjB,UAAUkB,KAAK,EAAEH,WAAW;QAEpD,oCAAoC;QACpC,KAAK,MAAMK,UAAU5B,QAAQ4B,MAAM,CAAE;YACnC,IAAI,CAACN,cAAcO,SAAS,CAACD,SAAS;gBACpCN,cAAcQ,UAAU,CAACF;YAC3B;QACF;QAEA,2CAA2C;QAC3CN,cAAcE,MAAM,CAACC,GAAG,CACtBH,cAAcK,KAAK,CAACI,GAAG,EACvBT,cAAcK,KAAK,CAACK,MAAM,EAC1BV,cAAcK,KAAK,CAACM,IAAI;QAG1B,6CAA6C;QAC7C,KAAK,MAAML,UAAU5B,QAAQ4B,MAAM,CAAE;YACnC,IAAIN,cAAcO,SAAS,CAACD,SAAS;gBACnCN,cAAcY,YAAY,CAACN;YAC7B;QACF;QAEA,IAAI9E,kBAAkBuE,YAAY;YAChCC,cAAcM,MAAM,GAAGP,UAAUO,MAAM;QACzC;QAEA,OAAO;IACT;IAEA,OAAO;AACT;AAEA,SAASjB,qBAAqBwB,MAAc,EAAEC,QAAgB,EAAEnC,GAAW;IACzE,MAAMC,YAAYD,IAAI9B,MAAM;IAE5B,IAAK,IAAIkE,IAAID,UAAUC,KAAKnC,WAAWmC,IAAK;QAC1C,MAAM/C,aAAa+C,IAAInC;QAEvB,IACEE,iBAAiB+B,QAAQ7C,YAAYW,KAAK
,GAAGC,cAAc,iDAAiD;QAC5GiC,MAAM,CAAC7C,aAAaY,UAAU,KAAK,KACnC;YACA,OAAOZ;QACT;IACF;IAEA,OAAO,CAAC;AACV;AAEA,SAASc,iBACPkC,OAAe,EACfC,MAAc,EACdC,OAAe,EACfC,MAAc,EACdtE,MAAc;IAEd,IAAK,IAAIkE,IAAI,GAAGA,IAAIlE,QAAQkE,IAAK;QAC/B,IAAIC,OAAO,CAACC,SAASF,EAAE,KAAKG,OAAO,CAACC,SAASJ,EAAE,EAAE;YAC/C,OAAO;QACT;IACF;IAEA,OAAO;AACT;AAEA,OAAO,SAASK,0BACdC,MAAqB,EACrBxD,eAAmCjC,YAAY;IAE/C,MAAM0F,SAASvF,mBAAmB8B;IAClC,MAAM0D,kCAAkC1F,QACtCyF,OAAOE,UAAU,EACjB,CAAC,EAAE7C,GAAG,EAAE,GAAKA,GAAG,CAACA,IAAI9B,MAAM,GAAG,EAAE;IAElC,MAAM4E,iCAAiC5F,QAAQyF,OAAOI,SAAS,EAAE,CAAC,EAAEC,OAAO,EAAE,GAAKA;IAElF,KAAK,MAAM5D,eAAeF,aAAc;QACtC,MAAM8C,OAAO5C,YAAY4C,IAAI;QAC7B,IAAIA,SAAS,aAAaA,SAAS,gBAAgBA,SAAS,qBAAqB;YAC/E,MAAMiB,eAAe7D,YAAY6D,YAAY;YAC7C,KAAK,MAAMC,QAAQD,aAAc;gBAC/B,IAAI,CAACP,OAAOS,OAAO,CAACD,OAAO;oBACzB,MAAM,IAAIE,MACR,2HACEF,KAAKG,OAAO;gBAElB;YACF;QACF;IACF;IAEA,MAAMC,aAAa,CAAChG,YAAyBC,YAAsBC;QACjE,IAAIH,uBAAuBC,YAAYC,YAAYC,cAAcmF,OAAOY,OAAO,GAAG;YAChF;QACF;QAEA,IACE5E,gCAAgCrB,YAAYC,YAAYC,cAAcmF,OAAOa,gBAAgB,GAC7F;YACA;QACF;QAEA,IAAIzE,yBAAyBxB,YAAYC,cAAcsF,iCAAiC;YACtF;QACF;QAEApD,2BAA2BnC,YAAYC,cAAcoF;IACvD;IAEA,OAAOF,OAAOe,sBAAsB,CAAC,CAAC,EAAEC,WAAW,EAAEC,WAAW,EAAEC,eAAe,EAAEC,IAAI,EAAE;QACvF,kFAAkF;QAClF,IAAIA,KAAKC,GAAG,CAAC,oBAAoBD,KAAKC,GAAG,CAAC,aAAa;YACrD;QACF;QAEA,8FAA8F;QAC9F,IAAIpB,OAAOqB,WAAW,IAAI;YACxB;QACF;QAEA,MAAM3C,YAAYuC,YAAYK,IAAI,CAACrH;QACnC,MAAMsH,gBAAgBL,gBAAgBI,IAAI,CAACrH;QAE3C,qFAAqF;QACrF,4CAA4C;QAC5C,IACE,CAACE,kBAAkBoH,kBACnB,CAACpH,kBAAkBuE,cACnB,CAACA,UAAU8C,WAAW,MACtB9C,UAAU+C,EAAE,CAACF,gBACb;YACA;QACF;QAEA,MAAMG,YAAYhD,UAAUG,MAAM,CAACO,GAAG;QACtC,MAAMtE,eAAe4D,UAAUG,MAAM,CAACQ,MAAM;QAE5C,MAAMxE,aAAaoG,YAAYU,QAAQ,CAACC,GAAG,CAACF;QAE5C,IACE,CAACrH,YAAYQ,eACb,CAACmG,YAAYI,GAAG,CAACM,cAChB5G,iBAAiB,KAAKA,eAAeyG,cAAc1C,MAAM,CAACQ,MAAM,GAAG,GACpE;YACA;QACF;QAEAW,OAAO6B,MAAM,CAAC;YACZ,wCAAwC;YACxC,IAAIhH,WAAWqE,SAAS,CAAC,SAAS;gBAChC;YACF;YAEA,MAAMtE,aAAaC,WAAWI,SAAS;YAEvC,IAAIL,eAAe,MAAM;gBACvB;YACF;YAEAgG,WAAWhG,YAAYC,YAAY6D,UAAUG,MAAM,CAACQ,MAAM;QAC5D;IACF;AACF"}
+
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/MarkdownShortcuts.ts"],"sourcesContent":["/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\n\nimport type { ElementNode, LexicalEditor, TextNode } from 'lexical'\n\nimport {\n $createRangeSelection,\n $getSelection,\n $isLineBreakNode,\n $isRangeSelection,\n $isRootOrShadowRoot,\n $isTextNode,\n $setSelection,\n} from 'lexical'\n\nimport type {\n ElementTransformer,\n MultilineElementTransformer,\n TextFormatTransformer,\n TextMatchTransformer,\n Transformer,\n} from './MarkdownTransformers.js'\n\nimport { TRANSFORMERS } from './index.js'\nimport { indexBy, PUNCTUATION_OR_SPACE, transformersByType } from './utils.js'\n\nfunction runElementTransformers(\n parentNode: ElementNode,\n anchorNode: TextNode,\n anchorOffset: number,\n elementTransformers: ReadonlyArray<ElementTransformer>,\n): boolean {\n const grandParentNode = parentNode.getParent()\n\n if (!$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {\n return false\n }\n\n const textContent = anchorNode.getTextContent()\n\n // Checking for anchorOffset position to prevent any checks for cases when caret is too far\n // from a line start to be a part of block-level markdown trigger.\n //\n // TODO:\n // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)\n // since otherwise it won't be a markdown shortcut, but tables are exception\n if (textContent[anchorOffset - 1] !== ' ') {\n return false\n }\n\n for (const { regExp, replace } of elementTransformers) {\n const match = textContent.match(regExp)\n\n if (match && match[0].length === (match[0].endsWith(' ') ? anchorOffset : anchorOffset - 1)) {\n const nextSiblings = anchorNode.getNextSiblings()\n const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset)\n leadingNode?.remove()\n const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings\n if (replace(parentNode, siblings, match, false) !== false) {\n return true\n }\n }\n }\n\n return false\n}\n\nfunction runMultilineElementTransformers(\n parentNode: ElementNode,\n anchorNode: TextNode,\n anchorOffset: number,\n elementTransformers: ReadonlyArray<MultilineElementTransformer>,\n): boolean {\n const grandParentNode = parentNode.getParent()\n\n if (!$isRootOrShadowRoot(grandParentNode) || parentNode.getFirstChild() !== anchorNode) {\n return false\n }\n\n const textContent = anchorNode.getTextContent()\n\n // Checking for anchorOffset position to prevent any checks for cases when caret is too far\n // from a line start to be a part of block-level markdown trigger.\n //\n // TODO:\n // Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20)\n // since otherwise it won't be a markdown shortcut, but tables are exception\n if (textContent[anchorOffset - 1] !== ' ') {\n return false\n }\n\n for (const { regExpEnd, regExpStart, replace } of elementTransformers) {\n if (\n (regExpEnd && !('optional' in regExpEnd)) ||\n (regExpEnd && 'optional' in regExpEnd && !regExpEnd.optional)\n ) {\n continue\n }\n\n const match = textContent.match(regExpStart)\n\n if (match && match[0].length === (match[0].endsWith(' ') ? 
anchorOffset : anchorOffset - 1)) {\n const nextSiblings = anchorNode.getNextSiblings()\n const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset)\n leadingNode?.remove()\n const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings\n\n if (replace(parentNode, siblings, match, null, null, false) !== false) {\n return true\n }\n }\n }\n\n return false\n}\n\nfunction runTextMatchTransformers(\n anchorNode: TextNode,\n anchorOffset: number,\n transformersByTrigger: Readonly<Record<string, Array<TextMatchTransformer>>>,\n): boolean {\n let textContent = anchorNode.getTextContent()\n const lastChar = textContent[anchorOffset - 1]!\n const transformers = transformersByTrigger[lastChar]\n\n if (transformers == null) {\n return false\n }\n\n // If typing in the middle of content, remove the tail to do\n // reg exp match up to a string end (caret position)\n if (anchorOffset < textContent.length) {\n textContent = textContent.slice(0, anchorOffset)\n }\n\n for (const transformer of transformers) {\n if (!transformer.replace || !transformer.regExp) {\n continue\n }\n const match = textContent.match(transformer.regExp)\n\n if (match === null) {\n continue\n }\n\n const startIndex = match.index || 0\n const endIndex = startIndex + match[0].length\n let replaceNode\n\n if (startIndex === 0) {\n ;[replaceNode] = anchorNode.splitText(endIndex)\n } else {\n ;[, replaceNode] = anchorNode.splitText(startIndex, endIndex)\n }\n if (replaceNode) {\n replaceNode.selectNext(0, 0)\n transformer.replace(replaceNode, match)\n }\n return true\n }\n\n return false\n}\n\nfunction $runTextFormatTransformers(\n anchorNode: TextNode,\n anchorOffset: number,\n textFormatTransformers: Readonly<Record<string, ReadonlyArray<TextFormatTransformer>>>,\n): boolean {\n const textContent = anchorNode.getTextContent()\n const closeTagEndIndex = anchorOffset - 1\n const closeChar = textContent[closeTagEndIndex]!\n // Quick check if we're possibly at the end of inline markdown style\n const matchers = textFormatTransformers[closeChar]\n\n if (!matchers) {\n return false\n }\n\n for (const matcher of matchers) {\n const { tag } = matcher\n const tagLength = tag.length\n const closeTagStartIndex = closeTagEndIndex - tagLength + 1\n\n // If tag is not single char check if rest of it matches with text content\n if (tagLength > 1) {\n if (!isEqualSubString(textContent, closeTagStartIndex, tag, 0, tagLength)) {\n continue\n }\n }\n\n // Space before closing tag cancels inline markdown\n if (textContent[closeTagStartIndex - 1] === ' ') {\n continue\n }\n\n // Some tags can not be used within words, hence should have newline/space/punctuation after it\n const afterCloseTagChar = textContent[closeTagEndIndex + 1]\n\n if (\n matcher.intraword === false &&\n afterCloseTagChar &&\n !PUNCTUATION_OR_SPACE.test(afterCloseTagChar)\n ) {\n continue\n }\n\n const closeNode = anchorNode\n let openNode = closeNode\n let openTagStartIndex = getOpenTagStartIndex(textContent, closeTagStartIndex, tag)\n\n // Go through text node siblings and search for opening tag\n // if haven't found it within the same text node as closing tag\n let sibling: null | TextNode = openNode\n\n while (openTagStartIndex < 0 && (sibling = sibling.getPreviousSibling<TextNode>())) {\n if ($isLineBreakNode(sibling)) {\n break\n }\n\n if ($isTextNode(sibling)) {\n const siblingTextContent = sibling.getTextContent()\n openNode = sibling\n openTagStartIndex = getOpenTagStartIndex(siblingTextContent, siblingTextContent.length, tag)\n }\n }\n\n // 
Opening tag is not found\n if (openTagStartIndex < 0) {\n continue\n }\n\n // No content between opening and closing tag\n if (openNode === closeNode && openTagStartIndex + tagLength === closeTagStartIndex) {\n continue\n }\n\n // Checking longer tags for repeating chars (e.g. *** vs **)\n const prevOpenNodeText = openNode.getTextContent()\n\n if (openTagStartIndex > 0 && prevOpenNodeText[openTagStartIndex - 1] === closeChar) {\n continue\n }\n\n // Some tags can not be used within words, hence should have newline/space/punctuation before it\n const beforeOpenTagChar = prevOpenNodeText[openTagStartIndex - 1]\n\n if (\n matcher.intraword === false &&\n beforeOpenTagChar &&\n !PUNCTUATION_OR_SPACE.test(beforeOpenTagChar)\n ) {\n continue\n }\n\n // Clean text from opening and closing tags (starting from closing tag\n // to prevent any offset shifts if we start from opening one)\n const prevCloseNodeText = closeNode.getTextContent()\n const closeNodeText =\n prevCloseNodeText.slice(0, closeTagStartIndex) + prevCloseNodeText.slice(closeTagEndIndex + 1)\n closeNode.setTextContent(closeNodeText)\n const openNodeText = openNode === closeNode ? closeNodeText : prevOpenNodeText\n openNode.setTextContent(\n openNodeText.slice(0, openTagStartIndex) + openNodeText.slice(openTagStartIndex + tagLength),\n )\n const selection = $getSelection()\n const nextSelection = $createRangeSelection()\n $setSelection(nextSelection)\n // Adjust offset based on deleted chars\n const newOffset = closeTagEndIndex - tagLength * (openNode === closeNode ? 2 : 1) + 1\n nextSelection.anchor.set(openNode.__key, openTagStartIndex, 'text')\n nextSelection.focus.set(closeNode.__key, newOffset, 'text')\n\n // Apply formatting to selected text\n for (const format of matcher.format) {\n if (!nextSelection.hasFormat(format)) {\n nextSelection.formatText(format)\n }\n }\n\n // Collapse selection up to the focus point\n nextSelection.anchor.set(\n nextSelection.focus.key,\n nextSelection.focus.offset,\n nextSelection.focus.type,\n )\n\n // Remove formatting from collapsed selection\n for (const format of matcher.format) {\n if (nextSelection.hasFormat(format)) {\n nextSelection.toggleFormat(format)\n }\n }\n\n if ($isRangeSelection(selection)) {\n nextSelection.format = selection.format\n }\n\n return true\n }\n\n return false\n}\n\nfunction getOpenTagStartIndex(string: string, maxIndex: number, tag: string): number {\n const tagLength = tag.length\n\n for (let i = maxIndex; i >= tagLength; i--) {\n const startIndex = i - tagLength\n\n if (\n isEqualSubString(string, startIndex, tag, 0, tagLength) && // Space after opening tag cancels transformation\n string[startIndex + tagLength] !== ' '\n ) {\n return startIndex\n }\n }\n\n return -1\n}\n\nfunction isEqualSubString(\n stringA: string,\n aStart: number,\n stringB: string,\n bStart: number,\n length: number,\n): boolean {\n for (let i = 0; i < length; i++) {\n if (stringA[aStart + i] !== stringB[bStart + i]) {\n return false\n }\n }\n\n return true\n}\n\nexport function registerMarkdownShortcuts(\n editor: LexicalEditor,\n transformers: Array<Transformer> = TRANSFORMERS,\n): () => void {\n const byType = transformersByType(transformers)\n const textFormatTransformersByTrigger = indexBy(\n byType.textFormat,\n ({ tag }) => tag[tag.length - 1],\n )\n const textMatchTransformersByTrigger = indexBy(byType.textMatch, ({ trigger }) => trigger)\n\n for (const transformer of transformers) {\n const type = transformer.type\n if (type === 'element' || type === 'text-match' || type === 
'multiline-element') {\n const dependencies = transformer.dependencies\n for (const node of dependencies) {\n if (!editor.hasNode(node)) {\n throw new Error(\n 'MarkdownShortcuts: missing dependency %s for transformer. Ensure node dependency is included in editor initial config.' +\n node.getType(),\n )\n }\n }\n }\n }\n\n const $transform = (parentNode: ElementNode, anchorNode: TextNode, anchorOffset: number) => {\n if (runElementTransformers(parentNode, anchorNode, anchorOffset, byType.element)) {\n return\n }\n\n if (\n runMultilineElementTransformers(parentNode, anchorNode, anchorOffset, byType.multilineElement)\n ) {\n return\n }\n\n if (runTextMatchTransformers(anchorNode, anchorOffset, textMatchTransformersByTrigger)) {\n return\n }\n\n $runTextFormatTransformers(anchorNode, anchorOffset, textFormatTransformersByTrigger)\n }\n\n return editor.registerUpdateListener(({ dirtyLeaves, editorState, prevEditorState, tags }) => {\n // Ignore updates from collaboration and undo/redo (as changes already calculated)\n if (tags.has('collaboration') || tags.has('historic')) {\n return\n }\n\n // If editor is still composing (i.e. backticks) we must wait before the user confirms the key\n if (editor.isComposing()) {\n return\n }\n\n const selection = editorState.read($getSelection)\n const prevSelection = prevEditorState.read($getSelection)\n\n // We expect selection to be a collapsed range and not match previous one (as we want\n // to trigger transforms only as user types)\n if (\n !$isRangeSelection(prevSelection) ||\n !$isRangeSelection(selection) ||\n !selection.isCollapsed() ||\n selection.is(prevSelection)\n ) {\n return\n }\n\n const anchorKey = selection.anchor.key\n const anchorOffset = selection.anchor.offset\n\n const anchorNode = editorState._nodeMap.get(anchorKey)\n\n if (\n !$isTextNode(anchorNode) ||\n !dirtyLeaves.has(anchorKey) ||\n (anchorOffset !== 1 && anchorOffset > prevSelection.anchor.offset + 1)\n ) {\n return\n }\n\n editor.update(() => {\n // Markdown is not available inside code\n if (anchorNode.hasFormat('code')) {\n return\n }\n\n const parentNode = anchorNode.getParent()\n\n if (parentNode === null) {\n return\n }\n\n $transform(parentNode, anchorNode, selection.anchor.offset)\n })\n 
})\n}\n"],"names":["$createRangeSelection","$getSelection","$isLineBreakNode","$isRangeSelection","$isRootOrShadowRoot","$isTextNode","$setSelection","TRANSFORMERS","indexBy","PUNCTUATION_OR_SPACE","transformersByType","runElementTransformers","parentNode","anchorNode","anchorOffset","elementTransformers","grandParentNode","getParent","getFirstChild","textContent","getTextContent","regExp","replace","match","length","endsWith","nextSiblings","getNextSiblings","leadingNode","remainderNode","splitText","remove","siblings","runMultilineElementTransformers","regExpEnd","regExpStart","optional","runTextMatchTransformers","transformersByTrigger","lastChar","transformers","slice","transformer","startIndex","index","endIndex","replaceNode","selectNext","$runTextFormatTransformers","textFormatTransformers","closeTagEndIndex","closeChar","matchers","matcher","tag","tagLength","closeTagStartIndex","isEqualSubString","afterCloseTagChar","intraword","test","closeNode","openNode","openTagStartIndex","getOpenTagStartIndex","sibling","getPreviousSibling","siblingTextContent","prevOpenNodeText","beforeOpenTagChar","prevCloseNodeText","closeNodeText","setTextContent","openNodeText","selection","nextSelection","newOffset","anchor","set","__key","focus","format","hasFormat","formatText","key","offset","type","toggleFormat","string","maxIndex","i","stringA","aStart","stringB","bStart","registerMarkdownShortcuts","editor","byType","textFormatTransformersByTrigger","textFormat","textMatchTransformersByTrigger","textMatch","trigger","dependencies","node","hasNode","Error","getType","$transform","element","multilineElement","registerUpdateListener","dirtyLeaves","editorState","prevEditorState","tags","has","isComposing","read","prevSelection","isCollapsed","is","anchorKey","_nodeMap","get","update"],"mappings":"AAAA;;;;;;CAMC,GAID,SACEA,qBAAqB,EACrBC,aAAa,EACbC,gBAAgB,EAChBC,iBAAiB,EACjBC,mBAAmB,EACnBC,WAAW,EACXC,aAAa,QACR,UAAS;AAUhB,SAASC,YAAY,QAAQ,aAAY;AACzC,SAASC,OAAO,EAAEC,oBAAoB,EAAEC,kBAAkB,QAAQ,aAAY;AAE9E,SAASC,uBACPC,UAAuB,EACvBC,UAAoB,EACpBC,YAAoB,EACpBC,mBAAsD;IAEtD,MAAMC,kBAAkBJ,WAAWK,SAAS;IAE5C,IAAI,CAACb,oBAAoBY,oBAAoBJ,WAAWM,aAAa,OAAOL,YAAY;QACtF,OAAO;IACT;IAEA,MAAMM,cAAcN,WAAWO,cAAc;IAE7C,2FAA2F;IAC3F,kEAAkE;IAClE,EAAE;IACF,QAAQ;IACR,+GAA+G;IAC/G,4EAA4E;IAC5E,IAAID,WAAW,CAACL,eAAe,EAAE,KAAK,KAAK;QACzC,OAAO;IACT;IAEA,KAAK,MAAM,EAAEO,MAAM,EAAEC,OAAO,EAAE,IAAIP,oBAAqB;QACrD,MAAMQ,QAAQJ,YAAYI,KAAK,CAACF;QAEhC,IAAIE,SAASA,KAAK,CAAC,EAAE,CAACC,MAAM,KAAMD,CAAAA,KAAK,CAAC,EAAE,CAACE,QAAQ,CAAC,OAAOX,eAAeA,eAAe,CAAA,GAAI;YAC3F,MAAMY,eAAeb,WAAWc,eAAe;YAC/C,MAAM,CAACC,aAAaC,cAAc,GAAGhB,WAAWiB,SAAS,CAAChB;YAC1Dc,aAAaG;YACb,MAAMC,WAAWH,gBAAgB;gBAACA;mBAAkBH;aAAa,GAAGA;YACpE,IAAIJ,QAAQV,YAAYoB,UAAUT,OAAO,WAAW,OAAO;gBACzD,OAAO;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,SAASU,gCACPrB,UAAuB,EACvBC,UAAoB,EACpBC,YAAoB,EACpBC,mBAA+D;IAE/D,MAAMC,kBAAkBJ,WAAWK,SAAS;IAE5C,IAAI,CAACb,oBAAoBY,oBAAoBJ,WAAWM,aAAa,OAAOL,YAAY;QACtF,OAAO;IACT;IAEA,MAAMM,cAAcN,WAAWO,cAAc;IAE7C,2FAA2F;IAC3F,kEAAkE;IAClE,EAAE;IACF,QAAQ;IACR,+GAA+G;IAC/G,4EAA4E;IAC5E,IAAID,WAAW,CAACL,eAAe,EAAE,KAAK,KAAK;QACzC,OAAO;IACT;IAEA,KAAK,MAAM,EAAEoB,SAAS,EAAEC,WAAW,EAAEb,OAAO,EAAE,IAAIP,oBAAqB;QACrE,IACE,AAACmB,aAAa,CAAE,CAAA,cAAcA,SAAQ,KACrCA,aAAa,cAAcA,aAAa,CAACA,UAAUE,QAAQ,EAC5D;YACA;QACF;QAEA,MAAMb,QAAQJ,YAAYI,KAAK,CAACY;QAEhC,IAAIZ,SAASA,KAAK,CAAC,EAAE,CAACC,MAAM,KAAMD,CAAAA,KAAK,CAAC,EAAE,CAACE,QAAQ,CAAC,OAAOX,eAAeA,eAAe,CAAA,GAAI;YAC3F,MAAMY,eAAeb,WAAWc,eAAe;YAC/C,MAAM,CAACC,aAAaC,cAAc,GAAGhB,WAAWiB,SAAS,CAAChB;YAC1Dc,aAAaG;YACb,MAAMC,WAAWH,gBAAgB;gBAACA;mBAAkB
H;aAAa,GAAGA;YAEpE,IAAIJ,QAAQV,YAAYoB,UAAUT,OAAO,MAAM,MAAM,WAAW,OAAO;gBACrE,OAAO;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,SAASc,yBACPxB,UAAoB,EACpBC,YAAoB,EACpBwB,qBAA4E;IAE5E,IAAInB,cAAcN,WAAWO,cAAc;IAC3C,MAAMmB,WAAWpB,WAAW,CAACL,eAAe,EAAE;IAC9C,MAAM0B,eAAeF,qBAAqB,CAACC,SAAS;IAEpD,IAAIC,gBAAgB,MAAM;QACxB,OAAO;IACT;IAEA,4DAA4D;IAC5D,oDAAoD;IACpD,IAAI1B,eAAeK,YAAYK,MAAM,EAAE;QACrCL,cAAcA,YAAYsB,KAAK,CAAC,GAAG3B;IACrC;IAEA,KAAK,MAAM4B,eAAeF,aAAc;QACtC,IAAI,CAACE,YAAYpB,OAAO,IAAI,CAACoB,YAAYrB,MAAM,EAAE;YAC/C;QACF;QACA,MAAME,QAAQJ,YAAYI,KAAK,CAACmB,YAAYrB,MAAM;QAElD,IAAIE,UAAU,MAAM;YAClB;QACF;QAEA,MAAMoB,aAAapB,MAAMqB,KAAK,IAAI;QAClC,MAAMC,WAAWF,aAAapB,KAAK,CAAC,EAAE,CAACC,MAAM;QAC7C,IAAIsB;QAEJ,IAAIH,eAAe,GAAG;;YACnB,CAACG,YAAY,GAAGjC,WAAWiB,SAAS,CAACe;QACxC,OAAO;;YACJ,GAAGC,YAAY,GAAGjC,WAAWiB,SAAS,CAACa,YAAYE;QACtD;QACA,IAAIC,aAAa;YACfA,YAAYC,UAAU,CAAC,GAAG;YAC1BL,YAAYpB,OAAO,CAACwB,aAAavB;QACnC;QACA,OAAO;IACT;IAEA,OAAO;AACT;AAEA,SAASyB,2BACPnC,UAAoB,EACpBC,YAAoB,EACpBmC,sBAAsF;IAEtF,MAAM9B,cAAcN,WAAWO,cAAc;IAC7C,MAAM8B,mBAAmBpC,eAAe;IACxC,MAAMqC,YAAYhC,WAAW,CAAC+B,iBAAiB;IAC/C,oEAAoE;IACpE,MAAME,WAAWH,sBAAsB,CAACE,UAAU;IAElD,IAAI,CAACC,UAAU;QACb,OAAO;IACT;IAEA,KAAK,MAAMC,WAAWD,SAAU;QAC9B,MAAM,EAAEE,GAAG,EAAE,GAAGD;QAChB,MAAME,YAAYD,IAAI9B,MAAM;QAC5B,MAAMgC,qBAAqBN,mBAAmBK,YAAY;QAE1D,0EAA0E;QAC1E,IAAIA,YAAY,GAAG;YACjB,IAAI,CAACE,iBAAiBtC,aAAaqC,oBAAoBF,KAAK,GAAGC,YAAY;gBACzE;YACF;QACF;QAEA,mDAAmD;QACnD,IAAIpC,WAAW,CAACqC,qBAAqB,EAAE,KAAK,KAAK;YAC/C;QACF;QAEA,+FAA+F;QAC/F,MAAME,oBAAoBvC,WAAW,CAAC+B,mBAAmB,EAAE;QAE3D,IACEG,QAAQM,SAAS,KAAK,SACtBD,qBACA,CAACjD,qBAAqBmD,IAAI,CAACF,oBAC3B;YACA;QACF;QAEA,MAAMG,YAAYhD;QAClB,IAAIiD,WAAWD;QACf,IAAIE,oBAAoBC,qBAAqB7C,aAAaqC,oBAAoBF;QAE9E,2DAA2D;QAC3D,+DAA+D;QAC/D,IAAIW,UAA2BH;QAE/B,MAAOC,oBAAoB,KAAME,CAAAA,UAAUA,QAAQC,kBAAkB,EAAW,EAAI;YAClF,IAAIhE,iBAAiB+D,UAAU;gBAC7B;YACF;YAEA,IAAI5D,YAAY4D,UAAU;gBACxB,MAAME,qBAAqBF,QAAQ7C,cAAc;gBACjD0C,WAAWG;gBACXF,oBAAoBC,qBAAqBG,oBAAoBA,mBAAmB3C,MAAM,EAAE8B;YAC1F;QACF;QAEA,2BAA2B;QAC3B,IAAIS,oBAAoB,GAAG;YACzB;QACF;QAEA,6CAA6C;QAC7C,IAAID,aAAaD,aAAaE,oBAAoBR,cAAcC,oBAAoB;YAClF;QACF;QAEA,4DAA4D;QAC5D,MAAMY,mBAAmBN,SAAS1C,cAAc;QAEhD,IAAI2C,oBAAoB,KAAKK,gBAAgB,CAACL,oBAAoB,EAAE,KAAKZ,WAAW;YAClF;QACF;QAEA,gGAAgG;QAChG,MAAMkB,oBAAoBD,gBAAgB,CAACL,oBAAoB,EAAE;QAEjE,IACEV,QAAQM,SAAS,KAAK,SACtBU,qBACA,CAAC5D,qBAAqBmD,IAAI,CAACS,oBAC3B;YACA;QACF;QAEA,sEAAsE;QACtE,6DAA6D;QAC7D,MAAMC,oBAAoBT,UAAUzC,cAAc;QAClD,MAAMmD,gBACJD,kBAAkB7B,KAAK,CAAC,GAAGe,sBAAsBc,kBAAkB7B,KAAK,CAACS,mBAAmB;QAC9FW,UAAUW,cAAc,CAACD;QACzB,MAAME,eAAeX,aAAaD,YAAYU,gBAAgBH;QAC9DN,SAASU,cAAc,CACrBC,aAAahC,KAAK,CAAC,GAAGsB,qBAAqBU,aAAahC,KAAK,CAACsB,oBAAoBR;QAEpF,MAAMmB,YAAYzE;QAClB,MAAM0E,gBAAgB3E;QACtBM,cAAcqE;QACd,uCAAuC;QACvC,MAAMC,YAAY1B,mBAAmBK,YAAaO,CAAAA,aAAaD,YAAY,IAAI,CAAA,IAAK;QACpFc,cAAcE,MAAM,CAACC,GAAG,CAAChB,SAASiB,KAAK,EAAEhB,mBAAmB;QAC5DY,cAAcK,KAAK,CAACF,GAAG,CAACjB,UAAUkB,KAAK,EAAEH,WAAW;QAEpD,oCAAoC;QACpC,KAAK,MAAMK,UAAU5B,QAAQ4B,MAAM,CAAE;YACnC,IAAI,CAACN,cAAcO,SAAS,CAACD,SAAS;gBACpCN,cAAcQ,UAAU,CAACF;YAC3B;QACF;QAEA,2CAA2C;QAC3CN,cAAcE,MAAM,CAACC,GAAG,CACtBH,cAAcK,KAAK,CAACI,GAAG,EACvBT,cAAcK,KAAK,CAACK,MAAM,EAC1BV,cAAcK,KAAK,CAACM,IAAI;QAG1B,6CAA6C;QAC7C,KAAK,MAAML,UAAU5B,QAAQ4B,MAAM,CAAE;YACnC,IAAIN,cAAcO,SAAS,CAACD,SAAS;gBACnCN,cAAcY,YAAY,CAACN;YAC7B;QACF;QAEA,IAAI9E,kBAAkBuE,YAAY;YAChCC,cAAcM,MAAM,GAAGP,UAAUO,MAAM;QACzC;QAEA,OAAO;IACT;IAEA,OAAO;AACT;AAEA,SAASjB,qBAAqBwB,MAAc,EAAEC,QAAgB,EAAEnC,GAAW;IACzE,MAAMC,YAAYD,IAAI9B,MAAM;IAE5B,IAAK,IAAIkE,IAAID,UAAUC,KAAKnC,WAAWmC,IAAK;QAC1C,MAAM/C,aAAa+C,IAAInC;QAEvB,IACEE,iBAAiB+B,QAAQ7C,YAAYW,KA
AK,GAAGC,cAAc,iDAAiD;QAC5GiC,MAAM,CAAC7C,aAAaY,UAAU,KAAK,KACnC;YACA,OAAOZ;QACT;IACF;IAEA,OAAO,CAAC;AACV;AAEA,SAASc,iBACPkC,OAAe,EACfC,MAAc,EACdC,OAAe,EACfC,MAAc,EACdtE,MAAc;IAEd,IAAK,IAAIkE,IAAI,GAAGA,IAAIlE,QAAQkE,IAAK;QAC/B,IAAIC,OAAO,CAACC,SAASF,EAAE,KAAKG,OAAO,CAACC,SAASJ,EAAE,EAAE;YAC/C,OAAO;QACT;IACF;IAEA,OAAO;AACT;AAEA,OAAO,SAASK,0BACdC,MAAqB,EACrBxD,eAAmCjC,YAAY;IAE/C,MAAM0F,SAASvF,mBAAmB8B;IAClC,MAAM0D,kCAAkC1F,QACtCyF,OAAOE,UAAU,EACjB,CAAC,EAAE7C,GAAG,EAAE,GAAKA,GAAG,CAACA,IAAI9B,MAAM,GAAG,EAAE;IAElC,MAAM4E,iCAAiC5F,QAAQyF,OAAOI,SAAS,EAAE,CAAC,EAAEC,OAAO,EAAE,GAAKA;IAElF,KAAK,MAAM5D,eAAeF,aAAc;QACtC,MAAM8C,OAAO5C,YAAY4C,IAAI;QAC7B,IAAIA,SAAS,aAAaA,SAAS,gBAAgBA,SAAS,qBAAqB;YAC/E,MAAMiB,eAAe7D,YAAY6D,YAAY;YAC7C,KAAK,MAAMC,QAAQD,aAAc;gBAC/B,IAAI,CAACP,OAAOS,OAAO,CAACD,OAAO;oBACzB,MAAM,IAAIE,MACR,2HACEF,KAAKG,OAAO;gBAElB;YACF;QACF;IACF;IAEA,MAAMC,aAAa,CAAChG,YAAyBC,YAAsBC;QACjE,IAAIH,uBAAuBC,YAAYC,YAAYC,cAAcmF,OAAOY,OAAO,GAAG;YAChF;QACF;QAEA,IACE5E,gCAAgCrB,YAAYC,YAAYC,cAAcmF,OAAOa,gBAAgB,GAC7F;YACA;QACF;QAEA,IAAIzE,yBAAyBxB,YAAYC,cAAcsF,iCAAiC;YACtF;QACF;QAEApD,2BAA2BnC,YAAYC,cAAcoF;IACvD;IAEA,OAAOF,OAAOe,sBAAsB,CAAC,CAAC,EAAEC,WAAW,EAAEC,WAAW,EAAEC,eAAe,EAAEC,IAAI,EAAE;QACvF,kFAAkF;QAClF,IAAIA,KAAKC,GAAG,CAAC,oBAAoBD,KAAKC,GAAG,CAAC,aAAa;YACrD;QACF;QAEA,8FAA8F;QAC9F,IAAIpB,OAAOqB,WAAW,IAAI;YACxB;QACF;QAEA,MAAM3C,YAAYuC,YAAYK,IAAI,CAACrH;QACnC,MAAMsH,gBAAgBL,gBAAgBI,IAAI,CAACrH;QAE3C,qFAAqF;QACrF,4CAA4C;QAC5C,IACE,CAACE,kBAAkBoH,kBACnB,CAACpH,kBAAkBuE,cACnB,CAACA,UAAU8C,WAAW,MACtB9C,UAAU+C,EAAE,CAACF,gBACb;YACA;QACF;QAEA,MAAMG,YAAYhD,UAAUG,MAAM,CAACO,GAAG;QACtC,MAAMtE,eAAe4D,UAAUG,MAAM,CAACQ,MAAM;QAE5C,MAAMxE,aAAaoG,YAAYU,QAAQ,CAACC,GAAG,CAACF;QAE5C,IACE,CAACrH,YAAYQ,eACb,CAACmG,YAAYI,GAAG,CAACM,cAChB5G,iBAAiB,KAAKA,eAAeyG,cAAc1C,MAAM,CAACQ,MAAM,GAAG,GACpE;YACA;QACF;QAEAW,OAAO6B,MAAM,CAAC;YACZ,wCAAwC;YACxC,IAAIhH,WAAWqE,SAAS,CAAC,SAAS;gBAChC;YACF;YAEA,MAAMtE,aAAaC,WAAWI,SAAS;YAEvC,IAAIL,eAAe,MAAM;gBACvB;YACF;YAEAgG,WAAWhG,YAAYC,YAAY6D,UAAUG,MAAM,CAACQ,MAAM;QAC5D;IACF;AACF"}
package/dist/packages/@lexical/markdown/importTextFormatTransformer.js
@@ -69,8 +69,10 @@ export function importTextFormatTransformer(textNode, startIndex, endIndex, transformer, match) {
             transformedNode = textNode;
         } else {
             if (startIndex === 0) {
+                ;
                 [transformedNode, nodeAfter] = textNode.splitText(endIndex);
             } else {
+                ;
                 [nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex);
             }
         }
package/dist/packages/@lexical/markdown/importTextFormatTransformer.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/importTextFormatTransformer.ts"],"sourcesContent":["/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\n\nimport type { TextNode } from 'lexical'\n\nimport type { TextFormatTransformersIndex } from './MarkdownImport.js'\nimport type { TextFormatTransformer } from './MarkdownTransformers.js'\n\nimport { PUNCTUATION_OR_SPACE } from './utils.js'\n\nexport function findOutermostTextFormatTransformer(\n textNode: TextNode,\n textFormatTransformersIndex: TextFormatTransformersIndex,\n): {\n endIndex: number\n match: RegExpMatchArray\n startIndex: number\n transformer: TextFormatTransformer\n} | null {\n const textContent = textNode.getTextContent()\n const match = findOutermostMatch(textContent, textFormatTransformersIndex)\n\n if (!match) {\n return null\n }\n\n const textFormatMatchStart: number = match.index || 0\n const textFormatMatchEnd = textFormatMatchStart + match[0].length\n\n // @ts-expect-error - vestiges of when tsconfig was not strict. Feel free to improve\n const transformer: TextFormatTransformer = textFormatTransformersIndex.transformersByTag[match[1]]\n\n return {\n endIndex: textFormatMatchEnd,\n match,\n startIndex: textFormatMatchStart,\n transformer,\n }\n}\n\n// Finds first \"<tag>content<tag>\" match that is not nested into another tag\nfunction findOutermostMatch(\n textContent: string,\n textTransformersIndex: TextFormatTransformersIndex,\n): null | RegExpMatchArray {\n const openTagsMatch = textContent.match(textTransformersIndex.openTagsRegExp)\n\n if (openTagsMatch == null) {\n return null\n }\n\n for (const match of openTagsMatch) {\n // Open tags reg exp might capture leading space so removing it\n // before using match to find transformer\n const tag = match.replace(/^\\s/, '')\n const fullMatchRegExp = textTransformersIndex.fullMatchRegExpByTag[tag]\n if (fullMatchRegExp == null) {\n continue\n }\n\n const fullMatch = textContent.match(fullMatchRegExp)\n const transformer = textTransformersIndex.transformersByTag[tag]\n if (fullMatch != null && transformer != null) {\n if (transformer.intraword !== false) {\n return fullMatch\n }\n\n // For non-intraword transformers checking if it's within a word\n // or surrounded with space/punctuation/newline\n const { index = 0 } = fullMatch\n const beforeChar = textContent[index - 1]\n const afterChar = textContent[index + fullMatch[0].length]\n\n if (\n (!beforeChar || PUNCTUATION_OR_SPACE.test(beforeChar)) &&\n (!afterChar || PUNCTUATION_OR_SPACE.test(afterChar))\n ) {\n return fullMatch\n }\n }\n }\n\n return null\n}\n\nexport function importTextFormatTransformer(\n textNode: TextNode,\n startIndex: number,\n endIndex: number,\n transformer: TextFormatTransformer,\n match: RegExpMatchArray,\n): {\n nodeAfter: TextNode | undefined // If split\n nodeBefore: TextNode | undefined // If split\n transformedNode: TextNode\n} {\n const textContent = textNode.getTextContent()\n\n // No text matches - we can safely process the text format match\n let nodeAfter: TextNode | undefined\n let nodeBefore: TextNode | undefined\n let transformedNode: TextNode\n\n // If matching full content there's no need to run splitText and can reuse existing textNode\n // to update its content and apply format. E.g. 
for **_Hello_** string after applying bold\n // format (**) it will reuse the same text node to apply italic (_)\n if (match[0] === textContent) {\n transformedNode = textNode\n } else {\n if (startIndex === 0) {\n ;[transformedNode, nodeAfter] = textNode.splitText(endIndex) as [\n TextNode,\n TextNode | undefined,\n ]\n } else {\n ;[nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex) as [\n TextNode,\n TextNode,\n TextNode | undefined,\n ]\n }\n }\n\n transformedNode.setTextContent(match[2]!)\n if (transformer) {\n for (const format of transformer.format) {\n if (!transformedNode.hasFormat(format)) {\n transformedNode.toggleFormat(format)\n }\n }\n }\n\n return {\n nodeAfter,\n nodeBefore,\n transformedNode,\n }\n}\n"],"names":["PUNCTUATION_OR_SPACE","findOutermostTextFormatTransformer","textNode","textFormatTransformersIndex","textContent","getTextContent","match","findOutermostMatch","textFormatMatchStart","index","textFormatMatchEnd","length","transformer","transformersByTag","endIndex","startIndex","textTransformersIndex","openTagsMatch","openTagsRegExp","tag","replace","fullMatchRegExp","fullMatchRegExpByTag","fullMatch","intraword","beforeChar","afterChar","test","importTextFormatTransformer","nodeAfter","nodeBefore","transformedNode","splitText","setTextContent","format","hasFormat","toggleFormat"],"mappings":"AAAA;;;;;;CAMC,GAOD,SAASA,oBAAoB,QAAQ,aAAY;AAEjD,OAAO,SAASC,mCACdC,QAAkB,EAClBC,2BAAwD;IAOxD,MAAMC,cAAcF,SAASG,cAAc;IAC3C,MAAMC,QAAQC,mBAAmBH,aAAaD;IAE9C,IAAI,CAACG,OAAO;QACV,OAAO;IACT;IAEA,MAAME,uBAA+BF,MAAMG,KAAK,IAAI;IACpD,MAAMC,qBAAqBF,uBAAuBF,KAAK,CAAC,EAAE,CAACK,MAAM;IAEjE,oFAAoF;IACpF,MAAMC,cAAqCT,4BAA4BU,iBAAiB,CAACP,KAAK,CAAC,EAAE,CAAC;IAElG,OAAO;QACLQ,UAAUJ;QACVJ;QACAS,YAAYP;QACZI;IACF;AACF;AAEA,4EAA4E;AAC5E,SAASL,mBACPH,WAAmB,EACnBY,qBAAkD;IAElD,MAAMC,gBAAgBb,YAAYE,KAAK,CAACU,sBAAsBE,cAAc;IAE5E,IAAID,iBAAiB,MAAM;QACzB,OAAO;IACT;IAEA,KAAK,MAAMX,SAASW,cAAe;QACjC,+DAA+D;QAC/D,yCAAyC;QACzC,MAAME,MAAMb,MAAMc,OAAO,CAAC,OAAO;QACjC,MAAMC,kBAAkBL,sBAAsBM,oBAAoB,CAACH,IAAI;QACvE,IAAIE,mBAAmB,MAAM;YAC3B;QACF;QAEA,MAAME,YAAYnB,YAAYE,KAAK,CAACe;QACpC,MAAMT,cAAcI,sBAAsBH,iBAAiB,CAACM,IAAI;QAChE,IAAII,aAAa,QAAQX,eAAe,MAAM;YAC5C,IAAIA,YAAYY,SAAS,KAAK,OAAO;gBACnC,OAAOD;YACT;YAEA,gEAAgE;YAChE,+CAA+C;YAC/C,MAAM,EAAEd,QAAQ,CAAC,EAAE,GAAGc;YACtB,MAAME,aAAarB,WAAW,CAACK,QAAQ,EAAE;YACzC,MAAMiB,YAAYtB,WAAW,CAACK,QAAQc,SAAS,CAAC,EAAE,CAACZ,MAAM,CAAC;YAE1D,IACE,AAAC,CAAA,CAACc,cAAczB,qBAAqB2B,IAAI,CAACF,WAAU,KACnD,CAAA,CAACC,aAAa1B,qBAAqB2B,IAAI,CAACD,UAAS,GAClD;gBACA,OAAOH;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,OAAO,SAASK,4BACd1B,QAAkB,EAClBa,UAAkB,EAClBD,QAAgB,EAChBF,WAAkC,EAClCN,KAAuB,EAES,WAAW;AACV,WAAW;;IAG5C,MAAMF,cAAcF,SAASG,cAAc;IAE3C,gEAAgE;IAChE,IAAIwB;IACJ,IAAIC;IACJ,IAAIC;IAEJ,4FAA4F;IAC5F,0FAA0F;IAC1F,mEAAmE;IACnE,IAAIzB,KAAK,CAAC,EAAE,KAAKF,aAAa;QAC5B2B,kBAAkB7B;IACpB,OAAO;QACL,IAAIa,eAAe,GAAG
+
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/importTextFormatTransformer.ts"],"sourcesContent":["/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\n\nimport type { TextNode } from 'lexical'\n\nimport type { TextFormatTransformersIndex } from './MarkdownImport.js'\nimport type { TextFormatTransformer } from './MarkdownTransformers.js'\n\nimport { PUNCTUATION_OR_SPACE } from './utils.js'\n\nexport function findOutermostTextFormatTransformer(\n textNode: TextNode,\n textFormatTransformersIndex: TextFormatTransformersIndex,\n): {\n endIndex: number\n match: RegExpMatchArray\n startIndex: number\n transformer: TextFormatTransformer\n} | null {\n const textContent = textNode.getTextContent()\n const match = findOutermostMatch(textContent, textFormatTransformersIndex)\n\n if (!match) {\n return null\n }\n\n const textFormatMatchStart: number = match.index || 0\n const textFormatMatchEnd = textFormatMatchStart + match[0].length\n\n // @ts-expect-error - vestiges of when tsconfig was not strict. Feel free to improve\n const transformer: TextFormatTransformer = textFormatTransformersIndex.transformersByTag[match[1]]\n\n return {\n endIndex: textFormatMatchEnd,\n match,\n startIndex: textFormatMatchStart,\n transformer,\n }\n}\n\n// Finds first \"<tag>content<tag>\" match that is not nested into another tag\nfunction findOutermostMatch(\n textContent: string,\n textTransformersIndex: TextFormatTransformersIndex,\n): null | RegExpMatchArray {\n const openTagsMatch = textContent.match(textTransformersIndex.openTagsRegExp)\n\n if (openTagsMatch == null) {\n return null\n }\n\n for (const match of openTagsMatch) {\n // Open tags reg exp might capture leading space so removing it\n // before using match to find transformer\n const tag = match.replace(/^\\s/, '')\n const fullMatchRegExp = textTransformersIndex.fullMatchRegExpByTag[tag]\n if (fullMatchRegExp == null) {\n continue\n }\n\n const fullMatch = textContent.match(fullMatchRegExp)\n const transformer = textTransformersIndex.transformersByTag[tag]\n if (fullMatch != null && transformer != null) {\n if (transformer.intraword !== false) {\n return fullMatch\n }\n\n // For non-intraword transformers checking if it's within a word\n // or surrounded with space/punctuation/newline\n const { index = 0 } = fullMatch\n const beforeChar = textContent[index - 1]\n const afterChar = textContent[index + fullMatch[0].length]\n\n if (\n (!beforeChar || PUNCTUATION_OR_SPACE.test(beforeChar)) &&\n (!afterChar || PUNCTUATION_OR_SPACE.test(afterChar))\n ) {\n return fullMatch\n }\n }\n }\n\n return null\n}\n\nexport function importTextFormatTransformer(\n textNode: TextNode,\n startIndex: number,\n endIndex: number,\n transformer: TextFormatTransformer,\n match: RegExpMatchArray,\n): {\n nodeAfter: TextNode | undefined // If split\n nodeBefore: TextNode | undefined // If split\n transformedNode: TextNode\n} {\n const textContent = textNode.getTextContent()\n\n // No text matches - we can safely process the text format match\n let nodeAfter: TextNode | undefined\n let nodeBefore: TextNode | undefined\n let transformedNode: TextNode\n\n // If matching full content there's no need to run splitText and can reuse existing textNode\n // to update its content and apply format. E.g. 
for **_Hello_** string after applying bold\n // format (**) it will reuse the same text node to apply italic (_)\n if (match[0] === textContent) {\n transformedNode = textNode\n } else {\n if (startIndex === 0) {\n ;[transformedNode, nodeAfter] = textNode.splitText(endIndex) as [\n TextNode,\n TextNode | undefined,\n ]\n } else {\n ;[nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex) as [\n TextNode,\n TextNode,\n TextNode | undefined,\n ]\n }\n }\n\n transformedNode.setTextContent(match[2]!)\n if (transformer) {\n for (const format of transformer.format) {\n if (!transformedNode.hasFormat(format)) {\n transformedNode.toggleFormat(format)\n }\n }\n }\n\n return {\n nodeAfter,\n nodeBefore,\n transformedNode,\n }\n}\n"],"names":["PUNCTUATION_OR_SPACE","findOutermostTextFormatTransformer","textNode","textFormatTransformersIndex","textContent","getTextContent","match","findOutermostMatch","textFormatMatchStart","index","textFormatMatchEnd","length","transformer","transformersByTag","endIndex","startIndex","textTransformersIndex","openTagsMatch","openTagsRegExp","tag","replace","fullMatchRegExp","fullMatchRegExpByTag","fullMatch","intraword","beforeChar","afterChar","test","importTextFormatTransformer","nodeAfter","nodeBefore","transformedNode","splitText","setTextContent","format","hasFormat","toggleFormat"],"mappings":"AAAA;;;;;;CAMC,GAOD,SAASA,oBAAoB,QAAQ,aAAY;AAEjD,OAAO,SAASC,mCACdC,QAAkB,EAClBC,2BAAwD;IAOxD,MAAMC,cAAcF,SAASG,cAAc;IAC3C,MAAMC,QAAQC,mBAAmBH,aAAaD;IAE9C,IAAI,CAACG,OAAO;QACV,OAAO;IACT;IAEA,MAAME,uBAA+BF,MAAMG,KAAK,IAAI;IACpD,MAAMC,qBAAqBF,uBAAuBF,KAAK,CAAC,EAAE,CAACK,MAAM;IAEjE,oFAAoF;IACpF,MAAMC,cAAqCT,4BAA4BU,iBAAiB,CAACP,KAAK,CAAC,EAAE,CAAC;IAElG,OAAO;QACLQ,UAAUJ;QACVJ;QACAS,YAAYP;QACZI;IACF;AACF;AAEA,4EAA4E;AAC5E,SAASL,mBACPH,WAAmB,EACnBY,qBAAkD;IAElD,MAAMC,gBAAgBb,YAAYE,KAAK,CAACU,sBAAsBE,cAAc;IAE5E,IAAID,iBAAiB,MAAM;QACzB,OAAO;IACT;IAEA,KAAK,MAAMX,SAASW,cAAe;QACjC,+DAA+D;QAC/D,yCAAyC;QACzC,MAAME,MAAMb,MAAMc,OAAO,CAAC,OAAO;QACjC,MAAMC,kBAAkBL,sBAAsBM,oBAAoB,CAACH,IAAI;QACvE,IAAIE,mBAAmB,MAAM;YAC3B;QACF;QAEA,MAAME,YAAYnB,YAAYE,KAAK,CAACe;QACpC,MAAMT,cAAcI,sBAAsBH,iBAAiB,CAACM,IAAI;QAChE,IAAII,aAAa,QAAQX,eAAe,MAAM;YAC5C,IAAIA,YAAYY,SAAS,KAAK,OAAO;gBACnC,OAAOD;YACT;YAEA,gEAAgE;YAChE,+CAA+C;YAC/C,MAAM,EAAEd,QAAQ,CAAC,EAAE,GAAGc;YACtB,MAAME,aAAarB,WAAW,CAACK,QAAQ,EAAE;YACzC,MAAMiB,YAAYtB,WAAW,CAACK,QAAQc,SAAS,CAAC,EAAE,CAACZ,MAAM,CAAC;YAE1D,IACE,AAAC,CAAA,CAACc,cAAczB,qBAAqB2B,IAAI,CAACF,WAAU,KACnD,CAAA,CAACC,aAAa1B,qBAAqB2B,IAAI,CAACD,UAAS,GAClD;gBACA,OAAOH;YACT;QACF;IACF;IAEA,OAAO;AACT;AAEA,OAAO,SAASK,4BACd1B,QAAkB,EAClBa,UAAkB,EAClBD,QAAgB,EAChBF,WAAkC,EAClCN,KAAuB,EAES,WAAW;AACV,WAAW;;IAG5C,MAAMF,cAAcF,SAASG,cAAc;IAE3C,gEAAgE;IAChE,IAAIwB;IACJ,IAAIC;IACJ,IAAIC;IAEJ,4FAA4F;IAC5F,0FAA0F;IAC1F,mEAAmE;IACnE,IAAIzB,KAAK,CAAC,EAAE,KAAKF,aAAa;QAC5B2B,kBAAkB7B;IACpB,OAAO;QACL,IAAIa,eAAe,GAAG;;YACnB,CAACgB,iBAAiBF,UAAU,GAAG3B,SAAS8B,SAAS,CAAClB;QAIrD,OAAO;;YACJ,CAACgB,YAAYC,iBAAiBF,UAAU,GAAG3B,SAAS8B,SAAS,CAACjB,YAAYD;QAK7E;IACF;IAEAiB,gBAAgBE,cAAc,CAAC3B,KAAK,CAAC,EAAE;IACvC,IAAIM,aAAa;QACf,KAAK,MAAMsB,UAAUtB,YAAYsB,MAAM,CAAE;YACvC,IAAI,CAACH,gBAAgBI,SAAS,CAACD,SAAS;gBACtCH,gBAAgBK,YAAY,CAACF;YAC/B;QACF;IACF;IAEA,OAAO;QACLL;QACAC;QACAC;IACF;AACF"}

@@ -45,8 +45,10 @@ export function importFoundTextMatchTransformer(textNode, startIndex, endIndex,
 {
 let nodeAfter, nodeBefore, transformedNode;
 if (startIndex === 0) {
+;
 [transformedNode, nodeAfter] = textNode.splitText(endIndex);
 } else {
+;
 [nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex);
 }
 if (!transformer.replace) {

@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/importTextMatchTransformer.ts"],"sourcesContent":["import { type TextNode } from 'lexical'\n\n/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\nimport type { TextMatchTransformer } from './MarkdownTransformers.js'\n\nexport function findOutermostTextMatchTransformer(\n textNode_: TextNode,\n textMatchTransformers: Array<TextMatchTransformer>,\n): {\n endIndex: number\n match: RegExpMatchArray\n startIndex: number\n transformer: TextMatchTransformer\n} | null {\n const textNode = textNode_\n\n let foundMatchStartIndex: number | undefined = undefined\n let foundMatchEndIndex: number | undefined = undefined\n let foundMatchTransformer: TextMatchTransformer | undefined = undefined\n let foundMatch: RegExpMatchArray | undefined = undefined\n\n for (const transformer of textMatchTransformers) {\n if (!transformer.replace || !transformer.importRegExp) {\n continue\n }\n const match = textNode.getTextContent().match(transformer.importRegExp)\n\n if (!match) {\n continue\n }\n\n const startIndex = match.index || 0\n const endIndex = transformer.getEndIndex\n ? transformer.getEndIndex(textNode, match)\n : startIndex + match[0].length\n\n if (endIndex === false) {\n continue\n }\n\n if (\n foundMatchStartIndex === undefined ||\n foundMatchEndIndex === undefined ||\n (startIndex < foundMatchStartIndex && endIndex > foundMatchEndIndex)\n ) {\n foundMatchStartIndex = startIndex\n foundMatchEndIndex = endIndex\n foundMatchTransformer = transformer\n foundMatch = match\n }\n }\n\n if (\n foundMatchStartIndex === undefined ||\n foundMatchEndIndex === undefined ||\n foundMatchTransformer === undefined ||\n foundMatch === undefined\n ) {\n return null\n }\n\n return {\n endIndex: foundMatchEndIndex,\n match: foundMatch,\n startIndex: foundMatchStartIndex,\n transformer: foundMatchTransformer,\n }\n}\n\nexport function importFoundTextMatchTransformer(\n textNode: TextNode,\n startIndex: number,\n endIndex: number,\n transformer: TextMatchTransformer,\n match: RegExpMatchArray,\n): {\n nodeAfter: TextNode | undefined // If split\n nodeBefore: TextNode | undefined // If split\n transformedNode?: TextNode\n} | null {\n let nodeAfter, nodeBefore, transformedNode\n\n if (startIndex === 0) {\n ;[transformedNode, nodeAfter] = textNode.splitText(endIndex)\n } else {\n ;[nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex)\n }\n\n if (!transformer.replace) {\n return null\n }\n const potentialTransformedNode = transformedNode\n ? 
transformer.replace(transformedNode, match)\n : undefined\n\n return {\n nodeAfter,\n nodeBefore,\n transformedNode: potentialTransformedNode || undefined,\n }\n}\n"],"names":["findOutermostTextMatchTransformer","textNode_","textMatchTransformers","textNode","foundMatchStartIndex","undefined","foundMatchEndIndex","foundMatchTransformer","foundMatch","transformer","replace","importRegExp","match","getTextContent","startIndex","index","endIndex","getEndIndex","length","importFoundTextMatchTransformer","nodeAfter","nodeBefore","transformedNode","splitText","potentialTransformedNode"],"mappings":"AAEA;;;;;;CAMC,GAGD,OAAO,SAASA,kCACdC,SAAmB,EACnBC,qBAAkD;IAOlD,MAAMC,WAAWF;IAEjB,IAAIG,uBAA2CC;IAC/C,IAAIC,qBAAyCD;IAC7C,IAAIE,wBAA0DF;IAC9D,IAAIG,aAA2CH;IAE/C,KAAK,MAAMI,eAAeP,sBAAuB;QAC/C,IAAI,CAACO,YAAYC,OAAO,IAAI,CAACD,YAAYE,YAAY,EAAE;YACrD;QACF;QACA,MAAMC,QAAQT,SAASU,cAAc,GAAGD,KAAK,CAACH,YAAYE,YAAY;QAEtE,IAAI,CAACC,OAAO;YACV;QACF;QAEA,MAAME,aAAaF,MAAMG,KAAK,IAAI;QAClC,MAAMC,WAAWP,YAAYQ,WAAW,GACpCR,YAAYQ,WAAW,CAACd,UAAUS,SAClCE,aAAaF,KAAK,CAAC,EAAE,CAACM,MAAM;QAEhC,IAAIF,aAAa,OAAO;YACtB;QACF;QAEA,IACEZ,yBAAyBC,aACzBC,uBAAuBD,aACtBS,aAAaV,wBAAwBY,WAAWV,oBACjD;YACAF,uBAAuBU;YACvBR,qBAAqBU;YACrBT,wBAAwBE;YACxBD,aAAaI;QACf;IACF;IAEA,IACER,yBAAyBC,aACzBC,uBAAuBD,aACvBE,0BAA0BF,aAC1BG,eAAeH,WACf;QACA,OAAO;IACT;IAEA,OAAO;QACLW,UAAUV;QACVM,OAAOJ;QACPM,YAAYV;QACZK,aAAaF;IACf;AACF;AAEA,OAAO,SAASY,gCACdhB,QAAkB,EAClBW,UAAkB,EAClBE,QAAgB,EAChBP,WAAiC,EACjCG,KAAuB,EAES,WAAW;AACV,WAAW;;IAG5C,IAAIQ,WAAWC,YAAYC;IAE3B,IAAIR,eAAe,GAAG
+
{"version":3,"sources":["../../../../src/packages/@lexical/markdown/importTextMatchTransformer.ts"],"sourcesContent":["import { type TextNode } from 'lexical'\n\n/**\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n */\nimport type { TextMatchTransformer } from './MarkdownTransformers.js'\n\nexport function findOutermostTextMatchTransformer(\n textNode_: TextNode,\n textMatchTransformers: Array<TextMatchTransformer>,\n): {\n endIndex: number\n match: RegExpMatchArray\n startIndex: number\n transformer: TextMatchTransformer\n} | null {\n const textNode = textNode_\n\n let foundMatchStartIndex: number | undefined = undefined\n let foundMatchEndIndex: number | undefined = undefined\n let foundMatchTransformer: TextMatchTransformer | undefined = undefined\n let foundMatch: RegExpMatchArray | undefined = undefined\n\n for (const transformer of textMatchTransformers) {\n if (!transformer.replace || !transformer.importRegExp) {\n continue\n }\n const match = textNode.getTextContent().match(transformer.importRegExp)\n\n if (!match) {\n continue\n }\n\n const startIndex = match.index || 0\n const endIndex = transformer.getEndIndex\n ? transformer.getEndIndex(textNode, match)\n : startIndex + match[0].length\n\n if (endIndex === false) {\n continue\n }\n\n if (\n foundMatchStartIndex === undefined ||\n foundMatchEndIndex === undefined ||\n (startIndex < foundMatchStartIndex && endIndex > foundMatchEndIndex)\n ) {\n foundMatchStartIndex = startIndex\n foundMatchEndIndex = endIndex\n foundMatchTransformer = transformer\n foundMatch = match\n }\n }\n\n if (\n foundMatchStartIndex === undefined ||\n foundMatchEndIndex === undefined ||\n foundMatchTransformer === undefined ||\n foundMatch === undefined\n ) {\n return null\n }\n\n return {\n endIndex: foundMatchEndIndex,\n match: foundMatch,\n startIndex: foundMatchStartIndex,\n transformer: foundMatchTransformer,\n }\n}\n\nexport function importFoundTextMatchTransformer(\n textNode: TextNode,\n startIndex: number,\n endIndex: number,\n transformer: TextMatchTransformer,\n match: RegExpMatchArray,\n): {\n nodeAfter: TextNode | undefined // If split\n nodeBefore: TextNode | undefined // If split\n transformedNode?: TextNode\n} | null {\n let nodeAfter, nodeBefore, transformedNode\n\n if (startIndex === 0) {\n ;[transformedNode, nodeAfter] = textNode.splitText(endIndex)\n } else {\n ;[nodeBefore, transformedNode, nodeAfter] = textNode.splitText(startIndex, endIndex)\n }\n\n if (!transformer.replace) {\n return null\n }\n const potentialTransformedNode = transformedNode\n ? 
transformer.replace(transformedNode, match)\n : undefined\n\n return {\n nodeAfter,\n nodeBefore,\n transformedNode: potentialTransformedNode || undefined,\n }\n}\n"],"names":["findOutermostTextMatchTransformer","textNode_","textMatchTransformers","textNode","foundMatchStartIndex","undefined","foundMatchEndIndex","foundMatchTransformer","foundMatch","transformer","replace","importRegExp","match","getTextContent","startIndex","index","endIndex","getEndIndex","length","importFoundTextMatchTransformer","nodeAfter","nodeBefore","transformedNode","splitText","potentialTransformedNode"],"mappings":"AAEA;;;;;;CAMC,GAGD,OAAO,SAASA,kCACdC,SAAmB,EACnBC,qBAAkD;IAOlD,MAAMC,WAAWF;IAEjB,IAAIG,uBAA2CC;IAC/C,IAAIC,qBAAyCD;IAC7C,IAAIE,wBAA0DF;IAC9D,IAAIG,aAA2CH;IAE/C,KAAK,MAAMI,eAAeP,sBAAuB;QAC/C,IAAI,CAACO,YAAYC,OAAO,IAAI,CAACD,YAAYE,YAAY,EAAE;YACrD;QACF;QACA,MAAMC,QAAQT,SAASU,cAAc,GAAGD,KAAK,CAACH,YAAYE,YAAY;QAEtE,IAAI,CAACC,OAAO;YACV;QACF;QAEA,MAAME,aAAaF,MAAMG,KAAK,IAAI;QAClC,MAAMC,WAAWP,YAAYQ,WAAW,GACpCR,YAAYQ,WAAW,CAACd,UAAUS,SAClCE,aAAaF,KAAK,CAAC,EAAE,CAACM,MAAM;QAEhC,IAAIF,aAAa,OAAO;YACtB;QACF;QAEA,IACEZ,yBAAyBC,aACzBC,uBAAuBD,aACtBS,aAAaV,wBAAwBY,WAAWV,oBACjD;YACAF,uBAAuBU;YACvBR,qBAAqBU;YACrBT,wBAAwBE;YACxBD,aAAaI;QACf;IACF;IAEA,IACER,yBAAyBC,aACzBC,uBAAuBD,aACvBE,0BAA0BF,aAC1BG,eAAeH,WACf;QACA,OAAO;IACT;IAEA,OAAO;QACLW,UAAUV;QACVM,OAAOJ;QACPM,YAAYV;QACZK,aAAaF;IACf;AACF;AAEA,OAAO,SAASY,gCACdhB,QAAkB,EAClBW,UAAkB,EAClBE,QAAgB,EAChBP,WAAiC,EACjCG,KAAuB,EAES,WAAW;AACV,WAAW;;IAG5C,IAAIQ,WAAWC,YAAYC;IAE3B,IAAIR,eAAe,GAAG;;QACnB,CAACQ,iBAAiBF,UAAU,GAAGjB,SAASoB,SAAS,CAACP;IACrD,OAAO;;QACJ,CAACK,YAAYC,iBAAiBF,UAAU,GAAGjB,SAASoB,SAAS,CAACT,YAAYE;IAC7E;IAEA,IAAI,CAACP,YAAYC,OAAO,EAAE;QACxB,OAAO;IACT;IACA,MAAMc,2BAA2BF,kBAC7Bb,YAAYC,OAAO,CAACY,iBAAiBV,SACrCP;IAEJ,OAAO;QACLe;QACAC;QACAC,iBAAiBE,4BAA4BnB;IAC/C;AACF"}

@@ -29,6 +29,7 @@ export const migrateDocumentFieldsRecursively = ({ data, fields, found, payload
 });
 } else if (Array.isArray(data[field.name])) {
 if (field.type === 'blocks') {
+;
 data[field.name].forEach((row)=>{
 const blockTypeToMatch = row?.blockType;
 const block = payload?.blocks[blockTypeToMatch] ?? (field.blockReferences ?? field.blocks).find((block)=>typeof block !== 'string' && block.slug === blockTypeToMatch);
@@ -43,6 +44,7 @@ export const migrateDocumentFieldsRecursively = ({ data, fields, found, payload
 });
 }
 if (field.type === 'array') {
+;
 data[field.name].forEach((row)=>{
 found += migrateDocumentFieldsRecursively({
 data: row,

@@ -1 +1 @@
-
{"version":3,"sources":["../../../src/utilities/migrateSlateToLexical/migrateDocumentFieldsRecursively.ts"],"sourcesContent":["import type { Field, FlattenedBlock, Payload } from 'payload'\n\nimport { fieldAffectsData, fieldHasSubFields, fieldIsArrayType, tabHasName } from 'payload/shared'\n\nimport type {\n SlateNode,\n SlateNodeConverter,\n} from '../../features/migrations/slateToLexical/converter/types.js'\nimport type { LexicalRichTextAdapter } from '../../types.js'\n\nimport { convertSlateToLexical } from '../../features/migrations/slateToLexical/converter/index.js'\n\ntype NestedRichTextFieldsArgs = {\n data: Record<string, unknown>\n\n fields: Field[]\n found: number\n payload: Payload\n}\n\nexport const migrateDocumentFieldsRecursively = ({\n data,\n fields,\n found,\n payload,\n}: NestedRichTextFieldsArgs): number => {\n for (const field of fields) {\n if (fieldHasSubFields(field) && !fieldIsArrayType(field)) {\n if (fieldAffectsData(field) && typeof data[field.name] === 'object') {\n found += migrateDocumentFieldsRecursively({\n data: data[field.name] as Record<string, unknown>,\n fields: field.fields,\n found,\n payload,\n })\n } else {\n found += migrateDocumentFieldsRecursively({\n data,\n fields: field.fields,\n found,\n payload,\n })\n }\n } else if (field.type === 'tabs') {\n field.tabs.forEach((tab) => {\n found += migrateDocumentFieldsRecursively({\n data: (tabHasName(tab) ? data[tab.name] : data) as Record<string, unknown>,\n fields: tab.fields,\n found,\n payload,\n })\n })\n } else if (Array.isArray(data[field.name])) {\n if (field.type === 'blocks') {\n ;(data[field.name] as Array<Record<string, unknown>>).forEach((row) => {\n const blockTypeToMatch: string = row?.blockType as string\n const block =\n payload?.blocks[blockTypeToMatch] ??\n ((field.blockReferences ?? 
field.blocks).find(\n (block) => typeof block !== 'string' && block.slug === blockTypeToMatch,\n ) as FlattenedBlock | undefined)\n\n if (block) {\n found += migrateDocumentFieldsRecursively({\n data: row,\n fields: block.fields,\n found,\n payload,\n })\n }\n })\n }\n\n if (field.type === 'array') {\n ;(data[field.name] as Array<Record<string, unknown>>).forEach((row) => {\n found += migrateDocumentFieldsRecursively({\n data: row,\n fields: field.fields,\n found,\n payload,\n })\n })\n }\n }\n\n if (field.type === 'richText' && Array.isArray(data[field.name])) {\n // Slate richText\n const editor: LexicalRichTextAdapter = field.editor as LexicalRichTextAdapter\n if (editor && typeof editor === 'object') {\n if ('features' in editor && editor.features?.length) {\n // find slatetolexical feature\n const slateToLexicalFeature = editor.editorConfig.resolvedFeatureMap.get('slateToLexical')\n if (slateToLexicalFeature) {\n // DO CONVERSION\n\n const { converters } = slateToLexicalFeature.sanitizedServerFeatureProps as {\n converters?: SlateNodeConverter[]\n }\n\n data[field.name] = convertSlateToLexical({\n converters: converters!,\n slateData: data[field.name] as SlateNode[],\n })\n\n found++\n }\n }\n }\n }\n }\n\n return found\n}\n"],"names":["fieldAffectsData","fieldHasSubFields","fieldIsArrayType","tabHasName","convertSlateToLexical","migrateDocumentFieldsRecursively","data","fields","found","payload","field","name","type","tabs","forEach","tab","Array","isArray","row","blockTypeToMatch","blockType","block","blocks","blockReferences","find","slug","editor","features","length","slateToLexicalFeature","editorConfig","resolvedFeatureMap","get","converters","sanitizedServerFeatureProps","slateData"],"mappings":"AAEA,SAASA,gBAAgB,EAAEC,iBAAiB,EAAEC,gBAAgB,EAAEC,UAAU,QAAQ,iBAAgB;AAQlG,SAASC,qBAAqB,QAAQ,8DAA6D;AAUnG,OAAO,MAAMC,mCAAmC,CAAC,EAC/CC,IAAI,EACJC,MAAM,EACNC,KAAK,EACLC,OAAO,EACkB;IACzB,KAAK,MAAMC,SAASH,OAAQ;QAC1B,IAAIN,kBAAkBS,UAAU,CAACR,iBAAiBQ,QAAQ;YACxD,IAAIV,iBAAiBU,UAAU,OAAOJ,IAAI,CAACI,MAAMC,IAAI,CAAC,KAAK,UAAU;gBACnEH,SAASH,iCAAiC;oBACxCC,MAAMA,IAAI,CAACI,MAAMC,IAAI,CAAC;oBACtBJ,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF,OAAO;gBACLD,SAASH,iCAAiC;oBACxCC;oBACAC,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIC,MAAME,IAAI,KAAK,QAAQ;YAChCF,MAAMG,IAAI,CAACC,OAAO,CAAC,CAACC;gBAClBP,SAASH,iCAAiC;oBACxCC,MAAOH,WAAWY,OAAOT,IAAI,CAACS,IAAIJ,IAAI,CAAC,GAAGL;oBAC1CC,QAAQQ,IAAIR,MAAM;oBAClBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIO,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAG;YAC1C,IAAID,MAAME,IAAI,KAAK,UAAU
+
{"version":3,"sources":["../../../src/utilities/migrateSlateToLexical/migrateDocumentFieldsRecursively.ts"],"sourcesContent":["import type { Field, FlattenedBlock, Payload } from 'payload'\n\nimport { fieldAffectsData, fieldHasSubFields, fieldIsArrayType, tabHasName } from 'payload/shared'\n\nimport type {\n SlateNode,\n SlateNodeConverter,\n} from '../../features/migrations/slateToLexical/converter/types.js'\nimport type { LexicalRichTextAdapter } from '../../types.js'\n\nimport { convertSlateToLexical } from '../../features/migrations/slateToLexical/converter/index.js'\n\ntype NestedRichTextFieldsArgs = {\n data: Record<string, unknown>\n\n fields: Field[]\n found: number\n payload: Payload\n}\n\nexport const migrateDocumentFieldsRecursively = ({\n data,\n fields,\n found,\n payload,\n}: NestedRichTextFieldsArgs): number => {\n for (const field of fields) {\n if (fieldHasSubFields(field) && !fieldIsArrayType(field)) {\n if (fieldAffectsData(field) && typeof data[field.name] === 'object') {\n found += migrateDocumentFieldsRecursively({\n data: data[field.name] as Record<string, unknown>,\n fields: field.fields,\n found,\n payload,\n })\n } else {\n found += migrateDocumentFieldsRecursively({\n data,\n fields: field.fields,\n found,\n payload,\n })\n }\n } else if (field.type === 'tabs') {\n field.tabs.forEach((tab) => {\n found += migrateDocumentFieldsRecursively({\n data: (tabHasName(tab) ? data[tab.name] : data) as Record<string, unknown>,\n fields: tab.fields,\n found,\n payload,\n })\n })\n } else if (Array.isArray(data[field.name])) {\n if (field.type === 'blocks') {\n ;(data[field.name] as Array<Record<string, unknown>>).forEach((row) => {\n const blockTypeToMatch: string = row?.blockType as string\n const block =\n payload?.blocks[blockTypeToMatch] ??\n ((field.blockReferences ?? 
field.blocks).find(\n (block) => typeof block !== 'string' && block.slug === blockTypeToMatch,\n ) as FlattenedBlock | undefined)\n\n if (block) {\n found += migrateDocumentFieldsRecursively({\n data: row,\n fields: block.fields,\n found,\n payload,\n })\n }\n })\n }\n\n if (field.type === 'array') {\n ;(data[field.name] as Array<Record<string, unknown>>).forEach((row) => {\n found += migrateDocumentFieldsRecursively({\n data: row,\n fields: field.fields,\n found,\n payload,\n })\n })\n }\n }\n\n if (field.type === 'richText' && Array.isArray(data[field.name])) {\n // Slate richText\n const editor: LexicalRichTextAdapter = field.editor as LexicalRichTextAdapter\n if (editor && typeof editor === 'object') {\n if ('features' in editor && editor.features?.length) {\n // find slatetolexical feature\n const slateToLexicalFeature = editor.editorConfig.resolvedFeatureMap.get('slateToLexical')\n if (slateToLexicalFeature) {\n // DO CONVERSION\n\n const { converters } = slateToLexicalFeature.sanitizedServerFeatureProps as {\n converters?: SlateNodeConverter[]\n }\n\n data[field.name] = convertSlateToLexical({\n converters: converters!,\n slateData: data[field.name] as SlateNode[],\n })\n\n found++\n }\n }\n }\n }\n }\n\n return found\n}\n"],"names":["fieldAffectsData","fieldHasSubFields","fieldIsArrayType","tabHasName","convertSlateToLexical","migrateDocumentFieldsRecursively","data","fields","found","payload","field","name","type","tabs","forEach","tab","Array","isArray","row","blockTypeToMatch","blockType","block","blocks","blockReferences","find","slug","editor","features","length","slateToLexicalFeature","editorConfig","resolvedFeatureMap","get","converters","sanitizedServerFeatureProps","slateData"],"mappings":"AAEA,SAASA,gBAAgB,EAAEC,iBAAiB,EAAEC,gBAAgB,EAAEC,UAAU,QAAQ,iBAAgB;AAQlG,SAASC,qBAAqB,QAAQ,8DAA6D;AAUnG,OAAO,MAAMC,mCAAmC,CAAC,EAC/CC,IAAI,EACJC,MAAM,EACNC,KAAK,EACLC,OAAO,EACkB;IACzB,KAAK,MAAMC,SAASH,OAAQ;QAC1B,IAAIN,kBAAkBS,UAAU,CAACR,iBAAiBQ,QAAQ;YACxD,IAAIV,iBAAiBU,UAAU,OAAOJ,IAAI,CAACI,MAAMC,IAAI,CAAC,KAAK,UAAU;gBACnEH,SAASH,iCAAiC;oBACxCC,MAAMA,IAAI,CAACI,MAAMC,IAAI,CAAC;oBACtBJ,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF,OAAO;gBACLD,SAASH,iCAAiC;oBACxCC;oBACAC,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIC,MAAME,IAAI,KAAK,QAAQ;YAChCF,MAAMG,IAAI,CAACC,OAAO,CAAC,CAACC;gBAClBP,SAASH,iCAAiC;oBACxCC,MAAOH,WAAWY,OAAOT,IAAI,CAACS,IAAIJ,IAAI,CAAC,GAAGL;oBAC1CC,QAAQQ,IAAIR,MAAM;oBAClBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIO,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAG;YAC1C,IAAID,MAAME,IAAI,KAAK,UAAU;;gBACzBN,IAAI,CAACI,MAAMC,IAAI,CAAC,CAAoCG,OAAO,CAAC,CAACI;oBAC7D,MAAMC,mBAA2BD,KAAKE;oBACtC,MAAMC,QACJZ,SAASa,MAAM,CAACH,iBAAiB,IAChC,AAACT,CAAAA,MAAMa,eAAe,IAAIb,MAAMY,MAAM,AAAD,EAAGE,IAAI,CAC3C,CAACH,QAAU,OAAOA,UAAU,YAAYA,MAAMI,IAAI,KAAKN;oBAG3D,IAAIE,OAAO;wBACTb,SAASH,iCAAiC;4BACxCC,MAAMY;4BACNX,QAAQc,MAAMd,MAAM;4BACpBC;4BACAC;wBACF;oBACF;gBACF;YACF;YAEA,IAAIC,MAAME,IAAI,KAAK,SAAS;;gBACxBN,IAAI,CAACI,MAAMC,IAAI,CAAC,CAAoCG,OAAO,CAAC,CAACI;oBAC7DV,SAASH,iCAAiC;wBACxCC,MAAMY;wBACNX,QAAQG,MAAMH,MAAM;wBACpBC;wBACAC;oBACF;gBACF;YACF;QACF;QAEA,IAAIC,MAAME,IAAI,KAAK,cAAcI,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAG;YAChE,iBAAiB;YACjB,MAAMe,SAAiChB,MAAMgB,MAAM;YACnD,IAAIA,UAAU,OAAOA,WAAW,UAAU;gBACxC,IAAI,cAAcA,UAAUA,OAAOC,QAAQ,EAAEC,QAAQ;oBACnD,8BAA8B;oBAC9B,MAAMC,wBAAwBH,OAAOI,YAAY,CAACC,kBAAkB,CAACC,GAAG,CAAC;oBACzE,IAAIH,uBAAuB;wBACzB,gBAAgB;wBAEhB,MAAM,EAAEI,UAAU,EAAE,GAAGJ,sBAAsBK,2BAA2B;wBAIxE5B,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAGP,sBAAsB;4BACvC6B,YAAYA;4BACZE,WAAW7B,IAAI,CAACI,MA
AMC,IAAI,CAAC;wBAC7B;wBAEAH;oBACF;gBACF;YACF;QACF;IACF;IAEA,OAAOA;AACT,EAAC"}

@@ -30,6 +30,7 @@ export const upgradeDocumentFieldsRecursively = ({ data, fields, found, payload
 });
 } else if (Array.isArray(data[field.name])) {
 if (field.type === 'blocks') {
+;
 data[field.name].forEach((row)=>{
 const blockTypeToMatch = row?.blockType;
 const block = payload.blocks[blockTypeToMatch] ?? (field.blockReferences ?? field.blocks).find((block)=>typeof block !== 'string' && block.slug === blockTypeToMatch);
@@ -44,6 +45,7 @@ export const upgradeDocumentFieldsRecursively = ({ data, fields, found, payload
 });
 }
 if (field.type === 'array') {
+;
 data[field.name].forEach((row)=>{
 found += upgradeDocumentFieldsRecursively({
 data: row,

@@ -1 +1 @@
-
{"version":3,"sources":["../../../src/utilities/upgradeLexicalData/upgradeDocumentFieldsRecursively.ts"],"sourcesContent":["import type { SerializedEditorState } from 'lexical'\nimport type { Field, FlattenedBlock, Payload } from 'payload'\n\nimport { createHeadlessEditor } from '@lexical/headless'\nimport { fieldAffectsData, fieldHasSubFields, fieldIsArrayType, tabHasName } from 'payload/shared'\n\nimport type { LexicalRichTextAdapter } from '../../types.js'\n\nimport { getEnabledNodes } from '../../lexical/nodes/index.js'\n\ntype NestedRichTextFieldsArgs = {\n data: Record<string, unknown>\n\n fields: Field[]\n found: number\n payload: Payload\n}\n\nexport const upgradeDocumentFieldsRecursively = ({\n data,\n fields,\n found,\n payload,\n}: NestedRichTextFieldsArgs): number => {\n for (const field of fields) {\n if (fieldHasSubFields(field) && !fieldIsArrayType(field)) {\n if (fieldAffectsData(field) && typeof data[field.name] === 'object') {\n found += upgradeDocumentFieldsRecursively({\n data: data[field.name] as Record<string, unknown>,\n fields: field.fields,\n found,\n payload,\n })\n } else {\n found += upgradeDocumentFieldsRecursively({\n data,\n fields: field.fields,\n found,\n payload,\n })\n }\n } else if (field.type === 'tabs') {\n field.tabs.forEach((tab) => {\n found += upgradeDocumentFieldsRecursively({\n data: (tabHasName(tab) ? data[tab.name] : data) as Record<string, unknown>,\n fields: tab.fields,\n found,\n payload,\n })\n })\n } else if (Array.isArray(data[field.name])) {\n if (field.type === 'blocks') {\n ;(data[field.name] as Record<string, unknown>[]).forEach((row) => {\n const blockTypeToMatch: string = row?.blockType as string\n\n const block =\n payload.blocks[blockTypeToMatch] ??\n ((field.blockReferences ?? field.blocks).find(\n (block) => typeof block !== 'string' && block.slug === blockTypeToMatch,\n ) as FlattenedBlock | undefined)\n\n if (block) {\n found += upgradeDocumentFieldsRecursively({\n data: row,\n fields: block.fields,\n found,\n payload,\n })\n }\n })\n }\n\n if (field.type === 'array') {\n ;(data[field.name] as Record<string, unknown>[]).forEach((row) => {\n found += upgradeDocumentFieldsRecursively({\n data: row,\n fields: field.fields,\n found,\n payload,\n })\n })\n }\n }\n\n if (\n field.type === 'richText' &&\n data[field.name] &&\n !Array.isArray(data[field.name]) &&\n 'root' in (data[field.name] as Record<string, unknown>)\n ) {\n // Lexical richText\n const editor: LexicalRichTextAdapter = field.editor as LexicalRichTextAdapter\n if (editor && typeof editor === 'object') {\n if ('features' in editor && editor.features?.length) {\n // Load lexical editor into lexical, then save it immediately\n const editorState = data[field.name] as SerializedEditorState\n\n const headlessEditor = createHeadlessEditor({\n nodes: getEnabledNodes({\n editorConfig: editor.editorConfig,\n }),\n })\n headlessEditor.update(\n () => {\n headlessEditor.setEditorState(headlessEditor.parseEditorState(editorState))\n },\n { discrete: true },\n )\n\n // get editor state\n data[field.name] = headlessEditor.getEditorState().toJSON()\n\n found++\n }\n }\n }\n }\n\n return 
found\n}\n"],"names":["createHeadlessEditor","fieldAffectsData","fieldHasSubFields","fieldIsArrayType","tabHasName","getEnabledNodes","upgradeDocumentFieldsRecursively","data","fields","found","payload","field","name","type","tabs","forEach","tab","Array","isArray","row","blockTypeToMatch","blockType","block","blocks","blockReferences","find","slug","editor","features","length","editorState","headlessEditor","nodes","editorConfig","update","setEditorState","parseEditorState","discrete","getEditorState","toJSON"],"mappings":"AAGA,SAASA,oBAAoB,QAAQ,oBAAmB;AACxD,SAASC,gBAAgB,EAAEC,iBAAiB,EAAEC,gBAAgB,EAAEC,UAAU,QAAQ,iBAAgB;AAIlG,SAASC,eAAe,QAAQ,+BAA8B;AAU9D,OAAO,MAAMC,mCAAmC,CAAC,EAC/CC,IAAI,EACJC,MAAM,EACNC,KAAK,EACLC,OAAO,EACkB;IACzB,KAAK,MAAMC,SAASH,OAAQ;QAC1B,IAAIN,kBAAkBS,UAAU,CAACR,iBAAiBQ,QAAQ;YACxD,IAAIV,iBAAiBU,UAAU,OAAOJ,IAAI,CAACI,MAAMC,IAAI,CAAC,KAAK,UAAU;gBACnEH,SAASH,iCAAiC;oBACxCC,MAAMA,IAAI,CAACI,MAAMC,IAAI,CAAC;oBACtBJ,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF,OAAO;gBACLD,SAASH,iCAAiC;oBACxCC;oBACAC,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIC,MAAME,IAAI,KAAK,QAAQ;YAChCF,MAAMG,IAAI,CAACC,OAAO,CAAC,CAACC;gBAClBP,SAASH,iCAAiC;oBACxCC,MAAOH,WAAWY,OAAOT,IAAI,CAACS,IAAIJ,IAAI,CAAC,GAAGL;oBAC1CC,QAAQQ,IAAIR,MAAM;oBAClBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIO,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAG;YAC1C,IAAID,MAAME,IAAI,KAAK,UAAU
+
{"version":3,"sources":["../../../src/utilities/upgradeLexicalData/upgradeDocumentFieldsRecursively.ts"],"sourcesContent":["import type { SerializedEditorState } from 'lexical'\nimport type { Field, FlattenedBlock, Payload } from 'payload'\n\nimport { createHeadlessEditor } from '@lexical/headless'\nimport { fieldAffectsData, fieldHasSubFields, fieldIsArrayType, tabHasName } from 'payload/shared'\n\nimport type { LexicalRichTextAdapter } from '../../types.js'\n\nimport { getEnabledNodes } from '../../lexical/nodes/index.js'\n\ntype NestedRichTextFieldsArgs = {\n data: Record<string, unknown>\n\n fields: Field[]\n found: number\n payload: Payload\n}\n\nexport const upgradeDocumentFieldsRecursively = ({\n data,\n fields,\n found,\n payload,\n}: NestedRichTextFieldsArgs): number => {\n for (const field of fields) {\n if (fieldHasSubFields(field) && !fieldIsArrayType(field)) {\n if (fieldAffectsData(field) && typeof data[field.name] === 'object') {\n found += upgradeDocumentFieldsRecursively({\n data: data[field.name] as Record<string, unknown>,\n fields: field.fields,\n found,\n payload,\n })\n } else {\n found += upgradeDocumentFieldsRecursively({\n data,\n fields: field.fields,\n found,\n payload,\n })\n }\n } else if (field.type === 'tabs') {\n field.tabs.forEach((tab) => {\n found += upgradeDocumentFieldsRecursively({\n data: (tabHasName(tab) ? data[tab.name] : data) as Record<string, unknown>,\n fields: tab.fields,\n found,\n payload,\n })\n })\n } else if (Array.isArray(data[field.name])) {\n if (field.type === 'blocks') {\n ;(data[field.name] as Record<string, unknown>[]).forEach((row) => {\n const blockTypeToMatch: string = row?.blockType as string\n\n const block =\n payload.blocks[blockTypeToMatch] ??\n ((field.blockReferences ?? field.blocks).find(\n (block) => typeof block !== 'string' && block.slug === blockTypeToMatch,\n ) as FlattenedBlock | undefined)\n\n if (block) {\n found += upgradeDocumentFieldsRecursively({\n data: row,\n fields: block.fields,\n found,\n payload,\n })\n }\n })\n }\n\n if (field.type === 'array') {\n ;(data[field.name] as Record<string, unknown>[]).forEach((row) => {\n found += upgradeDocumentFieldsRecursively({\n data: row,\n fields: field.fields,\n found,\n payload,\n })\n })\n }\n }\n\n if (\n field.type === 'richText' &&\n data[field.name] &&\n !Array.isArray(data[field.name]) &&\n 'root' in (data[field.name] as Record<string, unknown>)\n ) {\n // Lexical richText\n const editor: LexicalRichTextAdapter = field.editor as LexicalRichTextAdapter\n if (editor && typeof editor === 'object') {\n if ('features' in editor && editor.features?.length) {\n // Load lexical editor into lexical, then save it immediately\n const editorState = data[field.name] as SerializedEditorState\n\n const headlessEditor = createHeadlessEditor({\n nodes: getEnabledNodes({\n editorConfig: editor.editorConfig,\n }),\n })\n headlessEditor.update(\n () => {\n headlessEditor.setEditorState(headlessEditor.parseEditorState(editorState))\n },\n { discrete: true },\n )\n\n // get editor state\n data[field.name] = headlessEditor.getEditorState().toJSON()\n\n found++\n }\n }\n }\n }\n\n return 
found\n}\n"],"names":["createHeadlessEditor","fieldAffectsData","fieldHasSubFields","fieldIsArrayType","tabHasName","getEnabledNodes","upgradeDocumentFieldsRecursively","data","fields","found","payload","field","name","type","tabs","forEach","tab","Array","isArray","row","blockTypeToMatch","blockType","block","blocks","blockReferences","find","slug","editor","features","length","editorState","headlessEditor","nodes","editorConfig","update","setEditorState","parseEditorState","discrete","getEditorState","toJSON"],"mappings":"AAGA,SAASA,oBAAoB,QAAQ,oBAAmB;AACxD,SAASC,gBAAgB,EAAEC,iBAAiB,EAAEC,gBAAgB,EAAEC,UAAU,QAAQ,iBAAgB;AAIlG,SAASC,eAAe,QAAQ,+BAA8B;AAU9D,OAAO,MAAMC,mCAAmC,CAAC,EAC/CC,IAAI,EACJC,MAAM,EACNC,KAAK,EACLC,OAAO,EACkB;IACzB,KAAK,MAAMC,SAASH,OAAQ;QAC1B,IAAIN,kBAAkBS,UAAU,CAACR,iBAAiBQ,QAAQ;YACxD,IAAIV,iBAAiBU,UAAU,OAAOJ,IAAI,CAACI,MAAMC,IAAI,CAAC,KAAK,UAAU;gBACnEH,SAASH,iCAAiC;oBACxCC,MAAMA,IAAI,CAACI,MAAMC,IAAI,CAAC;oBACtBJ,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF,OAAO;gBACLD,SAASH,iCAAiC;oBACxCC;oBACAC,QAAQG,MAAMH,MAAM;oBACpBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIC,MAAME,IAAI,KAAK,QAAQ;YAChCF,MAAMG,IAAI,CAACC,OAAO,CAAC,CAACC;gBAClBP,SAASH,iCAAiC;oBACxCC,MAAOH,WAAWY,OAAOT,IAAI,CAACS,IAAIJ,IAAI,CAAC,GAAGL;oBAC1CC,QAAQQ,IAAIR,MAAM;oBAClBC;oBACAC;gBACF;YACF;QACF,OAAO,IAAIO,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAG;YAC1C,IAAID,MAAME,IAAI,KAAK,UAAU;;gBACzBN,IAAI,CAACI,MAAMC,IAAI,CAAC,CAA+BG,OAAO,CAAC,CAACI;oBACxD,MAAMC,mBAA2BD,KAAKE;oBAEtC,MAAMC,QACJZ,QAAQa,MAAM,CAACH,iBAAiB,IAC/B,AAACT,CAAAA,MAAMa,eAAe,IAAIb,MAAMY,MAAM,AAAD,EAAGE,IAAI,CAC3C,CAACH,QAAU,OAAOA,UAAU,YAAYA,MAAMI,IAAI,KAAKN;oBAG3D,IAAIE,OAAO;wBACTb,SAASH,iCAAiC;4BACxCC,MAAMY;4BACNX,QAAQc,MAAMd,MAAM;4BACpBC;4BACAC;wBACF;oBACF;gBACF;YACF;YAEA,IAAIC,MAAME,IAAI,KAAK,SAAS;;gBACxBN,IAAI,CAACI,MAAMC,IAAI,CAAC,CAA+BG,OAAO,CAAC,CAACI;oBACxDV,SAASH,iCAAiC;wBACxCC,MAAMY;wBACNX,QAAQG,MAAMH,MAAM;wBACpBC;wBACAC;oBACF;gBACF;YACF;QACF;QAEA,IACEC,MAAME,IAAI,KAAK,cACfN,IAAI,CAACI,MAAMC,IAAI,CAAC,IAChB,CAACK,MAAMC,OAAO,CAACX,IAAI,CAACI,MAAMC,IAAI,CAAC,KAC/B,UAAWL,IAAI,CAACI,MAAMC,IAAI,CAAC,EAC3B;YACA,mBAAmB;YACnB,MAAMe,SAAiChB,MAAMgB,MAAM;YACnD,IAAIA,UAAU,OAAOA,WAAW,UAAU;gBACxC,IAAI,cAAcA,UAAUA,OAAOC,QAAQ,EAAEC,QAAQ;oBACnD,6DAA6D;oBAC7D,MAAMC,cAAcvB,IAAI,CAACI,MAAMC,IAAI,CAAC;oBAEpC,MAAMmB,iBAAiB/B,qBAAqB;wBAC1CgC,OAAO3B,gBAAgB;4BACrB4B,cAAcN,OAAOM,YAAY;wBACnC;oBACF;oBACAF,eAAeG,MAAM,CACnB;wBACEH,eAAeI,cAAc,CAACJ,eAAeK,gBAAgB,CAACN;oBAChE,GACA;wBAAEO,UAAU;oBAAK;oBAGnB,mBAAmB;oBACnB9B,IAAI,CAACI,MAAMC,IAAI,CAAC,GAAGmB,eAAeO,cAAc,GAAGC,MAAM;oBAEzD9B;gBACF;YACF;QACF;IACF;IAEA,OAAOA;AACT,EAAC"}

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@payloadcms/richtext-lexical",
-"version": "3.68.0-internal-debug.
+"version": "3.68.0-internal-debug.185cc5f",
 "description": "The officially supported Lexical richtext adapter for Payload",
 "homepage": "https://payloadcms.com",
 "repository": {
@@ -374,8 +374,8 @@
 "react-error-boundary": "4.1.2",
 "ts-essentials": "10.0.3",
 "uuid": "10.0.0",
-"@payloadcms/translations": "3.68.0-internal-debug.
-"@payloadcms/ui": "3.68.0-internal-debug.
+"@payloadcms/translations": "3.68.0-internal-debug.185cc5f",
+"@payloadcms/ui": "3.68.0-internal-debug.185cc5f"
 },
 "devDependencies": {
 "@babel/cli": "7.27.2",
@@ -394,16 +394,16 @@
 "esbuild": "0.25.5",
 "esbuild-sass-plugin": "3.3.1",
 "swc-plugin-transform-remove-imports": "4.0.4",
-"
-"
+"payload": "3.68.0-internal-debug.185cc5f",
+"@payloadcms/eslint-config": "3.28.0"
 },
 "peerDependencies": {
 "@faceless-ui/modal": "3.0.0",
 "@faceless-ui/scroll-info": "2.0.0",
 "react": "^19.0.1 || ^19.1.2 || ^19.2.1",
 "react-dom": "^19.0.1 || ^19.1.2 || ^19.2.1",
-"@payloadcms/next": "3.68.0-internal-debug.
-"payload": "3.68.0-internal-debug.
+"@payloadcms/next": "3.68.0-internal-debug.185cc5f",
+"payload": "3.68.0-internal-debug.185cc5f"
 },
 "engines": {
 "node": "^18.20.2 || >=20.9.0"