motion-markdown-it 4.4.0 → 8.4.1
- checksums.yaml +4 -4
- data/README.md +69 -16
- data/lib/motion-markdown-it.rb +7 -5
- data/lib/motion-markdown-it/common/html_blocks.rb +6 -2
- data/lib/motion-markdown-it/common/utils.rb +19 -4
- data/lib/motion-markdown-it/helpers/helper_wrapper.rb +9 -0
- data/lib/motion-markdown-it/helpers/parse_link_destination.rb +8 -7
- data/lib/motion-markdown-it/index.rb +60 -18
- data/lib/motion-markdown-it/parser_block.rb +7 -10
- data/lib/motion-markdown-it/parser_inline.rb +50 -14
- data/lib/motion-markdown-it/presets/commonmark.rb +7 -1
- data/lib/motion-markdown-it/presets/default.rb +4 -3
- data/lib/motion-markdown-it/presets/zero.rb +6 -1
- data/lib/motion-markdown-it/renderer.rb +46 -14
- data/lib/motion-markdown-it/rules_block/blockquote.rb +167 -31
- data/lib/motion-markdown-it/rules_block/code.rb +4 -3
- data/lib/motion-markdown-it/rules_block/fence.rb +9 -4
- data/lib/motion-markdown-it/rules_block/heading.rb +8 -3
- data/lib/motion-markdown-it/rules_block/hr.rb +10 -5
- data/lib/motion-markdown-it/rules_block/html_block.rb +6 -3
- data/lib/motion-markdown-it/rules_block/lheading.rb +64 -26
- data/lib/motion-markdown-it/rules_block/list.rb +91 -22
- data/lib/motion-markdown-it/rules_block/paragraph.rb +14 -9
- data/lib/motion-markdown-it/rules_block/reference.rb +24 -14
- data/lib/motion-markdown-it/rules_block/state_block.rb +79 -24
- data/lib/motion-markdown-it/rules_block/table.rb +52 -26
- data/lib/motion-markdown-it/rules_core/normalize.rb +1 -23
- data/lib/motion-markdown-it/rules_core/replacements.rb +22 -2
- data/lib/motion-markdown-it/rules_core/smartquotes.rb +41 -12
- data/lib/motion-markdown-it/rules_inline/autolink.rb +5 -4
- data/lib/motion-markdown-it/rules_inline/balance_pairs.rb +48 -0
- data/lib/motion-markdown-it/rules_inline/emphasis.rb +104 -149
- data/lib/motion-markdown-it/rules_inline/entity.rb +2 -2
- data/lib/motion-markdown-it/rules_inline/escape.rb +5 -3
- data/lib/motion-markdown-it/rules_inline/image.rb +12 -23
- data/lib/motion-markdown-it/rules_inline/link.rb +20 -25
- data/lib/motion-markdown-it/rules_inline/newline.rb +2 -1
- data/lib/motion-markdown-it/rules_inline/state_inline.rb +60 -1
- data/lib/motion-markdown-it/rules_inline/strikethrough.rb +81 -97
- data/lib/motion-markdown-it/rules_inline/text_collapse.rb +40 -0
- data/lib/motion-markdown-it/token.rb +46 -1
- data/lib/motion-markdown-it/version.rb +1 -1
- data/spec/motion-markdown-it/markdown_it_spec.rb +2 -2
- data/spec/motion-markdown-it/misc_spec.rb +90 -14
- data/spec/motion-markdown-it/testgen_helper.rb +1 -1
- data/spec/spec_helper.rb +2 -3
- metadata +13 -13
- data/lib/motion-markdown-it/common/url_schemas.rb +0 -173
- data/spec/motion-markdown-it/bench_mark_spec.rb +0 -44

data/lib/motion-markdown-it/rules_core/normalize.rb:

@@ -4,11 +4,9 @@ module MarkdownIt
   module RulesCore
     class Normalize

-
-      NEWLINES_RE = /\r[\n\u0085]|[\u2424\u2028\u0085]/
+      NEWLINES_RE = /\r[\n\u0085]?|[\u2424\u2028\u0085]/
       NULL_RE = /\u0000/

-
       #------------------------------------------------------------------------------
       def self.inline(state)
         # Normalize newlines

@@ -17,26 +15,6 @@ module MarkdownIt
         # Replace NULL characters
         str = str.gsub(NULL_RE, '\uFFFD')

-        # Replace tabs with proper number of spaces (1..4)
-        if str.include?("\t")
-          lineStart = 0
-          lastTabPos = 0
-
-          str = str.gsub(TABS_SCAN_RE) do
-            md = Regexp.last_match
-            match = md.to_s
-            offset = md.begin(0)
-            if str.charCodeAt(offset) == 0x0A
-              lineStart = offset + 1
-              lastTabPos = 0
-              next match
-            end
-            result = '    '.slice_to_end((offset - lineStart - lastTabPos) % 4)
-            lastTabPos = offset - lineStart + 1
-            result
-          end
-        end
-
         state.src = str
       end
     end

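The core normalize rule no longer expands tabs (tab handling moved into the block-level rules, per the state_block.rb and rules_block changes in the file list above), so all that remains is newline unification and NULL replacement. A minimal standalone sketch of the equivalent behaviour, reusing the regexes from the diff (the helper name is illustrative, not part of the gem's API):

```ruby
NEWLINES_RE = /\r[\n\u0085]?|[\u2424\u2028\u0085]/
NULL_RE     = /\u0000/

# Unify all newline flavours to "\n" and replace NULL bytes with U+FFFD.
def normalize_src(src)
  src.gsub(NEWLINES_RE, "\n").gsub(NULL_RE, "\uFFFD")
end

normalize_src("line1\r\nline2\u0000")  # => "line1\nline2\uFFFD"
```
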
data/lib/motion-markdown-it/rules_core/replacements.rb:

@@ -34,19 +34,31 @@ module MarkdownIt

       #------------------------------------------------------------------------------
       def self.replace_scoped(inlineTokens)
+        inside_autolink = 0
+
         (inlineTokens.length - 1).downto(0) do |i|
           token = inlineTokens[i]
-          if token.type == 'text'
+          if token.type == 'text' && inside_autolink == 0
             token.content = token.content.gsub(SCOPED_ABBR_RE) {|match| self.replaceFn(match, $1)}
           end
+
+          if token.type == 'link_open' && token.info == 'auto'
+            inside_autolink -= 1
+          end
+
+          if token.type == 'link_close' && token.info == 'auto'
+            inside_autolink += 1
+          end
         end
       end

       #------------------------------------------------------------------------------
       def self.replace_rare(inlineTokens)
+        inside_autolink = 0
+
         (inlineTokens.length - 1).downto(0) do |i|
           token = inlineTokens[i]
-          if token.type == 'text'
+          if token.type == 'text' && inside_autolink == 0
             if (RARE_RE =~ token.content)
               token.content = token.content.
                   gsub(/\+-/, '±').

@@ -61,6 +73,14 @@ module MarkdownIt
                   gsub(/(^|[^-\s])--([^-\s]|$)/m, "\\1\u2013\\2")
             end
           end
+
+          if token.type == 'link_open' && token.info == 'auto'
+            inside_autolink -= 1
+          end
+
+          if token.type == 'link_close' && token.info == 'auto'
+            inside_autolink += 1
+          end
         end
       end


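Both replacement passes now keep an inside_autolink counter while walking the inline tokens from the end, so typographic substitutions are no longer applied to the literal URL text between a link_open/link_close pair whose info == 'auto' (set by the autolink change further down). A hedged usage sketch, assuming the preset and option names mirror upstream markdown-it:

```ruby
require 'motion-markdown-it'

# Assumption: :default preset plus the :typographer option, as in upstream markdown-it.
parser = MarkdownIt::Parser.new(:default, { typographer: true })

# "(c)" in plain text should become ©, while the same characters inside the
# autolink's visible URL text should now be left untouched.
puts parser.render("(c) 2004 <https://example.com/(c)>")
```
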
data/lib/motion-markdown-it/rules_core/smartquotes.rb:

@@ -4,12 +4,11 @@ module MarkdownIt
   module RulesCore
     class Smartquotes
      extend Common::Utils
-
+
      QUOTE_TEST_RE = /['"]/
      QUOTE_RE = /['"]/
      APOSTROPHE = "\u2019" # ’

-
      #------------------------------------------------------------------------------
      def self.replaceAt(str, index, ch)
        return str[0, index] + ch + str.slice_to_end(index + 1)

@@ -34,7 +33,7 @@ module MarkdownIt
          stack = (j < stack.length ? stack.slice(0, j + 1) : stack.fill(nil, stack.length...(j+1)))

          next if (token.type != 'text')
-
+
          text = token.content
          pos = 0
          max = text.length

@@ -50,12 +49,42 @@ module MarkdownIt
            pos = t.begin(0) + 1
            isSingle = (t[0] == "'")

-            #
-
-
+            # Find previous character,
+            # default to space if it's the beginning of the line
+            #
+            lastChar = 0x20
+
+            if t.begin(0) - 1 >= 0
+              lastChar = text.charCodeAt(t.begin(0) - 1)
+            else
+              (i - 1).downto(0) do |j|
+                break if tokens[j].type == 'softbreak' || tokens[j].type == 'hardbreak' # lastChar defaults to 0x20
+                next if tokens[j].type != 'text'
+
+                lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1)
+                break
+              end
+            end
+
+            # Find next character,
+            # default to space if it's the end of the line
+            #
+            nextChar = 0x20

-
-
+            if pos < max
+              nextChar = text.charCodeAt(pos)
+            else
+              (i + 1).upto(tokens.length - 1) do |j|
+                break if tokens[j].type == 'softbreak' || tokens[j].type == 'hardbreak' # nextChar defaults to 0x20
+                next if tokens[j].type != 'text'
+
+                nextChar = tokens[j].content.charCodeAt(0)
+                break
+              end
+            end
+
+            isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(fromCodePoint(lastChar))
+            isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(fromCodePoint(nextChar))

            isLastWhiteSpace = isWhiteSpace(lastChar)
            isNextWhiteSpace = isWhiteSpace(nextChar)

@@ -112,7 +141,7 @@ module MarkdownIt
              openQuote = state.md.options[:quotes][0]
              closeQuote = state.md.options[:quotes][1]
            end
-
+
            # replace token.content *before* tokens[item.token].content,
            # because, if they are pointing at the same token, replaceAt
            # could mess up indices when quote length != 1

|
|
124
153
|
|
125
154
|
text = token.content
|
126
155
|
max = text.length
|
127
|
-
|
156
|
+
|
128
157
|
stack = (j < stack.length ? stack.slice(0, j) : stack.fill(nil, stack.length...(j))) # stack.length = j
|
129
158
|
continue_outer_loop = true # continue OUTER;
|
130
159
|
break
|
@@ -133,7 +162,7 @@ module MarkdownIt
|
|
133
162
|
end
|
134
163
|
end
|
135
164
|
next if continue_outer_loop
|
136
|
-
|
165
|
+
|
137
166
|
if (canOpen)
|
138
167
|
stack.push({
|
139
168
|
token: i,
|
@@ -164,7 +193,7 @@ module MarkdownIt
|
|
164
193
|
blkIdx -= 1
|
165
194
|
end
|
166
195
|
end
|
167
|
-
|
196
|
+
|
168
197
|
end
|
169
198
|
end
|
170
199
|
end
|
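The substantive change is in quote classification: instead of only looking inside the current text token, the rule now walks neighbouring inline tokens to find the characters before and after a quote, treating soft/hard breaks and paragraph edges as spaces. A simplified, self-contained sketch of that lookback (Token is stubbed here for illustration; the real rule uses the gem's token objects and charCodeAt helpers):

```ruby
Token = Struct.new(:type, :content)

# Character code just before position `pos` of tokens[idx].content,
# falling back to earlier text tokens and defaulting to a space (0x20)
# at soft/hard breaks or at the start of the paragraph.
def last_char_code(tokens, idx, pos)
  return tokens[idx].content[pos - 1].ord if pos > 0

  (idx - 1).downto(0) do |j|
    break if %w[softbreak hardbreak].include?(tokens[j].type)
    next  unless tokens[j].type == 'text'
    return tokens[j].content[-1].ord
  end
  0x20
end

tokens = [Token.new('text', 'don'), Token.new('em_open', ''), Token.new('text', "'t")]
last_char_code(tokens, 2, 0)  # => 110 ("n"), so the quote is classified as an apostrophe
```
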
data/lib/motion-markdown-it/rules_inline/autolink.rb:

@@ -5,8 +5,7 @@ module MarkdownIt
     class Autolink

       EMAIL_RE = /^<([a-zA-Z0-9.!#$\%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)>/
-      AUTOLINK_RE = /^<([a-zA-Z
-
+      AUTOLINK_RE = /^<([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)>/

       #------------------------------------------------------------------------------
       def self.autolink(state, silent)

@@ -21,8 +20,6 @@ module MarkdownIt
         if (AUTOLINK_RE =~ tail)
           linkMatch = tail.match(AUTOLINK_RE)

-          return false if !URL_SCHEMAS.include?(linkMatch[1].downcase)
-
           url = linkMatch[0].slice(1...-1)
           fullUrl = state.md.normalizeLink.call(url)
           return false if (!state.md.validateLink.call(fullUrl))

@@ -30,11 +27,15 @@ module MarkdownIt
         if (!silent)
           token = state.push('link_open', 'a', 1)
           token.attrs = [ [ 'href', fullUrl ] ]
+          token.markup = 'autolink'
+          token.info = 'auto'

           token = state.push('text', '', 0)
           token.content = state.md.normalizeLinkText.call(url)

           token = state.push('link_close', 'a', -1)
+          token.markup = 'autolink'
+          token.info = 'auto'
         end

         state.pos += linkMatch[0].length

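Dropping the URL_SCHEMAS whitelist (and the deleted common/url_schemas.rb from the file list) means any <scheme:...> that matches AUTOLINK_RE and passes validateLink is linked, and the emitted tokens are now tagged with markup = 'autolink' and info = 'auto' so later core rules (smartquotes, replacements above) can skip them. A token-inspection sketch, assuming the Ruby port keeps markdown-it's parse(src, env) signature:

```ruby
require 'motion-markdown-it'

parser = MarkdownIt::Parser.new(:commonmark)

# Assumption: `parse` mirrors the JS API and returns block tokens whose
# 'inline' token carries the children produced by the inline rules.
tokens = parser.parse('<https://example.com>', {})
inline = tokens.find { |t| t.type == 'inline' }
link   = inline.children.find { |t| t.type == 'link_open' }

link.markup  # expected: 'autolink'
link.info    # expected: 'auto'
```
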
data/lib/motion-markdown-it/rules_inline/balance_pairs.rb (new file):

@@ -0,0 +1,48 @@
+# For each opening emphasis-like marker find a matching closing one
+#------------------------------------------------------------------------------
+module MarkdownIt
+  module RulesInline
+    class BalancePairs
+
+      #------------------------------------------------------------------------------
+      def self.link_pairs(state)
+        delimiters = state.delimiters
+        max = state.delimiters.length
+
+        0.upto(max - 1) do |i|
+          lastDelim = delimiters[i]
+
+          next if !lastDelim[:close]
+
+          j = i - lastDelim[:jump] - 1
+
+          while j >= 0
+            currDelim = delimiters[j]
+
+            if currDelim[:open] &&
+               currDelim[:marker] == lastDelim[:marker] &&
+               currDelim[:end] < 0 &&
+               currDelim[:level] == lastDelim[:level]
+
+              # typeofs are for backward compatibility with plugins
+              # not needed: typeof currDelim.length !== 'undefined' &&
+              #             typeof lastDelim.length !== 'undefined' &&
+              odd_match = (currDelim[:close] || lastDelim[:open]) &&
+                          (currDelim[:length] + lastDelim[:length]) % 3 == 0
+
+              if !odd_match
+                lastDelim[:jump] = i - j
+                lastDelim[:open] = false
+                currDelim[:end] = i
+                currDelim[:jump] = 0
+                break
+              end
+            end
+
+            j -= currDelim[:jump] + 1
+          end
+        end
+      end
+    end
+  end
+end

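This new rule is the pairing pass that the rewritten emphasis and strikethrough rules rely on: every closing delimiter scans backwards for a compatible opener, using :jump to leapfrog runs it has already rejected, and records the match in :end. A self-contained toy run of the same loop over a hand-built delimiter list (plain hashes instead of the gem's state object):

```ruby
# One "*" opener followed by one "*" closer, as tokenize would record them.
delims = [
  { marker: 0x2A, open: true,  close: false, end: -1, jump: 0, level: 0, length: 1 },
  { marker: 0x2A, open: false, close: true,  end: -1, jump: 0, level: 0, length: 1 }
]

delims.each_with_index do |last, i|
  next unless last[:close]

  j = i - last[:jump] - 1
  while j >= 0
    curr = delims[j]
    if curr[:open] && curr[:marker] == last[:marker] &&
       curr[:end] < 0 && curr[:level] == last[:level]
      # the CommonMark "multiple of 3" restriction on mixed open/close runs
      odd_match = (curr[:close] || last[:open]) &&
                  (curr[:length] + last[:length]) % 3 == 0
      unless odd_match
        last[:jump] = i - j
        last[:open] = false
        curr[:end]  = i
        curr[:jump] = 0
        break
      end
    end
    j -= curr[:jump] + 1
  end
end

delims[0][:end]  # => 1, the opener now points at its matching closer
```
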
data/lib/motion-markdown-it/rules_inline/emphasis.rb:

@@ -4,166 +4,121 @@ module MarkdownIt
   module RulesInline
     class Emphasis
       extend MarkdownIt::Common::Utils
-
-      # parse sequence of emphasis markers,
-      # "start" should point at a valid marker
-      #------------------------------------------------------------------------------
-      def self.scanDelims(state, start)
-        pos = start
-        left_flanking = true
-        right_flanking = true
-        max = state.posMax
-        marker = state.src.charCodeAt(start)
-
-        # treat beginning of the line as a whitespace
-        lastChar = start > 0 ? state.src.charCodeAt(start - 1) : 0x20
-
-        while (pos < max && state.src.charCodeAt(pos) == marker)
-          pos += 1
-        end
-
-        count = pos - start
-
-        # treat end of the line as a whitespace
-        nextChar = pos < max ? state.src.charCodeAt(pos) : 0x20
-
-        isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(lastChar.chr(Encoding::UTF_8))
-        isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(nextChar.chr(Encoding::UTF_8))

-
-
-
-
-
-
-
-
-
+      # Insert each marker as a separate text token, and add it to delimiter list
+      #
+      def self.tokenize(state, silent)
+        start = state.pos
+        marker = state.src.charCodeAt(start)
+
+        return false if silent
+
+        return false if (marker != 0x5F && marker != 0x2A) # _ and *
+
+        scanned = state.scanDelims(state.pos, marker == 0x2A)
+
+        0.upto(scanned[:length] - 1) do |i|
+          token = state.push('text', '', 0)
+          token.content = fromCodePoint(marker)
+
+          state.delimiters.push({
+            # Char code of the starting marker (number).
+            #
+            marker: marker,
+
+            # Total length of these series of delimiters.
+            #
+            length: scanned[:length],
+
+            # An amount of characters before this one that's equivalent to
+            # current one. In plain English: if this delimiter does not open
+            # an emphasis, neither do previous `jump` characters.
+            #
+            # Used to skip sequences like "*****" in one step, for 1st asterisk
+            # value will be 0, for 2nd it's 1 and so on.
+            #
+            jump: i,
+
+            # A position of the token this delimiter corresponds to.
+            #
+            token: state.tokens.length - 1,
+
+            # Token level.
+            #
+            level: state.level,
+
+            # If this delimiter is matched as a valid opener, `end` will be
+            # equal to its position, otherwise it's `-1`.
+            #
+            end: -1,
+
+            # Boolean flags that determine if this delimiter could open or close
+            # an emphasis.
+            #
+            open: scanned[:can_open],
+            close: scanned[:can_close]
+          })
         end

-
-          right_flanking = false
-        elsif (isLastPunctChar)
-          if (!(isNextWhiteSpace || isNextPunctChar))
-            right_flanking = false
-          end
-        end
-
-        if (marker == 0x5F) # _
-          # "_" inside a word can neither open nor close an emphasis
-          can_open = left_flanking && (!right_flanking || isLastPunctChar)
-          can_close = right_flanking && (!left_flanking || isNextPunctChar)
-        else
-          can_open = left_flanking
-          can_close = right_flanking
-        end
+        state.pos += scanned[:length]

-        return
+        return true
       end

-      #------------------------------------------------------------------------------
-      def self.emphasis(state, silent)
-        max = state.posMax
-        start = state.pos
-        marker = state.src.charCodeAt(start)
-
-        return false if (marker != 0x5F && marker != 0x2A) # _ *
-        return false if (silent) # don't run any pairs in validation mode
-
-        res = scanDelims(state, start)
-        startCount = res[:delims]
-        if (!res[:can_open])
-          state.pos += startCount
-          # Earlier we checked !silent, but this implementation does not need it
-          state.pending += state.src.slice(start...state.pos)
-          return true
-        end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      # Walk through delimiter list and replace text tokens with tags
+      #
+      def self.postProcess(state)
+        delimiters = state.delimiters
+        max = state.delimiters.length
+
+        i = max - 1
+        while i >= 0
+          startDelim = delimiters[i]
+
+          (i -= 1) and next if startDelim[:marker] != 0x5F && startDelim[:marker] != 0x2A # _ and *
+
+          # Process only opening markers
+          (i -= 1) and next if startDelim[:end] == -1
+
+          endDelim = delimiters[startDelim[:end]]
+
+          # If the previous delimiter has the same marker and is adjacent to this one,
+          # merge those into one strong delimiter.
+          #
+          # `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
+          #
+          isStrong = i > 0 &&
+                     delimiters[i - 1][:end] == startDelim[:end] + 1 &&
+                     delimiters[i - 1][:token] == startDelim[:token] - 1 &&
+                     delimiters[startDelim[:end] + 1][:token] == endDelim[:token] + 1 &&
+                     delimiters[i - 1][:marker] == startDelim[:marker]
+
+          ch = fromCodePoint(startDelim[:marker])
+
+          token = state.tokens[startDelim[:token]]
+          token.type = isStrong ? 'strong_open' : 'em_open'
+          token.tag = isStrong ? 'strong' : 'em'
+          token.nesting = 1
+          token.markup = isStrong ? ch + ch : ch
+          token.content = ''
+
+          token = state.tokens[endDelim[:token]]
+          token.type = isStrong ? 'strong_close' : 'em_close'
+          token.tag = isStrong ? 'strong' : 'em'
+          token.nesting = -1
+          token.markup = isStrong ? ch + ch : ch
+          token.content = ''
+
+          if isStrong
+            state.tokens[delimiters[i - 1][:token]].content = ''
+            state.tokens[delimiters[startDelim[:end] + 1][:token]].content = ''
+            i -= 1
           end

-
-        end
-
-        if (!found)
-          # parser failed to find ending tag, so it's not valid emphasis
-          state.pos = start
-          return false
+          i -= 1
         end
-
-        # found!
-        state.posMax = state.pos
-        state.pos = start + startCount
-
-        # Earlier we checked !silent, but this implementation does not need it
-
-        # we have `startCount` starting and ending markers,
-        # now trying to serialize them into tokens
-        count = startCount
-        while count > 1
-          token = state.push('strong_open', 'strong', 1)
-          token.markup = marker.chr + marker.chr
-          count -= 2
-        end
-        if (count % 2 == 1)
-          token = state.push('em_open', 'em', 1)
-          token.markup = marker.chr
-        end
-
-        state.md.inline.tokenize(state)
-
-        if (count % 2 == 1)
-          token = state.push('em_close', 'em', -1)
-          token.markup = marker.chr
-        end
-        count = startCount
-        while count > 1
-          token = state.push('strong_close', 'strong', -1)
-          token.markup = marker.chr + marker.chr
-          count -= 2
-        end
-
-        state.pos = state.posMax + startCount
-        state.posMax = max
-        return true
       end
-
     end
   end
 end

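The single recursive emphasis rule is gone: tokenize now only emits one text token per marker character plus a delimiter record, and postProcess (run after balance_pairs has filled in :end) rewrites the paired text tokens into em/strong open and close tokens, merging adjacent pairs into strong. For the common cases the rendered output is what you'd expect; for example, with the commonmark preset:

```ruby
require 'motion-markdown-it'

parser = MarkdownIt::Parser.new(:commonmark)

parser.render('**bold** *em* ***both***')
# expected (per CommonMark):
# "<p><strong>bold</strong> <em>em</em> <em><strong>both</strong></em></p>\n"
```
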