metanorma-standoc 1.9.4 → 1.10.0
- checksums.yaml +4 -4
- data/.github/workflows/rake.yml +1 -1
- data/.rubocop.yml +1 -1
- data/lib/asciidoctor/standoc/cleanup_inline.rb +117 -77
- data/lib/asciidoctor/standoc/cleanup_ref.rb +7 -0
- data/lib/asciidoctor/standoc/cleanup_terms.rb +19 -18
- data/lib/asciidoctor/standoc/inline.rb +20 -17
- data/lib/asciidoctor/standoc/isodoc.rng +18 -1
- data/lib/asciidoctor/standoc/macros_plantuml.rb +19 -21
- data/lib/asciidoctor/standoc/macros_terms.rb +33 -23
- data/lib/asciidoctor/standoc/term_lookup_cleanup.rb +10 -12
- data/lib/asciidoctor/standoc/terms.rb +1 -1
- data/lib/asciidoctor/standoc/validate.rb +21 -8
- data/lib/metanorma/standoc/version.rb +1 -1
- data/metanorma-standoc.gemspec +2 -2
- data/spec/asciidoctor/blocks_spec.rb +6 -6
- data/spec/asciidoctor/cleanup_spec.rb +37 -6
- data/spec/asciidoctor/isobib_cache_spec.rb +4 -6
- data/spec/asciidoctor/lists_spec.rb +147 -135
- data/spec/asciidoctor/macros_spec.rb +505 -181
- data/spec/asciidoctor/refs_spec.rb +12 -12
- data/spec/asciidoctor/validate_spec.rb +66 -20
- data/spec/vcr_cassettes/dated_iso_ref_joint_iso_iec.yml +42 -42
- data/spec/vcr_cassettes/isobib_get_123.yml +12 -12
- data/spec/vcr_cassettes/isobib_get_123_1.yml +26 -26
- data/spec/vcr_cassettes/isobib_get_123_1_fr.yml +35 -35
- data/spec/vcr_cassettes/isobib_get_123_2001.yml +13 -13
- data/spec/vcr_cassettes/isobib_get_124.yml +12 -12
- data/spec/vcr_cassettes/rfcbib_get_rfc8341.yml +13 -13
- data/spec/vcr_cassettes/separates_iev_citations_by_top_level_clause.yml +51 -61
- metadata +5 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f802ef3bf91b1b1af85aa22ca72569b6a6d74b29ed4389373e6b42e7d364b016
+  data.tar.gz: 55bd7031c2f7a2623955c9d1778b48a5a1666d15a0fe160a8058b544ffe60f92
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 59462520c525758884fdeac23103756b75908daf58707c25ed1420c3e7184e6c3cfaaa7ecd3770619cb3f5d7d9eedc9778b96980b5a33fcdedbea015da9e2684
+  data.tar.gz: ad562abbf62b1217ab40c933a1f32fe7dbbc474561c47ff0ed91c7de9e9262e3b0612393d8354eccacf30b034316635f5c84aa80f24cc2d5e6e5650bd918e485
data/.github/workflows/rake.yml
CHANGED
data/.rubocop.yml
CHANGED
data/lib/asciidoctor/standoc/cleanup_inline.rb
CHANGED
@@ -3,32 +3,54 @@ require "metanorma-utils"
 module Asciidoctor
   module Standoc
     module Cleanup
-      def empty_text_before_first_element(
-
-        return false if c.text?
+      def empty_text_before_first_element(elem)
+        elem.children.each do |c|
+          return false if c.text? && /\S/.match(c.text)
           return true if c.element?
         end
         true
       end
 
-      def strip_initial_space(
-
-
-
-
-
-
+      def strip_initial_space(elem)
+        return unless elem.children[0].text?
+
+        if /\S/.match?(elem.children[0].text)
+          elem.children[0].content = elem.children[0].text.gsub(/^ /, "")
+        else
+          elem.children[0].remove
         end
       end
 
       def bookmark_cleanup(xmldoc)
+        li_bookmark_cleanup(xmldoc)
+        dt_bookmark_cleanup(xmldoc)
+      end
+
+      def bookmark_to_id(elem, bookmark)
+        parent = bookmark.parent
+        elem["id"] = bookmark.remove["id"]
+        strip_initial_space(parent)
+      end
+
+      def li_bookmark_cleanup(xmldoc)
         xmldoc.xpath("//li[descendant::bookmark]").each do |x|
-          if x
-
-
-
-
-
+          if x.at("./*[1][local-name() = 'p']/"\
+                  "*[1][local-name() = 'bookmark']") &&
+              empty_text_before_first_element(x.elements[0])
+            bookmark_to_id(x, x.elements[0].elements[0])
+          end
+        end
+      end
+
+      def dt_bookmark_cleanup(xmldoc)
+        xmldoc.xpath("//dt[descendant::bookmark]").each do |x|
+          if x.at("./*[1][local-name() = 'p']/"\
+                  "*[1][local-name() = 'bookmark']") &&
+              empty_text_before_first_element(x.elements[0])
+            bookmark_to_id(x, x.elements[0].elements[0])
+          elsif x.at("./*[1][local-name() = 'bookmark']") &&
+              empty_text_before_first_element(x)
+            bookmark_to_id(x, x.elements[0])
           end
         end
       end
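For orientation, here is a rough standalone sketch (not taken from the gem) of the effect of the new li_bookmark_cleanup/bookmark_to_id pair: when a list item's first paragraph starts with a bookmark, the bookmark's id is hoisted onto the enclosing li and the leading space is stripped.

    require "nokogiri"

    xml = Nokogiri::XML("<ul><li><p><bookmark id='b1'/> First item</p></li></ul>")
    li = xml.at("//li")
    bookmark = li.at(".//bookmark")
    li["id"] = bookmark.remove["id"]        # hoist the bookmark id onto the <li>
    text = li.at("./p").children.first
    text.content = text.text.sub(/^ /, "") if text&.text?
    puts xml.root.to_xml
    # roughly: <ul><li id="b1"><p>First item</p></li></ul>

The same id-hoisting is applied to dt elements by dt_bookmark_cleanup.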
@@ -46,65 +68,67 @@ module Asciidoctor
       LOCALITY_RE = Regexp.new(LOCALITY_REGEX_STR.gsub(/\s/, ""),
                                Regexp::IGNORECASE | Regexp::MULTILINE)
 
-      def tq(
-
+      def tq(text)
+        text.sub(/^"/, "").sub(/"$/, "")
       end
 
-      def extract_localities(
-        f =
+      def extract_localities(elem)
+        f = elem&.children&.first or return
         f.text? or return
         head = f.remove.text
-        tail =
-        extract_localities1(
-        tail and
+        tail = elem&.children&.remove
+        extract_localities1(elem, head)
+        tail and elem << tail
       end
 
-      def extract_localities1(
-        b =
+      def extract_localities1(elem, text)
+        b = elem.add_child("<localityStack/>").first if LOCALITY_RE.match text
         while (m = LOCALITY_RE.match text)
           ref = m[:ref] ? "<referenceFrom>#{tq m[:ref]}</referenceFrom>" : ""
           refto = m[:to] ? "<referenceTo>#{tq m[:to]}</referenceTo>" : ""
           loc = m[:locality]&.downcase || m[:locality2]&.downcase
           b.add_child("<locality type='#{loc}'>#{ref}#{refto}</locality>")
           text = m[:text]
-          b =
+          b = elem.add_child("<localityStack/>").first if m[:punct] == ";"
         end
-
+        elem.add_child(text) if text
       end
 
-      def xref_to_eref(
-
-        unless
-          @internal_eref_namespaces.include?(
-
-
+      def xref_to_eref(elem)
+        elem["bibitemid"] = elem["target"]
+        unless elem["citeas"] = @anchors&.dig(elem["target"], :xref)
+          @internal_eref_namespaces.include?(elem["type"]) or
+            @log.add("Crossreferences", elem,
+                     "#{elem['target']} does not have a corresponding "\
+                     "anchor ID in the bibliography!")
         end
-
-        extract_localities(
+        elem.delete("target")
+        extract_localities(elem) unless elem.children.empty?
       end
 
       def xref_cleanup(xmldoc)
         xmldoc.xpath("//xref").each do |x|
           /:/.match(x["target"]) and xref_to_internal_eref(x)
           next unless x.name == "xref"
+
           if refid? x["target"]
             x.name = "eref"
             xref_to_eref(x)
-          else
-            x.delete("type")
+          else x.delete("type")
           end
         end
       end
 
-      def xref_to_internal_eref(
-        a =
-        unless
-
-          a.size > 2 and
-
+      def xref_to_internal_eref(elem)
+        a = elem["target"].split(":", 3)
+        unless a.size < 2 || a[0].empty? || a[1].empty?
+          elem["target"] = "#{a[0]}_#{a[1]}"
+          a.size > 2 and
+            elem.children = %{anchor="#{a[2..-1].join}",#{elem&.children&.text}}
+          elem["type"] = a[0]
           @internal_eref_namespaces << a[0]
-
-          xref_to_eref(
+          elem.name = "eref"
+          xref_to_eref(elem)
         end
       end
 
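As an illustration of the locality extraction above, here is a simplified standalone sketch; the regexp below is an assumption, far smaller than the real LOCALITY_REGEX_STR, but it shows how citation text such as "clause 3.1, table 2" ends up as typed locality elements inside a localityStack.

    require "nokogiri"

    LOCALITY = /^\s*(?<locality>clause|table|annex)\s+(?<ref>[^,;\s]+)(?<punct>[,;]?)\s*(?<text>.*)$/i

    eref = Nokogiri::XML('<eref bibitemid="ISO712">clause 3.1, table 2</eref>').root
    text = eref.children.first.remove.text
    stack = eref.add_child("<localityStack/>").first
    while (m = LOCALITY.match(text))
      stack.add_child("<locality type='#{m[:locality].downcase}'>" \
                      "<referenceFrom>#{m[:ref]}</referenceFrom></locality>")
      text = m[:text]
    end
    puts eref.to_xml
    # roughly: <eref bibitemid="ISO712"><localityStack>
    #   <locality type="clause"><referenceFrom>3.1</referenceFrom></locality>
    #   <locality type="table"><referenceFrom>2</referenceFrom></locality>
    # </localityStack></eref>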
@@ -116,10 +140,11 @@ module Asciidoctor
 
       def origin_cleanup(xmldoc)
         xmldoc.xpath("//origin/concept[termref]").each do |x|
-          x.
+          t = x.at("./termref")
+          x.replace(t)
         end
         xmldoc.xpath("//origin").each do |x|
-          x["citeas"] = @anchors&.dig(x["bibitemid"], :xref)
+          x["citeas"] = @anchors&.dig(x["bibitemid"], :xref) or
             @log.add("Crossreferences", x,
                      "#{x['bibitemid']} does not have a corresponding anchor "\
                      "ID in the bibliography!")
@@ -128,68 +153,83 @@ module Asciidoctor
         end
       end
 
       def concept_cleanup(xmldoc)
-        xmldoc.xpath("//concept").each do |x|
-          x.
-          if
+        xmldoc.xpath("//concept[not(termxref)]").each do |x|
+          term = x.at("./refterm")
+          term&.remove if term&.text&.empty?
+          x.children.remove if x&.children&.text&.strip&.empty?
+          key_extract_locality(x)
+          if /:/.match?(x["key"]) then concept_termbase_cleanup(x)
           elsif refid? x["key"] then concept_eref_cleanup(x)
-          else
-            concept_xref_cleanup(x)
+          else concept_xref_cleanup(x)
           end
           x.delete("key")
         end
       end
 
-      def
-
-
-
-
+      def key_extract_locality(elem)
+        return unless /,/.match?(elem["key"])
+
+        elem.add_child("<locality>#{elem['key'].sub(/^[^,]+,/, '')}</locality>")
+        elem["key"] = elem["key"].sub(/,.*$/, "")
+      end
+
+      def concept_termbase_cleanup(elem)
+        t = elem&.at("./xrefrender")&.remove&.children
+        termbase, key = elem["key"].split(/:/, 2)
+        elem.add_child(%(<termref base="#{termbase}" target="#{key}">) +
+                       "#{t&.to_xml}</termref>")
       end
 
-      def concept_xref_cleanup(
-
-
+      def concept_xref_cleanup(elem)
+        t = elem&.at("./xrefrender")&.remove&.children
+        elem.add_child(%(<xref target="#{elem['key']}">#{t&.to_xml}</xref>))
       end
 
-      def concept_eref_cleanup(
-
-
+      def concept_eref_cleanup(elem)
+        t = elem&.at("./xrefrender")&.remove&.children&.to_xml
+        l = elem&.at("./locality")&.remove&.children&.to_xml
+        elem.add_child "<eref bibitemid='#{elem['key']}'>#{l}</eref>"
+        extract_localities(elem.elements[-1])
+        elem.elements[-1].add_child(t) if t
       end
 
-      def to_xreftarget(
-        return Metanorma::Utils::to_ncname(
-
-        pref
-
+      def to_xreftarget(str)
+        return Metanorma::Utils::to_ncname(str) unless /^[^#]+#.+$/.match?(str)
+
+        /^(?<pref>[^#]+)#(?<suff>.+)$/ =~ str
+        pref = pref.gsub(%r([#{Metanorma::Utils::NAMECHAR}])o, "_")
+        suff = suff.gsub(%r([#{Metanorma::Utils::NAMECHAR}])o, "_")
         "#{pref}##{suff}"
       end
 
       IDREF = "//*/@id | //review/@from | //review/@to | "\
         "//callout/@target | //citation/@bibitemid | //eref/@bibitemid".freeze
 
-      def anchor_cleanup(
-        anchor_cleanup1(
-        xreftarget_cleanup(
+      def anchor_cleanup(elem)
+        anchor_cleanup1(elem)
+        xreftarget_cleanup(elem)
       end
 
-      def anchor_cleanup1(
-
+      def anchor_cleanup1(elem)
+        elem.xpath(IDREF).each do |s|
           if (ret = Metanorma::Utils::to_ncname(s.value)) != (orig = s.value)
             s.value = ret
             output = s.parent.dup
             output.children.remove
-            @log.add("Anchors", s.parent,
+            @log.add("Anchors", s.parent,
+                     "normalised identifier in #{output} from #{orig}")
           end
         end
       end
 
-      def xreftarget_cleanup(
-
+      def xreftarget_cleanup(elem)
+        elem.xpath("//xref/@target").each do |s|
           if (ret = to_xreftarget(s.value)) != (orig = s.value)
             s.value = ret
             output = s.parent.dup
             output.children.remove
-            @log.add("Anchors", s.parent,
+            @log.add("Anchors", s.parent,
+                     "normalised identifier in #{output} from #{orig}")
           end
         end
       end
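A hedged illustration of the new to_xreftarget behaviour (the character class below is a stand-in; the real code uses Metanorma::Utils::NAMECHAR): a target of the form file#anchor is normalised on each side of the "#" separately, so the separator itself survives.

    # Simplified sketch, not the gem's implementation.
    def to_xreftarget_sketch(str)
      return str.gsub(/[^\w.-]/, "_") unless /^[^#]+#.+$/.match?(str)

      pref, suff = str.split("#", 2)
      "#{pref.gsub(/[^\w.-]/, "_")}##{suff.gsub(/[^\w.-]/, "_")}"
    end

    puts to_xreftarget_sketch("other document.adoc#clause 1")
    # => other_document.adoc#clause_1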
data/lib/asciidoctor/standoc/cleanup_ref.rb
CHANGED
@@ -149,7 +149,14 @@ module Asciidoctor
         end
       end
 
+      def bibitem_nested_id(xmldoc)
+        xmldoc.xpath("//bibitem//bibitem").each do |b|
+          b.delete("id")
+        end
+      end
+
       def bibitem_cleanup(xmldoc)
+        bibitem_nested_id(xmldoc)
         ref_dl_cleanup(xmldoc)
         fetch_local_bibitem(xmldoc)
       end
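A minimal illustration (not from the gem) of what the new bibitem_nested_id pass does: a bibitem nested inside another bibitem, for example under a relation, loses its id so that only the top-level entry remains an anchor target.

    require "nokogiri"

    xml = Nokogiri::XML('<bibitem id="A"><relation><bibitem id="B"/></relation></bibitem>')
    xml.xpath("//bibitem//bibitem").each { |b| b.delete("id") }
    puts xml.root.to_xml
    # => <bibitem id="A"><relation><bibitem/></relation></bibitem>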
data/lib/asciidoctor/standoc/cleanup_terms.rb
CHANGED
@@ -60,15 +60,15 @@ module Asciidoctor
 
       def term_children_cleanup(xmldoc)
         xmldoc.xpath("//term").each do |t|
-
-
-
+          %w(termnote termexample termsource).each do |w|
+            t.xpath("./#{w}").each { |n| t << n.remove }
+          end
         end
-      end
+      end
 
       def termdef_from_termbase(xmldoc)
         xmldoc.xpath("//term").each do |x|
-          if c = x.at("./origin/termref")
+          if (c = x.at("./origin/termref")) && !x.at("./definition")
             x.at("./origin").previous = fetch_termbase(c["base"], c.text)
           end
         end
@@ -93,33 +93,34 @@ module Asciidoctor
         termdomain1_cleanup(xmldoc)
         termnote_example_cleanup(xmldoc)
         termdef_subclause_cleanup(xmldoc)
-        term_children_cleanup(xmldoc)
+        term_children_cleanup(xmldoc)
         termdocsource_cleanup(xmldoc)
-      end
+      end
 
       # Indices sort after letter but before any following
       # letter (x, x_m, x_1, xa); we use colon to force that sort order.
       # Numbers sort *after* letters; we use thorn to force that sort order.
-      def symbol_key(
-        key =
+      def symbol_key(sym)
+        key = sym.dup
         key.traverse do |n|
           next unless n.name == "math"
+
           n.replace(grkletters(MathML2AsciiMath.m2a(n.to_xml)))
         end
         ret = Nokogiri::XML(key.to_xml)
-        HTMLEntities.new.decode(ret.text.downcase)
-        gsub(/[\[\]
-        gsub(/[[:punct:]]|[_^]/, ":\\0").gsub(/`/, "")
-        gsub(/[0-9]+/, "þ\\0")
+        HTMLEntities.new.decode(ret.text.downcase)
+          .gsub(/[\[\]{}<>()]/, "").gsub(/\s/m, "")
+          .gsub(/[[:punct:]]|[_^]/, ":\\0").gsub(/`/, "")
+          .gsub(/[0-9]+/, "þ\\0")
       end
-
+
       def grkletters(x)
         x.gsub(/\b(alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega)\b/i, "&\\1;")
       end
 
-      def extract_symbols_list(
+      def extract_symbols_list(dlist)
         dl_out = []
-
+        dlist.xpath("./dt | ./dd").each do |dtd|
           if dtd.name == "dt"
             dl_out << { dt: dtd.remove, key: symbol_key(dtd) }
           else
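A worked example of the sort-key convention described in the comment above (simplified; the real symbol_key also runs MathML through MathML2AsciiMath and decodes HTML entities first): punctuation, underscore and caret are prefixed with a colon and digit runs with a thorn, which yields the documented ordering of symbols.

    symbols = ["xa", "x_1", "x", "x_m"]
    key = ->(s) { s.downcase.gsub(/[[:punct:]]|[_^]/, ":\\0").gsub(/[0-9]+/, "þ\\0") }
    puts symbols.sort_by(&key).inspect
    # => ["x", "x_m", "x_1", "xa"]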
@@ -128,7 +129,7 @@ module Asciidoctor
           end
         end
         dl_out
       end
-
+
       def symbols_cleanup(docxml)
         docxml.xpath("//definitions/dl").each do |dl|
           dl_out = extract_symbols_list(dl)
@@ -136,7 +137,7 @@ module Asciidoctor
           dl.children = dl_out.map { |d| d[:dt].to_s + d[:dd].to_s }.join("\n")
         end
         docxml
-      end
+      end
     end
   end
 end
data/lib/asciidoctor/standoc/inline.rb
CHANGED
@@ -46,13 +46,13 @@ module Asciidoctor
       def inline_anchor_xref_attrs(node)
         m = /^(?<drop>droploc%)?(?<case>capital%|lowercase%)?(?<drop2>droploc%)?
         (?<fn>fn:?\s*)?(?<text>.*)$/x.match node.text
-        casing = m.nil? ? nil : m[:case]&.sub(/%$/, "")
-        droploc = m.nil? ? nil : ((m[:drop].nil? && m[:drop2].nil?) ? nil: true)
-        f = (m.nil? || m[:fn].nil?) ? "inline" : "footnote"
-        c = (!m.nil? && (%i[case fn drop drop2].any? { |x| !m[x].nil? })) ?
-          m[:text] : node.text
         t = node.target.gsub(/^#/, "").gsub(%r{(\.xml|\.adoc)(#.*$)}, "\\2")
-        { target: t, type:
+        m.nil? and return { target: t, type: "inline", text: node.text }
+        droploc = m[:drop].nil? && m[:drop2].nil? ? nil : true
+        f = m[:fn].nil? ? "inline" : "footnote"
+        c = %i[case fn drop drop2].any? { |x| !m[x].nil? } ? m[:text] : node.text
+        { target: t, type: f, case: m[:case]&.sub(/%$/, ""), droploc: droploc,
+          text: c }
       end
 
       def inline_anchor_link(node)
@@ -115,12 +115,12 @@ module Asciidoctor
 
       def xml_encode(text)
         HTMLEntities.new.encode(text, :basic, :hexadecimal)
-          .gsub(/&gt;/, ">").gsub(
+          .gsub(/&amp;gt;/, ">").gsub(/&amp;lt;/, "<").gsub(/&amp;amp;/, "&")
           .gsub(/&gt;/, ">").gsub(/&lt;/, "<").gsub(/&amp;/, "&")
           .gsub(/&quot;/, '"').gsub(/&#13;/, "\n").gsub(/&amp;#/, "&#")
       end
 
-      def
+      def latex_parse1(text)
         lxm_input = Unicode2LaTeX.unicode2latex(HTMLEntities.new.decode(text))
         results = Latexmath.parse(lxm_input).to_mathml
         results.nil? and
@@ -134,16 +134,19 @@ module Asciidoctor
         <([^:>&]+:)?math(\s+[^>&]+)?>/x.match? text
           math = xml_encode(text)
           xml.stem math, **{ type: "MathML" }
-        elsif style == :latexmath
-          latex = latex_parse(text) or return xml.stem **{ type: "MathML" }
-          xml.stem **{ type: "MathML" } do |s|
-            math = Nokogiri::XML.fragment(latex.sub(/<\?[^>]+>/, ""))
-              .elements[0]
-            math.delete("alttext")
-            s.parent.children = math
-          end
+        elsif style == :latexmath then latex_parse(text, xml)
         else
-          xml.stem text&.gsub(
+          xml.stem text&.gsub(/&amp;#/, "&#"), **{ type: "AsciiMath" }
+        end
+      end
+
+      def latex_parse(text, xml)
+        latex = latex_parse1(text) or return xml.stem **{ type: "MathML" }
+        xml.stem **{ type: "MathML" } do |s|
+          math = Nokogiri::XML.fragment(latex.sub(/<\?[^>]+>/, ""))
+            .elements[0]
+          math.delete("alttext")
+          s.parent.children = math
         end
       end
 
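To make the prefix syntax handled by inline_anchor_xref_attrs concrete, here is a small illustrative parse (output shape approximated, and the attribute hash below is only a sketch of the returned values): droploc%, capital%/lowercase% and fn: prefixes are peeled off into attributes, and the remainder becomes the link text.

    m = /^(?<drop>droploc%)?(?<case>capital%|lowercase%)?(?<drop2>droploc%)?
         (?<fn>fn:?\s*)?(?<text>.*)$/x.match("droploc%capital%fn: see clause 5")
    p({ type: m[:fn] ? "footnote" : "inline",
        case: m[:case]&.sub(/%$/, ""),
        droploc: m[:drop] || m[:drop2] ? true : nil,
        text: m[:text] })
    # prints something like:
    # {:type=>"footnote", :case=>"capital", :droploc=>true, :text=>"see clause 5"}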