metanorma-standoc 1.9.4 → 1.10.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (31)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/rake.yml +1 -1
  3. data/.rubocop.yml +1 -1
  4. data/lib/asciidoctor/standoc/cleanup_inline.rb +117 -77
  5. data/lib/asciidoctor/standoc/cleanup_ref.rb +7 -0
  6. data/lib/asciidoctor/standoc/cleanup_terms.rb +19 -18
  7. data/lib/asciidoctor/standoc/inline.rb +20 -17
  8. data/lib/asciidoctor/standoc/isodoc.rng +18 -1
  9. data/lib/asciidoctor/standoc/macros_plantuml.rb +19 -21
  10. data/lib/asciidoctor/standoc/macros_terms.rb +33 -23
  11. data/lib/asciidoctor/standoc/term_lookup_cleanup.rb +10 -12
  12. data/lib/asciidoctor/standoc/terms.rb +1 -1
  13. data/lib/asciidoctor/standoc/validate.rb +21 -8
  14. data/lib/metanorma/standoc/version.rb +1 -1
  15. data/metanorma-standoc.gemspec +2 -2
  16. data/spec/asciidoctor/blocks_spec.rb +6 -6
  17. data/spec/asciidoctor/cleanup_spec.rb +37 -6
  18. data/spec/asciidoctor/isobib_cache_spec.rb +4 -6
  19. data/spec/asciidoctor/lists_spec.rb +147 -135
  20. data/spec/asciidoctor/macros_spec.rb +505 -181
  21. data/spec/asciidoctor/refs_spec.rb +12 -12
  22. data/spec/asciidoctor/validate_spec.rb +66 -20
  23. data/spec/vcr_cassettes/dated_iso_ref_joint_iso_iec.yml +42 -42
  24. data/spec/vcr_cassettes/isobib_get_123.yml +12 -12
  25. data/spec/vcr_cassettes/isobib_get_123_1.yml +26 -26
  26. data/spec/vcr_cassettes/isobib_get_123_1_fr.yml +35 -35
  27. data/spec/vcr_cassettes/isobib_get_123_2001.yml +13 -13
  28. data/spec/vcr_cassettes/isobib_get_124.yml +12 -12
  29. data/spec/vcr_cassettes/rfcbib_get_rfc8341.yml +13 -13
  30. data/spec/vcr_cassettes/separates_iev_citations_by_top_level_clause.yml +51 -61
  31. metadata +5 -5
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: d42939fe573eddd8bd58ff20a2e3eaacd3eb9de328afccabba60453a8b94cd43
4
- data.tar.gz: e2d2e0f516b11587c5dcc6e8f87d39e4d63095b8e96014e7d1bfa610ceb6bae8
3
+ metadata.gz: f802ef3bf91b1b1af85aa22ca72569b6a6d74b29ed4389373e6b42e7d364b016
4
+ data.tar.gz: 55bd7031c2f7a2623955c9d1778b48a5a1666d15a0fe160a8058b544ffe60f92
5
5
  SHA512:
6
- metadata.gz: 653295f7c1dbdb48a201f0784e6d123058a2ccd0abc018fe9ae68049b9c01f29d0120c3a3ec5a25b968e18fbbf4ca3c85c0da294985f2dcee42ea2d049cdb526
7
- data.tar.gz: d36fd8fa5b679f71c780bab99fe4c5a18d58c40e8995065975d87cbc0ce51f6459748c1c5310f3c4419d3058087a78d5e9d481839e4579c2d872d0359852ee28
6
+ metadata.gz: 59462520c525758884fdeac23103756b75908daf58707c25ed1420c3e7184e6c3cfaaa7ecd3770619cb3f5d7d9eedc9778b96980b5a33fcdedbea015da9e2684
7
+ data.tar.gz: ad562abbf62b1217ab40c933a1f32fe7dbbc474561c47ff0ed91c7de9e9262e3b0612393d8354eccacf30b034316635f5c84aa80f24cc2d5e6e5650bd918e485
@@ -16,7 +16,7 @@ jobs:
16
16
  strategy:
17
17
  fail-fast: false
18
18
  matrix:
19
- ruby: [ '3.0', '2.7', '2.6', '2.5', '2.4' ]
19
+ ruby: [ '3.0', '2.7', '2.6', '2.5' ]
20
20
  os: [ ubuntu-latest, windows-latest, macos-latest ]
21
21
  experimental: [ false ]
22
22
  steps:
data/.rubocop.yml CHANGED
@@ -7,4 +7,4 @@ inherit_from:
7
7
  # ...
8
8
 
9
9
  AllCops:
10
- TargetRubyVersion: 2.4
10
+ TargetRubyVersion: 2.5
@@ -3,32 +3,54 @@ require "metanorma-utils"
3
3
  module Asciidoctor
4
4
  module Standoc
5
5
  module Cleanup
6
- def empty_text_before_first_element(x)
7
- x.children.each do |c|
8
- return false if c.text? and /\S/.match(c.text)
6
+ def empty_text_before_first_element(elem)
7
+ elem.children.each do |c|
8
+ return false if c.text? && /\S/.match(c.text)
9
9
  return true if c.element?
10
10
  end
11
11
  true
12
12
  end
13
13
 
14
- def strip_initial_space(x)
15
- if x.children[0].text?
16
- if !/\S/.match(x.children[0].text)
17
- x.children[0].remove
18
- else
19
- x.children[0].content = x.children[0].text.gsub(/^ /, "")
20
- end
14
+ def strip_initial_space(elem)
15
+ return unless elem.children[0].text?
16
+
17
+ if /\S/.match?(elem.children[0].text)
18
+ elem.children[0].content = elem.children[0].text.gsub(/^ /, "")
19
+ else
20
+ elem.children[0].remove
21
21
  end
22
22
  end
23
23
 
24
24
  def bookmark_cleanup(xmldoc)
25
+ li_bookmark_cleanup(xmldoc)
26
+ dt_bookmark_cleanup(xmldoc)
27
+ end
28
+
29
+ def bookmark_to_id(elem, bookmark)
30
+ parent = bookmark.parent
31
+ elem["id"] = bookmark.remove["id"]
32
+ strip_initial_space(parent)
33
+ end
34
+
35
+ def li_bookmark_cleanup(xmldoc)
25
36
  xmldoc.xpath("//li[descendant::bookmark]").each do |x|
26
- if x&.elements&.first&.name == "p" &&
27
- x&.elements&.first&.elements&.first&.name == "bookmark"
28
- if empty_text_before_first_element(x.elements[0])
29
- x["id"] = x.elements[0].elements[0].remove["id"]
30
- strip_initial_space(x.elements[0])
31
- end
37
+ if x.at("./*[1][local-name() = 'p']/"\
38
+ "*[1][local-name() = 'bookmark']") &&
39
+ empty_text_before_first_element(x.elements[0])
40
+ bookmark_to_id(x, x.elements[0].elements[0])
41
+ end
42
+ end
43
+ end
44
+
45
+ def dt_bookmark_cleanup(xmldoc)
46
+ xmldoc.xpath("//dt[descendant::bookmark]").each do |x|
47
+ if x.at("./*[1][local-name() = 'p']/"\
48
+ "*[1][local-name() = 'bookmark']") &&
49
+ empty_text_before_first_element(x.elements[0])
50
+ bookmark_to_id(x, x.elements[0].elements[0])
51
+ elsif x.at("./*[1][local-name() = 'bookmark']") &&
52
+ empty_text_before_first_element(x)
53
+ bookmark_to_id(x, x.elements[0])
32
54
  end
33
55
  end
34
56
  end
@@ -46,65 +68,67 @@ module Asciidoctor
46
68
  LOCALITY_RE = Regexp.new(LOCALITY_REGEX_STR.gsub(/\s/, ""),
47
69
  Regexp::IGNORECASE | Regexp::MULTILINE)
48
70
 
49
- def tq(x)
50
- x.sub(/^"/, "").sub(/"$/, "")
71
+ def tq(text)
72
+ text.sub(/^"/, "").sub(/"$/, "")
51
73
  end
52
74
 
53
- def extract_localities(x)
54
- f = x&.children&.first or return
75
+ def extract_localities(elem)
76
+ f = elem&.children&.first or return
55
77
  f.text? or return
56
78
  head = f.remove.text
57
- tail = x&.children&.remove
58
- extract_localities1(x, head)
59
- tail and x << tail
79
+ tail = elem&.children&.remove
80
+ extract_localities1(elem, head)
81
+ tail and elem << tail
60
82
  end
61
83
 
62
- def extract_localities1(x, text)
63
- b = x.add_child("<localityStack/>").first if LOCALITY_RE.match text
84
+ def extract_localities1(elem, text)
85
+ b = elem.add_child("<localityStack/>").first if LOCALITY_RE.match text
64
86
  while (m = LOCALITY_RE.match text)
65
87
  ref = m[:ref] ? "<referenceFrom>#{tq m[:ref]}</referenceFrom>" : ""
66
88
  refto = m[:to] ? "<referenceTo>#{tq m[:to]}</referenceTo>" : ""
67
89
  loc = m[:locality]&.downcase || m[:locality2]&.downcase
68
90
  b.add_child("<locality type='#{loc}'>#{ref}#{refto}</locality>")
69
91
  text = m[:text]
70
- b = x.add_child("<localityStack/>").first if m[:punct] == ";"
92
+ b = elem.add_child("<localityStack/>").first if m[:punct] == ";"
71
93
  end
72
- x.add_child(text) if text
94
+ elem.add_child(text) if text
73
95
  end
74
96
 
75
- def xref_to_eref(x)
76
- x["bibitemid"] = x["target"]
77
- unless x["citeas"] = @anchors&.dig(x["target"], :xref)
78
- @internal_eref_namespaces.include?(x["type"]) or
79
- @log.add("Crossreferences", x,
80
- "#{x['target']} does not have a corresponding anchor ID in the bibliography!")
97
+ def xref_to_eref(elem)
98
+ elem["bibitemid"] = elem["target"]
99
+ unless elem["citeas"] = @anchors&.dig(elem["target"], :xref)
100
+ @internal_eref_namespaces.include?(elem["type"]) or
101
+ @log.add("Crossreferences", elem,
102
+ "#{elem['target']} does not have a corresponding "\
103
+ "anchor ID in the bibliography!")
81
104
  end
82
- x.delete("target")
83
- extract_localities(x) unless x.children.empty?
105
+ elem.delete("target")
106
+ extract_localities(elem) unless elem.children.empty?
84
107
  end
85
108
 
86
109
  def xref_cleanup(xmldoc)
87
110
  xmldoc.xpath("//xref").each do |x|
88
111
  /:/.match(x["target"]) and xref_to_internal_eref(x)
89
112
  next unless x.name == "xref"
113
+
90
114
  if refid? x["target"]
91
115
  x.name = "eref"
92
116
  xref_to_eref(x)
93
- else
94
- x.delete("type")
117
+ else x.delete("type")
95
118
  end
96
119
  end
97
120
  end
98
121
 
99
- def xref_to_internal_eref(x)
100
- a = x["target"].split(":", 3)
101
- unless a.size < 2 || a[0].empty? || a[1].empty?
102
- x["target"] = "#{a[0]}_#{a[1]}"
103
- a.size > 2 and x.children = %{anchor="#{a[2..-1].join("")}",#{x&.children&.text}}
104
- x["type"] = a[0]
122
+ def xref_to_internal_eref(elem)
123
+ a = elem["target"].split(":", 3)
124
+ unless a.size < 2 || a[0].empty? || a[1].empty?
125
+ elem["target"] = "#{a[0]}_#{a[1]}"
126
+ a.size > 2 and
127
+ elem.children = %{anchor="#{a[2..-1].join}",#{elem&.children&.text}}
128
+ elem["type"] = a[0]
105
129
  @internal_eref_namespaces << a[0]
106
- x.name = "eref"
107
- xref_to_eref(x)
130
+ elem.name = "eref"
131
+ xref_to_eref(elem)
108
132
  end
109
133
  end
110
134
 
@@ -116,10 +140,11 @@ module Asciidoctor
116
140
 
117
141
  def origin_cleanup(xmldoc)
118
142
  xmldoc.xpath("//origin/concept[termref]").each do |x|
119
- x.replace(x.children)
143
+ t = x.at("./termref")
144
+ x.replace(t)
120
145
  end
121
146
  xmldoc.xpath("//origin").each do |x|
122
- x["citeas"] = @anchors&.dig(x["bibitemid"], :xref) ||
147
+ x["citeas"] = @anchors&.dig(x["bibitemid"], :xref) or
123
148
  @log.add("Crossreferences", x,
124
149
  "#{x['bibitemid']} does not have a corresponding anchor "\
125
150
  "ID in the bibliography!")
@@ -128,68 +153,83 @@ module Asciidoctor
128
153
  end
129
154
 
130
155
  def concept_cleanup(xmldoc)
131
- xmldoc.xpath("//concept").each do |x|
132
- x.delete("term") if x["term"].empty?
133
- if /:/.match(x["key"]) then concept_termbase_cleanup(x)
156
+ xmldoc.xpath("//concept[not(termxref)]").each do |x|
157
+ term = x.at("./refterm")
158
+ term&.remove if term&.text&.empty?
159
+ x.children.remove if x&.children&.text&.strip&.empty?
160
+ key_extract_locality(x)
161
+ if /:/.match?(x["key"]) then concept_termbase_cleanup(x)
134
162
  elsif refid? x["key"] then concept_eref_cleanup(x)
135
- else
136
- concept_xref_cleanup(x)
163
+ else concept_xref_cleanup(x)
137
164
  end
138
165
  x.delete("key")
139
166
  end
140
167
  end
141
168
 
142
- def concept_termbase_cleanup(x)
143
- text = x&.children&.first&.remove&.text
144
- termbase, key = x["key"].split(/:/, 2)
145
- x.add_child(%(<termref base="#{termbase}" target="#{key}">) +
146
- "#{text}</termref>")
169
+ def key_extract_locality(elem)
170
+ return unless /,/.match?(elem["key"])
171
+
172
+ elem.add_child("<locality>#{elem['key'].sub(/^[^,]+,/, '')}</locality>")
173
+ elem["key"] = elem["key"].sub(/,.*$/, "")
174
+ end
175
+
176
+ def concept_termbase_cleanup(elem)
177
+ t = elem&.at("./xrefrender")&.remove&.children
178
+ termbase, key = elem["key"].split(/:/, 2)
179
+ elem.add_child(%(<termref base="#{termbase}" target="#{key}">) +
180
+ "#{t&.to_xml}</termref>")
147
181
  end
148
182
 
149
- def concept_xref_cleanup(x)
150
- text = x&.children&.first&.remove&.text
151
- x.add_child(%(<xref target="#{x['key']}">#{text}</xref>))
183
+ def concept_xref_cleanup(elem)
184
+ t = elem&.at("./xrefrender")&.remove&.children
185
+ elem.add_child(%(<xref target="#{elem['key']}">#{t&.to_xml}</xref>))
152
186
  end
153
187
 
154
- def concept_eref_cleanup(x)
155
- x.children = "<eref>#{x.children.to_xml}</eref>"
156
- extract_localities(x.first_element_child)
188
+ def concept_eref_cleanup(elem)
189
+ t = elem&.at("./xrefrender")&.remove&.children&.to_xml
190
+ l = elem&.at("./locality")&.remove&.children&.to_xml
191
+ elem.add_child "<eref bibitemid='#{elem['key']}'>#{l}</eref>"
192
+ extract_localities(elem.elements[-1])
193
+ elem.elements[-1].add_child(t) if t
157
194
  end
158
195
 
159
- def to_xreftarget(s)
160
- return Metanorma::Utils::to_ncname(s) unless /^[^#]+#.+$/.match(s)
161
- /^(?<pref>[^#]+)#(?<suff>.+)$/ =~ s
162
- pref = pref.gsub(%r([#{Metanorma::Utils::NAMECHAR}]), "_")
163
- suff = suff.gsub(%r([#{Metanorma::Utils::NAMECHAR}]), "_")
196
+ def to_xreftarget(str)
197
+ return Metanorma::Utils::to_ncname(str) unless /^[^#]+#.+$/.match?(str)
198
+
199
+ /^(?<pref>[^#]+)#(?<suff>.+)$/ =~ str
200
+ pref = pref.gsub(%r([#{Metanorma::Utils::NAMECHAR}])o, "_")
201
+ suff = suff.gsub(%r([#{Metanorma::Utils::NAMECHAR}])o, "_")
164
202
  "#{pref}##{suff}"
165
203
  end
166
204
 
167
205
  IDREF = "//*/@id | //review/@from | //review/@to | "\
168
206
  "//callout/@target | //citation/@bibitemid | //eref/@bibitemid".freeze
169
207
 
170
- def anchor_cleanup(x)
171
- anchor_cleanup1(x)
172
- xreftarget_cleanup(x)
208
+ def anchor_cleanup(elem)
209
+ anchor_cleanup1(elem)
210
+ xreftarget_cleanup(elem)
173
211
  end
174
212
 
175
- def anchor_cleanup1(x)
176
- x.xpath(IDREF).each do |s|
213
+ def anchor_cleanup1(elem)
214
+ elem.xpath(IDREF).each do |s|
177
215
  if (ret = Metanorma::Utils::to_ncname(s.value)) != (orig = s.value)
178
216
  s.value = ret
179
217
  output = s.parent.dup
180
218
  output.children.remove
181
- @log.add("Anchors", s.parent, "normalised identifier in #{output} from #{orig}")
219
+ @log.add("Anchors", s.parent,
220
+ "normalised identifier in #{output} from #{orig}")
182
221
  end
183
222
  end
184
223
  end
185
224
 
186
- def xreftarget_cleanup(x)
187
- x.xpath("//xref/@target").each do |s|
225
+ def xreftarget_cleanup(elem)
226
+ elem.xpath("//xref/@target").each do |s|
188
227
  if (ret = to_xreftarget(s.value)) != (orig = s.value)
189
228
  s.value = ret
190
229
  output = s.parent.dup
191
230
  output.children.remove
192
- @log.add("Anchors", s.parent, "normalised identifier in #{output} from #{orig}")
231
+ @log.add("Anchors", s.parent,
232
+ "normalised identifier in #{output} from #{orig}")
193
233
  end
194
234
  end
195
235
  end
@@ -149,7 +149,14 @@ module Asciidoctor
149
149
  end
150
150
  end
151
151
 
152
+ def bibitem_nested_id(xmldoc)
153
+ xmldoc.xpath("//bibitem//bibitem").each do |b|
154
+ b.delete("id")
155
+ end
156
+ end
157
+
152
158
  def bibitem_cleanup(xmldoc)
159
+ bibitem_nested_id(xmldoc)
153
160
  ref_dl_cleanup(xmldoc)
154
161
  fetch_local_bibitem(xmldoc)
155
162
  end
@@ -60,15 +60,15 @@ module Asciidoctor
60
60
 
61
61
  def term_children_cleanup(xmldoc)
62
62
  xmldoc.xpath("//term").each do |t|
63
- t.xpath("./termnote").each { |n| t << n.remove }
64
- t.xpath("./termexample").each { |n| t << n.remove }
65
- t.xpath("./termsource").each { |n| t << n.remove }
63
+ %w(termnote termexample termsource).each do |w|
64
+ t.xpath("./#{w}").each { |n| t << n.remove }
65
+ end
66
66
  end
67
- end
67
+ end
68
68
 
69
69
  def termdef_from_termbase(xmldoc)
70
70
  xmldoc.xpath("//term").each do |x|
71
- if c = x.at("./origin/termref") and !x.at("./definition")
71
+ if (c = x.at("./origin/termref")) && !x.at("./definition")
72
72
  x.at("./origin").previous = fetch_termbase(c["base"], c.text)
73
73
  end
74
74
  end
@@ -93,33 +93,34 @@ module Asciidoctor
93
93
  termdomain1_cleanup(xmldoc)
94
94
  termnote_example_cleanup(xmldoc)
95
95
  termdef_subclause_cleanup(xmldoc)
96
- term_children_cleanup(xmldoc)
96
+ term_children_cleanup(xmldoc)
97
97
  termdocsource_cleanup(xmldoc)
98
- end
98
+ end
99
99
 
100
100
  # Indices sort after letter but before any following
101
101
  # letter (x, x_m, x_1, xa); we use colon to force that sort order.
102
102
  # Numbers sort *after* letters; we use thorn to force that sort order.
103
- def symbol_key(x)
104
- key = x.dup
103
+ def symbol_key(sym)
104
+ key = sym.dup
105
105
  key.traverse do |n|
106
106
  next unless n.name == "math"
107
+
107
108
  n.replace(grkletters(MathML2AsciiMath.m2a(n.to_xml)))
108
109
  end
109
110
  ret = Nokogiri::XML(key.to_xml)
110
- HTMLEntities.new.decode(ret.text.downcase).
111
- gsub(/[\[\]\{\}<>\(\)]/, "").gsub(/\s/m, "").
112
- gsub(/[[:punct:]]|[_^]/, ":\\0").gsub(/`/, "").
113
- gsub(/[0-9]+/, "þ\\0")
111
+ HTMLEntities.new.decode(ret.text.downcase)
112
+ .gsub(/[\[\]{}<>()]/, "").gsub(/\s/m, "")
113
+ .gsub(/[[:punct:]]|[_^]/, ":\\0").gsub(/`/, "")
114
+ .gsub(/[0-9]+/, "þ\\0")
114
115
  end
115
-
116
+
116
117
  def grkletters(x)
117
118
  x.gsub(/\b(alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega)\b/i, "&\\1;")
118
119
  end
119
120
 
120
- def extract_symbols_list(dl)
121
+ def extract_symbols_list(dlist)
121
122
  dl_out = []
122
- dl.xpath("./dt | ./dd").each do |dtd|
123
+ dlist.xpath("./dt | ./dd").each do |dtd|
123
124
  if dtd.name == "dt"
124
125
  dl_out << { dt: dtd.remove, key: symbol_key(dtd) }
125
126
  else
@@ -128,7 +129,7 @@ module Asciidoctor
128
129
  end
129
130
  dl_out
130
131
  end
131
-
132
+
132
133
  def symbols_cleanup(docxml)
133
134
  docxml.xpath("//definitions/dl").each do |dl|
134
135
  dl_out = extract_symbols_list(dl)
@@ -136,7 +137,7 @@ module Asciidoctor
136
137
  dl.children = dl_out.map { |d| d[:dt].to_s + d[:dd].to_s }.join("\n")
137
138
  end
138
139
  docxml
139
- end
140
+ end
140
141
  end
141
142
  end
142
143
  end
@@ -46,13 +46,13 @@ module Asciidoctor
46
46
  def inline_anchor_xref_attrs(node)
47
47
  m = /^(?<drop>droploc%)?(?<case>capital%|lowercase%)?(?<drop2>droploc%)?
48
48
  (?<fn>fn:?\s*)?(?<text>.*)$/x.match node.text
49
- casing = m.nil? ? nil : m[:case]&.sub(/%$/, "")
50
- droploc = m.nil? ? nil : ((m[:drop].nil? && m[:drop2].nil?) ? nil: true)
51
- f = (m.nil? || m[:fn].nil?) ? "inline" : "footnote"
52
- c = (!m.nil? && (%i[case fn drop drop2].any? { |x| !m[x].nil? })) ?
53
- m[:text] : node.text
54
49
  t = node.target.gsub(/^#/, "").gsub(%r{(\.xml|\.adoc)(#.*$)}, "\\2")
55
- { target: t, type: f, case: casing, droploc: droploc, text: c }
50
+ m.nil? and return { target: t, type: "inline", text: node.text }
51
+ droploc = m[:drop].nil? && m[:drop2].nil? ? nil : true
52
+ f = m[:fn].nil? ? "inline" : "footnote"
53
+ c = %i[case fn drop drop2].any? { |x| !m[x].nil? } ? m[:text] : node.text
54
+ { target: t, type: f, case: m[:case]&.sub(/%$/, ""), droploc: droploc,
55
+ text: c }
56
56
  end
57
57
 
58
58
  def inline_anchor_link(node)
@@ -115,12 +115,12 @@ module Asciidoctor
115
115
 
116
116
  def xml_encode(text)
117
117
  HTMLEntities.new.encode(text, :basic, :hexadecimal)
118
- .gsub(/&amp;gt;/, ">").gsub(/\&amp;lt;/, "<").gsub(/&amp;amp;/, "&")
118
+ .gsub(/&amp;gt;/, ">").gsub(/&amp;lt;/, "<").gsub(/&amp;amp;/, "&")
119
119
  .gsub(/&gt;/, ">").gsub(/&lt;/, "<").gsub(/&amp;/, "&")
120
120
  .gsub(/&quot;/, '"').gsub(/&#xa;/, "\n").gsub(/&amp;#/, "&#")
121
121
  end
122
122
 
123
- def latex_parse(text)
123
+ def latex_parse1(text)
124
124
  lxm_input = Unicode2LaTeX.unicode2latex(HTMLEntities.new.decode(text))
125
125
  results = Latexmath.parse(lxm_input).to_mathml
126
126
  results.nil? and
@@ -134,16 +134,19 @@ module Asciidoctor
134
134
  <([^:>&]+:)?math(\s+[^>&]+)?>/x.match? text
135
135
  math = xml_encode(text)
136
136
  xml.stem math, **{ type: "MathML" }
137
- elsif style == :latexmath
138
- latex = latex_parse(text) or return xml.stem **{ type: "MathML" }
139
- xml.stem **{ type: "MathML" } do |s|
140
- math = Nokogiri::XML.fragment(latex.sub(/<\?[^>]+>/, ""))
141
- .elements[0]
142
- math.delete("alttext")
143
- s.parent.children = math
144
- end
137
+ elsif style == :latexmath then latex_parse(text, xml)
145
138
  else
146
- xml.stem text&.gsub(/\&amp;#/, "&#"), **{ type: "AsciiMath" }
139
+ xml.stem text&.gsub(/&amp;#/, "&#"), **{ type: "AsciiMath" }
140
+ end
141
+ end
142
+
143
+ def latex_parse(text, xml)
144
+ latex = latex_parse1(text) or return xml.stem **{ type: "MathML" }
145
+ xml.stem **{ type: "MathML" } do |s|
146
+ math = Nokogiri::XML.fragment(latex.sub(/<\?[^>]+>/, ""))
147
+ .elements[0]
148
+ math.delete("alttext")
149
+ s.parent.children = math
147
150
  end
148
151
  end
149
152