metanorma-un 0.3.1

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +7 -0
  2. data/.github/workflows/macos.yml +39 -0
  3. data/.github/workflows/ubuntu.yml +39 -0
  4. data/.github/workflows/windows.yml +42 -0
  5. data/.gitignore +1 -0
  6. data/.hound.yml +3 -0
  7. data/.rubocop.yml +10 -0
  8. data/CODE_OF_CONDUCT.md +74 -0
  9. data/Gemfile +11 -0
  10. data/LICENSE +25 -0
  11. data/README.adoc +141 -0
  12. data/Rakefile +6 -0
  13. data/bin/console +14 -0
  14. data/bin/rspec +17 -0
  15. data/bin/setup +8 -0
  16. data/lib/asciidoctor/un.rb +7 -0
  17. data/lib/asciidoctor/un/basicdoc.rng +1059 -0
  18. data/lib/asciidoctor/un/biblio.rng +1142 -0
  19. data/lib/asciidoctor/un/boilerplate.xml +57 -0
  20. data/lib/asciidoctor/un/converter.rb +209 -0
  21. data/lib/asciidoctor/un/isodoc.rng +1028 -0
  22. data/lib/asciidoctor/un/reqt.rng +171 -0
  23. data/lib/asciidoctor/un/un.rng +242 -0
  24. data/lib/asciidoctor/un/validate.rb +22 -0
  25. data/lib/isodoc/un.rb +10 -0
  26. data/lib/isodoc/un/base_convert.rb +227 -0
  27. data/lib/isodoc/un/html/header.html +225 -0
  28. data/lib/isodoc/un/html/html_unece_intro.html +15 -0
  29. data/lib/isodoc/un/html/html_unece_plenary_titlepage.html +100 -0
  30. data/lib/isodoc/un/html/html_unece_titlepage.html +81 -0
  31. data/lib/isodoc/un/html/htmlstyle.scss +1174 -0
  32. data/lib/isodoc/un/html/logo.jpg +0 -0
  33. data/lib/isodoc/un/html/scripts.html +84 -0
  34. data/lib/isodoc/un/html/scripts.pdf.html +72 -0
  35. data/lib/isodoc/un/html/unece.scss +801 -0
  36. data/lib/isodoc/un/html/word_unece_intro.html +15 -0
  37. data/lib/isodoc/un/html/word_unece_plenary_titlepage.html +160 -0
  38. data/lib/isodoc/un/html/word_unece_titlepage.html +30 -0
  39. data/lib/isodoc/un/html/wordstyle.scss +1141 -0
  40. data/lib/isodoc/un/html_convert.rb +121 -0
  41. data/lib/isodoc/un/metadata.rb +115 -0
  42. data/lib/isodoc/un/pdf_convert.rb +133 -0
  43. data/lib/isodoc/un/word_convert.rb +180 -0
  44. data/lib/metanorma-un.rb +8 -0
  45. data/lib/metanorma/un.rb +12 -0
  46. data/lib/metanorma/un/UN_emblem_blue.svg +193 -0
  47. data/lib/metanorma/un/input.rb +18 -0
  48. data/lib/metanorma/un/processor.rb +43 -0
  49. data/lib/metanorma/un/version.rb +5 -0
  50. data/metanorma-unece.gemspec +48 -0
  51. metadata +334 -0
@@ -0,0 +1,121 @@
+ require_relative "base_convert"
+ require "isodoc"
+
+ module IsoDoc
+   module UN
+
+     # A {Converter} implementation that generates HTML output, and a document
+     # schema encapsulation of the document for validation
+     #
+     class HtmlConvert < IsoDoc::HtmlConvert
+       def initialize(options)
+         @libdir = File.dirname(__FILE__)
+         super
+       end
+
+       def default_fonts(options)
+         {
+           bodyfont: (
+             options[:script] == "Hans" ?
+               '"SimSun",serif' :
+               '"Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif'
+           ),
+           headerfont: (
+             options[:script] == "Hans" ?
+               '"SimHei",sans-serif' :
+               '"Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif'
+           ),
+           monospacefont: '"Space Mono",monospace'
+         }
+       end
+
+       def default_file_locations(_options)
+         {
+           htmlstylesheet: html_doc_path("htmlstyle.scss"),
+           htmlcoverpage: html_doc_path("html_unece_titlepage.html"),
+           htmlintropage: html_doc_path("html_unece_intro.html"),
+           scripts: html_doc_path("scripts.html"),
+         }
+       end
+
+
+       def googlefonts
+         <<~HEAD.freeze
+           <link href="https://fonts.googleapis.com/css?family=Open+Sans:300,300i,400,400i,600,600i|Space+Mono:400,700" rel="stylesheet">
+           <link href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,500,700,900" rel="stylesheet">
+         HEAD
+       end
+
+       def make_body(xml, docxml)
+         plenary = docxml.at(ns("//bibdata/ext[doctype = 'plenary']"))
+         body_attr = { lang: "EN-US", link: "blue", vlink: "#954F72", "xml:lang": "EN-US", class: "container" }
+         if plenary && @htmlcoverpage == html_doc_path("html_unece_titlepage.html")
+           @htmlcoverpage = html_doc_path("html_unece_plenary_titlepage.html")
+         end
+         xml.body **body_attr do |body|
+           make_body1(body, docxml)
+           make_body2(body, docxml)
+           make_body3(body, docxml)
+         end
+       end
+
+       def make_body3(body, docxml)
+         body.div **{ class: "main-section" } do |div3|
+           boilerplate docxml, div3
+           abstract docxml, div3
+           foreword docxml, div3
+           introduction docxml, div3
+           middle docxml, div3
+           footnotes div3
+           comments div3
+         end
+       end
+
+       def middle(isoxml, out)
+         clause isoxml, out
+         annex isoxml, out
+         bibliography isoxml, out
+       end
+
+       def clause_parse_title(node, div, c1, out)
+         if node["inline-header"] == "true"
+           inline_header_title(out, node, c1)
+         else
+           div.send "h#{anchor(node['id'], :level, false) || '1'}" do |h|
+             lbl = anchor(node['id'], :label, false)
+             h << "#{lbl}. " if lbl && !@suppressheadingnumbers
+             insert_tab(h, 1) if lbl && !@suppressheadingnumbers
+             c1&.children&.each { |c2| parse(c2, h) }
+           end
+         end
+       end
+
+       def introduction(isoxml, out)
+         f = isoxml.at(ns("//introduction")) || return
+         page_break(out)
+         out.div **{ class: "Section3", id: f["id"] } do |div|
+           div.h1(**{ class: "IntroTitle" }) do |h1|
+             h1 << @introduction_lbl
+           end
+           f.elements.each do |e|
+             parse(e, div) unless e.name == "title"
+           end
+         end
+       end
+
+       def foreword(isoxml, out)
+         f = isoxml.at(ns("//foreword")) || return
+         page_break(out)
+         out.div **attr_code(id: f["id"]) do |s|
+           s.h1(**{ class: "ForewordTitle" }) do |h1|
+             h1 << @foreword_lbl
+           end
+           f.elements.each { |e| parse(e, s) unless e.name == "title" }
+         end
+       end
+
+       include BaseConvert
+     end
+   end
+ end
+
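The hunk above is data/lib/isodoc/un/html_convert.rb from the file list. As a rough illustration of how such a converter is typically driven, here is a minimal sketch assuming the standard IsoDoc `convert(filename)` entry point; the require target, option values and filename are placeholders, not part of the diff:

    require "metanorma-un"

    # :script selects the font set in default_fonts above ("Hans" switches to
    # SimSun/SimHei); "document.xml" stands for a Metanorma UN semantic XML file.
    IsoDoc::UN::HtmlConvert.new(language: "en", script: "Latn")
      .convert("document.xml")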
@@ -0,0 +1,115 @@
+ require "isodoc"
+ require "twitter_cldr"
+ require "iso-639"
+
+ module IsoDoc
+   module UN
+
+     class Metadata < IsoDoc::Metadata
+       def initialize(lang, script, labels)
+         super
+         here = File.dirname(__FILE__)
+         set(:logo, File.expand_path(File.join(here, "html", "logo.jpg")))
+       end
+
+       def title(isoxml, _out)
+         main = isoxml&.at(ns("//bibdata/title[@language='en' and @type='main']"))&.text
+         set(:doctitle, main)
+       end
+
+       def subtitle(isoxml, _out)
+         main = isoxml&.at(ns("//bibdata/title[@language='en' and @type='subtitle']"))&.text
+         set(:docsubtitle, main)
+       end
+
+       def extract_languages(nodeset)
+         lgs = []
+         nodeset.each do |l|
+           l && ISO_639&.find(l.text)&.english_name &&
+             lgs << ISO_639.find(l.text).english_name
+         end
+         lgs.map { |l| l == "Spanish; Castilian" ? "Spanish" : l }
+       end
+
+       def author(isoxml, _out)
+         tc = isoxml.at(ns("//bibdata/ext/editorialgroup/committee"))
+         set(:tc, tc.text) if tc
+         set(:distribution, isoxml&.at(ns("//bibdata/ext/distribution"))&.text)
+         lgs = extract_languages(isoxml.xpath(ns("//bibdata/language")))
+         lgs = [] if lgs.sort == %w(English French Arabic Chinese Russian Spanish).sort
+         slgs = extract_languages(isoxml.xpath(ns("//bibdata/ext/submissionlanguage")))
+         lgs = [] if slgs.size == 1
+         set(:doclanguage, lgs) unless lgs.empty?
+         set(:submissionlanguage, slgs) unless slgs.empty?
+         session(isoxml, _out)
+       end
+
+       def multival(isoxml, xpath)
+         items = []
+         isoxml.xpath(ns(xpath)).each { |i| items << i.text }
+         items
+       end
+
+       def session(isoxml, _out)
+         set(:session_number, isoxml&.at(ns("//bibdata/ext/session/number"))&.text&.to_i&.
+           localize&.to_rbnf_s("SpelloutRules", "spellout-ordinal")&.capitalize)
+         set(:session_date, isoxml&.at(ns("//bibdata/ext/session/date"))&.text)
+         set(:session_collaborator, isoxml&.at(ns("//bibdata/ext/session/collaborator"))&.text)
+         set(:session_id, isoxml&.at(ns("//bibdata/ext/session/id"))&.text)
+         set(:item_footnote, isoxml&.at(ns("//bibdata/ext/session/item-footnote"))&.text)
+         set(:session_itemnumber, multival(isoxml, "//bibdata/ext/session/item-number"))
+         set(:session_itemname, multival(isoxml, "//bibdata/ext/session/item-name"))
+         set(:session_subitemname, multival(isoxml, "//bibdata/ext/session/subitem-name"))
+       end
+
+       def docid(isoxml, _out)
+         dn = isoxml.at(ns("//bibdata/docidentifier"))&.text
+         set(:docnumber, dn)
+         type = isoxml&.at(ns("//bibdata/ext/doctype"))&.text
+         set(:formatted_docnumber, type == "recommendation" ? "UN/CEFACT Recommendation #{dn}" : dn)
+       end
+
+       def stage_abbr(status)
+         case status
+         when "working-draft" then "wd"
+         when "committee-draft" then "cd"
+         when "draft-standard" then "d"
+         else
+           ""
+         end
+       end
+
+       def unpublished(status)
+         !%w(published withdrawn).include? status.downcase
+       end
+
+       def version(isoxml, _out)
+         super
+         revdate = get[:revdate]
+         set(:revdate_monthyear, monthyr(revdate))
+       end
+
+       MONTHS = {
+         "01": "January",
+         "02": "February",
+         "03": "March",
+         "04": "April",
+         "05": "May",
+         "06": "June",
+         "07": "July",
+         "08": "August",
+         "09": "September",
+         "10": "October",
+         "11": "November",
+         "12": "December",
+       }.freeze
+
+       def monthyr(isodate)
+         m = /(?<yr>\d\d\d\d)-(?<mo>\d\d)/.match isodate
+         return isodate unless m && m[:yr] && m[:mo]
+         return "#{MONTHS[m[:mo].to_sym]} #{m[:yr]}"
+       end
+
+     end
+   end
+ end
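The hunk above is data/lib/isodoc/un/metadata.rb. A quick sketch of the `monthyr` helper's behaviour, assuming an empty labels hash is acceptable for constructing the object in isolation:

    # monthyr maps an ISO date prefix to "Month Year" via the MONTHS table;
    # input that does not match the YYYY-MM pattern is returned unchanged.
    meta = IsoDoc::UN::Metadata.new("en", "Latn", {})
    meta.monthyr("2019-05-20")  # => "May 2019"
    meta.monthyr("undated")     # => "undated"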
@@ -0,0 +1,133 @@
+ require_relative "base_convert"
+ require "isodoc"
+
+ module IsoDoc
+   module UN
+
+     # A {Converter} implementation that generates HTML output, and a document
+     # schema encapsulation of the document for validation
+     #
+     class PdfConvert < IsoDoc::PdfConvert
+       def initialize(options)
+         @libdir = File.dirname(__FILE__)
+         super
+       end
+
+       #def convert1(docxml, filename, dir)
+       #FileUtils.cp html_doc_path('logo.jpg'), File.join(@localdir, "logo.jpg")
+       #@files_to_delete << File.join(@localdir, "logo.jpg")
+       #super
+       #end
+
+       def default_fonts(options)
+         {
+           bodyfont: (
+             options[:script] == "Hans" ?
+               '"SimSun",serif' :
+               '"Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif'
+           ),
+           headerfont: (
+             options[:script] == "Hans" ?
+               '"SimHei",sans-serif' :
+               '"Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif'
+           ),
+           monospacefont: '"Space Mono",monospace'
+         }
+       end
+
+       def default_file_locations(_options)
+         {
+           htmlstylesheet: html_doc_path("htmlstyle.scss"),
+           htmlcoverpage: html_doc_path("html_unece_titlepage.html"),
+           htmlintropage: html_doc_path("html_unece_intro.html"),
+           scripts: html_doc_path("scripts.pdf.html"),
+         }
+       end
+
+
+       def googlefonts
+         <<~HEAD.freeze
+           <link href="https://fonts.googleapis.com/css?family=Open+Sans:300,300i,400,400i,600,600i|Space+Mono:400,700" rel="stylesheet">
+           <link href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,500,700,900" rel="stylesheet">
+         HEAD
+       end
+
+       def make_body(xml, docxml)
+         plenary = docxml.at(ns("//bibdata/ext[doctype = 'plenary']"))
+         body_attr = { lang: "EN-US", link: "blue", vlink: "#954F72", "xml:lang": "EN-US", class: "container" }
+         if plenary && @htmlcoverpage == html_doc_path("html_unece_titlepage.html")
+           @htmlcoverpage = html_doc_path("html_unece_plenary_titlepage.html")
+         end
+         #@htmlintropage = nil if plenary
+         xml.body **body_attr do |body|
+           make_body1(body, docxml)
+           make_body2(body, docxml)
+           make_body3(body, docxml)
+         end
+       end
+
+       def make_body3(body, docxml)
+         body.div **{ class: "main-section" } do |div3|
+           boilerplate docxml, div3
+           abstract docxml, div3
+           foreword docxml, div3
+           introduction docxml, div3
+           middle docxml, div3
+           footnotes div3
+           comments div3
+         end
+       end
+
+       def html_preface(docxml)
+         super
+         docxml
+       end
+
+       def middle(isoxml, out)
+         clause isoxml, out
+         annex isoxml, out
+         bibliography isoxml, out
+       end
+
+       def clause_parse_title(node, div, c1, out)
+         if node["inline-header"] == "true"
+           inline_header_title(out, node, c1)
+         else
+           div.send "h#{anchor(node['id'], :level, false) || '1'}" do |h|
+             lbl = anchor(node['id'], :label, false)
+             h << "#{lbl}. " if lbl && !@suppressheadingnumbers
+             insert_tab(h, 1) if lbl && !@suppressheadingnumbers
+             c1&.children&.each { |c2| parse(c2, h) }
+           end
+         end
+       end
+
+       def introduction(isoxml, out)
+         f = isoxml.at(ns("//introduction")) || return
+         page_break(out)
+         out.div **{ class: "Section3", id: f["id"] } do |div|
+           div.h1(**{ class: "IntroTitle" }) do |h1|
+             h1 << @introduction_lbl
+           end
+           f.elements.each do |e|
+             parse(e, div) unless e.name == "title"
+           end
+         end
+       end
+
+       def foreword(isoxml, out)
+         f = isoxml.at(ns("//foreword")) || return
+         page_break(out)
+         out.div **attr_code(id: f["id"]) do |s|
+           s.h1(**{ class: "ForewordTitle" }) do |h1|
+             h1 << @foreword_lbl
+           end
+           f.elements.each { |e| parse(e, s) unless e.name == "title" }
+         end
+       end
+
+       include BaseConvert
+     end
+   end
+ end
+
@@ -0,0 +1,180 @@
+ require_relative "base_convert"
+ require "isodoc"
+
+ module IsoDoc
+   module UN
+     # A {Converter} implementation that generates Word output, and a document
+     # schema encapsulation of the document for validation
+
+     class WordConvert < IsoDoc::WordConvert
+       def initialize(options)
+         @libdir = File.dirname(__FILE__)
+         super
+         @toc = options[:toc]
+       end
+
+       #def convert1(docxml, filename, dir)
+       #FileUtils.cp html_doc_path('logo.jpg'), File.join(@localdir, "logo.jpg")
+       #super
+       #end
+
+       def default_fonts(options)
+         {
+           bodyfont: (options[:script] == "Hans" ? '"SimSun",serif' : '"Times New Roman",serif'),
+           headerfont: (options[:script] == "Hans" ? '"SimHei",sans-serif' : '"Times New Roman",serif'),
+           monospacefont: '"Courier New",monospace'
+         }
+       end
+
+       def default_file_locations(options)
+         {
+           wordstylesheet: html_doc_path("wordstyle.scss"),
+           standardstylesheet: html_doc_path("unece.scss"),
+           header: html_doc_path("header.html"),
+           wordcoverpage: html_doc_path("word_unece_titlepage.html"),
+           wordintropage: html_doc_path("word_unece_intro.html"),
+           ulstyle: "l3",
+           olstyle: "l2",
+         }
+       end
+
+       def footnotes(div)
+         if @meta.get[:item_footnote]
+           fn = noko do |xml|
+             xml.aside **{ id: "ftnitem" } do |div|
+               div.p @meta.get[:item_footnote]
+             end
+           end.join("\n")
+           @footnotes.unshift fn
+         end
+         super
+       end
+
+       def make_body(xml, docxml)
+         plenary = docxml.at(ns("//bibdata/ext[doctype = 'plenary']"))
+         if plenary && @wordcoverpage == html_doc_path("word_unece_titlepage.html")
+           @wordcoverpage = html_doc_path("word_unece_plenary_titlepage.html")
+         end
+         @wordintropage = nil if plenary && !@toc
+         body_attr = { lang: "EN-US", link: "blue", vlink: "#954F72" }
+         xml.body **body_attr do |body|
+           make_body1(body, docxml)
+           make_body2(body, docxml)
+           make_body3(body, docxml)
+         end
+       end
+
+       def make_body2(body, docxml)
+         body.div **{ class: "WordSection2" } do |div2|
+           info docxml, div2
+           boilerplate docxml, div2
+           abstract docxml, div2
+           foreword docxml, div2
+           introduction docxml, div2
+           div2.p { |p| p << "&nbsp;" } # placeholder
+         end
+         section_break(body)
+       end
+
+       ENDLINE = <<~END.freeze
+         <v:line
+         alt="" style='position:absolute;left:0;text-align:left;z-index:251662848;
+         mso-wrap-edited:f;mso-width-percent:0;mso-height-percent:0;
+         mso-width-percent:0;mso-height-percent:0'
+         from="6.375cm,20.95pt" to="10.625cm,20.95pt"
+         strokeweight="1.5pt"/>
+       END
+
+       def end_line(_isoxml, out)
+         out.parent.add_child(ENDLINE)
+       end
+
+       def middle(isoxml, out)
+         clause isoxml, out
+         annex isoxml, out
+         bibliography isoxml, out
+         end_line(isoxml, out)
+       end
+
+       def clause_parse_title(node, div, c1, out)
+         if node["inline-header"] == "true"
+           inline_header_title(out, node, c1)
+         else
+           div.send "h#{anchor(node['id'], :level, false) || '1'}" do |h|
+             lbl = anchor(node['id'], :label, false)
+             if lbl && !@suppressheadingnumbers
+               h << "#{lbl}. "
+               insert_tab(h, 1)
+             end
+             c1&.children&.each { |c2| parse(c2, h) }
+           end
+         end
+       end
+
+       def introduction(isoxml, out)
+         f = isoxml.at(ns("//introduction")) || return
+         out.div **{ class: "Section3", id: f["id"] } do |div|
+           page_break(out)
+           div.p(**{ class: "IntroTitle" }) do |h1|
+             h1 << @introduction_lbl
+           end
+           f.elements.each do |e|
+             parse(e, div) unless e.name == "title"
+           end
+         end
+       end
+
+       def foreword(isoxml, out)
+         f = isoxml.at(ns("//foreword")) || return
+         out.div **attr_code(id: f["id"]) do |s|
+           page_break(out)
+           s.p(**{ class: "ForewordTitle" }) do |h1|
+             h1 << @foreword_lbl
+           end
+           f.elements.each { |e| parse(e, s) unless e.name == "title" }
+         end
+       end
+
+       def word_preface(docxml)
+         super
+         preface_container = docxml.at("//div[@id = 'preface_container']") # recommendation
+         abstractbox = docxml.at("//div[@id = 'abstractbox']") # plenary
+         foreword = docxml.at("//p[@class = 'ForewordTitle']/..")
+         intro = docxml.at("//p[@class = 'IntroTitle']/..")
+         abstract = docxml.at("//p[@class = 'AbstractTitle']/..")
+         abstract.parent = (abstractbox || preface_container) if abstract
+         abstractbox and abstract&.xpath(".//p/br")&.each do |a|
+           a.parent.remove if /page-break-before:always/.match(a["style"])
+         end
+         docxml&.at("//p[@class = 'AbstractTitle']")&.remove if abstractbox
+         foreword.parent = preface_container if foreword && preface_container
+         intro.parent = preface_container if intro && preface_container
+         if preface_container && (foreword || intro)
+           preface_container.at("./div/p[br]").remove # remove initial page break
+         end
+         if abstractbox && !intro && !foreword && !@toc
+           sect2 = docxml.at("//div[@class='WordSection2']")
+           sect2.next_element.remove # pagebreak
+           sect2.remove # pagebreak
+         end
+       end
+
+       def abstract(isoxml, out)
+         f = isoxml.at(ns("//abstract")) || return
+         out.div **attr_code(id: f["id"]) do |s|
+           page_break(out)
+           s.p(**{ class: "AbstractTitle" }) { |h1| h1 << @abstract_lbl }
+           f.elements.each { |e| parse(e, s) unless e.name == "title" }
+         end
+       end
+
+       def authority_cleanup(docxml)
+         super
+         a = docxml.at("//div[@id = 'boilerplate-ECEhdr']") and a["class"] = "boilerplate-ECEhdr"
+         docxml&.at("//div[@class = 'authority']")&.remove
+       end
+
+       include BaseConvert
+     end
+   end
+ end
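This final hunk is data/lib/isodoc/un/word_convert.rb; unlike the HTML and PDF converters it also stores a :toc option, which make_body and word_preface consult when deciding whether to keep the Word intro page. A hedged usage sketch along the same lines as above, with placeholder option values and filename:

    require "metanorma-un"

    # :toc is read in initialize; "recommendation.xml" stands for a Metanorma UN
    # semantic XML file.
    IsoDoc::UN::WordConvert.new(language: "en", script: "Latn", toc: true)
      .convert("recommendation.xml")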