markdownr 0.6.17 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,157 @@
1
+ module MarkdownServer
2
+ module Helpers
3
+ module SearchHelpers
4
+ BINARY_EXTENSIONS = %w[
5
+ .png .jpg .jpeg .gif .bmp .ico .svg .webp
6
+ .pdf .epub .mobi
7
+ .zip .gz .tar .bz2 .7z .rar
8
+ .exe .dll .so .dylib .o
9
+ .mp3 .mp4 .avi .mov .wav .flac .ogg
10
+ .woff .woff2 .ttf .eot .otf
11
+ .pyc .class .beam
12
+ .sqlite .db
13
+ ].freeze
14
+
15
+ MAX_SEARCH_FILES = 100
16
+ MAX_FILE_READ_BYTES = 512_000 # 500KB
17
+ CONTEXT_LINES = 2 # lines before/after match to send
18
+ MAX_LINE_DISPLAY = 200 # chars before truncating a line
19
+
20
# Search one file against every compiled regex.
#
# Reads at most MAX_FILE_READ_BYTES of the file, skips unreadable or
# non-UTF-8 content, and requires ALL regexes to match somewhere in the
# content before collecting per-line matches.
#
# @param file_path [String] absolute path of the file to scan
# @param regexes [Array<Regexp>] patterns that must all match
# @return [Array<Hash>] empty, or a one-element array of
#   { path: <path relative to root_dir>, matches: <context groups> }
def search_single_file(file_path, regexes)
  base = File.realpath(root_dir)
  begin
    content = File.binread(file_path, MAX_FILE_READ_BYTES)
  rescue StandardError
    # Unreadable file (permissions, deleted mid-walk, ...): no matches.
    return []
  end
  content.force_encoding("utf-8")
  return [] unless content.valid_encoding?
  return [] unless regexes.all? { |re| re.match?(content) }

  # delete_prefix strips only a LEADING "#{base}/"; String#sub would
  # replace the first occurrence anywhere in the path.
  relative = file_path.delete_prefix("#{base}/")
  [{ path: relative, matches: collect_matching_lines(content.lines, regexes) }]
end
36
+
37
# Walk dir_path recursively and return the files whose content matches
# ALL of the given regexes, capped at MAX_SEARCH_FILES results.
#
# @param dir_path [String] directory to search (under root_dir)
# @param regexes [Array<Regexp>] patterns that must all match per file
# @return [Array<Hash>] one { path:, matches: } hash per matching file
def search_files(dir_path, regexes)
  results = []
  base = File.realpath(root_dir)

  catch(:search_limit) do
    walk_directory(dir_path) do |file_path|
      throw :search_limit if results.length >= MAX_SEARCH_FILES

      begin
        content = File.binread(file_path, MAX_FILE_READ_BYTES)
      rescue StandardError
        # Unreadable file: skip it rather than abort the whole search.
        # (Explicit begin/rescue replaces the `rescue next` modifier,
        # which silently swallowed every StandardError inline.)
        next
      end
      content.force_encoding("utf-8")
      next unless content.valid_encoding?

      # All regexes must match somewhere in the file
      next unless regexes.all? { |re| re.match?(content) }

      # delete_prefix strips only the leading base path; String#sub
      # would replace an accidental repeat deeper in the path.
      relative = file_path.delete_prefix("#{base}/")
      results << { path: relative, matches: collect_matching_lines(content.lines, regexes) }
    end
  end

  results
end
62
+
63
# Recursively yield every regular, searchable file beneath dir_path.
# Hidden entries, EXCLUDED names and known binary extensions are skipped.
def walk_directory(dir_path, &block)
  Dir.entries(dir_path).sort.each do |name|
    next if name.start_with?(".") || EXCLUDED.include?(name)

    path = File.join(dir_path, name)
    if File.directory?(path)
      walk_directory(path, &block)
    elsif File.file?(path) && !BINARY_EXTENSIONS.include?(File.extname(name).downcase)
      yield path
    end
  end
end
77
+
78
# Group matching lines with +/- CONTEXT_LINES of surrounding context.
#
# Returns an array of groups; each group is { lines: [...] } where every
# entry carries its 1-based :number, chomped :text, and :distance (0 for
# a matching line, otherwise line-distance to the nearest match).
def collect_matching_lines(lines, regexes)
  hits = Set.new
  lines.each_with_index do |line, idx|
    hits << idx if regexes.any? { |re| re.match?(line) }
  end

  # Merge overlapping/adjacent context windows into contiguous groups.
  groups = []
  hits.sort.each do |hit|
    lo = [hit - CONTEXT_LINES, 0].max
    hi = [hit + CONTEXT_LINES, lines.length - 1].min
    if groups.any? && lo <= groups.last[:end] + 1
      groups.last[:end] = hi
    else
      groups << { start: lo, end: hi }
    end
  end

  groups.map do |grp|
    rows = (grp[:start]..grp[:end]).map do |idx|
      dist = hits.include?(idx) ? 0 : hits.map { |m| (m - idx).abs }.min
      { number: idx + 1, text: lines[idx].to_s.chomp, distance: dist }
    end
    { lines: rows }
  end
end
109
+
110
# Render one result line as HTML, truncating long lines around the first
# match and wrapping each matched substring in a highlight span.
#
# @param text [String] the raw line (already chomped)
# @param regexes [Array<Regexp>] compiled query patterns
# @param is_match [Boolean] whether this line itself matched (vs context)
# @return [String] HTML-escaped markup
def highlight_search_line(text, regexes, is_match)
  # Build a combined regex with non-greedy quantifiers for shorter highlights.
  # NOTE(review): the source rewrite assumes bare *, + or } quantifiers;
  # a literal unescaped brace in a pattern would also be made lazy.
  combined = Regexp.union(regexes.map { |r|
    Regexp.new(r.source.gsub(/(?<!\\)([*+}])(?!\?)/, '\1?'), r.options)
  })

  # Truncate long lines, centering around the first match
  prefix_trunc = false
  suffix_trunc = false
  if text.length > MAX_LINE_DISPLAY
    if is_match && (m = combined.match(text))
      center = m.begin(0) + m[0].length / 2
      half = MAX_LINE_DISPLAY / 2
      start = [[center - half, 0].max, [text.length - MAX_LINE_DISPLAY, 0].max].min
    else
      start = 0
    end
    prefix_trunc = start > 0
    suffix_trunc = (start + MAX_LINE_DISPLAY) < text.length
    text = text[start, MAX_LINE_DISPLAY]
  end

  html = ""
  html << '<span class="truncated">...</span>' if prefix_trunc
  if is_match
    # Walk the line match-by-match so user patterns containing capture
    # groups still highlight the full match. The previous
    # text.split(combined) / text.scan(combined) approach returned the
    # CAPTURED GROUPS instead of the whole match whenever a query word
    # contained parentheses (e.g. "(b)ar"), corrupting the output.
    pos = 0
    while (m = combined.match(text, pos))
      html << h(text[pos...m.begin(0)])
      html << %(<span class="highlight-match">#{h(m[0])}</span>)
      pos = m.end(0)
      if m.end(0) == m.begin(0)
        # Zero-width match: emit the next char and advance to avoid looping.
        break if pos >= text.length
        html << h(text[pos])
        pos += 1
      end
    end
    html << h(text[pos..].to_s)
  else
    html << h(text)
  end
  html << '<span class="truncated">...</span>' if suffix_trunc
  html
end
147
+
148
# Compile a whitespace-separated query into case-insensitive regexes.
#
# @param query [String] raw user query; each word is its own pattern
# @return [Array<Regexp>, nil] nil when the query has no words
# @raise [RegexpError] when a word is not a valid pattern
def compile_regexes(query)
  words = query.split(/\s+/).reject(&:empty?)
  return nil if words.empty?

  # Invalid patterns raise RegexpError straight out of Regexp.new; the
  # previous rescue-and-re-raise of the same class/message was a no-op.
  words.map { |w| Regexp.new(w, Regexp::IGNORECASE) }
end
155
+ end
156
+ end
157
+ end
@@ -26,7 +26,13 @@ module MarkdownServer
26
26
  def transform_markdown(text) text end
27
27
 
28
28
  # Hook: transform raw HTML (runs on .html files before popup injection)
29
- def transform_html(html) html end
29
+ def transform_html(html, app = nil) html end
30
+
31
+ # Hook: claim a /fetch URL — return { title:, html: } hash or nil to pass
32
+ def process_fetch(url, html, app) nil end
33
+
34
+ # Hook: post-process rendered markdown HTML (runs after render_markdown)
35
+ def post_render(html, meta, app) html end
30
36
  end
31
37
 
32
38
  class PluginRegistry
@@ -39,7 +45,13 @@ module MarkdownServer
39
45
  registered << klass unless registered.include?(klass)
40
46
  end
41
47
 
42
- def load_plugins(root_dir, cli_overrides = {})
48
+ def load_plugins(root_dir, cli_overrides = {}, plugin_dirs: [])
49
+ # Discover and require plugins from external directories
50
+ plugin_dirs.each do |dir|
51
+ expanded = File.expand_path(dir)
52
+ Dir[File.join(expanded, "*", "plugin.rb")].sort.each { |f| require f }
53
+ end
54
+
43
55
  config = resolve_config(root_dir, cli_overrides)
44
56
  registered.filter_map do |klass|
45
57
  plugin_config = config.fetch(klass.plugin_name, {})
@@ -0,0 +1,365 @@
1
+ # frozen_string_literal: true
2
+
3
+ module MarkdownServer
4
+ module Plugins
5
+ class BibleCitations
6
+ module Helpers
7
+ # Server-side Strong's dictionary cache
8
+ @@strongs_cache = nil
9
+ @@strongs_cache_url = nil
10
+ @@strongs_fetched_at = nil
11
+ DICTIONARY_TTL = 3600 # 1 hour
12
+
13
+ # ── Strong's Dictionary ───────────────────────────────────────────────
14
+
15
# Lazily load and memoize the Strong's-number -> dictionary-URL map.
#
# The JSON source named by settings.dictionary_url may be an HTTP(S)
# URL (cache considered fresh for DICTIONARY_TTL seconds) or a path
# relative to root_dir (cache considered fresh until its mtime
# advances past the last fetch). On any fetch/parse failure the last
# good cache (or {}) is returned.
# NOTE(review): the "(unknown)" placeholder in the URL template looks
# like it may have been mangled by the registry diff — confirm against
# the shipped source before relying on it.
def strongs_map
  url = settings.dictionary_url
  return {} unless url

  now = Time.now
  remote = url.match?(/\Ahttps?:\/\//i)

  # Serve the cache while it is still fresh for this exact URL.
  if @@strongs_cache && @@strongs_cache_url == url && @@strongs_fetched_at
    if remote
      return @@strongs_cache if (now - @@strongs_fetched_at) < DICTIONARY_TTL
    else
      path = File.expand_path(url, root_dir)
      mtime = File.mtime(path) rescue nil
      return @@strongs_cache if mtime && mtime <= @@strongs_fetched_at
    end
  end

  begin
    payload =
      if remote
        uri = URI.parse(url)
        http = Net::HTTP.new(uri.host, uri.port)
        http.use_ssl = (uri.scheme == "https")
        http.open_timeout = FetchHelpers::FETCH_TIMEOUT
        http.read_timeout = 10
        request = Net::HTTP::Get.new(uri.request_uri)
        request["Accept"] = "application/json"
        response = http.request(request)
        return @@strongs_cache || {} unless response.is_a?(Net::HTTPSuccess)
        response.body
      else
        path = File.expand_path(url, root_dir)
        return @@strongs_cache || {} unless File.exist?(path)
        File.read(path, encoding: "utf-8")
      end

    data = JSON.parse(payload)
    template = data["url"] || ""
    lookup = {}
    %w[greek hebrew].each do |lang|
      stems = data.dig("stems", lang)
      next unless stems.is_a?(Hash)
      stems.each_value do |entry|
        number = entry["strongs"].to_s.strip.upcase
        next if number.empty?
        lookup[number] = template.gsub("(unknown)", entry["filename"].to_s)
      end
    end
    @@strongs_cache = lookup
    @@strongs_cache_url = url
    @@strongs_fetched_at = now
    lookup
  rescue StandardError
    @@strongs_cache || {}
  end
end
68
+
69
# Resolve a Strong's number ("G2316" / "H430", any case) to a popup
# URL: the configured dictionary entry when present in strongs_map,
# else the Blue Letter Bible lexicon page. Nil for malformed numbers.
def strongs_popup_url(strongs)
  number = strongs.to_s.strip.upcase
  return nil unless number.match?(/\A[GH]\d+\z/)

  dictionary_url = strongs_map[number]
  return dictionary_url if dictionary_url

  corpus = number.start_with?("H") ? "wlc" : "tr"
  "https://www.blueletterbible.org/lexicon/#{number.downcase}/nasb20/#{corpus}/0-1/"
end
77
+
78
# Add data-popup-url attributes to interlinear spans carrying a
# data-strongs attribute so the client can open dictionary popups:
# "subst" spans get the popup resolver URL, "word" spans link straight
# to the dictionary entry. No-op when no dictionary_url is configured
# or the span already has a data-popup-url.
def inject_strongs_urls(html)
  return html unless settings.dictionary_url

  html = html.gsub(/<span\s+class="subst"([^>]*data-strongs="([^"]+)"[^>]*)>/) do
    attrs = Regexp.last_match(1)
    popup_url = strongs_popup_url(Regexp.last_match(2))
    if popup_url && !attrs.include?("data-popup-url")
      %(<span class="subst" data-popup-url="#{h(popup_url)}"#{attrs}>)
    else
      Regexp.last_match(0)
    end
  end
  html.gsub(/<span\s+class="word"([^>]*data-strongs="([^"]+)"[^>]*)>/) do
    attrs = Regexp.last_match(1)
    dict_url = strongs_map[Regexp.last_match(2).strip.upcase]
    if dict_url && !attrs.include?("data-popup-url")
      %(<span class="word" data-popup-url="#{h(dict_url)}"#{attrs}>)
    else
      Regexp.last_match(0)
    end
  end
end
99
+
100
+ # ── Blue Letter Bible ─────────────────────────────────────────────────
101
+
102
+ BLB_BASE = "https://www.blueletterbible.org"
103
+
104
# Assemble the popup body for a Blue Letter Bible lexicon page:
# info table, inflections, biblical usage, then concordance.
def blueletterbible_html(html, url)
  info = extract_blb_info(html, url)
  [
    info[:table],
    extract_blb_inflections(html, info[:word]),
    extract_blb_usage(html),
    extract_blb_concordance(html),
  ].join
end
112
+
113
+ # ── Scripture Pages ───────────────────────────────────────────────────
114
+
115
# Extract the passage body from a scripture page, tag verse numbers
# with data-verse attributes, inline the page's stylesheets, and wrap
# everything in a readable serif container. Falls back to the generic
# page extractor when no passage div is found.
def scrip_html(html, source_url = nil)
  content = html[/<div\s+class="passage-text[^"]*"[^>]*>([\s\S]*)<\/div>\s*(?:<nav|<script|<style|\z)/im, 1] || ""
  return page_html(html, source_url) if content.empty?

  content = content.gsub(/<sup class="verse-number">(\d+)[^<]*<\/sup>/) do
    %(<sup class="verse-number" data-verse="#{$1}">#{$1} </sup>)
  end
  content = inject_strongs_urls(content)

  styles = inline_external_css(html, source_url)
  styles + '<div style="font-family:Georgia,serif;line-height:1.7">' + content + "</div>"
end
127
+
128
# Extract the rendered body (and trailing frontmatter block, if any)
# from another markdownr page, capped at 15,000 characters. Falls back
# to the generic page extractor for trivially short content.
def markdownr_html(html)
  content = html[/<div[^>]+class="md-content"[^>]*>([\s\S]*?)<\/div>\s*(?:<div\s+class="frontmatter"|<\/div>\s*<\/div>|\z)/im, 1]
  return page_html(html) unless content && content.length > 10

  frontmatter = html[/<div\s+class="frontmatter">([\s\S]*?)<\/div>\s*<\/div>/im, 0] || ""
  body = content.strip
  body += "\n#{frontmatter}" unless frontmatter.empty?
  body.length > 15_000 ? body[0, 15_000] : body
end
137
+
138
+ # ── Pronunciation Icons ───────────────────────────────────────────────
139
+
140
# Append pronunciation audio buttons (Blue Letter Bible + StudyLight)
# to the first </h2> of a definition page whose frontmatter declares a
# Strong's number and a language of "hebrew" or "greek". Returns the
# html unchanged when the metadata is absent or malformed.
def inject_pronunciation_icon(html, meta)
  return html unless meta.is_a?(Hash) && meta["strongs"] && meta["language"]

  strongs = meta["strongs"].to_s.strip
  lang = meta["language"].to_s.strip.downcase
  return html unless strongs.match?(/\A[GH]\d+\z/i) && %w[hebrew greek].include?(lang)

  sn = strongs.upcase
  blb_btn = audio_button("/pronunciation?strongs=#{sn}", "Blue Letter Bible",
                         lazy_fetch: true, data_strongs: sn)

  sl_lang = lang == "hebrew" ? "hebrew" : "greek"
  sl_num = sn.sub(/\A[HG]/, "")
  sl_btn = audio_button("https://www.studylight.org/multi-media/audio/lexicons/eng/#{sl_lang}.html?n=#{sl_num}", "StudyLight")

  html.sub(%r{</h2>}) { " #{blb_btn}#{sl_btn}</h2>" }
end
157
+
158
+ private
159
+
160
+ # ── Shared Helpers ────────────────────────────────────────────────────
161
+
162
# Render an inline speaker button that plays a pronunciation clip.
#
# With lazy_fetch: true the URL is a JSON endpoint ({"audio": <url>})
# fetched on first click; otherwise it is treated as a direct audio
# file. The Audio object is cached on the button between clicks.
def audio_button(url, source, lazy_fetch: false, data_strongs: nil)
  data_attr = data_strongs ? %( data-strongs="#{h(data_strongs)}") : ""
  style = "background:none;border:none;cursor:pointer;padding:0 0 0 4px;font-size:1.1em;vertical-align:middle;line-height:1;color:#888;"
  onclick =
    if lazy_fetch
      # Fetch-once handler: dims the button while loading, replays on
      # subsequent clicks without re-fetching.
      "(function(btn){if(btn._loading)return;var a=btn._audio;if(a){a.currentTime=0;a.play();return;}" \
      "btn._loading=true;btn.style.opacity='0.4';" \
      "fetch('#{h(url)}').then(function(r){return r.json()}).then(function(d){" \
      "if(d.audio){a=new Audio(d.audio);btn._audio=a;a.play();}btn._loading=false;btn.style.opacity='1';}).catch(function(){" \
      "btn._loading=false;btn.style.opacity='1';});})(this)"
    else
      "var a=this._a||(this._a=new Audio('#{h(url)}'));a.currentTime=0;a.play();"
    end
  %(<button class="pronunciation-btn"#{data_attr} title="Play pronunciation (#{h(source)})" style="#{style}" onclick="#{onclick}">&#128266;</button>)
end
176
+
177
# Fetch the page's external stylesheets and return them as inline
# <style> blocks; one level of @import is resolved relative to each
# stylesheet's own URL. Returns "" when source_url is missing/unparsable.
def inline_external_css(html, source_url)
  return "" unless source_url

  base_uri = (URI.parse(source_url) rescue nil)
  return "" unless base_uri

  css_blocks = +""
  html.scan(/<link[^>]+rel=["']stylesheet["'][^>]*>/i).each do |tag|
    href = tag[/href=["']([^"']+)["']/i, 1]
    next unless href

    abs = (URI.join(base_uri, href).to_s rescue nil)
    next unless abs

    css_body = fetch_css(abs)
    next unless css_body

    # Resolve @import directives against the stylesheet's directory.
    css_dir = abs.sub(%r{/[^/]*\z}, "/")
    css_body = css_body.gsub(/@import\s+url\(["']?([^"')]+)["']?\)\s*;/) do
      import_url = (URI.join(css_dir, $1).to_s rescue nil)
      import_url ? (fetch_css(import_url) || "") : ""
    end
    # << appends in place; the old += allocated a fresh string per sheet.
    css_blocks << "<style>#{css_body}</style>\n"
  end
  css_blocks
end
200
+
201
+ # ── Blue Letter Bible Section Extractors ──────────────────────────────
202
+
203
# Build the summary table (word, transliteration, pronunciation with
# audio buttons, part of speech) for a Blue Letter Bible lexicon page.
#
# @return [Hash] { table: <html>, word: <plain lexical form> }
def extract_blb_info(html, url)
  strip_tags = ->(s) { s.gsub(/<[^>]+>/, "").strip }

  word = html[/<h6[^>]+class="lexTitle(?:Gk|Hb)"[^>]*>(.*?)<\/h6>/im, 1]
  word = word ? strip_tags.call(word) : ""
  transliteration = html[/<div[^>]+id="lexTrans".*?<em>(.*?)<\/em>/im, 1]&.strip || ""
  pronunciation = html[/class="[^"]*lexicon-pronunc[^"]*"[^>]*>\s*([^\n<]{1,50})/i, 1]&.strip || ""
  pos = html[/<div[^>]+id="lexPart".*?small-text-right"[^>]*>(.*?)<\/div>/im, 1]
  pos = pos ? strip_tags.call(pos) : ""

  audio_buttons = build_blb_audio_buttons(html, url)

  rows = [
    ["Word", h(word)],
    ["Transliteration", "<em>#{h(transliteration)}</em>"],
    ["Pronunciation", "#{h(pronunciation)}#{audio_buttons}"],
    ["Part of Speech", h(pos)],
  ]
  cells = rows.map { |label, value|
    %(<tr><th class="blb-th">#{h(label)}</th><td>#{value}</td></tr>)
  }.join
  { table: %(<table class="blb-table">) + cells + "</table>", word: word }
end
226
+
227
# Collect pronunciation buttons for a lexicon page: BLB's own player
# (when a data-pronunc token is present in the page) plus a StudyLight
# fallback derived from the lexicon URL.
def build_blb_audio_buttons(html, url)
  buttons = +""

  token = html[/data-pronunc="([a-fA-F0-9]{20,})"/i, 1] || ""
  if token.length > 10
    player = "#{BLB_BASE}/lang/lexicon/lexPronouncePlayer.cfm?skin=#{token}"
    buttons << audio_button(player, "Blue Letter Bible")
  end

  if (m = url.to_s.match(%r{/lexicon/([hg])(\d+)/}i))
    language = m[1].downcase == "h" ? "hebrew" : "greek"
    sl_url = "https://www.studylight.org/multi-media/audio/lexicons/eng/#{language}.html?n=#{m[2]}"
    buttons << audio_button(sl_url, "StudyLight")
  end
  buttons
end
243
+
244
# Render the Greek TR inflection table (form + frequency), most
# frequent first; the row matching the headword is highlighted. Only
# the TR section is scanned — parsing stops at the MGNT/LXX sections.
def extract_blb_inflections(html, word)
  anchor = html.match(/<div\s[^>]*id="greek-tr-inflections"[^>]*>/im)
  return "" unless anchor

  tail = html[anchor.end(0)..]
  stop = tail.index(/<div\s[^>]*id="greek-(?:mgnt|lxx)-inflections"/i) || tail.length
  section = tail[0...stop]

  inflections = []
  section.scan(/<div\s[^>]*class="greekInflection"[^>]*>(.*?)<\/div>\s*<\/div>/im) do |captures|
    chunk = captures[0]
    href = chunk[/href="([^"]+)"/i, 1]
    form = chunk[/<span[^>]+class="Gk"[^>]*>(.*?)<\/span>/im, 1]&.gsub(/<[^>]+>/, "")&.strip || ""
    freq = chunk[/&#8212;\s*(\d+)x<\/a>/i, 1]&.to_i || 0
    next if form.empty? || freq.zero?
    inflections << { word: form, freq: freq,
                     href: href ? BLB_BASE + href.gsub("&amp;", "&") : nil }
  end
  return "" if inflections.empty?

  rows = inflections.sort_by { |i| -i[:freq] }.map { |i|
    cls = i[:word] == word ? ' class="blb-match"' : ""
    link = i[:href] ? %(<a href="#{h(i[:href])}" target="_blank" rel="noopener">#{h(i[:word])}</a>) : h(i[:word])
    %(<tr><td#{cls}>#{link}</td><td class="blb-right">#{i[:freq]}x</td></tr>)
  }.join

  %(<h4 class="blb-heading">Inflections</h4>) +
    %(<table class="blb-table"><thead><tr><th class="blb-th">Form</th>) +
    %(<th class="blb-th blb-right">Count</th></tr></thead><tbody>#{rows}</tbody></table>)
end
276
+
277
# Extract the "Outline of Biblical Usage" block, normalizing tag names
# to lowercase and collapsing runs of spaces/tabs.
def extract_blb_usage(html)
  opening = html.match(/<div[^>]+id="outlineBiblical"[^>]*>/im)
  return "" unless opening

  body = html[opening.end(0)..].match(/\A\s*<div>([\s\S]*?)<\/div>\s*<\/div>/im)
  return "" unless body

  cleaned = body[1]
            .gsub(/<(\/?)(\w+)[^>]*>/) { "<#{$1}#{$2.downcase}>" }
            .gsub(/[ \t]+/, " ")
            .strip
  %(<h4 class="blb-heading">Biblical Usage</h4><div class="blb-usage">#{cleaned}</div>)
end
291
+
292
# Build the concordance table: one row per verse occurrence, citation
# link plus cleaned verse text. The translation name, when present on
# the bibleTable element, is shown in the heading.
def extract_blb_concordance(html)
  translation = html[/id="bibleTable"[^>]+data-translation="([^"]+)"/i, 1] || ""
  chunks = html.split(/<div\s[^>]*id="bVerse_\d+"[^>]*>/).drop(1)
  return "" if chunks.empty?

  verses = chunks.filter_map { |chunk| extract_blb_verse(chunk) }
  return "" if verses.empty?

  rows = verses.map { |v|
    link = v[:href] ? %(<a href="#{h(v[:href])}" target="_blank" rel="noopener">#{h(v[:cite])}</a>) : h(v[:cite])
    %(<tr><td class="blb-nowrap">#{link}</td><td>#{v[:verse_html]}</td></tr>)
  }.join
  heading = translation.empty? ? "Concordance" : "Concordance (#{h(translation)})"
  %(<h4 class="blb-heading">#{heading}</h4>) +
    %(<table class="blb-table"><tbody>#{rows}</tbody></table>)
end
308
+
309
# Pull citation, link and cleaned verse HTML out of one bVerse chunk;
# nil when either the citation or the verse text is missing.
def extract_blb_verse(chunk)
  cite_href = chunk[/tablet-order-2[^>]*>[\s\S]{0,400}?href="([^"]+)"/im, 1] || ""
  cite = chunk[/tablet-order-2[^>]*>[\s\S]{0,400}?<a[^>]*>(.*?)<\/a>/im, 1]
         &.gsub(/<[^>]+>/, "")&.strip || ""

  raw = chunk[/class="EngBibleText[^"]*"[^>]*>([\s\S]*?)<\/div>/im, 1] || ""
  verse_html = clean_blb_verse_html(raw, cite)
  return nil if cite.empty? || verse_html.empty?

  href =
    if cite_href.empty?
      nil
    elsif cite_href.start_with?("http")
      cite_href
    else
      BLB_BASE + cite_href
    end
  { cite: cite, verse_html: verse_html, href: href }
end
321
+
322
# Reduce a raw BLB verse fragment to plain text with the criteria
# (searched) word wrapped in a highlight span.
#
# Criteria words are first marked with \x02/\x03 control characters so
# they survive tag stripping, then restored as spans at the end.
def clean_blb_verse_html(raw_html, cite)
  raw_html = raw_html.dup
  raw_html.gsub!(/<img[^>]*>/, "")
  raw_html.gsub!(/<a[^>]*class="hide-for-tablet"[^>]*>[\s\S]*?<\/a>/im, "")
  raw_html.gsub!(/<span[^>]*class="hide-for-tablet"[^>]*>[\s\S]*?<\/span>/im, "")

  # Mark criteria words with control-char placeholders so they survive tag stripping
  text = raw_html.gsub(/<span\s[^>]*class="word-phrase"[^>]*>([\s\S]*?)<\/span>/im) do
    inner = $1
    word = inner.sub(/<sup[\s\S]*/im, "").gsub(/<[^>]+>/, "").gsub(/&nbsp;/i, " ").strip
    inner.match?(/<sup[^>]*class="[^"]*strongs criteria[^"]*"/i) ?
      "\x02#{word}\x03" : word
  end

  # Fallback for translations without word-phrase spans (NASB, ESV, etc.)
  unless text.include?("\x02")
    text.gsub!(/([\w]+[,;:.!?'"]*)\s*<sup[^>]*class="[^"]*strongs criteria[^"]*"[\s\S]*?<\/sup>/im) do
      "\x02#{$1}\x03"
    end
  end

  # Strip remaining HTML tags and decode entities. "&amp;" must be
  # decoded LAST: decoding it before "&lt;"/"&gt;" double-unescaped
  # text like "&amp;lt;" into a literal "<".
  text.gsub!(/<sup[^>]*>[\s\S]*?<\/sup>/im, "")
  text.gsub!(/<[^>]+>/, "")
  text.gsub!(/&nbsp;/i, " ")
  text.gsub!(/&#(\d+);/) { [$1.to_i].pack("U") rescue " " }
  text.gsub!(/&#x([\da-f]+);/i) { [$1.to_i(16)].pack("U") rescue " " }
  text.gsub!(/&lt;/, "<")
  text.gsub!(/&gt;/, ">")
  text.gsub!(/&amp;/, "&")
  text.gsub!(/\s+/, " ")
  text.strip!

  # Strip mobile citation prefix left by hide-for-tablet removal
  text.sub!(/\A#{Regexp.escape(cite)}\s*-\s*/i, "")

  # Restore placeholders as highlighted spans
  text.gsub!(/\x02([^\x03]*)\x03/) { %(<span class="blb-match">#{h($1.strip)}</span>) }
  text
end
362
+ end
363
+ end
364
+ end
365
+ end
@@ -2,6 +2,7 @@
2
2
 
3
3
  require "cgi"
4
4
  require_relative "citations"
5
+ require_relative "helpers"
5
6
  require_relative "../../plugin"
6
7
 
7
8
  module MarkdownServer
@@ -9,11 +10,13 @@ module MarkdownServer
9
10
  class BibleCitations
10
11
  include MarkdownServer::Plugin
11
12
 
12
- def self.enabled_by_default? = true
13
+ def self.enabled_by_default? = false
13
14
 
14
15
  def setup(config)
15
16
  @version = config.fetch("version", Citations::DEFAULT_VERSION)
16
17
  @link_target = config.fetch("link_target", "scrip")
18
+ MarkdownServer::App.set :dictionary_url, config["dictionary_url"] if config["dictionary_url"]
19
+ MarkdownServer::App.helpers(Helpers)
17
20
  end
18
21
 
19
22
  def transform_markdown(text)
@@ -22,11 +25,32 @@ module MarkdownServer
22
25
  end
23
26
  end
24
27
 
25
- def transform_html(html)
26
- Citations.link_citations_html(html) do |canonical, verse, citation|
28
+ def transform_html(html, app = nil)
29
+ html = Citations.link_citations_html(html) do |canonical, verse, citation|
27
30
  url = citation_url(canonical, verse)
28
31
  %(<a href="#{CGI.escapeHTML(url)}">#{CGI.escapeHTML(citation)}</a>)
29
32
  end
33
+ html = app.inject_strongs_urls(html) if app.respond_to?(:inject_strongs_urls)
34
+ html
35
+ end
36
+
37
+ def process_fetch(url, html, app)
38
+ if url.match?(/blueletterbible\.org\/lexicon\//i)
39
+ raw = app.page_title(html)
40
+ title = raw.match(/^([GH]\d+ - \w+)/i)&.[](1)&.sub(" - ", " – ") ||
41
+ raw.sub(/ [-–] .*/, "").strip
42
+ { title: title, html: app.blueletterbible_html(html, url) }
43
+ elsif url.match?(%r{/definitions/[^/]+\.md(\?|#|\z)}i)
44
+ title = app.page_title(html).sub(/ [-–] .*/, "").strip
45
+ { title: title, html: app.markdownr_html(html) }
46
+ elsif url.match?(/scrip\.risensavior\.com/i) || html.match?(/<div\s+class="passage-text/i)
47
+ title = app.page_title(html).sub(/ [-–] .*/, "").strip
48
+ { title: title, html: app.scrip_html(html, url) }
49
+ end
50
+ end
51
+
52
+ def post_render(html, meta, app)
53
+ app.inject_pronunciation_icon(html, meta)
30
54
  end
31
55
 
32
56
  private
@@ -1,3 +1,3 @@
1
1
  module MarkdownServer
2
- VERSION = "0.6.17"
2
+ VERSION = "0.7.0"
3
3
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: markdownr
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.6.17
4
+ version: 0.7.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Brian Dunn
@@ -105,8 +105,15 @@ files:
105
105
  - bin/markdownr
106
106
  - lib/markdown_server.rb
107
107
  - lib/markdown_server/app.rb
108
+ - lib/markdown_server/helpers/admin_helpers.rb
109
+ - lib/markdown_server/helpers/fetch_helpers.rb
110
+ - lib/markdown_server/helpers/formatting_helpers.rb
111
+ - lib/markdown_server/helpers/markdown_helpers.rb
112
+ - lib/markdown_server/helpers/path_helpers.rb
113
+ - lib/markdown_server/helpers/search_helpers.rb
108
114
  - lib/markdown_server/plugin.rb
109
115
  - lib/markdown_server/plugins/bible_citations/citations.rb
116
+ - lib/markdown_server/plugins/bible_citations/helpers.rb
110
117
  - lib/markdown_server/plugins/bible_citations/plugin.rb
111
118
  - lib/markdown_server/version.rb
112
119
  - views/admin_login.erb