shamela 1.3.3 → 1.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,3 +1,11 @@
+ https://dev.shamela.ws/updates/win/87_64.zip
+ https://dev.shamela.ws/versions/win.php
+ https://dev.shamela.ws/api/v1/patches/master?api_key=7b9524-8fc30c-e6241o-a0167e-a6d013&version=0
+ https://dev.shamela.ws/api/v1/patches/master-download/master-0-1209.zip?api_key=7b9524-8fc30c-e6241o-a0167e-a6d013
+ https://dev.shamela.ws/covers/3.jpg?1
+ https://dev.shamela.ws/api/v1/patches/book-updates/1681?api_key=7b9524-8fc30c-e6241o-a0167e-a6d013&major_release=0&minor_release=0
+ https://ready.shamela.ws/ready/1681-6-1.zip
+
  # shamela
 
  [![wakatime](https://wakatime.com/badge/user/a0b906ce-b8e7-4463-8bce-383238df6d4b/project/faef70ab-efdb-448b-ab83-0fc66c95888e.svg)](https://wakatime.com/badge/user/a0b906ce-b8e7-4463-8bce-383238df6d4b/project/faef70ab-efdb-448b-ab83-0fc66c95888e)
@@ -30,7 +38,8 @@ A universal TypeScript library for accessing and downloading Maktabah Shamela v4
  - [Quick Start](#quick-start)
  - [Standard Node.js](#standard-nodejs)
  - [Next.js / Bundled Environments](#nextjs--bundled-environments)
- - [Browser](#browser)
+ - [Browser (Full API)](#browser-full-api)
+ - [Browser (Content Utilities Only)](#browser-content-utilities-only)
  - [API Reference](#api-reference)
  - [Configuration](#configuration)
  - [configure](#configure)
@@ -93,8 +102,9 @@ import { configure, getBook } from 'shamela';
  // Configure API credentials
  configure({
  apiKey: process.env.SHAMELA_API_KEY,
- booksEndpoint: process.env.SHAMELA_BOOKS_ENDPOINT,
- masterPatchEndpoint: process.env.SHAMELA_MASTER_ENDPOINT,
+ // Configure only the endpoints you need:
+ booksEndpoint: process.env.SHAMELA_BOOKS_ENDPOINT, // Required for book APIs
+ masterPatchEndpoint: process.env.SHAMELA_MASTER_ENDPOINT, // Required for master APIs
  // sqlJsWasmUrl is auto-detected in standard Node.js
  });
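For example, a books-only setup can omit the master endpoint entirely (a minimal sketch based on the snippet above; `26592` is the same example book ID used elsewhere in this README):

```typescript
import { configure, getBook } from 'shamela';

// Only the book APIs are configured here; the master APIs are simply left unconfigured.
configure({
    apiKey: process.env.SHAMELA_API_KEY,
    booksEndpoint: process.env.SHAMELA_BOOKS_ENDPOINT,
});

const book = await getBook(26592);
```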
 
@@ -155,7 +165,7 @@ export async function downloadBookAction(bookId: number) {
 
  **Important:** Only import `shamela` in server-side code (Server Actions, API Routes, or Server Components). Never import in client components or `layout.tsx`.
 
- ### Browser
+ ### Browser (Full API)
 
  In browsers, the library automatically uses a CDN-hosted WASM file:
 
@@ -172,6 +182,54 @@ configure({
  const book = await getBook(26592);
  ```
 
+ ### Browser (Content Utilities Only)
+
+ If you only need the content processing utilities (sanitization, parsing, etc.) without the database functionality, use the lightweight `shamela/content` export:
+
+ ```typescript
+ import {
+ mapPageCharacterContent,
+ splitPageBodyFromFooter,
+ removeTagsExceptSpan,
+ parseContentRobust,
+ } from 'shamela/content';
+
+ // Process content without loading sql.js (~1.5KB gzipped vs ~900KB)
+ const clean = removeTagsExceptSpan(mapPageCharacterContent(rawContent));
+ const [body, footnotes] = splitPageBodyFromFooter(clean);
+ ```
+
+ This is ideal for:
+ - Client-side React/Next.js components
+ - Bundled environments where you want to avoid sql.js WASM
+ - Processing pre-downloaded book data
+
+ **Available exports from `shamela/content`:**
+ - `parseContentRobust` - Parse HTML into structured lines
+ - `mapPageCharacterContent` - Normalize Arabic text with mapping rules
+ - `splitPageBodyFromFooter` - Separate body from footnotes
+ - `removeArabicNumericPageMarkers` - Remove page markers
+ - `removeTagsExceptSpan` - Strip HTML except spans
+ - `htmlToMarkdown` - Convert Shamela HTML to Markdown
+ - `normalizeHtml` - Normalize hadeeth tags to standard spans
+
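As a rough sketch of how these exports compose (the input HTML below is invented; in practice it would come from a downloaded book's page content):

```typescript
import {
    htmlToMarkdown,
    mapPageCharacterContent,
    parseContentRobust,
    removeTagsExceptSpan,
    splitPageBodyFromFooter,
} from 'shamela/content';

// Made-up page markup: a title span, body text with a ligature symbol, and a footnote section.
const raw = '<span data-type="title" id="toc-12">باب الطهارة</span>\nنص الصفحة ﵊\n_________\n(١) حاشية';

const mapped = mapPageCharacterContent(raw); // expands symbols such as ﵊ via DEFAULT_MAPPING_RULES
const [body, footnotes] = splitPageBodyFromFooter(mapped); // splits on the default '_________' marker
const lines = parseContentRobust(removeTagsExceptSpan(body)); // e.g. [{ id: '12', text: 'باب الطهارة' }, ...]
const markdown = htmlToMarkdown(body); // the title span becomes '## باب الطهارة'
```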
+ ### Extending Content Processing Rules
+
+ You can import `DEFAULT_MAPPING_RULES` from `shamela/constants` to extend or customize the character mapping used by `mapPageCharacterContent`:
+
+ ```typescript
+ import { mapPageCharacterContent } from 'shamela/content';
+ import { DEFAULT_MAPPING_RULES } from 'shamela/constants';
+
+ // Extend default rules with custom mappings
+ const customRules = {
+ ...DEFAULT_MAPPING_RULES,
+ 'customPattern': 'replacement',
+ };
+
+ const processed = mapPageCharacterContent(rawContent, customRules);
+ ```
+
  ## API Reference
 
  ### Configuration
@@ -633,6 +691,40 @@ bun run format # Format code
  bun run lint # Lint code
  ```
 
+ ## Scripts Folder
+
+ The `scripts/` directory contains standalone reverse-engineering tools for extracting and decoding data from Shamela's desktop application databases. These are **development tools**, not part of the published npm package.
+
+ ### Available Scripts
+
+ | Script | Purpose |
+ |--------|---------|
+ | `shamela-decoder.ts` | Core decoder for Shamela's custom character encoding |
+ | `export-narrators.ts` | Exports 18,989 narrator biographies from S1.db |
+ | `export-roots.ts` | Exports 3.2M Arabic word→root morphological mappings from S2.db |
+
+ ### Running Scripts
+
+ ```bash
+ # Export narrators to JSON
+ bun run scripts/export-narrators.ts
+
+ # Export Arabic roots
+ bun run scripts/export-roots.ts
+
+ # Run script tests
+ bun test scripts/
+ ```
+
+ ### Script Documentation
+
+ - `scripts/README.md` – Quick start guide and reverse-engineering methodology
+ - `scripts/AGENTS.md` – Comprehensive documentation including:
+ - Database schema discoveries
+ - Character encoding algorithm (substitution cipher)
+ - Validation approaches and coverage statistics
+ - Common patterns and debugging techniques
+
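The decoder itself lives under `scripts/` and is not included in this diff, but the documented approach (a character substitution cipher) amounts to a lookup-table pass over the encoded text; a purely hypothetical sketch:

```typescript
// Hypothetical illustration only: the real table is discovered by the reverse-engineering
// process described in scripts/AGENTS.md and shipped in scripts/shamela-decoder.ts.
const SUBSTITUTION_TABLE: Record<string, string> = {
    '\u0152': 'ا', // placeholder mapping
    '\u0153': 'ل', // placeholder mapping
};

const decodeShamelaText = (encoded: string): string =>
    [...encoded].map((ch) => SUBSTITUTION_TABLE[ch] ?? ch).join('');
```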
  ## License
 
  MIT License - see LICENSE file for details.
@@ -0,0 +1,2 @@
+ const e=0,t=`99999`,n={"<img[^>]*>>":``,舄:``,"﵀":`رَحِمَهُ ٱللَّٰهُ`,"﵁":`رضي الله عنه`,"﵂":`رَضِيَ ٱللَّٰهُ عَنْهَا`,"﵃":`رَضِيَ اللَّهُ عَنْهُمْ`,"﵄":`رَضِيَ ٱللَّٰهُ عَنْهُمَا`,"﵅":`رَضِيَ اللَّهُ عَنْهُنَّ`,"﵇":`عَلَيْهِ ٱلسَّلَٰمُ`,"﵈":`عَلَيْهِمُ السَّلامُ`,"﵊":`عليه الصلاة والسلام`,"﵌":`صلى الله عليه وآله وسلم`,"﵍":`عَلَيْهِ ٱلسَّلَٰمُ`,"﵎":`تبارك وتعالى`,"﵏":`رَحِمَهُمُ ٱللَّٰهُ`,"﷽":`بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيمِ`,"﷿":`عَزَّ وَجَلَّ`};export{e as n,t as r,n as t};
+ //# sourceMappingURL=constants-CSNUBkui.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"constants-CSNUBkui.js","names":["DEFAULT_MAPPING_RULES: Record<string, string>"],"sources":["../src/utils/constants.ts"],"sourcesContent":["/**\n * The default version number for master metadata.\n * @constant {number}\n */\nexport const DEFAULT_MASTER_METADATA_VERSION = 0;\n\n/**\n * Placeholder value used to represent unknown or missing data.\n * @constant {string}\n */\nexport const UNKNOWN_VALUE_PLACEHOLDER = '99999';\n\n/**\n * Default rules to map characters from page content.\n */\nexport const DEFAULT_MAPPING_RULES: Record<string, string> = {\n '<img[^>]*>>': '',\n 舄: '',\n '﵀': 'رَحِمَهُ ٱللَّٰهُ',\n '﵁': 'رضي الله عنه',\n '﵂': 'رَضِيَ ٱللَّٰهُ عَنْهَا',\n '﵃': 'رَضِيَ اللَّهُ عَنْهُمْ',\n '﵄': 'رَضِيَ ٱللَّٰهُ عَنْهُمَا',\n '﵅': 'رَضِيَ اللَّهُ عَنْهُنَّ',\n '﵇': 'عَلَيْهِ ٱلسَّلَٰمُ',\n '﵈': 'عَلَيْهِمُ السَّلامُ',\n '﵊': 'عليه الصلاة والسلام',\n '﵌': 'صلى الله عليه وآله وسلم',\n '﵍': 'عَلَيْهِ ٱلسَّلَٰمُ',\n '﵎': 'تبارك وتعالى',\n '﵏': 'رَحِمَهُمُ ٱللَّٰهُ',\n '﷽': 'بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيمِ',\n '﷿': 'عَزَّ وَجَلَّ',\n};\n"],"mappings":"AAIA,MAAa,EAAkC,EAMlC,EAA4B,QAK5BA,EAAgD,CACzD,cAAe,GACf,EAAG,GACH,IAAK,oBACL,IAAK,eACL,IAAK,0BACL,IAAK,0BACL,IAAK,4BACL,IAAK,2BACL,IAAK,sBACL,IAAK,uBACL,IAAK,sBACL,IAAK,0BACL,IAAK,sBACL,IAAK,eACL,IAAK,sBACL,IAAK,wCACL,IAAK,gBACR"}
@@ -0,0 +1,70 @@
+ //#region src/content.d.ts
+ type Line = {
+ id?: string;
+ text: string;
+ };
+ /**
+ * Parses Shamela HTML content into structured lines while preserving headings.
+ *
+ * @param content - The raw HTML markup representing a page
+ * @returns An array of {@link Line} objects containing text and optional IDs
+ */
+ declare const parseContentRobust: (content: string) => Line[];
+ /**
+ * Sanitises page content by applying regex replacement rules.
+ *
+ * @param text - The text to clean
+ * @param rules - Optional custom replacements, defaults to {@link DEFAULT_MAPPING_RULES}
+ * @returns The sanitised content
+ */
+ declare const mapPageCharacterContent: (text: string, rules?: Record<string, string>) => string;
+ /**
+ * Splits a page body from its trailing footnotes using a marker string.
+ *
+ * @param content - Combined body and footnote text
+ * @param footnoteMarker - Marker indicating the start of footnotes
+ * @returns A tuple containing the page body followed by the footnote section
+ */
+ declare const splitPageBodyFromFooter: (content: string, footnoteMarker?: string) => readonly [string, string];
+ /**
+ * Removes Arabic numeral page markers enclosed in turtle ⦗ ⦘ brackets.
+ * Replaces the marker along with up to two preceding whitespace characters
+ * (space or carriage return) and up to one following whitespace character
+ * with a single space.
+ *
+ * @param text - Text potentially containing page markers
+ * @returns The text with numeric markers replaced by a single space
+ */
+ declare const removeArabicNumericPageMarkers: (text: string) => string;
+ /**
+ * Removes anchor and hadeeth tags from the content while preserving spans.
+ *
+ * @param content - HTML string containing various tags
+ * @returns The content with only span tags retained
+ */
+ declare const removeTagsExceptSpan: (content: string) => string;
+ /**
+ * Normalizes Shamela HTML for CSS styling:
+ * - Converts <hadeeth-N> to <span class="hadeeth">
+ * - Converts </hadeeth> or standalone <hadeeth> to </span>
+ */
+ declare const normalizeHtml: (html: string) => string;
+ /**
+ * Convert Shamela HTML to Markdown format for easier pattern matching.
+ *
+ * Transformations:
+ * - `<span data-type="title">text</span>` → `## text`
+ * - `<a href="inr://...">text</a>` → `text` (strip narrator links)
+ * - All other HTML tags → stripped
+ *
+ * Note: Content typically already has proper line breaks before title spans,
+ * so we don't add extra newlines around the ## header.
+ * Line ending normalization is handled by segmentPages.
+ *
+ * @param html - HTML content from Shamela
+ * @returns Markdown-formatted content
+ */
+ declare const htmlToMarkdown: (html: string) => string;
+ //#endregion
+ export { parseContentRobust as a, splitPageBodyFromFooter as c, normalizeHtml as i, htmlToMarkdown as n, removeArabicNumericPageMarkers as o, mapPageCharacterContent as r, removeTagsExceptSpan as s, Line as t };
+ //# sourceMappingURL=content-BCABDXRO.d.ts.map
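For reference, two of these declarations in use, relying on defaults visible in the bundled source (the `'_________'` footnote marker and the `⦗ ⦘` page-marker brackets); the sample strings are invented:

```typescript
import { removeArabicNumericPageMarkers, splitPageBodyFromFooter } from 'shamela/content';

const [body, footnotes] = splitPageBodyFromFooter('متن الصفحة\n_________\n(١) تعليق');
// body: 'متن الصفحة\n', footnotes: '\n(١) تعليق'

const cleaned = removeArabicNumericPageMarkers('قال المصنف ⦗١٢⦘ رحمه الله');
// the marker and its surrounding spaces collapse into a single space
```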
@@ -0,0 +1,8 @@
+ import{t as e}from"./constants-CSNUBkui.js";const t=/^[)\]\u00BB"”'’.,?!:\u061B\u060C\u061F\u06D4\u2026]+$/,n=e=>{let n=[];for(let r of e){let e=n[n.length-1];e&&t.test(r.text)?e.text+=r.text:n.push(r)}return n},r=e=>e.replace(/\r\n/g,`
+ `).replace(/\r/g,`
+ `).split(`
+ `).map(e=>e.trim()).filter(Boolean),i=e=>r(e).map(e=>({text:e})),a=(e,t)=>{let n=RegExp(`${t}\\s*=\\s*("([^"]*)"|'([^']*)'|([^s>]+))`,`i`),r=e.match(n);if(r)return r[2]??r[3]??r[4]},o=e=>{let t=[],n=/<[^>]+>/g,r=0,i;for(i=n.exec(e);i;){i.index>r&&t.push({type:`text`,value:e.slice(r,i.index)});let o=i[0],s=/^<\//.test(o),c=o.match(/^<\/?\s*([a-zA-Z0-9:-]+)/),l=c?c[1].toLowerCase():``;if(s)t.push({name:l,type:`end`});else{let e={};e.id=a(o,`id`),e[`data-type`]=a(o,`data-type`),t.push({attributes:e,name:l,type:`start`})}r=n.lastIndex,i=n.exec(e)}return r<e.length&&t.push({type:`text`,value:e.slice(r)}),t},s=(e,t)=>{let n=e.trim();return n?t?{id:t,text:n}:{text:n}:null},c=e=>{for(let t=e.length-1;t>=0;t--){let n=e[t];if(n.isTitle&&n.id)return n.id}},l=(e,t)=>{if(!e)return;let n=e.split(`
+ `);for(let e=0;e<n.length;e++){if(e>0){let e=s(t.currentText,t.currentId);e&&t.result.push(e),t.currentText=``,t.currentId=c(t.spanStack)||void 0}n[e]&&(t.currentText+=n[e])}},u=(e,t)=>{let n=e.attributes[`data-type`]===`title`,r;n&&(r=(e.attributes.id??``).replace(/^toc-/,``)),t.spanStack.push({id:r,isTitle:n}),n&&r&&!t.currentId&&(t.currentId=r)},d=e=>{if(e=e.replace(/\r\n/g,`
+ `).replace(/\r/g,`
+ `),!/<span[^>]*>/i.test(e))return n(i(e));let t=o(`<root>${e}</root>`),r={currentId:void 0,currentText:``,result:[],spanStack:[]};for(let e of t)e.type===`text`?l(e.value,r):e.type===`start`&&e.name===`span`?u(e,r):e.type===`end`&&e.name===`span`&&r.spanStack.pop();let a=s(r.currentText,r.currentId);return a&&r.result.push(a),n(r.result).filter(e=>e.text.length>0)},f=Object.entries(e).map(([e,t])=>({regex:new RegExp(e,`g`),replacement:t})),p=t=>{if(t===e)return f;let n=[];for(let e in t)n.push({regex:new RegExp(e,`g`),replacement:t[e]});return n},m=(t,n=e)=>{let r=p(n),i=t;for(let e=0;e<r.length;e++){let{regex:t,replacement:n}=r[e];i=i.replace(t,n)}return i},h=(e,t=`_________`)=>{let n=``,r=e.indexOf(t);return r>=0&&(n=e.slice(r+t.length),e=e.slice(0,r)),[e,n]},g=e=>e.replace(/(?: |\r){0,2}⦗[\u0660-\u0669]+⦘(?: |\r)?/g,` `),_=e=>(e=e.replace(/<a[^>]*>(.*?)<\/a>/gs,`$1`),e=e.replace(/<hadeeth[^>]*>|<\/hadeeth>|<hadeeth-\d+>/gs,``),e),v=e=>e.replace(/<hadeeth-\d+>/gi,`<span class="hadeeth">`).replace(/<\s*\/?\s*hadeeth\s*>/gi,`</span>`),y=e=>e.replace(/<span[^>]*data-type=["']title["'][^>]*>(.*?)<\/span>/gi,`## $1`).replace(/<a[^>]*href=["']inr:\/\/[^"']*["'][^>]*>(.*?)<\/a>/gi,`$1`).replace(/<[^>]*>/g,``);export{g as a,d as i,m as n,_ as o,v as r,h as s,y as t};
+ //# sourceMappingURL=content-DlA9y7g5.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"content-DlA9y7g5.js","names":["out: Line[]","tokens: Token[]","match: RegExpExecArray | null","attributes: Record<string, string | undefined>","id: string | undefined"],"sources":["../src/content.ts"],"sourcesContent":["import { DEFAULT_MAPPING_RULES } from './utils/constants';\n\nexport type Line = {\n id?: string;\n text: string;\n};\n\nconst PUNCT_ONLY = /^[)\\]\\u00BB\"”'’.,?!:\\u061B\\u060C\\u061F\\u06D4\\u2026]+$/;\n\n/**\n * Merges punctuation-only lines into the preceding title when appropriate.\n *\n * @param lines - The processed line candidates to normalise\n * @returns A new array where dangling punctuation fragments are appended to titles\n */\nconst mergeDanglingPunctuation = (lines: Line[]): Line[] => {\n const out: Line[] = [];\n for (const item of lines) {\n const last = out[out.length - 1];\n if (last && PUNCT_ONLY.test(item.text)) {\n last.text += item.text;\n } else {\n out.push(item);\n }\n }\n return out;\n};\n\n/**\n * Normalises raw text into discrete line entries.\n *\n * @param text - Raw book content potentially containing inconsistent breaks\n * @returns An array of trimmed line strings with empty entries removed\n */\nconst splitIntoLines = (text: string) => {\n const normalized = text.replace(/\\r\\n/g, '\\n').replace(/\\r/g, '\\n');\n\n return normalized\n .split('\\n')\n .map((line) => line.trim())\n .filter(Boolean);\n};\n\n/**\n * Converts plain text content into {@link Line} objects without title metadata.\n *\n * @param content - The text content to split into line structures\n * @returns A {@link Line} array wrapping each detected sentence fragment\n */\nconst processTextContent = (content: string): Line[] => {\n return splitIntoLines(content).map((line) => ({ text: line }));\n};\n\n/**\n * Extracts an attribute value from the provided HTML tag string.\n *\n * @param tag - Raw HTML tag source\n * @param name - Attribute name to locate\n * @returns The attribute value when found; otherwise undefined\n */\nconst extractAttribute = (tag: string, name: string): string | undefined => {\n const pattern = new RegExp(`${name}\\\\s*=\\\\s*(\"([^\"]*)\"|'([^']*)'|([^s>]+))`, 'i');\n const match = tag.match(pattern);\n if (!match) {\n return undefined;\n }\n return match[2] ?? match[3] ?? match[4];\n};\n\ntype Token =\n | { type: 'text'; value: string }\n | { type: 'start'; name: string; attributes: Record<string, string | undefined> }\n | { type: 'end'; name: string };\n\n/**\n * Breaks the provided HTML fragment into structural tokens.\n *\n * @param html - HTML fragment containing book content markup\n * @returns A token stream describing text and span boundaries\n */\nconst tokenize = (html: string): Token[] => {\n const tokens: Token[] = [];\n const tagRegex = /<[^>]+>/g;\n let lastIndex = 0;\n let match: RegExpExecArray | null;\n match = tagRegex.exec(html);\n\n while (match) {\n if (match.index > lastIndex) {\n tokens.push({ type: 'text', value: html.slice(lastIndex, match.index) });\n }\n\n const raw = match[0];\n const isEnd = /^<\\//.test(raw);\n const nameMatch = raw.match(/^<\\/?\\s*([a-zA-Z0-9:-]+)/);\n const name = nameMatch ? 
nameMatch[1].toLowerCase() : '';\n\n if (isEnd) {\n tokens.push({ name, type: 'end' });\n } else {\n const attributes: Record<string, string | undefined> = {};\n attributes.id = extractAttribute(raw, 'id');\n attributes['data-type'] = extractAttribute(raw, 'data-type');\n tokens.push({ attributes, name, type: 'start' });\n }\n\n lastIndex = tagRegex.lastIndex;\n match = tagRegex.exec(html);\n }\n\n if (lastIndex < html.length) {\n tokens.push({ type: 'text', value: html.slice(lastIndex) });\n }\n\n return tokens;\n};\n\n/**\n * Pushes the accumulated text as a new line to the result array.\n */\nconst createLine = (text: string, id?: string): Line | null => {\n const trimmed = text.trim();\n if (!trimmed) {\n return null;\n }\n return id ? { id, text: trimmed } : { text: trimmed };\n};\n\n/**\n * Finds the active title ID from the span stack.\n */\nconst getActiveTitleId = (spanStack: Array<{ isTitle: boolean; id?: string }>): string | undefined => {\n for (let i = spanStack.length - 1; i >= 0; i--) {\n const entry = spanStack[i];\n if (entry.isTitle && entry.id) {\n return entry.id;\n }\n }\n};\n\n/**\n * Processes text content by handling line breaks and maintaining title context.\n */\nconst processTextWithLineBreaks = (\n raw: string,\n state: {\n currentText: string;\n currentId?: string;\n result: Line[];\n spanStack: Array<{ isTitle: boolean; id?: string }>;\n },\n) => {\n if (!raw) {\n return;\n }\n\n const parts = raw.split('\\n');\n\n for (let i = 0; i < parts.length; i++) {\n // Push previous line when crossing a line break\n if (i > 0) {\n const line = createLine(state.currentText, state.currentId);\n if (line) {\n state.result.push(line);\n }\n state.currentText = '';\n\n // Preserve title ID if still inside a title span\n const activeTitleId = getActiveTitleId(state.spanStack);\n state.currentId = activeTitleId || undefined;\n }\n\n // Append the text part\n if (parts[i]) {\n state.currentText += parts[i];\n }\n }\n};\n\n/**\n * Handles the start of a span tag, updating the stack and current ID.\n */\nconst handleSpanStart = (\n token: { attributes: Record<string, string | undefined> },\n state: {\n currentId?: string;\n spanStack: Array<{ isTitle: boolean; id?: string }>;\n },\n) => {\n const dataType = token.attributes['data-type'];\n const isTitle = dataType === 'title';\n\n let id: string | undefined;\n if (isTitle) {\n const rawId = token.attributes.id ?? 
'';\n id = rawId.replace(/^toc-/, '');\n }\n\n state.spanStack.push({ id, isTitle });\n\n // First title span on the current physical line wins\n if (isTitle && id && !state.currentId) {\n state.currentId = id;\n }\n};\n\n/**\n * Parses Shamela HTML content into structured lines while preserving headings.\n *\n * @param content - The raw HTML markup representing a page\n * @returns An array of {@link Line} objects containing text and optional IDs\n */\nexport const parseContentRobust = (content: string): Line[] => {\n // Normalize line endings first\n content = content.replace(/\\r\\n/g, '\\n').replace(/\\r/g, '\\n');\n\n // Fast path when there are no span tags at all\n if (!/<span[^>]*>/i.test(content)) {\n return mergeDanglingPunctuation(processTextContent(content));\n }\n\n const tokens = tokenize(`<root>${content}</root>`);\n const state = {\n currentId: undefined as string | undefined,\n currentText: '',\n result: [] as Line[],\n spanStack: [] as Array<{ isTitle: boolean; id?: string }>,\n };\n\n // Process all tokens\n for (const token of tokens) {\n if (token.type === 'text') {\n processTextWithLineBreaks(token.value, state);\n } else if (token.type === 'start' && token.name === 'span') {\n handleSpanStart(token, state);\n } else if (token.type === 'end' && token.name === 'span') {\n // Closing a span does NOT end the line; trailing text stays on the same line\n state.spanStack.pop();\n }\n }\n\n // Flush any trailing text\n const finalLine = createLine(state.currentText, state.currentId);\n if (finalLine) {\n state.result.push(finalLine);\n }\n\n // Merge punctuation-only lines and drop empties\n return mergeDanglingPunctuation(state.result).filter((line) => line.text.length > 0);\n};\n\nconst DEFAULT_COMPILED_RULES = Object.entries(DEFAULT_MAPPING_RULES).map(([pattern, replacement]) => ({\n regex: new RegExp(pattern, 'g'),\n replacement,\n}));\n\n/**\n * Compiles sanitisation rules into RegExp objects for reuse.\n *\n * @param rules - Key/value replacements used during sanitisation\n * @returns A list of compiled regular expression rules\n */\nconst getCompiledRules = (rules: Record<string, string>) => {\n if (rules === DEFAULT_MAPPING_RULES) {\n return DEFAULT_COMPILED_RULES;\n }\n\n const compiled = [];\n for (const pattern in rules) {\n compiled.push({\n regex: new RegExp(pattern, 'g'),\n replacement: rules[pattern],\n });\n }\n return compiled;\n};\n\n/**\n * Sanitises page content by applying regex replacement rules.\n *\n * @param text - The text to clean\n * @param rules - Optional custom replacements, defaults to {@link DEFAULT_MAPPING_RULES}\n * @returns The sanitised content\n */\nexport const mapPageCharacterContent = (\n text: string,\n rules: Record<string, string> = DEFAULT_MAPPING_RULES,\n): string => {\n const compiledRules = getCompiledRules(rules);\n\n let content = text;\n for (let i = 0; i < compiledRules.length; i++) {\n const { regex, replacement } = compiledRules[i];\n content = content.replace(regex, replacement);\n }\n return content;\n};\n\n/**\n * Splits a page body from its trailing footnotes using a marker string.\n *\n * @param content - Combined body and footnote text\n * @param footnoteMarker - Marker indicating the start of footnotes\n * @returns A tuple containing the page body followed by the footnote section\n */\nexport const splitPageBodyFromFooter = (content: string, footnoteMarker = '_________') => {\n let footnote = '';\n const indexOfFootnote = content.indexOf(footnoteMarker);\n\n if (indexOfFootnote >= 0) {\n footnote = 
content.slice(indexOfFootnote + footnoteMarker.length);\n content = content.slice(0, indexOfFootnote);\n }\n\n return [content, footnote] as const;\n};\n\n/**\n * Removes Arabic numeral page markers enclosed in turtle ⦗ ⦘ brackets.\n * Replaces the marker along with up to two preceding whitespace characters\n * (space or carriage return) and up to one following whitespace character\n * with a single space.\n *\n * @param text - Text potentially containing page markers\n * @returns The text with numeric markers replaced by a single space\n */\nexport const removeArabicNumericPageMarkers = (text: string) => {\n return text.replace(/(?: |\\r){0,2}⦗[\\u0660-\\u0669]+⦘(?: |\\r)?/g, ' ');\n};\n\n/**\n * Removes anchor and hadeeth tags from the content while preserving spans.\n *\n * @param content - HTML string containing various tags\n * @returns The content with only span tags retained\n */\nexport const removeTagsExceptSpan = (content: string) => {\n // Remove <a> tags and their content, keeping only the text inside\n content = content.replace(/<a[^>]*>(.*?)<\\/a>/gs, '$1');\n\n // Remove <hadeeth> tags (both self-closing, with content, and numbered)\n content = content.replace(/<hadeeth[^>]*>|<\\/hadeeth>|<hadeeth-\\d+>/gs, '');\n\n return content;\n};\n\n/**\n * Normalizes Shamela HTML for CSS styling:\n * - Converts <hadeeth-N> to <span class=\"hadeeth\">\n * - Converts </hadeeth> or standalone <hadeeth> to </span>\n */\nexport const normalizeHtml = (html: string): string => {\n return html.replace(/<hadeeth-\\d+>/gi, '<span class=\"hadeeth\">').replace(/<\\s*\\/?\\s*hadeeth\\s*>/gi, '</span>');\n};\n\n/**\n * Convert Shamela HTML to Markdown format for easier pattern matching.\n *\n * Transformations:\n * - `<span data-type=\"title\">text</span>` → `## text`\n * - `<a href=\"inr://...\">text</a>` → `text` (strip narrator links)\n * - All other HTML tags → stripped\n *\n * Note: Content typically already has proper line breaks before title spans,\n * so we don't add extra newlines around the ## header.\n * Line ending normalization is handled by segmentPages.\n *\n * @param html - HTML content from Shamela\n * @returns Markdown-formatted content\n */\nexport const htmlToMarkdown = (html: string): string => {\n return (\n html\n // Convert title spans to markdown headers (no extra newlines - content already has them)\n .replace(/<span[^>]*data-type=[\"']title[\"'][^>]*>(.*?)<\\/span>/gi, '## $1')\n // Strip narrator links but keep text\n .replace(/<a[^>]*href=[\"']inr:\\/\\/[^\"']*[\"'][^>]*>(.*?)<\\/a>/gi, '$1')\n // Strip all remaining HTML tags\n .replace(/<[^>]*>/g, '')\n 
);\n};\n"],"mappings":"4CAOA,MAAM,EAAa,wDAQb,EAA4B,GAA0B,CACxD,IAAMA,EAAc,EAAE,CACtB,IAAK,IAAM,KAAQ,EAAO,CACtB,IAAM,EAAO,EAAI,EAAI,OAAS,GAC1B,GAAQ,EAAW,KAAK,EAAK,KAAK,CAClC,EAAK,MAAQ,EAAK,KAElB,EAAI,KAAK,EAAK,CAGtB,OAAO,GASL,EAAkB,GACD,EAAK,QAAQ,QAAS;EAAK,CAAC,QAAQ,MAAO;EAAK,CAG9D,MAAM;EAAK,CACX,IAAK,GAAS,EAAK,MAAM,CAAC,CAC1B,OAAO,QAAQ,CASlB,EAAsB,GACjB,EAAe,EAAQ,CAAC,IAAK,IAAU,CAAE,KAAM,EAAM,EAAE,CAU5D,GAAoB,EAAa,IAAqC,CACxE,IAAM,EAAc,OAAO,GAAG,EAAK,yCAA0C,IAAI,CAC3E,EAAQ,EAAI,MAAM,EAAQ,CAC3B,KAGL,OAAO,EAAM,IAAM,EAAM,IAAM,EAAM,IAcnC,EAAY,GAA0B,CACxC,IAAMC,EAAkB,EAAE,CACpB,EAAW,WACb,EAAY,EACZC,EAGJ,IAFA,EAAQ,EAAS,KAAK,EAAK,CAEpB,GAAO,CACN,EAAM,MAAQ,GACd,EAAO,KAAK,CAAE,KAAM,OAAQ,MAAO,EAAK,MAAM,EAAW,EAAM,MAAM,CAAE,CAAC,CAG5E,IAAM,EAAM,EAAM,GACZ,EAAQ,OAAO,KAAK,EAAI,CACxB,EAAY,EAAI,MAAM,2BAA2B,CACjD,EAAO,EAAY,EAAU,GAAG,aAAa,CAAG,GAEtD,GAAI,EACA,EAAO,KAAK,CAAE,OAAM,KAAM,MAAO,CAAC,KAC/B,CACH,IAAMC,EAAiD,EAAE,CACzD,EAAW,GAAK,EAAiB,EAAK,KAAK,CAC3C,EAAW,aAAe,EAAiB,EAAK,YAAY,CAC5D,EAAO,KAAK,CAAE,aAAY,OAAM,KAAM,QAAS,CAAC,CAGpD,EAAY,EAAS,UACrB,EAAQ,EAAS,KAAK,EAAK,CAO/B,OAJI,EAAY,EAAK,QACjB,EAAO,KAAK,CAAE,KAAM,OAAQ,MAAO,EAAK,MAAM,EAAU,CAAE,CAAC,CAGxD,GAML,GAAc,EAAc,IAA6B,CAC3D,IAAM,EAAU,EAAK,MAAM,CAI3B,OAHK,EAGE,EAAK,CAAE,KAAI,KAAM,EAAS,CAAG,CAAE,KAAM,EAAS,CAF1C,MAQT,EAAoB,GAA4E,CAClG,IAAK,IAAI,EAAI,EAAU,OAAS,EAAG,GAAK,EAAG,IAAK,CAC5C,IAAM,EAAQ,EAAU,GACxB,GAAI,EAAM,SAAW,EAAM,GACvB,OAAO,EAAM,KAQnB,GACF,EACA,IAMC,CACD,GAAI,CAAC,EACD,OAGJ,IAAM,EAAQ,EAAI,MAAM;EAAK,CAE7B,IAAK,IAAI,EAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CAEnC,GAAI,EAAI,EAAG,CACP,IAAM,EAAO,EAAW,EAAM,YAAa,EAAM,UAAU,CACvD,GACA,EAAM,OAAO,KAAK,EAAK,CAE3B,EAAM,YAAc,GAIpB,EAAM,UADgB,EAAiB,EAAM,UAAU,EACpB,IAAA,GAInC,EAAM,KACN,EAAM,aAAe,EAAM,MAQjC,GACF,EACA,IAIC,CAED,IAAM,EADW,EAAM,WAAW,eACL,QAEzBC,EACA,IAEA,GADc,EAAM,WAAW,IAAM,IAC1B,QAAQ,QAAS,GAAG,EAGnC,EAAM,UAAU,KAAK,CAAE,KAAI,UAAS,CAAC,CAGjC,GAAW,GAAM,CAAC,EAAM,YACxB,EAAM,UAAY,IAUb,EAAsB,GAA4B,CAK3D,GAHA,EAAU,EAAQ,QAAQ,QAAS;EAAK,CAAC,QAAQ,MAAO;EAAK,CAGzD,CAAC,eAAe,KAAK,EAAQ,CAC7B,OAAO,EAAyB,EAAmB,EAAQ,CAAC,CAGhE,IAAM,EAAS,EAAS,SAAS,EAAQ,SAAS,CAC5C,EAAQ,CACV,UAAW,IAAA,GACX,YAAa,GACb,OAAQ,EAAE,CACV,UAAW,EAAE,CAChB,CAGD,IAAK,IAAM,KAAS,EACZ,EAAM,OAAS,OACf,EAA0B,EAAM,MAAO,EAAM,CACtC,EAAM,OAAS,SAAW,EAAM,OAAS,OAChD,EAAgB,EAAO,EAAM,CACtB,EAAM,OAAS,OAAS,EAAM,OAAS,QAE9C,EAAM,UAAU,KAAK,CAK7B,IAAM,EAAY,EAAW,EAAM,YAAa,EAAM,UAAU,CAMhE,OALI,GACA,EAAM,OAAO,KAAK,EAAU,CAIzB,EAAyB,EAAM,OAAO,CAAC,OAAQ,GAAS,EAAK,KAAK,OAAS,EAAE,EAGlF,EAAyB,OAAO,QAAQ,EAAsB,CAAC,KAAK,CAAC,EAAS,MAAkB,CAClG,MAAO,IAAI,OAAO,EAAS,IAAI,CAC/B,cACH,EAAE,CAQG,EAAoB,GAAkC,CACxD,GAAI,IAAU,EACV,OAAO,EAGX,IAAM,EAAW,EAAE,CACnB,IAAK,IAAM,KAAW,EAClB,EAAS,KAAK,CACV,MAAO,IAAI,OAAO,EAAS,IAAI,CAC/B,YAAa,EAAM,GACtB,CAAC,CAEN,OAAO,GAUE,GACT,EACA,EAAgC,IACvB,CACT,IAAM,EAAgB,EAAiB,EAAM,CAEzC,EAAU,EACd,IAAK,IAAI,EAAI,EAAG,EAAI,EAAc,OAAQ,IAAK,CAC3C,GAAM,CAAE,QAAO,eAAgB,EAAc,GAC7C,EAAU,EAAQ,QAAQ,EAAO,EAAY,CAEjD,OAAO,GAUE,GAA2B,EAAiB,EAAiB,cAAgB,CACtF,IAAI,EAAW,GACT,EAAkB,EAAQ,QAAQ,EAAe,CAOvD,OALI,GAAmB,IACnB,EAAW,EAAQ,MAAM,EAAkB,EAAe,OAAO,CACjE,EAAU,EAAQ,MAAM,EAAG,EAAgB,EAGxC,CAAC,EAAS,EAAS,EAYjB,EAAkC,GACpC,EAAK,QAAQ,4CAA6C,IAAI,CAS5D,EAAwB,IAEjC,EAAU,EAAQ,QAAQ,uBAAwB,KAAK,CAGvD,EAAU,EAAQ,QAAQ,6CAA8C,GAAG,CAEpE,GAQE,EAAiB,GACnB,EAAK,QAAQ,kBAAmB,yBAAyB,CAAC,QAAQ,0BAA2B,UAAU,CAkBrG,EAAkB,GAEvB,EAEK,QAAQ,yDAA0D,QAAQ,CAE1E,QAAQ,uDAAwD,KAAK,CAErE,QAAQ,WAAY,GAAG"}
@@ -0,0 +1,2 @@
+ import { a as parseContentRobust, c as splitPageBodyFromFooter, i as normalizeHtml, n as htmlToMarkdown, o as removeArabicNumericPageMarkers, r as mapPageCharacterContent, s as removeTagsExceptSpan, t as Line } from "./content-BCABDXRO.js";
+ export { Line, htmlToMarkdown, mapPageCharacterContent, normalizeHtml, parseContentRobust, removeArabicNumericPageMarkers, removeTagsExceptSpan, splitPageBodyFromFooter };
@@ -0,0 +1 @@
+ import{a as e,i as t,n,o as r,r as i,s as a,t as o}from"./content-DlA9y7g5.js";export{o as htmlToMarkdown,n as mapPageCharacterContent,i as normalizeHtml,t as parseContentRobust,e as removeArabicNumericPageMarkers,r as removeTagsExceptSpan,a as splitPageBodyFromFooter};
package/dist/index.d.ts CHANGED
@@ -1,228 +1,8 @@
- //#region src/db/types.d.ts
+ import { a as parseContentRobust, c as splitPageBodyFromFooter, i as normalizeHtml, n as htmlToMarkdown, o as removeArabicNumericPageMarkers, r as mapPageCharacterContent, s as removeTagsExceptSpan, t as Line } from "./content-BCABDXRO.js";
+ import { a as DownloadBookOptions, c as GetBookMetadataResponsePayload, d as OutputOptions, f as Page, h as Title, i as Category, l as GetMasterMetadataResponsePayload, m as ShamelaConfigKey, n as Book, o as DownloadMasterOptions, p as ShamelaConfig, r as BookData, s as GetBookMetadataOptions, t as Author, u as MasterData } from "./types-BRXOrgNv.js";
 
- /**
- * A record that can be deleted by patches.
- */
- type Deletable = {
- /** Indicates if it was deleted in the patch if it is set to '1 */
- is_deleted?: string;
- };
- type Unique = {
- /** Unique identifier */
- id: number;
- };
- /**
- * Database row structure for the author table.
- */
- type AuthorRow = Deletable & Unique & {
- /** Author biography */
- biography: string;
- /** Death year */
- death_number: string;
- /** The death year as a text */
- death_text: string;
- /** Author name */
- name: string;
- };
- /**
- * Database row structure for the book table.
- */
- type BookRow = Deletable & Unique & {
- /** Serialized author ID(s) "2747, 3147" or "513" */
- author: string;
- /** Bibliography information */
- bibliography: string;
- /** Category ID */
- category: string;
- /** Publication date (or 99999 for unavailable) */
- date: string;
- /** Hint or description */
- hint: string;
- /** Major version */
- major_release: string;
- /** Serialized metadata */
- metadata: string;
- /** Minor version */
- minor_release: string;
- /** Book name */
- name: string;
- /** Serialized PDF links */
- pdf_links: string;
- /** Printed flag */
- printed: string;
- /** Book type */
- type: string;
- };
- /**
- * Database row structure for the category table.
- */
- type CategoryRow = Deletable & Unique & {
- /** Category name */
- name: string;
- /** Category order in the list to show. */
- order: string;
- };
- /**
- * Database row structure for the page table.
- */
- type PageRow = Deletable & Unique & {
- /** Page content */
- content: string;
- /** Page number */
- number: string | null;
- /** Page reference */
- page: string | null;
- /** Part number */
- part: string | null;
- /** Additional metadata */
- services: string | null;
- };
- /**
- * Database row structure for the title table.
- */
- type TitleRow = Deletable & Unique & {
- /** Title content */
- content: string;
- /** Page number */
- page: string;
- /** Parent title ID */
- parent: string | null;
- };
- //#endregion
- //#region src/types.d.ts
- /**
- * Represents an author entity.
- */
- type Author = AuthorRow;
- /**
- * Represents a book entity.
- */
- type Book = BookRow;
- /**
- * A category for a book.
- */
- type Category = CategoryRow;
- /**
- * A page in a book.
- */
- type Page = Pick<PageRow, 'id' | 'content'> & {
- page?: number;
- part?: string;
- number?: string;
- };
- /**
- * A title heading in a book.
- */
- type Title = Pick<TitleRow, 'id' | 'content'> & {
- page: number;
- parent?: number;
- };
- /**
- * Represents book content data.
- */
- type BookData = {
- /** Array of pages in the book */
- pages: Page[];
- /** Array of titles/chapters */
- titles: Title[];
- };
- /**
- * Master data structure containing all core entities.
- */
- type MasterData = {
- /** Array of all authors */
- authors: Author[];
- /** Array of all books */
- books: Book[];
- /** Array of all categories */
- categories: Category[];
- /** Version number for the downloaded master database */
- version: number;
- };
- /**
- * Options for downloading a book.
- */
- type DownloadBookOptions = {
- /** Optional book metadata */
- bookMetadata?: GetBookMetadataResponsePayload;
- /** Output file configuration */
- outputFile: OutputOptions;
- };
- /**
- * Options for downloading master data.
- */
- type DownloadMasterOptions = {
- /** Optional master metadata */
- masterMetadata?: GetMasterMetadataResponsePayload;
- /** Output file configuration */
- outputFile: OutputOptions;
- };
- /**
- * Options for getting book metadata.
- */
- type GetBookMetadataOptions = {
- /** Major version number */
- majorVersion: number;
- /** Minor version number */
- minorVersion: number;
- };
- /**
- * Response payload for book metadata requests.
- */
- type GetBookMetadataResponsePayload = {
- /** Major release version */
- majorRelease: number;
- /** URL for major release download */
- majorReleaseUrl: string;
- /** Optional minor release version */
- minorRelease?: number;
- /** Optional URL for minor release download */
- minorReleaseUrl?: string;
- };
- /**
- * Response payload for master metadata requests.
- */
- type GetMasterMetadataResponsePayload = {
- /** Download URL */
- url: string;
- /** Version number */
- version: number;
- };
- type NodeJSOutput = {
- /** Output file path (Node.js only) */
- path: string;
- writer?: never;
- };
- type CustomOutput = {
- /** Custom writer used when path is not provided */
- writer: (payload: string | Uint8Array) => Promise<void> | void;
- path?: undefined;
- };
- /**
- * Output file options.
- */
- type OutputOptions = NodeJSOutput | CustomOutput;
- /**
- * Runtime configuration for the library.
- */
- type ShamelaConfig = {
- /** API key used to authenticate against Shamela services */
- apiKey?: string;
- /** Endpoint used for book metadata */
- booksEndpoint?: string;
- /** Endpoint used for master metadata */
- masterPatchEndpoint?: string;
- /** Optional override for the sql.js wasm asset location */
- sqlJsWasmUrl?: string;
- /** Optional custom fetch implementation for environments without a global fetch */
- fetchImplementation?: typeof fetch;
- };
- /**
- * Valid configuration keys.
- */
- type ShamelaConfigKey = keyof ShamelaConfig;
- //#endregion
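The public types removed above (`DownloadBookOptions`, `OutputOptions`, `ShamelaConfig`, and the rest of the exported surface) are re-imported from the shared `types-BRXOrgNv` chunk at the top of this file rather than dropped. As an aside, the `CustomOutput` variant can be satisfied with an in-memory writer; a small sketch using only the types shown here (how the options object is then passed to `downloadBook` is not shown in this diff):

```typescript
import type { DownloadBookOptions } from 'shamela';

// Collect the downloaded payload in memory instead of writing to a file path.
// The writer signature mirrors CustomOutput: (payload: string | Uint8Array) => Promise<void> | void.
const chunks: Uint8Array[] = [];

const options: DownloadBookOptions = {
    outputFile: {
        writer: (payload) => {
            chunks.push(typeof payload === 'string' ? new TextEncoder().encode(payload) : payload);
        },
    },
};
```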
  //#region src/api.d.ts
+
  /**
  * Retrieves metadata for a specific book from the Shamela API.
  *
@@ -399,51 +179,5 @@ declare const configure: (config: ConfigureOptions) => void;
  */
  declare const resetConfig: () => void;
  //#endregion
- //#region src/content.d.ts
- type Line = {
- id?: string;
- text: string;
- };
- /**
- * Parses Shamela HTML content into structured lines while preserving headings.
- *
- * @param content - The raw HTML markup representing a page
- * @returns An array of {@link Line} objects containing text and optional IDs
- */
- declare const parseContentRobust: (content: string) => Line[];
- /**
- * Sanitises page content by applying regex replacement rules.
- *
- * @param text - The text to clean
- * @param rules - Optional custom replacements, defaults to {@link DEFAULT_SANITIZATION_RULES}
- * @returns The sanitised content
- */
- declare const sanitizePageContent: (text: string, rules?: Record<string, string>) => string;
- /**
- * Splits a page body from its trailing footnotes using a marker string.
- *
- * @param content - Combined body and footnote text
- * @param footnoteMarker - Marker indicating the start of footnotes
- * @returns A tuple containing the page body followed by the footnote section
- */
- declare const splitPageBodyFromFooter: (content: string, footnoteMarker?: string) => readonly [string, string];
- /**
- * Removes Arabic numeral page markers enclosed in turtle ⦗ ⦘ brackets.
- * Replaces the marker along with up to two preceding whitespace characters
- * (space or carriage return) and up to one following whitespace character
- * with a single space.
- *
- * @param text - Text potentially containing page markers
- * @returns The text with numeric markers replaced by a single space
- */
- declare const removeArabicNumericPageMarkers: (text: string) => string;
- /**
- * Removes anchor and hadeeth tags from the content while preserving spans.
- *
- * @param content - HTML string containing various tags
- * @returns The content with only span tags retained
- */
- declare const removeTagsExceptSpan: (content: string) => string;
- //#endregion
- export { Author, Book, BookData, Category, type ConfigureOptions, DownloadBookOptions, DownloadMasterOptions, GetBookMetadataOptions, GetBookMetadataResponsePayload, GetMasterMetadataResponsePayload, Line, type Logger, MasterData, OutputOptions, Page, ShamelaConfig, ShamelaConfigKey, Title, configure, downloadBook, downloadMasterDatabase, getBook, getBookMetadata, getCoverUrl, getMaster, getMasterMetadata, parseContentRobust, removeArabicNumericPageMarkers, removeTagsExceptSpan, resetConfig, sanitizePageContent, splitPageBodyFromFooter };
+ export { Author, Book, BookData, Category, type ConfigureOptions, DownloadBookOptions, DownloadMasterOptions, GetBookMetadataOptions, GetBookMetadataResponsePayload, GetMasterMetadataResponsePayload, Line, type Logger, MasterData, OutputOptions, Page, ShamelaConfig, ShamelaConfigKey, Title, configure, downloadBook, downloadMasterDatabase, getBook, getBookMetadata, getCoverUrl, getMaster, getMasterMetadata, htmlToMarkdown, mapPageCharacterContent, normalizeHtml, parseContentRobust, removeArabicNumericPageMarkers, removeTagsExceptSpan, resetConfig, splitPageBodyFromFooter };
  //# sourceMappingURL=index.d.ts.map