flappa-doormal 1.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +325 -0
- package/README.md +477 -199
- package/dist/index.d.mts +871 -327
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +1611 -393
- package/dist/index.mjs.map +1 -1
- package/package.json +13 -10
package/dist/index.mjs
CHANGED
@@ -1,517 +1,1735 @@
-
-
-
+//#region src/segmentation/fuzzy.ts
+/**
+ * Fuzzy matching utilities for Arabic text.
+ *
+ * Provides diacritic-insensitive and character-equivalence matching for Arabic text.
+ * This allows matching text regardless of:
+ * - Diacritical marks (harakat/tashkeel): فَتْحَة، ضَمَّة، كَسْرَة، سُكُون، شَدَّة، تَنْوين
+ * - Character equivalences: ا↔آ↔أ↔إ, ة↔ه, ى↔ي
+ *
+ * @module fuzzy
+ *
+ * @example
+ * // Make a pattern diacritic-insensitive
+ * const pattern = makeDiacriticInsensitive('حدثنا');
+ * new RegExp(pattern, 'u').test('حَدَّثَنَا') // → true
+ */
+/**
+ * Character class matching all Arabic diacritics (Tashkeel/Harakat).
+ *
+ * Includes the following diacritical marks:
+ * - U+064B: ً (fathatan - double fatha)
+ * - U+064C: ٌ (dammatan - double damma)
+ * - U+064D: ٍ (kasratan - double kasra)
+ * - U+064E: َ (fatha - short a)
+ * - U+064F: ُ (damma - short u)
+ * - U+0650: ِ (kasra - short i)
+ * - U+0651: ّ (shadda - gemination)
+ * - U+0652: ْ (sukun - no vowel)
+ *
+ * @internal
+ */
+const DIACRITICS_CLASS = "[ًٌٍَُِّْ]";
 /**
- *
+ * Groups of equivalent Arabic characters.
+ *
+ * Characters within the same group are considered equivalent for matching purposes.
+ * This handles common variations in Arabic text where different characters are
+ * used interchangeably or have the same underlying meaning.
+ *
+ * Equivalence groups:
+ * - Alef variants: ا (bare), آ (with madda), أ (with hamza above), إ (with hamza below)
+ * - Ta marbuta and Ha: ة ↔ ه (often interchangeable at word endings)
+ * - Alef maqsura and Ya: ى ↔ ي (often interchangeable at word endings)
+ *
+ * @internal
  */
-const
+const EQUIV_GROUPS = [
+  [
+    "ا",
+    "آ",
+    "أ",
+    "إ"
+  ],
+  ["ة", "ه"],
+  ["ى", "ي"]
+];
 /**
- *
+ * Escapes a string for safe inclusion in a regular expression.
+ *
+ * Escapes all regex metacharacters: `.*+?^${}()|[\]\\`
+ *
+ * @param s - Any string to escape
+ * @returns String with regex metacharacters escaped
+ *
+ * @example
+ * escapeRegex('hello.world') // → 'hello\\.world'
+ * escapeRegex('[test]') // → '\\[test\\]'
+ * escapeRegex('a+b*c?') // → 'a\\+b\\*c\\?'
  */
-const
+const escapeRegex = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
 /**
- *
+ * Returns a regex character class for all equivalents of a given character.
+ *
+ * If the character belongs to one of the predefined equivalence groups
+ * (e.g., ا/آ/أ/إ), the returned class will match any member of that group.
+ * Otherwise, the original character is simply escaped for safe regex inclusion.
+ *
+ * @param ch - A single character to expand into its equivalence class
+ * @returns A RegExp-safe string representing the character and its equivalents
+ *
+ * @example
+ * getEquivClass('ا') // → '[اآأإ]' (matches any alef variant)
+ * getEquivClass('ب') // → 'ب' (no equivalents, just escaped)
+ * getEquivClass('.') // → '\\.' (regex metachar escaped)
+ *
+ * @internal
  */
-const
+const getEquivClass = (ch) => {
+  for (const group of EQUIV_GROUPS) if (group.includes(ch)) return `[${group.map((c) => escapeRegex(c)).join("")}]`;
+  return escapeRegex(ch);
+};
 /**
- *
+ * Performs light normalization on Arabic text for consistent matching.
+ *
+ * Normalization steps:
+ * 1. NFC normalization (canonical decomposition then composition)
+ * 2. Remove Zero-Width Joiner (U+200D) and Zero-Width Non-Joiner (U+200C)
+ * 3. Collapse multiple whitespace characters to single space
+ * 4. Trim leading and trailing whitespace
+ *
+ * This normalization preserves diacritics and letter forms while removing
+ * invisible characters that could interfere with matching.
+ *
+ * @param str - Arabic text to normalize
+ * @returns Normalized string
+ *
+ * @example
+ * normalizeArabicLight('حَدَّثَنَا') // → 'حَدَّثَنَا' (diacritics preserved)
+ * normalizeArabicLight('بسم  الله') // → 'بسم الله' (spaces collapsed)
+ * normalizeArabicLight(' text ') // → 'text' (trimmed)
+ *
+ * @internal
  */
-const
-  "
-  "latin": "\\d+"
+const normalizeArabicLight = (str) => {
+  return str.normalize("NFC").replace(/[\u200C\u200D]/g, "").replace(/\s+/g, " ").trim();
 };
 /**
- *
+ * Creates a diacritic-insensitive regex pattern for Arabic text matching.
+ *
+ * Transforms input text into a regex pattern that matches the text regardless
+ * of diacritical marks (harakat) and character variations. Each character in
+ * the input is:
+ * 1. Expanded to its equivalence class (if applicable)
+ * 2. Followed by an optional diacritics matcher
+ *
+ * This allows matching:
+ * - `حدثنا` with `حَدَّثَنَا` (with full diacritics)
+ * - `الإيمان` with `الايمان` (alef variants)
+ * - `صلاة` with `صلاه` (ta marbuta ↔ ha)
+ *
+ * @param text - Input Arabic text to make diacritic-insensitive
+ * @returns Regex pattern string that matches the text with or without diacritics
+ *
+ * @example
+ * const pattern = makeDiacriticInsensitive('حدثنا');
+ * // Each char gets equivalence class + optional diacritics
+ * // Result matches: حدثنا, حَدَّثَنَا, حَدَثَنَا, etc.
+ *
+ * @example
+ * const pattern = makeDiacriticInsensitive('باب');
+ * new RegExp(pattern, 'u').test('بَابٌ') // → true
+ * new RegExp(pattern, 'u').test('باب') // → true
+ *
+ * @example
+ * // Using with split rules
+ * {
+ *   lineStartsWith: ['باب'],
+ *   split: 'at',
+ *   fuzzy: true // Applies makeDiacriticInsensitive internally
+ * }
  */
-const
-
-
-
-  "none": "",
-  "paren": "\\)"
+const makeDiacriticInsensitive = (text) => {
+  const diacriticsMatcher = `${DIACRITICS_CLASS}*`;
+  const norm = normalizeArabicLight(text);
+  return Array.from(norm).map((ch) => getEquivClass(ch) + diacriticsMatcher).join("");
 };
 
 //#endregion
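The fuzzy region above is self-contained, so it can be exercised directly. A minimal sketch, assuming `makeDiacriticInsensitive` is re-exported from the package root (this excerpt only shows the module body, not the export list):

```js
import { makeDiacriticInsensitive } from 'flappa-doormal';

// Each letter expands to its equivalence class plus an optional run of
// diacritics, so bare text matches fully vocalized text.
const pattern = makeDiacriticInsensitive('باب');
new RegExp(pattern, 'u').test('بَابٌ'); // true  (diacritics ignored)
new RegExp(pattern, 'u').test('إيمان'); // false (different letters)
```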
-//#region src/
+//#region src/segmentation/breakpoint-utils.ts
+/**
+ * Normalizes a breakpoint to the object form.
+ * Strings are converted to { pattern: str } with no constraints.
+ *
+ * @param bp - Breakpoint as string or object
+ * @returns Normalized BreakpointRule object
+ *
+ * @example
+ * normalizeBreakpoint('\\n\\n')
+ * // → { pattern: '\\n\\n' }
+ *
+ * normalizeBreakpoint({ pattern: '\\n', min: 10 })
+ * // → { pattern: '\\n', min: 10 }
+ */
+const normalizeBreakpoint = (bp) => typeof bp === "string" ? { pattern: bp } : bp;
 /**
- *
- *
+ * Checks if a page ID is in an excluded list (single pages or ranges).
+ *
+ * @param pageId - Page ID to check
+ * @param excludeList - List of page IDs or [from, to] ranges to exclude
+ * @returns True if page is excluded
+ *
+ * @example
+ * isPageExcluded(5, [1, 5, 10])
+ * // → true
+ *
+ * isPageExcluded(5, [[3, 7]])
+ * // → true
+ *
+ * isPageExcluded(5, [[10, 20]])
+ * // → false
  */
+const isPageExcluded = (pageId, excludeList) => {
+  if (!excludeList || excludeList.length === 0) return false;
+  for (const item of excludeList) if (typeof item === "number") {
+    if (pageId === item) return true;
+  } else {
+    const [from, to] = item;
+    if (pageId >= from && pageId <= to) return true;
+  }
+  return false;
+};
 /**
- *
- *
+ * Checks if a page ID is within a breakpoint's min/max range and not excluded.
+ *
+ * @param pageId - Page ID to check
+ * @param rule - Breakpoint rule with optional min/max/exclude constraints
+ * @returns True if page is within valid range
+ *
+ * @example
+ * isInBreakpointRange(50, { pattern: '\\n', min: 10, max: 100 })
+ * // → true
+ *
+ * isInBreakpointRange(5, { pattern: '\\n', min: 10 })
+ * // → false (below min)
  */
-const
-
-
-
-
-  "حدَّثني",
-  "وحدثنا",
-  "حُدِّثت عن",
-  "وحَدَّثَنَا"
-];
+const isInBreakpointRange = (pageId, rule) => {
+  if (rule.min !== void 0 && pageId < rule.min) return false;
+  if (rule.max !== void 0 && pageId > rule.max) return false;
+  return !isPageExcluded(pageId, rule.exclude);
+};
 /**
- *
- *
+ * Builds an exclude set from a PageRange array for O(1) lookups.
+ *
+ * @param excludeList - List of page IDs or [from, to] ranges
+ * @returns Set of all excluded page IDs
+ *
+ * @remarks
+ * This expands ranges into explicit page IDs for fast membership checks. For typical
+ * book-scale inputs (thousands of pages), this is small and keeps downstream logic
+ * simple and fast. If you expect extremely large ranges (e.g., millions of pages),
+ * consider avoiding broad excludes or introducing a range-based membership structure.
+ *
+ * @example
+ * buildExcludeSet([1, 5, [10, 12]])
+ * // → Set { 1, 5, 10, 11, 12 }
  */
-const
-
-
-
-
+const buildExcludeSet = (excludeList) => {
+  const excludeSet = /* @__PURE__ */ new Set();
+  for (const item of excludeList || []) if (typeof item === "number") excludeSet.add(item);
+  else for (let i = item[0]; i <= item[1]; i++) excludeSet.add(i);
+  return excludeSet;
+};
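A quick sketch of how these exclude helpers compose. They are module-internal in this build; the direct calls below are for illustration only:

```js
// Ranges are expanded once into a Set for O(1) membership checks.
const excludeSet = buildExcludeSet([1, 5, [10, 12]]);
excludeSet.has(11); // true

// The lazy, per-call variant used when no set has been precomputed:
isPageExcluded(11, [1, 5, [10, 12]]); // true

// min/max bounds and excludes combine in isInBreakpointRange:
isInBreakpointRange(11, { pattern: '\\n', min: 5, exclude: [[10, 12]] }); // false
```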
+/**
+ * Creates a segment with optional to and meta fields.
+ * Returns null if content is empty after trimming.
+ *
+ * @param content - Segment content
+ * @param fromPageId - Starting page ID
+ * @param toPageId - Optional ending page ID (omitted if same as from)
+ * @param meta - Optional metadata to attach
+ * @returns Segment object or null if empty
+ *
+ * @example
+ * createSegment('Hello world', 1, 3, { chapter: 1 })
+ * // → { content: 'Hello world', from: 1, to: 3, meta: { chapter: 1 } }
+ *
+ * createSegment(' ', 1, undefined, undefined)
+ * // → null (empty content)
+ */
+const createSegment = (content, fromPageId, toPageId, meta) => {
+  const trimmed = content.trim();
+  if (!trimmed) return null;
+  const seg = {
+    content: trimmed,
+    from: fromPageId
+  };
+  if (toPageId !== void 0 && toPageId !== fromPageId) seg.to = toPageId;
+  if (meta) seg.meta = meta;
+  return seg;
+};
+/**
+ * Expands breakpoint patterns and pre-computes exclude sets.
+ *
+ * @param breakpoints - Array of breakpoint patterns or rules
+ * @param processPattern - Function to expand tokens in patterns
+ * @returns Array of expanded breakpoints with compiled regexes
+ *
+ * @remarks
+ * This function compiles regex patterns dynamically. This can be a ReDoS vector
+ * if patterns come from untrusted sources. In typical usage, breakpoint rules
+ * are application configuration, not user input.
+ */
+const expandBreakpoints = (breakpoints, processPattern$1) => breakpoints.map((bp) => {
+  const rule = normalizeBreakpoint(bp);
+  const excludeSet = buildExcludeSet(rule.exclude);
+  const skipWhenRegex = rule.skipWhen !== void 0 ? (() => {
+    const expandedSkip = processPattern$1(rule.skipWhen);
+    try {
+      return new RegExp(expandedSkip, "mu");
+    } catch (error) {
+      const message = error instanceof Error ? error.message : String(error);
+      throw new Error(`Invalid breakpoint skipWhen regex: ${rule.skipWhen}\n Cause: ${message}`);
+    }
+  })() : null;
+  if (rule.pattern === "") return {
+    excludeSet,
+    regex: null,
+    rule,
+    skipWhenRegex
+  };
+  const expanded = processPattern$1(rule.pattern);
+  try {
+    return {
+      excludeSet,
+      regex: new RegExp(expanded, "gmu"),
+      rule,
+      skipWhenRegex
+    };
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw new Error(`Invalid breakpoint regex: ${rule.pattern}\n Cause: ${message}`);
+  }
+});
+/**
+ * Finds the actual ending page index by searching backwards for page content prefix.
+ * Used to determine which page a segment actually ends on based on content matching.
+ *
+ * @param pieceContent - Content of the segment piece
+ * @param currentFromIdx - Current starting index in pageIds
+ * @param toIdx - Maximum ending index to search
+ * @param pageIds - Array of page IDs
+ * @param normalizedPages - Map of page ID to normalized content
+ * @returns The actual ending page index
+ */
+const findActualEndPage = (pieceContent, currentFromIdx, toIdx, pageIds, normalizedPages) => {
+  for (let pi = toIdx; pi > currentFromIdx; pi--) {
+    const pageData = normalizedPages.get(pageIds[pi]);
+    if (pageData) {
+      const checkPortion = pageData.content.slice(0, Math.min(30, pageData.length));
+      if (checkPortion.length > 0 && pieceContent.indexOf(checkPortion) > 0) return pi;
+    }
+  }
+  return currentFromIdx;
+};
+/**
+ * Finds the actual starting page index by searching forwards for page content prefix.
+ * Used to determine which page content actually starts from based on content matching.
+ *
+ * This is the counterpart to findActualEndPage - it searches forward to find which
+ * page the content starts on, rather than which page it ends on.
+ *
+ * @param pieceContent - Content of the segment piece
+ * @param currentFromIdx - Current starting index in pageIds
+ * @param toIdx - Maximum ending index to search
+ * @param pageIds - Array of page IDs
+ * @param normalizedPages - Map of page ID to normalized content
+ * @returns The actual starting page index
+ */
+const findActualStartPage = (pieceContent, currentFromIdx, toIdx, pageIds, normalizedPages) => {
+  const trimmedPiece = pieceContent.trimStart();
+  if (!trimmedPiece) return currentFromIdx;
+  for (let pi = currentFromIdx; pi <= toIdx; pi++) {
+    const pageData = normalizedPages.get(pageIds[pi]);
+    if (pageData) {
+      const pagePrefix = pageData.content.slice(0, Math.min(30, pageData.length)).trim();
+      const piecePrefix = trimmedPiece.slice(0, Math.min(30, trimmedPiece.length));
+      if (pagePrefix.length > 0) {
+        if (trimmedPiece.startsWith(pagePrefix)) return pi;
+        if (pageData.content.trimStart().startsWith(piecePrefix)) return pi;
+      }
+    }
+  }
+  return currentFromIdx;
+};
+/**
+ * Checks if any page in a range is excluded by the given exclude set.
+ *
+ * @param excludeSet - Set of excluded page IDs
+ * @param pageIds - Array of page IDs
+ * @param fromIdx - Start index (inclusive)
+ * @param toIdx - End index (inclusive)
+ * @returns True if any page in range is excluded
+ */
+const hasExcludedPageInRange = (excludeSet, pageIds, fromIdx, toIdx) => {
+  if (excludeSet.size === 0) return false;
+  for (let pageIdx = fromIdx; pageIdx <= toIdx; pageIdx++) if (excludeSet.has(pageIds[pageIdx])) return true;
+  return false;
+};
+/**
+ * Finds the position of the next page content within remaining content.
+ * Returns -1 if not found.
+ *
+ * @param remainingContent - Content to search in
+ * @param nextPageData - Normalized data for the next page
+ * @returns Position of next page content, or -1 if not found
+ */
+const findNextPagePosition = (remainingContent, nextPageData) => {
+  const searchPrefix = nextPageData.content.trim().slice(0, Math.min(30, nextPageData.length));
+  if (searchPrefix.length === 0) return -1;
+  const pos = remainingContent.indexOf(searchPrefix);
+  return pos > 0 ? pos : -1;
+};
+/**
+ * Finds matches within a window and returns the selected position based on preference.
+ *
+ * @param windowContent - Content to search
+ * @param regex - Regex to match
+ * @param prefer - 'longer' for last match, 'shorter' for first match
+ * @returns Break position after the selected match, or -1 if no matches
+ */
+const findPatternBreakPosition = (windowContent, regex, prefer) => {
+  const matches = [];
+  for (const m of windowContent.matchAll(regex)) matches.push({
+    index: m.index,
+    length: m[0].length
+  });
+  if (matches.length === 0) return -1;
+  const selected = prefer === "longer" ? matches[matches.length - 1] : matches[0];
+  return selected.index + selected.length;
+};
+/**
+ * Tries to find a break position within the current window using breakpoint patterns.
+ * Returns the break position or -1 if no suitable break was found.
+ *
+ * @param remainingContent - Content remaining to be segmented
+ * @param currentFromIdx - Current starting page index
+ * @param toIdx - Ending page index
+ * @param windowEndIdx - Maximum window end index
+ * @param ctx - Breakpoint context with page data and patterns
+ * @returns Break position in the content, or -1 if no break found
+ */
+const findBreakPosition = (remainingContent, currentFromIdx, toIdx, windowEndIdx, ctx) => {
+  const { pageIds, normalizedPages, cumulativeOffsets, expandedBreakpoints, prefer } = ctx;
+  for (const { rule, regex, excludeSet, skipWhenRegex } of expandedBreakpoints) {
+    if (!isInBreakpointRange(pageIds[currentFromIdx], rule)) continue;
+    if (hasExcludedPageInRange(excludeSet, pageIds, currentFromIdx, windowEndIdx)) continue;
+    if (skipWhenRegex?.test(remainingContent)) continue;
+    if (regex === null) {
+      const nextPageIdx = windowEndIdx + 1;
+      if (nextPageIdx <= toIdx) {
+        const nextPageData = normalizedPages.get(pageIds[nextPageIdx]);
+        if (nextPageData) {
+          const pos = findNextPagePosition(remainingContent, nextPageData);
+          if (pos > 0) return pos;
+        }
+      }
+      return Math.min(cumulativeOffsets[windowEndIdx + 1] - cumulativeOffsets[currentFromIdx], remainingContent.length);
+    }
+    const windowEndPosition = Math.min(cumulativeOffsets[windowEndIdx + 1] - cumulativeOffsets[currentFromIdx], remainingContent.length);
+    const breakPos = findPatternBreakPosition(remainingContent.slice(0, windowEndPosition), regex, prefer);
+    if (breakPos > 0) return breakPos;
+  }
+  return -1;
+};
 
 //#endregion
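The `prefer` flag in `findPatternBreakPosition` is easiest to see on a tiny window. An illustrative direct call (internal function; values follow from the implementation above):

```js
// Two newline matches at indices 1 and 3; the break position is the
// offset just past the selected match.
findPatternBreakPosition('a\nb\nc', /\n/gmu, 'longer');  // 4 (last match)
findPatternBreakPosition('a\nb\nc', /\n/gmu, 'shorter'); // 2 (first match)
```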
-//#region src/
+//#region src/segmentation/match-utils.ts
+/**
+ * Utility functions for regex matching and result processing.
+ *
+ * These functions were extracted from `segmenter.ts` to reduce complexity
+ * and enable independent testing. They handle match filtering, capture
+ * extraction, and occurrence-based selection.
+ *
+ * @module match-utils
+ */
 /**
- *
- *
+ * Extracts named capture groups from a regex match.
+ *
+ * Only includes groups that are in the `captureNames` list and have
+ * defined values. This filters out positional captures and ensures
+ * only explicitly requested named captures are returned.
+ *
+ * @param groups - The `match.groups` object from `RegExp.exec()`
+ * @param captureNames - List of capture names to extract (from `{{token:name}}` syntax)
+ * @returns Object with capture name → value pairs, or `undefined` if none found
+ *
+ * @example
+ * const match = /(?<num>[٠-٩]+) -/.exec('٦٦٩٦ - text');
+ * extractNamedCaptures(match.groups, ['num'])
+ * // → { num: '٦٦٩٦' }
+ *
+ * @example
+ * // No matching captures
+ * extractNamedCaptures({}, ['num'])
+ * // → undefined
+ *
+ * @example
+ * // Undefined groups
+ * extractNamedCaptures(undefined, ['num'])
+ * // → undefined
  */
+const extractNamedCaptures = (groups, captureNames) => {
+  if (!groups || captureNames.length === 0) return;
+  const namedCaptures = {};
+  for (const name of captureNames) if (groups[name] !== void 0) namedCaptures[name] = groups[name];
+  return Object.keys(namedCaptures).length > 0 ? namedCaptures : void 0;
+};
 /**
- *
- *
+ * Gets the last defined positional capture group from a match array.
+ *
+ * Used for `lineStartsAfter` patterns where the content capture (`.*`)
+ * is always at the end of the pattern. Named captures may shift the
+ * positional indices, so we iterate backward to find the actual content.
+ *
+ * @param match - RegExp exec result array
+ * @returns The last defined capture group value, or `undefined` if none
+ *
+ * @example
+ * // Pattern: ^(?:(?<num>[٠-٩]+) - )(.*)
+ * // Match array: ['٦٦٩٦ - content', '٦٦٩٦', 'content']
+ * getLastPositionalCapture(match)
+ * // → 'content'
+ *
+ * @example
+ * // No captures
+ * getLastPositionalCapture(['full match'])
+ * // → undefined
  */
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
+const getLastPositionalCapture = (match) => {
+  if (match.length <= 1) return;
+  for (let i = match.length - 1; i >= 1; i--) if (match[i] !== void 0) return match[i];
+};
+/**
+ * Filters matches to only include those within page ID constraints.
+ *
+ * Applies the `min`, `max`, and `exclude` constraints from a rule to filter out
+ * matches that occur on pages outside the allowed range or explicitly excluded.
+ *
+ * @param matches - Array of match results to filter
+ * @param rule - Rule containing `min`, `max`, and/or `exclude` page constraints
+ * @param getId - Function that returns the page ID for a given offset
+ * @returns Filtered array containing only matches within constraints
+ *
+ * @example
+ * const matches = [
+ *   { start: 0, end: 10 },    // Page 1
+ *   { start: 100, end: 110 }, // Page 5
+ *   { start: 200, end: 210 }, // Page 10
+ * ];
+ * filterByConstraints(matches, { min: 3, max: 8 }, getId)
+ * // → [{ start: 100, end: 110 }] (only page 5 match)
+ */
+const filterByConstraints = (matches, rule, getId) => {
+  return matches.filter((m) => {
+    const id = getId(m.start);
+    if (rule.min !== void 0 && id < rule.min) return false;
+    if (rule.max !== void 0 && id > rule.max) return false;
+    if (isPageExcluded(id, rule.exclude)) return false;
+    return true;
+  });
+};
+/**
+ * Filters matches based on occurrence setting (first, last, or all).
+ *
+ * Applies occurrence-based selection to a list of matches:
+ * - `'all'` or `undefined`: Return all matches (default)
+ * - `'first'`: Return only the first match
+ * - `'last'`: Return only the last match
+ *
+ * @param matches - Array of match results to filter
+ * @param occurrence - Which occurrence(s) to keep
+ * @returns Filtered array based on occurrence setting
+ *
+ * @example
+ * const matches = [{ start: 0 }, { start: 10 }, { start: 20 }];
+ *
+ * filterByOccurrence(matches, 'first')
+ * // → [{ start: 0 }]
+ *
+ * filterByOccurrence(matches, 'last')
+ * // → [{ start: 20 }]
+ *
+ * filterByOccurrence(matches, 'all')
+ * // → [{ start: 0 }, { start: 10 }, { start: 20 }]
+ *
+ * filterByOccurrence(matches, undefined)
+ * // → [{ start: 0 }, { start: 10 }, { start: 20 }] (default: all)
+ */
+const filterByOccurrence = (matches, occurrence) => {
+  if (!matches.length) return [];
+  if (occurrence === "first") return [matches[0]];
+  if (occurrence === "last") return [matches[matches.length - 1]];
+  return matches;
+};
+/**
+ * Checks if any rule in the list allows the given page ID.
+ *
+ * A rule allows an ID if it falls within the rule's `min`/`max` constraints.
+ * Rules without constraints allow all page IDs.
+ *
+ * This is used to determine whether to create a segment for content
+ * that appears before any split points (the "first segment").
+ *
+ * @param rules - Array of rules with optional `min` and `max` constraints
+ * @param pageId - Page ID to check
+ * @returns `true` if at least one rule allows the page ID
+ *
+ * @example
+ * const rules = [
+ *   { min: 5, max: 10 }, // Allows pages 5-10
+ *   { min: 20 },         // Allows pages 20+
+ * ];
+ *
+ * anyRuleAllowsId(rules, 7) // → true (first rule allows)
+ * anyRuleAllowsId(rules, 3) // → false (no rule allows)
+ * anyRuleAllowsId(rules, 25) // → true (second rule allows)
+ *
+ * @example
+ * // Rules without constraints allow everything
+ * anyRuleAllowsId([{}], 999) // → true
+ */
+const anyRuleAllowsId = (rules, pageId) => {
+  return rules.some((r) => {
+    const minOk = r.min === void 0 || pageId >= r.min;
+    const maxOk = r.max === void 0 || pageId <= r.max;
+    return minOk && maxOk;
+  });
 };
 
 //#endregion
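A short sketch tying the match-utils helpers together (module-internal; direct calls assumed for illustration):

```js
const matches = [{ start: 0 }, { start: 40 }, { start: 90 }];
filterByOccurrence(matches, 'last');  // [{ start: 90 }]
filterByOccurrence(matches, 'first'); // [{ start: 0 }]

// Named captures declared as {{token:name}} surface via match.groups:
const m = /(?<num>[٠-٩]+) -/u.exec('٦٦٩٦ - متن');
extractNamedCaptures(m.groups, ['num']); // { num: '٦٦٩٦' }
```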
-//#region src/
-/**
- *
- *
- *
- *
- *
- * @param template - Template string with {token} placeholders
- * @param options - Optional configuration
- * @returns Regex pattern string with named groups
- *
- * @example
- * expandTemplate('{num} {dash}')
- * // Returns: ^(?<full>(?<marker>[\\u0660-\\u0669]+\\s?[-–—ـ])(?<content>[\\s\\S]*?))
- */
-function expandTemplate(template, options) {
-  const tokenMap = options?.tokens || TOKENS;
-  let expandedMarker = template;
-  for (const [token, pattern] of Object.entries(tokenMap)) {
-    const placeholder = `{${token}}`;
-    expandedMarker = expandedMarker.replaceAll(placeholder, pattern);
-  }
-  return String.raw`^(?<full>(?<marker>${expandedMarker})(?<content>[\s\S]*))`;
-}
-/**
- * Create a custom token map by extending the base tokens.
- *
- * @param customTokens - Custom token definitions
- * @returns Combined token map
- *
- * @example
- * const myTokens = createTokenMap({
- *   verse: '\\[[\\u0660-\\u0669]+\\]',
- *   tafsir: 'تفسير'
- * });
+//#region src/segmentation/textUtils.ts
+/**
+ * Strip all HTML tags from content, keeping only text.
+ *
+ * @param html - HTML content
+ * @returns Plain text content
  */
-
-  return
-
-
-
-
- *
- *
- * @param
- * @
-
-
- * @example
- * validateTemplate('{num} {dash}')
- * // Returns: { valid: true }
- *
- * validateTemplate('{invalid}')
- * // Returns: { valid: false, errors: ['Unknown token: {invalid}'] }
- */
-function validateTemplate(template, tokens = TOKENS) {
-  const unknownTokens = (template.match(/\{(\w+)\}/g) || []).map((t) => t.slice(1, -1)).filter((name) => !tokens[name]);
-  if (unknownTokens.length > 0) return {
-    valid: false,
-    errors: [`Unknown tokens: ${unknownTokens.map((t) => `{${t}}`).join(", ")}`, `Available tokens: ${Object.keys(tokens).map((t) => `{${t}}`).join(", ")}`]
-  };
-  return { valid: true };
-}
+const stripHtmlTags = (html) => {
+  return html.replace(/<[^>]*>/g, "");
+};
+/**
+ * Normalizes line endings to Unix-style (`\n`).
+ *
+ * Converts Windows (`\r\n`) and old Mac (`\r`) line endings to Unix style
+ * for consistent pattern matching across platforms.
+ *
+ * @param content - Raw content with potentially mixed line endings
+ * @returns Content with all line endings normalized to `\n`
+ */
+const normalizeLineEndings = (content) => content.replace(/\r\n?/g, "\n");
 
 //#endregion
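Both text utilities are one-liners; a usage sketch (internal helpers, called directly here for illustration):

```js
stripHtmlTags('<p>نص <b>مهم</b></p>'); // 'نص مهم'
normalizeLineEndings('a\r\nb\rc');     // 'a\nb\nc'
```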
-//#region src/
+//#region src/segmentation/tokens.ts
 /**
- *
+ * Token-based template system for Arabic text pattern matching.
  *
- *
- *
- *
+ * This module provides a human-readable way to define regex patterns using
+ * `{{token}}` placeholders that expand to their regex equivalents. It supports
+ * named capture groups for extracting matched values into metadata.
  *
- * @
- * @returns A compiled RegExp object for matching the pattern
- * @throws {Error} When neither `template` nor `pattern` is provided
+ * @module tokens
  *
  * @example
- * //
- *
+ * // Simple token expansion
+ * expandTokens('{{raqms}} {{dash}}')
+ * // → '[\\u0660-\\u0669]+ [-–—ـ]'
  *
  * @example
- * //
- *
+ * // Named capture groups
+ * expandTokensWithCaptures('{{raqms:num}} {{dash}}')
+ * // → { pattern: '(?<num>[\\u0660-\\u0669]+) [-–—ـ]', captureNames: ['num'], hasCaptures: true }
+ */
+/**
+ * Token definitions mapping human-readable token names to regex patterns.
+ *
+ * Tokens are used in template strings with double-brace syntax:
+ * - `{{token}}` - Expands to the pattern (non-capturing in context)
+ * - `{{token:name}}` - Expands to a named capture group `(?<name>pattern)`
+ * - `{{:name}}` - Captures any content with the given name `(?<name>.+)`
+ *
+ * @remarks
+ * These patterns are designed for Arabic text matching. For diacritic-insensitive
+ * matching of Arabic patterns, use the `fuzzy: true` option in split rules,
+ * which applies `makeDiacriticInsensitive()` to the expanded patterns.
  *
  * @example
- * // Using
- *
- *
- *
- *
- * }
+ * // Using tokens in a split rule
+ * { lineStartsWith: ['{{kitab}}', '{{bab}}'], split: 'at', fuzzy: true }
+ *
+ * @example
+ * // Using tokens with named captures
+ * { lineStartsAfter: ['{{raqms:hadithNum}} {{dash}} '], split: 'at' }
  */
-function generatePatternRegex(config) {
-  if (config.template) {
-    const tokenMap = config.tokens ? createTokenMap(config.tokens) : TOKENS;
-    const pattern = expandTemplate(config.template, { tokens: tokenMap });
-    return new RegExp(pattern, "u");
-  }
-  if (!config.pattern) throw new Error("pattern marker must provide either a template or pattern");
-  return new RegExp(config.pattern, "u");
-}
 /**
- *
+ * Base token definitions mapping human-readable token names to regex patterns.
+ *
+ * These tokens contain raw regex patterns and do not reference other tokens.
+ * For composite tokens that build on these, see `COMPOSITE_TOKENS`.
+ *
+ * @internal
+ */
+const BASE_TOKENS = {
+  bab: "باب",
+  basmalah: "بسم الله|﷽",
+  bullet: "[•*°]",
+  dash: "[-–—ـ]",
+  fasl: "فصل|مسألة",
+  harf: "[أ-ي]",
+  kitab: "كتاب",
+  naql: "حدثنا|أخبرنا|حدثني|وحدثنا|أنبأنا|سمعت",
+  raqm: "[\\u0660-\\u0669]",
+  raqms: "[\\u0660-\\u0669]+",
+  tarqim: "[.!?؟؛]"
+};
+/**
+ * Composite token definitions using template syntax.
+ *
+ * These tokens reference base tokens using `{{token}}` syntax and are
+ * automatically expanded to their final regex patterns at module load time.
+ *
+ * This provides better abstraction - if base tokens change, composites
+ * automatically update on the next build.
+ *
+ * @internal
+ */
+const COMPOSITE_TOKENS = { numbered: "{{raqms}} {{dash}} " };
+/**
+ * Expands base tokens in a template string.
+ * Used internally to pre-expand composite tokens.
+ *
+ * @param template - Template string with `{{token}}` placeholders
+ * @returns Expanded pattern with base tokens replaced
+ * @internal
+ */
+const expandBaseTokens = (template) => {
+  return template.replace(/\{\{(\w+)\}\}/g, (_, tokenName) => {
+    return BASE_TOKENS[tokenName] ?? `{{${tokenName}}}`;
+  });
+};
+/**
+ * Token definitions mapping human-readable token names to regex patterns.
  *
- *
- *
+ * Tokens are used in template strings with double-brace syntax:
+ * - `{{token}}` - Expands to the pattern (non-capturing in context)
+ * - `{{token:name}}` - Expands to a named capture group `(?<name>pattern)`
+ * - `{{:name}}` - Captures any content with the given name `(?<name>.+)`
  *
- * @
+ * @remarks
+ * These patterns are designed for Arabic text matching. For diacritic-insensitive
+ * matching of Arabic patterns, use the `fuzzy: true` option in split rules,
+ * which applies `makeDiacriticInsensitive()` to the expanded patterns.
  *
  * @example
- *
- *
- *
- *
+ * // Using tokens in a split rule
+ * { lineStartsWith: ['{{kitab}}', '{{bab}}'], split: 'at', fuzzy: true }
+ *
+ * @example
+ * // Using tokens with named captures
+ * { lineStartsAfter: ['{{raqms:hadithNum}} {{dash}} '], split: 'at' }
+ *
+ * @example
+ * // Using the numbered convenience token
+ * { lineStartsAfter: ['{{numbered}}'], split: 'at' }
+ */
+const TOKEN_PATTERNS = {
+  ...BASE_TOKENS,
+  ...Object.fromEntries(Object.entries(COMPOSITE_TOKENS).map(([k, v]) => [k, expandBaseTokens(v)]))
+};
+/**
+ * Regex pattern for matching tokens with optional named capture syntax.
+ *
+ * Matches:
+ * - `{{token}}` - Simple token (group 1 = token name, group 2 = empty)
+ * - `{{token:name}}` - Token with capture (group 1 = token, group 2 = name)
+ * - `{{:name}}` - Capture-only (group 1 = empty, group 2 = name)
+ *
+ * @internal
+ */
+const TOKEN_WITH_CAPTURE_REGEX = /\{\{(\w*):?(\w*)\}\}/g;
+/**
+ * Regex pattern for simple token matching (no capture syntax).
+ *
+ * Matches only `{{token}}` format where token is one or more word characters.
+ * Used by `containsTokens()` for quick detection.
+ *
+ * @internal
  */
-
-const babPattern = makeDiacriticInsensitive("باب");
-const pattern = String.raw`^(?<full>(?<marker>${babPattern}[ًٌٍَُ]?)(?<content>[\s\S]*))`;
-return new RegExp(pattern, "u");
-}
+const SIMPLE_TOKEN_REGEX = /\{\{(\w+)\}\}/g;
 /**
- *
+ * Checks if a query string contains template tokens.
  *
- *
- *
- *
+ * Performs a quick test for `{{token}}` patterns without actually
+ * expanding them. Useful for determining whether to apply token
+ * expansion to a string.
  *
- * @param
- * @returns
+ * @param query - String to check for tokens
+ * @returns `true` if the string contains at least one `{{token}}` pattern
  *
  * @example
- * //
- *
- *
+ * containsTokens('{{raqms}} {{dash}}') // → true
+ * containsTokens('plain text') // → false
+ * containsTokens('[٠-٩]+ - ') // → false (raw regex, no tokens)
+ */
+const containsTokens = (query) => {
+  SIMPLE_TOKEN_REGEX.lastIndex = 0;
+  return SIMPLE_TOKEN_REGEX.test(query);
+};
+/**
+ * Expands template tokens with support for named captures.
+ *
+ * This is the primary token expansion function that handles all token syntax:
+ * - `{{token}}` → Expands to the token's pattern (no capture group)
+ * - `{{token:name}}` → Expands to `(?<name>pattern)` (named capture)
+ * - `{{:name}}` → Expands to `(?<name>.+)` (capture anything)
+ *
+ * Unknown tokens are left as-is in the output, allowing for partial templates.
+ *
+ * @param query - The template string containing tokens
+ * @param fuzzyTransform - Optional function to transform Arabic text for fuzzy matching.
+ *   Applied to both token patterns and plain Arabic text between tokens.
+ *   Typically `makeDiacriticInsensitive` from the fuzzy module.
+ * @returns Object with expanded pattern, capture names, and capture flag
  *
  * @example
- * //
- *
- *
- *
- *
+ * // Simple token expansion
+ * expandTokensWithCaptures('{{raqms}} {{dash}}')
+ * // → { pattern: '[\\u0660-\\u0669]+ [-–—ـ]', captureNames: [], hasCaptures: false }
+ *
+ * @example
+ * // Named capture
+ * expandTokensWithCaptures('{{raqms:num}} {{dash}}')
+ * // → { pattern: '(?<num>[\\u0660-\\u0669]+) [-–—ـ]', captureNames: ['num'], hasCaptures: true }
+ *
+ * @example
+ * // Capture-only token
+ * expandTokensWithCaptures('{{raqms:num}} {{dash}} {{:content}}')
+ * // → { pattern: '(?<num>[٠-٩]+) [-–—ـ] (?<content>.+)', captureNames: ['num', 'content'], hasCaptures: true }
+ *
+ * @example
+ * // With fuzzy transform
+ * expandTokensWithCaptures('{{bab}}', makeDiacriticInsensitive)
+ * // → { pattern: 'بَ?ا?بٌ?', captureNames: [], hasCaptures: false }
  */
-
-const
-const
-
-
+const expandTokensWithCaptures = (query, fuzzyTransform) => {
+  const captureNames = [];
+  const segments = [];
+  let lastIndex = 0;
+  TOKEN_WITH_CAPTURE_REGEX.lastIndex = 0;
+  let match;
+  while ((match = TOKEN_WITH_CAPTURE_REGEX.exec(query)) !== null) {
+    if (match.index > lastIndex) segments.push({
+      type: "text",
+      value: query.slice(lastIndex, match.index)
+    });
+    segments.push({
+      type: "token",
+      value: match[0]
+    });
+    lastIndex = match.index + match[0].length;
+  }
+  if (lastIndex < query.length) segments.push({
+    type: "text",
+    value: query.slice(lastIndex)
+  });
+  const processedParts = segments.map((segment) => {
+    if (segment.type === "text") {
+      if (fuzzyTransform && /[\u0600-\u06FF]/.test(segment.value)) return fuzzyTransform(segment.value);
+      return segment.value;
+    }
+    TOKEN_WITH_CAPTURE_REGEX.lastIndex = 0;
+    const tokenMatch = TOKEN_WITH_CAPTURE_REGEX.exec(segment.value);
+    if (!tokenMatch) return segment.value;
+    const [, tokenName, captureName] = tokenMatch;
+    if (!tokenName && captureName) {
+      captureNames.push(captureName);
+      return `(?<${captureName}>.+)`;
+    }
+    let tokenPattern = TOKEN_PATTERNS[tokenName];
+    if (!tokenPattern) return segment.value;
+    if (fuzzyTransform) tokenPattern = tokenPattern.split("|").map((part) => /[\u0600-\u06FF]/.test(part) ? fuzzyTransform(part) : part).join("|");
+    if (captureName) {
+      captureNames.push(captureName);
+      return `(?<${captureName}>${tokenPattern})`;
+    }
+    return tokenPattern;
+  });
+  return {
+    captureNames,
+    hasCaptures: captureNames.length > 0,
+    pattern: processedParts.join("")
+  };
+};
 /**
- *
+ * Expands template tokens in a query string to their regex equivalents.
  *
- *
- *
- * - بِسْمِ اللَّهِ (with diacritics)
- * - Special patterns like [بسم, [تم
+ * This is the simple version without capture support. It returns only the
+ * expanded pattern string, not capture metadata.
  *
- *
+ * Unknown tokens are left as-is, allowing for partial templates.
+ *
+ * @param query - Template string containing `{{token}}` placeholders
+ * @returns Expanded regex pattern string
  *
  * @example
- *
- *
- * //
+ * expandTokens('، {{raqms}}') // → '، [\\u0660-\\u0669]+'
+ * expandTokens('{{raqm}}*') // → '[\\u0660-\\u0669]*'
+ * expandTokens('{{dash}}{{raqm}}') // → '[-–—ـ][\\u0660-\\u0669]'
+ * expandTokens('{{unknown}}') // → '{{unknown}}' (left as-is)
+ *
+ * @see expandTokensWithCaptures for full capture group support
  */
-
-const combinedPattern = DEFAULT_BASMALA_PATTERNS.map((p) => makeDiacriticInsensitive(p)).join("|");
-const pattern = String.raw`^(?<full>(?<marker>${combinedPattern})(?<content>[\s\S]*))`;
-return new RegExp(pattern, "u");
-}
+const expandTokens = (query) => expandTokensWithCaptures(query).pattern;
 /**
- *
+ * Converts a template string to a compiled RegExp.
+ *
+ * Expands all tokens and attempts to compile the result as a RegExp
+ * with Unicode flag. Returns `null` if the resulting pattern is invalid.
  *
- *
- *
+ * @remarks
+ * This function dynamically compiles regular expressions from template strings.
+ * If templates may come from untrusted sources, be aware of potential ReDoS
+ * (Regular Expression Denial of Service) risks due to catastrophic backtracking.
+ * Consider validating pattern complexity or applying execution timeouts when
+ * running user-submitted patterns.
  *
- * @param
- * @returns
- * @throws {Error} When `phrases` is undefined or empty
+ * @param template - Template string containing `{{token}}` placeholders
+ * @returns Compiled RegExp with 'u' flag, or `null` if invalid
  *
  * @example
- *
- *
- *
- * });
+ * templateToRegex('، {{raqms}}') // → /، [٠-٩]+/u
+ * templateToRegex('{{raqms}}+') // → /[٠-٩]++/u (might be invalid in some engines)
+ * templateToRegex('(((') // → null (invalid regex)
  */
-
-
-
-
-
-
+const templateToRegex = (template) => {
+  const expanded = expandTokens(template);
+  try {
+    return new RegExp(expanded, "u");
+  } catch {
+    return null;
+  }
+};
 /**
- *
+ * Lists all available token names defined in `TOKEN_PATTERNS`.
  *
- *
- *
- * - • [٦٥] - With bullet prefix
- * - ° [٦٥] - With degree prefix
+ * Useful for documentation, validation, or building user interfaces
+ * that show available tokens.
  *
- * @returns
+ * @returns Array of token names (e.g., `['bab', 'basmala', 'bullet', ...]`)
  *
  * @example
- *
- *
- * // match.groups.content -> ' نص الحديث'
+ * getAvailableTokens()
+ * // → ['bab', 'basmala', 'bullet', 'dash', 'harf', 'kitab', 'naql', 'raqm', 'raqms']
  */
-
-const markerPattern = String.raw`[•°]?\s?\[[\u0660-\u0669]+\]\s?`;
-const pattern = String.raw`^(?<full>(?<marker>${markerPattern})(?<content>[\s\S]*))`;
-return new RegExp(pattern, "u");
-}
+const getAvailableTokens = () => Object.keys(TOKEN_PATTERNS);
 /**
- *
+ * Gets the regex pattern for a specific token name.
  *
- *
- *
- * - 5 ب. (Latin number, Arabic letter, dot)
+ * Returns the raw pattern string as defined in `TOKEN_PATTERNS`,
+ * without any expansion or capture group wrapping.
  *
- * @param
- * @returns
+ * @param tokenName - The token name to look up (e.g., 'raqms', 'dash')
+ * @returns The regex pattern string, or `undefined` if token doesn't exist
  *
  * @example
- *
- *
- *
- * });
- * const match = regex.exec('٥ أ - نص');
+ * getTokenPattern('raqms') // → '[\\u0660-\\u0669]+'
+ * getTokenPattern('dash') // → '[-–—ـ]'
+ * getTokenPattern('unknown') // → undefined
  */
-
-
-
-
-  const pattern = String.raw`^(?<full>(?<marker>${markerPattern})(?<content>[\s\S]*))`;
-  return new RegExp(pattern, "u");
-}
+const getTokenPattern = (tokenName) => TOKEN_PATTERNS[tokenName];
+
+//#endregion
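End to end, token expansion feeds straight into RegExp construction. A sketch assuming `expandTokensWithCaptures` is reachable (only its definition is shown in this diff, not its export):

```js
const { pattern, captureNames } = expandTokensWithCaptures('{{raqms:num}} {{dash}} ');
// captureNames → ['num']

// Wrapped the same way buildRuleRegex handles lineStartsAfter:
const regex = new RegExp(`^(?:${pattern})(.*)`, 'u');
const match = regex.exec('٦٦٩٦ - حدثنا أبو بكر');
match.groups.num;                // '٦٦٩٦'
getLastPositionalCapture(match); // 'حدثنا أبو بكر' (the trailing (.*) capture)
```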
+//#region src/segmentation/segmenter.ts
 /**
- *
+ * Core segmentation engine for splitting Arabic text pages into logical segments.
  *
- *
- *
- *
+ * The segmenter takes an array of pages and applies pattern-based rules to
+ * identify split points, producing segments with content, page references,
+ * and optional metadata.
  *
- * @
-
+ * @module segmenter
+ */
+/**
+ * Checks if a regex pattern contains standard (anonymous) capturing groups.
+ *
+ * Detects standard capturing groups `(...)` while excluding:
+ * - Non-capturing groups `(?:...)`
+ * - Lookahead assertions `(?=...)` and `(?!...)`
+ * - Lookbehind assertions `(?<=...)` and `(?<!...)`
+ * - Named groups `(?<name>...)` (start with `(?` so excluded here)
+ *
+ * **Note**: Named capture groups `(?<name>...)` ARE capturing groups but are
+ * excluded by this check because they are tracked separately via the
+ * `captureNames` array from token expansion. This function only detects
+ * anonymous capturing groups like `(.*)`.
+ *
+ * @param pattern - Regex pattern string to analyze
+ * @returns `true` if the pattern contains at least one anonymous capturing group
+ */
+const hasCapturingGroup = (pattern) => {
+  return /\((?!\?)/.test(pattern);
+};
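The anonymous-group check is just a negative lookahead on `(`; a few cases, following directly from the regex above:

```js
hasCapturingGroup('^(?:باب)(.*)');   // true  — trailing (.*) is anonymous
hasCapturingGroup('^(?:باب)$');      // false — non-capturing group only
hasCapturingGroup('(?<num>[٠-٩]+)'); // false — named groups tracked via captureNames
```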
983
|
+
/**
|
|
984
|
+
* Processes a pattern string by expanding tokens and optionally applying fuzzy matching.
|
|
985
|
+
*
|
|
986
|
+
* Fuzzy matching makes Arabic text diacritic-insensitive. When enabled, the
|
|
987
|
+
* transform is applied to token patterns BEFORE wrapping with capture groups,
|
|
988
|
+
* ensuring regex metacharacters (`(`, `)`, `|`, etc.) are not corrupted.
|
|
989
|
+
*
|
|
990
|
+
* @param pattern - Pattern string potentially containing `{{token}}` placeholders
|
|
991
|
+
* @param fuzzy - Whether to apply diacritic-insensitive transformation
|
|
992
|
+
* @returns Processed pattern with expanded tokens and capture names
|
|
335
993
|
*
|
|
336
994
|
* @example
|
|
337
|
-
*
|
|
338
|
-
*
|
|
339
|
-
*
|
|
340
|
-
*
|
|
341
|
-
*
|
|
995
|
+
* processPattern('{{raqms:num}} {{dash}}', false)
|
|
996
|
+
* // → { pattern: '(?<num>[٠-٩]+) [-–—ـ]', captureNames: ['num'] }
|
|
997
|
+
*
|
|
998
|
+
* @example
|
|
999
|
+
* processPattern('{{naql}}', true)
|
|
1000
|
+
* // → { pattern: 'حَ?دَّ?ثَ?نَ?ا|...', captureNames: [] }
|
|
342
1001
|
*/
|
|
343
|
-
|
|
344
|
-
const
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
}
|
|
1002
|
+
const processPattern = (pattern, fuzzy) => {
|
|
1003
|
+
const { pattern: expanded, captureNames } = expandTokensWithCaptures(pattern, fuzzy ? makeDiacriticInsensitive : void 0);
|
|
1004
|
+
return {
|
|
1005
|
+
captureNames,
|
|
1006
|
+
pattern: expanded
|
|
1007
|
+
};
|
|
1008
|
+
};
|
  /**
- *
+ * Builds a compiled regex and metadata from a split rule.
  *
- *
- * -
- * -
+ * Handles all pattern types:
+ * - `regex`: Used as-is (no token expansion)
+ * - `template`: Tokens expanded via `expandTokensWithCaptures`
+ * - `lineStartsWith`: Converted to `^(?:patterns...)`
+ * - `lineStartsAfter`: Converted to `^(?:patterns...)(.*)`
+ * - `lineEndsWith`: Converted to `(?:patterns...)$`
+ *
+ * @param rule - Split rule containing pattern and options
+ * @returns Compiled regex with capture metadata
+ */
+ const buildRuleRegex = (rule) => {
+   const s = { ...rule };
+   const fuzzy = rule.fuzzy ?? false;
+   let allCaptureNames = [];
+   /**
+    * Safely compiles a regex pattern, throwing a helpful error if invalid.
+    *
+    * @remarks
+    * This catches syntax errors only. It does NOT protect against ReDoS
+    * (catastrophic backtracking) from pathological patterns. Avoid compiling
+    * patterns from untrusted sources.
+    */
+   const compileRegex = (pattern) => {
+     try {
+       return new RegExp(pattern, "gmu");
+     } catch (error) {
+       const message = error instanceof Error ? error.message : String(error);
+       throw new Error(`Invalid regex pattern: ${pattern}\n Cause: ${message}`);
+     }
+   };
+   if (s.lineStartsAfter?.length) {
+     const processed = s.lineStartsAfter.map((p) => processPattern(p, fuzzy));
+     const patterns = processed.map((p) => p.pattern).join("|");
+     allCaptureNames = processed.flatMap((p) => p.captureNames);
+     s.regex = `^(?:${patterns})(.*)`;
+     return {
+       captureNames: allCaptureNames,
+       regex: compileRegex(s.regex),
+       usesCapture: true,
+       usesLineStartsAfter: true
+     };
+   }
+   if (s.lineStartsWith?.length) {
+     const processed = s.lineStartsWith.map((p) => processPattern(p, fuzzy));
+     const patterns = processed.map((p) => p.pattern).join("|");
+     allCaptureNames = processed.flatMap((p) => p.captureNames);
+     s.template = `^(?:${patterns})`;
+   }
+   if (s.lineEndsWith?.length) {
+     const processed = s.lineEndsWith.map((p) => processPattern(p, fuzzy));
+     const patterns = processed.map((p) => p.pattern).join("|");
+     allCaptureNames = processed.flatMap((p) => p.captureNames);
+     s.template = `(?:${patterns})$`;
+   }
+   if (s.template) {
+     const { pattern, captureNames } = expandTokensWithCaptures(s.template);
+     s.regex = pattern;
+     allCaptureNames = [...allCaptureNames, ...captureNames];
+   }
+   if (!s.regex) throw new Error("Rule must specify exactly one pattern type: regex, template, lineStartsWith, lineStartsAfter, or lineEndsWith");
+   const usesCapture = hasCapturingGroup(s.regex) || allCaptureNames.length > 0;
+   return {
+     captureNames: allCaptureNames,
+     regex: compileRegex(s.regex),
+     usesCapture,
+     usesLineStartsAfter: false
+   };
+ };
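
The five pattern types are mutually exclusive per rule; a hedged sketch of the corresponding rule shapes (values illustrative, not from the diff):

const rules = [
  { regex: '^[٠-٩]+ - ', split: 'at' },                      // used as-is, no token expansion
  { template: '{{raqms:num}} {{dash}}', split: 'at' },       // tokens expanded with named captures
  { lineStartsWith: ['{{bab}}'], split: 'at' },              // compiled to ^(?:...)
  { lineStartsAfter: ['{{raqms}} {{dash}} '], split: 'at' }, // compiled to ^(?:...)(.*), capturing the rest of the line
  { lineEndsWith: ['{{tarqim}}'], split: 'at' },             // compiled to (?:...)$
];
// Any split value other than 'at' places the boundary at the match end
// (see the index computation in segmentPages below).
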
+ /**
+ * Builds a concatenated content string and page mapping from input pages.
  *
- *
+ * Pages are joined with newline characters, and a page map is created to
+ * track which page each offset belongs to. This allows pattern matching
+ * across page boundaries while preserving page reference information.
  *
- * @param
- * @returns
+ * @param pages - Array of input pages with id and content
+ * @returns Concatenated content string and page mapping utilities
  *
  * @example
- * const
- *
- *
- *
- * const
- *
+ * const pages = [
+ *   { id: 1, content: 'Page 1 text' },
+ *   { id: 2, content: 'Page 2 text' }
+ * ];
+ * const { content, pageMap } = buildPageMap(pages);
+ * // content = 'Page 1 text\nPage 2 text'
+ * // pageMap.getId(0) = 1
+ * // pageMap.getId(12) = 2
+ */
+ const buildPageMap = (pages) => {
+   const boundaries = [];
+   const pageBreaks = [];
+   let offset = 0;
+   const parts = [];
+   for (let i = 0; i < pages.length; i++) {
+     const normalized = normalizeLineEndings(pages[i].content);
+     boundaries.push({
+       end: offset + normalized.length,
+       id: pages[i].id,
+       start: offset
+     });
+     parts.push(normalized);
+     if (i < pages.length - 1) {
+       pageBreaks.push(offset + normalized.length);
+       offset += normalized.length + 1;
+     } else offset += normalized.length;
+   }
+   /**
+    * Finds the page boundary containing the given offset using binary search.
+    * O(log n) complexity for efficient lookup with many pages.
+    *
+    * @param off - Character offset to look up
+    * @returns Page boundary or the last boundary as fallback
+    */
+   const findBoundary = (off) => {
+     let lo = 0;
+     let hi = boundaries.length - 1;
+     while (lo <= hi) {
+       const mid = lo + hi >>> 1;
+       const b = boundaries[mid];
+       if (off < b.start) hi = mid - 1;
+       else if (off > b.end) lo = mid + 1;
+       else return b;
+     }
+     return boundaries[boundaries.length - 1];
+   };
+   return {
+     content: parts.join("\n"),
+     normalizedPages: parts,
+     pageMap: {
+       boundaries,
+       getId: (off) => findBoundary(off)?.id ?? 0,
+       pageBreaks,
+       pageIds: boundaries.map((b) => b.id)
+     }
+   };
+ };
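
To make the offset arithmetic in the JSDoc example concrete: 'Page 1 text' occupies offsets 0–10, the joining '\n' sits at offset 11 (and is recorded in pageBreaks), and 'Page 2 text' begins at offset 12 — hence getId(0) → 1 and getId(12) → 2.
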
+ /**
+ * Executes a regex against content and extracts match results with capture information.
+ *
+ * @param content - Full content string to search
+ * @param regex - Compiled regex with 'g' flag
+ * @param usesCapture - Whether to extract captured content
+ * @param captureNames - Names of expected named capture groups
+ * @returns Array of match results with positions and captures
+ */
+ const findMatches = (content, regex, usesCapture, captureNames) => {
+   const matches = [];
+   regex.lastIndex = 0;
+   let m = regex.exec(content);
+   while (m !== null) {
+     const result = {
+       end: m.index + m[0].length,
+       start: m.index
+     };
+     result.namedCaptures = extractNamedCaptures(m.groups, captureNames);
+     if (usesCapture) result.captured = getLastPositionalCapture(m);
+     matches.push(result);
+     if (m[0].length === 0) regex.lastIndex++;
+     m = regex.exec(content);
+   }
+   return matches;
+ };
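
The `if (m[0].length === 0) regex.lastIndex++;` guard prevents an infinite loop whenever a user-supplied pattern can match the empty string. A standalone illustration in plain JavaScript (generic regex behaviour, not package API):

const re = /a*/g; // can match the empty string
let m;
const starts = [];
while ((m = re.exec('ba')) !== null) {
  starts.push(m.index);
  if (m[0].length === 0) re.lastIndex++; // same guard as findMatches
}
// starts → [0, 1, 2]; without the guard, exec would return the zero-width
// match at index 0 forever.
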
+ /**
+ * Finds page breaks within a given offset range using binary search.
+ * O(log n + k) where n = total breaks, k = breaks in range.
+ *
+ * @param startOffset - Start of range (inclusive)
+ * @param endOffset - End of range (exclusive)
+ * @param sortedBreaks - Sorted array of page break offsets
+ * @returns Array of break offsets relative to startOffset
+ */
+ const findBreaksInRange = (startOffset, endOffset, sortedBreaks) => {
+   if (sortedBreaks.length === 0) return [];
+   let lo = 0;
+   let hi = sortedBreaks.length;
+   while (lo < hi) {
+     const mid = lo + hi >>> 1;
+     if (sortedBreaks[mid] < startOffset) lo = mid + 1;
+     else hi = mid;
+   }
+   const result = [];
+   for (let i = lo; i < sortedBreaks.length && sortedBreaks[i] < endOffset; i++) result.push(sortedBreaks[i] - startOffset);
+   return result;
+ };
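
Worked example: with sortedBreaks = [5, 12, 20], findBreaksInRange(10, 25, sortedBreaks) lower-bounds to the first break ≥ 10 (namely 12), then collects breaks below 25, returning [2, 10] relative to the start offset.
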
+ /**
+ * Converts page-break newlines to spaces in segment content.
+ *
+ * When a segment spans multiple pages, the newline characters that were
+ * inserted as page separators during concatenation are converted to spaces
+ * for more natural reading.
+ *
+ * Uses binary search for O(log n + k) lookup instead of O(n) iteration.
+ *
+ * @param content - Segment content string
+ * @param startOffset - Starting offset of this content in concatenated string
+ * @param pageBreaks - Sorted array of page break offsets
+ * @returns Content with page-break newlines converted to spaces
  */
-
- const
-
- const
-
-
- }
+ const convertPageBreaks = (content, startOffset, pageBreaks) => {
+   const breaksInRange = findBreaksInRange(startOffset, startOffset + content.length, pageBreaks);
+   if (breaksInRange.length === 0) return content;
+   const breakSet = new Set(breaksInRange);
+   return content.replace(/\n/g, (match, offset) => breakSet.has(offset) ? " " : match);
+ };
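
Concretely: if pages 'abc' and 'def' were concatenated as 'abc\ndef', a segment covering both has the offset-3 newline rewritten to a space ('abc def') because that offset is listed in pageMap.pageBreaks, while newlines that were already inside a single page's own text are left untouched.
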
  /**
- *
+ * Applies breakpoints to oversized segments.
+ *
+ * For each segment that spans more than maxPages, tries the breakpoint patterns
+ * in order to find a suitable split point. Structural markers (from rules) are
+ * always respected - segments are only broken within their boundaries.
  *
- *
- *
- *
+ * @param segments - Initial segments from rule processing
+ * @param pages - Original pages for page lookup
+ * @param maxPages - Maximum pages before breakpoints apply
+ * @param breakpoints - Patterns to try in order (tokens supported)
+ * @param prefer - 'longer' for last match, 'shorter' for first match
+ * @returns Processed segments with oversized ones broken up
+ */
+ const applyBreakpoints = (segments, pages, normalizedContent, maxPages, breakpoints, prefer, logger) => {
+   const findExclusionBreakPosition = (currentFromIdx, windowEndIdx, toIdx, pageIds$1, expandedBreakpoints$1, cumulativeOffsets$1) => {
+     const startingPageId = pageIds$1[currentFromIdx];
+     if (expandedBreakpoints$1.some((bp) => bp.excludeSet.has(startingPageId)) && currentFromIdx < toIdx) return cumulativeOffsets$1[currentFromIdx + 1] - cumulativeOffsets$1[currentFromIdx];
+     for (let pageIdx = currentFromIdx + 1; pageIdx <= windowEndIdx; pageIdx++) {
+       const pageId = pageIds$1[pageIdx];
+       if (expandedBreakpoints$1.some((bp) => bp.excludeSet.has(pageId))) return cumulativeOffsets$1[pageIdx] - cumulativeOffsets$1[currentFromIdx];
+     }
+     return -1;
+   };
+   const pageIds = pages.map((p) => p.id);
+   const pageIdToIndex = new Map(pageIds.map((id, i) => [id, i]));
+   const normalizedPages = /* @__PURE__ */ new Map();
+   for (let i = 0; i < pages.length; i++) {
+     const content = normalizedContent[i];
+     normalizedPages.set(pages[i].id, {
+       content,
+       index: i,
+       length: content.length
+     });
+   }
+   const cumulativeOffsets = [0];
+   let totalOffset = 0;
+   for (let i = 0; i < pageIds.length; i++) {
+     const pageData = normalizedPages.get(pageIds[i]);
+     totalOffset += pageData ? pageData.length : 0;
+     if (i < pageIds.length - 1) totalOffset += 1;
+     cumulativeOffsets.push(totalOffset);
+   }
+   const patternProcessor = (p) => processPattern(p, false).pattern;
+   const expandedBreakpoints = expandBreakpoints(breakpoints, patternProcessor);
+   const result = [];
+   logger?.info?.("Starting breakpoint processing", {
+     maxPages,
+     segmentCount: segments.length
+   });
+   for (const segment of segments) {
+     const fromIdx = pageIdToIndex.get(segment.from) ?? -1;
+     const toIdx = segment.to !== void 0 ? pageIdToIndex.get(segment.to) ?? fromIdx : fromIdx;
+     logger?.debug?.("Processing segment", {
+       contentLength: segment.content.length,
+       contentPreview: segment.content.slice(0, 100),
+       from: segment.from,
+       fromIdx,
+       to: segment.to,
+       toIdx
+     });
+     const segmentSpan = (segment.to ?? segment.from) - segment.from;
+     const hasExclusions = expandedBreakpoints.some((bp) => hasExcludedPageInRange(bp.excludeSet, pageIds, fromIdx, toIdx));
+     if (segmentSpan <= maxPages && !hasExclusions) {
+       logger?.trace?.("Segment within limit, keeping as-is");
+       result.push(segment);
+       continue;
+     }
+     logger?.debug?.("Segment exceeds limit or has exclusions, breaking it up");
+     let remainingContent = segment.content;
+     let currentFromIdx = fromIdx;
+     let isFirstPiece = true;
+     let iterationCount = 0;
+     const maxIterations = 1e4;
+     while (currentFromIdx <= toIdx) {
+       iterationCount++;
+       if (iterationCount > maxIterations) {
+         logger?.error?.("INFINITE LOOP DETECTED! Breaking out", { iterationCount: maxIterations });
+         logger?.error?.("Loop state", {
+           currentFromIdx,
+           remainingContentLength: remainingContent.length,
+           toIdx
+         });
+         break;
+       }
+       const remainingSpan = pageIds[toIdx] - pageIds[currentFromIdx];
+       logger?.trace?.("Loop iteration", {
+         currentFromIdx,
+         currentPageId: pageIds[currentFromIdx],
+         iterationCount,
+         remainingContentLength: remainingContent.length,
+         remainingContentPreview: remainingContent.slice(0, 80),
+         remainingSpan,
+         toIdx,
+         toPageId: pageIds[toIdx]
+       });
+       const remainingHasExclusions = expandedBreakpoints.some((bp) => hasExcludedPageInRange(bp.excludeSet, pageIds, currentFromIdx, toIdx));
+       if (remainingSpan <= maxPages && !remainingHasExclusions) {
+         logger?.debug?.("Remaining span within limit, outputting final segment");
+         const finalSeg = createSegment(remainingContent, pageIds[currentFromIdx], currentFromIdx !== toIdx ? pageIds[toIdx] : void 0, isFirstPiece ? segment.meta : void 0);
+         if (finalSeg) result.push(finalSeg);
+         break;
+       }
+       const currentPageId = pageIds[currentFromIdx];
+       const maxWindowPageId = currentPageId + maxPages;
+       let windowEndIdx = currentFromIdx;
+       for (let i = currentFromIdx; i <= toIdx; i++) if (pageIds[i] <= maxWindowPageId) windowEndIdx = i;
+       else break;
+       logger?.trace?.("Window calculation", {
+         currentPageId,
+         maxWindowPageId,
+         windowEndIdx,
+         windowEndPageId: pageIds[windowEndIdx]
+       });
+       const windowHasExclusions = expandedBreakpoints.some((bp) => hasExcludedPageInRange(bp.excludeSet, pageIds, currentFromIdx, windowEndIdx));
+       let breakPosition = -1;
+       if (windowHasExclusions) {
+         logger?.trace?.("Window has exclusions, finding exclusion break position");
+         breakPosition = findExclusionBreakPosition(currentFromIdx, windowEndIdx, toIdx, pageIds, expandedBreakpoints, cumulativeOffsets);
+         logger?.trace?.("Exclusion break position", { breakPosition });
+       }
+       if (breakPosition <= 0) {
+         const breakpointCtx = {
+           cumulativeOffsets,
+           expandedBreakpoints,
+           normalizedPages,
+           pageIds,
+           prefer
+         };
+         logger?.trace?.("Finding break position using patterns...");
+         breakPosition = findBreakPosition(remainingContent, currentFromIdx, toIdx, windowEndIdx, breakpointCtx);
+         logger?.trace?.("Pattern break position", { breakPosition });
+       }
+       if (breakPosition <= 0) {
+         logger?.debug?.("No pattern matched, falling back to page boundary");
+         if (windowEndIdx === currentFromIdx) {
+           logger?.trace?.("Single page window, outputting page and advancing");
+           const pageContent = cumulativeOffsets[currentFromIdx + 1] !== void 0 ? remainingContent.slice(0, cumulativeOffsets[currentFromIdx + 1] - cumulativeOffsets[currentFromIdx]) : remainingContent;
+           const pageSeg = createSegment(pageContent.trim(), pageIds[currentFromIdx], void 0, isFirstPiece ? segment.meta : void 0);
+           if (pageSeg) result.push(pageSeg);
+           remainingContent = remainingContent.slice(pageContent.length).trim();
+           currentFromIdx++;
+           isFirstPiece = false;
+           logger?.trace?.("After single page", {
+             currentFromIdx,
+             remainingContentLength: remainingContent.length
+           });
+           continue;
+         }
+         breakPosition = cumulativeOffsets[windowEndIdx + 1] - cumulativeOffsets[currentFromIdx];
+         logger?.trace?.("Multi-page window, using full window break position", { breakPosition });
+       }
+       const pieceContent = remainingContent.slice(0, breakPosition).trim();
+       logger?.trace?.("Piece extracted", {
+         breakPosition,
+         pieceContentLength: pieceContent.length,
+         pieceContentPreview: pieceContent.slice(0, 80)
+       });
+       const actualStartIdx = pieceContent ? findActualStartPage(pieceContent, currentFromIdx, toIdx, pageIds, normalizedPages) : currentFromIdx;
+       const actualEndIdx = pieceContent ? findActualEndPage(pieceContent, actualStartIdx, windowEndIdx, pageIds, normalizedPages) : currentFromIdx;
+       logger?.trace?.("Actual page indices", {
+         actualEndIdx,
+         actualStartIdx,
+         pieceHasContent: !!pieceContent
+       });
+       if (pieceContent) {
+         const pieceSeg = createSegment(pieceContent, pageIds[actualStartIdx], actualEndIdx > actualStartIdx ? pageIds[actualEndIdx] : void 0, isFirstPiece ? segment.meta : void 0);
+         if (pieceSeg) {
+           result.push(pieceSeg);
+           logger?.debug?.("Created segment", {
+             contentLength: pieceSeg.content.length,
+             from: pieceSeg.from,
+             to: pieceSeg.to
+           });
+         }
+       }
+       const prevRemainingLength = remainingContent.length;
+       remainingContent = remainingContent.slice(breakPosition).trim();
+       logger?.trace?.("After slicing remainingContent", {
+         newLength: remainingContent.length,
+         prevLength: prevRemainingLength,
+         slicedAmount: breakPosition
+       });
+       if (!remainingContent) {
+         logger?.debug?.("No remaining content, breaking out of loop");
+         break;
+       }
+       let nextFromIdx = actualEndIdx;
+       if (actualEndIdx + 1 <= toIdx) {
+         const nextPageData = normalizedPages.get(pageIds[actualEndIdx + 1]);
+         if (nextPageData) {
+           const nextPrefix = nextPageData.content.slice(0, Math.min(30, nextPageData.length));
+           if (nextPrefix && remainingContent.startsWith(nextPrefix)) {
+             nextFromIdx = actualEndIdx + 1;
+             logger?.trace?.("Content starts with next page prefix", { advancingTo: nextFromIdx });
+           }
+         }
+       }
+       logger?.trace?.("End of iteration", {
+         nextFromIdx,
+         prevCurrentFromIdx: currentFromIdx,
+         willAdvance: nextFromIdx !== currentFromIdx
+       });
+       currentFromIdx = nextFromIdx;
+       isFirstPiece = false;
+     }
+   }
+   logger?.info?.("Breakpoint processing completed", { resultCount: result.length });
+   return result;
+ };
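
Callers drive all of this through segmentPages options; a hedged usage sketch (the breakpoint pattern is illustrative — tokens are supported, per the JSDoc above):

import { segmentPages } from 'flappa-doormal';

const pages = [
  { id: 1, content: '...' }, // long run of pages elided for brevity
  { id: 2, content: '...' },
];
// Segments spanning more than maxPages pages are re-split at breakpoint
// matches; prefer: 'shorter' takes the first match, 'longer' the last.
const segments = segmentPages(pages, {
  rules: [{ lineStartsWith: ['{{kitab}}'], split: 'at', meta: { type: 'book' } }],
  maxPages: 3,
  breakpoints: ['{{tarqim}}'],
  prefer: 'shorter',
});
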
+ /**
+ * Segments pages of content based on pattern-matching rules.
  *
- *
- *
- *
+ * This is the main entry point for the segmentation engine. It takes an array
+ * of pages and applies the provided rules to identify split points, producing
+ * an array of segments with content, page references, and metadata.
  *
- * @param
- * @
+ * @param pages - Array of pages with id and content
+ * @param options - Segmentation options including splitting rules
+ * @returns Array of segments with content, from/to page references, and optional metadata
  *
  * @example
- * //
- * const
- *
- *
- *
+ * // Split markdown by headers
+ * const segments = segmentPages(pages, {
+ *   rules: [
+ *     { lineStartsWith: ['## '], split: 'at', meta: { type: 'chapter' } }
+ *   ]
  * });
  *
  * @example
- * //
- * const
- *
- *
+ * // Split Arabic hadith text with number extraction
+ * const segments = segmentPages(pages, {
+ *   rules: [
+ *     {
+ *       lineStartsAfter: ['{{raqms:hadithNum}} {{dash}} '],
+ *       split: 'at',
+ *       fuzzy: true,
+ *       meta: { type: 'hadith' }
+ *     }
+ *   ]
  * });
- * const match = regex.exec('٥ - نص');
  *
  * @example
- * //
- * const
- *
- *
+ * // Multiple rules with page constraints
+ * const segments = segmentPages(pages, {
+ *   rules: [
+ *     { lineStartsWith: ['{{kitab}}'], split: 'at', meta: { type: 'book' } },
+ *     { lineStartsWith: ['{{bab}}'], split: 'at', min: 10, meta: { type: 'chapter' } },
+ *     { regex: '^[٠-٩]+ - ', split: 'at', meta: { type: 'hadith' } }
+ *   ]
  * });
- * const match = regex.exec('5 text');
  */
-
-
-
-
-
+ const segmentPages = (pages, options) => {
+   const { rules = [], maxPages, breakpoints, prefer = "longer", logger } = options;
+   if (!pages.length) return [];
+   const { content: matchContent, normalizedPages: normalizedContent, pageMap } = buildPageMap(pages);
+   const splitPoints = [];
+   for (const rule of rules) {
+     const { regex, usesCapture, captureNames, usesLineStartsAfter } = buildRuleRegex(rule);
+     const finalMatches = filterByOccurrence(filterByConstraints(findMatches(matchContent, regex, usesCapture, captureNames), rule, pageMap.getId), rule.occurrence);
+     for (const m of finalMatches) {
+       const isLineStartsAfter = usesLineStartsAfter && m.captured !== void 0;
+       const markerLength = isLineStartsAfter ? m.end - m.captured.length - m.start : 0;
+       splitPoints.push({
+         capturedContent: isLineStartsAfter ? void 0 : m.captured,
+         contentStartOffset: isLineStartsAfter ? markerLength : void 0,
+         index: rule.split === "at" ? m.start : m.end,
+         meta: rule.meta,
+         namedCaptures: m.namedCaptures
+       });
+     }
+   }
+   const byIndex = /* @__PURE__ */ new Map();
+   for (const p of splitPoints) {
+     const existing = byIndex.get(p.index);
+     if (!existing) byIndex.set(p.index, p);
+     else if (p.contentStartOffset !== void 0 && existing.contentStartOffset === void 0 || p.meta !== void 0 && existing.meta === void 0) byIndex.set(p.index, p);
  }
- const
-
-
-
-
-
-
+   const unique = [...byIndex.values()];
+   unique.sort((a, b) => a.index - b.index);
+   let segments = buildSegments(unique, matchContent, pageMap, rules);
+   if (segments.length === 0 && pages.length > 0) {
+     const firstPage = pages[0];
+     const lastPage = pages[pages.length - 1];
+     const initialSeg = {
+       content: pages.map((p) => normalizeLineEndings(p.content)).join("\n").trim(),
+       from: firstPage.id
+     };
+     if (lastPage.id !== firstPage.id) initialSeg.to = lastPage.id;
+     if (initialSeg.content) segments = [initialSeg];
+   }
+   if (maxPages !== void 0 && maxPages >= 0 && breakpoints?.length) return applyBreakpoints(segments, pages, normalizedContent, maxPages, breakpoints, prefer, logger);
+   return segments;
+ };
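
End to end, the named-capture syntax from the second JSDoc example lands in each segment's meta. A small runnable sketch (import path assumes the published package name; output shape hedged from the split-point and segment-building code):

import { segmentPages } from 'flappa-doormal';

const pages = [{ id: 1, content: '٥ - حدثنا فلان' }];
const segments = segmentPages(pages, {
  rules: [{ lineStartsAfter: ['{{raqms:hadithNum}} {{dash}} '], split: 'at', meta: { type: 'hadith' } }],
});
// Expected: [{ content: 'حدثنا فلان', from: 1, meta: { type: 'hadith', hadithNum: '٥' } }]
// The marker itself is stripped (via contentStartOffset) and the captured
// number is merged into meta alongside the rule's static fields.
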
  /**
- *
+ * Creates segment objects from split points.
  *
- *
- * -
- * -
- * -
- * -
+ * Handles segment creation including:
+ * - Content extraction (with captured content for `lineStartsAfter`)
+ * - Page break conversion to spaces
+ * - From/to page reference calculation
+ * - Metadata merging (static + named captures)
  *
- * @
+ * @param splitPoints - Sorted, unique split points
+ * @param content - Full concatenated content string
+ * @param pageMap - Page mapping utilities
+ * @param rules - Original rules (for constraint checking on first segment)
+ * @returns Array of segment objects
+ */
+ const buildSegments = (splitPoints, content, pageMap, rules) => {
+   /**
+    * Creates a single segment from a content range.
+    */
+   const createSegment$1 = (start, end, meta, capturedContent, namedCaptures, contentStartOffset) => {
+     const actualStart = start + (contentStartOffset ?? 0);
+     const sliced = content.slice(actualStart, end);
+     let text = capturedContent?.trim() ?? (contentStartOffset ? sliced.trim() : sliced.replace(/[\s\n]+$/, ""));
+     if (!text) return null;
+     if (!capturedContent) text = convertPageBreaks(text, actualStart, pageMap.pageBreaks);
+     const from = pageMap.getId(actualStart);
+     const to = capturedContent ? pageMap.getId(end - 1) : pageMap.getId(actualStart + text.length - 1);
+     const seg = {
+       content: text,
+       from
+     };
+     if (to !== from) seg.to = to;
+     if (meta || namedCaptures) seg.meta = {
+       ...meta,
+       ...namedCaptures
+     };
+     return seg;
+   };
+   /**
+    * Creates segments from an array of split points.
+    */
+   const createSegmentsFromSplitPoints = () => {
+     const result = [];
+     for (let i = 0; i < splitPoints.length; i++) {
+       const sp = splitPoints[i];
+       const end = i < splitPoints.length - 1 ? splitPoints[i + 1].index : content.length;
+       const s = createSegment$1(sp.index, end, sp.meta, sp.capturedContent, sp.namedCaptures, sp.contentStartOffset);
+       if (s) result.push(s);
+     }
+     return result;
+   };
+   const segments = [];
+   if (!splitPoints.length) {
+     if (anyRuleAllowsId(rules, pageMap.getId(0))) {
+       const s = createSegment$1(0, content.length);
+       if (s) segments.push(s);
+     }
+     return segments;
+   }
+   if (splitPoints[0].index > 0) {
+     if (anyRuleAllowsId(rules, pageMap.getId(0))) {
+       const s = createSegment$1(0, splitPoints[0].index);
+       if (s) segments.push(s);
+     }
+   }
+   return [...segments, ...createSegmentsFromSplitPoints()];
+ };
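
Note the merge order in createSegment$1: named captures are spread after static meta, so a capture named `type` would overwrite a rule's `meta.type`, while distinct names (as in the hadithNum example) simply coexist.
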
1577
|
+
|
|
1578
|
+
//#endregion
|
|
1579
|
+
//#region src/pattern-detection.ts
|
|
1580
|
+
/**
|
|
1581
|
+
* Pattern detection utilities for recognizing template tokens in Arabic text.
|
|
1582
|
+
* Used to auto-detect patterns from user-highlighted text in the segmentation dialog.
|
|
438
1583
|
*
|
|
439
|
-
* @
|
|
440
|
-
* const regex = generateBulletRegex();
|
|
441
|
-
* const match = regex.exec('• نقطة');
|
|
442
|
-
* // match.groups.content -> 'نقطة'
|
|
1584
|
+
* @module pattern-detection
|
|
443
1585
|
*/
|
|
444
|
-
function generateBulletRegex() {
|
|
445
|
-
return new RegExp("^(?<full>(?<marker>[•*°\\-]\\s?)(?<content>[\\s\\S]*))", "u");
|
|
446
|
-
}
|
|
447
1586
|
/**
|
|
448
|
-
*
|
|
1587
|
+
* Token detection order - more specific patterns first to avoid partial matches.
|
|
1588
|
+
* Example: 'raqms' before 'raqm' so "٣٤" matches 'raqms' not just the first digit.
|
|
449
1589
|
*
|
|
450
|
-
*
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
1590
|
+
* Tokens not in this list are appended in alphabetical order from TOKEN_PATTERNS.
|
|
1591
|
+
*/
|
|
1592
|
+
const TOKEN_PRIORITY_ORDER = [
|
|
1593
|
+
"basmalah",
|
|
1594
|
+
"kitab",
|
|
1595
|
+
"bab",
|
|
1596
|
+
"fasl",
|
|
1597
|
+
"naql",
|
|
1598
|
+
"numbered",
|
|
1599
|
+
"raqms",
|
|
1600
|
+
"raqm",
|
|
1601
|
+
"tarqim",
|
|
1602
|
+
"bullet",
|
|
1603
|
+
"dash",
|
|
1604
|
+
"harf"
|
|
1605
|
+
];
|
|
1606
|
+
/**
|
|
1607
|
+
* Gets the token detection priority order.
|
|
1608
|
+
* Returns tokens in priority order, with any TOKEN_PATTERNS not in the priority list appended.
|
|
1609
|
+
*/
|
|
1610
|
+
const getTokenPriority = () => {
|
|
1611
|
+
const allTokens = getAvailableTokens();
|
|
1612
|
+
const prioritized = TOKEN_PRIORITY_ORDER.filter((t) => allTokens.includes(t));
|
|
1613
|
+
const remaining = allTokens.filter((t) => !TOKEN_PRIORITY_ORDER.includes(t)).sort();
|
|
1614
|
+
return [...prioritized, ...remaining];
|
|
1615
|
+
};
|
|
1616
|
+
/**
|
|
1617
|
+
* Analyzes text and returns all detected token patterns with their positions.
|
|
1618
|
+
* Patterns are detected in priority order to avoid partial matches.
|
|
455
1619
|
*
|
|
456
|
-
* @
|
|
1620
|
+
* @param text - The text to analyze for token patterns
|
|
1621
|
+
* @returns Array of detected patterns sorted by position
|
|
457
1622
|
*
|
|
458
1623
|
* @example
|
|
459
|
-
*
|
|
460
|
-
*
|
|
461
|
-
* // match
|
|
462
|
-
* // match
|
|
1624
|
+
* detectTokenPatterns("٣٤ - حدثنا")
|
|
1625
|
+
* // Returns: [
|
|
1626
|
+
* // { token: 'raqms', match: '٣٤', index: 0, endIndex: 2 },
|
|
1627
|
+
* // { token: 'dash', match: '-', index: 3, endIndex: 4 },
|
|
1628
|
+
* // { token: 'naql', match: 'حدثنا', index: 5, endIndex: 10 }
|
|
1629
|
+
* // ]
|
|
463
1630
|
*/
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
/**
|
|
471
|
-
* Generates a regex pattern from a marker configuration.
|
|
472
|
-
* Always returns a regex with three named capture groups:
|
|
473
|
-
* - full: Complete match including marker
|
|
474
|
-
* - marker: Just the marker part (for metadata/indexing)
|
|
475
|
-
* - content: Clean content without marker (for LLM processing)
|
|
476
|
-
*
|
|
477
|
-
* This function applies all default values before delegating to type-specific generators.
|
|
478
|
-
*
|
|
479
|
-
* @param config - Marker configuration
|
|
480
|
-
* @returns Regular expression with named groups
|
|
481
|
-
*
|
|
482
|
-
* @example
|
|
483
|
-
* const regex = generateRegexFromMarker({ type: 'numbered' });
|
|
484
|
-
* const match = regex.exec('٥ - نص');
|
|
485
|
-
* match.groups.full // "٥ - نص"
|
|
486
|
-
* match.groups.marker // "٥ -"
|
|
487
|
-
* match.groups.content // "نص"
|
|
488
|
-
*/
|
|
489
|
-
function generateRegexFromMarker(config) {
|
|
490
|
-
const normalized = {
|
|
491
|
-
numbering: config.numbering ?? DEFAULT_NUMBERING,
|
|
492
|
-
separator: config.separator ?? DEFAULT_SEPARATOR,
|
|
493
|
-
...config
|
|
1631
|
+
const detectTokenPatterns = (text) => {
|
|
1632
|
+
if (!text) return [];
|
|
1633
|
+
const results = [];
|
|
1634
|
+
const coveredRanges = [];
|
|
1635
|
+
const isPositionCovered = (start, end) => {
|
|
1636
|
+
return coveredRanges.some(([s, e]) => start >= s && start < e || end > s && end <= e || start <= s && end >= e);
|
|
494
1637
|
};
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
1638
|
+
for (const tokenName of getTokenPriority()) {
|
|
1639
|
+
const pattern = TOKEN_PATTERNS[tokenName];
|
|
1640
|
+
if (!pattern) continue;
|
|
1641
|
+
try {
|
|
1642
|
+
const regex = new RegExp(`(${pattern})`, "gu");
|
|
1643
|
+
let match;
|
|
1644
|
+
while ((match = regex.exec(text)) !== null) {
|
|
1645
|
+
const startIndex = match.index;
|
|
1646
|
+
const endIndex = startIndex + match[0].length;
|
|
1647
|
+
if (isPositionCovered(startIndex, endIndex)) continue;
|
|
1648
|
+
results.push({
|
|
1649
|
+
endIndex,
|
|
1650
|
+
index: startIndex,
|
|
1651
|
+
match: match[0],
|
|
1652
|
+
token: tokenName
|
|
1653
|
+
});
|
|
1654
|
+
coveredRanges.push([startIndex, endIndex]);
|
|
1655
|
+
}
|
|
1656
|
+
} catch {}
|
|
512
1657
|
}
|
|
513
|
-
|
|
1658
|
+
return results.sort((a, b) => a.index - b.index);
|
|
1659
|
+
};
|
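
A sketch of the priority order in action through the public export, reusing the JSDoc example:

import { detectTokenPatterns } from 'flappa-doormal';

// 'raqms' (digit runs) is tried before 'raqm' (single digit), so '٣٤' is
// reported once, not as two single-digit matches.
const detected = detectTokenPatterns('٣٤ - حدثنا');
// → [{ token: 'raqms', match: '٣٤', index: 0, endIndex: 2 },
//    { token: 'dash', match: '-', index: 3, endIndex: 4 },
//    { token: 'naql', match: 'حدثنا', index: 5, endIndex: 10 }]
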
+ /**
+ * Generates a template pattern from text using detected tokens.
+ * Replaces matched portions with {{token}} syntax.
+ *
+ * @param text - Original text
+ * @param detected - Array of detected patterns from detectTokenPatterns
+ * @returns Template string with tokens, e.g., "{{raqms}} {{dash}} "
+ *
+ * @example
+ * const detected = detectTokenPatterns("٣٤ - ");
+ * generateTemplateFromText("٣٤ - ", detected);
+ * // Returns: "{{raqms}} {{dash}} "
+ */
+ const generateTemplateFromText = (text, detected) => {
+   if (!text || detected.length === 0) return text;
+   let template = text;
+   const sortedByIndexDesc = [...detected].sort((a, b) => b.index - a.index);
+   for (const d of sortedByIndexDesc) template = `${template.slice(0, d.index)}{{${d.token}}}${template.slice(d.endIndex)}`;
+   return template;
+ };
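
generateTemplateFromText replaces matches in descending index order deliberately: splicing `{{token}}` into the string changes its length, so working right to left keeps every earlier match's index and endIndex valid without re-offsetting.
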
1680
|
+
/**
|
|
1681
|
+
* Determines the best pattern type for auto-generated rules based on detected patterns.
|
|
1682
|
+
*
|
|
1683
|
+
* @param detected - Array of detected patterns
|
|
1684
|
+
* @returns Suggested pattern type and whether to use fuzzy matching
|
|
1685
|
+
*/
|
|
1686
|
+
const suggestPatternConfig = (detected) => {
|
|
1687
|
+
const hasStructuralToken = detected.some((d) => [
|
|
1688
|
+
"basmalah",
|
|
1689
|
+
"kitab",
|
|
1690
|
+
"bab",
|
|
1691
|
+
"fasl"
|
|
1692
|
+
].includes(d.token));
|
|
1693
|
+
const hasNumberedPattern = detected.some((d) => [
|
|
1694
|
+
"raqms",
|
|
1695
|
+
"raqm",
|
|
1696
|
+
"numbered"
|
|
1697
|
+
].includes(d.token));
|
|
1698
|
+
if (hasStructuralToken) return {
|
|
1699
|
+
fuzzy: true,
|
|
1700
|
+
metaType: detected.find((d) => [
|
|
1701
|
+
"kitab",
|
|
1702
|
+
"bab",
|
|
1703
|
+
"fasl"
|
|
1704
|
+
].includes(d.token))?.token || "chapter",
|
|
1705
|
+
patternType: "lineStartsWith"
|
|
1706
|
+
};
|
|
1707
|
+
if (hasNumberedPattern) return {
|
|
1708
|
+
fuzzy: false,
|
|
1709
|
+
metaType: "hadith",
|
|
1710
|
+
patternType: "lineStartsAfter"
|
|
1711
|
+
};
|
|
1712
|
+
return {
|
|
1713
|
+
fuzzy: false,
|
|
1714
|
+
patternType: "lineStartsAfter"
|
|
1715
|
+
};
|
|
1716
|
+
};
|
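
The heuristic therefore lands in one of three buckets: structural headings (basmalah/kitab/bab/fasl) get fuzzy lineStartsWith rules keyed to the heading token, numbered markers (raqms/raqm/numbered) get exact lineStartsAfter rules tagged hadith, and anything else falls back to a plain lineStartsAfter rule with fuzzy matching off.
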
+ /**
+ * Analyzes text and generates a complete suggested rule configuration.
+ *
+ * @param text - Highlighted text from the page
+ * @returns Suggested rule configuration or null if no patterns detected
+ */
+ const analyzeTextForRule = (text) => {
+   const detected = detectTokenPatterns(text);
+   if (detected.length === 0) return null;
+   return {
+     detected,
+     template: generateTemplateFromText(text, detected),
+     ...suggestPatternConfig(detected)
+   };
+ };
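
analyzeTextForRule ties the three helpers together. A hedged end-to-end sketch of turning highlighted text into a segmentation rule (the field mapping onto a rule is an assumption, not shown in the diff):

import { analyzeTextForRule } from 'flappa-doormal';

const suggestion = analyzeTextForRule('٣٤ - ');
// → { detected: [...], template: '{{raqms}} {{dash}} ', fuzzy: false,
//     metaType: 'hadith', patternType: 'lineStartsAfter' }

// Hypothetical mapping of a suggestion onto a segmentPages rule:
const rule = suggestion && {
  [suggestion.patternType]: [suggestion.template],
  split: 'at',
  fuzzy: suggestion.fuzzy,
  meta: suggestion.metaType ? { type: suggestion.metaType } : undefined,
};
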

  //#endregion
- export {
+ export { TOKEN_PATTERNS, analyzeTextForRule, containsTokens, detectTokenPatterns, escapeRegex, expandTokens, expandTokensWithCaptures, generateTemplateFromText, getAvailableTokens, getTokenPattern, makeDiacriticInsensitive, normalizeLineEndings, segmentPages, stripHtmlTags, suggestPatternConfig, templateToRegex };
  //# sourceMappingURL=index.mjs.map