eyecite-ts 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +338 -0
- package/dist/annotate/index.cjs +2 -0
- package/dist/annotate/index.cjs.map +1 -0
- package/dist/annotate/index.d.cts +163 -0
- package/dist/annotate/index.d.cts.map +1 -0
- package/dist/annotate/index.d.mts +163 -0
- package/dist/annotate/index.d.mts.map +1 -0
- package/dist/annotate/index.mjs +2 -0
- package/dist/annotate/index.mjs.map +1 -0
- package/dist/citation-8_GvfEuj.d.mts +286 -0
- package/dist/citation-8_GvfEuj.d.mts.map +1 -0
- package/dist/citation-BcY5zzWb.d.cts +286 -0
- package/dist/citation-BcY5zzWb.d.cts.map +1 -0
- package/dist/data/index.cjs +2 -0
- package/dist/data/index.cjs.map +1 -0
- package/dist/data/index.d.cts +116 -0
- package/dist/data/index.d.cts.map +1 -0
- package/dist/data/index.d.mts +116 -0
- package/dist/data/index.d.mts.map +1 -0
- package/dist/data/index.mjs +2 -0
- package/dist/data/index.mjs.map +1 -0
- package/dist/index.cjs +2 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +682 -0
- package/dist/index.d.cts.map +1 -0
- package/dist/index.d.mts +682 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +2 -0
- package/dist/index.mjs.map +1 -0
- package/dist/reporters-BclWimmk.cjs +2 -0
- package/dist/reporters-BclWimmk.cjs.map +1 -0
- package/dist/reporters-DYNnh4O0.mjs +2 -0
- package/dist/reporters-DYNnh4O0.mjs.map +1 -0
- package/package.json +69 -0
package/dist/data/index.d.cts
ADDED

@@ -0,0 +1,116 @@
//#region src/data/reporters.d.ts
/**
 * Reporter database integration for citation validation
 *
 * This module provides lazy-loadable access to the reporters-db database,
 * containing 1200+ court reporters with variant forms. The library works
 * in degraded mode (pattern-based extraction only) if reporters are not loaded.
 *
 * @example
 * // Degraded mode: extraction works without reporter data
 * const citations = await extract(text)
 *
 * @example
 * // Full mode: load reporters for validation
 * await loadReporters()
 * const citations = await extract(text) // Now with reporter validation
 */
/**
 * Edition entry from reporters-db
 *
 * Represents a specific edition of a reporter with start/end dates.
 */
interface ReporterEdition {
  /** Start date in ISO 8601 format */
  start: string | null;
  /** End date in ISO 8601 format (null if ongoing) */
  end: string | null;
}
/**
 * Reporter entry from reporters-db
 *
 * Represents a single court reporter with all metadata needed for
 * citation validation and enrichment.
 *
 * Note: The reporters-db structure has the actual data; this interface
 * represents it flexibly to handle all variations in the JSON.
 */
interface ReporterEntry {
  /** Full reporter name (e.g., "Federal Reporter") */
  name: string;
  /** Citation type: state, federal, specialty, neutral, state_regional, etc. */
  cite_type: string;
  /** Editions keyed by abbreviation (e.g., {"F.2d": {...}, "F.3d": {...}}) */
  editions: Record<string, ReporterEdition>;
  /** Variant forms mapped to canonical form (e.g., {"F. 2d": "F.2d"}) */
  variations?: Record<string, string | undefined>;
  /** MLZ jurisdiction identifiers (optional) */
  mlz_jurisdiction?: string[];
  /** Publisher (optional) */
  publisher?: string;
  /** Notes (optional) */
  notes?: string;
}
/**
 * In-memory reporter database with fast O(1) lookup
 *
 * Uses Map-based indexing for case-insensitive abbreviation lookup.
 * All variant forms are indexed to support fuzzy matching.
 */
interface ReportersDatabase {
  /** Fast O(1) lookup by abbreviation (lowercase normalized keys) */
  byAbbreviation: Map<string, ReporterEntry[]>;
  /** All reporters (for iteration/filtering) */
  all: ReporterEntry[];
}
/**
 * Load reporter database asynchronously with lazy loading
 *
 * Dynamic import prevents loading 1200+ reporters until explicitly requested.
 * Result is cached after first load for subsequent calls.
 *
 * @returns Promise resolving to indexed reporter database
 *
 * @example
 * const db = await loadReporters()
 * const reporters = db.byAbbreviation.get('f.2d') // Fast O(1) lookup
 */
declare function loadReporters(): Promise<ReportersDatabase>;
/**
 * Get cached reporter database synchronously (degraded mode support)
 *
 * Returns null if reporters not loaded yet. This enables the library to
 * work in degraded mode without reporter validation.
 *
 * @returns Cached database or null if not loaded
 *
 * @example
 * const db = getReportersSync()
 * if (db) {
 *   // Full mode: validate citations
 * } else {
 *   // Degraded mode: extract without validation
 * }
 */
declare function getReportersSync(): ReportersDatabase | null;
/**
 * Find reporters by abbreviation (case-insensitive)
 *
 * Loads reporter database if not already loaded. Returns all reporters
 * matching the abbreviation (including variant forms).
 *
 * @param abbr - Reporter abbreviation to look up
 * @returns Promise resolving to matching reporters (empty array if none)
 *
 * @example
 * const reporters = await findReportersByAbbreviation('F.2d')
 * // [{ abbreviation: 'F.2d', name: 'Federal Reporter, Second Series', ... }]
 *
 * @example
 * const unknown = await findReportersByAbbreviation('NONEXISTENT')
 * // [] (empty array, not error)
 */
declare function findReportersByAbbreviation(abbr: string): Promise<ReporterEntry[]>;
//#endregion
export { ReporterEdition, ReporterEntry, ReportersDatabase, findReportersByAbbreviation, getReportersSync, loadReporters };
//# sourceMappingURL=index.d.cts.map

package/dist/data/index.d.cts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.d.cts","names":[],"sources":["../../src/data/reporters.ts"],"mappings":";;AAsBA;;;;;AAgBA;;;;;;;;;;;;;;;UAhBiB,eAAA;;EAEf,KAAA;;EAEA,GAAA;AAAA;;;;;;;;;;UAYe,aAAA;;EAEf,IAAA;;EAEA,SAAA;EA2CF;EAzCE,QAAA,EAAU,MAAA,SAAe,eAAA;;EAEzB,UAAA,GAAa,MAAA;EAuCgC;EArC7C,gBAAA;EAwGc;EAtGd,SAAA;EAsGkC;EApGlC,KAAA;AAAA;;;;;;;UASe,iBAAA;EAkHN;EAhHT,cAAA,EAAgB,GAAA,SAAY,aAAA;;EAE5B,GAAA,EAAK,aAAA;AAAA;;;;;;;;;;;;;iBAoBe,aAAA,CAAA,GAAiB,OAAA,CAAQ,iBAAA;;;;;;;;;;;;;;;;;iBAmE/B,gBAAA,CAAA,GAAoB,iBAAA;;;;;;;;;;;;;;;;;;iBAqBd,2BAAA,CACpB,IAAA,WACC,OAAA,CAAQ,aAAA"}

package/dist/data/index.d.mts
ADDED

@@ -0,0 +1,116 @@
//#region src/data/reporters.d.ts
/**
 * Reporter database integration for citation validation
 *
 * This module provides lazy-loadable access to the reporters-db database,
 * containing 1200+ court reporters with variant forms. The library works
 * in degraded mode (pattern-based extraction only) if reporters are not loaded.
 *
 * @example
 * // Degraded mode: extraction works without reporter data
 * const citations = await extract(text)
 *
 * @example
 * // Full mode: load reporters for validation
 * await loadReporters()
 * const citations = await extract(text) // Now with reporter validation
 */
/**
 * Edition entry from reporters-db
 *
 * Represents a specific edition of a reporter with start/end dates.
 */
interface ReporterEdition {
  /** Start date in ISO 8601 format */
  start: string | null;
  /** End date in ISO 8601 format (null if ongoing) */
  end: string | null;
}
/**
 * Reporter entry from reporters-db
 *
 * Represents a single court reporter with all metadata needed for
 * citation validation and enrichment.
 *
 * Note: The reporters-db structure has the actual data; this interface
 * represents it flexibly to handle all variations in the JSON.
 */
interface ReporterEntry {
  /** Full reporter name (e.g., "Federal Reporter") */
  name: string;
  /** Citation type: state, federal, specialty, neutral, state_regional, etc. */
  cite_type: string;
  /** Editions keyed by abbreviation (e.g., {"F.2d": {...}, "F.3d": {...}}) */
  editions: Record<string, ReporterEdition>;
  /** Variant forms mapped to canonical form (e.g., {"F. 2d": "F.2d"}) */
  variations?: Record<string, string | undefined>;
  /** MLZ jurisdiction identifiers (optional) */
  mlz_jurisdiction?: string[];
  /** Publisher (optional) */
  publisher?: string;
  /** Notes (optional) */
  notes?: string;
}
/**
 * In-memory reporter database with fast O(1) lookup
 *
 * Uses Map-based indexing for case-insensitive abbreviation lookup.
 * All variant forms are indexed to support fuzzy matching.
 */
interface ReportersDatabase {
  /** Fast O(1) lookup by abbreviation (lowercase normalized keys) */
  byAbbreviation: Map<string, ReporterEntry[]>;
  /** All reporters (for iteration/filtering) */
  all: ReporterEntry[];
}
/**
 * Load reporter database asynchronously with lazy loading
 *
 * Dynamic import prevents loading 1200+ reporters until explicitly requested.
 * Result is cached after first load for subsequent calls.
 *
 * @returns Promise resolving to indexed reporter database
 *
 * @example
 * const db = await loadReporters()
 * const reporters = db.byAbbreviation.get('f.2d') // Fast O(1) lookup
 */
declare function loadReporters(): Promise<ReportersDatabase>;
/**
 * Get cached reporter database synchronously (degraded mode support)
 *
 * Returns null if reporters not loaded yet. This enables the library to
 * work in degraded mode without reporter validation.
 *
 * @returns Cached database or null if not loaded
 *
 * @example
 * const db = getReportersSync()
 * if (db) {
 *   // Full mode: validate citations
 * } else {
 *   // Degraded mode: extract without validation
 * }
 */
declare function getReportersSync(): ReportersDatabase | null;
/**
 * Find reporters by abbreviation (case-insensitive)
 *
 * Loads reporter database if not already loaded. Returns all reporters
 * matching the abbreviation (including variant forms).
 *
 * @param abbr - Reporter abbreviation to look up
 * @returns Promise resolving to matching reporters (empty array if none)
 *
 * @example
 * const reporters = await findReportersByAbbreviation('F.2d')
 * // [{ abbreviation: 'F.2d', name: 'Federal Reporter, Second Series', ... }]
 *
 * @example
 * const unknown = await findReportersByAbbreviation('NONEXISTENT')
 * // [] (empty array, not error)
 */
declare function findReportersByAbbreviation(abbr: string): Promise<ReporterEntry[]>;
//#endregion
export { ReporterEdition, ReporterEntry, ReportersDatabase, findReportersByAbbreviation, getReportersSync, loadReporters };
//# sourceMappingURL=index.d.mts.map

package/dist/data/index.d.mts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.d.mts","names":[],"sources":["../../src/data/reporters.ts"],"mappings":";;AAsBA;;;;;AAgBA;;;;;;;;;;;;;;;UAhBiB,eAAA;;EAEf,KAAA;;EAEA,GAAA;AAAA;;;;;;;;;;UAYe,aAAA;;EAEf,IAAA;;EAEA,SAAA;EA2CF;EAzCE,QAAA,EAAU,MAAA,SAAe,eAAA;;EAEzB,UAAA,GAAa,MAAA;EAuCgC;EArC7C,gBAAA;EAwGc;EAtGd,SAAA;EAsGkC;EApGlC,KAAA;AAAA;;;;;;;UASe,iBAAA;EAkHN;EAhHT,cAAA,EAAgB,GAAA,SAAY,aAAA;;EAE5B,GAAA,EAAK,aAAA;AAAA;;;;;;;;;;;;;iBAoBe,aAAA,CAAA,GAAiB,OAAA,CAAQ,iBAAA;;;;;;;;;;;;;;;;;iBAmE/B,gBAAA,CAAA,GAAoB,iBAAA;;;;;;;;;;;;;;;;;;iBAqBd,2BAAA,CACpB,IAAA,WACC,OAAA,CAAQ,aAAA"}

package/dist/data/index.mjs
ADDED

@@ -0,0 +1,2 @@
let e=null;async function t(){if(e)return e;let t=await import(`../../data/reporters.json`,{assert:{type:`json`}}),n=new Map,r=[],i=t.default||t;for(let[e,t]of Object.entries(i))for(let e of t){r.push(e);for(let t of Object.keys(e.editions)){let r=t.toLowerCase();n.has(r)||n.set(r,[]),n.get(r).push(e)}for(let[t,r]of Object.entries(e.variations||{})){let r=t.toLowerCase();n.has(r)||n.set(r,[]),n.get(r).push(e)}}return e={byAbbreviation:n,all:r},e}function n(){return e}async function r(e){return(await t()).byAbbreviation.get(e.toLowerCase())??[]}export{r as findReportersByAbbreviation,n as getReportersSync,t as loadReporters};
//# sourceMappingURL=index.mjs.map
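
The minified module above is the compiled form of src/data/reporters.ts (its full source is embedded in the source map that follows). A readable sketch of the lazy-load-and-index step it performs, with the interfaces from the declarations above assumed to be importable from the same module:

```typescript
// De-minified sketch of loadReporters from the bundle above; see the
// sourcesContent in index.mjs.map for the original implementation.
import type { ReporterEntry, ReportersDatabase } from "eyecite-ts/data"

let cached: ReportersDatabase | null = null

export async function loadReporters(): Promise<ReportersDatabase> {
  if (cached) return cached

  // Dynamic JSON import keeps the 1200+ reporter entries out of the core bundle.
  const mod: any = await import("../../data/reporters.json", {
    assert: { type: "json" },
  })
  const reportersData = (mod.default ?? mod) as Record<string, ReporterEntry[]>

  const byAbbreviation = new Map<string, ReporterEntry[]>()
  const all: ReporterEntry[] = []

  for (const reporters of Object.values(reportersData)) {
    for (const reporter of reporters) {
      all.push(reporter)
      // Index every edition abbreviation and every variant form, lowercased,
      // so byAbbreviation.get() is case-insensitive and O(1).
      const keys = [
        ...Object.keys(reporter.editions),
        ...Object.keys(reporter.variations ?? {}),
      ]
      for (const key of keys.map((k) => k.toLowerCase())) {
        if (!byAbbreviation.has(key)) byAbbreviation.set(key, [])
        byAbbreviation.get(key)!.push(reporter)
      }
    }
  }

  cached = { byAbbreviation, all }
  return cached
}
```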

package/dist/data/index.mjs.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.mjs","names":[],"sources":["../../src/data/reporters.ts"],"sourcesContent":["/**\n * Reporter database integration for citation validation\n *\n * This module provides lazy-loadable access to the reporters-db database,\n * containing 1200+ court reporters with variant forms. The library works\n * in degraded mode (pattern-based extraction only) if reporters are not loaded.\n *\n * @example\n * // Degraded mode: extraction works without reporter data\n * const citations = await extract(text)\n *\n * @example\n * // Full mode: load reporters for validation\n * await loadReporters()\n * const citations = await extract(text) // Now with reporter validation\n */\n\n/**\n * Edition entry from reporters-db\n *\n * Represents a specific edition of a reporter with start/end dates.\n */\nexport interface ReporterEdition {\n /** Start date in ISO 8601 format */\n start: string | null\n /** End date in ISO 8601 format (null if ongoing) */\n end: string | null\n}\n\n/**\n * Reporter entry from reporters-db\n *\n * Represents a single court reporter with all metadata needed for\n * citation validation and enrichment.\n *\n * Note: The reporters-db structure has the actual data; this interface\n * represents it flexibly to handle all variations in the JSON.\n */\nexport interface ReporterEntry {\n /** Full reporter name (e.g., \"Federal Reporter\") */\n name: string\n /** Citation type: state, federal, specialty, neutral, state_regional, etc. */\n cite_type: string\n /** Editions keyed by abbreviation (e.g., {\"F.2d\": {...}, \"F.3d\": {...}}) */\n editions: Record<string, ReporterEdition>\n /** Variant forms mapped to canonical form (e.g., {\"F. 2d\": \"F.2d\"}) */\n variations?: Record<string, string | undefined>\n /** MLZ jurisdiction identifiers (optional) */\n mlz_jurisdiction?: string[]\n /** Publisher (optional) */\n publisher?: string\n /** Notes (optional) */\n notes?: string\n}\n\n/**\n * In-memory reporter database with fast O(1) lookup\n *\n * Uses Map-based indexing for case-insensitive abbreviation lookup.\n * All variant forms are indexed to support fuzzy matching.\n */\nexport interface ReportersDatabase {\n /** Fast O(1) lookup by abbreviation (lowercase normalized keys) */\n byAbbreviation: Map<string, ReporterEntry[]>\n /** All reporters (for iteration/filtering) */\n all: ReporterEntry[]\n}\n\n/**\n * Cached database instance (null until loadReporters() called)\n */\nlet cached: ReportersDatabase | null = null\n\n/**\n * Load reporter database asynchronously with lazy loading\n *\n * Dynamic import prevents loading 1200+ reporters until explicitly requested.\n * Result is cached after first load for subsequent calls.\n *\n * @returns Promise resolving to indexed reporter database\n *\n * @example\n * const db = await loadReporters()\n * const reporters = db.byAbbreviation.get('f.2d') // Fast O(1) lookup\n */\nexport async function loadReporters(): Promise<ReportersDatabase> {\n if (cached) return cached\n\n // Dynamic import prevents loading until requested (keeps core bundle small)\n const data = await import(\"../../data/reporters.json\", {\n assert: { type: \"json\" },\n })\n\n const byAbbreviation = new Map<string, ReporterEntry[]>()\n const all: ReporterEntry[] = []\n\n // reporters.json structure: { \"A.\": [...], \"F.2d\": [...], ... 
}\n const reportersData = (data.default || data) as Record<\n string,\n ReporterEntry[]\n >\n\n // Build fast lookup index with lowercase normalization\n for (const [canonicalAbbr, reporters] of Object.entries(reportersData)) {\n for (const reporter of reporters) {\n all.push(reporter)\n\n // Index by all edition abbreviations\n for (const editionAbbr of Object.keys(reporter.editions)) {\n const key = editionAbbr.toLowerCase()\n if (!byAbbreviation.has(key)) {\n byAbbreviation.set(key, [])\n }\n byAbbreviation.get(key)!.push(reporter)\n }\n\n // Index all variations for fuzzy matching\n for (const [variant, canonical] of Object.entries(\n reporter.variations || {},\n )) {\n const variantKey = variant.toLowerCase()\n if (!byAbbreviation.has(variantKey)) {\n byAbbreviation.set(variantKey, [])\n }\n byAbbreviation.get(variantKey)!.push(reporter)\n }\n }\n }\n\n cached = {\n byAbbreviation,\n all,\n }\n return cached\n}\n\n/**\n * Get cached reporter database synchronously (degraded mode support)\n *\n * Returns null if reporters not loaded yet. This enables the library to\n * work in degraded mode without reporter validation.\n *\n * @returns Cached database or null if not loaded\n *\n * @example\n * const db = getReportersSync()\n * if (db) {\n * // Full mode: validate citations\n * } else {\n * // Degraded mode: extract without validation\n * }\n */\nexport function getReportersSync(): ReportersDatabase | null {\n return cached\n}\n\n/**\n * Find reporters by abbreviation (case-insensitive)\n *\n * Loads reporter database if not already loaded. Returns all reporters\n * matching the abbreviation (including variant forms).\n *\n * @param abbr - Reporter abbreviation to look up\n * @returns Promise resolving to matching reporters (empty array if none)\n *\n * @example\n * const reporters = await findReportersByAbbreviation('F.2d')\n * // [{ abbreviation: 'F.2d', name: 'Federal Reporter, Second Series', ... }]\n *\n * @example\n * const unknown = await findReportersByAbbreviation('NONEXISTENT')\n * // [] (empty array, not error)\n */\nexport async function findReportersByAbbreviation(\n abbr: string,\n): Promise<ReporterEntry[]> {\n const db = await loadReporters()\n return db.byAbbreviation.get(abbr.toLowerCase()) ?? []\n}\n"],"mappings":"AAuEA,IAAI,EAAmC,KAcvC,eAAsB,GAA4C,CAChE,GAAI,EAAQ,OAAO,EAGnB,IAAM,EAAO,MAAM,OAAO,4BAA6B,CACrD,OAAQ,CAAE,KAAM,OAAQ,CACzB,EAEK,EAAiB,IAAI,IACrB,EAAuB,EAAE,CAGzB,EAAiB,EAAK,SAAW,EAMvC,IAAK,GAAM,CAAC,EAAe,KAAc,OAAO,QAAQ,EAAc,CACpE,IAAK,IAAM,KAAY,EAAW,CAChC,EAAI,KAAK,EAAS,CAGlB,IAAK,IAAM,KAAe,OAAO,KAAK,EAAS,SAAS,CAAE,CACxD,IAAM,EAAM,EAAY,aAAa,CAChC,EAAe,IAAI,EAAI,EAC1B,EAAe,IAAI,EAAK,EAAE,CAAC,CAE7B,EAAe,IAAI,EAAI,CAAE,KAAK,EAAS,CAIzC,IAAK,GAAM,CAAC,EAAS,KAAc,OAAO,QACxC,EAAS,YAAc,EAAE,CAC1B,CAAE,CACD,IAAM,EAAa,EAAQ,aAAa,CACnC,EAAe,IAAI,EAAW,EACjC,EAAe,IAAI,EAAY,EAAE,CAAC,CAEpC,EAAe,IAAI,EAAW,CAAE,KAAK,EAAS,EASpD,MAJA,GAAS,CACP,iBACA,MACD,CACM,EAmBT,SAAgB,GAA6C,CAC3D,OAAO,EAoBT,eAAsB,EACpB,EAC0B,CAE1B,OADW,MAAM,GAAe,EACtB,eAAe,IAAI,EAAK,aAAa,CAAC,EAAI,EAAE"}

package/dist/index.cjs
ADDED

@@ -0,0 +1,2 @@
Object.defineProperty(exports,Symbol.toStringTag,{value:`Module`});function e(e){return e.replace(/<[^>]+>/g,``)}function t(e){return e.replace(/[\t\n\r]+/g,` `).replace(/ {2,}/g,` `)}function n(e){return e.normalize(`NFKC`)}function r(e){return e.replace(/[\u201C\u201D]/g,`"`).replace(/[\u2018\u2019]/g,`'`)}function i(i,o=[e,t,n,r]){let s=i,c=new Map,l=new Map;for(let e=0;e<=i.length;e++)c.set(e,e),l.set(e,e);for(let e of o){let t=s,n=e(s);if(t!==n){let{newCleanToOriginal:e,newOriginalToClean:r}=a(t,n,c,l);c=e,l=r,s=n}}return{cleaned:s,transformationMap:{cleanToOriginal:c,originalToClean:l},warnings:[]}}function a(e,t,n,r){let i=new Map,a=new Map,o=0,s=0;for(;o<=e.length||s<=t.length;){if(o>=e.length&&s>=t.length){let e=n.get(o)??o;i.set(s,e),a.set(e,s);break}if(o>=e.length){let e=n.get(o)??o;i.set(s,e),s++;continue}if(s>=t.length){let e=n.get(o)??o;a.set(e,s),o++;continue}if(e[o]===t[s]){let e=n.get(o)??o;i.set(s,e),a.set(e,s),o++,s++}else{let r=!1;for(let i=1;i<=20&&!(o+i>=e.length);i++)if(e[o+i]===t[s]){for(let e=0;e<i;e++){let t=n.get(o+e)??o+e;a.set(t,s)}o+=i,r=!0;break}if(r)continue;for(let a=1;a<=20&&!(s+a>=t.length);a++)if(e[o]===t[s+a]){let e=n.get(o)??o;for(let t=0;t<a;t++)i.set(s+t,e);s+=a,r=!0;break}if(r)continue;let c=n.get(o)??o;i.set(s,c),a.set(c,s),o++,s++}}return{newCleanToOriginal:i,newOriginalToClean:a}}const o=[{id:`federal-reporter`,regex:/\b(\d+)\s+(F\.|F\.2d|F\.3d|F\.\s?Supp\.|F\.\s?Supp\.\s?2d|F\.\s?Supp\.\s?3d)\s+(\d+)\b/g,description:`Federal Reporter (F., F.2d, F.3d, F.Supp., etc.)`,type:`case`},{id:`supreme-court`,regex:/\b(\d+)\s+(U\.S\.|S\.\s?Ct\.|L\.\s?Ed\.(?:\s?2d)?)\s+(\d+)\b/g,description:`U.S. Supreme Court reporters`,type:`case`},{id:`state-reporter`,regex:/\b(\d+)\s+([A-Z][A-Za-z\.]+(?:\s?2d|\s?3d)?)\s+(\d+)\b/g,description:`State reporters (broad pattern, validated against reporters-db in Phase 3)`,type:`case`}],s=[{id:`usc`,regex:/\b(\d+)\s+U\.S\.C\.?\s+§+\s*(\d+)\b/g,description:`U.S. Code citations (e.g., "42 U.S.C. § 1983")`,type:`statute`},{id:`state-code`,regex:/\b([A-Z][a-z]+\.?\s+[A-Za-z\.]+\s+Code)\s+§\s*(\d+)\b/g,description:`State code citations (broad pattern, e.g., "Cal. Penal Code § 187")`,type:`statute`}],c=[{id:`law-review`,regex:/\b(\d+)\s+([A-Z][A-Za-z\.\s]+)\s+(\d+)\b/g,description:`Law review citations (e.g., "120 Harv. L. Rev. 500"), validated against journals-db in Phase 3`,type:`journal`}],l=[{id:`westlaw`,regex:/\b(\d{4})\s+WL\s+(\d+)\b/g,description:`WestLaw citations (e.g., "2021 WL 123456")`,type:`neutral`},{id:`lexis`,regex:/\b(\d{4})\s+U\.S\.\s+LEXIS\s+(\d+)\b/g,description:`LexisNexis citations (e.g., "2021 U.S. LEXIS 5000")`,type:`neutral`},{id:`public-law`,regex:/\bPub\.\s?L\.\s?No\.\s?(\d+-\d+)\b/g,description:`Public Law citations (e.g., "Pub. L. No. 117-58")`,type:`publicLaw`},{id:`federal-register`,regex:/\b(\d+)\s+Fed\.\s?Reg\.\s+(\d+)\b/g,description:`Federal Register citations (e.g., "86 Fed. Reg. 12345")`,type:`federalRegister`}],u=[{id:`id`,regex:/\b[Ii]d\.(?:\s+at\s+(\d+))?/g,description:`Id. citations (e.g., "Id." or "Id. at 253")`,type:`case`},{id:`ibid`,regex:/\b[Ii]bid\.(?:\s+at\s+(\d+))?/g,description:`Ibid. citations (e.g., "Ibid." or "Ibid. 
at 125")`,type:`case`},{id:`supra`,regex:/\b([A-Z][a-zA-Z]+(?:(?:\s+v\.?\s+|\s+)[A-Z][a-zA-Z]+)*),?\s+supra(?:,?\s+at\s+(\d+))?/g,description:`Supra citations (e.g., "Smith, supra" or "Smith, supra, at 460")`,type:`case`},{id:`shortFormCase`,regex:/\b(\d+)\s+([A-Z][A-Za-z.\s]+?(?:\d[a-z])?)\s+at\s+(\d+)\b/g,description:`Short-form case citations (e.g., "500 F.2d at 125")`,type:`case`}];function d(e,t=[...o,...s,...c,...l,...u]){let n=[];for(let r of t)try{let t=e.matchAll(r.regex);for(let e of t)n.push({text:e[0],span:{cleanStart:e.index,cleanEnd:e.index+e[0].length},type:r.type,patternId:r.id})}catch(e){console.warn(`Pattern ${r.id} threw error, skipping:`,e instanceof Error?e.message:String(e));continue}return n.sort((e,t)=>e.span.cleanStart-t.span.cleanStart),n}function f(e,t){let{text:n,span:r}=e,i=/^(\d+)\s+([A-Za-z0-9.\s]+)\s+(\d+)/.exec(n);if(!i)throw Error(`Failed to parse case citation: ${n}`);let a=Number.parseInt(i[1],10),o=i[2].trim(),s=Number.parseInt(i[3],10),c=/,\s*(\d+)/.exec(n),l=c?Number.parseInt(c[1],10):void 0,u=/\((?:[^)]*\s)?(\d{4})\)/.exec(n),d=u?Number.parseInt(u[1],10):void 0,f=/\(([^)]*[A-Za-z][^)]*)\)/.exec(n),p=f?f[1].trim():void 0,m=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,h=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd,g=.5;return`F.,F.2d,F.3d,F.4th,U.S.,S. Ct.,L. Ed.,P.,P.2d,P.3d,A.,A.2d,A.3d,N.E.,N.E.2d,N.E.3d,N.W.,N.W.2d,S.E.,S.E.2d,S.W.,S.W.2d,S.W.3d,So.,So. 2d,So. 3d`.split(`,`).some(e=>o.includes(e))&&(g+=.3),d!==void 0&&d<=new Date().getFullYear()&&(g+=.2),g=Math.min(g,1),{type:`case`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:m,originalEnd:h},confidence:g,matchedText:n,processTimeMs:0,patternsChecked:1,volume:a,reporter:o,page:s,pincite:l,court:p,year:d}}function p(e,t){let{text:n,span:r}=e,i=/^(?:(\d+)\s+)?([A-Za-z.\s]+?)\s*§\s*(\d+[A-Za-z0-9\-]*)/.exec(n);if(!i)throw Error(`Failed to parse statute citation: ${n}`);let a=i[1]?Number.parseInt(i[1],10):void 0,o=i[2].trim(),s=i[3],c=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,l=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd,u=.5;return[`U.S.C.`,`C.F.R.`,`Cal. Civ. Code`,`Cal. Penal Code`,`N.Y. Civ. Prac. L. & R.`,`Tex. Civ. Prac. & Rem. 
Code`].some(e=>o.includes(e))&&(u+=.3),u=Math.min(u,1),{type:`statute`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:c,originalEnd:l},confidence:u,matchedText:n,processTimeMs:0,patternsChecked:1,title:a,code:o,section:s}}function m(e,t){let{text:n,span:r}=e,i=/^(\d+)\s+([A-Za-z.\s]+?)\s+(\d+)/.exec(n);if(!i)throw Error(`Failed to parse journal citation: ${n}`);let a=Number.parseInt(i[1],10),o=i[2].trim(),s=Number.parseInt(i[3],10),c=/,\s*(\d+)/.exec(n),l=c?Number.parseInt(c[1],10):void 0,u=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,d=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`journal`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:u,originalEnd:d},confidence:.6,matchedText:n,processTimeMs:0,patternsChecked:1,volume:a,journal:o,abbreviation:o,page:s,pincite:l}}function h(e,t){let{text:n,span:r}=e,i=/^(\d{4})\s+(WL|LEXIS|U\.S\.\s+LEXIS)\s+(\d+)/.exec(n);if(!i)throw Error(`Failed to parse neutral citation: ${n}`);let a=Number.parseInt(i[1],10),o=i[2],s=i[3],c=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,l=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`neutral`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:c,originalEnd:l},confidence:1,matchedText:n,processTimeMs:0,patternsChecked:1,year:a,court:o,documentNumber:s}}function g(e,t){let{text:n,span:r}=e,i=/Pub\.\s?L\.(?:\s?No\.)?\s?(\d+)-(\d+)/.exec(n);if(!i)throw Error(`Failed to parse public law citation: ${n}`);let a=Number.parseInt(i[1],10),o=Number.parseInt(i[2],10),s=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,c=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`publicLaw`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:s,originalEnd:c},confidence:.9,matchedText:n,processTimeMs:0,patternsChecked:1,congress:a,lawNumber:o}}function _(e,t){let{text:n,span:r}=e,i=/^(\d+)\s+Fed\.\s?Reg\.\s+(\d+)/.exec(n);if(!i)throw Error(`Failed to parse Federal Register citation: ${n}`);let a=Number.parseInt(i[1],10),o=Number.parseInt(i[2],10),s=/\((?:.*?\s)?(\d{4})\)/.exec(n),c=s?Number.parseInt(s[1],10):void 0,l=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,u=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`federalRegister`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:l,originalEnd:u},confidence:.9,matchedText:n,processTimeMs:0,patternsChecked:1,volume:a,page:o,year:c}}function v(e,t){let{text:n,span:r}=e,i=/[Ii](?:d|bid)\.(?:\s+at\s+(\d+))?/.exec(n);if(!i)throw Error(`Failed to parse Id. 
citation: ${n}`);let a=i[1]?Number.parseInt(i[1],10):void 0,o=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,s=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`id`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:o,originalEnd:s},confidence:1,matchedText:n,processTimeMs:0,patternsChecked:1,pincite:a}}function y(e,t){let{text:n,span:r}=e,i=/\b([A-Z][a-zA-Z]+(?:(?:\s+v\.?\s+|\s+)[A-Z][a-zA-Z]+)*),?\s+supra(?:,?\s+at\s+(\d+))?/.exec(n);if(!i)throw Error(`Failed to parse supra citation: ${n}`);let a=i[1],o=i[2]?Number.parseInt(i[2],10):void 0,s=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,c=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`supra`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:s,originalEnd:c},confidence:.9,matchedText:n,processTimeMs:0,patternsChecked:1,partyName:a,pincite:o}}function b(e,t){let{text:n,span:r}=e,i=/(\d+)\s+([A-Z][A-Za-z.\s]+?(?:\d[a-z])?)\s+at\s+(\d+)/.exec(n);if(!i)throw Error(`Failed to parse short-form case citation: ${n}`);let a=Number.parseInt(i[1],10),o=i[2].trim(),s=Number.parseInt(i[3],10),c=t.cleanToOriginal.get(r.cleanStart)??r.cleanStart,l=t.cleanToOriginal.get(r.cleanEnd)??r.cleanEnd;return{type:`shortFormCase`,text:n,span:{cleanStart:r.cleanStart,cleanEnd:r.cleanEnd,originalStart:c,originalEnd:l},confidence:.7,matchedText:n,processTimeMs:0,patternsChecked:1,volume:a,reporter:o,pincite:s}}function x(e,t,n=/\n\n+/g){let r=new Map,i=[0],a;for(;(a=n.exec(e))!==null;)i.push(a.index+a[0].length);i.push(e.length);for(let e=0;e<t.length;e++){let n=t[e].span.originalStart,a=0;for(let e=0;e<i.length-1;e++)if(n>=i[e]&&n<i[e+1]){a=e;break}r.set(e,a)}return r}function S(e,t,n,r){if(r===`none`)return!0;let i=n.get(e),a=n.get(t);return i===void 0||a===void 0?!0:i===a}function C(e,t){if(e.length===0)return t.length;if(t.length===0)return e.length;let n=Array.from({length:e.length+1},()=>Array(t.length+1).fill(0));for(let t=0;t<=e.length;t++)n[t][0]=t;for(let e=0;e<=t.length;e++)n[0][e]=e;for(let r=1;r<=e.length;r++)for(let i=1;i<=t.length;i++)e[r-1]===t[i-1]?n[r][i]=n[r-1][i-1]:n[r][i]=1+Math.min(n[r-1][i],n[r][i-1],n[r-1][i-1]);return n[e.length][t.length]}function w(e,t){let n=e.toLowerCase(),r=t.toLowerCase(),i=C(n,r),a=Math.max(n.length,r.length);return a===0?1:1-i/a}var T=class{constructor(e,t,n={}){this.citations=e,this.text=t,this.options={scopeStrategy:n.scopeStrategy??`paragraph`,autoDetectParagraphs:n.autoDetectParagraphs??!0,paragraphBoundaryPattern:n.paragraphBoundaryPattern??/\n\n+/g,fuzzyPartyMatching:n.fuzzyPartyMatching??!0,partyMatchThreshold:n.partyMatchThreshold??.8,allowNestedResolution:n.allowNestedResolution??!1,reportUnresolved:n.reportUnresolved??!0},this.context={citationIndex:0,allCitations:e,lastFullCitation:void 0,fullCitationHistory:new Map,paragraphMap:new Map},this.options.autoDetectParagraphs&&(this.context.paragraphMap=x(t,e,this.options.paragraphBoundaryPattern))}resolve(){let e=[];for(let t=0;t<this.citations.length;t++){this.context.citationIndex=t;let n=this.citations[t],r;switch(n.type){case`id`:r=this.resolveId(n);break;case`supra`:r=this.resolveSupra(n);break;case`shortFormCase`:r=this.resolveShortFormCase(n);break;default:this.isFullCitation(n)&&(this.context.lastFullCitation=t,this.trackFullCitation(n,t));break}e.push({...n,resolution:r})}return e}resolveId(e){let t=this.context.citationIndex,n;for(let e=t-1;e>=0;e--)if(this.citations[e].type===`case`){n=e;break}return n===void 0?this.createFailureResult(`No preceding full case citation 
found`):this.isWithinScope(n,t)?{resolvedTo:n,confidence:1}:this.createFailureResult(`Antecedent citation outside scope boundary`)}resolveSupra(e){let t=this.context.citationIndex,n=this.normalizePartyName(e.partyName),r;for(let[e,i]of this.context.fullCitationHistory){if(!this.isWithinScope(i,t))continue;let a=w(n,e);(!r||a>r.similarity)&&(r={index:i,similarity:a})}if(!r)return this.createFailureResult(`No full citation found in scope`);if(r.similarity<this.options.partyMatchThreshold)return this.createFailureResult(`Party name similarity ${r.similarity.toFixed(2)} below threshold ${this.options.partyMatchThreshold}`);let i=[];return r.similarity<1&&i.push(`Fuzzy match: similarity ${r.similarity.toFixed(2)}`),{resolvedTo:r.index,confidence:r.similarity,warnings:i.length>0?i:void 0}}resolveShortFormCase(e){let t=this.context.citationIndex;for(let n=t-1;n>=0;n--){let r=this.citations[n];if(r.type!==`case`)continue;let i=r;if(i.volume===e.volume&&this.normalizeReporter(i.reporter)===this.normalizeReporter(e.reporter))return this.isWithinScope(n,t)?{resolvedTo:n,confidence:.95}:this.createFailureResult(`Matching citation outside scope boundary`)}return this.createFailureResult(`No matching full case citation found`)}isFullCitation(e){return e.type===`case`||e.type===`statute`||e.type===`journal`||e.type===`neutral`||e.type===`publicLaw`||e.type===`federalRegister`}trackFullCitation(e,t){if(e.type===`case`){let n=e,r=this.extractPartyName(n);if(r){let e=this.normalizePartyName(r);this.context.fullCitationHistory.set(e,t)}}}extractPartyName(e){let t=e.span.originalStart,n=Math.max(0,t-100),r=this.text.substring(n,t),i=r.match(/([A-Z][a-zA-Z]*(?:\s+[A-Z][a-zA-Z]*)*)\s+v\.?\s+[A-Z][a-zA-Z]*(?:\s+[A-Z][a-zA-Z]*)*,\s*$/);return i?i[1].trim():r.match(/([A-Z][a-zA-Z]*(?:\s+[A-Z][a-zA-Z]*)*),\s*$/)?.[1].trim()}normalizePartyName(e){return e.toLowerCase().replace(/\s+/g,` `).trim()}normalizeReporter(e){return e.toLowerCase().replace(/\s+/g,``).replace(/\./g,``)}isWithinScope(e,t){return S(e,t,this.context.paragraphMap,this.options.scopeStrategy)}createFailureResult(e){if(this.options.reportUnresolved)return{resolvedTo:void 0,failureReason:e,confidence:0}}};function E(e,t,n){return new T(e,t,n).resolve()}function D(e,t){let n=performance.now(),{cleaned:r,transformationMap:a,warnings:x}=i(e,t?.cleaners),S=d(r,t?.patterns||[...l,...u,...o,...s,...c]),C=[],w=new Set;for(let e of S){let t=`${e.span.cleanStart}-${e.span.cleanEnd}`;w.has(t)||(w.add(t),C.push(e))}let T=[];for(let e of C){let t;switch(e.type){case`case`:t=e.patternId===`id`||e.patternId===`ibid`?v(e,a):e.patternId===`supra`?y(e,a):e.patternId===`shortFormCase`?b(e,a):f(e,a);break;case`statute`:t=p(e,a);break;case`journal`:t=m(e,a);break;case`neutral`:t=h(e,a);break;case`publicLaw`:t=g(e,a);break;case`federalRegister`:t=_(e,a);break;default:continue}x.length>0&&(t.warnings=[...t.warnings||[],...x]),t.processTimeMs=performance.now()-n,T.push(t)}return t?.resolve?E(T,e,t.resolutionOptions):T}async function O(e,t){return D(e,t)}exports.DocumentResolver=T,exports.cleanText=i,exports.extractCase=f,exports.extractCitations=D,exports.extractCitationsAsync=O,exports.extractFederalRegister=_,exports.extractJournal=m,exports.extractNeutral=h,exports.extractPublicLaw=g,exports.extractStatute=p,exports.resolveCitations=E,exports.tokenize=d;
//# sourceMappingURL=index.cjs.map
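
This main bundle exports the full pipeline visible in the minified code above: cleanText, tokenize, the per-type extractors (extractCase, extractStatute, extractJournal, extractNeutral, extractPublicLaw, extractFederalRegister), DocumentResolver, resolveCitations, and extractCitations / extractCitationsAsync. A short usage sketch built from those exports and the embedded source that follows; the bare "eyecite-ts" specifier assumes the package's main entry points at dist/index.cjs / dist/index.mjs.

```typescript
// Sketch of calling the bundled pipeline; specifics beyond the exported names
// and options (cleaners, patterns, resolve, resolutionOptions) are assumptions.
import { extractCitations } from "eyecite-ts"

const text =
  "See Smith v. <b>Doe</b>, 500 F.2d 123, 125 (9th Cir. 1975). Id. at 130."

// cleanText runs first (HTML stripping, whitespace/Unicode/smart-quote
// normalization), then tokenization and per-type extraction; resolve: true
// links Id./supra/short-form citations back to their antecedents via
// DocumentResolver.
const citations = extractCitations(text, { resolve: true })

for (const c of citations) {
  // Each citation carries both cleaned-text and original-text offsets.
  console.log(c.type, c.text, c.span.originalStart, c.span.originalEnd, c.confidence)
}
```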

package/dist/index.cjs.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.cjs","names":[],"sources":["../src/clean/cleaners.ts","../src/clean/cleanText.ts","../src/patterns/casePatterns.ts","../src/patterns/statutePatterns.ts","../src/patterns/journalPatterns.ts","../src/patterns/neutralPatterns.ts","../src/patterns/shortForm.ts","../src/tokenize/tokenizer.ts","../src/extract/extractCase.ts","../src/extract/extractStatute.ts","../src/extract/extractJournal.ts","../src/extract/extractNeutral.ts","../src/extract/extractPublicLaw.ts","../src/extract/extractFederalRegister.ts","../src/extract/extractShortForms.ts","../src/resolve/scopeBoundary.ts","../src/resolve/levenshtein.ts","../src/resolve/DocumentResolver.ts","../src/resolve/index.ts","../src/extract/extractCitations.ts"],"sourcesContent":["/**\n * Built-in text cleaner functions for preprocessing legal documents.\n *\n * Each cleaner is a simple transformation: (text: string) => string\n * Cleaners can be composed via the cleanText() pipeline.\n */\n\n/**\n * Remove all HTML tags from text.\n *\n * @example\n * stripHtmlTags(\"Smith v. <b>Doe</b>, 500 F.2d 123\")\n * // => \"Smith v. Doe, 500 F.2d 123\"\n */\nexport function stripHtmlTags(text: string): string {\n\treturn text.replace(/<[^>]+>/g, \"\")\n}\n\n/**\n * Normalize whitespace: convert tabs/newlines to spaces, collapse multiple spaces.\n *\n * @example\n * normalizeWhitespace(\"Smith v. Doe, 500 F.2d 123\")\n * // => \"Smith v. Doe, 500 F.2d 123\"\n */\nexport function normalizeWhitespace(text: string): string {\n\treturn text.replace(/[\\t\\n\\r]+/g, \" \").replace(/ {2,}/g, \" \")\n}\n\n/**\n * Apply Unicode NFKC normalization (ligatures → separate chars).\n *\n * @example\n * normalizeUnicode(\"Smith v. Doe, 500 F.2d 123\") // with ligature \"fi\"\n * // => \"Smith v. Doe, 500 F.2d 123\" // normalized\n */\nexport function normalizeUnicode(text: string): string {\n\treturn text.normalize(\"NFKC\")\n}\n\n/**\n * Replace curly quotes and apostrophes with straight quotes.\n *\n * @example\n * fixSmartQuotes(\"\"Smith\" v. 'Doe', 500 F.2d 123\")\n * // => \"\\\"Smith\\\" v. 'Doe', 500 F.2d 123\"\n */\nexport function fixSmartQuotes(text: string): string {\n\treturn text\n\t\t.replace(/[\\u201C\\u201D]/g, '\"') // curly double quotes\n\t\t.replace(/[\\u2018\\u2019]/g, \"'\") // curly single quotes/apostrophes\n}\n\n/**\n * Remove underscore OCR artifacts (common in scanned documents).\n *\n * @example\n * removeOcrArtifacts(\"Smith v. Doe, 500 F._2d 123\")\n * // => \"Smith v. Doe, 500 F.2d 123\"\n */\nexport function removeOcrArtifacts(text: string): string {\n\treturn text.replace(/_/g, \"\")\n}\n","import type { TransformationMap } from \"../types/span\"\nimport {\n\tfixSmartQuotes,\n\tnormalizeUnicode,\n\tnormalizeWhitespace,\n\tstripHtmlTags,\n} from \"./cleaners\"\n\n/**\n * Result of text cleaning operation.\n */\nexport interface CleanTextResult {\n\t/** Cleaned text after all transformations */\n\tcleaned: string\n\n\t/** Position mappings between cleaned and original text */\n\ttransformationMap: TransformationMap\n\n\t/** Warnings generated during cleaning (currently unused) */\n\twarnings: Warning[]\n}\n\n/**\n * Warning generated during text cleaning.\n */\nexport interface Warning {\n\tlevel: \"error\" | \"warning\" | \"info\"\n\tmessage: string\n\tposition: { start: number; end: number }\n}\n\n/**\n * Clean text using a pipeline of transformation functions.\n *\n * Applies cleaners sequentially while maintaining accurate position mappings\n * between the original and cleaned text. 
This enables citation extraction from\n * cleaned text while reporting positions in the original text.\n *\n * @param original - Original input text\n * @param cleaners - Array of cleaner functions to apply (default: stripHtmlTags, normalizeWhitespace, normalizeUnicode, fixSmartQuotes)\n * @returns Cleaned text with position mappings and warnings\n *\n * @example\n * const result = cleanText(\"Smith v. <b>Doe</b>, 500 F.2d 123\")\n * // result.cleaned: \"Smith v. Doe, 500 F.2d 123\"\n * // result.transformationMap tracks position shifts from HTML removal\n */\nexport function cleanText(\n\toriginal: string,\n\tcleaners: Array<(text: string) => string> = [\n\t\tstripHtmlTags,\n\t\tnormalizeWhitespace,\n\t\tnormalizeUnicode,\n\t\tfixSmartQuotes,\n\t],\n): CleanTextResult {\n\t// Initialize 1:1 position mapping\n\tlet currentText = original\n\tlet cleanToOriginal = new Map<number, number>()\n\tlet originalToClean = new Map<number, number>()\n\n\t// Identity mapping: cleanToOriginal[i] = i, originalToClean[i] = i\n\tfor (let i = 0; i <= original.length; i++) {\n\t\tcleanToOriginal.set(i, i)\n\t\toriginalToClean.set(i, i)\n\t}\n\n\t// Apply each cleaner sequentially, rebuilding position maps\n\tfor (const cleaner of cleaners) {\n\t\tconst beforeText = currentText\n\t\tconst afterText = cleaner(currentText)\n\n\t\tif (beforeText !== afterText) {\n\t\t\t// Text changed - rebuild position maps\n\t\t\tconst { newCleanToOriginal, newOriginalToClean } = rebuildPositionMaps(\n\t\t\t\tbeforeText,\n\t\t\t\tafterText,\n\t\t\t\tcleanToOriginal,\n\t\t\t\toriginalToClean,\n\t\t\t)\n\n\t\t\tcleanToOriginal = newCleanToOriginal\n\t\t\toriginalToClean = newOriginalToClean\n\t\t\tcurrentText = afterText\n\t\t}\n\t}\n\n\tconst transformationMap: TransformationMap = {\n\t\tcleanToOriginal,\n\t\toriginalToClean,\n\t}\n\n\treturn {\n\t\tcleaned: currentText,\n\t\ttransformationMap,\n\t\twarnings: [],\n\t}\n}\n\n/**\n * Rebuild position maps after a text transformation.\n *\n * Uses a simplified algorithm that scans through both strings, matching\n * characters where possible and tracking the offset accumulation.\n *\n * @param beforeText - Text before transformation\n * @param afterText - Text after transformation\n * @param oldCleanToOriginal - Previous clean-to-original mapping\n * @param oldOriginalToClean - Previous original-to-clean mapping\n * @returns New position maps\n */\nfunction rebuildPositionMaps(\n\tbeforeText: string,\n\tafterText: string,\n\toldCleanToOriginal: Map<number, number>,\n\toldOriginalToClean: Map<number, number>,\n): {\n\tnewCleanToOriginal: Map<number, number>\n\tnewOriginalToClean: Map<number, number>\n} {\n\tconst newCleanToOriginal = new Map<number, number>()\n\tconst newOriginalToClean = new Map<number, number>()\n\n\tlet beforeIdx = 0\n\tlet afterIdx = 0\n\n\t// Scan through both strings, matching characters where possible\n\twhile (beforeIdx <= beforeText.length || afterIdx <= afterText.length) {\n\t\t// Both at end\n\t\tif (beforeIdx >= beforeText.length && afterIdx >= afterText.length) {\n\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? beforeIdx\n\t\t\tnewCleanToOriginal.set(afterIdx, originalPos)\n\t\t\tnewOriginalToClean.set(originalPos, afterIdx)\n\t\t\tbreak\n\t\t}\n\n\t\t// Before text exhausted (expansion case)\n\t\tif (beforeIdx >= beforeText.length) {\n\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? 
beforeIdx\n\t\t\tnewCleanToOriginal.set(afterIdx, originalPos)\n\t\t\tafterIdx++\n\t\t\tcontinue\n\t\t}\n\n\t\t// After text exhausted (removal case)\n\t\tif (afterIdx >= afterText.length) {\n\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? beforeIdx\n\t\t\tnewOriginalToClean.set(originalPos, afterIdx)\n\t\t\tbeforeIdx++\n\t\t\tcontinue\n\t\t}\n\n\t\t// Characters match - carry forward the mapping\n\t\tif (beforeText[beforeIdx] === afterText[afterIdx]) {\n\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? beforeIdx\n\t\t\tnewCleanToOriginal.set(afterIdx, originalPos)\n\t\t\tnewOriginalToClean.set(originalPos, afterIdx)\n\t\t\tbeforeIdx++\n\t\t\tafterIdx++\n\t\t} else {\n\t\t\t// Characters differ - need to determine if this is insertion/deletion/replacement\n\t\t\t// Look ahead to find next match\n\t\t\tlet foundMatch = false\n\t\t\tconst maxLookAhead = 20 // Limit lookahead to avoid performance issues\n\n\t\t\t// Check if something was deleted from before text\n\t\t\tfor (let lookAhead = 1; lookAhead <= maxLookAhead; lookAhead++) {\n\t\t\t\tif (beforeIdx + lookAhead >= beforeText.length) break\n\n\t\t\t\tif (beforeText[beforeIdx + lookAhead] === afterText[afterIdx]) {\n\t\t\t\t\t// Found a match - characters were deleted from before text\n\t\t\t\t\tfor (let i = 0; i < lookAhead; i++) {\n\t\t\t\t\t\tconst originalPos =\n\t\t\t\t\t\t\toldCleanToOriginal.get(beforeIdx + i) ?? beforeIdx + i\n\t\t\t\t\t\tnewOriginalToClean.set(originalPos, afterIdx)\n\t\t\t\t\t}\n\t\t\t\t\tbeforeIdx += lookAhead\n\t\t\t\t\tfoundMatch = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (foundMatch) continue\n\n\t\t\t// Check if something was inserted into after text\n\t\t\tfor (let lookAhead = 1; lookAhead <= maxLookAhead; lookAhead++) {\n\t\t\t\tif (afterIdx + lookAhead >= afterText.length) break\n\n\t\t\t\tif (beforeText[beforeIdx] === afterText[afterIdx + lookAhead]) {\n\t\t\t\t\t// Found a match - characters were inserted into after text\n\t\t\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? beforeIdx\n\t\t\t\t\tfor (let i = 0; i < lookAhead; i++) {\n\t\t\t\t\t\tnewCleanToOriginal.set(afterIdx + i, originalPos)\n\t\t\t\t\t}\n\t\t\t\t\tafterIdx += lookAhead\n\t\t\t\t\tfoundMatch = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (foundMatch) continue\n\n\t\t\t// No match found within lookahead - treat as replacement\n\t\t\tconst originalPos = oldCleanToOriginal.get(beforeIdx) ?? 
beforeIdx\n\t\t\tnewCleanToOriginal.set(afterIdx, originalPos)\n\t\t\tnewOriginalToClean.set(originalPos, afterIdx)\n\t\t\tbeforeIdx++\n\t\t\tafterIdx++\n\t\t}\n\t}\n\n\treturn { newCleanToOriginal, newOriginalToClean }\n}\n","/**\n * Case Citation Regex Patterns\n *\n * These patterns are designed for tokenization (broad matching) not extraction.\n * They identify potential case citations in text for the tokenizer (Plan 3).\n * Metadata parsing and validation against reporters-db happens in Phase 2 Plan 5 (extraction layer).\n *\n * Pattern Design Principles (from RESEARCH.md):\n * - Use \\b word boundaries to avoid matching \"F.\" in \"F.B.I.\"\n * - Avoid nested quantifiers: (a+)+ causes ReDoS\n * - Keep patterns simple: tokenization only needs to find candidates\n * - Use global flag /g for matchAll()\n */\n\nexport interface Pattern {\n id: string\n regex: RegExp\n description: string\n type: 'case' | 'statute' | 'journal' | 'neutral' | 'publicLaw' | 'federalRegister'\n}\n\nexport const casePatterns: Pattern[] = [\n {\n id: 'federal-reporter',\n regex: /\\b(\\d+)\\s+(F\\.|F\\.2d|F\\.3d|F\\.\\s?Supp\\.|F\\.\\s?Supp\\.\\s?2d|F\\.\\s?Supp\\.\\s?3d)\\s+(\\d+)\\b/g,\n description: 'Federal Reporter (F., F.2d, F.3d, F.Supp., etc.)',\n type: 'case',\n },\n {\n id: 'supreme-court',\n regex: /\\b(\\d+)\\s+(U\\.S\\.|S\\.\\s?Ct\\.|L\\.\\s?Ed\\.(?:\\s?2d)?)\\s+(\\d+)\\b/g,\n description: 'U.S. Supreme Court reporters',\n type: 'case',\n },\n {\n id: 'state-reporter',\n regex: /\\b(\\d+)\\s+([A-Z][A-Za-z\\.]+(?:\\s?2d|\\s?3d)?)\\s+(\\d+)\\b/g,\n description: 'State reporters (broad pattern, validated against reporters-db in Phase 3)',\n type: 'case',\n },\n]\n","/**\n * Statute Citation Regex Patterns\n *\n * Patterns for U.S. Code and state code citations.\n * These are intentionally broad for tokenization - validation against\n * actual code databases happens in Phase 2 Plan 5 (extraction layer).\n *\n * Pattern Design:\n * - Simple structure to avoid ReDoS\n * - Matches both \"§\" and \"Section\" formats\n * - State codes use broad pattern (validated later)\n */\n\nimport type { Pattern } from './casePatterns'\n\nexport const statutePatterns: Pattern[] = [\n {\n id: 'usc',\n regex: /\\b(\\d+)\\s+U\\.S\\.C\\.?\\s+§+\\s*(\\d+)\\b/g,\n description: 'U.S. Code citations (e.g., \"42 U.S.C. § 1983\")',\n type: 'statute',\n },\n {\n id: 'state-code',\n regex: /\\b([A-Z][a-z]+\\.?\\s+[A-Za-z\\.]+\\s+Code)\\s+§\\s*(\\d+)\\b/g,\n description: 'State code citations (broad pattern, e.g., \"Cal. Penal Code § 187\")',\n type: 'statute',\n },\n]\n","/**\n * Journal Citation Regex Patterns\n *\n * Patterns for law review and journal citations.\n * These are intentionally broad for tokenization - validation against\n * journals-db happens in Phase 3 (extraction layer).\n *\n * Pattern Design:\n * - Matches volume-journal-page format\n * - Broad journal name matching (validated later)\n * - Simple structure to avoid ReDoS\n */\n\nimport type { Pattern } from './casePatterns'\n\nexport const journalPatterns: Pattern[] = [\n {\n id: 'law-review',\n regex: /\\b(\\d+)\\s+([A-Z][A-Za-z\\.\\s]+)\\s+(\\d+)\\b/g,\n description: 'Law review citations (e.g., \"120 Harv. L. Rev. 
500\"), validated against journals-db in Phase 3',\n type: 'journal',\n },\n]\n","/**\n * Neutral and Online Citation Regex Patterns\n *\n * Patterns for WestLaw, LexisNexis, public laws, and Federal Register citations.\n * These have predictable formats and don't require external validation.\n *\n * Pattern Design:\n * - Matches year-database-number format for online citations\n * - Matches Pub. L. No. format for public laws\n * - Matches volume-Fed. Reg.-page for Federal Register\n * - Simple structure to avoid ReDoS\n */\n\nimport type { Pattern } from './casePatterns'\n\nexport const neutralPatterns: Pattern[] = [\n {\n id: 'westlaw',\n regex: /\\b(\\d{4})\\s+WL\\s+(\\d+)\\b/g,\n description: 'WestLaw citations (e.g., \"2021 WL 123456\")',\n type: 'neutral',\n },\n {\n id: 'lexis',\n regex: /\\b(\\d{4})\\s+U\\.S\\.\\s+LEXIS\\s+(\\d+)\\b/g,\n description: 'LexisNexis citations (e.g., \"2021 U.S. LEXIS 5000\")',\n type: 'neutral',\n },\n {\n id: 'public-law',\n regex: /\\bPub\\.\\s?L\\.\\s?No\\.\\s?(\\d+-\\d+)\\b/g,\n description: 'Public Law citations (e.g., \"Pub. L. No. 117-58\")',\n type: 'publicLaw',\n },\n {\n id: 'federal-register',\n regex: /\\b(\\d+)\\s+Fed\\.\\s?Reg\\.\\s+(\\d+)\\b/g,\n description: 'Federal Register citations (e.g., \"86 Fed. Reg. 12345\")',\n type: 'federalRegister',\n },\n]\n","/**\n * Short-form Citation Regex Patterns\n *\n * Patterns for Id., Ibid., supra, and short-form case citations.\n * These refer to earlier citations in the document.\n *\n * Pattern Design:\n * - Simple structure to avoid ReDoS (no nested quantifiers)\n * - Broad matching for tokenization; validation happens in extraction layer\n * - Word boundaries to prevent false positives (e.g., \"Idaho\" vs \"Id.\")\n */\n\nimport type { Pattern } from './casePatterns'\n\n/** Id. with optional pincite: \"Id.\" or \"Id. at 253\" */\nexport const ID_PATTERN: RegExp = /\\b[Ii]d\\.(?:\\s+at\\s+(\\d+))?/g\n\n/** Ibid. with optional pincite (less common variant) */\nexport const IBID_PATTERN: RegExp = /\\b[Ii]bid\\.(?:\\s+at\\s+(\\d+))?/g\n\n/**\n * Supra with party name and optional pincite.\n * Pattern: word(s), supra [, at page]\n * Captures: (1) party name, (2) pincite\n * Note: Matches party names including \"v.\" (e.g., \"Smith v. Jones, supra\")\n */\nexport const SUPRA_PATTERN: RegExp = /\\b([A-Z][a-zA-Z]+(?:(?:\\s+v\\.?\\s+|\\s+)[A-Z][a-zA-Z]+)*),?\\s+supra(?:,?\\s+at\\s+(\\d+))?/g\n\n/**\n * Short-form case: volume reporter at page\n * Pattern: number space abbreviation space \"at\" space number\n * Simplified detection; full parsing in extraction layer\n */\nexport const SHORT_FORM_CASE_PATTERN: RegExp = /\\b(\\d+)\\s+([A-Z][A-Za-z.\\s]+?(?:\\d[a-z])?)\\s+at\\s+(\\d+)\\b/g\n\n/** All short-form patterns for tokenization */\nexport const SHORT_FORM_PATTERNS: readonly RegExp[] = [\n ID_PATTERN,\n IBID_PATTERN,\n SUPRA_PATTERN,\n SHORT_FORM_CASE_PATTERN,\n] as const\n\n/** Pattern objects for consistency with other pattern modules */\nexport const shortFormPatterns: Pattern[] = [\n {\n id: 'id',\n regex: ID_PATTERN,\n description: 'Id. citations (e.g., \"Id.\" or \"Id. at 253\")',\n type: 'case', // Will be typed as 'id' in extraction layer\n },\n {\n id: 'ibid',\n regex: IBID_PATTERN,\n description: 'Ibid. citations (e.g., \"Ibid.\" or \"Ibid. 
at 125\")',\n type: 'case', // Will be typed as 'id' in extraction layer\n },\n {\n id: 'supra',\n regex: SUPRA_PATTERN,\n description: 'Supra citations (e.g., \"Smith, supra\" or \"Smith, supra, at 460\")',\n type: 'case', // Will be typed as 'supra' in extraction layer\n },\n {\n id: 'shortFormCase',\n regex: SHORT_FORM_CASE_PATTERN,\n description: 'Short-form case citations (e.g., \"500 F.2d at 125\")',\n type: 'case', // Will be typed as 'shortFormCase' in extraction layer\n },\n]\n","/**\n * Tokenization Layer for Citation Extraction\n *\n * Applies regex patterns to cleaned text to produce citation candidate tokens.\n * This is the second stage of the parsing pipeline:\n * 1. Clean text (remove HTML, normalize Unicode)\n * 2. Tokenize (apply patterns to find candidates) ← THIS MODULE\n * 3. Extract (parse metadata, validate against reporters-db)\n *\n * Tokenization is intentionally broad - it finds potential citations without\n * validating them. The extraction layer (Plan 5) validates tokens against\n * reporters-db and parses metadata.\n *\n * @module tokenize\n */\n\nimport type { Span } from '@/types/span'\nimport type { Pattern } from '@/patterns'\nimport {\n casePatterns,\n statutePatterns,\n journalPatterns,\n neutralPatterns,\n} from '@/patterns'\nimport { shortFormPatterns } from '@/patterns/shortForm'\n\n/**\n * A token representing a potential citation found in cleaned text.\n *\n * Tokens are produced by applying regex patterns to cleaned text.\n * They include matched text, position in cleaned text, and pattern metadata\n * for use in the extraction layer.\n */\nexport interface Token {\n /** Matched text from input */\n text: string\n\n /** Position in cleaned text (cleanStart/cleanEnd only, no original positions yet) */\n span: Pick<Span, 'cleanStart' | 'cleanEnd'>\n\n /** Pattern type that matched this token */\n type: Pattern['type']\n\n /** Pattern ID that matched this token */\n patternId: string\n}\n\n/**\n * Tokenizes cleaned text by applying regex patterns to find citation candidates.\n *\n * For each pattern in the patterns array:\n * 1. Apply pattern.regex.matchAll(cleanedText)\n * 2. Create Token for each match with position, text, and pattern metadata\n * 3. Collect all tokens from all patterns\n * 4. Sort by cleanStart position (ascending)\n *\n * Timeout protection: If a pattern throws (e.g., ReDoS), skip it and continue\n * with remaining patterns. Logs warning to console.\n *\n * Note: This function is synchronous because regex matching is inherently\n * synchronous. This enables both sync (extractCitations) and async\n * (extractCitationsAsync) APIs in Plan 6.\n *\n * @param cleanedText - Text that has been cleaned by cleanText() from Plan 1\n * @param patterns - Regex patterns to apply (defaults to all patterns from Plan 2)\n * @returns Array of tokens sorted by position (cleanStart ascending)\n *\n * @example\n * ```typescript\n * import { tokenize } from '@/tokenize'\n * import { cleanText } from '@/clean'\n *\n * const original = \"See Smith v. Doe, 500 F.2d 123 (9th Cir. 
2020)\"\n * const { cleanedText } = cleanText(original)\n * const tokens = tokenize(cleanedText)\n * // tokens[0] = {\n * // text: \"500 F.2d 123\",\n * // span: { cleanStart: 18, cleanEnd: 30 },\n * // type: \"case\",\n * // patternId: \"federal-reporter\"\n * // }\n * ```\n */\nexport function tokenize(\n cleanedText: string,\n patterns: Pattern[] = [\n ...casePatterns,\n ...statutePatterns,\n ...journalPatterns,\n ...neutralPatterns,\n ...shortFormPatterns,\n ]\n): Token[] {\n const tokens: Token[] = []\n\n for (const pattern of patterns) {\n try {\n // Apply pattern to cleaned text\n const matches = cleanedText.matchAll(pattern.regex)\n\n for (const match of matches) {\n // Create token from match\n tokens.push({\n text: match[0],\n span: {\n cleanStart: match.index!,\n cleanEnd: match.index! + match[0].length,\n },\n type: pattern.type,\n patternId: pattern.id,\n })\n }\n } catch (error) {\n // Timeout protection: If pattern throws (ReDoS, etc.), skip it\n console.warn(\n `Pattern ${pattern.id} threw error, skipping:`,\n error instanceof Error ? error.message : String(error)\n )\n continue\n }\n }\n\n // Sort tokens by position (cleanStart ascending)\n tokens.sort((a, b) => a.span.cleanStart - b.span.cleanStart)\n\n return tokens\n}\n","/**\n * Case Citation Extraction\n *\n * Parses tokenized case citations to extract volume, reporter, page, and\n * optional metadata (pincite, court, year). This is the third stage of\n * the parsing pipeline:\n * 1. Clean text (remove HTML, normalize Unicode)\n * 2. Tokenize (apply patterns to find candidates)\n * 3. Extract (parse metadata, validate) ← THIS MODULE\n *\n * Extraction parses structured data from token text. Validation against\n * reporters-db happens in Phase 3 (resolution layer).\n *\n * @module extract/extractCase\n */\n\nimport type { Token } from '@/tokenize'\nimport type { FullCaseCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts case citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Volume: Leading digits (e.g., \"500\" from \"500 F.2d 123\")\n * - Reporter: Alphabetic abbreviation (e.g., \"F.2d\")\n * - Page: Trailing digits after reporter (e.g., \"123\")\n * - Pincite: Optional page reference after comma (e.g., \", 125\")\n * - Court: Optional court abbreviation in parentheses (e.g., \"(9th Cir.)\")\n * - Year: Optional year in parentheses (e.g., \"(2020)\")\n *\n * Confidence scoring:\n * - Base: 0.5\n * - Common reporter pattern (F., U.S., etc.): +0.3\n * - Valid year (not future): +0.2\n * - Capped at 1.0\n *\n * Position translation:\n * - Uses TransformationMap to convert clean positions → original positions\n * - cleanStart/cleanEnd from token span\n * - originalStart/originalEnd via transformationMap.cleanToOriginal\n *\n * Note: This function does NOT validate against reporters-db. That happens\n * in Phase 3 (resolution layer). 
Phase 2 extraction only parses structure.\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns FullCaseCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"500 F.2d 123, 125\",\n * span: { cleanStart: 10, cleanEnd: 27 },\n * type: \"case\",\n * patternId: \"federal-reporter\"\n * }\n * const citation = extractCase(token, transformationMap)\n * // citation = {\n * // type: \"case\",\n * // text: \"500 F.2d 123, 125\",\n * // volume: 500,\n * // reporter: \"F.2d\",\n * // page: 123,\n * // pincite: 125,\n * // span: { cleanStart: 10, cleanEnd: 27, originalStart: 10, originalEnd: 27 },\n * // confidence: 0.8,\n * // ...\n * // }\n * ```\n */\nexport function extractCase(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): FullCaseCitation {\n\tconst { text, span } = token\n\n\t// Parse volume-reporter-page using regex\n\t// Pattern: volume (digits) + reporter (letters/periods/spaces/numbers) + page (digits)\n\t// Use greedy matching for reporter to capture full abbreviation including spaces\n\tconst volumeReporterPageRegex = /^(\\d+)\\s+([A-Za-z0-9.\\s]+)\\s+(\\d+)/\n\tconst match = volumeReporterPageRegex.exec(text)\n\n\tif (!match) {\n\t\t// Fallback if pattern doesn't match (shouldn't happen if tokenizer is correct)\n\t\tthrow new Error(`Failed to parse case citation: ${text}`)\n\t}\n\n\tconst volume = Number.parseInt(match[1], 10)\n\tconst reporter = match[2].trim()\n\tconst page = Number.parseInt(match[3], 10)\n\n\t// Extract optional pincite (page reference after comma)\n\t// Pattern: \", digits\" (e.g., \", 125\")\n\tconst pinciteRegex = /,\\s*(\\d+)/\n\tconst pinciteMatch = pinciteRegex.exec(text)\n\tconst pincite = pinciteMatch ? Number.parseInt(pinciteMatch[1], 10) : undefined\n\n\t// Extract optional year in parentheses (extract first for better matching)\n\t// Pattern: 4-digit year anywhere in parentheses\n\tconst yearRegex = /\\((?:[^)]*\\s)?(\\d{4})\\)/\n\tconst yearMatch = yearRegex.exec(text)\n\tconst year = yearMatch ? Number.parseInt(yearMatch[1], 10) : undefined\n\n\t// Extract optional court abbreviation in parentheses\n\t// Pattern: \"(text)\" where text contains letters (captures full parenthetical)\n\tconst courtRegex = /\\(([^)]*[A-Za-z][^)]*)\\)/\n\tconst courtMatch = courtRegex.exec(text)\n\tconst court = courtMatch ? courtMatch[1].trim() : undefined\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Calculate confidence score\n\tlet confidence = 0.5 // Base confidence\n\n\t// Common reporter patterns (F., U.S., S. Ct., etc.)\n\tconst commonReporters = [\n\t\t'F.',\n\t\t'F.2d',\n\t\t'F.3d',\n\t\t'F.4th',\n\t\t'U.S.',\n\t\t'S. Ct.',\n\t\t'L. Ed.',\n\t\t'P.',\n\t\t'P.2d',\n\t\t'P.3d',\n\t\t'A.',\n\t\t'A.2d',\n\t\t'A.3d',\n\t\t'N.E.',\n\t\t'N.E.2d',\n\t\t'N.E.3d',\n\t\t'N.W.',\n\t\t'N.W.2d',\n\t\t'S.E.',\n\t\t'S.E.2d',\n\t\t'S.W.',\n\t\t'S.W.2d',\n\t\t'S.W.3d',\n\t\t'So.',\n\t\t'So. 2d',\n\t\t'So. 
3d',\n\t]\n\n\tif (commonReporters.some((r) => reporter.includes(r))) {\n\t\tconfidence += 0.3\n\t}\n\n\t// Valid year check (not in future)\n\tif (year !== undefined) {\n\t\tconst currentYear = new Date().getFullYear()\n\t\tif (year <= currentYear) {\n\t\t\tconfidence += 0.2\n\t\t}\n\t}\n\n\t// Cap at 1.0\n\tconfidence = Math.min(confidence, 1.0)\n\n\treturn {\n\t\ttype: 'case',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0, // Placeholder - timing handled by orchestration layer\n\t\tpatternsChecked: 1, // Single token processed\n\t\tvolume,\n\t\treporter,\n\t\tpage,\n\t\tpincite,\n\t\tcourt,\n\t\tyear,\n\t}\n}\n","/**\n * Statute Citation Extraction\n *\n * Parses tokenized statute citations to extract title, code, section, and\n * optional subsections. Examples: \"42 U.S.C. § 1983\", \"Cal. Civ. Code § 1234(a)(1)\"\n *\n * @module extract/extractStatute\n */\n\nimport type { Token } from '@/tokenize'\nimport type { StatuteCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts statute citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Title: Optional leading digits (e.g., \"42\" from \"42 U.S.C. § 1983\")\n * - Code: Statutory code abbreviation (e.g., \"U.S.C.\", \"Cal. Civ. Code\")\n * - Section: Section number after § symbol (e.g., \"1983\")\n * - Subsections: Optional parenthetical subdivisions (e.g., \"(a)(1)\")\n *\n * Confidence scoring:\n * - Base: 0.5\n * - Known code pattern (U.S.C., C.F.R., state codes): +0.3\n * - Capped at 1.0\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns StatuteCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"42 U.S.C. § 1983\",\n * span: { cleanStart: 10, cleanEnd: 26 },\n * type: \"statute\",\n * patternId: \"usc\"\n * }\n * const citation = extractStatute(token, transformationMap)\n * // citation = {\n * // type: \"statute\",\n * // title: 42,\n * // code: \"U.S.C.\",\n * // section: \"1983\",\n * // ...\n * // }\n * ```\n */\nexport function extractStatute(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): StatuteCitation {\n\tconst { text, span } = token\n\n\t// Parse title-code-section using regex\n\t// Pattern: optional title (digits) + code (letters/periods/spaces) + § + section\n\tconst statuteRegex = /^(?:(\\d+)\\s+)?([A-Za-z.\\s]+?)\\s*§\\s*(\\d+[A-Za-z0-9\\-]*)/\n\tconst match = statuteRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse statute citation: ${text}`)\n\t}\n\n\tconst title = match[1] ? Number.parseInt(match[1], 10) : undefined\n\tconst code = match[2].trim()\n\tconst section = match[3]\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Calculate confidence score\n\tlet confidence = 0.5 // Base confidence\n\n\t// Known statutory code patterns\n\tconst knownCodes = [\n\t\t'U.S.C.',\n\t\t'C.F.R.',\n\t\t'Cal. Civ. Code',\n\t\t'Cal. Penal Code',\n\t\t'N.Y. Civ. Prac. L. & R.',\n\t\t'Tex. Civ. Prac. & Rem. 
Code',\n\t]\n\n\tif (knownCodes.some((c) => code.includes(c))) {\n\t\tconfidence += 0.3\n\t}\n\n\tconfidence = Math.min(confidence, 1.0)\n\n\treturn {\n\t\ttype: 'statute',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\ttitle,\n\t\tcode,\n\t\tsection,\n\t}\n}\n","/**\n * Journal Citation Extraction\n *\n * Parses tokenized journal citations to extract volume, journal name, page,\n * and optional metadata. Examples: \"123 Harv. L. Rev. 456\", \"75 Yale L.J. 789, 791\"\n *\n * @module extract/extractJournal\n */\n\nimport type { Token } from '@/tokenize'\nimport type { JournalCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts journal citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Volume: Leading digits (e.g., \"123\" from \"123 Harv. L. Rev. 456\")\n * - Journal: Journal abbreviation (e.g., \"Harv. L. Rev.\")\n * - Page: Starting page number (e.g., \"456\")\n * - Pincite: Optional specific page reference after comma (e.g., \", 458\")\n *\n * Confidence scoring:\n * - Base: 0.6 (journal validation happens in Phase 3)\n *\n * Note: Author and title extraction from preceding text is not implemented\n * in Phase 2. That requires context analysis in Phase 3.\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns JournalCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"123 Harv. L. Rev. 456\",\n * span: { cleanStart: 10, cleanEnd: 31 },\n * type: \"journal\",\n * patternId: \"journal-standard\"\n * }\n * const citation = extractJournal(token, transformationMap)\n * // citation = {\n * // type: \"journal\",\n * // volume: 123,\n * // journal: \"Harv. L. Rev.\",\n * // abbreviation: \"Harv. L. Rev.\",\n * // page: 456,\n * // ...\n * // }\n * ```\n */\nexport function extractJournal(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): JournalCitation {\n\tconst { text, span } = token\n\n\t// Parse volume-journal-page using regex\n\t// Pattern: volume (digits) + journal (letters/periods/spaces) + page (digits)\n\tconst journalRegex = /^(\\d+)\\s+([A-Za-z.\\s]+?)\\s+(\\d+)/\n\tconst match = journalRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse journal citation: ${text}`)\n\t}\n\n\tconst volume = Number.parseInt(match[1], 10)\n\tconst journal = match[2].trim()\n\tconst page = Number.parseInt(match[3], 10)\n\n\t// Extract optional pincite (page reference after comma)\n\tconst pinciteRegex = /,\\s*(\\d+)/\n\tconst pinciteMatch = pinciteRegex.exec(text)\n\tconst pincite = pinciteMatch ? Number.parseInt(pinciteMatch[1], 10) : undefined\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? 
span.cleanEnd\n\n\t// Confidence: 0.6 base (journal validation against database happens in Phase 3)\n\tconst confidence = 0.6\n\n\treturn {\n\t\ttype: 'journal',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tvolume,\n\t\tjournal,\n\t\tabbreviation: journal, // For Phase 2, abbreviation = journal name\n\t\tpage,\n\t\tpincite,\n\t}\n}\n","/**\n * Neutral Citation Extraction\n *\n * Parses tokenized neutral (vendor-neutral) citations to extract year, court,\n * and document number. Examples: \"2020 WL 123456\", \"2020 U.S. LEXIS 456\"\n *\n * @module extract/extractNeutral\n */\n\nimport type { Token } from '@/tokenize'\nimport type { NeutralCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts neutral citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Year: 4-digit year (e.g., \"2020\")\n * - Court: Vendor identifier (e.g., \"WL\", \"U.S. LEXIS\")\n * - Document number: Unique document identifier (e.g., \"123456\")\n *\n * Confidence scoring:\n * - 1.0 (neutral format is unambiguous and standardized)\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns NeutralCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"2020 WL 123456\",\n * span: { cleanStart: 10, cleanEnd: 24 },\n * type: \"neutral\",\n * patternId: \"westlaw-neutral\"\n * }\n * const citation = extractNeutral(token, transformationMap)\n * // citation = {\n * // type: \"neutral\",\n * // year: 2020,\n * // court: \"WL\",\n * // documentNumber: \"123456\",\n * // confidence: 1.0,\n * // ...\n * // }\n * ```\n */\nexport function extractNeutral(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): NeutralCitation {\n\tconst { text, span } = token\n\n\t// Parse year-court-documentNumber using regex\n\t// Pattern: 4-digit year + court identifier (WL, LEXIS, etc.) + document number\n\tconst neutralRegex = /^(\\d{4})\\s+(WL|LEXIS|U\\.S\\.\\s+LEXIS)\\s+(\\d+)/\n\tconst match = neutralRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse neutral citation: ${text}`)\n\t}\n\n\tconst year = Number.parseInt(match[1], 10)\n\tconst court = match[2]\n\tconst documentNumber = match[3]\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Confidence: 1.0 (neutral format is unambiguous)\n\tconst confidence = 1.0\n\n\treturn {\n\t\ttype: 'neutral',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tyear,\n\t\tcourt,\n\t\tdocumentNumber,\n\t}\n}\n","/**\n * Public Law Citation Extraction\n *\n * Parses tokenized public law citations to extract congress number and law number.\n * Examples: \"Pub. L. No. 116-283\", \"Pub. L. 
117-58\"\n *\n * @module extract/extractPublicLaw\n */\n\nimport type { Token } from '@/tokenize'\nimport type { PublicLawCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts public law citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Congress: Congress number (e.g., \"116\" from \"Pub. L. No. 116-283\")\n * - Law number: Law number within that Congress (e.g., \"283\")\n *\n * Confidence scoring:\n * - 0.9 (public law format is fairly standard)\n *\n * Note: Bill title extraction from nearby text is not implemented in Phase 2.\n * That requires context analysis in Phase 3.\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns PublicLawCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"Pub. L. No. 116-283\",\n * span: { cleanStart: 10, cleanEnd: 29 },\n * type: \"publicLaw\",\n * patternId: \"public-law\"\n * }\n * const citation = extractPublicLaw(token, transformationMap)\n * // citation = {\n * // type: \"publicLaw\",\n * // congress: 116,\n * // lawNumber: 283,\n * // confidence: 0.9,\n * // ...\n * // }\n * ```\n */\nexport function extractPublicLaw(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): PublicLawCitation {\n\tconst { text, span } = token\n\n\t// Parse congress-lawNumber using regex\n\t// Pattern: \"Pub. L.\" (with optional \"No.\") + congress number + \"-\" + law number\n\tconst publicLawRegex = /Pub\\.\\s?L\\.(?:\\s?No\\.)?\\s?(\\d+)-(\\d+)/\n\tconst match = publicLawRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse public law citation: ${text}`)\n\t}\n\n\tconst congress = Number.parseInt(match[1], 10)\n\tconst lawNumber = Number.parseInt(match[2], 10)\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Confidence: 0.9 (public law format is fairly standard)\n\tconst confidence = 0.9\n\n\treturn {\n\t\ttype: 'publicLaw',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tcongress,\n\t\tlawNumber,\n\t}\n}\n","/**\n * Federal Register Citation Extraction\n *\n * Parses tokenized Federal Register citations to extract volume, page, and\n * optional year. Examples: \"85 Fed. Reg. 12345\", \"86 Fed. Reg. 56789 (Jan. 
15, 2021)\"\n *\n * @module extract/extractFederalRegister\n */\n\nimport type { Token } from '@/tokenize'\nimport type { FederalRegisterCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts Federal Register citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Volume: Federal Register volume number (e.g., \"85\")\n * - Page: Page number (e.g., \"12345\")\n * - Year: Optional publication year in parentheses (e.g., \"(2021)\")\n *\n * Confidence scoring:\n * - 0.9 (Federal Register format is standardized)\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns FederalRegisterCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"85 Fed. Reg. 12345\",\n * span: { cleanStart: 10, cleanEnd: 28 },\n * type: \"federalRegister\",\n * patternId: \"federal-register\"\n * }\n * const citation = extractFederalRegister(token, transformationMap)\n * // citation = {\n * // type: \"federalRegister\",\n * // volume: 85,\n * // page: 12345,\n * // confidence: 0.9,\n * // ...\n * // }\n * ```\n */\nexport function extractFederalRegister(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): FederalRegisterCitation {\n\tconst { text, span } = token\n\n\t// Parse volume-page using regex\n\t// Pattern: volume (digits) + \"Fed. Reg.\" + page (digits)\n\tconst federalRegisterRegex = /^(\\d+)\\s+Fed\\.\\s?Reg\\.\\s+(\\d+)/\n\tconst match = federalRegisterRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse Federal Register citation: ${text}`)\n\t}\n\n\tconst volume = Number.parseInt(match[1], 10)\n\tconst page = Number.parseInt(match[2], 10)\n\n\t// Extract optional year in parentheses\n\t// Pattern: \"(year)\" or \"(month day, year)\"\n\tconst yearRegex = /\\((?:.*?\\s)?(\\d{4})\\)/\n\tconst yearMatch = yearRegex.exec(text)\n\tconst year = yearMatch ? Number.parseInt(yearMatch[1], 10) : undefined\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Confidence: 0.9 (Federal Register format is standardized)\n\tconst confidence = 0.9\n\n\treturn {\n\t\ttype: 'federalRegister',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tvolume,\n\t\tpage,\n\t\tyear,\n\t}\n}\n","/**\n * Short-form Citation Extraction\n *\n * Parses tokenized short-form citations (Id., supra, short-form case) to extract\n * metadata. Short-form citations refer to earlier citations in the document.\n *\n * @module extract/extractShortForms\n */\n\nimport type { Token } from '@/tokenize'\nimport type { IdCitation, SupraCitation, ShortFormCaseCitation } from '@/types/citation'\nimport type { TransformationMap } from '@/types/span'\n\n/**\n * Extracts Id. citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Pincite: Optional page reference (e.g., \"253\" from \"Id. at 253\")\n *\n * Confidence scoring:\n * - 1.0 (Id. 
format is unambiguous and standardized)\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns IdCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"Id. at 253\",\n * span: { cleanStart: 10, cleanEnd: 20 },\n * type: \"case\",\n * patternId: \"id\"\n * }\n * const citation = extractId(token, transformationMap)\n * // citation = {\n * // type: \"id\",\n * // pincite: 253,\n * // confidence: 1.0,\n * // ...\n * // }\n * ```\n */\nexport function extractId(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): IdCitation {\n\tconst { text, span } = token\n\n\t// Parse Id. with optional pincite\n\t// Pattern: Id. or Ibid. with optional \"at [page]\"\n\tconst idRegex = /[Ii](?:d|bid)\\.(?:\\s+at\\s+(\\d+))?/\n\tconst match = idRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse Id. citation: ${text}`)\n\t}\n\n\t// Extract pincite if present\n\tconst pincite = match[1] ? Number.parseInt(match[1], 10) : undefined\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Confidence: 1.0 (Id. format is unambiguous)\n\tconst confidence = 1.0\n\n\treturn {\n\t\ttype: 'id',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tpincite,\n\t}\n}\n\n/**\n * Extracts supra citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Party name: Name preceding \"supra\" (e.g., \"Smith\" from \"Smith, supra\")\n * - Pincite: Optional page reference (e.g., \"460\" from \"Smith, supra, at 460\")\n *\n * Confidence scoring:\n * - 0.9 (supra format is fairly standard but party name extraction can vary)\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns SupraCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"Smith, supra, at 460\",\n * span: { cleanStart: 10, cleanEnd: 30 },\n * type: \"case\",\n * patternId: \"supra\"\n * }\n * const citation = extractSupra(token, transformationMap)\n * // citation = {\n * // type: \"supra\",\n * // partyName: \"Smith\",\n * // pincite: 460,\n * // confidence: 0.9,\n * // ...\n * // }\n * ```\n */\nexport function extractSupra(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): SupraCitation {\n\tconst { text, span } = token\n\n\t// Parse party name and optional pincite\n\t// Pattern: word(s), supra [, at page]\n\t// Note: Matches party names including \"v.\" (e.g., \"Smith v. Jones\")\n\tconst supraRegex = /\\b([A-Z][a-zA-Z]+(?:(?:\\s+v\\.?\\s+|\\s+)[A-Z][a-zA-Z]+)*),?\\s+supra(?:,?\\s+at\\s+(\\d+))?/\n\tconst match = supraRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse supra citation: ${text}`)\n\t}\n\n\tconst partyName = match[1]\n\tconst pincite = match[2] ? 
Number.parseInt(match[2], 10) : undefined\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? span.cleanEnd\n\n\t// Confidence: 0.9 (supra format is fairly standard)\n\tconst confidence = 0.9\n\n\treturn {\n\t\ttype: 'supra',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tpartyName,\n\t\tpincite,\n\t}\n}\n\n/**\n * Extracts short-form case citation metadata from a tokenized citation.\n *\n * Parses token text to extract:\n * - Volume: Volume number\n * - Reporter: Reporter abbreviation\n * - Pincite: Page reference (from \"at [page]\" pattern)\n *\n * Confidence scoring:\n * - 0.7 (short-form case citations are more ambiguous than full citations)\n *\n * @param token - Token from tokenizer containing matched text and clean positions\n * @param transformationMap - Position mapping from clean → original text\n * @returns ShortFormCaseCitation with parsed metadata and translated positions\n *\n * @example\n * ```typescript\n * const token = {\n * text: \"500 F.2d at 125\",\n * span: { cleanStart: 10, cleanEnd: 25 },\n * type: \"case\",\n * patternId: \"short-form-case\"\n * }\n * const citation = extractShortFormCase(token, transformationMap)\n * // citation = {\n * // type: \"shortFormCase\",\n * // volume: 500,\n * // reporter: \"F.2d\",\n * // pincite: 125,\n * // confidence: 0.7,\n * // ...\n * // }\n * ```\n */\nexport function extractShortFormCase(\n\ttoken: Token,\n\ttransformationMap: TransformationMap,\n): ShortFormCaseCitation {\n\tconst { text, span } = token\n\n\t// Parse volume-reporter-at-page\n\t// Pattern: number space abbreviation space \"at\" space number\n\tconst shortFormRegex = /(\\d+)\\s+([A-Z][A-Za-z.\\s]+?(?:\\d[a-z])?)\\s+at\\s+(\\d+)/\n\tconst match = shortFormRegex.exec(text)\n\n\tif (!match) {\n\t\tthrow new Error(`Failed to parse short-form case citation: ${text}`)\n\t}\n\n\tconst volume = Number.parseInt(match[1], 10)\n\tconst reporter = match[2].trim() // Remove trailing spaces\n\tconst pincite = Number.parseInt(match[3], 10)\n\n\t// Translate positions from clean → original\n\tconst originalStart =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanStart) ?? span.cleanStart\n\tconst originalEnd =\n\t\ttransformationMap.cleanToOriginal.get(span.cleanEnd) ?? 
span.cleanEnd\n\n\t// Confidence: 0.7 (short-form citations are more ambiguous)\n\tconst confidence = 0.7\n\n\treturn {\n\t\ttype: 'shortFormCase',\n\t\ttext,\n\t\tspan: {\n\t\t\tcleanStart: span.cleanStart,\n\t\t\tcleanEnd: span.cleanEnd,\n\t\t\toriginalStart,\n\t\t\toriginalEnd,\n\t\t},\n\t\tconfidence,\n\t\tmatchedText: text,\n\t\tprocessTimeMs: 0,\n\t\tpatternsChecked: 1,\n\t\tvolume,\n\t\treporter,\n\t\tpincite,\n\t}\n}\n","/**\n * Scope Boundary Detection\n *\n * Detects paragraph/section boundaries in text and validates whether\n * an antecedent citation is within the resolution scope.\n */\n\nimport type { Citation } from '../types/citation'\nimport type { ScopeStrategy } from './types'\n\n/**\n * Detects paragraph boundaries from text and assigns each citation to a paragraph.\n *\n * @param text - Original document text\n * @param citations - Extracted citations with position spans\n * @param boundaryPattern - Regex pattern to detect boundaries (default: /\\n\\n+/)\n * @returns Map of citation index to paragraph number (0-based)\n */\nexport function detectParagraphBoundaries(\n text: string,\n citations: Citation[],\n boundaryPattern: RegExp = /\\n\\n+/g\n): Map<number, number> {\n const paragraphMap = new Map<number, number>()\n\n // Find all paragraph boundaries (positions in text)\n const boundaries: number[] = [0] // Start of document is first boundary\n let match: RegExpExecArray | null\n\n while ((match = boundaryPattern.exec(text)) !== null) {\n // Boundary is at end of match (start of next paragraph)\n boundaries.push(match.index + match[0].length)\n }\n\n boundaries.push(text.length) // End of document\n\n // Assign each citation to a paragraph\n for (let i = 0; i < citations.length; i++) {\n const citation = citations[i]\n const citationStart = citation.span.originalStart\n\n // Find which paragraph this citation belongs to\n let paragraphNum = 0\n for (let j = 0; j < boundaries.length - 1; j++) {\n if (citationStart >= boundaries[j] && citationStart < boundaries[j + 1]) {\n paragraphNum = j\n break\n }\n }\n\n paragraphMap.set(i, paragraphNum)\n }\n\n return paragraphMap\n}\n\n/**\n * Checks if an antecedent citation is within resolution scope.\n *\n * @param antecedentIndex - Index of the antecedent citation\n * @param currentIndex - Index of current citation being resolved\n * @param paragraphMap - Map of citation index to paragraph number\n * @param strategy - Scope boundary strategy\n * @returns true if antecedent is within scope, false otherwise\n */\nexport function isWithinBoundary(\n antecedentIndex: number,\n currentIndex: number,\n paragraphMap: Map<number, number>,\n strategy: ScopeStrategy\n): boolean {\n if (strategy === 'none') {\n // No boundary restriction - can resolve across entire document\n return true\n }\n\n // Get paragraph numbers for both citations\n const antecedentParagraph = paragraphMap.get(antecedentIndex)\n const currentParagraph = paragraphMap.get(currentIndex)\n\n // If either is undefined, default to allowing resolution\n if (antecedentParagraph === undefined || currentParagraph === undefined) {\n return true\n }\n\n // For paragraph/section/footnote strategies, citations must be in same boundary\n // (In this MVP, section and footnote behave same as paragraph - future enhancement)\n return antecedentParagraph === currentParagraph\n}\n","/**\n * Levenshtein Distance\n *\n * Calculates edit distance between strings for fuzzy party name matching\n * in supra citation resolution.\n *\n * Uses dynamic programming for O(m*n) time 
complexity.\n */\n\n/**\n * Calculates Levenshtein distance (edit distance) between two strings.\n *\n * The edit distance is the minimum number of single-character edits\n * (insertions, deletions, substitutions) needed to change one string into the other.\n *\n * @param a - First string\n * @param b - Second string\n * @returns Number of edits required (0 = identical)\n */\nexport function levenshteinDistance(a: string, b: string): number {\n // Handle empty strings\n if (a.length === 0) return b.length\n if (b.length === 0) return a.length\n\n // Create 2D array for dynamic programming\n // dp[i][j] = edit distance between a[0...i-1] and b[0...j-1]\n const dp: number[][] = Array.from({ length: a.length + 1 }, () =>\n Array(b.length + 1).fill(0)\n )\n\n // Initialize base cases\n for (let i = 0; i <= a.length; i++) {\n dp[i][0] = i // Distance from a[0...i-1] to empty string\n }\n for (let j = 0; j <= b.length; j++) {\n dp[0][j] = j // Distance from empty string to b[0...j-1]\n }\n\n // Fill the DP table\n for (let i = 1; i <= a.length; i++) {\n for (let j = 1; j <= b.length; j++) {\n if (a[i - 1] === b[j - 1]) {\n // Characters match - no edit needed\n dp[i][j] = dp[i - 1][j - 1]\n } else {\n // Characters differ - take minimum of:\n // 1. Insert: dp[i][j-1] + 1\n // 2. Delete: dp[i-1][j] + 1\n // 3. Substitute: dp[i-1][j-1] + 1\n dp[i][j] = 1 + Math.min(\n dp[i - 1][j], // Delete from a\n dp[i][j - 1], // Insert into a\n dp[i - 1][j - 1] // Substitute\n )\n }\n }\n }\n\n return dp[a.length][b.length]\n}\n\n/**\n * Calculates normalized Levenshtein similarity (0-1 scale).\n *\n * Returns similarity score where:\n * - 1.0 = identical strings\n * - 0.0 = completely different\n *\n * Comparison is case-insensitive.\n *\n * @param a - First string\n * @param b - Second string\n * @returns Similarity score from 0 to 1\n */\nexport function normalizedLevenshteinDistance(a: string, b: string): number {\n // Normalize to lowercase for case-insensitive comparison\n const lowerA = a.toLowerCase()\n const lowerB = b.toLowerCase()\n\n // Calculate raw edit distance\n const distance = levenshteinDistance(lowerA, lowerB)\n\n // Normalize by max length\n const maxLength = Math.max(lowerA.length, lowerB.length)\n if (maxLength === 0) return 1.0 // Both empty strings\n\n // Convert distance to similarity: 1 - (distance / maxLength)\n return 1 - distance / maxLength\n}\n","/**\n * Document-Scoped Citation Resolver\n *\n * Resolves short-form citations (Id./supra/short-form case) to their full antecedent citations\n * by maintaining resolution context and enforcing scope boundaries.\n *\n * Resolution rules:\n * - Id. 
resolves to immediately preceding full citation (within scope)\n * - Supra resolves to full citation with matching party name (within scope)\n * - Short-form case resolves to full case with matching volume/reporter (within scope)\n */\n\nimport type {\n Citation,\n IdCitation,\n SupraCitation,\n ShortFormCaseCitation,\n FullCaseCitation,\n} from '../types/citation'\nimport type {\n ResolutionOptions,\n ResolutionResult,\n ResolvedCitation,\n ResolutionContext,\n} from './types'\nimport { detectParagraphBoundaries, isWithinBoundary } from './scopeBoundary'\nimport { normalizedLevenshteinDistance } from './levenshtein'\n\n/**\n * Document-scoped resolver that processes citations sequentially\n * and resolves short-form citations to their antecedents.\n */\nexport class DocumentResolver {\n private readonly citations: Citation[]\n private readonly text: string\n private readonly options: Required<ResolutionOptions>\n private readonly context: ResolutionContext\n\n /**\n * Creates a new DocumentResolver.\n *\n * @param citations - All citations in document (in order of appearance)\n * @param text - Original document text\n * @param options - Resolution options\n */\n constructor(\n citations: Citation[],\n text: string,\n options: ResolutionOptions = {}\n ) {\n this.citations = citations\n this.text = text\n\n // Apply defaults to options\n this.options = {\n scopeStrategy: options.scopeStrategy ?? 'paragraph',\n autoDetectParagraphs: options.autoDetectParagraphs ?? true,\n paragraphBoundaryPattern: options.paragraphBoundaryPattern ?? /\\n\\n+/g,\n fuzzyPartyMatching: options.fuzzyPartyMatching ?? true,\n partyMatchThreshold: options.partyMatchThreshold ?? 0.8,\n allowNestedResolution: options.allowNestedResolution ?? false,\n reportUnresolved: options.reportUnresolved ?? true,\n }\n\n // Initialize resolution context\n this.context = {\n citationIndex: 0,\n allCitations: citations,\n lastFullCitation: undefined,\n fullCitationHistory: new Map(),\n paragraphMap: new Map(),\n }\n\n // Detect paragraph boundaries if enabled\n if (this.options.autoDetectParagraphs) {\n this.context.paragraphMap = detectParagraphBoundaries(\n text,\n citations,\n this.options.paragraphBoundaryPattern\n )\n }\n }\n\n /**\n * Resolves all citations in the document.\n *\n * @returns Array of citations with resolution metadata\n */\n resolve(): ResolvedCitation[] {\n const resolved: ResolvedCitation[] = []\n\n for (let i = 0; i < this.citations.length; i++) {\n this.context.citationIndex = i\n const citation = this.citations[i]\n\n // Resolve based on citation type\n let resolution: ResolutionResult | undefined\n\n switch (citation.type) {\n case 'id':\n resolution = this.resolveId(citation as IdCitation)\n break\n case 'supra':\n resolution = this.resolveSupra(citation as SupraCitation)\n break\n case 'shortFormCase':\n resolution = this.resolveShortFormCase(citation as ShortFormCaseCitation)\n break\n default:\n // Full citation - update context for future resolutions\n if (this.isFullCitation(citation)) {\n this.context.lastFullCitation = i\n this.trackFullCitation(citation, i)\n }\n break\n }\n\n // Add citation with optional resolution metadata\n resolved.push({\n ...citation,\n resolution,\n })\n }\n\n return resolved\n }\n\n /**\n * Resolves Id. citation to immediately preceding full case citation.\n */\n private resolveId(citation: IdCitation): ResolutionResult | undefined {\n const currentIndex = this.context.citationIndex\n\n // Find most recent full case citation (Id. 
only resolves to case citations, not statutes/journals)\n let antecedentIndex: number | undefined\n for (let i = currentIndex - 1; i >= 0; i--) {\n const candidate = this.citations[i]\n if (candidate.type === 'case') {\n antecedentIndex = i\n break\n }\n }\n\n // Check if we have a previous case citation\n if (antecedentIndex === undefined) {\n return this.createFailureResult('No preceding full case citation found')\n }\n\n // Check scope boundary\n if (!this.isWithinScope(antecedentIndex, currentIndex)) {\n return this.createFailureResult('Antecedent citation outside scope boundary')\n }\n\n return {\n resolvedTo: antecedentIndex,\n confidence: 1.0, // Id. resolution is unambiguous when successful\n }\n }\n\n /**\n * Resolves supra citation by matching party name.\n */\n private resolveSupra(citation: SupraCitation): ResolutionResult | undefined {\n const currentIndex = this.context.citationIndex\n const targetPartyName = this.normalizePartyName(citation.partyName)\n\n // Search full citation history for matching party name\n let bestMatch: { index: number; similarity: number } | undefined\n\n for (const [partyName, citationIndex] of this.context.fullCitationHistory) {\n // Check scope boundary\n if (!this.isWithinScope(citationIndex, currentIndex)) {\n continue\n }\n\n // Calculate similarity\n const similarity = normalizedLevenshteinDistance(targetPartyName, partyName)\n\n // Update best match if this is better\n if (!bestMatch || similarity > bestMatch.similarity) {\n bestMatch = { index: citationIndex, similarity }\n }\n }\n\n // Check if we found a match above threshold\n if (!bestMatch) {\n return this.createFailureResult('No full citation found in scope')\n }\n\n if (bestMatch.similarity < this.options.partyMatchThreshold) {\n return this.createFailureResult(\n `Party name similarity ${bestMatch.similarity.toFixed(2)} below threshold ${this.options.partyMatchThreshold}`\n )\n }\n\n // Return successful resolution with confidence based on similarity\n const warnings: string[] = []\n if (bestMatch.similarity < 1.0) {\n warnings.push(`Fuzzy match: similarity ${bestMatch.similarity.toFixed(2)}`)\n }\n\n return {\n resolvedTo: bestMatch.index,\n confidence: bestMatch.similarity,\n warnings: warnings.length > 0 ? 
warnings : undefined,\n }\n }\n\n /**\n * Resolves short-form case citation by matching volume/reporter.\n */\n private resolveShortFormCase(citation: ShortFormCaseCitation): ResolutionResult | undefined {\n const currentIndex = this.context.citationIndex\n\n // Search backwards for matching full case citation\n for (let i = currentIndex - 1; i >= 0; i--) {\n const candidate = this.citations[i]\n\n // Only match against full case citations\n if (candidate.type !== 'case') {\n continue\n }\n\n const fullCase = candidate as FullCaseCitation\n\n // Check if volume and reporter match\n if (\n fullCase.volume === citation.volume &&\n this.normalizeReporter(fullCase.reporter) === this.normalizeReporter(citation.reporter)\n ) {\n // Check scope boundary\n if (!this.isWithinScope(i, currentIndex)) {\n return this.createFailureResult('Matching citation outside scope boundary')\n }\n\n // Found a match\n return {\n resolvedTo: i,\n confidence: 0.95, // High confidence but not perfect (multiple cases could have same volume/reporter)\n }\n }\n }\n\n return this.createFailureResult('No matching full case citation found')\n }\n\n /**\n * Checks if a citation is a full citation (not short-form).\n */\n private isFullCitation(citation: Citation): boolean {\n return (\n citation.type === 'case' ||\n citation.type === 'statute' ||\n citation.type === 'journal' ||\n citation.type === 'neutral' ||\n citation.type === 'publicLaw' ||\n citation.type === 'federalRegister'\n )\n }\n\n /**\n * Tracks a full citation in the resolution history.\n * Extracts party name for supra resolution.\n */\n private trackFullCitation(citation: Citation, index: number): void {\n // Only case citations have party names for supra resolution\n if (citation.type === 'case') {\n const fullCase = citation as FullCaseCitation\n const partyName = this.extractPartyName(fullCase)\n if (partyName) {\n const normalized = this.normalizePartyName(partyName)\n this.context.fullCitationHistory.set(normalized, index)\n }\n }\n }\n\n /**\n * Extracts party name from full case citation text.\n * Handles \"Party v. Party\" format by looking at text before citation span.\n */\n private extractPartyName(citation: FullCaseCitation): string | undefined {\n // Look at text before citation span to find party names\n // Case citations typically appear as: \"Smith v. Jones, 100 F.2d 10\"\n // But tokenizer only captures \"100 F.2d 10\" - we need to look backwards in text\n\n const citationStart = citation.span.originalStart\n // Look backwards up to 100 characters for party name\n const lookbackStart = Math.max(0, citationStart - 100)\n const beforeText = this.text.substring(lookbackStart, citationStart)\n\n // Match pattern: \"FirstParty v. 
SecondParty, \" before the citation\n // Capture the first party name (handles single-letter party names like \"A\" or \"B\")\n const vMatch = beforeText.match(/([A-Z][a-zA-Z]*(?:\\s+[A-Z][a-zA-Z]*)*)\\s+v\\.?\\s+[A-Z][a-zA-Z]*(?:\\s+[A-Z][a-zA-Z]*)*,\\s*$/)\n if (vMatch) {\n return vMatch[1].trim()\n }\n\n // Fallback: try to find any capitalized word(s) before comma\n const beforeComma = beforeText.match(/([A-Z][a-zA-Z]*(?:\\s+[A-Z][a-zA-Z]*)*),\\s*$/)\n return beforeComma?.[1].trim()\n }\n\n /**\n * Normalizes party name for matching.\n */\n private normalizePartyName(name: string): string {\n return name\n .toLowerCase()\n .replace(/\\s+/g, ' ') // Normalize whitespace\n .trim()\n }\n\n /**\n * Normalizes reporter abbreviation for matching.\n */\n private normalizeReporter(reporter: string): string {\n return reporter\n .toLowerCase()\n .replace(/\\s+/g, '') // Remove spaces (F.2d vs F. 2d)\n .replace(/\\./g, '') // Remove periods\n }\n\n /**\n * Checks if antecedent citation is within scope boundary.\n */\n private isWithinScope(antecedentIndex: number, currentIndex: number): boolean {\n return isWithinBoundary(\n antecedentIndex,\n currentIndex,\n this.context.paragraphMap,\n this.options.scopeStrategy\n )\n }\n\n /**\n * Creates a failure result for unresolved citations.\n */\n private createFailureResult(reason: string): ResolutionResult | undefined {\n if (this.options.reportUnresolved) {\n return {\n resolvedTo: undefined,\n failureReason: reason,\n confidence: 0.0,\n }\n }\n return undefined\n }\n}\n","/**\n * Citation Resolution\n *\n * Resolves short-form citations (Id./supra/short-form case) to their full antecedents.\n *\n * @example\n * ```ts\n * import { resolveCitations } from 'eyecite-ts/resolve'\n * import { extractCitations } from 'eyecite-ts'\n *\n * const text = 'See Smith v. Jones, 500 F.2d 100 (1974). Id. at 105.'\n * const citations = extractCitations(text)\n * const resolved = resolveCitations(citations, text)\n *\n * // resolved[1] is Id. citation with resolution.resolvedTo = 0\n * console.log(resolved[1].resolution?.resolvedTo) // 0 (points to Smith v. Jones)\n * ```\n */\n\nimport type { Citation } from '../types/citation'\nimport type { ResolutionOptions, ResolvedCitation } from './types'\nimport { DocumentResolver } from './DocumentResolver'\n\n/**\n * Resolves short-form citations to their full antecedents.\n *\n * Convenience wrapper around DocumentResolver that handles common use cases.\n *\n * @param citations - Extracted citations in order of appearance\n * @param text - Original document text\n * @param options - Resolution options\n * @returns Citations with resolution metadata\n */\nexport function resolveCitations(\n citations: Citation[],\n text: string,\n options?: ResolutionOptions\n): ResolvedCitation[] {\n const resolver = new DocumentResolver(citations, text, options)\n return resolver.resolve()\n}\n\n// Re-export core types and classes\nexport { DocumentResolver } from './DocumentResolver'\nexport type {\n ResolutionOptions,\n ResolutionResult,\n ResolvedCitation,\n ScopeStrategy,\n} from './types'\n","/**\n * Main Citation Extraction Pipeline\n *\n * Orchestrates the complete citation extraction flow:\n * 1. Clean text (remove HTML, normalize Unicode)\n * 2. Tokenize (apply patterns to find candidates)\n * 3. 
Extract (parse metadata from tokens)\n *\n * This is the primary public API for citation extraction.\n *\n * @module extract/extractCitations\n */\n\nimport { cleanText } from '@/clean'\nimport { tokenize } from '@/tokenize'\nimport {\n\textractCase,\n\textractStatute,\n\textractJournal,\n\textractNeutral,\n\textractPublicLaw,\n\textractFederalRegister,\n} from '@/extract'\nimport { extractId, extractSupra, extractShortFormCase } from './extractShortForms'\nimport {\n\tcasePatterns,\n\tstatutePatterns,\n\tjournalPatterns,\n\tneutralPatterns,\n\tshortFormPatterns,\n} from '@/patterns'\nimport { resolveCitations } from '../resolve'\nimport type { Citation } from '@/types/citation'\nimport type { Pattern } from '@/patterns'\nimport type { ResolutionOptions, ResolvedCitation } from '../resolve/types'\n\n/**\n * Options for customizing citation extraction behavior.\n */\nexport interface ExtractOptions {\n\t/**\n\t * Custom text cleaners (overrides defaults).\n\t *\n\t * If provided, these cleaners replace the default pipeline:\n\t * [stripHtmlTags, normalizeWhitespace, normalizeUnicode, fixSmartQuotes]\n\t *\n\t * @example\n\t * ```typescript\n\t * // Use only HTML stripping, skip Unicode normalization\n\t * const citations = extractCitations(text, {\n\t * cleaners: [stripHtmlTags]\n\t * })\n\t * ```\n\t */\n\tcleaners?: Array<(text: string) => string>\n\n\t/**\n\t * Custom regex patterns (overrides defaults).\n\t *\n\t * If provided, these patterns replace the default pattern set:\n\t * [casePatterns, statutePatterns, journalPatterns, neutralPatterns, shortFormPatterns]\n\t *\n\t * @example\n\t * ```typescript\n\t * // Extract only case citations\n\t * const citations = extractCitations(text, {\n\t * patterns: casePatterns\n\t * })\n\t * ```\n\t */\n\tpatterns?: Pattern[]\n\n\t/**\n\t * Resolve short-form citations to their full antecedents (default: false).\n\t *\n\t * If true, returns ResolvedCitation[] with resolution metadata for short-form citations\n\t * (Id., supra, short-form case). Full citations are unchanged.\n\t *\n\t * @example\n\t * ```typescript\n\t * const text = \"Smith v. Jones, 500 F.2d 100 (1974). Id. at 105.\"\n\t * const citations = extractCitations(text, { resolve: true })\n\t * // citations[1].resolution.resolvedTo === 0 (points to Smith v. Jones)\n\t * ```\n\t */\n\tresolve?: boolean\n\n\t/**\n\t * Options for citation resolution (only used if resolve: true).\n\t *\n\t * @example\n\t * ```typescript\n\t * const citations = extractCitations(text, {\n\t * resolve: true,\n\t * resolutionOptions: {\n\t * scopeStrategy: 'paragraph',\n\t * fuzzyPartyMatching: true\n\t * }\n\t * })\n\t * ```\n\t */\n\tresolutionOptions?: ResolutionOptions\n}\n\n/**\n * Extracts legal citations from text using the full parsing pipeline.\n *\n * Pipeline flow:\n * 1. **Clean:** Remove HTML tags, normalize Unicode, fix smart quotes\n * 2. **Tokenize:** Apply regex patterns to find citation candidates\n * 3. **Extract:** Parse metadata (volume, reporter, page, etc.)\n * 4. **Translate:** Map positions from cleaned text back to original text\n *\n * This function is synchronous because all stages (cleaning, tokenization,\n * extraction) are synchronous. 
For async operations (e.g., future reporters-db\n * lookups), use extractCitationsAsync().\n *\n * Position tracking:\n * - TransformationMap is built during cleaning\n * - Tokens contain positions in cleaned text (cleanStart/cleanEnd)\n * - Extraction translates cleaned positions → original positions\n * - Final citations have originalStart/originalEnd pointing to input text\n *\n * Warnings from cleaning layer are attached to all extracted citations.\n *\n * @param text - Raw text to extract citations from (may contain HTML, Unicode)\n * @param options - Optional customization (cleaners, patterns)\n * @returns Array of citations with parsed metadata and accurate positions\n *\n * @example\n * ```typescript\n * const text = \"See Smith v. Doe, 500 F.2d 123 (9th Cir. 2020)\"\n * const citations = extractCitations(text)\n * // citations[0] = {\n * // type: \"case\",\n * // volume: 500,\n * // reporter: \"F.2d\",\n * // page: 123,\n * // court: \"9th Cir.\",\n * // year: 2020,\n * // span: { originalStart: 18, originalEnd: 30, ... }\n * // }\n * ```\n *\n * @example\n * ```typescript\n * // Extract from HTML\n * const html = \"<p>In <b>Smith</b>, 500 F.2d 123, the court held...</p>\"\n * const citations = extractCitations(html)\n * // HTML is stripped, positions point to original HTML\n * ```\n *\n * @example\n * ```typescript\n * // Extract multiple citation types\n * const text = \"See 42 U.S.C. § 1983; Smith, 500 F.2d 123; 123 Harv. L. Rev. 456\"\n * const citations = extractCitations(text)\n * // citations[0].type === \"statute\"\n * // citations[1].type === \"case\"\n * // citations[2].type === \"journal\"\n * ```\n */\nexport function extractCitations(\n\ttext: string,\n\toptions?: ExtractOptions,\n): Citation[] | ResolvedCitation[] {\n\tconst startTime = performance.now()\n\n\t// Step 1: Clean text\n\tconst { cleaned, transformationMap, warnings } = cleanText(\n\t\ttext,\n\t\toptions?.cleaners,\n\t)\n\n\t// Step 2: Tokenize (synchronous)\n\t// Note: Pattern order matters for deduplication - more specific patterns first\n\tconst allPatterns = options?.patterns || [\n\t\t...neutralPatterns, // Most specific (year-based format)\n\t\t...shortFormPatterns, // Short-form (requires \" at \" keyword)\n\t\t...casePatterns, // Case citations (reporter-specific)\n\t\t...statutePatterns, // Statutes (code-specific)\n\t\t...journalPatterns, // Least specific (broad pattern)\n\t]\n\tconst tokens = tokenize(cleaned, allPatterns)\n\n\t// Step 3: Deduplicate overlapping tokens\n\t// Multiple patterns may match the same text (e.g., \"500 F.2d 123\" matches both federal-reporter and state-reporter)\n\t// Keep only the most specific match for each position\n\tconst deduplicatedTokens: typeof tokens = []\n\tconst seenPositions = new Set<string>()\n\n\tfor (const token of tokens) {\n\t\tconst posKey = `${token.span.cleanStart}-${token.span.cleanEnd}`\n\t\tif (!seenPositions.has(posKey)) {\n\t\t\tseenPositions.add(posKey)\n\t\t\tdeduplicatedTokens.push(token)\n\t\t}\n\t}\n\n\t// Step 4: Extract citations from deduplicated tokens\n\tconst citations: Citation[] = []\n\tfor (const token of deduplicatedTokens) {\n\t\tlet citation: Citation\n\n\t\tswitch (token.type) {\n\t\t\tcase 'case':\n\t\t\t\t// Check pattern ID to distinguish short-form from full citations\n\t\t\t\tif (token.patternId === 'id' || token.patternId === 'ibid') {\n\t\t\t\t\tcitation = extractId(token, transformationMap)\n\t\t\t\t} else if (token.patternId === 'supra') {\n\t\t\t\t\tcitation = extractSupra(token, transformationMap)\n\t\t\t\t} 
else if (token.patternId === 'shortFormCase') {\n\t\t\t\t\tcitation = extractShortFormCase(token, transformationMap)\n\t\t\t\t} else {\n\t\t\t\t\tcitation = extractCase(token, transformationMap)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tcase 'statute':\n\t\t\t\tcitation = extractStatute(token, transformationMap)\n\t\t\t\tbreak\n\t\t\tcase 'journal':\n\t\t\t\tcitation = extractJournal(token, transformationMap)\n\t\t\t\tbreak\n\t\t\tcase 'neutral':\n\t\t\t\tcitation = extractNeutral(token, transformationMap)\n\t\t\t\tbreak\n\t\t\tcase 'publicLaw':\n\t\t\t\tcitation = extractPublicLaw(token, transformationMap)\n\t\t\t\tbreak\n\t\t\tcase 'federalRegister':\n\t\t\t\tcitation = extractFederalRegister(token, transformationMap)\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\t// Unknown type - skip\n\t\t\t\tcontinue\n\t\t}\n\n\t\t// Attach cleaning warnings to citation if any\n\t\tif (warnings.length > 0) {\n\t\t\tcitation.warnings = [...(citation.warnings || []), ...warnings]\n\t\t}\n\n\t\t// Update processing time\n\t\tcitation.processTimeMs = performance.now() - startTime\n\n\t\tcitations.push(citation)\n\t}\n\n\t// Step 5: Resolve short-form citations if requested\n\tif (options?.resolve) {\n\t\treturn resolveCitations(citations, text, options.resolutionOptions)\n\t}\n\n\treturn citations\n}\n\n/**\n * Asynchronous version of extractCitations().\n *\n * Currently wraps the synchronous extractCitations() function. This API\n * exists for future extensibility when async operations are added:\n * - Async reporters-db lookups (Phase 3)\n * - Async resolution/annotation services\n * - Web Workers for parallel processing\n *\n * For now, this function immediately resolves with the same results as\n * the synchronous version.\n *\n * @param text - Raw text to extract citations from\n * @param options - Optional customization (cleaners, patterns, resolve)\n * @returns Promise resolving to array of citations (or ResolvedCitation[] if resolve: true)\n *\n * @example\n * ```typescript\n * const citations = await extractCitationsAsync(text, { resolve: true })\n * // Returns ResolvedCitation[] with resolution metadata\n * ```\n */\nexport async function extractCitationsAsync(\n\ttext: string,\n\toptions?: ExtractOptions,\n): Promise<Citation[] | ResolvedCitation[]> {\n\t// Async wrapper for future extensibility (e.g., async reporters-db lookup)\n\t// For MVP, wraps synchronous extractCitations\n\treturn extractCitations(text, 
options)\n}\n"],"mappings":"mEAcA,SAAgB,EAAc,EAAsB,CACnD,OAAO,EAAK,QAAQ,WAAY,GAAG,CAUpC,SAAgB,EAAoB,EAAsB,CACzD,OAAO,EAAK,QAAQ,aAAc,IAAI,CAAC,QAAQ,SAAU,IAAI,CAU9D,SAAgB,EAAiB,EAAsB,CACtD,OAAO,EAAK,UAAU,OAAO,CAU9B,SAAgB,EAAe,EAAsB,CACpD,OAAO,EACL,QAAQ,kBAAmB,IAAI,CAC/B,QAAQ,kBAAmB,IAAI,CCHlC,SAAgB,EACf,EACA,EAA4C,CAC3C,EACA,EACA,EACA,EACA,CACiB,CAElB,IAAI,EAAc,EACd,EAAkB,IAAI,IACtB,EAAkB,IAAI,IAG1B,IAAK,IAAI,EAAI,EAAG,GAAK,EAAS,OAAQ,IACrC,EAAgB,IAAI,EAAG,EAAE,CACzB,EAAgB,IAAI,EAAG,EAAE,CAI1B,IAAK,IAAM,KAAW,EAAU,CAC/B,IAAM,EAAa,EACb,EAAY,EAAQ,EAAY,CAEtC,GAAI,IAAe,EAAW,CAE7B,GAAM,CAAE,qBAAoB,sBAAuB,EAClD,EACA,EACA,EACA,EACA,CAED,EAAkB,EAClB,EAAkB,EAClB,EAAc,GAShB,MAAO,CACN,QAAS,EACT,kBAP4C,CAC5C,kBACA,kBACA,CAKA,SAAU,EAAE,CACZ,CAeF,SAAS,EACR,EACA,EACA,EACA,EAIC,CACD,IAAM,EAAqB,IAAI,IACzB,EAAqB,IAAI,IAE3B,EAAY,EACZ,EAAW,EAGf,KAAO,GAAa,EAAW,QAAU,GAAY,EAAU,QAAQ,CAEtE,GAAI,GAAa,EAAW,QAAU,GAAY,EAAU,OAAQ,CACnE,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,EAAmB,IAAI,EAAU,EAAY,CAC7C,EAAmB,IAAI,EAAa,EAAS,CAC7C,MAID,GAAI,GAAa,EAAW,OAAQ,CACnC,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,EAAmB,IAAI,EAAU,EAAY,CAC7C,IACA,SAID,GAAI,GAAY,EAAU,OAAQ,CACjC,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,EAAmB,IAAI,EAAa,EAAS,CAC7C,IACA,SAID,GAAI,EAAW,KAAe,EAAU,GAAW,CAClD,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,EAAmB,IAAI,EAAU,EAAY,CAC7C,EAAmB,IAAI,EAAa,EAAS,CAC7C,IACA,QACM,CAGN,IAAI,EAAa,GAIjB,IAAK,IAAI,EAAY,EAAG,GAAa,IAChC,IAAY,GAAa,EAAW,QADU,IAGlD,GAAI,EAAW,EAAY,KAAe,EAAU,GAAW,CAE9D,IAAK,IAAI,EAAI,EAAG,EAAI,EAAW,IAAK,CACnC,IAAM,EACL,EAAmB,IAAI,EAAY,EAAE,EAAI,EAAY,EACtD,EAAmB,IAAI,EAAa,EAAS,CAE9C,GAAa,EACb,EAAa,GACb,MAIF,GAAI,EAAY,SAGhB,IAAK,IAAI,EAAY,EAAG,GAAa,IAChC,IAAW,GAAa,EAAU,QADY,IAGlD,GAAI,EAAW,KAAe,EAAU,EAAW,GAAY,CAE9D,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,IAAK,IAAI,EAAI,EAAG,EAAI,EAAW,IAC9B,EAAmB,IAAI,EAAW,EAAG,EAAY,CAElD,GAAY,EACZ,EAAa,GACb,MAIF,GAAI,EAAY,SAGhB,IAAM,EAAc,EAAmB,IAAI,EAAU,EAAI,EACzD,EAAmB,IAAI,EAAU,EAAY,CAC7C,EAAmB,IAAI,EAAa,EAAS,CAC7C,IACA,KAIF,MAAO,CAAE,qBAAoB,qBAAoB,CC9LlD,MAAa,EAA0B,CACrC,CACE,GAAI,mBACJ,MAAO,0FACP,YAAa,mDACb,KAAM,OACP,CACD,CACE,GAAI,gBACJ,MAAO,gEACP,YAAa,+BACb,KAAM,OACP,CACD,CACE,GAAI,iBACJ,MAAO,0DACP,YAAa,6EACb,KAAM,OACP,CACF,CCzBY,EAA6B,CACxC,CACE,GAAI,MACJ,MAAO,uCACP,YAAa,iDACb,KAAM,UACP,CACD,CACE,GAAI,aACJ,MAAO,yDACP,YAAa,sEACb,KAAM,UACP,CACF,CCbY,EAA6B,CACxC,CACE,GAAI,aACJ,MAAO,4CACP,YAAa,iGACb,KAAM,UACP,CACF,CCPY,EAA6B,CACxC,CACE,GAAI,UACJ,MAAO,4BACP,YAAa,6CACb,KAAM,UACP,CACD,CACE,GAAI,QACJ,MAAO,wCACP,YAAa,sDACb,KAAM,UACP,CACD,CACE,GAAI,aACJ,MAAO,sCACP,YAAa,oDACb,KAAM,YACP,CACD,CACE,GAAI,mBACJ,MAAO,qCACP,YAAa,0DACb,KAAM,kBACP,CACF,CCIY,EAA+B,CAC1C,CACE,GAAI,KACJ,MAhC8B,+BAiC9B,YAAa,8CACb,KAAM,OACP,CACD,CACE,GAAI,OACJ,MAnCgC,iCAoChC,YAAa,oDACb,KAAM,OACP,CACD,CACE,GAAI,QACJ,MAjCiC,yFAkCjC,YAAa,mEACb,KAAM,OACP,CACD,CACE,GAAI,gBACJ,MAhC2C,6DAiC3C,YAAa,sDACb,KAAM,OACP,CACF,CCcD,SAAgB,EACd,EACA,EAAsB,CACpB,GAAG,EACH,GAAG,EACH,GAAG,EACH,GAAG,EACH,GAAG,EACJ,CACQ,CACT,IAAM,EAAkB,EAAE,CAE1B,IAAK,IAAM,KAAW,EACpB,GAAI,CAEF,IAAM,EAAU,EAAY,SAAS,EAAQ,MAAM,CAEnD,IAAK,IAAM,KAAS,EAElB,EAAO,KAAK,CACV,KAAM,EAAM,GACZ,KAAM,CACJ,WAAY,EAAM,MAClB,SAAU,EAAM,MAAS,EAAM,GAAG,OACnC,CACD,KAAM,EAAQ,KACd,UAAW,EAAQ,GACpB,CAAC,OAEG,EAAO,CAEd,QAAQ,KACN,WAAW,EAAQ,GAAG,yBACtB,aAAiB,MAAQ,EAAM,QAAU,OAAO,EAAM,CACvD,CACD,SAOJ,OAFA,EAAO,MAAM,EAAG,IAAM,EAAE,KAAK,WAAa,EAAE,KAAK,WAAW,CAErD,ECtDT,SAAgB,EACf,EACA,EACmB,CACnB,GAAM,CAAE,OAAM,QAAS,EAMjB,EAD0B,qCACM,KAAK,EAAK,CAEhD,GAAI,CAAC,EAEJ,MAAU,MAAM,kCAAkC,IAAO,CAG1D,IAAM,EAAS,OAAO,SAAS,EAAM,GAAI,GAAG,CACtC,EAAW,EAAM,GAAG,MAAM,CAC1B,EAAO,OAAO,SAAS,EAAM,GAAI,GAAG,CAKpC,EADe,YACa,KAAK,
EAAK,CACtC,EAAU,EAAe,OAAO,SAAS,EAAa,GAAI,GAAG,CAAG,IAAA,GAKhE,EADY,0BACU,KAAK,EAAK,CAChC,EAAO,EAAY,OAAO,SAAS,EAAU,GAAI,GAAG,CAAG,IAAA,GAKvD,EADa,2BACW,KAAK,EAAK,CAClC,EAAQ,EAAa,EAAW,GAAG,MAAM,CAAG,IAAA,GAG5C,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAG1D,EAAa,GA+CjB,MA5CwB,4JA2BvB,CAEmB,KAAM,GAAM,EAAS,SAAS,EAAE,CAAC,GACpD,GAAc,IAIX,IAAS,IAAA,IAER,GADgB,IAAI,MAAM,CAAC,aAAa,GAE3C,GAAc,IAKhB,EAAa,KAAK,IAAI,EAAY,EAAI,CAE/B,CACN,KAAM,OACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,aACA,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,SACA,WACA,OACA,UACA,QACA,OACA,CCtIF,SAAgB,EACf,EACA,EACkB,CAClB,GAAM,CAAE,OAAM,QAAS,EAKjB,EADe,0DACM,KAAK,EAAK,CAErC,GAAI,CAAC,EACJ,MAAU,MAAM,qCAAqC,IAAO,CAG7D,IAAM,EAAQ,EAAM,GAAK,OAAO,SAAS,EAAM,GAAI,GAAG,CAAG,IAAA,GACnD,EAAO,EAAM,GAAG,MAAM,CACtB,EAAU,EAAM,GAGhB,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAG1D,EAAa,GAkBjB,MAfmB,CAClB,SACA,SACA,iBACA,kBACA,0BACA,8BACA,CAEc,KAAM,GAAM,EAAK,SAAS,EAAE,CAAC,GAC3C,GAAc,IAGf,EAAa,KAAK,IAAI,EAAY,EAAI,CAE/B,CACN,KAAM,UACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,aACA,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,QACA,OACA,UACA,CC1DF,SAAgB,EACf,EACA,EACkB,CAClB,GAAM,CAAE,OAAM,QAAS,EAKjB,EADe,mCACM,KAAK,EAAK,CAErC,GAAI,CAAC,EACJ,MAAU,MAAM,qCAAqC,IAAO,CAG7D,IAAM,EAAS,OAAO,SAAS,EAAM,GAAI,GAAG,CACtC,EAAU,EAAM,GAAG,MAAM,CACzB,EAAO,OAAO,SAAS,EAAM,GAAI,GAAG,CAIpC,EADe,YACa,KAAK,EAAK,CACtC,EAAU,EAAe,OAAO,SAAS,EAAa,GAAI,GAAG,CAAG,IAAA,GAGhE,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,UACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,GAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,SACA,UACA,aAAc,EACd,OACA,UACA,CCvDF,SAAgB,EACf,EACA,EACkB,CAClB,GAAM,CAAE,OAAM,QAAS,EAKjB,EADe,+CACM,KAAK,EAAK,CAErC,GAAI,CAAC,EACJ,MAAU,MAAM,qCAAqC,IAAO,CAG7D,IAAM,EAAO,OAAO,SAAS,EAAM,GAAI,GAAG,CACpC,EAAQ,EAAM,GACd,EAAiB,EAAM,GAGvB,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,UACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,EAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,OACA,QACA,iBACA,CC3CF,SAAgB,EACf,EACA,EACoB,CACpB,GAAM,CAAE,OAAM,QAAS,EAKjB,EADiB,wCACM,KAAK,EAAK,CAEvC,GAAI,CAAC,EACJ,MAAU,MAAM,wCAAwC,IAAO,CAGhE,IAAM,EAAW,OAAO,SAAS,EAAM,GAAI,GAAG,CACxC,EAAY,OAAO,SAAS,EAAM,GAAI,GAAG,CAGzC,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,YACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,GAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,WACA,YACA,CC5CF,SAAgB,EACf,EACA,EAC0B,CAC1B,GAAM,CAAE,OAAM,QAAS,EAKjB,EADuB,iCACM,KAAK,EAAK,CAE7C,GAAI,CAAC,EACJ,MAAU,MAAM,8CAA8C,IAAO,CAGtE,IAAM,EAAS,OAAO,SAAS,EAAM,GAAI,GAAG,CACtC,EAAO,OAAO,SAAS,EAAM,GAAI,GAAG,CAKpC,EADY,wBACU,KAAK,EAAK,CAChC,EAAO,EAAY,OAAO,SAAS,EAAU,GAAI,GAAG,CAAG,IAAA,GAGvD,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,kBACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,GAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,SACA,OACA,OACA,CCpDF,SAAgB,EACf,EACA,EACa,CACb,GAAM,CAAE,OAAM,QAAS,EAKjB,EADU,oCACM,KAAK,EAAK,CAEhC,GAAI,CAAC,EACJ,MAAU,MAAM,iCAAiC,IAAO,CAIzD,IAAM,EAAU,EAAM,GAAK,OAAO,SAAS,EAAM,GAAI,GAAG,CAAG,IAAA,GAGrD,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,KACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBA
CA,cACA,CACD,WAXkB,EAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,UACA,CAmCF,SAAgB,EACf,EACA,EACgB,CAChB,GAAM,CAAE,OAAM,QAAS,EAMjB,EADa,wFACM,KAAK,EAAK,CAEnC,GAAI,CAAC,EACJ,MAAU,MAAM,mCAAmC,IAAO,CAG3D,IAAM,EAAY,EAAM,GAClB,EAAU,EAAM,GAAK,OAAO,SAAS,EAAM,GAAI,GAAG,CAAG,IAAA,GAGrD,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,QACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,GAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,YACA,UACA,CAqCF,SAAgB,EACf,EACA,EACwB,CACxB,GAAM,CAAE,OAAM,QAAS,EAKjB,EADiB,wDACM,KAAK,EAAK,CAEvC,GAAI,CAAC,EACJ,MAAU,MAAM,6CAA6C,IAAO,CAGrE,IAAM,EAAS,OAAO,SAAS,EAAM,GAAI,GAAG,CACtC,EAAW,EAAM,GAAG,MAAM,CAC1B,EAAU,OAAO,SAAS,EAAM,GAAI,GAAG,CAGvC,EACL,EAAkB,gBAAgB,IAAI,EAAK,WAAW,EAAI,EAAK,WAC1D,EACL,EAAkB,gBAAgB,IAAI,EAAK,SAAS,EAAI,EAAK,SAK9D,MAAO,CACN,KAAM,gBACN,OACA,KAAM,CACL,WAAY,EAAK,WACjB,SAAU,EAAK,SACf,gBACA,cACA,CACD,WAXkB,GAYlB,YAAa,EACb,cAAe,EACf,gBAAiB,EACjB,SACA,WACA,UACA,CCjOF,SAAgB,EACd,EACA,EACA,EAA0B,SACL,CACrB,IAAM,EAAe,IAAI,IAGnB,EAAuB,CAAC,EAAE,CAC5B,EAEJ,MAAQ,EAAQ,EAAgB,KAAK,EAAK,IAAM,MAE9C,EAAW,KAAK,EAAM,MAAQ,EAAM,GAAG,OAAO,CAGhD,EAAW,KAAK,EAAK,OAAO,CAG5B,IAAK,IAAI,EAAI,EAAG,EAAI,EAAU,OAAQ,IAAK,CAEzC,IAAM,EADW,EAAU,GACI,KAAK,cAGhC,EAAe,EACnB,IAAK,IAAI,EAAI,EAAG,EAAI,EAAW,OAAS,EAAG,IACzC,GAAI,GAAiB,EAAW,IAAM,EAAgB,EAAW,EAAI,GAAI,CACvE,EAAe,EACf,MAIJ,EAAa,IAAI,EAAG,EAAa,CAGnC,OAAO,EAYT,SAAgB,EACd,EACA,EACA,EACA,EACS,CACT,GAAI,IAAa,OAEf,MAAO,GAIT,IAAM,EAAsB,EAAa,IAAI,EAAgB,CACvD,EAAmB,EAAa,IAAI,EAAa,CASvD,OANI,IAAwB,IAAA,IAAa,IAAqB,IAAA,GACrD,GAKF,IAAwB,ECpEjC,SAAgB,EAAoB,EAAW,EAAmB,CAEhE,GAAI,EAAE,SAAW,EAAG,OAAO,EAAE,OAC7B,GAAI,EAAE,SAAW,EAAG,OAAO,EAAE,OAI7B,IAAM,EAAiB,MAAM,KAAK,CAAE,OAAQ,EAAE,OAAS,EAAG,KACxD,MAAM,EAAE,OAAS,EAAE,CAAC,KAAK,EAAE,CAC5B,CAGD,IAAK,IAAI,EAAI,EAAG,GAAK,EAAE,OAAQ,IAC7B,EAAG,GAAG,GAAK,EAEb,IAAK,IAAI,EAAI,EAAG,GAAK,EAAE,OAAQ,IAC7B,EAAG,GAAG,GAAK,EAIb,IAAK,IAAI,EAAI,EAAG,GAAK,EAAE,OAAQ,IAC7B,IAAK,IAAI,EAAI,EAAG,GAAK,EAAE,OAAQ,IACzB,EAAE,EAAI,KAAO,EAAE,EAAI,GAErB,EAAG,GAAG,GAAK,EAAG,EAAI,GAAG,EAAI,GAMzB,EAAG,GAAG,GAAK,EAAI,KAAK,IAClB,EAAG,EAAI,GAAG,GACV,EAAG,GAAG,EAAI,GACV,EAAG,EAAI,GAAG,EAAI,GACf,CAKP,OAAO,EAAG,EAAE,QAAQ,EAAE,QAgBxB,SAAgB,EAA8B,EAAW,EAAmB,CAE1E,IAAM,EAAS,EAAE,aAAa,CACxB,EAAS,EAAE,aAAa,CAGxB,EAAW,EAAoB,EAAQ,EAAO,CAG9C,EAAY,KAAK,IAAI,EAAO,OAAQ,EAAO,OAAO,CAIxD,OAHI,IAAc,EAAU,EAGrB,EAAI,EAAW,ECvDxB,IAAa,EAAb,KAA8B,CAa5B,YACE,EACA,EACA,EAA6B,EAAE,CAC/B,CACA,KAAK,UAAY,EACjB,KAAK,KAAO,EAGZ,KAAK,QAAU,CACb,cAAe,EAAQ,eAAiB,YACxC,qBAAsB,EAAQ,sBAAwB,GACtD,yBAA0B,EAAQ,0BAA4B,SAC9D,mBAAoB,EAAQ,oBAAsB,GAClD,oBAAqB,EAAQ,qBAAuB,GACpD,sBAAuB,EAAQ,uBAAyB,GACxD,iBAAkB,EAAQ,kBAAoB,GAC/C,CAGD,KAAK,QAAU,CACb,cAAe,EACf,aAAc,EACd,iBAAkB,IAAA,GAClB,oBAAqB,IAAI,IACzB,aAAc,IAAI,IACnB,CAGG,KAAK,QAAQ,uBACf,KAAK,QAAQ,aAAe,EAC1B,EACA,EACA,KAAK,QAAQ,yBACd,EASL,SAA8B,CAC5B,IAAM,EAA+B,EAAE,CAEvC,IAAK,IAAI,EAAI,EAAG,EAAI,KAAK,UAAU,OAAQ,IAAK,CAC9C,KAAK,QAAQ,cAAgB,EAC7B,IAAM,EAAW,KAAK,UAAU,GAG5B,EAEJ,OAAQ,EAAS,KAAjB,CACE,IAAK,KACH,EAAa,KAAK,UAAU,EAAuB,CACnD,MACF,IAAK,QACH,EAAa,KAAK,aAAa,EAA0B,CACzD,MACF,IAAK,gBACH,EAAa,KAAK,qBAAqB,EAAkC,CACzE,MACF,QAEM,KAAK,eAAe,EAAS,GAC/B,KAAK,QAAQ,iBAAmB,EAChC,KAAK,kBAAkB,EAAU,EAAE,EAErC,MAIJ,EAAS,KAAK,CACZ,GAAG,EACH,aACD,CAAC,CAGJ,OAAO,EAMT,UAAkB,EAAoD,CACpE,IAAM,EAAe,KAAK,QAAQ,cAG9B,EACJ,IAAK,IAAI,EAAI,EAAe,EAAG,GAAK,EAAG,IAErC,GADkB,KAAK,UAAU,GACnB,OAAS,OAAQ,CAC7B,EAAkB,EAClB,MAcJ,OATI,IAAoB,IAAA,GACf,KAAK,oBAAoB,wCAAwC,CAIrE,KAAK,cAAc,EAAiB,EAAa,CAI/C,CACL,WAAY,EACZ,WAAY,EACb,CANQ,KAAK,oBAAoB,6CAA6C,CAYjF,aAAqB,EAAuD,CAC1E,IAAM,EAAe,KAAK,QAAQ,cAC5B,
EAAkB,KAAK,mBAAmB,EAAS,UAAU,CAG/D,EAEJ,IAAK,GAAM,CAAC,EAAW,KAAkB,KAAK,QAAQ,oBAAqB,CAEzE,GAAI,CAAC,KAAK,cAAc,EAAe,EAAa,CAClD,SAIF,IAAM,EAAa,EAA8B,EAAiB,EAAU,EAGxE,CAAC,GAAa,EAAa,EAAU,cACvC,EAAY,CAAE,MAAO,EAAe,aAAY,EAKpD,GAAI,CAAC,EACH,OAAO,KAAK,oBAAoB,kCAAkC,CAGpE,GAAI,EAAU,WAAa,KAAK,QAAQ,oBACtC,OAAO,KAAK,oBACV,yBAAyB,EAAU,WAAW,QAAQ,EAAE,CAAC,mBAAmB,KAAK,QAAQ,sBAC1F,CAIH,IAAM,EAAqB,EAAE,CAK7B,OAJI,EAAU,WAAa,GACzB,EAAS,KAAK,2BAA2B,EAAU,WAAW,QAAQ,EAAE,GAAG,CAGtE,CACL,WAAY,EAAU,MACtB,WAAY,EAAU,WACtB,SAAU,EAAS,OAAS,EAAI,EAAW,IAAA,GAC5C,CAMH,qBAA6B,EAA+D,CAC1F,IAAM,EAAe,KAAK,QAAQ,cAGlC,IAAK,IAAI,EAAI,EAAe,EAAG,GAAK,EAAG,IAAK,CAC1C,IAAM,EAAY,KAAK,UAAU,GAGjC,GAAI,EAAU,OAAS,OACrB,SAGF,IAAM,EAAW,EAGjB,GACE,EAAS,SAAW,EAAS,QAC7B,KAAK,kBAAkB,EAAS,SAAS,GAAK,KAAK,kBAAkB,EAAS,SAAS,CAQvF,OALK,KAAK,cAAc,EAAG,EAAa,CAKjC,CACL,WAAY,EACZ,WAAY,IACb,CAPQ,KAAK,oBAAoB,2CAA2C,CAWjF,OAAO,KAAK,oBAAoB,uCAAuC,CAMzE,eAAuB,EAA6B,CAClD,OACE,EAAS,OAAS,QAClB,EAAS,OAAS,WAClB,EAAS,OAAS,WAClB,EAAS,OAAS,WAClB,EAAS,OAAS,aAClB,EAAS,OAAS,kBAQtB,kBAA0B,EAAoB,EAAqB,CAEjE,GAAI,EAAS,OAAS,OAAQ,CAC5B,IAAM,EAAW,EACX,EAAY,KAAK,iBAAiB,EAAS,CACjD,GAAI,EAAW,CACb,IAAM,EAAa,KAAK,mBAAmB,EAAU,CACrD,KAAK,QAAQ,oBAAoB,IAAI,EAAY,EAAM,GAS7D,iBAAyB,EAAgD,CAKvE,IAAM,EAAgB,EAAS,KAAK,cAE9B,EAAgB,KAAK,IAAI,EAAG,EAAgB,IAAI,CAChD,EAAa,KAAK,KAAK,UAAU,EAAe,EAAc,CAI9D,EAAS,EAAW,MAAM,4FAA4F,CAO5H,OANI,EACK,EAAO,GAAG,MAAM,CAIL,EAAW,MAAM,8CAA8C,GAC9D,GAAG,MAAM,CAMhC,mBAA2B,EAAsB,CAC/C,OAAO,EACJ,aAAa,CACb,QAAQ,OAAQ,IAAI,CACpB,MAAM,CAMX,kBAA0B,EAA0B,CAClD,OAAO,EACJ,aAAa,CACb,QAAQ,OAAQ,GAAG,CACnB,QAAQ,MAAO,GAAG,CAMvB,cAAsB,EAAyB,EAA+B,CAC5E,OAAO,EACL,EACA,EACA,KAAK,QAAQ,aACb,KAAK,QAAQ,cACd,CAMH,oBAA4B,EAA8C,CACxE,GAAI,KAAK,QAAQ,iBACf,MAAO,CACL,WAAY,IAAA,GACZ,cAAe,EACf,WAAY,EACb,GCtTP,SAAgB,EACd,EACA,EACA,EACoB,CAEpB,OADiB,IAAI,EAAiB,EAAW,EAAM,EAAQ,CAC/C,SAAS,CC2H3B,SAAgB,EACf,EACA,EACkC,CAClC,IAAM,EAAY,YAAY,KAAK,CAG7B,CAAE,UAAS,oBAAmB,YAAa,EAChD,EACA,GAAS,SACT,CAWK,EAAS,EAAS,EAPJ,GAAS,UAAY,CACxC,GAAG,EACH,GAAG,EACH,GAAG,EACH,GAAG,EACH,GAAG,EACH,CAC4C,CAKvC,EAAoC,EAAE,CACtC,EAAgB,IAAI,IAE1B,IAAK,IAAM,KAAS,EAAQ,CAC3B,IAAM,EAAS,GAAG,EAAM,KAAK,WAAW,GAAG,EAAM,KAAK,WACjD,EAAc,IAAI,EAAO,GAC7B,EAAc,IAAI,EAAO,CACzB,EAAmB,KAAK,EAAM,EAKhC,IAAM,EAAwB,EAAE,CAChC,IAAK,IAAM,KAAS,EAAoB,CACvC,IAAI,EAEJ,OAAQ,EAAM,KAAd,CACC,IAAK,OAEJ,AAOC,EAPG,EAAM,YAAc,MAAQ,EAAM,YAAc,OACxC,EAAU,EAAO,EAAkB,CACpC,EAAM,YAAc,QACnB,EAAa,EAAO,EAAkB,CACvC,EAAM,YAAc,gBACnB,EAAqB,EAAO,EAAkB,CAE9C,EAAY,EAAO,EAAkB,CAEjD,MACD,IAAK,UACJ,EAAW,EAAe,EAAO,EAAkB,CACnD,MACD,IAAK,UACJ,EAAW,EAAe,EAAO,EAAkB,CACnD,MACD,IAAK,UACJ,EAAW,EAAe,EAAO,EAAkB,CACnD,MACD,IAAK,YACJ,EAAW,EAAiB,EAAO,EAAkB,CACrD,MACD,IAAK,kBACJ,EAAW,EAAuB,EAAO,EAAkB,CAC3D,MACD,QAEC,SAIE,EAAS,OAAS,IACrB,EAAS,SAAW,CAAC,GAAI,EAAS,UAAY,EAAE,CAAG,GAAG,EAAS,EAIhE,EAAS,cAAgB,YAAY,KAAK,CAAG,EAE7C,EAAU,KAAK,EAAS,CAQzB,OAJI,GAAS,QACL,EAAiB,EAAW,EAAM,EAAQ,kBAAkB,CAG7D,EAyBR,eAAsB,EACrB,EACA,EAC2C,CAG3C,OAAO,EAAiB,EAAM,EAAQ"}