@lexbuild/usc 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +140 -0
- package/dist/index.js +753 -0
- package/dist/index.js.map +1 -0
- package/package.json +70 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/**
 * USC Converter — orchestrates the full conversion pipeline for a single USC XML file.
 *
 * Creates a ReadStream → SAX Parser → AST Builder (emit at section) →
 * Markdown Renderer + Frontmatter → File Writer.
 */
/** Options for converting a USC XML file */
interface ConvertOptions {
  /** Path to the input XML file */
  input: string;
  /** Output directory root */
  output: string;
  /** Output granularity: "section" (one file per section) or "chapter" (sections inline) */
  granularity: "section" | "chapter";
  /** How to render cross-references */
  linkStyle: "relative" | "canonical" | "plaintext";
  /** Include source credits in output */
  includeSourceCredits: boolean;
  /** Include notes in output. True = all notes (default). False = no notes. */
  includeNotes: boolean;
  /** Include editorial notes only (when includeNotes is false) */
  includeEditorialNotes: boolean;
  /** Include statutory notes only (when includeNotes is false) */
  includeStatutoryNotes: boolean;
  /** Include amendment history notes only (when includeNotes is false) */
  includeAmendments: boolean;
  /** Dry-run mode: parse and report structure without writing files */
  dryRun: boolean;
}
/** Result of a conversion */
interface ConvertResult {
  /** Number of sections written (or that would be written in dry-run) */
  sectionsWritten: number;
  /** Output paths of all written files (empty in dry-run) */
  files: string[];
  /** Title number extracted from metadata */
  titleNumber: string;
  /** Title name extracted from metadata */
  titleName: string;
  /** Whether this was a dry run */
  dryRun: boolean;
  /** Chapter count */
  chapterCount: number;
  /** Estimated total tokens (roughly content length / 4) */
  totalTokenEstimate: number;
  /** Peak resident set size in bytes during conversion */
  peakMemoryBytes: number;
}
/**
 * Convert a single USC XML file to section-level Markdown files.
 */
declare function convertTitle(options: ConvertOptions): Promise<ConvertResult>;

/**
 * OLRC U.S. Code XML downloader.
 *
 * Downloads USC title XML zip files from the Office of the Law Revision Counsel
 * and extracts them to a local directory.
 */
/**
 * Current OLRC release point.
 *
 * Update this value when OLRC publishes a new release point.
 * The release point appears in download URLs and identifies which
 * public laws are incorporated. Format: "{congress}-{law}[not{excluded}]"
 *
 * Check https://uscode.house.gov/download/download.shtml for the latest.
 */
declare const CURRENT_RELEASE_POINT = "119-73not60";
/** Valid USC title numbers (1-54) */
declare const USC_TITLE_NUMBERS: number[];
/**
 * Check whether a list of title numbers covers all 54 USC titles.
 *
 * Handles arbitrary ordering and duplicates.
 */
declare function isAllTitles(titles: number[]): boolean;
/** Options for downloading USC XML files */
interface DownloadOptions {
  /** Directory to save downloaded XML files */
  outputDir: string;
  /** Specific title numbers to download, or undefined for all */
  titles?: number[] | undefined;
  /** Release point override (default: CURRENT_RELEASE_POINT) */
  releasePoint?: string | undefined;
}
/** Result of a download operation */
interface DownloadResult {
  /** Release point used */
  releasePoint: string;
  /** Files successfully downloaded and extracted */
  files: DownloadedFile[];
  /** Titles that failed to download */
  errors: DownloadError[];
}
/** A successfully downloaded file */
interface DownloadedFile {
  /** Title number */
  titleNumber: number;
  /** Path to the extracted XML file */
  filePath: string;
  /** Size in bytes */
  size: number;
}
/** A failed download */
interface DownloadError {
  /** Title number */
  titleNumber: number;
  /** Error message */
  message: string;
}
/**
 * Download USC title XML files from OLRC.
 *
 * When all 54 titles are requested, uses the bulk `uscAll` zip for a single
 * HTTP round-trip instead of 54 individual requests. Falls back to per-title
 * downloads if the bulk download fails.
 */
declare function downloadTitles(options: DownloadOptions): Promise<DownloadResult>;
/**
 * Build the download URL for a single title's XML zip.
 *
 * Format: {base}/pl/{releasePointPath}/xml_usc{NN}@{releasePoint}.zip
 *
 * The release point path splits the release point into directory segments.
 * For "119-73not60", the path is "119/73not60".
 */
declare function buildDownloadUrl(titleNumber: number, releasePoint: string): string;
/**
 * Build the download URL for all titles in a single zip.
 */
declare function buildAllTitlesUrl(releasePoint: string): string;
/**
 * Convert a release point string to a URL path segment.
 * "119-73not60" → "119/73not60"
 * "119-43" → "119/43"
 */
declare function releasePointToPath(releasePoint: string): string;

export { CURRENT_RELEASE_POINT, type ConvertOptions, type ConvertResult, type DownloadError, type DownloadOptions, type DownloadResult, type DownloadedFile, USC_TITLE_NUMBERS, buildAllTitlesUrl, buildDownloadUrl, convertTitle, downloadTitles, isAllTitles, releasePointToPath };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,753 @@
|
|
|
1
|
+
// src/converter.ts
|
|
2
|
+
import { createReadStream } from "fs";
|
|
3
|
+
import { mkdir, writeFile } from "fs/promises";
|
|
4
|
+
import { join, dirname } from "path";
|
|
5
|
+
import { basename } from "path";
|
|
6
|
+
import {
|
|
7
|
+
XMLParser,
|
|
8
|
+
ASTBuilder,
|
|
9
|
+
renderDocument,
|
|
10
|
+
renderSection,
|
|
11
|
+
generateFrontmatter,
|
|
12
|
+
createLinkResolver,
|
|
13
|
+
FORMAT_VERSION,
|
|
14
|
+
GENERATOR
|
|
15
|
+
} from "@lexbuild/core";
|
|
16
|
+
// Default values for ConvertOptions. convertTitle spreads caller options over
// this object, so any field the caller omits falls back to the value here.
// Notes are included by default; the per-category flags only matter when
// includeNotes is false (see buildNotesFilter).
var DEFAULTS = {
  granularity: "section",
  linkStyle: "plaintext",
  includeSourceCredits: true,
  includeNotes: true,
  includeEditorialNotes: false,
  includeStatutoryNotes: false,
  includeAmendments: false,
  dryRun: false
};
|
|
26
|
+
/**
 * Convert a single USC XML file to Markdown output.
 *
 * Pipeline: file read stream → XMLParser (SAX-style events) → ASTBuilder,
 * which emits one node per section or per chapter depending on granularity.
 * Emitted nodes are buffered in `collected`, then either reported (dry-run)
 * or rendered and written to disk. Returns summary statistics.
 *
 * NOTE: ASTBuilder/XMLParser/createLinkResolver come from @lexbuild/core;
 * their exact semantics are not visible in this file.
 *
 * @param options - partial ConvertOptions; missing fields fall back to DEFAULTS
 * @returns ConvertResult summary (counts, file paths, peak RSS, token estimate)
 */
async function convertTitle(options) {
  const opts = { ...DEFAULTS, ...options };
  const files = [];
  // Peak resident set size, sampled before and after the heavy phases.
  let peakMemory = process.memoryUsage.rss();
  // Buffer of { node, context } pairs emitted by the AST builder.
  const collected = [];
  const emitAt = opts.granularity === "chapter" ? "chapter" : "section";
  const builder = new ASTBuilder({
    emitAt,
    onEmit: (node, context) => {
      collected.push({ node, context });
    }
  });
  // Wire SAX events straight into the builder.
  const parser = new XMLParser();
  parser.on("openElement", (name, attrs) => builder.onOpenElement(name, attrs));
  parser.on("closeElement", (name) => builder.onCloseElement(name));
  parser.on("text", (text) => builder.onText(text));
  const stream = createReadStream(opts.input, "utf-8");
  await parser.parseStream(stream);
  peakMemory = Math.max(peakMemory, process.memoryUsage.rss());
  const sectionMetas = [];
  const meta = builder.getDocumentMeta();
  if (opts.dryRun) {
    // Dry run: build section metadata only, write nothing.
    for (const { node, context } of collected) {
      if (opts.granularity === "chapter") {
        // Emitted nodes are chapters; walk their direct section children.
        for (const child of node.children) {
          if (child.type === "level" && child.levelType === "section") {
            sectionMetas.push(buildSectionMetaDryRun(child, node, context));
          }
        }
      } else {
        // Emitted nodes are sections; skip ones without a section number.
        if (node.numValue) {
          sectionMetas.push(buildSectionMetaDryRun(node, null, context));
        }
      }
    }
  } else if (opts.granularity === "chapter") {
    // One Markdown file per chapter, sections rendered inline.
    for (const { node, context } of collected) {
      const result = await writeChapter(node, context, opts);
      if (result) {
        files.push(result.filePath);
        for (const m of result.sectionMetas) {
          sectionMetas.push(m);
        }
      }
    }
  } else {
    // One Markdown file per section. First pass: detect duplicate section
    // numbers within a chapter directory and assign "-2", "-3", … suffixes.
    const sectionCounts = /* @__PURE__ */ new Map();
    const suffixes = [];
    for (const { node, context } of collected) {
      const sectionNum = node.numValue;
      if (!sectionNum) {
        suffixes.push(void 0);
        continue;
      }
      const chapterDir = buildChapterDir(context) ?? "__root__";
      const key = `${chapterDir}/${sectionNum}`;
      const count = (sectionCounts.get(key) ?? 0) + 1;
      sectionCounts.set(key, count);
      suffixes.push(count > 1 ? `-${count}` : void 0);
    }
    // Second pass: register every section's output path so cross-references
    // can be resolved to relative links while rendering.
    const linkResolver = createLinkResolver();
    for (const [i, { node, context }] of collected.entries()) {
      const sectionNum = node.numValue;
      if (sectionNum && node.identifier) {
        const filePath = buildOutputPath(context, sectionNum, opts.output, suffixes[i]);
        const regId = suffixes[i] ? `${node.identifier}#${suffixes[i]}` : node.identifier;
        linkResolver.register(regId, filePath);
        // NOTE(review): when there is no suffix, regId === node.identifier, so
        // this second register repeats the call above — presumably harmless.
        if (!suffixes[i]) {
          linkResolver.register(node.identifier, filePath);
        }
      }
    }
    // Third pass: render and write each section.
    for (const [i, { node, context }] of collected.entries()) {
      const result = await writeSection(node, context, opts, linkResolver, suffixes[i]);
      if (result) {
        files.push(result.filePath);
        sectionMetas.push(result.meta);
      }
    }
  }
  // Title heading comes from the first emitted node's "title" ancestor.
  const firstCollected = collected[0];
  const titleHeading = firstCollected ? findAncestor(firstCollected.context.ancestors, "title")?.heading?.trim() : void 0;
  if (!opts.dryRun) {
    await writeMetaFiles(sectionMetas, meta, opts, titleHeading);
  }
  peakMemory = Math.max(peakMemory, process.memoryUsage.rss());
  const chapterIds = new Set(sectionMetas.map((s) => s.chapterIdentifier));
  // Token estimate heuristic: ~4 characters per token.
  const totalTokens = sectionMetas.reduce((sum, s) => sum + Math.ceil(s.contentLength / 4), 0);
  return {
    sectionsWritten: opts.dryRun ? sectionMetas.length : files.length,
    files,
    titleNumber: meta.docNumber ?? "unknown",
    titleName: titleHeading ?? meta.dcTitle ?? "Unknown Title",
    dryRun: opts.dryRun,
    chapterCount: chapterIds.size,
    totalTokenEstimate: totalTokens,
    peakMemoryBytes: peakMemory
  };
}
|
|
125
|
+
/**
 * Render one section node to Markdown and write it to disk.
 *
 * Returns null when the node has no section number (nothing to write);
 * otherwise returns { filePath, meta } where meta feeds the _meta.json
 * and README generation in writeMetaFiles.
 *
 * @param node - section AST node emitted by the builder
 * @param context - emit context (ancestors chain + document metadata)
 * @param options - fully-defaulted ConvertOptions
 * @param linkResolver - resolver from convertTitle, used for relative links
 * @param dupSuffix - "-N" suffix for duplicate section numbers, or undefined
 */
async function writeSection(node, context, options, linkResolver, dupSuffix) {
  const sectionNum = node.numValue;
  if (!sectionNum) return null;
  const filePath = buildOutputPath(context, sectionNum, options.output, dupSuffix);
  const frontmatter = buildFrontmatter(node, context);
  const notesFilter = buildNotesFilter(options);
  const renderOpts = {
    headingOffset: 0,
    linkStyle: options.linkStyle,
    // Cross-references resolve relative to this section's own file path.
    resolveLink: linkResolver ? (identifier) => linkResolver.resolve(identifier, filePath) : void 0,
    notesFilter
  };
  // Optionally drop sourceCredit children before rendering.
  const sectionNode = options.includeSourceCredits ? node : stripSourceCredits(node);
  const markdown = renderDocument(sectionNode, frontmatter, renderOpts);
  await mkdir(dirname(filePath), { recursive: true });
  await writeFile(filePath, markdown, "utf-8");
  // Assemble the metadata record used later by writeMetaFiles.
  const titleNum = findAncestor(context.ancestors, "title")?.numValue ?? "0";
  const chapterAncestor = findAncestor(context.ancestors, "chapter");
  const chapterDir = chapterAncestor?.numValue ? `chapter-${padTwo(chapterAncestor.numValue)}` : "";
  const sectionFileName = `section-${sectionNum}${dupSuffix ?? ""}.md`;
  const relativeFile = chapterDir ? `${chapterDir}/${sectionFileName}` : sectionFileName;
  const hasNotes = node.children.some((c) => c.type === "notesContainer" || c.type === "note");
  const sectionMeta = {
    identifier: node.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,
    number: sectionNum,
    name: node.heading?.trim() ?? "",
    fileName: sectionFileName,
    relativeFile,
    contentLength: markdown.length,
    hasNotes,
    status: node.status ?? "current",
    chapterIdentifier: chapterAncestor?.identifier ?? "",
    chapterNumber: chapterAncestor?.numValue ?? "0",
    chapterName: chapterAncestor?.heading?.trim() ?? ""
  };
  return { filePath, meta: sectionMeta };
}
|
|
162
|
+
/**
 * Write the per-chapter _meta.json files, the title-level _meta.json, and the
 * title README.md under {output}/usc/{title-dir}/.
 *
 * No-op when there are no section metas (e.g. empty input).
 *
 * @param sectionMetas - records accumulated by writeSection/writeChapter
 * @param docMeta - document-level metadata from the AST builder
 * @param options - fully-defaulted ConvertOptions
 * @param titleHeading - heading of the title ancestor, if any
 */
async function writeMetaFiles(sectionMetas, docMeta, options, titleHeading) {
  if (sectionMetas.length === 0) return;
  const docNum = docMeta.docNumber ?? "0";
  const titleDirName = buildTitleDirFromDocNumber(docNum);
  const titleDir = join(options.output, "usc", titleDirName);
  const currency = parseCurrency(docMeta.docPublicationName ?? "");
  // Group section metas by their chapter identifier.
  const chapterMap = /* @__PURE__ */ new Map();
  for (const sm of sectionMetas) {
    const key = sm.chapterIdentifier || "__no_chapter__";
    let arr = chapterMap.get(key);
    if (!arr) {
      arr = [];
      chapterMap.set(key, arr);
    }
    arr.push(sm);
  }
  const chapterEntries = [];
  for (const [chapterId, chapterSections] of chapterMap) {
    // Sections with no chapter get no chapter-level _meta.json.
    if (chapterId === "__no_chapter__") continue;
    const first = chapterSections[0];
    if (!first) continue;
    const chapterDir = `chapter-${padTwo(first.chapterNumber)}`;
    const sections = chapterSections.map((sm) => ({
      identifier: sm.identifier,
      number: sm.number,
      name: sm.name,
      file: sm.fileName,
      // ~4 characters per token heuristic, consistent with convertTitle.
      token_estimate: Math.ceil(sm.contentLength / 4),
      has_notes: sm.hasNotes,
      status: sm.status
    }));
    const chapterMeta = {
      format_version: FORMAT_VERSION,
      identifier: chapterId,
      chapter_number: parseIntSafe(first.chapterNumber),
      chapter_name: first.chapterName,
      title_number: parseIntSafe(docNum),
      section_count: sections.length,
      sections
    };
    const chapterMetaPath = join(titleDir, chapterDir, "_meta.json");
    await mkdir(dirname(chapterMetaPath), { recursive: true });
    await writeFile(chapterMetaPath, JSON.stringify(chapterMeta, null, 2) + "\n", "utf-8");
    chapterEntries.push({
      identifier: chapterId,
      number: parseIntSafe(first.chapterNumber),
      name: first.chapterName,
      directory: chapterDir,
      sections
    });
  }
  const totalTokens = sectionMetas.reduce((sum, sm) => sum + Math.ceil(sm.contentLength / 4), 0);
  const titleMeta = {
    format_version: FORMAT_VERSION,
    generator: GENERATOR,
    generated_at: (/* @__PURE__ */ new Date()).toISOString(),
    identifier: docMeta.identifier ?? `/us/usc/t${docNum}`,
    title_number: parseIntSafe(docNum),
    title_name: titleHeading ?? docMeta.dcTitle ?? "",
    positive_law: docMeta.positivelaw ?? false,
    currency,
    source_xml: basename(options.input),
    granularity: options.granularity,
    stats: {
      chapter_count: chapterEntries.length,
      section_count: sectionMetas.length,
      total_files: sectionMetas.length,
      total_tokens_estimate: totalTokens
    },
    chapters: chapterEntries
  };
  const titleMetaPath = join(titleDir, "_meta.json");
  await mkdir(dirname(titleMetaPath), { recursive: true });
  await writeFile(titleMetaPath, JSON.stringify(titleMeta, null, 2) + "\n", "utf-8");
  // Human-readable index alongside the machine-readable _meta.json.
  const readmePath = join(titleDir, "README.md");
  const readme = generateTitleReadme(titleMeta);
  await writeFile(readmePath, readme, "utf-8");
}
|
|
240
|
+
/**
 * Build the README.md body for a title directory from its title metadata.
 *
 * Layout: H1 header, a two-column summary table, one H3 block per chapter
 * with a section count and a link to the chapter directory, then a footer.
 * Returns the full Markdown text, ending with a trailing newline.
 */
function generateTitleReadme(meta) {
  const header = [
    `# Title ${meta.title_number} \u2014 ${meta.title_name}`,
    "",
    `| | |`,
    `| --- | --- |`,
    `| **Positive Law** | ${meta.positive_law ? "Yes" : "No"} |`,
    `| **Currency** | ${meta.currency || "unknown"} |`,
    `| **Chapters** | ${meta.stats.chapter_count} |`,
    `| **Sections** | ${meta.stats.section_count.toLocaleString()} |`,
    `| **Est. Tokens** | ${meta.stats.total_tokens_estimate.toLocaleString()} |`,
    `| **Granularity** | ${meta.granularity} |`,
    "",
    "## Chapters",
    ""
  ];
  // Four lines per chapter: heading, blank, count + directory link, blank.
  const chapterLines = meta.chapters.flatMap((ch) => {
    const count = ch.sections.length;
    const plural = count !== 1 ? "s" : "";
    return [
      `### Chapter ${ch.number} \u2014 ${ch.name}`,
      "",
      `${count} section${plural} \xB7 [${ch.directory}/](${ch.directory}/)`,
      ""
    ];
  });
  const footer = [
    "---",
    "",
    `Generated by [lexbuild](https://github.com/chris-c-thomas/lexbuild)`,
    ""
  ];
  return [...header, ...chapterLines, ...footer].join("\n");
}
|
|
270
|
+
/**
 * Render a whole chapter (with all its sections inline) to one Markdown file
 * under {output}/usc/title-NN/chapter-NN.md.
 *
 * Returns null when the chapter has no number; otherwise returns
 * { filePath, sectionMetas } where sectionMetas all point at the shared
 * chapter file via relativeFile.
 *
 * @param chapterNode - chapter AST node emitted by the builder
 * @param context - emit context (ancestors chain + document metadata)
 * @param options - fully-defaulted ConvertOptions
 */
async function writeChapter(chapterNode, context, options) {
  const chapterNum = chapterNode.numValue;
  if (!chapterNum) return null;
  const titleNum = findAncestor(context.ancestors, "title")?.numValue ?? "0";
  const titleDir = `title-${padTwo(titleNum)}`;
  const chapterFile = `chapter-${padTwo(chapterNum)}.md`;
  const filePath = join(options.output, "usc", titleDir, chapterFile);
  const titleAncestor = findAncestor(context.ancestors, "title");
  const meta = context.documentMeta;
  const chapterName = chapterNode.heading?.trim() ?? "";
  const titleName = titleAncestor?.heading?.trim() ?? meta.dcTitle ?? "";
  const currency = parseCurrency(meta.docPublicationName ?? "");
  const lastUpdated = parseDate(meta.created ?? "");
  // Frontmatter for the chapter file. NOTE(review): section_number/section_name
  // are filled with the chapter's values here — presumably so chapter files
  // share the section frontmatter schema; confirm against the renderer.
  const fmData = {
    identifier: chapterNode.identifier ?? `/us/usc/t${titleNum}/ch${chapterNum}`,
    title: `${titleNum} USC Chapter ${chapterNum} - ${chapterName}`,
    title_number: parseIntSafe(titleNum),
    title_name: titleName,
    section_number: chapterNum,
    section_name: chapterName,
    chapter_number: parseIntSafe(chapterNum),
    chapter_name: chapterName,
    positive_law: meta.positivelaw ?? false,
    currency,
    last_updated: lastUpdated
  };
  const notesFilter = buildNotesFilter(options);
  const renderOpts = {
    headingOffset: 0,
    linkStyle: options.linkStyle,
    notesFilter
  };
  const parts = [];
  parts.push(generateFrontmatter(fmData));
  parts.push("");
  parts.push(`# Chapter ${chapterNum} \u2014 ${chapterName}`);
  const sectionMetas = [];
  // Render each direct section child inline, demoted one heading level.
  for (const child of chapterNode.children) {
    if (child.type === "level" && child.levelType === "section") {
      const sectionOpts = { ...renderOpts, headingOffset: 1 };
      const sectionNode = options.includeSourceCredits ? child : stripSourceCredits(child);
      const sectionMd = renderSection(sectionNode, sectionOpts);
      parts.push("");
      parts.push(sectionMd);
      const sectionNum = child.numValue ?? "0";
      const hasNotes = child.children.some((c) => c.type === "notesContainer" || c.type === "note");
      sectionMetas.push({
        identifier: child.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,
        number: sectionNum,
        name: child.heading?.trim() ?? "",
        fileName: `section-${sectionNum}.md`,
        // All sections of the chapter live in the one chapter file.
        relativeFile: chapterFile,
        contentLength: sectionMd.length,
        hasNotes,
        status: child.status ?? "current",
        chapterIdentifier: chapterNode.identifier ?? "",
        chapterNumber: chapterNum,
        chapterName
      });
    }
  }
  const markdown = parts.join("\n") + "\n";
  await mkdir(dirname(filePath), { recursive: true });
  await writeFile(filePath, markdown, "utf-8");
  return { filePath, sectionMetas };
}
|
|
336
|
+
/**
 * Compute the output file path for a section:
 * {outputRoot}/usc/{title-dir}[/{chapter-dir}]/section-{num}{suffix}.md
 *
 * The chapter segment is omitted when the section has no chapter-like
 * ancestor (buildChapterDir returns undefined).
 */
function buildOutputPath(context, sectionNum, outputRoot, dupSuffix) {
  const fileName = `section-${sectionNum}${dupSuffix ?? ""}.md`;
  const segments = [outputRoot, "usc", buildTitleDir(context)];
  const chapterSegment = buildChapterDir(context);
  if (chapterSegment) {
    segments.push(chapterSegment);
  }
  segments.push(fileName);
  return join(...segments);
}
|
|
345
|
+
/**
 * Derive the title directory name for a section's output path.
 *
 * Order of precedence:
 *   1. docNumber like "5a" → "title-05-appendix"
 *   2. an "appendix" ancestor whose number starts with digits → appendix dir
 *   3. the "title" ancestor's number (default "00") → "title-NN"
 */
function buildTitleDir(context) {
  const docNum = context.documentMeta.docNumber ?? "";
  const appendixDoc = /^(\d+)a$/i.exec(docNum);
  if (appendixDoc?.[1]) {
    return `title-${padTwo(appendixDoc[1])}-appendix`;
  }
  const appendix = findAncestor(context.ancestors, "appendix");
  if (appendix) {
    const digits = /^(\d+)/.exec(appendix.numValue ?? docNum);
    if (digits?.[1]) {
      return `title-${padTwo(digits[1])}-appendix`;
    }
  }
  const titleNum = findAncestor(context.ancestors, "title")?.numValue ?? "00";
  return `title-${padTwo(titleNum)}`;
}
|
|
362
|
+
/**
 * Derive the chapter-level directory segment for a section, or undefined
 * when the section has no chapter-like ancestor.
 *
 * Precedence: numbered "chapter" ancestor → "chapter-NN"; otherwise a
 * compiledAct / reorganizationPlan ancestor yields a slug of its heading
 * (with a fixed fallback); a bare reorganizationPlans ancestor yields a
 * fixed name. Fix: the identical heading-slug logic was duplicated for
 * compiledAct and reorganizationPlan — factored into one local helper.
 */
function buildChapterDir(context) {
  // Lowercase, collapse non-alphanumerics to "-", trim edge dashes, cap at 50.
  const slugify = (heading) => heading.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "").slice(0, 50);
  const chapterNum = findAncestor(context.ancestors, "chapter")?.numValue;
  if (chapterNum) return `chapter-${padTwo(chapterNum)}`;
  const compiledAct = findAncestor(context.ancestors, "compiledAct");
  if (compiledAct) {
    return slugify(compiledAct.heading?.trim() ?? "") || "compiled-act";
  }
  const reorgPlan = findAncestor(context.ancestors, "reorganizationPlan");
  if (reorgPlan) {
    return slugify(reorgPlan.heading?.trim() ?? "") || "reorganization-plan";
  }
  const reorgPlans = findAncestor(context.ancestors, "reorganizationPlans");
  if (reorgPlans) {
    return "reorganization-plans";
  }
  return void 0;
}
|
|
383
|
+
/**
 * Assemble the frontmatter data object for a single section file.
 *
 * Required keys are always present; chapter/subchapter/part keys,
 * source_credit and status are added only when the corresponding
 * ancestor/value exists. Appendix docs ("5a") have the trailing "a"
 * stripped for the numeric title_number.
 *
 * @param node - section AST node
 * @param context - emit context (ancestors chain + document metadata)
 * @returns plain object consumed by generateFrontmatter/renderDocument
 */
function buildFrontmatter(node, context) {
  const meta = context.documentMeta;
  // Appendix documents have an "appendix" ancestor instead of "title".
  const titleAncestor = findAncestor(context.ancestors, "title") ?? findAncestor(context.ancestors, "appendix");
  // Chapter-like ancestors, in order of preference.
  const chapterAncestor = findAncestor(context.ancestors, "chapter") ?? findAncestor(context.ancestors, "compiledAct") ?? findAncestor(context.ancestors, "reorganizationPlan");
  const subchapterAncestor = findAncestor(context.ancestors, "subchapter");
  const partAncestor = findAncestor(context.ancestors, "part");
  const docNum = meta.docNumber ?? titleAncestor?.numValue ?? "0";
  // Strip a trailing appendix "a" so "5a" parses as title 5.
  const titleNum = parseIntSafe(docNum.replace(/a$/i, ""));
  const sectionNum = node.numValue ?? "0";
  const sectionName = node.heading?.trim() ?? "";
  const titleName = titleAncestor?.heading?.trim() ?? meta.dcTitle ?? "";
  const displayTitle = `${titleNum} USC \xA7 ${sectionNum} - ${sectionName}`;
  const sourceCredit = extractSourceCreditText(node);
  const currency = parseCurrency(meta.docPublicationName ?? "");
  const lastUpdated = parseDate(meta.created ?? "");
  const fm = {
    identifier: node.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,
    title: displayTitle,
    title_number: titleNum,
    title_name: titleName,
    section_number: sectionNum,
    section_name: sectionName,
    positive_law: meta.positivelaw ?? false,
    currency,
    last_updated: lastUpdated
  };
  // Optional keys: only emitted when the source data exists.
  if (chapterAncestor?.numValue) {
    fm.chapter_number = parseIntSafe(chapterAncestor.numValue);
  }
  if (chapterAncestor?.heading) {
    fm.chapter_name = chapterAncestor.heading.trim();
  }
  if (subchapterAncestor?.numValue) {
    fm.subchapter_number = subchapterAncestor.numValue;
  }
  if (subchapterAncestor?.heading) {
    fm.subchapter_name = subchapterAncestor.heading.trim();
  }
  if (partAncestor?.numValue) {
    fm.part_number = partAncestor.numValue;
  }
  if (partAncestor?.heading) {
    fm.part_name = partAncestor.heading.trim();
  }
  if (sourceCredit) {
    fm.source_credit = sourceCredit;
  }
  if (node.status) {
    fm.status = node.status;
  }
  return fm;
}
|
|
435
|
+
/**
 * Build a section metadata record without rendering or writing anything
 * (dry-run path of convertTitle).
 *
 * contentLength is the sum of all descendant text lengths — a proxy for
 * rendered size, not the actual Markdown length. NOTE(review): this count
 * includes note text even when note options would exclude notes from real
 * output, so dry-run token estimates can differ from a real run.
 *
 * @param sectionNode - section AST node
 * @param chapterNode - enclosing chapter node (chapter granularity) or null
 * @param context - emit context (ancestors chain + document metadata)
 */
function buildSectionMetaDryRun(sectionNode, chapterNode, context) {
  const titleNum = findAncestor(context.ancestors, "title")?.numValue ?? "0";
  // Chapter info comes from the explicit chapterNode when emitting at
  // chapter granularity, otherwise from the ancestors chain.
  const chapterAncestor = chapterNode ? {
    numValue: chapterNode.numValue,
    heading: chapterNode.heading,
    identifier: chapterNode.identifier
  } : findAncestor(context.ancestors, "chapter");
  const sectionNum = sectionNode.numValue ?? "0";
  const chapterNum = chapterAncestor?.numValue ?? "0";
  const chapterDir = chapterNum !== "0" ? `chapter-${padTwo(chapterNum)}` : "";
  const hasNotes = sectionNode.children.some(
    (c) => c.type === "notesContainer" || c.type === "note"
  );
  // Recursively total the raw text length of the section subtree.
  let contentLength = 0;
  const walk = (node) => {
    if (node.text) contentLength += node.text.length;
    if (node.children) {
      for (const child of node.children) {
        walk(child);
      }
    }
  };
  walk(sectionNode);
  const sectionFileName = `section-${sectionNum}.md`;
  return {
    identifier: sectionNode.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,
    number: sectionNum,
    name: sectionNode.heading?.trim() ?? "",
    fileName: sectionFileName,
    relativeFile: chapterDir ? `${chapterDir}/${sectionFileName}` : sectionFileName,
    contentLength,
    hasNotes,
    status: sectionNode.status ?? "current",
    chapterIdentifier: chapterAncestor?.identifier ?? "",
    chapterNumber: chapterNum,
    chapterName: chapterAncestor?.heading?.trim() ?? ""
  };
}
|
|
473
|
+
/**
 * Translate note-related options into a renderer notes filter.
 *
 * - includeNotes true → undefined (no filtering, all notes kept).
 * - includeNotes false with no per-category flag set → all categories off.
 * - otherwise → a filter reflecting the three per-category flags.
 */
function buildNotesFilter(options) {
  if (options.includeNotes) return void 0;
  const editorial = options.includeEditorialNotes;
  const statutory = options.includeStatutoryNotes;
  const amendments = options.includeAmendments;
  const anySelected = editorial || statutory || amendments;
  if (!anySelected) {
    return { editorial: false, statutory: false, amendments: false };
  }
  return { editorial, statutory, amendments };
}
|
|
484
|
+
/**
 * Return the first ancestor in the chain whose levelType matches, or
 * undefined when none does. The chain is searched in array order.
 */
function findAncestor(ancestors, levelType) {
  for (const ancestor of ancestors) {
    if (ancestor.levelType === levelType) {
      return ancestor;
    }
  }
  return void 0;
}
|
|
487
|
+
/**
 * Map a document number to its title directory name:
 * "5"  → "title-05", "5a" → "title-05-appendix".
 */
function buildTitleDirFromDocNumber(docNum) {
  const appendix = /^(\d+)a$/i.exec(docNum)?.[1];
  return appendix ? `title-${padTwo(appendix)}-appendix` : `title-${padTwo(docNum)}`;
}
|
|
494
|
+
/**
 * Zero-pad a numeric string to at least two digits ("5" → "05").
 * Non-numeric input is returned unchanged; input with a numeric prefix
 * keeps only that prefix (parseInt semantics: "5a" → "05").
 */
function padTwo(num) {
  const parsed = Number.parseInt(num, 10);
  return Number.isNaN(parsed) ? num : String(parsed).padStart(2, "0");
}
|
|
499
|
+
/**
 * Parse a base-10 integer from a string, returning 0 instead of NaN
 * for non-numeric input.
 */
function parseIntSafe(s) {
  const value = Number.parseInt(s, 10);
  return Number.isNaN(value) ? 0 : value;
}
|
|
503
|
+
/**
 * Return the plain-text content of the node's first sourceCredit child,
 * or undefined when the node has none.
 */
function extractSourceCreditText(node) {
  const credit = node.children.find((child) => child.type === "sourceCredit");
  if (!credit) {
    return void 0;
  }
  return credit.children.map((inline) => inlineToText(inline)).join("");
}
|
|
511
|
+
/**
 * Flatten an inline node to text: its own text when present, otherwise the
 * concatenated text of its direct children (one level deep), otherwise "".
 */
function inlineToText(node) {
  if (node.text) {
    return node.text;
  }
  if (!node.children) {
    return "";
  }
  let out = "";
  for (const child of node.children) {
    out += child.text ?? "";
  }
  return out;
}
|
|
518
|
+
/**
 * Extract a "NNN-MM" release-point token from a publication name.
 * Falls back to the raw name, or "unknown" when the name is empty.
 */
function parseCurrency(pubName) {
  const found = /(\d+-\d+)/.exec(pubName)?.[1];
  return found ?? (pubName || "unknown");
}
|
|
523
|
+
/**
 * Reduce an ISO-ish timestamp to its date part (everything before "T").
 * Empty input yields "unknown"; input without "T" is returned unchanged.
 */
function parseDate(dateStr) {
  if (!dateStr) {
    return "unknown";
  }
  const [datePart] = dateStr.split("T");
  return datePart ?? dateStr;
}
|
|
528
|
+
/**
 * Return a shallow copy of the node with all sourceCredit children removed.
 * The input node is not mutated.
 */
function stripSourceCredits(node) {
  const keptChildren = node.children.filter((child) => child.type !== "sourceCredit");
  return { ...node, children: keptChildren };
}
|
|
534
|
+
|
|
535
|
+
// src/downloader.ts
|
|
536
|
+
import { createWriteStream } from "fs";
|
|
537
|
+
import { mkdir as mkdir2, stat, unlink } from "fs/promises";
|
|
538
|
+
import { join as join2 } from "path";
|
|
539
|
+
import { pipeline } from "stream/promises";
|
|
540
|
+
import { Readable } from "stream";
|
|
541
|
+
import { open as yauzlOpen } from "yauzl";
|
|
542
|
+
// Default release point used when the caller does not specify one.
// Format: "<congress>-<tag>" — the first "-" becomes a "/" path segment
// (see releasePointToPath below).
var CURRENT_RELEASE_POINT = "119-73not60";
// Base URL for USC release-point downloads at uscode.house.gov.
var OLRC_BASE_URL = "https://uscode.house.gov/download/releasepoints/us/pl";
// All USC title numbers: [1, 2, ..., 54].
var USC_TITLE_NUMBERS = Array.from({ length: 54 }, (_, i) => i + 1);
|
|
545
|
+
/**
 * True when `titles` covers exactly the full USC title set (1..54),
 * regardless of ordering or duplicates.
 */
function isAllTitles(titles) {
  const unique = new Set(titles);
  // Compare against the canonical list's length rather than a hard-coded 54,
  // so this stays consistent with USC_TITLE_NUMBERS if it ever changes.
  return unique.size === USC_TITLE_NUMBERS.length && USC_TITLE_NUMBERS.every((n) => unique.has(n));
}
|
|
549
|
+
/**
 * Downloads USC title XML files from the OLRC site into options.outputDir.
 *
 * When no title filter is given (or the filter covers all 54 titles), first
 * attempts the single all-titles zip; if that fails for any reason it
 * silently falls back to per-title downloads below. Per-title failures do
 * not abort the run — each is recorded in `errors` as
 * { titleNumber, message } and the remaining titles are still attempted.
 *
 * @returns { releasePoint, files, errors }
 */
async function downloadTitles(options) {
  const releasePoint = options.releasePoint ?? CURRENT_RELEASE_POINT;
  const titles = options.titles ?? USC_TITLE_NUMBERS;
  await mkdir2(options.outputDir, { recursive: true });
  if (options.titles === void 0 || isAllTitles(titles)) {
    try {
      const files2 = await downloadAndExtractAllTitles(releasePoint, options.outputDir);
      return { releasePoint, files: files2, errors: [] };
    } catch {
      // Bulk download failed — fall through to per-title downloads.
    }
  }
  const files = [];
  const errors = [];
  for (const titleNum of titles) {
    try {
      const file = await downloadAndExtractTitle(titleNum, releasePoint, options.outputDir);
      files.push(file);
    } catch (err) {
      // Record the failure and continue with the remaining titles.
      errors.push({
        titleNumber: titleNum,
        message: err instanceof Error ? err.message : String(err)
      });
    }
  }
  return { releasePoint, files, errors };
}
|
|
575
|
+
/**
 * Builds the per-title zip download URL, e.g.
 * ".../us/pl/119/73not60/xml_usc05@119-73not60.zip".
 */
function buildDownloadUrl(titleNumber, releasePoint) {
  const padded = String(titleNumber).padStart(2, "0");
  const rpSegment = releasePointToPath(releasePoint);
  return [OLRC_BASE_URL, rpSegment, `xml_usc${padded}@${releasePoint}.zip`].join("/");
}
|
|
580
|
+
/** Builds the all-titles zip download URL for a release point. */
function buildAllTitlesUrl(releasePoint) {
  const rpSegment = releasePointToPath(releasePoint);
  return [OLRC_BASE_URL, rpSegment, `xml_uscAll@${releasePoint}.zip`].join("/");
}
|
|
584
|
+
/**
 * Converts a release point such as "119-73not60" into the URL path segment
 * "119/73not60". Only the FIRST "-" becomes a "/"; a release point with no
 * dash is returned unchanged. (String.replace with a string pattern
 * replaces only the first occurrence.)
 */
function releasePointToPath(releasePoint) {
  return releasePoint.replace("-", "/");
}
|
|
589
|
+
/**
 * Downloads one title's zip, extracts its XML into outputDir, deletes the
 * zip, and returns { titleNumber, filePath, size }.
 *
 * Throws on a non-2xx HTTP response, a missing response body, or an
 * extraction failure. NOTE(review): if a failure occurs after the zip has
 * been written, the partial zip is left behind in outputDir — confirm
 * whether cleanup is desired there.
 */
async function downloadAndExtractTitle(titleNumber, releasePoint, outputDir) {
  const url = buildDownloadUrl(titleNumber, releasePoint);
  const paddedTitle = titleNumber.toString().padStart(2, "0");
  const zipPath = join2(outputDir, `usc${paddedTitle}.zip`);
  const xmlFileName = `usc${paddedTitle}.xml`;
  const xmlPath = join2(outputDir, xmlFileName);
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`HTTP ${response.status}: ${response.statusText} for ${url}`);
  }
  if (!response.body) {
    throw new Error(`No response body for ${url}`);
  }
  // Stream the zip straight to disk rather than buffering it in memory.
  const fileStream = createWriteStream(zipPath);
  await pipeline(Readable.fromWeb(response.body), fileStream);
  await extractXmlFromZip(zipPath, xmlFileName, xmlPath);
  await unlink(zipPath); // the zip is only an intermediate artifact
  const fileStat = await stat(xmlPath);
  return {
    titleNumber,
    filePath: xmlPath,
    size: fileStat.size
  };
}
|
|
613
|
+
/**
 * Extracts a single XML entry from `zipPath` to `outputPath`.
 *
 * Scans entries lazily (yauzl lazyEntries mode) and extracts the FIRST entry
 * whose basename equals `targetFileName` OR whose full name ends in ".xml" —
 * so the target-name check is effectively redundant and any .xml entry
 * matches. NOTE(review): if a zip ever contains multiple .xml files,
 * whichever appears first wins — confirm OLRC per-title zips hold exactly
 * one XML file.
 *
 * Rejects when the zip cannot be opened, the matching entry cannot be
 * read/written, or the scan ends without a match.
 */
function extractXmlFromZip(zipPath, targetFileName, outputPath) {
  return new Promise((resolve, reject) => {
    yauzlOpen(zipPath, { lazyEntries: true }, (err, zipFile) => {
      if (err) {
        reject(new Error(`Failed to open zip: ${err.message}`));
        return;
      }
      if (!zipFile) {
        reject(new Error("Failed to open zip: no zipFile returned"));
        return;
      }
      let found = false;
      zipFile.on("entry", (entry) => {
        // Basename of the entry (entries may carry directory prefixes).
        const fileName = entry.fileName.split("/").pop() ?? entry.fileName;
        if (fileName === targetFileName || entry.fileName.endsWith(`.xml`)) {
          found = true;
          extractEntry(zipFile, entry, outputPath).then(() => {
            zipFile.close();
            resolve();
          }).catch((extractErr) => {
            zipFile.close();
            reject(extractErr);
          });
        } else {
          // Not a match — request the next entry (lazyEntries mode).
          zipFile.readEntry();
        }
      });
      zipFile.on("end", () => {
        // All entries scanned without a match.
        if (!found) {
          reject(new Error(`${targetFileName} not found in zip`));
        }
      });
      zipFile.on("error", (zipErr) => {
        reject(new Error(`Zip error: ${zipErr.message}`));
      });
      zipFile.readEntry(); // kick off the lazy entry scan
    });
  });
}
|
|
652
|
+
/**
 * Streams one zip entry's contents to `outputPath`.
 *
 * Resolves when the write stream emits "finish"; rejects when the entry's
 * read stream cannot be opened or either stream errors mid-transfer.
 */
function extractEntry(zipFile, entry, outputPath) {
  return new Promise((resolve, reject) => {
    zipFile.openReadStream(entry, (err, readStream) => {
      if (err) {
        reject(new Error(`Failed to read zip entry: ${err.message}`));
        return;
      }
      if (!readStream) {
        reject(new Error("No read stream for zip entry"));
        return;
      }
      const writeStream = createWriteStream(outputPath);
      readStream.pipe(writeStream);
      writeStream.on("finish", () => resolve());
      writeStream.on("error", (writeErr) => reject(new Error(`Write error: ${writeErr.message}`)));
      readStream.on("error", (readErr) => reject(new Error(`Read error: ${readErr.message}`)));
    });
  });
}
|
|
671
|
+
// Matches "uscNN.xml" (optionally under any directory prefix), capturing the
// two-digit title number in group 1.
var USC_XML_RE = /^(?:.*\/)?usc(\d{2})\.xml$/;
|
|
672
|
+
/**
 * Extracts every "uscNN.xml" entry from the all-titles zip into `outputDir`.
 *
 * Resolves with [{ titleNumber, filePath }] only after the entry scan has
 * ended AND every in-flight extraction has completed (tracked via the
 * pending/ended bookkeeping below). Non-matching entries are skipped. Any
 * extraction error closes the zip and rejects the whole promise; entries
 * already extracted remain on disk.
 */
function extractAllXmlFromZip(zipPath, outputDir) {
  return new Promise((resolve, reject) => {
    yauzlOpen(zipPath, { lazyEntries: true }, (err, zipFile) => {
      if (err) {
        reject(new Error(`Failed to open zip: ${err.message}`));
        return;
      }
      if (!zipFile) {
        reject(new Error("Failed to open zip: no zipFile returned"));
        return;
      }
      const extracted = [];
      // Resolve only when the "end" event has fired and no extraction is
      // still running.
      let pending = 0;
      let ended = false;
      const maybeResolve = () => {
        if (ended && pending === 0) {
          resolve(extracted);
        }
      };
      zipFile.on("entry", (entry) => {
        const match = USC_XML_RE.exec(entry.fileName);
        if (match) {
          const titleNum = parseInt(match[1] ?? "0", 10);
          const outPath = join2(outputDir, `usc${match[1] ?? "00"}.xml`);
          pending++;
          // Advance the scan only after this entry has been written out.
          extractEntry(zipFile, entry, outPath).then(() => {
            extracted.push({ titleNumber: titleNum, filePath: outPath });
            pending--;
            zipFile.readEntry();
            maybeResolve();
          }).catch((extractErr) => {
            zipFile.close();
            reject(extractErr);
          });
        } else {
          zipFile.readEntry();
        }
      });
      zipFile.on("end", () => {
        ended = true;
        maybeResolve();
      });
      zipFile.on("error", (zipErr) => {
        reject(new Error(`Zip error: ${zipErr.message}`));
      });
      zipFile.readEntry(); // start the lazy entry scan
    });
  });
}
|
|
721
|
+
/**
 * Downloads the single all-titles zip for `releasePoint`, extracts every
 * title XML into `outputDir`, deletes the zip, and returns
 * [{ titleNumber, filePath, size }] sorted by title number.
 *
 * Throws on HTTP or extraction errors; downloadTitles catches this and
 * falls back to per-title downloads.
 */
async function downloadAndExtractAllTitles(releasePoint, outputDir) {
  const url = buildAllTitlesUrl(releasePoint);
  const zipPath = join2(outputDir, "uscAll.zip");
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`HTTP ${response.status}: ${response.statusText} for ${url}`);
  }
  if (!response.body) {
    throw new Error(`No response body for ${url}`);
  }
  // Stream the (large) zip to disk instead of buffering it in memory.
  const fileStream = createWriteStream(zipPath);
  await pipeline(Readable.fromWeb(response.body), fileStream);
  const extracted = await extractAllXmlFromZip(zipPath, outputDir);
  await unlink(zipPath); // the zip is only an intermediate artifact
  const files = [];
  for (const { titleNumber, filePath } of extracted) {
    const fileStat = await stat(filePath);
    files.push({ titleNumber, filePath, size: fileStat.size });
  }
  files.sort((a, b) => a.titleNumber - b.titleNumber);
  return files;
}
|
|
743
|
+
export {
|
|
744
|
+
CURRENT_RELEASE_POINT,
|
|
745
|
+
USC_TITLE_NUMBERS,
|
|
746
|
+
buildAllTitlesUrl,
|
|
747
|
+
buildDownloadUrl,
|
|
748
|
+
convertTitle,
|
|
749
|
+
downloadTitles,
|
|
750
|
+
isAllTitles,
|
|
751
|
+
releasePointToPath
|
|
752
|
+
};
|
|
753
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/converter.ts","../src/downloader.ts"],"sourcesContent":["/**\n * USC Converter — orchestrates the full conversion pipeline for a single USC XML file.\n *\n * Creates a ReadStream → SAX Parser → AST Builder (emit at section) →\n * Markdown Renderer + Frontmatter → File Writer.\n */\n\nimport { createReadStream } from \"node:fs\";\nimport { mkdir, writeFile } from \"node:fs/promises\";\nimport { join, dirname } from \"node:path\";\nimport { basename } from \"node:path\";\nimport {\n XMLParser,\n ASTBuilder,\n renderDocument,\n renderSection,\n generateFrontmatter,\n createLinkResolver,\n FORMAT_VERSION,\n GENERATOR,\n} from \"@lexbuild/core\";\nimport type {\n LevelNode,\n EmitContext,\n FrontmatterData,\n RenderOptions,\n NotesFilter,\n AncestorInfo,\n LinkResolver,\n} from \"@lexbuild/core\";\n\n/** Options for converting a USC XML file */\nexport interface ConvertOptions {\n /** Path to the input XML file */\n input: string;\n /** Output directory root */\n output: string;\n /** Output granularity: \"section\" (one file per section) or \"chapter\" (sections inline) */\n granularity: \"section\" | \"chapter\";\n /** How to render cross-references */\n linkStyle: \"relative\" | \"canonical\" | \"plaintext\";\n /** Include source credits in output */\n includeSourceCredits: boolean;\n /** Include notes in output. True = all notes (default). False = no notes. 
*/\n includeNotes: boolean;\n /** Include editorial notes only (when includeNotes is false) */\n includeEditorialNotes: boolean;\n /** Include statutory notes only (when includeNotes is false) */\n includeStatutoryNotes: boolean;\n /** Include amendment history notes only (when includeNotes is false) */\n includeAmendments: boolean;\n /** Dry-run mode: parse and report structure without writing files */\n dryRun: boolean;\n}\n\n/** Result of a conversion */\nexport interface ConvertResult {\n /** Number of sections written (or that would be written in dry-run) */\n sectionsWritten: number;\n /** Output paths of all written files (empty in dry-run) */\n files: string[];\n /** Title number extracted from metadata */\n titleNumber: string;\n /** Title name extracted from metadata */\n titleName: string;\n /** Whether this was a dry run */\n dryRun: boolean;\n /** Chapter count */\n chapterCount: number;\n /** Estimated total tokens */\n totalTokenEstimate: number;\n /** Peak resident set size in bytes during conversion */\n peakMemoryBytes: number;\n}\n\n/** Default convert options */\nconst DEFAULTS: Omit<ConvertOptions, \"input\" | \"output\"> = {\n granularity: \"section\",\n linkStyle: \"plaintext\",\n includeSourceCredits: true,\n includeNotes: true,\n includeEditorialNotes: false,\n includeStatutoryNotes: false,\n includeAmendments: false,\n dryRun: false,\n};\n\n/** Metadata collected for a written section (used to build _meta.json) */\ninterface SectionMeta {\n identifier: string;\n number: string;\n name: string;\n /** Filename only (e.g., \"section-3598.md\" or \"section-3598-2.md\" for duplicates) */\n fileName: string;\n /** File path relative to the title directory (e.g., \"chapter-01/section-1.md\") */\n relativeFile: string;\n /** Content length in characters (for token estimation) */\n contentLength: number;\n hasNotes: boolean;\n status: string;\n /** Chapter identifier this section belongs to */\n chapterIdentifier: string;\n chapterNumber: string;\n 
chapterName: string;\n}\n\n/** A collected section ready to be written */\ninterface CollectedSection {\n node: LevelNode;\n context: EmitContext;\n}\n\n/**\n * Convert a single USC XML file to section-level Markdown files.\n */\nexport async function convertTitle(options: ConvertOptions): Promise<ConvertResult> {\n const opts = { ...DEFAULTS, ...options };\n const files: string[] = [];\n let peakMemory = process.memoryUsage.rss();\n\n // Collect emitted nodes during parsing (synchronous), write after parsing completes\n const collected: CollectedSection[] = [];\n\n // Set up the AST builder — emit level depends on granularity\n const emitAt = opts.granularity === \"chapter\" ? (\"chapter\" as const) : (\"section\" as const);\n const builder = new ASTBuilder({\n emitAt,\n onEmit: (node, context) => {\n collected.push({ node, context });\n },\n });\n\n // Set up the XML parser\n const parser = new XMLParser();\n parser.on(\"openElement\", (name, attrs) => builder.onOpenElement(name, attrs));\n parser.on(\"closeElement\", (name) => builder.onCloseElement(name));\n parser.on(\"text\", (text) => builder.onText(text));\n\n // Parse the XML file\n const stream = createReadStream(opts.input, \"utf-8\");\n await parser.parseStream(stream);\n peakMemory = Math.max(peakMemory, process.memoryUsage.rss());\n\n const sectionMetas: SectionMeta[] = [];\n const meta = builder.getDocumentMeta();\n\n if (opts.dryRun) {\n // Dry-run: collect metadata without writing files\n for (const { node, context } of collected) {\n if (opts.granularity === \"chapter\") {\n // Extract section metadata from chapter children\n for (const child of node.children) {\n if (child.type === \"level\" && child.levelType === \"section\") {\n sectionMetas.push(buildSectionMetaDryRun(child, node, context));\n }\n }\n } else {\n if (node.numValue) {\n sectionMetas.push(buildSectionMetaDryRun(node, null, context));\n }\n }\n }\n } else if (opts.granularity === \"chapter\") {\n // Chapter-level: each emitted 
node is a chapter containing sections\n for (const { node, context } of collected) {\n const result = await writeChapter(node, context, opts);\n if (result) {\n files.push(result.filePath);\n for (const m of result.sectionMetas) {\n sectionMetas.push(m);\n }\n }\n }\n } else {\n // Section-level with relative links: need two-pass for link resolver\n // Track duplicate section numbers per chapter to disambiguate filenames\n const sectionCounts = new Map<string, number>();\n const suffixes: (string | undefined)[] = [];\n for (const { node, context } of collected) {\n const sectionNum = node.numValue;\n if (!sectionNum) {\n suffixes.push(undefined);\n continue;\n }\n const chapterDir = buildChapterDir(context) ?? \"__root__\";\n const key = `${chapterDir}/${sectionNum}`;\n const count = (sectionCounts.get(key) ?? 0) + 1;\n sectionCounts.set(key, count);\n suffixes.push(count > 1 ? `-${count}` : undefined);\n }\n\n const linkResolver = createLinkResolver();\n for (const [i, { node, context }] of collected.entries()) {\n const sectionNum = node.numValue;\n if (sectionNum && node.identifier) {\n const filePath = buildOutputPath(context, sectionNum, opts.output, suffixes[i]);\n // For duplicates, register with the XML element @id to disambiguate\n const regId = suffixes[i] ? `${node.identifier}#${suffixes[i]}` : node.identifier;\n linkResolver.register(regId, filePath);\n // Always register the first occurrence under the canonical identifier\n if (!suffixes[i]) {\n linkResolver.register(node.identifier, filePath);\n }\n }\n }\n\n for (const [i, { node, context }] of collected.entries()) {\n const result = await writeSection(node, context, opts, linkResolver, suffixes[i]);\n if (result) {\n files.push(result.filePath);\n sectionMetas.push(result.meta);\n }\n }\n }\n\n // Extract the title heading from the first collected section's ancestors\n const firstCollected = collected[0];\n const titleHeading = firstCollected\n ? 
findAncestor(firstCollected.context.ancestors, \"title\")?.heading?.trim()\n : undefined;\n\n // Generate _meta.json and README.md files (skip in dry-run)\n if (!opts.dryRun) {\n await writeMetaFiles(sectionMetas, meta, opts, titleHeading);\n }\n\n // Final memory sample\n peakMemory = Math.max(peakMemory, process.memoryUsage.rss());\n\n // Compute stats\n const chapterIds = new Set(sectionMetas.map((s) => s.chapterIdentifier));\n const totalTokens = sectionMetas.reduce((sum, s) => sum + Math.ceil(s.contentLength / 4), 0);\n\n return {\n sectionsWritten: opts.dryRun ? sectionMetas.length : files.length,\n files,\n titleNumber: meta.docNumber ?? \"unknown\",\n titleName: titleHeading ?? meta.dcTitle ?? \"Unknown Title\",\n dryRun: opts.dryRun,\n chapterCount: chapterIds.size,\n totalTokenEstimate: totalTokens,\n peakMemoryBytes: peakMemory,\n };\n}\n\n/** Result of writing a single section */\ninterface WriteSectionResult {\n filePath: string;\n meta: SectionMeta;\n}\n\n/**\n * Write a single section to disk.\n * Returns the file path and metadata, or null if the section was skipped.\n */\nasync function writeSection(\n node: LevelNode,\n context: EmitContext,\n options: ConvertOptions,\n linkResolver?: LinkResolver | undefined,\n /** Disambiguation suffix for duplicate section numbers (e.g., \"-2\") */\n dupSuffix?: string | undefined,\n): Promise<WriteSectionResult | null> {\n const sectionNum = node.numValue;\n if (!sectionNum) return null;\n\n // Build the output file path (with optional duplicate suffix)\n const filePath = buildOutputPath(context, sectionNum, options.output, dupSuffix);\n\n // Build frontmatter data\n const frontmatter = buildFrontmatter(node, context);\n\n // Build notes filter\n const notesFilter = buildNotesFilter(options);\n\n // Build render options with link resolver for relative links\n const renderOpts: RenderOptions = {\n headingOffset: 0,\n linkStyle: options.linkStyle,\n resolveLink: linkResolver\n ? 
(identifier: string) => linkResolver.resolve(identifier, filePath)\n : undefined,\n notesFilter,\n };\n\n // Optionally strip source credits\n const sectionNode = options.includeSourceCredits ? node : stripSourceCredits(node);\n\n // Render the document\n const markdown = renderDocument(sectionNode, frontmatter, renderOpts);\n\n // Ensure the directory exists and write the file\n await mkdir(dirname(filePath), { recursive: true });\n await writeFile(filePath, markdown, \"utf-8\");\n\n // Collect metadata\n const titleNum = findAncestor(context.ancestors, \"title\")?.numValue ?? \"0\";\n const chapterAncestor = findAncestor(context.ancestors, \"chapter\");\n const chapterDir = chapterAncestor?.numValue ? `chapter-${padTwo(chapterAncestor.numValue)}` : \"\";\n const sectionFileName = `section-${sectionNum}${dupSuffix ?? \"\"}.md`;\n const relativeFile = chapterDir ? `${chapterDir}/${sectionFileName}` : sectionFileName;\n\n const hasNotes = node.children.some((c) => c.type === \"notesContainer\" || c.type === \"note\");\n\n const sectionMeta: SectionMeta = {\n identifier: node.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,\n number: sectionNum,\n name: node.heading?.trim() ?? \"\",\n fileName: sectionFileName,\n relativeFile,\n contentLength: markdown.length,\n hasNotes,\n status: node.status ?? \"current\",\n chapterIdentifier: chapterAncestor?.identifier ?? \"\",\n chapterNumber: chapterAncestor?.numValue ?? \"0\",\n chapterName: chapterAncestor?.heading?.trim() ?? 
\"\",\n };\n\n return { filePath, meta: sectionMeta };\n}\n\n/**\n * Build the output file path for a section.\n *\n * Format: {output}/usc/title-{NN}/chapter-{NN}/section-{N}.md\n */\n/**\n * Generate _meta.json files at title and chapter levels.\n */\nasync function writeMetaFiles(\n sectionMetas: SectionMeta[],\n docMeta: {\n dcTitle?: string | undefined;\n docNumber?: string | undefined;\n positivelaw?: boolean | undefined;\n docPublicationName?: string | undefined;\n created?: string | undefined;\n identifier?: string | undefined;\n },\n options: ConvertOptions,\n titleHeading?: string | undefined,\n): Promise<void> {\n if (sectionMetas.length === 0) return;\n\n const docNum = docMeta.docNumber ?? \"0\";\n const titleDirName = buildTitleDirFromDocNumber(docNum);\n const titleDir = join(options.output, \"usc\", titleDirName);\n const currency = parseCurrency(docMeta.docPublicationName ?? \"\");\n\n // Group sections by chapter\n const chapterMap = new Map<string, SectionMeta[]>();\n for (const sm of sectionMetas) {\n const key = sm.chapterIdentifier || \"__no_chapter__\";\n let arr = chapterMap.get(key);\n if (!arr) {\n arr = [];\n chapterMap.set(key, arr);\n }\n arr.push(sm);\n }\n\n // Write chapter-level _meta.json files\n const chapterEntries: Array<{\n identifier: string;\n number: number;\n name: string;\n directory: string;\n sections: Array<{\n identifier: string;\n number: string;\n name: string;\n file: string;\n token_estimate: number;\n has_notes: boolean;\n status: string;\n }>;\n }> = [];\n\n for (const [chapterId, chapterSections] of chapterMap) {\n if (chapterId === \"__no_chapter__\") continue;\n\n const first = chapterSections[0];\n if (!first) continue;\n\n const chapterDir = `chapter-${padTwo(first.chapterNumber)}`;\n\n const sections = chapterSections.map((sm) => ({\n identifier: sm.identifier,\n number: sm.number,\n name: sm.name,\n file: sm.fileName,\n token_estimate: Math.ceil(sm.contentLength / 4),\n has_notes: sm.hasNotes,\n status: 
sm.status,\n }));\n\n const chapterMeta = {\n format_version: FORMAT_VERSION,\n identifier: chapterId,\n chapter_number: parseIntSafe(first.chapterNumber),\n chapter_name: first.chapterName,\n title_number: parseIntSafe(docNum),\n section_count: sections.length,\n sections,\n };\n\n const chapterMetaPath = join(titleDir, chapterDir, \"_meta.json\");\n await mkdir(dirname(chapterMetaPath), { recursive: true });\n await writeFile(chapterMetaPath, JSON.stringify(chapterMeta, null, 2) + \"\\n\", \"utf-8\");\n\n chapterEntries.push({\n identifier: chapterId,\n number: parseIntSafe(first.chapterNumber),\n name: first.chapterName,\n directory: chapterDir,\n sections,\n });\n }\n\n // Write title-level _meta.json\n const totalTokens = sectionMetas.reduce((sum, sm) => sum + Math.ceil(sm.contentLength / 4), 0);\n\n const titleMeta = {\n format_version: FORMAT_VERSION,\n generator: GENERATOR,\n generated_at: new Date().toISOString(),\n identifier: docMeta.identifier ?? `/us/usc/t${docNum}`,\n title_number: parseIntSafe(docNum),\n title_name: titleHeading ?? docMeta.dcTitle ?? \"\",\n positive_law: docMeta.positivelaw ?? 
false,\n currency,\n source_xml: basename(options.input),\n granularity: options.granularity,\n stats: {\n chapter_count: chapterEntries.length,\n section_count: sectionMetas.length,\n total_files: sectionMetas.length,\n total_tokens_estimate: totalTokens,\n },\n chapters: chapterEntries,\n };\n\n const titleMetaPath = join(titleDir, \"_meta.json\");\n await mkdir(dirname(titleMetaPath), { recursive: true });\n await writeFile(titleMetaPath, JSON.stringify(titleMeta, null, 2) + \"\\n\", \"utf-8\");\n\n // Write title-level README.md\n const readmePath = join(titleDir, \"README.md\");\n const readme = generateTitleReadme(titleMeta);\n await writeFile(readmePath, readme, \"utf-8\");\n}\n\n/**\n * Generate a human-readable README.md for a title output directory.\n */\nfunction generateTitleReadme(meta: {\n title_number: number;\n title_name: string;\n positive_law: boolean;\n currency: string;\n granularity: string;\n stats: {\n chapter_count: number;\n section_count: number;\n total_tokens_estimate: number;\n };\n chapters: Array<{\n number: number;\n name: string;\n directory: string;\n sections: Array<{\n number: string;\n name: string;\n file: string;\n status: string;\n }>;\n }>;\n}): string {\n const lines: string[] = [];\n\n lines.push(`# Title ${meta.title_number} — ${meta.title_name}`);\n lines.push(\"\");\n lines.push(`| | |`);\n lines.push(`| --- | --- |`);\n lines.push(`| **Positive Law** | ${meta.positive_law ? \"Yes\" : \"No\"} |`);\n lines.push(`| **Currency** | ${meta.currency || \"unknown\"} |`);\n lines.push(`| **Chapters** | ${meta.stats.chapter_count} |`);\n lines.push(`| **Sections** | ${meta.stats.section_count.toLocaleString()} |`);\n lines.push(`| **Est. 
Tokens** | ${meta.stats.total_tokens_estimate.toLocaleString()} |`);\n lines.push(`| **Granularity** | ${meta.granularity} |`);\n lines.push(\"\");\n\n // Chapter listing\n lines.push(\"## Chapters\");\n lines.push(\"\");\n\n for (const ch of meta.chapters) {\n const sectionCount = ch.sections.length;\n lines.push(`### Chapter ${ch.number} — ${ch.name}`);\n lines.push(\"\");\n lines.push(\n `${sectionCount} section${sectionCount !== 1 ? \"s\" : \"\"} · [${ch.directory}/](${ch.directory}/)`,\n );\n lines.push(\"\");\n }\n\n // Footer\n lines.push(\"---\");\n lines.push(\"\");\n lines.push(`Generated by [lexbuild](https://github.com/chris-c-thomas/lexbuild)`);\n lines.push(\"\");\n\n return lines.join(\"\\n\");\n}\n\n/** Result of writing a chapter file */\ninterface WriteChapterResult {\n filePath: string;\n sectionMetas: SectionMeta[];\n}\n\n/**\n * Write a chapter-level file (all sections inlined).\n * The emitted node is a chapter LevelNode whose children include section LevelNodes.\n */\nasync function writeChapter(\n chapterNode: LevelNode,\n context: EmitContext,\n options: ConvertOptions,\n): Promise<WriteChapterResult | null> {\n const chapterNum = chapterNode.numValue;\n if (!chapterNum) return null;\n\n const titleNum = findAncestor(context.ancestors, \"title\")?.numValue ?? \"0\";\n const titleDir = `title-${padTwo(titleNum)}`;\n const chapterFile = `chapter-${padTwo(chapterNum)}.md`;\n const filePath = join(options.output, \"usc\", titleDir, chapterFile);\n\n // Build chapter-level frontmatter\n const titleAncestor = findAncestor(context.ancestors, \"title\");\n const meta = context.documentMeta;\n const chapterName = chapterNode.heading?.trim() ?? \"\";\n const titleName = titleAncestor?.heading?.trim() ?? meta.dcTitle ?? \"\";\n const currency = parseCurrency(meta.docPublicationName ?? \"\");\n const lastUpdated = parseDate(meta.created ?? \"\");\n\n const fmData: FrontmatterData = {\n identifier: chapterNode.identifier ?? 
`/us/usc/t${titleNum}/ch${chapterNum}`,\n title: `${titleNum} USC Chapter ${chapterNum} - ${chapterName}`,\n title_number: parseIntSafe(titleNum),\n title_name: titleName,\n section_number: chapterNum,\n section_name: chapterName,\n chapter_number: parseIntSafe(chapterNum),\n chapter_name: chapterName,\n positive_law: meta.positivelaw ?? false,\n currency,\n last_updated: lastUpdated,\n };\n\n const notesFilter = buildNotesFilter(options);\n const renderOpts: RenderOptions = {\n headingOffset: 0,\n linkStyle: options.linkStyle,\n notesFilter,\n };\n\n // Build the chapter Markdown: heading + each section rendered with H2\n const parts: string[] = [];\n parts.push(generateFrontmatter(fmData));\n parts.push(\"\");\n parts.push(`# Chapter ${chapterNum} — ${chapterName}`);\n\n // Collect section metas and render each section\n const sectionMetas: SectionMeta[] = [];\n\n for (const child of chapterNode.children) {\n if (child.type === \"level\" && child.levelType === \"section\") {\n const sectionOpts: RenderOptions = { ...renderOpts, headingOffset: 1 };\n const sectionNode = options.includeSourceCredits ? child : stripSourceCredits(child);\n const sectionMd = renderSection(sectionNode, sectionOpts);\n parts.push(\"\");\n parts.push(sectionMd);\n\n // Collect section metadata\n const sectionNum = child.numValue ?? \"0\";\n const hasNotes = child.children.some((c) => c.type === \"notesContainer\" || c.type === \"note\");\n sectionMetas.push({\n identifier: child.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,\n number: sectionNum,\n name: child.heading?.trim() ?? \"\",\n fileName: `section-${sectionNum}.md`,\n relativeFile: chapterFile,\n contentLength: sectionMd.length,\n hasNotes,\n status: child.status ?? \"current\",\n chapterIdentifier: chapterNode.identifier ?? 
\"\",\n chapterNumber: chapterNum,\n chapterName,\n });\n }\n }\n\n const markdown = parts.join(\"\\n\") + \"\\n\";\n\n await mkdir(dirname(filePath), { recursive: true });\n await writeFile(filePath, markdown, \"utf-8\");\n\n return { filePath, sectionMetas };\n}\n\nfunction buildOutputPath(\n context: EmitContext,\n sectionNum: string,\n outputRoot: string,\n /** Disambiguation suffix for duplicate section numbers (e.g., \"-2\") */\n dupSuffix?: string | undefined,\n): string {\n const titleDir = buildTitleDir(context);\n const chapterDir = buildChapterDir(context);\n const sectionFile = `section-${sectionNum}${dupSuffix ?? \"\"}.md`;\n\n if (chapterDir) {\n return join(outputRoot, \"usc\", titleDir, chapterDir, sectionFile);\n }\n\n return join(outputRoot, \"usc\", titleDir, sectionFile);\n}\n\n/**\n * Build the title directory name from context.\n * Handles appendix titles: docNumber \"5a\" → \"title-05-appendix\"\n */\nfunction buildTitleDir(context: EmitContext): string {\n // Check for appendix via docNumber (e.g., \"5a\", \"11a\")\n const docNum = context.documentMeta.docNumber ?? \"\";\n const appendixMatch = /^(\\d+)a$/i.exec(docNum);\n if (appendixMatch?.[1]) {\n return `title-${padTwo(appendixMatch[1])}-appendix`;\n }\n\n // Check for appendix ancestor\n const appendixAncestor = findAncestor(context.ancestors, \"appendix\");\n if (appendixAncestor) {\n const num = appendixAncestor.numValue ?? docNum;\n const numericPart = /^(\\d+)/.exec(num);\n if (numericPart?.[1]) {\n return `title-${padTwo(numericPart[1])}-appendix`;\n }\n }\n\n // Normal title\n const titleNum = findAncestor(context.ancestors, \"title\")?.numValue ?? 
\"00\";\n return `title-${padTwo(titleNum)}`;\n}\n\n/**\n * Build the chapter directory name from context.\n * Handles chapter equivalents: compiledAct, reorganizationPlan.\n */\nfunction buildChapterDir(context: EmitContext): string | undefined {\n // Standard chapter\n const chapterNum = findAncestor(context.ancestors, \"chapter\")?.numValue;\n if (chapterNum) return `chapter-${padTwo(chapterNum)}`;\n\n // Compiled act as chapter equivalent\n const compiledAct = findAncestor(context.ancestors, \"compiledAct\");\n if (compiledAct) {\n const heading = compiledAct.heading?.trim() ?? \"\";\n // Use a slug of the heading as directory name\n const slug = heading\n .toLowerCase()\n .replace(/[^a-z0-9]+/g, \"-\")\n .replace(/^-|-$/g, \"\")\n .slice(0, 50);\n return slug || \"compiled-act\";\n }\n\n // Reorganization plan as chapter equivalent\n const reorgPlan = findAncestor(context.ancestors, \"reorganizationPlan\");\n if (reorgPlan) {\n const heading = reorgPlan.heading?.trim() ?? \"\";\n const slug = heading\n .toLowerCase()\n .replace(/[^a-z0-9]+/g, \"-\")\n .replace(/^-|-$/g, \"\")\n .slice(0, 50);\n return slug || \"reorganization-plan\";\n }\n\n // Reorganization plans container\n const reorgPlans = findAncestor(context.ancestors, \"reorganizationPlans\");\n if (reorgPlans) {\n return \"reorganization-plans\";\n }\n\n return undefined;\n}\n\n/**\n * Build FrontmatterData from the emitted section node and context.\n */\nfunction buildFrontmatter(node: LevelNode, context: EmitContext): FrontmatterData {\n const meta = context.documentMeta;\n const titleAncestor =\n findAncestor(context.ancestors, \"title\") ?? 
findAncestor(context.ancestors, \"appendix\");\n const chapterAncestor =\n findAncestor(context.ancestors, \"chapter\") ??\n findAncestor(context.ancestors, \"compiledAct\") ??\n findAncestor(context.ancestors, \"reorganizationPlan\");\n const subchapterAncestor = findAncestor(context.ancestors, \"subchapter\");\n const partAncestor = findAncestor(context.ancestors, \"part\");\n\n const docNum = meta.docNumber ?? titleAncestor?.numValue ?? \"0\";\n const titleNum = parseIntSafe(docNum.replace(/a$/i, \"\"));\n const sectionNum = node.numValue ?? \"0\";\n const sectionName = node.heading?.trim() ?? \"\";\n const titleName = titleAncestor?.heading?.trim() ?? meta.dcTitle ?? \"\";\n\n // Build the human-readable title: \"1 USC § 1 - Section Name\"\n const displayTitle = `${titleNum} USC § ${sectionNum} - ${sectionName}`;\n\n // Extract source credit text from the section's children\n const sourceCredit = extractSourceCreditText(node);\n\n // Parse currency from docPublicationName (e.g., \"Online@119-73not60\" → \"119-73\")\n const currency = parseCurrency(meta.docPublicationName ?? \"\");\n\n // Parse last_updated from created timestamp\n const lastUpdated = parseDate(meta.created ?? \"\");\n\n const fm: FrontmatterData = {\n identifier: node.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,\n title: displayTitle,\n title_number: titleNum,\n title_name: titleName,\n section_number: sectionNum,\n section_name: sectionName,\n positive_law: meta.positivelaw ?? 
false,\n currency,\n last_updated: lastUpdated,\n };\n\n if (chapterAncestor?.numValue) {\n fm.chapter_number = parseIntSafe(chapterAncestor.numValue);\n }\n if (chapterAncestor?.heading) {\n fm.chapter_name = chapterAncestor.heading.trim();\n }\n if (subchapterAncestor?.numValue) {\n fm.subchapter_number = subchapterAncestor.numValue;\n }\n if (subchapterAncestor?.heading) {\n fm.subchapter_name = subchapterAncestor.heading.trim();\n }\n if (partAncestor?.numValue) {\n fm.part_number = partAncestor.numValue;\n }\n if (partAncestor?.heading) {\n fm.part_name = partAncestor.heading.trim();\n }\n if (sourceCredit) {\n fm.source_credit = sourceCredit;\n }\n if (node.status) {\n fm.status = node.status;\n }\n\n return fm;\n}\n\n// ---------------------------------------------------------------------------\n// Utility functions\n// ---------------------------------------------------------------------------\n\n/**\n * Find an ancestor by level type.\n */\n/**\n * Build a NotesFilter from convert options.\n * Returns undefined if all notes should be included (default).\n */\n/**\n * Build SectionMeta from AST node without rendering (for dry-run mode).\n */\nfunction buildSectionMetaDryRun(\n sectionNode: LevelNode,\n chapterNode: LevelNode | null,\n context: EmitContext,\n): SectionMeta {\n const titleNum = findAncestor(context.ancestors, \"title\")?.numValue ?? \"0\";\n const chapterAncestor = chapterNode\n ? {\n numValue: chapterNode.numValue,\n heading: chapterNode.heading,\n identifier: chapterNode.identifier,\n }\n : findAncestor(context.ancestors, \"chapter\");\n const sectionNum = sectionNode.numValue ?? \"0\";\n const chapterNum = chapterAncestor?.numValue ?? \"0\";\n const chapterDir = chapterNum !== \"0\" ? 
`chapter-${padTwo(chapterNum)}` : \"\";\n\n const hasNotes = sectionNode.children.some(\n (c) => c.type === \"notesContainer\" || c.type === \"note\",\n );\n\n // Rough content length estimate from AST text nodes\n let contentLength = 0;\n const walk = (node: {\n children?: readonly { text?: string | undefined; children?: readonly unknown[] }[] | undefined;\n text?: string | undefined;\n }): void => {\n if (node.text) contentLength += node.text.length;\n if (node.children) {\n for (const child of node.children) {\n walk(child as typeof node);\n }\n }\n };\n walk(sectionNode as unknown as Parameters<typeof walk>[0]);\n\n const sectionFileName = `section-${sectionNum}.md`;\n return {\n identifier: sectionNode.identifier ?? `/us/usc/t${titleNum}/s${sectionNum}`,\n number: sectionNum,\n name: sectionNode.heading?.trim() ?? \"\",\n fileName: sectionFileName,\n relativeFile: chapterDir ? `${chapterDir}/${sectionFileName}` : sectionFileName,\n contentLength,\n hasNotes,\n status: sectionNode.status ?? \"current\",\n chapterIdentifier: chapterAncestor?.identifier ?? \"\",\n chapterNumber: chapterNum,\n chapterName: chapterAncestor?.heading?.trim() ?? 
\"\",\n };\n}\n\nfunction buildNotesFilter(options: ConvertOptions): NotesFilter | undefined {\n // Default: include all notes\n if (options.includeNotes) return undefined;\n\n // No notes at all\n if (\n !options.includeEditorialNotes &&\n !options.includeStatutoryNotes &&\n !options.includeAmendments\n ) {\n return { editorial: false, statutory: false, amendments: false };\n }\n\n // Selective inclusion\n return {\n editorial: options.includeEditorialNotes,\n statutory: options.includeStatutoryNotes,\n amendments: options.includeAmendments,\n };\n}\n\nfunction findAncestor(\n ancestors: readonly AncestorInfo[],\n levelType: string,\n): AncestorInfo | undefined {\n return ancestors.find((a) => a.levelType === levelType);\n}\n\n/**\n * Zero-pad a number string to 2 digits.\n */\n/**\n * Build title directory name from docNumber.\n * \"5\" → \"title-05\", \"5a\" → \"title-05-appendix\"\n */\nfunction buildTitleDirFromDocNumber(docNum: string): string {\n const appendixMatch = /^(\\d+)a$/i.exec(docNum);\n if (appendixMatch?.[1]) {\n return `title-${padTwo(appendixMatch[1])}-appendix`;\n }\n return `title-${padTwo(docNum)}`;\n}\n\nfunction padTwo(num: string): string {\n const n = parseInt(num, 10);\n if (isNaN(n)) return num;\n return n.toString().padStart(2, \"0\");\n}\n\n/**\n * Parse an integer safely, returning 0 if invalid.\n */\nfunction parseIntSafe(s: string): number {\n const n = parseInt(s, 10);\n return isNaN(n) ? 
0 : n;\n}\n\n/**\n * Extract source credit plain text from a section node's children.\n */\nfunction extractSourceCreditText(node: LevelNode): string | undefined {\n for (const child of node.children) {\n if (child.type === \"sourceCredit\") {\n return child.children.map((inline) => inlineToText(inline)).join(\"\");\n }\n }\n return undefined;\n}\n\n/**\n * Recursively extract plain text from an InlineNode.\n */\nfunction inlineToText(node: {\n readonly type: \"inline\";\n text?: string | undefined;\n children?: readonly { readonly type: \"inline\"; text?: string | undefined }[] | undefined;\n}): string {\n if (node.text) return node.text;\n if (node.children) {\n return node.children.map((c) => c.text ?? \"\").join(\"\");\n }\n return \"\";\n}\n\n/**\n * Parse currency/release point from docPublicationName.\n * Example: \"Online@119-73not60\" → \"119-73\"\n */\nfunction parseCurrency(pubName: string): string {\n // Try to extract the release point pattern (e.g., \"119-73\")\n const match = /(\\d+-\\d+)/.exec(pubName);\n if (match?.[1]) return match[1];\n return pubName || \"unknown\";\n}\n\n/**\n * Parse a date string to ISO date format (YYYY-MM-DD).\n */\nfunction parseDate(dateStr: string): string {\n if (!dateStr) return \"unknown\";\n // Handle ISO timestamp: \"2025-12-03T10:11:39\" → \"2025-12-03\"\n const datePart = dateStr.split(\"T\")[0];\n return datePart ?? dateStr;\n}\n\n/**\n * Create a copy of a section node with source credit children removed.\n */\nfunction stripSourceCredits(node: LevelNode): LevelNode {\n return {\n ...node,\n children: node.children.filter((c) => c.type !== \"sourceCredit\"),\n };\n}\n","/**\n * OLRC U.S. 
Code XML downloader.\n *\n * Downloads USC title XML zip files from the Office of the Law Revision Counsel\n * and extracts them to a local directory.\n */\n\nimport { createWriteStream } from \"node:fs\";\nimport { mkdir, stat, unlink } from \"node:fs/promises\";\nimport { join } from \"node:path\";\nimport { pipeline } from \"node:stream/promises\";\nimport { Readable } from \"node:stream\";\nimport { open as yauzlOpen } from \"yauzl\";\nimport type { ZipFile, Entry } from \"yauzl\";\n\n// ---------------------------------------------------------------------------\n// Release point configuration\n// ---------------------------------------------------------------------------\n\n/**\n * Current OLRC release point.\n *\n * Update this value when OLRC publishes a new release point.\n * The release point appears in download URLs and identifies which\n * public laws are incorporated. Format: \"{congress}-{law}[not{excluded}]\"\n *\n * Check https://uscode.house.gov/download/download.shtml for the latest.\n */\nexport const CURRENT_RELEASE_POINT = \"119-73not60\";\n\n/** OLRC base URL for release point downloads */\nconst OLRC_BASE_URL = \"https://uscode.house.gov/download/releasepoints/us/pl\";\n\n/** Valid USC title numbers (1-54) */\nexport const USC_TITLE_NUMBERS = Array.from({ length: 54 }, (_, i) => i + 1);\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Check whether a list of title numbers covers all 54 USC titles.\n *\n * Handles arbitrary ordering and duplicates.\n */\nexport function isAllTitles(titles: number[]): boolean {\n const unique = new Set(titles);\n return unique.size === 54 && USC_TITLE_NUMBERS.every((n) => unique.has(n));\n}\n\n// ---------------------------------------------------------------------------\n// Public API\n// ---------------------------------------------------------------------------\n\n/** Options 
for downloading USC XML files */\nexport interface DownloadOptions {\n /** Directory to save downloaded XML files */\n outputDir: string;\n /** Specific title numbers to download, or undefined for all */\n titles?: number[] | undefined;\n /** Release point override (default: CURRENT_RELEASE_POINT) */\n releasePoint?: string | undefined;\n}\n\n/** Result of a download operation */\nexport interface DownloadResult {\n /** Release point used */\n releasePoint: string;\n /** Files successfully downloaded and extracted */\n files: DownloadedFile[];\n /** Titles that failed to download */\n errors: DownloadError[];\n}\n\n/** A successfully downloaded file */\nexport interface DownloadedFile {\n /** Title number */\n titleNumber: number;\n /** Path to the extracted XML file */\n filePath: string;\n /** Size in bytes */\n size: number;\n}\n\n/** A failed download */\nexport interface DownloadError {\n /** Title number */\n titleNumber: number;\n /** Error message */\n message: string;\n}\n\n/**\n * Download USC title XML files from OLRC.\n *\n * When all 54 titles are requested, uses the bulk `uscAll` zip for a single\n * HTTP round-trip instead of 54 individual requests. Falls back to per-title\n * downloads if the bulk download fails.\n */\nexport async function downloadTitles(options: DownloadOptions): Promise<DownloadResult> {\n const releasePoint = options.releasePoint ?? CURRENT_RELEASE_POINT;\n const titles = options.titles ?? 
USC_TITLE_NUMBERS;\n\n await mkdir(options.outputDir, { recursive: true });\n\n // Use bulk zip when all 54 titles are requested\n if (options.titles === undefined || isAllTitles(titles)) {\n try {\n const files = await downloadAndExtractAllTitles(releasePoint, options.outputDir);\n return { releasePoint, files, errors: [] };\n } catch {\n // Fall back to per-title downloads\n }\n }\n\n const files: DownloadedFile[] = [];\n const errors: DownloadError[] = [];\n\n for (const titleNum of titles) {\n try {\n const file = await downloadAndExtractTitle(titleNum, releasePoint, options.outputDir);\n files.push(file);\n } catch (err) {\n errors.push({\n titleNumber: titleNum,\n message: err instanceof Error ? err.message : String(err),\n });\n }\n }\n\n return { releasePoint, files, errors };\n}\n\n// ---------------------------------------------------------------------------\n// URL construction\n// ---------------------------------------------------------------------------\n\n/**\n * Build the download URL for a single title's XML zip.\n *\n * Format: {base}/pl/{releasePointPath}/xml_usc{NN}@{releasePoint}.zip\n *\n * The release point path splits the release point into directory segments.\n * For \"119-73not60\", the path is \"119/73not60\".\n */\nexport function buildDownloadUrl(titleNumber: number, releasePoint: string): string {\n const paddedTitle = titleNumber.toString().padStart(2, \"0\");\n const rpPath = releasePointToPath(releasePoint);\n return `${OLRC_BASE_URL}/${rpPath}/xml_usc${paddedTitle}@${releasePoint}.zip`;\n}\n\n/**\n * Build the download URL for all titles in a single zip.\n */\nexport function buildAllTitlesUrl(releasePoint: string): string {\n const rpPath = releasePointToPath(releasePoint);\n return `${OLRC_BASE_URL}/${rpPath}/xml_uscAll@${releasePoint}.zip`;\n}\n\n/**\n * Convert a release point string to a URL path segment.\n * \"119-73not60\" → \"119/73not60\"\n * \"119-43\" → \"119/43\"\n */\nexport function releasePointToPath(releasePoint: 
string): string {\n // Split on the first hyphen only\n const dashIndex = releasePoint.indexOf(\"-\");\n if (dashIndex === -1) return releasePoint;\n return `${releasePoint.slice(0, dashIndex)}/${releasePoint.slice(dashIndex + 1)}`;\n}\n\n// ---------------------------------------------------------------------------\n// Download and extraction\n// ---------------------------------------------------------------------------\n\n/**\n * Download a single title's zip and extract the XML file.\n */\nasync function downloadAndExtractTitle(\n titleNumber: number,\n releasePoint: string,\n outputDir: string,\n): Promise<DownloadedFile> {\n const url = buildDownloadUrl(titleNumber, releasePoint);\n const paddedTitle = titleNumber.toString().padStart(2, \"0\");\n const zipPath = join(outputDir, `usc${paddedTitle}.zip`);\n const xmlFileName = `usc${paddedTitle}.xml`;\n const xmlPath = join(outputDir, xmlFileName);\n\n // Download the zip file\n const response = await fetch(url);\n if (!response.ok) {\n throw new Error(`HTTP ${response.status}: ${response.statusText} for ${url}`);\n }\n\n if (!response.body) {\n throw new Error(`No response body for ${url}`);\n }\n\n // Write zip to disk\n const fileStream = createWriteStream(zipPath);\n // ReadableStream type mismatch between DOM and Node — cast to `never` to bridge\n await pipeline(Readable.fromWeb(response.body as never), fileStream);\n\n // Extract XML from zip\n await extractXmlFromZip(zipPath, xmlFileName, xmlPath);\n\n // Clean up zip file\n await unlink(zipPath);\n\n // Get file size\n const fileStat = await stat(xmlPath);\n\n return {\n titleNumber,\n filePath: xmlPath,\n size: fileStat.size,\n };\n}\n\n/**\n * Extract a specific XML file from a zip archive.\n */\nfunction extractXmlFromZip(\n zipPath: string,\n targetFileName: string,\n outputPath: string,\n): Promise<void> {\n return new Promise((resolve, reject) => {\n yauzlOpen(zipPath, { lazyEntries: true }, (err, zipFile) => {\n if (err) {\n reject(new 
Error(`Failed to open zip: ${err.message}`));\n return;\n }\n if (!zipFile) {\n reject(new Error(\"Failed to open zip: no zipFile returned\"));\n return;\n }\n\n let found = false;\n\n zipFile.on(\"entry\", (entry: Entry) => {\n // Look for the target XML file (might be at root or in a subdirectory)\n const fileName = entry.fileName.split(\"/\").pop() ?? entry.fileName;\n if (fileName === targetFileName || entry.fileName.endsWith(`.xml`)) {\n found = true;\n extractEntry(zipFile, entry, outputPath)\n .then(() => {\n zipFile.close();\n resolve();\n })\n .catch((extractErr) => {\n zipFile.close();\n reject(extractErr);\n });\n } else {\n zipFile.readEntry();\n }\n });\n\n zipFile.on(\"end\", () => {\n if (!found) {\n reject(new Error(`${targetFileName} not found in zip`));\n }\n });\n\n zipFile.on(\"error\", (zipErr: Error) => {\n reject(new Error(`Zip error: ${zipErr.message}`));\n });\n\n zipFile.readEntry();\n });\n });\n}\n\n/**\n * Extract a single zip entry to a file.\n */\nfunction extractEntry(zipFile: ZipFile, entry: Entry, outputPath: string): Promise<void> {\n return new Promise((resolve, reject) => {\n zipFile.openReadStream(entry, (err, readStream) => {\n if (err) {\n reject(new Error(`Failed to read zip entry: ${err.message}`));\n return;\n }\n if (!readStream) {\n reject(new Error(\"No read stream for zip entry\"));\n return;\n }\n\n const writeStream = createWriteStream(outputPath);\n readStream.pipe(writeStream);\n\n writeStream.on(\"finish\", () => resolve());\n writeStream.on(\"error\", (writeErr) => reject(new Error(`Write error: ${writeErr.message}`)));\n readStream.on(\"error\", (readErr) => reject(new Error(`Read error: ${readErr.message}`)));\n });\n });\n}\n\n// ---------------------------------------------------------------------------\n// Bulk download (all titles in one zip)\n// ---------------------------------------------------------------------------\n\n/** Regex matching USC XML filenames like usc01.xml, usc54.xml */\nconst USC_XML_RE 
= /^(?:.*\\/)?usc(\\d{2})\\.xml$/;\n\n/**\n * Extract all `usc{NN}.xml` files from a bulk zip archive.\n *\n * Returns an array of `{ titleNumber, filePath }` for each extracted file.\n */\nfunction extractAllXmlFromZip(\n zipPath: string,\n outputDir: string,\n): Promise<{ titleNumber: number; filePath: string }[]> {\n return new Promise((resolve, reject) => {\n yauzlOpen(zipPath, { lazyEntries: true }, (err, zipFile) => {\n if (err) {\n reject(new Error(`Failed to open zip: ${err.message}`));\n return;\n }\n if (!zipFile) {\n reject(new Error(\"Failed to open zip: no zipFile returned\"));\n return;\n }\n\n const extracted: { titleNumber: number; filePath: string }[] = [];\n let pending = 0;\n let ended = false;\n\n const maybeResolve = (): void => {\n if (ended && pending === 0) {\n resolve(extracted);\n }\n };\n\n zipFile.on(\"entry\", (entry: Entry) => {\n const match = USC_XML_RE.exec(entry.fileName);\n if (match) {\n const titleNum = parseInt(match[1] ?? \"0\", 10);\n const outPath = join(outputDir, `usc${match[1] ?? 
\"00\"}.xml`);\n pending++;\n\n extractEntry(zipFile, entry, outPath)\n .then(() => {\n extracted.push({ titleNumber: titleNum, filePath: outPath });\n pending--;\n // Continue reading entries after extraction completes\n zipFile.readEntry();\n maybeResolve();\n })\n .catch((extractErr) => {\n zipFile.close();\n reject(extractErr);\n });\n } else {\n zipFile.readEntry();\n }\n });\n\n zipFile.on(\"end\", () => {\n ended = true;\n maybeResolve();\n });\n\n zipFile.on(\"error\", (zipErr: Error) => {\n reject(new Error(`Zip error: ${zipErr.message}`));\n });\n\n zipFile.readEntry();\n });\n });\n}\n\n/**\n * Download the bulk all-titles zip and extract every `usc{NN}.xml` file.\n */\nasync function downloadAndExtractAllTitles(\n releasePoint: string,\n outputDir: string,\n): Promise<DownloadedFile[]> {\n const url = buildAllTitlesUrl(releasePoint);\n const zipPath = join(outputDir, \"uscAll.zip\");\n\n // Download the zip file\n const response = await fetch(url);\n if (!response.ok) {\n throw new Error(`HTTP ${response.status}: ${response.statusText} for ${url}`);\n }\n\n if (!response.body) {\n throw new Error(`No response body for ${url}`);\n }\n\n // Write zip to disk\n const fileStream = createWriteStream(zipPath);\n await pipeline(Readable.fromWeb(response.body as never), fileStream);\n\n // Extract all XML files from zip\n const extracted = await extractAllXmlFromZip(zipPath, outputDir);\n\n // Clean up zip file\n await unlink(zipPath);\n\n // Stat each extracted file and build results\n const files: DownloadedFile[] = [];\n for (const { titleNumber, filePath } of extracted) {\n const fileStat = await stat(filePath);\n files.push({ titleNumber, filePath, size: fileStat.size });\n }\n\n // Sort by title number for consistent ordering\n files.sort((a, b) => a.titleNumber - b.titleNumber);\n\n return 
files;\n}\n"],"mappings":";AAOA,SAAS,wBAAwB;AACjC,SAAS,OAAO,iBAAiB;AACjC,SAAS,MAAM,eAAe;AAC9B,SAAS,gBAAgB;AACzB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAwDP,IAAM,WAAqD;AAAA,EACzD,aAAa;AAAA,EACb,WAAW;AAAA,EACX,sBAAsB;AAAA,EACtB,cAAc;AAAA,EACd,uBAAuB;AAAA,EACvB,uBAAuB;AAAA,EACvB,mBAAmB;AAAA,EACnB,QAAQ;AACV;AA8BA,eAAsB,aAAa,SAAiD;AAClF,QAAM,OAAO,EAAE,GAAG,UAAU,GAAG,QAAQ;AACvC,QAAM,QAAkB,CAAC;AACzB,MAAI,aAAa,QAAQ,YAAY,IAAI;AAGzC,QAAM,YAAgC,CAAC;AAGvC,QAAM,SAAS,KAAK,gBAAgB,YAAa,YAAuB;AACxE,QAAM,UAAU,IAAI,WAAW;AAAA,IAC7B;AAAA,IACA,QAAQ,CAAC,MAAM,YAAY;AACzB,gBAAU,KAAK,EAAE,MAAM,QAAQ,CAAC;AAAA,IAClC;AAAA,EACF,CAAC;AAGD,QAAM,SAAS,IAAI,UAAU;AAC7B,SAAO,GAAG,eAAe,CAAC,MAAM,UAAU,QAAQ,cAAc,MAAM,KAAK,CAAC;AAC5E,SAAO,GAAG,gBAAgB,CAAC,SAAS,QAAQ,eAAe,IAAI,CAAC;AAChE,SAAO,GAAG,QAAQ,CAAC,SAAS,QAAQ,OAAO,IAAI,CAAC;AAGhD,QAAM,SAAS,iBAAiB,KAAK,OAAO,OAAO;AACnD,QAAM,OAAO,YAAY,MAAM;AAC/B,eAAa,KAAK,IAAI,YAAY,QAAQ,YAAY,IAAI,CAAC;AAE3D,QAAM,eAA8B,CAAC;AACrC,QAAM,OAAO,QAAQ,gBAAgB;AAErC,MAAI,KAAK,QAAQ;AAEf,eAAW,EAAE,MAAM,QAAQ,KAAK,WAAW;AACzC,UAAI,KAAK,gBAAgB,WAAW;AAElC,mBAAW,SAAS,KAAK,UAAU;AACjC,cAAI,MAAM,SAAS,WAAW,MAAM,cAAc,WAAW;AAC3D,yBAAa,KAAK,uBAAuB,OAAO,MAAM,OAAO,CAAC;AAAA,UAChE;AAAA,QACF;AAAA,MACF,OAAO;AACL,YAAI,KAAK,UAAU;AACjB,uBAAa,KAAK,uBAAuB,MAAM,MAAM,OAAO,CAAC;AAAA,QAC/D;AAAA,MACF;AAAA,IACF;AAAA,EACF,WAAW,KAAK,gBAAgB,WAAW;AAEzC,eAAW,EAAE,MAAM,QAAQ,KAAK,WAAW;AACzC,YAAM,SAAS,MAAM,aAAa,MAAM,SAAS,IAAI;AACrD,UAAI,QAAQ;AACV,cAAM,KAAK,OAAO,QAAQ;AAC1B,mBAAW,KAAK,OAAO,cAAc;AACnC,uBAAa,KAAK,CAAC;AAAA,QACrB;AAAA,MACF;AAAA,IACF;AAAA,EACF,OAAO;AAGL,UAAM,gBAAgB,oBAAI,IAAoB;AAC9C,UAAM,WAAmC,CAAC;AAC1C,eAAW,EAAE,MAAM,QAAQ,KAAK,WAAW;AACzC,YAAM,aAAa,KAAK;AACxB,UAAI,CAAC,YAAY;AACf,iBAAS,KAAK,MAAS;AACvB;AAAA,MACF;AACA,YAAM,aAAa,gBAAgB,OAAO,KAAK;AAC/C,YAAM,MAAM,GAAG,UAAU,IAAI,UAAU;AACvC,YAAM,SAAS,cAAc,IAAI,GAAG,KAAK,KAAK;AAC9C,oBAAc,IAAI,KAAK,KAAK;AAC5B,eAAS,KAAK,QAAQ,IAAI,IAAI,KAAK,KAAK,MAAS;AAAA,IACnD;AAEA,UAAM,eAAe,mBAAmB;AACxC,eAAW,CAAC,GAAG,EAAE,MAAM,Q
AAQ,CAAC,KAAK,UAAU,QAAQ,GAAG;AACxD,YAAM,aAAa,KAAK;AACxB,UAAI,cAAc,KAAK,YAAY;AACjC,cAAM,WAAW,gBAAgB,SAAS,YAAY,KAAK,QAAQ,SAAS,CAAC,CAAC;AAE9E,cAAM,QAAQ,SAAS,CAAC,IAAI,GAAG,KAAK,UAAU,IAAI,SAAS,CAAC,CAAC,KAAK,KAAK;AACvE,qBAAa,SAAS,OAAO,QAAQ;AAErC,YAAI,CAAC,SAAS,CAAC,GAAG;AAChB,uBAAa,SAAS,KAAK,YAAY,QAAQ;AAAA,QACjD;AAAA,MACF;AAAA,IACF;AAEA,eAAW,CAAC,GAAG,EAAE,MAAM,QAAQ,CAAC,KAAK,UAAU,QAAQ,GAAG;AACxD,YAAM,SAAS,MAAM,aAAa,MAAM,SAAS,MAAM,cAAc,SAAS,CAAC,CAAC;AAChF,UAAI,QAAQ;AACV,cAAM,KAAK,OAAO,QAAQ;AAC1B,qBAAa,KAAK,OAAO,IAAI;AAAA,MAC/B;AAAA,IACF;AAAA,EACF;AAGA,QAAM,iBAAiB,UAAU,CAAC;AAClC,QAAM,eAAe,iBACjB,aAAa,eAAe,QAAQ,WAAW,OAAO,GAAG,SAAS,KAAK,IACvE;AAGJ,MAAI,CAAC,KAAK,QAAQ;AAChB,UAAM,eAAe,cAAc,MAAM,MAAM,YAAY;AAAA,EAC7D;AAGA,eAAa,KAAK,IAAI,YAAY,QAAQ,YAAY,IAAI,CAAC;AAG3D,QAAM,aAAa,IAAI,IAAI,aAAa,IAAI,CAAC,MAAM,EAAE,iBAAiB,CAAC;AACvE,QAAM,cAAc,aAAa,OAAO,CAAC,KAAK,MAAM,MAAM,KAAK,KAAK,EAAE,gBAAgB,CAAC,GAAG,CAAC;AAE3F,SAAO;AAAA,IACL,iBAAiB,KAAK,SAAS,aAAa,SAAS,MAAM;AAAA,IAC3D;AAAA,IACA,aAAa,KAAK,aAAa;AAAA,IAC/B,WAAW,gBAAgB,KAAK,WAAW;AAAA,IAC3C,QAAQ,KAAK;AAAA,IACb,cAAc,WAAW;AAAA,IACzB,oBAAoB;AAAA,IACpB,iBAAiB;AAAA,EACnB;AACF;AAYA,eAAe,aACb,MACA,SACA,SACA,cAEA,WACoC;AACpC,QAAM,aAAa,KAAK;AACxB,MAAI,CAAC,WAAY,QAAO;AAGxB,QAAM,WAAW,gBAAgB,SAAS,YAAY,QAAQ,QAAQ,SAAS;AAG/E,QAAM,cAAc,iBAAiB,MAAM,OAAO;AAGlD,QAAM,cAAc,iBAAiB,OAAO;AAG5C,QAAM,aAA4B;AAAA,IAChC,eAAe;AAAA,IACf,WAAW,QAAQ;AAAA,IACnB,aAAa,eACT,CAAC,eAAuB,aAAa,QAAQ,YAAY,QAAQ,IACjE;AAAA,IACJ;AAAA,EACF;AAGA,QAAM,cAAc,QAAQ,uBAAuB,OAAO,mBAAmB,IAAI;AAGjF,QAAM,WAAW,eAAe,aAAa,aAAa,UAAU;AAGpE,QAAM,MAAM,QAAQ,QAAQ,GAAG,EAAE,WAAW,KAAK,CAAC;AAClD,QAAM,UAAU,UAAU,UAAU,OAAO;AAG3C,QAAM,WAAW,aAAa,QAAQ,WAAW,OAAO,GAAG,YAAY;AACvE,QAAM,kBAAkB,aAAa,QAAQ,WAAW,SAAS;AACjE,QAAM,aAAa,iBAAiB,WAAW,WAAW,OAAO,gBAAgB,QAAQ,CAAC,KAAK;AAC/F,QAAM,kBAAkB,WAAW,UAAU,GAAG,aAAa,EAAE;AAC/D,QAAM,eAAe,aAAa,GAAG,UAAU,IAAI,eAAe,KAAK;AAEvE,QAAM,WAAW,KAAK,SAAS,KAAK,CAAC,MAAM,EAAE,SAAS,oBAAoB,EAAE,SAAS,MAAM;AAE3F,QAAM,cAA2B;AAAA,IAC/B,YAAY,KAAK,cAAc,YAAY,QAAQ,KAAK,UAAU;AAAA,IAClE,QAAQ;
AAAA,IACR,MAAM,KAAK,SAAS,KAAK,KAAK;AAAA,IAC9B,UAAU;AAAA,IACV;AAAA,IACA,eAAe,SAAS;AAAA,IACxB;AAAA,IACA,QAAQ,KAAK,UAAU;AAAA,IACvB,mBAAmB,iBAAiB,cAAc;AAAA,IAClD,eAAe,iBAAiB,YAAY;AAAA,IAC5C,aAAa,iBAAiB,SAAS,KAAK,KAAK;AAAA,EACnD;AAEA,SAAO,EAAE,UAAU,MAAM,YAAY;AACvC;AAUA,eAAe,eACb,cACA,SAQA,SACA,cACe;AACf,MAAI,aAAa,WAAW,EAAG;AAE/B,QAAM,SAAS,QAAQ,aAAa;AACpC,QAAM,eAAe,2BAA2B,MAAM;AACtD,QAAM,WAAW,KAAK,QAAQ,QAAQ,OAAO,YAAY;AACzD,QAAM,WAAW,cAAc,QAAQ,sBAAsB,EAAE;AAG/D,QAAM,aAAa,oBAAI,IAA2B;AAClD,aAAW,MAAM,cAAc;AAC7B,UAAM,MAAM,GAAG,qBAAqB;AACpC,QAAI,MAAM,WAAW,IAAI,GAAG;AAC5B,QAAI,CAAC,KAAK;AACR,YAAM,CAAC;AACP,iBAAW,IAAI,KAAK,GAAG;AAAA,IACzB;AACA,QAAI,KAAK,EAAE;AAAA,EACb;AAGA,QAAM,iBAcD,CAAC;AAEN,aAAW,CAAC,WAAW,eAAe,KAAK,YAAY;AACrD,QAAI,cAAc,iBAAkB;AAEpC,UAAM,QAAQ,gBAAgB,CAAC;AAC/B,QAAI,CAAC,MAAO;AAEZ,UAAM,aAAa,WAAW,OAAO,MAAM,aAAa,CAAC;AAEzD,UAAM,WAAW,gBAAgB,IAAI,CAAC,QAAQ;AAAA,MAC5C,YAAY,GAAG;AAAA,MACf,QAAQ,GAAG;AAAA,MACX,MAAM,GAAG;AAAA,MACT,MAAM,GAAG;AAAA,MACT,gBAAgB,KAAK,KAAK,GAAG,gBAAgB,CAAC;AAAA,MAC9C,WAAW,GAAG;AAAA,MACd,QAAQ,GAAG;AAAA,IACb,EAAE;AAEF,UAAM,cAAc;AAAA,MAClB,gBAAgB;AAAA,MAChB,YAAY;AAAA,MACZ,gBAAgB,aAAa,MAAM,aAAa;AAAA,MAChD,cAAc,MAAM;AAAA,MACpB,cAAc,aAAa,MAAM;AAAA,MACjC,eAAe,SAAS;AAAA,MACxB;AAAA,IACF;AAEA,UAAM,kBAAkB,KAAK,UAAU,YAAY,YAAY;AAC/D,UAAM,MAAM,QAAQ,eAAe,GAAG,EAAE,WAAW,KAAK,CAAC;AACzD,UAAM,UAAU,iBAAiB,KAAK,UAAU,aAAa,MAAM,CAAC,IAAI,MAAM,OAAO;AAErF,mBAAe,KAAK;AAAA,MAClB,YAAY;AAAA,MACZ,QAAQ,aAAa,MAAM,aAAa;AAAA,MACxC,MAAM,MAAM;AAAA,MACZ,WAAW;AAAA,MACX;AAAA,IACF,CAAC;AAAA,EACH;AAGA,QAAM,cAAc,aAAa,OAAO,CAAC,KAAK,OAAO,MAAM,KAAK,KAAK,GAAG,gBAAgB,CAAC,GAAG,CAAC;AAE7F,QAAM,YAAY;AAAA,IAChB,gBAAgB;AAAA,IAChB,WAAW;AAAA,IACX,eAAc,oBAAI,KAAK,GAAE,YAAY;AAAA,IACrC,YAAY,QAAQ,cAAc,YAAY,MAAM;AAAA,IACpD,cAAc,aAAa,MAAM;AAAA,IACjC,YAAY,gBAAgB,QAAQ,WAAW;AAAA,IAC/C,cAAc,QAAQ,eAAe;AAAA,IACrC;AAAA,IACA,YAAY,SAAS,QAAQ,KAAK;AAAA,IAClC,aAAa,QAAQ;AAAA,IACrB,OAAO;AAAA,MACL,eAAe,eAAe;AAAA,MAC9B,eAAe,aAAa;AAAA,MAC5B,aAAa,aAAa;AAAA,MAC1B,uBAAuB;AAAA,IACzB;AAAA,IACA,UAAU;AAAA,EACZ;AAEA,QAAM,g
BAAgB,KAAK,UAAU,YAAY;AACjD,QAAM,MAAM,QAAQ,aAAa,GAAG,EAAE,WAAW,KAAK,CAAC;AACvD,QAAM,UAAU,eAAe,KAAK,UAAU,WAAW,MAAM,CAAC,IAAI,MAAM,OAAO;AAGjF,QAAM,aAAa,KAAK,UAAU,WAAW;AAC7C,QAAM,SAAS,oBAAoB,SAAS;AAC5C,QAAM,UAAU,YAAY,QAAQ,OAAO;AAC7C;AAKA,SAAS,oBAAoB,MAsBlB;AACT,QAAM,QAAkB,CAAC;AAEzB,QAAM,KAAK,WAAW,KAAK,YAAY,WAAM,KAAK,UAAU,EAAE;AAC9D,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,OAAO;AAClB,QAAM,KAAK,eAAe;AAC1B,QAAM,KAAK,wBAAwB,KAAK,eAAe,QAAQ,IAAI,IAAI;AACvE,QAAM,KAAK,oBAAoB,KAAK,YAAY,SAAS,IAAI;AAC7D,QAAM,KAAK,oBAAoB,KAAK,MAAM,aAAa,IAAI;AAC3D,QAAM,KAAK,oBAAoB,KAAK,MAAM,cAAc,eAAe,CAAC,IAAI;AAC5E,QAAM,KAAK,uBAAuB,KAAK,MAAM,sBAAsB,eAAe,CAAC,IAAI;AACvF,QAAM,KAAK,uBAAuB,KAAK,WAAW,IAAI;AACtD,QAAM,KAAK,EAAE;AAGb,QAAM,KAAK,aAAa;AACxB,QAAM,KAAK,EAAE;AAEb,aAAW,MAAM,KAAK,UAAU;AAC9B,UAAM,eAAe,GAAG,SAAS;AACjC,UAAM,KAAK,eAAe,GAAG,MAAM,WAAM,GAAG,IAAI,EAAE;AAClD,UAAM,KAAK,EAAE;AACb,UAAM;AAAA,MACJ,GAAG,YAAY,WAAW,iBAAiB,IAAI,MAAM,EAAE,UAAO,GAAG,SAAS,MAAM,GAAG,SAAS;AAAA,IAC9F;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAGA,QAAM,KAAK,KAAK;AAChB,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,qEAAqE;AAChF,QAAM,KAAK,EAAE;AAEb,SAAO,MAAM,KAAK,IAAI;AACxB;AAYA,eAAe,aACb,aACA,SACA,SACoC;AACpC,QAAM,aAAa,YAAY;AAC/B,MAAI,CAAC,WAAY,QAAO;AAExB,QAAM,WAAW,aAAa,QAAQ,WAAW,OAAO,GAAG,YAAY;AACvE,QAAM,WAAW,SAAS,OAAO,QAAQ,CAAC;AAC1C,QAAM,cAAc,WAAW,OAAO,UAAU,CAAC;AACjD,QAAM,WAAW,KAAK,QAAQ,QAAQ,OAAO,UAAU,WAAW;AAGlE,QAAM,gBAAgB,aAAa,QAAQ,WAAW,OAAO;AAC7D,QAAM,OAAO,QAAQ;AACrB,QAAM,cAAc,YAAY,SAAS,KAAK,KAAK;AACnD,QAAM,YAAY,eAAe,SAAS,KAAK,KAAK,KAAK,WAAW;AACpE,QAAM,WAAW,cAAc,KAAK,sBAAsB,EAAE;AAC5D,QAAM,cAAc,UAAU,KAAK,WAAW,EAAE;AAEhD,QAAM,SAA0B;AAAA,IAC9B,YAAY,YAAY,cAAc,YAAY,QAAQ,MAAM,UAAU;AAAA,IAC1E,OAAO,GAAG,QAAQ,gBAAgB,UAAU,MAAM,WAAW;AAAA,IAC7D,cAAc,aAAa,QAAQ;AAAA,IACnC,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,gBAAgB,aAAa,UAAU;AAAA,IACvC,cAAc;AAAA,IACd,cAAc,KAAK,eAAe;AAAA,IAClC;AAAA,IACA,cAAc;AAAA,EAChB;AAEA,QAAM,cAAc,iBAAiB,OAAO;AAC5C,QAAM,aAA4B;AAAA,IAChC,eAAe;AAAA,IACf,WAAW,QAAQ;AAAA,IACnB;AAAA,EACF;AAGA,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,oBAAoB,MAAM,CAAC;AA
CtC,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,aAAa,UAAU,WAAM,WAAW,EAAE;AAGrD,QAAM,eAA8B,CAAC;AAErC,aAAW,SAAS,YAAY,UAAU;AACxC,QAAI,MAAM,SAAS,WAAW,MAAM,cAAc,WAAW;AAC3D,YAAM,cAA6B,EAAE,GAAG,YAAY,eAAe,EAAE;AACrE,YAAM,cAAc,QAAQ,uBAAuB,QAAQ,mBAAmB,KAAK;AACnF,YAAM,YAAY,cAAc,aAAa,WAAW;AACxD,YAAM,KAAK,EAAE;AACb,YAAM,KAAK,SAAS;AAGpB,YAAM,aAAa,MAAM,YAAY;AACrC,YAAM,WAAW,MAAM,SAAS,KAAK,CAAC,MAAM,EAAE,SAAS,oBAAoB,EAAE,SAAS,MAAM;AAC5F,mBAAa,KAAK;AAAA,QAChB,YAAY,MAAM,cAAc,YAAY,QAAQ,KAAK,UAAU;AAAA,QACnE,QAAQ;AAAA,QACR,MAAM,MAAM,SAAS,KAAK,KAAK;AAAA,QAC/B,UAAU,WAAW,UAAU;AAAA,QAC/B,cAAc;AAAA,QACd,eAAe,UAAU;AAAA,QACzB;AAAA,QACA,QAAQ,MAAM,UAAU;AAAA,QACxB,mBAAmB,YAAY,cAAc;AAAA,QAC7C,eAAe;AAAA,QACf;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,QAAM,WAAW,MAAM,KAAK,IAAI,IAAI;AAEpC,QAAM,MAAM,QAAQ,QAAQ,GAAG,EAAE,WAAW,KAAK,CAAC;AAClD,QAAM,UAAU,UAAU,UAAU,OAAO;AAE3C,SAAO,EAAE,UAAU,aAAa;AAClC;AAEA,SAAS,gBACP,SACA,YACA,YAEA,WACQ;AACR,QAAM,WAAW,cAAc,OAAO;AACtC,QAAM,aAAa,gBAAgB,OAAO;AAC1C,QAAM,cAAc,WAAW,UAAU,GAAG,aAAa,EAAE;AAE3D,MAAI,YAAY;AACd,WAAO,KAAK,YAAY,OAAO,UAAU,YAAY,WAAW;AAAA,EAClE;AAEA,SAAO,KAAK,YAAY,OAAO,UAAU,WAAW;AACtD;AAMA,SAAS,cAAc,SAA8B;AAEnD,QAAM,SAAS,QAAQ,aAAa,aAAa;AACjD,QAAM,gBAAgB,YAAY,KAAK,MAAM;AAC7C,MAAI,gBAAgB,CAAC,GAAG;AACtB,WAAO,SAAS,OAAO,cAAc,CAAC,CAAC,CAAC;AAAA,EAC1C;AAGA,QAAM,mBAAmB,aAAa,QAAQ,WAAW,UAAU;AACnE,MAAI,kBAAkB;AACpB,UAAM,MAAM,iBAAiB,YAAY;AACzC,UAAM,cAAc,SAAS,KAAK,GAAG;AACrC,QAAI,cAAc,CAAC,GAAG;AACpB,aAAO,SAAS,OAAO,YAAY,CAAC,CAAC,CAAC;AAAA,IACxC;AAAA,EACF;AAGA,QAAM,WAAW,aAAa,QAAQ,WAAW,OAAO,GAAG,YAAY;AACvE,SAAO,SAAS,OAAO,QAAQ,CAAC;AAClC;AAMA,SAAS,gBAAgB,SAA0C;AAEjE,QAAM,aAAa,aAAa,QAAQ,WAAW,SAAS,GAAG;AAC/D,MAAI,WAAY,QAAO,WAAW,OAAO,UAAU,CAAC;AAGpD,QAAM,cAAc,aAAa,QAAQ,WAAW,aAAa;AACjE,MAAI,aAAa;AACf,UAAM,UAAU,YAAY,SAAS,KAAK,KAAK;AAE/C,UAAM,OAAO,QACV,YAAY,EACZ,QAAQ,eAAe,GAAG,EAC1B,QAAQ,UAAU,EAAE,EACpB,MAAM,GAAG,EAAE;AACd,WAAO,QAAQ;AAAA,EACjB;AAGA,QAAM,YAAY,aAAa,QAAQ,WAAW,oBAAoB;AACtE,MAAI,WAAW;AACb,UAAM,UAAU,UAAU,SAAS,KAAK,KAAK;AAC7C,UAAM,OAAO,QACV,YAAY,EACZ,QAAQ,eAAe,GAAG,EAC1B,QAAQ,UAAU,E
AAE,EACpB,MAAM,GAAG,EAAE;AACd,WAAO,QAAQ;AAAA,EACjB;AAGA,QAAM,aAAa,aAAa,QAAQ,WAAW,qBAAqB;AACxE,MAAI,YAAY;AACd,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAKA,SAAS,iBAAiB,MAAiB,SAAuC;AAChF,QAAM,OAAO,QAAQ;AACrB,QAAM,gBACJ,aAAa,QAAQ,WAAW,OAAO,KAAK,aAAa,QAAQ,WAAW,UAAU;AACxF,QAAM,kBACJ,aAAa,QAAQ,WAAW,SAAS,KACzC,aAAa,QAAQ,WAAW,aAAa,KAC7C,aAAa,QAAQ,WAAW,oBAAoB;AACtD,QAAM,qBAAqB,aAAa,QAAQ,WAAW,YAAY;AACvE,QAAM,eAAe,aAAa,QAAQ,WAAW,MAAM;AAE3D,QAAM,SAAS,KAAK,aAAa,eAAe,YAAY;AAC5D,QAAM,WAAW,aAAa,OAAO,QAAQ,OAAO,EAAE,CAAC;AACvD,QAAM,aAAa,KAAK,YAAY;AACpC,QAAM,cAAc,KAAK,SAAS,KAAK,KAAK;AAC5C,QAAM,YAAY,eAAe,SAAS,KAAK,KAAK,KAAK,WAAW;AAGpE,QAAM,eAAe,GAAG,QAAQ,aAAU,UAAU,MAAM,WAAW;AAGrE,QAAM,eAAe,wBAAwB,IAAI;AAGjD,QAAM,WAAW,cAAc,KAAK,sBAAsB,EAAE;AAG5D,QAAM,cAAc,UAAU,KAAK,WAAW,EAAE;AAEhD,QAAM,KAAsB;AAAA,IAC1B,YAAY,KAAK,cAAc,YAAY,QAAQ,KAAK,UAAU;AAAA,IAClE,OAAO;AAAA,IACP,cAAc;AAAA,IACd,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,cAAc,KAAK,eAAe;AAAA,IAClC;AAAA,IACA,cAAc;AAAA,EAChB;AAEA,MAAI,iBAAiB,UAAU;AAC7B,OAAG,iBAAiB,aAAa,gBAAgB,QAAQ;AAAA,EAC3D;AACA,MAAI,iBAAiB,SAAS;AAC5B,OAAG,eAAe,gBAAgB,QAAQ,KAAK;AAAA,EACjD;AACA,MAAI,oBAAoB,UAAU;AAChC,OAAG,oBAAoB,mBAAmB;AAAA,EAC5C;AACA,MAAI,oBAAoB,SAAS;AAC/B,OAAG,kBAAkB,mBAAmB,QAAQ,KAAK;AAAA,EACvD;AACA,MAAI,cAAc,UAAU;AAC1B,OAAG,cAAc,aAAa;AAAA,EAChC;AACA,MAAI,cAAc,SAAS;AACzB,OAAG,YAAY,aAAa,QAAQ,KAAK;AAAA,EAC3C;AACA,MAAI,cAAc;AAChB,OAAG,gBAAgB;AAAA,EACrB;AACA,MAAI,KAAK,QAAQ;AACf,OAAG,SAAS,KAAK;AAAA,EACnB;AAEA,SAAO;AACT;AAgBA,SAAS,uBACP,aACA,aACA,SACa;AACb,QAAM,WAAW,aAAa,QAAQ,WAAW,OAAO,GAAG,YAAY;AACvE,QAAM,kBAAkB,cACpB;AAAA,IACE,UAAU,YAAY;AAAA,IACtB,SAAS,YAAY;AAAA,IACrB,YAAY,YAAY;AAAA,EAC1B,IACA,aAAa,QAAQ,WAAW,SAAS;AAC7C,QAAM,aAAa,YAAY,YAAY;AAC3C,QAAM,aAAa,iBAAiB,YAAY;AAChD,QAAM,aAAa,eAAe,MAAM,WAAW,OAAO,UAAU,CAAC,KAAK;AAE1E,QAAM,WAAW,YAAY,SAAS;AAAA,IACpC,CAAC,MAAM,EAAE,SAAS,oBAAoB,EAAE,SAAS;AAAA,EACnD;AAGA,MAAI,gBAAgB;AACpB,QAAM,OAAO,CAAC,SAGF;AACV,QAAI,KAAK,KAAM,kBAAiB,KAAK,KAAK;AAC1C,QAAI,KAAK,UAAU;AACjB,iBAAW,SAAS,KAAK,UAAU;AACjC,aAAK,KAAoB;AAAA,MAC3B;AAAA,IACF;AAA
A,EACF;AACA,OAAK,WAAoD;AAEzD,QAAM,kBAAkB,WAAW,UAAU;AAC7C,SAAO;AAAA,IACL,YAAY,YAAY,cAAc,YAAY,QAAQ,KAAK,UAAU;AAAA,IACzE,QAAQ;AAAA,IACR,MAAM,YAAY,SAAS,KAAK,KAAK;AAAA,IACrC,UAAU;AAAA,IACV,cAAc,aAAa,GAAG,UAAU,IAAI,eAAe,KAAK;AAAA,IAChE;AAAA,IACA;AAAA,IACA,QAAQ,YAAY,UAAU;AAAA,IAC9B,mBAAmB,iBAAiB,cAAc;AAAA,IAClD,eAAe;AAAA,IACf,aAAa,iBAAiB,SAAS,KAAK,KAAK;AAAA,EACnD;AACF;AAEA,SAAS,iBAAiB,SAAkD;AAE1E,MAAI,QAAQ,aAAc,QAAO;AAGjC,MACE,CAAC,QAAQ,yBACT,CAAC,QAAQ,yBACT,CAAC,QAAQ,mBACT;AACA,WAAO,EAAE,WAAW,OAAO,WAAW,OAAO,YAAY,MAAM;AAAA,EACjE;AAGA,SAAO;AAAA,IACL,WAAW,QAAQ;AAAA,IACnB,WAAW,QAAQ;AAAA,IACnB,YAAY,QAAQ;AAAA,EACtB;AACF;AAEA,SAAS,aACP,WACA,WAC0B;AAC1B,SAAO,UAAU,KAAK,CAAC,MAAM,EAAE,cAAc,SAAS;AACxD;AASA,SAAS,2BAA2B,QAAwB;AAC1D,QAAM,gBAAgB,YAAY,KAAK,MAAM;AAC7C,MAAI,gBAAgB,CAAC,GAAG;AACtB,WAAO,SAAS,OAAO,cAAc,CAAC,CAAC,CAAC;AAAA,EAC1C;AACA,SAAO,SAAS,OAAO,MAAM,CAAC;AAChC;AAEA,SAAS,OAAO,KAAqB;AACnC,QAAM,IAAI,SAAS,KAAK,EAAE;AAC1B,MAAI,MAAM,CAAC,EAAG,QAAO;AACrB,SAAO,EAAE,SAAS,EAAE,SAAS,GAAG,GAAG;AACrC;AAKA,SAAS,aAAa,GAAmB;AACvC,QAAM,IAAI,SAAS,GAAG,EAAE;AACxB,SAAO,MAAM,CAAC,IAAI,IAAI;AACxB;AAKA,SAAS,wBAAwB,MAAqC;AACpE,aAAW,SAAS,KAAK,UAAU;AACjC,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO,MAAM,SAAS,IAAI,CAAC,WAAW,aAAa,MAAM,CAAC,EAAE,KAAK,EAAE;AAAA,IACrE;AAAA,EACF;AACA,SAAO;AACT;AAKA,SAAS,aAAa,MAIX;AACT,MAAI,KAAK,KAAM,QAAO,KAAK;AAC3B,MAAI,KAAK,UAAU;AACjB,WAAO,KAAK,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,EAAE,EAAE,KAAK,EAAE;AAAA,EACvD;AACA,SAAO;AACT;AAMA,SAAS,cAAc,SAAyB;AAE9C,QAAM,QAAQ,YAAY,KAAK,OAAO;AACtC,MAAI,QAAQ,CAAC,EAAG,QAAO,MAAM,CAAC;AAC9B,SAAO,WAAW;AACpB;AAKA,SAAS,UAAU,SAAyB;AAC1C,MAAI,CAAC,QAAS,QAAO;AAErB,QAAM,WAAW,QAAQ,MAAM,GAAG,EAAE,CAAC;AACrC,SAAO,YAAY;AACrB;AAKA,SAAS,mBAAmB,MAA4B;AACtD,SAAO;AAAA,IACL,GAAG;AAAA,IACH,UAAU,KAAK,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,cAAc;AAAA,EACjE;AACF;;;AC/6BA,SAAS,yBAAyB;AAClC,SAAS,SAAAA,QAAO,MAAM,cAAc;AACpC,SAAS,QAAAC,aAAY;AACrB,SAAS,gBAAgB;AACzB,SAAS,gBAAgB;AACzB,SAAS,QAAQ,iBAAiB;AAgB3B,IAAM,wBAAwB;AAGrC,IAAM,gBAAgB;AAGf,IAAM,oBAAoB,MAAM,KAAK,EAAE,QAAQ,GAAG,GAAG,CAAC,GAAG,
MAAM,IAAI,CAAC;AAWpE,SAAS,YAAY,QAA2B;AACrD,QAAM,SAAS,IAAI,IAAI,MAAM;AAC7B,SAAO,OAAO,SAAS,MAAM,kBAAkB,MAAM,CAAC,MAAM,OAAO,IAAI,CAAC,CAAC;AAC3E;AAmDA,eAAsB,eAAe,SAAmD;AACtF,QAAM,eAAe,QAAQ,gBAAgB;AAC7C,QAAM,SAAS,QAAQ,UAAU;AAEjC,QAAMD,OAAM,QAAQ,WAAW,EAAE,WAAW,KAAK,CAAC;AAGlD,MAAI,QAAQ,WAAW,UAAa,YAAY,MAAM,GAAG;AACvD,QAAI;AACF,YAAME,SAAQ,MAAM,4BAA4B,cAAc,QAAQ,SAAS;AAC/E,aAAO,EAAE,cAAc,OAAAA,QAAO,QAAQ,CAAC,EAAE;AAAA,IAC3C,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,QAAM,QAA0B,CAAC;AACjC,QAAM,SAA0B,CAAC;AAEjC,aAAW,YAAY,QAAQ;AAC7B,QAAI;AACF,YAAM,OAAO,MAAM,wBAAwB,UAAU,cAAc,QAAQ,SAAS;AACpF,YAAM,KAAK,IAAI;AAAA,IACjB,SAAS,KAAK;AACZ,aAAO,KAAK;AAAA,QACV,aAAa;AAAA,QACb,SAAS,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAAA,MAC1D,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,EAAE,cAAc,OAAO,OAAO;AACvC;AAcO,SAAS,iBAAiB,aAAqB,cAA8B;AAClF,QAAM,cAAc,YAAY,SAAS,EAAE,SAAS,GAAG,GAAG;AAC1D,QAAM,SAAS,mBAAmB,YAAY;AAC9C,SAAO,GAAG,aAAa,IAAI,MAAM,WAAW,WAAW,IAAI,YAAY;AACzE;AAKO,SAAS,kBAAkB,cAA8B;AAC9D,QAAM,SAAS,mBAAmB,YAAY;AAC9C,SAAO,GAAG,aAAa,IAAI,MAAM,eAAe,YAAY;AAC9D;AAOO,SAAS,mBAAmB,cAA8B;AAE/D,QAAM,YAAY,aAAa,QAAQ,GAAG;AAC1C,MAAI,cAAc,GAAI,QAAO;AAC7B,SAAO,GAAG,aAAa,MAAM,GAAG,SAAS,CAAC,IAAI,aAAa,MAAM,YAAY,CAAC,CAAC;AACjF;AASA,eAAe,wBACb,aACA,cACA,WACyB;AACzB,QAAM,MAAM,iBAAiB,aAAa,YAAY;AACtD,QAAM,cAAc,YAAY,SAAS,EAAE,SAAS,GAAG,GAAG;AAC1D,QAAM,UAAUD,MAAK,WAAW,MAAM,WAAW,MAAM;AACvD,QAAM,cAAc,MAAM,WAAW;AACrC,QAAM,UAAUA,MAAK,WAAW,WAAW;AAG3C,QAAM,WAAW,MAAM,MAAM,GAAG;AAChC,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,IAAI,MAAM,QAAQ,SAAS,MAAM,KAAK,SAAS,UAAU,QAAQ,GAAG,EAAE;AAAA,EAC9E;AAEA,MAAI,CAAC,SAAS,MAAM;AAClB,UAAM,IAAI,MAAM,wBAAwB,GAAG,EAAE;AAAA,EAC/C;AAGA,QAAM,aAAa,kBAAkB,OAAO;AAE5C,QAAM,SAAS,SAAS,QAAQ,SAAS,IAAa,GAAG,UAAU;AAGnE,QAAM,kBAAkB,SAAS,aAAa,OAAO;AAGrD,QAAM,OAAO,OAAO;AAGpB,QAAM,WAAW,MAAM,KAAK,OAAO;AAEnC,SAAO;AAAA,IACL;AAAA,IACA,UAAU;AAAA,IACV,MAAM,SAAS;AAAA,EACjB;AACF;AAKA,SAAS,kBACP,SACA,gBACA,YACe;AACf,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,cAAU,SAAS,EAAE,aAAa,KAAK,GAAG,CAAC,KAAK,YAAY;AAC1D,UAAI,KAAK;AACP,eAAO,IAAI,MAAM,uBAAuB,IAAI,OAAO,EAAE,CAAC;
AACtD;AAAA,MACF;AACA,UAAI,CAAC,SAAS;AACZ,eAAO,IAAI,MAAM,yCAAyC,CAAC;AAC3D;AAAA,MACF;AAEA,UAAI,QAAQ;AAEZ,cAAQ,GAAG,SAAS,CAAC,UAAiB;AAEpC,cAAM,WAAW,MAAM,SAAS,MAAM,GAAG,EAAE,IAAI,KAAK,MAAM;AAC1D,YAAI,aAAa,kBAAkB,MAAM,SAAS,SAAS,MAAM,GAAG;AAClE,kBAAQ;AACR,uBAAa,SAAS,OAAO,UAAU,EACpC,KAAK,MAAM;AACV,oBAAQ,MAAM;AACd,oBAAQ;AAAA,UACV,CAAC,EACA,MAAM,CAAC,eAAe;AACrB,oBAAQ,MAAM;AACd,mBAAO,UAAU;AAAA,UACnB,CAAC;AAAA,QACL,OAAO;AACL,kBAAQ,UAAU;AAAA,QACpB;AAAA,MACF,CAAC;AAED,cAAQ,GAAG,OAAO,MAAM;AACtB,YAAI,CAAC,OAAO;AACV,iBAAO,IAAI,MAAM,GAAG,cAAc,mBAAmB,CAAC;AAAA,QACxD;AAAA,MACF,CAAC;AAED,cAAQ,GAAG,SAAS,CAAC,WAAkB;AACrC,eAAO,IAAI,MAAM,cAAc,OAAO,OAAO,EAAE,CAAC;AAAA,MAClD,CAAC;AAED,cAAQ,UAAU;AAAA,IACpB,CAAC;AAAA,EACH,CAAC;AACH;AAKA,SAAS,aAAa,SAAkB,OAAc,YAAmC;AACvF,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,YAAQ,eAAe,OAAO,CAAC,KAAK,eAAe;AACjD,UAAI,KAAK;AACP,eAAO,IAAI,MAAM,6BAA6B,IAAI,OAAO,EAAE,CAAC;AAC5D;AAAA,MACF;AACA,UAAI,CAAC,YAAY;AACf,eAAO,IAAI,MAAM,8BAA8B,CAAC;AAChD;AAAA,MACF;AAEA,YAAM,cAAc,kBAAkB,UAAU;AAChD,iBAAW,KAAK,WAAW;AAE3B,kBAAY,GAAG,UAAU,MAAM,QAAQ,CAAC;AACxC,kBAAY,GAAG,SAAS,CAAC,aAAa,OAAO,IAAI,MAAM,gBAAgB,SAAS,OAAO,EAAE,CAAC,CAAC;AAC3F,iBAAW,GAAG,SAAS,CAAC,YAAY,OAAO,IAAI,MAAM,eAAe,QAAQ,OAAO,EAAE,CAAC,CAAC;AAAA,IACzF,CAAC;AAAA,EACH,CAAC;AACH;AAOA,IAAM,aAAa;AAOnB,SAAS,qBACP,SACA,WACsD;AACtD,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,cAAU,SAAS,EAAE,aAAa,KAAK,GAAG,CAAC,KAAK,YAAY;AAC1D,UAAI,KAAK;AACP,eAAO,IAAI,MAAM,uBAAuB,IAAI,OAAO,EAAE,CAAC;AACtD;AAAA,MACF;AACA,UAAI,CAAC,SAAS;AACZ,eAAO,IAAI,MAAM,yCAAyC,CAAC;AAC3D;AAAA,MACF;AAEA,YAAM,YAAyD,CAAC;AAChE,UAAI,UAAU;AACd,UAAI,QAAQ;AAEZ,YAAM,eAAe,MAAY;AAC/B,YAAI,SAAS,YAAY,GAAG;AAC1B,kBAAQ,SAAS;AAAA,QACnB;AAAA,MACF;AAEA,cAAQ,GAAG,SAAS,CAAC,UAAiB;AACpC,cAAM,QAAQ,WAAW,KAAK,MAAM,QAAQ;AAC5C,YAAI,OAAO;AACT,gBAAM,WAAW,SAAS,MAAM,CAAC,KAAK,KAAK,EAAE;AAC7C,gBAAM,UAAUA,MAAK,WAAW,MAAM,MAAM,CAAC,KAAK,IAAI,MAAM;AAC5D;AAEA,uBAAa,SAAS,OAAO,OAAO,EACjC,KAAK,MAAM;AACV,sBAAU,KAAK,EAAE,aAAa,UAAU,UAAU,QAAQ,CAAC;AAC3D;AAEA,oBAAQ,UAAU;AAClB,yBAAa;AAAA,UACf,CAAC,EACA,MAAM,CAA
C,eAAe;AACrB,oBAAQ,MAAM;AACd,mBAAO,UAAU;AAAA,UACnB,CAAC;AAAA,QACL,OAAO;AACL,kBAAQ,UAAU;AAAA,QACpB;AAAA,MACF,CAAC;AAED,cAAQ,GAAG,OAAO,MAAM;AACtB,gBAAQ;AACR,qBAAa;AAAA,MACf,CAAC;AAED,cAAQ,GAAG,SAAS,CAAC,WAAkB;AACrC,eAAO,IAAI,MAAM,cAAc,OAAO,OAAO,EAAE,CAAC;AAAA,MAClD,CAAC;AAED,cAAQ,UAAU;AAAA,IACpB,CAAC;AAAA,EACH,CAAC;AACH;AAKA,eAAe,4BACb,cACA,WAC2B;AAC3B,QAAM,MAAM,kBAAkB,YAAY;AAC1C,QAAM,UAAUA,MAAK,WAAW,YAAY;AAG5C,QAAM,WAAW,MAAM,MAAM,GAAG;AAChC,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,IAAI,MAAM,QAAQ,SAAS,MAAM,KAAK,SAAS,UAAU,QAAQ,GAAG,EAAE;AAAA,EAC9E;AAEA,MAAI,CAAC,SAAS,MAAM;AAClB,UAAM,IAAI,MAAM,wBAAwB,GAAG,EAAE;AAAA,EAC/C;AAGA,QAAM,aAAa,kBAAkB,OAAO;AAC5C,QAAM,SAAS,SAAS,QAAQ,SAAS,IAAa,GAAG,UAAU;AAGnE,QAAM,YAAY,MAAM,qBAAqB,SAAS,SAAS;AAG/D,QAAM,OAAO,OAAO;AAGpB,QAAM,QAA0B,CAAC;AACjC,aAAW,EAAE,aAAa,SAAS,KAAK,WAAW;AACjD,UAAM,WAAW,MAAM,KAAK,QAAQ;AACpC,UAAM,KAAK,EAAE,aAAa,UAAU,MAAM,SAAS,KAAK,CAAC;AAAA,EAC3D;AAGA,QAAM,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAElD,SAAO;AACT;","names":["mkdir","join","files"]}
|
package/package.json
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@lexbuild/usc",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Federal U.S. Code specific element handlers and downloader for lexbuild",
|
|
5
|
+
"author": "Chris Thomas",
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"homepage": "https://github.com/chris-c-thomas/lexbuild#readme",
|
|
8
|
+
"bugs": {
|
|
9
|
+
"url": "https://github.com/chris-c-thomas/lexbuild/issues"
|
|
10
|
+
},
|
|
11
|
+
"repository": {
|
|
12
|
+
"type": "git",
|
|
13
|
+
"url": "git+https://github.com/chris-c-thomas/lexbuild.git",
|
|
14
|
+
"directory": "packages/usc"
|
|
15
|
+
},
|
|
16
|
+
"keywords": [
|
|
17
|
+
"lexbuild",
|
|
18
|
+
"legal-tech",
|
|
19
|
+
"law",
|
|
20
|
+
"rag",
|
|
21
|
+
"llm",
|
|
22
|
+
"xml",
|
|
23
|
+
"parser",
|
|
24
|
+
"uscode",
|
|
25
|
+
"uslm",
|
|
26
|
+
"statutes",
|
|
27
|
+
"federal-law",
|
|
28
|
+
"olrc",
|
|
29
|
+
"typescript"
|
|
30
|
+
],
|
|
31
|
+
"type": "module",
|
|
32
|
+
"main": "./dist/index.js",
|
|
33
|
+
"types": "./dist/index.d.ts",
|
|
34
|
+
"exports": {
|
|
35
|
+
".": {
|
|
36
|
+
"types": "./dist/index.d.ts",
|
|
37
|
+
"import": "./dist/index.js"
|
|
38
|
+
}
|
|
39
|
+
},
|
|
40
|
+
"sideEffects": false,
|
|
41
|
+
"files": [
|
|
42
|
+
"dist"
|
|
43
|
+
],
|
|
44
|
+
"scripts": {
|
|
45
|
+
"build": "tsup",
|
|
46
|
+
"dev": "tsup --watch",
|
|
47
|
+
"typecheck": "tsc --noEmit",
|
|
48
|
+
"test": "vitest run",
|
|
49
|
+
"test:watch": "vitest",
|
|
50
|
+
"lint": "eslint src",
|
|
51
|
+
"lint:fix": "eslint src --fix"
|
|
52
|
+
},
|
|
53
|
+
"dependencies": {
|
|
54
|
+
"@lexbuild/core": "workspace:*",
|
|
55
|
+
"yauzl": "^3.2.0"
|
|
56
|
+
},
|
|
57
|
+
"devDependencies": {
|
|
58
|
+
"@types/node": "^25.3.2",
|
|
59
|
+
"@types/yauzl": "^2.10.3",
|
|
60
|
+
"tsup": "^8",
|
|
61
|
+
"typescript": "^5.8",
|
|
62
|
+
"vitest": "^3"
|
|
63
|
+
},
|
|
64
|
+
"engines": {
|
|
65
|
+
"node": ">=20"
|
|
66
|
+
},
|
|
67
|
+
"publishConfig": {
|
|
68
|
+
"access": "public"
|
|
69
|
+
}
|
|
70
|
+
}
|