@f-o-t/pdf 0.4.1 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index-3qa5wvjk.js → index-eedrjd25.js} +181 -106
- package/dist/index-eedrjd25.js.map +13 -0
- package/dist/index-ty7xfwkn.js +307 -0
- package/dist/index-ty7xfwkn.js.map +10 -0
- package/dist/{index-w5nfn63z.js → index-xbm3820d.js} +67 -304
- package/dist/index-xbm3820d.js.map +12 -0
- package/dist/index.js +4 -3
- package/dist/index.js.map +1 -1
- package/dist/plugins/editing/document.d.ts.map +1 -1
- package/dist/plugins/editing/index.js +3 -2
- package/dist/plugins/editing/index.js.map +1 -1
- package/dist/plugins/editing/page.d.ts +17 -0
- package/dist/plugins/editing/page.d.ts.map +1 -1
- package/dist/plugins/editing/types.d.ts +2 -0
- package/dist/plugins/editing/types.d.ts.map +1 -1
- package/dist/plugins/parsing/index.js +3 -2
- package/dist/plugins/parsing/index.js.map +1 -1
- package/dist/plugins/parsing/parser.d.ts.map +1 -1
- package/dist/plugins/parsing/reader.d.ts +22 -20
- package/dist/plugins/parsing/reader.d.ts.map +1 -1
- package/package.json +1 -1
- package/dist/index-3qa5wvjk.js.map +0 -13
- package/dist/index-w5nfn63z.js.map +0 -13
|
@@ -0,0 +1,307 @@
|
|
|
1
|
+
// @bun
|
|
2
|
+
// src/plugins/editing/parser.ts
|
|
3
|
+
/**
 * Locate the last `startxref` keyword and return the numeric offset
 * recorded immediately after it.
 *
 * @param {string} pdfStr - PDF file content decoded as a latin1 string.
 * @returns {number} Offset of the cross-reference section.
 * @throws {Error} If no `startxref` keyword is present.
 */
function findStartXref(pdfStr) {
  const keywordPos = pdfStr.lastIndexOf("startxref");
  if (keywordPos === -1) {
    throw new Error("Cannot find startxref in PDF");
  }
  // "startxref".length === 9; the first whitespace-delimited token after
  // the keyword is the decimal offset.
  const tail = pdfStr.slice(keywordPos + 9).trim();
  const token = tail.split(/[\r\n\s]/)[0];
  return parseInt(token, 10);
}
|
|
10
|
+
/**
 * Extract Root, Size, Info and the previous xref offset from the trailer.
 * Handles both a classic `trailer << ... >>` section and PDF 1.5+ files
 * where the trailer entries live in the xref stream object's dictionary.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @returns {{root: number, size: number, info: number|null, prevXref: number}}
 * @throws {Error} If no trailer dictionary, Root ref, or Size can be found.
 */
function parseTrailer(pdfStr) {
  const startxrefIdx = pdfStr.lastIndexOf("startxref");
  const trailerIdx = pdfStr.lastIndexOf("trailer");
  const hasClassicTrailer = trailerIdx !== -1 && trailerIdx < startxrefIdx;
  let dictStr;
  if (hasClassicTrailer) {
    dictStr = pdfStr.slice(trailerIdx, startxrefIdx);
  } else {
    // Cross-reference stream: startxref points at an object whose
    // dictionary carries the trailer entries. 4096 chars is assumed to be
    // enough to contain that dictionary.
    const xrefOffset = findStartXref(pdfStr);
    const window = pdfStr.slice(xrefOffset, xrefOffset + 4096);
    const open = window.indexOf("<<");
    if (open === -1) {
      throw new Error("Cannot find trailer or xref stream dictionary in PDF");
    }
    const close = findMatchingDictEnd(window, open);
    if (close === -1) {
      throw new Error("Cannot find end of xref stream dictionary");
    }
    dictStr = window.slice(open, close + 2);
  }
  const rootMatch = dictStr.match(/\/Root\s+(\d+)\s+\d+\s+R/);
  if (!rootMatch) {
    throw new Error("Cannot find Root ref in trailer");
  }
  const sizeMatch = dictStr.match(/\/Size\s+(\d+)/);
  if (!sizeMatch) {
    throw new Error("Cannot find Size in trailer");
  }
  const infoMatch = dictStr.match(/\/Info\s+(\d+)\s+\d+\s+R/);
  const prevMatch = dictStr.match(/\/Prev\s+(\d+)/);
  return {
    root: parseInt(rootMatch[1], 10),
    size: parseInt(sizeMatch[1], 10),
    info: infoMatch ? parseInt(infoMatch[1], 10) : null,
    // Without /Prev, fall back to the offset after startxref itself.
    prevXref: prevMatch ? parseInt(prevMatch[1], 10) : findStartXref(pdfStr)
  };
}
|
|
44
|
+
/**
 * Scan the whole file for `N 0 obj` headers and map each object number to
 * the string position right after its `obj` keyword. Only the first
 * occurrence of each number is kept, and only generation-0 objects match.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @returns {Map<number, number>} Object number -> position past `obj`.
 */
function buildObjectIndex(pdfStr) {
  const positions = new Map;
  const headerRe = /(?:^|\s)(\d+)\s+0\s+obj/gm;
  for (let hit = headerRe.exec(pdfStr); hit !== null; hit = headerRe.exec(pdfStr)) {
    const num = parseInt(hit[1], 10);
    if (!positions.has(num)) {
      positions.set(num, hit.index + hit[0].length);
    }
  }
  return positions;
}
|
|
56
|
+
/**
 * Return the inner content (between `<<` and `>>`) of object `objNum`'s
 * top-level dictionary. Uses the prebuilt index when provided, otherwise
 * falls back to a regex scan for the `N 0 obj` header.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @param {number} objNum - Target object number (generation 0 assumed).
 * @param {Map<number, number>|null|undefined} objIndex - Optional index
 *   from buildObjectIndex.
 * @returns {string} Dictionary body without the `<<`/`>>` delimiters.
 * @throws {Error} If the object or its dictionary cannot be located.
 */
function extractObjectDictContent(pdfStr, objNum, objIndex) {
  let scanFrom;
  if (objIndex) {
    const indexed = objIndex.get(objNum);
    if (indexed === undefined) {
      throw new Error(`Cannot find object ${objNum} in PDF`);
    }
    scanFrom = indexed;
  } else {
    const headerRe = new RegExp(`(?:^|\\s)${objNum}\\s+0\\s+obj`, "m");
    const header = pdfStr.match(headerRe);
    if (!header || header.index === undefined) {
      throw new Error(`Cannot find object ${objNum} in PDF`);
    }
    scanFrom = header.index + header[0].length;
  }
  const open = pdfStr.indexOf("<<", scanFrom);
  // The dictionary must start within 200 chars of the header; anything
  // further is assumed to belong to an unrelated object.
  if (open === -1 || open > scanFrom + 200) {
    throw new Error(`Cannot find dictionary start for object ${objNum}`);
  }
  const close = findMatchingDictEnd(pdfStr, open);
  if (close === -1) {
    throw new Error(`Cannot find dictionary end for object ${objNum}`);
  }
  return pdfStr.slice(open + 2, close);
}
|
|
82
|
+
/**
 * Resolve the page tree starting from the Root catalog and return the
 * object numbers of all leaf Page objects, in document order.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @param {number} rootNum - Object number of the Root catalog.
 * @param {Map<number, number>|null|undefined} objIndex - Optional object index.
 * @returns {number[]} Leaf page object numbers.
 * @throws {Error} If the catalog has no /Pages reference.
 */
function findPageObjectsIndexed(pdfStr, rootNum, objIndex) {
  const catalog = extractObjectDictContent(pdfStr, rootNum, objIndex);
  const pagesRef = catalog.match(/\/Pages\s+(\d+)\s+\d+\s+R/);
  if (!pagesRef) {
    throw new Error("Cannot find Pages ref in Root catalog");
  }
  return collectPageLeafsIndexed(pdfStr, parseInt(pagesRef[1], 10), new Set, objIndex);
}
|
|
90
|
+
/**
 * Depth-first walk of a Pages tree node, returning leaf page object
 * numbers. `visited` guards against reference cycles. A node with no
 * /Kids entry is treated as a leaf even when its /Type is not "Page".
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @param {number} objNum - Current node's object number.
 * @param {Set<number>} visited - Object numbers already walked.
 * @param {Map<number, number>|null|undefined} objIndex - Optional object index.
 * @returns {number[]} Leaf page object numbers beneath this node.
 */
function collectPageLeafsIndexed(pdfStr, objNum, visited, objIndex) {
  if (visited.has(objNum)) return [];
  visited.add(objNum);
  const dict = extractObjectDictContent(pdfStr, objNum, objIndex);
  const typeMatch = dict.match(/\/Type\s+\/(\w+)/);
  if (typeMatch?.[1] === "Page") return [objNum];
  const kidsMatch = dict.match(/\/Kids\s*\[([^\]]+)\]/);
  if (!kidsMatch) return [objNum];
  // Recurse into each `N G R` reference inside the /Kids array, in order.
  const refRegex = /(\d+)\s+\d+\s+R/g;
  const leafs = [];
  let ref;
  while ((ref = refRegex.exec(kidsMatch[1])) !== null) {
    leafs.push(...collectPageLeafsIndexed(pdfStr, parseInt(ref[1], 10), visited, objIndex));
  }
  return leafs;
}
|
|
115
|
+
/**
 * Walk a page's /Parent chain until a /MediaBox entry is found and return
 * it as [llx, lly, urx, ury]. The visited set breaks /Parent cycles.
 *
 * Fix: MediaBox coordinates may legally be negative per the PDF spec
 * (e.g. "[-10 -10 602 792]"), so each number now accepts an optional
 * leading minus sign. All previously accepted inputs still match.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @param {number} pageObjNum - Object number of the starting page.
 * @param {Map<number, number>|null|undefined} objIndex - Optional object index.
 * @returns {[number, number, number, number]} The MediaBox rectangle.
 * @throws {Error} If no ancestor carries a /MediaBox.
 */
function getMediaBox(pdfStr, pageObjNum, objIndex) {
  const visited = new Set;
  let objNum = pageObjNum;
  while (objNum !== null && !visited.has(objNum)) {
    visited.add(objNum);
    const content = extractObjectDictContent(pdfStr, objNum, objIndex);
    const mediaBoxMatch = content.match(/\/MediaBox\s*\[\s*(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s*\]/);
    if (mediaBoxMatch) {
      return [
        parseFloat(mediaBoxMatch[1]),
        parseFloat(mediaBoxMatch[2]),
        parseFloat(mediaBoxMatch[3]),
        parseFloat(mediaBoxMatch[4])
      ];
    }
    // MediaBox is inheritable: climb to the parent Pages node if present.
    const parentMatch = content.match(/\/Parent\s+(\d+)\s+\d+\s+R/);
    objNum = parentMatch ? parseInt(parentMatch[1], 10) : null;
  }
  throw new Error(`Cannot find MediaBox for page object ${pageObjNum}`);
}
|
|
135
|
+
/**
 * Parse the minimal structure needed for an incremental update: xref
 * offset, trailer entries, a prebuilt object index, and the page tree.
 *
 * @param {string} pdfStr - PDF content as a latin1 string.
 * @returns {object} Structure with xrefOffset, rootNum, infoNum, size,
 *   pagesNum, pageNums, rootDictContent, pageDictContents, objIndex.
 * @throws {Error} If the trailer, catalog, or page tree cannot be parsed.
 */
function parsePdfStructure(pdfStr) {
  const xrefOffset = findStartXref(pdfStr);
  const trailer = parseTrailer(pdfStr);
  const objIndex = buildObjectIndex(pdfStr);
  const rootDictContent = extractObjectDictContent(pdfStr, trailer.root, objIndex);
  const pagesRef = rootDictContent.match(/\/Pages\s+(\d+)\s+\d+\s+R/);
  if (!pagesRef) {
    throw new Error("Cannot find Pages ref in Root catalog");
  }
  const pageNums = findPageObjectsIndexed(pdfStr, trailer.root, objIndex);
  return {
    xrefOffset,
    rootNum: trailer.root,
    infoNum: trailer.info,
    size: trailer.size,
    pagesNum: parseInt(pagesRef[1], 10),
    pageNums,
    rootDictContent,
    pageDictContents: pageNums.map((pn) => extractObjectDictContent(pdfStr, pn, objIndex)),
    objIndex
  };
}
|
|
158
|
+
/**
 * Given the index of a `<<` token, return the index of its matching `>>`.
 * Literal strings `(...)` are skipped (honouring backslash escapes) so
 * delimiters inside them do not affect nesting depth.
 *
 * NOTE(review): hex strings `<...>` are not skipped; a hex string ending
 * right before `>>` could confuse the depth count — same as the original.
 *
 * @param {string} str - Text containing the dictionary.
 * @param {number} startPos - Index of the opening `<<`.
 * @returns {number} Index of the first `>` of the matching `>>`, or -1.
 */
function findMatchingDictEnd(str, startPos) {
  let depth = 0;
  let pos = startPos;
  const last = str.length - 1;
  while (pos < last) {
    const ch = str[pos];
    if (ch === "(") {
      // Skip a literal string, honouring backslash escapes.
      pos++;
      while (pos < str.length && str[pos] !== ")") {
        if (str[pos] === "\\") pos++;
        pos++;
      }
      pos++;
    } else if (ch === "<" && str[pos + 1] === "<") {
      depth++;
      pos += 2;
    } else if (ch === ">" && str[pos + 1] === ">") {
      depth--;
      if (depth === 0) return pos;
      pos += 2;
    } else {
      pos++;
    }
  }
  return -1;
}
|
|
184
|
+
/**
 * Given the index of a `[` token, return the index of its matching `]`.
 * Literal strings `(...)` are skipped (honouring backslash escapes) so
 * brackets inside them do not affect nesting depth.
 *
 * @param {string} str - Text containing the array.
 * @param {number} startPos - Index of the opening `[`.
 * @returns {number} Index of the matching `]`, or -1 if never closed.
 */
function findMatchingArrayEnd(str, startPos) {
  let depth = 0;
  let pos = startPos;
  while (pos < str.length) {
    const ch = str[pos];
    if (ch === "(") {
      // Skip a literal string, honouring backslash escapes.
      pos++;
      while (pos < str.length && str[pos] !== ")") {
        if (str[pos] === "\\") pos++;
        pos++;
      }
      pos++;
    } else if (ch === "[") {
      depth++;
      pos++;
    } else if (ch === "]") {
      depth--;
      if (depth === 0) return pos;
      pos++;
    } else {
      pos++;
    }
  }
  return -1;
}
|
|
210
|
+
/**
 * Read a page's /Resources as a map from resource category (e.g. "/Font")
 * to its raw serialized value. Supports an inline dictionary or an
 * indirect reference; returns {} when the page has no /Resources entry.
 *
 * @param {string} pageContent - The page object's dictionary body.
 * @param {string} pdfStr - Full PDF content (for resolving references).
 * @param {Map<number, number>|null|undefined} objIndex - Optional object index.
 * @returns {Record<string, string>} Category name -> raw value string.
 * @throws {Error} If an inline Resources dictionary is unterminated.
 */
function parseResourcesDict(pageContent, pdfStr, objIndex) {
  if (pageContent.match(/\/Resources\s*<</)) {
    // Inline dictionary: slice it out and parse its entries directly.
    const resIdx = pageContent.indexOf("/Resources");
    const resStart = pageContent.indexOf("<<", resIdx);
    const resEnd = findMatchingDictEnd(pageContent, resStart);
    if (resEnd === -1) {
      throw new Error("Cannot find end of Resources dictionary");
    }
    return parseResourceEntries(pageContent.slice(resStart + 2, resEnd));
  }
  const refMatch = pageContent.match(/\/Resources\s+(\d+)\s+\d+\s+R/);
  if (refMatch) {
    // Indirect reference: resolve the target object, then parse it.
    const target = extractObjectDictContent(pdfStr, parseInt(refMatch[1], 10), objIndex);
    return parseResourceEntries(target);
  }
  return {};
}
|
|
231
|
+
/**
 * Overlay `additions` onto `existing` resources without mutating either.
 * Missing categories are copied over; dictionary-valued categories are
 * merged entry-wise; array-valued categories (e.g. /ProcSet) keep the
 * existing value untouched.
 *
 * @param {Record<string, string>} existing - Current resource map.
 * @param {Record<string, string>} additions - Entries to merge in.
 * @returns {Record<string, string>} A new merged resource map.
 * @throws {Error} If an existing value is neither an array nor a dict.
 */
function mergeResourcesDicts(existing, additions) {
  const merged = { ...existing };
  for (const [resType, addValue] of Object.entries(additions)) {
    const current = merged[resType];
    if (!current) {
      merged[resType] = addValue;
    } else if (current.startsWith("[")) {
      // Arrays (e.g. /ProcSet) are not merged; keep the existing value.
    } else if (current.startsWith("<<")) {
      merged[resType] = mergeDictEntries(current, addValue);
    } else {
      throw new Error(`Unexpected resource format for ${resType}: ${current}`);
    }
  }
  return merged;
}
|
|
250
|
+
/**
 * Merge two serialized PDF dictionaries of `/Name N G R` entries, with
 * entries from `additions` winning on key collisions. Returns a freshly
 * serialized `<< ... >>` dictionary.
 *
 * @param {string} existing - Serialized dictionary, e.g. "<< /F1 1 0 R >>".
 * @param {string} additions - Serialized dictionary to merge in.
 * @returns {string} Merged serialized dictionary.
 */
function mergeDictEntries(existing, additions) {
  const combined = {
    ...extractDictEntries(existing),
    ...extractDictEntries(additions)
  };
  const body = Object.entries(combined)
    .map(([name, ref]) => `${name} ${ref}`)
    .join(" ");
  return `<< ${body} >>`;
}
|
|
257
|
+
/**
 * Pull `/Name N G R` style entries out of a serialized dictionary.
 * Only indirect-reference values are recognized; other value forms
 * (inline dicts, arrays, literals) are ignored by design.
 *
 * @param {string} dict - Serialized dictionary, with or without `<<`/`>>`.
 * @returns {Record<string, string>} Name -> "N G R" reference string.
 */
function extractDictEntries(dict) {
  const inner = dict.replace(/^<<\s*/, "").replace(/\s*>>$/, "");
  const refRe = /(\/[^\s<>\[\]()\/]+)\s+(\d+\s+\d+\s+R)/g;
  const entries = {};
  for (let hit = refRe.exec(inner); hit !== null; hit = refRe.exec(inner)) {
    entries[hit[1]] = hit[2];
  }
  return entries;
}
|
|
267
|
+
/**
 * Split a Resources dictionary body into per-category raw value strings.
 * Recognized categories: Font, XObject, ExtGState, ColorSpace, Pattern,
 * Shading, ProcSet. Values may be inline dictionaries or arrays; a
 * category whose value is an indirect reference is not captured here
 * (the key pattern requires the value to start with `<` or `[`).
 *
 * @param {string} content - Inner text of a Resources dictionary.
 * @returns {Record<string, string>} Category -> raw serialized value.
 * @throws {Error} If a matched dictionary or array value is unterminated.
 */
function parseResourceEntries(content) {
  const RESOURCE_KEYS = [
    "/Font",
    "/XObject",
    "/ExtGState",
    "/ColorSpace",
    "/Pattern",
    "/Shading",
    "/ProcSet"
  ];
  const result = {};
  for (const resType of RESOURCE_KEYS) {
    const pattern = new RegExp(`${resType.replace(/\//g, "\\/")}\\s+([<\\[])`);
    const match = content.match(pattern);
    if (!match) continue;
    // Step past the key name and any whitespace to the value's first char.
    let valueStart = match.index + resType.length;
    while (valueStart < content.length && /\s/.test(content[valueStart])) {
      valueStart++;
    }
    if (content[valueStart] === "<" && content[valueStart + 1] === "<") {
      const dictEnd = findMatchingDictEnd(content, valueStart);
      if (dictEnd === -1) {
        throw new Error(`Cannot find end of ${resType} dictionary`);
      }
      result[resType] = content.slice(valueStart, dictEnd + 2);
    } else if (content[valueStart] === "[") {
      const arrayEnd = findMatchingArrayEnd(content, valueStart);
      if (arrayEnd === -1) {
        throw new Error(`Cannot find end of ${resType} array`);
      }
      result[resType] = content.slice(valueStart, arrayEnd + 1);
    }
  }
  return result;
}
|
|
304
|
+
|
|
305
|
+
export { parseTrailer, buildObjectIndex, extractObjectDictContent, getMediaBox, parsePdfStructure, parseResourcesDict, mergeResourcesDicts };
|
|
306
|
+
|
|
307
|
+
//# debugId=FDAB11AF6775D4B564756E2164756E21
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../src/plugins/editing/parser.ts"],
|
|
4
|
+
"sourcesContent": [
|
|
5
|
+
"/**\n * Lightweight PDF structure parser for incremental editing\n *\n * Parses just enough of an existing PDF to enable incremental updates:\n * - Finds the cross-reference table via startxref\n * - Reads the trailer to get Root, Size, Info\n * - Follows Root -> Pages -> Kids to locate page objects\n * - Extracts MediaBox dimensions from pages\n *\n * All public functions accept a pre-decoded latin1 string (`pdfStr`) rather\n * than the raw Uint8Array so that the caller can decode ONCE and reuse the\n * string across many calls — avoiding O(pages) redundant allocations.\n */\n\n/**\n * Parsed PDF structure needed for incremental updates\n */\nexport type PdfStructure = {\n\txrefOffset: number;\n\trootNum: number;\n\tinfoNum: number | null;\n\tsize: number;\n\tpagesNum: number;\n\tpageNums: number[];\n\trootDictContent: string;\n\tpageDictContents: string[];\n\t/** Pre-built object position index for O(1) lookups */\n\tobjIndex: Map<number, number>;\n};\n\n/**\n * Find the byte offset recorded after the last `startxref` keyword\n */\nexport function findStartXref(pdfStr: string): number {\n\tconst idx = pdfStr.lastIndexOf(\"startxref\");\n\tif (idx === -1) throw new Error(\"Cannot find startxref in PDF\");\n\tconst after = pdfStr.slice(idx + 9).trim().split(/[\\r\\n\\s]/)[0];\n\treturn parseInt(after!, 10);\n}\n\n/**\n * Parse the trailer dictionary to extract Root, Size, Info, and Prev xref offset.\n *\n * Supports both traditional trailers (`trailer << ... 
>>`) and\n * cross-reference streams (PDF 1.5+) where the trailer entries live\n * inside the xref stream object dictionary.\n */\nexport function parseTrailer(pdfStr: string): {\n\troot: number;\n\tsize: number;\n\tinfo: number | null;\n\tprevXref: number;\n} {\n\tconst startxrefIdx = pdfStr.lastIndexOf(\"startxref\");\n\n\t// Try traditional trailer first\n\tconst trailerIdx = pdfStr.lastIndexOf(\"trailer\");\n\n\tlet dictStr: string;\n\n\tif (trailerIdx !== -1 && trailerIdx < startxrefIdx) {\n\t\t// Traditional trailer\n\t\tdictStr = pdfStr.slice(trailerIdx, startxrefIdx);\n\t} else {\n\t\t// Cross-reference stream (PDF 1.5+): startxref points to an object\n\t\t// whose dictionary contains the trailer entries (Root, Size, Info, etc.)\n\t\tconst xrefOffset = findStartXref(pdfStr);\n\t\tconst xrefObjStr = pdfStr.slice(xrefOffset, xrefOffset + 4096);\n\t\tconst dictStart = xrefObjStr.indexOf(\"<<\");\n\t\tif (dictStart === -1) {\n\t\t\tthrow new Error(\"Cannot find trailer or xref stream dictionary in PDF\");\n\t\t}\n\t\tconst dictEnd = findMatchingDictEnd(xrefObjStr, dictStart);\n\t\tif (dictEnd === -1) {\n\t\t\tthrow new Error(\"Cannot find end of xref stream dictionary\");\n\t\t}\n\t\tdictStr = xrefObjStr.slice(dictStart, dictEnd + 2);\n\t}\n\n\tconst rootMatch = dictStr.match(/\\/Root\\s+(\\d+)\\s+\\d+\\s+R/);\n\tif (!rootMatch) throw new Error(\"Cannot find Root ref in trailer\");\n\n\tconst sizeMatch = dictStr.match(/\\/Size\\s+(\\d+)/);\n\tif (!sizeMatch) throw new Error(\"Cannot find Size in trailer\");\n\n\tconst infoMatch = dictStr.match(/\\/Info\\s+(\\d+)\\s+\\d+\\s+R/);\n\tconst prevMatch = dictStr.match(/\\/Prev\\s+(\\d+)/);\n\n\treturn {\n\t\troot: parseInt(rootMatch[1]!, 10),\n\t\tsize: parseInt(sizeMatch[1]!, 10),\n\t\tinfo: infoMatch ? parseInt(infoMatch[1]!, 10) : null,\n\t\tprevXref: prevMatch ? 
parseInt(prevMatch[1]!, 10) : findStartXref(pdfStr),\n\t};\n}\n\n/**\n * Build an index mapping object numbers to their position (end of \"N 0 obj\" marker)\n * in a single O(fileSize) pass. Callers look up objects in O(1) instead of\n * scanning the entire string with a new regex per object.\n */\nexport function buildObjectIndex(pdfStr: string): Map<number, number> {\n\tconst index = new Map<number, number>();\n\tconst regex = /(?:^|\\s)(\\d+)\\s+0\\s+obj/gm;\n\tlet m: RegExpExecArray | null;\n\twhile ((m = regex.exec(pdfStr)) !== null) {\n\t\tconst objNum = parseInt(m[1]!, 10);\n\t\tif (!index.has(objNum)) {\n\t\t\tindex.set(objNum, m.index! + m[0].length);\n\t\t}\n\t}\n\treturn index;\n}\n\n/**\n * Extract the dictionary content (between outer << and >>) for a given object number.\n * Returns the content string without the delimiters.\n *\n * When called with a pre-built objIndex, lookups are O(1).\n * Falls back to a per-call regex search if no index is provided.\n */\nexport function extractObjectDictContent(\n\tpdfStr: string,\n\tobjNum: number,\n\tobjIndex?: Map<number, number>,\n): string {\n\tlet searchStart: number;\n\n\tif (objIndex) {\n\t\tconst pos = objIndex.get(objNum);\n\t\tif (pos === undefined) {\n\t\t\tthrow new Error(`Cannot find object ${objNum} in PDF`);\n\t\t}\n\t\tsearchStart = pos;\n\t} else {\n\t\tconst objRegex = new RegExp(`(?:^|\\\\s)${objNum}\\\\s+0\\\\s+obj`, \"m\");\n\t\tconst match = pdfStr.match(objRegex);\n\t\tif (!match || match.index === undefined) {\n\t\t\tthrow new Error(`Cannot find object ${objNum} in PDF`);\n\t\t}\n\t\tsearchStart = match.index + match[0].length;\n\t}\n\n\tconst dictStart = pdfStr.indexOf(\"<<\", searchStart);\n\tif (dictStart === -1 || dictStart > searchStart + 200) {\n\t\tthrow new Error(`Cannot find dictionary start for object ${objNum}`);\n\t}\n\n\tconst dictEnd = findMatchingDictEnd(pdfStr, dictStart);\n\tif (dictEnd === -1) {\n\t\tthrow new Error(`Cannot find dictionary end for object 
${objNum}`);\n\t}\n\n\treturn pdfStr.slice(dictStart + 2, dictEnd);\n}\n\n/**\n * Find all LEAF page object numbers by following Root -> Pages -> Kids recursively.\n *\n * The PDF page tree can be arbitrarily deep — every ~100-200 pages, PDF tools\n * create intermediate Pages nodes (Type=Pages) that group their children.\n * This function recurses until it reaches actual Page leaves (Type=Page).\n */\nexport function findPageObjects(pdfStr: string, rootNum: number): number[] {\n\tconst rootContent = extractObjectDictContent(pdfStr, rootNum);\n\tconst pagesMatch = rootContent.match(/\\/Pages\\s+(\\d+)\\s+\\d+\\s+R/);\n\tif (!pagesMatch) throw new Error(\"Cannot find Pages ref in Root catalog\");\n\tconst pagesNum = parseInt(pagesMatch[1]!, 10);\n\n\treturn collectPageLeafs(pdfStr, pagesNum, new Set());\n}\n\n/** Index-accelerated version of findPageObjects */\nfunction findPageObjectsIndexed(\n\tpdfStr: string,\n\trootNum: number,\n\tobjIndex: Map<number, number>,\n): number[] {\n\tconst rootContent = extractObjectDictContent(pdfStr, rootNum, objIndex);\n\tconst pagesMatch = rootContent.match(/\\/Pages\\s+(\\d+)\\s+\\d+\\s+R/);\n\tif (!pagesMatch) throw new Error(\"Cannot find Pages ref in Root catalog\");\n\tconst pagesNum = parseInt(pagesMatch[1]!, 10);\n\n\treturn collectPageLeafsIndexed(pdfStr, pagesNum, new Set(), objIndex);\n}\n\n/**\n * Recursively collect leaf Page object numbers from a page tree node.\n * Handles both flat trees (all Kids are Pages) and nested trees\n * (intermediate Pages nodes with their own Kids arrays).\n */\nfunction collectPageLeafs(\n\tpdfStr: string,\n\tobjNum: number,\n\tvisited: Set<number>,\n): number[] {\n\tif (visited.has(objNum)) return []; // guard against malformed circular refs\n\tvisited.add(objNum);\n\n\tconst content = extractObjectDictContent(pdfStr, objNum);\n\n\t// Distinguish leaf Page from intermediate Pages node via /Type\n\tconst typeMatch = content.match(/\\/Type\\s+\\/(\\w+)/);\n\tif (typeMatch?.[1] === 
\"Page\") {\n\t\treturn [objNum];\n\t}\n\n\t// Intermediate Pages node — recurse into each kid\n\tconst kidsMatch = content.match(/\\/Kids\\s*\\[([^\\]]+)\\]/);\n\tif (!kidsMatch) {\n\t\t// Malformed node: no Kids and not a leaf — treat as single page (best effort)\n\t\treturn [objNum];\n\t}\n\n\tconst refs: number[] = [];\n\tconst refRegex = /(\\d+)\\s+\\d+\\s+R/g;\n\tlet m: RegExpExecArray | null;\n\twhile ((m = refRegex.exec(kidsMatch[1]!)) !== null) {\n\t\trefs.push(parseInt(m[1]!, 10));\n\t}\n\n\tconst pages: number[] = [];\n\tfor (const ref of refs) {\n\t\tpages.push(...collectPageLeafs(pdfStr, ref, visited));\n\t}\n\treturn pages;\n}\n\n/** Index-accelerated version of collectPageLeafs */\nfunction collectPageLeafsIndexed(\n\tpdfStr: string,\n\tobjNum: number,\n\tvisited: Set<number>,\n\tobjIndex: Map<number, number>,\n): number[] {\n\tif (visited.has(objNum)) return [];\n\tvisited.add(objNum);\n\n\tconst content = extractObjectDictContent(pdfStr, objNum, objIndex);\n\n\tconst typeMatch = content.match(/\\/Type\\s+\\/(\\w+)/);\n\tif (typeMatch?.[1] === \"Page\") {\n\t\treturn [objNum];\n\t}\n\n\tconst kidsMatch = content.match(/\\/Kids\\s*\\[([^\\]]+)\\]/);\n\tif (!kidsMatch) {\n\t\treturn [objNum];\n\t}\n\n\tconst refs: number[] = [];\n\tconst refRegex = /(\\d+)\\s+\\d+\\s+R/g;\n\tlet m: RegExpExecArray | null;\n\twhile ((m = refRegex.exec(kidsMatch[1]!)) !== null) {\n\t\trefs.push(parseInt(m[1]!, 10));\n\t}\n\n\tconst pages: number[] = [];\n\tfor (const ref of refs) {\n\t\tpages.push(...collectPageLeafsIndexed(pdfStr, ref, visited, objIndex));\n\t}\n\treturn pages;\n}\n\n/**\n * Get the MediaBox for a page object: [x1, y1, x2, y2].\n *\n * Per the PDF spec, /MediaBox is inherited: if the Page object itself does not\n * carry the entry, we walk up the /Parent chain until we find one.\n */\nexport function getMediaBox(\n\tpdfStr: string,\n\tpageObjNum: number,\n\tobjIndex?: Map<number, number>,\n): [number, number, number, number] {\n\tconst visited = new 
Set<number>(); // guard against malformed circular refs\n\tlet objNum: number | null = pageObjNum;\n\n\twhile (objNum !== null && !visited.has(objNum)) {\n\t\tvisited.add(objNum);\n\t\tconst content = extractObjectDictContent(pdfStr, objNum, objIndex);\n\n\t\tconst mediaBoxMatch = content.match(\n\t\t\t/\\/MediaBox\\s*\\[\\s*([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)\\s*\\]/,\n\t\t);\n\t\tif (mediaBoxMatch) {\n\t\t\treturn [\n\t\t\t\tparseFloat(mediaBoxMatch[1]!),\n\t\t\t\tparseFloat(mediaBoxMatch[2]!),\n\t\t\t\tparseFloat(mediaBoxMatch[3]!),\n\t\t\t\tparseFloat(mediaBoxMatch[4]!),\n\t\t\t];\n\t\t}\n\n\t\t// Walk up the parent chain\n\t\tconst parentMatch = content.match(/\\/Parent\\s+(\\d+)\\s+\\d+\\s+R/);\n\t\tobjNum = parentMatch ? parseInt(parentMatch[1]!, 10) : null;\n\t}\n\n\tthrow new Error(`Cannot find MediaBox for page object ${pageObjNum}`);\n}\n\n/**\n * Parse the full PDF structure needed for incremental editing.\n *\n * @param pdfStr - The PDF file decoded as a latin1 string (decode ONCE and pass here)\n */\nexport function parsePdfStructure(pdfStr: string): PdfStructure {\n\tconst xrefOffset = findStartXref(pdfStr);\n\tconst trailer = parseTrailer(pdfStr);\n\n\t// Build object position index once — all subsequent lookups are O(1)\n\tconst objIndex = buildObjectIndex(pdfStr);\n\n\tconst rootContent = extractObjectDictContent(pdfStr, trailer.root, objIndex);\n\tconst pagesMatch = rootContent.match(/\\/Pages\\s+(\\d+)\\s+\\d+\\s+R/);\n\tif (!pagesMatch) throw new Error(\"Cannot find Pages ref in Root catalog\");\n\tconst pagesNum = parseInt(pagesMatch[1]!, 10);\n\n\tconst pageNums = findPageObjectsIndexed(pdfStr, trailer.root, objIndex);\n\tconst pageDictContents = pageNums.map((pn) =>\n\t\textractObjectDictContent(pdfStr, pn, objIndex),\n\t);\n\n\treturn {\n\t\txrefOffset,\n\t\trootNum: trailer.root,\n\t\tinfoNum: trailer.info,\n\t\tsize: trailer.size,\n\t\tpagesNum,\n\t\tpageNums,\n\t\trootDictContent: 
rootContent,\n\t\tpageDictContents,\n\t\tobjIndex,\n\t};\n}\n\n/**\n * Find the position of the >> that closes the dictionary starting at startPos.\n * Handles nested << >> and skips PDF string literals in parentheses.\n */\nfunction findMatchingDictEnd(str: string, startPos: number): number {\n\tlet depth = 0;\n\tlet i = startPos;\n\n\twhile (i < str.length - 1) {\n\t\tif (str[i] === \"(\") {\n\t\t\t// skip parenthesized string\n\t\t\ti++;\n\t\t\twhile (i < str.length && str[i] !== \")\") {\n\t\t\t\tif (str[i] === \"\\\\\") i++;\n\t\t\t\ti++;\n\t\t\t}\n\t\t\ti++; // skip ')'\n\t\t} else if (str[i] === \"<\" && str[i + 1] === \"<\") {\n\t\t\tdepth++;\n\t\t\ti += 2;\n\t\t} else if (str[i] === \">\" && str[i + 1] === \">\") {\n\t\t\tdepth--;\n\t\t\tif (depth === 0) return i;\n\t\t\ti += 2;\n\t\t} else {\n\t\t\ti++;\n\t\t}\n\t}\n\n\treturn -1;\n}\n\n/**\n * Find the position of the ] that closes the array starting at startPos.\n * Handles nested [ ] and skips PDF string literals in parentheses.\n */\nfunction findMatchingArrayEnd(str: string, startPos: number): number {\n\tlet depth = 0;\n\tlet i = startPos;\n\n\twhile (i < str.length) {\n\t\tif (str[i] === \"(\") {\n\t\t\t// skip parenthesized string\n\t\t\ti++;\n\t\t\twhile (i < str.length && str[i] !== \")\") {\n\t\t\t\tif (str[i] === \"\\\\\") i++;\n\t\t\t\ti++;\n\t\t\t}\n\t\t\ti++; // skip ')'\n\t\t} else if (str[i] === \"[\") {\n\t\t\tdepth++;\n\t\t\ti++;\n\t\t} else if (str[i] === \"]\") {\n\t\t\tdepth--;\n\t\t\tif (depth === 0) return i;\n\t\t\ti++;\n\t\t} else {\n\t\t\ti++;\n\t\t}\n\t}\n\n\treturn -1;\n}\n\n/**\n * Parse a Resources dictionary from page content, handling both inline\n * dictionaries and indirect references.\n *\n * Returns a map of resource type names to their dictionary/array content strings.\n * Example: { \"/Font\": \"<< /F1 10 0 R >>\", \"/ProcSet\": \"[/PDF /Text]\" }\n */\nexport function parseResourcesDict(\n\tpageContent: string,\n\tpdfStr: string,\n\tobjIndex?: Map<number, 
number>,\n): Record<string, string> {\n\tconst result: Record<string, string> = {};\n\n\t// Check for inline Resources dictionary\n\tconst inlineMatch = pageContent.match(/\\/Resources\\s*<</);\n\tif (inlineMatch) {\n\t\tconst resIdx = pageContent.indexOf(\"/Resources\");\n\t\tconst resStart = pageContent.indexOf(\"<<\", resIdx);\n\t\tconst resEnd = findMatchingDictEnd(pageContent, resStart);\n\n\t\tif (resEnd === -1) {\n\t\t\tthrow new Error(\"Cannot find end of Resources dictionary\");\n\t\t}\n\n\t\tconst resContent = pageContent.slice(resStart + 2, resEnd);\n\t\treturn parseResourceEntries(resContent);\n\t}\n\n\t// Check for indirect Resources reference\n\tconst refMatch = pageContent.match(/\\/Resources\\s+(\\d+)\\s+\\d+\\s+R/);\n\tif (refMatch) {\n\t\tconst objNum = parseInt(refMatch[1]!, 10);\n\t\tconst objContent = extractObjectDictContent(pdfStr, objNum, objIndex);\n\t\treturn parseResourceEntries(objContent);\n\t}\n\n\t// No Resources found\n\treturn result;\n}\n\n/**\n * Merge two Resources dictionaries, combining entries from both.\n *\n * For dictionary-type entries like /Font, /XObject, extracts individual\n * name-reference pairs and combines them. 
For array-type entries like\n * /ProcSet, uses the existing value (no merge needed).\n *\n * @param existing - Parsed Resources from original page\n * @param additions - New Resources to add (from signature appearance)\n * @returns Merged Resources dictionary entries\n */\nexport function mergeResourcesDicts(\n\texisting: Record<string, string>,\n\tadditions: Record<string, string>,\n): Record<string, string> {\n\tconst result = { ...existing };\n\n\tfor (const [resType, addValue] of Object.entries(additions)) {\n\t\tif (!result[resType]) {\n\t\t\t// No existing entry for this type, just add it\n\t\t\tresult[resType] = addValue;\n\t\t\tcontinue;\n\t\t}\n\n\t\tconst existingValue = result[resType]!;\n\n\t\t// Arrays (like /ProcSet) - keep existing, don't merge\n\t\tif (existingValue.startsWith(\"[\")) {\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Dictionaries - merge entries\n\t\tif (existingValue.startsWith(\"<<\")) {\n\t\t\tresult[resType] = mergeDictEntries(existingValue, addValue);\n\t\t} else {\n\t\t\tthrow new Error(`Unexpected resource format for ${resType}: ${existingValue}`);\n\t\t}\n\t}\n\n\treturn result;\n}\n\n/**\n * Merge two PDF dictionary strings by combining their name-reference pairs.\n *\n * Example:\n * existing: \"<< /F1 10 0 R /F2 11 0 R >>\"\n * additions: \"<< /SigF1 20 0 R >>\"\n * result: \"<< /F1 10 0 R /F2 11 0 R /SigF1 20 0 R >>\"\n */\nfunction mergeDictEntries(existing: string, additions: string): string {\n\t// Extract entries from both dictionaries\n\tconst existingEntries = extractDictEntries(existing);\n\tconst additionEntries = extractDictEntries(additions);\n\n\t// Combine (additions override existing if same key)\n\tconst merged = { ...existingEntries, ...additionEntries };\n\n\t// Rebuild dictionary string\n\tconst entries = Object.entries(merged)\n\t\t.map(([name, ref]) => `${name} ${ref}`)\n\t\t.join(\" \");\n\n\treturn `<< ${entries} >>`;\n}\n\n/**\n * Extract name-reference pairs from a PDF dictionary string.\n *\n * Example: \"<< 
/F1 10 0 R /F2 11 0 R >>\"\n * Returns: { \"/F1\": \"10 0 R\", \"/F2\": \"11 0 R\" }\n */\nfunction extractDictEntries(dict: string): Record<string, string> {\n\tconst entries: Record<string, string> = {};\n\n\t// Remove outer << >>\n\tconst inner = dict.replace(/^<<\\s*/, \"\").replace(/\\s*>>$/, \"\");\n\n\t// Match /Name objNum gen R patterns\n\t// Pattern matches PDF names with hyphens, dots, and hex-encoded characters\n\tconst regex = /(\\/[^\\s<>\\[\\]()\\/]+)\\s+(\\d+\\s+\\d+\\s+R)/g;\n\tlet match: RegExpExecArray | null;\n\n\twhile ((match = regex.exec(inner)) !== null) {\n\t\tentries[match[1]!] = match[2]!;\n\t}\n\n\treturn entries;\n}\n\n/**\n * Parse individual resource entries from a Resources dictionary content string.\n *\n * Extracts top-level entries like /Font, /XObject, /ExtGState, etc.\n */\nfunction parseResourceEntries(content: string): Record<string, string> {\n\tconst result: Record<string, string> = {};\n\n\t// Resource entry names to extract\n\tconst resourceTypes = [\n\t\t\"/Font\",\n\t\t\"/XObject\",\n\t\t\"/ExtGState\",\n\t\t\"/ColorSpace\",\n\t\t\"/Pattern\",\n\t\t\"/Shading\",\n\t\t\"/ProcSet\",\n\t];\n\n\tfor (const resType of resourceTypes) {\n\t\t// Use regex to match resource type at dictionary level (not nested inside values)\n\t\t// Pattern: /ResourceType followed by whitespace and then either << or [\n\t\tconst pattern = new RegExp(\n\t\t\t`${resType.replace(/\\//g, \"\\\\/\")}\\\\s+([<\\\\[])`\n\t\t);\n\t\tconst match = content.match(pattern);\n\n\t\tif (!match) continue;\n\n\t\tconst idx = match.index!;\n\n\t\t// Find the value (either << dict >> or [ array ])\n\t\tlet valueStart = idx + resType.length;\n\t\twhile (valueStart < content.length && /\\s/.test(content[valueStart]!)) {\n\t\t\tvalueStart++;\n\t\t}\n\n\t\tif (content[valueStart] === \"<\" && content[valueStart + 1] === \"<\") {\n\t\t\t// Dictionary value\n\t\t\tconst dictEnd = findMatchingDictEnd(content, valueStart);\n\t\t\tif (dictEnd === -1) {\n\t\t\t\tthrow new 
Error(\n\t\t\t\t\t`Cannot find end of ${resType} dictionary`\n\t\t\t\t);\n\t\t\t}\n\t\t\tresult[resType] = content.slice(valueStart, dictEnd + 2);\n\t\t} else if (content[valueStart] === \"[\") {\n\t\t\t// Array value\n\t\t\tconst arrayEnd = findMatchingArrayEnd(content, valueStart);\n\t\t\tif (arrayEnd === -1) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Cannot find end of ${resType} array`\n\t\t\t\t);\n\t\t\t}\n\t\t\tresult[resType] = content.slice(valueStart, arrayEnd + 1);\n\t\t}\n\t}\n\n\treturn result;\n}\n"
|
|
6
|
+
],
|
|
7
|
+
"mappings": ";;AAiCO,SAAS,aAAa,CAAC,QAAwB;AAAA,EACrD,MAAM,MAAM,OAAO,YAAY,WAAW;AAAA,EAC1C,IAAI,QAAQ;AAAA,IAAI,MAAM,IAAI,MAAM,8BAA8B;AAAA,EAC9D,MAAM,QAAQ,OAAO,MAAM,MAAM,CAAC,EAAE,KAAK,EAAE,MAAM,UAAU,EAAE;AAAA,EAC7D,OAAO,SAAS,OAAQ,EAAE;AAAA;AAUpB,SAAS,YAAY,CAAC,QAK3B;AAAA,EACD,MAAM,eAAe,OAAO,YAAY,WAAW;AAAA,EAGnD,MAAM,aAAa,OAAO,YAAY,SAAS;AAAA,EAE/C,IAAI;AAAA,EAEJ,IAAI,eAAe,MAAM,aAAa,cAAc;AAAA,IAEnD,UAAU,OAAO,MAAM,YAAY,YAAY;AAAA,EAChD,EAAO;AAAA,IAGN,MAAM,aAAa,cAAc,MAAM;AAAA,IACvC,MAAM,aAAa,OAAO,MAAM,YAAY,aAAa,IAAI;AAAA,IAC7D,MAAM,YAAY,WAAW,QAAQ,IAAI;AAAA,IACzC,IAAI,cAAc,IAAI;AAAA,MACrB,MAAM,IAAI,MAAM,sDAAsD;AAAA,IACvE;AAAA,IACA,MAAM,UAAU,oBAAoB,YAAY,SAAS;AAAA,IACzD,IAAI,YAAY,IAAI;AAAA,MACnB,MAAM,IAAI,MAAM,2CAA2C;AAAA,IAC5D;AAAA,IACA,UAAU,WAAW,MAAM,WAAW,UAAU,CAAC;AAAA;AAAA,EAGlD,MAAM,YAAY,QAAQ,MAAM,0BAA0B;AAAA,EAC1D,IAAI,CAAC;AAAA,IAAW,MAAM,IAAI,MAAM,iCAAiC;AAAA,EAEjE,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,EAChD,IAAI,CAAC;AAAA,IAAW,MAAM,IAAI,MAAM,6BAA6B;AAAA,EAE7D,MAAM,YAAY,QAAQ,MAAM,0BAA0B;AAAA,EAC1D,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,EAEhD,OAAO;AAAA,IACN,MAAM,SAAS,UAAU,IAAK,EAAE;AAAA,IAChC,MAAM,SAAS,UAAU,IAAK,EAAE;AAAA,IAChC,MAAM,YAAY,SAAS,UAAU,IAAK,EAAE,IAAI;AAAA,IAChD,UAAU,YAAY,SAAS,UAAU,IAAK,EAAE,IAAI,cAAc,MAAM;AAAA,EACzE;AAAA;AAQM,SAAS,gBAAgB,CAAC,QAAqC;AAAA,EACrE,MAAM,QAAQ,IAAI;AAAA,EAClB,MAAM,QAAQ;AAAA,EACd,IAAI;AAAA,EACJ,QAAQ,IAAI,MAAM,KAAK,MAAM,OAAO,MAAM;AAAA,IACzC,MAAM,SAAS,SAAS,EAAE,IAAK,EAAE;AAAA,IACjC,IAAI,CAAC,MAAM,IAAI,MAAM,GAAG;AAAA,MACvB,MAAM,IAAI,QAAQ,EAAE,QAAS,EAAE,GAAG,MAAM;AAAA,IACzC;AAAA,EACD;AAAA,EACA,OAAO;AAAA;AAUD,SAAS,wBAAwB,CACvC,QACA,QACA,UACS;AAAA,EACT,IAAI;AAAA,EAEJ,IAAI,UAAU;AAAA,IACb,MAAM,MAAM,SAAS,IAAI,MAAM;AAAA,IAC/B,IAAI,QAAQ,WAAW;AAAA,MACtB,MAAM,IAAI,MAAM,sBAAsB,eAAe;AAAA,IACtD;AAAA,IACA,cAAc;AAAA,EACf,EAAO;AAAA,IACN,MAAM,WAAW,IAAI,OAAO,YAAY,sBAAsB,GAAG;AAAA,IACjE,MAAM,QAAQ,OAAO,MAAM,QAAQ;AAAA,IACnC,IAAI,CAAC,SAAS,MAAM,UAAU,WAAW;AAAA,MACxC,MAAM,IAAI,MAAM,sBAAsB,eAAe;AAAA,IACtD;AAAA,IACA,cAAc,MAAM,QAAQ,MAAM,GAAG;AAAA;AAAA,EAGtC,MAAM,YAAY,OAAO,QAA
Q,MAAM,WAAW;AAAA,EAClD,IAAI,cAAc,MAAM,YAAY,cAAc,KAAK;AAAA,IACtD,MAAM,IAAI,MAAM,2CAA2C,QAAQ;AAAA,EACpE;AAAA,EAEA,MAAM,UAAU,oBAAoB,QAAQ,SAAS;AAAA,EACrD,IAAI,YAAY,IAAI;AAAA,IACnB,MAAM,IAAI,MAAM,yCAAyC,QAAQ;AAAA,EAClE;AAAA,EAEA,OAAO,OAAO,MAAM,YAAY,GAAG,OAAO;AAAA;AAoB3C,SAAS,sBAAsB,CAC9B,QACA,SACA,UACW;AAAA,EACX,MAAM,cAAc,yBAAyB,QAAQ,SAAS,QAAQ;AAAA,EACtE,MAAM,aAAa,YAAY,MAAM,2BAA2B;AAAA,EAChE,IAAI,CAAC;AAAA,IAAY,MAAM,IAAI,MAAM,uCAAuC;AAAA,EACxE,MAAM,WAAW,SAAS,WAAW,IAAK,EAAE;AAAA,EAE5C,OAAO,wBAAwB,QAAQ,UAAU,IAAI,KAAO,QAAQ;AAAA;AA8CrE,SAAS,uBAAuB,CAC/B,QACA,QACA,SACA,UACW;AAAA,EACX,IAAI,QAAQ,IAAI,MAAM;AAAA,IAAG,OAAO,CAAC;AAAA,EACjC,QAAQ,IAAI,MAAM;AAAA,EAElB,MAAM,UAAU,yBAAyB,QAAQ,QAAQ,QAAQ;AAAA,EAEjE,MAAM,YAAY,QAAQ,MAAM,kBAAkB;AAAA,EAClD,IAAI,YAAY,OAAO,QAAQ;AAAA,IAC9B,OAAO,CAAC,MAAM;AAAA,EACf;AAAA,EAEA,MAAM,YAAY,QAAQ,MAAM,uBAAuB;AAAA,EACvD,IAAI,CAAC,WAAW;AAAA,IACf,OAAO,CAAC,MAAM;AAAA,EACf;AAAA,EAEA,MAAM,OAAiB,CAAC;AAAA,EACxB,MAAM,WAAW;AAAA,EACjB,IAAI;AAAA,EACJ,QAAQ,IAAI,SAAS,KAAK,UAAU,EAAG,OAAO,MAAM;AAAA,IACnD,KAAK,KAAK,SAAS,EAAE,IAAK,EAAE,CAAC;AAAA,EAC9B;AAAA,EAEA,MAAM,QAAkB,CAAC;AAAA,EACzB,WAAW,OAAO,MAAM;AAAA,IACvB,MAAM,KAAK,GAAG,wBAAwB,QAAQ,KAAK,SAAS,QAAQ,CAAC;AAAA,EACtE;AAAA,EACA,OAAO;AAAA;AASD,SAAS,WAAW,CAC1B,QACA,YACA,UACmC;AAAA,EACnC,MAAM,UAAU,IAAI;AAAA,EACpB,IAAI,SAAwB;AAAA,EAE5B,OAAO,WAAW,QAAQ,CAAC,QAAQ,IAAI,MAAM,GAAG;AAAA,IAC/C,QAAQ,IAAI,MAAM;AAAA,IAClB,MAAM,UAAU,yBAAyB,QAAQ,QAAQ,QAAQ;AAAA,IAEjE,MAAM,gBAAgB,QAAQ,MAC7B,kEACD;AAAA,IACA,IAAI,eAAe;AAAA,MAClB,OAAO;AAAA,QACN,WAAW,cAAc,EAAG;AAAA,QAC5B,WAAW,cAAc,EAAG;AAAA,QAC5B,WAAW,cAAc,EAAG;AAAA,QAC5B,WAAW,cAAc,EAAG;AAAA,MAC7B;AAAA,IACD;AAAA,IAGA,MAAM,cAAc,QAAQ,MAAM,4BAA4B;AAAA,IAC9D,SAAS,cAAc,SAAS,YAAY,IAAK,EAAE,IAAI;AAAA,EACxD;AAAA,EAEA,MAAM,IAAI,MAAM,wCAAwC,YAAY;AAAA;AAQ9D,SAAS,iBAAiB,CAAC,QAA8B;AAAA,EAC/D,MAAM,aAAa,cAAc,MAAM;AAAA,EACvC,MAAM,UAAU,aAAa,MAAM;AAAA,EAGnC,MAAM,WAAW,iBAAiB,MAAM;AAAA,EAExC,MAAM,cAAc,yBAAyB,QAAQ,QAAQ,MAAM,QAAQ;AAAA,EAC3E,MAAM,aAAa,YAAY,MAAM,2BAA2B;AAAA,EAChE,IAAI,CAAC;AAAA,IAAY,MAAM,IAAI,MAAM
,uCAAuC;AAAA,EACxE,MAAM,WAAW,SAAS,WAAW,IAAK,EAAE;AAAA,EAE5C,MAAM,WAAW,uBAAuB,QAAQ,QAAQ,MAAM,QAAQ;AAAA,EACtE,MAAM,mBAAmB,SAAS,IAAI,CAAC,OACtC,yBAAyB,QAAQ,IAAI,QAAQ,CAC9C;AAAA,EAEA,OAAO;AAAA,IACN;AAAA,IACA,SAAS,QAAQ;AAAA,IACjB,SAAS,QAAQ;AAAA,IACjB,MAAM,QAAQ;AAAA,IACd;AAAA,IACA;AAAA,IACA,iBAAiB;AAAA,IACjB;AAAA,IACA;AAAA,EACD;AAAA;AAOD,SAAS,mBAAmB,CAAC,KAAa,UAA0B;AAAA,EACnE,IAAI,QAAQ;AAAA,EACZ,IAAI,IAAI;AAAA,EAER,OAAO,IAAI,IAAI,SAAS,GAAG;AAAA,IAC1B,IAAI,IAAI,OAAO,KAAK;AAAA,MAEnB;AAAA,MACA,OAAO,IAAI,IAAI,UAAU,IAAI,OAAO,KAAK;AAAA,QACxC,IAAI,IAAI,OAAO;AAAA,UAAM;AAAA,QACrB;AAAA,MACD;AAAA,MACA;AAAA,IACD,EAAO,SAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AAAA,MAChD;AAAA,MACA,KAAK;AAAA,IACN,EAAO,SAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AAAA,MAChD;AAAA,MACA,IAAI,UAAU;AAAA,QAAG,OAAO;AAAA,MACxB,KAAK;AAAA,IACN,EAAO;AAAA,MACN;AAAA;AAAA,EAEF;AAAA,EAEA,OAAO;AAAA;AAOR,SAAS,oBAAoB,CAAC,KAAa,UAA0B;AAAA,EACpE,IAAI,QAAQ;AAAA,EACZ,IAAI,IAAI;AAAA,EAER,OAAO,IAAI,IAAI,QAAQ;AAAA,IACtB,IAAI,IAAI,OAAO,KAAK;AAAA,MAEnB;AAAA,MACA,OAAO,IAAI,IAAI,UAAU,IAAI,OAAO,KAAK;AAAA,QACxC,IAAI,IAAI,OAAO;AAAA,UAAM;AAAA,QACrB;AAAA,MACD;AAAA,MACA;AAAA,IACD,EAAO,SAAI,IAAI,OAAO,KAAK;AAAA,MAC1B;AAAA,MACA;AAAA,IACD,EAAO,SAAI,IAAI,OAAO,KAAK;AAAA,MAC1B;AAAA,MACA,IAAI,UAAU;AAAA,QAAG,OAAO;AAAA,MACxB;AAAA,IACD,EAAO;AAAA,MACN;AAAA;AAAA,EAEF;AAAA,EAEA,OAAO;AAAA;AAUD,SAAS,kBAAkB,CACjC,aACA,QACA,UACyB;AAAA,EACzB,MAAM,SAAiC,CAAC;AAAA,EAGxC,MAAM,cAAc,YAAY,MAAM,kBAAkB;AAAA,EACxD,IAAI,aAAa;AAAA,IAChB,MAAM,SAAS,YAAY,QAAQ,YAAY;AAAA,IAC/C,MAAM,WAAW,YAAY,QAAQ,MAAM,MAAM;AAAA,IACjD,MAAM,SAAS,oBAAoB,aAAa,QAAQ;AAAA,IAExD,IAAI,WAAW,IAAI;AAAA,MAClB,MAAM,IAAI,MAAM,yCAAyC;AAAA,IAC1D;AAAA,IAEA,MAAM,aAAa,YAAY,MAAM,WAAW,GAAG,MAAM;AAAA,IACzD,OAAO,qBAAqB,UAAU;AAAA,EACvC;AAAA,EAGA,MAAM,WAAW,YAAY,MAAM,+BAA+B;AAAA,EAClE,IAAI,UAAU;AAAA,IACb,MAAM,SAAS,SAAS,SAAS,IAAK,EAAE;AAAA,IACxC,MAAM,aAAa,yBAAyB,QAAQ,QAAQ,QAAQ;AAAA,IACpE,OAAO,qBAAqB,UAAU;AAAA,EACvC;AAAA,EAGA,OAAO;AAAA;AAcD,SAAS,mBAAmB,CAClC,UACA,WACyB;AAAA,EACzB,MAAM,SAAS,KAAK,SAAS;AAAA,EAE7B,YAAY,SAAS,aAAa
,OAAO,QAAQ,SAAS,GAAG;AAAA,IAC5D,IAAI,CAAC,OAAO,UAAU;AAAA,MAErB,OAAO,WAAW;AAAA,MAClB;AAAA,IACD;AAAA,IAEA,MAAM,gBAAgB,OAAO;AAAA,IAG7B,IAAI,cAAc,WAAW,GAAG,GAAG;AAAA,MAClC;AAAA,IACD;AAAA,IAGA,IAAI,cAAc,WAAW,IAAI,GAAG;AAAA,MACnC,OAAO,WAAW,iBAAiB,eAAe,QAAQ;AAAA,IAC3D,EAAO;AAAA,MACN,MAAM,IAAI,MAAM,kCAAkC,YAAY,eAAe;AAAA;AAAA,EAE/E;AAAA,EAEA,OAAO;AAAA;AAWR,SAAS,gBAAgB,CAAC,UAAkB,WAA2B;AAAA,EAEtE,MAAM,kBAAkB,mBAAmB,QAAQ;AAAA,EACnD,MAAM,kBAAkB,mBAAmB,SAAS;AAAA,EAGpD,MAAM,SAAS,KAAK,oBAAoB,gBAAgB;AAAA,EAGxD,MAAM,UAAU,OAAO,QAAQ,MAAM,EACnC,IAAI,EAAE,MAAM,SAAS,GAAG,QAAQ,KAAK,EACrC,KAAK,GAAG;AAAA,EAEV,OAAO,MAAM;AAAA;AASd,SAAS,kBAAkB,CAAC,MAAsC;AAAA,EACjE,MAAM,UAAkC,CAAC;AAAA,EAGzC,MAAM,QAAQ,KAAK,QAAQ,UAAU,EAAE,EAAE,QAAQ,UAAU,EAAE;AAAA,EAI7D,MAAM,QAAQ;AAAA,EACd,IAAI;AAAA,EAEJ,QAAQ,QAAQ,MAAM,KAAK,KAAK,OAAO,MAAM;AAAA,IAC5C,QAAQ,MAAM,MAAO,MAAM;AAAA,EAC5B;AAAA,EAEA,OAAO;AAAA;AAQR,SAAS,oBAAoB,CAAC,SAAyC;AAAA,EACtE,MAAM,SAAiC,CAAC;AAAA,EAGxC,MAAM,gBAAgB;AAAA,IACrB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACD;AAAA,EAEA,WAAW,WAAW,eAAe;AAAA,IAGpC,MAAM,UAAU,IAAI,OACnB,GAAG,QAAQ,QAAQ,OAAO,KAAK,eAChC;AAAA,IACA,MAAM,QAAQ,QAAQ,MAAM,OAAO;AAAA,IAEnC,IAAI,CAAC;AAAA,MAAO;AAAA,IAEZ,MAAM,MAAM,MAAM;AAAA,IAGlB,IAAI,aAAa,MAAM,QAAQ;AAAA,IAC/B,OAAO,aAAa,QAAQ,UAAU,KAAK,KAAK,QAAQ,WAAY,GAAG;AAAA,MACtE;AAAA,IACD;AAAA,IAEA,IAAI,QAAQ,gBAAgB,OAAO,QAAQ,aAAa,OAAO,KAAK;AAAA,MAEnE,MAAM,UAAU,oBAAoB,SAAS,UAAU;AAAA,MACvD,IAAI,YAAY,IAAI;AAAA,QACnB,MAAM,IAAI,MACT,sBAAsB,oBACvB;AAAA,MACD;AAAA,MACA,OAAO,WAAW,QAAQ,MAAM,YAAY,UAAU,CAAC;AAAA,IACxD,EAAO,SAAI,QAAQ,gBAAgB,KAAK;AAAA,MAEvC,MAAM,WAAW,qBAAqB,SAAS,UAAU;AAAA,MACzD,IAAI,aAAa,IAAI;AAAA,QACpB,MAAM,IAAI,MACT,sBAAsB,eACvB;AAAA,MACD;AAAA,MACA,OAAO,WAAW,QAAQ,MAAM,YAAY,WAAW,CAAC;AAAA,IACzD;AAAA,EACD;AAAA,EAEA,OAAO;AAAA;",
|
|
8
|
+
"debugId": "FDAB11AF6775D4B564756E2164756E21",
|
|
9
|
+
"names": []
|
|
10
|
+
}
|