@vaadin-component-factory/vcf-pdf-viewer 0.9.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +42 -26
- package/{src/display → pdfjs/dist}/display_utils.js +344 -139
- package/{src/display → pdfjs/dist}/fetch_stream.js +115 -97
- package/pdfjs/dist/l10n_utils.js +140 -0
- package/{src/shared → pdfjs/dist}/message_handler.js +243 -259
- package/{src/display → pdfjs/dist}/network.js +149 -87
- package/{src/display/content_disposition.js → pdfjs/dist/network_utils.js} +167 -55
- package/{src/display → pdfjs/dist}/node_stream.js +133 -98
- package/pdfjs/dist/pdf.js +12778 -0
- package/pdfjs/dist/pdf_link_service.js +638 -0
- package/pdfjs/dist/pdf_rendering_queue.js +199 -0
- package/pdfjs/dist/pdf_thumbnail_viewer.js +819 -0
- package/pdfjs/dist/pdf_viewer.js +3598 -0
- package/pdfjs/dist/ui_utils.js +1033 -0
- package/{src/shared → pdfjs/dist}/util.js +301 -287
- package/pdfjs/dist/worker.js +62813 -0
- package/src/vcf-pdf-viewer.js +98 -46
- package/theme/lumo/vcf-pdf-viewer-styles.js +4 -4
- package/theme/material/vcf-pdf-viewer-styles.js +4 -4
- package/theme/material/vcf-pdf-viewer.js +2 -2
- package/src/core/.eslintrc +0 -13
- package/src/core/annotation.js +0 -2948
- package/src/core/arithmetic_decoder.js +0 -182
- package/src/core/ascii_85_stream.js +0 -98
- package/src/core/ascii_hex_stream.js +0 -79
- package/src/core/base_stream.js +0 -110
- package/src/core/bidi.js +0 -438
- package/src/core/calibri_factors.js +0 -308
- package/src/core/catalog.js +0 -1459
- package/src/core/ccitt.js +0 -1062
- package/src/core/ccitt_stream.js +0 -60
- package/src/core/cff_font.js +0 -116
- package/src/core/cff_parser.js +0 -1949
- package/src/core/charsets.js +0 -119
- package/src/core/chunked_stream.js +0 -557
- package/src/core/cmap.js +0 -1039
- package/src/core/colorspace.js +0 -1533
- package/src/core/core_utils.js +0 -464
- package/src/core/crypto.js +0 -1900
- package/src/core/decode_stream.js +0 -170
- package/src/core/decrypt_stream.js +0 -59
- package/src/core/default_appearance.js +0 -99
- package/src/core/document.js +0 -1456
- package/src/core/encodings.js +0 -301
- package/src/core/evaluator.js +0 -4601
- package/src/core/file_spec.js +0 -108
- package/src/core/flate_stream.js +0 -402
- package/src/core/font_renderer.js +0 -882
- package/src/core/fonts.js +0 -3260
- package/src/core/fonts_utils.js +0 -221
- package/src/core/function.js +0 -1257
- package/src/core/glyf.js +0 -706
- package/src/core/glyphlist.js +0 -4558
- package/src/core/helvetica_factors.js +0 -353
- package/src/core/image.js +0 -802
- package/src/core/image_utils.js +0 -291
- package/src/core/jbig2.js +0 -2572
- package/src/core/jbig2_stream.js +0 -73
- package/src/core/jpeg_stream.js +0 -105
- package/src/core/jpg.js +0 -1416
- package/src/core/jpx.js +0 -2343
- package/src/core/jpx_stream.js +0 -87
- package/src/core/liberationsans_widths.js +0 -221
- package/src/core/lzw_stream.js +0 -150
- package/src/core/metadata_parser.js +0 -146
- package/src/core/metrics.js +0 -2970
- package/src/core/murmurhash3.js +0 -139
- package/src/core/myriadpro_factors.js +0 -290
- package/src/core/name_number_tree.js +0 -153
- package/src/core/object_loader.js +0 -149
- package/src/core/opentype_file_builder.js +0 -154
- package/src/core/operator_list.js +0 -734
- package/src/core/parser.js +0 -1416
- package/src/core/pattern.js +0 -985
- package/src/core/pdf_manager.js +0 -217
- package/src/core/predictor_stream.js +0 -238
- package/src/core/primitives.js +0 -402
- package/src/core/ps_parser.js +0 -272
- package/src/core/run_length_stream.js +0 -61
- package/src/core/segoeui_factors.js +0 -308
- package/src/core/standard_fonts.js +0 -817
- package/src/core/stream.js +0 -103
- package/src/core/struct_tree.js +0 -335
- package/src/core/to_unicode_map.js +0 -103
- package/src/core/type1_font.js +0 -421
- package/src/core/type1_parser.js +0 -776
- package/src/core/unicode.js +0 -1649
- package/src/core/worker.js +0 -848
- package/src/core/worker_stream.js +0 -135
- package/src/core/writer.js +0 -278
- package/src/core/xfa/bind.js +0 -652
- package/src/core/xfa/builder.js +0 -207
- package/src/core/xfa/config.js +0 -1926
- package/src/core/xfa/connection_set.js +0 -202
- package/src/core/xfa/data.js +0 -82
- package/src/core/xfa/datasets.js +0 -76
- package/src/core/xfa/factory.js +0 -111
- package/src/core/xfa/fonts.js +0 -181
- package/src/core/xfa/formcalc_lexer.js +0 -385
- package/src/core/xfa/formcalc_parser.js +0 -1340
- package/src/core/xfa/html_utils.js +0 -639
- package/src/core/xfa/layout.js +0 -383
- package/src/core/xfa/locale_set.js +0 -345
- package/src/core/xfa/namespaces.js +0 -81
- package/src/core/xfa/parser.js +0 -184
- package/src/core/xfa/setup.js +0 -38
- package/src/core/xfa/signature.js +0 -40
- package/src/core/xfa/som.js +0 -338
- package/src/core/xfa/stylesheet.js +0 -40
- package/src/core/xfa/template.js +0 -6260
- package/src/core/xfa/text.js +0 -290
- package/src/core/xfa/unknown.js +0 -29
- package/src/core/xfa/utils.js +0 -217
- package/src/core/xfa/xdp.js +0 -59
- package/src/core/xfa/xfa_object.js +0 -1130
- package/src/core/xfa/xhtml.js +0 -543
- package/src/core/xfa_fonts.js +0 -208
- package/src/core/xml_parser.js +0 -507
- package/src/core/xref.js +0 -899
- package/src/display/annotation_layer.js +0 -2107
- package/src/display/annotation_storage.js +0 -113
- package/src/display/api.js +0 -3292
- package/src/display/base_factory.js +0 -180
- package/src/display/canvas.js +0 -2828
- package/src/display/font_loader.js +0 -484
- package/src/display/metadata.js +0 -41
- package/src/display/network_utils.js +0 -100
- package/src/display/node_utils.js +0 -83
- package/src/display/optional_content_config.js +0 -189
- package/src/display/pattern_helper.js +0 -659
- package/src/display/svg.js +0 -1709
- package/src/display/text_layer.js +0 -847
- package/src/display/transport_stream.js +0 -303
- package/src/display/worker_options.js +0 -40
- package/src/display/xfa_layer.js +0 -204
- package/src/doc_helper.js +0 -25
- package/src/images/logo.svg +0 -41
- package/src/interfaces.js +0 -169
- package/src/license_header.js +0 -14
- package/src/license_header_libre.js +0 -21
- package/src/pdf.image_decoders.js +0 -46
- package/src/pdf.js +0 -146
- package/src/pdf.sandbox.external.js +0 -181
- package/src/pdf.sandbox.js +0 -151
- package/src/pdf.scripting.js +0 -25
- package/src/pdf.worker.entry.js +0 -19
- package/src/pdf.worker.js +0 -23
- package/src/scripting_api/aform.js +0 -608
- package/src/scripting_api/app.js +0 -621
- package/src/scripting_api/color.js +0 -129
- package/src/scripting_api/common.js +0 -58
- package/src/scripting_api/console.js +0 -38
- package/src/scripting_api/constants.js +0 -208
- package/src/scripting_api/doc.js +0 -1195
- package/src/scripting_api/error.js +0 -23
- package/src/scripting_api/event.js +0 -232
- package/src/scripting_api/field.js +0 -620
- package/src/scripting_api/fullscreen.js +0 -145
- package/src/scripting_api/initialization.js +0 -223
- package/src/scripting_api/pdf_object.js +0 -24
- package/src/scripting_api/print_params.js +0 -146
- package/src/scripting_api/proxy.js +0 -139
- package/src/scripting_api/thermometer.js +0 -69
- package/src/scripting_api/util.js +0 -581
- package/src/shared/.eslintrc +0 -13
- package/src/shared/cffStandardStrings.js +0 -311
- package/src/shared/compatibility.js +0 -114
- package/src/shared/fonts_utils.js +0 -429
- package/src/shared/is_node.js +0 -27
- package/src/shared/scripting_utils.js +0 -85
- package/src/worker_loader.js +0 -32
package/src/core/xref.js
DELETED
@@ -1,899 +0,0 @@
/* Copyright 2021 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import {
  assert,
  bytesToString,
  FormatError,
  info,
  InvalidPDFException,
  warn,
} from "../shared/util.js";
import {
  Cmd,
  Dict,
  isCmd,
  isDict,
  isRef,
  isStream,
  Ref,
} from "./primitives.js";
import { Lexer, Parser } from "./parser.js";
import {
  MissingDataException,
  ParserEOFException,
  XRefEntryException,
  XRefParseException,
} from "./core_utils.js";
import { CipherTransformFactory } from "./crypto.js";

class XRef {
  constructor(stream, pdfManager) {
    this.stream = stream;
    this.pdfManager = pdfManager;
    this.entries = [];
    this.xrefstms = Object.create(null);
    this._cacheMap = new Map(); // Prepare the XRef cache.
    this.stats = {
      streamTypes: Object.create(null),
      fontTypes: Object.create(null),
    };
    this._newRefNum = null;
  }

  getNewRef() {
    if (this._newRefNum === null) {
      this._newRefNum = this.entries.length;
    }
    return Ref.get(this._newRefNum++, 0);
  }

  resetNewRef() {
    this._newRefNum = null;
  }

  setStartXRef(startXRef) {
    // Store the starting positions of xref tables as we process them
    // so we can recover from missing data errors
    this.startXRefQueue = [startXRef];
  }

  parse(recoveryMode = false) {
    let trailerDict;
    if (!recoveryMode) {
      trailerDict = this.readXRef();
    } else {
      warn("Indexing all PDF objects");
      trailerDict = this.indexObjects();
    }
    trailerDict.assignXref(this);
    this.trailer = trailerDict;

    let encrypt;
    try {
      encrypt = trailerDict.get("Encrypt");
    } catch (ex) {
      if (ex instanceof MissingDataException) {
        throw ex;
      }
      warn(`XRef.parse - Invalid "Encrypt" reference: "${ex}".`);
    }
    if (isDict(encrypt)) {
      const ids = trailerDict.get("ID");
      const fileId = ids && ids.length ? ids[0] : "";
      // The 'Encrypt' dictionary itself should not be encrypted, and by
      // setting `suppressEncryption` we can prevent an infinite loop inside
      // of `XRef_fetchUncompressed` if the dictionary contains indirect
      // objects (fixes issue7665.pdf).
      encrypt.suppressEncryption = true;
      this.encrypt = new CipherTransformFactory(
        encrypt,
        fileId,
        this.pdfManager.password
      );
    }

    // Get the root dictionary (catalog) object, and do some basic validation.
    let root;
    try {
      root = trailerDict.get("Root");
    } catch (ex) {
      if (ex instanceof MissingDataException) {
        throw ex;
      }
      warn(`XRef.parse - Invalid "Root" reference: "${ex}".`);
    }
    if (isDict(root) && root.has("Pages")) {
      this.root = root;
    } else {
      if (!recoveryMode) {
        throw new XRefParseException();
      }
      throw new FormatError("Invalid root reference");
    }
  }

  processXRefTable(parser) {
    if (!("tableState" in this)) {
      // Stores state of the table as we process it so we can resume
      // from middle of table in case of missing data error
      this.tableState = {
        entryNum: 0,
        streamPos: parser.lexer.stream.pos,
        parserBuf1: parser.buf1,
        parserBuf2: parser.buf2,
      };
    }

    const obj = this.readXRefTable(parser);

    // Sanity check
    if (!isCmd(obj, "trailer")) {
      throw new FormatError(
        "Invalid XRef table: could not find trailer dictionary"
      );
    }
    // Read trailer dictionary, e.g.
    // trailer
    //    << /Size 22
    //      /Root 20R
    //      /Info 10R
    //      /ID [ <81b14aafa313db63dbd6f981e49f94f4> ]
    //    >>
    // The parser goes through the entire stream << ... >> and provides
    // a getter interface for the key-value table
    let dict = parser.getObj();

    // The pdflib PDF generator can generate a nested trailer dictionary
    if (!isDict(dict) && dict.dict) {
      dict = dict.dict;
    }
    if (!isDict(dict)) {
      throw new FormatError(
        "Invalid XRef table: could not parse trailer dictionary"
      );
    }
    delete this.tableState;

    return dict;
  }

  readXRefTable(parser) {
    // Example of cross-reference table:
    // xref
    // 0 1                    <-- subsection header (first obj #, obj count)
    // 0000000000 65535 f     <-- actual object (offset, generation #, f/n)
    // 23 2                   <-- subsection header ... and so on ...
    // 0000025518 00002 n
    // 0000025635 00000 n
    // trailer
    // ...

    const stream = parser.lexer.stream;
    const tableState = this.tableState;
    stream.pos = tableState.streamPos;
    parser.buf1 = tableState.parserBuf1;
    parser.buf2 = tableState.parserBuf2;

    // Outer loop is over subsection headers
    let obj;

    while (true) {
      if (!("firstEntryNum" in tableState) || !("entryCount" in tableState)) {
        if (isCmd((obj = parser.getObj()), "trailer")) {
          break;
        }
        tableState.firstEntryNum = obj;
        tableState.entryCount = parser.getObj();
      }

      let first = tableState.firstEntryNum;
      const count = tableState.entryCount;
      if (!Number.isInteger(first) || !Number.isInteger(count)) {
        throw new FormatError(
          "Invalid XRef table: wrong types in subsection header"
        );
      }
      // Inner loop is over objects themselves
      for (let i = tableState.entryNum; i < count; i++) {
        tableState.streamPos = stream.pos;
        tableState.entryNum = i;
        tableState.parserBuf1 = parser.buf1;
        tableState.parserBuf2 = parser.buf2;

        const entry = {};
        entry.offset = parser.getObj();
        entry.gen = parser.getObj();
        const type = parser.getObj();

        if (type instanceof Cmd) {
          switch (type.cmd) {
            case "f":
              entry.free = true;
              break;
            case "n":
              entry.uncompressed = true;
              break;
          }
        }

        // Validate entry obj
        if (
          !Number.isInteger(entry.offset) ||
          !Number.isInteger(entry.gen) ||
          !(entry.free || entry.uncompressed)
        ) {
          throw new FormatError(
            `Invalid entry in XRef subsection: ${first}, ${count}`
          );
        }

        // The first xref table entry, i.e. obj 0, should be free. Attempting
        // to adjust an incorrect first obj # (fixes issue 3248 and 7229).
        if (i === 0 && entry.free && first === 1) {
          first = 0;
        }

        if (!this.entries[i + first]) {
          this.entries[i + first] = entry;
        }
      }

      tableState.entryNum = 0;
      tableState.streamPos = stream.pos;
      tableState.parserBuf1 = parser.buf1;
      tableState.parserBuf2 = parser.buf2;
      delete tableState.firstEntryNum;
      delete tableState.entryCount;
    }

    // Sanity check: as per spec, first object must be free
    if (this.entries[0] && !this.entries[0].free) {
      throw new FormatError("Invalid XRef table: unexpected first object");
    }
    return obj;
  }

  processXRefStream(stream) {
    if (!("streamState" in this)) {
      // Stores state of the stream as we process it so we can resume
      // from middle of stream in case of missing data error
      const streamParameters = stream.dict;
      const byteWidths = streamParameters.get("W");
      let range = streamParameters.get("Index");
      if (!range) {
        range = [0, streamParameters.get("Size")];
      }

      this.streamState = {
        entryRanges: range,
        byteWidths,
        entryNum: 0,
        streamPos: stream.pos,
      };
    }
    this.readXRefStream(stream);
    delete this.streamState;

    return stream.dict;
  }

  readXRefStream(stream) {
    let i, j;
    const streamState = this.streamState;
    stream.pos = streamState.streamPos;

    const byteWidths = streamState.byteWidths;
    const typeFieldWidth = byteWidths[0];
    const offsetFieldWidth = byteWidths[1];
    const generationFieldWidth = byteWidths[2];

    const entryRanges = streamState.entryRanges;
    while (entryRanges.length > 0) {
      const first = entryRanges[0];
      const n = entryRanges[1];

      if (!Number.isInteger(first) || !Number.isInteger(n)) {
        throw new FormatError(`Invalid XRef range fields: ${first}, ${n}`);
      }
      if (
        !Number.isInteger(typeFieldWidth) ||
        !Number.isInteger(offsetFieldWidth) ||
        !Number.isInteger(generationFieldWidth)
      ) {
        throw new FormatError(
          `Invalid XRef entry fields length: ${first}, ${n}`
        );
      }
      for (i = streamState.entryNum; i < n; ++i) {
        streamState.entryNum = i;
        streamState.streamPos = stream.pos;

        let type = 0,
          offset = 0,
          generation = 0;
        for (j = 0; j < typeFieldWidth; ++j) {
          type = (type << 8) | stream.getByte();
        }
        // if type field is absent, its default value is 1
        if (typeFieldWidth === 0) {
          type = 1;
        }
        for (j = 0; j < offsetFieldWidth; ++j) {
          offset = (offset << 8) | stream.getByte();
        }
        for (j = 0; j < generationFieldWidth; ++j) {
          generation = (generation << 8) | stream.getByte();
        }
        const entry = {};
        entry.offset = offset;
        entry.gen = generation;
        switch (type) {
          case 0:
            entry.free = true;
            break;
          case 1:
            entry.uncompressed = true;
            break;
          case 2:
            break;
          default:
            throw new FormatError(`Invalid XRef entry type: ${type}`);
        }
        if (!this.entries[first + i]) {
          this.entries[first + i] = entry;
        }
      }

      streamState.entryNum = 0;
      streamState.streamPos = stream.pos;
      entryRanges.splice(0, 2);
    }
  }

  indexObjects() {
    // Simple scan through the PDF content to find objects,
    // trailers and XRef streams.
    const TAB = 0x9,
      LF = 0xa,
      CR = 0xd,
      SPACE = 0x20;
    const PERCENT = 0x25,
      LT = 0x3c;

    function readToken(data, offset) {
      let token = "",
        ch = data[offset];
      while (ch !== LF && ch !== CR && ch !== LT) {
        if (++offset >= data.length) {
          break;
        }
        token += String.fromCharCode(ch);
        ch = data[offset];
      }
      return token;
    }
    function skipUntil(data, offset, what) {
      const length = what.length,
        dataLength = data.length;
      let skipped = 0;
      // finding byte sequence
      while (offset < dataLength) {
        let i = 0;
        while (i < length && data[offset + i] === what[i]) {
          ++i;
        }
        if (i >= length) {
          break; // sequence found
        }
        offset++;
        skipped++;
      }
      return skipped;
    }
    const objRegExp = /^(\d+)\s+(\d+)\s+obj\b/;
    const endobjRegExp = /\bendobj[\b\s]$/;
    const nestedObjRegExp = /\s+(\d+\s+\d+\s+obj[\b\s<])$/;
    const CHECK_CONTENT_LENGTH = 25;

    const trailerBytes = new Uint8Array([116, 114, 97, 105, 108, 101, 114]);
    const startxrefBytes = new Uint8Array([
      115, 116, 97, 114, 116, 120, 114, 101, 102,
    ]);
    const objBytes = new Uint8Array([111, 98, 106]);
    const xrefBytes = new Uint8Array([47, 88, 82, 101, 102]);

    // Clear out any existing entries, since they may be bogus.
    this.entries.length = 0;

    const stream = this.stream;
    stream.pos = 0;
    const buffer = stream.getBytes(),
      length = buffer.length;
    let position = stream.start;
    const trailers = [],
      xrefStms = [];
    while (position < length) {
      let ch = buffer[position];
      if (ch === TAB || ch === LF || ch === CR || ch === SPACE) {
        ++position;
        continue;
      }
      if (ch === PERCENT) {
        // %-comment
        do {
          ++position;
          if (position >= length) {
            break;
          }
          ch = buffer[position];
        } while (ch !== LF && ch !== CR);
        continue;
      }
      const token = readToken(buffer, position);
      let m;
      if (
        token.startsWith("xref") &&
        (token.length === 4 || /\s/.test(token[4]))
      ) {
        position += skipUntil(buffer, position, trailerBytes);
        trailers.push(position);
        position += skipUntil(buffer, position, startxrefBytes);
      } else if ((m = objRegExp.exec(token))) {
        const num = m[1] | 0,
          gen = m[2] | 0;

        let contentLength,
          startPos = position + token.length,
          updateEntries = false;
        if (!this.entries[num]) {
          updateEntries = true;
        } else if (this.entries[num].gen === gen) {
          // Before overwriting an existing entry, ensure that the new one won't
          // cause *immediate* errors when it's accessed (fixes issue13783.pdf).
          try {
            const parser = new Parser({
              lexer: new Lexer(stream.makeSubStream(startPos)),
            });
            parser.getObj();
            updateEntries = true;
          } catch (ex) {
            if (ex instanceof ParserEOFException) {
              warn(`indexObjects -- checking object (${token}): "${ex}".`);
            } else {
              // The error may come from the `Parser`-instance being initialized
              // without an `XRef`-instance (we don't have a usable one yet).
              updateEntries = true;
            }
          }
        }
        if (updateEntries) {
          this.entries[num] = {
            offset: position - stream.start,
            gen,
            uncompressed: true,
          };
        }

        // Find the next "obj" string, rather than "endobj", to ensure that
        // we won't skip over a new 'obj' operator in corrupt files where
        // 'endobj' operators are missing (fixes issue9105_reduced.pdf).
        while (startPos < buffer.length) {
          const endPos = startPos + skipUntil(buffer, startPos, objBytes) + 4;
          contentLength = endPos - position;

          const checkPos = Math.max(endPos - CHECK_CONTENT_LENGTH, startPos);
          const tokenStr = bytesToString(buffer.subarray(checkPos, endPos));

          // Check if the current object ends with an 'endobj' operator.
          if (endobjRegExp.test(tokenStr)) {
            break;
          } else {
            // Check if an "obj" occurrence is actually a new object,
            // i.e. the current object is missing the 'endobj' operator.
            const objToken = nestedObjRegExp.exec(tokenStr);

            if (objToken && objToken[1]) {
              warn(
                'indexObjects: Found new "obj" inside of another "obj", ' +
                  'caused by missing "endobj" -- trying to recover.'
              );
              contentLength -= objToken[1].length;
              break;
            }
          }
          startPos = endPos;
        }
        const content = buffer.subarray(position, position + contentLength);

        // checking XRef stream suspect
        // (it shall have '/XRef' and next char is not a letter)
        const xrefTagOffset = skipUntil(content, 0, xrefBytes);
        if (xrefTagOffset < contentLength && content[xrefTagOffset + 5] < 64) {
          xrefStms.push(position - stream.start);
          this.xrefstms[position - stream.start] = 1; // Avoid recursion
        }

        position += contentLength;
      } else if (
        token.startsWith("trailer") &&
        (token.length === 7 || /\s/.test(token[7]))
      ) {
        trailers.push(position);
        position += skipUntil(buffer, position, startxrefBytes);
      } else {
        position += token.length + 1;
      }
    }
    // reading XRef streams
    for (let i = 0, ii = xrefStms.length; i < ii; ++i) {
      this.startXRefQueue.push(xrefStms[i]);
      this.readXRef(/* recoveryMode */ true);
    }
    // finding main trailer
    let trailerDict;
    for (let i = 0, ii = trailers.length; i < ii; ++i) {
      stream.pos = trailers[i];
      const parser = new Parser({
        lexer: new Lexer(stream),
        xref: this,
        allowStreams: true,
        recoveryMode: true,
      });
      const obj = parser.getObj();
      if (!isCmd(obj, "trailer")) {
        continue;
      }
      // read the trailer dictionary
      const dict = parser.getObj();
      if (!isDict(dict)) {
        continue;
      }
      // Do some basic validation of the trailer/root dictionary candidate.
      try {
        const rootDict = dict.get("Root");
        if (!(rootDict instanceof Dict)) {
          continue;
        }
        const pagesDict = rootDict.get("Pages");
        if (!(pagesDict instanceof Dict)) {
          continue;
        }
        const pagesCount = pagesDict.get("Count");
        if (!Number.isInteger(pagesCount)) {
          continue;
        }
        // The top-level /Pages dictionary isn't obviously corrupt.
      } catch (ex) {
        continue;
      }
      // taking the first one with 'ID'
      if (dict.has("ID")) {
        return dict;
      }
      // The current dictionary is a candidate, but continue searching.
      trailerDict = dict;
    }
    // No trailer with 'ID', taking last one (if exists).
    if (trailerDict) {
      return trailerDict;
    }
    // nothing helps
    throw new InvalidPDFException("Invalid PDF structure.");
  }

  readXRef(recoveryMode = false) {
    const stream = this.stream;
    // Keep track of already parsed XRef tables, to prevent an infinite loop
    // when parsing corrupt PDF files where e.g. the /Prev entries create a
    // circular dependency between tables (fixes bug1393476.pdf).
    const startXRefParsedCache = new Set();

    try {
      while (this.startXRefQueue.length) {
        const startXRef = this.startXRefQueue[0];

        if (startXRefParsedCache.has(startXRef)) {
          warn("readXRef - skipping XRef table since it was already parsed.");
          this.startXRefQueue.shift();
          continue;
        }
        startXRefParsedCache.add(startXRef);

        stream.pos = startXRef + stream.start;

        const parser = new Parser({
          lexer: new Lexer(stream),
          xref: this,
          allowStreams: true,
        });
        let obj = parser.getObj();
        let dict;

        // Get dictionary
        if (isCmd(obj, "xref")) {
          // Parse end-of-file XRef
          dict = this.processXRefTable(parser);
          if (!this.topDict) {
            this.topDict = dict;
          }

          // Recursively get other XRefs 'XRefStm', if any
          obj = dict.get("XRefStm");
          if (Number.isInteger(obj)) {
            const pos = obj;
            // ignore previously loaded xref streams
            // (possible infinite recursion)
            if (!(pos in this.xrefstms)) {
              this.xrefstms[pos] = 1;
              this.startXRefQueue.push(pos);
            }
          }
        } else if (Number.isInteger(obj)) {
          // Parse in-stream XRef
          if (
            !Number.isInteger(parser.getObj()) ||
            !isCmd(parser.getObj(), "obj") ||
            !isStream((obj = parser.getObj()))
          ) {
            throw new FormatError("Invalid XRef stream");
          }
          dict = this.processXRefStream(obj);
          if (!this.topDict) {
            this.topDict = dict;
          }
          if (!dict) {
            throw new FormatError("Failed to read XRef stream");
          }
        } else {
          throw new FormatError("Invalid XRef stream header");
        }

        // Recursively get previous dictionary, if any
        obj = dict.get("Prev");
        if (Number.isInteger(obj)) {
          this.startXRefQueue.push(obj);
        } else if (isRef(obj)) {
          // The spec says Prev must not be a reference, i.e. "/Prev NNN"
          // This is a fallback for non-compliant PDFs, i.e. "/Prev NNN 0 R"
          this.startXRefQueue.push(obj.num);
        }

        this.startXRefQueue.shift();
      }

      return this.topDict;
    } catch (e) {
      if (e instanceof MissingDataException) {
        throw e;
      }
      info("(while reading XRef): " + e);
    }

    if (recoveryMode) {
      return undefined;
    }
    throw new XRefParseException();
  }

  getEntry(i) {
    const xrefEntry = this.entries[i];
    if (xrefEntry && !xrefEntry.free && xrefEntry.offset) {
      return xrefEntry;
    }
    return null;
  }

  fetchIfRef(obj, suppressEncryption = false) {
    if (obj instanceof Ref) {
      return this.fetch(obj, suppressEncryption);
    }
    return obj;
  }

  fetch(ref, suppressEncryption = false) {
    if (!(ref instanceof Ref)) {
      throw new Error("ref object is not a reference");
    }
    const num = ref.num;

    // The XRef cache is populated with objects which are obtained through
    // `Parser.getObj`, and indirectly via `Lexer.getObj`. Neither of these
    // methods should ever return `undefined` (note the `assert` calls below).
    const cacheEntry = this._cacheMap.get(num);
    if (cacheEntry !== undefined) {
      // In documents with Object Streams, it's possible that cached `Dict`s
      // have not been assigned an `objId` yet (see e.g. issue3115r.pdf).
      if (cacheEntry instanceof Dict && !cacheEntry.objId) {
        cacheEntry.objId = ref.toString();
      }
      return cacheEntry;
    }
    let xrefEntry = this.getEntry(num);

    if (xrefEntry === null) {
      // The referenced entry can be free.
      this._cacheMap.set(num, xrefEntry);
      return xrefEntry;
    }

    if (xrefEntry.uncompressed) {
      xrefEntry = this.fetchUncompressed(ref, xrefEntry, suppressEncryption);
    } else {
      xrefEntry = this.fetchCompressed(ref, xrefEntry, suppressEncryption);
    }
    if (isDict(xrefEntry)) {
      xrefEntry.objId = ref.toString();
    } else if (isStream(xrefEntry)) {
      xrefEntry.dict.objId = ref.toString();
    }
    return xrefEntry;
  }

  fetchUncompressed(ref, xrefEntry, suppressEncryption = false) {
    const gen = ref.gen;
    let num = ref.num;
    if (xrefEntry.gen !== gen) {
      throw new XRefEntryException(`Inconsistent generation in XRef: ${ref}`);
    }
    const stream = this.stream.makeSubStream(
      xrefEntry.offset + this.stream.start
    );
    const parser = new Parser({
      lexer: new Lexer(stream),
      xref: this,
      allowStreams: true,
    });
    const obj1 = parser.getObj();
    const obj2 = parser.getObj();
    const obj3 = parser.getObj();

    if (obj1 !== num || obj2 !== gen || !(obj3 instanceof Cmd)) {
      throw new XRefEntryException(`Bad (uncompressed) XRef entry: ${ref}`);
    }
    if (obj3.cmd !== "obj") {
      // some bad PDFs use "obj1234" and really mean 1234
      if (obj3.cmd.startsWith("obj")) {
        num = parseInt(obj3.cmd.substring(3), 10);
        if (!Number.isNaN(num)) {
          return num;
        }
      }
      throw new XRefEntryException(`Bad (uncompressed) XRef entry: ${ref}`);
    }
    if (this.encrypt && !suppressEncryption) {
      xrefEntry = parser.getObj(this.encrypt.createCipherTransform(num, gen));
    } else {
      xrefEntry = parser.getObj();
    }
    if (!isStream(xrefEntry)) {
      if (
        typeof PDFJSDev === "undefined" ||
        PDFJSDev.test("!PRODUCTION || TESTING")
      ) {
        assert(
          xrefEntry !== undefined,
          'fetchUncompressed: The "xrefEntry" cannot be undefined.'
        );
      }
      this._cacheMap.set(num, xrefEntry);
    }
    return xrefEntry;
  }

  fetchCompressed(ref, xrefEntry, suppressEncryption = false) {
    const tableOffset = xrefEntry.offset;
    const stream = this.fetch(Ref.get(tableOffset, 0));
    if (!isStream(stream)) {
      throw new FormatError("bad ObjStm stream");
    }
    const first = stream.dict.get("First");
    const n = stream.dict.get("N");
    if (!Number.isInteger(first) || !Number.isInteger(n)) {
      throw new FormatError("invalid first and n parameters for ObjStm stream");
    }
    let parser = new Parser({
      lexer: new Lexer(stream),
      xref: this,
      allowStreams: true,
    });
    const nums = new Array(n);
    const offsets = new Array(n);
    // read the object numbers to populate cache
    for (let i = 0; i < n; ++i) {
      const num = parser.getObj();
      if (!Number.isInteger(num)) {
        throw new FormatError(
          `invalid object number in the ObjStm stream: ${num}`
        );
      }
      const offset = parser.getObj();
      if (!Number.isInteger(offset)) {
        throw new FormatError(
          `invalid object offset in the ObjStm stream: ${offset}`
        );
      }
      nums[i] = num;
      offsets[i] = offset;
    }

    const start = (stream.start || 0) + first;
    const entries = new Array(n);
    // read stream objects for cache
    for (let i = 0; i < n; ++i) {
      const length = i < n - 1 ? offsets[i + 1] - offsets[i] : undefined;
      if (length < 0) {
        throw new FormatError("Invalid offset in the ObjStm stream.");
      }
      parser = new Parser({
        lexer: new Lexer(
          stream.makeSubStream(start + offsets[i], length, stream.dict)
        ),
        xref: this,
        allowStreams: true,
      });

      const obj = parser.getObj();
      entries[i] = obj;
      if (isStream(obj)) {
        continue;
      }
      const num = nums[i],
        entry = this.entries[num];
      if (entry && entry.offset === tableOffset && entry.gen === i) {
        if (
          typeof PDFJSDev === "undefined" ||
          PDFJSDev.test("!PRODUCTION || TESTING")
        ) {
          assert(
            obj !== undefined,
            'fetchCompressed: The "obj" cannot be undefined.'
          );
        }
        this._cacheMap.set(num, obj);
      }
    }
    xrefEntry = entries[xrefEntry.gen];
    if (xrefEntry === undefined) {
      throw new XRefEntryException(`Bad (compressed) XRef entry: ${ref}`);
    }
    return xrefEntry;
  }

  async fetchIfRefAsync(obj, suppressEncryption) {
    if (obj instanceof Ref) {
      return this.fetchAsync(obj, suppressEncryption);
    }
    return obj;
  }

  async fetchAsync(ref, suppressEncryption) {
    try {
      return this.fetch(ref, suppressEncryption);
    } catch (ex) {
      if (!(ex instanceof MissingDataException)) {
        throw ex;
      }
      await this.pdfManager.requestRange(ex.begin, ex.end);
      return this.fetchAsync(ref, suppressEncryption);
    }
  }

  getCatalogObj() {
    return this.root;
  }
}

export { XRef };