roxify 1.13.3 → 1.13.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Cargo.toml +3 -1
- package/README.md +26 -13
- package/dist/rox-macos-universal +0 -0
- package/dist/roxify_native +0 -0
- package/dist/roxify_native-macos-arm64 +0 -0
- package/dist/roxify_native-macos-x64 +0 -0
- package/dist/roxify_native.exe +0 -0
- package/dist/utils/decoder.js +62 -58
- package/dist/utils/encoder.js +13 -6
- package/native/encoder.rs +10 -22
- package/native/io_advice.rs +43 -0
- package/native/lib.rs +2 -0
- package/native/main.rs +81 -43
- package/native/packer.rs +232 -75
- package/native/png_chunk_writer.rs +146 -0
- package/native/png_utils.rs +70 -54
- package/native/streaming.rs +16 -39
- package/native/streaming_decode.rs +313 -114
- package/native/streaming_encode.rs +272 -128
- package/package.json +1 -1
- package/roxify_native-aarch64-apple-darwin.node +0 -0
- package/roxify_native-aarch64-pc-windows-msvc.node +0 -0
- package/roxify_native-aarch64-unknown-linux-gnu.node +0 -0
- package/roxify_native-i686-pc-windows-msvc.node +0 -0
- package/roxify_native-i686-unknown-linux-gnu.node +0 -0
- package/roxify_native-x86_64-apple-darwin.node +0 -0
- package/roxify_native-x86_64-pc-windows-msvc.node +0 -0
- package/roxify_native-x86_64-unknown-linux-gnu.node +0 -0
package/native/streaming_encode.rs

@@ -1,8 +1,11 @@
 use std::io::{Write, BufWriter, Read};
 use std::fs::File;
-use std::path::Path;
+use std::path::{Path, PathBuf};
+use rayon::prelude::*;
+use serde::Serialize;
 use walkdir::WalkDir;
-
+
+use crate::png_chunk_writer::{ChunkedIdatWriter, write_png_chunk};

 const PNG_HEADER: &[u8] = &[137, 80, 78, 71, 13, 10, 26, 10];
 const PIXEL_MAGIC: &[u8] = b"PXL1";
@@ -10,9 +13,36 @@ const MARKER_START: [(u8, u8, u8); 3] = [(255, 0, 0), (0, 255, 0), (0, 0, 255)];
 const MARKER_END: [(u8, u8, u8); 3] = [(0, 0, 255), (0, 255, 0), (255, 0, 0)];
 const MARKER_ZSTD: (u8, u8, u8) = (0, 255, 0);
 const MAGIC: &[u8] = b"ROX1";
+const PACK_MAGIC: u32 = 0x524f5850;
+
+const MIN_ZST_CAPACITY: usize = 16 * 1024 * 1024;
+const MB: u64 = 1024 * 1024;
+const MAX_FILE_BUFFER_CAPACITY: usize = 4 * 1024 * 1024;
+const PARALLEL_IO_FILE_THRESHOLD: u64 = MB;
+const PARALLEL_IO_BATCH_BYTES: u64 = 128 * MB;
+const PARALLEL_IO_BATCH_FILES: usize = 512;
+const PARALLEL_IO_MIN_FILES: usize = 8;
+const HEADER_VERSION_V2: u8 = 2;

 pub type ProgressCallback = Box<dyn Fn(u64, u64, &str) + Send>;

+struct DirectoryFile {
+    path: PathBuf,
+    rel_path: String,
+    size: u64,
+}
+
+#[derive(Serialize)]
+struct FileListEntry {
+    name: String,
+    size: u64,
+}
+
+struct CollectedDirectory {
+    entries: Vec<DirectoryFile>,
+    total_bytes: u64,
+}
+
 pub fn encode_dir_to_png(
     dir_path: &Path,
     output_path: &Path,
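The new `PACK_MAGIC` constant is just the ASCII tag "ROXP" packed into a big-endian u32, so the `encoder.write_all(&PACK_MAGIC.to_be_bytes())` call later in this diff emits a readable four-byte marker. A quick standalone check (illustrative, not from the package):

```rust
// 0x52 0x4f 0x58 0x50 are the ASCII codes for 'R', 'O', 'X', 'P'.
const PACK_MAGIC: u32 = 0x524f5850;

fn main() {
    assert_eq!(&PACK_MAGIC.to_be_bytes(), b"ROXP");
}
```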
@@ -42,9 +72,7 @@ encode_dir_to_png_encrypted_with_progress(
     encrypt_type: Option<&str>,
     progress: Option<ProgressCallback>,
 ) -> anyhow::Result<()> {
-    let (zst_buf,
-
-    let file_list_json = serde_json::to_string(&file_list)?;
+    let (zst_buf, file_list_json) = compress_dir_to_zst_mem(dir_path, compression_level, &progress)?;

     let result = write_png_from_zst_mem(
         zst_buf, output_path, name, Some(&file_list_json),
@@ -62,26 +90,19 @@ fn compress_dir_to_zst_mem(
     dir_path: &Path,
     compression_level: i32,
     progress: &Option<ProgressCallback>,
-) -> anyhow::Result<(Vec<u8>,
-    let
-
-    let entries
-        .follow_links(false)
-        .into_iter()
-        .filter_map(|e| e.ok())
-        .filter(|e| e.file_type().is_file())
-        .collect();
-
-    let total_bytes: u64 = entries.iter()
-        .filter_map(|e| std::fs::metadata(e.path()).ok())
-        .map(|m| m.len())
-        .sum();
+) -> anyhow::Result<(Vec<u8>, String)> {
+    let collected = collect_directory_files(dir_path);
+    let total_bytes = collected.total_bytes;
+    let entries = collected.entries;

     let actual_level = compression_level.min(3);
-    let mut encoder = zstd::stream::Encoder::new(
+    let mut encoder = zstd::stream::Encoder::new(
+        Vec::with_capacity(estimate_zst_capacity(total_bytes)),
+        actual_level,
+    )
     .map_err(|e| anyhow::anyhow!("zstd init: {}", e))?;

-    let threads =
+    let threads = select_zstd_threads(total_bytes);
     if threads > 1 {
         let _ = encoder.multithread(threads);
     }
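The encoder setup above pre-sizes the in-memory output buffer, caps the level at 3, and opts into multithreading and a large window. A minimal standalone sketch of the same configuration, assuming the `zstd` crate (`multithread` requires the crate's `zstdmt` feature):

```rust
// Sketch only: mirrors the encoder configuration in the diff above,
// compressing a byte slice into an in-memory Vec.
use std::io::Write;

fn compress(data: &[u8], level: i32, threads: u32) -> std::io::Result<Vec<u8>> {
    let mut encoder = zstd::stream::Encoder::new(Vec::new(), level)?;
    if threads > 1 {
        // Best-effort, as in the diff: ignore failure if MT support is absent.
        let _ = encoder.multithread(threads);
    }
    let _ = encoder.window_log(30); // 1 GiB window, matching the diff
    encoder.write_all(data)?;
    encoder.finish()
}
```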
@@ -89,61 +110,221 @@ fn compress_dir_to_zst_mem(
     let _ = encoder.window_log(30);

     encoder.write_all(MAGIC)?;
+    encoder.write_all(&PACK_MAGIC.to_be_bytes())?;
+    encoder.write_all(&(entries.len() as u32).to_be_bytes())?;

-    let mut file_list = Vec::
+    let mut file_list = Vec::with_capacity(entries.len());
     let mut bytes_processed: u64 = 0;
     let mut last_pct: u64 = 0;
-
-
-
-
-    let
-
-
-
-            Ok(m) => m,
-            Err(_) => continue,
-        };
-        let size = metadata.len();
-
-        let mut header = Header::new_gnu();
-        header.set_size(size);
-        header.set_mode(0o644);
-        header.set_cksum();
-
-        let file = match File::open(full) {
-            Ok(f) => f,
-            Err(_) => continue,
-        };
-        let buf_reader = std::io::BufReader::with_capacity(
-            (size as usize).min(4 * 1024 * 1024).max(8192),
-            file,
-        );
-
-        tar_builder.append_data(&mut header, &rel_str, buf_reader)
-            .map_err(|e| anyhow::anyhow!("tar append {}: {}", rel_str, e))?;
-
-        file_list.push(serde_json::json!({"name": rel_str, "size": size}));
-
-        bytes_processed += size;
-        if let Some(ref cb) = progress {
-            let pct = if total_bytes > 0 {
-                (bytes_processed * 89 / total_bytes).min(89)
-            } else {
-                89
+    let mut entry_index = 0usize;
+    while entry_index < entries.len() {
+        let batch_end = select_parallel_batch_end(&entries, entry_index);
+        if batch_end > entry_index + 1 {
+            let loaded = load_small_file_batch(&entries[entry_index..batch_end])?;
+            for (entry, maybe_bytes) in entries[entry_index..batch_end].iter().zip(loaded.into_iter()) {
+                let Some(bytes) = maybe_bytes else {
+                    continue;
                 };
-
-
-
-
+
+                write_pack_entry_header(&mut encoder, &entry.rel_path, entry.size)?;
+                encoder.write_all(&bytes)
+                    .map_err(|e| anyhow::anyhow!("pack write {}: {}", entry.rel_path, e))?;
+
+                file_list.push(FileListEntry {
+                    name: entry.rel_path.clone(),
+                    size: entry.size,
+                });
+
+                bytes_processed += entry.size;
+                report_compress_progress(progress, total_bytes, bytes_processed, &mut last_pct);
             }
+            entry_index = batch_end;
+            continue;
         }
-
+
+        let entry = &entries[entry_index];
+        if write_directory_entry(&mut encoder, entry)? {
+            file_list.push(FileListEntry {
+                name: entry.rel_path.clone(),
+                size: entry.size,
+            });
+
+            bytes_processed += entry.size;
+            report_compress_progress(progress, total_bytes, bytes_processed, &mut last_pct);
+        }
+        entry_index += 1;
     }

     let zst_buf = encoder.finish().map_err(|e| anyhow::anyhow!("zstd finish: {}", e))?;
+    let file_list_json = serde_json::to_string(&file_list)?;

-    Ok((zst_buf,
+    Ok((zst_buf, file_list_json))
+}
+
+fn write_pack_entry_header<W: Write>(writer: &mut W, rel_path: &str, size: u64) -> anyhow::Result<()> {
+    let name_bytes = rel_path.as_bytes();
+    let name_len = u16::try_from(name_bytes.len())
+        .map_err(|_| anyhow::anyhow!("path too long for pack entry: {}", rel_path))?;
+    writer.write_all(&name_len.to_be_bytes())?;
+    writer.write_all(name_bytes)?;
+    writer.write_all(&size.to_be_bytes())?;
+    Ok(())
+}
+
+fn write_directory_entry<W: Write>(writer: &mut W, entry: &DirectoryFile) -> anyhow::Result<bool> {
+    let file = match File::open(&entry.path) {
+        Ok(file) => file,
+        Err(_) => return Ok(false),
+    };
+
+    write_pack_entry_header(writer, &entry.rel_path, entry.size)?;
+
+    let mut buf_reader = std::io::BufReader::with_capacity(file_buffer_capacity(entry.size), file);
+    std::io::copy(&mut buf_reader, writer)
+        .map_err(|e| anyhow::anyhow!("pack write {}: {}", entry.rel_path, e))?;
+
+    Ok(true)
+}
+
+fn load_small_file_batch(entries: &[DirectoryFile]) -> anyhow::Result<Vec<Option<Vec<u8>>>> {
+    entries.par_iter().map(load_directory_entry_bytes).collect()
+}
+
+fn load_directory_entry_bytes(entry: &DirectoryFile) -> anyhow::Result<Option<Vec<u8>>> {
+    let mut file = match File::open(&entry.path) {
+        Ok(file) => file,
+        Err(_) => return Ok(None),
+    };
+
+    let reserve = usize::try_from(entry.size.min(PARALLEL_IO_BATCH_BYTES)).unwrap_or(MAX_FILE_BUFFER_CAPACITY);
+    let mut bytes = Vec::with_capacity(reserve.max(8192));
+    file.read_to_end(&mut bytes)
+        .map_err(|e| anyhow::anyhow!("pack read {}: {}", entry.rel_path, e))?;
+
+    Ok(Some(bytes))
+}
+
+fn select_parallel_batch_end(entries: &[DirectoryFile], start: usize) -> usize {
+    let Some(first) = entries.get(start) else {
+        return start;
+    };
+    if !should_parallelize_entry(first) {
+        return start + 1;
+    }
+
+    let mut end = start;
+    let mut batch_bytes = 0u64;
+    while end < entries.len() {
+        let entry = &entries[end];
+        if !should_parallelize_entry(entry) {
+            break;
+        }
+        if end > start {
+            if end - start >= PARALLEL_IO_BATCH_FILES {
+                break;
+            }
+            if batch_bytes.saturating_add(entry.size) > PARALLEL_IO_BATCH_BYTES {
+                break;
+            }
+        }
+        batch_bytes = batch_bytes.saturating_add(entry.size);
+        end += 1;
+    }
+
+    if end - start >= PARALLEL_IO_MIN_FILES {
+        end
+    } else {
+        start + 1
+    }
+}
+
+fn should_parallelize_entry(entry: &DirectoryFile) -> bool {
+    entry.size <= PARALLEL_IO_FILE_THRESHOLD
+}
+
+fn file_buffer_capacity(size: u64) -> usize {
+    usize::try_from(size)
+        .unwrap_or(MAX_FILE_BUFFER_CAPACITY)
+        .min(MAX_FILE_BUFFER_CAPACITY)
+        .max(8192)
+}
+
+fn report_compress_progress(
+    progress: &Option<ProgressCallback>,
+    total_bytes: u64,
+    bytes_processed: u64,
+    last_pct: &mut u64,
+) {
+    if let Some(ref cb) = progress {
+        let pct = if total_bytes > 0 {
+            (bytes_processed * 89 / total_bytes).min(89)
+        } else {
+            89
+        };
+        if pct > *last_pct {
+            *last_pct = pct;
+            cb(pct, 100, "compressing");
+        }
+    }
+}
+
+fn collect_directory_files(dir_path: &Path) -> CollectedDirectory {
+    let mut entries = Vec::new();
+    let mut total_bytes = 0u64;
+
+    for entry in WalkDir::new(dir_path)
+        .follow_links(false)
+        .into_iter()
+        .filter_map(|entry| entry.ok())
+        .filter(|entry| entry.file_type().is_file())
+    {
+        let size = match entry.metadata() {
+            Ok(metadata) => metadata.len(),
+            Err(_) => continue,
+        };
+        let path = entry.into_path();
+        let rel = path.strip_prefix(dir_path).unwrap_or(path.as_path());
+        let rel_path = normalize_rel_path(rel);
+
+        total_bytes += size;
+        entries.push(DirectoryFile {
+            path,
+            rel_path,
+            size,
+        });
+    }
+
+    CollectedDirectory {
+        entries,
+        total_bytes,
+    }
+}
+
+fn normalize_rel_path(path: &Path) -> String {
+    let rel_path = path.to_string_lossy();
+    if rel_path.contains('\\') {
+        rel_path.replace('\\', "/")
+    } else {
+        rel_path.into_owned()
+    }
+}
+
+fn estimate_zst_capacity(total_bytes: u64) -> usize {
+    let capped = total_bytes.min(usize::MAX as u64) as usize;
+    (capped / 3).max(MIN_ZST_CAPACITY)
+}
+
+fn select_zstd_threads(total_bytes: u64) -> u32 {
+    let max_threads = num_cpus::get().max(1) as u32;
+    if total_bytes <= 32 * MB {
+        1
+    } else if total_bytes <= 128 * MB {
+        max_threads.min(2)
+    } else if total_bytes <= 512 * MB {
+        max_threads.min(4)
+    } else {
+        max_threads.min(8)
+    }
 }

 fn write_png_from_zst_mem(
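This hunk also replaces the old tar-based packing with a custom format: `write_pack_entry_header` implies that each file inside the zstd stream is framed as a big-endian u16 name length, the UTF-8 relative path, then a big-endian u64 payload size followed by the file bytes, with small files batched through rayon for parallel reads. A hypothetical reader for one such entry (the package's actual decoder is not shown in this hunk):

```rust
// Assumed framing, inferred from write_pack_entry_header above:
// [u16 BE name_len][name bytes][u64 BE size][size payload bytes].
use std::io::Read;

fn read_pack_entry<R: Read>(r: &mut R) -> std::io::Result<(String, Vec<u8>)> {
    let mut len2 = [0u8; 2];
    r.read_exact(&mut len2)?;
    let name_len = u16::from_be_bytes(len2) as usize;

    let mut name = vec![0u8; name_len];
    r.read_exact(&mut name)?;
    let name = String::from_utf8(name)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;

    let mut len8 = [0u8; 8];
    r.read_exact(&mut len8)?;
    let size = u64::from_be_bytes(len8);

    let mut payload = vec![0u8; size as usize];
    r.read_exact(&mut payload)?;
    Ok((name, payload))
}
```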
@@ -167,12 +348,12 @@ fn write_png_from_zst_mem(

     let encrypted_payload_len = enc_header_len + zst_size + hmac_trailer_len;

-    let version =
+    let version = HEADER_VERSION_V2;
     let name_bytes = name.map(|n| n.as_bytes()).unwrap_or(&[]);
     let name_len = name_bytes.len().min(255) as u8;
-    let payload_len_bytes = (encrypted_payload_len as
+    let payload_len_bytes = (encrypted_payload_len as u64).to_be_bytes();

-    let mut meta_header = Vec::with_capacity(1 + 1 + name_len as usize +
+    let mut meta_header = Vec::with_capacity(1 + 1 + name_len as usize + 8);
     meta_header.push(version);
     meta_header.push(name_len);
     if name_len > 0 {
@@ -220,13 +401,6 @@ fn write_png_from_zst_mem(

     let header_bytes = build_header_bytes(&meta_header, &enc_header_bytes);

-    let stride = row_bytes + 1;
-    let scanlines_total = height * stride;
-
-    const MAX_BLOCK: usize = 65535;
-    let num_blocks = (scanlines_total + MAX_BLOCK - 1) / MAX_BLOCK;
-    let idat_len = 2 + num_blocks * 5 + scanlines_total + 4;
-
     let out_file = File::create(output_path)?;
     let buf_capacity = if total_data_bytes > 256 * 1024 * 1024 { 16 * 1024 * 1024 }
     else if total_data_bytes > 16 * 1024 * 1024 { 8 * 1024 * 1024 }
@@ -240,7 +414,7 @@ fn write_png_from_zst_mem(
     ihdr[4..8].copy_from_slice(&(height as u32).to_be_bytes());
     ihdr[8] = 8;
     ihdr[9] = 2;
-
+    write_png_chunk(&mut w, b"IHDR", &ihdr)?;

     let mut zst_reader = std::io::Cursor::new(zst_buf);

@@ -255,15 +429,14 @@ fn write_png_from_zst_mem(
         height,
         row_bytes,
         marker_end_pos,
-        idat_len,
         total_data_bytes,
         progress,
     )?;

     if let Some(fl) = file_list {
-
+        write_png_chunk(&mut w, b"rXFL", fl.as_bytes())?;
     }
-
+    write_png_chunk(&mut w, b"IEND", &[])?;
     w.flush()?;

     Ok(())
@@ -292,33 +465,32 @@ fn write_idat_streaming<W: Write, R: Read>(
     height: usize,
     row_bytes: usize,
     marker_end_pos: usize,
-    idat_len: usize,
     total_data_bytes: usize,
     progress: &Option<ProgressCallback>,
 ) -> anyhow::Result<()> {
-
-    w.write_all(b"IDAT")?;
-
-    let mut crc = crc32fast::Hasher::new();
-    crc.update(b"IDAT");
+    let mut idat = ChunkedIdatWriter::new(w);

     let stride = row_bytes + 1;
     let scanlines_total = height * stride;

     let zlib = [0x78u8, 0x01];
-
-    crc.update(&zlib);
+    idat.write_all(&zlib)?;

     let fl_chunk_data = file_list_chunk.unwrap_or(&[]);
     let payload_total = header_bytes.len() + zst_size + hmac_trailer_len + fl_chunk_data.len();
     let padding_after = total_data_bytes - payload_total.min(total_data_bytes);
-
     let marker_end_bytes = build_marker_end_bytes();

     let mut flat_pos: usize = 0;
     let mut scanline_pos: usize = 0;
     let mut deflate_block_remaining: usize = 0;

+    let mut adler = simd_adler32::Adler32::new();
+
+    let buf_size = 1024 * 1024;
+    let mut transfer_buf = vec![0u8; buf_size];
+    let zero_buf = vec![0u8; buf_size];
+
     let mut header_pos: usize = 0;
     let mut zst_remaining = zst_size;
     let mut hmac_pos: usize = 0;
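`ChunkedIdatWriter` (added in the new `png_chunk_writer.rs`, not shown in this diff) replaces the old single pre-sized IDAT chunk, which is why `idat_len` disappears above. Judging by its use, it must buffer the zlib stream and emit it as successive IDAT chunks, each with its own length and a CRC-32 over the chunk type plus data. A simplified sketch of that responsibility, an illustration rather than the package's implementation:

```rust
// Assumed behavior only: accumulate bytes and flush them as IDAT
// chunks of up to IDAT_CAP bytes. Uses the crc32fast crate.
use std::io::Write;

const IDAT_CAP: usize = 1 << 20; // hypothetical per-chunk cap

struct IdatSketch<W: Write> {
    w: W,
    buf: Vec<u8>,
}

impl<W: Write> IdatSketch<W> {
    fn new(w: W) -> Self {
        Self { w, buf: Vec::new() }
    }

    fn write_all(&mut self, data: &[u8]) -> std::io::Result<()> {
        self.buf.extend_from_slice(data);
        if self.buf.len() >= IDAT_CAP {
            self.flush_chunk()?;
        }
        Ok(())
    }

    fn flush_chunk(&mut self) -> std::io::Result<()> {
        self.w.write_all(&(self.buf.len() as u32).to_be_bytes())?;
        self.w.write_all(b"IDAT")?;
        self.w.write_all(&self.buf)?;
        let mut crc = crc32fast::Hasher::new();
        crc.update(b"IDAT"); // the PNG CRC covers type + data, not length
        crc.update(&self.buf);
        self.w.write_all(&crc.finalize().to_be_bytes())?;
        self.buf.clear();
        Ok(())
    }

    fn finish(mut self) -> std::io::Result<()> {
        if !self.buf.is_empty() {
            self.flush_chunk()?;
        }
        Ok(())
    }
}
```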
@@ -327,12 +499,6 @@ fn write_idat_streaming<W: Write, R: Read>(
     let mut fl_pos: usize = 0;
     let mut zero_remaining = padding_after;

-    let mut adler = simd_adler32::Adler32::new();
-
-    let buf_size = 1024 * 1024;
-    let mut transfer_buf = vec![0u8; buf_size];
-    let zero_buf = vec![0u8; buf_size];
-
     let mut last_png_pct: u64 = 89;

     for row_idx in 0..height {
@@ -347,14 +513,12 @@ fn write_idat_streaming<W: Write, R: Read>(
                 !block_size as u8,
                 (!(block_size >> 8)) as u8,
             ];
-
-            crc.update(&header);
+            idat.write_all(&header)?;
             deflate_block_remaining = block_size;
         }

         let filter_byte = [0u8];
-
-        crc.update(&filter_byte);
+        idat.write_all(&filter_byte)?;
         adler.write(&filter_byte);
         scanline_pos += 1;
         deflate_block_remaining -= 1;
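The `header` array written above is the 5-byte header of an RFC 1951 stored (BTYPE=00) deflate block; the two bytes visible in the hunk are NLEN, the ones' complement of the 16-bit block length. Reconstructed for reference (the BFINAL/LEN bytes sit in unshown context, so this layout is inferred from the spec):

```rust
// Header of a "stored" deflate block: BFINAL/BTYPE byte, LEN in
// little-endian, then NLEN (ones' complement of LEN).
fn stored_block_header(block_size: usize, is_final: bool) -> [u8; 5] {
    assert!(block_size <= 65_535); // stored blocks carry at most 65535 bytes
    [
        is_final as u8,             // BFINAL bit; BTYPE=00 (stored)
        block_size as u8,           // LEN, low byte
        (block_size >> 8) as u8,    // LEN, high byte
        !block_size as u8,          // NLEN, low byte (as in the diff)
        (!(block_size >> 8)) as u8, // NLEN, high byte (as in the diff)
    ]
}
```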
@@ -372,8 +536,7 @@ fn write_idat_streaming<W: Write, R: Read>(
                 !block_size as u8,
                 (!(block_size >> 8)) as u8,
             ];
-
-            crc.update(&header);
+            idat.write_all(&header)?;
             deflate_block_remaining = block_size;
         }

@@ -390,8 +553,7 @@ fn write_idat_streaming<W: Write, R: Read>(
             let me_remaining = 9 - me_offset;
             let take = need.min(me_remaining);
             let slice = &marker_end_bytes[me_offset..me_offset + take];
-
-            crc.update(slice);
+            idat.write_all(slice)?;
             adler.write(slice);
             flat_pos += take;
             chunk_written += take;
@@ -405,8 +567,7 @@ fn write_idat_streaming<W: Write, R: Read>(
             let avail = header_bytes.len() - header_pos;
             let take = need.min(avail);
             let slice = &header_bytes[header_pos..header_pos + take];
-
-            crc.update(slice);
+            idat.write_all(slice)?;
             adler.write(slice);
             header_pos += take;
             flat_pos += take;
@@ -422,8 +583,7 @@ fn write_idat_streaming<W: Write, R: Read>(
         if let Some(ref mut enc) = encryptor {
             enc.encrypt_chunk(&mut transfer_buf[..got]);
         }
-
-        crc.update(&transfer_buf[..got]);
+        idat.write_all(&transfer_buf[..got])?;
         adler.write(&transfer_buf[..got]);
         zst_remaining -= got;
         flat_pos += got;
@@ -441,8 +601,7 @@ fn write_idat_streaming<W: Write, R: Read>(
             let avail = hmac_trailer_len - hmac_pos;
             let take = need.min(avail);
             let slice = &hmac_bytes[hmac_pos..hmac_pos + take];
-
-            crc.update(slice);
+            idat.write_all(slice)?;
             adler.write(slice);
             hmac_pos += take;
             flat_pos += take;
@@ -460,8 +619,7 @@ fn write_idat_streaming<W: Write, R: Read>(
             let avail = fl_chunk_data.len() - fl_pos;
             let take = need.min(avail);
             let slice = &fl_chunk_data[fl_pos..fl_pos + take];
-
-            crc.update(slice);
+            idat.write_all(slice)?;
             adler.write(slice);
             fl_pos += take;
             flat_pos += take;
@@ -477,8 +635,7 @@ fn write_idat_streaming<W: Write, R: Read>(
         };
         let take = need.min(zero_remaining).min(buf_size).min(max_before_marker);
         if take == 0 { break; }
-
-        crc.update(&zero_buf[..take]);
+        idat.write_all(&zero_buf[..take])?;
         adler.write(&zero_buf[..take]);
         zero_remaining -= take;
         flat_pos += take;
@@ -501,11 +658,8 @@ fn write_idat_streaming<W: Write, R: Read>(

     let adler_val = adler.finish();
     let adler_bytes = adler_val.to_be_bytes();
-
-
-
-    w.write_all(&crc.finalize().to_be_bytes())?;
-    Ok(())
+    idat.write_all(&adler_bytes)?;
+    idat.finish()
 }

 fn build_marker_end_bytes() -> [u8; 9] {
@@ -518,13 +672,3 @@ fn build_marker_end_bytes() -> [u8; 9] {
     buf
 }

-fn write_chunk_hdr<W: Write>(w: &mut W, chunk_type: &[u8; 4], data: &[u8]) -> anyhow::Result<()> {
-    w.write_all(&(data.len() as u32).to_be_bytes())?;
-    w.write_all(chunk_type)?;
-    w.write_all(data)?;
-    let mut h = crc32fast::Hasher::new();
-    h.update(chunk_type);
-    h.update(data);
-    w.write_all(&h.finalize().to_be_bytes())?;
-    Ok(())
-}
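The deleted `write_chunk_hdr` documents the PNG chunk layout this package relies on: a big-endian u32 length, the 4-byte type, the data, then a CRC-32 computed over type and data but not the length. For reference, a reader-side sketch that parses and validates such a chunk (illustrative, not taken from the package):

```rust
// Reads one PNG chunk and checks its CRC. Uses the crc32fast crate,
// matching the hasher used by the removed writer above.
use std::io::Read;

fn read_png_chunk<R: Read>(r: &mut R) -> std::io::Result<([u8; 4], Vec<u8>)> {
    let mut len = [0u8; 4];
    r.read_exact(&mut len)?;
    let len = u32::from_be_bytes(len) as usize;

    let mut chunk_type = [0u8; 4];
    r.read_exact(&mut chunk_type)?;

    let mut data = vec![0u8; len];
    r.read_exact(&mut data)?;

    let mut crc = [0u8; 4];
    r.read_exact(&mut crc)?;

    let mut h = crc32fast::Hasher::new();
    h.update(&chunk_type);
    h.update(&data);
    if h.finalize() != u32::from_be_bytes(crc) {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            "PNG chunk CRC mismatch",
        ));
    }
    Ok((chunk_type, data))
}
```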
package/package.json
CHANGED