roxify 1.13.8 → 1.13.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/dist/cli.js +1 -8
  2. package/dist/stub-progress.d.ts +4 -4
  3. package/dist/stub-progress.js +4 -4
  4. package/dist/utils/decoder.d.ts +10 -1
  5. package/dist/utils/decoder.js +111 -7
  6. package/dist/utils/ecc.js +0 -1
  7. package/dist/utils/inspection.d.ts +1 -1
  8. package/dist/utils/inspection.js +2 -2
  9. package/dist/utils/robust-audio.js +0 -13
  10. package/dist/utils/robust-image.js +0 -26
  11. package/package.json +12 -29
  12. package/roxify_native-aarch64-apple-darwin.node +0 -0
  13. package/roxify_native-aarch64-pc-windows-msvc.node +0 -0
  14. package/roxify_native-aarch64-unknown-linux-gnu.node +0 -0
  15. package/roxify_native-i686-pc-windows-msvc.node +0 -0
  16. package/roxify_native-i686-unknown-linux-gnu.node +0 -0
  17. package/{dist/rox-macos-universal → roxify_native-universal-apple-darwin.node} +0 -0
  18. package/roxify_native-x86_64-apple-darwin.node +0 -0
  19. package/roxify_native-x86_64-pc-windows-msvc.node +0 -0
  20. package/roxify_native-x86_64-unknown-linux-gnu.node +0 -0
  21. package/scripts/postinstall.cjs +23 -2
  22. package/Cargo.toml +0 -91
  23. package/dist/roxify_native +0 -0
  24. package/dist/roxify_native-macos-arm64 +0 -0
  25. package/dist/roxify_native-macos-x64 +0 -0
  26. package/dist/roxify_native.exe +0 -0
  27. package/native/archive.rs +0 -220
  28. package/native/audio.rs +0 -151
  29. package/native/bench_hybrid.rs +0 -145
  30. package/native/bwt.rs +0 -56
  31. package/native/context_mixing.rs +0 -117
  32. package/native/core.rs +0 -378
  33. package/native/crypto.rs +0 -209
  34. package/native/encoder.rs +0 -405
  35. package/native/hybrid.rs +0 -297
  36. package/native/image_utils.rs +0 -82
  37. package/native/io_advice.rs +0 -43
  38. package/native/io_ntfs_optimized.rs +0 -99
  39. package/native/lib.rs +0 -480
  40. package/native/main.rs +0 -842
  41. package/native/mtf.rs +0 -106
  42. package/native/packer.rs +0 -604
  43. package/native/png_chunk_writer.rs +0 -146
  44. package/native/png_utils.rs +0 -554
  45. package/native/pool.rs +0 -101
  46. package/native/progress.rs +0 -142
  47. package/native/rans.rs +0 -149
  48. package/native/rans_byte.rs +0 -286
  49. package/native/reconstitution.rs +0 -623
  50. package/native/streaming.rs +0 -189
  51. package/native/streaming_decode.rs +0 -625
  52. package/native/streaming_encode.rs +0 -684
  53. package/native/test_small_bwt.rs +0 -31
  54. package/native/test_stages.rs +0 -70
@@ -1,684 +0,0 @@
1
- use std::io::{Write, BufWriter, Read};
2
- use std::fs::File;
3
- use std::path::{Path, PathBuf};
4
- use rayon::prelude::*;
5
- use serde::Serialize;
6
- use walkdir::WalkDir;
7
-
8
- use crate::png_chunk_writer::{ChunkedIdatWriter, write_png_chunk};
9
-
10
- const PNG_HEADER: &[u8] = &[137, 80, 78, 71, 13, 10, 26, 10];
11
- const PIXEL_MAGIC: &[u8] = b"PXL1";
12
- const MARKER_START: [(u8, u8, u8); 3] = [(255, 0, 0), (0, 255, 0), (0, 0, 255)];
13
- const MARKER_END: [(u8, u8, u8); 3] = [(0, 0, 255), (0, 255, 0), (255, 0, 0)];
14
- const MARKER_ZSTD: (u8, u8, u8) = (0, 255, 0);
15
- const MAGIC: &[u8] = b"ROX1";
16
- const PACK_MAGIC: u32 = 0x524f5850;
17
-
18
- const MIN_ZST_CAPACITY: usize = 16 * 1024 * 1024;
19
- const MB: u64 = 1024 * 1024;
20
- const MAX_FILE_BUFFER_CAPACITY: usize = 4 * 1024 * 1024;
21
- const PARALLEL_IO_FILE_THRESHOLD: u64 = MB;
22
- const PARALLEL_IO_BATCH_BYTES: u64 = 128 * MB;
23
- const PARALLEL_IO_BATCH_FILES: usize = 512;
24
- const PARALLEL_IO_MIN_FILES: usize = 8;
25
- const HEADER_VERSION_V2: u8 = 2;
26
-
27
- pub type ProgressCallback = Box<dyn Fn(u64, u64, &str) + Send>;
28
-
29
- struct DirectoryFile {
30
- path: PathBuf,
31
- rel_path: String,
32
- size: u64,
33
- }
34
-
35
- #[derive(Serialize)]
36
- struct FileListEntry {
37
- name: String,
38
- size: u64,
39
- }
40
-
41
- struct CollectedDirectory {
42
- entries: Vec<DirectoryFile>,
43
- total_bytes: u64,
44
- }
45
-
46
- pub fn encode_dir_to_png(
47
- dir_path: &Path,
48
- output_path: &Path,
49
- compression_level: i32,
50
- name: Option<&str>,
51
- ) -> anyhow::Result<()> {
52
- encode_dir_to_png_encrypted(dir_path, output_path, compression_level, name, None, None)
53
- }
54
-
55
- pub fn encode_dir_to_png_encrypted(
56
- dir_path: &Path,
57
- output_path: &Path,
58
- compression_level: i32,
59
- name: Option<&str>,
60
- passphrase: Option<&str>,
61
- encrypt_type: Option<&str>,
62
- ) -> anyhow::Result<()> {
63
- encode_dir_to_png_encrypted_with_progress(dir_path, output_path, compression_level, name, passphrase, encrypt_type, None)
64
- }
65
-
66
- pub fn encode_dir_to_png_encrypted_with_progress(
67
- dir_path: &Path,
68
- output_path: &Path,
69
- compression_level: i32,
70
- name: Option<&str>,
71
- passphrase: Option<&str>,
72
- encrypt_type: Option<&str>,
73
- progress: Option<ProgressCallback>,
74
- ) -> anyhow::Result<()> {
75
- let (zst_buf, file_list_json) = compress_dir_to_zst_mem(dir_path, compression_level, &progress)?;
76
-
77
- let result = write_png_from_zst_mem(
78
- zst_buf, output_path, name, Some(&file_list_json),
79
- passphrase, encrypt_type, &progress,
80
- );
81
-
82
- if let Some(ref cb) = progress {
83
- cb(100, 100, "done");
84
- }
85
-
86
- result
87
- }
88
-
89
- fn compress_dir_to_zst_mem(
90
- dir_path: &Path,
91
- compression_level: i32,
92
- progress: &Option<ProgressCallback>,
93
- ) -> anyhow::Result<(Vec<u8>, String)> {
94
- let collected = collect_directory_files(dir_path);
95
- let total_bytes = collected.total_bytes;
96
- let entries = collected.entries;
97
-
98
- let actual_level = compression_level.min(3);
99
- let mut encoder = zstd::stream::Encoder::new(
100
- Vec::with_capacity(estimate_zst_capacity(total_bytes)),
101
- actual_level,
102
- )
103
- .map_err(|e| anyhow::anyhow!("zstd init: {}", e))?;
104
-
105
- let threads = select_zstd_threads(total_bytes);
106
- if threads > 1 {
107
- let _ = encoder.multithread(threads);
108
- }
109
- let _ = encoder.long_distance_matching(true);
110
- let _ = encoder.window_log(30);
111
-
112
- encoder.write_all(MAGIC)?;
113
- encoder.write_all(&PACK_MAGIC.to_be_bytes())?;
114
- encoder.write_all(&(entries.len() as u32).to_be_bytes())?;
115
-
116
- let mut file_list = Vec::with_capacity(entries.len());
117
- let mut bytes_processed: u64 = 0;
118
- let mut last_pct: u64 = 0;
119
- let mut entry_index = 0usize;
120
- while entry_index < entries.len() {
121
- let batch_end = select_parallel_batch_end(&entries, entry_index);
122
- if batch_end > entry_index + 1 {
123
- let loaded = load_small_file_batch(&entries[entry_index..batch_end])?;
124
- for (entry, maybe_bytes) in entries[entry_index..batch_end].iter().zip(loaded.into_iter()) {
125
- let Some(bytes) = maybe_bytes else {
126
- continue;
127
- };
128
-
129
- write_pack_entry_header(&mut encoder, &entry.rel_path, entry.size)?;
130
- encoder.write_all(&bytes)
131
- .map_err(|e| anyhow::anyhow!("pack write {}: {}", entry.rel_path, e))?;
132
-
133
- file_list.push(FileListEntry {
134
- name: entry.rel_path.clone(),
135
- size: entry.size,
136
- });
137
-
138
- bytes_processed += entry.size;
139
- report_compress_progress(progress, total_bytes, bytes_processed, &mut last_pct);
140
- }
141
- entry_index = batch_end;
142
- continue;
143
- }
144
-
145
- let entry = &entries[entry_index];
146
- if write_directory_entry(&mut encoder, entry)? {
147
- file_list.push(FileListEntry {
148
- name: entry.rel_path.clone(),
149
- size: entry.size,
150
- });
151
-
152
- bytes_processed += entry.size;
153
- report_compress_progress(progress, total_bytes, bytes_processed, &mut last_pct);
154
- }
155
- entry_index += 1;
156
- }
157
-
158
- let zst_buf = encoder.finish().map_err(|e| anyhow::anyhow!("zstd finish: {}", e))?;
159
- let file_list_json = serde_json::to_string(&file_list)?;
160
-
161
- Ok((zst_buf, file_list_json))
162
- }
163
-
164
- fn write_pack_entry_header<W: Write>(writer: &mut W, rel_path: &str, size: u64) -> anyhow::Result<()> {
165
- let name_bytes = rel_path.as_bytes();
166
- let name_len = u16::try_from(name_bytes.len())
167
- .map_err(|_| anyhow::anyhow!("path too long for pack entry: {}", rel_path))?;
168
- writer.write_all(&name_len.to_be_bytes())?;
169
- writer.write_all(name_bytes)?;
170
- writer.write_all(&size.to_be_bytes())?;
171
- Ok(())
172
- }
173
-
174
- fn write_directory_entry<W: Write>(writer: &mut W, entry: &DirectoryFile) -> anyhow::Result<bool> {
175
- let file = match File::open(&entry.path) {
176
- Ok(file) => file,
177
- Err(_) => return Ok(false),
178
- };
179
-
180
- write_pack_entry_header(writer, &entry.rel_path, entry.size)?;
181
-
182
- let mut buf_reader = std::io::BufReader::with_capacity(file_buffer_capacity(entry.size), file);
183
- std::io::copy(&mut buf_reader, writer)
184
- .map_err(|e| anyhow::anyhow!("pack write {}: {}", entry.rel_path, e))?;
185
-
186
- Ok(true)
187
- }
188
-
189
- fn load_small_file_batch(entries: &[DirectoryFile]) -> anyhow::Result<Vec<Option<Vec<u8>>>> {
190
- entries.par_iter().map(load_directory_entry_bytes).collect()
191
- }
192
-
193
- fn load_directory_entry_bytes(entry: &DirectoryFile) -> anyhow::Result<Option<Vec<u8>>> {
194
- let mut file = match File::open(&entry.path) {
195
- Ok(file) => file,
196
- Err(_) => return Ok(None),
197
- };
198
-
199
- let reserve = usize::try_from(entry.size.min(PARALLEL_IO_BATCH_BYTES)).unwrap_or(MAX_FILE_BUFFER_CAPACITY);
200
- let mut bytes = Vec::with_capacity(reserve.max(8192));
201
- file.read_to_end(&mut bytes)
202
- .map_err(|e| anyhow::anyhow!("pack read {}: {}", entry.rel_path, e))?;
203
-
204
- Ok(Some(bytes))
205
- }
206
-
207
- fn select_parallel_batch_end(entries: &[DirectoryFile], start: usize) -> usize {
208
- let Some(first) = entries.get(start) else {
209
- return start;
210
- };
211
- if !should_parallelize_entry(first) {
212
- return start + 1;
213
- }
214
-
215
- let mut end = start;
216
- let mut batch_bytes = 0u64;
217
- while end < entries.len() {
218
- let entry = &entries[end];
219
- if !should_parallelize_entry(entry) {
220
- break;
221
- }
222
- if end > start {
223
- if end - start >= PARALLEL_IO_BATCH_FILES {
224
- break;
225
- }
226
- if batch_bytes.saturating_add(entry.size) > PARALLEL_IO_BATCH_BYTES {
227
- break;
228
- }
229
- }
230
- batch_bytes = batch_bytes.saturating_add(entry.size);
231
- end += 1;
232
- }
233
-
234
- if end - start >= PARALLEL_IO_MIN_FILES {
235
- end
236
- } else {
237
- start + 1
238
- }
239
- }
240
-
241
- fn should_parallelize_entry(entry: &DirectoryFile) -> bool {
242
- entry.size <= PARALLEL_IO_FILE_THRESHOLD
243
- }
244
-
245
- fn file_buffer_capacity(size: u64) -> usize {
246
- usize::try_from(size)
247
- .unwrap_or(MAX_FILE_BUFFER_CAPACITY)
248
- .min(MAX_FILE_BUFFER_CAPACITY)
249
- .max(8192)
250
- }
251
-
252
- fn report_compress_progress(
253
- progress: &Option<ProgressCallback>,
254
- total_bytes: u64,
255
- bytes_processed: u64,
256
- last_pct: &mut u64,
257
- ) {
258
- if let Some(ref cb) = progress {
259
- let pct = if total_bytes > 0 {
260
- (bytes_processed * 89 / total_bytes).min(89)
261
- } else {
262
- 89
263
- };
264
- if pct > *last_pct {
265
- *last_pct = pct;
266
- cb(pct, 100, "compressing");
267
- }
268
- }
269
- }
270
-
271
- fn collect_directory_files(dir_path: &Path) -> CollectedDirectory {
272
- let mut entries = Vec::new();
273
- let mut total_bytes = 0u64;
274
-
275
- for entry in WalkDir::new(dir_path)
276
- .follow_links(false)
277
- .into_iter()
278
- .filter_map(|entry| entry.ok())
279
- .filter(|entry| entry.file_type().is_file())
280
- {
281
- let size = match entry.metadata() {
282
- Ok(metadata) => metadata.len(),
283
- Err(_) => continue,
284
- };
285
- let path = entry.into_path();
286
- let rel = path.strip_prefix(dir_path).unwrap_or(path.as_path());
287
- let rel_path = normalize_rel_path(rel);
288
-
289
- total_bytes += size;
290
- entries.push(DirectoryFile {
291
- path,
292
- rel_path,
293
- size,
294
- });
295
- }
296
-
297
- CollectedDirectory {
298
- entries,
299
- total_bytes,
300
- }
301
- }
302
-
303
- fn normalize_rel_path(path: &Path) -> String {
304
- let rel_path = path.to_string_lossy();
305
- if rel_path.contains('\\') {
306
- rel_path.replace('\\', "/")
307
- } else {
308
- rel_path.into_owned()
309
- }
310
- }
311
-
312
- fn estimate_zst_capacity(total_bytes: u64) -> usize {
313
- let capped = total_bytes.min(usize::MAX as u64) as usize;
314
- (capped / 3).max(MIN_ZST_CAPACITY)
315
- }
316
-
317
- fn select_zstd_threads(total_bytes: u64) -> u32 {
318
- let max_threads = num_cpus::get().max(1) as u32;
319
- let ram_mb = crate::parse_linux_mem_available_mb().unwrap_or(4096);
320
-
321
- // Aggressive multi-threading for Pyxelze speed target (<10s)
322
- if total_bytes <= 16 * MB {
323
- // Small files: single thread to avoid overhead
324
- 1
325
- } else if total_bytes <= 64 * MB {
326
- // Small-medium files: 2 threads
327
- max_threads.min(2)
328
- } else if total_bytes <= 256 * MB || ram_mb >= 8192 {
329
- // Medium files or high RAM: up to 4 threads
330
- max_threads.min(4)
331
- } else if total_bytes <= 1024 * MB || ram_mb >= 4096 {
332
- // Large files or medium RAM: up to 8 threads
333
- max_threads.min(8)
334
- } else {
335
- // Very large files: use all available cores up to 16
336
- max_threads.min(16)
337
- }
338
- }
339
-
340
- fn write_png_from_zst_mem(
341
- zst_buf: Vec<u8>,
342
- output_path: &Path,
343
- name: Option<&str>,
344
- file_list: Option<&str>,
345
- passphrase: Option<&str>,
346
- _encrypt_type: Option<&str>,
347
- progress: &Option<ProgressCallback>,
348
- ) -> anyhow::Result<()> {
349
- let zst_size = zst_buf.len();
350
-
351
- let mut encryptor = match passphrase {
352
- Some(pass) if !pass.is_empty() => Some(crate::crypto::StreamingEncryptor::new(pass)?),
353
- _ => None,
354
- };
355
-
356
- let enc_header_len = encryptor.as_ref().map(|e| e.header_len()).unwrap_or(1);
357
- let hmac_trailer_len: usize = if encryptor.is_some() { 32 } else { 0 };
358
-
359
- let encrypted_payload_len = enc_header_len + zst_size + hmac_trailer_len;
360
-
361
- let version = HEADER_VERSION_V2;
362
- let name_bytes = name.map(|n| n.as_bytes()).unwrap_or(&[]);
363
- let name_len = name_bytes.len().min(255) as u8;
364
- let payload_len_bytes = (encrypted_payload_len as u64).to_be_bytes();
365
-
366
- let mut meta_header = Vec::with_capacity(1 + 1 + name_len as usize + 8);
367
- meta_header.push(version);
368
- meta_header.push(name_len);
369
- if name_len > 0 {
370
- meta_header.extend_from_slice(&name_bytes[..name_len as usize]);
371
- }
372
- meta_header.extend_from_slice(&payload_len_bytes);
373
-
374
- let meta_header_len = meta_header.len();
375
-
376
- let file_list_chunk = file_list.map(|fl| {
377
- let json_bytes = fl.as_bytes();
378
- let mut chunk = Vec::with_capacity(4 + 4 + json_bytes.len());
379
- chunk.extend_from_slice(b"rXFL");
380
- chunk.extend_from_slice(&(json_bytes.len() as u32).to_be_bytes());
381
- chunk.extend_from_slice(json_bytes);
382
- chunk
383
- });
384
- let file_list_inline_len = file_list_chunk.as_ref().map(|c| c.len()).unwrap_or(0);
385
-
386
- let total_meta_pixel_len = meta_header_len + encrypted_payload_len + file_list_inline_len;
387
- let raw_payload_len = PIXEL_MAGIC.len() + total_meta_pixel_len;
388
- let padding_needed = (3 - (raw_payload_len % 3)) % 3;
389
- let padded_len = raw_payload_len + padding_needed;
390
-
391
- let marker_start_len = 12;
392
- let marker_end_bytes = 9;
393
- let data_with_markers_len = marker_start_len + padded_len;
394
- let data_pixels = (data_with_markers_len + 2) / 3;
395
- let end_marker_pixels = 3;
396
- let total_pixels = data_pixels + end_marker_pixels;
397
-
398
- let side = (total_pixels as f64).sqrt().ceil() as usize;
399
- let side = side.max(end_marker_pixels);
400
- let width = side;
401
- let height = side;
402
- let row_bytes = width * 3;
403
- let total_data_bytes = width * height * 3;
404
- let marker_end_pos = total_data_bytes - marker_end_bytes;
405
-
406
- let enc_header_bytes = if let Some(ref enc) = encryptor {
407
- enc.header.clone()
408
- } else {
409
- vec![0x00]
410
- };
411
-
412
- let header_bytes = build_header_bytes(&meta_header, &enc_header_bytes);
413
-
414
- let out_file = File::create(output_path)?;
415
- let buf_capacity = if total_data_bytes > 256 * 1024 * 1024 { 16 * 1024 * 1024 }
416
- else if total_data_bytes > 16 * 1024 * 1024 { 8 * 1024 * 1024 }
417
- else { (total_data_bytes / 2).max(65536).min(4 * 1024 * 1024) };
418
- let mut w = BufWriter::with_capacity(buf_capacity, out_file);
419
-
420
- w.write_all(PNG_HEADER)?;
421
-
422
- let mut ihdr = [0u8; 13];
423
- ihdr[0..4].copy_from_slice(&(width as u32).to_be_bytes());
424
- ihdr[4..8].copy_from_slice(&(height as u32).to_be_bytes());
425
- ihdr[8] = 8;
426
- ihdr[9] = 2;
427
- write_png_chunk(&mut w, b"IHDR", &ihdr)?;
428
-
429
- let mut zst_reader = std::io::Cursor::new(zst_buf);
430
-
431
- write_idat_streaming(
432
- &mut w,
433
- &header_bytes,
434
- &mut zst_reader,
435
- zst_size,
436
- file_list_chunk.as_deref(),
437
- &mut encryptor,
438
- hmac_trailer_len,
439
- height,
440
- row_bytes,
441
- marker_end_pos,
442
- total_data_bytes,
443
- progress,
444
- )?;
445
-
446
- if let Some(fl) = file_list {
447
- write_png_chunk(&mut w, b"rXFL", fl.as_bytes())?;
448
- }
449
- write_png_chunk(&mut w, b"IEND", &[])?;
450
- w.flush()?;
451
-
452
- Ok(())
453
- }
454
-
455
- fn build_header_bytes(meta_header: &[u8], enc_header: &[u8]) -> Vec<u8> {
456
- let mut header = Vec::with_capacity(12 + PIXEL_MAGIC.len() + meta_header.len() + enc_header.len());
457
- for m in &MARKER_START {
458
- header.push(m.0); header.push(m.1); header.push(m.2);
459
- }
460
- header.push(MARKER_ZSTD.0); header.push(MARKER_ZSTD.1); header.push(MARKER_ZSTD.2);
461
- header.extend_from_slice(PIXEL_MAGIC);
462
- header.extend_from_slice(meta_header);
463
- header.extend_from_slice(enc_header);
464
- header
465
- }
466
-
467
- fn write_idat_streaming<W: Write, R: Read>(
468
- w: &mut W,
469
- header_bytes: &[u8],
470
- zst_reader: &mut R,
471
- zst_size: usize,
472
- file_list_chunk: Option<&[u8]>,
473
- encryptor: &mut Option<crate::crypto::StreamingEncryptor>,
474
- hmac_trailer_len: usize,
475
- height: usize,
476
- row_bytes: usize,
477
- marker_end_pos: usize,
478
- total_data_bytes: usize,
479
- progress: &Option<ProgressCallback>,
480
- ) -> anyhow::Result<()> {
481
- let mut idat = ChunkedIdatWriter::new(w);
482
-
483
- let stride = row_bytes + 1;
484
- let scanlines_total = height * stride;
485
-
486
- let zlib = [0x78u8, 0x01];
487
- idat.write_all(&zlib)?;
488
-
489
- let fl_chunk_data = file_list_chunk.unwrap_or(&[]);
490
- let payload_total = header_bytes.len() + zst_size + hmac_trailer_len + fl_chunk_data.len();
491
- let padding_after = total_data_bytes - payload_total.min(total_data_bytes);
492
- let marker_end_bytes = build_marker_end_bytes();
493
-
494
- let mut flat_pos: usize = 0;
495
- let mut scanline_pos: usize = 0;
496
- let mut deflate_block_remaining: usize = 0;
497
-
498
- let mut adler = simd_adler32::Adler32::new();
499
-
500
- let buf_size = 1024 * 1024;
501
- let mut transfer_buf = vec![0u8; buf_size];
502
- let zero_buf = vec![0u8; buf_size];
503
-
504
- let mut header_pos: usize = 0;
505
- let mut zst_remaining = zst_size;
506
- let mut hmac_pos: usize = 0;
507
- let mut hmac_written = hmac_trailer_len == 0;
508
- let mut hmac_finalized: Option<[u8; 32]> = None;
509
- let mut fl_pos: usize = 0;
510
- let mut zero_remaining = padding_after;
511
-
512
- let mut last_png_pct: u64 = 89;
513
-
514
- for row_idx in 0..height {
515
- if deflate_block_remaining == 0 {
516
- let remaining_scanlines = scanlines_total - scanline_pos;
517
- let block_size = remaining_scanlines.min(65535);
518
- let is_last = scanline_pos + block_size >= scanlines_total;
519
- let header = [
520
- if is_last { 0x01 } else { 0x00 },
521
- block_size as u8,
522
- (block_size >> 8) as u8,
523
- !block_size as u8,
524
- (!(block_size >> 8)) as u8,
525
- ];
526
- idat.write_all(&header)?;
527
- deflate_block_remaining = block_size;
528
- }
529
-
530
- let filter_byte = [0u8];
531
- idat.write_all(&filter_byte)?;
532
- adler.write(&filter_byte);
533
- scanline_pos += 1;
534
- deflate_block_remaining -= 1;
535
-
536
- let mut cols_written = 0;
537
- while cols_written < row_bytes {
538
- if deflate_block_remaining == 0 {
539
- let remaining_scanlines = scanlines_total - scanline_pos;
540
- let block_size = remaining_scanlines.min(65535);
541
- let is_last = scanline_pos + block_size >= scanlines_total;
542
- let header = [
543
- if is_last { 0x01 } else { 0x00 },
544
- block_size as u8,
545
- (block_size >> 8) as u8,
546
- !block_size as u8,
547
- (!(block_size >> 8)) as u8,
548
- ];
549
- idat.write_all(&header)?;
550
- deflate_block_remaining = block_size;
551
- }
552
-
553
- let can_write = (row_bytes - cols_written).min(deflate_block_remaining);
554
-
555
- let mut chunk_written = 0;
556
- while chunk_written < can_write {
557
- let need = can_write - chunk_written;
558
-
559
- let is_marker_end_region = flat_pos >= marker_end_pos && flat_pos < marker_end_pos + 9;
560
-
561
- if is_marker_end_region {
562
- let me_offset = flat_pos - marker_end_pos;
563
- let me_remaining = 9 - me_offset;
564
- let take = need.min(me_remaining);
565
- let slice = &marker_end_bytes[me_offset..me_offset + take];
566
- idat.write_all(slice)?;
567
- adler.write(slice);
568
- flat_pos += take;
569
- chunk_written += take;
570
- scanline_pos += take;
571
- deflate_block_remaining -= take;
572
- cols_written += take;
573
- continue;
574
- }
575
-
576
- if header_pos < header_bytes.len() {
577
- let avail = header_bytes.len() - header_pos;
578
- let take = need.min(avail);
579
- let slice = &header_bytes[header_pos..header_pos + take];
580
- idat.write_all(slice)?;
581
- adler.write(slice);
582
- header_pos += take;
583
- flat_pos += take;
584
- chunk_written += take;
585
- scanline_pos += take;
586
- deflate_block_remaining -= take;
587
- cols_written += take;
588
- } else if zst_remaining > 0 {
589
- let take = need.min(zst_remaining).min(buf_size);
590
- let got = zst_reader.read(&mut transfer_buf[..take])
591
- .map_err(|e| anyhow::anyhow!("read zst: {}", e))?;
592
- if got == 0 { break; }
593
- if let Some(ref mut enc) = encryptor {
594
- enc.encrypt_chunk(&mut transfer_buf[..got]);
595
- }
596
- idat.write_all(&transfer_buf[..got])?;
597
- adler.write(&transfer_buf[..got]);
598
- zst_remaining -= got;
599
- flat_pos += got;
600
- chunk_written += got;
601
- scanline_pos += got;
602
- deflate_block_remaining -= got;
603
- cols_written += got;
604
- } else if !hmac_written {
605
- if hmac_finalized.is_none() {
606
- if let Some(enc) = encryptor.take() {
607
- hmac_finalized = Some(enc.finalize_hmac());
608
- }
609
- }
610
- if let Some(ref hmac_bytes) = hmac_finalized {
611
- let avail = hmac_trailer_len - hmac_pos;
612
- let take = need.min(avail);
613
- let slice = &hmac_bytes[hmac_pos..hmac_pos + take];
614
- idat.write_all(slice)?;
615
- adler.write(slice);
616
- hmac_pos += take;
617
- flat_pos += take;
618
- chunk_written += take;
619
- scanline_pos += take;
620
- deflate_block_remaining -= take;
621
- cols_written += take;
622
- if hmac_pos >= hmac_trailer_len {
623
- hmac_written = true;
624
- }
625
- } else {
626
- hmac_written = true;
627
- }
628
- } else if fl_pos < fl_chunk_data.len() {
629
- let avail = fl_chunk_data.len() - fl_pos;
630
- let take = need.min(avail);
631
- let slice = &fl_chunk_data[fl_pos..fl_pos + take];
632
- idat.write_all(slice)?;
633
- adler.write(slice);
634
- fl_pos += take;
635
- flat_pos += take;
636
- chunk_written += take;
637
- scanline_pos += take;
638
- deflate_block_remaining -= take;
639
- cols_written += take;
640
- } else {
641
- let max_before_marker = if flat_pos < marker_end_pos {
642
- marker_end_pos - flat_pos
643
- } else {
644
- need
645
- };
646
- let take = need.min(zero_remaining).min(buf_size).min(max_before_marker);
647
- if take == 0 { break; }
648
- idat.write_all(&zero_buf[..take])?;
649
- adler.write(&zero_buf[..take]);
650
- zero_remaining -= take;
651
- flat_pos += take;
652
- chunk_written += take;
653
- scanline_pos += take;
654
- deflate_block_remaining -= take;
655
- cols_written += take;
656
- }
657
- }
658
- }
659
-
660
- if let Some(ref cb) = progress {
661
- let pct = 90 + ((row_idx as u64 + 1) * 9 / height as u64).min(9);
662
- if pct > last_png_pct {
663
- last_png_pct = pct;
664
- cb(pct, 100, "writing_png");
665
- }
666
- }
667
- }
668
-
669
- let adler_val = adler.finish();
670
- let adler_bytes = adler_val.to_be_bytes();
671
- idat.write_all(&adler_bytes)?;
672
- idat.finish()
673
- }
674
-
675
- fn build_marker_end_bytes() -> [u8; 9] {
676
- let mut buf = [0u8; 9];
677
- for (i, m) in MARKER_END.iter().enumerate() {
678
- buf[i * 3] = m.0;
679
- buf[i * 3 + 1] = m.1;
680
- buf[i * 3 + 2] = m.2;
681
- }
682
- buf
683
- }
684
-
@@ -1,31 +0,0 @@
1
- mod bwt;
2
- mod mtf;
3
- mod rans_byte;
4
- mod context_mixing;
5
- mod pool;
6
- mod hybrid;
7
-
8
- fn main() {
9
- println!("Testing small inputs through full pipeline...");
10
-
11
- for size in [1, 2, 3, 4, 5, 6, 10, 100] {
12
- let data: Vec<u8> = (0..size).map(|i| (i % 256) as u8).collect();
13
- print!("Size {}: ", size);
14
-
15
- match hybrid::compress_high_performance(&data) {
16
- Ok((compressed, _stats)) => {
17
- match hybrid::decompress_high_performance(&compressed) {
18
- Ok(decompressed) => {
19
- if decompressed == data {
20
- println!("OK (compressed {} -> {} bytes)", size, compressed.len());
21
- } else {
22
- println!("MISMATCH!");
23
- }
24
- }
25
- Err(e) => println!("DECOMPRESS ERROR: {}", e),
26
- }
27
- }
28
- Err(e) => println!("COMPRESS ERROR: {}", e),
29
- }
30
- }
31
- }