roxify 1.13.8 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/dist/cli.js +23 -21
  2. package/dist/stub-progress.d.ts +4 -4
  3. package/dist/stub-progress.js +4 -4
  4. package/dist/utils/decoder.d.ts +10 -1
  5. package/dist/utils/decoder.js +111 -7
  6. package/dist/utils/ecc.js +0 -1
  7. package/dist/utils/inspection.d.ts +1 -1
  8. package/dist/utils/inspection.js +2 -2
  9. package/dist/utils/robust-audio.js +0 -13
  10. package/dist/utils/robust-image.js +0 -26
  11. package/package.json +12 -29
  12. package/roxify_native-aarch64-apple-darwin.node +0 -0
  13. package/roxify_native-aarch64-pc-windows-msvc.node +0 -0
  14. package/roxify_native-aarch64-unknown-linux-gnu.node +0 -0
  15. package/roxify_native-i686-pc-windows-msvc.node +0 -0
  16. package/roxify_native-i686-unknown-linux-gnu.node +0 -0
  17. package/{dist/rox-macos-universal → roxify_native-universal-apple-darwin.node} +0 -0
  18. package/roxify_native-x86_64-apple-darwin.node +0 -0
  19. package/roxify_native-x86_64-pc-windows-msvc.node +0 -0
  20. package/roxify_native-x86_64-unknown-linux-gnu.node +0 -0
  21. package/scripts/postinstall.cjs +23 -2
  22. package/Cargo.toml +0 -91
  23. package/dist/roxify_native +0 -0
  24. package/dist/roxify_native-macos-arm64 +0 -0
  25. package/dist/roxify_native-macos-x64 +0 -0
  26. package/dist/roxify_native.exe +0 -0
  27. package/native/archive.rs +0 -220
  28. package/native/audio.rs +0 -151
  29. package/native/bench_hybrid.rs +0 -145
  30. package/native/bwt.rs +0 -56
  31. package/native/context_mixing.rs +0 -117
  32. package/native/core.rs +0 -378
  33. package/native/crypto.rs +0 -209
  34. package/native/encoder.rs +0 -405
  35. package/native/hybrid.rs +0 -297
  36. package/native/image_utils.rs +0 -82
  37. package/native/io_advice.rs +0 -43
  38. package/native/io_ntfs_optimized.rs +0 -99
  39. package/native/lib.rs +0 -480
  40. package/native/main.rs +0 -842
  41. package/native/mtf.rs +0 -106
  42. package/native/packer.rs +0 -604
  43. package/native/png_chunk_writer.rs +0 -146
  44. package/native/png_utils.rs +0 -554
  45. package/native/pool.rs +0 -101
  46. package/native/progress.rs +0 -142
  47. package/native/rans.rs +0 -149
  48. package/native/rans_byte.rs +0 -286
  49. package/native/reconstitution.rs +0 -623
  50. package/native/streaming.rs +0 -189
  51. package/native/streaming_decode.rs +0 -625
  52. package/native/streaming_encode.rs +0 -684
  53. package/native/test_small_bwt.rs +0 -31
  54. package/native/test_stages.rs +0 -70
package/native/mtf.rs DELETED
@@ -1,106 +0,0 @@
1
- pub fn mtf_encode(data: &[u8]) -> Vec<u8> {
2
- let mut table = [0u8; 256];
3
- for i in 0..256 {
4
- table[i] = i as u8;
5
- }
6
- let mut output = Vec::with_capacity(data.len());
7
-
8
- for &byte in data {
9
- let pos = table.iter().position(|&b| b == byte).unwrap();
10
- output.push(pos as u8);
11
- if pos > 0 {
12
- let val = table[pos];
13
- table.copy_within(0..pos, 1);
14
- table[0] = val;
15
- }
16
- }
17
-
18
- output
19
- }
20
-
21
- pub fn mtf_decode(data: &[u8]) -> Vec<u8> {
22
- let mut table = [0u8; 256];
23
- for i in 0..256 {
24
- table[i] = i as u8;
25
- }
26
- let mut output = Vec::with_capacity(data.len());
27
-
28
- for &idx in data {
29
- let pos = idx as usize;
30
- let byte = table[pos];
31
- output.push(byte);
32
- if pos > 0 {
33
- let val = table[pos];
34
- table.copy_within(0..pos, 1);
35
- table[0] = val;
36
- }
37
- }
38
-
39
- output
40
- }
41
-
42
- pub fn rle0_encode(data: &[u8]) -> Vec<u8> {
43
- let mut output = Vec::with_capacity(data.len());
44
- let mut i = 0;
45
-
46
- while i < data.len() {
47
- if data[i] == 0 {
48
- let mut run = 0u32;
49
- while i < data.len() && data[i] == 0 {
50
- run += 1;
51
- i += 1;
52
- }
53
- output.push(0);
54
- if run <= 127 {
55
- output.push(run as u8);
56
- } else if run <= 16383 {
57
- output.push(0x80 | ((run >> 8) as u8));
58
- output.push((run & 0xFF) as u8);
59
- } else {
60
- output.push(0xC0 | ((run >> 16) as u8));
61
- output.push(((run >> 8) & 0xFF) as u8);
62
- output.push((run & 0xFF) as u8);
63
- }
64
- } else {
65
- output.push(data[i]);
66
- i += 1;
67
- }
68
- }
69
-
70
- output
71
- }
72
-
73
- pub fn rle0_decode(data: &[u8]) -> Vec<u8> {
74
- let mut output = Vec::with_capacity(data.len() * 2);
75
- let mut i = 0;
76
-
77
- while i < data.len() {
78
- if data[i] == 0 {
79
- i += 1;
80
- if i >= data.len() {
81
- break;
82
- }
83
- let run;
84
- if data[i] & 0xC0 == 0xC0 {
85
- let hi = (data[i] & 0x3F) as u32;
86
- run = (hi << 16) | ((data[i + 1] as u32) << 8) | (data[i + 2] as u32);
87
- i += 3;
88
- } else if data[i] & 0x80 != 0 {
89
- let hi = (data[i] & 0x7F) as u32;
90
- run = (hi << 8) | (data[i + 1] as u32);
91
- i += 2;
92
- } else {
93
- run = data[i] as u32;
94
- i += 1;
95
- }
96
- for _ in 0..run {
97
- output.push(0);
98
- }
99
- } else {
100
- output.push(data[i]);
101
- i += 1;
102
- }
103
- }
104
-
105
- output
106
- }
package/native/packer.rs DELETED
@@ -1,604 +0,0 @@
1
- use anyhow::Result;
2
- use std::fs;
3
- use std::path::{Path, PathBuf};
4
- use walkdir::WalkDir;
5
- use rayon::prelude::*;
6
- use serde_json::json;
7
-
8
- pub struct PackResult {
9
- pub data: Vec<u8>,
10
- pub file_list_json: Option<String>,
11
- }
12
-
13
- pub fn pack_directory(dir_path: &Path, base_dir: Option<&Path>) -> Result<Vec<u8>> {
14
- let base = base_dir.unwrap_or(dir_path);
15
-
16
- let files: Vec<PathBuf> = WalkDir::new(dir_path)
17
- .follow_links(false)
18
- .into_iter()
19
- .filter_map(|e| e.ok())
20
- .filter(|e| e.file_type().is_file())
21
- .map(|e| e.path().to_path_buf())
22
- .collect();
23
-
24
- let file_data: Vec<(String, Vec<u8>)> = files
25
- .par_iter()
26
- .filter_map(|file_path| {
27
- let rel_path = file_path.strip_prefix(base)
28
- .unwrap_or(file_path.as_path())
29
- .to_string_lossy()
30
- .replace('\\', "/");
31
-
32
- match fs::read(file_path) {
33
- Ok(content) => Some((rel_path, content)),
34
- Err(e) => {
35
- eprintln!("⚠️ Erreur lecture {}: {}", rel_path, e);
36
- None
37
- }
38
- }
39
- })
40
- .collect();
41
-
42
-
43
- let total_size: usize = file_data.par_iter().map(|(path, content)| path.len() + content.len() + 10).sum();
44
- let mut result = Vec::with_capacity(8 + total_size);
45
-
46
- result.extend_from_slice(&0x524f5850u32.to_be_bytes());
47
- result.extend_from_slice(&(file_data.len() as u32).to_be_bytes());
48
-
49
- for (rel_path, content) in file_data {
50
- let name_bytes = rel_path.as_bytes();
51
- let name_len = (name_bytes.len() as u16).to_be_bytes();
52
- let size = (content.len() as u64).to_be_bytes();
53
-
54
- result.extend_from_slice(&name_len);
55
- result.extend_from_slice(name_bytes);
56
- result.extend_from_slice(&size);
57
- result.extend_from_slice(&content);
58
- }
59
-
60
- Ok(result)
61
- }
62
-
63
- pub fn pack_path(path: &Path) -> Result<Vec<u8>> {
64
- if path.is_file() {
65
- fs::read(path).map_err(Into::into)
66
- } else if path.is_dir() {
67
- pack_directory(path, Some(path))
68
- } else {
69
- Err(anyhow::anyhow!("Path is neither file nor directory"))
70
- }
71
- }
72
-
73
- pub fn pack_path_with_metadata(path: &Path) -> Result<PackResult> {
74
- if path.is_file() {
75
- let data = fs::read(path)?;
76
- let size = data.len();
77
- let name = path.file_name()
78
- .and_then(|n| n.to_str())
79
- .unwrap_or("file");
80
-
81
- let file_list = json!([{"name": name, "size": size}]);
82
- Ok(PackResult {
83
- data,
84
- file_list_json: Some(file_list.to_string()),
85
- })
86
- } else if path.is_dir() {
87
- let base = path;
88
- let files: Vec<PathBuf> = WalkDir::new(path)
89
- .follow_links(false)
90
- .into_iter()
91
- .filter_map(|e| e.ok())
92
- .filter(|e| e.file_type().is_file())
93
- .map(|e| e.path().to_path_buf())
94
- .collect();
95
-
96
- let file_data: Vec<(String, Vec<u8>)> = files
97
- .par_iter()
98
- .filter_map(|file_path| {
99
- let rel_path = file_path.strip_prefix(base)
100
- .unwrap_or(file_path.as_path())
101
- .to_string_lossy()
102
- .replace('\\', "/");
103
-
104
- match fs::read(file_path) {
105
- Ok(content) => Some((rel_path, content)),
106
- Err(e) => {
107
- eprintln!("⚠️ Erreur lecture {}: {}", rel_path, e);
108
- None
109
- }
110
- }
111
- })
112
- .collect();
113
-
114
- let file_list: Vec<_> = file_data.iter()
115
- .map(|(name, content)| json!({"name": name, "size": content.len()}))
116
- .collect();
117
-
118
- let total_size: usize = file_data.par_iter()
119
- .map(|(path, content)| path.len() + content.len() + 10)
120
- .sum();
121
-
122
- let mut result = Vec::with_capacity(8 + total_size);
123
- result.extend_from_slice(&0x524f5850u32.to_be_bytes());
124
- result.extend_from_slice(&(file_data.len() as u32).to_be_bytes());
125
-
126
- for (rel_path, content) in file_data {
127
- let name_bytes = rel_path.as_bytes();
128
- let name_len = (name_bytes.len() as u16).to_be_bytes();
129
- let size = (content.len() as u64).to_be_bytes();
130
-
131
- result.extend_from_slice(&name_len);
132
- result.extend_from_slice(name_bytes);
133
- result.extend_from_slice(&size);
134
- result.extend_from_slice(&content);
135
- }
136
-
137
- Ok(PackResult {
138
- data: result,
139
- file_list_json: Some(serde_json::to_string(&file_list)?),
140
- })
141
-
142
- } else {
143
- Err(anyhow::anyhow!("Path is neither file nor directory"))
144
- }
145
- }
146
-
147
- pub fn unpack_buffer_to_dir(buf: &[u8], out_dir: &Path, files_opt: Option<&[String]>) -> Result<Vec<String>> {
148
-
149
- use std::convert::TryInto;
150
- let mut written = Vec::new();
151
- let mut pos = 0usize;
152
-
153
- if buf.len() < 8 { return Err(anyhow::anyhow!("Buffer too small")); }
154
- let magic = u32::from_be_bytes(buf[0..4].try_into().unwrap());
155
-
156
- if magic == 0x524f5849u32 {
157
- let index_len = u32::from_be_bytes(buf[4..8].try_into().unwrap()) as usize;
158
- pos = 8 + index_len;
159
- return unpack_entries_sequential(buf, pos, out_dir, files_opt);
160
- }
161
-
162
- if magic != 0x524f5850u32 { return Err(anyhow::anyhow!("Invalid pack magic")); }
163
- pos += 4;
164
- let file_count = u32::from_be_bytes(buf[pos..pos+4].try_into().unwrap()) as usize; pos += 4;
165
-
166
- let files_filter: Option<std::collections::HashSet<String>> = files_opt.map(|l| l.iter().map(|s| s.clone()).collect());
167
-
168
- for _ in 0..file_count {
169
- if pos + 2 > buf.len() { return Err(anyhow::anyhow!("Truncated pack (name len)")); }
170
- let name_len = u16::from_be_bytes(buf[pos..pos+2].try_into().unwrap()) as usize; pos += 2;
171
- if pos + name_len > buf.len() { return Err(anyhow::anyhow!("Truncated pack (name)")); }
172
- let name = String::from_utf8_lossy(&buf[pos..pos+name_len]).to_string(); pos += name_len;
173
- if pos + 8 > buf.len() { return Err(anyhow::anyhow!("Truncated pack (size)")); }
174
- let size = u64::from_be_bytes(buf[pos..pos+8].try_into().unwrap()) as usize; pos += 8;
175
- if pos + size > buf.len() { return Err(anyhow::anyhow!("Truncated pack (content)")); }
176
-
177
- let should_write = match &files_filter {
178
- Some(set) => set.contains(&name),
179
- None => true,
180
- };
181
-
182
- if should_write {
183
- let content = &buf[pos..pos+size];
184
- let p = Path::new(&name);
185
- let mut safe = std::path::PathBuf::new();
186
- for comp in p.components() {
187
- if let std::path::Component::Normal(osstr) = comp {
188
- safe.push(osstr);
189
- }
190
- }
191
- let dest = out_dir.join(&safe);
192
- if let Some(parent) = dest.parent() {
193
- std::fs::create_dir_all(parent).map_err(|e| anyhow::anyhow!("Cannot create parent dir {:?}: {}", parent, e))?;
194
- }
195
- std::fs::write(&dest, content).map_err(|e| anyhow::anyhow!("Cannot write {:?}: {}", dest, e))?;
196
- written.push(safe.to_string_lossy().to_string());
197
- }
198
-
199
- pos += size;
200
- }
201
-
202
- Ok(written)
203
- }
204
-
205
- fn unpack_entries_sequential(buf: &[u8], start: usize, out_dir: &Path, files_opt: Option<&[String]>) -> Result<Vec<String>> {
206
- let mut written = Vec::new();
207
- let mut pos = start;
208
- let files_filter: Option<std::collections::HashSet<String>> = files_opt.map(|l| l.iter().map(|s| s.clone()).collect());
209
-
210
- while pos + 2 < buf.len() {
211
- let magic = u32::from_be_bytes(buf[pos..pos+4].try_into().unwrap_or([0;4]));
212
- if magic == 0x524f5849u32 {
213
- if pos + 8 > buf.len() { break; }
214
- let index_len = u32::from_be_bytes(buf[pos+4..pos+8].try_into().unwrap()) as usize;
215
- pos += 8 + index_len;
216
- continue;
217
- }
218
-
219
- if pos + 2 > buf.len() { break; }
220
- let name_len = u16::from_be_bytes(buf[pos..pos+2].try_into().unwrap()) as usize;
221
- pos += 2;
222
- if pos + name_len > buf.len() { break; }
223
- let name = String::from_utf8_lossy(&buf[pos..pos+name_len]).to_string();
224
- pos += name_len;
225
- if pos + 8 > buf.len() { break; }
226
- let size = u64::from_be_bytes(buf[pos..pos+8].try_into().unwrap()) as usize;
227
- pos += 8;
228
- if pos + size > buf.len() { break; }
229
-
230
- let should_write = match &files_filter {
231
- Some(set) => set.contains(&name),
232
- None => true,
233
- };
234
-
235
- if should_write {
236
- let content = &buf[pos..pos+size];
237
- let p = Path::new(&name);
238
- let mut safe = std::path::PathBuf::new();
239
- for comp in p.components() {
240
- if let std::path::Component::Normal(osstr) = comp {
241
- safe.push(osstr);
242
- }
243
- }
244
- let dest = out_dir.join(&safe);
245
- if let Some(parent) = dest.parent() {
246
- std::fs::create_dir_all(parent).map_err(|e| anyhow::anyhow!("Cannot create parent dir {:?}: {}", parent, e))?;
247
- }
248
- std::fs::write(&dest, content).map_err(|e| anyhow::anyhow!("Cannot write {:?}: {}", dest, e))?;
249
- written.push(safe.to_string_lossy().to_string());
250
- }
251
-
252
- pos += size;
253
- }
254
-
255
- Ok(written)
256
- }
257
-
258
- fn unpack_progress_percent(total_expected: u64, bytes_processed: u64, file_count: usize, processed_files: usize) -> u64 {
259
- if total_expected > 0 {
260
- return 10 + (bytes_processed.saturating_mul(89) / total_expected).min(89);
261
- }
262
- if file_count > 0 {
263
- return 10 + ((processed_files as u64).saturating_mul(89) / file_count as u64).min(89);
264
- }
265
- 10
266
- }
267
-
268
- fn report_unpack_progress(
269
- progress: Option<&(dyn Fn(u64, u64, &str) + Send)>,
270
- total_expected: u64,
271
- bytes_processed: u64,
272
- file_count: usize,
273
- processed_files: usize,
274
- last_pct: &mut u64,
275
- ) {
276
- if let Some(cb) = progress {
277
- let pct = unpack_progress_percent(total_expected, bytes_processed, file_count, processed_files);
278
- if pct > *last_pct {
279
- *last_pct = pct;
280
- cb(pct, 100, "extracting");
281
- }
282
- }
283
- }
284
-
285
- pub fn unpack_stream_to_dir<R: std::io::Read>(
286
- reader: &mut R,
287
- out_dir: &Path,
288
- files_opt: Option<&[String]>,
289
- progress: Option<&(dyn Fn(u64, u64, &str) + Send)>,
290
- total_expected: u64,
291
- ) -> Result<Vec<String>> {
292
- let mut written = Vec::new();
293
- let files_filter: Option<std::collections::HashSet<String>> = files_opt.map(|l| l.iter().map(|s| s.clone()).collect());
294
- let mut requested = files_filter.as_ref().map(|s| s.len()).unwrap_or(usize::MAX);
295
- let mut file_count = 0usize;
296
- let mut processed_files = 0usize;
297
- let mut bytes_processed = 0u64;
298
- let mut last_pct = 10u64;
299
-
300
- let mut magic = read_pack_u32(reader)?;
301
- if magic == 0x524f5831u32 {
302
- magic = read_pack_u32(reader)?;
303
- }
304
- if magic == 0x524f5849u32 {
305
- let index_len = read_pack_u32(reader)? as u64;
306
- discard_pack_bytes(reader, index_len, &mut bytes_processed, file_count, processed_files, total_expected, progress, &mut last_pct)?;
307
- magic = read_pack_u32(reader)?;
308
- }
309
- if magic != 0x524f5850u32 {
310
- return Err(anyhow::anyhow!("Invalid pack magic: 0x{:08x}", magic));
311
- }
312
-
313
- file_count = read_pack_u32(reader)? as usize;
314
-
315
- for _ in 0..file_count {
316
- let name_len = read_pack_u16(reader)? as usize;
317
- let mut name_bytes = vec![0u8; name_len];
318
- read_pack_exact(reader, &mut name_bytes)?;
319
- let name = String::from_utf8_lossy(&name_bytes).to_string();
320
- let size = read_pack_u64(reader)?;
321
-
322
- let should_write = match &files_filter {
323
- Some(set) => set.contains(&name),
324
- None => true,
325
- };
326
-
327
- if should_write {
328
- let safe = sanitize_pack_path(&name);
329
- let dest = out_dir.join(&safe);
330
- if let Some(parent) = dest.parent() {
331
- std::fs::create_dir_all(parent).map_err(|e| anyhow::anyhow!("Cannot create parent dir {:?}: {}", parent, e))?;
332
- }
333
- let file = std::fs::File::create(&dest).map_err(|e| anyhow::anyhow!("Cannot write {:?}: {}", dest, e))?;
334
- let mut writer = std::io::BufWriter::with_capacity(file_buffer_capacity(size), file);
335
- copy_pack_bytes(reader, &mut writer, size, &mut bytes_processed, file_count, processed_files, total_expected, progress, &mut last_pct)?;
336
- finalize_output_file(writer, size, &dest)?;
337
- written.push(safe.to_string_lossy().to_string());
338
- if files_filter.is_some() {
339
- requested = requested.saturating_sub(1);
340
- }
341
- } else {
342
- discard_pack_bytes(reader, size, &mut bytes_processed, file_count, processed_files, total_expected, progress, &mut last_pct)?;
343
- }
344
-
345
- processed_files = processed_files.saturating_add(1);
346
- report_unpack_progress(progress, total_expected, bytes_processed, file_count, processed_files, &mut last_pct);
347
-
348
- if requested == 0 {
349
- if let Some(cb) = progress {
350
- cb(99, 100, "finishing");
351
- }
352
- return Ok(written);
353
- }
354
- }
355
-
356
- if let Some(cb) = progress {
357
- cb(99, 100, "finishing");
358
- }
359
-
360
- Ok(written)
361
- }
362
-
363
- fn read_pack_exact<R: std::io::Read>(reader: &mut R, buf: &mut [u8]) -> Result<()> {
364
- reader.read_exact(buf).map_err(|e| anyhow::anyhow!("Stream read error: {}", e))
365
- }
366
-
367
- fn read_pack_u16<R: std::io::Read>(reader: &mut R) -> Result<u16> {
368
- let mut buf = [0u8; 2];
369
- read_pack_exact(reader, &mut buf)?;
370
- Ok(u16::from_be_bytes(buf))
371
- }
372
-
373
- fn read_pack_u32<R: std::io::Read>(reader: &mut R) -> Result<u32> {
374
- let mut buf = [0u8; 4];
375
- read_pack_exact(reader, &mut buf)?;
376
- Ok(u32::from_be_bytes(buf))
377
- }
378
-
379
- fn read_pack_u64<R: std::io::Read>(reader: &mut R) -> Result<u64> {
380
- let mut buf = [0u8; 8];
381
- read_pack_exact(reader, &mut buf)?;
382
- Ok(u64::from_be_bytes(buf))
383
- }
384
-
385
- fn sanitize_pack_path(name: &str) -> std::path::PathBuf {
386
- let p = Path::new(name);
387
- let mut safe = std::path::PathBuf::new();
388
- for comp in p.components() {
389
- if let std::path::Component::Normal(osstr) = comp {
390
- safe.push(osstr);
391
- }
392
- }
393
- safe
394
- }
395
-
396
- fn file_buffer_capacity(size: u64) -> usize {
397
- usize::try_from(size)
398
- .unwrap_or(4 * 1024 * 1024)
399
- .min(4 * 1024 * 1024)
400
- .max(8192)
401
- }
402
-
403
- fn finalize_output_file(
404
- mut writer: std::io::BufWriter<std::fs::File>,
405
- size: u64,
406
- dest: &Path,
407
- ) -> Result<()> {
408
- std::io::Write::flush(&mut writer).map_err(|e| anyhow::anyhow!("Cannot flush {:?}: {}", dest, e))?;
409
- let file = writer.into_inner().map_err(|e| anyhow::anyhow!("Cannot finalize {:?}: {}", dest, e.error()))?;
410
- crate::io_advice::sync_and_drop(&file, size);
411
- Ok(())
412
- }
413
-
414
- fn copy_pack_bytes<R: std::io::Read, W: std::io::Write>(
415
- reader: &mut R,
416
- writer: &mut W,
417
- mut remaining: u64,
418
- bytes_processed: &mut u64,
419
- file_count: usize,
420
- processed_files: usize,
421
- total_expected: u64,
422
- progress: Option<&(dyn Fn(u64, u64, &str) + Send)>,
423
- last_pct: &mut u64,
424
- ) -> Result<()> {
425
- let mut buf = vec![0u8; 1024 * 1024];
426
- while remaining > 0 {
427
- let take = remaining.min(buf.len() as u64) as usize;
428
- let read = reader.read(&mut buf[..take]).map_err(|e| anyhow::anyhow!("Stream read error: {}", e))?;
429
- if read == 0 {
430
- return Err(anyhow::anyhow!("Truncated pack content"));
431
- }
432
- writer.write_all(&buf[..read]).map_err(|e| anyhow::anyhow!("Stream write error: {}", e))?;
433
- remaining -= read as u64;
434
- *bytes_processed = bytes_processed.saturating_add(read as u64);
435
- report_unpack_progress(progress, total_expected, *bytes_processed, file_count, processed_files, last_pct);
436
- }
437
- Ok(())
438
- }
439
-
440
- fn discard_pack_bytes<R: std::io::Read>(
441
- reader: &mut R,
442
- mut remaining: u64,
443
- bytes_processed: &mut u64,
444
- file_count: usize,
445
- processed_files: usize,
446
- total_expected: u64,
447
- progress: Option<&(dyn Fn(u64, u64, &str) + Send)>,
448
- last_pct: &mut u64,
449
- ) -> Result<()> {
450
- let mut buf = vec![0u8; 1024 * 1024];
451
- while remaining > 0 {
452
- let take = remaining.min(buf.len() as u64) as usize;
453
- let read = reader.read(&mut buf[..take]).map_err(|e| anyhow::anyhow!("Stream read error: {}", e))?;
454
- if read == 0 {
455
- return Err(anyhow::anyhow!("Truncated pack content"));
456
- }
457
- remaining -= read as u64;
458
- *bytes_processed = bytes_processed.saturating_add(read as u64);
459
- report_unpack_progress(progress, total_expected, *bytes_processed, file_count, processed_files, last_pct);
460
- }
461
- Ok(())
462
- }
463
-
464
- #[cfg(test)]
465
- mod stream_tests {
466
- use super::*;
467
- use std::io::{Write, Read};
468
- use std::time::{SystemTime, UNIX_EPOCH};
469
-
470
- struct ChunkedReader<R> {
471
- inner: R,
472
- max_chunk: usize,
473
- }
474
-
475
- impl<R: Read> Read for ChunkedReader<R> {
476
- fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
477
- let limit = buf.len().min(self.max_chunk);
478
- self.inner.read(&mut buf[..limit])
479
- }
480
- }
481
-
482
- #[test]
483
- fn test_unpack_stream_to_dir() -> Result<()> {
484
- let mut parts: Vec<u8> = Vec::new();
485
- parts.extend_from_slice(&0x524f5850u32.to_be_bytes()); parts.extend_from_slice(&(2u32.to_be_bytes()));
486
- let name1 = b"file1.txt";
487
- parts.extend_from_slice(&(name1.len() as u16).to_be_bytes());
488
- parts.extend_from_slice(name1);
489
- let content1 = b"hello world";
490
- parts.extend_from_slice(&(content1.len() as u64).to_be_bytes());
491
- parts.extend_from_slice(content1);
492
-
493
- let name2 = b"file2.txt";
494
- parts.extend_from_slice(&(name2.len() as u16).to_be_bytes());
495
- parts.extend_from_slice(name2);
496
- let content2 = b"goodbye";
497
- parts.extend_from_slice(&(content2.len() as u64).to_be_bytes());
498
- parts.extend_from_slice(content2);
499
-
500
- let mut encoder = zstd::stream::Encoder::new(Vec::new(), 0).map_err(|e| anyhow::anyhow!(e))?;
501
- encoder.write_all(&parts).map_err(|e| anyhow::anyhow!(e))?;
502
- let compressed = encoder.finish().map_err(|e| anyhow::anyhow!(e))?;
503
-
504
- let mut dec = zstd::stream::Decoder::new(std::io::Cursor::new(compressed.clone())).map_err(|e| anyhow::anyhow!(e))?;
505
- dec.window_log_max(31).map_err(|e| anyhow::anyhow!(e))?;
506
-
507
- let mut all = Vec::new();
508
- dec.read_to_end(&mut all).map_err(|e| anyhow::anyhow!(e))?;
509
- assert_eq!(all.len(), parts.len());
510
- assert_eq!(&all[..], &parts[..]);
511
-
512
- let mut dec2 = zstd::stream::Decoder::new(std::io::Cursor::new(compressed)).map_err(|e| anyhow::anyhow!(e))?;
513
- dec2.window_log_max(31).map_err(|e| anyhow::anyhow!(e))?;
514
-
515
- let ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
516
- let tmpdir = std::env::temp_dir().join(format!("rox_unpack_test_{}", ms));
517
- let _ = std::fs::create_dir_all(&tmpdir);
518
-
519
- let out = unpack_stream_to_dir(&mut dec2, &tmpdir, None, None, 0)?;
520
-
521
- assert_eq!(out.len(), 2);
522
- assert!(tmpdir.join("file1.txt").exists());
523
- assert!(tmpdir.join("file2.txt").exists());
524
- let _ = std::fs::remove_file(tmpdir.join("file1.txt"));
525
- let _ = std::fs::remove_file(tmpdir.join("file2.txt"));
526
- let _ = std::fs::remove_dir(&tmpdir);
527
- Ok(())
528
- }
529
-
530
- #[test]
531
- fn test_unpack_stream_from_png_payload() -> Result<()> {
532
- let mut parts: Vec<u8> = Vec::new();
533
- parts.extend_from_slice(&0x524f5850u32.to_be_bytes()); parts.extend_from_slice(&(2u32.to_be_bytes()));
534
- let name1 = b"file1.txt";
535
- parts.extend_from_slice(&(name1.len() as u16).to_be_bytes());
536
- parts.extend_from_slice(name1);
537
- let content1 = b"hello world";
538
- parts.extend_from_slice(&(content1.len() as u64).to_be_bytes());
539
- parts.extend_from_slice(content1);
540
-
541
- let name2 = b"file2.txt";
542
- parts.extend_from_slice(&(name2.len() as u16).to_be_bytes());
543
- parts.extend_from_slice(name2);
544
- let content2 = b"goodbye";
545
- parts.extend_from_slice(&(content2.len() as u64).to_be_bytes());
546
- parts.extend_from_slice(content2);
547
-
548
- let png = crate::encoder::encode_to_png_with_name_and_filelist(&parts, 0, None, None)?;
549
- let payload = crate::png_utils::extract_payload_from_png(&png).map_err(|e| anyhow::anyhow!(e))?;
550
- assert!(!payload.is_empty());
551
- let first = payload[0];
552
- assert_eq!(first, 0x00u8);
553
- let compressed = payload[1..].to_vec();
554
- let mut dec = zstd::stream::Decoder::new(std::io::Cursor::new(compressed)).map_err(|e| anyhow::anyhow!(e))?;
555
- dec.window_log_max(31).map_err(|e| anyhow::anyhow!(e))?;
556
-
557
- let ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
558
- let tmpdir = std::env::temp_dir().join(format!("rox_unpack_png_test_{}", ms));
559
- let _ = std::fs::create_dir_all(&tmpdir);
560
-
561
- let out = unpack_stream_to_dir(&mut dec, &tmpdir, None, None, 0)?;
562
-
563
- assert_eq!(out.len(), 2);
564
- assert!(tmpdir.join("file1.txt").exists());
565
- assert!(tmpdir.join("file2.txt").exists());
566
-
567
- let _ = std::fs::remove_file(tmpdir.join("file1.txt"));
568
- let _ = std::fs::remove_file(tmpdir.join("file2.txt"));
569
- let _ = std::fs::remove_dir(&tmpdir);
570
- Ok(())
571
- }
572
-
573
- #[test]
574
- fn test_unpack_stream_to_dir_large_file_small_reads() -> Result<()> {
575
- let large = vec![0x5a; 2 * 1024 * 1024];
576
- let mut parts: Vec<u8> = Vec::new();
577
- parts.extend_from_slice(&0x524f5850u32.to_be_bytes());
578
- parts.extend_from_slice(&(1u32.to_be_bytes()));
579
- let name = b"big.bin";
580
- parts.extend_from_slice(&(name.len() as u16).to_be_bytes());
581
- parts.extend_from_slice(name);
582
- parts.extend_from_slice(&(large.len() as u64).to_be_bytes());
583
- parts.extend_from_slice(&large);
584
-
585
- let reader = std::io::Cursor::new(parts);
586
- let mut reader = ChunkedReader { inner: reader, max_chunk: 37 };
587
-
588
- let ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
589
- let tmpdir = std::env::temp_dir().join(format!("rox_unpack_large_stream_test_{}", ms));
590
- let _ = std::fs::create_dir_all(&tmpdir);
591
-
592
- let out = unpack_stream_to_dir(&mut reader, &tmpdir, None, None, large.len() as u64)?;
593
-
594
- assert_eq!(out, vec!["big.bin".to_string()]);
595
- let restored = std::fs::read(tmpdir.join("big.bin"))?;
596
- assert_eq!(restored.len(), large.len());
597
- assert_eq!(restored, large);
598
-
599
- let _ = std::fs::remove_file(tmpdir.join("big.bin"));
600
- let _ = std::fs::remove_dir(&tmpdir);
601
- Ok(())
602
- }
603
- }
604
-