roxify 1.13.7 → 1.13.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56):
  1. package/dist/cli.js +1 -8
  2. package/dist/stub-progress.d.ts +4 -4
  3. package/dist/stub-progress.js +4 -4
  4. package/dist/utils/decoder.d.ts +46 -2
  5. package/dist/utils/decoder.js +248 -38
  6. package/dist/utils/ecc.js +0 -1
  7. package/dist/utils/encoder.d.ts +30 -1
  8. package/dist/utils/encoder.js +34 -18
  9. package/dist/utils/inspection.d.ts +1 -1
  10. package/dist/utils/inspection.js +2 -2
  11. package/dist/utils/robust-audio.js +0 -13
  12. package/dist/utils/robust-image.js +0 -26
  13. package/package.json +12 -29
  14. package/roxify_native-aarch64-apple-darwin.node +0 -0
  15. package/roxify_native-aarch64-pc-windows-msvc.node +0 -0
  16. package/roxify_native-aarch64-unknown-linux-gnu.node +0 -0
  17. package/roxify_native-i686-pc-windows-msvc.node +0 -0
  18. package/roxify_native-i686-unknown-linux-gnu.node +0 -0
  19. package/{dist/rox-macos-universal → roxify_native-universal-apple-darwin.node} +0 -0
  20. package/roxify_native-x86_64-apple-darwin.node +0 -0
  21. package/roxify_native-x86_64-pc-windows-msvc.node +0 -0
  22. package/roxify_native-x86_64-unknown-linux-gnu.node +0 -0
  23. package/scripts/postinstall.cjs +23 -2
  24. package/Cargo.toml +0 -91
  25. package/dist/roxify_native +0 -0
  26. package/dist/roxify_native-macos-arm64 +0 -0
  27. package/dist/roxify_native-macos-x64 +0 -0
  28. package/dist/roxify_native.exe +0 -0
  29. package/native/archive.rs +0 -220
  30. package/native/audio.rs +0 -151
  31. package/native/bench_hybrid.rs +0 -145
  32. package/native/bwt.rs +0 -56
  33. package/native/context_mixing.rs +0 -117
  34. package/native/core.rs +0 -378
  35. package/native/crypto.rs +0 -209
  36. package/native/encoder.rs +0 -405
  37. package/native/hybrid.rs +0 -297
  38. package/native/image_utils.rs +0 -82
  39. package/native/io_advice.rs +0 -43
  40. package/native/io_ntfs_optimized.rs +0 -99
  41. package/native/lib.rs +0 -480
  42. package/native/main.rs +0 -842
  43. package/native/mtf.rs +0 -106
  44. package/native/packer.rs +0 -604
  45. package/native/png_chunk_writer.rs +0 -146
  46. package/native/png_utils.rs +0 -554
  47. package/native/pool.rs +0 -101
  48. package/native/progress.rs +0 -142
  49. package/native/rans.rs +0 -149
  50. package/native/rans_byte.rs +0 -286
  51. package/native/reconstitution.rs +0 -623
  52. package/native/streaming.rs +0 -189
  53. package/native/streaming_decode.rs +0 -625
  54. package/native/streaming_encode.rs +0 -684
  55. package/native/test_small_bwt.rs +0 -31
  56. package/native/test_stages.rs +0 -70
package/native/hybrid.rs DELETED
@@ -1,297 +0,0 @@
1
- use anyhow::Result;
2
- use rayon::prelude::*;
3
- use crate::bwt::{bwt_encode, bwt_decode};
4
- use crate::mtf::{mtf_encode, mtf_decode, rle0_encode, rle0_decode};
5
- use crate::rans_byte::{SymbolStats, rans_encode_block, rans_decode_block};
6
- use crate::context_mixing::analyze_entropy;
7
-
8
- const BLOCK_SIZE: usize = 1024 * 1024;
9
-
10
- const BLOCK_FLAG_BWT: u8 = 0;
11
- const BLOCK_FLAG_ZSTD: u8 = 1;
12
- const BLOCK_FLAG_STORE: u8 = 2;
13
-
14
- const ENTROPY_THRESHOLD_STORE: f32 = 7.95;
15
- const ENTROPY_THRESHOLD_ZSTD: f32 = 7.5;
16
-
17
- #[derive(Clone, Debug)]
18
- pub struct CompressionStats {
19
- pub original_size: u64,
20
- pub compressed_size: u64,
21
- pub ratio: f64,
22
- pub entropy_bits: f32,
23
- pub blocks_count: usize,
24
- }
25
-
26
- pub struct HybridCompressor {
27
- block_size: usize,
28
- }
29
-
30
- impl HybridCompressor {
31
- pub fn new() -> Self {
32
- HybridCompressor {
33
- block_size: BLOCK_SIZE,
34
- }
35
- }
36
-
37
- pub fn compress(&self, data: &[u8]) -> Result<(Vec<u8>, CompressionStats)> {
38
- let original_size = data.len() as u64;
39
- let blocks: Vec<&[u8]> = data.chunks(self.block_size).collect();
40
- let blocks_count = blocks.len();
41
-
42
- let compressed_blocks: Vec<Vec<u8>> = blocks
43
- .par_iter()
44
- .map(|block| compress_block(block))
45
- .collect::<Result<Vec<_>, _>>()?;
46
-
47
- let entropy = if data.len() > 4096 {
48
- analyze_entropy(&data[..4096.min(data.len())])
49
- } else {
50
- analyze_entropy(data)
51
- };
52
-
53
- let total_compressed: usize = compressed_blocks.iter().map(|b| b.len() + 4).sum();
54
- let mut result = Vec::with_capacity(16 + total_compressed);
55
- result.extend_from_slice(b"RBW2");
56
- result.extend_from_slice(&(blocks_count as u32).to_le_bytes());
57
- result.extend_from_slice(&original_size.to_le_bytes());
58
-
59
- for block in &compressed_blocks {
60
- result.extend_from_slice(&(block.len() as u32).to_le_bytes());
61
- result.extend_from_slice(block);
62
- }
63
-
64
- let compressed_size = result.len() as u64;
65
- let ratio = (compressed_size as f64) / (original_size as f64);
66
-
67
- Ok((result, CompressionStats {
68
- original_size,
69
- compressed_size,
70
- ratio,
71
- entropy_bits: entropy,
72
- blocks_count,
73
- }))
74
- }
75
-
76
- pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>> {
77
- if data.len() < 16 {
78
- return Err(anyhow::anyhow!("Invalid compressed data"));
79
- }
80
-
81
- let magic = &data[0..4];
82
- let v2 = magic == b"RBW2";
83
- if magic != b"RBW1" && !v2 {
84
- return Err(anyhow::anyhow!("Invalid magic"));
85
- }
86
-
87
- let blocks_count = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
88
- let original_size = u64::from_le_bytes([
89
- data[8], data[9], data[10], data[11],
90
- data[12], data[13], data[14], data[15],
91
- ]) as usize;
92
-
93
- let mut pos = 16;
94
- let mut block_slices: Vec<&[u8]> = Vec::with_capacity(blocks_count);
95
-
96
- for _ in 0..blocks_count {
97
- if pos + 4 > data.len() {
98
- return Err(anyhow::anyhow!("Truncated block header"));
99
- }
100
- let block_size = u32::from_le_bytes([
101
- data[pos], data[pos + 1], data[pos + 2], data[pos + 3],
102
- ]) as usize;
103
- pos += 4;
104
- if pos + block_size > data.len() {
105
- return Err(anyhow::anyhow!("Truncated block data"));
106
- }
107
- block_slices.push(&data[pos..pos + block_size]);
108
- pos += block_size;
109
- }
110
-
111
- let decompressed_blocks: Vec<Vec<u8>> = block_slices
112
- .par_iter()
113
- .map(|block_data| {
114
- if v2 {
115
- decompress_block_v2(block_data)
116
- } else {
117
- decompress_block_v1(block_data)
118
- }
119
- })
120
- .collect::<Result<Vec<_>, _>>()?;
121
-
122
- let mut result = Vec::with_capacity(original_size);
123
- for block in decompressed_blocks {
124
- result.extend_from_slice(&block);
125
- }
126
-
127
- Ok(result)
128
- }
129
-
130
- pub fn estimate_gain(&self, data: &[u8]) -> f64 {
131
- let entropy = analyze_entropy(data);
132
- let theoretical_min = (data.len() as f64) * (entropy as f64) / 8.0;
133
- let ratio = theoretical_min / (data.len() as f64);
134
- (1.0 - ratio) * 100.0
135
- }
136
- }
137
-
138
- fn compress_block_with_entropy(block: &[u8], entropy: f32) -> Result<Vec<u8>> {
139
- if block.is_empty() {
140
- return Ok(vec![BLOCK_FLAG_STORE]);
141
- }
142
-
143
- if entropy >= ENTROPY_THRESHOLD_STORE {
144
- let mut result = Vec::with_capacity(1 + block.len());
145
- result.push(BLOCK_FLAG_STORE);
146
- result.extend_from_slice(block);
147
- return Ok(result);
148
- }
149
-
150
- if entropy >= ENTROPY_THRESHOLD_ZSTD {
151
- let compressed = zstd::encode_all(block, 1)?;
152
- if compressed.len() < block.len() {
153
- let mut result = Vec::with_capacity(1 + 4 + compressed.len());
154
- result.push(BLOCK_FLAG_ZSTD);
155
- result.extend_from_slice(&(block.len() as u32).to_le_bytes());
156
- result.extend_from_slice(&compressed);
157
- return Ok(result);
158
- }
159
- let mut result = Vec::with_capacity(1 + block.len());
160
- result.push(BLOCK_FLAG_STORE);
161
- result.extend_from_slice(block);
162
- return Ok(result);
163
- }
164
-
165
- try_bwt_or_zstd(block)
166
- }
167
-
168
- fn compress_block(block: &[u8]) -> Result<Vec<u8>> {
169
- if block.is_empty() {
170
- return Ok(vec![BLOCK_FLAG_STORE]);
171
- }
172
-
173
- let entropy = analyze_entropy(block);
174
- compress_block_with_entropy(block, entropy)
175
- }
176
-
177
- fn try_bwt_or_zstd(block: &[u8]) -> Result<Vec<u8>> {
178
- let bwt = bwt_encode(block)?;
179
- let mtf_data = mtf_encode(&bwt.transformed);
180
- let rle_data = rle0_encode(&mtf_data);
181
- let stats = SymbolStats::from_data(&rle_data);
182
- let encoded = rans_encode_block(&rle_data, &stats);
183
- let stats_bytes = stats.serialize();
184
-
185
- let bwt_total = 1 + 4 + 4 + 4 + stats_bytes.len() + encoded.len();
186
-
187
- if bwt_total < block.len() {
188
- let zstd_compressed = zstd::encode_all(block, 3)?;
189
- let zstd_total = 1 + 4 + zstd_compressed.len();
190
-
191
- if zstd_total < bwt_total {
192
- let mut result = Vec::with_capacity(zstd_total);
193
- result.push(BLOCK_FLAG_ZSTD);
194
- result.extend_from_slice(&(block.len() as u32).to_le_bytes());
195
- result.extend_from_slice(&zstd_compressed);
196
- return Ok(result);
197
- }
198
-
199
- let mut result = Vec::with_capacity(bwt_total);
200
- result.push(BLOCK_FLAG_BWT);
201
- result.extend_from_slice(&bwt.primary_index.to_le_bytes());
202
- result.extend_from_slice(&(block.len() as u32).to_le_bytes());
203
- result.extend_from_slice(&(rle_data.len() as u32).to_le_bytes());
204
- result.extend_from_slice(&stats_bytes);
205
- result.extend_from_slice(&encoded);
206
- return Ok(result);
207
- }
208
-
209
- let zstd_compressed = zstd::encode_all(block, 3)?;
210
- if 1 + 4 + zstd_compressed.len() < block.len() {
211
- let mut result = Vec::with_capacity(1 + 4 + zstd_compressed.len());
212
- result.push(BLOCK_FLAG_ZSTD);
213
- result.extend_from_slice(&(block.len() as u32).to_le_bytes());
214
- result.extend_from_slice(&zstd_compressed);
215
- return Ok(result);
216
- }
217
-
218
- let mut result = Vec::with_capacity(1 + block.len());
219
- result.push(BLOCK_FLAG_STORE);
220
- result.extend_from_slice(block);
221
- Ok(result)
222
- }
223
-
224
- fn decompress_block_v2(block: &[u8]) -> Result<Vec<u8>> {
225
- if block.is_empty() {
226
- return Err(anyhow::anyhow!("Empty block"));
227
- }
228
-
229
- match block[0] {
230
- BLOCK_FLAG_STORE => Ok(block[1..].to_vec()),
231
- BLOCK_FLAG_ZSTD => {
232
- if block.len() < 5 {
233
- return Err(anyhow::anyhow!("Truncated zstd block"));
234
- }
235
- let orig_len = u32::from_le_bytes([block[1], block[2], block[3], block[4]]) as usize;
236
- let mut decoded = zstd::decode_all(&block[5..])?;
237
- decoded.truncate(orig_len);
238
- Ok(decoded)
239
- }
240
- BLOCK_FLAG_BWT => {
241
- if block.len() < 13 {
242
- return Err(anyhow::anyhow!("Truncated BWT block"));
243
- }
244
- let primary_index = u32::from_le_bytes([block[1], block[2], block[3], block[4]]);
245
- let orig_len = u32::from_le_bytes([block[5], block[6], block[7], block[8]]) as usize;
246
- let rle_len = u32::from_le_bytes([block[9], block[10], block[11], block[12]]) as usize;
247
-
248
- let (stats, stats_size) = SymbolStats::deserialize(&block[13..])?;
249
- let encoded = &block[13 + stats_size..];
250
-
251
- let rle_data = rans_decode_block(encoded, &stats, rle_len)?;
252
- let mtf_data = rle0_decode(&rle_data);
253
- let bwt_data = mtf_decode(&mtf_data);
254
- let original = bwt_decode(&bwt_data, primary_index)?;
255
-
256
- if original.len() != orig_len {
257
- return Err(anyhow::anyhow!("Size mismatch"));
258
- }
259
- Ok(original)
260
- }
261
- _ => Err(anyhow::anyhow!("Unknown block type: {}", block[0])),
262
- }
263
- }
264
-
265
- fn decompress_block_v1(block: &[u8]) -> Result<Vec<u8>> {
266
- if block.len() < 12 {
267
- return Err(anyhow::anyhow!("Block too small"));
268
- }
269
-
270
- let primary_index = u32::from_le_bytes([block[0], block[1], block[2], block[3]]);
271
- let orig_len = u32::from_le_bytes([block[4], block[5], block[6], block[7]]) as usize;
272
- let rle_len = u32::from_le_bytes([block[8], block[9], block[10], block[11]]) as usize;
273
-
274
- let (stats, stats_size) = SymbolStats::deserialize(&block[12..])?;
275
- let encoded = &block[12 + stats_size..];
276
-
277
- let rle_data = rans_decode_block(encoded, &stats, rle_len)?;
278
- let mtf_data = rle0_decode(&rle_data);
279
- let bwt_data = mtf_decode(&mtf_data);
280
- let original = bwt_decode(&bwt_data, primary_index)?;
281
-
282
- if original.len() != orig_len {
283
- return Err(anyhow::anyhow!("Size mismatch"));
284
- }
285
-
286
- Ok(original)
287
- }
288
-
289
- pub fn compress_high_performance(data: &[u8]) -> Result<(Vec<u8>, CompressionStats)> {
290
- let compressor = HybridCompressor::new();
291
- compressor.compress(data)
292
- }
293
-
294
- pub fn decompress_high_performance(data: &[u8]) -> Result<Vec<u8>> {
295
- let compressor = HybridCompressor::new();
296
- compressor.decompress(data)
297
- }
@@ -1,82 +0,0 @@
1
- use image::{ImageFormat, DynamicImage, ImageReader};
2
- use std::io::Cursor;
3
-
4
- fn load_no_limits(input: &[u8]) -> Result<DynamicImage, String> {
5
- let mut reader = ImageReader::new(Cursor::new(input))
6
- .with_guessed_format()
7
- .map_err(|e| format!("Failed to guess format: {}", e))?;
8
- reader.no_limits();
9
- reader.decode().map_err(|e| format!("Failed to load image: {}", e))
10
- }
11
-
12
- pub fn sharp_resize(
13
- input: &[u8],
14
- width: u32,
15
- height: u32,
16
- kernel: &str,
17
- ) -> Result<Vec<u8>, String> {
18
- let img = load_no_limits(input)?;
19
-
20
- let filter = match kernel {
21
- "nearest" => image::imageops::FilterType::Nearest,
22
- "bilinear" | "linear" => image::imageops::FilterType::Triangle,
23
- "cubic" | "bicubic" => image::imageops::FilterType::CatmullRom,
24
- "lanczos" | "lanczos3" => image::imageops::FilterType::Lanczos3,
25
- _ => image::imageops::FilterType::Nearest,
26
- };
27
-
28
- let resized = img.resize_exact(width, height, filter);
29
-
30
- let mut output = Vec::new();
31
- resized.write_to(&mut Cursor::new(&mut output), ImageFormat::Png)
32
- .map_err(|e| format!("Failed to encode PNG: {}", e))?;
33
-
34
- Ok(output)
35
- }
36
-
37
- pub fn sharp_raw_pixels(input: &[u8]) -> Result<(Vec<u8>, u32, u32), String> {
38
- let img = load_no_limits(input)?;
39
-
40
- let rgb = img.to_rgb8();
41
- let width = rgb.width();
42
- let height = rgb.height();
43
- let raw = rgb.into_raw();
44
-
45
- Ok((raw, width, height))
46
- }
47
-
48
- pub fn sharp_metadata(input: &[u8]) -> Result<(u32, u32, String), String> {
49
- let img = load_no_limits(input)?;
50
-
51
- let width = img.width();
52
- let height = img.height();
53
- let format = match img {
54
- DynamicImage::ImageLuma8(_) => "gray",
55
- DynamicImage::ImageRgb8(_) => "rgb",
56
- DynamicImage::ImageRgba8(_) => "rgba",
57
- _ => "unknown",
58
- };
59
-
60
- Ok((width, height, format.to_string()))
61
- }
62
-
63
- pub fn rgb_to_png(rgb: &[u8], width: u32, height: u32) -> Result<Vec<u8>, String> {
64
- use image::codecs::png::{PngEncoder, CompressionType, FilterType};
65
- use image::ImageEncoder;
66
-
67
- let mut output = Vec::new();
68
- // Data is already zstd-compressed, so PNG deflate adds overhead.
69
- // Use Uncompressed (stored blocks) to avoid wasting CPU on incompressible data.
70
- let encoder = PngEncoder::new_with_quality(
71
- &mut output,
72
- CompressionType::Uncompressed,
73
- FilterType::NoFilter,
74
- );
75
- encoder.write_image(rgb, width, height, image::ExtendedColorType::Rgb8)
76
- .map_err(|e| format!("Failed to encode PNG: {}", e))?;
77
- Ok(output)
78
- }
79
-
80
- pub fn png_to_rgb(png: &[u8]) -> Result<(Vec<u8>, u32, u32), String> {
81
- sharp_raw_pixels(png)
82
- }
@@ -1,43 +0,0 @@
1
- use std::fs::File;
2
- #[cfg(target_os = "linux")]
3
- use std::os::fd::AsRawFd;
4
-
5
- pub const INPUT_DROP_GRANULARITY: u64 = 8 * 1024 * 1024;
6
-
7
- pub fn advise_file_sequential(file: &File) {
8
- #[cfg(target_os = "linux")]
9
- unsafe {
10
- let _ = libc::posix_fadvise(file.as_raw_fd(), 0, 0, libc::POSIX_FADV_SEQUENTIAL);
11
- }
12
-
13
- #[cfg(not(target_os = "linux"))]
14
- let _ = file;
15
- }
16
-
17
- pub fn advise_drop(file: &File, offset: u64, len: u64) {
18
- if len == 0 {
19
- return;
20
- }
21
-
22
- #[cfg(target_os = "linux")]
23
- unsafe {
24
- let _ = libc::posix_fadvise(
25
- file.as_raw_fd(),
26
- offset as libc::off_t,
27
- len as libc::off_t,
28
- libc::POSIX_FADV_DONTNEED,
29
- );
30
- }
31
-
32
- #[cfg(not(target_os = "linux"))]
33
- let _ = (file, offset, len);
34
- }
35
-
36
- pub fn sync_and_drop(file: &File, len: u64) {
37
- if len < INPUT_DROP_GRANULARITY {
38
- return;
39
- }
40
-
41
- let _ = file.sync_data();
42
- advise_drop(file, 0, len);
43
- }
@@ -1,99 +0,0 @@
1
- use std::fs::{File, OpenOptions};
2
- use std::io::{BufWriter, Write};
3
- use std::path::Path;
4
-
5
- /// Buffer size optimized for NTFS (larger = fewer syscalls)
6
- #[cfg(windows)]
7
- const NTFS_WRITE_BUFFER: usize = 4 * 1024 * 1024; // 4MB for Windows/NTFS
8
- #[cfg(not(windows))]
9
- const NTFS_WRITE_BUFFER: usize = 64 * 1024; // 64KB for Unix
10
-
11
- /// Optimized file writer with large buffer for NTFS
12
- pub struct OptimizedFileWriter {
13
- writer: BufWriter<File>,
14
- }
15
-
16
- impl OptimizedFileWriter {
17
- pub fn create(path: &Path) -> std::io::Result<Self> {
18
- let file = OpenOptions::new()
19
- .write(true)
20
- .create(true)
21
- .truncate(true)
22
- .open(path)?;
23
-
24
- Ok(Self {
25
- writer: BufWriter::with_capacity(NTFS_WRITE_BUFFER, file),
26
- })
27
- }
28
-
29
- pub fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
30
- self.writer.write_all(buf)
31
- }
32
-
33
- pub fn flush(&mut self) -> std::io::Result<()> {
34
- self.writer.flush()
35
- }
36
- }
37
-
38
- /// Write file with optimized buffering for target filesystem
39
- pub fn write_file_optimized(path: &Path, content: &[u8]) -> std::io::Result<()> {
40
- let mut writer = OptimizedFileWriter::create(path)?;
41
- writer.write_all(content)?;
42
- writer.flush()?;
43
- Ok(())
44
- }
45
-
46
- /// Batch write multiple files - keeps files open for better NTFS performance
47
- pub fn write_files_batch(
48
- base_dir: &Path,
49
- files: &[(String, &[u8])],
50
- ) -> Result<Vec<String>, String> {
51
- let mut written = Vec::with_capacity(files.len());
52
-
53
- for (rel_path, content) in files {
54
- let safe_path = sanitize_path(rel_path);
55
- let dest = base_dir.join(&safe_path);
56
-
57
- if let Some(parent) = dest.parent() {
58
- std::fs::create_dir_all(parent)
59
- .map_err(|e| format!("Cannot create parent dir {:?}: {}", parent, e))?;
60
- }
61
-
62
- write_file_optimized(&dest, content)
63
- .map_err(|e| format!("Cannot write {:?}: {}", dest, e))?;
64
-
65
- written.push(safe_path.to_string_lossy().to_string());
66
- }
67
-
68
- Ok(written)
69
- }
70
-
71
- fn sanitize_path(path: &str) -> std::path::PathBuf {
72
- let mut safe = std::path::PathBuf::new();
73
- for comp in std::path::Path::new(path).components() {
74
- if let std::path::Component::Normal(osstr) = comp {
75
- safe.push(osstr);
76
- }
77
- }
78
- safe
79
- }
80
-
81
- /// Pre-allocate file space on NTFS to reduce fragmentation
82
- #[cfg(windows)]
83
- pub fn preallocate_file(path: &Path, size: u64) -> std::io::Result<()> {
84
- use std::os::windows::fs::FileExt;
85
-
86
- let file = OpenOptions::new()
87
- .write(true)
88
- .create(true)
89
- .open(path)?;
90
-
91
- // Pre-allocate space
92
- file.set_len(size)?;
93
- Ok(())
94
- }
95
-
96
- #[cfg(not(windows))]
97
- pub fn preallocate_file(_path: &Path, _size: u64) -> std::io::Result<()> {
98
- Ok(())
99
- }