roxify 1.12.6 → 1.12.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Cargo.toml +98 -0
- package/dist/cli.js +104 -130
- package/dist/stub-progress.d.ts +5 -10
- package/dist/stub-progress.js +7 -20
- package/dist/utils/encoder.js +1 -1
- package/dist/utils/rust-cli-wrapper.d.ts +1 -4
- package/dist/utils/rust-cli-wrapper.js +3 -69
- package/native/archive.rs +220 -0
- package/native/audio.rs +151 -0
- package/native/bench_hybrid.rs +145 -0
- package/native/bwt.rs +56 -0
- package/native/context_mixing.rs +117 -0
- package/native/core.rs +382 -0
- package/native/crypto.rs +204 -0
- package/native/encoder.rs +690 -0
- package/native/gpu.rs +116 -0
- package/native/hybrid.rs +287 -0
- package/native/image_utils.rs +82 -0
- package/native/lib.rs +489 -0
- package/native/main.rs +534 -0
- package/native/mtf.rs +106 -0
- package/native/packer.rs +447 -0
- package/native/png_utils.rs +538 -0
- package/native/pool.rs +101 -0
- package/native/progress.rs +43 -0
- package/native/rans.rs +149 -0
- package/native/rans_byte.rs +286 -0
- package/native/reconstitution.rs +623 -0
- package/native/streaming.rs +214 -0
- package/native/streaming_decode.rs +338 -0
- package/native/streaming_encode.rs +494 -0
- package/native/test_small_bwt.rs +31 -0
- package/native/test_stages.rs +70 -0
- package/package.json +111 -113
package/native/gpu.rs
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
use anyhow::{anyhow, Result};
|
|
2
|
+
use parking_lot::RwLock;
|
|
3
|
+
use std::sync::Arc;
|
|
4
|
+
use wgpu::*;
|
|
5
|
+
use wgpu::util::DeviceExt;
|
|
6
|
+
|
|
7
|
+
/// Handles acquired from a wgpu adapter, held behind `GpuContext`'s lock.
pub struct GpuDevice {
    device: Device,
    queue: Queue,
    // Always set to true at construction and not read anywhere in this
    // module — NOTE(review): possibly vestigial; confirm before removing.
    supported: bool,
    adapter_info: String, // Debug-formatted driver string captured at init
}
|
|
13
|
+
|
|
14
|
+
/// Shareable handle to an optional GPU device. `None` inside the lock means
/// no usable adapter/device was found when the context was constructed.
pub struct GpuContext {
    inner: Arc<RwLock<Option<GpuDevice>>>,
}
|
|
17
|
+
|
|
18
|
+
impl GpuContext {
    /// Probe all wgpu backends for a high-performance adapter and request a
    /// device from it. Any failure (no adapter, or `request_device` error)
    /// produces a context in the unavailable state (`inner == None`) rather
    /// than an error.
    pub async fn new() -> Self {
        let instance = Instance::new(InstanceDescriptor {
            backends: Backends::all(),
            ..Default::default()
        });

        let adapter = instance.request_adapter(&RequestAdapterOptions {
            power_preference: PowerPreference::HighPerformance,
            compatible_surface: None, // compute-only: no window surface needed
            force_fallback_adapter: false,
        }).await;

        let device_info = if let Some(adapter) = adapter {
            match adapter.request_device(&DeviceDescriptor {
                label: Some("roxify-compute"),
                required_features: Features::empty(),
                required_limits: Limits::default(),
            }, None).await {
                Ok((device, queue)) => {
                    let info = adapter.get_info();
                    Some(GpuDevice {
                        device,
                        queue,
                        supported: true,
                        // NOTE(review): Debug-formats only `info.driver`, so the
                        // stored string comes out quoted and omits the adapter
                        // name — confirm this is the intended report.
                        adapter_info: format!("{:?}", info.driver),
                    })
                }
                // Device request failed: treat the GPU as unavailable.
                Err(_) => None,
            }
        } else {
            None
        };

        GpuContext {
            inner: Arc::new(RwLock::new(device_info)),
        }
    }

    /// True when a GPU device was successfully acquired in `new`.
    pub fn is_available(&self) -> bool {
        self.inner.read().is_some()
    }

    /// Driver description captured at init time, if a device exists.
    pub fn get_adapter_info(&self) -> Option<String> {
        self.inner.read().as_ref().map(|d| d.adapter_info.clone())
    }

    /// Compile a WGSL source string into a compute pipeline.
    ///
    /// The pipeline layout has no bind groups; shaders that need bindings
    /// cannot use this helper as-is.
    ///
    /// # Errors
    /// Fails when no GPU device is available.
    pub async fn create_compute_pipeline(
        &self,
        shader_src: &str,
        entry_point: &str,
    ) -> Result<ComputePipeline> {
        let gpu = self.inner.read();
        let gpu = gpu.as_ref().ok_or_else(|| anyhow!("No GPU device available"))?;

        let shader_module = gpu.device.create_shader_module(ShaderModuleDescriptor {
            label: Some("compute-shader"),
            source: ShaderSource::Wgsl(std::borrow::Cow::Borrowed(shader_src)),
        });

        let pipeline_layout = gpu.device.create_pipeline_layout(&PipelineLayoutDescriptor {
            label: Some("compute-layout"),
            bind_group_layouts: &[],
            push_constant_ranges: &[],
        });

        Ok(gpu.device.create_compute_pipeline(&ComputePipelineDescriptor {
            label: Some("compute-pipeline"),
            layout: Some(&pipeline_layout),
            module: &shader_module,
            entry_point,
        }))
    }

    /// Upload `data` into a freshly created GPU buffer with the given usage
    /// flags.
    ///
    /// # Errors
    /// Fails when no GPU device is available.
    pub fn create_buffer_init(&self, data: &[u8], usage: BufferUsages) -> Result<Buffer> {
        let gpu = self.inner.read();
        let gpu = gpu.as_ref().ok_or_else(|| anyhow!("No GPU device available"))?;

        Ok(gpu.device.create_buffer_init(&util::BufferInitDescriptor {
            label: None,
            contents: data,
            usage,
        }))
    }
}
|
|
103
|
+
|
|
104
|
+
pub fn gpu_available() -> bool {
|
|
105
|
+
pollster::block_on(async {
|
|
106
|
+
let instance = Instance::new(InstanceDescriptor {
|
|
107
|
+
backends: Backends::all(),
|
|
108
|
+
..Default::default()
|
|
109
|
+
});
|
|
110
|
+
instance.request_adapter(&RequestAdapterOptions {
|
|
111
|
+
power_preference: PowerPreference::HighPerformance,
|
|
112
|
+
compatible_surface: None,
|
|
113
|
+
force_fallback_adapter: false,
|
|
114
|
+
}).await.is_some()
|
|
115
|
+
})
|
|
116
|
+
}
|
package/native/hybrid.rs
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
use anyhow::Result;
|
|
2
|
+
use rayon::prelude::*;
|
|
3
|
+
use crate::bwt::{bwt_encode, bwt_decode};
|
|
4
|
+
use crate::mtf::{mtf_encode, mtf_decode, rle0_encode, rle0_decode};
|
|
5
|
+
use crate::rans_byte::{SymbolStats, rans_encode_block, rans_decode_block};
|
|
6
|
+
use crate::context_mixing::analyze_entropy;
|
|
7
|
+
|
|
8
|
+
/// Independent compression unit: 1 MiB per block (blocks are compressed and
/// decompressed in parallel).
const BLOCK_SIZE: usize = 1024 * 1024;

// Per-block encoding tag, stored as the first byte of each block payload.
const BLOCK_FLAG_BWT: u8 = 0;
const BLOCK_FLAG_ZSTD: u8 = 1;
const BLOCK_FLAG_STORE: u8 = 2;

// Zero-order entropy cutoffs in bits/byte (maximum possible is 8.0):
// at or above STORE the block is kept raw; between ZSTD and STORE only a
// fast zstd pass is attempted; below ZSTD the full BWT chain is tried.
const ENTROPY_THRESHOLD_STORE: f32 = 7.95;
const ENTROPY_THRESHOLD_ZSTD: f32 = 7.5;
|
|
16
|
+
|
|
17
|
+
/// Summary returned alongside the compressed payload by `HybridCompressor::compress`.
#[derive(Clone, Debug)]
pub struct CompressionStats {
    pub original_size: u64,   // input length in bytes
    pub compressed_size: u64, // full container length, headers included
    pub ratio: f64,           // compressed_size / original_size
    pub entropy_bits: f32,    // zero-order entropy sampled from the input prefix
    pub blocks_count: usize,  // number of independent blocks in the container
}
|
|
25
|
+
|
|
26
|
+
/// Block-splitting compressor that picks, per block, between a BWT chain,
/// zstd, and raw storage.
pub struct HybridCompressor {
    block_size: usize, // bytes per independent block (currently BLOCK_SIZE)
}
|
|
29
|
+
|
|
30
|
+
impl HybridCompressor {
|
|
31
|
+
pub fn new(_enable_gpu: bool, _pool_size: usize) -> Self {
|
|
32
|
+
HybridCompressor {
|
|
33
|
+
block_size: BLOCK_SIZE,
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
pub fn compress(&self, data: &[u8]) -> Result<(Vec<u8>, CompressionStats)> {
|
|
38
|
+
let original_size = data.len() as u64;
|
|
39
|
+
|
|
40
|
+
let blocks: Vec<&[u8]> = data.chunks(self.block_size).collect();
|
|
41
|
+
let blocks_count = blocks.len();
|
|
42
|
+
|
|
43
|
+
let compressed_blocks: Vec<Vec<u8>> = blocks
|
|
44
|
+
.par_iter()
|
|
45
|
+
.map(|block| compress_block(block))
|
|
46
|
+
.collect::<Result<Vec<_>, _>>()?;
|
|
47
|
+
|
|
48
|
+
let entropy = if data.len() > 4096 {
|
|
49
|
+
analyze_entropy(&data[..4096.min(data.len())])
|
|
50
|
+
} else {
|
|
51
|
+
analyze_entropy(data)
|
|
52
|
+
};
|
|
53
|
+
|
|
54
|
+
let total_compressed: usize = compressed_blocks.iter().map(|b| b.len() + 4).sum();
|
|
55
|
+
let mut result = Vec::with_capacity(16 + total_compressed);
|
|
56
|
+
result.extend_from_slice(b"RBW2");
|
|
57
|
+
result.extend_from_slice(&(blocks_count as u32).to_le_bytes());
|
|
58
|
+
result.extend_from_slice(&original_size.to_le_bytes());
|
|
59
|
+
|
|
60
|
+
for block in &compressed_blocks {
|
|
61
|
+
result.extend_from_slice(&(block.len() as u32).to_le_bytes());
|
|
62
|
+
result.extend_from_slice(block);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
let compressed_size = result.len() as u64;
|
|
66
|
+
let ratio = (compressed_size as f64) / (original_size as f64);
|
|
67
|
+
|
|
68
|
+
Ok((result, CompressionStats {
|
|
69
|
+
original_size,
|
|
70
|
+
compressed_size,
|
|
71
|
+
ratio,
|
|
72
|
+
entropy_bits: entropy,
|
|
73
|
+
blocks_count,
|
|
74
|
+
}))
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>> {
|
|
78
|
+
if data.len() < 16 {
|
|
79
|
+
return Err(anyhow::anyhow!("Invalid compressed data"));
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
let magic = &data[0..4];
|
|
83
|
+
let v2 = magic == b"RBW2";
|
|
84
|
+
if magic != b"RBW1" && !v2 {
|
|
85
|
+
return Err(anyhow::anyhow!("Invalid magic"));
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
let blocks_count = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
|
|
89
|
+
let original_size = u64::from_le_bytes([
|
|
90
|
+
data[8], data[9], data[10], data[11],
|
|
91
|
+
data[12], data[13], data[14], data[15],
|
|
92
|
+
]) as usize;
|
|
93
|
+
|
|
94
|
+
let mut pos = 16;
|
|
95
|
+
let mut block_slices: Vec<&[u8]> = Vec::with_capacity(blocks_count);
|
|
96
|
+
|
|
97
|
+
for _ in 0..blocks_count {
|
|
98
|
+
if pos + 4 > data.len() {
|
|
99
|
+
return Err(anyhow::anyhow!("Truncated block header"));
|
|
100
|
+
}
|
|
101
|
+
let block_size = u32::from_le_bytes([
|
|
102
|
+
data[pos], data[pos + 1], data[pos + 2], data[pos + 3],
|
|
103
|
+
]) as usize;
|
|
104
|
+
pos += 4;
|
|
105
|
+
if pos + block_size > data.len() {
|
|
106
|
+
return Err(anyhow::anyhow!("Truncated block data"));
|
|
107
|
+
}
|
|
108
|
+
block_slices.push(&data[pos..pos + block_size]);
|
|
109
|
+
pos += block_size;
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
let decompressed_blocks: Vec<Vec<u8>> = block_slices
|
|
113
|
+
.par_iter()
|
|
114
|
+
.map(|block_data| {
|
|
115
|
+
if v2 {
|
|
116
|
+
decompress_block_v2(block_data)
|
|
117
|
+
} else {
|
|
118
|
+
decompress_block_v1(block_data)
|
|
119
|
+
}
|
|
120
|
+
})
|
|
121
|
+
.collect::<Result<Vec<_>, _>>()?;
|
|
122
|
+
|
|
123
|
+
let mut result = Vec::with_capacity(original_size);
|
|
124
|
+
for block in decompressed_blocks {
|
|
125
|
+
result.extend_from_slice(&block);
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
Ok(result)
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
pub fn estimate_gain(&self, data: &[u8]) -> f64 {
|
|
132
|
+
let entropy = analyze_entropy(data);
|
|
133
|
+
let theoretical_min = (data.len() as f64) * (entropy as f64) / 8.0;
|
|
134
|
+
let ratio = theoretical_min / (data.len() as f64);
|
|
135
|
+
(1.0 - ratio) * 100.0
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
fn compress_block(block: &[u8]) -> Result<Vec<u8>> {
|
|
140
|
+
if block.is_empty() {
|
|
141
|
+
return Ok(vec![BLOCK_FLAG_STORE]);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
let entropy = analyze_entropy(block);
|
|
145
|
+
|
|
146
|
+
if entropy >= ENTROPY_THRESHOLD_STORE {
|
|
147
|
+
let mut result = Vec::with_capacity(1 + block.len());
|
|
148
|
+
result.push(BLOCK_FLAG_STORE);
|
|
149
|
+
result.extend_from_slice(block);
|
|
150
|
+
return Ok(result);
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
if entropy >= ENTROPY_THRESHOLD_ZSTD {
|
|
154
|
+
let compressed = zstd::encode_all(block, 1)?;
|
|
155
|
+
if compressed.len() < block.len() {
|
|
156
|
+
let mut result = Vec::with_capacity(1 + 4 + compressed.len());
|
|
157
|
+
result.push(BLOCK_FLAG_ZSTD);
|
|
158
|
+
result.extend_from_slice(&(block.len() as u32).to_le_bytes());
|
|
159
|
+
result.extend_from_slice(&compressed);
|
|
160
|
+
return Ok(result);
|
|
161
|
+
}
|
|
162
|
+
let mut result = Vec::with_capacity(1 + block.len());
|
|
163
|
+
result.push(BLOCK_FLAG_STORE);
|
|
164
|
+
result.extend_from_slice(block);
|
|
165
|
+
return Ok(result);
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
let bwt = bwt_encode(block)?;
|
|
169
|
+
let mtf_data = mtf_encode(&bwt.transformed);
|
|
170
|
+
let rle_data = rle0_encode(&mtf_data);
|
|
171
|
+
let stats = SymbolStats::from_data(&rle_data);
|
|
172
|
+
let encoded = rans_encode_block(&rle_data, &stats);
|
|
173
|
+
let stats_bytes = stats.serialize();
|
|
174
|
+
|
|
175
|
+
let bwt_total = 1 + 4 + 4 + 4 + stats_bytes.len() + encoded.len();
|
|
176
|
+
|
|
177
|
+
if bwt_total < block.len() {
|
|
178
|
+
let zstd_compressed = zstd::encode_all(block, 3)?;
|
|
179
|
+
let zstd_total = 1 + 4 + zstd_compressed.len();
|
|
180
|
+
|
|
181
|
+
if zstd_total < bwt_total {
|
|
182
|
+
let mut result = Vec::with_capacity(zstd_total);
|
|
183
|
+
result.push(BLOCK_FLAG_ZSTD);
|
|
184
|
+
result.extend_from_slice(&(block.len() as u32).to_le_bytes());
|
|
185
|
+
result.extend_from_slice(&zstd_compressed);
|
|
186
|
+
return Ok(result);
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
let mut result = Vec::with_capacity(bwt_total);
|
|
190
|
+
result.push(BLOCK_FLAG_BWT);
|
|
191
|
+
result.extend_from_slice(&bwt.primary_index.to_le_bytes());
|
|
192
|
+
result.extend_from_slice(&(block.len() as u32).to_le_bytes());
|
|
193
|
+
result.extend_from_slice(&(rle_data.len() as u32).to_le_bytes());
|
|
194
|
+
result.extend_from_slice(&stats_bytes);
|
|
195
|
+
result.extend_from_slice(&encoded);
|
|
196
|
+
return Ok(result);
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
let zstd_compressed = zstd::encode_all(block, 3)?;
|
|
200
|
+
if 1 + 4 + zstd_compressed.len() < block.len() {
|
|
201
|
+
let mut result = Vec::with_capacity(1 + 4 + zstd_compressed.len());
|
|
202
|
+
result.push(BLOCK_FLAG_ZSTD);
|
|
203
|
+
result.extend_from_slice(&(block.len() as u32).to_le_bytes());
|
|
204
|
+
result.extend_from_slice(&zstd_compressed);
|
|
205
|
+
return Ok(result);
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
let mut result = Vec::with_capacity(1 + block.len());
|
|
209
|
+
result.push(BLOCK_FLAG_STORE);
|
|
210
|
+
result.extend_from_slice(block);
|
|
211
|
+
Ok(result)
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
fn decompress_block_v2(block: &[u8]) -> Result<Vec<u8>> {
|
|
215
|
+
if block.is_empty() {
|
|
216
|
+
return Err(anyhow::anyhow!("Empty block"));
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
match block[0] {
|
|
220
|
+
BLOCK_FLAG_STORE => Ok(block[1..].to_vec()),
|
|
221
|
+
BLOCK_FLAG_ZSTD => {
|
|
222
|
+
if block.len() < 5 {
|
|
223
|
+
return Err(anyhow::anyhow!("Truncated zstd block"));
|
|
224
|
+
}
|
|
225
|
+
let orig_len = u32::from_le_bytes([block[1], block[2], block[3], block[4]]) as usize;
|
|
226
|
+
let mut decoded = zstd::decode_all(&block[5..])?;
|
|
227
|
+
decoded.truncate(orig_len);
|
|
228
|
+
Ok(decoded)
|
|
229
|
+
}
|
|
230
|
+
BLOCK_FLAG_BWT => {
|
|
231
|
+
if block.len() < 13 {
|
|
232
|
+
return Err(anyhow::anyhow!("Truncated BWT block"));
|
|
233
|
+
}
|
|
234
|
+
let primary_index = u32::from_le_bytes([block[1], block[2], block[3], block[4]]);
|
|
235
|
+
let orig_len = u32::from_le_bytes([block[5], block[6], block[7], block[8]]) as usize;
|
|
236
|
+
let rle_len = u32::from_le_bytes([block[9], block[10], block[11], block[12]]) as usize;
|
|
237
|
+
|
|
238
|
+
let (stats, stats_size) = SymbolStats::deserialize(&block[13..])?;
|
|
239
|
+
let encoded = &block[13 + stats_size..];
|
|
240
|
+
|
|
241
|
+
let rle_data = rans_decode_block(encoded, &stats, rle_len)?;
|
|
242
|
+
let mtf_data = rle0_decode(&rle_data);
|
|
243
|
+
let bwt_data = mtf_decode(&mtf_data);
|
|
244
|
+
let original = bwt_decode(&bwt_data, primary_index)?;
|
|
245
|
+
|
|
246
|
+
if original.len() != orig_len {
|
|
247
|
+
return Err(anyhow::anyhow!("Size mismatch"));
|
|
248
|
+
}
|
|
249
|
+
Ok(original)
|
|
250
|
+
}
|
|
251
|
+
_ => Err(anyhow::anyhow!("Unknown block type: {}", block[0])),
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
fn decompress_block_v1(block: &[u8]) -> Result<Vec<u8>> {
|
|
256
|
+
if block.len() < 12 {
|
|
257
|
+
return Err(anyhow::anyhow!("Block too small"));
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
let primary_index = u32::from_le_bytes([block[0], block[1], block[2], block[3]]);
|
|
261
|
+
let orig_len = u32::from_le_bytes([block[4], block[5], block[6], block[7]]) as usize;
|
|
262
|
+
let rle_len = u32::from_le_bytes([block[8], block[9], block[10], block[11]]) as usize;
|
|
263
|
+
|
|
264
|
+
let (stats, stats_size) = SymbolStats::deserialize(&block[12..])?;
|
|
265
|
+
let encoded = &block[12 + stats_size..];
|
|
266
|
+
|
|
267
|
+
let rle_data = rans_decode_block(encoded, &stats, rle_len)?;
|
|
268
|
+
let mtf_data = rle0_decode(&rle_data);
|
|
269
|
+
let bwt_data = mtf_decode(&mtf_data);
|
|
270
|
+
let original = bwt_decode(&bwt_data, primary_index)?;
|
|
271
|
+
|
|
272
|
+
if original.len() != orig_len {
|
|
273
|
+
return Err(anyhow::anyhow!("Size mismatch"));
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
Ok(original)
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
pub fn compress_high_performance(data: &[u8]) -> Result<(Vec<u8>, CompressionStats)> {
|
|
280
|
+
let compressor = HybridCompressor::new(false, 0);
|
|
281
|
+
compressor.compress(data)
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
pub fn decompress_high_performance(data: &[u8]) -> Result<Vec<u8>> {
|
|
285
|
+
let compressor = HybridCompressor::new(false, 0);
|
|
286
|
+
compressor.decompress(data)
|
|
287
|
+
}
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
use image::{ImageFormat, DynamicImage, ImageReader};
|
|
2
|
+
use std::io::Cursor;
|
|
3
|
+
|
|
4
|
+
fn load_no_limits(input: &[u8]) -> Result<DynamicImage, String> {
|
|
5
|
+
let mut reader = ImageReader::new(Cursor::new(input))
|
|
6
|
+
.with_guessed_format()
|
|
7
|
+
.map_err(|e| format!("Failed to guess format: {}", e))?;
|
|
8
|
+
reader.no_limits();
|
|
9
|
+
reader.decode().map_err(|e| format!("Failed to load image: {}", e))
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
pub fn sharp_resize(
|
|
13
|
+
input: &[u8],
|
|
14
|
+
width: u32,
|
|
15
|
+
height: u32,
|
|
16
|
+
kernel: &str,
|
|
17
|
+
) -> Result<Vec<u8>, String> {
|
|
18
|
+
let img = load_no_limits(input)?;
|
|
19
|
+
|
|
20
|
+
let filter = match kernel {
|
|
21
|
+
"nearest" => image::imageops::FilterType::Nearest,
|
|
22
|
+
"bilinear" | "linear" => image::imageops::FilterType::Triangle,
|
|
23
|
+
"cubic" | "bicubic" => image::imageops::FilterType::CatmullRom,
|
|
24
|
+
"lanczos" | "lanczos3" => image::imageops::FilterType::Lanczos3,
|
|
25
|
+
_ => image::imageops::FilterType::Nearest,
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
let resized = img.resize_exact(width, height, filter);
|
|
29
|
+
|
|
30
|
+
let mut output = Vec::new();
|
|
31
|
+
resized.write_to(&mut Cursor::new(&mut output), ImageFormat::Png)
|
|
32
|
+
.map_err(|e| format!("Failed to encode PNG: {}", e))?;
|
|
33
|
+
|
|
34
|
+
Ok(output)
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
pub fn sharp_raw_pixels(input: &[u8]) -> Result<(Vec<u8>, u32, u32), String> {
|
|
38
|
+
let img = load_no_limits(input)?;
|
|
39
|
+
|
|
40
|
+
let rgb = img.to_rgb8();
|
|
41
|
+
let width = rgb.width();
|
|
42
|
+
let height = rgb.height();
|
|
43
|
+
let raw = rgb.into_raw();
|
|
44
|
+
|
|
45
|
+
Ok((raw, width, height))
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
pub fn sharp_metadata(input: &[u8]) -> Result<(u32, u32, String), String> {
|
|
49
|
+
let img = load_no_limits(input)?;
|
|
50
|
+
|
|
51
|
+
let width = img.width();
|
|
52
|
+
let height = img.height();
|
|
53
|
+
let format = match img {
|
|
54
|
+
DynamicImage::ImageLuma8(_) => "gray",
|
|
55
|
+
DynamicImage::ImageRgb8(_) => "rgb",
|
|
56
|
+
DynamicImage::ImageRgba8(_) => "rgba",
|
|
57
|
+
_ => "unknown",
|
|
58
|
+
};
|
|
59
|
+
|
|
60
|
+
Ok((width, height, format.to_string()))
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
pub fn rgb_to_png(rgb: &[u8], width: u32, height: u32) -> Result<Vec<u8>, String> {
|
|
64
|
+
use image::codecs::png::{PngEncoder, CompressionType, FilterType};
|
|
65
|
+
use image::ImageEncoder;
|
|
66
|
+
|
|
67
|
+
let mut output = Vec::new();
|
|
68
|
+
// Data is already zstd-compressed, so PNG deflate adds overhead.
|
|
69
|
+
// Use Uncompressed (stored blocks) to avoid wasting CPU on incompressible data.
|
|
70
|
+
let encoder = PngEncoder::new_with_quality(
|
|
71
|
+
&mut output,
|
|
72
|
+
CompressionType::Uncompressed,
|
|
73
|
+
FilterType::NoFilter,
|
|
74
|
+
);
|
|
75
|
+
encoder.write_image(rgb, width, height, image::ExtendedColorType::Rgb8)
|
|
76
|
+
.map_err(|e| format!("Failed to encode PNG: {}", e))?;
|
|
77
|
+
Ok(output)
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
/// Inverse of `rgb_to_png`: decode PNG bytes back into packed RGB8 pixels
/// plus dimensions. Delegates to `sharp_raw_pixels`, which accepts any
/// format the image crate can auto-detect, not just PNG.
pub fn png_to_rgb(png: &[u8]) -> Result<(Vec<u8>, u32, u32), String> {
    sharp_raw_pixels(png)
}
|