qlzruby 0.1.1 → 0.1.2

Files changed (6)
  1. data/README +50 -0
  2. data/ext/extconf.rb +16 -0
  3. data/ext/qlzruby.c +16 -118
  4. metadata +24 -11
  5. data/README.txt +0 -76
  6. data/ext/quicklz/quicklz.c +0 -948
data/README ADDED
@@ -0,0 +1,50 @@
1
+ = QuickLZ/Ruby
2
+
3
+ Copyright (c) 2008 SUGAWARA Genki <sgwr_dts@yahoo.co.jp>
4
+
5
+ == Description
6
+
7
+ Ruby bindings for QuickLZ.
8
+
9
+ QuickLZ is a data compression library which gives fast compression.
10
+
11
+ == Source Code
12
+
13
+ https://bitbucket.org/winebarrel/quick-ruby
14
+
15
+ == Install
16
+
17
+ gem install qlzruby
18
+
19
+ == Example
20
+
21
+ require 'qlzruby'
22
+ require 'open-uri'
23
+ require 'stringio'
24
+
25
+ source = <<-EOS
26
+ London Bridge Is falling down,
27
+ Falling down, Falling down.
28
+ London Bridge Is falling down,
29
+ My fair lady.
30
+ EOS
31
+
32
+ comp_data = QuickLZ.compress(source)
33
+ decomp_data = QuickLZ.decompress(comp_data)
34
+
35
+ puts <<-EOS
36
+ - block compress -
37
+ uncompress size: #{source.length}
38
+ compress size: #{comp_data.length}
39
+ decompress size: #{decomp_data.length}
40
+ decompress success?: #{source == decomp_data}
41
+ EOS
42
+
43
+ === QuickLZ
44
+
45
+ QuickLZ/Ruby contains QuickLZ.
46
+
47
+ QuickLZ is a data compression library which gives fast compression.
48
+
49
+ * http://www.quicklz.com/
50
+ * Copyright 2006-2008 Lasse Reinhold
data/ext/extconf.rb CHANGED
@@ -1,2 +1,18 @@
1
1
  require 'mkmf'
2
+ require 'open-uri'
3
+
4
+ def download(url)
5
+ dir = File.expand_path(File.dirname(__FILE__))
6
+ dst = File.join(dir, File.basename(url))
7
+
8
+ open(url) do |src|
9
+ open(dst, 'w') do |f|
10
+ f << src.read
11
+ end
12
+ end
13
+ end
14
+
15
+ download 'http://www.quicklz.com/quicklz.h'
16
+ download 'http://www.quicklz.com/quicklz.c'
17
+
2
18
  create_makefile('qlzruby')
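
The new extconf.rb no longer relies on a bundled quicklz.c; instead it fetches quicklz.h and quicklz.c from www.quicklz.com via open-uri when the extension is built. A minimal standalone sketch of that build step is shown below; the error handling and the binary write mode are illustrative additions, not part of the gem's actual extconf.rb.

    require 'mkmf'
    require 'open-uri'

    # Download a QuickLZ source file next to this extconf.rb at build time.
    def download(url)
      dir = File.expand_path(File.dirname(__FILE__))
      dst = File.join(dir, File.basename(url))

      open(url) do |src|   # Kernel#open + open-uri (URI.open on modern Ruby)
        File.open(dst, 'wb') { |f| f << src.read }
      end
    rescue StandardError => e
      abort "extconf.rb: could not download #{url}: #{e.message}"
    end

    download 'http://www.quicklz.com/quicklz.h'
    download 'http://www.quicklz.com/quicklz.c'

    create_makefile('qlzruby')

Note that this makes gem installation depend on www.quicklz.com being reachable at build time.
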
data/ext/qlzruby.c CHANGED
@@ -6,7 +6,7 @@ __declspec(dllexport) void Init_qlzruby(void);
6
6
  #endif
7
7
 
8
8
  #include <string.h>
9
- #include "quicklz/quicklz.c"
9
+ #include "quicklz.h"
10
10
  #include "ruby.h"
11
11
 
12
12
  #ifndef RSTRING_PTR
@@ -26,155 +26,53 @@ __declspec(dllexport) void Init_qlzruby(void);
26
26
  } \
27
27
  } while(0)
28
28
 
29
- #define VERSION "0.1.1"
29
+ #define VERSION "0.1.2"
30
30
  #define DEFAULT_BLOCKSIZE 5120
31
31
  #define HEADER_LEN 9
32
32
 
33
33
  /* */
34
- static VALUE qlzruby_block_compress(VALUE self, VALUE v_src) {
34
+ static VALUE qlzruby_compress(VALUE self, VALUE v_src) {
35
35
  VALUE v_dst;
36
- char *src, *dst, *scratch;
36
+ char *src, *dst;
37
+ qlz_state_compress *state_compress;
37
38
  size_t len;
38
39
 
39
40
  Check_Type(v_src, T_STRING);
40
41
  src = RSTRING_PTR(v_src);
41
42
  len = RSTRING_LEN(v_src);
43
+ state_compress = (qlz_state_compress *) xmalloc(sizeof(qlz_state_compress));
42
44
  dst = xmalloc(len + 400);
43
- scratch = xmalloc(SCRATCH_COMPRESS);
44
- memset(scratch, 0, SCRATCH_COMPRESS);
45
- len = qlz_compress(src, dst, len, scratch);
45
+ len = qlz_compress(src, dst, len, state_compress);
46
46
  v_dst = rb_str_new(dst, len);
47
- xfree(scratch);
47
+ xfree(state_compress);
48
48
  xfree(dst);
49
49
 
50
50
  return v_dst;
51
51
  }
52
52
 
53
53
  /* */
54
- static VALUE qlzruby_block_decompress(VALUE self, VALUE v_src) {
54
+ static VALUE qlzruby_decompress(VALUE self, VALUE v_src) {
55
55
  VALUE v_dst;
56
- char *src, *dst, *scratch;
56
+ char *src, *dst;
57
+ qlz_state_decompress *state_decompress;
57
58
  size_t len;
58
59
 
59
60
  Check_Type(v_src, T_STRING);
60
61
  src = RSTRING_PTR(v_src);
61
62
  len = qlz_size_decompressed(src);
62
63
  dst = xmalloc(len);
63
- scratch = xmalloc(SCRATCH_DECOMPRESS);
64
- memset(scratch, 0, SCRATCH_DECOMPRESS);
65
- len = qlz_decompress(src, dst, scratch);
64
+ state_decompress = (qlz_state_decompress *) xmalloc(sizeof(qlz_state_decompress));
65
+ len = qlz_decompress(src, dst, state_decompress);
66
66
  v_dst = rb_str_new(dst, len);
67
- xfree(scratch);
67
+ xfree(state_decompress);
68
68
  xfree(dst);
69
69
 
70
70
  return v_dst;
71
71
  }
72
72
 
73
- /* */
74
- static VALUE qlzruby_stream_compress(int argc, const VALUE *argv, VALUE self) {
75
- VALUE in, out, blocksize, v_src, v_dst;
76
- char *src, *dst, *scratch;
77
- size_t len, i_blocksize;
78
-
79
- rb_scan_args(argc, argv, "21", &in, &out, &blocksize);
80
- Check_IO(in);
81
- Check_IO(out);
82
-
83
- if (NIL_P(blocksize)) {
84
- blocksize = INT2FIX(DEFAULT_BLOCKSIZE);
85
- i_blocksize = DEFAULT_BLOCKSIZE;
86
- } else {
87
- i_blocksize = NUM2LONG(blocksize);
88
-
89
- if (i_blocksize < 1) {
90
- blocksize = INT2FIX(DEFAULT_BLOCKSIZE);
91
- i_blocksize = DEFAULT_BLOCKSIZE;
92
- }
93
- }
94
-
95
- dst = alloca(i_blocksize + 400);
96
- scratch = alloca(SCRATCH_COMPRESS);
97
- memset(scratch, 0, SCRATCH_COMPRESS);
98
-
99
- while (1) {
100
- v_src = rb_funcall(in, rb_intern("read"), 1, blocksize);
101
-
102
- if (NIL_P(v_src)) { break; }
103
-
104
- src = RSTRING_PTR(v_src);
105
- len = RSTRING_LEN(v_src);
106
-
107
- if (len < 1) { break; }
108
-
109
- len = qlz_compress(src, dst, len, scratch);
110
- v_dst = rb_str_new(dst, len);
111
- rb_funcall(out, rb_intern("write"), 1, v_dst);
112
- }
113
-
114
- return Qnil;
115
- }
116
-
117
- static size_t qlzruby_stream_decompress_block(VALUE in, VALUE out, char *scratch) {
118
- VALUE v_header, v_src, v_dst;
119
- char *header, *src, *dst;
120
- size_t header_len, len;
121
-
122
- v_header = rb_funcall(in, rb_intern("read"), 1, INT2FIX(HEADER_LEN));
123
-
124
- if (NIL_P(v_header)) { return 0; }
125
-
126
- header = RSTRING_PTR(v_header);
127
- header_len = RSTRING_LEN(v_header);
128
-
129
- if (header_len < HEADER_LEN) { return 0; }
130
-
131
- len = qlz_size_compressed(header);
132
- v_src = rb_funcall(in, rb_intern("read"), 1, INT2FIX(len - HEADER_LEN));
133
- len = RSTRING_LEN(v_src);
134
-
135
- if (len < 1) { return 0; }
136
-
137
- len += HEADER_LEN;
138
- src = alloca(len);
139
- memcpy(src, header, HEADER_LEN);
140
- memcpy(src + HEADER_LEN, RSTRING_PTR(v_src), len - HEADER_LEN);
141
-
142
- len = qlz_size_decompressed(src);
143
- dst = alloca(len);
144
- len = qlz_decompress(src, dst, scratch);
145
- v_dst = rb_str_new(dst, len);
146
- rb_funcall(out, rb_intern("write"), 1, v_dst);
147
-
148
- return len;
149
- }
150
-
151
- /* */
152
- static VALUE qlzruby_stream_decompress(VALUE self, VALUE in, VALUE out) {
153
- char *scratch;
154
-
155
- Check_IO(in);
156
- Check_IO(out);
157
-
158
- scratch = alloca(SCRATCH_DECOMPRESS);
159
- memset(scratch, 0, SCRATCH_DECOMPRESS);
160
-
161
- while (1) {
162
- int len = qlzruby_stream_decompress_block(in, out, scratch);
163
-
164
- if (len < 1) {
165
- break;
166
- }
167
- }
168
-
169
- return Qnil;
170
-
171
- }
172
-
173
73
  void Init_qlzruby() {
174
74
  VALUE QuickLZ = rb_define_module("QuickLZ");
175
75
  rb_define_const(QuickLZ, "VERSION", rb_str_new2(VERSION));
176
- rb_define_module_function(QuickLZ, "block_compress", qlzruby_block_compress, 1);
177
- rb_define_module_function(QuickLZ, "block_decompress", qlzruby_block_decompress, 1);
178
- rb_define_module_function(QuickLZ, "stream_compress", qlzruby_stream_compress, -1);
179
- rb_define_module_function(QuickLZ, "stream_decompress", qlzruby_stream_decompress, 2);
76
+ rb_define_module_function(QuickLZ, "compress", qlzruby_compress, 1);
77
+ rb_define_module_function(QuickLZ, "decompress", qlzruby_decompress, 1);
180
78
  }
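
For callers, the net effect of the qlzruby.c changes is that the module functions block_compress and block_decompress are renamed to compress and decompress, the stream_compress/stream_decompress functions are removed, and the binding now passes qlz_state_compress/qlz_state_decompress structures from quicklz.h instead of zeroed scratch buffers. A rough migration sketch, assuming no other behavioural changes:

    require 'qlzruby'

    data = 'London Bridge Is falling down, ' * 10

    # qlzruby 0.1.1
    #   comp  = QuickLZ.block_compress(data)
    #   plain = QuickLZ.block_decompress(comp)

    # qlzruby 0.1.2
    comp  = QuickLZ.compress(data)
    plain = QuickLZ.decompress(comp)

    puts plain == data   # => true

    # QuickLZ.stream_compress / QuickLZ.stream_decompress have no direct
    # replacement in 0.1.2; IO contents must be compressed block-wise by hand.
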
metadata CHANGED
@@ -1,7 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: qlzruby
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.1
4
+ hash: 31
5
+ prerelease: false
6
+ segments:
7
+ - 0
8
+ - 1
9
+ - 2
10
+ version: 0.1.2
5
11
  platform: ruby
6
12
  authors:
7
13
  - winebarrel
@@ -9,7 +15,7 @@ autorequire:
9
15
  bindir: bin
10
16
  cert_chain: []
11
17
 
12
- date: 2008-08-08 00:00:00 +09:00
18
+ date: 2011-01-13 00:00:00 +09:00
13
19
  default_executable:
14
20
  dependencies: []
15
21
 
@@ -20,17 +26,18 @@ executables: []
20
26
  extensions:
21
27
  - ext/extconf.rb
22
28
  extra_rdoc_files:
23
- - README.txt
29
+ - README
24
30
  - ext/qlzruby.c
25
31
  - LICENSE.txt
26
32
  files:
27
33
  - ext/qlzruby.c
28
- - ext/quicklz/quicklz.c
29
34
  - ext/extconf.rb
30
- - README.txt
35
+ - README
31
36
  - LICENSE.txt
32
37
  has_rdoc: true
33
- homepage: http://qlzruby.rubyforge.org
38
+ homepage: https://bitbucket.org/winebarrel/quick-ruby
39
+ licenses: []
40
+
34
41
  post_install_message:
35
42
  rdoc_options:
36
43
  - --title
@@ -38,23 +45,29 @@ rdoc_options:
38
45
  require_paths:
39
46
  - lib
40
47
  required_ruby_version: !ruby/object:Gem::Requirement
48
+ none: false
41
49
  requirements:
42
50
  - - ">="
43
51
  - !ruby/object:Gem::Version
52
+ hash: 3
53
+ segments:
54
+ - 0
44
55
  version: "0"
45
- version:
46
56
  required_rubygems_version: !ruby/object:Gem::Requirement
57
+ none: false
47
58
  requirements:
48
59
  - - ">="
49
60
  - !ruby/object:Gem::Version
61
+ hash: 3
62
+ segments:
63
+ - 0
50
64
  version: "0"
51
- version:
52
65
  requirements: []
53
66
 
54
- rubyforge_project: qlzruby
55
- rubygems_version: 1.1.1
67
+ rubyforge_project:
68
+ rubygems_version: 1.3.7
56
69
  signing_key:
57
- specification_version: 2
70
+ specification_version: 3
58
71
  summary: Ruby bindings for QuickLZ.
59
72
  test_files: []
60
73
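
The metadata changes boil down to the 0.1.2 version bump, the 2011-01-13 release date, the README.txt → README rename, dropping the bundled ext/quicklz/quicklz.c, and moving the homepage from RubyForge to Bitbucket. For orientation only, a hypothetical gemspec that would produce roughly this metadata (the gem's real gemspec is not part of this diff):

    # Hypothetical reconstruction; field values taken from the metadata above.
    Gem::Specification.new do |s|
      s.name             = 'qlzruby'
      s.version          = '0.1.2'
      s.summary          = 'Ruby bindings for QuickLZ.'
      s.authors          = ['winebarrel']
      s.homepage         = 'https://bitbucket.org/winebarrel/quick-ruby'
      s.extensions       = ['ext/extconf.rb']
      s.files            = ['ext/qlzruby.c', 'ext/extconf.rb', 'README', 'LICENSE.txt']
      s.extra_rdoc_files = ['README', 'ext/qlzruby.c', 'LICENSE.txt']
    end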
 
data/README.txt DELETED
@@ -1,76 +0,0 @@
1
- = QuickLZ/Ruby
2
-
3
- Copyright (c) 2008 SUGAWARA Genki <sgwr_dts@yahoo.co.jp>
4
-
5
- == Description
6
-
7
- Ruby bindings for QuickLZ.
8
-
9
- QuickLZ is a data compression library which gives fast compression.
10
-
11
- == Project Page
12
-
13
- http://rubyforge.org/projects/qlzruby
14
-
15
- == Install
16
-
17
- gem install qlzruby
18
-
19
- == Download
20
-
21
- http://rubyforge.org/frs/?group_id=6766
22
-
23
- == Example
24
-
25
- require 'qlzruby'
26
- require 'open-uri'
27
- require 'stringio'
28
-
29
- # block compress
30
- source = <<-EOS
31
- London Bridge Is falling down,
32
- Falling down, Falling down.
33
- London Bridge Is falling down,
34
- My fair lady.
35
- EOS
36
-
37
- comp_data = QuickLZ.block_compress(source)
38
- decomp_data = QuickLZ.block_decompress(comp_data)
39
-
40
- puts <<-EOS
41
- - block compress -
42
- uncompress size: #{source.length}
43
- compress size: #{comp_data.length}
44
- decompress size: #{decomp_data.length}
45
- decompress success?: #{source == decomp_data}
46
-
47
- EOS
48
-
49
- # stream compress
50
- source = open('http://www.ruby-lang.org/') {|f| f.read }
51
- source = StringIO.new(source)
52
- comp_data = StringIO.new
53
-
54
- QuickLZ.stream_compress(source, comp_data)
55
-
56
- comp_data.seek(0)
57
- decomp_data = StringIO.new
58
-
59
- QuickLZ.stream_decompress(comp_data, decomp_data)
60
-
61
- puts <<-EOS
62
- - stream compress -
63
- uncompress size: #{source.length}
64
- compress size: #{comp_data.length}
65
- decompress size: #{decomp_data.length}
66
- decompress success?: #{source.string == decomp_data.string}
67
- EOS
68
-
69
- === QuickLZ
70
-
71
- QuickLZ/Ruby contains QuickLZ.
72
-
73
- QuickLZ is a data compression library which gives fast compression.
74
-
75
- * http://www.quicklz.com/
76
- * Copyright 2006-2008 Lasse Reinhold
data/ext/quicklz/quicklz.c DELETED
@@ -1,948 +0,0 @@
1
- // QuickLZ data compression library
2
- // Copyright (C) 2006-2007 Lasse Mikkel Reinhold
3
- // lar@quicklz.com
4
- //
5
- // QuickLZ can be used for free under the GPL-1 or GPL-2 license (where anything
6
- // released into public must be open source) or under a commercial license if such
7
- // has been acquired (see http://www.quicklz.com/order.html). The commercial license
8
- // does not cover derived or ported versions created by third parties under GPL.
9
-
10
- // Version 1.31 final
11
- #define QLZ_VERSION_MAJOR 1
12
- #define QLZ_VERSION_MINOR 3
13
- #define QLZ_VERSION_REVISION 1
14
-
15
- // Set following flags according to the manual
16
- #define COMPRESSION_LEVEL 0
17
- #define STREAMING_MODE 960000
18
- #define test_rle
19
- #define speedup_incompressible
20
- //#define memory_safe
21
-
22
- // Public functions of QuickLZ
23
- size_t qlz_decompress(const char *source, void *destination, char *scratch);
24
- size_t qlz_compress(const void *source, char *destination, size_t size, char *scratch);
25
- size_t qlz_size_decompressed(const char *source);
26
- size_t qlz_size_compressed(const char *source);
27
- int qlz_get_setting(int setting);
28
-
29
- #include <string.h>
30
-
31
- #if (defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64))
32
- #define X86X64
33
- #endif
34
-
35
- // Compute SCRATCH_COMPRESS, SCRATCH_DECOMPRESS and constants used internally
36
- #if COMPRESSION_LEVEL == 0 && defined(memory_safe)
37
- #error memory_safe flag cannot be used with COMPRESSION_LEVEL 0
38
- #endif
39
-
40
- #define HASH_ENTRIES 4096
41
-
42
- #if (COMPRESSION_LEVEL == 0 || COMPRESSION_LEVEL == 1 || COMPRESSION_LEVEL == 2)
43
- #define AND 1
44
- #elif (COMPRESSION_LEVEL == 3)
45
- #define AND 0x7
46
- #else
47
- #error COMPRESSION_LEVEL must be 0, 1, 2 or 3
48
- #endif
49
-
50
- #define HASH_SIZE (AND + 1)*HASH_ENTRIES*sizeof(unsigned char *)
51
-
52
- #ifdef STREAMING_MODE
53
- #define STREAMING_MODE_VALUE STREAMING_MODE
54
- #else
55
- #define STREAMING_MODE_VALUE 0
56
- #endif
57
-
58
- #define STREAMING_MODE_ROUNDED ((STREAMING_MODE_VALUE >> 3) << 3)
59
-
60
- #if (COMPRESSION_LEVEL > 1)
61
- #define SCRATCH_COMPRESS HASH_SIZE + STREAMING_MODE_VALUE + 16 + HASH_ENTRIES
62
- #else
63
- #define SCRATCH_COMPRESS HASH_SIZE + STREAMING_MODE_VALUE + 16
64
- #endif
65
-
66
- #if (COMPRESSION_LEVEL == 0)
67
- #define SCRATCH_DECOMPRESS HASH_ENTRIES*sizeof(unsigned char *) + 16 + STREAMING_MODE_VALUE
68
- #else
69
- #define SCRATCH_DECOMPRESS 16 + STREAMING_MODE_VALUE
70
- #endif
71
-
72
- int qlz_get_setting(int setting)
73
- {
74
- switch (setting)
75
- {
76
- case 0: return COMPRESSION_LEVEL;
77
- case 1: return SCRATCH_COMPRESS;
78
- case 2: return SCRATCH_DECOMPRESS;
79
- case 3: return STREAMING_MODE_VALUE;
80
- #ifdef test_rle
81
- case 4: return 1;
82
- #else
83
- case 4: return 0;
84
- #endif
85
- #ifdef speedup_incompressible
86
- case 5: return 1;
87
- #else
88
- case 5: return 0;
89
- #endif
90
- #ifdef memory_safe
91
- case 6: return 1;
92
- #else
93
- case 6: return 0;
94
- #endif
95
- case 7: return QLZ_VERSION_MAJOR;
96
- case 8: return QLZ_VERSION_MINOR;
97
- case 9: return QLZ_VERSION_REVISION;
98
- }
99
- return -1;
100
- }
101
-
102
- __inline unsigned int hash_func(unsigned int i)
103
- {
104
- return ((i >> 12) ^ i) & 0x0fff;
105
- }
106
-
107
- __inline unsigned int fast_read(void const *src, unsigned int bytes)
108
- {
109
- #ifndef X86X64
110
- unsigned char *p = (unsigned char*)src;
111
- switch (bytes)
112
- {
113
- case 4:
114
- return(*p | *(p + 1) << 8 | *(p + 2) << 16 | *(p + 3) << 24);
115
- case 3:
116
- return(*p | *(p + 1) << 8 | *(p + 2) << 16);
117
- case 2:
118
- return(*p | *(p + 1) << 8);
119
- case 1:
120
- return(*p);
121
- }
122
- return 0;
123
- #else
124
- if (bytes >= 1 && bytes <= 4)
125
- return *((unsigned int*)src);
126
- else
127
- return 0;
128
- #endif
129
- }
130
-
131
- __inline void fast_write(unsigned int f, void *dst, unsigned int bytes)
132
- {
133
- #ifndef X86X64
134
- unsigned char *p = (unsigned char*)dst;
135
-
136
- switch (bytes)
137
- {
138
- case 4:
139
- *p = (unsigned char)f;
140
- *(p + 1) = (unsigned char)(f >> 8);
141
- *(p + 2) = (unsigned char)(f >> 16);
142
- *(p + 3) = (unsigned char)(f >> 24);
143
- return;
144
- case 3:
145
- *p = (unsigned char)f;
146
- *(p + 1) = (unsigned char)(f >> 8);
147
- *(p + 2) = (unsigned char)(f >> 16);
148
- return;
149
- case 2:
150
- *p = (unsigned char)f;
151
- *(p + 1) = (unsigned char)(f >> 8);
152
- return;
153
- case 1:
154
- *p = (unsigned char)f;
155
- return;
156
- }
157
- #else
158
- switch (bytes)
159
- {
160
- case 4:
161
- *((unsigned int*)dst) = f;
162
- return;
163
- case 3:
164
- *((unsigned int*)dst) = f;
165
- return;
166
- case 2:
167
- #if COMPRESSION_LEVEL == 0
168
- // 2 byte writes are common in level 0
169
- *((unsigned short int*)dst) = (unsigned short int)f;
170
- #else
171
- *((unsigned int*)dst) = f;
172
- #endif
173
- return;
174
- case 1:
175
- *((unsigned char*)dst) = (unsigned char)f;
176
- return;
177
- }
178
- #endif
179
- }
180
-
181
- __inline void memcpy_up(unsigned char *dst, const unsigned char *src, unsigned int n)
182
- {
183
- // cannot be replaced by overlap handling of memmove() due to LZSS algorithm
184
- #ifndef X86X64
185
-
186
- if(n > 8 && src + n < dst)
187
- memcpy(dst, src, n);
188
- else
189
- {
190
- unsigned char *end = dst + n;
191
- while(dst < end)
192
- {
193
- *dst = *src;
194
- dst++;
195
- src++;
196
- }
197
- }
198
- #else
199
- if (n < 5)
200
- *((unsigned int*)dst) = *((unsigned int*)src);
201
- else
202
- {
203
- unsigned char *end = dst + n;
204
- while(dst < end)
205
- {
206
- *((unsigned int*)dst) = *((unsigned int*)src);
207
- dst = dst + 4;
208
- src = src + 4;
209
- }
210
- }
211
- #endif
212
- }
213
-
214
- __inline unsigned int fast_read_safe(void const *src, unsigned int bytes, const unsigned char *invalid)
215
- {
216
- #ifdef memory_safe
217
- if ((const unsigned char *)src + 4 > (const unsigned char *)invalid)
218
- return 0xffffffff;
219
- #endif
220
- invalid = invalid;
221
- return fast_read(src, bytes);
222
- }
223
-
224
- unsigned int qlz_compress_core(const void *source, unsigned char *destination, unsigned int size, const unsigned char *hashtable[][AND + 1], const unsigned char *first_valid, unsigned char *hash_counter)
225
- {
226
- const unsigned char *source_c = (const unsigned char*)source;
227
- unsigned char *destination_c = (unsigned char*)destination;
228
- const unsigned char *last_byte = source_c + size - 1;
229
- const unsigned char *src = source_c;
230
- unsigned char *cword_ptr = destination_c;
231
- unsigned char *dst = destination_c + 4;
232
- unsigned int cword_val = 1U << 31;
233
- const unsigned char *guarantee_uncompressed = last_byte - 8;
234
-
235
- #ifdef speedup_incompressible
236
- unsigned char *prev_dst = dst;
237
- const unsigned char *prev_src = src;
238
- #endif
239
-
240
- hash_counter = hash_counter;
241
- first_valid = first_valid;
242
-
243
- // save first 4 bytes uncompressed
244
- while(src < source_c + 4 && src < guarantee_uncompressed)
245
- {
246
- cword_val = (cword_val >> 1);
247
- *dst = *src;
248
- dst++;
249
- src++;
250
- }
251
-
252
- while(src < guarantee_uncompressed)
253
- {
254
- unsigned int fetch;
255
- if ((cword_val & 1) == 1)
256
- {
257
- // check if destinationc pointer could exceed destination buffer
258
- if (dst > destination_c + size)
259
- return 0;
260
-
261
- // store control word
262
- fast_write((cword_val >> 1) | (1U << 31), cword_ptr, 4);
263
- cword_ptr = dst;
264
- dst += 4;
265
- cword_val = 1U << 31;
266
-
267
- #ifdef speedup_incompressible
268
- // check if source chunk is compressible
269
- if (dst - prev_dst > src - prev_src && src > source_c + 1000)
270
- {
271
- int q;
272
- for(q = 0; q < 30 && src + 31 < guarantee_uncompressed && dst + 35 < destination_c + size; q++)
273
- {
274
-
275
- #if(COMPRESSION_LEVEL == 0)
276
- int w;
277
- for(w = 0; w < 31; w++)
278
- {
279
- fetch = fast_read(src + w, 4);
280
- *(unsigned int*)&hashtable[hash_func(fetch)][0] = fast_read(src + w, 4);
281
- hashtable[hash_func(fetch)][1] = src + w;
282
- }
283
- #endif
284
- fast_write((1U << 31), dst - 4, 4);
285
- memcpy(dst, src, 31);
286
-
287
- dst += 4*8 - 1 + 4;
288
- src += 4*8 - 1;
289
- prev_src = src;
290
- prev_dst = dst;
291
- cword_ptr = dst - 4;
292
- }
293
- }
294
- #endif
295
- }
296
- #ifdef test_rle
297
- // check for rle sequence
298
- if (fast_read(src, 4) == fast_read(src + 1, 4))
299
- {
300
- const unsigned char *orig_src;
301
- fetch = fast_read(src, 4);
302
- orig_src = src;
303
- do src = src + 4; while (src <= guarantee_uncompressed - 4 && fetch == fast_read(src, 4));
304
- if((src - orig_src) <= 2047)
305
- {
306
- fast_write(((fetch & 0xff) << 16) | (unsigned int)((src - orig_src) << 4) | 15, dst, 4);
307
- dst = dst + 3;
308
- }
309
- else
310
- {
311
- fast_write(((fetch & 0xff) << 16) | 15, dst, 4);
312
- fast_write((unsigned int)(src - orig_src), dst + 3, 4);
313
- dst = dst + 7;
314
- }
315
- cword_val = (cword_val >> 1) | (1 << 31);
316
- }
317
- else
318
- #endif
319
- {
320
- const unsigned char *o;
321
- unsigned int hash, matchlen;
322
-
323
- #if(COMPRESSION_LEVEL < 2)
324
- unsigned int cached;
325
-
326
- fetch = fast_read(src, 4);
327
- hash = hash_func(fetch);
328
-
329
- cached = fetch ^ *(unsigned int*)&hashtable[hash][0];
330
- *(unsigned int*)&hashtable[hash][0] = fetch;
331
-
332
- o = hashtable[hash][1];
333
- hashtable[hash][1] = src;
334
-
335
- #else
336
- unsigned char c;
337
- unsigned int k, m;
338
- const unsigned char *offset2 = 0;
339
-
340
- fetch = fast_read(src, 4);
341
- hash = hash_func(fetch);
342
-
343
- matchlen = 0;
344
- c = hash_counter[hash];
345
- for(k = 0; k < AND + 1; k++)
346
- {
347
- o = hashtable[hash][(c - k) & AND];
348
- if(o > first_valid && o < src - 3 && *(src + matchlen) == *(o + matchlen) && (fast_read(o, 3) & 0xffffff) == (fetch & 0xffffff) && src - o < 131071)
349
- {
350
- size_t remaining;
351
- remaining = guarantee_uncompressed - src;
352
- m = 3;
353
- if (fast_read(o, 4) == fetch)
354
- {
355
- while(*(o + m) == *(src + m) && m < remaining)
356
- m++;
357
- }
358
- if (m > matchlen)
359
- {
360
- matchlen = m;
361
- offset2 = o;
362
- }
363
- }
364
- }
365
- o = offset2;
366
- c = (hash_counter[hash] + 1) & AND;
367
- hash_counter[hash] = c;
368
- hashtable[hash][c] = src;
369
- #endif
370
-
371
- #if(COMPRESSION_LEVEL == 0)
372
- if (o != 0 && (cached & 0xffffff) == 0 && src - o > 3)
373
- #elif(COMPRESSION_LEVEL == 1)
374
- if ((cached & 0xffffff) == 0 && o > first_valid && o < src - 3 && ((fast_read(o, 3) ^ fast_read(src, 3)) & 0xffffff) == 0 && src - o < 131071)
375
- #elif(COMPRESSION_LEVEL > 1)
376
- if(matchlen == 3)
377
- #endif
378
- {
379
- unsigned int offset;
380
- offset = (unsigned int)(src - o);
381
-
382
- #if(COMPRESSION_LEVEL < 2)
383
- if (cached & 0xffffffff)
384
- #endif
385
- {
386
- #if (COMPRESSION_LEVEL > 2)
387
- unsigned int u;
388
- for(u = 1; u < 3; u++)
389
- {
390
- hash = hash_func(fast_read(src + u, 4));
391
- c = (hash_counter[hash] + 1) & AND;
392
- hash_counter[hash] = c;
393
- hashtable[hash][c] = src + u;
394
- }
395
- #endif
396
-
397
- #if (COMPRESSION_LEVEL == 0)
398
- cword_val = (cword_val >> 1) | (1U << 31);
399
- fast_write(3 | (hash << 4), dst, 2);
400
- src += 3;
401
- dst += 2;
402
- #else
403
-
404
- if(offset <= 63)
405
- {
406
- // encode lz match
407
- *dst = (unsigned char)(offset << 2);
408
- cword_val = (cword_val >> 1) | (1U << 31);
409
- src += 3;
410
- dst++;
411
- }
412
- else if (offset <= 16383)
413
- {
414
- // encode lz match
415
- unsigned int f = (offset << 2) | 1;
416
- fast_write(f, dst, 2);
417
- cword_val = (cword_val >> 1) | (1U << 31);
418
- src += 3;
419
- dst += 2;
420
- }
421
- else
422
- {
423
- // encode literal
424
- *dst = *src;
425
- src++;
426
- dst++;
427
- cword_val = (cword_val >> 1);
428
- }
429
- #endif
430
- }
431
- #if(COMPRESSION_LEVEL > 1)
432
- }
433
- else if(matchlen > 3)
434
- {
435
- #elif(COMPRESSION_LEVEL < 2)
436
- else
437
- #endif
438
- {
439
- // encode lz match
440
- unsigned int offset;
441
-
442
- #if(COMPRESSION_LEVEL < 2)
443
- const unsigned char *old_src = src;
444
- offset = (unsigned int)(src - o);
445
- cword_val = (cword_val >> 1) | (1U << 31);
446
-
447
- src += 3;
448
- while(*(o + (src - old_src)) == *src && src < guarantee_uncompressed)
449
- src++;
450
- matchlen = (unsigned int)(src - old_src);
451
- #else
452
- unsigned int u;
453
- offset = (unsigned int)(src - o);
454
- cword_val = (cword_val >> 1) | (1U << 31);
455
-
456
- #if (COMPRESSION_LEVEL > 2)
457
- for(u = 1; u < matchlen; u++)
458
- #else
459
- for(u = 1; u < matchlen && u < 5; u++)
460
- #endif
461
- {
462
- hash = hash_func(fast_read(src + u, 4));
463
- c = (hash_counter[hash] + 1) & AND;
464
- hash_counter[hash] = c;
465
- hashtable[hash][c] = src + u;
466
- }
467
- src += matchlen;
468
- #endif
469
-
470
- #if (COMPRESSION_LEVEL == 0)
471
- if (matchlen < 15)
472
- {
473
- fast_write(matchlen | (hash << 4), dst, 2);
474
- dst += 2;
475
- }
476
- else if (matchlen < 255)
477
- {
478
- fast_write(hash << 4, dst, 2);
479
- *(dst + 2) = (unsigned char)matchlen;
480
- dst += 3;
481
- }
482
- else
483
- {
484
- fast_write(hash << 4, dst, 2);
485
- *(dst + 2) = 0;
486
- fast_write(matchlen, dst + 3, 4);
487
- dst += 7;
488
- }
489
- #else
490
- if (matchlen <= 18 && offset <= 1023)
491
- {
492
- unsigned int f = ((matchlen - 3) << 2) | (offset << 6) | 2;
493
- fast_write(f, dst, 2);
494
- dst += 2;
495
- }
496
-
497
- else if(matchlen <= 34 && offset <= 65535)
498
- {
499
- unsigned int f = ((matchlen - 3) << 3) | (offset << 8) | 3;
500
- fast_write(f, dst, 3);
501
- dst += 3;
502
- }
503
- else if (matchlen >= 3)
504
- {
505
- if (matchlen <= 2050)
506
- {
507
- unsigned int f = ((matchlen - 3) << 4) | (offset << 15) | 7;
508
- fast_write(f, dst, 4);
509
- dst += 4;
510
- }
511
- else
512
- {
513
- fast_write(7, dst, 4);
514
- fast_write(matchlen, dst + 4, 4);
515
- fast_write(offset, dst + 8, 4);
516
- dst += 12;
517
- }
518
- }
519
- #endif
520
- }
521
- }
522
-
523
- else
524
- {
525
- // encode literal
526
- *dst = *src;
527
- src++;
528
- dst++;
529
- cword_val = (cword_val >> 1);
530
- }
531
- }
532
- }
533
-
534
- // save last source bytes as literals
535
- while (src <= last_byte)
536
- {
537
- if ((cword_val & 1) == 1)
538
- {
539
- fast_write((cword_val >> 1) | (1U << 31), cword_ptr, 4);
540
- cword_ptr = dst;
541
- dst += 4;
542
- cword_val = 1U << 31;
543
- }
544
-
545
- if (src < last_byte - 2 && src > source_c + 3)
546
- {
547
- hashtable[hash_func(fast_read(src, 4))][1] = src;
548
- *(unsigned int*)&hashtable[hash_func(fast_read(src, 4))][0] = fast_read(src, 4);
549
- }
550
- *dst = *src;
551
- src++;
552
- dst++;
553
-
554
- cword_val = (cword_val >> 1);
555
- }
556
-
557
- while((cword_val & 1) != 1)
558
- cword_val = (cword_val >> 1);
559
-
560
- fast_write((cword_val >> 1) | (1U << 31), cword_ptr, 4);
561
-
562
- // min. size must be 9 bytes so that the qlz_size functions can take 9 bytes as argument
563
- if (dst - destination_c < 9)
564
- return 9;
565
- else
566
- return (unsigned int)(dst - destination_c);
567
- }
568
-
569
- size_t qlz_decompress_core(const unsigned char *source, void *destination, size_t size, size_t source_size, unsigned char *first_valid, const unsigned char *hashtable[])
570
- {
571
- const unsigned char *source_c = (const unsigned char*)source;
572
- unsigned char *destination_c = (unsigned char*)destination;
573
- const unsigned char *src = source_c;
574
- unsigned char *dst = destination_c;
575
- const unsigned char* last_byte_successor = destination_c + size;
576
- unsigned int cword_val = 1;
577
- const unsigned int bitlut[16] = {4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
578
- const unsigned char *guaranteed_uncompressed = last_byte_successor - 4;
579
- unsigned char *last_hashed = destination_c + 3;
580
-
581
- first_valid = first_valid;
582
- last_hashed = last_hashed;
583
- hashtable = hashtable;
584
-
585
- // prevent spurious memory read on a source with size < 4
586
- if (dst >= guaranteed_uncompressed)
587
- {
588
- src += 4;
589
- while(dst < last_byte_successor)
590
- {
591
- *dst = *src;
592
- dst++;
593
- src++;
594
- }
595
-
596
- return (unsigned int)(dst - destination_c);
597
- }
598
-
599
-
600
- for(;;)
601
- {
602
- unsigned int fetch;
603
-
604
- if (cword_val == 1)
605
- {
606
- // fetch control word
607
- cword_val = fast_read_safe(src, 4, source_c + source_size) | (1U << 31);
608
- src += 4;
609
- }
610
-
611
- fetch = fast_read_safe(src, 4, source_c + source_size);
612
-
613
- // check if we must decode lz match
614
- if ((cword_val & 1) == 1)
615
- {
616
- unsigned int matchlen;
617
-
618
- #if(COMPRESSION_LEVEL == 0)
619
- unsigned int hash;
620
- const unsigned char *offset2;
621
-
622
- cword_val = cword_val >> 1;
623
-
624
- if((fetch & 0xf) != 15)
625
- {
626
- hash = (fetch >> 4) & 0xfff;
627
- offset2 = hashtable[hash];
628
-
629
- if((fetch & 0xf) != 0)
630
- {
631
- matchlen = (fetch & 0xf);
632
- src += 2;
633
- }
634
- else if((fetch & 0x00ff0000) != 0)
635
- {
636
- matchlen = *(src + 2);
637
- src += 3;
638
- }
639
- else
640
- {
641
- matchlen = fast_read(src + 3, 4);
642
- src += 7;
643
- }
644
- memcpy_up(dst, offset2, matchlen);
645
- while(last_hashed < dst)
646
- {
647
- last_hashed++;
648
- hashtable[hash_func(fast_read(last_hashed, 4))] = last_hashed;
649
- }
650
- dst += matchlen;
651
- last_hashed = dst - 1;
652
- }
653
-
654
- #else
655
- unsigned int offset;
656
- cword_val = cword_val >> 1;
657
-
658
- if ((fetch & 3) == 0)
659
- {
660
- offset = (fetch & 0xff) >> 2;
661
- #ifdef memory_safe
662
- if (3 > (unsigned int)(guaranteed_uncompressed - dst) || offset > (unsigned int)(dst - first_valid))
663
- return 0;
664
- #endif
665
- memcpy_up(dst, dst - offset, 3);
666
- dst += 3;
667
- src++;
668
- }
669
- else if ((fetch & 2) == 0)
670
- {
671
- offset = (fetch & 0xffff) >> 2;
672
- #ifdef memory_safe
673
- if (3 > (unsigned int)(guaranteed_uncompressed - dst) || offset > (unsigned int)(dst - first_valid))
674
- return 0;
675
- #endif
676
- memcpy_up(dst, dst - offset, 3);
677
- dst += 3;
678
- src += 2;
679
- }
680
- else if ((fetch & 1) == 0)
681
- {
682
- offset = (fetch & 0xffff) >> 6;
683
- matchlen = ((fetch >> 2) & 15) + 3;
684
- #ifdef memory_safe
685
- if (matchlen > (unsigned int)(guaranteed_uncompressed - dst) || offset > (unsigned int)(dst - first_valid))
686
- return 0;
687
- #endif
688
- memcpy_up(dst, dst - offset, matchlen);
689
- src += 2;
690
- dst += matchlen;
691
- }
692
- else if ((fetch & 4) == 0)
693
- {
694
- offset = (fetch & 0xffffff) >> 8;
695
- matchlen = ((fetch >> 3) & 31) + 3;
696
- #ifdef memory_safe
697
- if (matchlen > (unsigned int)(guaranteed_uncompressed - dst) || offset > (unsigned int)(dst - first_valid))
698
- return 0;
699
- #endif
700
- memcpy_up(dst, dst - offset, matchlen);
701
- src += 3;
702
- dst += matchlen;
703
- }
704
- else if ((fetch & 8) == 0)
705
- {
706
- offset = (fetch >> 15);
707
- if (offset != 0)
708
- {
709
- matchlen = ((fetch >> 4) & 2047) + 3;
710
- src += 4;
711
- }
712
- else
713
- {
714
- matchlen = fast_read_safe(src + 4, 4, source_c + source_size);
715
- offset = fast_read_safe(src + 8, 4, source_c + source_size);
716
- src += 12;
717
- }
718
- #ifdef memory_safe
719
- if (matchlen > (unsigned int)(guaranteed_uncompressed - dst) || offset > (unsigned int)(dst - first_valid))
720
- return 0;
721
- #endif
722
- memcpy_up(dst, dst - offset, matchlen);
723
- dst += matchlen;
724
- }
725
- #endif
726
- else
727
- {
728
- // decode rle sequence
729
- unsigned char rle_char;
730
- rle_char = (unsigned char)(fetch >> 16);
731
- matchlen = ((fetch >> 4) & 0xfff);
732
-
733
- if(matchlen != 0)
734
- src += 3;
735
- else
736
- {
737
- matchlen = fast_read_safe(src + 3, 4, source_c + source_size);
738
- src += 7;
739
- }
740
-
741
- #ifdef memory_safe
742
- if(matchlen > (unsigned int)(guaranteed_uncompressed - dst))
743
- return 0;
744
- #endif
745
- memset(dst, rle_char, matchlen);
746
-
747
- #if(COMPRESSION_LEVEL == 0)
748
- while(last_hashed < dst - 1)
749
- {
750
- last_hashed++;
751
- hashtable[hash_func(fast_read(last_hashed, 4))] = last_hashed;
752
- }
753
- last_hashed = dst - 1 + matchlen;
754
- #endif
755
- dst += matchlen;
756
- }
757
- }
758
- else
759
- {
760
- // decode literal
761
- #ifdef memory_safe
762
- if (4 > destination_c + size - dst || src > source_c + source_size + 4)
763
- return 0;
764
- #endif
765
- memcpy_up(dst, src, 4);
766
-
767
- dst += bitlut[cword_val & 0xf];
768
- src += bitlut[cword_val & 0xf];
769
- cword_val = cword_val >> (bitlut[cword_val & 0xf]);
770
-
771
- #if(COMPRESSION_LEVEL == 0)
772
- while(last_hashed < dst - 3)
773
- {
774
- last_hashed++;
775
- hashtable[hash_func(fast_read(last_hashed, 4))] = last_hashed;
776
- }
777
- #endif
778
- if (dst >= guaranteed_uncompressed)
779
- {
780
- // decode last literals and exit
781
- while(dst < last_byte_successor)
782
- {
783
- if (cword_val == 1)
784
- {
785
- src += 4;
786
- cword_val = 1U << 31;
787
- }
788
- if (1 > destination_c + size - dst)
789
- return 0;
790
-
791
- *dst = *src;
792
- dst++;
793
- src++;
794
- cword_val = cword_val >> 1;
795
- }
796
-
797
- #if(COMPRESSION_LEVEL == 0)
798
- while(last_hashed < last_byte_successor - 4)
799
- {
800
- last_hashed++;
801
- hashtable[hash_func(fast_read(last_hashed, 4))] = last_hashed;
802
- }
803
- #endif
804
- if((src - 1) - source_c > 8) // 8 bytes comp. size excessive len is ok
805
- return 0;
806
- else if(dst - destination_c - size == 0)
807
- return size;
808
- else
809
- return 0;
810
- }
811
- }
812
- }
813
- }
814
-
815
- size_t qlz_size_decompressed(const char *source)
816
- {
817
- unsigned int n, r;
818
- n = (((*source) & 2) == 2) ? 4 : 1;
819
- r = fast_read(source + 1 + n, n);
820
- r = r & (0xffffffff >> ((4 - n)*8));
821
- return r;
822
- }
823
-
824
- size_t qlz_size_compressed(const char *source)
825
- {
826
- unsigned int n, r;
827
- n = (((*source) & 2) == 2) ? 4 : 1;
828
- r = fast_read(source + 1, n);
829
- r = r & (0xffffffff >> ((4 - n)*8));
830
- return r;
831
- }
832
-
833
- size_t qlz_compress(const void *source, char *destination, size_t size, char *scratch)
834
- {
835
- // 1-8 bytes for aligning (not 0-7!); 8 bytes for buffersize (padds on 32 bit cpu); HASH_SIZE hash table; STREAMING_MODE_ROUNDED bytes streambuffer; optional HASH_ENTRIES byte hash counter
836
- unsigned char *buffer_aligned = (unsigned char *)scratch + 8 - (((size_t)scratch) % 8);
837
- const unsigned char *(*hashtable)[AND + 1] = (const unsigned char *(*)[AND + 1])(buffer_aligned + 8);
838
- size_t *buffersize = (size_t *)buffer_aligned;
839
- unsigned char *streambuffer = buffer_aligned + 8 + HASH_SIZE;
840
- unsigned int r;
841
- unsigned int compressed, base;
842
- unsigned char *hash_counter = streambuffer + STREAMING_MODE_ROUNDED;
843
-
844
- if(size == 0 || size > 0xffffffff)
845
- return 0;
846
-
847
- #if (COMPRESSION_LEVEL == 0 && STREAMING_MODE_ROUNDED == 0)
848
- memset((void *)hashtable, 0, HASH_SIZE);
849
- #endif
850
-
851
- if(size < 216)
852
- base = 3;
853
- else
854
- base = 9;
855
-
856
- // if not STREAMING_MODE, then STREAMING_MODE_ROUNDED == 0 and first case (streaming buffer full) is executed unconditionally, functioning as block comp.
857
- if (*buffersize + size - 1 >= STREAMING_MODE_ROUNDED)
858
- {
859
- #if (COMPRESSION_LEVEL == 0 && STREAMING_MODE_ROUNDED != 0)
860
- memset((void *)hashtable, 0, HASH_SIZE);
861
- #endif
862
-
863
- r = base + qlz_compress_core(source, (unsigned char*)destination + base, (unsigned int)size, hashtable, (const unsigned char*)source, hash_counter);
864
- #if (COMPRESSION_LEVEL == 0 && STREAMING_MODE_ROUNDED != 0)
865
- memset((void *)hashtable, 0, HASH_SIZE);
866
- #endif
867
-
868
- if(r == base)
869
- {
870
- memcpy(destination + base, source, size);
871
- r = (unsigned int)size + base;
872
- compressed = 0;
873
- }
874
- else
875
- compressed = 1;
876
- *buffersize = 0;
877
- }
878
- else
879
- {
880
- memcpy(streambuffer + *buffersize, source, size);
881
- r = base + qlz_compress_core(streambuffer + *buffersize, (unsigned char*)destination + base, (unsigned int)size, hashtable, streambuffer, hash_counter);
882
-
883
- if(r == base)
884
- {
885
- memcpy(destination + base, streambuffer + *buffersize, size);
886
- r = (unsigned int)size + base;
887
- compressed = 0;
888
-
889
- memset((void*)hashtable, 0, HASH_SIZE);
890
- }
891
- else
892
- compressed = 1;
893
- *buffersize += size;
894
- }
895
-
896
- if(base == 3)
897
- {
898
- *destination = (unsigned char)(0 | compressed);
899
- *(destination + 1) = (unsigned char)r;
900
- *(destination + 2) = (unsigned char)size;
901
- }
902
- else
903
- {
904
- *destination = (unsigned char)(2 | compressed);
905
- fast_write(r, destination + 1, 4);
906
- fast_write((unsigned int)size, destination + 5, 4);
907
- }
908
-
909
- #if (COMPRESSION_LEVEL == 0)
910
- *destination = (*destination) | 4;
911
- #endif
912
-
913
- return (size_t)r;
914
- }
915
-
916
-
917
-
918
- size_t qlz_decompress(const char *source, void *destination, char *scratch)
919
- {
920
- // 1-8 bytes for aligning (not 0-7!); 8 bytes for buffersize (padds on 32bit cpu); STREAMING_MODE_ROUNDED streambuffer; HASH_SIZE hash table
921
- unsigned char *buffer_aligned = (unsigned char *)scratch + 8 - (((size_t)scratch) % 8);
922
- size_t *buffersize = (size_t *)buffer_aligned;
923
- unsigned int headerlen = 2*((((*source) & 2) == 2) ? 4 : 1) + 1; // get header len
924
-
925
- unsigned char *streambuffer = buffer_aligned + 8;
926
- const unsigned char **hashtable = (const unsigned char **)(streambuffer + STREAMING_MODE_ROUNDED);
927
-
928
- size_t dsiz = qlz_size_decompressed((char *)source);
929
- size_t csiz = qlz_size_compressed((char *)source);
930
- if (*buffersize + qlz_size_decompressed((char *)source) - 1 >= STREAMING_MODE_ROUNDED)
931
- {
932
- if((*source & 1) == 1)
933
- qlz_decompress_core((const unsigned char *)source + headerlen, destination, dsiz, csiz, (unsigned char*)destination, hashtable);
934
- else
935
- memcpy(destination, source + headerlen, dsiz);
936
- *buffersize = 0;
937
- }
938
- else
939
- {
940
- if((*source & 1) == 1)
941
- qlz_decompress_core((const unsigned char *)source + headerlen, streambuffer + *buffersize, dsiz, csiz, streambuffer, hashtable);
942
- else
943
- memcpy(streambuffer + *buffersize, source + headerlen, dsiz);
944
- memcpy(destination, streambuffer + *buffersize, dsiz);
945
- *buffersize += dsiz;
946
- }
947
- return dsiz;
948
- }