extlz4 0.2.4.2 → 0.3.2

Files changed (79)
  1. checksums.yaml +5 -5
  2. data/HISTORY.ja.md +25 -0
  3. data/README.md +49 -41
  4. data/bin/extlz4 +1 -1
  5. data/contrib/lz4/INSTALL +1 -0
  6. data/contrib/lz4/Makefile.inc +87 -0
  7. data/contrib/lz4/NEWS +89 -0
  8. data/contrib/lz4/README.md +42 -36
  9. data/contrib/lz4/build/README.md +55 -0
  10. data/contrib/lz4/build/VS2010/datagen/datagen.vcxproj +169 -0
  11. data/contrib/lz4/build/VS2010/frametest/frametest.vcxproj +176 -0
  12. data/contrib/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +180 -0
  13. data/contrib/lz4/build/VS2010/fullbench/fullbench.vcxproj +176 -0
  14. data/contrib/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +173 -0
  15. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +51 -0
  16. data/contrib/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +179 -0
  17. data/contrib/lz4/build/VS2010/liblz4/liblz4.vcxproj +175 -0
  18. data/contrib/lz4/build/VS2010/lz4.sln +98 -0
  19. data/contrib/lz4/build/VS2010/lz4/lz4.rc +51 -0
  20. data/contrib/lz4/build/VS2010/lz4/lz4.vcxproj +189 -0
  21. data/contrib/lz4/build/VS2017/datagen/datagen.vcxproj +173 -0
  22. data/contrib/lz4/build/VS2017/frametest/frametest.vcxproj +180 -0
  23. data/contrib/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +184 -0
  24. data/contrib/lz4/build/VS2017/fullbench/fullbench.vcxproj +180 -0
  25. data/contrib/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +177 -0
  26. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +51 -0
  27. data/contrib/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +183 -0
  28. data/contrib/lz4/build/VS2017/liblz4/liblz4.vcxproj +179 -0
  29. data/contrib/lz4/build/VS2017/lz4.sln +103 -0
  30. data/contrib/lz4/build/VS2017/lz4/lz4.rc +51 -0
  31. data/contrib/lz4/build/VS2017/lz4/lz4.vcxproj +164 -0
  32. data/contrib/lz4/build/cmake/CMakeLists.txt +235 -0
  33. data/contrib/lz4/lib/README.md +98 -34
  34. data/contrib/lz4/lib/liblz4-dll.rc.in +35 -0
  35. data/contrib/lz4/lib/lz4.c +1698 -681
  36. data/contrib/lz4/lib/lz4.h +546 -235
  37. data/contrib/lz4/lib/lz4frame.c +608 -378
  38. data/contrib/lz4/lib/lz4frame.h +315 -83
  39. data/contrib/lz4/lib/lz4frame_static.h +4 -100
  40. data/contrib/lz4/lib/lz4hc.c +1090 -282
  41. data/contrib/lz4/lib/lz4hc.h +276 -141
  42. data/contrib/lz4/lib/xxhash.c +371 -235
  43. data/contrib/lz4/lib/xxhash.h +128 -93
  44. data/contrib/lz4/ossfuzz/Makefile +78 -0
  45. data/contrib/lz4/ossfuzz/compress_frame_fuzzer.c +48 -0
  46. data/contrib/lz4/ossfuzz/compress_fuzzer.c +58 -0
  47. data/contrib/lz4/ossfuzz/compress_hc_fuzzer.c +64 -0
  48. data/contrib/lz4/ossfuzz/decompress_frame_fuzzer.c +75 -0
  49. data/contrib/lz4/ossfuzz/decompress_fuzzer.c +62 -0
  50. data/contrib/lz4/ossfuzz/fuzz.h +48 -0
  51. data/contrib/lz4/ossfuzz/fuzz_data_producer.c +77 -0
  52. data/contrib/lz4/ossfuzz/fuzz_data_producer.h +36 -0
  53. data/contrib/lz4/ossfuzz/fuzz_helpers.h +94 -0
  54. data/contrib/lz4/ossfuzz/lz4_helpers.c +51 -0
  55. data/contrib/lz4/ossfuzz/lz4_helpers.h +13 -0
  56. data/contrib/lz4/ossfuzz/ossfuzz.sh +23 -0
  57. data/contrib/lz4/ossfuzz/round_trip_frame_fuzzer.c +43 -0
  58. data/contrib/lz4/ossfuzz/round_trip_fuzzer.c +57 -0
  59. data/contrib/lz4/ossfuzz/round_trip_hc_fuzzer.c +44 -0
  60. data/contrib/lz4/ossfuzz/round_trip_stream_fuzzer.c +302 -0
  61. data/contrib/lz4/ossfuzz/standaloneengine.c +74 -0
  62. data/contrib/lz4/ossfuzz/travisoss.sh +26 -0
  63. data/contrib/lz4/tmp +0 -0
  64. data/contrib/lz4/tmpsparse +0 -0
  65. data/ext/blockapi.c +5 -5
  66. data/ext/extlz4.c +2 -0
  67. data/ext/extlz4.h +5 -0
  68. data/ext/frameapi.c +1 -1
  69. data/ext/hashargs.c +2 -2
  70. data/ext/hashargs.h +1 -1
  71. data/ext/lz4_amalgam.c +0 -23
  72. data/gemstub.rb +5 -16
  73. data/lib/extlz4.rb +51 -3
  74. data/lib/extlz4/oldstream.rb +1 -1
  75. data/test/common.rb +2 -2
  76. metadata +73 -16
  77. data/contrib/lz4/circle.yml +0 -39
  78. data/contrib/lz4/lib/lz4opt.h +0 -366
  79. data/lib/extlz4/version.rb +0 -3
data/contrib/lz4/build/cmake/CMakeLists.txt (new file)
@@ -0,0 +1,235 @@
+ # CMake support for LZ4
+ #
+ # To the extent possible under law, the author(s) have dedicated all
+ # copyright and related and neighboring rights to this software to
+ # the public domain worldwide. This software is distributed without
+ # any warranty.
+ #
+ # For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+ #
+ # LZ4's CMake support is maintained by Evan Nemerson; when filing
+ # bugs please mention @nemequ to make sure I see it.
+
+ set(LZ4_TOP_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..")
+
+ option(LZ4_BUILD_CLI "Build lz4 program" ON)
+ option(LZ4_BUILD_LEGACY_LZ4C "Build lz4c progam with legacy argument support" ON)
+
+ # Parse version information
+ file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_MAJOR REGEX "^#define LZ4_VERSION_MAJOR +([0-9]+) +.*$")
+ string(REGEX REPLACE "^#define LZ4_VERSION_MAJOR +([0-9]+) +.*$" "\\1" LZ4_VERSION_MAJOR "${LZ4_VERSION_MAJOR}")
+ file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_MINOR REGEX "^#define LZ4_VERSION_MINOR +([0-9]+) +.*$")
+ string(REGEX REPLACE "^#define LZ4_VERSION_MINOR +([0-9]+) +.*$" "\\1" LZ4_VERSION_MINOR "${LZ4_VERSION_MINOR}")
+ file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_RELEASE REGEX "^#define LZ4_VERSION_RELEASE +([0-9]+) +.*$")
+ string(REGEX REPLACE "^#define LZ4_VERSION_RELEASE +([0-9]+) +.*$" "\\1" LZ4_VERSION_RELEASE "${LZ4_VERSION_RELEASE}")
+ set(LZ4_VERSION_STRING "${LZ4_VERSION_MAJOR}.${LZ4_VERSION_MINOR}.${LZ4_VERSION_RELEASE}")
+ mark_as_advanced(LZ4_VERSION_STRING LZ4_VERSION_MAJOR LZ4_VERSION_MINOR LZ4_VERSION_RELEASE)
+
+ if("${CMAKE_VERSION}" VERSION_LESS "3.0")
+ project(LZ4 C)
+ else()
+ cmake_policy (SET CMP0048 NEW)
+ project(LZ4
+ VERSION ${LZ4_VERSION_STRING}
+ LANGUAGES C)
+ endif()
+
+ cmake_minimum_required (VERSION 2.8.6)
+
+ # If LZ4 is being bundled in another project, we don't want to
+ # install anything. However, we want to let people override this, so
+ # we'll use the LZ4_BUNDLED_MODE variable to let them do that; just
+ # set it to OFF in your project before you add_subdirectory(lz4/contrib/cmake_unofficial).
+ get_directory_property(LZ4_PARENT_DIRECTORY PARENT_DIRECTORY)
+ if("${LZ4_BUNDLED_MODE}" STREQUAL "")
+ # Bundled mode hasn't been set one way or the other, set the default
+ # depending on whether or not we are the top-level project.
+ if("${LZ4_PARENT_DIRECTORY}" STREQUAL "")
+ set(LZ4_BUNDLED_MODE OFF)
+ else()
+ set(LZ4_BUNDLED_MODE ON)
+ endif()
+ endif()
+ mark_as_advanced(LZ4_BUNDLED_MODE)
+
+ # CPack
+ if(NOT LZ4_BUNDLED_MODE AND NOT CPack_CMake_INCLUDED)
+ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "LZ4 compression library")
+ set(CPACK_PACKAGE_DESCRIPTION_FILE "${LZ4_TOP_SOURCE_DIR}/README.md")
+ set(CPACK_RESOURCE_FILE_LICENSE "${LZ4_TOP_SOURCE_DIR}/LICENSE")
+ set(CPACK_PACKAGE_VERSION_MAJOR ${LZ4_VERSION_MAJOR})
+ set(CPACK_PACKAGE_VERSION_MINOR ${LZ4_VERSION_MINOR})
+ set(CPACK_PACKAGE_VERSION_PATCH ${LZ4_VERSION_RELEASE})
+ include(CPack)
+ endif(NOT LZ4_BUNDLED_MODE AND NOT CPack_CMake_INCLUDED)
+
+ # Allow people to choose whether to build shared or static libraries
+ # via the BUILD_SHARED_LIBS option unless we are in bundled mode, in
+ # which case we always use static libraries.
+ include(CMakeDependentOption)
+ CMAKE_DEPENDENT_OPTION(BUILD_SHARED_LIBS "Build shared libraries" ON "NOT LZ4_BUNDLED_MODE" OFF)
+ CMAKE_DEPENDENT_OPTION(BUILD_STATIC_LIBS "Build static libraries" OFF "BUILD_SHARED_LIBS" ON)
+
+ if(NOT BUILD_SHARED_LIBS AND NOT BUILD_STATIC_LIBS)
+ message(FATAL_ERROR "Both BUILD_SHARED_LIBS and BUILD_STATIC_LIBS have been disabled")
+ endif()
+
+ set(LZ4_LIB_SOURCE_DIR "${LZ4_TOP_SOURCE_DIR}/lib")
+ set(LZ4_PROG_SOURCE_DIR "${LZ4_TOP_SOURCE_DIR}/programs")
+
+ include_directories("${LZ4_LIB_SOURCE_DIR}")
+
+ # CLI sources
+ set(LZ4_SOURCES
+ "${LZ4_LIB_SOURCE_DIR}/lz4.c"
+ "${LZ4_LIB_SOURCE_DIR}/lz4hc.c"
+ "${LZ4_LIB_SOURCE_DIR}/lz4.h"
+ "${LZ4_LIB_SOURCE_DIR}/lz4hc.h"
+ "${LZ4_LIB_SOURCE_DIR}/lz4frame.c"
+ "${LZ4_LIB_SOURCE_DIR}/lz4frame.h"
+ "${LZ4_LIB_SOURCE_DIR}/xxhash.c")
+ set(LZ4_CLI_SOURCES
+ "${LZ4_PROG_SOURCE_DIR}/bench.c"
+ "${LZ4_PROG_SOURCE_DIR}/lz4cli.c"
+ "${LZ4_PROG_SOURCE_DIR}/lz4io.c"
+ "${LZ4_PROG_SOURCE_DIR}/datagen.c")
+
+ # Whether to use position independent code for the static library. If
+ # we're building a shared library this is ignored and PIC is always
+ # used.
+ option(LZ4_POSITION_INDEPENDENT_LIB "Use position independent code for static library (if applicable)" ON)
+
+ # liblz4
+ set(LZ4_LIBRARIES_BUILT)
+ if(BUILD_SHARED_LIBS)
+ add_library(lz4_shared SHARED ${LZ4_SOURCES})
+ set_target_properties(lz4_shared PROPERTIES
+ OUTPUT_NAME lz4
+ SOVERSION "${LZ4_VERSION_MAJOR}"
+ VERSION "${LZ4_VERSION_STRING}")
+ if(MSVC)
+ target_compile_definitions(lz4_shared PRIVATE
+ LZ4_DLL_EXPORT=1)
+ endif()
+ list(APPEND LZ4_LIBRARIES_BUILT lz4_shared)
+ endif()
+ if(BUILD_STATIC_LIBS)
+ add_library(lz4_static STATIC ${LZ4_SOURCES})
+ set_target_properties(lz4_static PROPERTIES
+ OUTPUT_NAME lz4
+ POSITION_INDEPENDENT_CODE ${LZ4_POSITION_INDEPENDENT_LIB})
+ list(APPEND LZ4_LIBRARIES_BUILT lz4_static)
+ endif()
+
+ # link to shared whenever possible, to static otherwise
+ if(BUILD_SHARED_LIBS)
+ set(LZ4_LINK_LIBRARY lz4_shared)
+ else()
+ set(LZ4_LINK_LIBRARY lz4_static)
+ endif()
+
+ # lz4
+ if (LZ4_BUILD_CLI)
+ set(LZ4_PROGRAMS_BUILT lz4cli)
+ add_executable(lz4cli ${LZ4_CLI_SOURCES})
+ set_target_properties(lz4cli PROPERTIES OUTPUT_NAME lz4)
+ target_link_libraries(lz4cli ${LZ4_LINK_LIBRARY})
+ endif()
+
+ # lz4c
+ if (LZ4_BUILD_LEGACY_LZ4C)
+ list(APPEND LZ4_PROGRAMS_BUILT lz4c)
+ add_executable(lz4c ${LZ4_CLI_SOURCES})
+ set_target_properties(lz4c PROPERTIES COMPILE_DEFINITIONS "ENABLE_LZ4C_LEGACY_OPTIONS")
+ target_link_libraries(lz4c ${LZ4_LINK_LIBRARY})
+ endif()
+
+ # Extra warning flags
+ include (CheckCCompilerFlag)
+ foreach (flag
+ # GCC-style
+ -Wall
+ -Wextra
+ -Wundef
+ -Wcast-qual
+ -Wcast-align
+ -Wshadow
+ -Wswitch-enum
+ -Wdeclaration-after-statement
+ -Wstrict-prototypes
+ -Wpointer-arith
+
+ # MSVC-style
+ /W4)
+ # Because https://gcc.gnu.org/wiki/FAQ#wnowarning
+ string(REGEX REPLACE "\\-Wno\\-(.+)" "-W\\1" flag_to_test "${flag}")
+ string(REGEX REPLACE "[^a-zA-Z0-9]+" "_" test_name "CFLAG_${flag_to_test}")
+
+ check_c_compiler_flag("${ADD_COMPILER_FLAGS_PREPEND} ${flag_to_test}" ${test_name})
+
+ if(${test_name})
+ set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}")
+ endif()
+
+ unset(test_name)
+ unset(flag_to_test)
+ endforeach (flag)
+
+ if(NOT LZ4_BUNDLED_MODE)
+ include(GNUInstallDirs)
+
+ install(TARGETS ${LZ4_PROGRAMS_BUILT}
+ BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}")
+ install(TARGETS ${LZ4_LIBRARIES_BUILT}
+ LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}")
+ install(FILES
+ "${LZ4_LIB_SOURCE_DIR}/lz4.h"
+ "${LZ4_LIB_SOURCE_DIR}/lz4frame.h"
+ "${LZ4_LIB_SOURCE_DIR}/lz4hc.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
+ install(FILES "${LZ4_PROG_SOURCE_DIR}/lz4.1"
+ DESTINATION "${CMAKE_INSTALL_MANDIR}/man1")
+ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/liblz4.pc"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
+ # install lz4cat and unlz4 symlinks on *nix
+ if(UNIX AND LZ4_BUILD_CLI)
+ install(CODE "
+ foreach(f lz4cat unlz4)
+ set(dest \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/\${f}\")
+ message(STATUS \"Symlinking: \${dest} -> lz4\")
+ execute_process(
+ COMMAND \"${CMAKE_COMMAND}\" -E create_symlink lz4 \"\${dest}\")
+ endforeach()
+ ")
+
+ # create manpage aliases
+ foreach(f lz4cat unlz4)
+ file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/${f}.1" ".so man1/lz4.1\n")
+ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${f}.1"
+ DESTINATION "${CMAKE_INSTALL_MANDIR}/man1")
+ endforeach()
+ endif(UNIX AND LZ4_BUILD_CLI)
+ endif(NOT LZ4_BUNDLED_MODE)
+
+ # pkg-config
+ set(PREFIX "${CMAKE_INSTALL_PREFIX}")
+
+ if("${CMAKE_INSTALL_FULL_LIBDIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
+ set(LIBDIR "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
+ else()
+ set(LIBDIR "${CMAKE_INSTALL_FULL_LIBDIR}")
+ endif()
+
+ if("${CMAKE_INSTALL_FULL_INCLUDEDIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}")
+ set(INCLUDEDIR "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
+ else()
+ set(INCLUDEDIR "${CMAKE_INSTALL_FULL_INCLUDEDIR}")
+ endif()
+
+ # for liblz4.pc substitution
+ set(VERSION ${LZ4_VERSION_STRING})
+ configure_file(${LZ4_LIB_SOURCE_DIR}/liblz4.pc.in liblz4.pc @ONLY)
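Editor's note: once liblz4 is built and installed by the CMakeLists.txt above, a consumer links against it with `-llz4` and uses the public `lz4.h` API. A minimal sketch of such a consumer (the round-trip payload is illustrative; all calls are documented public lz4 API):

```c
#include <lz4.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char src[] = "LZ4 is an extremely fast compression algorithm.";
    const int srcSize = (int)sizeof(src);
    char compressed[LZ4_COMPRESSBOUND(sizeof(src))]; /* worst-case output size */
    char restored[sizeof(src)];

    const int cSize = LZ4_compress_default(src, compressed, srcSize,
                                           (int)sizeof(compressed));
    if (cSize <= 0) return 1;

    const int dSize = LZ4_decompress_safe(compressed, restored, cSize,
                                          (int)sizeof(restored));
    if (dSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0) return 1;

    printf("round trip OK: %d -> %d -> %d bytes\n", srcSize, cSize, dSize);
    return 0;
}
```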
data/contrib/lz4/lib/README.md
@@ -1,47 +1,110 @@
  LZ4 - Library Files
  ================================
 
- The directory contains many files, but depending on project's objectives,
+ The `/lib` directory contains many files, but depending on project's objectives,
  not all of them are necessary.
 
  #### Minimal LZ4 build
 
  The minimum required is **`lz4.c`** and **`lz4.h`**,
- which will provide the fast compression and decompression algorithm.
+ which provides the fast compression and decompression algorithms.
+ They generate and decode data using the [LZ4 block format].
 
 
- #### The High Compression variant of LZ4
+ #### High Compression variant
 
- For more compression at the cost of compression speed,
- the High Compression variant **lz4hc** is available.
- It's necessary to add **`lz4hc.c`** and **`lz4hc.h`**.
- The variant still depends on regular `lz4` source files.
- In particular, the decompression is still provided by `lz4.c`.
+ For more compression ratio at the cost of compression speed,
+ the High Compression variant called **lz4hc** is available.
+ Add files **`lz4hc.c`** and **`lz4hc.h`**.
+ This variant also compresses data using the [LZ4 block format],
+ and depends on regular `lib/lz4.*` source files.
 
 
- #### Compatibility issues
+ #### Frame support, for interoperability
 
- In order to produce files or streams compatible with `lz4` command line utility,
- it's necessary to encode lz4-compressed blocks using the [official interoperable frame format].
+ In order to produce compressed data compatible with `lz4` command line utility,
+ it's necessary to use the [official interoperable frame format].
  This format is generated and decoded automatically by the **lz4frame** library.
- In order to work properly, lz4frame needs lz4 and lz4hc, and also **xxhash**,
- which provides error detection.
- (_Advanced stuff_ : It's possible to hide xxhash symbols into a local namespace.
- This is what `liblz4` does, to avoid symbol duplication
- in case a user program would link to several libraries containing xxhash symbols.)
-
-
- #### Advanced API
-
- A more complex `lz4frame_static.h` is also provided.
- It contains definitions which are not guaranteed to remain stable within future versions.
- It must be used with static linking ***only***.
+ Its public API is described in `lib/lz4frame.h`.
+ In order to work properly, lz4frame needs all other modules present in `/lib`,
+ including, lz4 and lz4hc, and also **xxhash**.
+ So it's necessary to include all `*.c` and `*.h` files present in `/lib`.
+
+
+ #### Advanced / Experimental API
+
+ Definitions which are not guaranteed to remain stable in future versions,
+ are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`.
+ As the name strongly implies, these definitions should only be invoked
+ in the context of static linking ***only***.
+ Otherwise, dependent application may fail on API or ABI break in the future.
+ The associated symbols are also not exposed by the dynamic library by default.
+ Should they be nonetheless needed, it's possible to force their publication
+ by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS`
+ and `LZ4F_PUBLISH_STATIC_FUNCTIONS`.
+
+
+ #### Build macros
+
+ The following build macro can be selected to adjust source code behavior at compilation time :
+
+ - `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus.
+ This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them.
+ It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
+ For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`,
+ and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`.
+
+ - `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow.
+ Set to 65535 by default, which is the maximum value supported by lz4 format.
+ Reducing maximum distance will reduce opportunities for LZ4 to find matches,
+ hence will produce a worse compression ratio.
+ However, a smaller max distance can allow compatibility with specific decoders using limited memory budget.
+ This build macro only influences the compressed output of the compressor.
+
+ - `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning.
+ This is meant to invite users to update their source code.
+ Should this be a problem, it's generally possible to make the compiler ignore these warnings,
+ for example with `-Wno-deprecated-declarations` on `gcc`,
+ or `_CRT_SECURE_NO_WARNINGS` for Visual Studio.
+ This build macro offers another project-specific method
+ by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files.
+
+ - `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to <stdlib>'s `malloc`, `calloc` and `free`
+ by user-defined functions, which must be called `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
+ User functions must be available at link time.
+
+ - `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths
+ by using bitcount instructions, generally implemented as fast single instructions in many cpus.
+ In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance,
+ it's possible to use an optimized software path instead.
+ This is achieved by setting this build macros .
+ In most cases, it's not expected to be necessary,
+ but it can be legitimately considered for less common platforms.
+
+ - `LZ4_ALIGN_TEST` : alignment test ensures that the memory area
+ passed as argument to become a compression state is suitably aligned.
+ This test can be disabled if it proves flaky, by setting this value to 0.
+
+
+ #### Amalgamation
+
+ lz4 source code can be amalgamated into a single file.
+ One can combine all source code into `lz4_all.c` by using following command:
+ ```
+ cat lz4.c lz4hc.c lz4frame.c > lz4_all.c
+ ```
+ (`cat` file order is important) then compile `lz4_all.c`.
+ All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`.
 
 
- #### Using MinGW+MSYS to create DLL
+ #### Windows : using MinGW+MSYS to create DLL
 
  DLL can be created using MinGW+MSYS with the `make liblz4` command.
  This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`.
+ To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits:
+ ```
+ make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT
+ ```
  The import library is only required with Visual C++.
  The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library
  `dll\liblz4.dll` are required to compile a project using gcc/MinGW.
@@ -49,25 +112,26 @@ The dynamic library has to be added to linking options.
  It means that if a project that uses LZ4 consists of a single `test-dll.c`
  file it should be linked with `dll\liblz4.dll`. For example:
  ```
- gcc $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
+ $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
  ```
- The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`.
+ The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`.
 
 
- #### Miscellaneous
+ #### Miscellaneous
 
- Other files present in the directory are not source code. There are :
+ Other files present in the directory are not source code. They are :
 
- - LICENSE : contains the BSD license text
- - Makefile : script to compile or install lz4 library (static or dynamic)
- - liblz4.pc.in : for pkg-config (make install)
- - README.md : this file
+ - `LICENSE` : contains the BSD license text
+ - `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
+ - `liblz4.pc.in` : for `pkg-config` (used in `make install`)
+ - `README.md` : this file
 
  [official interoperable frame format]: ../doc/lz4_Frame_format.md
+ [LZ4 block format]: ../doc/lz4_Block_format.md
 
 
- #### License
+ #### License
 
  All source material within __lib__ directory are BSD 2-Clause licensed.
  See [LICENSE](LICENSE) for details.
- The license is also repeated at the top of each source file.
+ The license is also reminded at the top of each source file.
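Editor's note: the `LZ4_USER_MEMORY_FUNCTIONS` build macro documented in the README above requires the user project to supply three specific symbols. A minimal sketch of such a file (the function names are mandated by lz4.c; the live-allocation counter is purely illustrative):

```c
/* Compile lz4.c with -DLZ4_USER_MEMORY_FUNCTIONS and link this file in.
 * The names LZ4_malloc/LZ4_calloc/LZ4_free are the ones lz4.c resolves
 * at link time; the counter below is example instrumentation only. */
#include <stdlib.h>
#include <stddef.h>

static size_t g_live_allocs = 0; /* illustrative bookkeeping */

void* LZ4_malloc(size_t s)           { g_live_allocs++; return malloc(s); }
void* LZ4_calloc(size_t n, size_t s) { g_live_allocs++; return calloc(n, s); }
void  LZ4_free(void* p)              { if (p) g_live_allocs--; free(p); }
```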
data/contrib/lz4/lib/liblz4-dll.rc.in (new file)
@@ -0,0 +1,35 @@
+ #include <windows.h>
+
+ // DLL version information.
+ 1 VERSIONINFO
+ FILEVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0
+ PRODUCTVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0
+ FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+ #ifdef _DEBUG
+ FILEFLAGS VS_FF_DEBUG | VS_FF_PRERELEASE
+ #else
+ FILEFLAGS 0
+ #endif
+ FILEOS VOS_NT_WINDOWS32
+ FILETYPE VFT_DLL
+ FILESUBTYPE VFT2_UNKNOWN
+ BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904B0"
+ BEGIN
+ VALUE "CompanyName", "Yann Collet"
+ VALUE "FileDescription", "Extremely fast compression"
+ VALUE "FileVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0"
+ VALUE "InternalName", "@LIBLZ4@"
+ VALUE "LegalCopyright", "Copyright (C) 2013-2016, Yann Collet"
+ VALUE "OriginalFilename", "@LIBLZ4@.dll"
+ VALUE "ProductName", "LZ4"
+ VALUE "ProductVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x0409, 1200
+ END
+ END
data/contrib/lz4/lib/lz4.c
@@ -1,6 +1,6 @@
  /*
  LZ4 - Fast LZ compression algorithm
- Copyright (C) 2011-2017, Yann Collet.
+ Copyright (C) 2011-present, Yann Collet.
 
  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -32,7 +32,6 @@
  - LZ4 source repository : https://github.com/lz4/lz4
  */
 
-
  /*-************************************
  * Tuning parameters
  **************************************/
@@ -46,10 +45,16 @@
  #endif
 
  /*
- * ACCELERATION_DEFAULT :
+ * LZ4_ACCELERATION_DEFAULT :
  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
  */
- #define ACCELERATION_DEFAULT 1
+ #define LZ4_ACCELERATION_DEFAULT 1
+ /*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+ #define LZ4_ACCELERATION_MAX 65537
 
 
  /*-************************************
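Editor's note: the new clamp means any `acceleration` argument passed to the public fast-compression entry point is normalized into `[LZ4_ACCELERATION_DEFAULT, LZ4_ACCELERATION_MAX]`. A hedged sketch of a caller (the value 8 is arbitrary):

```c
#include "lz4.h"

/* acceleration <= 0 is treated as LZ4_ACCELERATION_DEFAULT (1), and,
 * per the fix #876 macro above, anything beyond LZ4_ACCELERATION_MAX
 * (65537) is clamped. Larger values trade compression ratio for speed. */
int compress_fast_example(const char* src, int srcSize,
                          char* dst, int dstCapacity)
{
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 8);
}
```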
@@ -69,9 +74,11 @@
  * Prefer these methods in priority order (0 > 1 > 2)
  */
  #ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
- # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+ # if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
  # define LZ4_FORCE_MEMORY_ACCESS 2
- # elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+ # elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
  # define LZ4_FORCE_MEMORY_ACCESS 1
  # endif
  #endif
@@ -80,14 +87,33 @@
  * LZ4_FORCE_SW_BITCOUNT
  * Define this parameter if your target system or compiler does not support hardware bit count
  */
- #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+ #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+ # undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
  # define LZ4_FORCE_SW_BITCOUNT
  #endif
 
 
+
  /*-************************************
  * Dependency
  **************************************/
+ /*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+ #ifndef LZ4_SRC_INCLUDED
+ # define LZ4_SRC_INCLUDED 1
+ #endif
+
+ #ifndef LZ4_STATIC_LINKING_ONLY
+ #define LZ4_STATIC_LINKING_ONLY
+ #endif
+
+ #ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+ #define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+ #endif
+
+ #define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
  #include "lz4.h"
  /* see also "memory routines" below */
 
@@ -95,27 +121,48 @@
  /*-************************************
  * Compiler Options
  **************************************/
- #ifdef _MSC_VER /* Visual Studio */
- # include <intrin.h>
- # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
- # pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
+ #if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+ # include <intrin.h> /* only present in VS2005+ */
+ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  #endif /* _MSC_VER */
 
- #ifndef FORCE_INLINE
+ #ifndef LZ4_FORCE_INLINE
  # ifdef _MSC_VER /* Visual Studio */
- # define FORCE_INLINE static __forceinline
+ # define LZ4_FORCE_INLINE static __forceinline
  # else
  # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
  # ifdef __GNUC__
- # define FORCE_INLINE static inline __attribute__((always_inline))
+ # define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
  # else
- # define FORCE_INLINE static inline
+ # define LZ4_FORCE_INLINE static inline
  # endif
  # else
- # define FORCE_INLINE static
+ # define LZ4_FORCE_INLINE static
  # endif /* __STDC_VERSION__ */
  # endif /* _MSC_VER */
- #endif /* FORCE_INLINE */
+ #endif /* LZ4_FORCE_INLINE */
+
+ /* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+ #if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+ # define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+ # undef LZ4_FORCE_INLINE
+ # define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
+ #else
+ # define LZ4_FORCE_O2
+ #endif
 
  #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
  # define expect(expr,value) (__builtin_expect ((expr),(value)) )
@@ -123,23 +170,107 @@
  # define expect(expr,value) (expr)
  #endif
 
+ #ifndef likely
  #define likely(expr) expect((expr) != 0, 1)
+ #endif
+ #ifndef unlikely
  #define unlikely(expr) expect((expr) != 0, 0)
+ #endif
+
+ /* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+ #ifndef LZ4_ALIGN_TEST /* can be externally provided */
+ # define LZ4_ALIGN_TEST 1
+ #endif
 
 
  /*-************************************
  * Memory routines
  **************************************/
- #include <stdlib.h> /* malloc, calloc, free */
- #define ALLOCATOR(n,s) calloc(n,s)
- #define FREEMEM free
+ #ifdef LZ4_USER_MEMORY_FUNCTIONS
+ /* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+ void* LZ4_malloc(size_t s);
+ void* LZ4_calloc(size_t n, size_t s);
+ void LZ4_free(void* p);
+ # define ALLOC(s) LZ4_malloc(s)
+ # define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+ # define FREEMEM(p) LZ4_free(p)
+ #else
+ # include <stdlib.h> /* malloc, calloc, free */
+ # define ALLOC(s) malloc(s)
+ # define ALLOC_AND_ZERO(s) calloc(1,s)
+ # define FREEMEM(p) free(p)
+ #endif
+
  #include <string.h> /* memset, memcpy */
- #define MEM_INIT memset
+ #define MEM_INIT(p,v,s) memset((p),(v),(s))
+
+
+ /*-************************************
+ * Common Constants
+ **************************************/
+ #define MINMATCH 4
+
+ #define WILDCOPYLENGTH 8
+ #define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+ #define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+ #define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+ #define FASTLOOP_SAFE_DISTANCE 64
+ static const int LZ4_minLength = (MFLIMIT+1);
+
+ #define KB *(1 <<10)
+ #define MB *(1 <<20)
+ #define GB *(1U<<30)
+
+ #define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+ #if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+ # error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+ #endif
+
+ #define ML_BITS 4
+ #define ML_MASK ((1U<<ML_BITS)-1)
+ #define RUN_BITS (8-ML_BITS)
+ #define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+ /*-************************************
+ * Error detection
+ **************************************/
+ #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+ # include <assert.h>
+ #else
+ # ifndef assert
+ # define assert(condition) ((void)0)
+ # endif
+ #endif
+
+ #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+ #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+ # include <stdio.h>
+ static int g_debuglog_enable = 1;
+ # define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+ #else
+ # define DEBUGLOG(l, ...) {} /* disabled */
+ #endif
+
+ static int LZ4_isAligned(const void* ptr, size_t alignment)
+ {
+ return ((size_t)ptr & (alignment -1)) == 0;
+ }
 
 
  /*-************************************
- * Basic Types
+ * Types
  **************************************/
+ #include <limits.h>
  #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
  # include <stdint.h>
  typedef uint8_t BYTE;
@@ -149,6 +280,9 @@
  typedef uint64_t U64;
  typedef uintptr_t uptrval;
  #else
+ # if UINT_MAX != 4294967295UL
+ # error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+ # endif
  typedef unsigned char BYTE;
  typedef unsigned short U16;
  typedef unsigned int U32;
@@ -163,9 +297,31 @@
  typedef size_t reg_t; /* 32-bits in x32 mode */
  #endif
 
+ typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+ } limitedOutput_directive;
+
+
  /*-************************************
  * Reading and writing into memory
  **************************************/
+
+ /**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+ #if defined(__GNUC__) && (__GNUC__ >= 4)
+ #define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+ #else
+ #define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+ #endif
+
  static unsigned LZ4_isLittleEndian(void)
  {
  const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
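Editor's note: the `LZ4_memcpy` rationale in the hunk above is the standard trick for portable unaligned access. A standalone sketch of the same pattern, independent of lz4.c's internals:

```c
#include <stdint.h>
#include <string.h>

/* A constant-size memcpy like this is recognized by optimizing compilers
 * and collapsed to a single (unaligned-safe) load; routing it through
 * __builtin_memcpy keeps that true even in freestanding builds, as the
 * comment in the hunk above explains. */
static uint32_t load_u32(const void* p)
{
#if defined(__GNUC__) && (__GNUC__ >= 4)
    uint32_t v; __builtin_memcpy(&v, p, sizeof v); return v;
#else
    uint32_t v; memcpy(&v, p, sizeof v); return v;
#endif
}
```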
@@ -196,31 +352,31 @@ static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArc
  static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
  static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
 
- #else /* safe and portable access through memcpy() */
+ #else /* safe and portable access using memcpy() */
 
  static U16 LZ4_read16(const void* memPtr)
  {
- U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
  }
 
  static U32 LZ4_read32(const void* memPtr)
  {
- U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
  }
 
  static reg_t LZ4_read_ARCH(const void* memPtr)
  {
- reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
  }
 
  static void LZ4_write16(void* memPtr, U16 value)
  {
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
  }
 
  static void LZ4_write32(void* memPtr, U32 value)
  {
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
  }
 
  #endif /* LZ4_FORCE_MEMORY_ACCESS */
@@ -247,130 +403,216 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
  }
  }
 
- static void LZ4_copy8(void* dst, const void* src)
- {
- memcpy(dst,src,8);
- }
-
  /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
- static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+ LZ4_FORCE_INLINE
+ void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
  {
  BYTE* d = (BYTE*)dstPtr;
  const BYTE* s = (const BYTE*)srcPtr;
  BYTE* const e = (BYTE*)dstEnd;
 
- do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
  }
 
+ static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
 
- /*-************************************
- * Common Constants
- **************************************/
- #define MINMATCH 4
 
- #define WILDCOPYLENGTH 8
- #define LASTLITERALS 5
- #define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
- static const int LZ4_minLength = (MFLIMIT+1);
+ #ifndef LZ4_FAST_DEC_LOOP
+ # if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+ # define LZ4_FAST_DEC_LOOP 1
+ # elif defined(__aarch64__) && !defined(__clang__)
+ /* On aarch64, we disable this optimization for clang because on certain
+ * mobile chipsets, performance is reduced with clang. For information
+ * refer to https://github.com/lz4/lz4/pull/707 */
+ # define LZ4_FAST_DEC_LOOP 1
+ # else
+ # define LZ4_FAST_DEC_LOOP 0
+ # endif
+ #endif
 
- #define KB *(1 <<10)
- #define MB *(1 <<20)
- #define GB *(1U<<30)
+ #if LZ4_FAST_DEC_LOOP
 
- #define MAXD_LOG 16
- #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+ LZ4_FORCE_INLINE void
+ LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+ {
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
 
- #define ML_BITS 4
- #define ML_MASK ((1U<<ML_BITS)-1)
- #define RUN_BITS (8-ML_BITS)
- #define RUN_MASK ((1U<<RUN_BITS)-1)
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+ }
 
+ /* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+ LZ4_FORCE_INLINE void
+ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+ {
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
 
- /*-************************************
- * Error detection
- **************************************/
- #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+ }
 
- #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
- # include <stdio.h>
- # define DEBUGLOG(l, ...) { \
- if (l<=LZ4_DEBUG) { \
- fprintf(stderr, __FILE__ ": "); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, " \n"); \
- } }
- #else
- # define DEBUGLOG(l, ...) {} /* disabled */
+ /* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+ LZ4_FORCE_INLINE void
+ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+ {
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch(offset) {
+ case 1:
+ MEM_INIT(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+ }
  #endif
 
 
  /*-************************************
  * Common functions
  **************************************/
- static unsigned LZ4_NbCommonBytes (register reg_t val)
+ static unsigned LZ4_NbCommonBytes (reg_t val)
  {
+ assert(val != 0);
  if (LZ4_isLittleEndian()) {
- if (sizeof(val)==8) {
- # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ if (sizeof(val) == 8) {
+ # if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+ # elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
  unsigned long r = 0;
- _BitScanForward64( &r, (U64)val );
- return (int)(r>>3);
- # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctzll((U64)val) >> 3);
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+ # elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
  # else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
  # endif
  } else /* 32 bits */ {
- # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ # if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
  unsigned long r;
- _BitScanForward( &r, (U32)val );
- return (int)(r>>3);
- # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctz((U32)val) >> 3);
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+ # elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
  # else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
  # endif
  }
  } else /* Big Endian CPU */ {
  if (sizeof(val)==8) {
- # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse64( &r, val );
- return (unsigned)(r>>3);
- # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clzll((U64)val) >> 3);
+ # if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
  # else
+ #if 1
+ /* this method is probably faster,
+ * but adds a 128 bytes lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+ #else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * that may end up slowing execution */
+ static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
  unsigned r;
- if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
  if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
  r += (!val);
  return r;
+ #endif
  # endif
  } else /* 32 bits */ {
- # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse( &r, (unsigned long)val );
- return (unsigned)(r>>3);
- # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clz((U32)val) >> 3);
+ # if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz((U32)val) >> 3;
  # else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
  # endif
  }
  }
  }
 
+
  #define STEPSIZE sizeof(reg_t)
- static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+ LZ4_FORCE_INLINE
+ unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
  {
  const BYTE* const pStart = pIn;
 
- while (likely(pIn<pInLimit-(STEPSIZE-1))) {
+ if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn+=STEPSIZE; pMatch+=STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ } }
+
+ while (likely(pIn < pInLimit-(STEPSIZE-1))) {
  reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
  if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
  pIn += LZ4_NbCommonBytes(diff);
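Editor's note: the table-free little-endian fallback introduced above replaces the De Bruijn lookup. A self-contained sketch of that multiply trick (constant and shift match the `# else` branch in the hunk):

```c
#include <stdint.h>
#include <stdio.h>

/* For a non-zero XOR of two words read from little-endian memory, the
 * number of equal leading bytes equals the number of trailing zero bytes
 * of the XOR. `val ^= val - 1` turns the value into a mask covering
 * everything up to its lowest set bit; masking the per-byte carry bits
 * and multiplying by 0x0101...01 sums them into the top byte. */
static unsigned nb_common_bytes_le64(uint64_t val)
{
    const uint64_t m = 0x0101010101010101ULL;
    val ^= val - 1;
    return (unsigned)(((val & (m - 1)) * m) >> 56);
}

int main(void)
{
    /* XOR of two 8-byte sequences agreeing on their first 4 (little-endian) bytes */
    uint64_t diff = 0x1122334400000000ULL;
    printf("%u common bytes\n", nb_common_bytes_le64(diff)); /* prints 4 */
    return 0;
}
```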
@@ -395,15 +637,34 @@ static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression ru
  /*-************************************
  * Local Structures and types
  **************************************/
- typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
- typedef enum { byPtr, byU32, byU16 } tableType_t;
-
- typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+ typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+ /**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously preceding in memory the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
+ * content is in a separate context, pointed to by
+ * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
+ * entries in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+ typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
  typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
 
- typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
- typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-
 
  /*-************************************
  * Local Utils
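Editor's note: the dictionary modes documented in the hunk above are internal; from the caller's side they are driven through the public streaming API. A hedged sketch of external-dictionary compression (`dict`/`src` inputs are hypothetical; all calls are public lz4.h API):

```c
#include "lz4.h"

/* Compress `src` against a preset dictionary. Internally this exercises
 * the usingExtDict / usingDictCtx paths described above; the caller only
 * sees LZ4_loadDict() followed by LZ4_compress_fast_continue(). */
int compress_with_dict(const char* dict, int dictSize,
                       const char* src, int srcSize,
                       char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL) return -1;
    LZ4_loadDict(&stream, dict, dictSize);
    return LZ4_compress_fast_continue(&stream, src, dst,
                                      srcSize, dstCapacity, 1);
}
```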
@@ -411,13 +672,30 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive;
  int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
  const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
  int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
- int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+ int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
+
+
+ /*-************************************
+ * Internal Definitions used in Tests
+ **************************************/
+ #if defined (__cplusplus)
+ extern "C" {
+ #endif
+
+ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
 
+ #if defined (__cplusplus)
+ }
+ #endif
 
  /*-******************************
  * Compression functions
  ********************************/
- static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+ LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
  {
  if (tableType == byU16)
  return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
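Editor's note: a tiny sketch exercising the utility functions touched in the hunk above (all public lz4.h API):

```c
#include "lz4.h"
#include <stdio.h>

int main(void)
{
    /* LZ4_versionNumber()/LZ4_versionString() report the bundled library
     * version; LZ4_compressBound() gives the worst-case compressed size
     * for a given input size. */
    printf("liblz4 %s (%d)\n", LZ4_versionString(), LZ4_versionNumber());
    printf("compressBound(65536) = %d\n", LZ4_compressBound(65536));
    return 0;
}
```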
@@ -425,104 +703,222 @@ static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
425
703
  return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
426
704
  }
427
705
 
428
- static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
706
+ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
429
707
  {
430
- static const U64 prime5bytes = 889523592379ULL;
431
- static const U64 prime8bytes = 11400714785074694791ULL;
432
708
  const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
433
- if (LZ4_isLittleEndian())
709
+ if (LZ4_isLittleEndian()) {
710
+ const U64 prime5bytes = 889523592379ULL;
434
711
  return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
435
- else
712
+ } else {
713
+ const U64 prime8bytes = 11400714785074694791ULL;
436
714
  return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
715
+ }
437
716
  }
438
717
 
439
- FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
718
+ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
440
719
  {
441
720
  if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
442
721
  return LZ4_hash4(LZ4_read32(p), tableType);
443
722
  }
444
723
 
445
- static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+ LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+ {
+     switch (tableType)
+     {
+     default: /* fallthrough */
+     case clearedTable: { /* illegal! */ assert(0); return; }
+     case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+     case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+     }
+ }
+
+ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+ {
+     switch (tableType)
+     {
+     default: /* fallthrough */
+     case clearedTable: /* fallthrough */
+     case byPtr: { /* illegal! */ assert(0); return; }
+     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+     case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+     }
+ }
+
+ LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+                                             void* tableBase, tableType_t const tableType,
+                                             const BYTE* srcBase)
  {
      switch (tableType)
      {
+     case clearedTable: { /* illegal! */ assert(0); return; }
      case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
      case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
      case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
      }
  }

- FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
  {
      U32 const h = LZ4_hashPosition(p, tableType);
      LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
  }

- static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ /* LZ4_getIndexOnHash() :
+  * Index of match position registered in hash table.
+  * hash position must be calculated by using base+index, or dictBase+index.
+  * Assumption 1 : only valid if tableType == byU32 or byU16.
+  * Assumption 2 : h is presumed valid (within limits of hash table)
+  */
+ LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+ {
+     LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+     if (tableType == byU32) {
+         const U32* const hashTable = (const U32*) tableBase;
+         assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+         return hashTable[h];
+     }
+     if (tableType == byU16) {
+         const U16* const hashTable = (const U16*) tableBase;
+         assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+         return hashTable[h];
+     }
+     assert(0); return 0;   /* forbidden case */
+ }
+
+ static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
  {
-     if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
-     if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
-     { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
+     if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+     if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+     { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
  }

- FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ LZ4_FORCE_INLINE const BYTE*
+ LZ4_getPosition(const BYTE* p,
+                 const void* tableBase, tableType_t tableType,
+                 const BYTE* srcBase)
  {
      U32 const h = LZ4_hashPosition(p, tableType);
      return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
  }

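For orientation, the asserts above pin down the byte budget of the shared hash table: LZ4_MEMORY_USAGE bytes hold 1<<(LZ4_MEMORY_USAGE-2) U32 slots, or 1<<(LZ4_MEMORY_USAGE-1) U16 slots. A minimal sketch checking that arithmetic with the default value of 14 (the macro names below are local stand-ins, not lz4 identifiers):

#include <assert.h>
#include <stdint.h>

#define MEMORY_USAGE 14                   /* stand-in for LZ4_MEMORY_USAGE (16 KB table) */
#define TABLE_BYTES  (1 << MEMORY_USAGE)

int main(void) {
    /* the same buffer holds 4K 32-bit slots or 8K 16-bit slots,
     * matching the bounds asserted in LZ4_getIndexOnHash() */
    assert(TABLE_BYTES / (int)sizeof(uint32_t) == (1 << (MEMORY_USAGE - 2)));
    assert(TABLE_BYTES / (int)sizeof(uint16_t) == (1 << (MEMORY_USAGE - 1)));
    return 0;
}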
+ LZ4_FORCE_INLINE void
+ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+                  const int inputSize,
+                  const tableType_t tableType) {
+     /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+      * therefore safe to use no matter what mode we're in. Otherwise, we figure
+      * out if it's safe to leave as is or whether it needs to be reset.
+      */
+     if ((tableType_t)cctx->tableType != clearedTable) {
+         assert(inputSize >= 0);
+         if ((tableType_t)cctx->tableType != tableType
+           || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+           || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+           || tableType == byPtr
+           || inputSize >= 4 KB)
+         {
+             DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+             MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+             cctx->currentOffset = 0;
+             cctx->tableType = (U32)clearedTable;
+         } else {
+             DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+         }
+     }
+
+     /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
+      * than compressing without a gap. However, compressing with
+      * currentOffset == 0 is faster still, so we preserve that case.
+      */
+     if (cctx->currentOffset != 0 && tableType == byU32) {
+         DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+         cctx->currentOffset += 64 KB;
+     }
+
+     /* Finally, clear history */
+     cctx->dictCtx = NULL;
+     cctx->dictionary = NULL;
+     cctx->dictSize = 0;
+ }

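Why 64 KB specifically: a candidate is only accepted when matchIndex + LZ4_DISTANCE_MAX >= current, so advancing currentOffset by one full window puts every stale entry just out of reach. A minimal arithmetic sketch of that invariant (constants mirror the default 64 KB window):

#include <assert.h>
#include <stdint.h>

#define DISTANCE_MAX 65535u   /* LZ4_DISTANCE_MAX for the default window */

int main(void) {
    uint32_t currentOffset = 1000;                /* wherever the previous block ended */
    uint32_t const newestStale = currentOffset;   /* highest index the old table can hold */

    currentOffset += 64 * 1024;                   /* the gap added by LZ4_prepareTable() */

    /* every pre-gap index now fails the "too far" test, so the table
     * can be reused without being wiped */
    assert(newestStale + DISTANCE_MAX < currentOffset);
    return 0;
}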
  /** LZ4_compress_generic() :
-     inlined, to ensure branches are decided at compilation time */
- FORCE_INLINE int LZ4_compress_generic(
+  *  inlined, to ensure branches are decided at compilation time.
+  *  Presumed already validated at this stage:
+  *  - source != NULL
+  *  - inputSize > 0
+  */
+ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
      LZ4_stream_t_internal* const cctx,
      const char* const source,
      char* const dest,
      const int inputSize,
+     int *inputConsumed, /* only written when outputDirective == fillOutput */
      const int maxOutputSize,
-     const limitedOutput_directive outputLimited,
+     const limitedOutput_directive outputDirective,
      const tableType_t tableType,
-     const dict_directive dict,
+     const dict_directive dictDirective,
      const dictIssue_directive dictIssue,
-     const U32 acceleration)
+     const int acceleration)
  {
+     int result;
      const BYTE* ip = (const BYTE*) source;
-     const BYTE* base;
+
+     U32 const startIndex = cctx->currentOffset;
+     const BYTE* base = (const BYTE*) source - startIndex;
      const BYTE* lowLimit;
-     const BYTE* const lowRefLimit = ip - cctx->dictSize;
-     const BYTE* const dictionary = cctx->dictionary;
-     const BYTE* const dictEnd = dictionary + cctx->dictSize;
-     const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
+
+     const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+     const BYTE* const dictionary =
+         dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+     const U32 dictSize =
+         dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+     const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
+
+     int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+     U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
+     const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
      const BYTE* anchor = (const BYTE*) source;
      const BYTE* const iend = ip + inputSize;
-     const BYTE* const mflimit = iend - MFLIMIT;
+     const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
      const BYTE* const matchlimit = iend - LASTLITERALS;

+     /* the dictCtx currentOffset is indexed on the start of the dictionary,
+      * while a dictionary in the current context precedes the currentOffset */
+     const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
+                             dictionary + dictSize - dictCtx->currentOffset :
+                             dictionary + dictSize - startIndex;
+
      BYTE* op = (BYTE*) dest;
      BYTE* const olimit = op + maxOutputSize;

+     U32 offset = 0;
      U32 forwardH;

-     /* Init conditions */
-     if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
-     switch(dict)
-     {
-     case noDict:
-     default:
-         base = (const BYTE*)source;
-         lowLimit = (const BYTE*)source;
-         break;
-     case withPrefix64k:
-         base = (const BYTE*)source - cctx->currentOffset;
-         lowLimit = (const BYTE*)source - cctx->dictSize;
-         break;
-     case usingExtDict:
-         base = (const BYTE*)source - cctx->currentOffset;
-         lowLimit = (const BYTE*)source;
-         break;
+     DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+     assert(ip != NULL);
+     /* If init conditions are not met, we don't have to mark stream
+      * as having dirty context, since no action was taken yet */
+     if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; }   /* Impossible to store anything */
+     if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }   /* Size too large (not within 64K limit) */
+     if (tableType==byPtr) assert(dictDirective==noDict);   /* only supported use case with byPtr */
+     assert(acceleration >= 1);
+
+     lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+     /* Update context state */
+     if (dictDirective == usingDictCtx) {
+         /* Subsequent linked blocks can't use the dictionary. */
+         /* Instead, they use the block we just compressed. */
+         cctx->dictCtx = NULL;
+         cctx->dictSize = (U32)inputSize;
+     } else {
+         cctx->dictSize += (U32)inputSize;
      }
-     if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
-     if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */
+     cctx->currentOffset += (U32)inputSize;
+     cctx->tableType = (U32)tableType;
+
+     if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

      /* First Byte */
      LZ4_putPosition(ip, cctx->hashTable, tableType, base);
@@ -530,50 +926,112 @@ FORCE_INLINE int LZ4_compress_generic(

      /* Main Loop */
      for ( ; ; ) {
-         ptrdiff_t refDelta = 0;
          const BYTE* match;
          BYTE* token;
+         const BYTE* filledIp;

          /* Find a match */
-         { const BYTE* forwardIp = ip;
-             unsigned step = 1;
-             unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+         if (tableType == byPtr) {
+             const BYTE* forwardIp = ip;
+             int step = 1;
+             int searchMatchNb = acceleration << LZ4_skipTrigger;
              do {
                  U32 const h = forwardH;
                  ip = forwardIp;
                  forwardIp += step;
                  step = (searchMatchNb++ >> LZ4_skipTrigger);

-                 if (unlikely(forwardIp > mflimit)) goto _last_literals;
+                 if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                 assert(ip < mflimitPlusOne);

                  match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
-                 if (dict==usingExtDict) {
-                     if (match < (const BYTE*)source) {
-                         refDelta = dictDelta;
+                 forwardH = LZ4_hashPosition(forwardIp, tableType);
+                 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+             } while ( (match+LZ4_DISTANCE_MAX < ip)
+                    || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+         } else {   /* byU32, byU16 */
+
+             const BYTE* forwardIp = ip;
+             int step = 1;
+             int searchMatchNb = acceleration << LZ4_skipTrigger;
+             do {
+                 U32 const h = forwardH;
+                 U32 const current = (U32)(forwardIp - base);
+                 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                 assert(matchIndex <= current);
+                 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                 ip = forwardIp;
+                 forwardIp += step;
+                 step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                 if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                 assert(ip < mflimitPlusOne);
+
+                 if (dictDirective == usingDictCtx) {
+                     if (matchIndex < startIndex) {
+                         /* there was no match, try the dictionary */
+                         assert(tableType == byU32);
+                         matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                         match = dictBase + matchIndex;
+                         matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                          lowLimit = dictionary;
                      } else {
-                         refDelta = 0;
+                         match = base + matchIndex;
                          lowLimit = (const BYTE*)source;
-                 }   }
+                     }
+                 } else if (dictDirective==usingExtDict) {
+                     if (matchIndex < startIndex) {
+                         DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+                         assert(startIndex - matchIndex >= MINMATCH);
+                         match = dictBase + matchIndex;
+                         lowLimit = dictionary;
+                     } else {
+                         match = base + matchIndex;
+                         lowLimit = (const BYTE*)source;
+                     }
+                 } else {   /* single continuous memory segment */
+                     match = base + matchIndex;
+                 }
                  forwardH = LZ4_hashPosition(forwardIp, tableType);
-                 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+                 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+                 DEBUGLOG(7, "candidate at pos=%u (offset=%u)\n", matchIndex, current - matchIndex);
+                 if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }   /* match outside of valid area */
+                 assert(matchIndex < current);
+                 if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+                   && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+                     continue;
+                 }   /* too far */
+                 assert((current - matchIndex) <= LZ4_DISTANCE_MAX);   /* match now expected within distance */
+
+                 if (LZ4_read32(match) == LZ4_read32(ip)) {
+                     if (maybe_extMem) offset = current - matchIndex;
+                     break;   /* match found */
+                 }

-             } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
-                 || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                 || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+             } while(1);
          }

          /* Catch up */
-         while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+         filledIp = ip;
+         while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

          /* Encode Literals */
          {   unsigned const litLength = (unsigned)(ip - anchor);
              token = op++;
-             if ((outputLimited) &&  /* Check output buffer overflow */
-                 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
-                 return 0;
+             if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
+                 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+                 return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+             }
+             if ((outputDirective == fillOutput) &&
+                 (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+                 op--;
+                 goto _last_literals;
+             }
              if (litLength >= RUN_MASK) {
-                 int len = (int)litLength-RUN_MASK;
+                 int len = (int)(litLength - RUN_MASK);
                  *token = (RUN_MASK<<ML_BITS);
                  for(; len >= 255 ; len-=255) *op++ = 255;
                  *op++ = (BYTE)len;
@@ -581,82 +1039,184 @@ FORCE_INLINE int LZ4_compress_generic(
              else *token = (BYTE)(litLength<<ML_BITS);

              /* Copy Literals */
-             LZ4_wildCopy(op, anchor, op+litLength);
+             LZ4_wildCopy8(op, anchor, op+litLength);
              op+=litLength;
+             DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                         (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
          }

  _next_match:
+         /* at this stage, the following variables must be correctly set :
+          * - ip : at start of LZ operation
+          * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+          * - offset : if maybe_ext_memSegment==1 (constant)
+          * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+          * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+          */
+
+         if ((outputDirective == fillOutput) &&
+             (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+             /* the match was too close to the end, rewind and go to last literals */
+             op = token;
+             goto _last_literals;
+         }
+
          /* Encode Offset */
-         LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+         if (maybe_extMem) {   /* static test */
+             DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+             assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+             LZ4_writeLE16(op, (U16)offset); op+=2;
+         } else {
+             DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
+             assert(ip-match <= LZ4_DISTANCE_MAX);
+             LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+         }

          /* Encode MatchLength */
          {   unsigned matchCode;

-             if ((dict==usingExtDict) && (lowLimit==dictionary)) {
-                 const BYTE* limit;
-                 match += refDelta;
-                 limit = ip + (dictEnd-match);
+             if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+               && (lowLimit==dictionary) /* match within extDict */ ) {
+                 const BYTE* limit = ip + (dictEnd-match);
+                 assert(dictEnd > match);
                  if (limit > matchlimit) limit = matchlimit;
                  matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
-                 ip += MINMATCH + matchCode;
+                 ip += (size_t)matchCode + MINMATCH;
                  if (ip==limit) {
-                     unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+                     unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                      matchCode += more;
                      ip += more;
                  }
+                 DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
              } else {
                  matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-                 ip += MINMATCH + matchCode;
+                 ip += (size_t)matchCode + MINMATCH;
+                 DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
              }

-             if ( outputLimited &&    /* Check output buffer overflow */
-                 (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
-                 return 0;
+             if ((outputDirective) &&    /* Check output buffer overflow */
+                 (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+                 if (outputDirective == fillOutput) {
+                     /* Match description too long : reduce it */
+                     U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+                     ip -= matchCode - newMatchCode;
+                     assert(newMatchCode < matchCode);
+                     matchCode = newMatchCode;
+                     if (unlikely(ip <= filledIp)) {
+                         /* We have already filled up to filledIp so if ip ends up less than filledIp
+                          * we have positions in the hash table beyond the current position. This is
+                          * a problem if we reuse the hash table. So we have to remove these positions
+                          * from the hash table.
+                          */
+                         const BYTE* ptr;
+                         DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+                         for (ptr = ip; ptr <= filledIp; ++ptr) {
+                             U32 const h = LZ4_hashPosition(ptr, tableType);
+                             LZ4_clearHash(h, cctx->hashTable, tableType);
+                         }
+                     }
+                 } else {
+                     assert(outputDirective == limitedOutput);
+                     return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+                 }
+             }
              if (matchCode >= ML_MASK) {
                  *token += ML_MASK;
                  matchCode -= ML_MASK;
                  LZ4_write32(op, 0xFFFFFFFF);
-                 while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
+                 while (matchCode >= 4*255) {
+                     op+=4;
+                     LZ4_write32(op, 0xFFFFFFFF);
+                     matchCode -= 4*255;
+                 }
                  op += matchCode / 255;
                  *op++ = (BYTE)(matchCode % 255);
              } else
                  *token += (BYTE)(matchCode);
          }
+         /* Ensure we have enough space for the last literals. */
+         assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));

          anchor = ip;

          /* Test end of chunk */
-         if (ip > mflimit) break;
+         if (ip >= mflimitPlusOne) break;

          /* Fill table */
          LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

          /* Test next position */
-         match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
-         if (dict==usingExtDict) {
-             if (match < (const BYTE*)source) {
-                 refDelta = dictDelta;
-                 lowLimit = dictionary;
-             } else {
-                 refDelta = 0;
-                 lowLimit = (const BYTE*)source;
-         }   }
-         LZ4_putPosition(ip, cctx->hashTable, tableType, base);
-         if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
-             && (match+MAX_DISTANCE>=ip)
-             && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
-         { token=op++; *token=0; goto _next_match; }
+         if (tableType == byPtr) {
+
+             match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+             LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+             if ( (match+LZ4_DISTANCE_MAX >= ip)
+               && (LZ4_read32(match) == LZ4_read32(ip)) )
+             { token=op++; *token=0; goto _next_match; }
+
+         } else {   /* byU32, byU16 */
+
+             U32 const h = LZ4_hashPosition(ip, tableType);
+             U32 const current = (U32)(ip-base);
+             U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+             assert(matchIndex < current);
+             if (dictDirective == usingDictCtx) {
+                 if (matchIndex < startIndex) {
+                     /* there was no match, try the dictionary */
+                     matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                     match = dictBase + matchIndex;
+                     lowLimit = dictionary;   /* required for match length counter */
+                     matchIndex += dictDelta;
+                 } else {
+                     match = base + matchIndex;
+                     lowLimit = (const BYTE*)source;   /* required for match length counter */
+                 }
+             } else if (dictDirective==usingExtDict) {
+                 if (matchIndex < startIndex) {
+                     match = dictBase + matchIndex;
+                     lowLimit = dictionary;   /* required for match length counter */
+                 } else {
+                     match = base + matchIndex;
+                     lowLimit = (const BYTE*)source;   /* required for match length counter */
+                 }
+             } else {   /* single memory segment */
+                 match = base + matchIndex;
+             }
+             LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+             assert(matchIndex < current);
+             if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+               && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+               && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+                 token=op++;
+                 *token=0;
+                 if (maybe_extMem) offset = current - matchIndex;
+                 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                             (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+                 goto _next_match;
+             }
+         }

          /* Prepare next loop */
          forwardH = LZ4_hashPosition(++ip, tableType);
+
      }

  _last_literals:
      /* Encode Last Literals */
-     {   size_t const lastRun = (size_t)(iend - anchor);
-         if ( (outputLimited) &&  /* Check output buffer overflow */
-             ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
-             return 0;
+     {   size_t lastRun = (size_t)(iend - anchor);
+         if ( (outputDirective) &&  /* Check output buffer overflow */
+             (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+             if (outputDirective == fillOutput) {
+                 /* adapt lastRun to fill 'dst' */
+                 assert(olimit >= op);
+                 lastRun  = (size_t)(olimit-op) - 1 /*token*/;
+                 lastRun -= (lastRun + 256 - RUN_MASK) / 256;   /*additional length tokens*/
+             } else {
+                 assert(outputDirective == limitedOutput);
+                 return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+             }
+         }
+         DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
          if (lastRun >= RUN_MASK) {
              size_t accumulator = lastRun - RUN_MASK;
              *op++ = RUN_MASK << ML_BITS;
@@ -665,252 +1225,182 @@ _last_literals:
          } else {
              *op++ = (BYTE)(lastRun<<ML_BITS);
          }
-         memcpy(op, anchor, lastRun);
+         LZ4_memcpy(op, anchor, lastRun);
+         ip = anchor + lastRun;
          op += lastRun;
      }

-     /* End */
-     return (int) (((char*)op)-dest);
+     if (outputDirective == fillOutput) {
+         *inputConsumed = (int) (((const char*)ip)-source);
+     }
+     result = (int)(((char*)op) - dest);
+     assert(result > 0);
+     DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+     return result;
+ }
+
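The literal- and match-length fields written above share one format: a 4-bit value in the token (RUN_MASK or ML_MASK, both 15), extended by 255-valued bytes plus a final remainder byte once it saturates. A minimal sketch of just that length encoding (write_length is a hypothetical helper, not an lz4 function):

#include <stdint.h>
#include <stddef.h>

/* Emit the extension bytes that follow a saturated 4-bit length field. */
static size_t write_length(uint8_t* dst, unsigned length, unsigned field_max /* 15 */)
{
    size_t n = 0;
    if (length >= field_max) {
        unsigned rest = length - field_max;
        for (; rest >= 255; rest -= 255) dst[n++] = 255;
        dst[n++] = (uint8_t)rest;   /* 0 is legal and required, e.g. for length 15 */
    }
    return n;
}

/* length 15  -> field 15, extra byte 0x00
 * length 270 -> field 15, extra bytes 0xFF 0x00 */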
+ /** LZ4_compress_generic() :
+  *  inlined, to ensure branches are decided at compilation time;
+  *  takes care of src == (NULL, 0)
+  *  and forwards the rest to LZ4_compress_generic_validated */
+ LZ4_FORCE_INLINE int LZ4_compress_generic(
+     LZ4_stream_t_internal* const cctx,
+     const char* const src,
+     char* const dst,
+     const int srcSize,
+     int *inputConsumed, /* only written when outputDirective == fillOutput */
+     const int dstCapacity,
+     const limitedOutput_directive outputDirective,
+     const tableType_t tableType,
+     const dict_directive dictDirective,
+     const dictIssue_directive dictIssue,
+     const int acceleration)
+ {
+     DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+                 srcSize, dstCapacity);
+
+     if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }   /* Unsupported srcSize, too large (or negative) */
+     if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
+         if (outputDirective != notLimited && dstCapacity <= 0) return 0;   /* no output, can't write anything */
+         DEBUGLOG(5, "Generating an empty block");
+         assert(outputDirective == notLimited || dstCapacity >= 1);
+         assert(dst != NULL);
+         dst[0] = 0;
+         if (outputDirective == fillOutput) {
+             assert (inputConsumed != NULL);
+             *inputConsumed = 0;
+         }
+         return 1;
+     }
+     assert(src != NULL);
+
+     return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+                 inputConsumed, /* only written into if outputDirective == fillOutput */
+                 dstCapacity, outputDirective,
+                 tableType, dictDirective, dictIssue, acceleration);
  }


  int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
  {
-     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
-     LZ4_resetStream((LZ4_stream_t*)state);
-     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
-
+     LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+     assert(ctx != NULL);
+     if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
      if (maxOutputSize >= LZ4_compressBound(inputSize)) {
-         if (inputSize < LZ4_64Klimit)
-             return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
-         else
-             return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+         if (inputSize < LZ4_64Klimit) {
+             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+         } else {
+             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+         }
      } else {
-         if (inputSize < LZ4_64Klimit)
-             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
-         else
-             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+         if (inputSize < LZ4_64Klimit) {
+             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+         } else {
+             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+         }
      }
  }

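For reference, a minimal caller of the entry point above; only LZ4_sizeofState(), LZ4_compressBound() and LZ4_compress_fast_extState() from lz4.h are assumed, and the buffer sizes are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include "lz4.h"

int main(void) {
    const char src[] = "yada yada yada yada yada yada";
    char dst[64];                                    /* >= LZ4_compressBound(sizeof(src)) */
    void* const state = malloc((size_t)LZ4_sizeofState());   /* caller-owned scratch */
    if (state == NULL) return 1;

    int const csize = LZ4_compress_fast_extState(state, src, dst,
                          (int)sizeof(src), (int)sizeof(dst), 1 /* acceleration */);
    printf("%d -> %d bytes\n", (int)sizeof(src), csize);
    free(state);
    return csize > 0 ? 0 : 1;
}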
-
- int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
- {
- #if (LZ4_HEAPMODE)
-     void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
- #else
-     LZ4_stream_t ctx;
-     void* const ctxPtr = &ctx;
- #endif
-
-     int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
-
- #if (LZ4_HEAPMODE)
-     FREEMEM(ctxPtr);
- #endif
-     return result;
- }
-
-
- int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
- {
-     return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
- }
-
-
- /* hidden debug function */
- /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
- int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
- {
-     LZ4_stream_t ctx;
-     LZ4_resetStream(&ctx);
-
-     if (inputSize < LZ4_64Klimit)
-         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
-     else
-         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
- }
-
-
- /*-******************************
- *  *_destSize() variant
- ********************************/
-
- static int LZ4_compress_destSize_generic(
-     LZ4_stream_t_internal* const ctx,
-     const char* const src,
-     char* const dst,
-     int* const srcSizePtr,
-     const int targetDstSize,
-     const tableType_t tableType)
- {
-     const BYTE* ip = (const BYTE*) src;
-     const BYTE* base = (const BYTE*) src;
-     const BYTE* lowLimit = (const BYTE*) src;
-     const BYTE* anchor = ip;
-     const BYTE* const iend = ip + *srcSizePtr;
-     const BYTE* const mflimit = iend - MFLIMIT;
-     const BYTE* const matchlimit = iend - LASTLITERALS;
-
-     BYTE* op = (BYTE*) dst;
-     BYTE* const oend = op + targetDstSize;
-     BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
-     BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
-     BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
-
-     U32 forwardH;
-
-
-     /* Init conditions */
-     if (targetDstSize < 1) return 0;   /* Impossible to store anything */
-     if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported input size, too large (or negative) */
-     if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
-     if (*srcSizePtr<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */
-
-     /* First Byte */
-     *srcSizePtr = 0;
-     LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-     ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
-     /* Main Loop */
-     for ( ; ; ) {
-         const BYTE* match;
-         BYTE* token;
-
-         /* Find a match */
-         {   const BYTE* forwardIp = ip;
-             unsigned step = 1;
-             unsigned searchMatchNb = 1 << LZ4_skipTrigger;
-
-             do {
-                 U32 h = forwardH;
-                 ip = forwardIp;
-                 forwardIp += step;
-                 step = (searchMatchNb++ >> LZ4_skipTrigger);
-
-                 if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
-                 match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
-                 forwardH = LZ4_hashPosition(forwardIp, tableType);
-                 LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
-
-             } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                 || (LZ4_read32(match) != LZ4_read32(ip)) );
-         }
-
-         /* Catch up */
-         while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
-         /* Encode Literal length */
-         {   unsigned litLength = (unsigned)(ip - anchor);
-             token = op++;
-             if (op + ((litLength+240)/255) + litLength > oMaxLit) {
-                 /* Not enough space for a last match */
-                 op--;
-                 goto _last_literals;
-             }
-             if (litLength>=RUN_MASK) {
-                 unsigned len = litLength - RUN_MASK;
-                 *token=(RUN_MASK<<ML_BITS);
-                 for(; len >= 255 ; len-=255) *op++ = 255;
-                 *op++ = (BYTE)len;
-             }
-             else *token = (BYTE)(litLength<<ML_BITS);
-
-             /* Copy Literals */
-             LZ4_wildCopy(op, anchor, op+litLength);
-             op += litLength;
-         }
-
- _next_match:
-         /* Encode Offset */
-         LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
-         /* Encode MatchLength */
-         {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-
-             if (op + ((matchLength+240)/255) > oMaxMatch) {
-                 /* Match description too long : reduce it */
-                 matchLength = (15-1) + (oMaxMatch-op) * 255;
-             }
-             ip += MINMATCH + matchLength;
-
-             if (matchLength>=ML_MASK) {
-                 *token += ML_MASK;
-                 matchLength -= ML_MASK;
-                 while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
-                 *op++ = (BYTE)matchLength;
-             }
-             else *token += (BYTE)(matchLength);
-         }
+ /**
+  * LZ4_compress_fast_extState_fastReset() :
+  * A variant of LZ4_compress_fast_extState().
+  *
+  * Using this variant avoids an expensive initialization step. It is only safe
+  * to call if the state buffer is known to be correctly initialized already
+  * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+  * "correctly initialized").
+  */
+ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+ {
+     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+     if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+     if (dstCapacity >= LZ4_compressBound(srcSize)) {
+         if (srcSize < LZ4_64Klimit) {
+             const tableType_t tableType = byU16;
+             LZ4_prepareTable(ctx, srcSize, tableType);
+             if (ctx->currentOffset) {
+                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+             } else {
+                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+             }
+         } else {
+             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+             LZ4_prepareTable(ctx, srcSize, tableType);
+             return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+         }
+     } else {
+         if (srcSize < LZ4_64Klimit) {
+             const tableType_t tableType = byU16;
+             LZ4_prepareTable(ctx, srcSize, tableType);
+             if (ctx->currentOffset) {
+                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+             } else {
+                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+             }
+         } else {
+             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+             LZ4_prepareTable(ctx, srcSize, tableType);
+             return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+         }
+     }
+ }
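A sketch of the reuse pattern this variant enables: one stream object, initialized once, amortized over many independent blocks (sizes illustrative; error handling minimal):

#include "lz4.h"

/* Compress n independent 4 KB pages, paying the full state wipe only once. */
static int compress_pages(const char (*pages)[4096], char* dst, int dstCap, int n)
{
    LZ4_stream_t* const st = LZ4_createStream();   /* "correctly initialized" state */
    int i, total = 0;
    if (st == NULL) return -1;
    for (i = 0; i < n; i++) {
        /* _fastReset() path: LZ4_prepareTable() instead of re-zeroing the table */
        int const c = LZ4_compress_fast_extState_fastReset(st, pages[i], dst + total,
                                                           4096, dstCap - total, 1);
        if (c <= 0) { LZ4_freeStream(st); return -1; }
        total += c;
    }
    LZ4_freeStream(st);
    return total;
}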
 
-         anchor = ip;
-
-         /* Test end of block */
-         if (ip > mflimit) break;
-         if (op > oMaxSeq) break;
-
-         /* Fill table */
-         LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
-
-         /* Test next position */
-         match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
-         LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-         if ( (match+MAX_DISTANCE>=ip)
-             && (LZ4_read32(match)==LZ4_read32(ip)) )
-         { token=op++; *token=0; goto _next_match; }
-
-         /* Prepare next loop */
-         forwardH = LZ4_hashPosition(++ip, tableType);
-     }
-
- _last_literals:
-     /* Encode Last Literals */
-     {   size_t lastRunSize = (size_t)(iend - anchor);
-         if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
-             /* adapt lastRunSize to fill 'dst' */
-             lastRunSize = (oend-op) - 1;
-             lastRunSize -= (lastRunSize+240)/255;
-         }
-         ip = anchor + lastRunSize;
-
-         if (lastRunSize >= RUN_MASK) {
-             size_t accumulator = lastRunSize - RUN_MASK;
-             *op++ = RUN_MASK << ML_BITS;
-             for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
-             *op++ = (BYTE) accumulator;
-         } else {
-             *op++ = (BYTE)(lastRunSize<<ML_BITS);
-         }
-         memcpy(op, anchor, lastRunSize);
-         op += lastRunSize;
-     }
-
-     /* End */
-     *srcSizePtr = (int) (((const char*)ip)-src);
-     return (int) (((char*)op)-dst);
+ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+ {
+     int result;
+ #if (LZ4_HEAPMODE)
+     LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
+     if (ctxPtr == NULL) return 0;
+ #else
+     LZ4_stream_t ctx;
+     LZ4_stream_t* const ctxPtr = &ctx;
+ #endif
+     result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+
+ #if (LZ4_HEAPMODE)
+     FREEMEM(ctxPtr);
+ #endif
+     return result;
+ }
+
+
+ int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+ {
+     return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
  }


+ /* Note!: This function leaves the stream in an unclean/broken state!
+  * It is not safe to subsequently use the same state with a _fastReset() or
+  * _continue() call without resetting it. */
  static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
  {
-     LZ4_resetStream(state);
+     void* const s = LZ4_initStream(state, sizeof (*state));
+     assert(s != NULL); (void)s;

      if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {   /* compression success is guaranteed */
          return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
      } else {
-         if (*srcSizePtr < LZ4_64Klimit)
-             return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
-         else
-             return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
-     }
+         if (*srcSizePtr < LZ4_64Klimit) {
+             return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+         } else {
+             tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+             return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+     }   }
  }

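The fillOutput directive used here is what backs the public LZ4_compress_destSize() that follows: the output budget is fixed and the function reports how much input it managed to consume. A minimal usage sketch (sizes illustrative):

#include <stdio.h>
#include "lz4.h"

int main(void) {
    char src[1000];
    char dst[128];                    /* hard output budget */
    int  srcSize = (int)sizeof(src);  /* in: available input; out: input consumed */
    int  i;

    for (i = 0; i < (int)sizeof(src); i++) src[i] = (char)(i % 7);   /* compressible */

    {   int const csize = LZ4_compress_destSize(src, dst, &srcSize, (int)sizeof(dst));
        printf("consumed %d input bytes into %d output bytes\n", srcSize, csize);
        return csize > 0 ? 0 : 1;
    }
}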
  int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
  {
  #if (LZ4_HEAPMODE)
-     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
+     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
+     if (ctx == NULL) return 0;
  #else
      LZ4_stream_t ctxBody;
      LZ4_stream_t* ctx = &ctxBody;
@@ -932,20 +1422,50 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe

  LZ4_stream_t* LZ4_createStream(void)
  {
-     LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
+     LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
      LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
-     LZ4_resetStream(lz4s);
+     DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+     if (lz4s == NULL) return NULL;
+     LZ4_initStream(lz4s, sizeof(*lz4s));
      return lz4s;
  }

+ static size_t LZ4_stream_t_alignment(void)
+ {
+ #if LZ4_ALIGN_TEST
+     typedef struct { char c; LZ4_stream_t t; } t_a;
+     return sizeof(t_a) - sizeof(LZ4_stream_t);
+ #else
+     return 1;   /* effectively disabled */
+ #endif
+ }
+
+ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
+ {
+     DEBUGLOG(5, "LZ4_initStream");
+     if (buffer == NULL) { return NULL; }
+     if (size < sizeof(LZ4_stream_t)) { return NULL; }
+     if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+     MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+     return (LZ4_stream_t*)buffer;
+ }
+
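The t_a trick above is the classic pre-C11 way to recover a type's alignment: the padding a compiler inserts between a leading char and the payload equals the payload's alignment requirement. A standalone illustration (payload_t is a stand-in type, not an lz4 one):

#include <stdio.h>

typedef struct { double d; char c[24]; } payload_t;   /* stand-in for LZ4_stream_t */
typedef struct { char c; payload_t t; } wrapper_t;    /* same shape as t_a above */

int main(void) {
    /* sizeof(wrapper_t) - sizeof(payload_t) == padding after 'c' == alignof(payload_t) */
    printf("alignment of payload_t: %u\n",
           (unsigned)(sizeof(wrapper_t) - sizeof(payload_t)));
    return 0;
}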
+ /* resetStream is now deprecated,
+  * prefer initStream() which is more general */
  void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
  {
-     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
+     DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
+     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+ }
+
+ void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+     LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
  }

  int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
  {
      if (!LZ4_stream) return 0;   /* support free on NULL */
+     DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
      FREEMEM(LZ4_stream);
      return (0);
  }
@@ -955,43 +1475,82 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
  int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
  {
      LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+     const tableType_t tableType = byU32;
      const BYTE* p = (const BYTE*)dictionary;
      const BYTE* const dictEnd = p + dictSize;
      const BYTE* base;

-     if ((dict->initCheck) || (dict->currentOffset > 1 GB))   /* Uninitialized structure, or reuse overflow */
-         LZ4_resetStream(LZ4_dict);
+     DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
+
+     /* It's necessary to reset the context,
+      * and not just continue it with prepareTable()
+      * to avoid any risk of generating overflowing matchIndex
+      * when compressing using this dictionary */
+     LZ4_resetStream(LZ4_dict);
+
+     /* We always increment the offset by 64 KB, since, if the dict is longer,
+      * we truncate it to the last 64k, and if it's shorter, we still want to
+      * advance by a whole window length so we can provide the guarantee that
+      * there are only valid offsets in the window, which allows an optimization
+      * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+      * dictionary isn't a full 64k. */
+     dict->currentOffset += 64 KB;

      if (dictSize < (int)HASH_UNIT) {
-         dict->dictionary = NULL;
-         dict->dictSize = 0;
          return 0;
      }

      if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
-     dict->currentOffset += 64 KB;
-     base = p - dict->currentOffset;
+     base = dictEnd - dict->currentOffset;
      dict->dictionary = p;
      dict->dictSize = (U32)(dictEnd - p);
-     dict->currentOffset += dict->dictSize;
+     dict->tableType = (U32)tableType;

      while (p <= dictEnd-HASH_UNIT) {
-         LZ4_putPosition(p, dict->hashTable, byU32, base);
+         LZ4_putPosition(p, dict->hashTable, tableType, base);
          p+=3;
      }

-     return dict->dictSize;
+     return (int)dict->dictSize;
  }
1517
+ void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
1518
+ const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
1519
+ &(dictionaryStream->internal_donotuse);
1520
+
1521
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1522
+ workingStream, dictionaryStream,
1523
+ dictCtx != NULL ? dictCtx->dictSize : 0);
1524
+
1525
+ if (dictCtx != NULL) {
1526
+ /* If the current offset is zero, we will never look in the
1527
+ * external dictionary context, since there is no value a table
1528
+ * entry can take that indicate a miss. In that case, we need
1529
+ * to bump the offset to something non-zero.
1530
+ */
1531
+ if (workingStream->internal_donotuse.currentOffset == 0) {
1532
+ workingStream->internal_donotuse.currentOffset = 64 KB;
1533
+ }
986
1534
 
987
- static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
1535
+ /* Don't actually attach an empty dictionary.
1536
+ */
1537
+ if (dictCtx->dictSize == 0) {
1538
+ dictCtx = NULL;
1539
+ }
1540
+ }
1541
+ workingStream->internal_donotuse.dictCtx = dictCtx;
1542
+ }
1543
+
1544
+
1545
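LZ4_attach_dictionary() lets one loaded dictionary serve many working streams without copying its hash table; the working stream merely keeps a pointer. A hedged usage sketch:

#include "lz4.h"

/* dictStream must have been filled by LZ4_loadDict() and must outlive this call. */
static int compress_shared_dict(const LZ4_stream_t* dictStream,
                                const char* src, int srcSize,
                                char* dst, int dstCap)
{
    LZ4_stream_t work;
    if (LZ4_initStream(&work, sizeof(work)) == NULL) return -1;
    LZ4_attach_dictionary(&work, dictStream);   /* reference, not a copy */
    return LZ4_compress_fast_continue(&work, src, dst, srcSize, dstCap, 1);
}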
+ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
  {
-     if ((LZ4_dict->currentOffset > 0x80000000) ||
-         ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
+     assert(nextSize >= 0);
+     if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
          /* rescale hash table */
          U32 const delta = LZ4_dict->currentOffset - 64 KB;
          const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
          int i;
+         DEBUGLOG(4, "LZ4_renormDictT");
          for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
              if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
              else LZ4_dict->hashTable[i] -= delta;
@@ -1003,16 +1562,29 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
  }


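The rescale subtracts one delta from every surviving entry, which keeps indexes far below the 0x80000000 overflow line while leaving relative distances intact. A simplified arithmetic sketch (the real function also rebases currentOffset and the dictionary, elided in the hunk above):

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint32_t currentOffset = 0x80000000u;               /* about to exceed the safe range */
    uint32_t const delta   = currentOffset - 64*1024;   /* keep one 64 KB window */
    uint32_t entry         = currentOffset - 5;         /* a live table index */

    entry         -= delta;                             /* -> 65531 */
    currentOffset -= delta;                             /* -> 65536 */
    assert(currentOffset - entry == 5);                 /* relative distance preserved */
    return 0;
}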
- int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+                                 const char* source, char* dest,
+                                 int inputSize, int maxOutputSize,
+                                 int acceleration)
  {
+     const tableType_t tableType = byU32;
      LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
-     const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+     const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+     DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);

-     const BYTE* smallest = (const BYTE*) source;
-     if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
-     if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
-     LZ4_renormDictT(streamPtr, smallest);
-     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+     LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
+     if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+     /* invalidate tiny dictionaries */
+     if ( (streamPtr->dictSize-1 < 4-1)   /* intentional underflow */
+       && (dictEnd != (const BYTE*)source) ) {
+         DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+         streamPtr->dictSize = 0;
+         streamPtr->dictionary = (const BYTE*)source;
+         dictEnd = (const BYTE*)source;
+     }

      /* Check overlapping input/dictionary space */
      {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
@@ -1026,46 +1598,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch

      /* prefix mode : source data follows dictionary */
      if (dictEnd == (const BYTE*)source) {
-         int result;
          if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
-             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
+             return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
          else
-             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
-         streamPtr->dictSize += (U32)inputSize;
-         streamPtr->currentOffset += (U32)inputSize;
-         return result;
+             return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
      }

      /* external dictionary mode */
      {   int result;
-         if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
-             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
-         else
-             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
+         if (streamPtr->dictCtx) {
+             /* We depend here on the fact that dictCtx'es (produced by
+              * LZ4_loadDict) guarantee that their tables contain no references
+              * to offsets between dictCtx->currentOffset - 64 KB and
+              * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+              * to use noDictIssue even when the dict isn't a full 64 KB.
+              */
+             if (inputSize > 4 KB) {
+                 /* For compressing large blobs, it is faster to pay the setup
+                  * cost to copy the dictionary's tables into the active context,
+                  * so that the compression loop is only looking into one table.
+                  */
+                 LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
+                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+             } else {
+                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+             }
+         } else {
+             if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+             } else {
+                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+             }
+         }
          streamPtr->dictionary = (const BYTE*)source;
          streamPtr->dictSize = (U32)inputSize;
-         streamPtr->currentOffset += (U32)inputSize;
          return result;
      }
  }

- /* Hidden debug function, to force external dictionary mode */
- int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
+ /* Hidden debug function, to force-test external dictionary mode */
+ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
  {
      LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
      int result;
-     const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

-     const BYTE* smallest = dictEnd;
-     if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
-     LZ4_renormDictT(streamPtr, smallest);
+     LZ4_renormDictT(streamPtr, srcSize);

-     result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+     if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+         result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+     } else {
+         result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+     }

      streamPtr->dictionary = (const BYTE*)source;
-     streamPtr->dictSize = (U32)inputSize;
-     streamPtr->currentOffset += (U32)inputSize;
+     streamPtr->dictSize = (U32)srcSize;

      return result;
  }
@@ -1083,10 +1670,12 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
      LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
      const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

-     if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
-     if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
+     if ((U32)dictSize > 64 KB) { dictSize = 64 KB; }   /* useless to define a dictionary > 64 KB */
+     if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }

-     memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+     if (safeBuffer == NULL) assert(dictSize == 0);
+     if (dictSize > 0)
+         memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

      dict->dictionary = (const BYTE*)safeBuffer;
      dict->dictSize = (U32)dictSize;
@@ -1096,214 +1685,602 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)


- /*-*****************************
- *  Decompression functions
- *******************************/
+ /*-*******************************
+  *  Decompression functions
+  ********************************/
+
+ typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+ #undef MIN
+ #define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
+
+ /* Read the variable-length literal or match length.
+  *
+  * ip - pointer to use as input.
+  * lencheck - end ip.  Return an error if ip advances >= lencheck.
+  * loop_check - check ip >= lencheck in body of loop.  Returns loop_error if so.
+  * initial_check - check ip >= lencheck before start of loop.  Returns initial_error if so.
+  * error (output) - error code.  Should be set to 0 before call.
+  */
+ typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
+ LZ4_FORCE_INLINE unsigned
+ read_variable_length(const BYTE**ip, const BYTE* lencheck,
+                      int loop_check, int initial_check,
+                      variable_length_error* error)
+ {
+     U32 length = 0;
+     U32 s;
+     if (initial_check && unlikely((*ip) >= lencheck)) {   /* overflow detection */
+         *error = initial_error;
+         return length;
+     }
+     do {
+         s = **ip;
+         (*ip)++;
+         length += s;
+         if (loop_check && unlikely((*ip) >= lencheck)) {   /* overflow detection */
+             *error = loop_error;
+             return length;
+         }
+     } while (s==255);
+
+     return length;
+ }
+
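Concretely: the loop keeps adding bytes while they read 255, so the sequence FF FF 04 contributes 255+255+4 = 514, which the caller then adds to the token's 4-bit field. A short trace of the same loop:

#include <assert.h>
#include <stdint.h>

int main(void) {
    const uint8_t ext[] = { 0xFF, 0xFF, 0x04 };
    const uint8_t* p = ext;
    unsigned length = 0, s;
    do { s = *p++; length += s; } while (s == 255);   /* mirrors the loop above */
    assert(length == 514);
    assert(p == ext + 3);
    return 0;
}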
  /*! LZ4_decompress_generic() :
-  *  This generic decompression function cover all use cases.
-  *  It shall be instantiated several times, using different sets of directives
-  *  Note that it is important this generic function is really inlined,
+  *  This generic decompression function covers all use cases.
+  *  It shall be instantiated several times, using different sets of directives.
+  *  Note that it is important for performance that this function really gets inlined,
   *  in order to remove useless branches during compilation optimization.
   */
- FORCE_INLINE int LZ4_decompress_generic(
-     const char* const source,
-     char* const dest,
-     int inputSize,
-     int outputSize,           /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
-
-     int endOnInput,           /* endOnOutputSize, endOnInputSize */
-     int partialDecoding,      /* full, partial */
-     int targetOutputSize,     /* only used if partialDecoding==partial */
-     int dict,                 /* noDict, withPrefix64k, usingExtDict */
-     const BYTE* const lowPrefix,   /* == dest when no prefix */
+ LZ4_FORCE_INLINE int
+ LZ4_decompress_generic(
+     const char* const src,
+     char* const dst,
+     int srcSize,
+     int outputSize,           /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+     endCondition_directive endOnInput,   /* endOnOutputSize, endOnInputSize */
+     earlyEnd_directive partialDecoding,  /* full, partial */
+     dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
+     const BYTE* const lowPrefix,   /* always <= dst, == dst when no prefix */
      const BYTE* const dictStart,   /* only if dict==usingExtDict */
      const size_t dictSize          /* note : = 0 if noDict */
      )
  {
-     /* Local Variables */
-     const BYTE* ip = (const BYTE*) source;
-     const BYTE* const iend = ip + inputSize;
+     if (src == NULL) { return -1; }

-     BYTE* op = (BYTE*) dest;
-     BYTE* const oend = op + outputSize;
-     BYTE* cpy;
-     BYTE* oexit = op + targetOutputSize;
-     const BYTE* const lowLimit = lowPrefix - dictSize;
+     {   const BYTE* ip = (const BYTE*) src;
+         const BYTE* const iend = ip + srcSize;

-     const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
-     const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
-     const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+         BYTE* op = (BYTE*) dst;
+         BYTE* const oend = op + outputSize;
+         BYTE* cpy;

-     const int safeDecode = (endOnInput==endOnInputSize);
-     const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+         const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;

+         const int safeDecode = (endOnInput==endOnInputSize);
+         const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));

-     /* Special cases */
-     if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;   /* targetOutputSize too high => decode everything */
-     if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
-     if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);

-     /* Main Loop : decode sequences */
-     while (1) {
-         size_t length;
+         /* Set up the "end" pointers for the shortcut. */
+         const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+         const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
          const BYTE* match;
          size_t offset;
+         unsigned token;
+         size_t length;

-         /* get literal length */
-         unsigned const token = *ip++;
-         if ((length=(token>>ML_BITS)) == RUN_MASK) {
-             unsigned s;
-             do {
-                 s = *ip++;
-                 length += s;
-             } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
-             if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
-             if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
+
+         DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
+
+         /* Special cases */
+         assert(lowPrefix <= op);
+         if ((endOnInput) && (unlikely(outputSize==0))) {
+             /* Empty output buffer */
+             if (partialDecoding) return 0;
+             return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+         }
+         if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
+         if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
+
+         /* Currently the fast loop shows a regression on qualcomm arm chips. */
+ #if LZ4_FAST_DEC_LOOP
+         if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+             DEBUGLOG(6, "skip fast decode loop");
+             goto safe_decode;
          }

-         /* copy literals */
-         cpy = op+length;
-         if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
1167
- || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
1168
- {
1169
- if (partialDecoding) {
1170
- if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
1171
- if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
1796
+ /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
1797
+ while (1) {
1798
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
1799
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
1800
+ if (endOnInput) { assert(ip < iend); }
1801
+ token = *ip++;
1802
+ length = token >> ML_BITS; /* literal length */
1803
+
1804
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1805
+
1806
+ /* decode literal length */
1807
+ if (length == RUN_MASK) {
1808
+ variable_length_error error = ok;
1809
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
1810
+ if (error == initial_error) { goto _output_error; }
1811
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1812
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1813
+
1814
+ /* copy literals */
1815
+ cpy = op+length;
1816
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1817
+ if (endOnInput) { /* LZ4_decompress_safe() */
1818
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
1819
+ LZ4_wildCopy32(op, ip, cpy);
1820
+ } else { /* LZ4_decompress_fast() */
1821
+ if (cpy>oend-8) { goto safe_literal_copy; }
1822
+ LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1823
+ * it doesn't know input length, and only relies on end-of-block properties */
1824
+ }
1825
+ ip += length; op = cpy;
1172
1826
  } else {
1173
- if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
1174
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
1827
+ cpy = op+length;
1828
+ if (endOnInput) { /* LZ4_decompress_safe() */
1829
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
1830
+ /* We don't need to check oend, since we check it once for each loop below */
1831
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
1832
+ /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
1833
+ LZ4_memcpy(op, ip, 16);
1834
+ } else { /* LZ4_decompress_fast() */
1835
+ /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1836
+ * it doesn't know input length, and relies on end-of-block properties */
1837
+ LZ4_memcpy(op, ip, 8);
1838
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
1839
+ }
1840
+ ip += length; op = cpy;
1175
1841
  }
1176
- memcpy(op, ip, length);
1177
- ip += length;
1178
- op += length;
1179
- break; /* Necessarily EOF, due to parsing restrictions */
1180
- }
1181
- LZ4_wildCopy(op, ip, cpy);
1182
- ip += length; op = cpy;
1183
-
1184
- /* get offset */
1185
- offset = LZ4_readLE16(ip); ip+=2;
1186
- match = op - offset;
1187
- if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */
1188
- LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
1189
-
1190
- /* get matchlength */
1191
- length = token & ML_MASK;
1192
- if (length == ML_MASK) {
1193
- unsigned s;
1194
- do {
1195
- s = *ip++;
1196
- if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
1197
- length += s;
1198
- } while (s==255);
1199
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
1842
+
1843
+ /* get offset */
1844
+ offset = LZ4_readLE16(ip); ip+=2;
1845
+ match = op - offset;
1846
+ assert(match <= op);
1847
+
1848
+ /* get matchlength */
1849
+ length = token & ML_MASK;
1850
+
1851
+ if (length == ML_MASK) {
1852
+ variable_length_error error = ok;
1853
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1854
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
1855
+ if (error != ok) { goto _output_error; }
1856
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
1857
+ length += MINMATCH;
1858
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1859
+ goto safe_match_copy;
1860
+ }
1861
+ } else {
1862
+ length += MINMATCH;
1863
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1864
+ goto safe_match_copy;
1865
+ }
1866
+
1867
+ /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
1868
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
1869
+ if (offset >= 8) {
1870
+ assert(match >= lowPrefix);
1871
+ assert(match <= op);
1872
+ assert(op + 18 <= oend);
1873
+
1874
+ LZ4_memcpy(op, match, 8);
1875
+ LZ4_memcpy(op+8, match+8, 8);
1876
+ LZ4_memcpy(op+16, match+16, 2);
1877
+ op += length;
1878
+ continue;
1879
+ } } }
1880
+
1881
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1882
+ /* match starting within external dictionary */
1883
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
1884
+ if (unlikely(op+length > oend-LASTLITERALS)) {
1885
+ if (partialDecoding) {
1886
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
1887
+ length = MIN(length, (size_t)(oend-op));
1888
+ } else {
1889
+ goto _output_error; /* end-of-block condition violated */
1890
+ } }
1891
+
1892
+ if (length <= (size_t)(lowPrefix-match)) {
1893
+ /* match fits entirely within external dictionary : just copy */
1894
+ memmove(op, dictEnd - (lowPrefix-match), length);
1895
+ op += length;
1896
+ } else {
1897
+ /* match stretches into both external dictionary and current block */
1898
+ size_t const copySize = (size_t)(lowPrefix - match);
1899
+ size_t const restSize = length - copySize;
1900
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
1901
+ op += copySize;
1902
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
1903
+ BYTE* const endOfMatch = op + restSize;
1904
+ const BYTE* copyFrom = lowPrefix;
1905
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
1906
+ } else {
1907
+ LZ4_memcpy(op, lowPrefix, restSize);
1908
+ op += restSize;
1909
+ } }
1910
+ continue;
1911
+ }
1912
+
1913
+ /* copy match within block */
1914
+ cpy = op + length;
1915
+
1916
+ assert((op <= oend) && (oend-op >= 32));
1917
+ if (unlikely(offset<16)) {
1918
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
1919
+ } else {
1920
+ LZ4_wildCopy32(op, match, cpy);
1921
+ }
1922
+
1923
+ op = cpy; /* wildcopy correction */
1200
1924
  }
1201
- length += MINMATCH;
+ safe_decode:
+ #endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (endOnInput ? length != RUN_MASK : length <= 8)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }

- /* check external dictionary */
- if ((dict==usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }

- if (length <= (size_t)(lowPrefix-match)) {
- /* match can be copied as a single segment from external dictionary */
- memmove(op, dictEnd - (lowPrefix-match), length);
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ }
+
+ /* copy literals */
+ cpy = op+length;
+ #if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+ #endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
+ {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ assert(endOnInput);
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
+ } else {
+ /* We must be on the last sequence because of the parsing limitations so check
+ * that we exactly regenerate the original size (must be exact when !endOnInput).
+ */
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
+ }
+ memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
+ ip += length;
 op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
+ break;
+ }
 } else {
- /* match encompass external dictionary and current block */
- size_t const copySize = (size_t)(lowPrefix-match);
- size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */
- BYTE* const endOfMatch = op + restSize;
- const BYTE* copyFrom = lowPrefix;
- while (op < endOfMatch) *op++ = *copyFrom++;
+ LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ _copy_match:
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) goto _output_error;
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+ #if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+ #endif
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op));
+ else goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
 } else {
- memcpy(op, lowPrefix, restSize);
- op += restSize;
- } }
- continue;
- }
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op<=oend);
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = MIN(length, (size_t)(oend-op));
+ const BYTE* const matchEnd = match + mlen;
+ BYTE* const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) { break; }
+ continue;
+ }

- /* copy match within block */
- cpy = op + length;
- if (unlikely(offset<8)) {
- const int dec64 = dec64table[offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[offset];
- memcpy(op+4, match, 4);
- match -= dec64;
- } else { LZ4_copy8(op, match); match+=8; }
- op += 8;
-
- if (unlikely(cpy>oend-12)) {
- BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
- if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
- if (op < oCopyLimit) {
- LZ4_wildCopy(op, match, oCopyLimit);
- match += oCopyLimit - op;
- op = oCopyLimit;
+ if (unlikely(offset<8)) {
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op+4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
 }
- while (op<cpy) *op++ = *match++;
- } else {
- LZ4_copy8(op, match);
- if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
+ op += 8;
+
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ }
+ op = cpy; /* wildcopy correction */
 }
- op=cpy; /* correction */
- }
-
- /* end of decoding */
- if (endOnInput)
- return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
- else
- return (int) (((const char*)ip)-source); /* Nb of input bytes read */

- /* Overflow error detected */
- _output_error:
- return (int) (-(((const char*)ip)-source))-1;
+ /* end of decoding */
+ if (endOnInput) {
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
+ } else {
+ return (int) (((const char*)ip)-src); /* Nb of input bytes read */
+ }
+
+ /* Overflow error detected */
+ _output_error:
+ return (int) (-(((const char*)ip)-src))-1;
+ }
 }


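Before the instantiations below, it may help to recall what LZ4_decompress_generic() is consuming: a block is a series of sequences, each one a token byte (high nibble = literal length, low nibble = match length minus MINMATCH), optional length-extension bytes, the literals themselves, then a 2-byte little-endian offset plus optional match-length extension bytes; the final sequence of a block ends after its literals. A deliberately naive single-sequence reference decoder, without any of the bounds and offset checks the function above performs:

    /* Sketch only: decode one sequence, with no validation whatsoever. */
    static void decode_one_sequence(const unsigned char **ipp, unsigned char **opp)
    {
        const unsigned char *ip = *ipp;
        unsigned char *op = *opp;
        unsigned s, token = *ip++;

        unsigned litlen = token >> 4;                      /* literal length */
        if (litlen == 15) { do { s = *ip++; litlen += s; } while (s == 255); }
        while (litlen--) *op++ = *ip++;                    /* copy literals */

        unsigned offset = ip[0] | (ip[1] << 8); ip += 2;   /* little-endian */
        unsigned matchlen = token & 15;
        if (matchlen == 15) { do { s = *ip++; matchlen += s; } while (s == 255); }
        matchlen += 4;                                     /* MINMATCH */

        {   const unsigned char *match = op - offset;      /* back-reference */
            while (matchlen--) *op++ = *match++;           /* byte-wise copy keeps
                                                            * overlap semantics */
        }
        *ipp = ip; *opp = op;
    }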
+ /*===== Instantiate the API decoding functions. =====*/
+
+ LZ4_FORCE_O2
 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
 {
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest, NULL, 0);
 }

- int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
+ LZ4_FORCE_O2
+ int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
 {
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE*)dst, NULL, 0);
 }
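With the partial_decode directive wired through, LZ4_decompress_safe_partial() can now stop mid-block once targetOutputSize bytes have been produced, and the destination only needs that much capacity. A usage sketch (buffer names are illustrative, not from the library):

    #include "lz4.h"

    /* Peek at the first 100 decompressed bytes of a block, e.g. a header. */
    static int peek_header(const char* compressedBuf, int compressedSize,
                           char dst[100])
    {
        /* dst only needs targetOutputSize bytes; decoding stops there. */
        return LZ4_decompress_safe_partial(compressedBuf, dst,
                                           compressedSize, 100, 100);
    }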

+ LZ4_FORCE_O2
 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
 {
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+ }
+
+ /*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+ LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+ int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+ {
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
 }

+ /* Another obsolete API function, paired with the previous one. */
+ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+ {
+ /* LZ4_decompress_fast doesn't validate match offsets,
+ * and thus serves well with any prefixed dictionary. */
+ return LZ4_decompress_fast(source, dest, originalSize);
+ }
+
+ LZ4_FORCE_O2
+ static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+ {
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+ }
+
+ LZ4_FORCE_O2
+ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+ {
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ }
+
+ LZ4_FORCE_O2
+ static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+ {
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ }
+
+ /* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+ LZ4_FORCE_INLINE
+ int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+ {
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+ }
+
+ LZ4_FORCE_INLINE
+ int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+ {
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+ }

 /*===== streaming decompression functions =====*/

 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
 {
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
 return lz4s;
 }

 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
 {
- if (!LZ4_stream) return 0; /* support free on NULL */
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
 FREEMEM(LZ4_stream);
 return 0;
 }

- /*!
- * LZ4_setStreamDecode() :
- * Use this function to instruct where to find the dictionary.
- * This function is not necessary if previous data is still available where it was decoded.
- * Loading a size of 0 is allowed (same effect as no dictionary).
- * Return : 1 if OK, 0 if error
+ /*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
 */
 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
 {
@@ -1315,6 +2292,25 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
 return 1;
 }

+ /*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for next block (remainingSize < maxBlockSize),
+ * decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+ int LZ4_decoderRingBufferSize(int maxBlockSize)
+ {
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+ }
+
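The returned size covers the format's 64 KB back-reference window plus one maxBlockSize (and a small margin). A sizing sketch; MAX_BLOCK_SIZE is an assumption of the example, not a library constant:

    #include <stdlib.h>
    #include "lz4.h"

    #define MAX_BLOCK_SIZE 4096   /* largest block the producer will emit */

    /* Compile-time sizing via the companion macro from lz4.h ... */
    static char ringStatic[LZ4_DECODER_RING_BUFFER_SIZE(MAX_BLOCK_SIZE)];

    /* ... or run-time sizing with the function above. */
    static char* alloc_ring(void)
    {
        int const size = LZ4_decoderRingBufferSize(MAX_BLOCK_SIZE);
        return (size > 0) ? (char*)malloc((size_t)size) : NULL;
    }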
 /*
 *_continue() :
 These decoding functions allow decompression of multiple blocks in "streaming" mode.
@@ -1322,52 +2318,75 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
 If it's not possible, save the relevant part of decoded data into a safe buffer,
 and indicate where it stands using LZ4_setStreamDecode()
 */
+ LZ4_FORCE_O2
 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
 {
 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
 int result;

- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
 if (result <= 0) return result;
- lz4sd->prefixSize += result;
+ lz4sd->prefixSize += (size_t)result;
 lz4sd->prefixEnd += result;
 } else {
+ /* The buffer wraps around, or they're switching to another buffer. */
 lz4sd->extDictSize = lz4sd->prefixSize;
 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
 if (result <= 0) return result;
- lz4sd->prefixSize = result;
+ lz4sd->prefixSize = (size_t)result;
 lz4sd->prefixEnd = (BYTE*)dest + result;
 }

 return result;
 }
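The dispatch above changes only the internals; the calling pattern for streaming decompression stays the same: decode each block next to the previous one (or wrap when the ring is full) and let the tracking structure remember the prefix. A sketch; next_block(), use_bytes(), RING_SIZE and MAX_BLOCK_SIZE are placeholders for the caller's framing, not library symbols:

    #include "lz4.h"

    static void decode_stream(void)
    {
        LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
        static char ring[RING_SIZE];  /* >= LZ4_decoderRingBufferSize(MAX_BLOCK_SIZE) */
        char src[LZ4_COMPRESSBOUND(MAX_BLOCK_SIZE)];
        int offset = 0, cSize;

        while ((cSize = next_block(src)) > 0) {
            if (offset + MAX_BLOCK_SIZE > RING_SIZE) offset = 0;   /* wrap */
            {   int const dSize = LZ4_decompress_safe_continue(
                        sd, src, ring + offset, cSize, MAX_BLOCK_SIZE);
                if (dSize < 0) break;             /* corrupt input */
                use_bytes(ring + offset, dSize);  /* hand bytes to the consumer */
                offset += dSize;
        }   }
        LZ4_freeStreamDecode(sd);
    }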

+ LZ4_FORCE_O2
 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
 {
 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
 int result;
+ assert(originalSize >= 0);

- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
 if (result <= 0) return result;
- lz4sd->prefixSize += originalSize;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)originalSize;
 lz4sd->prefixEnd += originalSize;
 } else {
 lz4sd->extDictSize = lz4sd->prefixSize;
 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
 if (result <= 0) return result;
- lz4sd->prefixSize = originalSize;
+ lz4sd->prefixSize = (size_t)originalSize;
 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
 }

@@ -1382,32 +2401,27 @@ Advanced decoding functions :
 the dictionary must be explicitly provided within parameters
 */

- FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
+ int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
 {
 if (dictSize==0)
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
 if (dictStart+dictSize == dest) {
- if (dictSize >= (int)(64 KB - 1))
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
 }
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
- }
-
- int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
- {
- return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
 }

 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
 {
- return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
- }
-
- /* debug function */
- int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
- {
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
 }
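LZ4_decompress_safe_usingDict() now selects among the withPrefix64k, withSmallPrefix and forceExtDict instantiations according to where the dictionary sits relative to dest, so callers see no behavioral change. A sketch with an external dictionary; the dict buffer names are assumptions of the example:

    #include "lz4.h"

    /* The block must have been compressed against the same dictionary
     * (e.g. loaded on the compression side with LZ4_loadDict). */
    static int decode_with_dict(const char* cSrc, int cSize,
                                char* dst, int dstCapacity,
                                const char* dictBuf, int dictSize)
    {
        /* dictBuf may live anywhere; if it immediately precedes dst,
         * the prefix fast paths are chosen automatically. */
        return LZ4_decompress_safe_usingDict(cSrc, dst, cSize, dstCapacity,
                                             dictBuf, dictSize);
    }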


@@ -1415,64 +2429,67 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres
 * Obsolete Functions
 ***************************************************/
 /* obsolete compression functions */
- int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
- int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
- int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
- int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
- int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
- int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
+ int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+ {
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+ }
+ int LZ4_compress(const char* src, char* dest, int srcSize)
+ {
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+ }
+ int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+ {
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+ }
+ int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+ {
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+ }
+ int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+ {
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+ }
+ int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+ {
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+ }
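Since these obsolete entry points are now literal one-call wrappers, migrating callers is mechanical; for instance, with src, dst and srcSize assumed in scope:

    /* old, deprecated call */
    int n_old = LZ4_compress(src, dst, srcSize);
    /* exact modern equivalent, per the wrapper above */
    int n_new = LZ4_compress_default(src, dst, srcSize, LZ4_compressBound(srcSize));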

 /*
- These function names are deprecated and should no longer be used.
+ These decompression functions are deprecated and should no longer be used.
 They are only provided here for compatibility with older user programs.
 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
 */
- int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
- int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
-
+ int LZ4_uncompress (const char* source, char* dest, int outputSize)
+ {
+ return LZ4_decompress_fast(source, dest, outputSize);
+ }
+ int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+ {
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+ }

 /* Obsolete Streaming functions */

- int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
-
- static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
- {
- MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
- lz4ds->internal_donotuse.bufferStart = base;
- }
+ int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }

 int LZ4_resetStreamState(void* state, char* inputBuffer)
 {
- if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
- LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
 return 0;
 }

 void* LZ4_create (char* inputBuffer)
 {
- LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
- LZ4_init (lz4ds, (BYTE*)inputBuffer);
- return lz4ds;
+ (void)inputBuffer;
+ return LZ4_createStream();
 }

- char* LZ4_slideInputBuffer (void* LZ4_Data)
- {
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
- int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
- return (char*)(ctx->bufferStart + dictSize);
- }
-
- /* Obsolete streaming decompression functions */
-
- int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
- {
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
- }
-
- int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+ char* LZ4_slideInputBuffer (void* state)
 {
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
 }

 #endif /* LZ4_COMMONDEFS_ONLY */