zstd-native-ruby 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (651)
  1. checksums.yaml +7 -0
  2. data/CLAUDE.md +101 -0
  3. data/LICENSE +19 -0
  4. data/README.md +47 -0
  5. data/ext/zstd_ruby/extconf.rb +35 -0
  6. data/ext/zstd_ruby/zstd_ruby.c +221 -0
  7. data/lib/zstd_ruby.rb +68 -0
  8. data/vendor/zstd/CHANGELOG +863 -0
  9. data/vendor/zstd/CMakeLists.txt +11 -0
  10. data/vendor/zstd/CODE_OF_CONDUCT.md +5 -0
  11. data/vendor/zstd/CONTRIBUTING.md +494 -0
  12. data/vendor/zstd/COPYING +339 -0
  13. data/vendor/zstd/LICENSE +30 -0
  14. data/vendor/zstd/Makefile +470 -0
  15. data/vendor/zstd/Package.swift +36 -0
  16. data/vendor/zstd/README.md +244 -0
  17. data/vendor/zstd/SECURITY.md +15 -0
  18. data/vendor/zstd/TESTING.md +43 -0
  19. data/vendor/zstd/build/LICENSE +0 -0
  20. data/vendor/zstd/build/README.md +56 -0
  21. data/vendor/zstd/build/VS2008/fullbench/fullbench.vcproj +549 -0
  22. data/vendor/zstd/build/VS2008/fuzzer/fuzzer.vcproj +585 -0
  23. data/vendor/zstd/build/VS2008/zstd/zstd.vcproj +677 -0
  24. data/vendor/zstd/build/VS2008/zstd.sln +56 -0
  25. data/vendor/zstd/build/VS2008/zstdlib/zstdlib.vcproj +635 -0
  26. data/vendor/zstd/build/VS2010/CompileAsCpp.props +8 -0
  27. data/vendor/zstd/build/VS2010/datagen/datagen.vcxproj +170 -0
  28. data/vendor/zstd/build/VS2010/fullbench/fullbench.vcxproj +220 -0
  29. data/vendor/zstd/build/VS2010/fuzzer/fuzzer.vcxproj +224 -0
  30. data/vendor/zstd/build/VS2010/libzstd/libzstd.vcxproj +243 -0
  31. data/vendor/zstd/build/VS2010/libzstd-dll/libzstd-dll.rc +51 -0
  32. data/vendor/zstd/build/VS2010/libzstd-dll/libzstd-dll.vcxproj +250 -0
  33. data/vendor/zstd/build/VS2010/zstd/zstd.rc +51 -0
  34. data/vendor/zstd/build/VS2010/zstd/zstd.vcxproj +266 -0
  35. data/vendor/zstd/build/VS2010/zstd.sln +84 -0
  36. data/vendor/zstd/build/VS_scripts/README.md +64 -0
  37. data/vendor/zstd/build/VS_scripts/build.VS2010.cmd +7 -0
  38. data/vendor/zstd/build/VS_scripts/build.VS2012.cmd +6 -0
  39. data/vendor/zstd/build/VS_scripts/build.VS2013.cmd +7 -0
  40. data/vendor/zstd/build/VS_scripts/build.VS2015.cmd +7 -0
  41. data/vendor/zstd/build/VS_scripts/build.VS2017.cmd +7 -0
  42. data/vendor/zstd/build/VS_scripts/build.VS2017Community.cmd +7 -0
  43. data/vendor/zstd/build/VS_scripts/build.VS2017Enterprise.cmd +7 -0
  44. data/vendor/zstd/build/VS_scripts/build.VS2017Professional.cmd +7 -0
  45. data/vendor/zstd/build/VS_scripts/build.VSPreview.cmd +7 -0
  46. data/vendor/zstd/build/VS_scripts/build.generic.cmd +67 -0
  47. data/vendor/zstd/build/cmake/CMakeLists.txt +81 -0
  48. data/vendor/zstd/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake +138 -0
  49. data/vendor/zstd/build/cmake/CMakeModules/FindLibLZ4.cmake +49 -0
  50. data/vendor/zstd/build/cmake/CMakeModules/GetZstdLibraryVersion.cmake +10 -0
  51. data/vendor/zstd/build/cmake/CMakeModules/JoinPaths.cmake +23 -0
  52. data/vendor/zstd/build/cmake/CMakeModules/ZstdBuild.cmake +42 -0
  53. data/vendor/zstd/build/cmake/CMakeModules/ZstdDependencies.cmake +30 -0
  54. data/vendor/zstd/build/cmake/CMakeModules/ZstdOptions.cmake +68 -0
  55. data/vendor/zstd/build/cmake/CMakeModules/ZstdPackage.cmake +42 -0
  56. data/vendor/zstd/build/cmake/CMakeModules/ZstdVersion.cmake +31 -0
  57. data/vendor/zstd/build/cmake/README.md +155 -0
  58. data/vendor/zstd/build/cmake/contrib/CMakeLists.txt +13 -0
  59. data/vendor/zstd/build/cmake/contrib/gen_html/CMakeLists.txt +30 -0
  60. data/vendor/zstd/build/cmake/contrib/pzstd/CMakeLists.txt +39 -0
  61. data/vendor/zstd/build/cmake/lib/CMakeLists.txt +298 -0
  62. data/vendor/zstd/build/cmake/lib/cmake_uninstall.cmake.in +22 -0
  63. data/vendor/zstd/build/cmake/programs/CMakeLists.txt +144 -0
  64. data/vendor/zstd/build/cmake/tests/CMakeLists.txt +118 -0
  65. data/vendor/zstd/build/cmake/zstdConfig.cmake.in +10 -0
  66. data/vendor/zstd/build/meson/GetZstdLibraryVersion.py +39 -0
  67. data/vendor/zstd/build/meson/InstallSymlink.py +55 -0
  68. data/vendor/zstd/build/meson/README.md +38 -0
  69. data/vendor/zstd/build/meson/contrib/gen_html/meson.build +30 -0
  70. data/vendor/zstd/build/meson/contrib/meson.build +12 -0
  71. data/vendor/zstd/build/meson/contrib/pzstd/meson.build +25 -0
  72. data/vendor/zstd/build/meson/lib/meson.build +175 -0
  73. data/vendor/zstd/build/meson/meson.build +155 -0
  74. data/vendor/zstd/build/meson/meson_options.txt +36 -0
  75. data/vendor/zstd/build/meson/programs/meson.build +124 -0
  76. data/vendor/zstd/build/meson/tests/meson.build +217 -0
  77. data/vendor/zstd/build/meson/tests/valgrindTest.py +90 -0
  78. data/vendor/zstd/build/single_file_libs/README.md +33 -0
  79. data/vendor/zstd/build/single_file_libs/build_decoder_test.sh +91 -0
  80. data/vendor/zstd/build/single_file_libs/build_library_test.sh +98 -0
  81. data/vendor/zstd/build/single_file_libs/combine.py +234 -0
  82. data/vendor/zstd/build/single_file_libs/combine.sh +249 -0
  83. data/vendor/zstd/build/single_file_libs/create_single_file_decoder.sh +19 -0
  84. data/vendor/zstd/build/single_file_libs/create_single_file_library.sh +19 -0
  85. data/vendor/zstd/build/single_file_libs/examples/README.md +11 -0
  86. data/vendor/zstd/build/single_file_libs/examples/emscripten.c +340 -0
  87. data/vendor/zstd/build/single_file_libs/examples/roundtrip.c +83 -0
  88. data/vendor/zstd/build/single_file_libs/examples/shell.html +31 -0
  89. data/vendor/zstd/build/single_file_libs/examples/simple.c +75 -0
  90. data/vendor/zstd/build/single_file_libs/examples/testcard-dxt1.inl +2731 -0
  91. data/vendor/zstd/build/single_file_libs/examples/testcard-zstd.inl +261 -0
  92. data/vendor/zstd/build/single_file_libs/examples/testcard.png +0 -0
  93. data/vendor/zstd/build/single_file_libs/zstd-in.c +91 -0
  94. data/vendor/zstd/build/single_file_libs/zstddeclib-in.c +62 -0
  95. data/vendor/zstd/contrib/VS2005/README.md +3 -0
  96. data/vendor/zstd/contrib/VS2005/fullbench/fullbench.vcproj +440 -0
  97. data/vendor/zstd/contrib/VS2005/fuzzer/fuzzer.vcproj +488 -0
  98. data/vendor/zstd/contrib/VS2005/zstd/zstd.vcproj +552 -0
  99. data/vendor/zstd/contrib/VS2005/zstd.sln +55 -0
  100. data/vendor/zstd/contrib/VS2005/zstdlib/zstdlib.vcproj +546 -0
  101. data/vendor/zstd/contrib/cleanTabs +2 -0
  102. data/vendor/zstd/contrib/diagnose_corruption/Makefile +35 -0
  103. data/vendor/zstd/contrib/diagnose_corruption/check_flipped_bits.c +400 -0
  104. data/vendor/zstd/contrib/docker/Dockerfile +20 -0
  105. data/vendor/zstd/contrib/docker/README.md +20 -0
  106. data/vendor/zstd/contrib/externalSequenceProducer/Makefile +40 -0
  107. data/vendor/zstd/contrib/externalSequenceProducer/README.md +14 -0
  108. data/vendor/zstd/contrib/externalSequenceProducer/main.c +108 -0
  109. data/vendor/zstd/contrib/externalSequenceProducer/sequence_producer.c +80 -0
  110. data/vendor/zstd/contrib/externalSequenceProducer/sequence_producer.h +26 -0
  111. data/vendor/zstd/contrib/freestanding_lib/freestanding.py +774 -0
  112. data/vendor/zstd/contrib/gen_html/Makefile +51 -0
  113. data/vendor/zstd/contrib/gen_html/README.md +31 -0
  114. data/vendor/zstd/contrib/gen_html/gen-zstd-manual.sh +9 -0
  115. data/vendor/zstd/contrib/gen_html/gen_html.cpp +225 -0
  116. data/vendor/zstd/contrib/largeNbDicts/Makefile +58 -0
  117. data/vendor/zstd/contrib/largeNbDicts/README.md +33 -0
  118. data/vendor/zstd/contrib/largeNbDicts/largeNbDicts.c +1087 -0
  119. data/vendor/zstd/contrib/linux-kernel/Makefile +108 -0
  120. data/vendor/zstd/contrib/linux-kernel/README.md +14 -0
  121. data/vendor/zstd/contrib/linux-kernel/btrfs-benchmark.sh +104 -0
  122. data/vendor/zstd/contrib/linux-kernel/btrfs-extract-benchmark.sh +99 -0
  123. data/vendor/zstd/contrib/linux-kernel/decompress_sources.h +34 -0
  124. data/vendor/zstd/contrib/linux-kernel/linux.mk +44 -0
  125. data/vendor/zstd/contrib/linux-kernel/linux_zstd.h +691 -0
  126. data/vendor/zstd/contrib/linux-kernel/mem.h +262 -0
  127. data/vendor/zstd/contrib/linux-kernel/squashfs-benchmark.sh +39 -0
  128. data/vendor/zstd/contrib/linux-kernel/test/Makefile +49 -0
  129. data/vendor/zstd/contrib/linux-kernel/test/include/linux/compiler.h +23 -0
  130. data/vendor/zstd/contrib/linux-kernel/test/include/linux/errno.h +15 -0
  131. data/vendor/zstd/contrib/linux-kernel/test/include/linux/kernel.h +19 -0
  132. data/vendor/zstd/contrib/linux-kernel/test/include/linux/limits.h +15 -0
  133. data/vendor/zstd/contrib/linux-kernel/test/include/linux/math64.h +15 -0
  134. data/vendor/zstd/contrib/linux-kernel/test/include/linux/module.h +20 -0
  135. data/vendor/zstd/contrib/linux-kernel/test/include/linux/printk.h +15 -0
  136. data/vendor/zstd/contrib/linux-kernel/test/include/linux/stddef.h +15 -0
  137. data/vendor/zstd/contrib/linux-kernel/test/include/linux/swab.h +16 -0
  138. data/vendor/zstd/contrib/linux-kernel/test/include/linux/types.h +16 -0
  139. data/vendor/zstd/contrib/linux-kernel/test/include/linux/unaligned.h +187 -0
  140. data/vendor/zstd/contrib/linux-kernel/test/include/linux/xxhash.h +745 -0
  141. data/vendor/zstd/contrib/linux-kernel/test/macro-test.sh +44 -0
  142. data/vendor/zstd/contrib/linux-kernel/test/static_test.c +52 -0
  143. data/vendor/zstd/contrib/linux-kernel/test/test.c +229 -0
  144. data/vendor/zstd/contrib/linux-kernel/zstd_common_module.c +29 -0
  145. data/vendor/zstd/contrib/linux-kernel/zstd_compress_module.c +286 -0
  146. data/vendor/zstd/contrib/linux-kernel/zstd_decompress_module.c +141 -0
  147. data/vendor/zstd/contrib/linux-kernel/zstd_deps.h +121 -0
  148. data/vendor/zstd/contrib/match_finders/README.md +42 -0
  149. data/vendor/zstd/contrib/match_finders/zstd_edist.c +558 -0
  150. data/vendor/zstd/contrib/match_finders/zstd_edist.h +70 -0
  151. data/vendor/zstd/contrib/premake/premake4.lua +6 -0
  152. data/vendor/zstd/contrib/premake/zstd.lua +81 -0
  153. data/vendor/zstd/contrib/pzstd/BUCK +72 -0
  154. data/vendor/zstd/contrib/pzstd/ErrorHolder.h +54 -0
  155. data/vendor/zstd/contrib/pzstd/Logging.h +73 -0
  156. data/vendor/zstd/contrib/pzstd/Makefile +265 -0
  157. data/vendor/zstd/contrib/pzstd/Options.cpp +424 -0
  158. data/vendor/zstd/contrib/pzstd/Options.h +71 -0
  159. data/vendor/zstd/contrib/pzstd/Pzstd.cpp +626 -0
  160. data/vendor/zstd/contrib/pzstd/Pzstd.h +153 -0
  161. data/vendor/zstd/contrib/pzstd/README.md +56 -0
  162. data/vendor/zstd/contrib/pzstd/SkippableFrame.cpp +30 -0
  163. data/vendor/zstd/contrib/pzstd/SkippableFrame.h +64 -0
  164. data/vendor/zstd/contrib/pzstd/images/Cspeed.png +0 -0
  165. data/vendor/zstd/contrib/pzstd/images/Dspeed.png +0 -0
  166. data/vendor/zstd/contrib/pzstd/main.cpp +27 -0
  167. data/vendor/zstd/contrib/pzstd/test/BUCK +37 -0
  168. data/vendor/zstd/contrib/pzstd/test/OptionsTest.cpp +536 -0
  169. data/vendor/zstd/contrib/pzstd/test/PzstdTest.cpp +147 -0
  170. data/vendor/zstd/contrib/pzstd/test/RoundTrip.h +86 -0
  171. data/vendor/zstd/contrib/pzstd/test/RoundTripTest.cpp +84 -0
  172. data/vendor/zstd/contrib/pzstd/utils/BUCK +75 -0
  173. data/vendor/zstd/contrib/pzstd/utils/Buffer.h +99 -0
  174. data/vendor/zstd/contrib/pzstd/utils/FileSystem.h +96 -0
  175. data/vendor/zstd/contrib/pzstd/utils/Likely.h +28 -0
  176. data/vendor/zstd/contrib/pzstd/utils/Portability.h +16 -0
  177. data/vendor/zstd/contrib/pzstd/utils/Range.h +133 -0
  178. data/vendor/zstd/contrib/pzstd/utils/ResourcePool.h +96 -0
  179. data/vendor/zstd/contrib/pzstd/utils/ScopeGuard.h +50 -0
  180. data/vendor/zstd/contrib/pzstd/utils/ThreadPool.h +58 -0
  181. data/vendor/zstd/contrib/pzstd/utils/WorkQueue.h +182 -0
  182. data/vendor/zstd/contrib/pzstd/utils/test/BUCK +35 -0
  183. data/vendor/zstd/contrib/pzstd/utils/test/BufferTest.cpp +89 -0
  184. data/vendor/zstd/contrib/pzstd/utils/test/RangeTest.cpp +82 -0
  185. data/vendor/zstd/contrib/pzstd/utils/test/ResourcePoolTest.cpp +72 -0
  186. data/vendor/zstd/contrib/pzstd/utils/test/ScopeGuardTest.cpp +28 -0
  187. data/vendor/zstd/contrib/pzstd/utils/test/ThreadPoolTest.cpp +71 -0
  188. data/vendor/zstd/contrib/pzstd/utils/test/WorkQueueTest.cpp +282 -0
  189. data/vendor/zstd/contrib/recovery/Makefile +35 -0
  190. data/vendor/zstd/contrib/recovery/recover_directory.c +152 -0
  191. data/vendor/zstd/contrib/seekable_format/README.md +42 -0
  192. data/vendor/zstd/contrib/seekable_format/examples/Makefile +55 -0
  193. data/vendor/zstd/contrib/seekable_format/examples/parallel_compression.c +254 -0
  194. data/vendor/zstd/contrib/seekable_format/examples/parallel_processing.c +191 -0
  195. data/vendor/zstd/contrib/seekable_format/examples/seekable_compression.c +136 -0
  196. data/vendor/zstd/contrib/seekable_format/examples/seekable_decompression.c +141 -0
  197. data/vendor/zstd/contrib/seekable_format/examples/seekable_decompression_mem.c +147 -0
  198. data/vendor/zstd/contrib/seekable_format/tests/Makefile +58 -0
  199. data/vendor/zstd/contrib/seekable_format/tests/seekable_tests.c +375 -0
  200. data/vendor/zstd/contrib/seekable_format/zstd_seekable.h +226 -0
  201. data/vendor/zstd/contrib/seekable_format/zstd_seekable_compression_format.md +116 -0
  202. data/vendor/zstd/contrib/seekable_format/zstdseek_compress.c +365 -0
  203. data/vendor/zstd/contrib/seekable_format/zstdseek_decompress.c +600 -0
  204. data/vendor/zstd/contrib/seqBench/Makefile +58 -0
  205. data/vendor/zstd/contrib/seqBench/seqBench.c +53 -0
  206. data/vendor/zstd/contrib/snap/snapcraft.yaml +28 -0
  207. data/vendor/zstd/doc/README.md +26 -0
  208. data/vendor/zstd/doc/decompressor_errata.md +148 -0
  209. data/vendor/zstd/doc/decompressor_permissive.md +80 -0
  210. data/vendor/zstd/doc/educational_decoder/Makefile +62 -0
  211. data/vendor/zstd/doc/educational_decoder/README.md +36 -0
  212. data/vendor/zstd/doc/educational_decoder/harness.c +119 -0
  213. data/vendor/zstd/doc/educational_decoder/zstd_decompress.c +2323 -0
  214. data/vendor/zstd/doc/educational_decoder/zstd_decompress.h +61 -0
  215. data/vendor/zstd/doc/images/CSpeed2.png +0 -0
  216. data/vendor/zstd/doc/images/DCspeed5.png +0 -0
  217. data/vendor/zstd/doc/images/DSpeed3.png +0 -0
  218. data/vendor/zstd/doc/images/cdict_v136.png +0 -0
  219. data/vendor/zstd/doc/images/dict-cr.png +0 -0
  220. data/vendor/zstd/doc/images/dict-cs.png +0 -0
  221. data/vendor/zstd/doc/images/dict-ds.png +0 -0
  222. data/vendor/zstd/doc/images/zstd_cdict_v1_3_5.png +0 -0
  223. data/vendor/zstd/doc/images/zstd_logo86.png +0 -0
  224. data/vendor/zstd/doc/zstd_compression_format.md +1772 -0
  225. data/vendor/zstd/doc/zstd_manual.html +2244 -0
  226. data/vendor/zstd/examples/Makefile +93 -0
  227. data/vendor/zstd/examples/README.md +46 -0
  228. data/vendor/zstd/examples/common.h +246 -0
  229. data/vendor/zstd/examples/dictionary_compression.c +107 -0
  230. data/vendor/zstd/examples/dictionary_decompression.c +99 -0
  231. data/vendor/zstd/examples/multiple_simple_compression.c +116 -0
  232. data/vendor/zstd/examples/multiple_streaming_compression.c +133 -0
  233. data/vendor/zstd/examples/simple_compression.c +68 -0
  234. data/vendor/zstd/examples/simple_decompression.c +65 -0
  235. data/vendor/zstd/examples/streaming_compression.c +146 -0
  236. data/vendor/zstd/examples/streaming_compression_thread_pool.c +180 -0
  237. data/vendor/zstd/examples/streaming_decompression.c +100 -0
  238. data/vendor/zstd/examples/streaming_memory_usage.c +137 -0
  239. data/vendor/zstd/lib/BUCK +232 -0
  240. data/vendor/zstd/lib/Makefile +389 -0
  241. data/vendor/zstd/lib/README.md +267 -0
  242. data/vendor/zstd/lib/common/allocations.h +55 -0
  243. data/vendor/zstd/lib/common/bits.h +205 -0
  244. data/vendor/zstd/lib/common/bitstream.h +454 -0
  245. data/vendor/zstd/lib/common/compiler.h +482 -0
  246. data/vendor/zstd/lib/common/cpu.h +249 -0
  247. data/vendor/zstd/lib/common/debug.c +30 -0
  248. data/vendor/zstd/lib/common/debug.h +107 -0
  249. data/vendor/zstd/lib/common/debug.o +0 -0
  250. data/vendor/zstd/lib/common/entropy_common.c +340 -0
  251. data/vendor/zstd/lib/common/entropy_common.o +0 -0
  252. data/vendor/zstd/lib/common/error_private.c +64 -0
  253. data/vendor/zstd/lib/common/error_private.h +158 -0
  254. data/vendor/zstd/lib/common/error_private.o +0 -0
  255. data/vendor/zstd/lib/common/fse.h +625 -0
  256. data/vendor/zstd/lib/common/fse_decompress.c +315 -0
  257. data/vendor/zstd/lib/common/fse_decompress.o +0 -0
  258. data/vendor/zstd/lib/common/huf.h +277 -0
  259. data/vendor/zstd/lib/common/mem.h +422 -0
  260. data/vendor/zstd/lib/common/pool.c +371 -0
  261. data/vendor/zstd/lib/common/pool.h +81 -0
  262. data/vendor/zstd/lib/common/pool.o +0 -0
  263. data/vendor/zstd/lib/common/portability_macros.h +190 -0
  264. data/vendor/zstd/lib/common/threading.c +196 -0
  265. data/vendor/zstd/lib/common/threading.h +142 -0
  266. data/vendor/zstd/lib/common/threading.o +0 -0
  267. data/vendor/zstd/lib/common/xxhash.c +18 -0
  268. data/vendor/zstd/lib/common/xxhash.h +7094 -0
  269. data/vendor/zstd/lib/common/xxhash.o +0 -0
  270. data/vendor/zstd/lib/common/zstd_common.c +57 -0
  271. data/vendor/zstd/lib/common/zstd_common.o +0 -0
  272. data/vendor/zstd/lib/common/zstd_deps.h +123 -0
  273. data/vendor/zstd/lib/common/zstd_internal.h +326 -0
  274. data/vendor/zstd/lib/common/zstd_trace.h +156 -0
  275. data/vendor/zstd/lib/compress/clevels.h +134 -0
  276. data/vendor/zstd/lib/compress/fse_compress.c +625 -0
  277. data/vendor/zstd/lib/compress/fse_compress.o +0 -0
  278. data/vendor/zstd/lib/compress/hist.c +446 -0
  279. data/vendor/zstd/lib/compress/hist.h +86 -0
  280. data/vendor/zstd/lib/compress/hist.o +0 -0
  281. data/vendor/zstd/lib/compress/huf_compress.c +1465 -0
  282. data/vendor/zstd/lib/compress/huf_compress.o +0 -0
  283. data/vendor/zstd/lib/compress/zstd_compress.c +8362 -0
  284. data/vendor/zstd/lib/compress/zstd_compress.o +0 -0
  285. data/vendor/zstd/lib/compress/zstd_compress_internal.h +1636 -0
  286. data/vendor/zstd/lib/compress/zstd_compress_literals.c +235 -0
  287. data/vendor/zstd/lib/compress/zstd_compress_literals.h +39 -0
  288. data/vendor/zstd/lib/compress/zstd_compress_literals.o +0 -0
  289. data/vendor/zstd/lib/compress/zstd_compress_sequences.c +442 -0
  290. data/vendor/zstd/lib/compress/zstd_compress_sequences.h +55 -0
  291. data/vendor/zstd/lib/compress/zstd_compress_sequences.o +0 -0
  292. data/vendor/zstd/lib/compress/zstd_compress_superblock.c +688 -0
  293. data/vendor/zstd/lib/compress/zstd_compress_superblock.h +32 -0
  294. data/vendor/zstd/lib/compress/zstd_compress_superblock.o +0 -0
  295. data/vendor/zstd/lib/compress/zstd_cwksp.h +765 -0
  296. data/vendor/zstd/lib/compress/zstd_double_fast.c +778 -0
  297. data/vendor/zstd/lib/compress/zstd_double_fast.h +42 -0
  298. data/vendor/zstd/lib/compress/zstd_double_fast.o +0 -0
  299. data/vendor/zstd/lib/compress/zstd_fast.c +985 -0
  300. data/vendor/zstd/lib/compress/zstd_fast.h +30 -0
  301. data/vendor/zstd/lib/compress/zstd_fast.o +0 -0
  302. data/vendor/zstd/lib/compress/zstd_lazy.c +2243 -0
  303. data/vendor/zstd/lib/compress/zstd_lazy.h +193 -0
  304. data/vendor/zstd/lib/compress/zstd_lazy.o +0 -0
  305. data/vendor/zstd/lib/compress/zstd_ldm.c +745 -0
  306. data/vendor/zstd/lib/compress/zstd_ldm.h +109 -0
  307. data/vendor/zstd/lib/compress/zstd_ldm.o +0 -0
  308. data/vendor/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
  309. data/vendor/zstd/lib/compress/zstd_opt.c +1572 -0
  310. data/vendor/zstd/lib/compress/zstd_opt.h +72 -0
  311. data/vendor/zstd/lib/compress/zstd_opt.o +0 -0
  312. data/vendor/zstd/lib/compress/zstd_preSplit.c +238 -0
  313. data/vendor/zstd/lib/compress/zstd_preSplit.h +33 -0
  314. data/vendor/zstd/lib/compress/zstd_preSplit.o +0 -0
  315. data/vendor/zstd/lib/compress/zstdmt_compress.c +1924 -0
  316. data/vendor/zstd/lib/compress/zstdmt_compress.h +102 -0
  317. data/vendor/zstd/lib/compress/zstdmt_compress.o +0 -0
  318. data/vendor/zstd/lib/decompress/huf_decompress.c +1953 -0
  319. data/vendor/zstd/lib/decompress/huf_decompress.o +0 -0
  320. data/vendor/zstd/lib/decompress/huf_decompress_amd64.S +766 -0
  321. data/vendor/zstd/lib/decompress/zstd_ddict.c +244 -0
  322. data/vendor/zstd/lib/decompress/zstd_ddict.h +44 -0
  323. data/vendor/zstd/lib/decompress/zstd_ddict.o +0 -0
  324. data/vendor/zstd/lib/decompress/zstd_decompress.c +2410 -0
  325. data/vendor/zstd/lib/decompress/zstd_decompress.o +0 -0
  326. data/vendor/zstd/lib/decompress/zstd_decompress_block.c +2311 -0
  327. data/vendor/zstd/lib/decompress/zstd_decompress_block.h +73 -0
  328. data/vendor/zstd/lib/decompress/zstd_decompress_block.o +0 -0
  329. data/vendor/zstd/lib/decompress/zstd_decompress_internal.h +240 -0
  330. data/vendor/zstd/lib/deprecated/zbuff.h +214 -0
  331. data/vendor/zstd/lib/deprecated/zbuff_common.c +26 -0
  332. data/vendor/zstd/lib/deprecated/zbuff_compress.c +167 -0
  333. data/vendor/zstd/lib/deprecated/zbuff_decompress.c +77 -0
  334. data/vendor/zstd/lib/dictBuilder/cover.c +1333 -0
  335. data/vendor/zstd/lib/dictBuilder/cover.h +152 -0
  336. data/vendor/zstd/lib/dictBuilder/divsufsort.c +1913 -0
  337. data/vendor/zstd/lib/dictBuilder/divsufsort.h +57 -0
  338. data/vendor/zstd/lib/dictBuilder/fastcover.c +765 -0
  339. data/vendor/zstd/lib/dictBuilder/zdict.c +1137 -0
  340. data/vendor/zstd/lib/dll/example/Makefile +48 -0
  341. data/vendor/zstd/lib/dll/example/README.md +63 -0
  342. data/vendor/zstd/lib/dll/example/build_package.bat +55 -0
  343. data/vendor/zstd/lib/dll/example/fullbench-dll.sln +25 -0
  344. data/vendor/zstd/lib/dll/example/fullbench-dll.vcxproj +181 -0
  345. data/vendor/zstd/lib/install_oses.mk +17 -0
  346. data/vendor/zstd/lib/legacy/zstd_legacy.h +452 -0
  347. data/vendor/zstd/lib/legacy/zstd_v01.c +2128 -0
  348. data/vendor/zstd/lib/legacy/zstd_v01.h +94 -0
  349. data/vendor/zstd/lib/legacy/zstd_v02.c +3465 -0
  350. data/vendor/zstd/lib/legacy/zstd_v02.h +93 -0
  351. data/vendor/zstd/lib/legacy/zstd_v03.c +3105 -0
  352. data/vendor/zstd/lib/legacy/zstd_v03.h +93 -0
  353. data/vendor/zstd/lib/legacy/zstd_v04.c +3598 -0
  354. data/vendor/zstd/lib/legacy/zstd_v04.h +142 -0
  355. data/vendor/zstd/lib/legacy/zstd_v05.c +4005 -0
  356. data/vendor/zstd/lib/legacy/zstd_v05.h +162 -0
  357. data/vendor/zstd/lib/legacy/zstd_v06.c +4110 -0
  358. data/vendor/zstd/lib/legacy/zstd_v06.h +172 -0
  359. data/vendor/zstd/lib/legacy/zstd_v07.c +4490 -0
  360. data/vendor/zstd/lib/legacy/zstd_v07.h +187 -0
  361. data/vendor/zstd/lib/libzstd.mk +238 -0
  362. data/vendor/zstd/lib/libzstd.pc.in +17 -0
  363. data/vendor/zstd/lib/module.modulemap +35 -0
  364. data/vendor/zstd/lib/zdict.h +481 -0
  365. data/vendor/zstd/lib/zstd.h +3209 -0
  366. data/vendor/zstd/lib/zstd_errors.h +107 -0
  367. data/vendor/zstd/programs/BUCK +44 -0
  368. data/vendor/zstd/programs/Makefile +445 -0
  369. data/vendor/zstd/programs/README.md +344 -0
  370. data/vendor/zstd/programs/benchfn.c +256 -0
  371. data/vendor/zstd/programs/benchfn.h +173 -0
  372. data/vendor/zstd/programs/benchzstd.c +1270 -0
  373. data/vendor/zstd/programs/benchzstd.h +191 -0
  374. data/vendor/zstd/programs/datagen.c +186 -0
  375. data/vendor/zstd/programs/datagen.h +38 -0
  376. data/vendor/zstd/programs/dibio.c +447 -0
  377. data/vendor/zstd/programs/dibio.h +39 -0
  378. data/vendor/zstd/programs/fileio.c +3717 -0
  379. data/vendor/zstd/programs/fileio.h +171 -0
  380. data/vendor/zstd/programs/fileio_asyncio.c +663 -0
  381. data/vendor/zstd/programs/fileio_asyncio.h +195 -0
  382. data/vendor/zstd/programs/fileio_common.h +121 -0
  383. data/vendor/zstd/programs/fileio_types.h +86 -0
  384. data/vendor/zstd/programs/lorem.c +285 -0
  385. data/vendor/zstd/programs/lorem.h +32 -0
  386. data/vendor/zstd/programs/platform.h +217 -0
  387. data/vendor/zstd/programs/timefn.c +170 -0
  388. data/vendor/zstd/programs/timefn.h +59 -0
  389. data/vendor/zstd/programs/util.c +1731 -0
  390. data/vendor/zstd/programs/util.h +364 -0
  391. data/vendor/zstd/programs/windres/verrsrc.h +17 -0
  392. data/vendor/zstd/programs/windres/zstd.rc +51 -0
  393. data/vendor/zstd/programs/windres/zstd32.res +0 -0
  394. data/vendor/zstd/programs/windres/zstd64.res +0 -0
  395. data/vendor/zstd/programs/zstd.1 +580 -0
  396. data/vendor/zstd/programs/zstd.1.md +714 -0
  397. data/vendor/zstd/programs/zstdcli.c +1675 -0
  398. data/vendor/zstd/programs/zstdcli_trace.c +173 -0
  399. data/vendor/zstd/programs/zstdcli_trace.h +24 -0
  400. data/vendor/zstd/programs/zstdgrep +134 -0
  401. data/vendor/zstd/programs/zstdgrep.1 +26 -0
  402. data/vendor/zstd/programs/zstdgrep.1.md +30 -0
  403. data/vendor/zstd/programs/zstdless +8 -0
  404. data/vendor/zstd/programs/zstdless.1 +14 -0
  405. data/vendor/zstd/programs/zstdless.1.md +16 -0
  406. data/vendor/zstd/tests/DEPRECATED-test-zstd-speed.py +378 -0
  407. data/vendor/zstd/tests/Makefile +485 -0
  408. data/vendor/zstd/tests/README.md +184 -0
  409. data/vendor/zstd/tests/automated_benchmarking.py +326 -0
  410. data/vendor/zstd/tests/checkTag.c +65 -0
  411. data/vendor/zstd/tests/check_size.py +31 -0
  412. data/vendor/zstd/tests/cli-tests/README.md +258 -0
  413. data/vendor/zstd/tests/cli-tests/basic/args.sh +10 -0
  414. data/vendor/zstd/tests/cli-tests/basic/args.sh.exit +1 -0
  415. data/vendor/zstd/tests/cli-tests/basic/args.sh.stderr.glob +28 -0
  416. data/vendor/zstd/tests/cli-tests/basic/help.sh +10 -0
  417. data/vendor/zstd/tests/cli-tests/basic/help.sh.stdout.glob +34 -0
  418. data/vendor/zstd/tests/cli-tests/basic/memlimit.sh +40 -0
  419. data/vendor/zstd/tests/cli-tests/basic/memlimit.sh.stderr.exact +13 -0
  420. data/vendor/zstd/tests/cli-tests/basic/memlimit.sh.stdout.exact +13 -0
  421. data/vendor/zstd/tests/cli-tests/basic/output_dir.sh +7 -0
  422. data/vendor/zstd/tests/cli-tests/basic/output_dir.sh.stderr.exact +2 -0
  423. data/vendor/zstd/tests/cli-tests/basic/output_dir.sh.stdout.exact +2 -0
  424. data/vendor/zstd/tests/cli-tests/basic/version.sh +6 -0
  425. data/vendor/zstd/tests/cli-tests/basic/version.sh.stdout.glob +2 -0
  426. data/vendor/zstd/tests/cli-tests/bin/cmp_size +44 -0
  427. data/vendor/zstd/tests/cli-tests/bin/datagen +3 -0
  428. data/vendor/zstd/tests/cli-tests/bin/die +4 -0
  429. data/vendor/zstd/tests/cli-tests/bin/println +2 -0
  430. data/vendor/zstd/tests/cli-tests/bin/unzstd +1 -0
  431. data/vendor/zstd/tests/cli-tests/bin/zstd +9 -0
  432. data/vendor/zstd/tests/cli-tests/bin/zstdcat +1 -0
  433. data/vendor/zstd/tests/cli-tests/bin/zstdgrep +2 -0
  434. data/vendor/zstd/tests/cli-tests/bin/zstdless +2 -0
  435. data/vendor/zstd/tests/cli-tests/cltools/setup +6 -0
  436. data/vendor/zstd/tests/cli-tests/cltools/zstdgrep.sh +8 -0
  437. data/vendor/zstd/tests/cli-tests/cltools/zstdgrep.sh.exit +1 -0
  438. data/vendor/zstd/tests/cli-tests/cltools/zstdgrep.sh.stderr.exact +1 -0
  439. data/vendor/zstd/tests/cli-tests/cltools/zstdgrep.sh.stdout.glob +4 -0
  440. data/vendor/zstd/tests/cli-tests/cltools/zstdless.sh +10 -0
  441. data/vendor/zstd/tests/cli-tests/cltools/zstdless.sh.stderr.exact +2 -0
  442. data/vendor/zstd/tests/cli-tests/cltools/zstdless.sh.stdout.glob +5 -0
  443. data/vendor/zstd/tests/cli-tests/common/format.sh +19 -0
  444. data/vendor/zstd/tests/cli-tests/common/mtime.sh +13 -0
  445. data/vendor/zstd/tests/cli-tests/common/permissions.sh +18 -0
  446. data/vendor/zstd/tests/cli-tests/common/platform.sh +47 -0
  447. data/vendor/zstd/tests/cli-tests/compression/adapt.sh +14 -0
  448. data/vendor/zstd/tests/cli-tests/compression/basic.sh +36 -0
  449. data/vendor/zstd/tests/cli-tests/compression/compress-literals.sh +10 -0
  450. data/vendor/zstd/tests/cli-tests/compression/format.sh +16 -0
  451. data/vendor/zstd/tests/cli-tests/compression/golden.sh +16 -0
  452. data/vendor/zstd/tests/cli-tests/compression/gzip-compat.sh +17 -0
  453. data/vendor/zstd/tests/cli-tests/compression/levels.sh +75 -0
  454. data/vendor/zstd/tests/cli-tests/compression/levels.sh.stderr.exact +80 -0
  455. data/vendor/zstd/tests/cli-tests/compression/long-distance-matcher.sh +7 -0
  456. data/vendor/zstd/tests/cli-tests/compression/multi-threaded.sh +22 -0
  457. data/vendor/zstd/tests/cli-tests/compression/multi-threaded.sh.stderr.exact +21 -0
  458. data/vendor/zstd/tests/cli-tests/compression/multiple-files.sh +21 -0
  459. data/vendor/zstd/tests/cli-tests/compression/multiple-files.sh.stdout.exact +12 -0
  460. data/vendor/zstd/tests/cli-tests/compression/row-match-finder.sh +7 -0
  461. data/vendor/zstd/tests/cli-tests/compression/setup +7 -0
  462. data/vendor/zstd/tests/cli-tests/compression/stream-size.sh +7 -0
  463. data/vendor/zstd/tests/cli-tests/compression/verbose-wlog.sh +11 -0
  464. data/vendor/zstd/tests/cli-tests/compression/verbose-wlog.sh.stderr.glob +5 -0
  465. data/vendor/zstd/tests/cli-tests/compression/verbose-wlog.sh.stdout.glob +5 -0
  466. data/vendor/zstd/tests/cli-tests/compression/window-resize.sh +9 -0
  467. data/vendor/zstd/tests/cli-tests/compression/window-resize.sh.stderr.ignore +0 -0
  468. data/vendor/zstd/tests/cli-tests/compression/window-resize.sh.stdout.glob +3 -0
  469. data/vendor/zstd/tests/cli-tests/decompression/detectErrors.sh +11 -0
  470. data/vendor/zstd/tests/cli-tests/decompression/golden.sh +7 -0
  471. data/vendor/zstd/tests/cli-tests/decompression/pass-through.sh +57 -0
  472. data/vendor/zstd/tests/cli-tests/decompression/pass-through.sh.stderr.exact +11 -0
  473. data/vendor/zstd/tests/cli-tests/decompression/pass-through.sh.stdout.exact +25 -0
  474. data/vendor/zstd/tests/cli-tests/determinism/basic.sh +36 -0
  475. data/vendor/zstd/tests/cli-tests/determinism/basic.sh.stderr.exact +0 -0
  476. data/vendor/zstd/tests/cli-tests/determinism/basic.sh.stdout.exact +880 -0
  477. data/vendor/zstd/tests/cli-tests/determinism/multithread.sh +45 -0
  478. data/vendor/zstd/tests/cli-tests/determinism/multithread.sh.stderr.exact +0 -0
  479. data/vendor/zstd/tests/cli-tests/determinism/multithread.sh.stdout.exact +260 -0
  480. data/vendor/zstd/tests/cli-tests/determinism/reuse.sh +44 -0
  481. data/vendor/zstd/tests/cli-tests/determinism/reuse.sh.stderr.exact +0 -0
  482. data/vendor/zstd/tests/cli-tests/determinism/reuse.sh.stdout.exact +19 -0
  483. data/vendor/zstd/tests/cli-tests/determinism/setup +5 -0
  484. data/vendor/zstd/tests/cli-tests/determinism/setup_once +30 -0
  485. data/vendor/zstd/tests/cli-tests/dict-builder/empty-input.sh +9 -0
  486. data/vendor/zstd/tests/cli-tests/dict-builder/empty-input.sh.stderr.exact +1 -0
  487. data/vendor/zstd/tests/cli-tests/dict-builder/no-inputs.sh +3 -0
  488. data/vendor/zstd/tests/cli-tests/dict-builder/no-inputs.sh.exit +1 -0
  489. data/vendor/zstd/tests/cli-tests/dict-builder/no-inputs.sh.stderr.exact +5 -0
  490. data/vendor/zstd/tests/cli-tests/dictionaries/dictionary-mismatch.sh +29 -0
  491. data/vendor/zstd/tests/cli-tests/dictionaries/dictionary-mismatch.sh.stderr.exact +7 -0
  492. data/vendor/zstd/tests/cli-tests/dictionaries/golden.sh +9 -0
  493. data/vendor/zstd/tests/cli-tests/dictionaries/setup +6 -0
  494. data/vendor/zstd/tests/cli-tests/dictionaries/setup_once +24 -0
  495. data/vendor/zstd/tests/cli-tests/file-handling/directory-mirror.sh +49 -0
  496. data/vendor/zstd/tests/cli-tests/file-handling/directory-mirror.sh.stderr.exact +0 -0
  497. data/vendor/zstd/tests/cli-tests/file-handling/directory-mirror.sh.stdout.exact +0 -0
  498. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-dir-without-write-perm.sh +12 -0
  499. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-dir-without-write-perm.sh.stderr.exact +30 -0
  500. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-file.sh +9 -0
  501. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-file.sh.stderr.glob +46 -0
  502. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-stdout.sh +8 -0
  503. data/vendor/zstd/tests/cli-tests/file-stat/compress-file-to-stdout.sh.stderr.exact +24 -0
  504. data/vendor/zstd/tests/cli-tests/file-stat/compress-stdin-to-file.sh +8 -0
  505. data/vendor/zstd/tests/cli-tests/file-stat/compress-stdin-to-file.sh.stderr.glob +28 -0
  506. data/vendor/zstd/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh +8 -0
  507. data/vendor/zstd/tests/cli-tests/file-stat/compress-stdin-to-stdout.sh.stderr.exact +18 -0
  508. data/vendor/zstd/tests/cli-tests/file-stat/decompress-file-to-file.sh +8 -0
  509. data/vendor/zstd/tests/cli-tests/file-stat/decompress-file-to-file.sh.stderr.glob +42 -0
  510. data/vendor/zstd/tests/cli-tests/file-stat/decompress-file-to-stdout.sh +7 -0
  511. data/vendor/zstd/tests/cli-tests/file-stat/decompress-file-to-stdout.sh.stderr.exact +18 -0
  512. data/vendor/zstd/tests/cli-tests/file-stat/decompress-stdin-to-file.sh +7 -0
  513. data/vendor/zstd/tests/cli-tests/file-stat/decompress-stdin-to-file.sh.stderr.glob +24 -0
  514. data/vendor/zstd/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh +7 -0
  515. data/vendor/zstd/tests/cli-tests/file-stat/decompress-stdin-to-stdout.sh.stderr.exact +14 -0
  516. data/vendor/zstd/tests/cli-tests/progress/no-progress.sh +46 -0
  517. data/vendor/zstd/tests/cli-tests/progress/no-progress.sh.stderr.glob +96 -0
  518. data/vendor/zstd/tests/cli-tests/progress/progress.sh +41 -0
  519. data/vendor/zstd/tests/cli-tests/progress/progress.sh.stderr.glob +62 -0
  520. data/vendor/zstd/tests/cli-tests/run.py +732 -0
  521. data/vendor/zstd/tests/cli-tests/zstd-symlinks/setup +6 -0
  522. data/vendor/zstd/tests/cli-tests/zstd-symlinks/zstdcat.sh +12 -0
  523. data/vendor/zstd/tests/cli-tests/zstd-symlinks/zstdcat.sh.stdout.exact +8 -0
  524. data/vendor/zstd/tests/datagencli.c +149 -0
  525. data/vendor/zstd/tests/decodecorpus.c +1998 -0
  526. data/vendor/zstd/tests/dict-files/zero-weight-dict +0 -0
  527. data/vendor/zstd/tests/external_matchfinder.c +140 -0
  528. data/vendor/zstd/tests/external_matchfinder.h +39 -0
  529. data/vendor/zstd/tests/fullbench.c +1210 -0
  530. data/vendor/zstd/tests/fuzz/Makefile +278 -0
  531. data/vendor/zstd/tests/fuzz/README.md +161 -0
  532. data/vendor/zstd/tests/fuzz/block_decompress.c +53 -0
  533. data/vendor/zstd/tests/fuzz/block_round_trip.c +103 -0
  534. data/vendor/zstd/tests/fuzz/decompress_cross_format.c +130 -0
  535. data/vendor/zstd/tests/fuzz/decompress_dstSize_tooSmall.c +74 -0
  536. data/vendor/zstd/tests/fuzz/dictionary_decompress.c +77 -0
  537. data/vendor/zstd/tests/fuzz/dictionary_loader.c +106 -0
  538. data/vendor/zstd/tests/fuzz/dictionary_round_trip.c +155 -0
  539. data/vendor/zstd/tests/fuzz/dictionary_stream_round_trip.c +209 -0
  540. data/vendor/zstd/tests/fuzz/fse_read_ncount.c +100 -0
  541. data/vendor/zstd/tests/fuzz/fuzz.h +57 -0
  542. data/vendor/zstd/tests/fuzz/fuzz.py +910 -0
  543. data/vendor/zstd/tests/fuzz/fuzz_data_producer.c +95 -0
  544. data/vendor/zstd/tests/fuzz/fuzz_data_producer.h +66 -0
  545. data/vendor/zstd/tests/fuzz/fuzz_helpers.c +47 -0
  546. data/vendor/zstd/tests/fuzz/fuzz_helpers.h +82 -0
  547. data/vendor/zstd/tests/fuzz/fuzz_third_party_seq_prod.h +116 -0
  548. data/vendor/zstd/tests/fuzz/generate_sequences.c +88 -0
  549. data/vendor/zstd/tests/fuzz/huf_decompress.c +68 -0
  550. data/vendor/zstd/tests/fuzz/huf_round_trip.c +137 -0
  551. data/vendor/zstd/tests/fuzz/raw_dictionary_round_trip.c +119 -0
  552. data/vendor/zstd/tests/fuzz/regression_driver.c +90 -0
  553. data/vendor/zstd/tests/fuzz/seekable_roundtrip.c +88 -0
  554. data/vendor/zstd/tests/fuzz/seq_prod_fuzz_example/Makefile +16 -0
  555. data/vendor/zstd/tests/fuzz/seq_prod_fuzz_example/README.md +12 -0
  556. data/vendor/zstd/tests/fuzz/seq_prod_fuzz_example/example_seq_prod.c +52 -0
  557. data/vendor/zstd/tests/fuzz/sequence_compression_api.c +452 -0
  558. data/vendor/zstd/tests/fuzz/simple_compress.c +60 -0
  559. data/vendor/zstd/tests/fuzz/simple_decompress.c +59 -0
  560. data/vendor/zstd/tests/fuzz/simple_round_trip.c +182 -0
  561. data/vendor/zstd/tests/fuzz/stream_decompress.c +119 -0
  562. data/vendor/zstd/tests/fuzz/stream_round_trip.c +218 -0
  563. data/vendor/zstd/tests/fuzz/zstd_frame_info.c +43 -0
  564. data/vendor/zstd/tests/fuzz/zstd_helpers.c +208 -0
  565. data/vendor/zstd/tests/fuzz/zstd_helpers.h +56 -0
  566. data/vendor/zstd/tests/fuzzer.c +5482 -0
  567. data/vendor/zstd/tests/golden-compression/PR-3517-block-splitter-corruption-test +1 -0
  568. data/vendor/zstd/tests/golden-compression/http +1 -0
  569. data/vendor/zstd/tests/golden-compression/huffman-compressed-larger +0 -0
  570. data/vendor/zstd/tests/golden-compression/large-literal-and-match-lengths +0 -0
  571. data/vendor/zstd/tests/golden-decompression/block-128k.zst +0 -0
  572. data/vendor/zstd/tests/golden-decompression/empty-block.zst +0 -0
  573. data/vendor/zstd/tests/golden-decompression/rle-first-block.zst +0 -0
  574. data/vendor/zstd/tests/golden-decompression/zeroSeq_2B.zst +0 -0
  575. data/vendor/zstd/tests/golden-decompression-errors/off0.bin.zst +0 -0
  576. data/vendor/zstd/tests/golden-decompression-errors/truncated_huff_state.zst +0 -0
  577. data/vendor/zstd/tests/golden-decompression-errors/zeroSeq_extraneous.zst +0 -0
  578. data/vendor/zstd/tests/golden-dictionaries/http-dict-missing-symbols +0 -0
  579. data/vendor/zstd/tests/gzip/Makefile +45 -0
  580. data/vendor/zstd/tests/gzip/gzip-env.sh +46 -0
  581. data/vendor/zstd/tests/gzip/helin-segv.sh +31 -0
  582. data/vendor/zstd/tests/gzip/help-version.sh +270 -0
  583. data/vendor/zstd/tests/gzip/hufts-segv.gz +0 -0
  584. data/vendor/zstd/tests/gzip/hufts.sh +34 -0
  585. data/vendor/zstd/tests/gzip/init.cfg +5 -0
  586. data/vendor/zstd/tests/gzip/init.sh +616 -0
  587. data/vendor/zstd/tests/gzip/keep.sh +51 -0
  588. data/vendor/zstd/tests/gzip/list.sh +31 -0
  589. data/vendor/zstd/tests/gzip/memcpy-abuse.sh +34 -0
  590. data/vendor/zstd/tests/gzip/mixed.sh +68 -0
  591. data/vendor/zstd/tests/gzip/null-suffix-clobber.sh +35 -0
  592. data/vendor/zstd/tests/gzip/stdin.sh +31 -0
  593. data/vendor/zstd/tests/gzip/test-driver.sh +150 -0
  594. data/vendor/zstd/tests/gzip/trailing-nul.sh +37 -0
  595. data/vendor/zstd/tests/gzip/unpack-invalid.sh +36 -0
  596. data/vendor/zstd/tests/gzip/z-suffix.sh +30 -0
  597. data/vendor/zstd/tests/gzip/zdiff.sh +48 -0
  598. data/vendor/zstd/tests/gzip/zgrep-context.sh +47 -0
  599. data/vendor/zstd/tests/gzip/zgrep-f.sh +43 -0
  600. data/vendor/zstd/tests/gzip/zgrep-signal.sh +64 -0
  601. data/vendor/zstd/tests/gzip/znew-k.sh +40 -0
  602. data/vendor/zstd/tests/invalidDictionaries.c +61 -0
  603. data/vendor/zstd/tests/largeDictionary.c +131 -0
  604. data/vendor/zstd/tests/legacy.c +262 -0
  605. data/vendor/zstd/tests/libzstd_builds.sh +104 -0
  606. data/vendor/zstd/tests/longmatch.c +123 -0
  607. data/vendor/zstd/tests/loremOut.c +50 -0
  608. data/vendor/zstd/tests/loremOut.h +15 -0
  609. data/vendor/zstd/tests/paramgrill.c +2965 -0
  610. data/vendor/zstd/tests/playTests.sh +1926 -0
  611. data/vendor/zstd/tests/poolTests.c +271 -0
  612. data/vendor/zstd/tests/rateLimiter.py +41 -0
  613. data/vendor/zstd/tests/regression/Makefile +60 -0
  614. data/vendor/zstd/tests/regression/README.md +28 -0
  615. data/vendor/zstd/tests/regression/config.c +404 -0
  616. data/vendor/zstd/tests/regression/config.h +91 -0
  617. data/vendor/zstd/tests/regression/data.c +631 -0
  618. data/vendor/zstd/tests/regression/data.h +121 -0
  619. data/vendor/zstd/tests/regression/levels.h +59 -0
  620. data/vendor/zstd/tests/regression/method.c +701 -0
  621. data/vendor/zstd/tests/regression/method.h +65 -0
  622. data/vendor/zstd/tests/regression/result.c +30 -0
  623. data/vendor/zstd/tests/regression/result.h +103 -0
  624. data/vendor/zstd/tests/regression/results.csv +1480 -0
  625. data/vendor/zstd/tests/regression/test.c +362 -0
  626. data/vendor/zstd/tests/roundTripCrash.c +241 -0
  627. data/vendor/zstd/tests/seqgen.c +260 -0
  628. data/vendor/zstd/tests/seqgen.h +58 -0
  629. data/vendor/zstd/tests/test-license.py +156 -0
  630. data/vendor/zstd/tests/test-variants.sh +115 -0
  631. data/vendor/zstd/tests/test-zstd-versions.py +308 -0
  632. data/vendor/zstd/tests/test_process_substitution.bash +92 -0
  633. data/vendor/zstd/tests/zstreamtest.c +3467 -0
  634. data/vendor/zstd/zlibWrapper/BUCK +22 -0
  635. data/vendor/zstd/zlibWrapper/Makefile +120 -0
  636. data/vendor/zstd/zlibWrapper/README.md +163 -0
  637. data/vendor/zstd/zlibWrapper/examples/example.c +598 -0
  638. data/vendor/zstd/zlibWrapper/examples/example_original.c +599 -0
  639. data/vendor/zstd/zlibWrapper/examples/fitblk.c +254 -0
  640. data/vendor/zstd/zlibWrapper/examples/fitblk_original.c +233 -0
  641. data/vendor/zstd/zlibWrapper/examples/minigzip.c +605 -0
  642. data/vendor/zstd/zlibWrapper/examples/zwrapbench.c +1018 -0
  643. data/vendor/zstd/zlibWrapper/gzclose.c +26 -0
  644. data/vendor/zstd/zlibWrapper/gzcompatibility.h +68 -0
  645. data/vendor/zstd/zlibWrapper/gzguts.h +229 -0
  646. data/vendor/zstd/zlibWrapper/gzlib.c +587 -0
  647. data/vendor/zstd/zlibWrapper/gzread.c +637 -0
  648. data/vendor/zstd/zlibWrapper/gzwrite.c +632 -0
  649. data/vendor/zstd/zlibWrapper/zstd_zlibwrapper.c +1200 -0
  650. data/vendor/zstd/zlibWrapper/zstd_zlibwrapper.h +91 -0
  651. metadata +738 -0
data/vendor/zstd/lib/decompress/huf_decompress.c
@@ -0,0 +1,1953 @@
1
+ /* ******************************************************************
2
+ * huff0 huffman decoder,
3
+ * part of Finite State Entropy library
4
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
5
+ *
6
+ * You can contact the author at :
7
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
8
+ *
9
+ * This source code is licensed under both the BSD-style license (found in the
10
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
11
+ * in the COPYING file in the root directory of this source tree).
12
+ * You may select, at your option, one of the above-listed licenses.
13
+ ****************************************************************** */
14
+
15
+ /* **************************************************************
16
+ * Dependencies
17
+ ****************************************************************/
18
+ #include <stddef.h> /* size_t */
19
+ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
20
+ #include "../common/compiler.h"
21
+ #include "../common/bitstream.h" /* BIT_* */
22
+ #include "../common/fse.h" /* to compress headers */
23
+ #include "../common/huf.h"
24
+ #include "../common/error_private.h"
25
+ #include "../common/zstd_internal.h"
26
+ #include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */
27
+
28
+ /* **************************************************************
29
+ * Constants
30
+ ****************************************************************/
31
+
32
+ #define HUF_DECODER_FAST_TABLELOG 11
33
+
34
+ /* **************************************************************
35
+ * Macros
36
+ ****************************************************************/
37
+
38
+ #ifdef HUF_DISABLE_FAST_DECODE
39
+ # define HUF_ENABLE_FAST_DECODE 0
40
+ #else
41
+ # define HUF_ENABLE_FAST_DECODE 1
42
+ #endif
43
+
44
+ /* These two optional macros force the use one way or another of the two
45
+ * Huffman decompression implementations. You can't force in both directions
46
+ * at the same time.
47
+ */
48
+ #if defined(HUF_FORCE_DECOMPRESS_X1) && \
49
+ defined(HUF_FORCE_DECOMPRESS_X2)
50
+ #error "Cannot force the use of the X1 and X2 decoders at the same time!"
51
+ #endif
52
+
53
+ /* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is
54
+ * supported at runtime, so we can add the BMI2 target attribute.
55
+ * When it is disabled, we will still get BMI2 if it is enabled statically.
56
+ */
57
+ #if DYNAMIC_BMI2
58
+ # define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
59
+ #else
60
+ # define HUF_FAST_BMI2_ATTRS
61
+ #endif
62
+
63
+ #ifdef __cplusplus
64
+ # define HUF_EXTERN_C extern "C"
65
+ #else
66
+ # define HUF_EXTERN_C
67
+ #endif
68
+ #define HUF_ASM_DECL HUF_EXTERN_C
69
+
70
+ #if DYNAMIC_BMI2
71
+ # define HUF_NEED_BMI2_FUNCTION 1
72
+ #else
73
+ # define HUF_NEED_BMI2_FUNCTION 0
74
+ #endif
75
+
76
+ /* **************************************************************
77
+ * Error Management
78
+ ****************************************************************/
79
+ #define HUF_isError ERR_isError
80
+
81
+
82
+ /* **************************************************************
83
+ * Byte alignment for workSpace management
84
+ ****************************************************************/
85
+ #define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
86
+ #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
87
+
88
+
89
+ /* **************************************************************
90
+ * BMI2 Variant Wrappers
91
+ ****************************************************************/
92
+ typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize,
93
+ const void *cSrc,
94
+ size_t cSrcSize,
95
+ const HUF_DTable *DTable);
96
+
97
+ #if DYNAMIC_BMI2
98
+
99
+ #define HUF_DGEN(fn) \
100
+ \
101
+ static size_t fn##_default( \
102
+ void* dst, size_t dstSize, \
103
+ const void* cSrc, size_t cSrcSize, \
104
+ const HUF_DTable* DTable) \
105
+ { \
106
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
107
+ } \
108
+ \
109
+ static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \
110
+ void* dst, size_t dstSize, \
111
+ const void* cSrc, size_t cSrcSize, \
112
+ const HUF_DTable* DTable) \
113
+ { \
114
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
115
+ } \
116
+ \
117
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
118
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
119
+ { \
120
+ if (flags & HUF_flags_bmi2) { \
121
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
122
+ } \
123
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
124
+ }
125
+
126
+ #else
127
+
128
+ #define HUF_DGEN(fn) \
129
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
130
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
131
+ { \
132
+ (void)flags; \
133
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
134
+ }
135
+
136
+ #endif
137
+
138
+
139
+ /*-***************************/
140
+ /* generic DTableDesc */
141
+ /*-***************************/
142
+ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
143
+
144
+ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
145
+ {
146
+ DTableDesc dtd;
147
+ ZSTD_memcpy(&dtd, table, sizeof(dtd));
148
+ return dtd;
149
+ }
150
+
151
+ static size_t HUF_initFastDStream(BYTE const* ip) {
152
+ BYTE const lastByte = ip[7];
153
+ size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
154
+ size_t const value = MEM_readLEST(ip) | 1;
155
+ assert(bitsConsumed <= 8);
156
+ assert(sizeof(size_t) == 8);
157
+ return value << bitsConsumed;
158
+ }
159
+
160
+
161
+ /**
162
+ * The input/output arguments to the Huffman fast decoding loop:
163
+ *
164
+ * ip [in/out] - The input pointers, must be updated to reflect what is consumed.
165
+ * op [in/out] - The output pointers, must be updated to reflect what is written.
166
+ * bits [in/out] - The bitstream containers, must be updated to reflect the current state.
167
+ * dt [in] - The decoding table.
168
+ * ilowest [in] - The beginning of the valid range of the input. Decoders may read
169
+ * down to this pointer. It may be below iend[0].
170
+ * oend [in] - The end of the output stream. op[3] must not cross oend.
171
+ * iend [in] - The end of each input stream. ip[i] may cross iend[i],
172
+ * as long as it is above ilowest, but that indicates corruption.
173
+ */
174
+ typedef struct {
175
+ BYTE const* ip[4];
176
+ BYTE* op[4];
177
+ U64 bits[4];
178
+ void const* dt;
179
+ BYTE const* ilowest;
180
+ BYTE* oend;
181
+ BYTE const* iend[4];
182
+ } HUF_DecompressFastArgs;
183
+
184
+ typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*);
185
+
186
+ /**
187
+ * Initializes args for the fast decoding loop.
188
+ * @returns 1 on success
189
+ * 0 if the fallback implementation should be used.
190
+ * Or an error code on failure.
191
+ */
192
+ static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
193
+ {
194
+ void const* dt = DTable + 1;
195
+ U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
196
+
197
+ const BYTE* const istart = (const BYTE*)src;
198
+
199
+ BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize);
200
+
201
+ /* The fast decoding loop assumes 64-bit little-endian.
202
+ * This condition is false on x32.
203
+ */
204
+ if (!MEM_isLittleEndian() || MEM_32bits())
205
+ return 0;
206
+
207
+ /* Avoid nullptr addition */
208
+ if (dstSize == 0)
209
+ return 0;
210
+ assert(dst != NULL);
211
+
212
+ /* strict minimum : jump table + 1 byte per stream */
213
+ if (srcSize < 10)
214
+ return ERROR(corruption_detected);
215
+
216
+ /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
217
+ * If table log is not correct at this point, fallback to the old decoder.
218
+ * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
219
+ */
220
+ if (dtLog != HUF_DECODER_FAST_TABLELOG)
221
+ return 0;
222
+
223
+ /* Read the jump table. */
224
+ {
225
+ size_t const length1 = MEM_readLE16(istart);
226
+ size_t const length2 = MEM_readLE16(istart+2);
227
+ size_t const length3 = MEM_readLE16(istart+4);
228
+ size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
229
+ args->iend[0] = istart + 6; /* jumpTable */
230
+ args->iend[1] = args->iend[0] + length1;
231
+ args->iend[2] = args->iend[1] + length2;
232
+ args->iend[3] = args->iend[2] + length3;
233
+
234
+ /* HUF_initFastDStream() requires this, and this small of an input
235
+ * won't benefit from the ASM loop anyways.
236
+ */
237
+ if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8)
238
+ return 0;
239
+ if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
240
+ }
241
+ /* ip[] contains the position that is currently loaded into bits[]. */
242
+ args->ip[0] = args->iend[1] - sizeof(U64);
243
+ args->ip[1] = args->iend[2] - sizeof(U64);
244
+ args->ip[2] = args->iend[3] - sizeof(U64);
245
+ args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
246
+
247
+ /* op[] contains the output pointers. */
248
+ args->op[0] = (BYTE*)dst;
249
+ args->op[1] = args->op[0] + (dstSize+3)/4;
250
+ args->op[2] = args->op[1] + (dstSize+3)/4;
251
+ args->op[3] = args->op[2] + (dstSize+3)/4;
252
+
253
+ /* No point to call the ASM loop for tiny outputs. */
254
+ if (args->op[3] >= oend)
255
+ return 0;
256
+
257
+ /* bits[] is the bit container.
258
+ * It is read from the MSB down to the LSB.
259
+ * It is shifted left as it is read, and zeros are
260
+ * shifted in. After the lowest valid bit a 1 is
261
+ * set, so that CountTrailingZeros(bits[]) can be used
262
+ * to count how many bits we've consumed.
263
+ */
264
+ args->bits[0] = HUF_initFastDStream(args->ip[0]);
265
+ args->bits[1] = HUF_initFastDStream(args->ip[1]);
266
+ args->bits[2] = HUF_initFastDStream(args->ip[2]);
267
+ args->bits[3] = HUF_initFastDStream(args->ip[3]);
268
+
269
+ /* The decoders must be sure to never read beyond ilowest.
270
+ * This is lower than iend[0], but allowing decoders to read
271
+ * down to ilowest can allow an extra iteration or two in the
272
+ * fast loop.
273
+ */
274
+ args->ilowest = istart;
275
+
276
+ args->oend = oend;
277
+ args->dt = dt;
278
+
279
+ return 1;
280
+ }
281
+
282
+ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd)
283
+ {
284
+ /* Validate that we haven't overwritten. */
285
+ if (args->op[stream] > segmentEnd)
286
+ return ERROR(corruption_detected);
287
+ /* Validate that we haven't read beyond iend[].
288
+ * Note that ip[] may be < iend[] because the MSB is
289
+ * the next bit to read, and we may have consumed 100%
290
+ * of the stream, so down to iend[i] - 8 is valid.
291
+ */
292
+ if (args->ip[stream] < args->iend[stream] - 8)
293
+ return ERROR(corruption_detected);
294
+
295
+ /* Construct the BIT_DStream_t. */
296
+ assert(sizeof(size_t) == 8);
297
+ bit->bitContainer = MEM_readLEST(args->ip[stream]);
298
+ bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]);
299
+ bit->start = (const char*)args->ilowest;
300
+ bit->limitPtr = bit->start + sizeof(size_t);
301
+ bit->ptr = (const char*)args->ip[stream];
302
+
303
+ return 0;
304
+ }
305
+
306
+ /* Calls X(N) for each stream 0, 1, 2, 3. */
307
+ #define HUF_4X_FOR_EACH_STREAM(X) \
308
+ do { \
309
+ X(0); \
310
+ X(1); \
311
+ X(2); \
312
+ X(3); \
313
+ } while (0)
314
+
315
+ /* Calls X(N, var) for each stream 0, 1, 2, 3. */
316
+ #define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \
317
+ do { \
318
+ X(0, (var)); \
319
+ X(1, (var)); \
320
+ X(2, (var)); \
321
+ X(3, (var)); \
322
+ } while (0)
323
+
324
+
325
+ #ifndef HUF_FORCE_DECOMPRESS_X2
326
+
327
+ /*-***************************/
328
+ /* single-symbol decoding */
329
+ /*-***************************/
330
+ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */
331
+
332
+ /**
333
+ * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
334
+ * a time.
335
+ */
336
+ static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
337
+ U64 D4;
338
+ if (MEM_isLittleEndian()) {
339
+ D4 = (U64)((symbol << 8) + nbBits);
340
+ } else {
341
+ D4 = (U64)(symbol + (nbBits << 8));
342
+ }
343
+ assert(D4 < (1U << 16));
344
+ D4 *= 0x0001000100010001ULL;
345
+ return D4;
346
+ }
347
+
348
+ /**
349
+ * Increase the tableLog to targetTableLog and rescales the stats.
350
+ * If tableLog > targetTableLog this is a no-op.
351
+ * @returns New tableLog
352
+ */
353
+ static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
354
+ {
355
+ if (tableLog > targetTableLog)
356
+ return tableLog;
357
+ if (tableLog < targetTableLog) {
358
+ U32 const scale = targetTableLog - tableLog;
359
+ U32 s;
360
+ /* Increase the weight for all non-zero probability symbols by scale. */
361
+ for (s = 0; s < nbSymbols; ++s) {
362
+ huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
363
+ }
364
+ /* Update rankVal to reflect the new weights.
365
+ * All weights except 0 get moved to weight + scale.
366
+ * Weights [1, scale] are empty.
367
+ */
368
+ for (s = targetTableLog; s > scale; --s) {
369
+ rankVal[s] = rankVal[s - scale];
370
+ }
371
+ for (s = scale; s > 0; --s) {
372
+ rankVal[s] = 0;
373
+ }
374
+ }
375
+ return targetTableLog;
376
+ }
377
+
378
+ typedef struct {
379
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
380
+ U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
381
+ U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
382
+ BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
383
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
384
+ } HUF_ReadDTableX1_Workspace;
385
+
386
+ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
387
+ {
388
+ U32 tableLog = 0;
389
+ U32 nbSymbols = 0;
390
+ size_t iSize;
391
+ void* const dtPtr = DTable + 1;
392
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
393
+ HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
394
+
395
+ DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
396
+ if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
397
+
398
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
399
+ /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
400
+
401
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags);
402
+ if (HUF_isError(iSize)) return iSize;
403
+
404
+
405
+ /* Table header */
406
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
407
+ U32 const maxTableLog = dtd.maxTableLog + 1;
408
+ U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
409
+ tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
410
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
411
+ dtd.tableType = 0;
412
+ dtd.tableLog = (BYTE)tableLog;
413
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
414
+ }
415
+
416
+ /* Compute symbols and rankStart given rankVal:
417
+ *
418
+ * rankVal already contains the number of values of each weight.
419
+ *
420
+ * symbols contains the symbols ordered by weight. First are the rankVal[0]
421
+ * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
422
+ * symbols[0] is filled (but unused) to avoid a branch.
423
+ *
424
+ * rankStart contains the offset where each rank belongs in the DTable.
425
+ * rankStart[0] is not filled because there are no entries in the table for
426
+ * weight 0.
427
+ */
428
+ { int n;
429
+ U32 nextRankStart = 0;
430
+ int const unroll = 4;
431
+ int const nLimit = (int)nbSymbols - unroll + 1;
432
+ for (n=0; n<(int)tableLog+1; n++) {
433
+ U32 const curr = nextRankStart;
434
+ nextRankStart += wksp->rankVal[n];
435
+ wksp->rankStart[n] = curr;
436
+ }
437
+ for (n=0; n < nLimit; n += unroll) {
438
+ int u;
439
+ for (u=0; u < unroll; ++u) {
440
+ size_t const w = wksp->huffWeight[n+u];
441
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
442
+ }
443
+ }
444
+ for (; n < (int)nbSymbols; ++n) {
445
+ size_t const w = wksp->huffWeight[n];
446
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
447
+ }
448
+ }
449
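Editorial note (illustrative only, not part of the vendored file): the block above is a counting sort of symbols by weight, using rankVal as per-weight counts and rankStart as their running prefix sum. The sketch below shows the same idea on 6 toy symbols, without the unroll-by-4 optimization used in the real code.

#include <stdio.h>

int main(void) {
    unsigned const nbSymbols = 6;
    unsigned char const weight[6] = { 2, 0, 1, 2, 1, 1 };
    unsigned rankVal[3] = { 1, 3, 2 };           /* counts of weights 0, 1, 2 */
    unsigned rankStart[3];
    unsigned char symbols[6];
    unsigned n, next = 0;
    for (n = 0; n < 3; ++n) { rankStart[n] = next; next += rankVal[n]; }
    for (n = 0; n < nbSymbols; ++n)
        symbols[rankStart[weight[n]]++] = (unsigned char)n;   /* bucket by weight */
    for (n = 0; n < nbSymbols; ++n)
        printf("symbols[%u] = %u (weight %u)\n", n, symbols[n], weight[symbols[n]]);
    return 0;
}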
+
450
+ /* fill DTable
451
+ * We fill all entries of each weight in order.
452
+ * That way length is a constant for each iteration of the outer loop.
453
+ * We can switch based on the length to a different inner loop which is
454
+ * optimized for that particular case.
455
+ */
456
+ { U32 w;
457
+ int symbol = wksp->rankVal[0];
458
+ int rankStart = 0;
459
+ for (w=1; w<tableLog+1; ++w) {
460
+ int const symbolCount = wksp->rankVal[w];
461
+ int const length = (1 << w) >> 1;
462
+ int uStart = rankStart;
463
+ BYTE const nbBits = (BYTE)(tableLog + 1 - w);
464
+ int s;
465
+ int u;
466
+ switch (length) {
467
+ case 1:
468
+ for (s=0; s<symbolCount; ++s) {
469
+ HUF_DEltX1 D;
470
+ D.byte = wksp->symbols[symbol + s];
471
+ D.nbBits = nbBits;
472
+ dt[uStart] = D;
473
+ uStart += 1;
474
+ }
475
+ break;
476
+ case 2:
477
+ for (s=0; s<symbolCount; ++s) {
478
+ HUF_DEltX1 D;
479
+ D.byte = wksp->symbols[symbol + s];
480
+ D.nbBits = nbBits;
481
+ dt[uStart+0] = D;
482
+ dt[uStart+1] = D;
483
+ uStart += 2;
484
+ }
485
+ break;
486
+ case 4:
487
+ for (s=0; s<symbolCount; ++s) {
488
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
489
+ MEM_write64(dt + uStart, D4);
490
+ uStart += 4;
491
+ }
492
+ break;
493
+ case 8:
494
+ for (s=0; s<symbolCount; ++s) {
495
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
496
+ MEM_write64(dt + uStart, D4);
497
+ MEM_write64(dt + uStart + 4, D4);
498
+ uStart += 8;
499
+ }
500
+ break;
501
+ default:
502
+ for (s=0; s<symbolCount; ++s) {
503
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
504
+ for (u=0; u < length; u += 16) {
505
+ MEM_write64(dt + uStart + u + 0, D4);
506
+ MEM_write64(dt + uStart + u + 4, D4);
507
+ MEM_write64(dt + uStart + u + 8, D4);
508
+ MEM_write64(dt + uStart + u + 12, D4);
509
+ }
510
+ assert(u == length);
511
+ uStart += length;
512
+ }
513
+ break;
514
+ }
515
+ symbol += symbolCount;
516
+ rankStart += symbolCount * length;
517
+ }
518
+ }
519
+ return iSize;
520
+ }
521
+
522
+ FORCE_INLINE_TEMPLATE BYTE
523
+ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
524
+ {
525
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
526
+ BYTE const c = dt[val].byte;
527
+ BIT_skipBits(Dstream, dt[val].nbBits);
528
+ return c;
529
+ }
530
+
531
+ #define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
532
+ do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
533
+
534
+ #define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
535
+ do { \
536
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
537
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
538
+ } while (0)
539
+
540
+ #define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
541
+ do { \
542
+ if (MEM_64bits()) \
543
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
544
+ } while (0)
545
+
546
+ HINT_INLINE size_t
547
+ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
548
+ {
549
+ BYTE* const pStart = p;
550
+
551
+ /* up to 4 symbols at a time */
552
+ if ((pEnd - p) > 3) {
553
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
554
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
555
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
556
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
557
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
558
+ }
559
+ } else {
560
+ BIT_reloadDStream(bitDPtr);
561
+ }
562
+
563
+ /* [0-3] symbols remaining */
564
+ if (MEM_32bits())
565
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
566
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
567
+
568
+ /* no more data to retrieve from bitstream, no need to reload */
569
+ while (p < pEnd)
570
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
571
+
572
+ return (size_t)(pEnd-pStart);
573
+ }
574
+
575
+ FORCE_INLINE_TEMPLATE size_t
576
+ HUF_decompress1X1_usingDTable_internal_body(
577
+ void* dst, size_t dstSize,
578
+ const void* cSrc, size_t cSrcSize,
579
+ const HUF_DTable* DTable)
580
+ {
581
+ BYTE* op = (BYTE*)dst;
582
+ BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(op, (ptrdiff_t)dstSize);
583
+ const void* dtPtr = DTable + 1;
584
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
585
+ BIT_DStream_t bitD;
586
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
587
+ U32 const dtLog = dtd.tableLog;
588
+
589
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
590
+
591
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
592
+
593
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
594
+
595
+ return dstSize;
596
+ }
597
+
598
+ /* HUF_decompress4X1_usingDTable_internal_body():
599
+ * Conditions :
600
+ * @dstSize >= 6
601
+ */
602
+ FORCE_INLINE_TEMPLATE size_t
603
+ HUF_decompress4X1_usingDTable_internal_body(
604
+ void* dst, size_t dstSize,
605
+ const void* cSrc, size_t cSrcSize,
606
+ const HUF_DTable* DTable)
607
+ {
608
+ /* Check */
609
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
610
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
611
+
612
+ { const BYTE* const istart = (const BYTE*) cSrc;
613
+ BYTE* const ostart = (BYTE*) dst;
614
+ BYTE* const oend = ostart + dstSize;
615
+ BYTE* const olimit = oend - 3;
616
+ const void* const dtPtr = DTable + 1;
617
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
618
+
619
+ /* Init */
620
+ BIT_DStream_t bitD1;
621
+ BIT_DStream_t bitD2;
622
+ BIT_DStream_t bitD3;
623
+ BIT_DStream_t bitD4;
624
+ size_t const length1 = MEM_readLE16(istart);
625
+ size_t const length2 = MEM_readLE16(istart+2);
626
+ size_t const length3 = MEM_readLE16(istart+4);
627
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
628
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
629
+ const BYTE* const istart2 = istart1 + length1;
630
+ const BYTE* const istart3 = istart2 + length2;
631
+ const BYTE* const istart4 = istart3 + length3;
632
+ const size_t segmentSize = (dstSize+3) / 4;
633
+ BYTE* const opStart2 = ostart + segmentSize;
634
+ BYTE* const opStart3 = opStart2 + segmentSize;
635
+ BYTE* const opStart4 = opStart3 + segmentSize;
636
+ BYTE* op1 = ostart;
637
+ BYTE* op2 = opStart2;
638
+ BYTE* op3 = opStart3;
639
+ BYTE* op4 = opStart4;
640
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
641
+ U32 const dtLog = dtd.tableLog;
642
+ U32 endSignal = 1;
643
+
644
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
645
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
646
+ assert(dstSize >= 6); /* validated above */
647
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
648
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
649
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
650
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
651
+
652
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
653
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
654
+ for ( ; (endSignal) & (op4 < olimit) ; ) {
655
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
656
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
657
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
658
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
659
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
660
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
661
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
662
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
663
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
664
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
665
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
666
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
667
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
668
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
669
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
670
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
671
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
672
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
673
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
674
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
675
+ }
676
+ }
677
+
678
+ /* check corruption */
679
+ /* note : should not be necessary, since op# advance in lock step and we control op4;
680
+  *        but curiously, the binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when at least one test is present */
681
+ if (op1 > opStart2) return ERROR(corruption_detected);
682
+ if (op2 > opStart3) return ERROR(corruption_detected);
683
+ if (op3 > opStart4) return ERROR(corruption_detected);
684
+ /* note : op4 is already verified within the main loop */
685
+
686
+ /* finish bitStreams one by one */
687
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
688
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
689
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
690
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
691
+
692
+ /* check */
693
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
694
+ if (!endCheck) return ERROR(corruption_detected); }
695
+
696
+ /* decoded size */
697
+ return dstSize;
698
+ }
699
+ }
700
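Editorial note (illustrative, not the library API): the 4-stream framing that this function parses, a 6-byte jump table of three little-endian 16-bit lengths with the fourth length derived from the total, can be sketched in isolation. readLE16 and splitFourStreams below are hypothetical helpers standing in for MEM_readLE16 and the inline parsing above.

#include <stddef.h>
#include <stdint.h>

static size_t readLE16(const uint8_t* p) { return (size_t)p[0] | ((size_t)p[1] << 8); }

/* Returns 0 on corruption, 1 on success; fills lengths[0..3] and starts[0..3]. */
int splitFourStreams(const uint8_t* src, size_t srcSize,
                     size_t lengths[4], const uint8_t* starts[4])
{
    if (srcSize < 10) return 0;                    /* jump table + 1 byte per stream */
    lengths[0] = readLE16(src);
    lengths[1] = readLE16(src + 2);
    lengths[2] = readLE16(src + 4);
    if (lengths[0] + lengths[1] + lengths[2] + 6 > srcSize) return 0;
    lengths[3] = srcSize - (lengths[0] + lengths[1] + lengths[2] + 6);
    starts[0] = src + 6;                           /* first stream follows the jump table */
    starts[1] = starts[0] + lengths[0];
    starts[2] = starts[1] + lengths[1];
    starts[3] = starts[2] + lengths[2];
    return 1;
}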
+
701
+ #if HUF_NEED_BMI2_FUNCTION
702
+ static BMI2_TARGET_ATTRIBUTE
703
+ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
704
+ size_t cSrcSize, HUF_DTable const* DTable) {
705
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
706
+ }
707
+ #endif
708
+
709
+ static
710
+ size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
711
+ size_t cSrcSize, HUF_DTable const* DTable) {
712
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
713
+ }
714
+
715
+ #if ZSTD_ENABLE_ASM_X86_64_BMI2
716
+
717
+ HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
718
+
719
+ #endif
720
+
721
+ static HUF_FAST_BMI2_ATTRS
722
+ void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
723
+ {
724
+ U64 bits[4];
725
+ BYTE const* ip[4];
726
+ BYTE* op[4];
727
+ U16 const* const dtable = (U16 const*)args->dt;
728
+ BYTE* const oend = args->oend;
729
+ BYTE const* const ilowest = args->ilowest;
730
+
731
+ /* Copy the arguments to local variables */
732
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
733
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
734
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
735
+
736
+ assert(MEM_isLittleEndian());
737
+ assert(!MEM_32bits());
738
+
739
+ for (;;) {
740
+ BYTE* olimit;
741
+ int stream;
742
+
743
+ /* Assert loop preconditions */
744
+ #ifndef NDEBUG
745
+ for (stream = 0; stream < 4; ++stream) {
746
+ assert(op[stream] <= (stream == 3 ? oend : op[stream + 1]));
747
+ assert(ip[stream] >= ilowest);
748
+ }
749
+ #endif
750
+ /* Compute olimit */
751
+ {
752
+ /* Each iteration produces 5 output symbols per stream */
753
+ size_t const oiters = (size_t)(oend - op[3]) / 5;
754
+ /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes
755
+ * per stream.
756
+ */
757
+ size_t const iiters = (size_t)(ip[0] - ilowest) / 7;
758
+ /* We can safely run iters iterations before running bounds checks */
759
+ size_t const iters = MIN(oiters, iiters);
760
+ size_t const symbols = iters * 5;
761
+
762
+ /* We can simply check that op[3] < olimit, instead of checking all
763
+ * of our bounds, since we can't hit the other bounds until we've run
764
+ * iters iterations, which only happens when op[3] == olimit.
765
+ */
766
+ olimit = op[3] + symbols;
767
+
768
+ /* Exit fast decoding loop once we reach the end. */
769
+ if (op[3] == olimit)
770
+ break;
771
+
772
+ /* Exit the decoding loop if any input pointer has crossed the
773
+ * previous one. This indicates corruption, and a precondition
774
+ * to our loop is that ip[i] >= ip[0].
775
+ */
776
+ for (stream = 1; stream < 4; ++stream) {
777
+ if (ip[stream] < ip[stream - 1])
778
+ goto _out;
779
+ }
780
+ }
781
+
782
+ #ifndef NDEBUG
783
+ for (stream = 1; stream < 4; ++stream) {
784
+ assert(ip[stream] >= ip[stream - 1]);
785
+ }
786
+ #endif
787
+
788
+ #define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \
789
+ do { \
790
+ U64 const index = bits[(_stream)] >> 53; \
791
+ U16 const entry = dtable[index]; \
792
+ bits[(_stream)] <<= entry & 0x3F; \
793
+ op[(_stream)][(_symbol)] = (BYTE)(entry >> 8); \
794
+ } while (0)
795
+
796
+ #define HUF_5X1_RELOAD_STREAM(_stream) \
797
+ do { \
798
+ U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
799
+ U64 const nbBits = ctz & 7; \
800
+ U64 const nbBytes = ctz >> 3; \
801
+ op[(_stream)] += 5; \
802
+ ip[(_stream)] -= nbBytes; \
803
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
804
+ bits[(_stream)] <<= nbBits; \
805
+ } while (0)
806
+
807
+ /* Manually unroll the loop because compilers don't consistently
808
+ * unroll the inner loops, which destroys performance.
809
+ */
810
+ do {
811
+ /* Decode 5 symbols in each of the 4 streams */
812
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0);
813
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1);
814
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2);
815
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3);
816
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4);
817
+
818
+ /* Reload each of the 4 bitstreams */
819
+ HUF_4X_FOR_EACH_STREAM(HUF_5X1_RELOAD_STREAM);
820
+ } while (op[3] < olimit);
821
+
822
+ #undef HUF_4X1_DECODE_SYMBOL
823
+ #undef HUF_5X1_RELOAD_STREAM
824
+ }
825
+
826
+ _out:
827
+
828
+ /* Save the final values of each of the state variables back to args. */
829
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
830
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
831
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
832
+ }
833
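Editorial note (not from the library): the reload step in the loop above relies on a sentinel bit. OR-ing 1 into the freshly read container means that, after the decoder has shifted the container left by however many bits it consumed, the count of trailing zeros equals the number of bits consumed since the last refill. The standalone sketch below demonstrates the idea with a portable count-trailing-zeros fallback; the values are arbitrary.

#include <stdint.h>
#include <stdio.h>

static unsigned ctz64(uint64_t v) { unsigned n = 0; while (!(v & 1)) { v >>= 1; ++n; } return n; }

int main(void) {
    uint64_t bits = 0xF0F0F0F0F0F0F0F0ULL | 1;   /* refill + sentinel bit */
    unsigned consumed = 0;
    /* consume 11 bits, then 7 bits, as a Huffman decoder would */
    bits <<= 11; consumed += 11;
    bits <<= 7;  consumed += 7;
    printf("consumed=%u ctz=%u\n", consumed, ctz64(bits));   /* both print 18 */
    return 0;
}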
+
834
+ /**
835
+ * @returns @p dstSize on success (>= 6)
836
+ * 0 if the fallback implementation should be used
837
+ * An error if an error occurred
838
+ */
839
+ static HUF_FAST_BMI2_ATTRS
840
+ size_t
841
+ HUF_decompress4X1_usingDTable_internal_fast(
842
+ void* dst, size_t dstSize,
843
+ const void* cSrc, size_t cSrcSize,
844
+ const HUF_DTable* DTable,
845
+ HUF_DecompressFastLoopFn loopFn)
846
+ {
847
+ void const* dt = DTable + 1;
848
+ BYTE const* const ilowest = (BYTE const*)cSrc;
849
+ BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize);
850
+ HUF_DecompressFastArgs args;
851
+ { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
852
+ FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
853
+ if (ret == 0)
854
+ return 0;
855
+ }
856
+
857
+ assert(args.ip[0] >= args.ilowest);
858
+ loopFn(&args);
859
+
860
+ /* Our loop guarantees that ip[] >= ilowest and that we haven't
861
+ * overwritten any op[].
862
+ */
863
+ assert(args.ip[0] >= ilowest);
864
+ assert(args.ip[0] >= ilowest);
865
+ assert(args.ip[1] >= ilowest);
866
+ assert(args.ip[2] >= ilowest);
867
+ assert(args.ip[3] >= ilowest);
868
+ assert(args.op[3] <= oend);
869
+
870
+ assert(ilowest == args.ilowest);
871
+ assert(ilowest + 6 == args.iend[0]);
872
+ (void)ilowest;
873
+
874
+ /* finish bit streams one by one. */
875
+ { size_t const segmentSize = (dstSize+3) / 4;
876
+ BYTE* segmentEnd = (BYTE*)dst;
877
+ int i;
878
+ for (i = 0; i < 4; ++i) {
879
+ BIT_DStream_t bit;
880
+ if (segmentSize <= (size_t)(oend - segmentEnd))
881
+ segmentEnd += segmentSize;
882
+ else
883
+ segmentEnd = oend;
884
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
885
+ /* Decompress and validate that we've produced exactly the expected length. */
886
+ args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
887
+ if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
888
+ }
889
+ }
890
+
891
+ /* decoded size */
892
+ assert(dstSize != 0);
893
+ return dstSize;
894
+ }
895
+
896
+ HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
897
+
898
+ static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
899
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
900
+ {
901
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default;
902
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop;
903
+
904
+ #if DYNAMIC_BMI2
905
+ if (flags & HUF_flags_bmi2) {
906
+ fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2;
907
+ # if ZSTD_ENABLE_ASM_X86_64_BMI2
908
+ if (!(flags & HUF_flags_disableAsm)) {
909
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
910
+ }
911
+ # endif
912
+ } else {
913
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
914
+ }
915
+ #endif
916
+
917
+ #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
918
+ if (!(flags & HUF_flags_disableAsm)) {
919
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
920
+ }
921
+ #endif
922
+
923
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
924
+ size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
925
+ if (ret != 0)
926
+ return ret;
927
+ }
928
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
929
+ }
930
+
931
+ static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
932
+ const void* cSrc, size_t cSrcSize,
933
+ void* workSpace, size_t wkspSize, int flags)
934
+ {
935
+ const BYTE* ip = (const BYTE*) cSrc;
936
+
937
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
938
+ if (HUF_isError(hSize)) return hSize;
939
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
940
+ ip += hSize; cSrcSize -= hSize;
941
+
942
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
943
+ }
944
+
945
+ #endif /* HUF_FORCE_DECOMPRESS_X2 */
946
+
947
+
948
+ #ifndef HUF_FORCE_DECOMPRESS_X1
949
+
950
+ /* *************************/
951
+ /* double-symbols decoding */
952
+ /* *************************/
953
+
954
+ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
955
+ typedef struct { BYTE symbol; } sortedSymbol_t;
956
+ typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
957
+ typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
958
+
959
+ /**
960
+ * Constructs a HUF_DEltX2 in a U32.
961
+ */
962
+ static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
963
+ {
964
+ U32 seq;
965
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
966
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
967
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
968
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
969
+ if (MEM_isLittleEndian()) {
970
+ seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
971
+ return seq + (nbBits << 16) + ((U32)level << 24);
972
+ } else {
973
+ seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
974
+ return (seq << 16) + (nbBits << 8) + (U32)level;
975
+ }
976
+ }
977
+
978
+ /**
979
+ * Constructs a HUF_DEltX2.
980
+ */
981
+ static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
982
+ {
983
+ HUF_DEltX2 DElt;
984
+ U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
985
+ DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
986
+ ZSTD_memcpy(&DElt, &val, sizeof(val));
987
+ return DElt;
988
+ }
989
+
990
+ /**
991
+ * Constructs 2 HUF_DEltX2s and packs them into a U64.
992
+ */
993
+ static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
994
+ {
995
+ U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
996
+ return (U64)DElt + ((U64)DElt << 32);
997
+ }
998
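Editorial note (illustrative only, not part of the vendored file): the little-endian layout produced by HUF_buildDEltX2U32 above can be checked with a short standalone sketch. The DEltX2 struct mirrors HUF_DEltX2, and the symbol values are arbitrary; 'sequence' holds 1 or 2 literal bytes and 'length' tells the decoder how many of them to emit.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct { uint16_t sequence; uint8_t nbBits; uint8_t length; } DEltX2;

int main(void) {
    /* level-2 entry: base symbol 'a' followed by symbol 'b', 9 bits total */
    uint32_t const val = ('a' + ((uint32_t)'b' << 8)) + (9u << 16) + (2u << 24);
    DEltX2 e;
    memcpy(&e, &val, sizeof(val));   /* assumes a little-endian host */
    printf("sequence=0x%04x nbBits=%u length=%u\n", e.sequence, e.nbBits, e.length);
    return 0;
}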
+
999
+ /**
1000
+ * Fills the DTable rank with all the symbols from [begin, end) that are each
1001
+ * nbBits long.
1002
+ *
1003
+ * @param DTableRank The start of the rank in the DTable.
1004
+ * @param begin The first symbol to fill (inclusive).
1005
+  * @param end One past the last symbol to fill (exclusive).
1006
+ * @param nbBits Each symbol is nbBits long.
1007
+ * @param tableLog The table log.
1008
+ * @param baseSeq If level == 1 { 0 } else { the first level symbol }
1009
+ * @param level The level in the table. Must be 1 or 2.
1010
+ */
1011
+ static void HUF_fillDTableX2ForWeight(
1012
+ HUF_DEltX2* DTableRank,
1013
+ sortedSymbol_t const* begin, sortedSymbol_t const* end,
1014
+ U32 nbBits, U32 tableLog,
1015
+ U16 baseSeq, int const level)
1016
+ {
1017
+ U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
1018
+ const sortedSymbol_t* ptr;
1019
+ assert(level >= 1 && level <= 2);
1020
+ switch (length) {
1021
+ case 1:
1022
+ for (ptr = begin; ptr != end; ++ptr) {
1023
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
1024
+ *DTableRank++ = DElt;
1025
+ }
1026
+ break;
1027
+ case 2:
1028
+ for (ptr = begin; ptr != end; ++ptr) {
1029
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
1030
+ DTableRank[0] = DElt;
1031
+ DTableRank[1] = DElt;
1032
+ DTableRank += 2;
1033
+ }
1034
+ break;
1035
+ case 4:
1036
+ for (ptr = begin; ptr != end; ++ptr) {
1037
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
1038
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
1039
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
1040
+ DTableRank += 4;
1041
+ }
1042
+ break;
1043
+ case 8:
1044
+ for (ptr = begin; ptr != end; ++ptr) {
1045
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
1046
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
1047
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
1048
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
1049
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
1050
+ DTableRank += 8;
1051
+ }
1052
+ break;
1053
+ default:
1054
+ for (ptr = begin; ptr != end; ++ptr) {
1055
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
1056
+ HUF_DEltX2* const DTableRankEnd = DTableRank + length;
1057
+ for (; DTableRank != DTableRankEnd; DTableRank += 8) {
1058
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
1059
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
1060
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
1061
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
1062
+ }
1063
+ }
1064
+ break;
1065
+ }
1066
+ }
1067
+
1068
+ /* HUF_fillDTableX2Level2() :
1069
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
1070
+ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
1071
+ const U32* rankVal, const int minWeight, const int maxWeight1,
1072
+ const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
1073
+ U32 nbBitsBaseline, U16 baseSeq)
1074
+ {
1075
+ /* Fill skipped values (all positions up to rankVal[minWeight]).
1076
+  * These positions only get a single symbol because the combined weight
1077
+ * is too large.
1078
+ */
1079
+ if (minWeight>1) {
1080
+ U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
1081
+ U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
1082
+ int const skipSize = rankVal[minWeight];
1083
+ assert(length > 1);
1084
+ assert((U32)skipSize < length);
1085
+ switch (length) {
1086
+ case 2:
1087
+ assert(skipSize == 1);
1088
+ ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
1089
+ break;
1090
+ case 4:
1091
+ assert(skipSize <= 4);
1092
+ ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
1093
+ ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
1094
+ break;
1095
+ default:
1096
+ {
1097
+ int i;
1098
+ for (i = 0; i < skipSize; i += 8) {
1099
+ ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
1100
+ ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
1101
+ ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
1102
+ ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
1103
+ }
1104
+ }
1105
+ }
1106
+ }
1107
+
1108
+ /* Fill each of the second level symbols by weight. */
1109
+ {
1110
+ int w;
1111
+ for (w = minWeight; w < maxWeight1; ++w) {
1112
+ int const begin = rankStart[w];
1113
+ int const end = rankStart[w+1];
1114
+ U32 const nbBits = nbBitsBaseline - w;
1115
+ U32 const totalBits = nbBits + consumedBits;
1116
+ HUF_fillDTableX2ForWeight(
1117
+ DTable + rankVal[w],
1118
+ sortedSymbols + begin, sortedSymbols + end,
1119
+ totalBits, targetLog,
1120
+ baseSeq, /* level */ 2);
1121
+ }
1122
+ }
1123
+ }
1124
+
1125
+ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
1126
+ const sortedSymbol_t* sortedList,
1127
+ const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight,
1128
+ const U32 nbBitsBaseline)
1129
+ {
1130
+ U32* const rankVal = rankValOrigin[0];
1131
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
1132
+ const U32 minBits = nbBitsBaseline - maxWeight;
1133
+ int w;
1134
+ int const wEnd = (int)maxWeight + 1;
1135
+
1136
+ /* Fill DTable in order of weight. */
1137
+ for (w = 1; w < wEnd; ++w) {
1138
+ int const begin = (int)rankStart[w];
1139
+ int const end = (int)rankStart[w+1];
1140
+ U32 const nbBits = nbBitsBaseline - w;
1141
+
1142
+ if (targetLog-nbBits >= minBits) {
1143
+ /* Enough room for a second symbol. */
1144
+ int start = rankVal[w];
1145
+ U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
1146
+ int minWeight = nbBits + scaleLog;
1147
+ int s;
1148
+ if (minWeight < 1) minWeight = 1;
1149
+ /* Fill the DTable for every symbol of weight w.
1150
+ * These symbols get at least 1 second symbol.
1151
+ */
1152
+ for (s = begin; s != end; ++s) {
1153
+ HUF_fillDTableX2Level2(
1154
+ DTable + start, targetLog, nbBits,
1155
+ rankValOrigin[nbBits], minWeight, wEnd,
1156
+ sortedList, rankStart,
1157
+ nbBitsBaseline, sortedList[s].symbol);
1158
+ start += length;
1159
+ }
1160
+ } else {
1161
+ /* Only a single symbol. */
1162
+ HUF_fillDTableX2ForWeight(
1163
+ DTable + rankVal[w],
1164
+ sortedList + begin, sortedList + end,
1165
+ nbBits, targetLog,
1166
+ /* baseSeq */ 0, /* level */ 1);
1167
+ }
1168
+ }
1169
+ }
1170
+
1171
+ typedef struct {
1172
+ rankValCol_t rankVal[HUF_TABLELOG_MAX];
1173
+ U32 rankStats[HUF_TABLELOG_MAX + 1];
1174
+ U32 rankStart0[HUF_TABLELOG_MAX + 3];
1175
+ sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
1176
+ BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
1177
+ U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
1178
+ } HUF_ReadDTableX2_Workspace;
1179
+
1180
+ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
1181
+ const void* src, size_t srcSize,
1182
+ void* workSpace, size_t wkspSize, int flags)
1183
+ {
1184
+ U32 tableLog, maxW, nbSymbols;
1185
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
1186
+ U32 maxTableLog = dtd.maxTableLog;
1187
+ size_t iSize;
1188
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
1189
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
1190
+ U32 *rankStart;
1191
+
1192
+ HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
1193
+
1194
+ if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
1195
+
1196
+ rankStart = wksp->rankStart0 + 1;
1197
+ ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
1198
+ ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
1199
+
1200
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
1201
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
1202
+ /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
1203
+
1204
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags);
1205
+ if (HUF_isError(iSize)) return iSize;
1206
+
1207
+ /* check result */
1208
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
1209
+ if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;
1210
+
1211
+ /* find maxWeight */
1212
+ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
1213
+
1214
+ /* Get start index of each weight */
1215
+ { U32 w, nextRankStart = 0;
1216
+ for (w=1; w<maxW+1; w++) {
1217
+ U32 curr = nextRankStart;
1218
+ nextRankStart += wksp->rankStats[w];
1219
+ rankStart[w] = curr;
1220
+ }
1221
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */
1222
+ rankStart[maxW+1] = nextRankStart;
1223
+ }
1224
+
1225
+ /* sort symbols by weight */
1226
+ { U32 s;
1227
+ for (s=0; s<nbSymbols; s++) {
1228
+ U32 const w = wksp->weightList[s];
1229
+ U32 const r = rankStart[w]++;
1230
+ wksp->sortedSymbol[r].symbol = (BYTE)s;
1231
+ }
1232
+ rankStart[0] = 0; /* forget 0w symbols; this is the beginning of weight(1) */
1233
+ }
1234
+
1235
+ /* Build rankVal */
1236
+ { U32* const rankVal0 = wksp->rankVal[0];
1237
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
1238
+ U32 nextRankVal = 0;
1239
+ U32 w;
1240
+ for (w=1; w<maxW+1; w++) {
1241
+ U32 curr = nextRankVal;
1242
+ nextRankVal += wksp->rankStats[w] << (w+rescale);
1243
+ rankVal0[w] = curr;
1244
+ } }
1245
+ { U32 const minBits = tableLog+1 - maxW;
1246
+ U32 consumed;
1247
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
1248
+ U32* const rankValPtr = wksp->rankVal[consumed];
1249
+ U32 w;
1250
+ for (w = 1; w < maxW+1; w++) {
1251
+ rankValPtr[w] = rankVal0[w] >> consumed;
1252
+ } } } }
1253
+
1254
+ HUF_fillDTableX2(dt, maxTableLog,
1255
+ wksp->sortedSymbol,
1256
+ wksp->rankStart0, wksp->rankVal, maxW,
1257
+ tableLog+1);
1258
+
1259
+ dtd.tableLog = (BYTE)maxTableLog;
1260
+ dtd.tableType = 1;
1261
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
1262
+ return iSize;
1263
+ }
1264
+
1265
+
1266
+ FORCE_INLINE_TEMPLATE U32
1267
+ HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
1268
+ {
1269
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
1270
+ ZSTD_memcpy(op, &dt[val].sequence, 2);
1271
+ BIT_skipBits(DStream, dt[val].nbBits);
1272
+ return dt[val].length;
1273
+ }
1274
+
1275
+ FORCE_INLINE_TEMPLATE U32
1276
+ HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
1277
+ {
1278
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
1279
+ ZSTD_memcpy(op, &dt[val].sequence, 1);
1280
+ if (dt[val].length==1) {
1281
+ BIT_skipBits(DStream, dt[val].nbBits);
1282
+ } else {
1283
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
1284
+ BIT_skipBits(DStream, dt[val].nbBits);
1285
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
1286
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
1287
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
1288
+ }
1289
+ }
1290
+ return 1;
1291
+ }
1292
+
1293
+ #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
1294
+ do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0)
1295
+
1296
+ #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
1297
+ do { \
1298
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
1299
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
1300
+ } while (0)
1301
+
1302
+ #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
1303
+ do { \
1304
+ if (MEM_64bits()) \
1305
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
1306
+ } while (0)
1307
+
1308
+ HINT_INLINE size_t
1309
+ HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
1310
+ const HUF_DEltX2* const dt, const U32 dtLog)
1311
+ {
1312
+ BYTE* const pStart = p;
1313
+
1314
+ /* up to 8 symbols at a time */
1315
+ if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
1316
+ if (dtLog <= 11 && MEM_64bits()) {
1317
+ /* up to 10 symbols at a time */
1318
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
1319
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1320
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1321
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1322
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1323
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1324
+ }
1325
+ } else {
1326
+ /* up to 8 symbols at a time */
1327
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
1328
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1329
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
1330
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1331
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1332
+ }
1333
+ }
1334
+ } else {
1335
+ BIT_reloadDStream(bitDPtr);
1336
+ }
1337
+
1338
+ /* closer to end : up to 2 symbols at a time */
1339
+ if ((size_t)(pEnd - p) >= 2) {
1340
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
1341
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1342
+
1343
+ while (p <= pEnd-2)
1344
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
1345
+ }
1346
+
1347
+ if (p < pEnd)
1348
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
1349
+
1350
+ return p-pStart;
1351
+ }
1352
+
1353
+ FORCE_INLINE_TEMPLATE size_t
1354
+ HUF_decompress1X2_usingDTable_internal_body(
1355
+ void* dst, size_t dstSize,
1356
+ const void* cSrc, size_t cSrcSize,
1357
+ const HUF_DTable* DTable)
1358
+ {
1359
+ BIT_DStream_t bitD;
1360
+
1361
+ /* Init */
1362
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
1363
+
1364
+ /* decode */
1365
+ { BYTE* const ostart = (BYTE*) dst;
1366
+ BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)dstSize);
1367
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
1368
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
1369
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
1370
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
1371
+ }
1372
+
1373
+ /* check */
1374
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
1375
+
1376
+ /* decoded size */
1377
+ return dstSize;
1378
+ }
1379
+
1380
+ /* HUF_decompress4X2_usingDTable_internal_body():
1381
+ * Conditions:
1382
+ * @dstSize >= 6
1383
+ */
1384
+ FORCE_INLINE_TEMPLATE size_t
1385
+ HUF_decompress4X2_usingDTable_internal_body(
1386
+ void* dst, size_t dstSize,
1387
+ const void* cSrc, size_t cSrcSize,
1388
+ const HUF_DTable* DTable)
1389
+ {
1390
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
1391
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
1392
+
1393
+ { const BYTE* const istart = (const BYTE*) cSrc;
1394
+ BYTE* const ostart = (BYTE*) dst;
1395
+ BYTE* const oend = ostart + dstSize;
1396
+ BYTE* const olimit = oend - (sizeof(size_t)-1);
1397
+ const void* const dtPtr = DTable+1;
1398
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
1399
+
1400
+ /* Init */
1401
+ BIT_DStream_t bitD1;
1402
+ BIT_DStream_t bitD2;
1403
+ BIT_DStream_t bitD3;
1404
+ BIT_DStream_t bitD4;
1405
+ size_t const length1 = MEM_readLE16(istart);
1406
+ size_t const length2 = MEM_readLE16(istart+2);
1407
+ size_t const length3 = MEM_readLE16(istart+4);
1408
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
1409
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
1410
+ const BYTE* const istart2 = istart1 + length1;
1411
+ const BYTE* const istart3 = istart2 + length2;
1412
+ const BYTE* const istart4 = istart3 + length3;
1413
+ size_t const segmentSize = (dstSize+3) / 4;
1414
+ BYTE* const opStart2 = ostart + segmentSize;
1415
+ BYTE* const opStart3 = opStart2 + segmentSize;
1416
+ BYTE* const opStart4 = opStart3 + segmentSize;
1417
+ BYTE* op1 = ostart;
1418
+ BYTE* op2 = opStart2;
1419
+ BYTE* op3 = opStart3;
1420
+ BYTE* op4 = opStart4;
1421
+ U32 endSignal = 1;
1422
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
1423
+ U32 const dtLog = dtd.tableLog;
1424
+
1425
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
1426
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
1427
+ assert(dstSize >= 6 /* validated above */);
1428
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
1429
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
1430
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
1431
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
1432
+
1433
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
1434
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
1435
+ for ( ; (endSignal) & (op4 < olimit); ) {
1436
+ #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
1437
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1438
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
1439
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1440
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
1441
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1442
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
1443
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1444
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
1445
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
1446
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
1447
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1448
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
1449
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1450
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
1451
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1452
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
1453
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1454
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
1455
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
1456
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
1457
+ #else
1458
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1459
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1460
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1461
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1462
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
1463
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
1464
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
1465
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
1466
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1467
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1468
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1469
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1470
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
1471
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
1472
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
1473
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
1474
+ endSignal = (U32)LIKELY((U32)
1475
+ (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
1476
+ & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
1477
+ & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
1478
+ & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
1479
+ #endif
1480
+ }
1481
+ }
1482
+
1483
+ /* check corruption */
1484
+ if (op1 > opStart2) return ERROR(corruption_detected);
1485
+ if (op2 > opStart3) return ERROR(corruption_detected);
1486
+ if (op3 > opStart4) return ERROR(corruption_detected);
1487
+ /* note : op4 already verified within main loop */
1488
+
1489
+ /* finish bitStreams one by one */
1490
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
1491
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
1492
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
1493
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
1494
+
1495
+ /* check */
1496
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
1497
+ if (!endCheck) return ERROR(corruption_detected); }
1498
+
1499
+ /* decoded size */
1500
+ return dstSize;
1501
+ }
1502
+ }
1503
+
1504
+ #if HUF_NEED_BMI2_FUNCTION
1505
+ static BMI2_TARGET_ATTRIBUTE
1506
+ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
1507
+ size_t cSrcSize, HUF_DTable const* DTable) {
1508
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
1509
+ }
1510
+ #endif
1511
+
1512
+ static
1513
+ size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
1514
+ size_t cSrcSize, HUF_DTable const* DTable) {
1515
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
1516
+ }
1517
+
1518
+ #if ZSTD_ENABLE_ASM_X86_64_BMI2
1519
+
1520
+ HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
1521
+
1522
+ #endif
1523
+
1524
+ static HUF_FAST_BMI2_ATTRS
1525
+ void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
1526
+ {
1527
+ U64 bits[4];
1528
+ BYTE const* ip[4];
1529
+ BYTE* op[4];
1530
+ BYTE* oend[4];
1531
+ HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt;
1532
+ BYTE const* const ilowest = args->ilowest;
1533
+
1534
+ /* Copy the arguments to local registers. */
1535
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
1536
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
1537
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
1538
+
1539
+ oend[0] = op[1];
1540
+ oend[1] = op[2];
1541
+ oend[2] = op[3];
1542
+ oend[3] = args->oend;
1543
+
1544
+ assert(MEM_isLittleEndian());
1545
+ assert(!MEM_32bits());
1546
+
1547
+ for (;;) {
1548
+ BYTE* olimit;
1549
+ int stream;
1550
+
1551
+ /* Assert loop preconditions */
1552
+ #ifndef NDEBUG
1553
+ for (stream = 0; stream < 4; ++stream) {
1554
+ assert(op[stream] <= oend[stream]);
1555
+ assert(ip[stream] >= ilowest);
1556
+ }
1557
+ #endif
1558
+ /* Compute olimit */
1559
+ {
1560
+ /* Each loop does 5 table lookups for each of the 4 streams.
1561
+ * Each table lookup consumes up to 11 bits of input, and produces
1562
+ * up to 2 bytes of output.
1563
+ */
1564
+ /* We can consume up to 7 bytes of input per iteration per stream.
1565
+ * We also know that each input pointer is >= ip[0]. So we can run
1566
+ * iters loops before running out of input.
1567
+ */
1568
+ size_t iters = (size_t)(ip[0] - ilowest) / 7;
1569
+ /* Each iteration can produce up to 10 bytes of output per stream.
1570
+  * Each output stream may advance at different rates. So take the
1571
+ * minimum number of safe iterations among all the output streams.
1572
+ */
1573
+ for (stream = 0; stream < 4; ++stream) {
1574
+ size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10;
1575
+ iters = MIN(iters, oiters);
1576
+ }
1577
+
1578
+ /* Each iteration produces at least 5 output symbols. So until
1579
+ * op[3] crosses olimit, we know we haven't executed iters
1580
+ * iterations yet. This saves us maintaining an iters counter,
1581
+ * at the expense of computing the remaining # of iterations
1582
+ * more frequently.
1583
+ */
1584
+ olimit = op[3] + (iters * 5);
1585
+
1586
+ /* Exit the fast decoding loop once we reach the end. */
1587
+ if (op[3] == olimit)
1588
+ break;
1589
+
1590
+ /* Exit the decoding loop if any input pointer has crossed the
1591
+ * previous one. This indicates corruption, and a precondition
1592
+ * to our loop is that ip[i] >= ip[0].
1593
+ */
1594
+ for (stream = 1; stream < 4; ++stream) {
1595
+ if (ip[stream] < ip[stream - 1])
1596
+ goto _out;
1597
+ }
1598
+ }
1599
+
1600
+ #ifndef NDEBUG
1601
+ for (stream = 1; stream < 4; ++stream) {
1602
+ assert(ip[stream] >= ip[stream - 1]);
1603
+ }
1604
+ #endif
1605
+
1606
+ #define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \
1607
+ do { \
1608
+ if ((_decode3) || (_stream) != 3) { \
1609
+ U64 const index = bits[(_stream)] >> 53; \
1610
+ size_t const entry = MEM_readLE32(&dtable[index]); \
1611
+ MEM_write16(op[(_stream)], (U16)entry); \
1612
+ bits[(_stream)] <<= (entry >> 16) & 0x3F; \
1613
+ op[(_stream)] += entry >> 24; \
1614
+ } \
1615
+ } while (0)
1616
+
1617
+ #define HUF_5X2_RELOAD_STREAM(_stream, _decode3) \
1618
+ do { \
1619
+ if (_decode3) HUF_4X2_DECODE_SYMBOL(3, 1); \
1620
+ { \
1621
+ U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
1622
+ U64 const nbBits = ctz & 7; \
1623
+ U64 const nbBytes = ctz >> 3; \
1624
+ ip[(_stream)] -= nbBytes; \
1625
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
1626
+ bits[(_stream)] <<= nbBits; \
1627
+ } \
1628
+ } while (0)
1629
+
1630
+ #if defined(__aarch64__)
1631
+ # define HUF_4X2_4WAY 1
1632
+ #else
1633
+ # define HUF_4X2_4WAY 0
1634
+ #endif
1635
+ #define HUF_4X2_3WAY !HUF_4X2_4WAY
1636
+
1637
+ /* Manually unroll the loop because compilers don't consistently
1638
+ * unroll the inner loops, which destroys performance.
1639
+ */
1640
+ do {
1641
+ /* Decode 5 symbols from each of the first 3 or 4 streams.
1642
+ * In the 3-way case the final stream will be decoded during
1643
+ * the reload phase to reduce register pressure.
1644
+ */
1645
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY);
1646
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY);
1647
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY);
1648
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY);
1649
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY);
1650
+
1651
+ /* In the 3-way case decode one symbol from the final stream. */
1652
+ HUF_4X2_DECODE_SYMBOL(3, HUF_4X2_3WAY);
1653
+
1654
+ /* In the 3-way case decode 4 symbols from the final stream &
1655
+ * reload bitstreams. The final stream is reloaded last, meaning
1656
+ * that all 5 symbols are decoded from the final stream before
1657
+ * it is reloaded.
1658
+ */
1659
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_5X2_RELOAD_STREAM, HUF_4X2_3WAY);
1660
+ } while (op[3] < olimit);
1661
+ }
1662
+
1663
+ #undef HUF_4X2_DECODE_SYMBOL
1664
+ #undef HUF_5X2_RELOAD_STREAM
1665
+
1666
+ _out:
1667
+
1668
+ /* Save the final values of each of the state variables back to args. */
1669
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
1670
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
1671
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
1672
+ }
1673
+
1674
+
1675
+ static HUF_FAST_BMI2_ATTRS size_t
1676
+ HUF_decompress4X2_usingDTable_internal_fast(
1677
+ void* dst, size_t dstSize,
1678
+ const void* cSrc, size_t cSrcSize,
1679
+ const HUF_DTable* DTable,
1680
+ HUF_DecompressFastLoopFn loopFn) {
1681
+ void const* dt = DTable + 1;
1682
+ const BYTE* const ilowest = (const BYTE*)cSrc;
1683
+ BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize);
1684
+ HUF_DecompressFastArgs args;
1685
+ {
1686
+ size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
1687
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
1688
+ if (ret == 0)
1689
+ return 0;
1690
+ }
1691
+
1692
+ assert(args.ip[0] >= args.ilowest);
1693
+ loopFn(&args);
1694
+
1695
+ /* note : op4 already verified within main loop */
1696
+ assert(args.ip[0] >= ilowest);
1697
+ assert(args.ip[1] >= ilowest);
1698
+ assert(args.ip[2] >= ilowest);
1699
+ assert(args.ip[3] >= ilowest);
1700
+ assert(args.op[3] <= oend);
1701
+
1702
+ assert(ilowest == args.ilowest);
1703
+ assert(ilowest + 6 == args.iend[0]);
1704
+ (void)ilowest;
1705
+
1706
+ /* finish bitStreams one by one */
1707
+ {
1708
+ size_t const segmentSize = (dstSize+3) / 4;
1709
+ BYTE* segmentEnd = (BYTE*)dst;
1710
+ int i;
1711
+ for (i = 0; i < 4; ++i) {
1712
+ BIT_DStream_t bit;
1713
+ if (segmentSize <= (size_t)(oend - segmentEnd))
1714
+ segmentEnd += segmentSize;
1715
+ else
1716
+ segmentEnd = oend;
1717
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
1718
+ args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
1719
+ if (args.op[i] != segmentEnd)
1720
+ return ERROR(corruption_detected);
1721
+ }
1722
+ }
1723
+
1724
+ /* decoded size */
1725
+ return dstSize;
1726
+ }
1727
+
1728
+ static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
1729
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
1730
+ {
1731
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default;
1732
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop;
1733
+
1734
+ #if DYNAMIC_BMI2
1735
+ if (flags & HUF_flags_bmi2) {
1736
+ fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2;
1737
+ # if ZSTD_ENABLE_ASM_X86_64_BMI2
1738
+ if (!(flags & HUF_flags_disableAsm)) {
1739
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
1740
+ }
1741
+ # endif
1742
+ } else {
1743
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
1744
+ }
1745
+ #endif
1746
+
1747
+ #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
1748
+ if (!(flags & HUF_flags_disableAsm)) {
1749
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
1750
+ }
1751
+ #endif
1752
+
1753
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
1754
+ size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
1755
+ if (ret != 0)
1756
+ return ret;
1757
+ }
1758
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
1759
+ }
1760
+
1761
+ HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
1762
+
1763
+ size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
1764
+ const void* cSrc, size_t cSrcSize,
1765
+ void* workSpace, size_t wkspSize, int flags)
1766
+ {
1767
+ const BYTE* ip = (const BYTE*) cSrc;
1768
+
1769
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
1770
+ workSpace, wkspSize, flags);
1771
+ if (HUF_isError(hSize)) return hSize;
1772
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
1773
+ ip += hSize; cSrcSize -= hSize;
1774
+
1775
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags);
1776
+ }
1777
+
1778
+ static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
1779
+ const void* cSrc, size_t cSrcSize,
1780
+ void* workSpace, size_t wkspSize, int flags)
1781
+ {
1782
+ const BYTE* ip = (const BYTE*) cSrc;
1783
+
1784
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
1785
+ workSpace, wkspSize, flags);
1786
+ if (HUF_isError(hSize)) return hSize;
1787
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
1788
+ ip += hSize; cSrcSize -= hSize;
1789
+
1790
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
1791
+ }
1792
+
1793
+ #endif /* HUF_FORCE_DECOMPRESS_X1 */
1794
+
1795
+
1796
+ /* ***********************************/
1797
+ /* Universal decompression selectors */
1798
+ /* ***********************************/
1799
+
1800
+
1801
+ #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
1802
+ typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
1803
+ static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
1804
+ {
1805
+ /* single, double */
1806
+ {{0,0}, {1,1}}, /* Q==0 : impossible */
1807
+ {{0,0}, {1,1}}, /* Q==1 : impossible */
1808
+ {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
1809
+ {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
1810
+ {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
1811
+ {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
1812
+ {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
1813
+ {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
1814
+ {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
1815
+ {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
1816
+ {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
1817
+ {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
1818
+ {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
1819
+ {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
1820
+ {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
1821
+ {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
1822
+ };
1823
+ #endif
1824
+
1825
+ /** HUF_selectDecoder() :
1826
+ * Tells which decoder is likely to decode faster,
1827
+ * based on a set of pre-computed metrics.
1828
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
1829
+ * Assumption : 0 < dstSize <= 128 KB */
1830
+ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
1831
+ {
1832
+ assert(dstSize > 0);
1833
+ assert(dstSize <= 128*1024);
1834
+ #if defined(HUF_FORCE_DECOMPRESS_X1)
1835
+ (void)dstSize;
1836
+ (void)cSrcSize;
1837
+ return 0;
1838
+ #elif defined(HUF_FORCE_DECOMPRESS_X2)
1839
+ (void)dstSize;
1840
+ (void)cSrcSize;
1841
+ return 1;
1842
+ #else
1843
+ /* decoder timing evaluation */
1844
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
1845
+ U32 const D256 = (U32)(dstSize >> 8);
1846
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
1847
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
1848
+ DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
1849
+ return DTime1 < DTime0;
1850
+ }
1851
+ #endif
1852
+ }
1853
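Editorial note (illustration, not library code): to see how the heuristic above behaves, the sketch below evaluates it for a hypothetical 100 KB block compressed to 60 KB, using the Q==9 row of the algoTime table above.

#include <stdio.h>

int main(void) {
    size_t const dstSize = 100 * 1024, cSrcSize = 60 * 1024;
    unsigned const Q = (unsigned)(cSrcSize * 16 / dstSize);    /* = 9 (56-62%) */
    unsigned const D256 = (unsigned)(dstSize >> 8);            /* = 400 */
    unsigned const DTime0 = 582 + 187 * D256;                  /* single-symbol (X1) estimate */
    unsigned DTime1 = 1570 + 114 * D256;                       /* double-symbol (X2) estimate */
    DTime1 += DTime1 >> 5;                                     /* small penalty for larger tables */
    printf("Q=%u  X1=%u  X2=%u  -> use %s\n",
           Q, DTime0, DTime1, DTime1 < DTime0 ? "X2" : "X1");
    return 0;
}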
+
1854
+ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
1855
+ const void* cSrc, size_t cSrcSize,
1856
+ void* workSpace, size_t wkspSize, int flags)
1857
+ {
1858
+ /* validation checks */
1859
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
1860
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
1861
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
1862
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
1863
+
1864
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
1865
+ #if defined(HUF_FORCE_DECOMPRESS_X1)
1866
+ (void)algoNb;
1867
+ assert(algoNb == 0);
1868
+ return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
1869
+ cSrcSize, workSpace, wkspSize, flags);
1870
+ #elif defined(HUF_FORCE_DECOMPRESS_X2)
1871
+ (void)algoNb;
1872
+ assert(algoNb == 1);
1873
+ return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
1874
+ cSrcSize, workSpace, wkspSize, flags);
1875
+ #else
1876
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
1877
+ cSrcSize, workSpace, wkspSize, flags):
1878
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
1879
+ cSrcSize, workSpace, wkspSize, flags);
1880
+ #endif
1881
+ }
1882
+ }
1883
+
1884
+
1885
+ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
1886
+ {
1887
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
1888
+ #if defined(HUF_FORCE_DECOMPRESS_X1)
1889
+ (void)dtd;
1890
+ assert(dtd.tableType == 0);
1891
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1892
+ #elif defined(HUF_FORCE_DECOMPRESS_X2)
1893
+ (void)dtd;
1894
+ assert(dtd.tableType == 1);
1895
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1896
+ #else
1897
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
1898
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1899
+ #endif
1900
+ }
1901
+
1902
+ #ifndef HUF_FORCE_DECOMPRESS_X2
1903
+ size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
1904
+ {
1905
+ const BYTE* ip = (const BYTE*) cSrc;
1906
+
1907
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
1908
+ if (HUF_isError(hSize)) return hSize;
1909
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
1910
+ ip += hSize; cSrcSize -= hSize;
1911
+
1912
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
1913
+ }
1914
+ #endif
1915
+
1916
+ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
1917
+ {
1918
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
1919
+ #if defined(HUF_FORCE_DECOMPRESS_X1)
1920
+ (void)dtd;
1921
+ assert(dtd.tableType == 0);
1922
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1923
+ #elif defined(HUF_FORCE_DECOMPRESS_X2)
1924
+ (void)dtd;
1925
+ assert(dtd.tableType == 1);
1926
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1927
+ #else
1928
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
1929
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
1930
+ #endif
1931
+ }
1932
+
1933
+ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
1934
+ {
1935
+ /* validation checks */
1936
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
1937
+ if (cSrcSize == 0) return ERROR(corruption_detected);
1938
+
1939
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
1940
+ #if defined(HUF_FORCE_DECOMPRESS_X1)
1941
+ (void)algoNb;
1942
+ assert(algoNb == 0);
1943
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
1944
+ #elif defined(HUF_FORCE_DECOMPRESS_X2)
1945
+ (void)algoNb;
1946
+ assert(algoNb == 1);
1947
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
1948
+ #else
1949
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) :
1950
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
1951
+ #endif
1952
+ }
1953
+ }