image_pack 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (319) hide show
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +18 -0
  3. data/LICENSE.txt +21 -0
  4. data/README.md +140 -0
  5. data/THIRD_PARTY_NOTICES.md +8 -0
  6. data/ext/image_pack/extconf.rb +515 -0
  7. data/ext/image_pack/image_pack.c +1618 -0
  8. data/ext/image_pack/vendor/.vendored +1 -0
  9. data/ext/image_pack/vendor/mozjpeg/BUILDING.txt +902 -0
  10. data/ext/image_pack/vendor/mozjpeg/CMakeLists.txt +1593 -0
  11. data/ext/image_pack/vendor/mozjpeg/LICENSE.md +132 -0
  12. data/ext/image_pack/vendor/mozjpeg/README-mozilla.txt +194 -0
  13. data/ext/image_pack/vendor/mozjpeg/README-turbo.txt +346 -0
  14. data/ext/image_pack/vendor/mozjpeg/README.ijg +258 -0
  15. data/ext/image_pack/vendor/mozjpeg/README.md +29 -0
  16. data/ext/image_pack/vendor/mozjpeg/cderror.h +128 -0
  17. data/ext/image_pack/vendor/mozjpeg/cdjpeg.c +156 -0
  18. data/ext/image_pack/vendor/mozjpeg/cdjpeg.h +171 -0
  19. data/ext/image_pack/vendor/mozjpeg/cjpeg.c +961 -0
  20. data/ext/image_pack/vendor/mozjpeg/cmyk.h +60 -0
  21. data/ext/image_pack/vendor/mozjpeg/coderules.txt +78 -0
  22. data/ext/image_pack/vendor/mozjpeg/croptest.in +95 -0
  23. data/ext/image_pack/vendor/mozjpeg/djpeg.c +855 -0
  24. data/ext/image_pack/vendor/mozjpeg/example.txt +464 -0
  25. data/ext/image_pack/vendor/mozjpeg/jaricom.c +157 -0
  26. data/ext/image_pack/vendor/mozjpeg/jcapimin.c +307 -0
  27. data/ext/image_pack/vendor/mozjpeg/jcapistd.c +168 -0
  28. data/ext/image_pack/vendor/mozjpeg/jcarith.c +972 -0
  29. data/ext/image_pack/vendor/mozjpeg/jccoefct.c +609 -0
  30. data/ext/image_pack/vendor/mozjpeg/jccolext.c +144 -0
  31. data/ext/image_pack/vendor/mozjpeg/jccolor.c +721 -0
  32. data/ext/image_pack/vendor/mozjpeg/jcdctmgr.c +1776 -0
  33. data/ext/image_pack/vendor/mozjpeg/jcext.c +219 -0
  34. data/ext/image_pack/vendor/mozjpeg/jchuff.c +1146 -0
  35. data/ext/image_pack/vendor/mozjpeg/jchuff.h +57 -0
  36. data/ext/image_pack/vendor/mozjpeg/jcicc.c +105 -0
  37. data/ext/image_pack/vendor/mozjpeg/jcinit.c +82 -0
  38. data/ext/image_pack/vendor/mozjpeg/jcmainct.c +162 -0
  39. data/ext/image_pack/vendor/mozjpeg/jcmarker.c +844 -0
  40. data/ext/image_pack/vendor/mozjpeg/jcmaster.c +958 -0
  41. data/ext/image_pack/vendor/mozjpeg/jcmaster.h +56 -0
  42. data/ext/image_pack/vendor/mozjpeg/jcomapi.c +109 -0
  43. data/ext/image_pack/vendor/mozjpeg/jconfig.h.in +37 -0
  44. data/ext/image_pack/vendor/mozjpeg/jconfig.txt +93 -0
  45. data/ext/image_pack/vendor/mozjpeg/jconfigint.h.in +44 -0
  46. data/ext/image_pack/vendor/mozjpeg/jcparam.c +991 -0
  47. data/ext/image_pack/vendor/mozjpeg/jcphuff.c +1123 -0
  48. data/ext/image_pack/vendor/mozjpeg/jcprepct.c +351 -0
  49. data/ext/image_pack/vendor/mozjpeg/jcsample.c +522 -0
  50. data/ext/image_pack/vendor/mozjpeg/jcstest.c +126 -0
  51. data/ext/image_pack/vendor/mozjpeg/jctrans.c +408 -0
  52. data/ext/image_pack/vendor/mozjpeg/jdapimin.c +407 -0
  53. data/ext/image_pack/vendor/mozjpeg/jdapistd.c +691 -0
  54. data/ext/image_pack/vendor/mozjpeg/jdarith.c +782 -0
  55. data/ext/image_pack/vendor/mozjpeg/jdatadst-tj.c +198 -0
  56. data/ext/image_pack/vendor/mozjpeg/jdatadst.c +299 -0
  57. data/ext/image_pack/vendor/mozjpeg/jdatasrc-tj.c +194 -0
  58. data/ext/image_pack/vendor/mozjpeg/jdatasrc.c +295 -0
  59. data/ext/image_pack/vendor/mozjpeg/jdcoefct.c +881 -0
  60. data/ext/image_pack/vendor/mozjpeg/jdcoefct.h +83 -0
  61. data/ext/image_pack/vendor/mozjpeg/jdcol565.c +384 -0
  62. data/ext/image_pack/vendor/mozjpeg/jdcolext.c +141 -0
  63. data/ext/image_pack/vendor/mozjpeg/jdcolor.c +881 -0
  64. data/ext/image_pack/vendor/mozjpeg/jdct.h +208 -0
  65. data/ext/image_pack/vendor/mozjpeg/jddctmgr.c +367 -0
  66. data/ext/image_pack/vendor/mozjpeg/jdhuff.c +834 -0
  67. data/ext/image_pack/vendor/mozjpeg/jdhuff.h +247 -0
  68. data/ext/image_pack/vendor/mozjpeg/jdicc.c +167 -0
  69. data/ext/image_pack/vendor/mozjpeg/jdinput.c +408 -0
  70. data/ext/image_pack/vendor/mozjpeg/jdmainct.c +460 -0
  71. data/ext/image_pack/vendor/mozjpeg/jdmainct.h +71 -0
  72. data/ext/image_pack/vendor/mozjpeg/jdmarker.c +1374 -0
  73. data/ext/image_pack/vendor/mozjpeg/jdmaster.c +727 -0
  74. data/ext/image_pack/vendor/mozjpeg/jdmaster.h +33 -0
  75. data/ext/image_pack/vendor/mozjpeg/jdmerge.c +587 -0
  76. data/ext/image_pack/vendor/mozjpeg/jdmerge.h +47 -0
  77. data/ext/image_pack/vendor/mozjpeg/jdmrg565.c +354 -0
  78. data/ext/image_pack/vendor/mozjpeg/jdmrgext.c +184 -0
  79. data/ext/image_pack/vendor/mozjpeg/jdphuff.c +679 -0
  80. data/ext/image_pack/vendor/mozjpeg/jdpostct.c +294 -0
  81. data/ext/image_pack/vendor/mozjpeg/jdsample.c +524 -0
  82. data/ext/image_pack/vendor/mozjpeg/jdsample.h +50 -0
  83. data/ext/image_pack/vendor/mozjpeg/jdtrans.c +156 -0
  84. data/ext/image_pack/vendor/mozjpeg/jerror.c +251 -0
  85. data/ext/image_pack/vendor/mozjpeg/jerror.h +335 -0
  86. data/ext/image_pack/vendor/mozjpeg/jfdctflt.c +169 -0
  87. data/ext/image_pack/vendor/mozjpeg/jfdctfst.c +227 -0
  88. data/ext/image_pack/vendor/mozjpeg/jfdctint.c +288 -0
  89. data/ext/image_pack/vendor/mozjpeg/jidctflt.c +240 -0
  90. data/ext/image_pack/vendor/mozjpeg/jidctfst.c +371 -0
  91. data/ext/image_pack/vendor/mozjpeg/jidctint.c +2627 -0
  92. data/ext/image_pack/vendor/mozjpeg/jidctred.c +409 -0
  93. data/ext/image_pack/vendor/mozjpeg/jinclude.h +147 -0
  94. data/ext/image_pack/vendor/mozjpeg/jmemmgr.c +1180 -0
  95. data/ext/image_pack/vendor/mozjpeg/jmemnobs.c +110 -0
  96. data/ext/image_pack/vendor/mozjpeg/jmemsys.h +178 -0
  97. data/ext/image_pack/vendor/mozjpeg/jmorecfg.h +382 -0
  98. data/ext/image_pack/vendor/mozjpeg/jpeg_nbits_table.h +4098 -0
  99. data/ext/image_pack/vendor/mozjpeg/jpegcomp.h +32 -0
  100. data/ext/image_pack/vendor/mozjpeg/jpegint.h +453 -0
  101. data/ext/image_pack/vendor/mozjpeg/jpeglib.h +1211 -0
  102. data/ext/image_pack/vendor/mozjpeg/jpegtran.c +827 -0
  103. data/ext/image_pack/vendor/mozjpeg/jpegyuv.c +172 -0
  104. data/ext/image_pack/vendor/mozjpeg/jquant1.c +856 -0
  105. data/ext/image_pack/vendor/mozjpeg/jquant2.c +1286 -0
  106. data/ext/image_pack/vendor/mozjpeg/jsimd.h +123 -0
  107. data/ext/image_pack/vendor/mozjpeg/jsimd_none.c +431 -0
  108. data/ext/image_pack/vendor/mozjpeg/jsimddct.h +70 -0
  109. data/ext/image_pack/vendor/mozjpeg/jstdhuff.c +144 -0
  110. data/ext/image_pack/vendor/mozjpeg/jutils.c +133 -0
  111. data/ext/image_pack/vendor/mozjpeg/jversion.h.in +56 -0
  112. data/ext/image_pack/vendor/mozjpeg/libjpeg.map.in +11 -0
  113. data/ext/image_pack/vendor/mozjpeg/libjpeg.txt +3150 -0
  114. data/ext/image_pack/vendor/mozjpeg/rdbmp.c +690 -0
  115. data/ext/image_pack/vendor/mozjpeg/rdcolmap.c +253 -0
  116. data/ext/image_pack/vendor/mozjpeg/rdgif.c +720 -0
  117. data/ext/image_pack/vendor/mozjpeg/rdjpeg.c +160 -0
  118. data/ext/image_pack/vendor/mozjpeg/rdjpgcom.c +494 -0
  119. data/ext/image_pack/vendor/mozjpeg/rdpng.c +194 -0
  120. data/ext/image_pack/vendor/mozjpeg/rdppm.c +781 -0
  121. data/ext/image_pack/vendor/mozjpeg/rdswitch.c +642 -0
  122. data/ext/image_pack/vendor/mozjpeg/rdtarga.c +508 -0
  123. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch32/jccolext-neon.c +148 -0
  124. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch32/jchuff-neon.c +334 -0
  125. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch32/jsimd.c +976 -0
  126. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch32/jsimd_neon.S +1200 -0
  127. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch64/jccolext-neon.c +316 -0
  128. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch64/jchuff-neon.c +411 -0
  129. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch64/jsimd.c +1053 -0
  130. data/ext/image_pack/vendor/mozjpeg/simd/arm/aarch64/jsimd_neon.S +2254 -0
  131. data/ext/image_pack/vendor/mozjpeg/simd/arm/align.h +28 -0
  132. data/ext/image_pack/vendor/mozjpeg/simd/arm/jccolor-neon.c +160 -0
  133. data/ext/image_pack/vendor/mozjpeg/simd/arm/jcgray-neon.c +120 -0
  134. data/ext/image_pack/vendor/mozjpeg/simd/arm/jcgryext-neon.c +106 -0
  135. data/ext/image_pack/vendor/mozjpeg/simd/arm/jchuff.h +131 -0
  136. data/ext/image_pack/vendor/mozjpeg/simd/arm/jcphuff-neon.c +623 -0
  137. data/ext/image_pack/vendor/mozjpeg/simd/arm/jcsample-neon.c +192 -0
  138. data/ext/image_pack/vendor/mozjpeg/simd/arm/jdcolext-neon.c +374 -0
  139. data/ext/image_pack/vendor/mozjpeg/simd/arm/jdcolor-neon.c +141 -0
  140. data/ext/image_pack/vendor/mozjpeg/simd/arm/jdmerge-neon.c +144 -0
  141. data/ext/image_pack/vendor/mozjpeg/simd/arm/jdmrgext-neon.c +723 -0
  142. data/ext/image_pack/vendor/mozjpeg/simd/arm/jdsample-neon.c +569 -0
  143. data/ext/image_pack/vendor/mozjpeg/simd/arm/jfdctfst-neon.c +214 -0
  144. data/ext/image_pack/vendor/mozjpeg/simd/arm/jfdctint-neon.c +376 -0
  145. data/ext/image_pack/vendor/mozjpeg/simd/arm/jidctfst-neon.c +472 -0
  146. data/ext/image_pack/vendor/mozjpeg/simd/arm/jidctint-neon.c +801 -0
  147. data/ext/image_pack/vendor/mozjpeg/simd/arm/jidctred-neon.c +486 -0
  148. data/ext/image_pack/vendor/mozjpeg/simd/arm/jquanti-neon.c +193 -0
  149. data/ext/image_pack/vendor/mozjpeg/simd/arm/neon-compat.h +26 -0
  150. data/ext/image_pack/vendor/mozjpeg/simd/arm/neon-compat.h.in +37 -0
  151. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolext-avx2.asm +578 -0
  152. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolext-mmx.asm +476 -0
  153. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolext-sse2.asm +503 -0
  154. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolor-avx2.asm +121 -0
  155. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolor-mmx.asm +121 -0
  156. data/ext/image_pack/vendor/mozjpeg/simd/i386/jccolor-sse2.asm +120 -0
  157. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgray-avx2.asm +113 -0
  158. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgray-mmx.asm +113 -0
  159. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgray-sse2.asm +112 -0
  160. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgryext-avx2.asm +457 -0
  161. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgryext-mmx.asm +355 -0
  162. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcgryext-sse2.asm +382 -0
  163. data/ext/image_pack/vendor/mozjpeg/simd/i386/jchuff-sse2.asm +761 -0
  164. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcphuff-sse2.asm +662 -0
  165. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcsample-avx2.asm +388 -0
  166. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcsample-mmx.asm +324 -0
  167. data/ext/image_pack/vendor/mozjpeg/simd/i386/jcsample-sse2.asm +351 -0
  168. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolext-avx2.asm +515 -0
  169. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolext-mmx.asm +404 -0
  170. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolext-sse2.asm +458 -0
  171. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolor-avx2.asm +118 -0
  172. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolor-mmx.asm +117 -0
  173. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdcolor-sse2.asm +117 -0
  174. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmerge-avx2.asm +136 -0
  175. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmerge-mmx.asm +123 -0
  176. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmerge-sse2.asm +135 -0
  177. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmrgext-avx2.asm +575 -0
  178. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmrgext-mmx.asm +460 -0
  179. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdmrgext-sse2.asm +517 -0
  180. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdsample-avx2.asm +760 -0
  181. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdsample-mmx.asm +731 -0
  182. data/ext/image_pack/vendor/mozjpeg/simd/i386/jdsample-sse2.asm +724 -0
  183. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctflt-3dn.asm +318 -0
  184. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctflt-sse.asm +369 -0
  185. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctfst-mmx.asm +395 -0
  186. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctfst-sse2.asm +403 -0
  187. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctint-avx2.asm +331 -0
  188. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctint-mmx.asm +620 -0
  189. data/ext/image_pack/vendor/mozjpeg/simd/i386/jfdctint-sse2.asm +633 -0
  190. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctflt-3dn.asm +451 -0
  191. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctflt-sse.asm +571 -0
  192. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctflt-sse2.asm +497 -0
  193. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctfst-mmx.asm +499 -0
  194. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctfst-sse2.asm +501 -0
  195. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctint-avx2.asm +453 -0
  196. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctint-mmx.asm +851 -0
  197. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctint-sse2.asm +858 -0
  198. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctred-mmx.asm +704 -0
  199. data/ext/image_pack/vendor/mozjpeg/simd/i386/jidctred-sse2.asm +592 -0
  200. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquant-3dn.asm +230 -0
  201. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquant-mmx.asm +276 -0
  202. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquant-sse.asm +208 -0
  203. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquantf-sse2.asm +168 -0
  204. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquanti-avx2.asm +188 -0
  205. data/ext/image_pack/vendor/mozjpeg/simd/i386/jquanti-sse2.asm +201 -0
  206. data/ext/image_pack/vendor/mozjpeg/simd/i386/jsimd.c +1312 -0
  207. data/ext/image_pack/vendor/mozjpeg/simd/i386/jsimdcpu.asm +135 -0
  208. data/ext/image_pack/vendor/mozjpeg/simd/jsimd.h +1258 -0
  209. data/ext/image_pack/vendor/mozjpeg/simd/mips/jsimd.c +1143 -0
  210. data/ext/image_pack/vendor/mozjpeg/simd/mips/jsimd_dspr2.S +4543 -0
  211. data/ext/image_pack/vendor/mozjpeg/simd/mips/jsimd_dspr2_asm.h +292 -0
  212. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jccolext-mmi.c +455 -0
  213. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jccolor-mmi.c +148 -0
  214. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jcgray-mmi.c +132 -0
  215. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jcgryext-mmi.c +374 -0
  216. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jcsample-mmi.c +98 -0
  217. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jcsample.h +28 -0
  218. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jdcolext-mmi.c +415 -0
  219. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jdcolor-mmi.c +139 -0
  220. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jdmerge-mmi.c +149 -0
  221. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jdmrgext-mmi.c +615 -0
  222. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jdsample-mmi.c +304 -0
  223. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jfdctfst-mmi.c +255 -0
  224. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jfdctint-mmi.c +398 -0
  225. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jidctfst-mmi.c +395 -0
  226. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jidctint-mmi.c +571 -0
  227. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jquanti-mmi.c +124 -0
  228. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jsimd.c +866 -0
  229. data/ext/image_pack/vendor/mozjpeg/simd/mips64/jsimd_mmi.h +69 -0
  230. data/ext/image_pack/vendor/mozjpeg/simd/mips64/loongson-mmintrin.h +1334 -0
  231. data/ext/image_pack/vendor/mozjpeg/simd/nasm/jcolsamp.inc +135 -0
  232. data/ext/image_pack/vendor/mozjpeg/simd/nasm/jdct.inc +31 -0
  233. data/ext/image_pack/vendor/mozjpeg/simd/nasm/jsimdcfg.inc +93 -0
  234. data/ext/image_pack/vendor/mozjpeg/simd/nasm/jsimdcfg.inc.h +133 -0
  235. data/ext/image_pack/vendor/mozjpeg/simd/nasm/jsimdext.inc +520 -0
  236. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jccolext-altivec.c +269 -0
  237. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jccolor-altivec.c +116 -0
  238. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jcgray-altivec.c +111 -0
  239. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jcgryext-altivec.c +228 -0
  240. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jcsample-altivec.c +159 -0
  241. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jcsample.h +28 -0
  242. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jdcolext-altivec.c +276 -0
  243. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jdcolor-altivec.c +106 -0
  244. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jdmerge-altivec.c +130 -0
  245. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jdmrgext-altivec.c +329 -0
  246. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jdsample-altivec.c +400 -0
  247. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jfdctfst-altivec.c +154 -0
  248. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jfdctint-altivec.c +258 -0
  249. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jidctfst-altivec.c +255 -0
  250. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jidctint-altivec.c +357 -0
  251. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jquanti-altivec.c +250 -0
  252. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jsimd.c +884 -0
  253. data/ext/image_pack/vendor/mozjpeg/simd/powerpc/jsimd_altivec.h +98 -0
  254. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jccolext-avx2.asm +559 -0
  255. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jccolext-sse2.asm +484 -0
  256. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jccolor-avx2.asm +121 -0
  257. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jccolor-sse2.asm +120 -0
  258. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcgray-avx2.asm +113 -0
  259. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcgray-sse2.asm +112 -0
  260. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcgryext-avx2.asm +438 -0
  261. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcgryext-sse2.asm +363 -0
  262. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jchuff-sse2.asm +583 -0
  263. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcphuff-sse2.asm +639 -0
  264. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcsample-avx2.asm +367 -0
  265. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jcsample-sse2.asm +330 -0
  266. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdcolext-avx2.asm +496 -0
  267. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdcolext-sse2.asm +439 -0
  268. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdcolor-avx2.asm +118 -0
  269. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdcolor-sse2.asm +117 -0
  270. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdmerge-avx2.asm +136 -0
  271. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdmerge-sse2.asm +135 -0
  272. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdmrgext-avx2.asm +596 -0
  273. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdmrgext-sse2.asm +538 -0
  274. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdsample-avx2.asm +696 -0
  275. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jdsample-sse2.asm +665 -0
  276. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jfdctflt-sse.asm +355 -0
  277. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jfdctfst-sse2.asm +389 -0
  278. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jfdctint-avx2.asm +320 -0
  279. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jfdctint-sse2.asm +619 -0
  280. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jidctflt-sse2.asm +482 -0
  281. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jidctfst-sse2.asm +491 -0
  282. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jidctint-avx2.asm +418 -0
  283. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jidctint-sse2.asm +847 -0
  284. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jidctred-sse2.asm +574 -0
  285. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jquantf-sse2.asm +155 -0
  286. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jquanti-avx2.asm +163 -0
  287. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jquanti-sse2.asm +188 -0
  288. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jsimd.c +1110 -0
  289. data/ext/image_pack/vendor/mozjpeg/simd/x86_64/jsimdcpu.asm +86 -0
  290. data/ext/image_pack/vendor/mozjpeg/strtest.c +170 -0
  291. data/ext/image_pack/vendor/mozjpeg/structure.txt +900 -0
  292. data/ext/image_pack/vendor/mozjpeg/tjbench.c +1044 -0
  293. data/ext/image_pack/vendor/mozjpeg/tjbenchtest.in +256 -0
  294. data/ext/image_pack/vendor/mozjpeg/tjbenchtest.java.in +215 -0
  295. data/ext/image_pack/vendor/mozjpeg/tjexample.c +406 -0
  296. data/ext/image_pack/vendor/mozjpeg/tjexampletest.in +149 -0
  297. data/ext/image_pack/vendor/mozjpeg/tjexampletest.java.in +151 -0
  298. data/ext/image_pack/vendor/mozjpeg/tjunittest.c +961 -0
  299. data/ext/image_pack/vendor/mozjpeg/tjutil.c +70 -0
  300. data/ext/image_pack/vendor/mozjpeg/tjutil.h +53 -0
  301. data/ext/image_pack/vendor/mozjpeg/transupp.c +2373 -0
  302. data/ext/image_pack/vendor/mozjpeg/transupp.h +243 -0
  303. data/ext/image_pack/vendor/mozjpeg/turbojpeg-jni.c +1259 -0
  304. data/ext/image_pack/vendor/mozjpeg/turbojpeg.c +2320 -0
  305. data/ext/image_pack/vendor/mozjpeg/turbojpeg.h +1784 -0
  306. data/ext/image_pack/vendor/mozjpeg/usage.txt +679 -0
  307. data/ext/image_pack/vendor/mozjpeg/wizard.txt +220 -0
  308. data/ext/image_pack/vendor/mozjpeg/wrbmp.c +552 -0
  309. data/ext/image_pack/vendor/mozjpeg/wrgif.c +580 -0
  310. data/ext/image_pack/vendor/mozjpeg/wrjpgcom.c +577 -0
  311. data/ext/image_pack/vendor/mozjpeg/wrppm.c +366 -0
  312. data/ext/image_pack/vendor/mozjpeg/wrtarga.c +258 -0
  313. data/ext/image_pack/vendor/mozjpeg/yuvjpeg.c +268 -0
  314. data/lib/image_pack/backend.rb +8 -0
  315. data/lib/image_pack/configuration.rb +23 -0
  316. data/lib/image_pack/errors.rb +13 -0
  317. data/lib/image_pack/version.rb +5 -0
  318. data/lib/image_pack.rb +208 -0
  319. metadata +433 -0
@@ -0,0 +1,801 @@
1
+ /*
2
+ * jidctint-neon.c - accurate integer IDCT (Arm Neon)
3
+ *
4
+ * Copyright (C) 2020, Arm Limited. All Rights Reserved.
5
+ * Copyright (C) 2020, D. R. Commander. All Rights Reserved.
6
+ *
7
+ * This software is provided 'as-is', without any express or implied
8
+ * warranty. In no event will the authors be held liable for any damages
9
+ * arising from the use of this software.
10
+ *
11
+ * Permission is granted to anyone to use this software for any purpose,
12
+ * including commercial applications, and to alter it and redistribute it
13
+ * freely, subject to the following restrictions:
14
+ *
15
+ * 1. The origin of this software must not be misrepresented; you must not
16
+ * claim that you wrote the original software. If you use this software
17
+ * in a product, an acknowledgment in the product documentation would be
18
+ * appreciated but is not required.
19
+ * 2. Altered source versions must be plainly marked as such, and must not be
20
+ * misrepresented as being the original software.
21
+ * 3. This notice may not be removed or altered from any source distribution.
22
+ */
23
+
24
+ #define JPEG_INTERNALS
25
+ #include "../../jinclude.h"
26
+ #include "../../jpeglib.h"
27
+ #include "../../jsimd.h"
28
+ #include "../../jdct.h"
29
+ #include "../../jsimddct.h"
30
+ #include "../jsimd.h"
31
+ #include "align.h"
32
+ #include "neon-compat.h"
33
+
34
+ #include <arm_neon.h>
35
+
36
+
37
+ #define CONST_BITS 13
38
+ #define PASS1_BITS 2
39
+
40
+ #define DESCALE_P1 (CONST_BITS - PASS1_BITS)
41
+ #define DESCALE_P2 (CONST_BITS + PASS1_BITS + 3)
42
+
43
+ /* The computation of the inverse DCT requires the use of constants known at
44
+ * compile time. Scaled integer constants are used to avoid floating-point
45
+ * arithmetic:
46
+ * 0.298631336 = 2446 * 2^-13
47
+ * 0.390180644 = 3196 * 2^-13
48
+ * 0.541196100 = 4433 * 2^-13
49
+ * 0.765366865 = 6270 * 2^-13
50
+ * 0.899976223 = 7373 * 2^-13
51
+ * 1.175875602 = 9633 * 2^-13
52
+ * 1.501321110 = 12299 * 2^-13
53
+ * 1.847759065 = 15137 * 2^-13
54
+ * 1.961570560 = 16069 * 2^-13
55
+ * 2.053119869 = 16819 * 2^-13
56
+ * 2.562915447 = 20995 * 2^-13
57
+ * 3.072711026 = 25172 * 2^-13
58
+ */
59
+
60
+ #define F_0_298 2446
61
+ #define F_0_390 3196
62
+ #define F_0_541 4433
63
+ #define F_0_765 6270
64
+ #define F_0_899 7373
65
+ #define F_1_175 9633
66
+ #define F_1_501 12299
67
+ #define F_1_847 15137
68
+ #define F_1_961 16069
69
+ #define F_2_053 16819
70
+ #define F_2_562 20995
71
+ #define F_3_072 25172
72
+
73
+ #define F_1_175_MINUS_1_961 (F_1_175 - F_1_961)
74
+ #define F_1_175_MINUS_0_390 (F_1_175 - F_0_390)
75
+ #define F_0_541_MINUS_1_847 (F_0_541 - F_1_847)
76
+ #define F_3_072_MINUS_2_562 (F_3_072 - F_2_562)
77
+ #define F_0_298_MINUS_0_899 (F_0_298 - F_0_899)
78
+ #define F_1_501_MINUS_0_899 (F_1_501 - F_0_899)
79
+ #define F_2_053_MINUS_2_562 (F_2_053 - F_2_562)
80
+ #define F_0_541_PLUS_0_765 (F_0_541 + F_0_765)
81
+
82
+
83
+ ALIGN(16) static const int16_t jsimd_idct_islow_neon_consts[] = {
84
+ F_0_899, F_0_541,
85
+ F_2_562, F_0_298_MINUS_0_899,
86
+ F_1_501_MINUS_0_899, F_2_053_MINUS_2_562,
87
+ F_0_541_PLUS_0_765, F_1_175,
88
+ F_1_175_MINUS_0_390, F_0_541_MINUS_1_847,
89
+ F_3_072_MINUS_2_562, F_1_175_MINUS_1_961,
90
+ 0, 0, 0, 0
91
+ };
92
+
93
+
94
+ /* Forward declaration of regular and sparse IDCT helper functions */
95
+
96
+ static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
97
+ int16x4_t row1,
98
+ int16x4_t row2,
99
+ int16x4_t row3,
100
+ int16x4_t row4,
101
+ int16x4_t row5,
102
+ int16x4_t row6,
103
+ int16x4_t row7,
104
+ int16x4_t quant_row0,
105
+ int16x4_t quant_row1,
106
+ int16x4_t quant_row2,
107
+ int16x4_t quant_row3,
108
+ int16x4_t quant_row4,
109
+ int16x4_t quant_row5,
110
+ int16x4_t quant_row6,
111
+ int16x4_t quant_row7,
112
+ int16_t *workspace_1,
113
+ int16_t *workspace_2);
114
+
115
+ static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
116
+ int16x4_t row1,
117
+ int16x4_t row2,
118
+ int16x4_t row3,
119
+ int16x4_t quant_row0,
120
+ int16x4_t quant_row1,
121
+ int16x4_t quant_row2,
122
+ int16x4_t quant_row3,
123
+ int16_t *workspace_1,
124
+ int16_t *workspace_2);
125
+
126
+ static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
127
+ JSAMPARRAY output_buf,
128
+ JDIMENSION output_col,
129
+ unsigned buf_offset);
130
+
131
+ static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
132
+ JSAMPARRAY output_buf,
133
+ JDIMENSION output_col,
134
+ unsigned buf_offset);
135
+
136
+
137
+ /* Perform dequantization and inverse DCT on one block of coefficients. For
138
+ * reference, the C implementation (jpeg_idct_slow()) can be found in
139
+ * jidctint.c.
140
+ *
141
+ * Optimization techniques used for fast data access:
142
+ *
143
+ * In each pass, the inverse DCT is computed for the left and right 4x8 halves
144
+ * of the DCT block. This avoids spilling due to register pressure, and the
145
+ * increased granularity allows for an optimized calculation depending on the
146
+ * values of the DCT coefficients. Between passes, intermediate data is stored
147
+ * in 4x8 workspace buffers.
148
+ *
149
+ * Transposing the 8x8 DCT block after each pass can be achieved by transposing
150
+ * each of the four 4x4 quadrants and swapping quadrants 1 and 2 (refer to the
151
+ * diagram below.) Swapping quadrants is cheap, since the second pass can just
152
+ * swap the workspace buffer pointers.
153
+ *
154
+ * +-------+-------+ +-------+-------+
155
+ * | | | | | |
156
+ * | 0 | 1 | | 0 | 2 |
157
+ * | | | transpose | | |
158
+ * +-------+-------+ ------> +-------+-------+
159
+ * | | | | | |
160
+ * | 2 | 3 | | 1 | 3 |
161
+ * | | | | | |
162
+ * +-------+-------+ +-------+-------+
163
+ *
164
+ * Optimization techniques used to accelerate the inverse DCT calculation:
165
+ *
166
+ * In a DCT coefficient block, the coefficients are increasingly likely to be 0
167
+ * as you move diagonally from top left to bottom right. If whole rows of
168
+ * coefficients are 0, then the inverse DCT calculation can be simplified. On
169
+ * the first pass of the inverse DCT, we test for three special cases before
170
+ * defaulting to a full "regular" inverse DCT:
171
+ *
172
+ * 1) Coefficients in rows 4-7 are all zero. In this case, we perform a
173
+ * "sparse" simplified inverse DCT on rows 0-3.
174
+ * 2) AC coefficients (rows 1-7) are all zero. In this case, the inverse DCT
175
+ * result is equal to the dequantized DC coefficients.
176
+ * 3) AC and DC coefficients are all zero. In this case, the inverse DCT
177
+ * result is all zero. For the left 4x8 half, this is handled identically
178
+ * to Case 2 above. For the right 4x8 half, we do no work and signal that
179
+ * the "sparse" algorithm is required for the second pass.
180
+ *
181
+ * In the second pass, only a single special case is tested: whether the AC and
182
+ * DC coefficients were all zero in the right 4x8 block during the first pass
183
+ * (refer to Case 3 above.) If this is the case, then a "sparse" variant of
184
+ * the second pass is performed for both the left and right halves of the DCT
185
+ * block. (The transposition after the first pass means that the right 4x8
186
+ * block during the first pass becomes rows 4-7 during the second pass.)
187
+ */
188
+
189
+ void jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
190
+ JSAMPARRAY output_buf, JDIMENSION output_col)
191
+ {
192
+ ISLOW_MULT_TYPE *quantptr = dct_table;
193
+
194
+ int16_t workspace_l[8 * DCTSIZE / 2];
195
+ int16_t workspace_r[8 * DCTSIZE / 2];
196
+
197
+ /* Compute IDCT first pass on left 4x8 coefficient block. */
198
+
199
+ /* Load DCT coefficients in left 4x8 block. */
200
+ int16x4_t row0 = vld1_s16(coef_block + 0 * DCTSIZE);
201
+ int16x4_t row1 = vld1_s16(coef_block + 1 * DCTSIZE);
202
+ int16x4_t row2 = vld1_s16(coef_block + 2 * DCTSIZE);
203
+ int16x4_t row3 = vld1_s16(coef_block + 3 * DCTSIZE);
204
+ int16x4_t row4 = vld1_s16(coef_block + 4 * DCTSIZE);
205
+ int16x4_t row5 = vld1_s16(coef_block + 5 * DCTSIZE);
206
+ int16x4_t row6 = vld1_s16(coef_block + 6 * DCTSIZE);
207
+ int16x4_t row7 = vld1_s16(coef_block + 7 * DCTSIZE);
208
+
209
+ /* Load quantization table for left 4x8 block. */
210
+ int16x4_t quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE);
211
+ int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
212
+ int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
213
+ int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
214
+ int16x4_t quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE);
215
+ int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
216
+ int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
217
+ int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);
218
+
219
+ /* Construct bitmap to test if DCT coefficients in left 4x8 block are 0. */
220
+ int16x4_t bitmap = vorr_s16(row7, row6);
221
+ bitmap = vorr_s16(bitmap, row5);
222
+ bitmap = vorr_s16(bitmap, row4);
223
+ int64_t bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
224
+
225
+ if (bitmap_rows_4567 == 0) {
226
+ bitmap = vorr_s16(bitmap, row3);
227
+ bitmap = vorr_s16(bitmap, row2);
228
+ bitmap = vorr_s16(bitmap, row1);
229
+ int64_t left_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
230
+
231
+ if (left_ac_bitmap == 0) {
232
+ int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
233
+ int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
234
+ /* Store 4x4 blocks to workspace, transposing in the process. */
235
+ vst4_s16(workspace_l, quadrant);
236
+ vst4_s16(workspace_r, quadrant);
237
+ } else {
238
+ jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
239
+ quant_row1, quant_row2, quant_row3,
240
+ workspace_l, workspace_r);
241
+ }
242
+ } else {
243
+ jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
244
+ row6, row7, quant_row0, quant_row1,
245
+ quant_row2, quant_row3, quant_row4,
246
+ quant_row5, quant_row6, quant_row7,
247
+ workspace_l, workspace_r);
248
+ }
249
+
250
+ /* Compute IDCT first pass on right 4x8 coefficient block. */
251
+
252
+ /* Load DCT coefficients in right 4x8 block. */
253
+ row0 = vld1_s16(coef_block + 0 * DCTSIZE + 4);
254
+ row1 = vld1_s16(coef_block + 1 * DCTSIZE + 4);
255
+ row2 = vld1_s16(coef_block + 2 * DCTSIZE + 4);
256
+ row3 = vld1_s16(coef_block + 3 * DCTSIZE + 4);
257
+ row4 = vld1_s16(coef_block + 4 * DCTSIZE + 4);
258
+ row5 = vld1_s16(coef_block + 5 * DCTSIZE + 4);
259
+ row6 = vld1_s16(coef_block + 6 * DCTSIZE + 4);
260
+ row7 = vld1_s16(coef_block + 7 * DCTSIZE + 4);
261
+
262
+ /* Load quantization table for right 4x8 block. */
263
+ quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE + 4);
264
+ quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
265
+ quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
266
+ quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
267
+ quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE + 4);
268
+ quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
269
+ quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
270
+ quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);
271
+
272
+ /* Construct bitmap to test if DCT coefficients in right 4x8 block are 0. */
273
+ bitmap = vorr_s16(row7, row6);
274
+ bitmap = vorr_s16(bitmap, row5);
275
+ bitmap = vorr_s16(bitmap, row4);
276
+ bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
277
+ bitmap = vorr_s16(bitmap, row3);
278
+ bitmap = vorr_s16(bitmap, row2);
279
+ bitmap = vorr_s16(bitmap, row1);
280
+ int64_t right_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
281
+
282
+ /* If this remains non-zero, a "regular" second pass will be performed. */
283
+ int64_t right_ac_dc_bitmap = 1;
284
+
285
+ if (right_ac_bitmap == 0) {
286
+ bitmap = vorr_s16(bitmap, row0);
287
+ right_ac_dc_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
288
+
289
+ if (right_ac_dc_bitmap != 0) {
290
+ int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
291
+ int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
292
+ /* Store 4x4 blocks to workspace, transposing in the process. */
293
+ vst4_s16(workspace_l + 4 * DCTSIZE / 2, quadrant);
294
+ vst4_s16(workspace_r + 4 * DCTSIZE / 2, quadrant);
295
+ }
296
+ } else {
297
+ if (bitmap_rows_4567 == 0) {
298
+ jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
299
+ quant_row1, quant_row2, quant_row3,
300
+ workspace_l + 4 * DCTSIZE / 2,
301
+ workspace_r + 4 * DCTSIZE / 2);
302
+ } else {
303
+ jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
304
+ row6, row7, quant_row0, quant_row1,
305
+ quant_row2, quant_row3, quant_row4,
306
+ quant_row5, quant_row6, quant_row7,
307
+ workspace_l + 4 * DCTSIZE / 2,
308
+ workspace_r + 4 * DCTSIZE / 2);
309
+ }
310
+ }
311
+
312
+ /* Second pass: compute IDCT on rows in workspace. */
313
+
314
+ /* If all coefficients in right 4x8 block are 0, use "sparse" second pass. */
315
+ if (right_ac_dc_bitmap == 0) {
316
+ jsimd_idct_islow_pass2_sparse(workspace_l, output_buf, output_col, 0);
317
+ jsimd_idct_islow_pass2_sparse(workspace_r, output_buf, output_col, 4);
318
+ } else {
319
+ jsimd_idct_islow_pass2_regular(workspace_l, output_buf, output_col, 0);
320
+ jsimd_idct_islow_pass2_regular(workspace_r, output_buf, output_col, 4);
321
+ }
322
+ }
323
+
324
+
325
/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients.  (To process the full 8x8 DCT block, this
 * function-- or some other optimized variant-- needs to be called for both the
 * left and right 4x8 blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of AC coefficients is all 0.
 *
 * The original C implementation of the accurate IDCT (jpeg_idct_slow()) can be
 * found in jidctint.c.  Algorithmic changes made here are documented inline.
 *
 * row0..row7         one 4-wide column group of DCT coefficients (row n holds
 *                    the four coefficients of DCT row n)
 * quant_row0..row7   the matching rows of the quantization table
 * workspace_1        receives the transposed 4x4 block built from output
 *                    rows 0-3
 * workspace_2        receives the transposed 4x4 block built from output
 *                    rows 4-7
 */

static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
                                                  int16x4_t row1,
                                                  int16x4_t row2,
                                                  int16x4_t row3,
                                                  int16x4_t row4,
                                                  int16x4_t row5,
                                                  int16x4_t row6,
                                                  int16x4_t row7,
                                                  int16x4_t quant_row0,
                                                  int16x4_t quant_row1,
                                                  int16x4_t quant_row2,
                                                  int16x4_t quant_row3,
                                                  int16x4_t quant_row4,
                                                  int16x4_t quant_row5,
                                                  int16x4_t quant_row6,
                                                  int16x4_t quant_row7,
                                                  int16_t *workspace_1,
                                                  int16_t *workspace_2)
{
  /* Load constants for IDCT computation.  (The val[i] lane indices used below
   * assume the layout of jsimd_idct_islow_neon_consts, which is defined
   * elsewhere in this file -- NOTE(review): confirm against that table.)
   */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  /* Fallback for compilers without the vld1_s16_x3() intrinsic: three
   * separate loads assembled into the same three-vector structure.
   */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part */
  /* Dequantize rows 2 and 6 (multiply coefficients by quantization values.) */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);
  int16x4_t z3_s16 = vmul_s16(row6, quant_row6);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  /* Dequantize rows 0 and 4. */
  z2_s16 = vmul_s16(row0, quant_row0);
  z3_s16 = vmul_s16(row4, quant_row4);

  /* Widen to 32-bit while scaling up by CONST_BITS, matching the fixed-point
   * scale of the tmp2/tmp3 products above.
   */
  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  /* Dequantize the odd-numbered rows. */
  int16x4_t tmp0_s16 = vmul_s16(row7, quant_row7);
  int16x4_t tmp1_s16 = vmul_s16(row5, quant_row5);
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation (algebraically equivalent, folding z5 into each term
   * so that only multiply-accumulate operations are needed):
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation (z1 and z2 distributed into the tmp terms):
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */

  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale (round + shift right by DESCALE_P1) and
   * narrow back to 16-bit.  The even/odd butterflies produce the eight output
   * rows in mirrored pairs (row k and row 7-k share tmp1x/tmpx terms).
   */
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second pass.
   * (VST4 transposes the blocks.  We need to operate on rows in the next
   * pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}
463
+
464
+
465
/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients.
 *
 * This "sparse" version assumes that the AC coefficients in rows 4-7 are all
 * 0.  This simplifies the IDCT calculation, accelerating overall performance:
 * every term of the "regular" variant that would be multiplied by rows 4-7
 * is dropped, so only rows 0-3 (and their quantization rows) are needed.
 *
 * row0..row3         the nonzero 4-wide coefficient rows
 * quant_row0..row3   the matching rows of the quantization table
 * workspace_1        receives the transposed 4x4 block built from output
 *                    rows 0-3
 * workspace_2        receives the transposed 4x4 block built from output
 *                    rows 4-7
 */

static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
                                                 int16x4_t row1,
                                                 int16x4_t row2,
                                                 int16x4_t row3,
                                                 int16x4_t quant_row0,
                                                 int16x4_t quant_row1,
                                                 int16x4_t quant_row2,
                                                 int16x4_t quant_row3,
                                                 int16_t *workspace_1,
                                                 int16_t *workspace_2)
{
  /* Load constants for IDCT computation.  (The val[i] lane indices used below
   * assume the layout of jsimd_idct_islow_neon_consts, which is defined
   * elsewhere in this file -- NOTE(review): confirm against that table.)
   */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  /* Fallback for compilers without the vld1_s16_x3() intrinsic. */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0, since it is derived from row 6) */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  /* Row 4 is 0, so tmp0 and tmp1 collapse to the same scaled row-0 value. */
  z2_s16 = vmul_s16(row0, quant_row0);
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0, since rows 7 and 5 are 0) */
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  /* With rows 5 and 7 zero, z3 and z4 reduce to the row-3 and row-1 terms. */
  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Same multiply-accumulate reformulation as the "regular" variant, with the
   * terms involving rows 5 and 7 removed (they would multiply zeros).
   */
  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second pass.
   * (VST4 transposes the blocks.  We need to operate on rows in the next
   * pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}
546
+
547
+
548
/* Perform the second pass of the accurate inverse DCT on a 4x8 block of
 * coefficients.  (To process the full 8x8 DCT block, this function-- or some
 * other optimized variant-- needs to be called for both the right and left 4x8
 * blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of coefficient values are all 0 after the
 * first pass.
 *
 * Again, the original C implementation of the accurate IDCT (jpeg_idct_slow())
 * can be found in jidctint.c.  Algorithmic changes made here are documented
 * inline.
 *
 * workspace    intermediate (transposed) results from the first pass; rows
 *              are read at stride DCTSIZE / 2 (4 int16 values per row)
 * output_buf   array of output row pointers
 * output_col   starting column within each output row
 * buf_offset   index of the first output row this call writes (0 for the top
 *              four rows, 4 for the bottom four)
 */

static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
                                                  JSAMPARRAY output_buf,
                                                  JDIMENSION output_col,
                                                  unsigned buf_offset)
{
  /* Load constants for IDCT computation.  (The val[i] lane indices used below
   * assume the layout of jsimd_idct_islow_neon_consts, which is defined
   * elsewhere in this file -- NOTE(review): confirm against that table.)
   */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  /* Fallback for compilers without the vld1_s16_x3() intrinsic. */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);
  int16x4_t z3_s16 = vld1_s16(workspace + 6 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  z3_s16 = vld1_s16(workspace + 4 * DCTSIZE / 2);

  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  int16x4_t tmp0_s16 = vld1_s16(workspace + 7 * DCTSIZE / 2);
  int16x4_t tmp1_s16 = vld1_s16(workspace + 5 * DCTSIZE / 2);
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation (algebraically equivalent, folding z5 into each term
   * so that only multiply-accumulate operations are needed):
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation (z1 and z2 distributed into the tmp terms):
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */

  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale and narrow to 16-bit.  (vaddhn/vsubhn take
   * the high halves of the 32-bit sums/differences, performing the first 16
   * bits of the descale for free.)
   */
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));
  /* Descale and narrow to 8-bit.  (The "- 16" accounts for the 16 bits
   * already shifted out by vaddhn/vsubhn above.)
   */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);
  /* Clamp to range [0-255].  (The saturating narrow above clamped to
   * [-128, 127]; adding CENTERJSAMPLE as unsigned re-centers to [0, 255].)
   */
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory.  (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;
  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}
699
+
700
+
701
/* Performs the second pass of the accurate inverse DCT on a 4x8 block
 * of coefficients.
 *
 * This "sparse" version assumes that the coefficient values (after the first
 * pass) in rows 4-7 are all 0.  This simplifies the IDCT calculation,
 * accelerating overall performance: every term of the "regular" variant that
 * would be multiplied by workspace rows 4-7 is dropped.
 *
 * workspace    intermediate (transposed) results from the first pass; only
 *              rows 0-3 are read (at stride DCTSIZE / 2)
 * output_buf   array of output row pointers
 * output_col   starting column within each output row
 * buf_offset   index of the first output row this call writes (0 for the top
 *              four rows, 4 for the bottom four)
 */

static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
                                                 JSAMPARRAY output_buf,
                                                 JDIMENSION output_col,
                                                 unsigned buf_offset)
{
  /* Load constants for IDCT computation.  (The val[i] lane indices used below
   * assume the layout of jsimd_idct_islow_neon_consts, which is defined
   * elsewhere in this file -- NOTE(review): confirm against that table.)
   */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  /* Fallback for compilers without the vld1_s16_x3() intrinsic. */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0, since it would come from row 6) */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  /* Row 4 is 0, so tmp0 and tmp1 collapse to the same scaled row-0 value. */
  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0, since rows 7 and 5 are 0) */
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  /* With rows 5 and 7 zero, z3 and z4 reduce to the row-3 and row-1 terms. */
  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Same multiply-accumulate reformulation as the "regular" variant, with the
   * terms involving rows 5 and 7 removed (they would multiply zeros).
   */
  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit.  (vaddhn/vsubhn take
   * the high halves of the 32-bit sums/differences, performing the first 16
   * bits of the descale for free.)
   */
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));
  /* Descale and narrow to 8-bit.  (The "- 16" accounts for the 16 bits
   * already shifted out by vaddhn/vsubhn above.)
   */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);
  /* Clamp to range [0-255].  (The saturating narrow above clamped to
   * [-128, 127]; adding CENTERJSAMPLE as unsigned re-centers to [0, 255].)
   */
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory.  (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;
  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}