immunio 0.15.4 → 0.16.0

Files changed (454)
  1. checksums.yaml +4 -4
  2. data/LICENSE +0 -27
  3. data/ext/immunio/Rakefile +9 -0
  4. data/lib/immunio/plugins/active_record.rb +1 -1
  5. data/lib/immunio/plugins/active_record_relation.rb +1 -1
  6. data/lib/immunio/plugins/environment_reporter.rb +20 -0
  7. data/lib/immunio/rufus_lua_ext/ref.rb +1 -3
  8. data/lib/immunio/version.rb +1 -1
  9. data/lib/immunio/vm.rb +1 -2
  10. data/lua-hooks/Makefile +97 -0
  11. data/lua-hooks/ext/all.c +41 -52
  12. data/lua-hooks/ext/all.o +0 -0
  13. data/lua-hooks/ext/libinjection/libinjection_html5.o +0 -0
  14. data/lua-hooks/ext/libinjection/libinjection_sqli.o +0 -0
  15. data/lua-hooks/ext/libinjection/libinjection_xss.o +0 -0
  16. data/lua-hooks/ext/libinjection/lualib.c +2 -2
  17. data/lua-hooks/ext/lpeg/lpcap.c +2 -2
  18. data/lua-hooks/ext/lpeg/lpcap.o +0 -0
  19. data/lua-hooks/ext/lpeg/lpcode.c +2 -2
  20. data/lua-hooks/ext/lpeg/lpcode.h +1 -1
  21. data/lua-hooks/ext/lpeg/lpcode.o +0 -0
  22. data/lua-hooks/ext/lpeg/lpprint.o +0 -0
  23. data/lua-hooks/ext/lpeg/lptree.c +2 -2
  24. data/lua-hooks/ext/lpeg/lptypes.h +1 -1
  25. data/lua-hooks/ext/lpeg/lpvm.c +2 -2
  26. data/lua-hooks/ext/lpeg/lpvm.o +0 -0
  27. data/lua-hooks/ext/lua-cmsgpack/lua_cmsgpack.c +16 -3
  28. data/lua-hooks/ext/lua-snapshot/snapshot.c +14 -7
  29. data/lua-hooks/ext/luajit/COPYRIGHT +56 -0
  30. data/lua-hooks/ext/luajit/Makefile +159 -0
  31. data/lua-hooks/ext/luajit/README +16 -0
  32. data/lua-hooks/ext/luajit/doc/bluequad-print.css +166 -0
  33. data/lua-hooks/ext/luajit/doc/bluequad.css +325 -0
  34. data/lua-hooks/ext/luajit/doc/changes.html +804 -0
  35. data/lua-hooks/ext/luajit/doc/contact.html +104 -0
  36. data/lua-hooks/ext/luajit/doc/ext_c_api.html +189 -0
  37. data/lua-hooks/ext/luajit/doc/ext_ffi.html +332 -0
  38. data/lua-hooks/ext/luajit/doc/ext_ffi_api.html +570 -0
  39. data/lua-hooks/ext/luajit/doc/ext_ffi_semantics.html +1261 -0
  40. data/lua-hooks/ext/luajit/doc/ext_ffi_tutorial.html +603 -0
  41. data/lua-hooks/ext/luajit/doc/ext_jit.html +201 -0
  42. data/lua-hooks/ext/luajit/doc/ext_profiler.html +365 -0
  43. data/lua-hooks/ext/luajit/doc/extensions.html +448 -0
  44. data/lua-hooks/ext/luajit/doc/faq.html +186 -0
  45. data/lua-hooks/ext/luajit/doc/img/contact.png +0 -0
  46. data/lua-hooks/ext/luajit/doc/install.html +659 -0
  47. data/lua-hooks/ext/luajit/doc/luajit.html +236 -0
  48. data/lua-hooks/ext/luajit/doc/running.html +309 -0
  49. data/lua-hooks/ext/luajit/doc/status.html +118 -0
  50. data/lua-hooks/ext/luajit/dynasm/dasm_arm.h +456 -0
  51. data/lua-hooks/ext/luajit/dynasm/dasm_arm.lua +1125 -0
  52. data/lua-hooks/ext/luajit/dynasm/dasm_arm64.h +518 -0
  53. data/lua-hooks/ext/luajit/dynasm/dasm_arm64.lua +1166 -0
  54. data/lua-hooks/ext/luajit/dynasm/dasm_mips.h +416 -0
  55. data/lua-hooks/ext/luajit/dynasm/dasm_mips.lua +953 -0
  56. data/lua-hooks/ext/luajit/dynasm/dasm_ppc.h +419 -0
  57. data/lua-hooks/ext/luajit/dynasm/dasm_ppc.lua +1919 -0
  58. data/lua-hooks/ext/luajit/dynasm/dasm_proto.h +83 -0
  59. data/lua-hooks/ext/luajit/dynasm/dasm_x64.lua +12 -0
  60. data/lua-hooks/ext/luajit/dynasm/dasm_x86.h +471 -0
  61. data/lua-hooks/ext/luajit/dynasm/dasm_x86.lua +1945 -0
  62. data/lua-hooks/ext/luajit/dynasm/dynasm.lua +1094 -0
  63. data/lua-hooks/ext/luajit/etc/luajit.1 +88 -0
  64. data/lua-hooks/ext/luajit/etc/luajit.pc +25 -0
  65. data/lua-hooks/ext/luajit/src/Makefile +697 -0
  66. data/lua-hooks/ext/luajit/src/Makefile.dep +244 -0
  67. data/lua-hooks/ext/luajit/src/host/README +4 -0
  68. data/lua-hooks/ext/luajit/src/host/buildvm +0 -0
  69. data/lua-hooks/ext/luajit/src/host/buildvm.c +518 -0
  70. data/lua-hooks/ext/luajit/src/host/buildvm.h +105 -0
  71. data/lua-hooks/ext/luajit/src/host/buildvm.o +0 -0
  72. data/lua-hooks/ext/luajit/src/host/buildvm_arch.h +7449 -0
  73. data/lua-hooks/ext/luajit/src/host/buildvm_asm.c +345 -0
  74. data/lua-hooks/ext/luajit/src/host/buildvm_asm.o +0 -0
  75. data/lua-hooks/ext/luajit/src/host/buildvm_fold.c +229 -0
  76. data/lua-hooks/ext/luajit/src/host/buildvm_fold.o +0 -0
  77. data/lua-hooks/ext/luajit/src/host/buildvm_lib.c +457 -0
  78. data/lua-hooks/ext/luajit/src/host/buildvm_lib.o +0 -0
  79. data/lua-hooks/ext/luajit/src/host/buildvm_libbc.h +45 -0
  80. data/lua-hooks/ext/luajit/src/host/buildvm_peobj.c +368 -0
  81. data/lua-hooks/ext/luajit/src/host/buildvm_peobj.o +0 -0
  82. data/lua-hooks/ext/luajit/src/host/genlibbc.lua +197 -0
  83. data/lua-hooks/ext/luajit/src/host/genminilua.lua +428 -0
  84. data/lua-hooks/ext/luajit/src/host/minilua +0 -0
  85. data/lua-hooks/ext/luajit/src/host/minilua.c +7770 -0
  86. data/lua-hooks/ext/luajit/src/host/minilua.o +0 -0
  87. data/lua-hooks/ext/luajit/src/jit/bc.lua +190 -0
  88. data/lua-hooks/ext/luajit/src/jit/bcsave.lua +661 -0
  89. data/lua-hooks/ext/luajit/src/jit/dis_arm.lua +689 -0
  90. data/lua-hooks/ext/luajit/src/jit/dis_mips.lua +428 -0
  91. data/lua-hooks/ext/luajit/src/jit/dis_mipsel.lua +17 -0
  92. data/lua-hooks/ext/luajit/src/jit/dis_ppc.lua +591 -0
  93. data/lua-hooks/ext/luajit/src/jit/dis_x64.lua +17 -0
  94. data/lua-hooks/ext/luajit/src/jit/dis_x86.lua +838 -0
  95. data/lua-hooks/ext/luajit/src/jit/dump.lua +706 -0
  96. data/lua-hooks/ext/luajit/src/jit/p.lua +310 -0
  97. data/lua-hooks/ext/luajit/src/jit/v.lua +170 -0
  98. data/lua-hooks/ext/luajit/src/jit/vmdef.lua +362 -0
  99. data/lua-hooks/ext/luajit/src/jit/zone.lua +45 -0
  100. data/lua-hooks/ext/{lua → luajit/src}/lauxlib.h +10 -17
  101. data/lua-hooks/ext/luajit/src/lib_aux.c +356 -0
  102. data/lua-hooks/ext/luajit/src/lib_aux.o +0 -0
  103. data/lua-hooks/ext/luajit/src/lib_aux_dyn.o +0 -0
  104. data/lua-hooks/ext/luajit/src/lib_base.c +664 -0
  105. data/lua-hooks/ext/luajit/src/lib_base.o +0 -0
  106. data/lua-hooks/ext/luajit/src/lib_base_dyn.o +0 -0
  107. data/lua-hooks/ext/luajit/src/lib_bit.c +180 -0
  108. data/lua-hooks/ext/luajit/src/lib_bit.o +0 -0
  109. data/lua-hooks/ext/luajit/src/lib_bit_dyn.o +0 -0
  110. data/lua-hooks/ext/luajit/src/lib_debug.c +405 -0
  111. data/lua-hooks/ext/luajit/src/lib_debug.o +0 -0
  112. data/lua-hooks/ext/luajit/src/lib_debug_dyn.o +0 -0
  113. data/lua-hooks/ext/luajit/src/lib_ffi.c +872 -0
  114. data/lua-hooks/ext/luajit/src/lib_ffi.o +0 -0
  115. data/lua-hooks/ext/luajit/src/lib_ffi_dyn.o +0 -0
  116. data/lua-hooks/ext/luajit/src/lib_init.c +55 -0
  117. data/lua-hooks/ext/luajit/src/lib_init.o +0 -0
  118. data/lua-hooks/ext/luajit/src/lib_init_dyn.o +0 -0
  119. data/lua-hooks/ext/luajit/src/lib_io.c +541 -0
  120. data/lua-hooks/ext/luajit/src/lib_io.o +0 -0
  121. data/lua-hooks/ext/luajit/src/lib_io_dyn.o +0 -0
  122. data/lua-hooks/ext/luajit/src/lib_jit.c +767 -0
  123. data/lua-hooks/ext/luajit/src/lib_jit.o +0 -0
  124. data/lua-hooks/ext/luajit/src/lib_jit_dyn.o +0 -0
  125. data/lua-hooks/ext/luajit/src/lib_math.c +230 -0
  126. data/lua-hooks/ext/luajit/src/lib_math.o +0 -0
  127. data/lua-hooks/ext/luajit/src/lib_math_dyn.o +0 -0
  128. data/lua-hooks/ext/luajit/src/lib_os.c +292 -0
  129. data/lua-hooks/ext/luajit/src/lib_os.o +0 -0
  130. data/lua-hooks/ext/luajit/src/lib_os_dyn.o +0 -0
  131. data/lua-hooks/ext/luajit/src/lib_package.c +610 -0
  132. data/lua-hooks/ext/luajit/src/lib_package.o +0 -0
  133. data/lua-hooks/ext/luajit/src/lib_package_dyn.o +0 -0
  134. data/lua-hooks/ext/luajit/src/lib_string.c +752 -0
  135. data/lua-hooks/ext/luajit/src/lib_string.o +0 -0
  136. data/lua-hooks/ext/luajit/src/lib_string_dyn.o +0 -0
  137. data/lua-hooks/ext/luajit/src/lib_table.c +307 -0
  138. data/lua-hooks/ext/luajit/src/lib_table.o +0 -0
  139. data/lua-hooks/ext/luajit/src/lib_table_dyn.o +0 -0
  140. data/lua-hooks/ext/luajit/src/libluajit.a +0 -0
  141. data/lua-hooks/ext/luajit/src/libluajit.so +0 -0
  142. data/lua-hooks/ext/luajit/src/lj.supp +26 -0
  143. data/lua-hooks/ext/luajit/src/lj_alloc.c +1398 -0
  144. data/lua-hooks/ext/luajit/src/lj_alloc.h +17 -0
  145. data/lua-hooks/ext/luajit/src/lj_alloc.o +0 -0
  146. data/lua-hooks/ext/luajit/src/lj_alloc_dyn.o +0 -0
  147. data/lua-hooks/ext/luajit/src/lj_api.c +1210 -0
  148. data/lua-hooks/ext/luajit/src/lj_api.o +0 -0
  149. data/lua-hooks/ext/luajit/src/lj_api_dyn.o +0 -0
  150. data/lua-hooks/ext/luajit/src/lj_arch.h +509 -0
  151. data/lua-hooks/ext/luajit/src/lj_asm.c +2278 -0
  152. data/lua-hooks/ext/luajit/src/lj_asm.h +17 -0
  153. data/lua-hooks/ext/luajit/src/lj_asm.o +0 -0
  154. data/lua-hooks/ext/luajit/src/lj_asm_arm.h +2217 -0
  155. data/lua-hooks/ext/luajit/src/lj_asm_dyn.o +0 -0
  156. data/lua-hooks/ext/luajit/src/lj_asm_mips.h +1833 -0
  157. data/lua-hooks/ext/luajit/src/lj_asm_ppc.h +2015 -0
  158. data/lua-hooks/ext/luajit/src/lj_asm_x86.h +2634 -0
  159. data/lua-hooks/ext/luajit/src/lj_bc.c +14 -0
  160. data/lua-hooks/ext/luajit/src/lj_bc.h +265 -0
  161. data/lua-hooks/ext/luajit/src/lj_bc.o +0 -0
  162. data/lua-hooks/ext/luajit/src/lj_bc_dyn.o +0 -0
  163. data/lua-hooks/ext/luajit/src/lj_bcdef.h +220 -0
  164. data/lua-hooks/ext/luajit/src/lj_bcdump.h +68 -0
  165. data/lua-hooks/ext/luajit/src/lj_bcread.c +457 -0
  166. data/lua-hooks/ext/luajit/src/lj_bcread.o +0 -0
  167. data/lua-hooks/ext/luajit/src/lj_bcread_dyn.o +0 -0
  168. data/lua-hooks/ext/luajit/src/lj_bcwrite.c +361 -0
  169. data/lua-hooks/ext/luajit/src/lj_bcwrite.o +0 -0
  170. data/lua-hooks/ext/luajit/src/lj_bcwrite_dyn.o +0 -0
  171. data/lua-hooks/ext/luajit/src/lj_buf.c +234 -0
  172. data/lua-hooks/ext/luajit/src/lj_buf.h +105 -0
  173. data/lua-hooks/ext/luajit/src/lj_buf.o +0 -0
  174. data/lua-hooks/ext/luajit/src/lj_buf_dyn.o +0 -0
  175. data/lua-hooks/ext/luajit/src/lj_carith.c +429 -0
  176. data/lua-hooks/ext/luajit/src/lj_carith.h +37 -0
  177. data/lua-hooks/ext/luajit/src/lj_carith.o +0 -0
  178. data/lua-hooks/ext/luajit/src/lj_carith_dyn.o +0 -0
  179. data/lua-hooks/ext/luajit/src/lj_ccall.c +984 -0
  180. data/lua-hooks/ext/luajit/src/lj_ccall.h +178 -0
  181. data/lua-hooks/ext/luajit/src/lj_ccall.o +0 -0
  182. data/lua-hooks/ext/luajit/src/lj_ccall_dyn.o +0 -0
  183. data/lua-hooks/ext/luajit/src/lj_ccallback.c +712 -0
  184. data/lua-hooks/ext/luajit/src/lj_ccallback.h +25 -0
  185. data/lua-hooks/ext/luajit/src/lj_ccallback.o +0 -0
  186. data/lua-hooks/ext/luajit/src/lj_ccallback_dyn.o +0 -0
  187. data/lua-hooks/ext/luajit/src/lj_cconv.c +752 -0
  188. data/lua-hooks/ext/luajit/src/lj_cconv.h +70 -0
  189. data/lua-hooks/ext/luajit/src/lj_cconv.o +0 -0
  190. data/lua-hooks/ext/luajit/src/lj_cconv_dyn.o +0 -0
  191. data/lua-hooks/ext/luajit/src/lj_cdata.c +288 -0
  192. data/lua-hooks/ext/luajit/src/lj_cdata.h +76 -0
  193. data/lua-hooks/ext/luajit/src/lj_cdata.o +0 -0
  194. data/lua-hooks/ext/luajit/src/lj_cdata_dyn.o +0 -0
  195. data/lua-hooks/ext/luajit/src/lj_char.c +43 -0
  196. data/lua-hooks/ext/luajit/src/lj_char.h +42 -0
  197. data/lua-hooks/ext/luajit/src/lj_char.o +0 -0
  198. data/lua-hooks/ext/luajit/src/lj_char_dyn.o +0 -0
  199. data/lua-hooks/ext/luajit/src/lj_clib.c +418 -0
  200. data/lua-hooks/ext/luajit/src/lj_clib.h +29 -0
  201. data/lua-hooks/ext/luajit/src/lj_clib.o +0 -0
  202. data/lua-hooks/ext/luajit/src/lj_clib_dyn.o +0 -0
  203. data/lua-hooks/ext/luajit/src/lj_cparse.c +1862 -0
  204. data/lua-hooks/ext/luajit/src/lj_cparse.h +65 -0
  205. data/lua-hooks/ext/luajit/src/lj_cparse.o +0 -0
  206. data/lua-hooks/ext/luajit/src/lj_cparse_dyn.o +0 -0
  207. data/lua-hooks/ext/luajit/src/lj_crecord.c +1834 -0
  208. data/lua-hooks/ext/luajit/src/lj_crecord.h +38 -0
  209. data/lua-hooks/ext/luajit/src/lj_crecord.o +0 -0
  210. data/lua-hooks/ext/luajit/src/lj_crecord_dyn.o +0 -0
  211. data/lua-hooks/ext/luajit/src/lj_ctype.c +635 -0
  212. data/lua-hooks/ext/luajit/src/lj_ctype.h +461 -0
  213. data/lua-hooks/ext/luajit/src/lj_ctype.o +0 -0
  214. data/lua-hooks/ext/luajit/src/lj_ctype_dyn.o +0 -0
  215. data/lua-hooks/ext/luajit/src/lj_debug.c +699 -0
  216. data/lua-hooks/ext/luajit/src/lj_debug.h +65 -0
  217. data/lua-hooks/ext/luajit/src/lj_debug.o +0 -0
  218. data/lua-hooks/ext/luajit/src/lj_debug_dyn.o +0 -0
  219. data/lua-hooks/ext/luajit/src/lj_def.h +365 -0
  220. data/lua-hooks/ext/luajit/src/lj_dispatch.c +557 -0
  221. data/lua-hooks/ext/luajit/src/lj_dispatch.h +138 -0
  222. data/lua-hooks/ext/luajit/src/lj_dispatch.o +0 -0
  223. data/lua-hooks/ext/luajit/src/lj_dispatch_dyn.o +0 -0
  224. data/lua-hooks/ext/luajit/src/lj_emit_arm.h +356 -0
  225. data/lua-hooks/ext/luajit/src/lj_emit_mips.h +211 -0
  226. data/lua-hooks/ext/luajit/src/lj_emit_ppc.h +238 -0
  227. data/lua-hooks/ext/luajit/src/lj_emit_x86.h +462 -0
  228. data/lua-hooks/ext/luajit/src/lj_err.c +794 -0
  229. data/lua-hooks/ext/luajit/src/lj_err.h +41 -0
  230. data/lua-hooks/ext/luajit/src/lj_err.o +0 -0
  231. data/lua-hooks/ext/luajit/src/lj_err_dyn.o +0 -0
  232. data/lua-hooks/ext/luajit/src/lj_errmsg.h +190 -0
  233. data/lua-hooks/ext/luajit/src/lj_ff.h +18 -0
  234. data/lua-hooks/ext/luajit/src/lj_ffdef.h +209 -0
  235. data/lua-hooks/ext/luajit/src/lj_ffrecord.c +1247 -0
  236. data/lua-hooks/ext/luajit/src/lj_ffrecord.h +24 -0
  237. data/lua-hooks/ext/luajit/src/lj_ffrecord.o +0 -0
  238. data/lua-hooks/ext/luajit/src/lj_ffrecord_dyn.o +0 -0
  239. data/lua-hooks/ext/luajit/src/lj_folddef.h +1138 -0
  240. data/lua-hooks/ext/luajit/src/lj_frame.h +259 -0
  241. data/lua-hooks/ext/luajit/src/lj_func.c +185 -0
  242. data/lua-hooks/ext/luajit/src/lj_func.h +24 -0
  243. data/lua-hooks/ext/luajit/src/lj_func.o +0 -0
  244. data/lua-hooks/ext/luajit/src/lj_func_dyn.o +0 -0
  245. data/lua-hooks/ext/luajit/src/lj_gc.c +845 -0
  246. data/lua-hooks/ext/luajit/src/lj_gc.h +134 -0
  247. data/lua-hooks/ext/luajit/src/lj_gc.o +0 -0
  248. data/lua-hooks/ext/luajit/src/lj_gc_dyn.o +0 -0
  249. data/lua-hooks/ext/luajit/src/lj_gdbjit.c +787 -0
  250. data/lua-hooks/ext/luajit/src/lj_gdbjit.h +22 -0
  251. data/lua-hooks/ext/luajit/src/lj_gdbjit.o +0 -0
  252. data/lua-hooks/ext/luajit/src/lj_gdbjit_dyn.o +0 -0
  253. data/lua-hooks/ext/luajit/src/lj_ir.c +505 -0
  254. data/lua-hooks/ext/luajit/src/lj_ir.h +577 -0
  255. data/lua-hooks/ext/luajit/src/lj_ir.o +0 -0
  256. data/lua-hooks/ext/luajit/src/lj_ir_dyn.o +0 -0
  257. data/lua-hooks/ext/luajit/src/lj_ircall.h +321 -0
  258. data/lua-hooks/ext/luajit/src/lj_iropt.h +161 -0
  259. data/lua-hooks/ext/luajit/src/lj_jit.h +440 -0
  260. data/lua-hooks/ext/luajit/src/lj_lex.c +482 -0
  261. data/lua-hooks/ext/luajit/src/lj_lex.h +86 -0
  262. data/lua-hooks/ext/luajit/src/lj_lex.o +0 -0
  263. data/lua-hooks/ext/luajit/src/lj_lex_dyn.o +0 -0
  264. data/lua-hooks/ext/luajit/src/lj_lib.c +303 -0
  265. data/lua-hooks/ext/luajit/src/lj_lib.h +115 -0
  266. data/lua-hooks/ext/luajit/src/lj_lib.o +0 -0
  267. data/lua-hooks/ext/luajit/src/lj_lib_dyn.o +0 -0
  268. data/lua-hooks/ext/luajit/src/lj_libdef.h +414 -0
  269. data/lua-hooks/ext/luajit/src/lj_load.c +168 -0
  270. data/lua-hooks/ext/luajit/src/lj_load.o +0 -0
  271. data/lua-hooks/ext/luajit/src/lj_load_dyn.o +0 -0
  272. data/lua-hooks/ext/luajit/src/lj_mcode.c +386 -0
  273. data/lua-hooks/ext/luajit/src/lj_mcode.h +30 -0
  274. data/lua-hooks/ext/luajit/src/lj_mcode.o +0 -0
  275. data/lua-hooks/ext/luajit/src/lj_mcode_dyn.o +0 -0
  276. data/lua-hooks/ext/luajit/src/lj_meta.c +477 -0
  277. data/lua-hooks/ext/luajit/src/lj_meta.h +38 -0
  278. data/lua-hooks/ext/luajit/src/lj_meta.o +0 -0
  279. data/lua-hooks/ext/luajit/src/lj_meta_dyn.o +0 -0
  280. data/lua-hooks/ext/luajit/src/lj_obj.c +50 -0
  281. data/lua-hooks/ext/luajit/src/lj_obj.h +976 -0
  282. data/lua-hooks/ext/luajit/src/lj_obj.o +0 -0
  283. data/lua-hooks/ext/luajit/src/lj_obj_dyn.o +0 -0
  284. data/lua-hooks/ext/luajit/src/lj_opt_dce.c +78 -0
  285. data/lua-hooks/ext/luajit/src/lj_opt_dce.o +0 -0
  286. data/lua-hooks/ext/luajit/src/lj_opt_dce_dyn.o +0 -0
  287. data/lua-hooks/ext/luajit/src/lj_opt_fold.c +2488 -0
  288. data/lua-hooks/ext/luajit/src/lj_opt_fold.o +0 -0
  289. data/lua-hooks/ext/luajit/src/lj_opt_fold_dyn.o +0 -0
  290. data/lua-hooks/ext/luajit/src/lj_opt_loop.c +449 -0
  291. data/lua-hooks/ext/luajit/src/lj_opt_loop.o +0 -0
  292. data/lua-hooks/ext/luajit/src/lj_opt_loop_dyn.o +0 -0
  293. data/lua-hooks/ext/luajit/src/lj_opt_mem.c +935 -0
  294. data/lua-hooks/ext/luajit/src/lj_opt_mem.o +0 -0
  295. data/lua-hooks/ext/luajit/src/lj_opt_mem_dyn.o +0 -0
  296. data/lua-hooks/ext/luajit/src/lj_opt_narrow.c +652 -0
  297. data/lua-hooks/ext/luajit/src/lj_opt_narrow.o +0 -0
  298. data/lua-hooks/ext/luajit/src/lj_opt_narrow_dyn.o +0 -0
  299. data/lua-hooks/ext/luajit/src/lj_opt_sink.c +245 -0
  300. data/lua-hooks/ext/luajit/src/lj_opt_sink.o +0 -0
  301. data/lua-hooks/ext/luajit/src/lj_opt_sink_dyn.o +0 -0
  302. data/lua-hooks/ext/luajit/src/lj_opt_split.c +856 -0
  303. data/lua-hooks/ext/luajit/src/lj_opt_split.o +0 -0
  304. data/lua-hooks/ext/luajit/src/lj_opt_split_dyn.o +0 -0
  305. data/lua-hooks/ext/luajit/src/lj_parse.c +2725 -0
  306. data/lua-hooks/ext/luajit/src/lj_parse.h +18 -0
  307. data/lua-hooks/ext/luajit/src/lj_parse.o +0 -0
  308. data/lua-hooks/ext/luajit/src/lj_parse_dyn.o +0 -0
  309. data/lua-hooks/ext/luajit/src/lj_profile.c +368 -0
  310. data/lua-hooks/ext/luajit/src/lj_profile.h +21 -0
  311. data/lua-hooks/ext/luajit/src/lj_profile.o +0 -0
  312. data/lua-hooks/ext/luajit/src/lj_profile_dyn.o +0 -0
  313. data/lua-hooks/ext/luajit/src/lj_recdef.h +270 -0
  314. data/lua-hooks/ext/luajit/src/lj_record.c +2554 -0
  315. data/lua-hooks/ext/luajit/src/lj_record.h +45 -0
  316. data/lua-hooks/ext/luajit/src/lj_record.o +0 -0
  317. data/lua-hooks/ext/luajit/src/lj_record_dyn.o +0 -0
  318. data/lua-hooks/ext/luajit/src/lj_snap.c +870 -0
  319. data/lua-hooks/ext/luajit/src/lj_snap.h +34 -0
  320. data/lua-hooks/ext/luajit/src/lj_snap.o +0 -0
  321. data/lua-hooks/ext/luajit/src/lj_snap_dyn.o +0 -0
  322. data/lua-hooks/ext/luajit/src/lj_state.c +300 -0
  323. data/lua-hooks/ext/luajit/src/lj_state.h +35 -0
  324. data/lua-hooks/ext/luajit/src/lj_state.o +0 -0
  325. data/lua-hooks/ext/luajit/src/lj_state_dyn.o +0 -0
  326. data/lua-hooks/ext/luajit/src/lj_str.c +197 -0
  327. data/lua-hooks/ext/luajit/src/lj_str.h +27 -0
  328. data/lua-hooks/ext/luajit/src/lj_str.o +0 -0
  329. data/lua-hooks/ext/luajit/src/lj_str_dyn.o +0 -0
  330. data/lua-hooks/ext/luajit/src/lj_strfmt.c +554 -0
  331. data/lua-hooks/ext/luajit/src/lj_strfmt.h +125 -0
  332. data/lua-hooks/ext/luajit/src/lj_strfmt.o +0 -0
  333. data/lua-hooks/ext/luajit/src/lj_strfmt_dyn.o +0 -0
  334. data/lua-hooks/ext/luajit/src/lj_strscan.c +547 -0
  335. data/lua-hooks/ext/luajit/src/lj_strscan.h +39 -0
  336. data/lua-hooks/ext/luajit/src/lj_strscan.o +0 -0
  337. data/lua-hooks/ext/luajit/src/lj_strscan_dyn.o +0 -0
  338. data/lua-hooks/ext/luajit/src/lj_tab.c +666 -0
  339. data/lua-hooks/ext/luajit/src/lj_tab.h +73 -0
  340. data/lua-hooks/ext/luajit/src/lj_tab.o +0 -0
  341. data/lua-hooks/ext/luajit/src/lj_tab_dyn.o +0 -0
  342. data/lua-hooks/ext/luajit/src/lj_target.h +164 -0
  343. data/lua-hooks/ext/luajit/src/lj_target_arm.h +270 -0
  344. data/lua-hooks/ext/luajit/src/lj_target_arm64.h +97 -0
  345. data/lua-hooks/ext/luajit/src/lj_target_mips.h +260 -0
  346. data/lua-hooks/ext/luajit/src/lj_target_ppc.h +280 -0
  347. data/lua-hooks/ext/luajit/src/lj_target_x86.h +345 -0
  348. data/lua-hooks/ext/luajit/src/lj_trace.c +859 -0
  349. data/lua-hooks/ext/luajit/src/lj_trace.h +54 -0
  350. data/lua-hooks/ext/luajit/src/lj_trace.o +0 -0
  351. data/lua-hooks/ext/luajit/src/lj_trace_dyn.o +0 -0
  352. data/lua-hooks/ext/luajit/src/lj_traceerr.h +63 -0
  353. data/lua-hooks/ext/luajit/src/lj_udata.c +34 -0
  354. data/lua-hooks/ext/luajit/src/lj_udata.h +14 -0
  355. data/lua-hooks/ext/luajit/src/lj_udata.o +0 -0
  356. data/lua-hooks/ext/luajit/src/lj_udata_dyn.o +0 -0
  357. data/lua-hooks/ext/luajit/src/lj_vm.S +2730 -0
  358. data/lua-hooks/ext/luajit/src/lj_vm.h +114 -0
  359. data/lua-hooks/ext/luajit/src/lj_vm.o +0 -0
  360. data/lua-hooks/ext/luajit/src/lj_vm_dyn.o +0 -0
  361. data/lua-hooks/ext/luajit/src/lj_vmevent.c +58 -0
  362. data/lua-hooks/ext/luajit/src/lj_vmevent.h +59 -0
  363. data/lua-hooks/ext/luajit/src/lj_vmevent.o +0 -0
  364. data/lua-hooks/ext/luajit/src/lj_vmevent_dyn.o +0 -0
  365. data/lua-hooks/ext/luajit/src/lj_vmmath.c +152 -0
  366. data/lua-hooks/ext/luajit/src/lj_vmmath.o +0 -0
  367. data/lua-hooks/ext/luajit/src/lj_vmmath_dyn.o +0 -0
  368. data/lua-hooks/ext/luajit/src/ljamalg.c +96 -0
  369. data/lua-hooks/ext/{lua → luajit/src}/lua.h +12 -7
  370. data/lua-hooks/ext/luajit/src/lua.hpp +9 -0
  371. data/lua-hooks/ext/luajit/src/luaconf.h +156 -0
  372. data/lua-hooks/ext/luajit/src/luajit +0 -0
  373. data/lua-hooks/ext/luajit/src/luajit.c +570 -0
  374. data/lua-hooks/ext/luajit/src/luajit.h +79 -0
  375. data/lua-hooks/ext/luajit/src/luajit.o +0 -0
  376. data/lua-hooks/ext/luajit/src/lualib.h +43 -0
  377. data/lua-hooks/ext/luajit/src/msvcbuild.bat +114 -0
  378. data/lua-hooks/ext/luajit/src/ps4build.bat +103 -0
  379. data/lua-hooks/ext/luajit/src/psvitabuild.bat +93 -0
  380. data/lua-hooks/ext/luajit/src/vm_arm.dasc +4585 -0
  381. data/lua-hooks/ext/luajit/src/vm_arm64.dasc +3764 -0
  382. data/lua-hooks/ext/luajit/src/vm_mips.dasc +4355 -0
  383. data/lua-hooks/ext/luajit/src/vm_ppc.dasc +5252 -0
  384. data/lua-hooks/ext/luajit/src/vm_x64.dasc +4902 -0
  385. data/lua-hooks/ext/luajit/src/vm_x86.dasc +5710 -0
  386. data/lua-hooks/ext/luajit/src/xb1build.bat +101 -0
  387. data/lua-hooks/ext/luajit/src/xedkbuild.bat +92 -0
  388. data/lua-hooks/ext/luautf8/lutf8lib.c +3 -3
  389. data/lua-hooks/lib/boot.lua +37 -2
  390. metadata +372 -69
  391. data/lua-hooks/ext/bitop/README +0 -22
  392. data/lua-hooks/ext/bitop/bit.c +0 -189
  393. data/lua-hooks/ext/extconf.rb +0 -38
  394. data/lua-hooks/ext/lua/COPYRIGHT +0 -34
  395. data/lua-hooks/ext/lua/lapi.c +0 -1087
  396. data/lua-hooks/ext/lua/lapi.h +0 -16
  397. data/lua-hooks/ext/lua/lauxlib.c +0 -652
  398. data/lua-hooks/ext/lua/lbaselib.c +0 -659
  399. data/lua-hooks/ext/lua/lcode.c +0 -831
  400. data/lua-hooks/ext/lua/lcode.h +0 -76
  401. data/lua-hooks/ext/lua/ldblib.c +0 -398
  402. data/lua-hooks/ext/lua/ldebug.c +0 -638
  403. data/lua-hooks/ext/lua/ldebug.h +0 -33
  404. data/lua-hooks/ext/lua/ldo.c +0 -519
  405. data/lua-hooks/ext/lua/ldo.h +0 -57
  406. data/lua-hooks/ext/lua/ldump.c +0 -164
  407. data/lua-hooks/ext/lua/lfunc.c +0 -174
  408. data/lua-hooks/ext/lua/lfunc.h +0 -34
  409. data/lua-hooks/ext/lua/lgc.c +0 -710
  410. data/lua-hooks/ext/lua/lgc.h +0 -110
  411. data/lua-hooks/ext/lua/linit.c +0 -38
  412. data/lua-hooks/ext/lua/liolib.c +0 -556
  413. data/lua-hooks/ext/lua/llex.c +0 -463
  414. data/lua-hooks/ext/lua/llex.h +0 -81
  415. data/lua-hooks/ext/lua/llimits.h +0 -128
  416. data/lua-hooks/ext/lua/lmathlib.c +0 -263
  417. data/lua-hooks/ext/lua/lmem.c +0 -86
  418. data/lua-hooks/ext/lua/lmem.h +0 -49
  419. data/lua-hooks/ext/lua/loadlib.c +0 -705
  420. data/lua-hooks/ext/lua/loadlib_rel.c +0 -760
  421. data/lua-hooks/ext/lua/lobject.c +0 -214
  422. data/lua-hooks/ext/lua/lobject.h +0 -381
  423. data/lua-hooks/ext/lua/lopcodes.c +0 -102
  424. data/lua-hooks/ext/lua/lopcodes.h +0 -268
  425. data/lua-hooks/ext/lua/loslib.c +0 -243
  426. data/lua-hooks/ext/lua/lparser.c +0 -1339
  427. data/lua-hooks/ext/lua/lparser.h +0 -82
  428. data/lua-hooks/ext/lua/lstate.c +0 -214
  429. data/lua-hooks/ext/lua/lstate.h +0 -169
  430. data/lua-hooks/ext/lua/lstring.c +0 -111
  431. data/lua-hooks/ext/lua/lstring.h +0 -31
  432. data/lua-hooks/ext/lua/lstrlib.c +0 -871
  433. data/lua-hooks/ext/lua/ltable.c +0 -588
  434. data/lua-hooks/ext/lua/ltable.h +0 -40
  435. data/lua-hooks/ext/lua/ltablib.c +0 -287
  436. data/lua-hooks/ext/lua/ltm.c +0 -75
  437. data/lua-hooks/ext/lua/ltm.h +0 -54
  438. data/lua-hooks/ext/lua/lua.c +0 -392
  439. data/lua-hooks/ext/lua/lua.def +0 -131
  440. data/lua-hooks/ext/lua/lua.rc +0 -28
  441. data/lua-hooks/ext/lua/lua_dll.rc +0 -26
  442. data/lua-hooks/ext/lua/luac.c +0 -200
  443. data/lua-hooks/ext/lua/luac.rc +0 -1
  444. data/lua-hooks/ext/lua/luaconf.h +0 -763
  445. data/lua-hooks/ext/lua/luaconf.h.in +0 -724
  446. data/lua-hooks/ext/lua/luaconf.h.orig +0 -763
  447. data/lua-hooks/ext/lua/lualib.h +0 -53
  448. data/lua-hooks/ext/lua/lundump.c +0 -227
  449. data/lua-hooks/ext/lua/lundump.h +0 -36
  450. data/lua-hooks/ext/lua/lvm.c +0 -767
  451. data/lua-hooks/ext/lua/lvm.h +0 -36
  452. data/lua-hooks/ext/lua/lzio.c +0 -82
  453. data/lua-hooks/ext/lua/lzio.h +0 -67
  454. data/lua-hooks/ext/lua/print.c +0 -227
data/lua-hooks/ext/luajit/src/lib_table.c
@@ -0,0 +1,307 @@
+ /*
+ ** Table library.
+ ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+ **
+ ** Major portions taken verbatim or adapted from the Lua interpreter.
+ ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+ */
+
+ #define lib_table_c
+ #define LUA_LIB
+
+ #include "lua.h"
+ #include "lauxlib.h"
+ #include "lualib.h"
+
+ #include "lj_obj.h"
+ #include "lj_gc.h"
+ #include "lj_err.h"
+ #include "lj_buf.h"
+ #include "lj_tab.h"
+ #include "lj_ff.h"
+ #include "lj_lib.h"
+
+ /* ------------------------------------------------------------------------ */
+
+ #define LJLIB_MODULE_table
+
+ LJLIB_LUA(table_foreachi) /*
+   function(t, f)
+     CHECK_tab(t)
+     CHECK_func(f)
+     for i=1,#t do
+       local r = f(i, t[i])
+       if r ~= nil then return r end
+     end
+   end
+ */
+
+ LJLIB_LUA(table_foreach) /*
+   function(t, f)
+     CHECK_tab(t)
+     CHECK_func(f)
+     for k, v in PAIRS(t) do
+       local r = f(k, v)
+       if r ~= nil then return r end
+     end
+   end
+ */
+
+ LJLIB_LUA(table_getn) /*
+   function(t)
+     CHECK_tab(t)
+     return #t
+   end
+ */
+
+ LJLIB_CF(table_maxn)
+ {
+   GCtab *t = lj_lib_checktab(L, 1);
+   TValue *array = tvref(t->array);
+   Node *node;
+   lua_Number m = 0;
+   ptrdiff_t i;
+   for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--)
+     if (!tvisnil(&array[i])) {
+       m = (lua_Number)(int32_t)i;
+       break;
+     }
+   node = noderef(t->node);
+   for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+     if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) {
+       lua_Number n = numberVnum(&node[i].key);
+       if (n > m) m = n;
+     }
+   setnumV(L->top-1, m);
+   return 1;
+ }
+
+ LJLIB_CF(table_insert) LJLIB_REC(.)
+ {
+   GCtab *t = lj_lib_checktab(L, 1);
+   int32_t n, i = (int32_t)lj_tab_len(t) + 1;
+   int nargs = (int)((char *)L->top - (char *)L->base);
+   if (nargs != 2*sizeof(TValue)) {
+     if (nargs != 3*sizeof(TValue))
+       lj_err_caller(L, LJ_ERR_TABINS);
+     /* NOBARRIER: This just moves existing elements around. */
+     for (n = lj_lib_checkint(L, 2); i > n; i--) {
+       /* The set may invalidate the get pointer, so need to do it first! */
+       TValue *dst = lj_tab_setint(L, t, i);
+       cTValue *src = lj_tab_getint(t, i-1);
+       if (src) {
+         copyTV(L, dst, src);
+       } else {
+         setnilV(dst);
+       }
+     }
+     i = n;
+   }
+   {
+     TValue *dst = lj_tab_setint(L, t, i);
+     copyTV(L, dst, L->top-1);  /* Set new value. */
+     lj_gc_barriert(L, t, dst);
+   }
+   return 0;
+ }
+
+ LJLIB_LUA(table_remove) /*
+   function(t, pos)
+     CHECK_tab(t)
+     local len = #t
+     if pos == nil then
+       if len ~= 0 then
+         local old = t[len]
+         t[len] = nil
+         return old
+       end
+     else
+       CHECK_int(pos)
+       if pos >= 1 and pos <= len then
+         local old = t[pos]
+         for i=pos+1,len do
+           t[i-1] = t[i]
+         end
+         t[len] = nil
+         return old
+       end
+     end
+   end
+ */
+
+ LJLIB_CF(table_concat) LJLIB_REC(.)
+ {
+   GCtab *t = lj_lib_checktab(L, 1);
+   GCstr *sep = lj_lib_optstr(L, 2);
+   int32_t i = lj_lib_optint(L, 3, 1);
+   int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ?
+               lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t);
+   SBuf *sb = lj_buf_tmp_(L);
+   SBuf *sbx = lj_buf_puttab(sb, t, sep, i, e);
+   if (LJ_UNLIKELY(!sbx)) {  /* Error: bad element type. */
+     int32_t idx = (int32_t)(intptr_t)sbufP(sb);
+     cTValue *o = lj_tab_getint(t, idx);
+     lj_err_callerv(L, LJ_ERR_TABCAT,
+                    lj_obj_itypename[o ? itypemap(o) : ~LJ_TNIL], idx);
+   }
+   setstrV(L, L->top-1, lj_buf_str(L, sbx));
+   lj_gc_check(L);
+   return 1;
+ }
+
+ /* ------------------------------------------------------------------------ */
+
+ static void set2(lua_State *L, int i, int j)
+ {
+   lua_rawseti(L, 1, i);
+   lua_rawseti(L, 1, j);
+ }
+
+ static int sort_comp(lua_State *L, int a, int b)
+ {
+   if (!lua_isnil(L, 2)) {  /* function? */
+     int res;
+     lua_pushvalue(L, 2);
+     lua_pushvalue(L, a-1);  /* -1 to compensate function */
+     lua_pushvalue(L, b-2);  /* -2 to compensate function and `a' */
+     lua_call(L, 2, 1);
+     res = lua_toboolean(L, -1);
+     lua_pop(L, 1);
+     return res;
+   } else {  /* a < b? */
+     return lua_lessthan(L, a, b);
+   }
+ }
+
+ static void auxsort(lua_State *L, int l, int u)
+ {
+   while (l < u) {  /* for tail recursion */
+     int i, j;
+     /* sort elements a[l], a[(l+u)/2] and a[u] */
+     lua_rawgeti(L, 1, l);
+     lua_rawgeti(L, 1, u);
+     if (sort_comp(L, -1, -2))  /* a[u] < a[l]? */
+       set2(L, l, u);  /* swap a[l] - a[u] */
+     else
+       lua_pop(L, 2);
+     if (u-l == 1) break;  /* only 2 elements */
+     i = (l+u)/2;
+     lua_rawgeti(L, 1, i);
+     lua_rawgeti(L, 1, l);
+     if (sort_comp(L, -2, -1)) {  /* a[i]<a[l]? */
+       set2(L, i, l);
+     } else {
+       lua_pop(L, 1);  /* remove a[l] */
+       lua_rawgeti(L, 1, u);
+       if (sort_comp(L, -1, -2))  /* a[u]<a[i]? */
+         set2(L, i, u);
+       else
+         lua_pop(L, 2);
+     }
+     if (u-l == 2) break;  /* only 3 elements */
+     lua_rawgeti(L, 1, i);  /* Pivot */
+     lua_pushvalue(L, -1);
+     lua_rawgeti(L, 1, u-1);
+     set2(L, i, u-1);
+     /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */
+     i = l; j = u-1;
+     for (;;) {  /* invariant: a[l..i] <= P <= a[j..u] */
+       /* repeat ++i until a[i] >= P */
+       while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+         if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT);
+         lua_pop(L, 1);  /* remove a[i] */
+       }
+       /* repeat --j until a[j] <= P */
+       while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+         if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT);
+         lua_pop(L, 1);  /* remove a[j] */
+       }
+       if (j<i) {
+         lua_pop(L, 3);  /* pop pivot, a[i], a[j] */
+         break;
+       }
+       set2(L, i, j);
+     }
+     lua_rawgeti(L, 1, u-1);
+     lua_rawgeti(L, 1, i);
+     set2(L, u-1, i);  /* swap pivot (a[u-1]) with a[i] */
+     /* a[l..i-1] <= a[i] == P <= a[i+1..u] */
+     /* adjust so that smaller half is in [j..i] and larger one in [l..u] */
+     if (i-l < u-i) {
+       j=l; i=i-1; l=i+2;
+     } else {
+       j=i+1; i=u; u=j-2;
+     }
+     auxsort(L, j, i);  /* call recursively the smaller one */
+   }  /* repeat the routine for the larger one */
+ }
+
+ LJLIB_CF(table_sort)
+ {
+   GCtab *t = lj_lib_checktab(L, 1);
+   int32_t n = (int32_t)lj_tab_len(t);
+   lua_settop(L, 2);
+   if (!tvisnil(L->base+1))
+     lj_lib_checkfunc(L, 2);
+   auxsort(L, 1, n);
+   return 0;
+ }
+
+ #if LJ_52
+ LJLIB_PUSH("n")
+ LJLIB_CF(table_pack)
+ {
+   TValue *array, *base = L->base;
+   MSize i, n = (uint32_t)(L->top - base);
+   GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1);
+   /* NOBARRIER: The table is new (marked white). */
+   setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n);
+   for (array = tvref(t->array) + 1, i = 0; i < n; i++)
+     copyTV(L, &array[i], &base[i]);
+   settabV(L, base, t);
+   L->top = base+1;
+   lj_gc_check(L);
+   return 1;
+ }
+ #endif
+
+ LJLIB_NOREG LJLIB_CF(table_new) LJLIB_REC(.)
+ {
+   int32_t a = lj_lib_checkint(L, 1);
+   int32_t h = lj_lib_checkint(L, 2);
+   lua_createtable(L, a, h);
+   return 1;
+ }
+
+ LJLIB_NOREG LJLIB_CF(table_clear) LJLIB_REC(.)
+ {
+   lj_tab_clear(lj_lib_checktab(L, 1));
+   return 0;
+ }
+
+ static int luaopen_table_new(lua_State *L)
+ {
+   return lj_lib_postreg(L, lj_cf_table_new, FF_table_new, "new");
+ }
+
+ static int luaopen_table_clear(lua_State *L)
+ {
+   return lj_lib_postreg(L, lj_cf_table_clear, FF_table_clear, "clear");
+ }
+
+ /* ------------------------------------------------------------------------ */
+
+ #include "lj_libdef.h"
+
+ LUALIB_API int luaopen_table(lua_State *L)
+ {
+   LJ_LIB_REG(L, LUA_TABLIBNAME, table);
+ #if LJ_52
+   lua_getglobal(L, "unpack");
+   lua_setfield(L, -2, "unpack");
+ #endif
+   lj_lib_prereg(L, LUA_TABLIBNAME ".new", luaopen_table_new, tabV(L->top-1));
+   lj_lib_prereg(L, LUA_TABLIBNAME ".clear", luaopen_table_clear, tabV(L->top-1));
+   return 1;
+ }
+
data/lua-hooks/ext/luajit/src/lj.supp
@@ -0,0 +1,26 @@
+ # Valgrind suppression file for LuaJIT 2.0.
+ {
+    Optimized string compare
+    Memcheck:Addr4
+    fun:lj_str_cmp
+ }
+ {
+    Optimized string compare
+    Memcheck:Addr1
+    fun:lj_str_cmp
+ }
+ {
+    Optimized string compare
+    Memcheck:Addr4
+    fun:lj_str_new
+ }
+ {
+    Optimized string compare
+    Memcheck:Addr1
+    fun:lj_str_new
+ }
+ {
+    Optimized string compare
+    Memcheck:Cond
+    fun:lj_str_new
+ }
@@ -0,0 +1,1398 @@
1
+ /*
2
+ ** Bundled memory allocator.
3
+ **
4
+ ** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
+ ** The original bears the following remark:
6
+ **
7
+ ** This is a version (aka dlmalloc) of malloc/free/realloc written by
8
+ ** Doug Lea and released to the public domain, as explained at
9
+ ** http://creativecommons.org/licenses/publicdomain.
10
+ **
11
+ ** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee)
12
+ **
13
+ ** No additional copyright is claimed over the customizations.
14
+ ** Please do NOT bother the original author about this version here!
15
+ **
16
+ ** If you want to use dlmalloc in another project, you should get
17
+ ** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
+ ** For thread-safe derivatives, take a look at:
19
+ ** - ptmalloc: http://www.malloc.de/
20
+ ** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
+ */
22
+
23
+ #define lj_alloc_c
24
+ #define LUA_CORE
25
+
26
+ /* To get the mremap prototype. Must be defined before any system includes. */
27
+ #if defined(__linux__) && !defined(_GNU_SOURCE)
28
+ #define _GNU_SOURCE
29
+ #endif
30
+
31
+ #include "lj_def.h"
32
+ #include "lj_arch.h"
33
+ #include "lj_alloc.h"
34
+
35
+ #ifndef LUAJIT_USE_SYSMALLOC
36
+
37
+ #define MAX_SIZE_T (~(size_t)0)
38
+ #define MALLOC_ALIGNMENT ((size_t)8U)
39
+
40
+ #define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
41
+ #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
+ #define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
43
+ #define MAX_RELEASE_CHECK_RATE 255
44
+
45
+ /* ------------------- size_t and alignment properties -------------------- */
46
+
47
+ /* The byte and bit size of a size_t */
48
+ #define SIZE_T_SIZE (sizeof(size_t))
49
+ #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
50
+
51
+ /* Some constants coerced to size_t */
52
+ /* Annoying but necessary to avoid errors on some platforms */
53
+ #define SIZE_T_ZERO ((size_t)0)
54
+ #define SIZE_T_ONE ((size_t)1)
55
+ #define SIZE_T_TWO ((size_t)2)
56
+ #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
57
+ #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
58
+ #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59
+
60
+ /* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
+ #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
62
+
63
+ /* the number of bytes to offset an address to align it */
64
+ #define align_offset(A)\
65
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
67
+
68
+ /* -------------------------- MMAP support ------------------------------- */
69
+
70
+ #define MFAIL ((void *)(MAX_SIZE_T))
71
+ #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */
72
+
73
+ #define IS_DIRECT_BIT (SIZE_T_ONE)
74
+
75
+ #if LJ_TARGET_WINDOWS
76
+
77
+ #define WIN32_LEAN_AND_MEAN
78
+ #include <windows.h>
79
+
80
+ #if LJ_64 && !LJ_GC64
81
+
82
+ /* Undocumented, but hey, that's what we all love so much about Windows. */
83
+ typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
84
+ size_t *size, ULONG alloctype, ULONG prot);
85
+ static PNTAVM ntavm;
86
+
87
+ /* Number of top bits of the lower 32 bits of an address that must be zero.
88
+ ** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
89
+ */
90
+ #define NTAVM_ZEROBITS 1
91
+
92
+ static void INIT_MMAP(void)
93
+ {
94
+ ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
95
+ "NtAllocateVirtualMemory");
96
+ }
97
+
98
+ /* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
99
+ static LJ_AINLINE void *CALL_MMAP(size_t size)
100
+ {
101
+ DWORD olderr = GetLastError();
102
+ void *ptr = NULL;
103
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
104
+ MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
105
+ SetLastError(olderr);
106
+ return st == 0 ? ptr : MFAIL;
107
+ }
108
+
109
+ /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
110
+ static LJ_AINLINE void *DIRECT_MMAP(size_t size)
111
+ {
112
+ DWORD olderr = GetLastError();
113
+ void *ptr = NULL;
114
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
115
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
116
+ SetLastError(olderr);
117
+ return st == 0 ? ptr : MFAIL;
118
+ }
119
+
120
+ #else
121
+
122
+ #define INIT_MMAP() ((void)0)
123
+
124
+ /* Win32 MMAP via VirtualAlloc */
125
+ static LJ_AINLINE void *CALL_MMAP(size_t size)
126
+ {
127
+ DWORD olderr = GetLastError();
128
+ void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
129
+ SetLastError(olderr);
130
+ return ptr ? ptr : MFAIL;
131
+ }
132
+
133
+ /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
134
+ static LJ_AINLINE void *DIRECT_MMAP(size_t size)
135
+ {
136
+ DWORD olderr = GetLastError();
137
+ void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
138
+ PAGE_READWRITE);
139
+ SetLastError(olderr);
140
+ return ptr ? ptr : MFAIL;
141
+ }
142
+
143
+ #endif
144
+
145
+ /* This function supports releasing coalesed segments */
146
+ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
147
+ {
148
+ DWORD olderr = GetLastError();
149
+ MEMORY_BASIC_INFORMATION minfo;
150
+ char *cptr = (char *)ptr;
151
+ while (size) {
152
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
153
+ return -1;
154
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
155
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
156
+ return -1;
157
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
158
+ return -1;
159
+ cptr += minfo.RegionSize;
160
+ size -= minfo.RegionSize;
161
+ }
162
+ SetLastError(olderr);
163
+ return 0;
164
+ }
165
+
166
+ #else
167
+
168
+ #include <errno.h>
169
+ #include <sys/mman.h>
170
+
171
+ #define MMAP_PROT (PROT_READ|PROT_WRITE)
172
+ #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
173
+ #define MAP_ANONYMOUS MAP_ANON
174
+ #endif
175
+ #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
176
+
177
+ #if LJ_64 && !LJ_GC64
178
+ /* 64 bit mode with 32 bit pointers needs special support for allocating
179
+ ** memory in the lower 2GB.
180
+ */
181
+
182
+ #if defined(MAP_32BIT)
183
+
184
+ #if defined(__sun__)
185
+ #define MMAP_REGION_START ((uintptr_t)0x1000)
186
+ #else
187
+ /* Actually this only gives us max. 1GB in current Linux kernels. */
188
+ #define MMAP_REGION_START ((uintptr_t)0)
189
+ #endif
190
+
191
+ static LJ_AINLINE void *CALL_MMAP(size_t size)
192
+ {
193
+ int olderr = errno;
194
+ void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
195
+ errno = olderr;
196
+ return ptr;
197
+ }
198
+
199
+ #elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__)
200
+
201
+ /* OSX and FreeBSD mmap() use a naive first-fit linear search.
202
+ ** That's perfect for us. Except that -pagezero_size must be set for OSX,
203
+ ** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
204
+ ** to be reduced to 250MB on FreeBSD.
205
+ */
206
+ #if LJ_TARGET_OSX || defined(__DragonFly__)
207
+ #define MMAP_REGION_START ((uintptr_t)0x10000)
208
+ #elif LJ_TARGET_PS4
209
+ #define MMAP_REGION_START ((uintptr_t)0x4000)
210
+ #else
211
+ #define MMAP_REGION_START ((uintptr_t)0x10000000)
212
+ #endif
213
+ #define MMAP_REGION_END ((uintptr_t)0x80000000)
214
+
215
+ #if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
216
+ #include <sys/resource.h>
217
+ #endif
218
+
219
+ static LJ_AINLINE void *CALL_MMAP(size_t size)
220
+ {
221
+ int olderr = errno;
222
+ /* Hint for next allocation. Doesn't need to be thread-safe. */
223
+ static uintptr_t alloc_hint = MMAP_REGION_START;
224
+ int retry = 0;
225
+ #if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
226
+ static int rlimit_modified = 0;
227
+ if (LJ_UNLIKELY(rlimit_modified == 0)) {
228
+ struct rlimit rlim;
229
+ rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
230
+ setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. */
231
+ rlimit_modified = 1;
232
+ }
233
+ #endif
234
+ for (;;) {
235
+ void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
236
+ if ((uintptr_t)p >= MMAP_REGION_START &&
237
+ (uintptr_t)p + size < MMAP_REGION_END) {
238
+ alloc_hint = (uintptr_t)p + size;
239
+ errno = olderr;
240
+ return p;
241
+ }
242
+ if (p != CMFAIL) munmap(p, size);
243
+ #if defined(__sun__) || defined(__DragonFly__)
244
+ alloc_hint += 0x1000000; /* Need near-exhaustive linear scan. */
245
+ if (alloc_hint + size < MMAP_REGION_END) continue;
246
+ #endif
247
+ if (retry) break;
248
+ retry = 1;
249
+ alloc_hint = MMAP_REGION_START;
250
+ }
251
+ errno = olderr;
252
+ return CMFAIL;
253
+ }
254
+
255
+ #else
256
+
257
+ #error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"
258
+
259
+ #endif
260
+
261
+ #else
262
+
263
+ /* 32 bit mode and GC64 mode is easy. */
264
+ static LJ_AINLINE void *CALL_MMAP(size_t size)
265
+ {
266
+ int olderr = errno;
267
+ void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
268
+ errno = olderr;
269
+ return ptr;
270
+ }
271
+
272
+ #endif
273
+
274
+ #define INIT_MMAP() ((void)0)
275
+ #define DIRECT_MMAP(s) CALL_MMAP(s)
276
+
277
+ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
278
+ {
279
+ int olderr = errno;
280
+ int ret = munmap(ptr, size);
281
+ errno = olderr;
282
+ return ret;
283
+ }
284
+
285
+ #if LJ_TARGET_LINUX
286
+ /* Need to define _GNU_SOURCE to get the mremap prototype. */
287
+ static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
288
+ int flags)
289
+ {
290
+ int olderr = errno;
291
+ ptr = mremap(ptr, osz, nsz, flags);
292
+ errno = olderr;
293
+ return ptr;
294
+ }
295
+
296
+ #define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
297
+ #define CALL_MREMAP_NOMOVE 0
298
+ #define CALL_MREMAP_MAYMOVE 1
299
+ #if LJ_64 && !LJ_GC64
300
+ #define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
301
+ #else
302
+ #define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
303
+ #endif
304
+ #endif
305
+
306
+ #endif
307
+
308
+ #ifndef CALL_MREMAP
309
+ #define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
310
+ #endif
311
+
312
+ /* ----------------------- Chunk representations ------------------------ */
313
+
314
+ struct malloc_chunk {
315
+ size_t prev_foot; /* Size of previous chunk (if free). */
316
+ size_t head; /* Size and inuse bits. */
317
+ struct malloc_chunk *fd; /* double links -- used only if free. */
318
+ struct malloc_chunk *bk;
319
+ };
320
+
321
+ typedef struct malloc_chunk mchunk;
322
+ typedef struct malloc_chunk *mchunkptr;
323
+ typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
324
+ typedef size_t bindex_t; /* Described below */
325
+ typedef unsigned int binmap_t; /* Described below */
326
+ typedef unsigned int flag_t; /* The type of various bit flag sets */
327
+
328
+ /* ------------------- Chunks sizes and alignments ----------------------- */
329
+
330
+ #define MCHUNK_SIZE (sizeof(mchunk))
331
+
332
+ #define CHUNK_OVERHEAD (SIZE_T_SIZE)
333
+
334
+ /* Direct chunks need a second word of overhead ... */
335
+ #define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
336
+ /* ... and additional padding for fake next-chunk at foot */
337
+ #define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)
338
+
339
+ /* The smallest size we can malloc is an aligned minimal chunk */
340
+ #define MIN_CHUNK_SIZE\
341
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
342
+
343
+ /* conversion from malloc headers to user pointers, and back */
344
+ #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
345
+ #define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
346
+ /* chunk associated with aligned address A */
347
+ #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
348
+
349
+ /* Bounds on request (not chunk) sizes. */
350
+ #define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
351
+ #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
352
+
353
+ /* pad request bytes into a usable size */
354
+ #define pad_request(req) \
355
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
356
+
357
+ /* pad request, checking for minimum (but not maximum) */
358
+ #define request2size(req) \
359
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
360
+
361
+ /* ------------------ Operations on head and foot fields ----------------- */
362
+
363
+ #define PINUSE_BIT (SIZE_T_ONE)
364
+ #define CINUSE_BIT (SIZE_T_TWO)
365
+ #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
366
+
367
+ /* Head value for fenceposts */
368
+ #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
369
+
370
+ /* extraction of fields from head words */
371
+ #define cinuse(p) ((p)->head & CINUSE_BIT)
372
+ #define pinuse(p) ((p)->head & PINUSE_BIT)
373
+ #define chunksize(p) ((p)->head & ~(INUSE_BITS))
374
+
375
+ #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
376
+ #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
377
+
378
+ /* Treat space at ptr +/- offset as a chunk */
379
+ #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
380
+ #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
381
+
382
+ /* Ptr to next or previous physical malloc_chunk. */
383
+ #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
384
+ #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
385
+
386
+ /* extract next chunk's pinuse bit */
387
+ #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
388
+
389
+ /* Get/set size at footer */
390
+ #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
391
+ #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
392
+
393
+ /* Set size, pinuse bit, and foot */
394
+ #define set_size_and_pinuse_of_free_chunk(p, s)\
395
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
396
+
397
+ /* Set size, pinuse bit, foot, and clear next pinuse */
398
+ #define set_free_with_pinuse(p, s, n)\
399
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
400
+
401
+ #define is_direct(p)\
402
+ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
403
+
404
+ /* Get the internal overhead associated with chunk p */
405
+ #define overhead_for(p)\
406
+ (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
407
+
408
+ /* ---------------------- Overlaid data structures ----------------------- */
409
+
410
+ struct malloc_tree_chunk {
411
+ /* The first four fields must be compatible with malloc_chunk */
412
+ size_t prev_foot;
413
+ size_t head;
414
+ struct malloc_tree_chunk *fd;
415
+ struct malloc_tree_chunk *bk;
416
+
417
+ struct malloc_tree_chunk *child[2];
418
+ struct malloc_tree_chunk *parent;
419
+ bindex_t index;
420
+ };
421
+
422
+ typedef struct malloc_tree_chunk tchunk;
423
+ typedef struct malloc_tree_chunk *tchunkptr;
424
+ typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
425
+
426
+ /* A little helper macro for trees */
427
+ #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
428
+
429
+ /* ----------------------------- Segments -------------------------------- */
430
+
431
+ struct malloc_segment {
432
+ char *base; /* base address */
433
+ size_t size; /* allocated size */
434
+ struct malloc_segment *next; /* ptr to next segment */
435
+ };
436
+
437
+ typedef struct malloc_segment msegment;
438
+ typedef struct malloc_segment *msegmentptr;
439
+
440
+ /* ---------------------------- malloc_state ----------------------------- */
441
+
442
+ /* Bin types, widths and sizes */
443
+ #define NSMALLBINS (32U)
444
+ #define NTREEBINS (32U)
445
+ #define SMALLBIN_SHIFT (3U)
446
+ #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
447
+ #define TREEBIN_SHIFT (8U)
448
+ #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
449
+ #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
450
+ #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
451
+
452
+ struct malloc_state {
453
+ binmap_t smallmap;
454
+ binmap_t treemap;
455
+ size_t dvsize;
456
+ size_t topsize;
457
+ mchunkptr dv;
458
+ mchunkptr top;
459
+ size_t trim_check;
460
+ size_t release_checks;
461
+ mchunkptr smallbins[(NSMALLBINS+1)*2];
462
+ tbinptr treebins[NTREEBINS];
463
+ msegment seg;
464
+ };
465
+
466
+ typedef struct malloc_state *mstate;
467
+
468
+ #define is_initialized(M) ((M)->top != 0)
469
+
470
+ /* -------------------------- system alloc setup ------------------------- */
471
+
472
+ /* page-align a size */
473
+ #define page_align(S)\
474
+ (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
475
+
476
+ /* granularity-align a size */
477
+ #define granularity_align(S)\
478
+ (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
479
+ & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
480
+
481
+ #if LJ_TARGET_WINDOWS
482
+ #define mmap_align(S) granularity_align(S)
483
+ #else
484
+ #define mmap_align(S) page_align(S)
485
+ #endif
486
+
487
+ /* True if segment S holds address A */
488
+ #define segment_holds(S, A)\
489
+ ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
490
+
491
+ /* Return segment holding given address */
492
+ static msegmentptr segment_holding(mstate m, char *addr)
493
+ {
494
+ msegmentptr sp = &m->seg;
495
+ for (;;) {
496
+ if (addr >= sp->base && addr < sp->base + sp->size)
497
+ return sp;
498
+ if ((sp = sp->next) == 0)
499
+ return 0;
500
+ }
501
+ }
502
+
503
+ /* Return true if segment contains a segment link */
504
+ static int has_segment_link(mstate m, msegmentptr ss)
505
+ {
506
+ msegmentptr sp = &m->seg;
507
+ for (;;) {
508
+ if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
509
+ return 1;
510
+ if ((sp = sp->next) == 0)
511
+ return 0;
512
+ }
513
+ }
514
+
515
+ /*
516
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
517
+ that may be needed to place segment records and fenceposts when new
518
+ noncontiguous segments are added.
519
+ */
520
+ #define TOP_FOOT_SIZE\
521
+ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
522
+
523
+ /* ---------------------------- Indexing Bins ---------------------------- */
524
+
525
+ #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
526
+ #define small_index(s) ((s) >> SMALLBIN_SHIFT)
527
+ #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
528
+ #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
529
+
530
+ /* addressing by index. See above about smallbin repositioning */
531
+ #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
532
+ #define treebin_at(M,i) (&((M)->treebins[i]))
533
+
534
+ /* assign tree index for size S to variable I */
535
+ #define compute_tree_index(S, I)\
536
+ {\
537
+ unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
538
+ if (X == 0) {\
539
+ I = 0;\
540
+ } else if (X > 0xFFFF) {\
541
+ I = NTREEBINS-1;\
542
+ } else {\
543
+ unsigned int K = lj_fls(X);\
544
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
545
+ }\
546
+ }
547
+
548
+ /* Bit representing maximum resolved size in a treebin at i */
549
+ #define bit_for_tree_index(i) \
550
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
551
+
552
+ /* Shift placing maximum resolved bit in a treebin at i as sign bit */
553
+ #define leftshift_for_tree_index(i) \
554
+ ((i == NTREEBINS-1)? 0 : \
555
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
556
+
557
+ /* The size of the smallest chunk held in bin with index i */
558
+ #define minsize_for_tree_index(i) \
559
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
560
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
561
+
562
+ /* ------------------------ Operations on bin maps ----------------------- */
563
+
564
+ /* bit corresponding to given index */
565
+ #define idx2bit(i) ((binmap_t)(1) << (i))
566
+
567
+ /* Mark/Clear bits with given index */
568
+ #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
569
+ #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
570
+ #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
571
+
572
+ #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
573
+ #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
574
+ #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
575
+
576
+ /* mask with all bits to left of least bit of x on */
577
+ #define left_bits(x) ((x<<1) | (~(x<<1)+1))
578
+
579
+ /* Set cinuse bit and pinuse bit of next chunk */
580
+ #define set_inuse(M,p,s)\
581
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
582
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
583
+
584
+ /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
585
+ #define set_inuse_and_pinuse(M,p,s)\
586
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
587
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
588
+
589
+ /* Set size, cinuse and pinuse bit of this chunk */
590
+ #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
591
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
592
+
593
+ /* ----------------------- Operations on smallbins ----------------------- */
594
+
595
+ /* Link a free chunk into a smallbin */
596
+ #define insert_small_chunk(M, P, S) {\
597
+ bindex_t I = small_index(S);\
598
+ mchunkptr B = smallbin_at(M, I);\
599
+ mchunkptr F = B;\
600
+ if (!smallmap_is_marked(M, I))\
601
+ mark_smallmap(M, I);\
602
+ else\
603
+ F = B->fd;\
604
+ B->fd = P;\
605
+ F->bk = P;\
606
+ P->fd = F;\
607
+ P->bk = B;\
608
+ }
609
+
610
+ /* Unlink a chunk from a smallbin */
611
+ #define unlink_small_chunk(M, P, S) {\
612
+ mchunkptr F = P->fd;\
613
+ mchunkptr B = P->bk;\
614
+ bindex_t I = small_index(S);\
615
+ if (F == B) {\
616
+ clear_smallmap(M, I);\
617
+ } else {\
618
+ F->bk = B;\
619
+ B->fd = F;\
620
+ }\
621
+ }
622
+
623
+ /* Unlink the first chunk from a smallbin */
624
+ #define unlink_first_small_chunk(M, B, P, I) {\
625
+ mchunkptr F = P->fd;\
626
+ if (B == F) {\
627
+ clear_smallmap(M, I);\
628
+ } else {\
629
+ B->fd = F;\
630
+ F->bk = B;\
631
+ }\
632
+ }
633
+
634
+ /* Replace dv node, binning the old one */
635
+ /* Used only when dvsize known to be small */
636
+ #define replace_dv(M, P, S) {\
637
+ size_t DVS = M->dvsize;\
638
+ if (DVS != 0) {\
639
+ mchunkptr DV = M->dv;\
640
+ insert_small_chunk(M, DV, DVS);\
641
+ }\
642
+ M->dvsize = S;\
643
+ M->dv = P;\
644
+ }
645
+
646
+ /* ------------------------- Operations on trees ------------------------- */
647
+
648
+ /* Insert chunk into tree */
649
+ #define insert_large_chunk(M, X, S) {\
650
+ tbinptr *H;\
651
+ bindex_t I;\
652
+ compute_tree_index(S, I);\
653
+ H = treebin_at(M, I);\
654
+ X->index = I;\
655
+ X->child[0] = X->child[1] = 0;\
656
+ if (!treemap_is_marked(M, I)) {\
657
+ mark_treemap(M, I);\
658
+ *H = X;\
659
+ X->parent = (tchunkptr)H;\
660
+ X->fd = X->bk = X;\
661
+ } else {\
662
+ tchunkptr T = *H;\
663
+ size_t K = S << leftshift_for_tree_index(I);\
664
+ for (;;) {\
665
+ if (chunksize(T) != S) {\
666
+ tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
667
+ K <<= 1;\
668
+ if (*C != 0) {\
669
+ T = *C;\
670
+ } else {\
671
+ *C = X;\
672
+ X->parent = T;\
673
+ X->fd = X->bk = X;\
674
+ break;\
675
+ }\
676
+ } else {\
677
+ tchunkptr F = T->fd;\
678
+ T->fd = F->bk = X;\
679
+ X->fd = F;\
680
+ X->bk = T;\
681
+ X->parent = 0;\
682
+ break;\
683
+ }\
684
+ }\
685
+ }\
686
+ }
687
+
688
+ #define unlink_large_chunk(M, X) {\
689
+ tchunkptr XP = X->parent;\
690
+ tchunkptr R;\
691
+ if (X->bk != X) {\
692
+ tchunkptr F = X->fd;\
693
+ R = X->bk;\
694
+ F->bk = R;\
695
+ R->fd = F;\
696
+ } else {\
697
+ tchunkptr *RP;\
698
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
699
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
700
+ tchunkptr *CP;\
701
+ while ((*(CP = &(R->child[1])) != 0) ||\
702
+ (*(CP = &(R->child[0])) != 0)) {\
703
+ R = *(RP = CP);\
704
+ }\
705
+ *RP = 0;\
706
+ }\
707
+ }\
708
+ if (XP != 0) {\
709
+ tbinptr *H = treebin_at(M, X->index);\
710
+ if (X == *H) {\
711
+ if ((*H = R) == 0) \
712
+ clear_treemap(M, X->index);\
713
+ } else {\
714
+ if (XP->child[0] == X) \
715
+ XP->child[0] = R;\
716
+ else \
717
+ XP->child[1] = R;\
718
+ }\
719
+ if (R != 0) {\
720
+ tchunkptr C0, C1;\
721
+ R->parent = XP;\
722
+ if ((C0 = X->child[0]) != 0) {\
723
+ R->child[0] = C0;\
724
+ C0->parent = R;\
725
+ }\
726
+ if ((C1 = X->child[1]) != 0) {\
727
+ R->child[1] = C1;\
728
+ C1->parent = R;\
729
+ }\
730
+ }\
731
+ }\
732
+ }
733
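Each treebin is a bitwise trie: insert_large_chunk shifts the size so the next size bit to compare lands at the sign position, then peels off one bit per level to choose child[0] or child[1]. A standalone sketch of that bit-steering, assuming a 64-bit size_t and the TREEBIN_SHIFT == 8 / NTREEBINS == 32 layout defined earlier in this file:

#include <stdio.h>
#include <stddef.h>

/* Assumed 64-bit build; TREEBIN_SHIFT/NTREEBINS mirror the earlier defines. */
#define TREEBIN_SHIFT  8
#define NTREEBINS      32
#define SIZE_T_ONE     ((size_t)1)
#define SIZE_T_BITSIZE ((size_t)64)

/* Local copy of the macro above, so this sketch is self-contained. */
#define leftshift_for_tree_index(i) \
  ((i == NTREEBINS-1)? 0 : \
   ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

int main(void)
{
  size_t S = 1000;   /* sizes 768..1023 share one treebin on this layout */
  unsigned I = 3;
  size_t K = S << leftshift_for_tree_index(I);
  int depth;
  printf("trie path for size %zu in treebin %u: ", S, I);
  for (depth = 0; depth < 6; depth++) {    /* first few left/right choices */
    printf("%d", (int)((K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1));
    K <<= 1;
  }
  printf("\n");                            /* prints 111010 for size 1000 */
  return 0;
}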
+
734
+ /* Relays to large vs small bin operations */
735
+
736
+ #define insert_chunk(M, P, S)\
737
+ if (is_small(S)) { insert_small_chunk(M, P, S)\
738
+ } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
739
+
740
+ #define unlink_chunk(M, P, S)\
741
+ if (is_small(S)) { unlink_small_chunk(M, P, S)\
742
+ } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
743
+
744
+ /* ----------------------- Direct-mmapping chunks ----------------------- */
745
+
746
+ static void *direct_alloc(size_t nb)
747
+ {
748
+ size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
749
+ if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */
750
+ char *mm = (char *)(DIRECT_MMAP(mmsize));
751
+ if (mm != CMFAIL) {
752
+ size_t offset = align_offset(chunk2mem(mm));
753
+ size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
754
+ mchunkptr p = (mchunkptr)(mm + offset);
755
+ p->prev_foot = offset | IS_DIRECT_BIT;
756
+ p->head = psize|CINUSE_BIT;
757
+ chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
758
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
759
+ return chunk2mem(p);
760
+ }
761
+ }
762
+ return NULL;
763
+ }
764
+
765
+ static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
766
+ {
767
+ size_t oldsize = chunksize(oldp);
768
+ if (is_small(nb)) /* Can't shrink direct regions below small size */
769
+ return NULL;
770
+ /* Keep old chunk if big enough but not too big */
771
+ if (oldsize >= nb + SIZE_T_SIZE &&
772
+ (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
773
+ return oldp;
774
+ } else {
775
+ size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
776
+ size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
777
+ size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
778
+ char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
779
+ oldmmsize, newmmsize, CALL_MREMAP_MV);
780
+ if (cp != CMFAIL) {
781
+ mchunkptr newp = (mchunkptr)(cp + offset);
782
+ size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
783
+ newp->head = psize|CINUSE_BIT;
784
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
785
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
786
+ return newp;
787
+ }
788
+ }
789
+ return NULL;
790
+ }
791
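Chunks above the mmap threshold are mapped directly; direct_alloc records the alignment offset in prev_foot together with IS_DIRECT_BIT so the free path can recover the original mapping. A tiny sketch of that bookkeeping, with IS_DIRECT_BIT assumed to be 4 as in stock dlmalloc (the real define is outside this hunk):

#include <stdio.h>
#include <stddef.h>

/* Assumed flag value, mirroring the stock dlmalloc IS_DIRECT_BIT. */
#define IS_DIRECT_BIT ((size_t)4)

int main(void)
{
  size_t offset = 16;                         /* chunk's offset into the mapping */
  size_t prev_foot = offset | IS_DIRECT_BIT;  /* what direct_alloc() stores */
  /* The free path recovers the mapping base from prev_foot: */
  printf("directly mapped: %d\n", (prev_foot & IS_DIRECT_BIT) != 0);  /* 1 */
  printf("mapping offset : %zu\n", prev_foot & ~IS_DIRECT_BIT);       /* 16 */
  return 0;
}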
+
792
+ /* -------------------------- mspace management -------------------------- */
793
+
794
+ /* Initialize top chunk and its size */
795
+ static void init_top(mstate m, mchunkptr p, size_t psize)
796
+ {
797
+ /* Ensure alignment */
798
+ size_t offset = align_offset(chunk2mem(p));
799
+ p = (mchunkptr)((char *)p + offset);
800
+ psize -= offset;
801
+
802
+ m->top = p;
803
+ m->topsize = psize;
804
+ p->head = psize | PINUSE_BIT;
805
+ /* set size of fake trailing chunk holding overhead space only once */
806
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
807
+ m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
808
+ }
809
+
810
+ /* Initialize bins for a new mstate that is otherwise zeroed out */
811
+ static void init_bins(mstate m)
812
+ {
813
+ /* Establish circular links for smallbins */
814
+ bindex_t i;
815
+ for (i = 0; i < NSMALLBINS; i++) {
816
+ sbinptr bin = smallbin_at(m,i);
817
+ bin->fd = bin->bk = bin;
818
+ }
819
+ }
820
+
821
+ /* Allocate a chunk from newbase and prepend the remainder to the first chunk of the old (successor) base. */
822
+ static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
823
+ {
824
+ mchunkptr p = align_as_chunk(newbase);
825
+ mchunkptr oldfirst = align_as_chunk(oldbase);
826
+ size_t psize = (size_t)((char *)oldfirst - (char *)p);
827
+ mchunkptr q = chunk_plus_offset(p, nb);
828
+ size_t qsize = psize - nb;
829
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
830
+
831
+ /* consolidate remainder with first chunk of old base */
832
+ if (oldfirst == m->top) {
833
+ size_t tsize = m->topsize += qsize;
834
+ m->top = q;
835
+ q->head = tsize | PINUSE_BIT;
836
+ } else if (oldfirst == m->dv) {
837
+ size_t dsize = m->dvsize += qsize;
838
+ m->dv = q;
839
+ set_size_and_pinuse_of_free_chunk(q, dsize);
840
+ } else {
841
+ if (!cinuse(oldfirst)) {
842
+ size_t nsize = chunksize(oldfirst);
843
+ unlink_chunk(m, oldfirst, nsize);
844
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
845
+ qsize += nsize;
846
+ }
847
+ set_free_with_pinuse(q, qsize, oldfirst);
848
+ insert_chunk(m, q, qsize);
849
+ }
850
+
851
+ return chunk2mem(p);
852
+ }
853
+
854
+ /* Add a segment to hold a new noncontiguous region */
855
+ static void add_segment(mstate m, char *tbase, size_t tsize)
856
+ {
857
+ /* Determine locations and sizes of segment, fenceposts, old top */
858
+ char *old_top = (char *)m->top;
859
+ msegmentptr oldsp = segment_holding(m, old_top);
860
+ char *old_end = oldsp->base + oldsp->size;
861
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
862
+ char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
863
+ size_t offset = align_offset(chunk2mem(rawsp));
864
+ char *asp = rawsp + offset;
865
+ char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
866
+ mchunkptr sp = (mchunkptr)csp;
867
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
868
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
869
+ mchunkptr p = tnext;
870
+
871
+ /* reset top to new space */
872
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
873
+
874
+ /* Set up segment record */
875
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
876
+ *ss = m->seg; /* Push current record */
877
+ m->seg.base = tbase;
878
+ m->seg.size = tsize;
879
+ m->seg.next = ss;
880
+
881
+ /* Insert trailing fenceposts */
882
+ for (;;) {
883
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
884
+ p->head = FENCEPOST_HEAD;
885
+ if ((char *)(&(nextp->head)) < old_end)
886
+ p = nextp;
887
+ else
888
+ break;
889
+ }
890
+
891
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
892
+ if (csp != old_top) {
893
+ mchunkptr q = (mchunkptr)old_top;
894
+ size_t psize = (size_t)(csp - old_top);
895
+ mchunkptr tn = chunk_plus_offset(q, psize);
896
+ set_free_with_pinuse(q, psize, tn);
897
+ insert_chunk(m, q, psize);
898
+ }
899
+ }
900
+
901
+ /* -------------------------- System allocation -------------------------- */
902
+
903
+ static void *alloc_sys(mstate m, size_t nb)
904
+ {
905
+ char *tbase = CMFAIL;
906
+ size_t tsize = 0;
907
+
908
+ /* Directly map large chunks */
909
+ if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
910
+ void *mem = direct_alloc(nb);
911
+ if (mem != 0)
912
+ return mem;
913
+ }
914
+
915
+ {
916
+ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
917
+ size_t rsize = granularity_align(req);
918
+ if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
919
+ char *mp = (char *)(CALL_MMAP(rsize));
920
+ if (mp != CMFAIL) {
921
+ tbase = mp;
922
+ tsize = rsize;
923
+ }
924
+ }
925
+ }
926
+
927
+ if (tbase != CMFAIL) {
928
+ msegmentptr sp = &m->seg;
929
+ /* Try to merge with an existing segment */
930
+ while (sp != 0 && tbase != sp->base + sp->size)
931
+ sp = sp->next;
932
+ if (sp != 0 && segment_holds(sp, m->top)) { /* append */
933
+ sp->size += tsize;
934
+ init_top(m, m->top, m->topsize + tsize);
935
+ } else {
936
+ sp = &m->seg;
937
+ while (sp != 0 && sp->base != tbase + tsize)
938
+ sp = sp->next;
939
+ if (sp != 0) {
940
+ char *oldbase = sp->base;
941
+ sp->base = tbase;
942
+ sp->size += tsize;
943
+ return prepend_alloc(m, tbase, oldbase, nb);
944
+ } else {
945
+ add_segment(m, tbase, tsize);
946
+ }
947
+ }
948
+
949
+ if (nb < m->topsize) { /* Allocate from new or extended top space */
950
+ size_t rsize = m->topsize -= nb;
951
+ mchunkptr p = m->top;
952
+ mchunkptr r = m->top = chunk_plus_offset(p, nb);
953
+ r->head = rsize | PINUSE_BIT;
954
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
955
+ return chunk2mem(p);
956
+ }
957
+ }
958
+
959
+ return NULL;
960
+ }
961
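alloc_sys rounds the padded request up to the allocation granularity and guards against the addition wrapping past zero before calling mmap. A standalone sketch of that arithmetic, assuming a 128 KiB DEFAULT_GRANULARITY and a small stand-in for the TOP_FOOT_SIZE overhead (both values are assumptions here):

#include <stdio.h>
#include <stddef.h>

/* Assumed 128 KiB granularity and a stand-in for the TOP_FOOT_SIZE overhead. */
#define GRANULARITY   ((size_t)(128*1024))
#define TOP_OVERHEAD  ((size_t)64)
#define gran_align(s) (((s) + (GRANULARITY - 1)) & ~(GRANULARITY - 1))

int main(void)
{
  size_t nb = 300000;                        /* padded request size */
  size_t req = nb + TOP_OVERHEAD + 1;        /* same shape as in alloc_sys() */
  size_t rsize = gran_align(req);
  if (rsize > nb)                            /* the wrap-around-zero guard */
    printf("map %zu bytes to satisfy a %zu byte request\n", rsize, nb);
  return 0;
}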
+
962
+ /* ----------------------- system deallocation -------------------------- */
963
+
964
+ /* Unmap and unlink any mmapped segments that don't contain used chunks */
965
+ static size_t release_unused_segments(mstate m)
966
+ {
967
+ size_t released = 0;
968
+ size_t nsegs = 0;
969
+ msegmentptr pred = &m->seg;
970
+ msegmentptr sp = pred->next;
971
+ while (sp != 0) {
972
+ char *base = sp->base;
973
+ size_t size = sp->size;
974
+ msegmentptr next = sp->next;
975
+ nsegs++;
976
+ {
977
+ mchunkptr p = align_as_chunk(base);
978
+ size_t psize = chunksize(p);
979
+ /* Can unmap if first chunk holds entire segment and not pinned */
980
+ if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
981
+ tchunkptr tp = (tchunkptr)p;
982
+ if (p == m->dv) {
983
+ m->dv = 0;
984
+ m->dvsize = 0;
985
+ } else {
986
+ unlink_large_chunk(m, tp);
987
+ }
988
+ if (CALL_MUNMAP(base, size) == 0) {
989
+ released += size;
990
+ /* unlink obsoleted record */
991
+ sp = pred;
992
+ sp->next = next;
993
+ } else { /* back out if cannot unmap */
994
+ insert_large_chunk(m, tp, psize);
995
+ }
996
+ }
997
+ }
998
+ pred = sp;
999
+ sp = next;
1000
+ }
1001
+ /* Reset check counter */
1002
+ m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
1003
+ nsegs : MAX_RELEASE_CHECK_RATE;
1004
+ return released;
1005
+ }
1006
+
1007
+ static int alloc_trim(mstate m, size_t pad)
1008
+ {
1009
+ size_t released = 0;
1010
+ if (pad < MAX_REQUEST && is_initialized(m)) {
1011
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
1012
+
1013
+ if (m->topsize > pad) {
1014
+ /* Shrink top space in granularity-size units, keeping at least one */
1015
+ size_t unit = DEFAULT_GRANULARITY;
1016
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
1017
+ SIZE_T_ONE) * unit;
1018
+ msegmentptr sp = segment_holding(m, (char *)m->top);
1019
+
1020
+ if (sp->size >= extra &&
1021
+ !has_segment_link(m, sp)) { /* can't shrink if pinned */
1022
+ size_t newsize = sp->size - extra;
1023
+ /* Prefer mremap, fall back to munmap */
1024
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
1025
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
1026
+ released = extra;
1027
+ }
1028
+ }
1029
+
1030
+ if (released != 0) {
1031
+ sp->size -= released;
1032
+ init_top(m, m->top, m->topsize - released);
1033
+ }
1034
+ }
1035
+
1036
+ /* Unmap any unused mmapped segments */
1037
+ released += release_unused_segments(m);
1038
+
1039
+ /* On failure, disable autotrim to avoid repeated failed future calls */
1040
+ if (released == 0 && m->topsize > m->trim_check)
1041
+ m->trim_check = MAX_SIZE_T;
1042
+ }
1043
+
1044
+ return (released != 0)? 1 : 0;
1045
+ }
1046
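The trim size is computed in whole granularity units while always keeping at least one unit of top space. A worked standalone example of that formula, again assuming a 128 KiB granularity:

#include <stdio.h>
#include <stddef.h>

/* Assumed 128 KiB granularity (DEFAULT_GRANULARITY is defined earlier). */
#define UNIT ((size_t)(128*1024))

int main(void)
{
  size_t topsize = 5*UNIT + 7000;   /* free space currently sitting in top */
  size_t pad = 4096;                /* slack that alloc_trim() must keep */
  /* Same formula as alloc_trim(): whole units only, keep at least one. */
  size_t extra = ((topsize - pad + (UNIT - 1)) / UNIT - 1) * UNIT;
  printf("would release %zu of %zu bytes\n", extra, topsize);  /* 655360 of 662360 */
  return 0;
}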
+
1047
+ /* ---------------------------- malloc support --------------------------- */
1048
+
1049
+ /* allocate a large request from the best fitting chunk in a treebin */
1050
+ static void *tmalloc_large(mstate m, size_t nb)
1051
+ {
1052
+ tchunkptr v = 0;
1053
+ size_t rsize = ~nb+1; /* Unsigned negation */
1054
+ tchunkptr t;
1055
+ bindex_t idx;
1056
+ compute_tree_index(nb, idx);
1057
+
1058
+ if ((t = *treebin_at(m, idx)) != 0) {
1059
+ /* Traverse tree for this bin looking for node with size == nb */
1060
+ size_t sizebits = nb << leftshift_for_tree_index(idx);
1061
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
1062
+ for (;;) {
1063
+ tchunkptr rt;
1064
+ size_t trem = chunksize(t) - nb;
1065
+ if (trem < rsize) {
1066
+ v = t;
1067
+ if ((rsize = trem) == 0)
1068
+ break;
1069
+ }
1070
+ rt = t->child[1];
1071
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1072
+ if (rt != 0 && rt != t)
1073
+ rst = rt;
1074
+ if (t == 0) {
1075
+ t = rst; /* set t to least subtree holding sizes > nb */
1076
+ break;
1077
+ }
1078
+ sizebits <<= 1;
1079
+ }
1080
+ }
1081
+
1082
+ if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1083
+ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1084
+ if (leftbits != 0)
1085
+ t = *treebin_at(m, lj_ffs(leftbits));
1086
+ }
1087
+
1088
+ while (t != 0) { /* find smallest of tree or subtree */
1089
+ size_t trem = chunksize(t) - nb;
1090
+ if (trem < rsize) {
1091
+ rsize = trem;
1092
+ v = t;
1093
+ }
1094
+ t = leftmost_child(t);
1095
+ }
1096
+
1097
+ /* If dv is a better fit, return NULL so malloc will use it */
1098
+ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1099
+ mchunkptr r = chunk_plus_offset(v, nb);
1100
+ unlink_large_chunk(m, v);
1101
+ if (rsize < MIN_CHUNK_SIZE) {
1102
+ set_inuse_and_pinuse(m, v, (rsize + nb));
1103
+ } else {
1104
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1105
+ set_size_and_pinuse_of_free_chunk(r, rsize);
1106
+ insert_chunk(m, r, rsize);
1107
+ }
1108
+ return chunk2mem(v);
1109
+ }
1110
+ return NULL;
1111
+ }
1112
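Two unsigned-arithmetic idioms above are easy to misread: the initial rsize = ~nb+1 is an unsigned negation that acts as "worse than any real fit", and dvsize - nb deliberately wraps to a huge value when dv is too small, so the comparison still prefers the tree chunk. A small standalone check:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
  size_t nb = 2000;
  size_t rsize = ~nb + 1;     /* unsigned negation: worse than any real fit */
  size_t dvsize = 500;        /* dv chunk smaller than the request */
  printf("initial rsize = %zu\n", rsize);
  /* dvsize - nb wraps to a huge value when dv is too small, so any tree
     chunk with a small remainder still wins the comparison below. */
  printf("tree fit preferred: %d\n", (size_t)100 < (size_t)(dvsize - nb));  /* 1 */
  return 0;
}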
+
1113
+ /* allocate a small request from the best fitting chunk in a treebin */
1114
+ static void *tmalloc_small(mstate m, size_t nb)
1115
+ {
1116
+ tchunkptr t, v;
1117
+ mchunkptr r;
1118
+ size_t rsize;
1119
+ bindex_t i = lj_ffs(m->treemap);
1120
+
1121
+ v = t = *treebin_at(m, i);
1122
+ rsize = chunksize(t) - nb;
1123
+
1124
+ while ((t = leftmost_child(t)) != 0) {
1125
+ size_t trem = chunksize(t) - nb;
1126
+ if (trem < rsize) {
1127
+ rsize = trem;
1128
+ v = t;
1129
+ }
1130
+ }
1131
+
1132
+ r = chunk_plus_offset(v, nb);
1133
+ unlink_large_chunk(m, v);
1134
+ if (rsize < MIN_CHUNK_SIZE) {
1135
+ set_inuse_and_pinuse(m, v, (rsize + nb));
1136
+ } else {
1137
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1138
+ set_size_and_pinuse_of_free_chunk(r, rsize);
1139
+ replace_dv(m, r, rsize);
1140
+ }
1141
+ return chunk2mem(v);
1142
+ }
1143
+
1144
+ /* ----------------------------------------------------------------------- */
1145
+
1146
+ void *lj_alloc_create(void)
1147
+ {
1148
+ size_t tsize = DEFAULT_GRANULARITY;
1149
+ char *tbase;
1150
+ INIT_MMAP();
1151
+ tbase = (char *)(CALL_MMAP(tsize));
1152
+ if (tbase != CMFAIL) {
1153
+ size_t msize = pad_request(sizeof(struct malloc_state));
1154
+ mchunkptr mn;
1155
+ mchunkptr msp = align_as_chunk(tbase);
1156
+ mstate m = (mstate)(chunk2mem(msp));
1157
+ memset(m, 0, msize);
1158
+ msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
1159
+ m->seg.base = tbase;
1160
+ m->seg.size = tsize;
1161
+ m->release_checks = MAX_RELEASE_CHECK_RATE;
1162
+ init_bins(m);
1163
+ mn = next_chunk(mem2chunk(m));
1164
+ init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
1165
+ return m;
1166
+ }
1167
+ return NULL;
1168
+ }
1169
+
1170
+ void lj_alloc_destroy(void *msp)
1171
+ {
1172
+ mstate ms = (mstate)msp;
1173
+ msegmentptr sp = &ms->seg;
1174
+ while (sp != 0) {
1175
+ char *base = sp->base;
1176
+ size_t size = sp->size;
1177
+ sp = sp->next;
1178
+ CALL_MUNMAP(base, size);
1179
+ }
1180
+ }
1181
+
1182
+ static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
1183
+ {
1184
+ mstate ms = (mstate)msp;
1185
+ void *mem;
1186
+ size_t nb;
1187
+ if (nsize <= MAX_SMALL_REQUEST) {
1188
+ bindex_t idx;
1189
+ binmap_t smallbits;
1190
+ nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
1191
+ idx = small_index(nb);
1192
+ smallbits = ms->smallmap >> idx;
1193
+
1194
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
1195
+ mchunkptr b, p;
1196
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
1197
+ b = smallbin_at(ms, idx);
1198
+ p = b->fd;
1199
+ unlink_first_small_chunk(ms, b, p, idx);
1200
+ set_inuse_and_pinuse(ms, p, small_index2size(idx));
1201
+ mem = chunk2mem(p);
1202
+ return mem;
1203
+ } else if (nb > ms->dvsize) {
1204
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
1205
+ mchunkptr b, p, r;
1206
+ size_t rsize;
1207
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
1208
+ bindex_t i = lj_ffs(leftbits);
1209
+ b = smallbin_at(ms, i);
1210
+ p = b->fd;
1211
+ unlink_first_small_chunk(ms, b, p, i);
1212
+ rsize = small_index2size(i) - nb;
1213
+ /* With 4-byte sizes the fit here cannot be remainderless */
1214
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
1215
+ set_inuse_and_pinuse(ms, p, small_index2size(i));
1216
+ } else {
1217
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1218
+ r = chunk_plus_offset(p, nb);
1219
+ set_size_and_pinuse_of_free_chunk(r, rsize);
1220
+ replace_dv(ms, r, rsize);
1221
+ }
1222
+ mem = chunk2mem(p);
1223
+ return mem;
1224
+ } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
1225
+ return mem;
1226
+ }
1227
+ }
1228
+ } else if (nsize >= MAX_REQUEST) {
1229
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
1230
+ } else {
1231
+ nb = pad_request(nsize);
1232
+ if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
1233
+ return mem;
1234
+ }
1235
+ }
1236
+
1237
+ if (nb <= ms->dvsize) {
1238
+ size_t rsize = ms->dvsize - nb;
1239
+ mchunkptr p = ms->dv;
1240
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
1241
+ mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
1242
+ ms->dvsize = rsize;
1243
+ set_size_and_pinuse_of_free_chunk(r, rsize);
1244
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1245
+ } else { /* exhaust dv */
1246
+ size_t dvs = ms->dvsize;
1247
+ ms->dvsize = 0;
1248
+ ms->dv = 0;
1249
+ set_inuse_and_pinuse(ms, p, dvs);
1250
+ }
1251
+ mem = chunk2mem(p);
1252
+ return mem;
1253
+ } else if (nb < ms->topsize) { /* Split top */
1254
+ size_t rsize = ms->topsize -= nb;
1255
+ mchunkptr p = ms->top;
1256
+ mchunkptr r = ms->top = chunk_plus_offset(p, nb);
1257
+ r->head = rsize | PINUSE_BIT;
1258
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1259
+ mem = chunk2mem(p);
1260
+ return mem;
1261
+ }
1262
+ return alloc_sys(ms, nb);
1263
+ }
1264
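The request size alone decides which of the paths above is tried first. A simplified standalone summary, with MAX_SMALL_REQUEST and the mmap threshold assumed (roughly 240 bytes and 128 KiB on a typical 64-bit build, an assumption here); the real routine additionally falls back through dv, top and alloc_sys as shown above:

#include <stdio.h>
#include <stddef.h>

/* Assumed thresholds for a 64-bit build; the real values come from the
   definitions earlier in this file. */
#define MAX_SMALL_REQUEST      ((size_t)240)
#define DEFAULT_MMAP_THRESHOLD ((size_t)(128*1024))

static const char *first_path(size_t nsize)
{
  if (nsize <= MAX_SMALL_REQUEST) return "smallbins, then dv/treebins/top";
  if (nsize >= DEFAULT_MMAP_THRESHOLD) return "treebins, then direct mmap";
  return "treebins, then dv/top";
}

int main(void)
{
  size_t sizes[4] = { 16, 200, 4096, (size_t)1 << 20 };
  int i;
  for (i = 0; i < 4; i++)
    printf("%8zu bytes -> %s\n", sizes[i], first_path(sizes[i]));
  return 0;
}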
+
1265
+ static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
1266
+ {
1267
+ if (ptr != 0) {
1268
+ mchunkptr p = mem2chunk(ptr);
1269
+ mstate fm = (mstate)msp;
1270
+ size_t psize = chunksize(p);
1271
+ mchunkptr next = chunk_plus_offset(p, psize);
1272
+ if (!pinuse(p)) {
1273
+ size_t prevsize = p->prev_foot;
1274
+ if ((prevsize & IS_DIRECT_BIT) != 0) {
1275
+ prevsize &= ~IS_DIRECT_BIT;
1276
+ psize += prevsize + DIRECT_FOOT_PAD;
1277
+ CALL_MUNMAP((char *)p - prevsize, psize);
1278
+ return NULL;
1279
+ } else {
1280
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
1281
+ psize += prevsize;
1282
+ p = prev;
1283
+ /* consolidate backward */
1284
+ if (p != fm->dv) {
1285
+ unlink_chunk(fm, p, prevsize);
1286
+ } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
1287
+ fm->dvsize = psize;
1288
+ set_free_with_pinuse(p, psize, next);
1289
+ return NULL;
1290
+ }
1291
+ }
1292
+ }
1293
+ if (!cinuse(next)) { /* consolidate forward */
1294
+ if (next == fm->top) {
1295
+ size_t tsize = fm->topsize += psize;
1296
+ fm->top = p;
1297
+ p->head = tsize | PINUSE_BIT;
1298
+ if (p == fm->dv) {
1299
+ fm->dv = 0;
1300
+ fm->dvsize = 0;
1301
+ }
1302
+ if (tsize > fm->trim_check)
1303
+ alloc_trim(fm, 0);
1304
+ return NULL;
1305
+ } else if (next == fm->dv) {
1306
+ size_t dsize = fm->dvsize += psize;
1307
+ fm->dv = p;
1308
+ set_size_and_pinuse_of_free_chunk(p, dsize);
1309
+ return NULL;
1310
+ } else {
1311
+ size_t nsize = chunksize(next);
1312
+ psize += nsize;
1313
+ unlink_chunk(fm, next, nsize);
1314
+ set_size_and_pinuse_of_free_chunk(p, psize);
1315
+ if (p == fm->dv) {
1316
+ fm->dvsize = psize;
1317
+ return NULL;
1318
+ }
1319
+ }
1320
+ } else {
1321
+ set_free_with_pinuse(p, psize, next);
1322
+ }
1323
+
1324
+ if (is_small(psize)) {
1325
+ insert_small_chunk(fm, p, psize);
1326
+ } else {
1327
+ tchunkptr tp = (tchunkptr)p;
1328
+ insert_large_chunk(fm, tp, psize);
1329
+ if (--fm->release_checks == 0)
1330
+ release_unused_segments(fm);
1331
+ }
1332
+ }
1333
+ return NULL;
1334
+ }
1335
+
1336
+ static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
1337
+ {
1338
+ if (nsize >= MAX_REQUEST) {
1339
+ return NULL;
1340
+ } else {
1341
+ mstate m = (mstate)msp;
1342
+ mchunkptr oldp = mem2chunk(ptr);
1343
+ size_t oldsize = chunksize(oldp);
1344
+ mchunkptr next = chunk_plus_offset(oldp, oldsize);
1345
+ mchunkptr newp = 0;
1346
+ size_t nb = request2size(nsize);
1347
+
1348
+ /* Try to shrink in place or extend into the top chunk; otherwise malloc-copy-free */
1349
+ if (is_direct(oldp)) {
1350
+ newp = direct_resize(oldp, nb); /* this may return NULL. */
1351
+ } else if (oldsize >= nb) { /* already big enough */
1352
+ size_t rsize = oldsize - nb;
1353
+ newp = oldp;
1354
+ if (rsize >= MIN_CHUNK_SIZE) {
1355
+ mchunkptr rem = chunk_plus_offset(newp, nb);
1356
+ set_inuse(m, newp, nb);
1357
+ set_inuse(m, rem, rsize);
1358
+ lj_alloc_free(m, chunk2mem(rem));
1359
+ }
1360
+ } else if (next == m->top && oldsize + m->topsize > nb) {
1361
+ /* Expand into top */
1362
+ size_t newsize = oldsize + m->topsize;
1363
+ size_t newtopsize = newsize - nb;
1364
+ mchunkptr newtop = chunk_plus_offset(oldp, nb);
1365
+ set_inuse(m, oldp, nb);
1366
+ newtop->head = newtopsize |PINUSE_BIT;
1367
+ m->top = newtop;
1368
+ m->topsize = newtopsize;
1369
+ newp = oldp;
1370
+ }
1371
+
1372
+ if (newp != 0) {
1373
+ return chunk2mem(newp);
1374
+ } else {
1375
+ void *newmem = lj_alloc_malloc(m, nsize);
1376
+ if (newmem != 0) {
1377
+ size_t oc = oldsize - overhead_for(oldp);
1378
+ memcpy(newmem, ptr, oc < nsize ? oc : nsize);
1379
+ lj_alloc_free(m, ptr);
1380
+ }
1381
+ return newmem;
1382
+ }
1383
+ }
1384
+ }
1385
+
1386
+ void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
1387
+ {
1388
+ (void)osize;
1389
+ if (nsize == 0) {
1390
+ return lj_alloc_free(msp, ptr);
1391
+ } else if (ptr == NULL) {
1392
+ return lj_alloc_malloc(msp, nsize);
1393
+ } else {
1394
+ return lj_alloc_realloc(msp, ptr, nsize);
1395
+ }
1396
+ }
1397
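lj_alloc_f is the single lua_Alloc-style entry point: nsize == 0 frees, ptr == NULL allocates, anything else reallocates, and osize is ignored. An illustrative driver (not part of the shipped source), using only the functions defined above; LuaJIT itself typically creates one such mspace per Lua state:

/* Illustrative driver, not part of the shipped source. */
static void allocator_demo(void)
{
  void *msp, *p;
  msp = lj_alloc_create();            /* one allocator instance (mspace) */
  if (msp == NULL) return;
  p = lj_alloc_f(msp, NULL, 0, 64);   /* malloc: ptr == NULL, nsize > 0 */
  if (p != NULL)
    p = lj_alloc_f(msp, p, 64, 256);  /* realloc: grow to 256 bytes */
  if (p != NULL)
    lj_alloc_f(msp, p, 256, 0);       /* free: nsize == 0 */
  lj_alloc_destroy(msp);              /* unmap every remaining segment */
}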
+
1398
+ #endif