libv8-freebsd 3.3.10.4

Files changed (703)
  1. data/.gitignore +9 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +75 -0
  5. data/Rakefile +115 -0
  6. data/ext/libv8/extconf.rb +27 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +39 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-freebsd-amd64.patch +16 -0
  11. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  12. data/lib/libv8/scons/CHANGES.txt +5541 -0
  13. data/lib/libv8/scons/LICENSE.txt +20 -0
  14. data/lib/libv8/scons/MANIFEST +200 -0
  15. data/lib/libv8/scons/PKG-INFO +13 -0
  16. data/lib/libv8/scons/README.txt +243 -0
  17. data/lib/libv8/scons/RELEASE.txt +100 -0
  18. data/lib/libv8/scons/engine/SCons/Action.py +1257 -0
  19. data/lib/libv8/scons/engine/SCons/Builder.py +877 -0
  20. data/lib/libv8/scons/engine/SCons/CacheDir.py +216 -0
  21. data/lib/libv8/scons/engine/SCons/Conftest.py +793 -0
  22. data/lib/libv8/scons/engine/SCons/Debug.py +220 -0
  23. data/lib/libv8/scons/engine/SCons/Defaults.py +494 -0
  24. data/lib/libv8/scons/engine/SCons/Environment.py +2417 -0
  25. data/lib/libv8/scons/engine/SCons/Errors.py +205 -0
  26. data/lib/libv8/scons/engine/SCons/Executor.py +633 -0
  27. data/lib/libv8/scons/engine/SCons/Job.py +435 -0
  28. data/lib/libv8/scons/engine/SCons/Memoize.py +244 -0
  29. data/lib/libv8/scons/engine/SCons/Node/Alias.py +152 -0
  30. data/lib/libv8/scons/engine/SCons/Node/FS.py +3302 -0
  31. data/lib/libv8/scons/engine/SCons/Node/Python.py +128 -0
  32. data/lib/libv8/scons/engine/SCons/Node/__init__.py +1329 -0
  33. data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +50 -0
  34. data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +50 -0
  35. data/lib/libv8/scons/engine/SCons/Options/ListOption.py +50 -0
  36. data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +50 -0
  37. data/lib/libv8/scons/engine/SCons/Options/PathOption.py +76 -0
  38. data/lib/libv8/scons/engine/SCons/Options/__init__.py +67 -0
  39. data/lib/libv8/scons/engine/SCons/PathList.py +231 -0
  40. data/lib/libv8/scons/engine/SCons/Platform/__init__.py +241 -0
  41. data/lib/libv8/scons/engine/SCons/Platform/aix.py +69 -0
  42. data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +55 -0
  43. data/lib/libv8/scons/engine/SCons/Platform/darwin.py +70 -0
  44. data/lib/libv8/scons/engine/SCons/Platform/hpux.py +46 -0
  45. data/lib/libv8/scons/engine/SCons/Platform/irix.py +44 -0
  46. data/lib/libv8/scons/engine/SCons/Platform/os2.py +58 -0
  47. data/lib/libv8/scons/engine/SCons/Platform/posix.py +263 -0
  48. data/lib/libv8/scons/engine/SCons/Platform/sunos.py +50 -0
  49. data/lib/libv8/scons/engine/SCons/Platform/win32.py +385 -0
  50. data/lib/libv8/scons/engine/SCons/SConf.py +1030 -0
  51. data/lib/libv8/scons/engine/SCons/SConsign.py +389 -0
  52. data/lib/libv8/scons/engine/SCons/Scanner/C.py +132 -0
  53. data/lib/libv8/scons/engine/SCons/Scanner/D.py +73 -0
  54. data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +109 -0
  55. data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +316 -0
  56. data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +48 -0
  57. data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +387 -0
  58. data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +101 -0
  59. data/lib/libv8/scons/engine/SCons/Scanner/RC.py +55 -0
  60. data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +413 -0
  61. data/lib/libv8/scons/engine/SCons/Script/Interactive.py +384 -0
  62. data/lib/libv8/scons/engine/SCons/Script/Main.py +1405 -0
  63. data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +939 -0
  64. data/lib/libv8/scons/engine/SCons/Script/SConscript.py +640 -0
  65. data/lib/libv8/scons/engine/SCons/Script/__init__.py +412 -0
  66. data/lib/libv8/scons/engine/SCons/Sig.py +63 -0
  67. data/lib/libv8/scons/engine/SCons/Subst.py +904 -0
  68. data/lib/libv8/scons/engine/SCons/Taskmaster.py +1025 -0
  69. data/lib/libv8/scons/engine/SCons/Tool/386asm.py +61 -0
  70. data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +67 -0
  71. data/lib/libv8/scons/engine/SCons/Tool/CVS.py +73 -0
  72. data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +263 -0
  73. data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +323 -0
  74. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
  75. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
  76. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +240 -0
  77. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +82 -0
  78. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +391 -0
  79. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +459 -0
  80. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +526 -0
  81. data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +103 -0
  82. data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +137 -0
  83. data/lib/libv8/scons/engine/SCons/Tool/RCS.py +64 -0
  84. data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +64 -0
  85. data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +71 -0
  86. data/lib/libv8/scons/engine/SCons/Tool/__init__.py +681 -0
  87. data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +82 -0
  88. data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +74 -0
  89. data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +80 -0
  90. data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +76 -0
  91. data/lib/libv8/scons/engine/SCons/Tool/applelink.py +71 -0
  92. data/lib/libv8/scons/engine/SCons/Tool/ar.py +63 -0
  93. data/lib/libv8/scons/engine/SCons/Tool/as.py +78 -0
  94. data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +81 -0
  95. data/lib/libv8/scons/engine/SCons/Tool/c++.py +99 -0
  96. data/lib/libv8/scons/engine/SCons/Tool/cc.py +102 -0
  97. data/lib/libv8/scons/engine/SCons/Tool/cvf.py +58 -0
  98. data/lib/libv8/scons/engine/SCons/Tool/default.py +50 -0
  99. data/lib/libv8/scons/engine/SCons/Tool/dmd.py +240 -0
  100. data/lib/libv8/scons/engine/SCons/Tool/dvi.py +64 -0
  101. data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +125 -0
  102. data/lib/libv8/scons/engine/SCons/Tool/dvips.py +95 -0
  103. data/lib/libv8/scons/engine/SCons/Tool/f03.py +63 -0
  104. data/lib/libv8/scons/engine/SCons/Tool/f77.py +62 -0
  105. data/lib/libv8/scons/engine/SCons/Tool/f90.py +62 -0
  106. data/lib/libv8/scons/engine/SCons/Tool/f95.py +63 -0
  107. data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +98 -0
  108. data/lib/libv8/scons/engine/SCons/Tool/fortran.py +62 -0
  109. data/lib/libv8/scons/engine/SCons/Tool/g++.py +90 -0
  110. data/lib/libv8/scons/engine/SCons/Tool/g77.py +73 -0
  111. data/lib/libv8/scons/engine/SCons/Tool/gas.py +53 -0
  112. data/lib/libv8/scons/engine/SCons/Tool/gcc.py +80 -0
  113. data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +64 -0
  114. data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +62 -0
  115. data/lib/libv8/scons/engine/SCons/Tool/gs.py +81 -0
  116. data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +84 -0
  117. data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +53 -0
  118. data/lib/libv8/scons/engine/SCons/Tool/hplink.py +77 -0
  119. data/lib/libv8/scons/engine/SCons/Tool/icc.py +59 -0
  120. data/lib/libv8/scons/engine/SCons/Tool/icl.py +52 -0
  121. data/lib/libv8/scons/engine/SCons/Tool/ifl.py +72 -0
  122. data/lib/libv8/scons/engine/SCons/Tool/ifort.py +88 -0
  123. data/lib/libv8/scons/engine/SCons/Tool/ilink.py +59 -0
  124. data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +60 -0
  125. data/lib/libv8/scons/engine/SCons/Tool/install.py +283 -0
  126. data/lib/libv8/scons/engine/SCons/Tool/intelc.py +522 -0
  127. data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +67 -0
  128. data/lib/libv8/scons/engine/SCons/Tool/jar.py +116 -0
  129. data/lib/libv8/scons/engine/SCons/Tool/javac.py +230 -0
  130. data/lib/libv8/scons/engine/SCons/Tool/javah.py +137 -0
  131. data/lib/libv8/scons/engine/SCons/Tool/latex.py +80 -0
  132. data/lib/libv8/scons/engine/SCons/Tool/lex.py +97 -0
  133. data/lib/libv8/scons/engine/SCons/Tool/link.py +122 -0
  134. data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +112 -0
  135. data/lib/libv8/scons/engine/SCons/Tool/m4.py +63 -0
  136. data/lib/libv8/scons/engine/SCons/Tool/masm.py +77 -0
  137. data/lib/libv8/scons/engine/SCons/Tool/midl.py +88 -0
  138. data/lib/libv8/scons/engine/SCons/Tool/mingw.py +179 -0
  139. data/lib/libv8/scons/engine/SCons/Tool/mslib.py +64 -0
  140. data/lib/libv8/scons/engine/SCons/Tool/mslink.py +318 -0
  141. data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +50 -0
  142. data/lib/libv8/scons/engine/SCons/Tool/msvc.py +278 -0
  143. data/lib/libv8/scons/engine/SCons/Tool/msvs.py +1806 -0
  144. data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +207 -0
  145. data/lib/libv8/scons/engine/SCons/Tool/mwld.py +107 -0
  146. data/lib/libv8/scons/engine/SCons/Tool/nasm.py +72 -0
  147. data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +312 -0
  148. data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
  149. data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +527 -0
  150. data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +365 -0
  151. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
  152. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
  153. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
  154. data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
  155. data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +44 -0
  156. data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +44 -0
  157. data/lib/libv8/scons/engine/SCons/Tool/pdf.py +78 -0
  158. data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +84 -0
  159. data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +109 -0
  160. data/lib/libv8/scons/engine/SCons/Tool/qt.py +336 -0
  161. data/lib/libv8/scons/engine/SCons/Tool/rmic.py +126 -0
  162. data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +70 -0
  163. data/lib/libv8/scons/engine/SCons/Tool/rpm.py +132 -0
  164. data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +68 -0
  165. data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +58 -0
  166. data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +53 -0
  167. data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +62 -0
  168. data/lib/libv8/scons/engine/SCons/Tool/sunar.py +67 -0
  169. data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +142 -0
  170. data/lib/libv8/scons/engine/SCons/Tool/suncc.py +58 -0
  171. data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +63 -0
  172. data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +64 -0
  173. data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +64 -0
  174. data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +76 -0
  175. data/lib/libv8/scons/engine/SCons/Tool/swig.py +183 -0
  176. data/lib/libv8/scons/engine/SCons/Tool/tar.py +73 -0
  177. data/lib/libv8/scons/engine/SCons/Tool/tex.py +866 -0
  178. data/lib/libv8/scons/engine/SCons/Tool/textfile.py +175 -0
  179. data/lib/libv8/scons/engine/SCons/Tool/tlib.py +53 -0
  180. data/lib/libv8/scons/engine/SCons/Tool/wix.py +99 -0
  181. data/lib/libv8/scons/engine/SCons/Tool/yacc.py +140 -0
  182. data/lib/libv8/scons/engine/SCons/Tool/zip.py +99 -0
  183. data/lib/libv8/scons/engine/SCons/Util.py +1492 -0
  184. data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +89 -0
  185. data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +103 -0
  186. data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +135 -0
  187. data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +106 -0
  188. data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +147 -0
  189. data/lib/libv8/scons/engine/SCons/Variables/__init__.py +312 -0
  190. data/lib/libv8/scons/engine/SCons/Warnings.py +246 -0
  191. data/lib/libv8/scons/engine/SCons/__init__.py +49 -0
  192. data/lib/libv8/scons/engine/SCons/compat/__init__.py +237 -0
  193. data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +150 -0
  194. data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +45 -0
  195. data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +45 -0
  196. data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +76 -0
  197. data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +45 -0
  198. data/lib/libv8/scons/engine/SCons/compat/_scons_sets.py +563 -0
  199. data/lib/libv8/scons/engine/SCons/compat/_scons_subprocess.py +1281 -0
  200. data/lib/libv8/scons/engine/SCons/cpp.py +589 -0
  201. data/lib/libv8/scons/engine/SCons/dblite.py +254 -0
  202. data/lib/libv8/scons/engine/SCons/exitfuncs.py +77 -0
  203. data/lib/libv8/scons/os_spawnv_fix.diff +83 -0
  204. data/lib/libv8/scons/scons-time.1 +1017 -0
  205. data/lib/libv8/scons/scons.1 +15225 -0
  206. data/lib/libv8/scons/sconsign.1 +208 -0
  207. data/lib/libv8/scons/script/scons +196 -0
  208. data/lib/libv8/scons/script/scons-time +1544 -0
  209. data/lib/libv8/scons/script/scons.bat +34 -0
  210. data/lib/libv8/scons/script/sconsign +514 -0
  211. data/lib/libv8/scons/setup.cfg +5 -0
  212. data/lib/libv8/scons/setup.py +423 -0
  213. data/lib/libv8/v8/.gitignore +35 -0
  214. data/lib/libv8/v8/AUTHORS +44 -0
  215. data/lib/libv8/v8/ChangeLog +2839 -0
  216. data/lib/libv8/v8/LICENSE +52 -0
  217. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  218. data/lib/libv8/v8/LICENSE.v8 +26 -0
  219. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  220. data/lib/libv8/v8/SConstruct +1478 -0
  221. data/lib/libv8/v8/build/README.txt +49 -0
  222. data/lib/libv8/v8/build/all.gyp +18 -0
  223. data/lib/libv8/v8/build/armu.gypi +32 -0
  224. data/lib/libv8/v8/build/common.gypi +144 -0
  225. data/lib/libv8/v8/build/gyp_v8 +145 -0
  226. data/lib/libv8/v8/include/v8-debug.h +395 -0
  227. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  228. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  229. data/lib/libv8/v8/include/v8-testing.h +104 -0
  230. data/lib/libv8/v8/include/v8.h +4124 -0
  231. data/lib/libv8/v8/include/v8stdint.h +53 -0
  232. data/lib/libv8/v8/preparser/SConscript +38 -0
  233. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  234. data/lib/libv8/v8/src/SConscript +368 -0
  235. data/lib/libv8/v8/src/accessors.cc +767 -0
  236. data/lib/libv8/v8/src/accessors.h +123 -0
  237. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  238. data/lib/libv8/v8/src/allocation.cc +122 -0
  239. data/lib/libv8/v8/src/allocation.h +143 -0
  240. data/lib/libv8/v8/src/api.cc +5845 -0
  241. data/lib/libv8/v8/src/api.h +574 -0
  242. data/lib/libv8/v8/src/apinatives.js +110 -0
  243. data/lib/libv8/v8/src/apiutils.h +73 -0
  244. data/lib/libv8/v8/src/arguments.h +118 -0
  245. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  246. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  247. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  248. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  249. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  250. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  251. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  252. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  253. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  254. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  255. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  256. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  257. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  258. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  259. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  260. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  261. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  262. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  263. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  264. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  265. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  266. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  267. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  268. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  269. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  270. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  271. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  272. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  273. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  274. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  275. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  276. data/lib/libv8/v8/src/array.js +1366 -0
  277. data/lib/libv8/v8/src/assembler.cc +1207 -0
  278. data/lib/libv8/v8/src/assembler.h +858 -0
  279. data/lib/libv8/v8/src/ast-inl.h +112 -0
  280. data/lib/libv8/v8/src/ast.cc +1146 -0
  281. data/lib/libv8/v8/src/ast.h +2188 -0
  282. data/lib/libv8/v8/src/atomicops.h +167 -0
  283. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  284. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  285. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  286. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  287. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  288. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  289. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  290. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  291. data/lib/libv8/v8/src/bignum.cc +768 -0
  292. data/lib/libv8/v8/src/bignum.h +140 -0
  293. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  294. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  295. data/lib/libv8/v8/src/builtins.cc +1707 -0
  296. data/lib/libv8/v8/src/builtins.h +371 -0
  297. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  298. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  299. data/lib/libv8/v8/src/cached-powers.h +65 -0
  300. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  301. data/lib/libv8/v8/src/char-predicates.h +67 -0
  302. data/lib/libv8/v8/src/checks.cc +110 -0
  303. data/lib/libv8/v8/src/checks.h +296 -0
  304. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  305. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  306. data/lib/libv8/v8/src/circular-queue.h +103 -0
  307. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  308. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  309. data/lib/libv8/v8/src/code.h +70 -0
  310. data/lib/libv8/v8/src/codegen.cc +231 -0
  311. data/lib/libv8/v8/src/codegen.h +84 -0
  312. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  313. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  314. data/lib/libv8/v8/src/compiler.cc +786 -0
  315. data/lib/libv8/v8/src/compiler.h +312 -0
  316. data/lib/libv8/v8/src/contexts.cc +347 -0
  317. data/lib/libv8/v8/src/contexts.h +391 -0
  318. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  319. data/lib/libv8/v8/src/conversions.cc +1131 -0
  320. data/lib/libv8/v8/src/conversions.h +135 -0
  321. data/lib/libv8/v8/src/counters.cc +93 -0
  322. data/lib/libv8/v8/src/counters.h +254 -0
  323. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  324. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  325. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  326. data/lib/libv8/v8/src/cpu.h +69 -0
  327. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  328. data/lib/libv8/v8/src/d8-debug.h +158 -0
  329. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  330. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  331. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  332. data/lib/libv8/v8/src/d8.cc +803 -0
  333. data/lib/libv8/v8/src/d8.gyp +91 -0
  334. data/lib/libv8/v8/src/d8.h +235 -0
  335. data/lib/libv8/v8/src/d8.js +2798 -0
  336. data/lib/libv8/v8/src/data-flow.cc +66 -0
  337. data/lib/libv8/v8/src/data-flow.h +205 -0
  338. data/lib/libv8/v8/src/date.js +1103 -0
  339. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  340. data/lib/libv8/v8/src/dateparser.cc +178 -0
  341. data/lib/libv8/v8/src/dateparser.h +266 -0
  342. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  343. data/lib/libv8/v8/src/debug-agent.h +129 -0
  344. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  345. data/lib/libv8/v8/src/debug.cc +3165 -0
  346. data/lib/libv8/v8/src/debug.h +1057 -0
  347. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  348. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  349. data/lib/libv8/v8/src/disasm.h +80 -0
  350. data/lib/libv8/v8/src/disassembler.cc +343 -0
  351. data/lib/libv8/v8/src/disassembler.h +58 -0
  352. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  353. data/lib/libv8/v8/src/diy-fp.h +117 -0
  354. data/lib/libv8/v8/src/double.h +238 -0
  355. data/lib/libv8/v8/src/dtoa.cc +103 -0
  356. data/lib/libv8/v8/src/dtoa.h +85 -0
  357. data/lib/libv8/v8/src/execution.cc +849 -0
  358. data/lib/libv8/v8/src/execution.h +297 -0
  359. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  360. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  361. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  362. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  363. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  364. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  365. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  366. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  367. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  368. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  369. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  370. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  371. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  372. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  373. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  374. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  375. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  376. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  377. data/lib/libv8/v8/src/factory.cc +1222 -0
  378. data/lib/libv8/v8/src/factory.h +442 -0
  379. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  380. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  381. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  382. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  383. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  384. data/lib/libv8/v8/src/flags.cc +551 -0
  385. data/lib/libv8/v8/src/flags.h +79 -0
  386. data/lib/libv8/v8/src/frames-inl.h +247 -0
  387. data/lib/libv8/v8/src/frames.cc +1243 -0
  388. data/lib/libv8/v8/src/frames.h +870 -0
  389. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  390. data/lib/libv8/v8/src/full-codegen.h +771 -0
  391. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  392. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  393. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  394. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  395. data/lib/libv8/v8/src/global-handles.cc +665 -0
  396. data/lib/libv8/v8/src/global-handles.h +284 -0
  397. data/lib/libv8/v8/src/globals.h +325 -0
  398. data/lib/libv8/v8/src/handles-inl.h +177 -0
  399. data/lib/libv8/v8/src/handles.cc +987 -0
  400. data/lib/libv8/v8/src/handles.h +382 -0
  401. data/lib/libv8/v8/src/hashmap.cc +230 -0
  402. data/lib/libv8/v8/src/hashmap.h +123 -0
  403. data/lib/libv8/v8/src/heap-inl.h +704 -0
  404. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  405. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  406. data/lib/libv8/v8/src/heap.cc +5930 -0
  407. data/lib/libv8/v8/src/heap.h +2268 -0
  408. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  409. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  410. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  411. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  412. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  413. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  414. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  415. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  416. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  417. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  418. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  419. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  420. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  421. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  422. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  423. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  424. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  425. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  426. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  427. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  428. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  429. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  430. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  431. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  432. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  433. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  434. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  435. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  436. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  437. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  438. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  439. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  440. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  441. data/lib/libv8/v8/src/ic-inl.h +130 -0
  442. data/lib/libv8/v8/src/ic.cc +2577 -0
  443. data/lib/libv8/v8/src/ic.h +736 -0
  444. data/lib/libv8/v8/src/inspector.cc +63 -0
  445. data/lib/libv8/v8/src/inspector.h +62 -0
  446. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  447. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  448. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  449. data/lib/libv8/v8/src/isolate.cc +1869 -0
  450. data/lib/libv8/v8/src/isolate.h +1382 -0
  451. data/lib/libv8/v8/src/json-parser.cc +504 -0
  452. data/lib/libv8/v8/src/json-parser.h +161 -0
  453. data/lib/libv8/v8/src/json.js +342 -0
  454. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  455. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  456. data/lib/libv8/v8/src/list-inl.h +212 -0
  457. data/lib/libv8/v8/src/list.h +174 -0
  458. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  459. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  460. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  461. data/lib/libv8/v8/src/lithium.cc +190 -0
  462. data/lib/libv8/v8/src/lithium.h +597 -0
  463. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  464. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  465. data/lib/libv8/v8/src/liveedit.h +180 -0
  466. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  467. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  468. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  469. data/lib/libv8/v8/src/log-inl.h +59 -0
  470. data/lib/libv8/v8/src/log-utils.cc +428 -0
  471. data/lib/libv8/v8/src/log-utils.h +231 -0
  472. data/lib/libv8/v8/src/log.cc +1993 -0
  473. data/lib/libv8/v8/src/log.h +476 -0
  474. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  475. data/lib/libv8/v8/src/macros.py +178 -0
  476. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  477. data/lib/libv8/v8/src/mark-compact.h +506 -0
  478. data/lib/libv8/v8/src/math.js +264 -0
  479. data/lib/libv8/v8/src/messages.cc +179 -0
  480. data/lib/libv8/v8/src/messages.h +113 -0
  481. data/lib/libv8/v8/src/messages.js +1096 -0
  482. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  483. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  484. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  485. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  486. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  487. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  488. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  489. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  490. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  491. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  492. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  493. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  494. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  495. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  496. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  497. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  498. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  499. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  500. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  501. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  502. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  503. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  504. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  505. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  506. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  507. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  508. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  509. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  510. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  511. data/lib/libv8/v8/src/natives.h +64 -0
  512. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  513. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  514. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  515. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  516. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  517. data/lib/libv8/v8/src/objects.cc +10585 -0
  518. data/lib/libv8/v8/src/objects.h +6838 -0
  519. data/lib/libv8/v8/src/parser.cc +4997 -0
  520. data/lib/libv8/v8/src/parser.h +765 -0
  521. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  522. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  523. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  524. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  525. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  526. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  527. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  528. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  529. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  530. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  531. data/lib/libv8/v8/src/platform-tls.h +50 -0
  532. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  533. data/lib/libv8/v8/src/platform.h +667 -0
  534. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  535. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  536. data/lib/libv8/v8/src/preparse-data.h +225 -0
  537. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  538. data/lib/libv8/v8/src/preparser.cc +1450 -0
  539. data/lib/libv8/v8/src/preparser.h +493 -0
  540. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  541. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  542. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  543. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  544. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  545. data/lib/libv8/v8/src/property.cc +105 -0
  546. data/lib/libv8/v8/src/property.h +365 -0
  547. data/lib/libv8/v8/src/proxy.js +83 -0
  548. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  549. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  550. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  551. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  552. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  553. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  554. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  555. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  556. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  557. data/lib/libv8/v8/src/regexp.js +483 -0
  558. data/lib/libv8/v8/src/rewriter.cc +360 -0
  559. data/lib/libv8/v8/src/rewriter.h +50 -0
  560. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  561. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  562. data/lib/libv8/v8/src/runtime.cc +12227 -0
  563. data/lib/libv8/v8/src/runtime.h +652 -0
  564. data/lib/libv8/v8/src/runtime.js +649 -0
  565. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  566. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  567. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  568. data/lib/libv8/v8/src/scanner-base.h +670 -0
  569. data/lib/libv8/v8/src/scanner.cc +345 -0
  570. data/lib/libv8/v8/src/scanner.h +146 -0
  571. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  572. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  573. data/lib/libv8/v8/src/scopes.cc +1150 -0
  574. data/lib/libv8/v8/src/scopes.h +507 -0
  575. data/lib/libv8/v8/src/serialize.cc +1574 -0
  576. data/lib/libv8/v8/src/serialize.h +589 -0
  577. data/lib/libv8/v8/src/shell.h +55 -0
  578. data/lib/libv8/v8/src/simulator.h +43 -0
  579. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  580. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  581. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  582. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  583. data/lib/libv8/v8/src/snapshot.h +91 -0
  584. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  585. data/lib/libv8/v8/src/spaces.cc +3145 -0
  586. data/lib/libv8/v8/src/spaces.h +2369 -0
  587. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  588. data/lib/libv8/v8/src/splay-tree.h +205 -0
  589. data/lib/libv8/v8/src/string-search.cc +41 -0
  590. data/lib/libv8/v8/src/string-search.h +568 -0
  591. data/lib/libv8/v8/src/string-stream.cc +592 -0
  592. data/lib/libv8/v8/src/string-stream.h +191 -0
  593. data/lib/libv8/v8/src/string.js +994 -0
  594. data/lib/libv8/v8/src/strtod.cc +440 -0
  595. data/lib/libv8/v8/src/strtod.h +40 -0
  596. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  597. data/lib/libv8/v8/src/stub-cache.h +924 -0
  598. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  599. data/lib/libv8/v8/src/token.cc +63 -0
  600. data/lib/libv8/v8/src/token.h +288 -0
  601. data/lib/libv8/v8/src/type-info.cc +507 -0
  602. data/lib/libv8/v8/src/type-info.h +272 -0
  603. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  604. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  605. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  606. data/lib/libv8/v8/src/unicode.cc +1624 -0
  607. data/lib/libv8/v8/src/unicode.h +280 -0
  608. data/lib/libv8/v8/src/uri.js +408 -0
  609. data/lib/libv8/v8/src/utils-inl.h +48 -0
  610. data/lib/libv8/v8/src/utils.cc +371 -0
  611. data/lib/libv8/v8/src/utils.h +800 -0
  612. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  613. data/lib/libv8/v8/src/v8-counters.h +314 -0
  614. data/lib/libv8/v8/src/v8.cc +213 -0
  615. data/lib/libv8/v8/src/v8.h +131 -0
  616. data/lib/libv8/v8/src/v8checks.h +64 -0
  617. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  618. data/lib/libv8/v8/src/v8globals.h +512 -0
  619. data/lib/libv8/v8/src/v8memory.h +82 -0
  620. data/lib/libv8/v8/src/v8natives.js +1310 -0
  621. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  622. data/lib/libv8/v8/src/v8threads.cc +464 -0
  623. data/lib/libv8/v8/src/v8threads.h +165 -0
  624. data/lib/libv8/v8/src/v8utils.h +319 -0
  625. data/lib/libv8/v8/src/variables.cc +114 -0
  626. data/lib/libv8/v8/src/variables.h +167 -0
  627. data/lib/libv8/v8/src/version.cc +116 -0
  628. data/lib/libv8/v8/src/version.h +68 -0
  629. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  630. data/lib/libv8/v8/src/vm-state.h +71 -0
  631. data/lib/libv8/v8/src/win32-headers.h +96 -0
  632. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  633. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  634. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  635. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  636. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  637. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  638. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  639. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  640. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  641. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  642. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  643. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  644. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  645. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  646. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  647. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  648. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  649. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  650. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  651. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  652. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  653. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  654. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  655. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  656. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  657. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  658. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  659. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  660. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  661. data/lib/libv8/v8/src/zone-inl.h +140 -0
  662. data/lib/libv8/v8/src/zone.cc +196 -0
  663. data/lib/libv8/v8/src/zone.h +240 -0
  664. data/lib/libv8/v8/tools/codemap.js +265 -0
  665. data/lib/libv8/v8/tools/consarray.js +93 -0
  666. data/lib/libv8/v8/tools/csvparser.js +78 -0
  667. data/lib/libv8/v8/tools/disasm.py +92 -0
  668. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  669. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  670. data/lib/libv8/v8/tools/gcmole/README +62 -0
  671. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  672. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  673. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  674. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  675. data/lib/libv8/v8/tools/grokdump.py +841 -0
  676. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  677. data/lib/libv8/v8/tools/js2c.py +364 -0
  678. data/lib/libv8/v8/tools/jsmin.py +280 -0
  679. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  680. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  681. data/lib/libv8/v8/tools/logreader.js +185 -0
  682. data/lib/libv8/v8/tools/mac-nm +18 -0
  683. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  684. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  685. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  686. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  687. data/lib/libv8/v8/tools/presubmit.py +305 -0
  688. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  689. data/lib/libv8/v8/tools/profile.js +751 -0
  690. data/lib/libv8/v8/tools/profile_view.js +219 -0
  691. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  692. data/lib/libv8/v8/tools/splaytree.js +316 -0
  693. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  694. data/lib/libv8/v8/tools/test.py +1510 -0
  695. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  696. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  697. data/lib/libv8/v8/tools/utils.py +96 -0
  698. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  699. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  700. data/lib/libv8/version.rb +6 -0
  701. data/libv8.gemspec +36 -0
  702. data/thefrontside.png +0 -0
  703. metadata +776 -0

data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h
@@ -0,0 +1,84 @@
+ // Copyright 2011 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+ #define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+ #include "v8.h"
+
+ #include "lithium.h"
+
+ namespace v8 {
+ namespace internal {
+
+ class LCodeGen;
+ class LGapResolver;
+
+ class LGapResolver BASE_EMBEDDED {
+ public:
+
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+ };
+
+ } } // namespace v8::internal
+
+ #endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
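
The header above declares V8's Lithium gap resolver: parallel moves are emitted one at a time, and when the remaining moves form a cycle, one blocking value is parked in a scratch location and restored afterwards. As a reading aid only, here is a minimal standalone C++ sketch of that idea. It is not part of this gem or of V8, and the names (Move, ResolveParallelMoves, kScratch) are hypothetical.

// Standalone sketch (not from this gem): resolve a set of parallel
// register-to-register moves, breaking cycles with one scratch slot,
// in the spirit of the LGapResolver interface above.
#include <cstdio>
#include <vector>

struct Move { int src; int dst; };  // move the value in "src" into "dst"

void ResolveParallelMoves(std::vector<Move> moves, int num_regs) {
  const int kScratch = num_regs;  // one extra slot used to break cycles

  // Emit a single move (here we just print the instruction we would emit).
  auto emit = [](int src, int dst) { std::printf("mov r%d <- r%d\n", dst, src); };

  while (!moves.empty()) {
    bool progress = false;
    for (std::size_t i = 0; i < moves.size(); ++i) {
      int dst = moves[i].dst;
      // A move is safe to emit when no other pending move still reads dst.
      bool blocked = false;
      for (std::size_t j = 0; j < moves.size(); ++j) {
        if (j != i && moves[j].src == dst) { blocked = true; break; }
      }
      if (!blocked) {
        emit(moves[i].src, dst);
        moves.erase(moves.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Every remaining move is blocked, so they form a cycle. Park the
      // value that blocks the first move in the scratch slot, then let
      // the moves that needed it read from the scratch slot instead.
      int parked = moves[0].dst;
      emit(parked, kScratch);
      for (Move& m : moves) {
        if (m.src == parked) m.src = kScratch;
      }
    }
  }
}

int main() {
  // Swap r0 and r1 while independently copying r2 into r3.
  ResolveParallelMoves({{0, 1}, {1, 0}, {2, 3}}, 4);
  return 0;
}

For the example in main() this prints "mov r3 <- r2", "mov r4 <- r1", "mov r1 <- r0", "mov r0 <- r4", using r4 as the scratch slot; V8's actual resolver finds the cycle via the depth-first search described in the comments above rather than this worklist loop.
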
data/lib/libv8/v8/src/arm/macro-assembler-arm.cc
@@ -0,0 +1,3163 @@
1
+ // Copyright 2011 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #include <limits.h> // For LONG_MIN, LONG_MAX.
29
+
30
+ #include "v8.h"
31
+
32
+ #if defined(V8_TARGET_ARCH_ARM)
33
+
34
+ #include "bootstrapper.h"
35
+ #include "codegen.h"
36
+ #include "debug.h"
37
+ #include "runtime.h"
38
+
39
+ namespace v8 {
40
+ namespace internal {
41
+
42
+ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43
+ : Assembler(arg_isolate, buffer, size),
44
+ generating_stub_(false),
45
+ allow_stub_calls_(true) {
46
+ if (isolate() != NULL) {
47
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
48
+ isolate());
49
+ }
50
+ }
51
+
52
+
53
+ // We always generate arm code, never thumb code, even if V8 is compiled to
54
+ // thumb, so we require inter-working support
55
+ #if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
56
+ #error "flag -mthumb-interwork missing"
57
+ #endif
58
+
59
+
60
+ // We do not support thumb inter-working with an arm architecture not supporting
61
+ // the blx instruction (below v5t). If you know what CPU you are compiling for
62
+ // you can use -march=armv7 or similar.
63
+ #if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
64
+ # error "For thumb inter-working we require an architecture which supports blx"
65
+ #endif
66
+
67
+
68
+ // Using bx does not yield better code, so use it only when required
69
+ #if defined(USE_THUMB_INTERWORK)
70
+ #define USE_BX 1
71
+ #endif
72
+
73
+
74
+ void MacroAssembler::Jump(Register target, Condition cond) {
75
+ #if USE_BX
76
+ bx(target, cond);
77
+ #else
78
+ mov(pc, Operand(target), LeaveCC, cond);
79
+ #endif
80
+ }
81
+
82
+
83
+ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
84
+ Condition cond) {
85
+ #if USE_BX
86
+ mov(ip, Operand(target, rmode));
87
+ bx(ip, cond);
88
+ #else
89
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
90
+ #endif
91
+ }
92
+
93
+
94
+ void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
95
+ Condition cond) {
96
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
97
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
98
+ }
99
+
100
+
101
+ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
102
+ Condition cond) {
103
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
104
+ // 'code' is always generated ARM code, never THUMB code
105
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
106
+ }
107
+
108
+
109
+ int MacroAssembler::CallSize(Register target, Condition cond) {
110
+ #if USE_BLX
111
+ return kInstrSize;
112
+ #else
113
+ return 2 * kInstrSize;
114
+ #endif
115
+ }
116
+
117
+
118
+ void MacroAssembler::Call(Register target, Condition cond) {
119
+ // Block constant pool for the call instruction sequence.
120
+ BlockConstPoolScope block_const_pool(this);
121
+ #ifdef DEBUG
122
+ int pre_position = pc_offset();
123
+ #endif
124
+
125
+ #if USE_BLX
126
+ blx(target, cond);
127
+ #else
128
+ // set lr for return at current pc + 8
129
+ mov(lr, Operand(pc), LeaveCC, cond);
130
+ mov(pc, Operand(target), LeaveCC, cond);
131
+ #endif
132
+
133
+ #ifdef DEBUG
134
+ int post_position = pc_offset();
135
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
136
+ #endif
137
+ }
138
+
139
+
140
+ int MacroAssembler::CallSize(
141
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
142
+ int size = 2 * kInstrSize;
143
+ Instr mov_instr = cond | MOV | LeaveCC;
144
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
145
+ size += kInstrSize;
146
+ }
147
+ return size;
148
+ }
149
+
150
+
151
+ void MacroAssembler::Call(intptr_t target,
152
+ RelocInfo::Mode rmode,
153
+ Condition cond) {
154
+ // Block constant pool for the call instruction sequence.
155
+ BlockConstPoolScope block_const_pool(this);
156
+ #ifdef DEBUG
157
+ int pre_position = pc_offset();
158
+ #endif
159
+
160
+ #if USE_BLX
161
+ // On ARMv5 and after the recommended call sequence is:
162
+ // ldr ip, [pc, #...]
163
+ // blx ip
164
+
165
+ // Statement positions are expected to be recorded when the target
166
+ // address is loaded. The mov method will automatically record
167
+ // positions when pc is the target, since this is not the case here
168
+ // we have to do it explicitly.
169
+ positions_recorder()->WriteRecordedPositions();
170
+
171
+ mov(ip, Operand(target, rmode));
172
+ blx(ip, cond);
173
+
174
+ ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
175
+ #else
176
+ // Set lr for return at current pc + 8.
177
+ mov(lr, Operand(pc), LeaveCC, cond);
178
+ // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
179
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
180
+ ASSERT(kCallTargetAddressOffset == kInstrSize);
181
+ #endif
182
+
183
+ #ifdef DEBUG
184
+ int post_position = pc_offset();
185
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
186
+ #endif
187
+ }
188
+
189
+
190
+ int MacroAssembler::CallSize(
191
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
192
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode);
193
+ }
194
+
195
+
196
+ void MacroAssembler::Call(
197
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
198
+ #ifdef DEBUG
199
+ int pre_position = pc_offset();
200
+ #endif
201
+
202
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
203
+ Call(reinterpret_cast<intptr_t>(target), rmode, cond);
204
+
205
+ #ifdef DEBUG
206
+ int post_position = pc_offset();
207
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
208
+ #endif
209
+ }
210
+
211
+
212
+ int MacroAssembler::CallSize(
213
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
214
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
215
+ }
216
+
217
+
218
+ void MacroAssembler::CallWithAstId(Handle<Code> code,
219
+ RelocInfo::Mode rmode,
220
+ unsigned ast_id,
221
+ Condition cond) {
222
+ #ifdef DEBUG
223
+ int pre_position = pc_offset();
224
+ #endif
225
+
226
+ ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
227
+ ASSERT(ast_id != kNoASTId);
228
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
229
+ ast_id_for_reloc_info_ = ast_id;
230
+ // 'code' is always generated ARM code, never THUMB code
231
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
232
+
233
+ #ifdef DEBUG
234
+ int post_position = pc_offset();
235
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
236
+ #endif
237
+ }
238
+
239
+
240
+ void MacroAssembler::Call(Handle<Code> code,
241
+ RelocInfo::Mode rmode,
242
+ Condition cond) {
243
+ #ifdef DEBUG
244
+ int pre_position = pc_offset();
245
+ #endif
246
+
247
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
248
+ // 'code' is always generated ARM code, never THUMB code
249
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
250
+
251
+ #ifdef DEBUG
252
+ int post_position = pc_offset();
253
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
254
+ #endif
255
+ }
256
+
257
+
258
+ void MacroAssembler::Ret(Condition cond) {
259
+ #if USE_BX
260
+ bx(lr, cond);
261
+ #else
262
+ mov(pc, Operand(lr), LeaveCC, cond);
263
+ #endif
264
+ }
265
+
266
+
267
+ void MacroAssembler::Drop(int count, Condition cond) {
268
+ if (count > 0) {
269
+ add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
270
+ }
271
+ }
272
+
273
+
274
+ void MacroAssembler::Ret(int drop, Condition cond) {
275
+ Drop(drop, cond);
276
+ Ret(cond);
277
+ }
278
+
279
+
280
+ void MacroAssembler::Swap(Register reg1,
281
+ Register reg2,
282
+ Register scratch,
283
+ Condition cond) {
284
+ if (scratch.is(no_reg)) {
285
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
286
+ eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
287
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
288
+ } else {
289
+ mov(scratch, reg1, LeaveCC, cond);
290
+ mov(reg1, reg2, LeaveCC, cond);
291
+ mov(reg2, scratch, LeaveCC, cond);
292
+ }
293
+ }
294
+
295
+
296
+ void MacroAssembler::Call(Label* target) {
297
+ bl(target);
298
+ }
299
+
300
+
301
+ void MacroAssembler::Move(Register dst, Handle<Object> value) {
302
+ mov(dst, Operand(value));
303
+ }
304
+
305
+
306
+ void MacroAssembler::Move(Register dst, Register src) {
307
+ if (!dst.is(src)) {
308
+ mov(dst, src);
309
+ }
310
+ }
311
+
312
+
313
+ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
314
+ ASSERT(CpuFeatures::IsSupported(VFP3));
315
+ CpuFeatures::Scope scope(VFP3);
316
+ if (!dst.is(src)) {
317
+ vmov(dst, src);
318
+ }
319
+ }
320
+
321
+
322
+ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
323
+ Condition cond) {
324
+ if (!src2.is_reg() &&
325
+ !src2.must_use_constant_pool() &&
326
+ src2.immediate() == 0) {
327
+ mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
328
+
329
+ } else if (!src2.is_single_instruction() &&
330
+ !src2.must_use_constant_pool() &&
331
+ CpuFeatures::IsSupported(ARMv7) &&
332
+ IsPowerOf2(src2.immediate() + 1)) {
333
+ ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
334
+
335
+ } else {
336
+ and_(dst, src1, src2, LeaveCC, cond);
337
+ }
338
+ }
339
+
340
+
341
+ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
342
+ Condition cond) {
343
+ ASSERT(lsb < 32);
344
+ if (!CpuFeatures::IsSupported(ARMv7)) {
345
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
346
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
347
+ if (lsb != 0) {
348
+ mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
349
+ }
350
+ } else {
351
+ ubfx(dst, src1, lsb, width, cond);
352
+ }
353
+ }
354
+
355
+
356
+ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
357
+ Condition cond) {
358
+ ASSERT(lsb < 32);
359
+ if (!CpuFeatures::IsSupported(ARMv7)) {
360
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
361
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
362
+ int shift_up = 32 - lsb - width;
363
+ int shift_down = lsb + shift_up;
364
+ if (shift_up != 0) {
365
+ mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
366
+ }
367
+ if (shift_down != 0) {
368
+ mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
369
+ }
370
+ } else {
371
+ sbfx(dst, src1, lsb, width, cond);
372
+ }
373
+ }
374
+
375
+
376
+ void MacroAssembler::Bfi(Register dst,
377
+ Register src,
378
+ Register scratch,
379
+ int lsb,
380
+ int width,
381
+ Condition cond) {
382
+ ASSERT(0 <= lsb && lsb < 32);
383
+ ASSERT(0 <= width && width < 32);
384
+ ASSERT(lsb + width < 32);
385
+ ASSERT(!scratch.is(dst));
386
+ if (width == 0) return;
387
+ if (!CpuFeatures::IsSupported(ARMv7)) {
388
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
389
+ bic(dst, dst, Operand(mask));
390
+ and_(scratch, src, Operand((1 << width) - 1));
391
+ mov(scratch, Operand(scratch, LSL, lsb));
392
+ orr(dst, dst, scratch);
393
+ } else {
394
+ bfi(dst, src, lsb, width, cond);
395
+ }
396
+ }
397
+
398
+
399
+ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
400
+ ASSERT(lsb < 32);
401
+ if (!CpuFeatures::IsSupported(ARMv7)) {
402
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
403
+ bic(dst, dst, Operand(mask));
404
+ } else {
405
+ bfc(dst, lsb, width, cond);
406
+ }
407
+ }
408
+
409
+
410
+ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
411
+ Condition cond) {
412
+ if (!CpuFeatures::IsSupported(ARMv7)) {
413
+ ASSERT(!dst.is(pc) && !src.rm().is(pc));
414
+ ASSERT((satpos >= 0) && (satpos <= 31));
415
+
416
+ // These asserts are required to ensure compatibility with the ARMv7
417
+ // implementation.
418
+ ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
419
+ ASSERT(src.rs().is(no_reg));
420
+
421
+ Label done;
422
+ int satval = (1 << satpos) - 1;
423
+
424
+ if (cond != al) {
425
+ b(NegateCondition(cond), &done); // Skip saturate if !condition.
426
+ }
427
+ if (!(src.is_reg() && dst.is(src.rm()))) {
428
+ mov(dst, src);
429
+ }
430
+ tst(dst, Operand(~satval));
431
+ b(eq, &done);
432
+ mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
433
+ mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
434
+ bind(&done);
435
+ } else {
436
+ usat(dst, satpos, src, cond);
437
+ }
438
+ }
439
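+ // In both paths above the result is clamped to the range
+ // [0, (1 << satpos) - 1]: negative inputs become 0 and any value with bits
+ // above satval set becomes satval.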
+
440
+
441
+ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
442
+ // Empty the const pool.
443
+ CheckConstPool(true, true);
444
+ add(pc, pc, Operand(index,
445
+ LSL,
446
+ Instruction::kInstrSizeLog2 - kSmiTagSize));
447
+ BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
448
+ nop(); // Jump table alignment.
449
+ for (int i = 0; i < targets.length(); i++) {
450
+ b(targets[i]);
451
+ }
452
+ }
453
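+ // Note: reading pc on ARM yields the address of the current instruction
+ // plus 8 (two instructions ahead), so the add above lands on the first
+ // branch for index 0; the nop fills the intervening slot.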
+
454
+
455
+ void MacroAssembler::LoadRoot(Register destination,
456
+ Heap::RootListIndex index,
457
+ Condition cond) {
458
+ ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
459
+ }
460
+
461
+
462
+ void MacroAssembler::StoreRoot(Register source,
463
+ Heap::RootListIndex index,
464
+ Condition cond) {
465
+ str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
466
+ }
467
+
468
+
469
+ void MacroAssembler::RecordWriteHelper(Register object,
470
+ Register address,
471
+ Register scratch) {
472
+ if (emit_debug_code()) {
473
+ // Check that the object is not in new space.
474
+ Label not_in_new_space;
475
+ InNewSpace(object, scratch, ne, &not_in_new_space);
476
+ Abort("new-space object passed to RecordWriteHelper");
477
+ bind(&not_in_new_space);
478
+ }
479
+
480
+ // Calculate page address.
481
+ Bfc(object, 0, kPageSizeBits);
482
+
483
+ // Calculate region number.
484
+ Ubfx(address, address, Page::kRegionSizeLog2,
485
+ kPageSizeBits - Page::kRegionSizeLog2);
486
+
487
+ // Mark region dirty.
488
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
489
+ mov(ip, Operand(1));
490
+ orr(scratch, scratch, Operand(ip, LSL, address));
491
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
492
+ }
493
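+ // After the Ubfx above, 'address' holds the region number within the page
+ // (kPageSizeBits - kRegionSizeLog2 bits wide); it is used as a bit index
+ // into the single dirty-flags word, so a page has at most 32 regions.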
+
494
+
495
+ void MacroAssembler::InNewSpace(Register object,
496
+ Register scratch,
497
+ Condition cond,
498
+ Label* branch) {
499
+ ASSERT(cond == eq || cond == ne);
500
+ and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
501
+ cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
502
+ b(cond, branch);
503
+ }
504
+
505
+
506
+ // Will clobber 4 registers: object, offset, scratch, ip. The
507
+ // register 'object' contains a heap object pointer. The heap object
508
+ // tag is shifted away.
509
+ void MacroAssembler::RecordWrite(Register object,
510
+ Operand offset,
511
+ Register scratch0,
512
+ Register scratch1) {
513
+ // The compiled code assumes that record write doesn't change the
514
+ // context register, so we check that none of the clobbered
515
+ // registers are cp.
516
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
517
+
518
+ Label done;
519
+
520
+ // First, test that the object is not in the new space. We cannot set
521
+ // region marks for new space pages.
522
+ InNewSpace(object, scratch0, eq, &done);
523
+
524
+ // Add offset into the object.
525
+ add(scratch0, object, offset);
526
+
527
+ // Record the actual write.
528
+ RecordWriteHelper(object, scratch0, scratch1);
529
+
530
+ bind(&done);
531
+
532
+ // Clobber all input registers when running with the debug-code flag
533
+ // turned on to provoke errors.
534
+ if (emit_debug_code()) {
535
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
536
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
537
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
538
+ }
539
+ }
540
+
541
+
542
+ // Will clobber 4 registers: object, address, scratch, ip. The
543
+ // register 'object' contains a heap object pointer. The heap object
544
+ // tag is shifted away.
545
+ void MacroAssembler::RecordWrite(Register object,
546
+ Register address,
547
+ Register scratch) {
548
+ // The compiled code assumes that record write doesn't change the
549
+ // context register, so we check that none of the clobbered
550
+ // registers are cp.
551
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
552
+
553
+ Label done;
554
+
555
+ // First, test that the object is not in the new space. We cannot set
556
+ // region marks for new space pages.
557
+ InNewSpace(object, scratch, eq, &done);
558
+
559
+ // Record the actual write.
560
+ RecordWriteHelper(object, address, scratch);
561
+
562
+ bind(&done);
563
+
564
+ // Clobber all input registers when running with the debug-code flag
565
+ // turned on to provoke errors.
566
+ if (emit_debug_code()) {
567
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
568
+ mov(address, Operand(BitCast<int32_t>(kZapValue)));
569
+ mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
570
+ }
571
+ }
572
+
573
+
574
+ // Push and pop all registers that can hold pointers.
575
+ void MacroAssembler::PushSafepointRegisters() {
576
+ // Safepoints expect a block of contiguous register values starting with r0:
577
+ ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
578
+ // Safepoints expect a block of kNumSafepointRegisters values on the
579
+ // stack, so adjust the stack for unsaved registers.
580
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
581
+ ASSERT(num_unsaved >= 0);
582
+ sub(sp, sp, Operand(num_unsaved * kPointerSize));
583
+ stm(db_w, sp, kSafepointSavedRegisters);
584
+ }
585
+
586
+
587
+ void MacroAssembler::PopSafepointRegisters() {
588
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
589
+ ldm(ia_w, sp, kSafepointSavedRegisters);
590
+ add(sp, sp, Operand(num_unsaved * kPointerSize));
591
+ }
592
+
593
+
594
+ void MacroAssembler::PushSafepointRegistersAndDoubles() {
595
+ PushSafepointRegisters();
596
+ sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
597
+ kDoubleSize));
598
+ for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
599
+ vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
600
+ }
601
+ }
602
+
603
+
604
+ void MacroAssembler::PopSafepointRegistersAndDoubles() {
605
+ for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
606
+ vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
607
+ }
608
+ add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
609
+ kDoubleSize));
610
+ PopSafepointRegisters();
611
+ }
612
+
613
+ void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
614
+ Register dst) {
615
+ str(src, SafepointRegistersAndDoublesSlot(dst));
616
+ }
617
+
618
+
619
+ void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
620
+ str(src, SafepointRegisterSlot(dst));
621
+ }
622
+
623
+
624
+ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
625
+ ldr(dst, SafepointRegisterSlot(src));
626
+ }
627
+
628
+
629
+ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
630
+ // The registers are pushed starting with the highest encoding,
631
+ // which means that the lowest encodings are closest to the stack pointer.
632
+ ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
633
+ return reg_code;
634
+ }
635
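+ // The stm in PushSafepointRegisters stores the lowest-numbered register at
+ // the lowest address, so the register with code k sits k slots above sp and
+ // the stack index is simply the register code.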
+
636
+
637
+ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
638
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
639
+ }
640
+
641
+
642
+ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
643
+ // General purpose registers are pushed last on the stack.
644
+ int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
645
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
646
+ return MemOperand(sp, doubles_size + register_offset);
647
+ }
648
+
649
+
650
+ void MacroAssembler::Ldrd(Register dst1, Register dst2,
651
+ const MemOperand& src, Condition cond) {
652
+ ASSERT(src.rm().is(no_reg));
653
+ ASSERT(!dst1.is(lr)); // r14.
654
+ ASSERT_EQ(0, dst1.code() % 2);
655
+ ASSERT_EQ(dst1.code() + 1, dst2.code());
656
+
657
+ // Generate two ldr instructions if ldrd is not available.
658
+ if (CpuFeatures::IsSupported(ARMv7)) {
659
+ CpuFeatures::Scope scope(ARMv7);
660
+ ldrd(dst1, dst2, src, cond);
661
+ } else {
662
+ MemOperand src2(src);
663
+ src2.set_offset(src2.offset() + 4);
664
+ if (dst1.is(src.rn())) {
665
+ ldr(dst2, src2, cond);
666
+ ldr(dst1, src, cond);
667
+ } else {
668
+ ldr(dst1, src, cond);
669
+ ldr(dst2, src2, cond);
670
+ }
671
+ }
672
+ }
673
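+ // In the fallback above the load order is chosen so that when dst1 aliases
+ // the base register the second word is loaded first, keeping the base valid
+ // for both loads.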
+
674
+
675
+ void MacroAssembler::Strd(Register src1, Register src2,
676
+ const MemOperand& dst, Condition cond) {
677
+ ASSERT(dst.rm().is(no_reg));
678
+ ASSERT(!src1.is(lr)); // r14.
679
+ ASSERT_EQ(0, src1.code() % 2);
680
+ ASSERT_EQ(src1.code() + 1, src2.code());
681
+
682
+ // Generate two str instructions if strd is not available.
683
+ if (CpuFeatures::IsSupported(ARMv7)) {
684
+ CpuFeatures::Scope scope(ARMv7);
685
+ strd(src1, src2, dst, cond);
686
+ } else {
687
+ MemOperand dst2(dst);
688
+ dst2.set_offset(dst2.offset() + 4);
689
+ str(src1, dst, cond);
690
+ str(src2, dst2, cond);
691
+ }
692
+ }
693
+
694
+
695
+ void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
696
+ const Register scratch,
697
+ const Condition cond) {
698
+ vmrs(scratch, cond);
699
+ bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
700
+ vmsr(scratch, cond);
701
+ }
702
+
703
+
704
+ void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
705
+ const DwVfpRegister src2,
706
+ const Condition cond) {
707
+ // Compare and move FPSCR flags to the normal condition flags.
708
+ VFPCompareAndLoadFlags(src1, src2, pc, cond);
709
+ }
710
+
711
+ void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
712
+ const double src2,
713
+ const Condition cond) {
714
+ // Compare and move FPSCR flags to the normal condition flags.
715
+ VFPCompareAndLoadFlags(src1, src2, pc, cond);
716
+ }
717
+
718
+
719
+ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
720
+ const DwVfpRegister src2,
721
+ const Register fpscr_flags,
722
+ const Condition cond) {
723
+ // Compare and load FPSCR.
724
+ vcmp(src1, src2, cond);
725
+ vmrs(fpscr_flags, cond);
726
+ }
727
+
728
+ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
729
+ const double src2,
730
+ const Register fpscr_flags,
731
+ const Condition cond) {
732
+ // Compare and load FPSCR.
733
+ vcmp(src1, src2, cond);
734
+ vmrs(fpscr_flags, cond);
735
+ }
736
+
737
+
738
+ void MacroAssembler::EnterFrame(StackFrame::Type type) {
739
+ // r0-r3: preserved
740
+ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
741
+ mov(ip, Operand(Smi::FromInt(type)));
742
+ push(ip);
743
+ mov(ip, Operand(CodeObject()));
744
+ push(ip);
745
+ add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
746
+ }
747
+
748
+
749
+ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
750
+ // r0: preserved
751
+ // r1: preserved
752
+ // r2: preserved
753
+
754
+ // Drop the execution stack down to the frame pointer and restore
755
+ // the caller frame pointer and return address.
756
+ mov(sp, fp);
757
+ ldm(ia_w, sp, fp.bit() | lr.bit());
758
+ }
759
+
760
+
761
+ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
762
+ // Set up the frame structure on the stack.
763
+ ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
764
+ ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
765
+ ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
766
+ Push(lr, fp);
767
+ mov(fp, Operand(sp)); // Set up new frame pointer.
768
+ // Reserve room for saved entry sp and code object.
769
+ sub(sp, sp, Operand(2 * kPointerSize));
770
+ if (emit_debug_code()) {
771
+ mov(ip, Operand(0));
772
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
773
+ }
774
+ mov(ip, Operand(CodeObject()));
775
+ str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
776
+
777
+ // Save the frame pointer and the context in top.
778
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
779
+ str(fp, MemOperand(ip));
780
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
781
+ str(cp, MemOperand(ip));
782
+
783
+ // Optionally save all double registers.
784
+ if (save_doubles) {
785
+ DwVfpRegister first = d0;
786
+ DwVfpRegister last =
787
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
788
+ vstm(db_w, sp, first, last);
789
+ // Note that d0 will be accessible at
790
+ // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
791
+ // since the sp slot and code slot were pushed after the fp.
792
+ }
793
+
794
+ // Reserve room for the return address and stack space, and align the frame
795
+ // in preparation for calling the runtime function.
796
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
797
+ sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
798
+ if (frame_alignment > 0) {
799
+ ASSERT(IsPowerOf2(frame_alignment));
800
+ and_(sp, sp, Operand(-frame_alignment));
801
+ }
802
+
803
+ // Set the exit frame sp value to point just before the return address
804
+ // location.
805
+ add(ip, sp, Operand(kPointerSize));
806
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
807
+ }
808
+
809
+
810
+ void MacroAssembler::InitializeNewString(Register string,
811
+ Register length,
812
+ Heap::RootListIndex map_index,
813
+ Register scratch1,
814
+ Register scratch2) {
815
+ mov(scratch1, Operand(length, LSL, kSmiTagSize));
816
+ LoadRoot(scratch2, map_index);
817
+ str(scratch1, FieldMemOperand(string, String::kLengthOffset));
818
+ mov(scratch1, Operand(String::kEmptyHashField));
819
+ str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
820
+ str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
821
+ }
822
+
823
+
824
+ int MacroAssembler::ActivationFrameAlignment() {
825
+ #if defined(V8_HOST_ARCH_ARM)
826
+ // Running on the real platform. Use the alignment as mandated by the local
827
+ // environment.
828
+ // Note: This will break if we ever start generating snapshots on one ARM
829
+ // platform for another ARM platform with a different alignment.
830
+ return OS::ActivationFrameAlignment();
831
+ #else // defined(V8_HOST_ARCH_ARM)
832
+ // If we are using the simulator then we should always align to the expected
833
+ // alignment. As the simulator is used to generate snapshots we do not know
834
+ // if the target platform will need alignment, so this is controlled from a
835
+ // flag.
836
+ return FLAG_sim_stack_alignment;
837
+ #endif // defined(V8_HOST_ARCH_ARM)
838
+ }
839
+
840
+
841
+ void MacroAssembler::LeaveExitFrame(bool save_doubles,
842
+ Register argument_count) {
843
+ // Optionally restore all double registers.
844
+ if (save_doubles) {
845
+ // Calculate the stack location of the saved doubles and restore them.
846
+ const int offset = 2 * kPointerSize;
847
+ sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
848
+ DwVfpRegister first = d0;
849
+ DwVfpRegister last =
850
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
851
+ vldm(ia, r3, first, last);
852
+ }
853
+
854
+ // Clear top frame.
855
+ mov(r3, Operand(0, RelocInfo::NONE));
856
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
857
+ str(r3, MemOperand(ip));
858
+
859
+ // Restore current context from top and clear it in debug mode.
860
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
861
+ ldr(cp, MemOperand(ip));
862
+ #ifdef DEBUG
863
+ str(r3, MemOperand(ip));
864
+ #endif
865
+
866
+ // Tear down the exit frame, pop the arguments, and return.
867
+ mov(sp, Operand(fp));
868
+ ldm(ia_w, sp, fp.bit() | lr.bit());
869
+ if (argument_count.is_valid()) {
870
+ add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
871
+ }
872
+ }
873
+
874
+ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
875
+ if (use_eabi_hardfloat()) {
876
+ Move(dst, d0);
877
+ } else {
878
+ vmov(dst, r0, r1);
879
+ }
880
+ }
881
+
882
+
883
+ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
884
+ // This macro takes the dst register to make the code more readable
885
+ // at the call sites. However, the dst register has to be r5 to
886
+ // follow the calling convention which requires the call type to be
887
+ // in r5.
888
+ ASSERT(dst.is(r5));
889
+ if (call_kind == CALL_AS_FUNCTION) {
890
+ mov(dst, Operand(Smi::FromInt(1)));
891
+ } else {
892
+ mov(dst, Operand(Smi::FromInt(0)));
893
+ }
894
+ }
895
+
896
+
897
+ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
898
+ const ParameterCount& actual,
899
+ Handle<Code> code_constant,
900
+ Register code_reg,
901
+ Label* done,
902
+ InvokeFlag flag,
903
+ const CallWrapper& call_wrapper,
904
+ CallKind call_kind) {
905
+ bool definitely_matches = false;
906
+ Label regular_invoke;
907
+
908
+ // Check whether the expected and actual arguments count match. If not,
909
+ // set up registers according to the contract with ArgumentsAdaptorTrampoline:
910
+ // r0: actual arguments count
911
+ // r1: function (passed through to callee)
912
+ // r2: expected arguments count
913
+ // r3: callee code entry
914
+
915
+ // The code below is made a lot easier because the calling code already sets
916
+ // up actual and expected registers according to the contract if values are
917
+ // passed in registers.
918
+ ASSERT(actual.is_immediate() || actual.reg().is(r0));
919
+ ASSERT(expected.is_immediate() || expected.reg().is(r2));
920
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
921
+
922
+ if (expected.is_immediate()) {
923
+ ASSERT(actual.is_immediate());
924
+ if (expected.immediate() == actual.immediate()) {
925
+ definitely_matches = true;
926
+ } else {
927
+ mov(r0, Operand(actual.immediate()));
928
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
929
+ if (expected.immediate() == sentinel) {
930
+ // Don't worry about adapting arguments for builtins that
931
+ // don't want that done. Skip the adaptation code by making it look
932
+ // like we have a match between expected and actual number of
933
+ // arguments.
934
+ definitely_matches = true;
935
+ } else {
936
+ mov(r2, Operand(expected.immediate()));
937
+ }
938
+ }
939
+ } else {
940
+ if (actual.is_immediate()) {
941
+ cmp(expected.reg(), Operand(actual.immediate()));
942
+ b(eq, &regular_invoke);
943
+ mov(r0, Operand(actual.immediate()));
944
+ } else {
945
+ cmp(expected.reg(), Operand(actual.reg()));
946
+ b(eq, &regular_invoke);
947
+ }
948
+ }
949
+
950
+ if (!definitely_matches) {
951
+ if (!code_constant.is_null()) {
952
+ mov(r3, Operand(code_constant));
953
+ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
954
+ }
955
+
956
+ Handle<Code> adaptor =
957
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
958
+ if (flag == CALL_FUNCTION) {
959
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
960
+ SetCallKind(r5, call_kind);
961
+ Call(adaptor, RelocInfo::CODE_TARGET);
962
+ call_wrapper.AfterCall();
963
+ b(done);
964
+ } else {
965
+ SetCallKind(r5, call_kind);
966
+ Jump(adaptor, RelocInfo::CODE_TARGET);
967
+ }
968
+ bind(&regular_invoke);
969
+ }
970
+ }
971
+
972
+
973
+ void MacroAssembler::InvokeCode(Register code,
974
+ const ParameterCount& expected,
975
+ const ParameterCount& actual,
976
+ InvokeFlag flag,
977
+ const CallWrapper& call_wrapper,
978
+ CallKind call_kind) {
979
+ Label done;
980
+
981
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
982
+ call_wrapper, call_kind);
983
+ if (flag == CALL_FUNCTION) {
984
+ call_wrapper.BeforeCall(CallSize(code));
985
+ SetCallKind(r5, call_kind);
986
+ Call(code);
987
+ call_wrapper.AfterCall();
988
+ } else {
989
+ ASSERT(flag == JUMP_FUNCTION);
990
+ SetCallKind(r5, call_kind);
991
+ Jump(code);
992
+ }
993
+
994
+ // Continue here if InvokePrologue does handle the invocation due to
995
+ // mismatched parameter counts.
996
+ bind(&done);
997
+ }
998
+
999
+
1000
+ void MacroAssembler::InvokeCode(Handle<Code> code,
1001
+ const ParameterCount& expected,
1002
+ const ParameterCount& actual,
1003
+ RelocInfo::Mode rmode,
1004
+ InvokeFlag flag,
1005
+ CallKind call_kind) {
1006
+ Label done;
1007
+
1008
+ InvokePrologue(expected, actual, code, no_reg, &done, flag,
1009
+ NullCallWrapper(), call_kind);
1010
+ if (flag == CALL_FUNCTION) {
1011
+ SetCallKind(r5, call_kind);
1012
+ Call(code, rmode);
1013
+ } else {
1014
+ SetCallKind(r5, call_kind);
1015
+ Jump(code, rmode);
1016
+ }
1017
+
1018
+ // Continue here if InvokePrologue does handle the invocation due to
1019
+ // mismatched parameter counts.
1020
+ bind(&done);
1021
+ }
1022
+
1023
+
1024
+ void MacroAssembler::InvokeFunction(Register fun,
1025
+ const ParameterCount& actual,
1026
+ InvokeFlag flag,
1027
+ const CallWrapper& call_wrapper,
1028
+ CallKind call_kind) {
1029
+ // Contract with called JS functions requires that function is passed in r1.
1030
+ ASSERT(fun.is(r1));
1031
+
1032
+ Register expected_reg = r2;
1033
+ Register code_reg = r3;
1034
+
1035
+ ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1036
+ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1037
+ ldr(expected_reg,
1038
+ FieldMemOperand(code_reg,
1039
+ SharedFunctionInfo::kFormalParameterCountOffset));
1040
+ mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
1041
+ ldr(code_reg,
1042
+ FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1043
+
1044
+ ParameterCount expected(expected_reg);
1045
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1046
+ }
1047
+
1048
+
1049
+ void MacroAssembler::InvokeFunction(JSFunction* function,
1050
+ const ParameterCount& actual,
1051
+ InvokeFlag flag) {
1052
+ ASSERT(function->is_compiled());
1053
+
1054
+ // Get the function and set up the context.
1055
+ mov(r1, Operand(Handle<JSFunction>(function)));
1056
+ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1057
+
1058
+ // Invoke the cached code.
1059
+ Handle<Code> code(function->code());
1060
+ ParameterCount expected(function->shared()->formal_parameter_count());
1061
+ if (V8::UseCrankshaft()) {
1062
+ // TODO(kasperl): For now, we always call indirectly through the
1063
+ // code field in the function to allow recompilation to take effect
1064
+ // without changing any of the call sites.
1065
+ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1066
+ InvokeCode(r3, expected, actual, flag);
1067
+ } else {
1068
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
1069
+ }
1070
+ }
1071
+
1072
+
1073
+ void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1074
+ Register map,
1075
+ Register scratch,
1076
+ Label* fail) {
1077
+ ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1078
+ IsInstanceJSObjectType(map, scratch, fail);
1079
+ }
1080
+
1081
+
1082
+ void MacroAssembler::IsInstanceJSObjectType(Register map,
1083
+ Register scratch,
1084
+ Label* fail) {
1085
+ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1086
+ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
1087
+ b(lt, fail);
1088
+ cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
1089
+ b(gt, fail);
1090
+ }
1091
+
1092
+
1093
+ void MacroAssembler::IsObjectJSStringType(Register object,
1094
+ Register scratch,
1095
+ Label* fail) {
1096
+ ASSERT(kNotStringTag != 0);
1097
+
1098
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1099
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1100
+ tst(scratch, Operand(kIsNotStringMask));
1101
+ b(ne, fail);
1102
+ }
1103
+
1104
+
1105
+ #ifdef ENABLE_DEBUGGER_SUPPORT
1106
+ void MacroAssembler::DebugBreak() {
1107
+ ASSERT(allow_stub_calls());
1108
+ mov(r0, Operand(0, RelocInfo::NONE));
1109
+ mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1110
+ CEntryStub ces(1);
1111
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1112
+ }
1113
+ #endif
1114
+
1115
+
1116
+ void MacroAssembler::PushTryHandler(CodeLocation try_location,
1117
+ HandlerType type) {
1118
+ // Adjust this code if not the case.
1119
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1120
+ // The pc (return address) is passed in register lr.
1121
+ if (try_location == IN_JAVASCRIPT) {
1122
+ if (type == TRY_CATCH_HANDLER) {
1123
+ mov(r3, Operand(StackHandler::TRY_CATCH));
1124
+ } else {
1125
+ mov(r3, Operand(StackHandler::TRY_FINALLY));
1126
+ }
1127
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
1128
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
1129
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1130
+ stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
1131
+ // Save the current handler as the next handler.
1132
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1133
+ ldr(r1, MemOperand(r3));
1134
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
1135
+ push(r1);
1136
+ // Link this handler as the new current one.
1137
+ str(sp, MemOperand(r3));
1138
+ } else {
1139
+ // Must preserve r0-r4, r5-r7 are available.
1140
+ ASSERT(try_location == IN_JS_ENTRY);
1141
+ // The frame pointer does not point to a JS frame so we save NULL
1142
+ // for fp. We expect the code throwing an exception to check fp
1143
+ // before dereferencing it to restore the context.
1144
+ mov(ip, Operand(0, RelocInfo::NONE)); // To save a NULL frame pointer.
1145
+ mov(r6, Operand(StackHandler::ENTRY));
1146
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
1147
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
1148
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1149
+ stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
1150
+ // Save the current handler as the next handler.
1151
+ mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1152
+ ldr(r6, MemOperand(r7));
1153
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
1154
+ push(r6);
1155
+ // Link this handler as the new current one.
1156
+ str(sp, MemOperand(r7));
1157
+ }
1158
+ }
1159
+
1160
+
1161
+ void MacroAssembler::PopTryHandler() {
1162
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1163
+ pop(r1);
1164
+ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1165
+ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1166
+ str(r1, MemOperand(ip));
1167
+ }
1168
+
1169
+
1170
+ void MacroAssembler::Throw(Register value) {
1171
+ // r0 is expected to hold the exception.
1172
+ if (!value.is(r0)) {
1173
+ mov(r0, value);
1174
+ }
1175
+
1176
+ // Adjust this code if not the case.
1177
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1178
+
1179
+ // Drop the sp to the top of the handler.
1180
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1181
+ ldr(sp, MemOperand(r3));
1182
+
1183
+ // Restore the next handler and frame pointer, discard handler state.
1184
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1185
+ pop(r2);
1186
+ str(r2, MemOperand(r3));
1187
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
1188
+ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
1189
+
1190
+ // Before returning we restore the context from the frame pointer if
1191
+ // not NULL. The frame pointer is NULL in the exception handler of a
1192
+ // JS entry frame.
1193
+ cmp(fp, Operand(0, RelocInfo::NONE));
1194
+ // Set cp to NULL if fp is NULL.
1195
+ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1196
+ // Restore cp otherwise.
1197
+ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1198
+ #ifdef DEBUG
1199
+ if (emit_debug_code()) {
1200
+ mov(lr, Operand(pc));
1201
+ }
1202
+ #endif
1203
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1204
+ pop(pc);
1205
+ }
1206
+
1207
+
1208
+ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
1209
+ Register value) {
1210
+ // Adjust this code if not the case.
1211
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1212
+
1213
+ // r0 is expected to hold the exception.
1214
+ if (!value.is(r0)) {
1215
+ mov(r0, value);
1216
+ }
1217
+
1218
+ // Drop sp to the top stack handler.
1219
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1220
+ ldr(sp, MemOperand(r3));
1221
+
1222
+ // Unwind the handlers until the ENTRY handler is found.
1223
+ Label loop, done;
1224
+ bind(&loop);
1225
+ // Load the type of the current stack handler.
1226
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
1227
+ ldr(r2, MemOperand(sp, kStateOffset));
1228
+ cmp(r2, Operand(StackHandler::ENTRY));
1229
+ b(eq, &done);
1230
+ // Fetch the next handler in the list.
1231
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
1232
+ ldr(sp, MemOperand(sp, kNextOffset));
1233
+ jmp(&loop);
1234
+ bind(&done);
1235
+
1236
+ // Set the top handler address to the next handler past the current ENTRY handler.
1237
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1238
+ pop(r2);
1239
+ str(r2, MemOperand(r3));
1240
+
1241
+ if (type == OUT_OF_MEMORY) {
1242
+ // Set external caught exception to false.
1243
+ ExternalReference external_caught(
1244
+ Isolate::k_external_caught_exception_address, isolate());
1245
+ mov(r0, Operand(false, RelocInfo::NONE));
1246
+ mov(r2, Operand(external_caught));
1247
+ str(r0, MemOperand(r2));
1248
+
1249
+ // Set pending exception and r0 to out of memory exception.
1250
+ Failure* out_of_memory = Failure::OutOfMemoryException();
1251
+ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1252
+ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
1253
+ isolate())));
1254
+ str(r0, MemOperand(r2));
1255
+ }
1256
+
1257
+ // Stack layout at this point. See also StackHandlerConstants.
1258
+ // sp -> state (ENTRY)
1259
+ // fp
1260
+ // lr
1261
+
1262
+ // Discard handler state (r2 is not used) and restore frame pointer.
1263
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
1264
+ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
1265
+ // Before returning we restore the context from the frame pointer if
1266
+ // not NULL. The frame pointer is NULL in the exception handler of a
1267
+ // JS entry frame.
1268
+ cmp(fp, Operand(0, RelocInfo::NONE));
1269
+ // Set cp to NULL if fp is NULL.
1270
+ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1271
+ // Restore cp otherwise.
1272
+ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1273
+ #ifdef DEBUG
1274
+ if (emit_debug_code()) {
1275
+ mov(lr, Operand(pc));
1276
+ }
1277
+ #endif
1278
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1279
+ pop(pc);
1280
+ }
1281
+
1282
+
1283
+ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1284
+ Register scratch,
1285
+ Label* miss) {
1286
+ Label same_contexts;
1287
+
1288
+ ASSERT(!holder_reg.is(scratch));
1289
+ ASSERT(!holder_reg.is(ip));
1290
+ ASSERT(!scratch.is(ip));
1291
+
1292
+ // Load current lexical context from the stack frame.
1293
+ ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1294
+ // In debug mode, make sure the lexical context is set.
1295
+ #ifdef DEBUG
1296
+ cmp(scratch, Operand(0, RelocInfo::NONE));
1297
+ Check(ne, "we should not have an empty lexical context");
1298
+ #endif
1299
+
1300
+ // Load the global context of the current context.
1301
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
1302
+ ldr(scratch, FieldMemOperand(scratch, offset));
1303
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
1304
+
1305
+ // Check the context is a global context.
1306
+ if (emit_debug_code()) {
1307
+ // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1308
+ // Cannot use ip as a temporary in this verification code because it is
1309
+ // clobbered as part of cmp with an object Operand.
1310
+ push(holder_reg); // Temporarily save holder on the stack.
1311
+ // Read the first word and compare to the global_context_map.
1312
+ ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1313
+ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
1314
+ cmp(holder_reg, ip);
1315
+ Check(eq, "JSGlobalObject::global_context should be a global context.");
1316
+ pop(holder_reg); // Restore holder.
1317
+ }
1318
+
1319
+ // Check if both contexts are the same.
1320
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
1321
+ cmp(scratch, Operand(ip));
1322
+ b(eq, &same_contexts);
1323
+
1324
+ // Check the context is a global context.
1325
+ if (emit_debug_code()) {
1326
+ // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1327
+ // Cannot use ip as a temporary in this verification code because it is
1328
+ // clobbered as part of cmp with an object Operand.
1329
+ push(holder_reg); // Temporarily save holder on the stack.
1330
+ mov(holder_reg, ip); // Move ip to its holding place.
1331
+ LoadRoot(ip, Heap::kNullValueRootIndex);
1332
+ cmp(holder_reg, ip);
1333
+ Check(ne, "JSGlobalProxy::context() should not be null.");
1334
+
1335
+ ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1336
+ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
1337
+ cmp(holder_reg, ip);
1338
+ Check(eq, "JSGlobalObject::global_context should be a global context.");
1339
+ // Restoring ip is not needed; it is reloaded below.
1340
+ pop(holder_reg); // Restore holder.
1341
+ // Restore ip to holder's context.
1342
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
1343
+ }
1344
+
1345
+ // Check that the security token in the calling global object is
1346
+ // compatible with the security token in the receiving global
1347
+ // object.
1348
+ int token_offset = Context::kHeaderSize +
1349
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
1350
+
1351
+ ldr(scratch, FieldMemOperand(scratch, token_offset));
1352
+ ldr(ip, FieldMemOperand(ip, token_offset));
1353
+ cmp(scratch, Operand(ip));
1354
+ b(ne, miss);
1355
+
1356
+ bind(&same_contexts);
1357
+ }
1358
+
1359
+
1360
+ void MacroAssembler::AllocateInNewSpace(int object_size,
1361
+ Register result,
1362
+ Register scratch1,
1363
+ Register scratch2,
1364
+ Label* gc_required,
1365
+ AllocationFlags flags) {
1366
+ if (!FLAG_inline_new) {
1367
+ if (emit_debug_code()) {
1368
+ // Trash the registers to simulate an allocation failure.
1369
+ mov(result, Operand(0x7091));
1370
+ mov(scratch1, Operand(0x7191));
1371
+ mov(scratch2, Operand(0x7291));
1372
+ }
1373
+ jmp(gc_required);
1374
+ return;
1375
+ }
1376
+
1377
+ ASSERT(!result.is(scratch1));
1378
+ ASSERT(!result.is(scratch2));
1379
+ ASSERT(!scratch1.is(scratch2));
1380
+ ASSERT(!scratch1.is(ip));
1381
+ ASSERT(!scratch2.is(ip));
1382
+
1383
+ // Make object size into bytes.
1384
+ if ((flags & SIZE_IN_WORDS) != 0) {
1385
+ object_size *= kPointerSize;
1386
+ }
1387
+ ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1388
+
1389
+ // Check relative positions of allocation top and limit addresses.
1390
+ // The values must be adjacent in memory to allow the use of LDM.
1391
+ // Also, assert that the registers are numbered such that the values
1392
+ // are loaded in the correct order.
1393
+ ExternalReference new_space_allocation_top =
1394
+ ExternalReference::new_space_allocation_top_address(isolate());
1395
+ ExternalReference new_space_allocation_limit =
1396
+ ExternalReference::new_space_allocation_limit_address(isolate());
1397
+ intptr_t top =
1398
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1399
+ intptr_t limit =
1400
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1401
+ ASSERT((limit - top) == kPointerSize);
1402
+ ASSERT(result.code() < ip.code());
1403
+
1404
+ // Set up allocation top address and object size registers.
1405
+ Register topaddr = scratch1;
1406
+ Register obj_size_reg = scratch2;
1407
+ mov(topaddr, Operand(new_space_allocation_top));
1408
+ mov(obj_size_reg, Operand(object_size));
1409
+
1410
+ // This code stores a temporary value in ip. This is OK, as the code below
1411
+ // does not need ip for implicit literal generation.
1412
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
1413
+ // Load allocation top into result and allocation limit into ip.
1414
+ ldm(ia, topaddr, result.bit() | ip.bit());
1415
+ } else {
1416
+ if (emit_debug_code()) {
1417
+ // Assert that result actually contains top on entry. ip is used
1418
+ // immediately below, so this use of ip does not cause a difference with
1419
+ // respect to register content between debug and release mode.
1420
+ ldr(ip, MemOperand(topaddr));
1421
+ cmp(result, ip);
1422
+ Check(eq, "Unexpected allocation top");
1423
+ }
1424
+ // Load allocation limit into ip. Result already contains allocation top.
1425
+ ldr(ip, MemOperand(topaddr, limit - top));
1426
+ }
1427
+
1428
+ // Calculate new top and bail out if new space is exhausted. Use result
1429
+ // to calculate the new top.
1430
+ add(scratch2, result, Operand(obj_size_reg), SetCC);
1431
+ b(cs, gc_required);
1432
+ cmp(scratch2, Operand(ip));
1433
+ b(hi, gc_required);
1434
+ str(scratch2, MemOperand(topaddr));
1435
+
1436
+ // Tag object if requested.
1437
+ if ((flags & TAG_OBJECT) != 0) {
1438
+ add(result, result, Operand(kHeapObjectTag));
1439
+ }
1440
+ }
1441
+
1442
+
1443
+ void MacroAssembler::AllocateInNewSpace(Register object_size,
1444
+ Register result,
1445
+ Register scratch1,
1446
+ Register scratch2,
1447
+ Label* gc_required,
1448
+ AllocationFlags flags) {
1449
+ if (!FLAG_inline_new) {
1450
+ if (emit_debug_code()) {
1451
+ // Trash the registers to simulate an allocation failure.
1452
+ mov(result, Operand(0x7091));
1453
+ mov(scratch1, Operand(0x7191));
1454
+ mov(scratch2, Operand(0x7291));
1455
+ }
1456
+ jmp(gc_required);
1457
+ return;
1458
+ }
1459
+
1460
+ // Assert that the register arguments are different and that none of
1461
+ // them are ip. ip is used explicitly in the code generated below.
1462
+ ASSERT(!result.is(scratch1));
1463
+ ASSERT(!result.is(scratch2));
1464
+ ASSERT(!scratch1.is(scratch2));
1465
+ ASSERT(!result.is(ip));
1466
+ ASSERT(!scratch1.is(ip));
1467
+ ASSERT(!scratch2.is(ip));
1468
+
1469
+ // Check relative positions of allocation top and limit addresses.
1470
+ // The values must be adjacent in memory to allow the use of LDM.
1471
+ // Also, assert that the registers are numbered such that the values
1472
+ // are loaded in the correct order.
1473
+ ExternalReference new_space_allocation_top =
1474
+ ExternalReference::new_space_allocation_top_address(isolate());
1475
+ ExternalReference new_space_allocation_limit =
1476
+ ExternalReference::new_space_allocation_limit_address(isolate());
1477
+ intptr_t top =
1478
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1479
+ intptr_t limit =
1480
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1481
+ ASSERT((limit - top) == kPointerSize);
1482
+ ASSERT(result.code() < ip.code());
1483
+
1484
+ // Set up allocation top address.
1485
+ Register topaddr = scratch1;
1486
+ mov(topaddr, Operand(new_space_allocation_top));
1487
+
1488
+ // This code stores a temporary value in ip. This is OK, as the code below
1489
+ // does not need ip for implicit literal generation.
1490
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
1491
+ // Load allocation top into result and allocation limit into ip.
1492
+ ldm(ia, topaddr, result.bit() | ip.bit());
1493
+ } else {
1494
+ if (emit_debug_code()) {
1495
+ // Assert that result actually contains top on entry. ip is used
1496
+ // immediately below so this use of ip does not cause difference with
1497
+ // respect to register content between debug and release mode.
1498
+ ldr(ip, MemOperand(topaddr));
1499
+ cmp(result, ip);
1500
+ Check(eq, "Unexpected allocation top");
1501
+ }
1502
+ // Load allocation limit into ip. Result already contains allocation top.
1503
+ ldr(ip, MemOperand(topaddr, limit - top));
1504
+ }
1505
+
1506
+ // Calculate new top and bail out if new space is exhausted. Use result
1507
+ // to calculate the new top. Object size may be in words so a shift is
1508
+ // required to get the number of bytes.
1509
+ if ((flags & SIZE_IN_WORDS) != 0) {
1510
+ add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1511
+ } else {
1512
+ add(scratch2, result, Operand(object_size), SetCC);
1513
+ }
1514
+ b(cs, gc_required);
1515
+ cmp(scratch2, Operand(ip));
1516
+ b(hi, gc_required);
1517
+
1518
+ // Update allocation top. result temporarily holds the new top.
1519
+ if (emit_debug_code()) {
1520
+ tst(scratch2, Operand(kObjectAlignmentMask));
1521
+ Check(eq, "Unaligned allocation in new space");
1522
+ }
1523
+ str(scratch2, MemOperand(topaddr));
1524
+
1525
+ // Tag object if requested.
1526
+ if ((flags & TAG_OBJECT) != 0) {
1527
+ add(result, result, Operand(kHeapObjectTag));
1528
+ }
1529
+ }
1530
+
1531
+
1532
+ void MacroAssembler::UndoAllocationInNewSpace(Register object,
1533
+ Register scratch) {
1534
+ ExternalReference new_space_allocation_top =
1535
+ ExternalReference::new_space_allocation_top_address(isolate());
1536
+
1537
+ // Make sure the object has no tag before resetting top.
1538
+ and_(object, object, Operand(~kHeapObjectTagMask));
1539
+ #ifdef DEBUG
1540
+ // Check that the object being un-allocated is below the current top.
1541
+ mov(scratch, Operand(new_space_allocation_top));
1542
+ ldr(scratch, MemOperand(scratch));
1543
+ cmp(object, scratch);
1544
+ Check(lt, "Undo allocation of non allocated memory");
1545
+ #endif
1546
+ // Write the address of the object to un-allocate as the current top.
1547
+ mov(scratch, Operand(new_space_allocation_top));
1548
+ str(object, MemOperand(scratch));
1549
+ }
1550
+
1551
+
1552
+ void MacroAssembler::AllocateTwoByteString(Register result,
1553
+ Register length,
1554
+ Register scratch1,
1555
+ Register scratch2,
1556
+ Register scratch3,
1557
+ Label* gc_required) {
1558
+ // Calculate the number of bytes needed for the characters in the string while
1559
+ // observing object alignment.
1560
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1561
+ mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1562
+ add(scratch1, scratch1,
1563
+ Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1564
+ and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1565
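+ // Adding kObjectAlignmentMask and then clearing those low bits rounds the
+ // size (2 * length + SeqTwoByteString::kHeaderSize) up to the next
+ // kObjectAlignment boundary.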
+
1566
+ // Allocate two-byte string in new space.
1567
+ AllocateInNewSpace(scratch1,
1568
+ result,
1569
+ scratch2,
1570
+ scratch3,
1571
+ gc_required,
1572
+ TAG_OBJECT);
1573
+
1574
+ // Set the map, length and hash field.
1575
+ InitializeNewString(result,
1576
+ length,
1577
+ Heap::kStringMapRootIndex,
1578
+ scratch1,
1579
+ scratch2);
1580
+ }
1581
+
1582
+
1583
+ void MacroAssembler::AllocateAsciiString(Register result,
1584
+ Register length,
1585
+ Register scratch1,
1586
+ Register scratch2,
1587
+ Register scratch3,
1588
+ Label* gc_required) {
1589
+ // Calculate the number of bytes needed for the characters in the string while
1590
+ // observing object alignment.
1591
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1592
+ ASSERT(kCharSize == 1);
1593
+ add(scratch1, length,
1594
+ Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
1595
+ and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1596
+
1597
+ // Allocate ASCII string in new space.
1598
+ AllocateInNewSpace(scratch1,
1599
+ result,
1600
+ scratch2,
1601
+ scratch3,
1602
+ gc_required,
1603
+ TAG_OBJECT);
1604
+
1605
+ // Set the map, length and hash field.
1606
+ InitializeNewString(result,
1607
+ length,
1608
+ Heap::kAsciiStringMapRootIndex,
1609
+ scratch1,
1610
+ scratch2);
1611
+ }
1612
+
1613
+
1614
+ void MacroAssembler::AllocateTwoByteConsString(Register result,
1615
+ Register length,
1616
+ Register scratch1,
1617
+ Register scratch2,
1618
+ Label* gc_required) {
1619
+ AllocateInNewSpace(ConsString::kSize,
1620
+ result,
1621
+ scratch1,
1622
+ scratch2,
1623
+ gc_required,
1624
+ TAG_OBJECT);
1625
+
1626
+ InitializeNewString(result,
1627
+ length,
1628
+ Heap::kConsStringMapRootIndex,
1629
+ scratch1,
1630
+ scratch2);
1631
+ }
1632
+
1633
+
1634
+ void MacroAssembler::AllocateAsciiConsString(Register result,
1635
+ Register length,
1636
+ Register scratch1,
1637
+ Register scratch2,
1638
+ Label* gc_required) {
1639
+ AllocateInNewSpace(ConsString::kSize,
1640
+ result,
1641
+ scratch1,
1642
+ scratch2,
1643
+ gc_required,
1644
+ TAG_OBJECT);
1645
+
1646
+ InitializeNewString(result,
1647
+ length,
1648
+ Heap::kConsAsciiStringMapRootIndex,
1649
+ scratch1,
1650
+ scratch2);
1651
+ }
1652
+
1653
+
1654
+ void MacroAssembler::CompareObjectType(Register object,
1655
+ Register map,
1656
+ Register type_reg,
1657
+ InstanceType type) {
1658
+ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1659
+ CompareInstanceType(map, type_reg, type);
1660
+ }
1661
+
1662
+
1663
+ void MacroAssembler::CompareInstanceType(Register map,
1664
+ Register type_reg,
1665
+ InstanceType type) {
1666
+ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1667
+ cmp(type_reg, Operand(type));
1668
+ }
1669
+
1670
+
1671
+ void MacroAssembler::CompareRoot(Register obj,
1672
+ Heap::RootListIndex index) {
1673
+ ASSERT(!obj.is(ip));
1674
+ LoadRoot(ip, index);
1675
+ cmp(obj, ip);
1676
+ }
1677
+
1678
+
1679
+ void MacroAssembler::CheckMap(Register obj,
1680
+ Register scratch,
1681
+ Handle<Map> map,
1682
+ Label* fail,
1683
+ SmiCheckType smi_check_type) {
1684
+ if (smi_check_type == DO_SMI_CHECK) {
1685
+ JumpIfSmi(obj, fail);
1686
+ }
1687
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1688
+ mov(ip, Operand(map));
1689
+ cmp(scratch, ip);
1690
+ b(ne, fail);
1691
+ }
1692
+
1693
+
1694
+ void MacroAssembler::CheckMap(Register obj,
1695
+ Register scratch,
1696
+ Heap::RootListIndex index,
1697
+ Label* fail,
1698
+ SmiCheckType smi_check_type) {
1699
+ if (smi_check_type == DO_SMI_CHECK) {
1700
+ JumpIfSmi(obj, fail);
1701
+ }
1702
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1703
+ LoadRoot(ip, index);
1704
+ cmp(scratch, ip);
1705
+ b(ne, fail);
1706
+ }
1707
+
1708
+
1709
+ void MacroAssembler::DispatchMap(Register obj,
1710
+ Register scratch,
1711
+ Handle<Map> map,
1712
+ Handle<Code> success,
1713
+ SmiCheckType smi_check_type) {
1714
+ Label fail;
1715
+ if (smi_check_type == DO_SMI_CHECK) {
1716
+ JumpIfSmi(obj, &fail);
1717
+ }
1718
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1719
+ mov(ip, Operand(map));
1720
+ cmp(scratch, ip);
1721
+ Jump(success, RelocInfo::CODE_TARGET, eq);
1722
+ bind(&fail);
1723
+ }
1724
+
1725
+
1726
+ void MacroAssembler::TryGetFunctionPrototype(Register function,
1727
+ Register result,
1728
+ Register scratch,
1729
+ Label* miss) {
1730
+ // Check that the receiver isn't a smi.
1731
+ JumpIfSmi(function, miss);
1732
+
1733
+ // Check that the function really is a function. Load map into result reg.
1734
+ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
1735
+ b(ne, miss);
1736
+
1737
+ // Make sure that the function has an instance prototype.
1738
+ Label non_instance;
1739
+ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
1740
+ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1741
+ b(ne, &non_instance);
1742
+
1743
+ // Get the prototype or initial map from the function.
1744
+ ldr(result,
1745
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1746
+
1747
+ // If the prototype or initial map is the hole, don't return it and
1748
+ // simply miss the cache instead. This will allow us to allocate a
1749
+ // prototype object on-demand in the runtime system.
1750
+ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1751
+ cmp(result, ip);
1752
+ b(eq, miss);
1753
+
1754
+ // If the function does not have an initial map, we're done.
1755
+ Label done;
1756
+ CompareObjectType(result, scratch, scratch, MAP_TYPE);
1757
+ b(ne, &done);
1758
+
1759
+ // Get the prototype from the initial map.
1760
+ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
1761
+ jmp(&done);
1762
+
1763
+ // Non-instance prototype: Fetch prototype from constructor field
1764
+ // in initial map.
1765
+ bind(&non_instance);
1766
+ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
1767
+
1768
+ // All done.
1769
+ bind(&done);
1770
+ }
1771
+
1772
+
1773
+ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1774
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1775
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1776
+ }
1777
+
1778
+
1779
+ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
1780
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1781
+ Object* result;
1782
+ { MaybeObject* maybe_result = stub->TryGetCode();
1783
+ if (!maybe_result->ToObject(&result)) return maybe_result;
1784
+ }
1785
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
1786
+ return result;
1787
+ }
1788
+
1789
+
1790
+ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1791
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1792
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1793
+ }
1794
+
1795
+
1796
+ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
1797
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1798
+ Object* result;
1799
+ { MaybeObject* maybe_result = stub->TryGetCode();
1800
+ if (!maybe_result->ToObject(&result)) return maybe_result;
1801
+ }
1802
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
1803
+ return result;
1804
+ }
1805
+
1806
+
1807
+ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
1808
+ return ref0.address() - ref1.address();
1809
+ }
1810
+
1811
+
1812
+ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
1813
+ ExternalReference function, int stack_space) {
1814
+ ExternalReference next_address =
1815
+ ExternalReference::handle_scope_next_address();
1816
+ const int kNextOffset = 0;
1817
+ const int kLimitOffset = AddressOffset(
1818
+ ExternalReference::handle_scope_limit_address(),
1819
+ next_address);
1820
+ const int kLevelOffset = AddressOffset(
1821
+ ExternalReference::handle_scope_level_address(),
1822
+ next_address);
1823
+
1824
+ // Allocate HandleScope in callee-save registers.
1825
+ mov(r7, Operand(next_address));
1826
+ ldr(r4, MemOperand(r7, kNextOffset));
1827
+ ldr(r5, MemOperand(r7, kLimitOffset));
1828
+ ldr(r6, MemOperand(r7, kLevelOffset));
1829
+ add(r6, r6, Operand(1));
1830
+ str(r6, MemOperand(r7, kLevelOffset));
1831
+
1832
+ // Native call returns to the DirectCEntry stub which redirects to the
1833
+ // return address pushed on stack (could have moved after GC).
1834
+ // DirectCEntry stub itself is generated early and never moves.
1835
+ DirectCEntryStub stub;
1836
+ stub.GenerateCall(this, function);
1837
+
1838
+ Label promote_scheduled_exception;
1839
+ Label delete_allocated_handles;
1840
+ Label leave_exit_frame;
1841
+
1842
+ // If result is non-zero, dereference to get the result value;
1843
+ // otherwise set it to undefined.
1844
+ cmp(r0, Operand(0));
1845
+ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
1846
+ ldr(r0, MemOperand(r0), ne);
1847
+
1848
+ // No more valid handles (the result handle was the last one). Restore
1849
+ // previous handle scope.
1850
+ str(r4, MemOperand(r7, kNextOffset));
1851
+ if (emit_debug_code()) {
1852
+ ldr(r1, MemOperand(r7, kLevelOffset));
1853
+ cmp(r1, r6);
1854
+ Check(eq, "Unexpected level after return from api call");
1855
+ }
1856
+ sub(r6, r6, Operand(1));
1857
+ str(r6, MemOperand(r7, kLevelOffset));
1858
+ ldr(ip, MemOperand(r7, kLimitOffset));
1859
+ cmp(r5, ip);
1860
+ b(ne, &delete_allocated_handles);
1861
+
1862
+ // Check if the function scheduled an exception.
1863
+ bind(&leave_exit_frame);
1864
+ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
1865
+ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
1866
+ ldr(r5, MemOperand(ip));
1867
+ cmp(r4, r5);
1868
+ b(ne, &promote_scheduled_exception);
1869
+
1870
+ // LeaveExitFrame expects unwind space to be in a register.
1871
+ mov(r4, Operand(stack_space));
1872
+ LeaveExitFrame(false, r4);
1873
+ mov(pc, lr);
1874
+
1875
+ bind(&promote_scheduled_exception);
1876
+ MaybeObject* result
1877
+ = TryTailCallExternalReference(
1878
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
1879
+ 0,
1880
+ 1);
1881
+ if (result->IsFailure()) {
1882
+ return result;
1883
+ }
1884
+
1885
+ // HandleScope limit has changed. Delete allocated extensions.
1886
+ bind(&delete_allocated_handles);
1887
+ str(r5, MemOperand(r7, kLimitOffset));
1888
+ mov(r4, r0);
1889
+ PrepareCallCFunction(1, r5);
1890
+ mov(r0, Operand(ExternalReference::isolate_address()));
1891
+ CallCFunction(
1892
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
1893
+ mov(r0, r4);
1894
+ jmp(&leave_exit_frame);
1895
+
1896
+ return result;
1897
+ }
1898
+
1899
+
1900
+ void MacroAssembler::IllegalOperation(int num_arguments) {
1901
+ if (num_arguments > 0) {
1902
+ add(sp, sp, Operand(num_arguments * kPointerSize));
1903
+ }
1904
+ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1905
+ }
1906
+
1907
+
1908
+ void MacroAssembler::IndexFromHash(Register hash, Register index) {
1909
+ // If the hash field contains an array index pick it out. The assert checks
1910
+ // that the constants for the maximum number of digits for an array index
1911
+ // cached in the hash field and the number of bits reserved for it does not
1912
+ // conflict.
1913
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
1914
+ (1 << String::kArrayIndexValueBits));
1915
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
1916
+ // the low kHashShift bits.
1917
+ STATIC_ASSERT(kSmiTag == 0);
1918
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
1919
+ mov(index, Operand(hash, LSL, kSmiTagSize));
1920
+ }
1921
+
1922
+
1923
+ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
1924
+ Register outHighReg,
1925
+ Register outLowReg) {
1926
+ // ARMv7 VFP3 instructions to implement integer to double conversion.
1927
+ mov(r7, Operand(inReg, ASR, kSmiTagSize));
1928
+ vmov(s15, r7);
1929
+ vcvt_f64_s32(d7, s15);
1930
+ vmov(outLowReg, outHighReg, d7);
1931
+ }
1932
+
1933
+
1934
+ void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
1935
+ DwVfpRegister result,
1936
+ Register scratch1,
1937
+ Register scratch2,
1938
+ Register heap_number_map,
1939
+ SwVfpRegister scratch3,
1940
+ Label* not_number,
1941
+ ObjectToDoubleFlags flags) {
1942
+ Label done;
1943
+ if ((flags & OBJECT_NOT_SMI) == 0) {
1944
+ Label not_smi;
1945
+ JumpIfNotSmi(object, &not_smi);
1946
+ // Remove smi tag and convert to double.
1947
+ mov(scratch1, Operand(object, ASR, kSmiTagSize));
1948
+ vmov(scratch3, scratch1);
1949
+ vcvt_f64_s32(result, scratch3);
1950
+ b(&done);
1951
+ bind(&not_smi);
1952
+ }
1953
+ // Check for heap number and load double value from it.
1954
+ ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
1955
+ sub(scratch2, object, Operand(kHeapObjectTag));
1956
+ cmp(scratch1, heap_number_map);
1957
+ b(ne, not_number);
1958
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
1959
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
1960
+ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
1961
+ Sbfx(scratch1,
1962
+ scratch1,
1963
+ HeapNumber::kExponentShift,
1964
+ HeapNumber::kExponentBits);
1965
+ // All-one value sign extend to -1.
1966
+ cmp(scratch1, Operand(-1));
1967
+ b(eq, not_number);
1968
+ }
1969
+ vldr(result, scratch2, HeapNumber::kValueOffset);
1970
+ bind(&done);
1971
+ }
1972
+
1973
+
1974
+ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
1975
+ DwVfpRegister value,
1976
+ Register scratch1,
1977
+ SwVfpRegister scratch2) {
1978
+ mov(scratch1, Operand(smi, ASR, kSmiTagSize));
1979
+ vmov(scratch2, scratch1);
1980
+ vcvt_f64_s32(value, scratch2);
1981
+ }
1982
+
1983
+
1984
+ // Tries to get a signed int32 out of a double precision floating point heap
1985
+ // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
1986
+ // 32-bit signed integer range.
1987
+ void MacroAssembler::ConvertToInt32(Register source,
1988
+ Register dest,
1989
+ Register scratch,
1990
+ Register scratch2,
1991
+ DwVfpRegister double_scratch,
1992
+ Label *not_int32) {
1993
+ if (CpuFeatures::IsSupported(VFP3)) {
1994
+ CpuFeatures::Scope scope(VFP3);
1995
+ sub(scratch, source, Operand(kHeapObjectTag));
1996
+ vldr(double_scratch, scratch, HeapNumber::kValueOffset);
1997
+ vcvt_s32_f64(double_scratch.low(), double_scratch);
1998
+ vmov(dest, double_scratch.low());
1999
+ // Signed vcvt instruction will saturate to the minimum (0x80000000) or
2000
+ // maximum (0x7fffffff) signed 32-bit integer when the double is out of
2001
+ // range. When subtracting one, the minimum signed integer becomes the
2002
+ // maximum signed integer.
2003
+ sub(scratch, dest, Operand(1));
2004
+ cmp(scratch, Operand(LONG_MAX - 1));
2005
+ // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
2006
+ b(ge, not_int32);
2007
+ } else {
2008
+ // This code is faster for doubles that are in the ranges -0x7fffffff to
2009
+ // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
2010
+ // the range of signed int32 values that are not Smis. Jumps to the label
2011
+ // 'not_int32' if the double isn't in the range -0x80000000.0 to
2012
+ // 0x80000000.0 (excluding the endpoints).
2013
+ Label right_exponent, done;
2014
+ // Get exponent word.
2015
+ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
2016
+ // Get exponent alone in scratch2.
2017
+ Ubfx(scratch2,
2018
+ scratch,
2019
+ HeapNumber::kExponentShift,
2020
+ HeapNumber::kExponentBits);
2021
+ // Load dest with zero. We use this either for the final shift or
2022
+ // for the answer.
2023
+ mov(dest, Operand(0, RelocInfo::NONE));
2024
+ // Check whether the exponent matches a 32-bit signed int that is not a Smi.
2025
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
2026
+ // the exponent that we are fastest at and also the highest exponent we can
2027
+ // handle here.
2028
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
2029
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
2030
+ // split it up to avoid a constant pool entry. You can't do that in general
2031
+ // for cmp because of the overflow flag, but we know the exponent is in the
2032
+ // range 0-2047 so there is no overflow.
2033
+ int fudge_factor = 0x400;
2034
+ sub(scratch2, scratch2, Operand(fudge_factor));
2035
+ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
2036
+ // If we have a match of the int32-but-not-Smi exponent then skip some
2037
+ // logic.
2038
+ b(eq, &right_exponent);
2039
+ // If the exponent is higher than that then go to slow case. This catches
2040
+ // numbers that don't fit in a signed int32, infinities and NaNs.
2041
+ b(gt, not_int32);
2042
+
2043
+ // We know the exponent is smaller than 30 (biased). If it is less than
2044
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
2045
+ // it rounds to zero.
2046
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
2047
+ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
2048
+ // Dest already has a Smi zero.
2049
+ b(lt, &done);
2050
+
2051
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
2052
+ // get how much to shift down.
2053
+ rsb(dest, scratch2, Operand(30));
2054
+
2055
+ bind(&right_exponent);
2056
+ // Get the top bits of the mantissa.
2057
+ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
2058
+ // Put back the implicit 1.
2059
+ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
2060
+ // Shift up the mantissa bits to take up the space the exponent used to
2061
+ // take. We just orred in the implicit bit so that took care of one and
2062
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
2063
+ // distance.
2064
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2065
+ mov(scratch2, Operand(scratch2, LSL, shift_distance));
2066
+ // Put sign in zero flag.
2067
+ tst(scratch, Operand(HeapNumber::kSignMask));
2068
+ // Get the second half of the double. For some exponents we don't
2069
+ // actually need this because the bits get shifted out again, but
2070
+ // it's probably slower to test than just to do it.
2071
+ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
2072
+ // Shift down 22 bits to get the last 10 bits.
2073
+ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
2074
+ // Move down according to the exponent.
2075
+ mov(dest, Operand(scratch, LSR, dest));
2076
+ // Fix sign if sign bit was set.
2077
+ rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
2078
+ bind(&done);
2079
+ }
2080
+ }
2081
+
2082
+
2083
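As a rough cross-check of the fallback path above, here is the same round-toward-zero conversion written in plain C++ against the raw IEEE-754 bits. Like the assembly, it rejects anything whose magnitude needs an unbiased exponent above 30 (so NaNs, infinities and values outside the int32 range fall out); this is an illustrative sketch, not V8 API.

#include <cstdint>
#include <cstring>
#include <optional>

// Truncate a double toward zero using its raw bits; nullopt means "not int32".
std::optional<int32_t> ConvertToInt32Sketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const bool negative = (bits >> 63) != 0;
  const int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;  // unbiased
  if (exponent < 0) return 0;               // |value| < 1 truncates to zero
  if (exponent > 30) return std::nullopt;   // too large, Infinity or NaN
  const uint64_t mantissa = (bits & ((1ull << 52) - 1)) | (1ull << 52);  // implicit 1
  const int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  return negative ? -magnitude : magnitude;
}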
+ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2084
+ SwVfpRegister result,
2085
+ DwVfpRegister double_input,
2086
+ Register scratch1,
2087
+ Register scratch2,
2088
+ CheckForInexactConversion check_inexact) {
2089
+ ASSERT(CpuFeatures::IsSupported(VFP3));
2090
+ CpuFeatures::Scope scope(VFP3);
2091
+ Register prev_fpscr = scratch1;
2092
+ Register scratch = scratch2;
2093
+
2094
+ int32_t check_inexact_conversion =
2095
+ (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2096
+
2097
+ // Set custom FPSCR:
2098
+ // - Set rounding mode.
2099
+ // - Clear vfp cumulative exception flags.
2100
+ // - Make sure Flush-to-zero mode control bit is unset.
2101
+ vmrs(prev_fpscr);
2102
+ bic(scratch,
2103
+ prev_fpscr,
2104
+ Operand(kVFPExceptionMask |
2105
+ check_inexact_conversion |
2106
+ kVFPRoundingModeMask |
2107
+ kVFPFlushToZeroMask));
2108
+ // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
2109
+ if (rounding_mode != kRoundToNearest) {
2110
+ orr(scratch, scratch, Operand(rounding_mode));
2111
+ }
2112
+ vmsr(scratch);
2113
+
2114
+ // Convert the argument to an integer.
2115
+ vcvt_s32_f64(result,
2116
+ double_input,
2117
+ (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
2118
+ : kFPSCRRounding);
2119
+
2120
+ // Retrieve FPSCR.
2121
+ vmrs(scratch);
2122
+ // Restore FPSCR.
2123
+ vmsr(prev_fpscr);
2124
+ // Check for vfp exceptions.
2125
+ tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
2126
+ }
2127
+
2128
+
2129
+ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
2130
+ Register input_high,
2131
+ Register input_low,
2132
+ Register scratch) {
2133
+ Label done, normal_exponent, restore_sign;
2134
+
2135
+ // Extract the biased exponent in result.
2136
+ Ubfx(result,
2137
+ input_high,
2138
+ HeapNumber::kExponentShift,
2139
+ HeapNumber::kExponentBits);
2140
+
2141
+ // Check for Infinity and NaNs, which should return 0.
2142
+ cmp(result, Operand(HeapNumber::kExponentMask));
2143
+ mov(result, Operand(0), LeaveCC, eq);
2144
+ b(eq, &done);
2145
+
2146
+ // Express exponent as delta to (number of mantissa bits + 31).
2147
+ sub(result,
2148
+ result,
2149
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2150
+ SetCC);
2151
+
2152
+ // If the delta is strictly positive, all bits would be shifted away,
2153
+ // which means that we can return 0.
2154
+ b(le, &normal_exponent);
2155
+ mov(result, Operand(0));
2156
+ b(&done);
2157
+
2158
+ bind(&normal_exponent);
2159
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2160
+ // Calculate shift.
2161
+ add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2162
+
2163
+ // Save the sign.
2164
+ Register sign = result;
2165
+ result = no_reg;
2166
+ and_(sign, input_high, Operand(HeapNumber::kSignMask));
2167
+
2168
+ // Set the implicit 1 before the mantissa part in input_high.
2169
+ orr(input_high,
2170
+ input_high,
2171
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2172
+ // Shift the mantissa bits to the correct position.
2173
+ // We don't need to clear non-mantissa bits as they will be shifted away.
2174
+ // If they weren't, it would mean that the answer is in the 32-bit range.
2175
+ mov(input_high, Operand(input_high, LSL, scratch));
2176
+
2177
+ // Replace the shifted bits with bits from the lower mantissa word.
2178
+ Label pos_shift, shift_done;
2179
+ rsb(scratch, scratch, Operand(32), SetCC);
2180
+ b(&pos_shift, ge);
2181
+
2182
+ // Negate scratch.
2183
+ rsb(scratch, scratch, Operand(0));
2184
+ mov(input_low, Operand(input_low, LSL, scratch));
2185
+ b(&shift_done);
2186
+
2187
+ bind(&pos_shift);
2188
+ mov(input_low, Operand(input_low, LSR, scratch));
2189
+
2190
+ bind(&shift_done);
2191
+ orr(input_high, input_high, Operand(input_low));
2192
+ // Restore sign if necessary.
2193
+ cmp(sign, Operand(0));
2194
+ result = sign;
2195
+ sign = no_reg;
2196
+ rsb(result, input_high, Operand(0), LeaveCC, ne);
2197
+ mov(result, input_high, LeaveCC, eq);
2198
+ bind(&done);
2199
+ }
2200
+
2201
+
2202
+ void MacroAssembler::EmitECMATruncate(Register result,
2203
+ DwVfpRegister double_input,
2204
+ SwVfpRegister single_scratch,
2205
+ Register scratch,
2206
+ Register input_high,
2207
+ Register input_low) {
2208
+ CpuFeatures::Scope scope(VFP3);
2209
+ ASSERT(!input_high.is(result));
2210
+ ASSERT(!input_low.is(result));
2211
+ ASSERT(!input_low.is(input_high));
2212
+ ASSERT(!scratch.is(result) &&
2213
+ !scratch.is(input_high) &&
2214
+ !scratch.is(input_low));
2215
+ ASSERT(!single_scratch.is(double_input.low()) &&
2216
+ !single_scratch.is(double_input.high()));
2217
+
2218
+ Label done;
2219
+
2220
+ // Clear cumulative exception flags.
2221
+ ClearFPSCRBits(kVFPExceptionMask, scratch);
2222
+ // Try a conversion to a signed integer.
2223
+ vcvt_s32_f64(single_scratch, double_input);
2224
+ vmov(result, single_scratch);
2225
+ // Retrieve the FPSCR.
2226
+ vmrs(scratch);
2227
+ // Check for overflow and NaNs.
2228
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
2229
+ kVFPUnderflowExceptionBit |
2230
+ kVFPInvalidOpExceptionBit));
2231
+ // If we had no exceptions we are done.
2232
+ b(eq, &done);
2233
+
2234
+ // Load the double value and perform a manual truncation.
2235
+ vmov(input_low, input_high, double_input);
2236
+ EmitOutOfInt32RangeTruncate(result,
2237
+ input_high,
2238
+ input_low,
2239
+ scratch);
2240
+ bind(&done);
2241
+ }
2242
+
2243
+
2244
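The semantics EmitECMATruncate implements (via the VFP conversion plus the out-of-range fallback) are the ECMA-262 ToInt32 rules: truncate toward zero, wrap modulo 2^32, and reinterpret the low 32 bits as signed. A compact C++ sketch using only standard-library calls:

#include <cmath>
#include <cstdint>

// ECMA-262 ToInt32: NaN and infinities become 0, everything else wraps mod 2^32.
int32_t EcmaTruncateSketch(double value) {
  if (!std::isfinite(value)) return 0;
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // (-2^32, 2^32)
  if (wrapped < 0) wrapped += 4294967296.0;                     // [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));  // two's-complement wrap
}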
+ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2245
+ Register src,
2246
+ int num_least_bits) {
2247
+ if (CpuFeatures::IsSupported(ARMv7)) {
2248
+ ubfx(dst, src, kSmiTagSize, num_least_bits);
2249
+ } else {
2250
+ mov(dst, Operand(src, ASR, kSmiTagSize));
2251
+ and_(dst, dst, Operand((1 << num_least_bits) - 1));
2252
+ }
2253
+ }
2254
+
2255
+
2256
+ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2257
+ Register src,
2258
+ int num_least_bits) {
2259
+ and_(dst, src, Operand((1 << num_least_bits) - 1));
2260
+ }
2261
+
2262
+
2263
+ void MacroAssembler::CallRuntime(const Runtime::Function* f,
2264
+ int num_arguments) {
2265
+ // All parameters are on the stack. r0 has the return value after call.
2266
+
2267
+ // If the expected number of arguments of the runtime function is
2268
+ // constant, we check that the actual number of arguments matches the
2269
+ // expectation.
2270
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
2271
+ IllegalOperation(num_arguments);
2272
+ return;
2273
+ }
2274
+
2275
+ // TODO(1236192): Most runtime routines don't need the number of
2276
+ // arguments passed in because it is constant. At some point we
2277
+ // should remove this need and make the runtime routine entry code
2278
+ // smarter.
2279
+ mov(r0, Operand(num_arguments));
2280
+ mov(r1, Operand(ExternalReference(f, isolate())));
2281
+ CEntryStub stub(1);
2282
+ CallStub(&stub);
2283
+ }
2284
+
2285
+
2286
+ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2287
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2288
+ }
2289
+
2290
+
2291
+ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2292
+ const Runtime::Function* function = Runtime::FunctionForId(id);
2293
+ mov(r0, Operand(function->nargs));
2294
+ mov(r1, Operand(ExternalReference(function, isolate())));
2295
+ CEntryStub stub(1);
2296
+ stub.SaveDoubles();
2297
+ CallStub(&stub);
2298
+ }
2299
+
2300
+
2301
+ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2302
+ int num_arguments) {
2303
+ mov(r0, Operand(num_arguments));
2304
+ mov(r1, Operand(ext));
2305
+
2306
+ CEntryStub stub(1);
2307
+ CallStub(&stub);
2308
+ }
2309
+
2310
+
2311
+ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2312
+ int num_arguments,
2313
+ int result_size) {
2314
+ // TODO(1236192): Most runtime routines don't need the number of
2315
+ // arguments passed in because it is constant. At some point we
2316
+ // should remove this need and make the runtime routine entry code
2317
+ // smarter.
2318
+ mov(r0, Operand(num_arguments));
2319
+ JumpToExternalReference(ext);
2320
+ }
2321
+
2322
+
2323
+ MaybeObject* MacroAssembler::TryTailCallExternalReference(
2324
+ const ExternalReference& ext, int num_arguments, int result_size) {
2325
+ // TODO(1236192): Most runtime routines don't need the number of
2326
+ // arguments passed in because it is constant. At some point we
2327
+ // should remove this need and make the runtime routine entry code
2328
+ // smarter.
2329
+ mov(r0, Operand(num_arguments));
2330
+ return TryJumpToExternalReference(ext);
2331
+ }
2332
+
2333
+
2334
+ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2335
+ int num_arguments,
2336
+ int result_size) {
2337
+ TailCallExternalReference(ExternalReference(fid, isolate()),
2338
+ num_arguments,
2339
+ result_size);
2340
+ }
2341
+
2342
+
2343
+ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2344
+ #if defined(__thumb__)
2345
+ // Thumb mode builtin.
2346
+ ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2347
+ #endif
2348
+ mov(r1, Operand(builtin));
2349
+ CEntryStub stub(1);
2350
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2351
+ }
2352
+
2353
+
2354
+ MaybeObject* MacroAssembler::TryJumpToExternalReference(
2355
+ const ExternalReference& builtin) {
2356
+ #if defined(__thumb__)
2357
+ // Thumb mode builtin.
2358
+ ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2359
+ #endif
2360
+ mov(r1, Operand(builtin));
2361
+ CEntryStub stub(1);
2362
+ return TryTailCallStub(&stub);
2363
+ }
2364
+
2365
+
2366
+ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2367
+ InvokeFlag flag,
2368
+ const CallWrapper& call_wrapper) {
2369
+ GetBuiltinEntry(r2, id);
2370
+ if (flag == CALL_FUNCTION) {
2371
+ call_wrapper.BeforeCall(CallSize(r2));
2372
+ Call(r2);
2373
+ call_wrapper.AfterCall();
2374
+ } else {
2375
+ ASSERT(flag == JUMP_FUNCTION);
2376
+ Jump(r2);
2377
+ }
2378
+ }
2379
+
2380
+
2381
+ void MacroAssembler::GetBuiltinFunction(Register target,
2382
+ Builtins::JavaScript id) {
2383
+ // Load the builtins object into target register.
2384
+ ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2385
+ ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2386
+ // Load the JavaScript builtin function from the builtins object.
2387
+ ldr(target, FieldMemOperand(target,
2388
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2389
+ }
2390
+
2391
+
2392
+ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2393
+ ASSERT(!target.is(r1));
2394
+ GetBuiltinFunction(r1, id);
2395
+ // Load the code entry point from the builtins object.
2396
+ ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2397
+ }
2398
+
2399
+
2400
+ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2401
+ Register scratch1, Register scratch2) {
2402
+ if (FLAG_native_code_counters && counter->Enabled()) {
2403
+ mov(scratch1, Operand(value));
2404
+ mov(scratch2, Operand(ExternalReference(counter)));
2405
+ str(scratch1, MemOperand(scratch2));
2406
+ }
2407
+ }
2408
+
2409
+
2410
+ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2411
+ Register scratch1, Register scratch2) {
2412
+ ASSERT(value > 0);
2413
+ if (FLAG_native_code_counters && counter->Enabled()) {
2414
+ mov(scratch2, Operand(ExternalReference(counter)));
2415
+ ldr(scratch1, MemOperand(scratch2));
2416
+ add(scratch1, scratch1, Operand(value));
2417
+ str(scratch1, MemOperand(scratch2));
2418
+ }
2419
+ }
2420
+
2421
+
2422
+ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2423
+ Register scratch1, Register scratch2) {
2424
+ ASSERT(value > 0);
2425
+ if (FLAG_native_code_counters && counter->Enabled()) {
2426
+ mov(scratch2, Operand(ExternalReference(counter)));
2427
+ ldr(scratch1, MemOperand(scratch2));
2428
+ sub(scratch1, scratch1, Operand(value));
2429
+ str(scratch1, MemOperand(scratch2));
2430
+ }
2431
+ }
2432
+
2433
+
2434
+ void MacroAssembler::Assert(Condition cond, const char* msg) {
2435
+ if (emit_debug_code())
2436
+ Check(cond, msg);
2437
+ }
2438
+
2439
+
2440
+ void MacroAssembler::AssertRegisterIsRoot(Register reg,
2441
+ Heap::RootListIndex index) {
2442
+ if (emit_debug_code()) {
2443
+ LoadRoot(ip, index);
2444
+ cmp(reg, ip);
2445
+ Check(eq, "Register did not match expected root");
2446
+ }
2447
+ }
2448
+
2449
+
2450
+ void MacroAssembler::AssertFastElements(Register elements) {
2451
+ if (emit_debug_code()) {
2452
+ ASSERT(!elements.is(ip));
2453
+ Label ok;
2454
+ push(elements);
2455
+ ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2456
+ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2457
+ cmp(elements, ip);
2458
+ b(eq, &ok);
2459
+ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2460
+ cmp(elements, ip);
2461
+ b(eq, &ok);
2462
+ Abort("JSObject with fast elements map has slow elements");
2463
+ bind(&ok);
2464
+ pop(elements);
2465
+ }
2466
+ }
2467
+
2468
+
2469
+ void MacroAssembler::Check(Condition cond, const char* msg) {
2470
+ Label L;
2471
+ b(cond, &L);
2472
+ Abort(msg);
2473
+ // will not return here
2474
+ bind(&L);
2475
+ }
2476
+
2477
+
2478
+ void MacroAssembler::Abort(const char* msg) {
2479
+ Label abort_start;
2480
+ bind(&abort_start);
2481
+ // We want to pass the msg string like a smi to avoid GC
2482
+ // problems; however, msg is not guaranteed to be aligned
2483
+ // properly. Instead, we pass an aligned pointer that is
2484
+ // a proper v8 smi, but also pass the alignment difference
2485
+ // from the real pointer as a smi.
2486
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2487
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2488
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2489
+ #ifdef DEBUG
2490
+ if (msg != NULL) {
2491
+ RecordComment("Abort message: ");
2492
+ RecordComment(msg);
2493
+ }
2494
+ #endif
2495
+ // Disable stub call restrictions to always allow calls to abort.
2496
+ AllowStubCallsScope allow_scope(this, true);
2497
+
2498
+ mov(r0, Operand(p0));
2499
+ push(r0);
2500
+ mov(r0, Operand(Smi::FromInt(p1 - p0)));
2501
+ push(r0);
2502
+ CallRuntime(Runtime::kAbort, 2);
2503
+ // will not return here
2504
+ if (is_const_pool_blocked()) {
2505
+ // If the calling code cares about the exact number of
2506
+ // instructions generated, we insert padding here to keep the size
2507
+ // of the Abort macro constant.
2508
+ static const int kExpectedAbortInstructions = 10;
2509
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
2510
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
2511
+ while (abort_instructions++ < kExpectedAbortInstructions) {
2512
+ nop();
2513
+ }
2514
+ }
2515
+ }
2516
+
2517
+
2518
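A plain C++ sketch of the pointer encoding Abort uses above: clear the smi tag bit so the value already looks like a smi, and carry the lost low bits as a separate small delta. The 1-bit smi tag with kSmiTag == 0 is an assumption carried over from the surrounding constants.

#include <cstdint>

struct EncodedAbortMessage {
  intptr_t aligned_ptr;  // passes as a smi unchanged
  intptr_t delta;        // alignment difference, itself smi-encodable
};

EncodedAbortMessage EncodeAbortMessage(const char* msg) {
  const intptr_t kSmiTagMask = 1;
  const intptr_t kSmiTag = 0;
  const intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  const intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // clear the tag bit
  return EncodedAbortMessage{p0, p1 - p0};
}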
+ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2519
+ if (context_chain_length > 0) {
2520
+ // Move up the chain of contexts to the context containing the slot.
2521
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
2522
+ // Load the function context (which is the incoming, outer context).
2523
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2524
+ for (int i = 1; i < context_chain_length; i++) {
2525
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2526
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2527
+ }
2528
+ } else {
2529
+ // Slot is in the current function context. Move it into the
2530
+ // destination register in case we store into it (the write barrier
2531
+ // cannot be allowed to destroy the context in cp).
2532
+ mov(dst, cp);
2533
+ }
2534
+
2535
+ // We should not have found a 'with' context by walking the context chain
2536
+ // (i.e., the static scope chain and runtime context chain do not agree).
2537
+ // A variable occurring in such a scope should have slot type LOOKUP and
2538
+ // not CONTEXT.
2539
+ if (emit_debug_code()) {
2540
+ ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2541
+ cmp(dst, ip);
2542
+ Check(eq, "Yo dawg, I heard you liked function contexts "
2543
+ "so I put function contexts in all your contexts");
2544
+ }
2545
+ }
2546
+
2547
+
2548
+ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2549
+ // Load the global or builtins object from the current context.
2550
+ ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2551
+ // Load the global context from the global or builtins object.
2552
+ ldr(function, FieldMemOperand(function,
2553
+ GlobalObject::kGlobalContextOffset));
2554
+ // Load the function from the global context.
2555
+ ldr(function, MemOperand(function, Context::SlotOffset(index)));
2556
+ }
2557
+
2558
+
2559
+ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2560
+ Register map,
2561
+ Register scratch) {
2562
+ // Load the initial map. The global functions all have initial maps.
2563
+ ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2564
+ if (emit_debug_code()) {
2565
+ Label ok, fail;
2566
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2567
+ b(&ok);
2568
+ bind(&fail);
2569
+ Abort("Global functions must have initial map");
2570
+ bind(&ok);
2571
+ }
2572
+ }
2573
+
2574
+
2575
+ void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2576
+ Register reg,
2577
+ Register scratch,
2578
+ Label* not_power_of_two_or_zero) {
2579
+ sub(scratch, reg, Operand(1), SetCC);
2580
+ b(mi, not_power_of_two_or_zero);
2581
+ tst(scratch, reg);
2582
+ b(ne, not_power_of_two_or_zero);
2583
+ }
2584
+
2585
+
2586
+ void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2587
+ Register reg,
2588
+ Register scratch,
2589
+ Label* zero_and_neg,
2590
+ Label* not_power_of_two) {
2591
+ sub(scratch, reg, Operand(1), SetCC);
2592
+ b(mi, zero_and_neg);
2593
+ tst(scratch, reg);
2594
+ b(ne, not_power_of_two);
2595
+ }
2596
+
2597
+
2598
+ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2599
+ Register reg2,
2600
+ Label* on_not_both_smi) {
2601
+ STATIC_ASSERT(kSmiTag == 0);
2602
+ tst(reg1, Operand(kSmiTagMask));
2603
+ tst(reg2, Operand(kSmiTagMask), eq);
2604
+ b(ne, on_not_both_smi);
2605
+ }
2606
+
2607
+
2608
+ void MacroAssembler::JumpIfEitherSmi(Register reg1,
2609
+ Register reg2,
2610
+ Label* on_either_smi) {
2611
+ STATIC_ASSERT(kSmiTag == 0);
2612
+ tst(reg1, Operand(kSmiTagMask));
2613
+ tst(reg2, Operand(kSmiTagMask), ne);
2614
+ b(eq, on_either_smi);
2615
+ }
2616
+
2617
+
2618
+ void MacroAssembler::AbortIfSmi(Register object) {
2619
+ STATIC_ASSERT(kSmiTag == 0);
2620
+ tst(object, Operand(kSmiTagMask));
2621
+ Assert(ne, "Operand is a smi");
2622
+ }
2623
+
2624
+
2625
+ void MacroAssembler::AbortIfNotSmi(Register object) {
2626
+ STATIC_ASSERT(kSmiTag == 0);
2627
+ tst(object, Operand(kSmiTagMask));
2628
+ Assert(eq, "Operand is not smi");
2629
+ }
2630
+
2631
+
2632
+ void MacroAssembler::AbortIfNotString(Register object) {
2633
+ STATIC_ASSERT(kSmiTag == 0);
2634
+ tst(object, Operand(kSmiTagMask));
2635
+ Assert(ne, "Operand is not a string");
2636
+ push(object);
2637
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2638
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2639
+ pop(object);
2640
+ Assert(lo, "Operand is not a string");
2641
+ }
2642
+
2643
+
2644
+
2645
+ void MacroAssembler::AbortIfNotRootValue(Register src,
2646
+ Heap::RootListIndex root_value_index,
2647
+ const char* message) {
2648
+ CompareRoot(src, root_value_index);
2649
+ Assert(eq, message);
2650
+ }
2651
+
2652
+
2653
+ void MacroAssembler::JumpIfNotHeapNumber(Register object,
2654
+ Register heap_number_map,
2655
+ Register scratch,
2656
+ Label* on_not_heap_number) {
2657
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2658
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2659
+ cmp(scratch, heap_number_map);
2660
+ b(ne, on_not_heap_number);
2661
+ }
2662
+
2663
+
2664
+ void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
2665
+ Register first,
2666
+ Register second,
2667
+ Register scratch1,
2668
+ Register scratch2,
2669
+ Label* failure) {
2670
+ // Test that both first and second are sequential ASCII strings.
2671
+ // Assume that they are non-smis.
2672
+ ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2673
+ ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2674
+ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2675
+ ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2676
+
2677
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
2678
+ scratch2,
2679
+ scratch1,
2680
+ scratch2,
2681
+ failure);
2682
+ }
2683
+
2684
+ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
2685
+ Register second,
2686
+ Register scratch1,
2687
+ Register scratch2,
2688
+ Label* failure) {
2689
+ // Check that neither is a smi.
2690
+ STATIC_ASSERT(kSmiTag == 0);
2691
+ and_(scratch1, first, Operand(second));
2692
+ tst(scratch1, Operand(kSmiTagMask));
2693
+ b(eq, failure);
2694
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
2695
+ second,
2696
+ scratch1,
2697
+ scratch2,
2698
+ failure);
2699
+ }
2700
+
2701
+
2702
+ // Allocates a heap number or jumps to the gc_required label if the young space
2703
+ // is full and a scavenge is needed.
2704
+ void MacroAssembler::AllocateHeapNumber(Register result,
2705
+ Register scratch1,
2706
+ Register scratch2,
2707
+ Register heap_number_map,
2708
+ Label* gc_required) {
2709
+ // Allocate an object in the heap for the heap number and tag it as a heap
2710
+ // object.
2711
+ AllocateInNewSpace(HeapNumber::kSize,
2712
+ result,
2713
+ scratch1,
2714
+ scratch2,
2715
+ gc_required,
2716
+ TAG_OBJECT);
2717
+
2718
+ // Store heap number map in the allocated object.
2719
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2720
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2721
+ }
2722
+
2723
+
2724
+ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2725
+ DwVfpRegister value,
2726
+ Register scratch1,
2727
+ Register scratch2,
2728
+ Register heap_number_map,
2729
+ Label* gc_required) {
2730
+ AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2731
+ sub(scratch1, result, Operand(kHeapObjectTag));
2732
+ vstr(value, scratch1, HeapNumber::kValueOffset);
2733
+ }
2734
+
2735
+
2736
+ // Copies a fixed number of fields of heap objects from src to dst.
2737
+ void MacroAssembler::CopyFields(Register dst,
2738
+ Register src,
2739
+ RegList temps,
2740
+ int field_count) {
2741
+ // At least one bit set in the first 15 registers.
2742
+ ASSERT((temps & ((1 << 15) - 1)) != 0);
2743
+ ASSERT((temps & dst.bit()) == 0);
2744
+ ASSERT((temps & src.bit()) == 0);
2745
+ // Primitive implementation using only one temporary register.
2746
+
2747
+ Register tmp = no_reg;
2748
+ // Find a temp register in temps list.
2749
+ for (int i = 0; i < 15; i++) {
2750
+ if ((temps & (1 << i)) != 0) {
2751
+ tmp.set_code(i);
2752
+ break;
2753
+ }
2754
+ }
2755
+ ASSERT(!tmp.is(no_reg));
2756
+
2757
+ for (int i = 0; i < field_count; i++) {
2758
+ ldr(tmp, FieldMemOperand(src, i * kPointerSize));
2759
+ str(tmp, FieldMemOperand(dst, i * kPointerSize));
2760
+ }
2761
+ }
2762
+
2763
+
2764
+ void MacroAssembler::CopyBytes(Register src,
2765
+ Register dst,
2766
+ Register length,
2767
+ Register scratch) {
2768
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
2769
+
2770
+ // Align src before copying in word size chunks.
2771
+ bind(&align_loop);
2772
+ cmp(length, Operand(0));
2773
+ b(eq, &done);
2774
+ bind(&align_loop_1);
2775
+ tst(src, Operand(kPointerSize - 1));
2776
+ b(eq, &word_loop);
2777
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
2778
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2779
+ sub(length, length, Operand(1), SetCC);
2780
+ b(ne, &byte_loop_1);
2781
+
2782
+ // Copy bytes in word size chunks.
2783
+ bind(&word_loop);
2784
+ if (emit_debug_code()) {
2785
+ tst(src, Operand(kPointerSize - 1));
2786
+ Assert(eq, "Expecting alignment for CopyBytes");
2787
+ }
2788
+ cmp(length, Operand(kPointerSize));
2789
+ b(lt, &byte_loop);
2790
+ ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
2791
+ #if CAN_USE_UNALIGNED_ACCESSES
2792
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
2793
+ #else
2794
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2795
+ mov(scratch, Operand(scratch, LSR, 8));
2796
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2797
+ mov(scratch, Operand(scratch, LSR, 8));
2798
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2799
+ mov(scratch, Operand(scratch, LSR, 8));
2800
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2801
+ #endif
2802
+ sub(length, length, Operand(kPointerSize));
2803
+ b(&word_loop);
2804
+
2805
+ // Copy the last bytes if any left.
2806
+ bind(&byte_loop);
2807
+ cmp(length, Operand(0));
2808
+ b(eq, &done);
2809
+ bind(&byte_loop_1);
2810
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
2811
+ strb(scratch, MemOperand(dst, 1, PostIndex));
2812
+ sub(length, length, Operand(1), SetCC);
2813
+ b(ne, &byte_loop_1);
2814
+ bind(&done);
2815
+ }
2816
+
2817
+
2818
+ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
2819
+ Register source, // Input.
2820
+ Register scratch) {
2821
+ ASSERT(!zeros.is(source) || !source.is(scratch));
2822
+ ASSERT(!zeros.is(scratch));
2823
+ ASSERT(!scratch.is(ip));
2824
+ ASSERT(!source.is(ip));
2825
+ ASSERT(!zeros.is(ip));
2826
+ #ifdef CAN_USE_ARMV5_INSTRUCTIONS
2827
+ clz(zeros, source); // This instruction is only supported on ARMv5 and later.
2828
+ #else
2829
+ mov(zeros, Operand(0, RelocInfo::NONE));
2830
+ Move(scratch, source);
2831
+ // Top 16.
2832
+ tst(scratch, Operand(0xffff0000));
2833
+ add(zeros, zeros, Operand(16), LeaveCC, eq);
2834
+ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
2835
+ // Top 8.
2836
+ tst(scratch, Operand(0xff000000));
2837
+ add(zeros, zeros, Operand(8), LeaveCC, eq);
2838
+ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
2839
+ // Top 4.
2840
+ tst(scratch, Operand(0xf0000000));
2841
+ add(zeros, zeros, Operand(4), LeaveCC, eq);
2842
+ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
2843
+ // Top 2.
2844
+ tst(scratch, Operand(0xc0000000));
2845
+ add(zeros, zeros, Operand(2), LeaveCC, eq);
2846
+ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
2847
+ // Top bit.
2848
+ tst(scratch, Operand(0x80000000u));
2849
+ add(zeros, zeros, Operand(1), LeaveCC, eq);
2850
+ #endif
2851
+ }
2852
+
2853
+
2854
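The non-CLZ fallback above is a binary search over the top 16, 8, 4, 2 and 1 bits. The same idea in plain C++, for comparison; like the fallback, it reports 31 rather than 32 for a zero input.

#include <cstdint>

int CountLeadingZerosSketch(uint32_t x) {
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8; }
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4; }
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2; }
  if ((x & 0x80000000u) == 0) { zeros += 1; }
  return zeros;
}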
+ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2855
+ Register first,
2856
+ Register second,
2857
+ Register scratch1,
2858
+ Register scratch2,
2859
+ Label* failure) {
2860
+ int kFlatAsciiStringMask =
2861
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2862
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2863
+ and_(scratch1, first, Operand(kFlatAsciiStringMask));
2864
+ and_(scratch2, second, Operand(kFlatAsciiStringMask));
2865
+ cmp(scratch1, Operand(kFlatAsciiStringTag));
2866
+ // Ignore second test if first test failed.
2867
+ cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
2868
+ b(ne, failure);
2869
+ }
2870
+
2871
+
2872
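The predicate checked above, restated in plain C++: both instance types must carry the "sequential ASCII string" bits once the irrelevant bits are masked off. The numeric constants below are assumptions standing in for the V8 instance-type masks and ASCII_STRING_TYPE.

#include <cstdint>

bool BothSequentialAsciiSketch(uint32_t first_type, uint32_t second_type) {
  const uint32_t kIsNotStringMask = 0x80;           // assumed
  const uint32_t kStringEncodingMask = 0x04;        // assumed
  const uint32_t kStringRepresentationMask = 0x03;  // assumed
  const uint32_t kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const uint32_t kFlatAsciiStringTag = 0x04;        // assumed ASCII_STRING_TYPE
  return (first_type & kFlatAsciiStringMask) == kFlatAsciiStringTag &&
         (second_type & kFlatAsciiStringMask) == kFlatAsciiStringTag;
}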
+ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
2873
+ Register scratch,
2874
+ Label* failure) {
2875
+ int kFlatAsciiStringMask =
2876
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2877
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2878
+ and_(scratch, type, Operand(kFlatAsciiStringMask));
2879
+ cmp(scratch, Operand(kFlatAsciiStringTag));
2880
+ b(ne, failure);
2881
+ }
2882
+
2883
+ static const int kRegisterPassedArguments = 4;
2884
+
2885
+
2886
+ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2887
+ int num_double_arguments) {
2888
+ int stack_passed_words = 0;
2889
+ if (use_eabi_hardfloat()) {
2890
+ // In the hard floating point calling convention, we can use
2891
+ // all double registers to pass doubles.
2892
+ if (num_double_arguments > DoubleRegister::kNumRegisters) {
2893
+ stack_passed_words +=
2894
+ 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2895
+ }
2896
+ } else {
2897
+ // In the soft floating point calling convention, every double
2898
+ // argument is passed using two registers.
2899
+ num_reg_arguments += 2 * num_double_arguments;
2900
+ }
2901
+ // Up to four simple arguments are passed in registers r0..r3.
2902
+ if (num_reg_arguments > kRegisterPassedArguments) {
2903
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2904
+ }
2905
+ return stack_passed_words;
2906
+ }
2907
+
2908
+
2909
+ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2910
+ int num_double_arguments,
2911
+ Register scratch) {
2912
+ int frame_alignment = ActivationFrameAlignment();
2913
+ int stack_passed_arguments = CalculateStackPassedWords(
2914
+ num_reg_arguments, num_double_arguments);
2915
+ if (frame_alignment > kPointerSize) {
2916
+ // Make stack end at alignment and make room for num_arguments - 4 words
2917
+ // and the original value of sp.
2918
+ mov(scratch, sp);
2919
+ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
2920
+ ASSERT(IsPowerOf2(frame_alignment));
2921
+ and_(sp, sp, Operand(-frame_alignment));
2922
+ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2923
+ } else {
2924
+ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2925
+ }
2926
+ }
2927
+
2928
+
2929
+ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2930
+ Register scratch) {
2931
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
2932
+ }
2933
+
2934
+
2935
+ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
2936
+ if (use_eabi_hardfloat()) {
2937
+ Move(d0, dreg);
2938
+ } else {
2939
+ vmov(r0, r1, dreg);
2940
+ }
2941
+ }
2942
+
2943
+
2944
+ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
2945
+ DoubleRegister dreg2) {
2946
+ if (use_eabi_hardfloat()) {
2947
+ if (dreg2.is(d0)) {
2948
+ ASSERT(!dreg1.is(d1));
2949
+ Move(d1, dreg2);
2950
+ Move(d0, dreg1);
2951
+ } else {
2952
+ Move(d0, dreg1);
2953
+ Move(d1, dreg2);
2954
+ }
2955
+ } else {
2956
+ vmov(r0, r1, dreg1);
2957
+ vmov(r2, r3, dreg2);
2958
+ }
2959
+ }
2960
+
2961
+
2962
+ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
2963
+ Register reg) {
2964
+ if (use_eabi_hardfloat()) {
2965
+ Move(d0, dreg);
2966
+ Move(r0, reg);
2967
+ } else {
2968
+ Move(r2, reg);
2969
+ vmov(r0, r1, dreg);
2970
+ }
2971
+ }
2972
+
2973
+
2974
+ void MacroAssembler::CallCFunction(ExternalReference function,
2975
+ int num_reg_arguments,
2976
+ int num_double_arguments) {
2977
+ CallCFunctionHelper(no_reg,
2978
+ function,
2979
+ ip,
2980
+ num_reg_arguments,
2981
+ num_double_arguments);
2982
+ }
2983
+
2984
+
2985
+ void MacroAssembler::CallCFunction(Register function,
2986
+ Register scratch,
2987
+ int num_reg_arguments,
2988
+ int num_double_arguments) {
2989
+ CallCFunctionHelper(function,
2990
+ ExternalReference::the_hole_value_location(isolate()),
2991
+ scratch,
2992
+ num_reg_arguments,
2993
+ num_double_arguments);
2994
+ }
2995
+
2996
+
2997
+ void MacroAssembler::CallCFunction(ExternalReference function,
2998
+ int num_arguments) {
2999
+ CallCFunction(function, num_arguments, 0);
3000
+ }
3001
+
3002
+
3003
+ void MacroAssembler::CallCFunction(Register function,
3004
+ Register scratch,
3005
+ int num_arguments) {
3006
+ CallCFunction(function, scratch, num_arguments, 0);
3007
+ }
3008
+
3009
+
3010
+ void MacroAssembler::CallCFunctionHelper(Register function,
3011
+ ExternalReference function_reference,
3012
+ Register scratch,
3013
+ int num_reg_arguments,
3014
+ int num_double_arguments) {
3015
+ // Make sure that the stack is aligned before calling a C function unless
3016
+ // running in the simulator. The simulator has its own alignment check which
3017
+ // provides more information.
3018
+ #if defined(V8_HOST_ARCH_ARM)
3019
+ if (emit_debug_code()) {
3020
+ int frame_alignment = OS::ActivationFrameAlignment();
3021
+ int frame_alignment_mask = frame_alignment - 1;
3022
+ if (frame_alignment > kPointerSize) {
3023
+ ASSERT(IsPowerOf2(frame_alignment));
3024
+ Label alignment_as_expected;
3025
+ tst(sp, Operand(frame_alignment_mask));
3026
+ b(eq, &alignment_as_expected);
3027
+ // Don't use Check here, as it will call Runtime_Abort possibly
3028
+ // re-entering here.
3029
+ stop("Unexpected alignment");
3030
+ bind(&alignment_as_expected);
3031
+ }
3032
+ }
3033
+ #endif
3034
+
3035
+ // Just call directly. The function called cannot cause a GC, or
3036
+ // allow preemption, so the return address in the link register
3037
+ // stays correct.
3038
+ if (function.is(no_reg)) {
3039
+ mov(scratch, Operand(function_reference));
3040
+ function = scratch;
3041
+ }
3042
+ Call(function);
3043
+ int stack_passed_arguments = CalculateStackPassedWords(
3044
+ num_reg_arguments, num_double_arguments);
3045
+ if (ActivationFrameAlignment() > kPointerSize) {
3046
+ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3047
+ } else {
3048
+ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3049
+ }
3050
+ }
3051
+
3052
+
3053
+ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3054
+ Register result) {
3055
+ const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3056
+ const int32_t kPCRegOffset = 2 * kPointerSize;
3057
+ ldr(result, MemOperand(ldr_location));
3058
+ if (emit_debug_code()) {
3059
+ // Check that the instruction is a ldr reg, [pc + offset].
3060
+ and_(result, result, Operand(kLdrPCPattern));
3061
+ cmp(result, Operand(kLdrPCPattern));
3062
+ Check(eq, "The instruction to patch should be a load from pc.");
3063
+ // Result was clobbered. Restore it.
3064
+ ldr(result, MemOperand(ldr_location));
3065
+ }
3066
+ // Get the address of the constant.
3067
+ and_(result, result, Operand(kLdrOffsetMask));
3068
+ add(result, ldr_location, Operand(result));
3069
+ add(result, result, Operand(kPCRegOffset));
3070
+ }
3071
+
3072
+
3073
+ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3074
+ Usat(output_reg, 8, Operand(input_reg));
3075
+ }
3076
+
3077
+
3078
+ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3079
+ DoubleRegister input_reg,
3080
+ DoubleRegister temp_double_reg) {
3081
+ Label above_zero;
3082
+ Label done;
3083
+ Label in_bounds;
3084
+
3085
+ vmov(temp_double_reg, 0.0);
3086
+ VFPCompareAndSetFlags(input_reg, temp_double_reg);
3087
+ b(gt, &above_zero);
3088
+
3089
+ // Double value is less than or equal to zero, or NaN: return 0.
3090
+ mov(result_reg, Operand(0));
3091
+ b(al, &done);
3092
+
3093
+ // Double value is greater than 255: return 255.
3094
+ bind(&above_zero);
3095
+ vmov(temp_double_reg, 255.0);
3096
+ VFPCompareAndSetFlags(input_reg, temp_double_reg);
3097
+ b(le, &in_bounds);
3098
+ mov(result_reg, Operand(255));
3099
+ b(al, &done);
3100
+
3101
+ // In 0-255 range, round and truncate.
3102
+ bind(&in_bounds);
3103
+ vmov(temp_double_reg, 0.5);
3104
+ vadd(temp_double_reg, input_reg, temp_double_reg);
3105
+ vcvt_u32_f64(s0, temp_double_reg);
3106
+ vmov(result_reg, s0);
3107
+ bind(&done);
3108
+ }
3109
+
3110
+
3111
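The clamping rule ClampDoubleToUint8 implements, sketched in plain C++: NaN and non-positive inputs clamp to 0, inputs above 255 clamp to 255, and everything in between is rounded by adding 0.5 and truncating (matching the default round-to-zero of the VFP conversion).

#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;    // also catches NaN
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // round half up, then truncate
}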
+ void MacroAssembler::LoadInstanceDescriptors(Register map,
3112
+ Register descriptors) {
3113
+ ldr(descriptors,
3114
+ FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
3115
+ Label not_smi;
3116
+ JumpIfNotSmi(descriptors, &not_smi);
3117
+ mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
3118
+ bind(&not_smi);
3119
+ }
3120
+
3121
+
3122
+ CodePatcher::CodePatcher(byte* address, int instructions)
3123
+ : address_(address),
3124
+ instructions_(instructions),
3125
+ size_(instructions * Assembler::kInstrSize),
3126
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
3127
+ // Create a new macro assembler pointing to the address of the code to patch.
3128
+ // The size is adjusted with kGap in order for the assembler to generate size
3129
+ // bytes of instructions without failing with buffer size constraints.
3130
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3131
+ }
3132
+
3133
+
3134
+ CodePatcher::~CodePatcher() {
3135
+ // Indicate that code has changed.
3136
+ CPU::FlushICache(address_, size_);
3137
+
3138
+ // Check that the code was patched as expected.
3139
+ ASSERT(masm_.pc_ == address_ + size_);
3140
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3141
+ }
3142
+
3143
+
3144
+ void CodePatcher::Emit(Instr instr) {
3145
+ masm()->emit(instr);
3146
+ }
3147
+
3148
+
3149
+ void CodePatcher::Emit(Address addr) {
3150
+ masm()->emit(reinterpret_cast<Instr>(addr));
3151
+ }
3152
+
3153
+
3154
+ void CodePatcher::EmitCondition(Condition cond) {
3155
+ Instr instr = Assembler::instr_at(masm_.pc_);
3156
+ instr = (instr & ~kCondMask) | cond;
3157
+ masm_.emit(instr);
3158
+ }
3159
+
3160
+
3161
+ } } // namespace v8::internal
3162
+
3163
+ #endif // V8_TARGET_ARCH_ARM