libv8 3.3.10.4 → 3.5.10.beta1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (538) hide show
  1. data/lib/libv8/scons/CHANGES.txt +24 -231
  2. data/lib/libv8/scons/LICENSE.txt +1 -1
  3. data/lib/libv8/scons/MANIFEST +0 -1
  4. data/lib/libv8/scons/PKG-INFO +1 -1
  5. data/lib/libv8/scons/README.txt +9 -9
  6. data/lib/libv8/scons/RELEASE.txt +75 -77
  7. data/lib/libv8/scons/engine/SCons/Action.py +6 -22
  8. data/lib/libv8/scons/engine/SCons/Builder.py +2 -2
  9. data/lib/libv8/scons/engine/SCons/CacheDir.py +2 -2
  10. data/lib/libv8/scons/engine/SCons/Debug.py +2 -2
  11. data/lib/libv8/scons/engine/SCons/Defaults.py +10 -24
  12. data/lib/libv8/scons/engine/SCons/Environment.py +19 -118
  13. data/lib/libv8/scons/engine/SCons/Errors.py +2 -2
  14. data/lib/libv8/scons/engine/SCons/Executor.py +2 -2
  15. data/lib/libv8/scons/engine/SCons/Job.py +2 -2
  16. data/lib/libv8/scons/engine/SCons/Memoize.py +2 -2
  17. data/lib/libv8/scons/engine/SCons/Node/Alias.py +2 -2
  18. data/lib/libv8/scons/engine/SCons/Node/FS.py +121 -281
  19. data/lib/libv8/scons/engine/SCons/Node/Python.py +2 -2
  20. data/lib/libv8/scons/engine/SCons/Node/__init__.py +5 -6
  21. data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +2 -2
  22. data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +2 -2
  23. data/lib/libv8/scons/engine/SCons/Options/ListOption.py +2 -2
  24. data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +2 -2
  25. data/lib/libv8/scons/engine/SCons/Options/PathOption.py +2 -2
  26. data/lib/libv8/scons/engine/SCons/Options/__init__.py +2 -2
  27. data/lib/libv8/scons/engine/SCons/PathList.py +2 -2
  28. data/lib/libv8/scons/engine/SCons/Platform/__init__.py +2 -2
  29. data/lib/libv8/scons/engine/SCons/Platform/aix.py +2 -2
  30. data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +2 -2
  31. data/lib/libv8/scons/engine/SCons/Platform/darwin.py +3 -27
  32. data/lib/libv8/scons/engine/SCons/Platform/hpux.py +2 -2
  33. data/lib/libv8/scons/engine/SCons/Platform/irix.py +2 -2
  34. data/lib/libv8/scons/engine/SCons/Platform/os2.py +2 -2
  35. data/lib/libv8/scons/engine/SCons/Platform/posix.py +2 -2
  36. data/lib/libv8/scons/engine/SCons/Platform/sunos.py +2 -2
  37. data/lib/libv8/scons/engine/SCons/Platform/win32.py +2 -2
  38. data/lib/libv8/scons/engine/SCons/SConf.py +2 -2
  39. data/lib/libv8/scons/engine/SCons/SConsign.py +3 -9
  40. data/lib/libv8/scons/engine/SCons/Scanner/C.py +2 -2
  41. data/lib/libv8/scons/engine/SCons/Scanner/D.py +2 -2
  42. data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +2 -2
  43. data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +2 -2
  44. data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +2 -2
  45. data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +2 -5
  46. data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +2 -2
  47. data/lib/libv8/scons/engine/SCons/Scanner/RC.py +3 -3
  48. data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +2 -2
  49. data/lib/libv8/scons/engine/SCons/Script/Interactive.py +2 -2
  50. data/lib/libv8/scons/engine/SCons/Script/Main.py +11 -82
  51. data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +5 -5
  52. data/lib/libv8/scons/engine/SCons/Script/SConscript.py +2 -2
  53. data/lib/libv8/scons/engine/SCons/Script/__init__.py +2 -2
  54. data/lib/libv8/scons/engine/SCons/Sig.py +2 -2
  55. data/lib/libv8/scons/engine/SCons/Subst.py +2 -2
  56. data/lib/libv8/scons/engine/SCons/Taskmaster.py +2 -10
  57. data/lib/libv8/scons/engine/SCons/Tool/386asm.py +2 -2
  58. data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +2 -2
  59. data/lib/libv8/scons/engine/SCons/Tool/CVS.py +2 -2
  60. data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +2 -19
  61. data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +2 -2
  62. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +2 -2
  63. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +2 -2
  64. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +2 -2
  65. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +2 -2
  66. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +2 -2
  67. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +6 -9
  68. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +2 -29
  69. data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +2 -2
  70. data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +2 -2
  71. data/lib/libv8/scons/engine/SCons/Tool/RCS.py +2 -2
  72. data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +2 -2
  73. data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +2 -2
  74. data/lib/libv8/scons/engine/SCons/Tool/__init__.py +3 -3
  75. data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +2 -2
  76. data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +2 -2
  77. data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +2 -2
  78. data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +2 -2
  79. data/lib/libv8/scons/engine/SCons/Tool/applelink.py +2 -2
  80. data/lib/libv8/scons/engine/SCons/Tool/ar.py +2 -2
  81. data/lib/libv8/scons/engine/SCons/Tool/as.py +2 -2
  82. data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +2 -2
  83. data/lib/libv8/scons/engine/SCons/Tool/c++.py +2 -2
  84. data/lib/libv8/scons/engine/SCons/Tool/cc.py +2 -2
  85. data/lib/libv8/scons/engine/SCons/Tool/cvf.py +2 -2
  86. data/lib/libv8/scons/engine/SCons/Tool/default.py +2 -2
  87. data/lib/libv8/scons/engine/SCons/Tool/dmd.py +7 -24
  88. data/lib/libv8/scons/engine/SCons/Tool/dvi.py +2 -2
  89. data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +2 -3
  90. data/lib/libv8/scons/engine/SCons/Tool/dvips.py +2 -3
  91. data/lib/libv8/scons/engine/SCons/Tool/f77.py +2 -2
  92. data/lib/libv8/scons/engine/SCons/Tool/f90.py +2 -2
  93. data/lib/libv8/scons/engine/SCons/Tool/f95.py +2 -2
  94. data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +2 -2
  95. data/lib/libv8/scons/engine/SCons/Tool/fortran.py +2 -2
  96. data/lib/libv8/scons/engine/SCons/Tool/g++.py +2 -2
  97. data/lib/libv8/scons/engine/SCons/Tool/g77.py +2 -2
  98. data/lib/libv8/scons/engine/SCons/Tool/gas.py +2 -2
  99. data/lib/libv8/scons/engine/SCons/Tool/gcc.py +2 -2
  100. data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +3 -3
  101. data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +3 -2
  102. data/lib/libv8/scons/engine/SCons/Tool/gs.py +2 -2
  103. data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +2 -2
  104. data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +2 -2
  105. data/lib/libv8/scons/engine/SCons/Tool/hplink.py +2 -2
  106. data/lib/libv8/scons/engine/SCons/Tool/icc.py +2 -2
  107. data/lib/libv8/scons/engine/SCons/Tool/icl.py +2 -2
  108. data/lib/libv8/scons/engine/SCons/Tool/ifl.py +2 -2
  109. data/lib/libv8/scons/engine/SCons/Tool/ifort.py +2 -2
  110. data/lib/libv8/scons/engine/SCons/Tool/ilink.py +2 -2
  111. data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +2 -2
  112. data/lib/libv8/scons/engine/SCons/Tool/install.py +3 -57
  113. data/lib/libv8/scons/engine/SCons/Tool/intelc.py +25 -65
  114. data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +2 -2
  115. data/lib/libv8/scons/engine/SCons/Tool/jar.py +3 -9
  116. data/lib/libv8/scons/engine/SCons/Tool/javac.py +2 -2
  117. data/lib/libv8/scons/engine/SCons/Tool/javah.py +2 -2
  118. data/lib/libv8/scons/engine/SCons/Tool/latex.py +2 -3
  119. data/lib/libv8/scons/engine/SCons/Tool/lex.py +2 -2
  120. data/lib/libv8/scons/engine/SCons/Tool/link.py +5 -6
  121. data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +2 -2
  122. data/lib/libv8/scons/engine/SCons/Tool/m4.py +2 -2
  123. data/lib/libv8/scons/engine/SCons/Tool/masm.py +2 -2
  124. data/lib/libv8/scons/engine/SCons/Tool/midl.py +2 -2
  125. data/lib/libv8/scons/engine/SCons/Tool/mingw.py +10 -31
  126. data/lib/libv8/scons/engine/SCons/Tool/mslib.py +2 -2
  127. data/lib/libv8/scons/engine/SCons/Tool/mslink.py +9 -61
  128. data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +2 -2
  129. data/lib/libv8/scons/engine/SCons/Tool/msvc.py +11 -21
  130. data/lib/libv8/scons/engine/SCons/Tool/msvs.py +59 -477
  131. data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +2 -2
  132. data/lib/libv8/scons/engine/SCons/Tool/mwld.py +2 -2
  133. data/lib/libv8/scons/engine/SCons/Tool/nasm.py +2 -2
  134. data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +2 -2
  135. data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +2 -2
  136. data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +2 -2
  137. data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +2 -2
  138. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +2 -2
  139. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +2 -2
  140. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +2 -2
  141. data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +2 -2
  142. data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +2 -2
  143. data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +2 -2
  144. data/lib/libv8/scons/engine/SCons/Tool/pdf.py +2 -2
  145. data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +2 -3
  146. data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +2 -3
  147. data/lib/libv8/scons/engine/SCons/Tool/qt.py +2 -2
  148. data/lib/libv8/scons/engine/SCons/Tool/rmic.py +3 -9
  149. data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +2 -2
  150. data/lib/libv8/scons/engine/SCons/Tool/rpm.py +2 -2
  151. data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +2 -2
  152. data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +2 -2
  153. data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +2 -2
  154. data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +3 -2
  155. data/lib/libv8/scons/engine/SCons/Tool/sunar.py +2 -2
  156. data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +2 -2
  157. data/lib/libv8/scons/engine/SCons/Tool/suncc.py +2 -2
  158. data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +2 -2
  159. data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +2 -2
  160. data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +2 -2
  161. data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +3 -2
  162. data/lib/libv8/scons/engine/SCons/Tool/swig.py +5 -6
  163. data/lib/libv8/scons/engine/SCons/Tool/tar.py +2 -2
  164. data/lib/libv8/scons/engine/SCons/Tool/tex.py +43 -96
  165. data/lib/libv8/scons/engine/SCons/Tool/textfile.py +2 -2
  166. data/lib/libv8/scons/engine/SCons/Tool/tlib.py +2 -2
  167. data/lib/libv8/scons/engine/SCons/Tool/wix.py +2 -2
  168. data/lib/libv8/scons/engine/SCons/Tool/yacc.py +2 -12
  169. data/lib/libv8/scons/engine/SCons/Tool/zip.py +2 -2
  170. data/lib/libv8/scons/engine/SCons/Util.py +3 -3
  171. data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +2 -2
  172. data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +3 -3
  173. data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +2 -2
  174. data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +2 -2
  175. data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +2 -2
  176. data/lib/libv8/scons/engine/SCons/Variables/__init__.py +2 -2
  177. data/lib/libv8/scons/engine/SCons/Warnings.py +2 -2
  178. data/lib/libv8/scons/engine/SCons/__init__.py +6 -6
  179. data/lib/libv8/scons/engine/SCons/compat/__init__.py +2 -2
  180. data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +2 -2
  181. data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +2 -2
  182. data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +2 -2
  183. data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +2 -2
  184. data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +2 -2
  185. data/lib/libv8/scons/engine/SCons/cpp.py +2 -2
  186. data/lib/libv8/scons/engine/SCons/dblite.py +1 -4
  187. data/lib/libv8/scons/engine/SCons/exitfuncs.py +2 -2
  188. data/lib/libv8/scons/scons-time.1 +3 -3
  189. data/lib/libv8/scons/scons.1 +1164 -1170
  190. data/lib/libv8/scons/sconsign.1 +3 -3
  191. data/lib/libv8/scons/script/scons +22 -22
  192. data/lib/libv8/scons/script/scons-time +2 -2
  193. data/lib/libv8/scons/script/scons.bat +4 -7
  194. data/lib/libv8/scons/script/sconsign +20 -21
  195. data/lib/libv8/scons/setup.cfg +1 -0
  196. data/lib/libv8/scons/setup.py +40 -38
  197. data/lib/libv8/v8/.gitignore +1 -1
  198. data/lib/libv8/v8/AUTHORS +2 -0
  199. data/lib/libv8/v8/ChangeLog +387 -0
  200. data/lib/libv8/v8/Makefile +171 -0
  201. data/lib/libv8/v8/SConstruct +124 -51
  202. data/lib/libv8/v8/build/README.txt +31 -14
  203. data/lib/libv8/v8/build/all.gyp +11 -4
  204. data/lib/libv8/v8/build/armu.gypi +6 -2
  205. data/lib/libv8/v8/build/common.gypi +240 -94
  206. data/lib/libv8/v8/build/gyp_v8 +32 -4
  207. data/lib/libv8/v8/build/standalone.gypi +200 -0
  208. data/lib/libv8/v8/include/v8-debug.h +0 -0
  209. data/lib/libv8/v8/include/v8-profiler.h +8 -11
  210. data/lib/libv8/v8/include/v8.h +191 -108
  211. data/lib/libv8/v8/preparser/SConscript +2 -2
  212. data/lib/libv8/v8/preparser/preparser-process.cc +3 -3
  213. data/lib/libv8/v8/preparser/preparser.gyp +42 -0
  214. data/lib/libv8/v8/src/SConscript +33 -8
  215. data/lib/libv8/v8/src/accessors.cc +77 -43
  216. data/lib/libv8/v8/src/api.cc +393 -191
  217. data/lib/libv8/v8/src/api.h +4 -8
  218. data/lib/libv8/v8/src/apinatives.js +15 -3
  219. data/lib/libv8/v8/src/arguments.h +8 -0
  220. data/lib/libv8/v8/src/arm/assembler-arm.cc +120 -120
  221. data/lib/libv8/v8/src/arm/assembler-arm.h +92 -43
  222. data/lib/libv8/v8/src/arm/builtins-arm.cc +32 -39
  223. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +572 -351
  224. data/lib/libv8/v8/src/arm/code-stubs-arm.h +8 -77
  225. data/lib/libv8/v8/src/arm/codegen-arm.h +0 -2
  226. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +50 -30
  227. data/lib/libv8/v8/src/arm/disasm-arm.cc +1 -1
  228. data/lib/libv8/v8/src/arm/frames-arm.h +9 -5
  229. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +331 -432
  230. data/lib/libv8/v8/src/arm/ic-arm.cc +192 -124
  231. data/lib/libv8/v8/src/arm/lithium-arm.cc +216 -232
  232. data/lib/libv8/v8/src/arm/lithium-arm.h +106 -259
  233. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +633 -642
  234. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +4 -4
  235. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +1 -3
  236. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +260 -185
  237. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +45 -25
  238. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +25 -13
  239. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +3 -0
  240. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +413 -226
  241. data/lib/libv8/v8/src/array.js +38 -18
  242. data/lib/libv8/v8/src/assembler.cc +12 -5
  243. data/lib/libv8/v8/src/assembler.h +15 -9
  244. data/lib/libv8/v8/src/ast-inl.h +34 -25
  245. data/lib/libv8/v8/src/ast.cc +141 -72
  246. data/lib/libv8/v8/src/ast.h +255 -181
  247. data/lib/libv8/v8/src/bignum.cc +3 -4
  248. data/lib/libv8/v8/src/bootstrapper.cc +55 -11
  249. data/lib/libv8/v8/src/bootstrapper.h +3 -2
  250. data/lib/libv8/v8/src/builtins.cc +8 -2
  251. data/lib/libv8/v8/src/builtins.h +4 -0
  252. data/lib/libv8/v8/src/cached-powers.cc +8 -4
  253. data/lib/libv8/v8/src/checks.h +3 -3
  254. data/lib/libv8/v8/src/code-stubs.cc +173 -28
  255. data/lib/libv8/v8/src/code-stubs.h +104 -148
  256. data/lib/libv8/v8/src/codegen.cc +8 -8
  257. data/lib/libv8/v8/src/compilation-cache.cc +2 -47
  258. data/lib/libv8/v8/src/compilation-cache.h +0 -10
  259. data/lib/libv8/v8/src/compiler.cc +27 -16
  260. data/lib/libv8/v8/src/compiler.h +13 -18
  261. data/lib/libv8/v8/src/contexts.cc +107 -72
  262. data/lib/libv8/v8/src/contexts.h +70 -34
  263. data/lib/libv8/v8/src/conversions-inl.h +572 -14
  264. data/lib/libv8/v8/src/conversions.cc +9 -707
  265. data/lib/libv8/v8/src/conversions.h +23 -12
  266. data/lib/libv8/v8/src/cpu-profiler-inl.h +2 -19
  267. data/lib/libv8/v8/src/cpu-profiler.cc +4 -21
  268. data/lib/libv8/v8/src/cpu-profiler.h +8 -17
  269. data/lib/libv8/v8/src/d8-debug.cc +5 -3
  270. data/lib/libv8/v8/src/d8-debug.h +6 -7
  271. data/lib/libv8/v8/src/d8-posix.cc +1 -10
  272. data/lib/libv8/v8/src/d8.cc +721 -219
  273. data/lib/libv8/v8/src/d8.gyp +37 -12
  274. data/lib/libv8/v8/src/d8.h +141 -19
  275. data/lib/libv8/v8/src/d8.js +17 -8
  276. data/lib/libv8/v8/src/date.js +16 -5
  277. data/lib/libv8/v8/src/dateparser-inl.h +242 -39
  278. data/lib/libv8/v8/src/dateparser.cc +38 -4
  279. data/lib/libv8/v8/src/dateparser.h +170 -28
  280. data/lib/libv8/v8/src/debug-agent.cc +5 -3
  281. data/lib/libv8/v8/src/debug-agent.h +11 -7
  282. data/lib/libv8/v8/src/debug-debugger.js +65 -34
  283. data/lib/libv8/v8/src/debug.cc +30 -60
  284. data/lib/libv8/v8/src/debug.h +5 -3
  285. data/lib/libv8/v8/src/deoptimizer.cc +227 -10
  286. data/lib/libv8/v8/src/deoptimizer.h +133 -9
  287. data/lib/libv8/v8/src/disassembler.cc +22 -14
  288. data/lib/libv8/v8/src/diy-fp.cc +4 -3
  289. data/lib/libv8/v8/src/diy-fp.h +3 -3
  290. data/lib/libv8/v8/src/elements.cc +634 -0
  291. data/lib/libv8/v8/src/elements.h +95 -0
  292. data/lib/libv8/v8/src/execution.cc +5 -21
  293. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +3 -1
  294. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +1 -1
  295. data/lib/libv8/v8/src/extensions/experimental/collator.cc +6 -2
  296. data/lib/libv8/v8/src/extensions/experimental/collator.h +1 -2
  297. data/lib/libv8/v8/src/extensions/experimental/datetime-format.cc +384 -0
  298. data/lib/libv8/v8/src/extensions/experimental/datetime-format.h +83 -0
  299. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +18 -7
  300. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +12 -16
  301. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +1 -1
  302. data/lib/libv8/v8/src/extensions/experimental/i18n-js2c.py +126 -0
  303. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +3 -4
  304. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +1 -1
  305. data/lib/libv8/v8/src/{shell.h → extensions/experimental/i18n-natives.h} +8 -20
  306. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +45 -1
  307. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +21 -1
  308. data/lib/libv8/v8/src/extensions/experimental/i18n.js +211 -11
  309. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +4 -3
  310. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +1 -1
  311. data/lib/libv8/v8/src/extensions/experimental/number-format.cc +374 -0
  312. data/lib/libv8/v8/src/extensions/experimental/number-format.h +71 -0
  313. data/lib/libv8/v8/src/factory.cc +89 -18
  314. data/lib/libv8/v8/src/factory.h +36 -8
  315. data/lib/libv8/v8/src/flag-definitions.h +11 -44
  316. data/lib/libv8/v8/src/frames-inl.h +8 -1
  317. data/lib/libv8/v8/src/frames.cc +39 -3
  318. data/lib/libv8/v8/src/frames.h +10 -3
  319. data/lib/libv8/v8/src/full-codegen.cc +311 -293
  320. data/lib/libv8/v8/src/full-codegen.h +183 -143
  321. data/lib/libv8/v8/src/func-name-inferrer.cc +29 -15
  322. data/lib/libv8/v8/src/func-name-inferrer.h +19 -9
  323. data/lib/libv8/v8/src/gdb-jit.cc +658 -55
  324. data/lib/libv8/v8/src/gdb-jit.h +6 -2
  325. data/lib/libv8/v8/src/global-handles.cc +368 -312
  326. data/lib/libv8/v8/src/global-handles.h +29 -36
  327. data/lib/libv8/v8/src/globals.h +3 -1
  328. data/lib/libv8/v8/src/handles.cc +43 -69
  329. data/lib/libv8/v8/src/handles.h +21 -16
  330. data/lib/libv8/v8/src/heap-inl.h +11 -13
  331. data/lib/libv8/v8/src/heap-profiler.cc +0 -999
  332. data/lib/libv8/v8/src/heap-profiler.h +0 -303
  333. data/lib/libv8/v8/src/heap.cc +366 -141
  334. data/lib/libv8/v8/src/heap.h +87 -26
  335. data/lib/libv8/v8/src/hydrogen-instructions.cc +192 -81
  336. data/lib/libv8/v8/src/hydrogen-instructions.h +711 -482
  337. data/lib/libv8/v8/src/hydrogen.cc +1146 -629
  338. data/lib/libv8/v8/src/hydrogen.h +100 -64
  339. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +19 -0
  340. data/lib/libv8/v8/src/ia32/assembler-ia32.h +15 -2
  341. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +34 -39
  342. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +675 -377
  343. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +8 -69
  344. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +1 -0
  345. data/lib/libv8/v8/src/ia32/codegen-ia32.h +0 -2
  346. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +3 -2
  347. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +28 -3
  348. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +21 -10
  349. data/lib/libv8/v8/src/ia32/frames-ia32.h +6 -5
  350. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +459 -465
  351. data/lib/libv8/v8/src/ia32/ic-ia32.cc +196 -147
  352. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +575 -650
  353. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +19 -21
  354. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +7 -2
  355. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +261 -256
  356. data/lib/libv8/v8/src/ia32/lithium-ia32.h +234 -335
  357. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +224 -67
  358. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +63 -19
  359. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +22 -8
  360. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +3 -0
  361. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +380 -239
  362. data/lib/libv8/v8/src/ic.cc +198 -234
  363. data/lib/libv8/v8/src/ic.h +32 -30
  364. data/lib/libv8/v8/src/interpreter-irregexp.cc +6 -4
  365. data/lib/libv8/v8/src/isolate.cc +112 -95
  366. data/lib/libv8/v8/src/isolate.h +55 -71
  367. data/lib/libv8/v8/src/json-parser.h +486 -48
  368. data/lib/libv8/v8/src/json.js +28 -23
  369. data/lib/libv8/v8/src/jsregexp.cc +163 -208
  370. data/lib/libv8/v8/src/jsregexp.h +0 -1
  371. data/lib/libv8/v8/src/lithium-allocator-inl.h +29 -27
  372. data/lib/libv8/v8/src/lithium-allocator.cc +22 -17
  373. data/lib/libv8/v8/src/lithium-allocator.h +8 -8
  374. data/lib/libv8/v8/src/lithium.cc +16 -11
  375. data/lib/libv8/v8/src/lithium.h +31 -34
  376. data/lib/libv8/v8/src/liveedit.cc +111 -15
  377. data/lib/libv8/v8/src/liveedit.h +3 -4
  378. data/lib/libv8/v8/src/liveobjectlist.cc +116 -80
  379. data/lib/libv8/v8/src/liveobjectlist.h +2 -2
  380. data/lib/libv8/v8/src/log-inl.h +0 -4
  381. data/lib/libv8/v8/src/log-utils.cc +25 -143
  382. data/lib/libv8/v8/src/log-utils.h +13 -92
  383. data/lib/libv8/v8/src/log.cc +26 -249
  384. data/lib/libv8/v8/src/log.h +6 -17
  385. data/lib/libv8/v8/src/macros.py +9 -6
  386. data/lib/libv8/v8/src/mark-compact.cc +276 -56
  387. data/lib/libv8/v8/src/mark-compact.h +20 -0
  388. data/lib/libv8/v8/src/messages.js +93 -39
  389. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +9 -3
  390. data/lib/libv8/v8/src/mips/assembler-mips.cc +297 -189
  391. data/lib/libv8/v8/src/mips/assembler-mips.h +121 -54
  392. data/lib/libv8/v8/src/mips/builtins-mips.cc +23 -24
  393. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +484 -263
  394. data/lib/libv8/v8/src/mips/code-stubs-mips.h +8 -83
  395. data/lib/libv8/v8/src/mips/codegen-mips.h +0 -2
  396. data/lib/libv8/v8/src/mips/constants-mips.h +37 -11
  397. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +6 -1
  398. data/lib/libv8/v8/src/mips/frames-mips.h +8 -7
  399. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +258 -419
  400. data/lib/libv8/v8/src/mips/ic-mips.cc +181 -121
  401. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +640 -382
  402. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +94 -89
  403. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +23 -10
  404. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +6 -1
  405. data/lib/libv8/v8/src/mips/simulator-mips.cc +249 -49
  406. data/lib/libv8/v8/src/mips/simulator-mips.h +25 -1
  407. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +373 -161
  408. data/lib/libv8/v8/src/mirror-debugger.js +55 -8
  409. data/lib/libv8/v8/src/misc-intrinsics.h +89 -0
  410. data/lib/libv8/v8/src/mksnapshot.cc +36 -4
  411. data/lib/libv8/v8/src/natives.h +5 -2
  412. data/lib/libv8/v8/src/objects-debug.cc +73 -6
  413. data/lib/libv8/v8/src/objects-inl.h +529 -164
  414. data/lib/libv8/v8/src/objects-printer.cc +67 -12
  415. data/lib/libv8/v8/src/objects-visiting.cc +13 -2
  416. data/lib/libv8/v8/src/objects-visiting.h +41 -1
  417. data/lib/libv8/v8/src/objects.cc +2200 -1177
  418. data/lib/libv8/v8/src/objects.h +912 -283
  419. data/lib/libv8/v8/src/parser.cc +566 -371
  420. data/lib/libv8/v8/src/parser.h +35 -33
  421. data/lib/libv8/v8/src/platform-cygwin.cc +10 -25
  422. data/lib/libv8/v8/src/platform-freebsd.cc +4 -29
  423. data/lib/libv8/v8/src/platform-linux.cc +60 -57
  424. data/lib/libv8/v8/src/platform-macos.cc +4 -27
  425. data/lib/libv8/v8/src/platform-nullos.cc +3 -16
  426. data/lib/libv8/v8/src/platform-openbsd.cc +247 -85
  427. data/lib/libv8/v8/src/platform-posix.cc +43 -1
  428. data/lib/libv8/v8/src/platform-solaris.cc +151 -112
  429. data/lib/libv8/v8/src/platform-tls.h +1 -1
  430. data/lib/libv8/v8/src/platform-win32.cc +65 -39
  431. data/lib/libv8/v8/src/platform.h +17 -14
  432. data/lib/libv8/v8/src/preparse-data-format.h +2 -2
  433. data/lib/libv8/v8/src/preparse-data.h +8 -2
  434. data/lib/libv8/v8/src/preparser-api.cc +2 -18
  435. data/lib/libv8/v8/src/preparser.cc +106 -65
  436. data/lib/libv8/v8/src/preparser.h +26 -5
  437. data/lib/libv8/v8/src/prettyprinter.cc +25 -43
  438. data/lib/libv8/v8/src/profile-generator-inl.h +0 -4
  439. data/lib/libv8/v8/src/profile-generator.cc +213 -34
  440. data/lib/libv8/v8/src/profile-generator.h +9 -9
  441. data/lib/libv8/v8/src/property.h +1 -0
  442. data/lib/libv8/v8/src/proxy.js +74 -4
  443. data/lib/libv8/v8/src/regexp-macro-assembler.cc +10 -6
  444. data/lib/libv8/v8/src/regexp.js +16 -11
  445. data/lib/libv8/v8/src/rewriter.cc +24 -133
  446. data/lib/libv8/v8/src/runtime-profiler.cc +27 -151
  447. data/lib/libv8/v8/src/runtime-profiler.h +5 -31
  448. data/lib/libv8/v8/src/runtime.cc +1450 -681
  449. data/lib/libv8/v8/src/runtime.h +47 -31
  450. data/lib/libv8/v8/src/runtime.js +2 -1
  451. data/lib/libv8/v8/src/scanner-base.cc +358 -220
  452. data/lib/libv8/v8/src/scanner-base.h +30 -138
  453. data/lib/libv8/v8/src/scanner.cc +0 -18
  454. data/lib/libv8/v8/src/scanner.h +0 -15
  455. data/lib/libv8/v8/src/scopeinfo.cc +3 -1
  456. data/lib/libv8/v8/src/scopeinfo.h +1 -6
  457. data/lib/libv8/v8/src/scopes.cc +243 -253
  458. data/lib/libv8/v8/src/scopes.h +58 -109
  459. data/lib/libv8/v8/src/serialize.cc +12 -54
  460. data/lib/libv8/v8/src/serialize.h +47 -0
  461. data/lib/libv8/v8/src/small-pointer-list.h +25 -0
  462. data/lib/libv8/v8/src/spaces-inl.h +4 -50
  463. data/lib/libv8/v8/src/spaces.cc +64 -131
  464. data/lib/libv8/v8/src/spaces.h +19 -70
  465. data/lib/libv8/v8/src/string-stream.cc +3 -1
  466. data/lib/libv8/v8/src/string.js +10 -6
  467. data/lib/libv8/v8/src/strtod.cc +7 -3
  468. data/lib/libv8/v8/src/stub-cache.cc +59 -129
  469. data/lib/libv8/v8/src/stub-cache.h +42 -54
  470. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +1447 -1339
  471. data/lib/libv8/v8/src/token.cc +4 -4
  472. data/lib/libv8/v8/src/token.h +6 -5
  473. data/lib/libv8/v8/src/type-info.cc +173 -129
  474. data/lib/libv8/v8/src/type-info.h +40 -22
  475. data/lib/libv8/v8/src/utils.cc +25 -304
  476. data/lib/libv8/v8/src/utils.h +118 -3
  477. data/lib/libv8/v8/src/v8-counters.h +3 -6
  478. data/lib/libv8/v8/src/v8.cc +34 -27
  479. data/lib/libv8/v8/src/v8.h +7 -7
  480. data/lib/libv8/v8/src/v8conversions.cc +129 -0
  481. data/lib/libv8/v8/src/v8conversions.h +60 -0
  482. data/lib/libv8/v8/src/v8globals.h +15 -6
  483. data/lib/libv8/v8/src/v8natives.js +300 -78
  484. data/lib/libv8/v8/src/v8threads.cc +14 -6
  485. data/lib/libv8/v8/src/v8threads.h +4 -1
  486. data/lib/libv8/v8/src/v8utils.cc +360 -0
  487. data/lib/libv8/v8/src/v8utils.h +17 -66
  488. data/lib/libv8/v8/src/variables.cc +7 -12
  489. data/lib/libv8/v8/src/variables.h +12 -10
  490. data/lib/libv8/v8/src/version.cc +2 -2
  491. data/lib/libv8/v8/src/vm-state-inl.h +0 -41
  492. data/lib/libv8/v8/src/vm-state.h +0 -11
  493. data/lib/libv8/v8/src/weakmap.js +103 -0
  494. data/lib/libv8/v8/src/x64/assembler-x64.h +6 -3
  495. data/lib/libv8/v8/src/x64/builtins-x64.cc +25 -22
  496. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +523 -250
  497. data/lib/libv8/v8/src/x64/code-stubs-x64.h +8 -71
  498. data/lib/libv8/v8/src/x64/codegen-x64.cc +1 -0
  499. data/lib/libv8/v8/src/x64/codegen-x64.h +0 -2
  500. data/lib/libv8/v8/src/x64/cpu-x64.cc +2 -1
  501. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +40 -8
  502. data/lib/libv8/v8/src/x64/disasm-x64.cc +12 -10
  503. data/lib/libv8/v8/src/x64/frames-x64.h +7 -6
  504. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +310 -415
  505. data/lib/libv8/v8/src/x64/ic-x64.cc +180 -117
  506. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +411 -523
  507. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +11 -6
  508. data/lib/libv8/v8/src/x64/lithium-x64.cc +191 -216
  509. data/lib/libv8/v8/src/x64/lithium-x64.h +112 -263
  510. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +177 -61
  511. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +23 -7
  512. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +21 -9
  513. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +6 -0
  514. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +273 -107
  515. data/lib/libv8/v8/src/zone.cc +31 -22
  516. data/lib/libv8/v8/src/zone.h +12 -6
  517. data/lib/libv8/v8/tools/codemap.js +8 -0
  518. data/lib/libv8/v8/tools/gcmole/Makefile +43 -0
  519. data/lib/libv8/v8/tools/gcmole/gcmole.lua +0 -2
  520. data/lib/libv8/v8/tools/gdb-v8-support.py +154 -0
  521. data/lib/libv8/v8/tools/grokdump.py +44 -35
  522. data/lib/libv8/v8/tools/gyp/v8.gyp +94 -248
  523. data/lib/libv8/v8/tools/js2c.py +83 -52
  524. data/lib/libv8/v8/tools/linux-tick-processor +4 -6
  525. data/lib/libv8/v8/tools/ll_prof.py +3 -3
  526. data/lib/libv8/v8/tools/oom_dump/README +3 -1
  527. data/lib/libv8/v8/tools/presubmit.py +11 -4
  528. data/lib/libv8/v8/tools/profile.js +46 -2
  529. data/lib/libv8/v8/tools/splaytree.js +11 -0
  530. data/lib/libv8/v8/tools/stats-viewer.py +15 -11
  531. data/lib/libv8/v8/tools/test-wrapper-gypbuild.py +227 -0
  532. data/lib/libv8/v8/tools/test.py +28 -8
  533. data/lib/libv8/v8/tools/tickprocessor.js +0 -16
  534. data/lib/libv8/version.rb +1 -1
  535. data/libv8.gemspec +2 -2
  536. metadata +31 -19
  537. data/lib/libv8/scons/engine/SCons/Tool/f03.py +0 -63
  538. data/lib/libv8/v8/src/json-parser.cc +0 -504
@@ -80,10 +80,10 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
 
   // Check that the receiver is a valid JS object.
   __ GetObjectType(receiver, scratch0, scratch1);
-  __ Branch(miss, lt, scratch1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
   // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
 
   GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
 
@@ -214,115 +214,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
214
214
  }
215
215
 
216
216
 
217
- static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
218
- Label* miss,
219
- Register elements,
220
- Register key,
221
- Register result,
222
- Register reg0,
223
- Register reg1,
224
- Register reg2) {
225
- // Register use:
226
- //
227
- // elements - holds the slow-case elements of the receiver on entry.
228
- // Unchanged unless 'result' is the same register.
229
- //
230
- // key - holds the smi key on entry.
231
- // Unchanged unless 'result' is the same register.
232
- //
233
- //
234
- // result - holds the result on exit if the load succeeded.
235
- // Allowed to be the same as 'key' or 'result'.
236
- // Unchanged on bailout so 'key' or 'result' can be used
237
- // in further computation.
238
- //
239
- // Scratch registers:
240
- //
241
- // reg0 - holds the untagged key on entry and holds the hash once computed.
242
- //
243
- // reg1 - Used to hold the capacity mask of the dictionary.
244
- //
245
- // reg2 - Used for the index into the dictionary.
246
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
247
- Label done;
248
-
249
- // Compute the hash code from the untagged key. This must be kept in sync
250
- // with ComputeIntegerHash in utils.h.
251
- //
252
- // hash = ~hash + (hash << 15);
253
- __ nor(reg1, reg0, zero_reg);
254
- __ sll(at, reg0, 15);
255
- __ addu(reg0, reg1, at);
256
-
257
- // hash = hash ^ (hash >> 12);
258
- __ srl(at, reg0, 12);
259
- __ xor_(reg0, reg0, at);
260
-
261
- // hash = hash + (hash << 2);
262
- __ sll(at, reg0, 2);
263
- __ addu(reg0, reg0, at);
264
-
265
- // hash = hash ^ (hash >> 4);
266
- __ srl(at, reg0, 4);
267
- __ xor_(reg0, reg0, at);
268
-
269
- // hash = hash * 2057;
270
- __ li(reg1, Operand(2057));
271
- __ mul(reg0, reg0, reg1);
272
-
273
- // hash = hash ^ (hash >> 16);
274
- __ srl(at, reg0, 16);
275
- __ xor_(reg0, reg0, at);
276
-
277
- // Compute the capacity mask.
278
- __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
279
- __ sra(reg1, reg1, kSmiTagSize);
280
- __ Subu(reg1, reg1, Operand(1));
281
-
282
- // Generate an unrolled loop that performs a few probes before giving up.
283
- static const int kProbes = 4;
284
- for (int i = 0; i < kProbes; i++) {
285
- // Use reg2 for index calculations and keep the hash intact in reg0.
286
- __ mov(reg2, reg0);
287
- // Compute the masked index: (hash + i + i * i) & mask.
288
- if (i > 0) {
289
- __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
290
- }
291
- __ and_(reg2, reg2, reg1);
292
-
293
- // Scale the index by multiplying by the element size.
294
- ASSERT(NumberDictionary::kEntrySize == 3);
295
- __ sll(at, reg2, 1); // 2x.
296
- __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
297
-
298
- // Check if the key is identical to the name.
299
- __ sll(at, reg2, kPointerSizeLog2);
300
- __ addu(reg2, elements, at);
301
-
302
- __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
303
- if (i != kProbes - 1) {
304
- __ Branch(&done, eq, key, Operand(at));
305
- } else {
306
- __ Branch(miss, ne, key, Operand(at));
307
- }
308
- }
309
-
310
- __ bind(&done);
311
- // Check that the value is a normal property.
312
- // reg2: elements + (index * kPointerSize).
313
- const int kDetailsOffset =
314
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
315
- __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
316
- __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
317
- __ Branch(miss, ne, at, Operand(zero_reg));
318
-
319
- // Get the value at the masked, scaled index and return.
320
- const int kValueOffset =
321
- NumberDictionary::kElementsStartOffset + kPointerSize;
322
- __ lw(result, FieldMemOperand(reg2, kValueOffset));
323
- }
324
-
325
-
326
217
  void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
327
218
  // ----------- S t a t e -------------
328
219
  // -- a2 : name
@@ -567,7 +458,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
567
458
 
568
459
  // Invoke the function.
569
460
  ParameterCount actual(argc);
570
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
461
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION,
462
+ NullCallWrapper(), CALL_AS_METHOD);
571
463
  }
572
464
 
573
465
 
@@ -750,7 +642,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
750
642
  __ Branch(&slow_load, ne, a3, Operand(at));
751
643
  __ sra(a0, a2, kSmiTagSize);
752
644
  // a0: untagged index
753
- GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
645
+ __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
754
646
  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
755
647
  __ jmp(&do_call);
756
648
 
@@ -892,6 +784,175 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
892
784
  }
893
785
 
894
786
 
787
+ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
788
+ Register object,
789
+ Register key,
790
+ Register scratch1,
791
+ Register scratch2,
792
+ Register scratch3,
793
+ Label* unmapped_case,
794
+ Label* slow_case) {
795
+ Heap* heap = masm->isolate()->heap();
796
+
797
+ // Check that the receiver is a JSObject. Because of the map check
798
+ // later, we do not need to check for interceptors or whether it
799
+ // requires access checks.
800
+ __ JumpIfSmi(object, slow_case);
801
+ // Check that the object is some kind of JSObject.
802
+ __ GetObjectType(object, scratch1, scratch2);
803
+ __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
804
+
805
+ // Check that the key is a positive smi.
806
+ __ And(scratch1, key, Operand(0x8000001));
807
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
808
+
809
+ // Load the elements into scratch1 and check its map.
810
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
811
+ __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
812
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
813
+
814
+ // Check if element is in the range of mapped arguments. If not, jump
815
+ // to the unmapped lookup with the parameter map in scratch1.
816
+ __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
817
+ __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
818
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
819
+
820
+ // Load element index and check whether it is the hole.
821
+ const int kOffset =
822
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
823
+
824
+ __ li(scratch3, Operand(kPointerSize >> 1));
825
+ __ mul(scratch3, key, scratch3);
826
+ __ Addu(scratch3, scratch3, Operand(kOffset));
827
+
828
+ __ Addu(scratch2, scratch1, scratch3);
829
+ __ lw(scratch2, MemOperand(scratch2));
830
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
831
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
832
+
833
+ // Load value from context and return it. We can reuse scratch1 because
834
+ // we do not jump to the unmapped lookup (which requires the parameter
835
+ // map in scratch1).
836
+ __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
837
+ __ li(scratch3, Operand(kPointerSize >> 1));
838
+ __ mul(scratch3, scratch2, scratch3);
839
+ __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
840
+ __ Addu(scratch2, scratch1, scratch3);
841
+ return MemOperand(scratch2);
842
+ }
843
+
844
+
845
+ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
846
+ Register key,
847
+ Register parameter_map,
848
+ Register scratch,
849
+ Label* slow_case) {
850
+ // Element is in arguments backing store, which is referenced by the
851
+ // second element of the parameter_map. The parameter_map register
852
+ // must be loaded with the parameter map of the arguments object and is
853
+ // overwritten.
854
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
855
+ Register backing_store = parameter_map;
856
+ __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
857
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
858
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
859
+ DONT_DO_SMI_CHECK);
860
+ __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
861
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
862
+ __ li(scratch, Operand(kPointerSize >> 1));
863
+ __ mul(scratch, key, scratch);
864
+ __ Addu(scratch,
865
+ scratch,
866
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
867
+ __ Addu(scratch, backing_store, scratch);
868
+ return MemOperand(scratch);
869
+ }
870
+
871
+
872
+ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
873
+ // ---------- S t a t e --------------
874
+ // -- lr : return address
875
+ // -- a0 : key
876
+ // -- a1 : receiver
877
+ // -----------------------------------
878
+ Label slow, notin;
879
+ MemOperand mapped_location =
880
+ GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
881
+ __ lw(v0, mapped_location);
882
+ __ Ret();
883
+ __ bind(&notin);
884
+ // The unmapped lookup expects that the parameter map is in a2.
885
+ MemOperand unmapped_location =
886
+ GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
887
+ __ lw(a2, unmapped_location);
888
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
889
+ __ Branch(&slow, eq, a2, Operand(a3));
890
+ __ mov(v0, a2);
891
+ __ Ret();
892
+ __ bind(&slow);
893
+ GenerateMiss(masm, false);
894
+ }
895
+
896
+
897
+ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
898
+ // ---------- S t a t e --------------
899
+ // -- a0 : value
900
+ // -- a1 : key
901
+ // -- a2 : receiver
902
+ // -- lr : return address
903
+ // -----------------------------------
904
+ Label slow, notin;
905
+ MemOperand mapped_location =
906
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
907
+ __ sw(a0, mapped_location);
908
+ // Verify mapped_location MemOperand is register, with no offset.
909
+ ASSERT_EQ(mapped_location.offset(), 0);
910
+ __ RecordWrite(a3, mapped_location.rm(), t5);
911
+ __ Ret(USE_DELAY_SLOT);
912
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
913
+ __ bind(&notin);
914
+ // The unmapped lookup expects that the parameter map is in a3.
915
+ MemOperand unmapped_location =
916
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
917
+ __ sw(a0, unmapped_location);
918
+ ASSERT_EQ(unmapped_location.offset(), 0);
919
+ __ RecordWrite(a3, unmapped_location.rm(), t5);
920
+ __ Ret(USE_DELAY_SLOT);
921
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
922
+ __ bind(&slow);
923
+ GenerateMiss(masm, false);
924
+ }
925
+
926
+
927
+ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
928
+ int argc) {
929
+ // ----------- S t a t e -------------
930
+ // -- a2 : name
931
+ // -- lr : return address
932
+ // -----------------------------------
933
+ Label slow, notin;
934
+ // Load receiver.
935
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
936
+ MemOperand mapped_location =
937
+ GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
938
+ __ lw(a1, mapped_location);
939
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
940
+ __ bind(&notin);
941
+ // The unmapped lookup expects that the parameter map is in a3.
942
+ MemOperand unmapped_location =
943
+ GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
944
+ __ lw(a1, unmapped_location);
945
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
946
+ __ Branch(&slow, eq, a1, Operand(a3));
947
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
948
+ __ bind(&slow);
949
+ GenerateMiss(masm, argc);
950
+ }
951
+
952
+
953
+ Object* KeyedLoadIC_Miss(Arguments args);
954
+
955
+
895
956
  void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
896
957
  // ---------- S t a t e --------------
897
958
  // -- ra : return address
@@ -949,11 +1010,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
949
1010
  GenerateKeyedLoadReceiverCheck(
950
1011
  masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
951
1012
 
952
- // Check the "has fast elements" bit in the receiver's map which is
953
- // now in a2.
954
- __ lbu(a3, FieldMemOperand(a2, Map::kBitField2Offset));
955
- __ And(at, a3, Operand(1 << Map::kHasFastElements));
956
- __ Branch(&check_number_dictionary, eq, at, Operand(zero_reg));
1013
+ // Check the receiver's map to see if it has fast elements.
1014
+ __ CheckFastElements(a2, a3, &check_number_dictionary);
957
1015
 
958
1016
  GenerateFastArrayLoad(
959
1017
  masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
@@ -972,7 +1030,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
972
1030
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
973
1031
  __ Branch(&slow, ne, a3, Operand(at));
974
1032
  __ sra(a2, a0, kSmiTagSize);
975
- GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
1033
+ __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
976
1034
  __ Ret();
977
1035
 
978
1036
  // Slow case, key and receiver still in a0 and a1.
@@ -1173,8 +1231,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
1173
1231
  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
1174
1232
 
1175
1233
  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
1176
- // Check that the object is some kind of JS object.
1177
- __ Branch(&slow, lt, t3, Operand(FIRST_JS_OBJECT_TYPE));
1234
+ // Check that the object is some kind of JSObject.
1235
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
1236
+ __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
1237
+ __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
1178
1238
 
1179
1239
  // Object case: Check key against length in the elements array.
1180
1240
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -50,87 +50,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
50
50
  }
51
51
 
52
52
 
53
- // Arguments macros.
54
- #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
55
- #define COND_ARGS cond, r1, r2
56
-
57
- #define REGISTER_TARGET_BODY(Name) \
58
- void MacroAssembler::Name(Register target, \
59
- BranchDelaySlot bd) { \
60
- Name(Operand(target), bd); \
61
- } \
62
- void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
63
- BranchDelaySlot bd) { \
64
- Name(Operand(target), COND_ARGS, bd); \
65
- }
66
-
67
-
68
- #define INT_PTR_TARGET_BODY(Name) \
69
- void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
70
- BranchDelaySlot bd) { \
71
- Name(Operand(target, rmode), bd); \
72
- } \
73
- void MacroAssembler::Name(intptr_t target, \
74
- RelocInfo::Mode rmode, \
75
- COND_TYPED_ARGS, \
76
- BranchDelaySlot bd) { \
77
- Name(Operand(target, rmode), COND_ARGS, bd); \
78
- }
79
-
80
-
81
- #define BYTE_PTR_TARGET_BODY(Name) \
82
- void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
83
- BranchDelaySlot bd) { \
84
- Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
85
- } \
86
- void MacroAssembler::Name(byte* target, \
87
- RelocInfo::Mode rmode, \
88
- COND_TYPED_ARGS, \
89
- BranchDelaySlot bd) { \
90
- Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
91
- }
92
-
93
-
94
- #define CODE_TARGET_BODY(Name) \
95
- void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
96
- BranchDelaySlot bd) { \
97
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
98
- } \
99
- void MacroAssembler::Name(Handle<Code> target, \
100
- RelocInfo::Mode rmode, \
101
- COND_TYPED_ARGS, \
102
- BranchDelaySlot bd) { \
103
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
104
- }
105
-
106
-
107
- REGISTER_TARGET_BODY(Jump)
108
- REGISTER_TARGET_BODY(Call)
109
- INT_PTR_TARGET_BODY(Jump)
110
- INT_PTR_TARGET_BODY(Call)
111
- BYTE_PTR_TARGET_BODY(Jump)
112
- BYTE_PTR_TARGET_BODY(Call)
113
- CODE_TARGET_BODY(Jump)
114
- CODE_TARGET_BODY(Call)
115
-
116
- #undef COND_TYPED_ARGS
117
- #undef COND_ARGS
118
- #undef REGISTER_TARGET_BODY
119
- #undef BYTE_PTR_TARGET_BODY
120
- #undef CODE_TARGET_BODY
121
-
122
-
123
- void MacroAssembler::Ret(BranchDelaySlot bd) {
124
- Jump(Operand(ra), bd);
125
- }
126
-
127
-
128
- void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
129
- BranchDelaySlot bd) {
130
- Jump(Operand(ra), cond, r1, r2, bd);
131
- }
132
-
133
-
134
53
  void MacroAssembler::LoadRoot(Register destination,
135
54
  Heap::RootListIndex index) {
136
55
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -193,6 +112,7 @@ void MacroAssembler::RecordWriteHelper(Register object,
193
112
  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
194
113
  }
195
114
 
115
+
196
116
  // Push and pop all registers that can hold pointers.
197
117
  void MacroAssembler::PushSafepointRegisters() {
198
118
  // Safepoints expect a block of kNumSafepointRegisters values on the
@@ -203,12 +123,14 @@ void MacroAssembler::PushSafepointRegisters() {
203
123
  MultiPush(kSafepointSavedRegisters);
204
124
  }
205
125
 
126
+
206
127
  void MacroAssembler::PopSafepointRegisters() {
207
128
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
208
129
  MultiPop(kSafepointSavedRegisters);
209
130
  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
210
131
  }
211
132
 
133
+
212
134
  void MacroAssembler::PushSafepointRegistersAndDoubles() {
213
135
  PushSafepointRegisters();
214
136
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
@@ -218,6 +140,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
218
140
  }
219
141
  }
220
142
 
143
+
221
144
  void MacroAssembler::PopSafepointRegistersAndDoubles() {
222
145
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
223
146
  FPURegister reg = FPURegister::FromAllocationIndex(i);
@@ -227,6 +150,7 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
227
150
  PopSafepointRegisters();
228
151
  }
229
152
 
153
+
230
154
  void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
231
155
  Register dst) {
232
156
  sw(src, SafepointRegistersAndDoublesSlot(dst));
@@ -419,6 +343,114 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
419
343
  }
420
344
 
421
345
 
346
+ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
347
+ Register elements,
348
+ Register key,
349
+ Register result,
350
+ Register reg0,
351
+ Register reg1,
352
+ Register reg2) {
353
+ // Register use:
354
+ //
355
+ // elements - holds the slow-case elements of the receiver on entry.
356
+ // Unchanged unless 'result' is the same register.
357
+ //
358
+ // key - holds the smi key on entry.
359
+ // Unchanged unless 'result' is the same register.
360
+ //
361
+ //
362
+ // result - holds the result on exit if the load succeeded.
363
+ // Allowed to be the same as 'key' or 'result'.
364
+ // Unchanged on bailout so 'key' or 'result' can be used
365
+ // in further computation.
366
+ //
367
+ // Scratch registers:
368
+ //
369
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
370
+ //
371
+ // reg1 - Used to hold the capacity mask of the dictionary.
372
+ //
373
+ // reg2 - Used for the index into the dictionary.
374
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
375
+ Label done;
376
+
377
+ // Compute the hash code from the untagged key. This must be kept in sync
378
+ // with ComputeIntegerHash in utils.h.
379
+ //
380
+ // hash = ~hash + (hash << 15);
381
+ nor(reg1, reg0, zero_reg);
382
+ sll(at, reg0, 15);
383
+ addu(reg0, reg1, at);
384
+
385
+ // hash = hash ^ (hash >> 12);
386
+ srl(at, reg0, 12);
387
+ xor_(reg0, reg0, at);
388
+
389
+ // hash = hash + (hash << 2);
390
+ sll(at, reg0, 2);
391
+ addu(reg0, reg0, at);
392
+
393
+ // hash = hash ^ (hash >> 4);
394
+ srl(at, reg0, 4);
395
+ xor_(reg0, reg0, at);
396
+
397
+ // hash = hash * 2057;
398
+ li(reg1, Operand(2057));
399
+ mul(reg0, reg0, reg1);
400
+
401
+ // hash = hash ^ (hash >> 16);
402
+ srl(at, reg0, 16);
403
+ xor_(reg0, reg0, at);
404
+
405
+ // Compute the capacity mask.
406
+ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
407
+ sra(reg1, reg1, kSmiTagSize);
408
+ Subu(reg1, reg1, Operand(1));
409
+
410
+ // Generate an unrolled loop that performs a few probes before giving up.
411
+ static const int kProbes = 4;
412
+ for (int i = 0; i < kProbes; i++) {
413
+ // Use reg2 for index calculations and keep the hash intact in reg0.
414
+ mov(reg2, reg0);
415
+ // Compute the masked index: (hash + i + i * i) & mask.
416
+ if (i > 0) {
417
+ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
418
+ }
419
+ and_(reg2, reg2, reg1);
420
+
421
+ // Scale the index by multiplying by the element size.
422
+ ASSERT(NumberDictionary::kEntrySize == 3);
423
+ sll(at, reg2, 1); // 2x.
424
+ addu(reg2, reg2, at); // reg2 = reg2 * 3.
425
+
426
+ // Check if the key is identical to the name.
427
+ sll(at, reg2, kPointerSizeLog2);
428
+ addu(reg2, elements, at);
429
+
430
+ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
431
+ if (i != kProbes - 1) {
432
+ Branch(&done, eq, key, Operand(at));
433
+ } else {
434
+ Branch(miss, ne, key, Operand(at));
435
+ }
436
+ }
437
+
438
+ bind(&done);
439
+ // Check that the value is a normal property.
440
+ // reg2: elements + (index * kPointerSize).
441
+ const int kDetailsOffset =
442
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
443
+ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
444
+ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
445
+ Branch(miss, ne, at, Operand(zero_reg));
446
+
447
+ // Get the value at the masked, scaled index and return.
448
+ const int kValueOffset =
449
+ NumberDictionary::kElementsStartOffset + kPointerSize;
450
+ lw(result, FieldMemOperand(reg2, kValueOffset));
451
+ }
452
+
453
+
422
454
  // ---------------------------------------------------------------------------
423
455
  // Instruction macros.
424
456
 
@@ -670,14 +702,6 @@ void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
670
702
  }
671
703
 
672
704
 
673
- // Exception-generating instructions and debugging support.
674
- void MacroAssembler::stop(const char* msg) {
675
- // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
676
- // We use the 0x54321 value to be able to find it easily when reading memory.
677
- break_(0x54321);
678
- }
679
-
680
-
681
705
  void MacroAssembler::MultiPush(RegList regs) {
682
706
  int16_t NumSaved = 0;
683
707
  int16_t NumToPush = NumberOfBitsSet(regs);
@@ -733,15 +757,20 @@ void MacroAssembler::Ext(Register rt,
733
757
  uint16_t pos,
734
758
  uint16_t size) {
735
759
  ASSERT(pos < 32);
736
- ASSERT(pos + size < 32);
760
+ ASSERT(pos + size < 33);
737
761
 
738
762
  if (mips32r2) {
739
763
  ext_(rt, rs, pos, size);
740
764
  } else {
741
765
  // Move rs to rt and shift it left then right to get the
742
766
  // desired bitfield on the right side and zeroes on the left.
743
- sll(rt, rs, 32 - (pos + size));
744
- srl(rt, rt, 32 - size);
767
+ int shift_left = 32 - (pos + size);
768
+ sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
769
+
770
+ int shift_right = 32 - size;
771
+ if (shift_right > 0) {
772
+ srl(rt, rt, shift_right);
773
+ }
745
774
  }
746
775
  }
747
776
 
@@ -783,28 +812,32 @@ void MacroAssembler::Ins(Register rt,
783
812
  }
784
813
 
785
814
 
786
- void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
787
- // Move the data from fs to t4.
788
- mfc1(t4, fs);
789
- return Cvt_d_uw(fd, t4);
815
+ void MacroAssembler::Cvt_d_uw(FPURegister fd,
816
+ FPURegister fs,
817
+ FPURegister scratch) {
818
+ // Move the data from fs to t8.
819
+ mfc1(t8, fs);
820
+ Cvt_d_uw(fd, t8, scratch);
790
821
  }
791
822
 
792
823
 
793
- void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
824
+ void MacroAssembler::Cvt_d_uw(FPURegister fd,
825
+ Register rs,
826
+ FPURegister scratch) {
794
827
  // Convert rs to a FP value in fd (and fd + 1).
795
828
  // We do this by converting rs minus the MSB to avoid sign conversion,
796
- // then adding 2^31-1 and 1 to the result.
829
+ // then adding 2^31 to the result (if needed).
797
830
 
798
- ASSERT(!fd.is(f20));
831
+ ASSERT(!fd.is(scratch));
799
832
  ASSERT(!rs.is(t9));
800
- ASSERT(!rs.is(t8));
833
+ ASSERT(!rs.is(at));
801
834
 
802
- // Save rs's MSB to t8.
803
- And(t8, rs, 0x80000000);
835
+ // Save rs's MSB to t9.
836
+ Ext(t9, rs, 31, 1);
804
837
  // Remove rs's MSB.
805
- And(t9, rs, 0x7FFFFFFF);
806
- // Move t9 to fd.
807
- mtc1(t9, fd);
838
+ Ext(at, rs, 0, 31);
839
+ // Move the result to fd.
840
+ mtc1(at, fd);
808
841
 
809
842
  // Convert fd to a real FP value.
810
843
  cvt_d_w(fd, fd);
@@ -813,41 +846,39 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
813
846
 
814
847
  // If rs's MSB was 0, it's done.
815
848
  // Otherwise we need to add that to the FP register.
816
- Branch(&conversion_done, eq, t8, Operand(zero_reg));
817
-
818
- // First load 2^31 - 1 into f20.
819
- Or(t9, zero_reg, 0x7FFFFFFF);
820
- mtc1(t9, f20);
849
+ Branch(&conversion_done, eq, t9, Operand(zero_reg));
821
850
 
822
- // Convert it to FP and add it to fd.
823
- cvt_d_w(f20, f20);
824
- add_d(fd, fd, f20);
825
- // Now add 1.
826
- Or(t9, zero_reg, 1);
827
- mtc1(t9, f20);
851
+ // Load 2^31 into f20 as its float representation.
852
+ li(at, 0x41E00000);
853
+ mtc1(at, FPURegister::from_code(scratch.code() + 1));
854
+ mtc1(zero_reg, scratch);
855
+ // Add it to fd.
856
+ add_d(fd, fd, scratch);
828
857
 
829
- cvt_d_w(f20, f20);
830
- add_d(fd, fd, f20);
831
858
  bind(&conversion_done);
832
859
  }
833
860
 
834
861
 
835
- void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
836
- Trunc_uw_d(fs, t4);
837
- mtc1(t4, fd);
862
+ void MacroAssembler::Trunc_uw_d(FPURegister fd,
863
+ FPURegister fs,
864
+ FPURegister scratch) {
865
+ Trunc_uw_d(fs, t8, scratch);
866
+ mtc1(t8, fd);
838
867
  }
839
868
 
840
869
 
841
- void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
842
- ASSERT(!fd.is(f22));
843
- ASSERT(!rs.is(t6));
870
+ void MacroAssembler::Trunc_uw_d(FPURegister fd,
871
+ Register rs,
872
+ FPURegister scratch) {
873
+ ASSERT(!fd.is(scratch));
874
+ ASSERT(!rs.is(at));
844
875
 
845
- // Load 2^31 into f22.
846
- Or(t6, zero_reg, 0x80000000);
847
- Cvt_d_uw(f22, t6);
848
-
849
- // Test if f22 > fd.
850
- c(OLT, D, fd, f22);
876
+ // Load 2^31 into scratch as its float representation.
877
+ li(at, 0x41E00000);
878
+ mtc1(at, FPURegister::from_code(scratch.code() + 1));
879
+ mtc1(zero_reg, scratch);
880
+ // Test if scratch > fd.
881
+ c(OLT, D, fd, scratch);
851
882
 
852
883
  Label simple_convert;
853
884
  // If fd < 2^31 we can convert it normally.
@@ -855,18 +886,17 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
855
886
 
856
887
  // First we subtract 2^31 from fd, then trunc it to rs
857
888
  // and add 2^31 to rs.
858
-
859
- sub_d(f22, fd, f22);
860
- trunc_w_d(f22, f22);
861
- mfc1(rs, f22);
862
- or_(rs, rs, t6);
889
+ sub_d(scratch, fd, scratch);
890
+ trunc_w_d(scratch, scratch);
891
+ mfc1(rs, scratch);
892
+ Or(rs, rs, 1 << 31);
863
893
 
864
894
  Label done;
865
895
  Branch(&done);
866
896
  // Simple conversion.
867
897
  bind(&simple_convert);
868
- trunc_w_d(f22, fd);
869
- mfc1(rs, f22);
898
+ trunc_w_d(scratch, fd);
899
+ mfc1(rs, scratch);
870
900
 
871
901
  bind(&done);
872
902
  }
@@ -1052,6 +1082,51 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1052
1082
  }
1053
1083
 
1054
1084
 
1085
+ void MacroAssembler::EmitECMATruncate(Register result,
1086
+ FPURegister double_input,
1087
+ FPURegister single_scratch,
1088
+ Register scratch,
1089
+ Register input_high,
1090
+ Register input_low) {
1091
+ CpuFeatures::Scope scope(FPU);
1092
+ ASSERT(!input_high.is(result));
1093
+ ASSERT(!input_low.is(result));
1094
+ ASSERT(!input_low.is(input_high));
1095
+ ASSERT(!scratch.is(result) &&
1096
+ !scratch.is(input_high) &&
1097
+ !scratch.is(input_low));
1098
+ ASSERT(!single_scratch.is(double_input));
1099
+
1100
+ Label done;
1101
+ Label manual;
1102
+
1103
+ // Clear cumulative exception flags and save the FCSR.
1104
+ Register scratch2 = input_high;
1105
+ cfc1(scratch2, FCSR);
1106
+ ctc1(zero_reg, FCSR);
1107
+ // Try a conversion to a signed integer.
1108
+ trunc_w_d(single_scratch, double_input);
1109
+ mfc1(result, single_scratch);
1110
+ // Retrieve and restore the FCSR.
1111
+ cfc1(scratch, FCSR);
1112
+ ctc1(scratch2, FCSR);
1113
+ // Check for overflow and NaNs.
1114
+ And(scratch,
1115
+ scratch,
1116
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1117
+ // If we had no exceptions we are done.
1118
+ Branch(&done, eq, scratch, Operand(zero_reg));
1119
+
1120
+ // Load the double value and perform a manual truncation.
1121
+ Move(input_low, input_high, double_input);
1122
+ EmitOutOfInt32RangeTruncate(result,
1123
+ input_high,
1124
+ input_low,
1125
+ scratch);
1126
+ bind(&done);
1127
+ }
1128
+
1129
+
1055
1130
  void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1056
1131
  Register src,
1057
1132
  int num_least_bits) {
@@ -1074,7 +1149,54 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1074
1149
  (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1075
1150
 
1076
1151
 
1152
+ bool MacroAssembler::UseAbsoluteCodePointers() {
1153
+ if (is_trampoline_emitted()) {
1154
+ return true;
1155
+ } else {
1156
+ return false;
1157
+ }
1158
+ }
1159
+
1160
+
1077
1161
  void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1162
+ BranchShort(offset, bdslot);
1163
+ }
1164
+
1165
+
1166
+ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1167
+ const Operand& rt,
1168
+ BranchDelaySlot bdslot) {
1169
+ BranchShort(offset, cond, rs, rt, bdslot);
1170
+ }
1171
+
1172
+
1173
+ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1174
+ bool is_label_near = is_near(L);
1175
+ if (UseAbsoluteCodePointers() && !is_label_near) {
1176
+ Jr(L, bdslot);
1177
+ } else {
1178
+ BranchShort(L, bdslot);
1179
+ }
1180
+ }
1181
+
1182
+
1183
+ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1184
+ const Operand& rt,
1185
+ BranchDelaySlot bdslot) {
1186
+ bool is_label_near = is_near(L);
1187
+ if (UseAbsoluteCodePointers() && !is_label_near) {
1188
+ Label skip;
1189
+ Condition neg_cond = NegateCondition(cond);
1190
+ BranchShort(&skip, neg_cond, rs, rt);
1191
+ Jr(L, bdslot);
1192
+ bind(&skip);
1193
+ } else {
1194
+ BranchShort(L, cond, rs, rt, bdslot);
1195
+ }
1196
+ }
1197
+
1198
+
1199
+ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1078
1200
  b(offset);
1079
1201
 
1080
1202
  // Emit a nop in the branch delay slot if required.
@@ -1083,9 +1205,9 @@ void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1083
1205
  }
1084
1206
 
1085
1207
 
1086
- void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1087
- const Operand& rt,
1088
- BranchDelaySlot bdslot) {
1208
+ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1209
+ const Operand& rt,
1210
+ BranchDelaySlot bdslot) {
1089
1211
  BRANCH_ARGS_CHECK(cond, rs, rt);
1090
1212
  ASSERT(!rs.is(zero_reg));
1091
1213
  Register r2 = no_reg;
@@ -1157,7 +1279,8 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1157
1279
  break;
1158
1280
  case Uless:
1159
1281
  if (r2.is(zero_reg)) {
1160
- b(offset);
1282
+ // No code needs to be emitted.
1283
+ return;
1161
1284
  } else {
1162
1285
  sltu(scratch, rs, r2);
1163
1286
  bne(scratch, zero_reg, offset);
@@ -1216,7 +1339,7 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1216
1339
  } else {
1217
1340
  r2 = scratch;
1218
1341
  li(r2, rt);
1219
- sltu(scratch, rs, r2);
1342
+ slt(scratch, rs, r2);
1220
1343
  beq(scratch, zero_reg, offset);
1221
1344
  }
1222
1345
  break;
@@ -1269,7 +1392,8 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1269
1392
  break;
1270
1393
  case Uless:
1271
1394
  if (rt.imm32_ == 0) {
1272
- b(offset);
1395
+ // No code needs to be emitted.
1396
+ return;
1273
1397
  } else if (is_int16(rt.imm32_)) {
1274
1398
  sltiu(scratch, rs, rt.imm32_);
1275
1399
  bne(scratch, zero_reg, offset);
@@ -1300,7 +1424,7 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1300
1424
  }
1301
1425
 
1302
1426
 
1303
- void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1427
+ void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1304
1428
  // We use branch_offset as an argument for the branch instructions to be sure
1305
1429
  // it is called just before generating the branch instruction, as needed.
1306
1430
 
@@ -1312,9 +1436,9 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1312
1436
  }
1313
1437
 
1314
1438
 
1315
- void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1316
- const Operand& rt,
1317
- BranchDelaySlot bdslot) {
1439
+ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1440
+ const Operand& rt,
1441
+ BranchDelaySlot bdslot) {
1318
1442
  BRANCH_ARGS_CHECK(cond, rs, rt);
1319
1443
 
1320
1444
  int32_t offset;
@@ -1402,8 +1526,8 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1402
1526
  break;
1403
1527
  case Uless:
1404
1528
  if (r2.is(zero_reg)) {
1405
- offset = shifted_branch_offset(L, false);
1406
- b(offset);
1529
+ // No code needs to be emitted.
1530
+ return;
1407
1531
  } else {
1408
1532
  sltu(scratch, rs, r2);
1409
1533
  offset = shifted_branch_offset(L, false);
@@ -1433,12 +1557,14 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1433
1557
  b(offset);
1434
1558
  break;
1435
1559
  case eq:
1560
+ ASSERT(!scratch.is(rs));
1436
1561
  r2 = scratch;
1437
1562
  li(r2, rt);
1438
1563
  offset = shifted_branch_offset(L, false);
1439
1564
  beq(rs, r2, offset);
1440
1565
  break;
1441
1566
  case ne:
1567
+ ASSERT(!scratch.is(rs));
1442
1568
  r2 = scratch;
1443
1569
  li(r2, rt);
1444
1570
  offset = shifted_branch_offset(L, false);
@@ -1450,6 +1576,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1450
1576
  offset = shifted_branch_offset(L, false);
1451
1577
  bgtz(rs, offset);
1452
1578
  } else {
1579
+ ASSERT(!scratch.is(rs));
1453
1580
  r2 = scratch;
1454
1581
  li(r2, rt);
1455
1582
  slt(scratch, r2, rs);
@@ -1466,9 +1593,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1466
1593
  offset = shifted_branch_offset(L, false);
1467
1594
  beq(scratch, zero_reg, offset);
1468
1595
  } else {
1596
+ ASSERT(!scratch.is(rs));
1469
1597
  r2 = scratch;
1470
1598
  li(r2, rt);
1471
- sltu(scratch, rs, r2);
1599
+ slt(scratch, rs, r2);
1472
1600
  offset = shifted_branch_offset(L, false);
1473
1601
  beq(scratch, zero_reg, offset);
1474
1602
  }
@@ -1482,6 +1610,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1482
1610
  offset = shifted_branch_offset(L, false);
1483
1611
  bne(scratch, zero_reg, offset);
1484
1612
  } else {
1613
+ ASSERT(!scratch.is(rs));
1485
1614
  r2 = scratch;
1486
1615
  li(r2, rt);
1487
1616
  slt(scratch, rs, r2);
@@ -1494,6 +1623,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1494
1623
  offset = shifted_branch_offset(L, false);
1495
1624
  blez(rs, offset);
1496
1625
  } else {
1626
+ ASSERT(!scratch.is(rs));
1497
1627
  r2 = scratch;
1498
1628
  li(r2, rt);
1499
1629
  slt(scratch, r2, rs);
@@ -1507,6 +1637,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1507
1637
  offset = shifted_branch_offset(L, false);
1508
1638
  bgtz(rs, offset);
1509
1639
  } else {
1640
+ ASSERT(!scratch.is(rs));
1510
1641
  r2 = scratch;
1511
1642
  li(r2, rt);
1512
1643
  sltu(scratch, r2, rs);
@@ -1523,6 +1654,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1523
1654
  offset = shifted_branch_offset(L, false);
1524
1655
  beq(scratch, zero_reg, offset);
1525
1656
  } else {
1657
+ ASSERT(!scratch.is(rs));
1526
1658
  r2 = scratch;
1527
1659
  li(r2, rt);
1528
1660
  sltu(scratch, rs, r2);
@@ -1532,13 +1664,14 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1532
1664
  break;
1533
1665
  case Uless:
1534
1666
  if (rt.imm32_ == 0) {
1535
- offset = shifted_branch_offset(L, false);
1536
- b(offset);
1667
+ // No code needs to be emitted.
1668
+ return;
1537
1669
  } else if (is_int16(rt.imm32_)) {
1538
1670
  sltiu(scratch, rs, rt.imm32_);
1539
1671
  offset = shifted_branch_offset(L, false);
1540
1672
  bne(scratch, zero_reg, offset);
1541
1673
  } else {
1674
+ ASSERT(!scratch.is(rs));
1542
1675
  r2 = scratch;
1543
1676
  li(r2, rt);
1544
1677
  sltu(scratch, rs, r2);
@@ -1551,6 +1684,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1551
1684
  offset = shifted_branch_offset(L, false);
1552
1685
  b(offset);
1553
1686
  } else {
1687
+ ASSERT(!scratch.is(rs));
1554
1688
  r2 = scratch;
1555
1689
  li(r2, rt);
1556
1690
  sltu(scratch, r2, rs);
@@ -1570,11 +1704,49 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1570
1704
  }
1571
1705
 
1572
1706
 
1707
+ void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
1708
+ BranchAndLinkShort(offset, bdslot);
1709
+ }
1710
+
1711
+
1712
+ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1713
+ const Operand& rt,
1714
+ BranchDelaySlot bdslot) {
1715
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
1716
+ }
1717
+
1718
+
1719
+ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1720
+ bool is_label_near = is_near(L);
1721
+ if (UseAbsoluteCodePointers() && !is_label_near) {
1722
+ Jalr(L, bdslot);
1723
+ } else {
1724
+ BranchAndLinkShort(L, bdslot);
1725
+ }
1726
+ }
1727
+
1728
+
1729
+ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1730
+ const Operand& rt,
1731
+ BranchDelaySlot bdslot) {
1732
+ bool is_label_near = is_near(L);
1733
+ if (UseAbsoluteCodePointers() && !is_label_near) {
1734
+ Label skip;
1735
+ Condition neg_cond = NegateCondition(cond);
1736
+ BranchShort(&skip, neg_cond, rs, rt);
1737
+ Jalr(L, bdslot);
1738
+ bind(&skip);
1739
+ } else {
1740
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
1741
+ }
1742
+ }
1743
+
1744
+
1573
1745
  // We need to use a bgezal or bltzal, but they can't be used directly with the
1574
1746
  // slt instructions. We could use sub or add instead but we would miss overflow
1575
1747
  // cases, so we keep slt and add an intermediate third instruction.
1576
- void MacroAssembler::BranchAndLink(int16_t offset,
1577
- BranchDelaySlot bdslot) {
1748
+ void MacroAssembler::BranchAndLinkShort(int16_t offset,
1749
+ BranchDelaySlot bdslot) {
1578
1750
  bal(offset);
1579
1751
 
1580
1752
  // Emit a nop in the branch delay slot if required.
@@ -1583,9 +1755,9 @@ void MacroAssembler::BranchAndLink(int16_t offset,
1583
1755
  }
1584
1756
 
1585
1757
 
1586
- void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1587
- const Operand& rt,
1588
- BranchDelaySlot bdslot) {
1758
+ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
1759
+ Register rs, const Operand& rt,
1760
+ BranchDelaySlot bdslot) {
1589
1761
  BRANCH_ARGS_CHECK(cond, rs, rt);
1590
1762
  Register r2 = no_reg;
1591
1763
  Register scratch = at;
@@ -1665,7 +1837,7 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1665
1837
  }
1666
1838
 
1667
1839
 
1668
- void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1840
+ void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
1669
1841
  bal(shifted_branch_offset(L, false));
1670
1842
 
1671
1843
  // Emit a nop in the branch delay slot if required.
@@ -1674,9 +1846,9 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1674
1846
  }
1675
1847
 
1676
1848
 
1677
- void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1678
- const Operand& rt,
1679
- BranchDelaySlot bdslot) {
1849
+ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
1850
+ const Operand& rt,
1851
+ BranchDelaySlot bdslot) {
1680
1852
  BRANCH_ARGS_CHECK(cond, rs, rt);
1681
1853
 
1682
1854
  int32_t offset;
@@ -1772,164 +1944,230 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1772
1944
  }
1773
1945
 
1774
1946
 
1775
- void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
1947
+ void MacroAssembler::Jump(Register target,
1948
+ Condition cond,
1949
+ Register rs,
1950
+ const Operand& rt,
1951
+ BranchDelaySlot bd) {
1776
1952
  BlockTrampolinePoolScope block_trampoline_pool(this);
1777
- if (target.is_reg()) {
1778
- jr(target.rm());
1953
+ if (cond == cc_always) {
1954
+ jr(target);
1779
1955
  } else {
1780
- if (!MustUseReg(target.rmode_)) {
1781
- j(target.imm32_);
1782
- } else {
1783
- li(t9, target);
1784
- jr(t9);
1785
- }
1956
+ BRANCH_ARGS_CHECK(cond, rs, rt);
1957
+ Branch(2, NegateCondition(cond), rs, rt);
1958
+ jr(target);
1786
1959
  }
1787
1960
  // Emit a nop in the branch delay slot if required.
1788
- if (bdslot == PROTECT)
1961
+ if (bd == PROTECT)
1789
1962
  nop();
1790
1963
  }
1791
1964
 
1792
1965
 
1793
- void MacroAssembler::Jump(const Operand& target,
1794
- Condition cond, Register rs, const Operand& rt,
1795
- BranchDelaySlot bdslot) {
1796
- BlockTrampolinePoolScope block_trampoline_pool(this);
1797
- BRANCH_ARGS_CHECK(cond, rs, rt);
1798
- if (target.is_reg()) {
1799
- if (cond == cc_always) {
1800
- jr(target.rm());
1801
- } else {
1802
- Branch(2, NegateCondition(cond), rs, rt);
1803
- jr(target.rm());
1804
- }
1805
- } else { // Not register target.
1806
- if (!MustUseReg(target.rmode_)) {
1807
- if (cond == cc_always) {
1808
- j(target.imm32_);
1809
- } else {
1810
- Branch(2, NegateCondition(cond), rs, rt);
1811
- j(target.imm32_); // Will generate only one instruction.
1812
- }
1813
- } else { // MustUseReg(target).
1814
- li(t9, target);
1815
- if (cond == cc_always) {
1816
- jr(t9);
1817
- } else {
1818
- Branch(2, NegateCondition(cond), rs, rt);
1819
- jr(t9); // Will generate only one instruction.
1820
- }
1821
- }
1822
- }
1823
- // Emit a nop in the branch delay slot if required.
1824
- if (bdslot == PROTECT)
1825
- nop();
1966
+ void MacroAssembler::Jump(intptr_t target,
1967
+ RelocInfo::Mode rmode,
1968
+ Condition cond,
1969
+ Register rs,
1970
+ const Operand& rt,
1971
+ BranchDelaySlot bd) {
1972
+ li(t9, Operand(target, rmode));
1973
+ Jump(t9, cond, rs, rt, bd);
1826
1974
  }
1827
1975
 
1828
1976
 
1829
- int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
1830
- return 4 * kInstrSize;
1977
+ void MacroAssembler::Jump(Address target,
1978
+ RelocInfo::Mode rmode,
1979
+ Condition cond,
1980
+ Register rs,
1981
+ const Operand& rt,
1982
+ BranchDelaySlot bd) {
1983
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
1984
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
1831
1985
  }
1832
1986
 
1833
1987
 
1834
- int MacroAssembler::CallSize(Register reg) {
1835
- return 2 * kInstrSize;
1988
+ void MacroAssembler::Jump(Handle<Code> code,
1989
+ RelocInfo::Mode rmode,
1990
+ Condition cond,
1991
+ Register rs,
1992
+ const Operand& rt,
1993
+ BranchDelaySlot bd) {
1994
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
1995
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
1836
1996
  }
1837
1997
 
1838
1998
 
1839
- // Note: To call gcc-compiled C code on mips, you must call thru t9.
1840
- void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
1841
- BlockTrampolinePoolScope block_trampoline_pool(this);
1842
- if (target.is_reg()) {
1843
- jalr(target.rm());
1844
- } else { // !target.is_reg().
1845
- if (!MustUseReg(target.rmode_)) {
1846
- jal(target.imm32_);
1847
- } else { // MustUseReg(target).
1848
- // Must record previous source positions before the
1849
- // li() generates a new code target.
1850
- positions_recorder()->WriteRecordedPositions();
1851
- li(t9, target);
1852
- jalr(t9);
1853
- }
1999
+ int MacroAssembler::CallSize(Register target,
2000
+ Condition cond,
2001
+ Register rs,
2002
+ const Operand& rt,
2003
+ BranchDelaySlot bd) {
2004
+ int size = 0;
2005
+
2006
+ if (cond == cc_always) {
2007
+ size += 1;
2008
+ } else {
2009
+ size += 3;
1854
2010
  }
1855
- // Emit a nop in the branch delay slot if required.
1856
- if (bdslot == PROTECT)
1857
- nop();
2011
+
2012
+ if (bd == PROTECT)
2013
+ size += 1;
2014
+
2015
+ return size * kInstrSize;
1858
2016
  }
1859
2017
 
1860
2018
 
1861
2019
  // Note: To call gcc-compiled C code on mips, you must call thru t9.
1862
- void MacroAssembler::Call(const Operand& target,
1863
- Condition cond, Register rs, const Operand& rt,
1864
- BranchDelaySlot bdslot) {
2020
+ void MacroAssembler::Call(Register target,
2021
+ Condition cond,
2022
+ Register rs,
2023
+ const Operand& rt,
2024
+ BranchDelaySlot bd) {
1865
2025
  BlockTrampolinePoolScope block_trampoline_pool(this);
1866
- BRANCH_ARGS_CHECK(cond, rs, rt);
1867
- if (target.is_reg()) {
1868
- if (cond == cc_always) {
1869
- jalr(target.rm());
1870
- } else {
1871
- Branch(2, NegateCondition(cond), rs, rt);
1872
- jalr(target.rm());
1873
- }
1874
- } else { // !target.is_reg().
1875
- if (!MustUseReg(target.rmode_)) {
1876
- if (cond == cc_always) {
1877
- jal(target.imm32_);
1878
- } else {
1879
- Branch(2, NegateCondition(cond), rs, rt);
1880
- jal(target.imm32_); // Will generate only one instruction.
1881
- }
1882
- } else { // MustUseReg(target)
1883
- li(t9, target);
1884
- if (cond == cc_always) {
1885
- jalr(t9);
1886
- } else {
1887
- Branch(2, NegateCondition(cond), rs, rt);
1888
- jalr(t9); // Will generate only one instruction.
1889
- }
1890
- }
2026
+ Label start;
2027
+ bind(&start);
2028
+ if (cond == cc_always) {
2029
+ jalr(target);
2030
+ } else {
2031
+ BRANCH_ARGS_CHECK(cond, rs, rt);
2032
+ Branch(2, NegateCondition(cond), rs, rt);
2033
+ jalr(target);
1891
2034
  }
1892
2035
  // Emit a nop in the branch delay slot if required.
1893
- if (bdslot == PROTECT)
2036
+ if (bd == PROTECT)
1894
2037
  nop();
2038
+
2039
+ ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2040
+ SizeOfCodeGeneratedSince(&start));
1895
2041
  }
1896
2042
 
1897
2043
 
1898
- void MacroAssembler::CallWithAstId(Handle<Code> code,
1899
- RelocInfo::Mode rmode,
1900
- unsigned ast_id,
1901
- Condition cond,
1902
- Register r1,
1903
- const Operand& r2) {
1904
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
1905
- ASSERT(ast_id != kNoASTId);
1906
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
1907
- ast_id_for_reloc_info_ = ast_id;
1908
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
2044
+ int MacroAssembler::CallSize(Address target,
2045
+ RelocInfo::Mode rmode,
2046
+ Condition cond,
2047
+ Register rs,
2048
+ const Operand& rt,
2049
+ BranchDelaySlot bd) {
2050
+ int size = CallSize(t9, cond, rs, rt, bd);
2051
+ return size + 2 * kInstrSize;
1909
2052
  }
1910
2053
 
1911
2054
 
1912
- void MacroAssembler::Drop(int count,
2055
+ void MacroAssembler::Call(Address target,
2056
+ RelocInfo::Mode rmode,
1913
2057
  Condition cond,
1914
- Register reg,
1915
- const Operand& op) {
1916
- if (count <= 0) {
1917
- return;
2058
+ Register rs,
2059
+ const Operand& rt,
2060
+ BranchDelaySlot bd) {
2061
+ BlockTrampolinePoolScope block_trampoline_pool(this);
2062
+ Label start;
2063
+ bind(&start);
2064
+ int32_t target_int = reinterpret_cast<int32_t>(target);
2065
+ // Must record previous source positions before the
2066
+ // li() generates a new code target.
2067
+ positions_recorder()->WriteRecordedPositions();
2068
+ li(t9, Operand(target_int, rmode), true);
2069
+ Call(t9, cond, rs, rt, bd);
2070
+ ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2071
+ SizeOfCodeGeneratedSince(&start));
2072
+ }
2073
+
2074
+
2075
+ int MacroAssembler::CallSize(Handle<Code> code,
2076
+ RelocInfo::Mode rmode,
2077
+ unsigned ast_id,
2078
+ Condition cond,
2079
+ Register rs,
2080
+ const Operand& rt,
2081
+ BranchDelaySlot bd) {
2082
+ return CallSize(reinterpret_cast<Address>(code.location()),
2083
+ rmode, cond, rs, rt, bd);
2084
+ }
2085
+
2086
+
2087
+ void MacroAssembler::Call(Handle<Code> code,
2088
+ RelocInfo::Mode rmode,
2089
+ unsigned ast_id,
2090
+ Condition cond,
2091
+ Register rs,
2092
+ const Operand& rt,
2093
+ BranchDelaySlot bd) {
2094
+ BlockTrampolinePoolScope block_trampoline_pool(this);
2095
+ Label start;
2096
+ bind(&start);
2097
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
2098
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2099
+ SetRecordedAstId(ast_id);
2100
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
1918
2101
  }
2102
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2103
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
2104
+ SizeOfCodeGeneratedSince(&start));
2105
+ }
1919
2106
 
1920
- Label skip;
1921
2107
 
1922
- if (cond != al) {
1923
- Branch(&skip, NegateCondition(cond), reg, op);
2108
+ void MacroAssembler::Ret(Condition cond,
2109
+ Register rs,
2110
+ const Operand& rt,
2111
+ BranchDelaySlot bd) {
2112
+ Jump(ra, cond, rs, rt, bd);
2113
+ }
2114
+
2115
+
2116
+ void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2117
+ BlockTrampolinePoolScope block_trampoline_pool(this);
2118
+
2119
+ uint32_t imm28;
2120
+ imm28 = jump_address(L);
2121
+ imm28 &= kImm28Mask;
2122
+ { BlockGrowBufferScope block_buf_growth(this);
2123
+ // Buffer growth (and relocation) must be blocked for internal references
2124
+ // until associated instructions are emitted and available to be patched.
2125
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2126
+ j(imm28);
1924
2127
  }
2128
+ // Emit a nop in the branch delay slot if required.
2129
+ if (bdslot == PROTECT)
2130
+ nop();
2131
+ }
1925
2132
 
1926
- if (count > 0) {
1927
- addiu(sp, sp, count * kPointerSize);
2133
+
2134
+ void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2135
+ BlockTrampolinePoolScope block_trampoline_pool(this);
2136
+
2137
+ uint32_t imm32;
2138
+ imm32 = jump_address(L);
2139
+ { BlockGrowBufferScope block_buf_growth(this);
2140
+ // Buffer growth (and relocation) must be blocked for internal references
2141
+ // until associated instructions are emitted and available to be patched.
2142
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2143
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
2144
+ ori(at, at, (imm32 & kImm16Mask));
1928
2145
  }
2146
+ jr(at);
1929
2147
 
1930
- if (cond != al) {
1931
- bind(&skip);
2148
+ // Emit a nop in the branch delay slot if required.
2149
+ if (bdslot == PROTECT)
2150
+ nop();
2151
+ }
2152
+
2153
+
2154
+ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2155
+ BlockTrampolinePoolScope block_trampoline_pool(this);
2156
+
2157
+ uint32_t imm32;
2158
+ imm32 = jump_address(L);
2159
+ { BlockGrowBufferScope block_buf_growth(this);
2160
+ // Buffer growth (and relocation) must be blocked for internal references
2161
+ // until associated instructions are emitted and available to be patched.
2162
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2163
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
2164
+ ori(at, at, (imm32 & kImm16Mask));
1932
2165
  }
2166
+ jalr(at);
2167
+
2168
+ // Emit a nop in the branch delay slot if required.
2169
+ if (bdslot == PROTECT)
2170
+ nop();
1933
2171
  }
1934
2172
 
1935
2173
 
@@ -1954,6 +2192,29 @@ void MacroAssembler::DropAndRet(int drop,
1954
2192
  }
1955
2193
 
1956
2194
 
2195
+ void MacroAssembler::Drop(int count,
2196
+ Condition cond,
2197
+ Register reg,
2198
+ const Operand& op) {
2199
+ if (count <= 0) {
2200
+ return;
2201
+ }
2202
+
2203
+ Label skip;
2204
+
2205
+ if (cond != al) {
2206
+ Branch(&skip, NegateCondition(cond), reg, op);
2207
+ }
2208
+
2209
+ addiu(sp, sp, count * kPointerSize);
2210
+
2211
+ if (cond != al) {
2212
+ bind(&skip);
2213
+ }
2214
+ }
2215
+
2216
+
2217
+
1957
2218
  void MacroAssembler::Swap(Register reg1,
1958
2219
  Register reg2,
1959
2220
  Register scratch) {
@@ -1974,6 +2235,12 @@ void MacroAssembler::Call(Label* target) {
1974
2235
  }
1975
2236
 
1976
2237
 
2238
+ void MacroAssembler::Push(Handle<Object> handle) {
2239
+ li(at, Operand(handle));
2240
+ push(at);
2241
+ }
2242
+
2243
+
1977
2244
  #ifdef ENABLE_DEBUGGER_SUPPORT
1978
2245
 
1979
2246
  void MacroAssembler::DebugBreak() {
@@ -1993,7 +2260,13 @@ void MacroAssembler::DebugBreak() {
1993
2260
  void MacroAssembler::PushTryHandler(CodeLocation try_location,
1994
2261
  HandlerType type) {
1995
2262
  // Adjust this code if not the case.
1996
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2263
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2264
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2265
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2266
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2267
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2268
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
2269
+
1997
2270
  // The return address is passed in register ra.
1998
2271
  if (try_location == IN_JAVASCRIPT) {
1999
2272
  if (type == TRY_CATCH_HANDLER) {
@@ -2001,19 +2274,16 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
2001
2274
  } else {
2002
2275
  li(t0, Operand(StackHandler::TRY_FINALLY));
2003
2276
  }
2004
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
2005
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
2006
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize
2007
- && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2008
2277
  // Save the current handler as the next handler.
2009
2278
  li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2010
2279
  lw(t1, MemOperand(t2));
2011
2280
 
2012
2281
  addiu(sp, sp, -StackHandlerConstants::kSize);
2013
- sw(ra, MemOperand(sp, 12));
2014
- sw(fp, MemOperand(sp, 8));
2015
- sw(t0, MemOperand(sp, 4));
2016
- sw(t1, MemOperand(sp, 0));
2282
+ sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
2283
+ sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
2284
+ sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
2285
+ sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
2286
+ sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
2017
2287
 
2018
2288
  // Link this handler as the new current one.
2019
2289
  sw(sp, MemOperand(t2));
@@ -2021,11 +2291,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
2021
2291
  } else {
2022
2292
  // Must preserve a0-a3, and s0 (argv).
2023
2293
  ASSERT(try_location == IN_JS_ENTRY);
2024
- ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
2025
- && StackHandlerConstants::kFPOffset == 2 * kPointerSize
2026
- && StackHandlerConstants::kPCOffset == 3 * kPointerSize
2027
- && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2028
-
2029
2294
  // The frame pointer does not point to a JS frame so we save NULL
2030
2295
  // for fp. We expect the code throwing an exception to check fp
2031
2296
  // before dereferencing it to restore the context.
@@ -2035,11 +2300,14 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
2035
2300
  li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2036
2301
  lw(t1, MemOperand(t2));
2037
2302
 
2303
+ ASSERT(Smi::FromInt(0) == 0); // Used for no context.
2304
+
2038
2305
  addiu(sp, sp, -StackHandlerConstants::kSize);
2039
- sw(ra, MemOperand(sp, 12));
2040
- sw(zero_reg, MemOperand(sp, 8));
2041
- sw(t0, MemOperand(sp, 4));
2042
- sw(t1, MemOperand(sp, 0));
2306
+ sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
2307
+ sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
2308
+ sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
2309
+ sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
2310
+ sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
2043
2311
 
2044
2312
  // Link this handler as the new current one.
2045
2313
  sw(sp, MemOperand(t2));
@@ -2048,7 +2316,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
2048
2316
 
2049
2317
 
2050
2318
  void MacroAssembler::PopTryHandler() {
2051
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
2319
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2052
2320
  pop(a1);
2053
2321
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2054
2322
  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
@@ -2061,28 +2329,31 @@ void MacroAssembler::Throw(Register value) {
2061
2329
  Move(v0, value);
2062
2330
 
2063
2331
  // Adjust this code if not the case.
2064
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2332
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2333
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2334
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2335
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2336
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2337
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
2065
2338
 
2066
2339
  // Drop the sp to the top of the handler.
2067
2340
  li(a3, Operand(ExternalReference(Isolate::k_handler_address,
2068
- isolate())));
2341
+ isolate())));
2069
2342
  lw(sp, MemOperand(a3));
2070
2343
 
2071
- // Restore the next handler and frame pointer, discard handler state.
2072
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2344
+ // Restore the next handler.
2073
2345
  pop(a2);
2074
2346
  sw(a2, MemOperand(a3));
2075
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
2076
- MultiPop(a3.bit() | fp.bit());
2077
2347
 
2078
- // Before returning we restore the context from the frame pointer if
2079
- // not NULL. The frame pointer is NULL in the exception handler of a
2080
- // JS entry frame.
2081
- // Set cp to NULL if fp is NULL.
2348
+ // Restore context and frame pointer, discard state (a3).
2349
+ MultiPop(a3.bit() | cp.bit() | fp.bit());
2350
+
2351
+ // If the handler is a JS frame, restore the context to the frame.
2352
+ // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
2353
+ // of them.
2082
2354
  Label done;
2083
- Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
2084
- mov(cp, zero_reg); // In branch delay slot.
2085
- lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2355
+ Branch(&done, eq, fp, Operand(zero_reg));
2356
+ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2086
2357
  bind(&done);
2087
2358
 
2088
2359
  #ifdef DEBUG
@@ -2104,7 +2375,6 @@ void MacroAssembler::Throw(Register value) {
2104
2375
  }
2105
2376
  #endif
2106
2377
 
2107
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
2108
2378
  pop(t9); // 2 instructions: lw, add sp.
2109
2379
  Jump(t9); // 2 instructions: jr, nop (in delay slot).
2110
2380
 
@@ -2119,7 +2389,12 @@ void MacroAssembler::Throw(Register value) {
2119
2389
  void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2120
2390
  Register value) {
2121
2391
  // Adjust this code if not the case.
2122
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2392
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2393
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2394
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2395
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2396
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2397
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
2123
2398
 
2124
2399
  // v0 is expected to hold the exception.
2125
2400
  Move(v0, value);
@@ -2142,7 +2417,6 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2142
2417
  bind(&done);
2143
2418
 
2144
2419
  // Set the top handler address to next handler past the current ENTRY handler.
2145
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2146
2420
  pop(a2);
2147
2421
  sw(a2, MemOperand(a3));
2148
2422
 
@@ -2164,20 +2438,12 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2164
2438
 
2165
2439
  // Stack layout at this point. See also StackHandlerConstants.
2166
2440
  // sp -> state (ENTRY)
2441
+ // cp
2167
2442
  // fp
2168
2443
  // ra
2169
2444
 
2170
- // Discard handler state (a2 is not used) and restore frame pointer.
2171
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
2172
- MultiPop(a2.bit() | fp.bit()); // a2: discarded state.
2173
- // Before returning we restore the context from the frame pointer if
2174
- // not NULL. The frame pointer is NULL in the exception handler of a
2175
- // JS entry frame.
2176
- Label cp_null;
2177
- Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
2178
- mov(cp, zero_reg); // In the branch delay slot.
2179
- lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2180
- bind(&cp_null);
2445
+ // Restore context and frame pointer, discard state (r2).
2446
+ MultiPop(a2.bit() | cp.bit() | fp.bit());
2181
2447
 
2182
2448
  #ifdef DEBUG
2183
2449
  // When emitting debug_code, set ra as return address for the jump.
@@ -2197,7 +2463,6 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2197
2463
  addiu(ra, ra, kOffsetRaBytes);
2198
2464
  }
2199
2465
  #endif
2200
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
2201
2466
  pop(t9); // 2 instructions: lw, add sp.
2202
2467
  Jump(t9); // 2 instructions: jr, nop (in delay slot).
2203
2468
 
@@ -2515,8 +2780,8 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2515
2780
  Register scratch1,
2516
2781
  Register scratch2,
2517
2782
  Label* gc_required) {
2518
- LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
2519
- AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
2783
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
2784
+ AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
2520
2785
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2521
2786
  }
2522
2787
 
@@ -2605,6 +2870,15 @@ void MacroAssembler::CopyBytes(Register src,
2605
2870
  }
2606
2871
 
2607
2872
 
2873
+ void MacroAssembler::CheckFastElements(Register map,
2874
+ Register scratch,
2875
+ Label* fail) {
2876
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
2877
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2878
+ Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
2879
+ }
2880
+
2881
+
2608
2882
  void MacroAssembler::CheckMap(Register obj,
2609
2883
  Register scratch,
2610
2884
  Handle<Map> map,
@@ -2775,9 +3049,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2775
3049
  Handle<Code> adaptor =
2776
3050
  isolate()->builtins()->ArgumentsAdaptorTrampoline();
2777
3051
  if (flag == CALL_FUNCTION) {
2778
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
3052
+ call_wrapper.BeforeCall(CallSize(adaptor));
2779
3053
  SetCallKind(t1, call_kind);
2780
- Call(adaptor, RelocInfo::CODE_TARGET);
3054
+ Call(adaptor);
2781
3055
  call_wrapper.AfterCall();
2782
3056
  jmp(done);
2783
3057
  } else {
@@ -2861,7 +3135,8 @@ void MacroAssembler::InvokeFunction(Register function,
2861
3135
 
2862
3136
  void MacroAssembler::InvokeFunction(JSFunction* function,
2863
3137
  const ParameterCount& actual,
2864
- InvokeFlag flag) {
3138
+ InvokeFlag flag,
3139
+ CallKind call_kind) {
2865
3140
  ASSERT(function->is_compiled());
2866
3141
 
2867
3142
  // Get the function and setup the context.
@@ -2874,7 +3149,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
2874
3149
  if (V8::UseCrankshaft()) {
2875
3150
  UNIMPLEMENTED_MIPS();
2876
3151
  } else {
2877
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
3152
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
2878
3153
  }
2879
3154
  }
2880
3155
 
@@ -2892,8 +3167,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
2892
3167
  Register scratch,
2893
3168
  Label* fail) {
2894
3169
  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2895
- Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
2896
- Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
3170
+ Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3171
+ Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2897
3172
  }
2898
3173
 
2899
3174
 
@@ -2973,7 +3248,7 @@ void MacroAssembler::GetObjectType(Register object,
2973
3248
  void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
2974
3249
  Register r1, const Operand& r2) {
2975
3250
  ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
2976
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
3251
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
2977
3252
  }
2978
3253
 
2979
3254
 
@@ -2984,17 +3259,18 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
2984
3259
  { MaybeObject* maybe_result = stub->TryGetCode();
2985
3260
  if (!maybe_result->ToObject(&result)) return maybe_result;
2986
3261
  }
2987
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
3262
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
3263
+ kNoASTId, cond, r1, r2);
2988
3264
  return result;
2989
3265
  }
2990
3266
 
2991
3267
 
2992
-
2993
3268
  void MacroAssembler::TailCallStub(CodeStub* stub) {
2994
3269
  ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
2995
3270
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
2996
3271
  }
2997
3272
 
3273
+
2998
3274
  MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
2999
3275
  Condition cond,
3000
3276
  Register r1,
@@ -3190,23 +3466,18 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
3190
3466
  ASSERT(!overflow_dst.is(right));
3191
3467
  ASSERT(!left.is(right));
3192
3468
 
3193
- // TODO(kalmard) There must be a way to optimize dst == left and dst == right
3194
- // cases.
3195
-
3196
3469
  if (dst.is(left)) {
3197
- addu(overflow_dst, left, right);
3198
- xor_(dst, overflow_dst, left);
3199
- xor_(scratch, overflow_dst, right);
3200
- and_(scratch, scratch, dst);
3201
- mov(dst, overflow_dst);
3202
- mov(overflow_dst, scratch);
3470
+ mov(scratch, left); // Preserve left.
3471
+ addu(dst, left, right); // Left is overwritten.
3472
+ xor_(scratch, dst, scratch); // Original left.
3473
+ xor_(overflow_dst, dst, right);
3474
+ and_(overflow_dst, overflow_dst, scratch);
3203
3475
  } else if (dst.is(right)) {
3204
- addu(overflow_dst, left, right);
3205
- xor_(dst, overflow_dst, right);
3206
- xor_(scratch, overflow_dst, left);
3207
- and_(scratch, scratch, dst);
3208
- mov(dst, overflow_dst);
3209
- mov(overflow_dst, scratch);
3476
+ mov(scratch, right); // Preserve right.
3477
+ addu(dst, left, right); // Right is overwritten.
3478
+ xor_(scratch, dst, scratch); // Original right.
3479
+ xor_(overflow_dst, dst, left);
3480
+ and_(overflow_dst, overflow_dst, scratch);
3210
3481
  } else {
3211
3482
  addu(dst, left, right);
3212
3483
  xor_(overflow_dst, dst, left);
@@ -3230,23 +3501,18 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
3230
3501
  ASSERT(!scratch.is(left));
3231
3502
  ASSERT(!scratch.is(right));
3232
3503
 
3233
- // TODO(kalmard) There must be a way to optimize dst == left and dst == right
3234
- // cases.
3235
-
3236
3504
  if (dst.is(left)) {
3237
- subu(overflow_dst, left, right);
3238
- xor_(scratch, overflow_dst, left);
3239
- xor_(dst, left, right);
3240
- and_(scratch, scratch, dst);
3241
- mov(dst, overflow_dst);
3242
- mov(overflow_dst, scratch);
3505
+ mov(scratch, left); // Preserve left.
3506
+ subu(dst, left, right); // Left is overwritten.
3507
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
3508
+ xor_(scratch, scratch, right); // scratch is original left.
3509
+ and_(overflow_dst, scratch, overflow_dst);
3243
3510
  } else if (dst.is(right)) {
3244
- subu(overflow_dst, left, right);
3245
- xor_(dst, left, right);
3246
- xor_(scratch, overflow_dst, left);
3247
- and_(scratch, scratch, dst);
3248
- mov(dst, overflow_dst);
3249
- mov(overflow_dst, scratch);
3511
+ mov(scratch, right); // Preserve right.
3512
+ subu(dst, left, right); // Right is overwritten.
3513
+ xor_(overflow_dst, dst, left);
3514
+ xor_(scratch, left, scratch); // Original right.
3515
+ and_(overflow_dst, scratch, overflow_dst);
3250
3516
  } else {
3251
3517
  subu(dst, left, right);
3252
3518
  xor_(overflow_dst, dst, left);
@@ -3315,6 +3581,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
3315
3581
  JumpToExternalReference(ext);
3316
3582
  }
3317
3583
 
3584
+
3318
3585
  MaybeObject* MacroAssembler::TryTailCallExternalReference(
3319
3586
  const ExternalReference& ext, int num_arguments, int result_size) {
3320
3587
  // TODO(1236192): Most runtime routines don't need the number of
@@ -3356,10 +3623,12 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
3356
3623
  GetBuiltinEntry(t9, id);
3357
3624
  if (flag == CALL_FUNCTION) {
3358
3625
  call_wrapper.BeforeCall(CallSize(t9));
3626
+ SetCallKind(t1, CALL_AS_METHOD);
3359
3627
  Call(t9);
3360
3628
  call_wrapper.AfterCall();
3361
3629
  } else {
3362
3630
  ASSERT(flag == JUMP_FUNCTION);
3631
+ SetCallKind(t1, CALL_AS_METHOD);
3363
3632
  Jump(t9);
3364
3633
  }
3365
3634
  }
@@ -3445,6 +3714,8 @@ void MacroAssembler::AssertFastElements(Register elements) {
3445
3714
  lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
3446
3715
  LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3447
3716
  Branch(&ok, eq, elements, Operand(at));
3717
+ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
3718
+ Branch(&ok, eq, elements, Operand(at));
3448
3719
  LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
3449
3720
  Branch(&ok, eq, elements, Operand(at));
3450
3721
  Abort("JSObject with fast elements map has slow elements");
@@ -3509,12 +3780,9 @@ void MacroAssembler::Abort(const char* msg) {
3509
3780
  void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3510
3781
  if (context_chain_length > 0) {
3511
3782
  // Move up the chain of contexts to the context containing the slot.
3512
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
3513
- // Load the function context (which is the incoming, outer context).
3514
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
3783
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3515
3784
  for (int i = 1; i < context_chain_length; i++) {
3516
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
3517
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
3785
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3518
3786
  }
3519
3787
  } else {
3520
3788
  // Slot is in the current function context. Move it into the
@@ -3522,17 +3790,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3522
3790
  // cannot be allowed to destroy the context in esi).
3523
3791
  Move(dst, cp);
3524
3792
  }
3525
-
3526
- // We should not have found a 'with' context by walking the context chain
3527
- // (i.e., the static scope chain and runtime context chain do not agree).
3528
- // A variable occurring in such a scope should have slot type LOOKUP and
3529
- // not CONTEXT.
3530
- if (emit_debug_code()) {
3531
- lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
3532
- Check(eq, "Yo dawg, I heard you liked function contexts "
3533
- "so I put function contexts in all your contexts",
3534
- dst, Operand(t9));
3535
- }
3536
3793
  }
3537
3794
 
3538
3795
 
@@ -3718,6 +3975,7 @@ int MacroAssembler::ActivationFrameAlignment() {
3718
3975
  #endif // defined(V8_HOST_ARCH_MIPS)
3719
3976
  }
3720
3977
 
3978
+
3721
3979
  void MacroAssembler::AssertStackIsAligned() {
3722
3980
  if (emit_debug_code()) {
3723
3981
  const int frame_alignment = ActivationFrameAlignment();