libv8 3.3.10.2

Files changed (700)
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +44 -0
  5. data/Rakefile +73 -0
  6. data/ext/libv8/extconf.rb +9 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +38 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/scons/CHANGES.txt +5334 -0
  12. data/lib/libv8/scons/LICENSE.txt +20 -0
  13. data/lib/libv8/scons/MANIFEST +199 -0
  14. data/lib/libv8/scons/PKG-INFO +13 -0
  15. data/lib/libv8/scons/README.txt +243 -0
  16. data/lib/libv8/scons/RELEASE.txt +98 -0
  17. data/lib/libv8/scons/engine/SCons/Action.py +1241 -0
  18. data/lib/libv8/scons/engine/SCons/Builder.py +877 -0
  19. data/lib/libv8/scons/engine/SCons/CacheDir.py +216 -0
  20. data/lib/libv8/scons/engine/SCons/Conftest.py +793 -0
  21. data/lib/libv8/scons/engine/SCons/Debug.py +220 -0
  22. data/lib/libv8/scons/engine/SCons/Defaults.py +480 -0
  23. data/lib/libv8/scons/engine/SCons/Environment.py +2318 -0
  24. data/lib/libv8/scons/engine/SCons/Errors.py +205 -0
  25. data/lib/libv8/scons/engine/SCons/Executor.py +633 -0
  26. data/lib/libv8/scons/engine/SCons/Job.py +435 -0
  27. data/lib/libv8/scons/engine/SCons/Memoize.py +244 -0
  28. data/lib/libv8/scons/engine/SCons/Node/Alias.py +152 -0
  29. data/lib/libv8/scons/engine/SCons/Node/FS.py +3142 -0
  30. data/lib/libv8/scons/engine/SCons/Node/Python.py +128 -0
  31. data/lib/libv8/scons/engine/SCons/Node/__init__.py +1328 -0
  32. data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +50 -0
  33. data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +50 -0
  34. data/lib/libv8/scons/engine/SCons/Options/ListOption.py +50 -0
  35. data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +50 -0
  36. data/lib/libv8/scons/engine/SCons/Options/PathOption.py +76 -0
  37. data/lib/libv8/scons/engine/SCons/Options/__init__.py +67 -0
  38. data/lib/libv8/scons/engine/SCons/PathList.py +231 -0
  39. data/lib/libv8/scons/engine/SCons/Platform/__init__.py +241 -0
  40. data/lib/libv8/scons/engine/SCons/Platform/aix.py +69 -0
  41. data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +55 -0
  42. data/lib/libv8/scons/engine/SCons/Platform/darwin.py +46 -0
  43. data/lib/libv8/scons/engine/SCons/Platform/hpux.py +46 -0
  44. data/lib/libv8/scons/engine/SCons/Platform/irix.py +44 -0
  45. data/lib/libv8/scons/engine/SCons/Platform/os2.py +58 -0
  46. data/lib/libv8/scons/engine/SCons/Platform/posix.py +263 -0
  47. data/lib/libv8/scons/engine/SCons/Platform/sunos.py +50 -0
  48. data/lib/libv8/scons/engine/SCons/Platform/win32.py +385 -0
  49. data/lib/libv8/scons/engine/SCons/SConf.py +1030 -0
  50. data/lib/libv8/scons/engine/SCons/SConsign.py +383 -0
  51. data/lib/libv8/scons/engine/SCons/Scanner/C.py +132 -0
  52. data/lib/libv8/scons/engine/SCons/Scanner/D.py +73 -0
  53. data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +109 -0
  54. data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +316 -0
  55. data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +48 -0
  56. data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +384 -0
  57. data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +101 -0
  58. data/lib/libv8/scons/engine/SCons/Scanner/RC.py +55 -0
  59. data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +413 -0
  60. data/lib/libv8/scons/engine/SCons/Script/Interactive.py +384 -0
  61. data/lib/libv8/scons/engine/SCons/Script/Main.py +1334 -0
  62. data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +939 -0
  63. data/lib/libv8/scons/engine/SCons/Script/SConscript.py +640 -0
  64. data/lib/libv8/scons/engine/SCons/Script/__init__.py +412 -0
  65. data/lib/libv8/scons/engine/SCons/Sig.py +63 -0
  66. data/lib/libv8/scons/engine/SCons/Subst.py +904 -0
  67. data/lib/libv8/scons/engine/SCons/Taskmaster.py +1017 -0
  68. data/lib/libv8/scons/engine/SCons/Tool/386asm.py +61 -0
  69. data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +67 -0
  70. data/lib/libv8/scons/engine/SCons/Tool/CVS.py +73 -0
  71. data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +246 -0
  72. data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +323 -0
  73. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
  74. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
  75. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +240 -0
  76. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +82 -0
  77. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +391 -0
  78. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +456 -0
  79. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +499 -0
  80. data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +103 -0
  81. data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +137 -0
  82. data/lib/libv8/scons/engine/SCons/Tool/RCS.py +64 -0
  83. data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +64 -0
  84. data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +71 -0
  85. data/lib/libv8/scons/engine/SCons/Tool/__init__.py +681 -0
  86. data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +82 -0
  87. data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +74 -0
  88. data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +80 -0
  89. data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +76 -0
  90. data/lib/libv8/scons/engine/SCons/Tool/applelink.py +71 -0
  91. data/lib/libv8/scons/engine/SCons/Tool/ar.py +63 -0
  92. data/lib/libv8/scons/engine/SCons/Tool/as.py +78 -0
  93. data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +81 -0
  94. data/lib/libv8/scons/engine/SCons/Tool/c++.py +99 -0
  95. data/lib/libv8/scons/engine/SCons/Tool/cc.py +102 -0
  96. data/lib/libv8/scons/engine/SCons/Tool/cvf.py +58 -0
  97. data/lib/libv8/scons/engine/SCons/Tool/default.py +50 -0
  98. data/lib/libv8/scons/engine/SCons/Tool/dmd.py +223 -0
  99. data/lib/libv8/scons/engine/SCons/Tool/dvi.py +64 -0
  100. data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +124 -0
  101. data/lib/libv8/scons/engine/SCons/Tool/dvips.py +94 -0
  102. data/lib/libv8/scons/engine/SCons/Tool/f77.py +62 -0
  103. data/lib/libv8/scons/engine/SCons/Tool/f90.py +62 -0
  104. data/lib/libv8/scons/engine/SCons/Tool/f95.py +63 -0
  105. data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +98 -0
  106. data/lib/libv8/scons/engine/SCons/Tool/fortran.py +62 -0
  107. data/lib/libv8/scons/engine/SCons/Tool/g++.py +90 -0
  108. data/lib/libv8/scons/engine/SCons/Tool/g77.py +73 -0
  109. data/lib/libv8/scons/engine/SCons/Tool/gas.py +53 -0
  110. data/lib/libv8/scons/engine/SCons/Tool/gcc.py +80 -0
  111. data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +64 -0
  112. data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +63 -0
  113. data/lib/libv8/scons/engine/SCons/Tool/gs.py +81 -0
  114. data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +84 -0
  115. data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +53 -0
  116. data/lib/libv8/scons/engine/SCons/Tool/hplink.py +77 -0
  117. data/lib/libv8/scons/engine/SCons/Tool/icc.py +59 -0
  118. data/lib/libv8/scons/engine/SCons/Tool/icl.py +52 -0
  119. data/lib/libv8/scons/engine/SCons/Tool/ifl.py +72 -0
  120. data/lib/libv8/scons/engine/SCons/Tool/ifort.py +88 -0
  121. data/lib/libv8/scons/engine/SCons/Tool/ilink.py +59 -0
  122. data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +60 -0
  123. data/lib/libv8/scons/engine/SCons/Tool/install.py +229 -0
  124. data/lib/libv8/scons/engine/SCons/Tool/intelc.py +482 -0
  125. data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +67 -0
  126. data/lib/libv8/scons/engine/SCons/Tool/jar.py +110 -0
  127. data/lib/libv8/scons/engine/SCons/Tool/javac.py +230 -0
  128. data/lib/libv8/scons/engine/SCons/Tool/javah.py +137 -0
  129. data/lib/libv8/scons/engine/SCons/Tool/latex.py +79 -0
  130. data/lib/libv8/scons/engine/SCons/Tool/lex.py +97 -0
  131. data/lib/libv8/scons/engine/SCons/Tool/link.py +121 -0
  132. data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +112 -0
  133. data/lib/libv8/scons/engine/SCons/Tool/m4.py +63 -0
  134. data/lib/libv8/scons/engine/SCons/Tool/masm.py +77 -0
  135. data/lib/libv8/scons/engine/SCons/Tool/midl.py +88 -0
  136. data/lib/libv8/scons/engine/SCons/Tool/mingw.py +158 -0
  137. data/lib/libv8/scons/engine/SCons/Tool/mslib.py +64 -0
  138. data/lib/libv8/scons/engine/SCons/Tool/mslink.py +266 -0
  139. data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +50 -0
  140. data/lib/libv8/scons/engine/SCons/Tool/msvc.py +268 -0
  141. data/lib/libv8/scons/engine/SCons/Tool/msvs.py +1388 -0
  142. data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +207 -0
  143. data/lib/libv8/scons/engine/SCons/Tool/mwld.py +107 -0
  144. data/lib/libv8/scons/engine/SCons/Tool/nasm.py +72 -0
  145. data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +312 -0
  146. data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
  147. data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +527 -0
  148. data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +365 -0
  149. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
  150. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
  151. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
  152. data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
  153. data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +44 -0
  154. data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +44 -0
  155. data/lib/libv8/scons/engine/SCons/Tool/pdf.py +78 -0
  156. data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +83 -0
  157. data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +108 -0
  158. data/lib/libv8/scons/engine/SCons/Tool/qt.py +336 -0
  159. data/lib/libv8/scons/engine/SCons/Tool/rmic.py +120 -0
  160. data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +70 -0
  161. data/lib/libv8/scons/engine/SCons/Tool/rpm.py +132 -0
  162. data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +68 -0
  163. data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +58 -0
  164. data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +53 -0
  165. data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +63 -0
  166. data/lib/libv8/scons/engine/SCons/Tool/sunar.py +67 -0
  167. data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +142 -0
  168. data/lib/libv8/scons/engine/SCons/Tool/suncc.py +58 -0
  169. data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +63 -0
  170. data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +64 -0
  171. data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +64 -0
  172. data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +77 -0
  173. data/lib/libv8/scons/engine/SCons/Tool/swig.py +182 -0
  174. data/lib/libv8/scons/engine/SCons/Tool/tar.py +73 -0
  175. data/lib/libv8/scons/engine/SCons/Tool/tex.py +813 -0
  176. data/lib/libv8/scons/engine/SCons/Tool/textfile.py +175 -0
  177. data/lib/libv8/scons/engine/SCons/Tool/tlib.py +53 -0
  178. data/lib/libv8/scons/engine/SCons/Tool/wix.py +99 -0
  179. data/lib/libv8/scons/engine/SCons/Tool/yacc.py +130 -0
  180. data/lib/libv8/scons/engine/SCons/Tool/zip.py +99 -0
  181. data/lib/libv8/scons/engine/SCons/Util.py +1492 -0
  182. data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +89 -0
  183. data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +103 -0
  184. data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +135 -0
  185. data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +106 -0
  186. data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +147 -0
  187. data/lib/libv8/scons/engine/SCons/Variables/__init__.py +312 -0
  188. data/lib/libv8/scons/engine/SCons/Warnings.py +246 -0
  189. data/lib/libv8/scons/engine/SCons/__init__.py +49 -0
  190. data/lib/libv8/scons/engine/SCons/compat/__init__.py +237 -0
  191. data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +150 -0
  192. data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +45 -0
  193. data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +45 -0
  194. data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +76 -0
  195. data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +45 -0
  196. data/lib/libv8/scons/engine/SCons/compat/_scons_sets.py +563 -0
  197. data/lib/libv8/scons/engine/SCons/compat/_scons_subprocess.py +1281 -0
  198. data/lib/libv8/scons/engine/SCons/cpp.py +589 -0
  199. data/lib/libv8/scons/engine/SCons/dblite.py +251 -0
  200. data/lib/libv8/scons/engine/SCons/exitfuncs.py +77 -0
  201. data/lib/libv8/scons/os_spawnv_fix.diff +83 -0
  202. data/lib/libv8/scons/scons-time.1 +1017 -0
  203. data/lib/libv8/scons/scons.1 +15219 -0
  204. data/lib/libv8/scons/sconsign.1 +208 -0
  205. data/lib/libv8/scons/script/scons +196 -0
  206. data/lib/libv8/scons/script/scons-time +1544 -0
  207. data/lib/libv8/scons/script/scons.bat +31 -0
  208. data/lib/libv8/scons/script/sconsign +513 -0
  209. data/lib/libv8/scons/setup.cfg +6 -0
  210. data/lib/libv8/scons/setup.py +425 -0
  211. data/lib/libv8/v8/.gitignore +35 -0
  212. data/lib/libv8/v8/AUTHORS +44 -0
  213. data/lib/libv8/v8/ChangeLog +2839 -0
  214. data/lib/libv8/v8/LICENSE +52 -0
  215. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  216. data/lib/libv8/v8/LICENSE.v8 +26 -0
  217. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  218. data/lib/libv8/v8/SConstruct +1478 -0
  219. data/lib/libv8/v8/build/README.txt +49 -0
  220. data/lib/libv8/v8/build/all.gyp +18 -0
  221. data/lib/libv8/v8/build/armu.gypi +32 -0
  222. data/lib/libv8/v8/build/common.gypi +144 -0
  223. data/lib/libv8/v8/build/gyp_v8 +145 -0
  224. data/lib/libv8/v8/include/v8-debug.h +395 -0
  225. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  226. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  227. data/lib/libv8/v8/include/v8-testing.h +104 -0
  228. data/lib/libv8/v8/include/v8.h +4124 -0
  229. data/lib/libv8/v8/include/v8stdint.h +53 -0
  230. data/lib/libv8/v8/preparser/SConscript +38 -0
  231. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  232. data/lib/libv8/v8/src/SConscript +368 -0
  233. data/lib/libv8/v8/src/accessors.cc +767 -0
  234. data/lib/libv8/v8/src/accessors.h +123 -0
  235. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  236. data/lib/libv8/v8/src/allocation.cc +122 -0
  237. data/lib/libv8/v8/src/allocation.h +143 -0
  238. data/lib/libv8/v8/src/api.cc +5845 -0
  239. data/lib/libv8/v8/src/api.h +574 -0
  240. data/lib/libv8/v8/src/apinatives.js +110 -0
  241. data/lib/libv8/v8/src/apiutils.h +73 -0
  242. data/lib/libv8/v8/src/arguments.h +118 -0
  243. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  244. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  245. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  246. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  247. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  248. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  249. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  250. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  251. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  252. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  253. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  254. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  255. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  256. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  257. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  258. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  259. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  260. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  261. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  262. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  263. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  264. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  265. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  266. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  267. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  268. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  269. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  270. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  271. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  272. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  273. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  274. data/lib/libv8/v8/src/array.js +1366 -0
  275. data/lib/libv8/v8/src/assembler.cc +1207 -0
  276. data/lib/libv8/v8/src/assembler.h +858 -0
  277. data/lib/libv8/v8/src/ast-inl.h +112 -0
  278. data/lib/libv8/v8/src/ast.cc +1146 -0
  279. data/lib/libv8/v8/src/ast.h +2188 -0
  280. data/lib/libv8/v8/src/atomicops.h +167 -0
  281. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  282. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  283. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  284. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  285. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  286. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  287. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  288. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  289. data/lib/libv8/v8/src/bignum.cc +768 -0
  290. data/lib/libv8/v8/src/bignum.h +140 -0
  291. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  292. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  293. data/lib/libv8/v8/src/builtins.cc +1707 -0
  294. data/lib/libv8/v8/src/builtins.h +371 -0
  295. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  296. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  297. data/lib/libv8/v8/src/cached-powers.h +65 -0
  298. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  299. data/lib/libv8/v8/src/char-predicates.h +67 -0
  300. data/lib/libv8/v8/src/checks.cc +110 -0
  301. data/lib/libv8/v8/src/checks.h +296 -0
  302. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  303. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  304. data/lib/libv8/v8/src/circular-queue.h +103 -0
  305. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  306. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  307. data/lib/libv8/v8/src/code.h +70 -0
  308. data/lib/libv8/v8/src/codegen.cc +231 -0
  309. data/lib/libv8/v8/src/codegen.h +84 -0
  310. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  311. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  312. data/lib/libv8/v8/src/compiler.cc +786 -0
  313. data/lib/libv8/v8/src/compiler.h +312 -0
  314. data/lib/libv8/v8/src/contexts.cc +347 -0
  315. data/lib/libv8/v8/src/contexts.h +391 -0
  316. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  317. data/lib/libv8/v8/src/conversions.cc +1131 -0
  318. data/lib/libv8/v8/src/conversions.h +135 -0
  319. data/lib/libv8/v8/src/counters.cc +93 -0
  320. data/lib/libv8/v8/src/counters.h +254 -0
  321. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  322. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  323. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  324. data/lib/libv8/v8/src/cpu.h +69 -0
  325. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  326. data/lib/libv8/v8/src/d8-debug.h +158 -0
  327. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  328. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  329. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  330. data/lib/libv8/v8/src/d8.cc +803 -0
  331. data/lib/libv8/v8/src/d8.gyp +91 -0
  332. data/lib/libv8/v8/src/d8.h +235 -0
  333. data/lib/libv8/v8/src/d8.js +2798 -0
  334. data/lib/libv8/v8/src/data-flow.cc +66 -0
  335. data/lib/libv8/v8/src/data-flow.h +205 -0
  336. data/lib/libv8/v8/src/date.js +1103 -0
  337. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  338. data/lib/libv8/v8/src/dateparser.cc +178 -0
  339. data/lib/libv8/v8/src/dateparser.h +266 -0
  340. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  341. data/lib/libv8/v8/src/debug-agent.h +129 -0
  342. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  343. data/lib/libv8/v8/src/debug.cc +3165 -0
  344. data/lib/libv8/v8/src/debug.h +1057 -0
  345. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  346. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  347. data/lib/libv8/v8/src/disasm.h +80 -0
  348. data/lib/libv8/v8/src/disassembler.cc +343 -0
  349. data/lib/libv8/v8/src/disassembler.h +58 -0
  350. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  351. data/lib/libv8/v8/src/diy-fp.h +117 -0
  352. data/lib/libv8/v8/src/double.h +238 -0
  353. data/lib/libv8/v8/src/dtoa.cc +103 -0
  354. data/lib/libv8/v8/src/dtoa.h +85 -0
  355. data/lib/libv8/v8/src/execution.cc +849 -0
  356. data/lib/libv8/v8/src/execution.h +297 -0
  357. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  358. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  359. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  360. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  361. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  362. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  363. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  364. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  365. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  366. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  367. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  368. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  369. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  370. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  371. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  372. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  373. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  374. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  375. data/lib/libv8/v8/src/factory.cc +1222 -0
  376. data/lib/libv8/v8/src/factory.h +442 -0
  377. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  378. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  379. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  380. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  381. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  382. data/lib/libv8/v8/src/flags.cc +551 -0
  383. data/lib/libv8/v8/src/flags.h +79 -0
  384. data/lib/libv8/v8/src/frames-inl.h +247 -0
  385. data/lib/libv8/v8/src/frames.cc +1243 -0
  386. data/lib/libv8/v8/src/frames.h +870 -0
  387. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  388. data/lib/libv8/v8/src/full-codegen.h +771 -0
  389. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  390. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  391. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  392. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  393. data/lib/libv8/v8/src/global-handles.cc +665 -0
  394. data/lib/libv8/v8/src/global-handles.h +284 -0
  395. data/lib/libv8/v8/src/globals.h +325 -0
  396. data/lib/libv8/v8/src/handles-inl.h +177 -0
  397. data/lib/libv8/v8/src/handles.cc +987 -0
  398. data/lib/libv8/v8/src/handles.h +382 -0
  399. data/lib/libv8/v8/src/hashmap.cc +230 -0
  400. data/lib/libv8/v8/src/hashmap.h +123 -0
  401. data/lib/libv8/v8/src/heap-inl.h +704 -0
  402. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  403. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  404. data/lib/libv8/v8/src/heap.cc +5930 -0
  405. data/lib/libv8/v8/src/heap.h +2268 -0
  406. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  407. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  408. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  409. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  410. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  411. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  412. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  413. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  414. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  415. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  416. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  417. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  418. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  419. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  420. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  421. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  422. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  423. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  424. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  425. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  426. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  427. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  428. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  429. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  430. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  431. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  432. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  433. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  434. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  435. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  436. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  437. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  438. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  439. data/lib/libv8/v8/src/ic-inl.h +130 -0
  440. data/lib/libv8/v8/src/ic.cc +2577 -0
  441. data/lib/libv8/v8/src/ic.h +736 -0
  442. data/lib/libv8/v8/src/inspector.cc +63 -0
  443. data/lib/libv8/v8/src/inspector.h +62 -0
  444. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  445. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  446. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  447. data/lib/libv8/v8/src/isolate.cc +1869 -0
  448. data/lib/libv8/v8/src/isolate.h +1382 -0
  449. data/lib/libv8/v8/src/json-parser.cc +504 -0
  450. data/lib/libv8/v8/src/json-parser.h +161 -0
  451. data/lib/libv8/v8/src/json.js +342 -0
  452. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  453. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  454. data/lib/libv8/v8/src/list-inl.h +212 -0
  455. data/lib/libv8/v8/src/list.h +174 -0
  456. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  457. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  458. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  459. data/lib/libv8/v8/src/lithium.cc +190 -0
  460. data/lib/libv8/v8/src/lithium.h +597 -0
  461. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  462. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  463. data/lib/libv8/v8/src/liveedit.h +180 -0
  464. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  465. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  466. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  467. data/lib/libv8/v8/src/log-inl.h +59 -0
  468. data/lib/libv8/v8/src/log-utils.cc +428 -0
  469. data/lib/libv8/v8/src/log-utils.h +231 -0
  470. data/lib/libv8/v8/src/log.cc +1993 -0
  471. data/lib/libv8/v8/src/log.h +476 -0
  472. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  473. data/lib/libv8/v8/src/macros.py +178 -0
  474. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  475. data/lib/libv8/v8/src/mark-compact.h +506 -0
  476. data/lib/libv8/v8/src/math.js +264 -0
  477. data/lib/libv8/v8/src/messages.cc +179 -0
  478. data/lib/libv8/v8/src/messages.h +113 -0
  479. data/lib/libv8/v8/src/messages.js +1096 -0
  480. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  481. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  482. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  483. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  484. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  485. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  486. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  487. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  488. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  489. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  490. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  491. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  492. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  493. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  494. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  495. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  496. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  497. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  498. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  499. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  500. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  501. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  502. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  503. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  504. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  505. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  506. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  507. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  508. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  509. data/lib/libv8/v8/src/natives.h +64 -0
  510. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  511. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  512. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  513. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  514. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  515. data/lib/libv8/v8/src/objects.cc +10585 -0
  516. data/lib/libv8/v8/src/objects.h +6838 -0
  517. data/lib/libv8/v8/src/parser.cc +4997 -0
  518. data/lib/libv8/v8/src/parser.h +765 -0
  519. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  520. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  521. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  522. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  523. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  524. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  525. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  526. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  527. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  528. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  529. data/lib/libv8/v8/src/platform-tls.h +50 -0
  530. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  531. data/lib/libv8/v8/src/platform.h +667 -0
  532. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  533. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  534. data/lib/libv8/v8/src/preparse-data.h +225 -0
  535. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  536. data/lib/libv8/v8/src/preparser.cc +1450 -0
  537. data/lib/libv8/v8/src/preparser.h +493 -0
  538. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  539. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  540. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  541. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  542. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  543. data/lib/libv8/v8/src/property.cc +105 -0
  544. data/lib/libv8/v8/src/property.h +365 -0
  545. data/lib/libv8/v8/src/proxy.js +83 -0
  546. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  547. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  548. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  549. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  550. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  551. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  552. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  553. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  554. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  555. data/lib/libv8/v8/src/regexp.js +483 -0
  556. data/lib/libv8/v8/src/rewriter.cc +360 -0
  557. data/lib/libv8/v8/src/rewriter.h +50 -0
  558. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  559. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  560. data/lib/libv8/v8/src/runtime.cc +12227 -0
  561. data/lib/libv8/v8/src/runtime.h +652 -0
  562. data/lib/libv8/v8/src/runtime.js +649 -0
  563. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  564. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  565. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  566. data/lib/libv8/v8/src/scanner-base.h +670 -0
  567. data/lib/libv8/v8/src/scanner.cc +345 -0
  568. data/lib/libv8/v8/src/scanner.h +146 -0
  569. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  570. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  571. data/lib/libv8/v8/src/scopes.cc +1150 -0
  572. data/lib/libv8/v8/src/scopes.h +507 -0
  573. data/lib/libv8/v8/src/serialize.cc +1574 -0
  574. data/lib/libv8/v8/src/serialize.h +589 -0
  575. data/lib/libv8/v8/src/shell.h +55 -0
  576. data/lib/libv8/v8/src/simulator.h +43 -0
  577. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  578. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  579. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  580. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  581. data/lib/libv8/v8/src/snapshot.h +91 -0
  582. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  583. data/lib/libv8/v8/src/spaces.cc +3145 -0
  584. data/lib/libv8/v8/src/spaces.h +2369 -0
  585. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  586. data/lib/libv8/v8/src/splay-tree.h +205 -0
  587. data/lib/libv8/v8/src/string-search.cc +41 -0
  588. data/lib/libv8/v8/src/string-search.h +568 -0
  589. data/lib/libv8/v8/src/string-stream.cc +592 -0
  590. data/lib/libv8/v8/src/string-stream.h +191 -0
  591. data/lib/libv8/v8/src/string.js +994 -0
  592. data/lib/libv8/v8/src/strtod.cc +440 -0
  593. data/lib/libv8/v8/src/strtod.h +40 -0
  594. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  595. data/lib/libv8/v8/src/stub-cache.h +924 -0
  596. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  597. data/lib/libv8/v8/src/token.cc +63 -0
  598. data/lib/libv8/v8/src/token.h +288 -0
  599. data/lib/libv8/v8/src/type-info.cc +507 -0
  600. data/lib/libv8/v8/src/type-info.h +272 -0
  601. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  602. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  603. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  604. data/lib/libv8/v8/src/unicode.cc +1624 -0
  605. data/lib/libv8/v8/src/unicode.h +280 -0
  606. data/lib/libv8/v8/src/uri.js +408 -0
  607. data/lib/libv8/v8/src/utils-inl.h +48 -0
  608. data/lib/libv8/v8/src/utils.cc +371 -0
  609. data/lib/libv8/v8/src/utils.h +800 -0
  610. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  611. data/lib/libv8/v8/src/v8-counters.h +314 -0
  612. data/lib/libv8/v8/src/v8.cc +213 -0
  613. data/lib/libv8/v8/src/v8.h +131 -0
  614. data/lib/libv8/v8/src/v8checks.h +64 -0
  615. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  616. data/lib/libv8/v8/src/v8globals.h +512 -0
  617. data/lib/libv8/v8/src/v8memory.h +82 -0
  618. data/lib/libv8/v8/src/v8natives.js +1310 -0
  619. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  620. data/lib/libv8/v8/src/v8threads.cc +464 -0
  621. data/lib/libv8/v8/src/v8threads.h +165 -0
  622. data/lib/libv8/v8/src/v8utils.h +319 -0
  623. data/lib/libv8/v8/src/variables.cc +114 -0
  624. data/lib/libv8/v8/src/variables.h +167 -0
  625. data/lib/libv8/v8/src/version.cc +116 -0
  626. data/lib/libv8/v8/src/version.h +68 -0
  627. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  628. data/lib/libv8/v8/src/vm-state.h +71 -0
  629. data/lib/libv8/v8/src/win32-headers.h +96 -0
  630. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  631. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  632. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  633. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  634. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  635. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  636. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  637. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  638. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  639. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  640. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  641. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  642. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  643. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  644. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  645. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  646. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  647. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  648. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  649. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  650. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  651. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  652. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  653. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  654. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  655. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  656. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  657. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  658. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  659. data/lib/libv8/v8/src/zone-inl.h +140 -0
  660. data/lib/libv8/v8/src/zone.cc +196 -0
  661. data/lib/libv8/v8/src/zone.h +240 -0
  662. data/lib/libv8/v8/tools/codemap.js +265 -0
  663. data/lib/libv8/v8/tools/consarray.js +93 -0
  664. data/lib/libv8/v8/tools/csvparser.js +78 -0
  665. data/lib/libv8/v8/tools/disasm.py +92 -0
  666. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  667. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  668. data/lib/libv8/v8/tools/gcmole/README +62 -0
  669. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  670. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  671. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  672. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  673. data/lib/libv8/v8/tools/grokdump.py +841 -0
  674. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  675. data/lib/libv8/v8/tools/js2c.py +364 -0
  676. data/lib/libv8/v8/tools/jsmin.py +280 -0
  677. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  678. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  679. data/lib/libv8/v8/tools/logreader.js +185 -0
  680. data/lib/libv8/v8/tools/mac-nm +18 -0
  681. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  682. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  683. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  684. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  685. data/lib/libv8/v8/tools/presubmit.py +305 -0
  686. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  687. data/lib/libv8/v8/tools/profile.js +751 -0
  688. data/lib/libv8/v8/tools/profile_view.js +219 -0
  689. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  690. data/lib/libv8/v8/tools/splaytree.js +316 -0
  691. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  692. data/lib/libv8/v8/tools/test.py +1510 -0
  693. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  694. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  695. data/lib/libv8/v8/tools/utils.py +96 -0
  696. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  697. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  698. data/lib/libv8/version.rb +4 -0
  699. data/libv8.gemspec +31 -0
  700. metadata +800 -0
data/lib/libv8/v8/src/mips/simulator-mips.h
@@ -0,0 +1,401 @@
+ // Copyright 2011 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ // Declares a Simulator for MIPS instructions if we are not generating a native
+ // MIPS binary. This Simulator allows us to run and debug MIPS code generation
+ // on regular desktop machines.
+ // V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+ // which will start execution in the Simulator or forward to the real entry
+ // on a MIPS HW platform.
+
+ #ifndef V8_MIPS_SIMULATOR_MIPS_H_
+ #define V8_MIPS_SIMULATOR_MIPS_H_
+
+ #include "allocation.h"
+ #include "constants-mips.h"
+
+ #if !defined(USE_SIMULATOR)
+ // Running without a simulator on a native mips platform.
+
+ namespace v8 {
+ namespace internal {
+
+ // When running without a simulator we call the entry directly.
+ #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ entry(p0, p1, p2, p3, p4)
+
+ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
+ void*, int*, Address, int, Isolate*);
+
+
+ // Call the generated regexp code directly. The code at the entry address
+ // should act as a function matching the type mips_regexp_matcher.
+ // The fifth argument is a dummy that reserves the space used for
+ // the return address added by the ExitFrame in native calls.
+ #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+
+ #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+ // The stack limit beyond which we will throw stack overflow errors in
+ // generated code. Because generated code on mips uses the C stack, we
+ // just use the C stack limit.
+ class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+ };
+
+ } } // namespace v8::internal
+
+ // Calculates the stack limit beyond which we will throw stack overflow errors.
+ // This macro must be called from a C++ method. It relies on being able to take
+ // the address of "this" to get a value on the current execution stack and then
+ // calculates the stack limit based on that value.
+ // NOTE: The check for overflow is not safe as there is no guarantee that the
+ // running thread has its stack in all memory up to address 0x00000000.
+ #define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) >= limit ? \
+ reinterpret_cast<uintptr_t>(this) - limit : 0)
+
+ #else // !defined(USE_SIMULATOR)
+ // Running with a simulator.
+
+ #include "hashmap.h"
+ #include "assembler.h"
+
+ namespace v8 {
+ namespace internal {
+
+ // -----------------------------------------------------------------------------
+ // Utility functions
+
+ class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+ };
+
+ class Simulator {
+ public:
+ friend class MipsDebugger;
+
+ // Registers are declared in order. See SMRL chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ // Generated code will always use doubles. So we will only use even registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int32_t value);
+ int32_t get_register(int reg) const;
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+ int32_t get_fpu_register(int fpureg) const;
+ int64_t get_fpu_register_long(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // Call on program start.
+ static void Initialize(Isolate* isolate);
+
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ int32_t Call(byte* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int32_t addr);
+ inline int32_t ReadB(int32_t addr);
+ inline void WriteB(int32_t addr, uint8_t value);
+ inline void WriteB(int32_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+ inline int16_t ReadH(int32_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+
+ inline int ReadW(int32_t addr, Instruction* instr);
+ inline void WriteW(int32_t addr, int value, Instruction* instr);
+
+ inline double ReadD(int32_t addr, Instruction* instr);
+ inline void WriteD(int32_t addr, double value, Instruction* instr);
+
+ // Operations depending on endianness.
+ // Get Double Higher / Lower word.
+ inline int32_t GetDoubleHIW(double* addr);
+ inline int32_t GetDoubleLOW(double* addr);
+ // Set Double Higher / Lower word.
+ inline int32_t SetDoubleHIW(double* addr);
+ inline int32_t SetDoubleLOW(double* addr);
+
+ // Executing is handled based on the instruction type.
+ void DecodeTypeRegister(Instruction* instr);
+
+ // Helper function for DecodeTypeRegister.
+ void ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt);
+
+ void DecodeTypeImmediate(Instruction* instr);
+ void DecodeTypeJump(Instruction* instr);
+
+ // Used for breakpoints and traps.
+ void SoftwareInterrupt(Instruction* instr);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void BranchDelayInstructionDecode(Instruction* instr) {
+ if (instr->IsForbiddenInBranchDelay()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Error: Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
+ }
+ InstructionDecode(instr);
+ }
+
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void SignalExceptions();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+
+ // For use in calls that take double value arguments.
+ void GetFpArgs(double* x, double* y);
+ void GetFpArgs(double* x);
+ void GetFpArgs(double* x, int32_t* y);
+ void SetFpResult(const double& result);
+
+
+ // Architecture state.
+ // Registers.
+ int32_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ static const size_t stack_size_ = 1 * 1024 * 1024;
+ char* stack_;
+ bool pc_modified_;
+ int icount_;
+ int break_count_;
+
+ // Icache simulation.
+ v8::internal::HashMap* i_cache_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+ };
+
+
+ // When running with the simulator, transition into simulated execution at
+ // this point.
+ #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+ #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+
+ #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+ // The simulator has its own stack. Thus it has a different stack limit from
+ // the C-based native code. Setting the c_limit to indicate a very small
+ // stack causes stack overflow errors, since the simulator ignores the input.
+ // This is unlikely to be an issue in practice, though it might cause testing
+ // trouble down the line.
+ class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+ };
+
+ } } // namespace v8::internal
+
+ #endif // !defined(USE_SIMULATOR)
+ #endif // V8_MIPS_SIMULATOR_MIPS_H_
+
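The point of the twin CALL_GENERATED_CODE definitions in the header above is that a single call site compiles either to a direct jump into JIT-produced code (native MIPS) or to a trip through Simulator::Call (USE_SIMULATOR builds). A minimal, self-contained sketch of that pattern follows; Entry, GeneratedStub, and the toy Simulator are simplified stand-ins invented for illustration, not V8's real definitions.

#include <cstdio>

typedef int (*Entry)(int, int, int, int, int);

// Stand-in for a JIT-produced code object; here it is ordinary C++.
static int GeneratedStub(int p0, int p1, int p2, int p3, int p4) {
  return p0 + p1 + p2 + p3 + p4;
}

#if !defined(USE_SIMULATOR)
// Native configuration: invoke the entry point directly, as in the header.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  (entry)(p0, p1, p2, p3, p4)
#else
// V8's simulator branch forwards to Simulator::current(...)->Call(
// FUNCTION_ADDR(entry), 5, ...); this toy version just dispatches so the
// sketch stays self-contained and compiles either way.
struct Simulator {
  static int Call(Entry entry, int p0, int p1, int p2, int p3, int p4) {
    return entry(p0, p1, p2, p3, p4);
  }
};
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  Simulator::Call(entry, p0, p1, p2, p3, p4)
#endif

int main() {
  Entry entry = GeneratedStub;
  // The call site is identical in both configurations.
  std::printf("%d\n", CALL_GENERATED_CODE(entry, 1, 2, 3, 4, 5));  // prints 15
  return 0;
}

Keeping the configuration switch inside the macro is what lets the rest of the codebase (and the matching SimulatorStack class) stay oblivious to whether it is running on MIPS hardware or on a desktop host.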
data/lib/libv8/v8/src/mips/stub-cache-mips.cc
@@ -0,0 +1,4285 @@
1
+ // Copyright 2011 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #include "v8.h"
29
+
30
+ #if defined(V8_TARGET_ARCH_MIPS)
31
+
32
+ #include "ic-inl.h"
33
+ #include "codegen.h"
34
+ #include "stub-cache.h"
35
+
36
+ namespace v8 {
37
+ namespace internal {
38
+
39
+ #define __ ACCESS_MASM(masm)
40
+
41
+
42
+ static void ProbeTable(Isolate* isolate,
43
+ MacroAssembler* masm,
44
+ Code::Flags flags,
45
+ StubCache::Table table,
46
+ Register name,
47
+ Register offset,
48
+ Register scratch,
49
+ Register scratch2) {
50
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
51
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
52
+
53
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
54
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
55
+
56
+ // Check the relative positions of the address fields.
57
+ ASSERT(value_off_addr > key_off_addr);
58
+ ASSERT((value_off_addr - key_off_addr) % 4 == 0);
59
+ ASSERT((value_off_addr - key_off_addr) < (256 * 4));
60
+
61
+ Label miss;
62
+ Register offsets_base_addr = scratch;
63
+
64
+ // Check that the key in the entry matches the name.
65
+ __ li(offsets_base_addr, Operand(key_offset));
66
+ __ sll(scratch2, offset, 1);
67
+ __ addu(scratch2, offsets_base_addr, scratch2);
68
+ __ lw(scratch2, MemOperand(scratch2));
69
+ __ Branch(&miss, ne, name, Operand(scratch2));
70
+
71
+ // Get the code entry from the cache.
72
+ __ Addu(offsets_base_addr, offsets_base_addr,
73
+ Operand(value_off_addr - key_off_addr));
74
+ __ sll(scratch2, offset, 1);
75
+ __ addu(scratch2, offsets_base_addr, scratch2);
76
+ __ lw(scratch2, MemOperand(scratch2));
77
+
78
+ // Check that the flags match what we're looking for.
79
+ __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
80
+ __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
81
+ __ Branch(&miss, ne, scratch2, Operand(flags));
82
+
83
+ // Re-load code entry from cache.
84
+ __ sll(offset, offset, 1);
85
+ __ addu(offset, offset, offsets_base_addr);
86
+ __ lw(offset, MemOperand(offset));
87
+
88
+ // Jump to the first instruction in the code stub.
89
+ __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
90
+ __ Jump(offset);
91
+
92
+ // Miss: fall through.
93
+ __ bind(&miss);
94
+ }
95
+
96
+
97
+ // Helper function used to check that the dictionary doesn't contain
+ // the property. This function may return false negatives, so miss_label
+ // must always call a backup property check that is complete.
+ // This function is safe to call if the receiver has fast properties.
+ // Name must be a symbol and receiver must be a heap object.
102
+ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
103
+ MacroAssembler* masm,
104
+ Label* miss_label,
105
+ Register receiver,
106
+ String* name,
107
+ Register scratch0,
108
+ Register scratch1) {
109
+ ASSERT(name->IsSymbol());
110
+ Counters* counters = masm->isolate()->counters();
111
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
112
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
113
+
114
+ Label done;
115
+
116
+ const int kInterceptorOrAccessCheckNeededMask =
117
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
118
+
119
+ // Bail out if the receiver has a named interceptor or requires access checks.
120
+ Register map = scratch1;
121
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
122
+ __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
123
+ __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
124
+ __ Branch(miss_label, ne, at, Operand(zero_reg));
125
+
126
+
127
+ // Check that receiver is a JSObject.
128
+ __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
129
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
130
+
131
+ // Load properties array.
132
+ Register properties = scratch0;
133
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
134
+ // Check that the properties array is a dictionary.
135
+ __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
136
+ Register tmp = properties;
137
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
138
+ __ Branch(miss_label, ne, map, Operand(tmp));
139
+
140
+ // Restore the temporarily used register.
141
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
142
+
143
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
144
+ masm,
145
+ miss_label,
146
+ &done,
147
+ receiver,
148
+ properties,
149
+ name,
150
+ scratch1);
151
+ if (result->IsFailure()) return result;
152
+
153
+ __ bind(&done);
154
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
155
+
156
+ return result;
157
+ }
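The bit-field test at the top of this helper, restated in plain C++ (the bit positions here are hypothetical; the real values come from Map): either flag on the receiver's map forces the miss path.

#include <cstdint>
bool RequiresMiss(uint8_t map_bit_field) {
  const int kHasNamedInterceptorBit = 2;   // hypothetical position
  const int kIsAccessCheckNeededBit = 3;   // hypothetical position
  const uint8_t mask = (1 << kHasNamedInterceptorBit) |
                       (1 << kIsAccessCheckNeededBit);
  return (map_bit_field & mask) != 0;
}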
158
+
159
+
160
+ void StubCache::GenerateProbe(MacroAssembler* masm,
161
+ Code::Flags flags,
162
+ Register receiver,
163
+ Register name,
164
+ Register scratch,
165
+ Register extra,
166
+ Register extra2) {
167
+ Isolate* isolate = masm->isolate();
168
+ Label miss;
169
+
170
+ // Make sure that the code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
176
+
177
+ // Make sure that there are no register conflicts.
178
+ ASSERT(!scratch.is(receiver));
179
+ ASSERT(!scratch.is(name));
180
+ ASSERT(!extra.is(receiver));
181
+ ASSERT(!extra.is(name));
182
+ ASSERT(!extra.is(scratch));
183
+ ASSERT(!extra2.is(receiver));
184
+ ASSERT(!extra2.is(name));
185
+ ASSERT(!extra2.is(scratch));
186
+ ASSERT(!extra2.is(extra));
187
+
188
+ // Check scratch, extra and extra2 registers are valid.
189
+ ASSERT(!scratch.is(no_reg));
190
+ ASSERT(!extra.is(no_reg));
191
+ ASSERT(!extra2.is(no_reg));
192
+
193
+ // Check that the receiver isn't a smi.
194
+ __ JumpIfSmi(receiver, &miss, t0);
195
+
196
+ // Get the map of the receiver and compute the hash.
197
+ __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
198
+ __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
199
+ __ Addu(scratch, scratch, Operand(t8));
200
+ __ Xor(scratch, scratch, Operand(flags));
201
+ __ And(scratch,
202
+ scratch,
203
+ Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
204
+
205
+ // Probe the primary table.
206
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
207
+
208
+ // Primary miss: Compute hash for secondary probe.
209
+ __ Subu(scratch, scratch, Operand(name));
210
+ __ Addu(scratch, scratch, Operand(flags));
211
+ __ And(scratch,
212
+ scratch,
213
+ Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
214
+
215
+ // Probe the secondary table.
216
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
217
+
218
+ // Cache miss: Fall-through and let caller handle the miss by
219
+ // entering the runtime system.
220
+ __ bind(&miss);
221
+ }
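The probe sequence above implements a two-level hash table lookup. A plain C++ sketch of the offset arithmetic it emits (the helper and its parameterization are illustrative, not V8 declarations); on a primary miss the secondary offset is derived from the primary one, so both probes remain cheap register arithmetic.

#include <cstdint>
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits, uint32_t flags,
                       uint32_t table_size, uint32_t tag_size) {
  // hash = (name hash + map) ^ flags, masked down to the table.
  return ((name_hash + map_bits) ^ flags) & ((table_size - 1) << tag_size);
}
uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits, uint32_t flags,
                         uint32_t table_size, uint32_t tag_size) {
  // On a primary miss: subtract the name, re-add the flags, mask again.
  return ((primary - name_bits) + flags) & ((table_size - 1) << tag_size);
}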
222
+
223
+
224
+ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
225
+ int index,
226
+ Register prototype) {
227
+ // Load the global or builtins object from the current context.
228
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
229
+ // Load the global context from the global or builtins object.
230
+ __ lw(prototype,
231
+ FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
232
+ // Load the function from the global context.
233
+ __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
234
+ // Load the initial map. The global functions all have initial maps.
235
+ __ lw(prototype,
236
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
237
+ // Load the prototype from the initial map.
238
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
239
+ }
240
+
241
+
242
+ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
243
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
244
+ Isolate* isolate = masm->isolate();
245
+ // Check we're still in the same context.
246
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
247
+ ASSERT(!prototype.is(at));
248
+ __ li(at, isolate->global());
249
+ __ Branch(miss, ne, prototype, Operand(at));
250
+ // Get the global function with the given index.
251
+ JSFunction* function =
252
+ JSFunction::cast(isolate->global_context()->get(index));
253
+ // Load its initial map. The global functions all have initial maps.
254
+ __ li(prototype, Handle<Map>(function->initial_map()));
255
+ // Load the prototype from the initial map.
256
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
257
+ }
258
+
259
+
260
+ // Load a fast property out of a holder object (src). In-object properties
+ // are loaded directly; otherwise the property is loaded from the properties
+ // fixed array.
263
+ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
264
+ Register dst, Register src,
265
+ JSObject* holder, int index) {
266
+ // Adjust for the number of properties stored in the holder.
267
+ index -= holder->map()->inobject_properties();
268
+ if (index < 0) {
269
+ // Get the property straight out of the holder.
270
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
271
+ __ lw(dst, FieldMemOperand(src, offset));
272
+ } else {
273
+ // Calculate the offset into the properties array.
274
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
275
+ __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
276
+ __ lw(dst, FieldMemOperand(dst, offset));
277
+ }
278
+ }
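A worked sketch of the index adjustment above (toy numbers, 4-byte pointers assumed): with 4 in-object properties and instance size 32, index 2 becomes in-object offset 32 + (2 - 4) * 4 = 24, while index 5 falls past the object and is read from the properties FixedArray at offset (5 - 4) * 4 plus the array header.

int FastPropertyOffset(int index, int inobject_properties, int instance_size,
                       int array_header_size, bool* is_in_object) {
  index -= inobject_properties;       // negative => stored inside the object
  *is_in_object = index < 0;
  return *is_in_object ? instance_size + index * 4
                       : index * 4 + array_header_size;
}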
279
+
280
+
281
+ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
282
+ Register receiver,
283
+ Register scratch,
284
+ Label* miss_label) {
285
+ // Check that the receiver isn't a smi.
286
+ __ And(scratch, receiver, Operand(kSmiTagMask));
287
+ __ Branch(miss_label, eq, scratch, Operand(zero_reg));
288
+
289
+ // Check that the object is a JS array.
290
+ __ GetObjectType(receiver, scratch, scratch);
291
+ __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
292
+
293
+ // Load length directly from the JS array.
294
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
295
+ __ Ret();
296
+ }
297
+
298
+
299
+ // Generate code to check if an object is a string. If the object is a
300
+ // heap object, its map's instance type is left in the scratch1 register.
301
+ // If this is not needed, scratch1 and scratch2 may be the same register.
302
+ static void GenerateStringCheck(MacroAssembler* masm,
303
+ Register receiver,
304
+ Register scratch1,
305
+ Register scratch2,
306
+ Label* smi,
307
+ Label* non_string_object) {
308
+ // Check that the receiver isn't a smi.
309
+ __ JumpIfSmi(receiver, smi, t0);
310
+
311
+ // Check that the object is a string.
312
+ __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
313
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
314
+ __ And(scratch2, scratch1, Operand(kIsNotStringMask));
315
+ // The cast is to resolve the overload for the argument of 0x0.
316
+ __ Branch(non_string_object,
317
+ ne,
318
+ scratch2,
319
+ Operand(static_cast<int32_t>(kStringTag)));
320
+ }
321
+
322
+
323
+ // Generate code to load the length from a string object and return the length.
+ // If the receiver object is not a string or a wrapped string object, the
+ // execution continues at the miss label. The register containing the
+ // receiver is potentially clobbered.
327
+ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
328
+ Register receiver,
329
+ Register scratch1,
330
+ Register scratch2,
331
+ Label* miss,
332
+ bool support_wrappers) {
333
+ Label check_wrapper;
334
+
335
+ // Check if the object is a string leaving the instance type in the
336
+ // scratch1 register.
337
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
338
+ support_wrappers ? &check_wrapper : miss);
339
+
340
+ // Load length directly from the string.
341
+ __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
342
+ __ Ret();
343
+
344
+ if (support_wrappers) {
345
+ // Check if the object is a JSValue wrapper.
346
+ __ bind(&check_wrapper);
347
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
348
+
349
+ // Unwrap the value and check if the wrapped value is a string.
350
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
351
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
352
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
353
+ __ Ret();
354
+ }
355
+ }
356
+
357
+
358
+ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
359
+ Register receiver,
360
+ Register scratch1,
361
+ Register scratch2,
362
+ Label* miss_label) {
363
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
364
+ __ mov(v0, scratch1);
365
+ __ Ret();
366
+ }
367
+
368
+
369
+ // Generate StoreField code; the value is passed in the a0 register.
+ // After executing the generated code, the receiver_reg and name_reg
+ // may be clobbered.
372
+ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
373
+ JSObject* object,
374
+ int index,
375
+ Map* transition,
376
+ Register receiver_reg,
377
+ Register name_reg,
378
+ Register scratch,
379
+ Label* miss_label) {
380
+ // a0 : value.
381
+ Label exit;
382
+
383
+ // Check that the receiver isn't a smi.
384
+ __ JumpIfSmi(receiver_reg, miss_label, scratch);
385
+
386
+ // Check that the map of the receiver hasn't changed.
387
+ __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
388
+ __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
389
+
390
+ // Perform global security token check if needed.
391
+ if (object->IsJSGlobalProxy()) {
392
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
393
+ }
394
+
395
+ // Stub never generated for non-global objects that require access
396
+ // checks.
397
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
398
+
399
+ // Perform map transition for the receiver if necessary.
400
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
401
+ // The properties must be extended before we can store the value.
402
+ // We jump to a runtime call that extends the properties array.
403
+ __ push(receiver_reg);
404
+ __ li(a2, Operand(Handle<Map>(transition)));
405
+ __ Push(a2, a0);
406
+ __ TailCallExternalReference(
407
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
408
+ masm->isolate()),
409
+ 3, 1);
410
+ return;
411
+ }
412
+
413
+ if (transition != NULL) {
414
+ // Update the map of the object; no write barrier updating is
415
+ // needed because the map is never in new space.
416
+ __ li(t0, Operand(Handle<Map>(transition)));
417
+ __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
418
+ }
419
+
420
+ // Adjust for the number of properties stored in the object. Even in the
421
+ // face of a transition we can use the old map here because the size of the
422
+ // object and the number of in-object properties is not going to change.
423
+ index -= object->map()->inobject_properties();
424
+
425
+ if (index < 0) {
426
+ // Set the property straight into the object.
427
+ int offset = object->map()->instance_size() + (index * kPointerSize);
428
+ __ sw(a0, FieldMemOperand(receiver_reg, offset));
429
+
430
+ // Skip updating write barrier if storing a smi.
431
+ __ JumpIfSmi(a0, &exit, scratch);
432
+
433
+ // Update the write barrier for the array address.
434
+ // Pass the now unused name_reg as a scratch register.
435
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
436
+ } else {
437
+ // Write to the properties array.
438
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
439
+ // Get the properties array.
440
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
441
+ __ sw(a0, FieldMemOperand(scratch, offset));
442
+
443
+ // Skip updating write barrier if storing a smi.
444
+ __ JumpIfSmi(a0, &exit);
445
+
446
+ // Update the write barrier for the array address.
447
+ // Ok to clobber receiver_reg and name_reg, since we return.
448
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
449
+ }
450
+
451
+ // Return the value (register v0).
452
+ __ bind(&exit);
453
+ __ mov(v0, a0);
454
+ __ Ret();
455
+ }
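Why the smi checks above may skip the write barrier, as a sketch (the tag constant assumes V8's low-bit pointer tagging; illustrative only): smis are immediate integers rather than heap pointers, so a slot holding one never needs GC tracking.

#include <cstdint>
inline bool StoreNeedsWriteBarrier(intptr_t tagged_value) {
  const intptr_t kHeapObjectTagMask = 1;  // assumption: low-bit tagging
  // Heap objects carry tag 1; smis carry tag 0 and never need a barrier.
  return (tagged_value & kHeapObjectTagMask) != 0;
}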
456
+
457
+
458
+ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
459
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
460
+ Code* code = NULL;
461
+ if (kind == Code::LOAD_IC) {
462
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
463
+ } else {
464
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
465
+ }
466
+
467
+ Handle<Code> ic(code);
468
+ __ Jump(ic, RelocInfo::CODE_TARGET);
469
+ }
470
+
471
+
472
+ static void GenerateCallFunction(MacroAssembler* masm,
473
+ Object* object,
474
+ const ParameterCount& arguments,
475
+ Label* miss) {
476
+ // ----------- S t a t e -------------
477
+ // -- a0: receiver
478
+ // -- a1: function to call
479
+ // -----------------------------------
480
+ // Check that the function really is a function.
481
+ __ JumpIfSmi(a1, miss);
482
+ __ GetObjectType(a1, a3, a3);
483
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
484
+
485
+ // Patch the receiver on the stack with the global proxy if
486
+ // necessary.
487
+ if (object->IsGlobalObject()) {
488
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
489
+ __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
490
+ }
491
+
492
+ // Invoke the function.
493
+ __ InvokeFunction(a1, arguments, JUMP_FUNCTION);
494
+ }
495
+
496
+
497
+ static void PushInterceptorArguments(MacroAssembler* masm,
498
+ Register receiver,
499
+ Register holder,
500
+ Register name,
501
+ JSObject* holder_obj) {
502
+ __ push(name);
503
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
504
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
505
+ Register scratch = name;
506
+ __ li(scratch, Operand(Handle<Object>(interceptor)));
507
+ __ Push(scratch, receiver, holder);
508
+ __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
509
+ __ push(scratch);
510
+ }
511
+
512
+
513
+ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
514
+ Register receiver,
515
+ Register holder,
516
+ Register name,
517
+ JSObject* holder_obj) {
518
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
519
+
520
+ ExternalReference ref =
521
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
522
+ masm->isolate());
523
+ __ li(a0, Operand(5));
524
+ __ li(a1, Operand(ref));
525
+
526
+ CEntryStub stub(1);
527
+ __ CallStub(&stub);
528
+ }
529
+
530
+
531
+ static const int kFastApiCallArguments = 3;
532
+
533
+
534
+ // Reserves space for the extra arguments to FastHandleApiCall in the
535
+ // caller's frame.
536
+ //
537
+ // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
538
+ static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
539
+ Register scratch) {
540
+ ASSERT(Smi::FromInt(0) == 0);
541
+ for (int i = 0; i < kFastApiCallArguments; i++) {
542
+ __ push(zero_reg);
543
+ }
544
+ }
545
+
546
+
547
+ // Undoes the effects of ReserveSpaceForFastApiCall.
548
+ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
549
+ __ Drop(kFastApiCallArguments);
550
+ }
551
+
552
+
553
+ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
554
+ const CallOptimization& optimization,
555
+ int argc) {
556
+ // ----------- S t a t e -------------
557
+ // -- sp[0] : holder (set by CheckPrototypes)
558
+ // -- sp[4] : callee js function
559
+ // -- sp[8] : call data
560
+ // -- sp[12] : last js argument
561
+ // -- ...
562
+ // -- sp[(argc + 3) * 4] : first js argument
563
+ // -- sp[(argc + 4) * 4] : receiver
564
+ // -----------------------------------
565
+ // Get the function and setup the context.
566
+ JSFunction* function = optimization.constant_function();
567
+ __ li(t1, Operand(Handle<JSFunction>(function)));
568
+ __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
569
+
570
+ // Pass the additional arguments FastHandleApiCall expects.
571
+ Object* call_data = optimization.api_call_info()->data();
572
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
573
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
574
+ __ li(a0, api_call_info_handle);
575
+ __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
576
+ } else {
577
+ __ li(t2, Operand(Handle<Object>(call_data)));
578
+ }
579
+
580
+ // Store js function and call data.
581
+ __ sw(t1, MemOperand(sp, 1 * kPointerSize));
582
+ __ sw(t2, MemOperand(sp, 2 * kPointerSize));
583
+
584
+ // a2 points to call data as expected by Arguments
585
+ // (refer to layout above).
586
+ __ Addu(a2, sp, Operand(2 * kPointerSize));
587
+
588
+ Object* callback = optimization.api_call_info()->callback();
589
+ Address api_function_address = v8::ToCData<Address>(callback);
590
+ ApiFunction fun(api_function_address);
591
+
592
+ const int kApiStackSpace = 4;
593
+
594
+ __ EnterExitFrame(false, kApiStackSpace);
595
+
596
+ // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+ // struct from the function (which is currently the case). This means we pass
+ // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+ // will handle setting up a0.
600
+
601
+ // a1 = v8::Arguments&
602
+ // Arguments is built at sp + 1 (sp is a reserved spot for ra).
603
+ __ Addu(a1, sp, kPointerSize);
604
+
605
+ // v8::Arguments::implicit_args = data
606
+ __ sw(a2, MemOperand(a1, 0 * kPointerSize));
607
+ // v8::Arguments::values = last argument
608
+ __ Addu(t0, a2, Operand(argc * kPointerSize));
609
+ __ sw(t0, MemOperand(a1, 1 * kPointerSize));
610
+ // v8::Arguments::length_ = argc
611
+ __ li(t0, Operand(argc));
612
+ __ sw(t0, MemOperand(a1, 2 * kPointerSize));
613
+ // v8::Arguments::is_construct_call = 0
614
+ __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
615
+
616
+ // Emitting a stub call may try to allocate (if the code is not
617
+ // already generated). Do not allow the assembler to perform a
618
+ // garbage collection but instead return the allocation failure
619
+ // object.
620
+ const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
621
+ ExternalReference ref =
622
+ ExternalReference(&fun,
623
+ ExternalReference::DIRECT_API_CALL,
624
+ masm->isolate());
625
+ return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
626
+ }
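The stores above assemble the v8::Arguments block word by word. A struct sketch of that layout (field names follow the comments above and mirror, rather than define, the real class):

struct ArgumentsBlockSketch {
  void** implicit_args;    // points at the call data slot (a2 above)
  void** values;           // last JS argument: implicit_args + argc words
  int length_;             // argc
  int is_construct_call;   // zeroed: this stub only handles regular calls
};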
627
+
628
+ class CallInterceptorCompiler BASE_EMBEDDED {
629
+ public:
630
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
631
+ const ParameterCount& arguments,
632
+ Register name)
633
+ : stub_compiler_(stub_compiler),
634
+ arguments_(arguments),
635
+ name_(name) {}
636
+
637
+ MaybeObject* Compile(MacroAssembler* masm,
638
+ JSObject* object,
639
+ JSObject* holder,
640
+ String* name,
641
+ LookupResult* lookup,
642
+ Register receiver,
643
+ Register scratch1,
644
+ Register scratch2,
645
+ Register scratch3,
646
+ Label* miss) {
647
+ ASSERT(holder->HasNamedInterceptor());
648
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
649
+
650
+ // Check that the receiver isn't a smi.
651
+ __ JumpIfSmi(receiver, miss);
652
+
653
+ CallOptimization optimization(lookup);
654
+
655
+ if (optimization.is_constant_call()) {
656
+ return CompileCacheable(masm,
657
+ object,
658
+ receiver,
659
+ scratch1,
660
+ scratch2,
661
+ scratch3,
662
+ holder,
663
+ lookup,
664
+ name,
665
+ optimization,
666
+ miss);
667
+ } else {
668
+ CompileRegular(masm,
669
+ object,
670
+ receiver,
671
+ scratch1,
672
+ scratch2,
673
+ scratch3,
674
+ name,
675
+ holder,
676
+ miss);
677
+ return masm->isolate()->heap()->undefined_value();
678
+ }
679
+ }
680
+
681
+ private:
682
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
683
+ JSObject* object,
684
+ Register receiver,
685
+ Register scratch1,
686
+ Register scratch2,
687
+ Register scratch3,
688
+ JSObject* interceptor_holder,
689
+ LookupResult* lookup,
690
+ String* name,
691
+ const CallOptimization& optimization,
692
+ Label* miss_label) {
693
+ ASSERT(optimization.is_constant_call());
694
+ ASSERT(!lookup->holder()->IsGlobalObject());
695
+
696
+ Counters* counters = masm->isolate()->counters();
697
+
698
+ int depth1 = kInvalidProtoDepth;
699
+ int depth2 = kInvalidProtoDepth;
700
+ bool can_do_fast_api_call = false;
701
+ if (optimization.is_simple_api_call() &&
702
+ !lookup->holder()->IsGlobalObject()) {
703
+ depth1 =
704
+ optimization.GetPrototypeDepthOfExpectedType(object,
705
+ interceptor_holder);
706
+ if (depth1 == kInvalidProtoDepth) {
707
+ depth2 =
708
+ optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
709
+ lookup->holder());
710
+ }
711
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
712
+ (depth2 != kInvalidProtoDepth);
713
+ }
714
+
715
+ __ IncrementCounter(counters->call_const_interceptor(), 1,
716
+ scratch1, scratch2);
717
+
718
+ if (can_do_fast_api_call) {
719
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
720
+ scratch1, scratch2);
721
+ ReserveSpaceForFastApiCall(masm, scratch1);
722
+ }
723
+
724
+ // Check that the maps from the receiver to the interceptor's holder
+ // haven't changed, and thus we can invoke the interceptor.
726
+ Label miss_cleanup;
727
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
728
+ Register holder =
729
+ stub_compiler_->CheckPrototypes(object, receiver,
730
+ interceptor_holder, scratch1,
731
+ scratch2, scratch3, name, depth1, miss);
732
+
733
+ // Invoke an interceptor and if it provides a value,
734
+ // branch to |regular_invoke|.
735
+ Label regular_invoke;
736
+ LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
737
+ &regular_invoke);
738
+
739
+ // Interceptor returned nothing for this property. Try to use cached
740
+ // constant function.
741
+
742
+ // Check that the maps from interceptor's holder to constant function's
743
+ // holder haven't changed and thus we can use cached constant function.
744
+ if (interceptor_holder != lookup->holder()) {
745
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
746
+ lookup->holder(), scratch1,
747
+ scratch2, scratch3, name, depth2, miss);
748
+ } else {
749
+ // CheckPrototypes has a side effect of fetching a 'holder'
+ // for the API (the object which is instanceof for the signature). It's
+ // safe to omit it here: if present, it should have been fetched
+ // by the previous CheckPrototypes.
753
+ ASSERT(depth2 == kInvalidProtoDepth);
754
+ }
755
+
756
+ // Invoke function.
757
+ if (can_do_fast_api_call) {
758
+ MaybeObject* result = GenerateFastApiDirectCall(masm,
759
+ optimization,
760
+ arguments_.immediate());
761
+ if (result->IsFailure()) return result;
762
+ } else {
763
+ __ InvokeFunction(optimization.constant_function(), arguments_,
764
+ JUMP_FUNCTION);
765
+ }
766
+
767
+ // Deferred code for the fast API call case: clean up preallocated space.
768
+ if (can_do_fast_api_call) {
769
+ __ bind(&miss_cleanup);
770
+ FreeSpaceForFastApiCall(masm);
771
+ __ Branch(miss_label);
772
+ }
773
+
774
+ // Invoke a regular function.
775
+ __ bind(&regular_invoke);
776
+ if (can_do_fast_api_call) {
777
+ FreeSpaceForFastApiCall(masm);
778
+ }
779
+
780
+ return masm->isolate()->heap()->undefined_value();
781
+ }
782
+
783
+ void CompileRegular(MacroAssembler* masm,
784
+ JSObject* object,
785
+ Register receiver,
786
+ Register scratch1,
787
+ Register scratch2,
788
+ Register scratch3,
789
+ String* name,
790
+ JSObject* interceptor_holder,
791
+ Label* miss_label) {
792
+ Register holder =
793
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
794
+ scratch1, scratch2, scratch3, name,
795
+ miss_label);
796
+
797
+ // Call a runtime function to load the interceptor property.
798
+ __ EnterInternalFrame();
799
+ // Save the name_ register across the call.
800
+ __ push(name_);
801
+
802
+ PushInterceptorArguments(masm,
803
+ receiver,
804
+ holder,
805
+ name_,
806
+ interceptor_holder);
807
+
808
+ __ CallExternalReference(
809
+ ExternalReference(
810
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
811
+ masm->isolate()),
812
+ 5);
813
+
814
+ // Restore the name_ register.
815
+ __ pop(name_);
816
+ __ LeaveInternalFrame();
817
+ }
818
+
819
+ void LoadWithInterceptor(MacroAssembler* masm,
820
+ Register receiver,
821
+ Register holder,
822
+ JSObject* holder_obj,
823
+ Register scratch,
824
+ Label* interceptor_succeeded) {
825
+ __ EnterInternalFrame();
826
+
827
+ __ Push(holder, name_);
828
+
829
+ CompileCallLoadPropertyWithInterceptor(masm,
830
+ receiver,
831
+ holder,
832
+ name_,
833
+ holder_obj);
834
+
835
+ __ pop(name_); // Restore the name.
836
+ __ pop(receiver); // Restore the holder.
837
+ __ LeaveInternalFrame();
838
+
839
+ // If the interceptor returns the no-result sentinel, call the constant function.
840
+ __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
841
+ __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
842
+ }
843
+
844
+ StubCompiler* stub_compiler_;
845
+ const ParameterCount& arguments_;
846
+ Register name_;
847
+ };
848
+
849
+
850
+
851
+ // Generate code to check that a global property cell is empty. Create
852
+ // the property cell at compilation time if no cell exists for the
853
+ // property.
854
+ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
855
+ MacroAssembler* masm,
856
+ GlobalObject* global,
857
+ String* name,
858
+ Register scratch,
859
+ Label* miss) {
860
+ Object* probe;
861
+ { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
862
+ if (!maybe_probe->ToObject(&probe)) return maybe_probe;
863
+ }
864
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
865
+ ASSERT(cell->value()->IsTheHole());
866
+ __ li(scratch, Operand(Handle<Object>(cell)));
867
+ __ lw(scratch,
868
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
869
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
870
+ __ Branch(miss, ne, scratch, Operand(at));
871
+ return cell;
872
+ }
873
+
874
+
875
+ // Calls GenerateCheckPropertyCell for each global object in the prototype chain
876
+ // from object to (but not including) holder.
877
+ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
878
+ MacroAssembler* masm,
879
+ JSObject* object,
880
+ JSObject* holder,
881
+ String* name,
882
+ Register scratch,
883
+ Label* miss) {
884
+ JSObject* current = object;
885
+ while (current != holder) {
886
+ if (current->IsGlobalObject()) {
887
+ // Returns a cell or a failure.
888
+ MaybeObject* result = GenerateCheckPropertyCell(
889
+ masm,
890
+ GlobalObject::cast(current),
891
+ name,
892
+ scratch,
893
+ miss);
894
+ if (result->IsFailure()) return result;
895
+ }
896
+ ASSERT(current->IsJSObject());
897
+ current = JSObject::cast(current->GetPrototype());
898
+ }
899
+ return NULL;
900
+ }
901
+
902
+
903
+ // Convert the int passed in register ival to an IEEE 754 single-precision
+ // floating point value and store it at memory location (dst + 4 * wordoffset).
+ // If FPU is available, use it for the conversion.
906
+ static void StoreIntAsFloat(MacroAssembler* masm,
907
+ Register dst,
908
+ Register wordoffset,
909
+ Register ival,
910
+ Register fval,
911
+ Register scratch1,
912
+ Register scratch2) {
913
+ if (CpuFeatures::IsSupported(FPU)) {
914
+ CpuFeatures::Scope scope(FPU);
915
+ __ mtc1(ival, f0);
916
+ __ cvt_s_w(f0, f0);
917
+ __ sll(scratch1, wordoffset, 2);
918
+ __ addu(scratch1, dst, scratch1);
919
+ __ swc1(f0, MemOperand(scratch1, 0));
920
+ } else {
921
+ // FPU is not available, do manual conversions.
922
+
923
+ Label not_special, done;
924
+ // Move sign bit from source to destination. This works because the sign
925
+ // bit in the exponent word of the double has the same position and polarity
926
+ // as the 2's complement sign bit in a Smi.
927
+ ASSERT(kBinary32SignMask == 0x80000000u);
928
+
929
+ __ And(fval, ival, Operand(kBinary32SignMask));
930
+ // Negate value if it is negative.
931
+ __ subu(scratch1, zero_reg, ival);
932
+ __ movn(ival, scratch1, fval);
933
+
934
+ // We have -1, 0 or 1, which we treat specially. Register ival contains
935
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
936
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
937
+ __ Branch(&not_special, gt, ival, Operand(1));
938
+
939
+ // For 1 or -1 we need to or in the 0 exponent (biased).
940
+ static const uint32_t exponent_word_for_1 =
941
+ kBinary32ExponentBias << kBinary32ExponentShift;
942
+
943
+ __ Xor(scratch1, ival, Operand(1));
944
+ __ li(scratch2, exponent_word_for_1);
945
+ __ or_(scratch2, fval, scratch2);
946
+ __ movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
947
+ __ Branch(&done);
948
+
949
+ __ bind(&not_special);
950
+ // Count leading zeros.
951
+ // Gets the wrong answer for 0, but we already checked for that case above.
952
+ Register zeros = scratch2;
953
+ __ clz(zeros, ival);
954
+
955
+ // Compute exponent and or it into the exponent register.
956
+ __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
957
+ __ subu(scratch1, scratch1, zeros);
958
+
959
+ __ sll(scratch1, scratch1, kBinary32ExponentShift);
960
+ __ or_(fval, fval, scratch1);
961
+
962
+ // Shift up the source chopping the top bit off.
963
+ __ Addu(zeros, zeros, Operand(1));
964
+ // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
965
+ __ sllv(ival, ival, zeros);
966
+ // Or in the mantissa (the top 23 bits of the shifted value).
967
+ __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
968
+ __ or_(fval, fval, scratch1);
969
+
970
+ __ bind(&done);
971
+
972
+ __ sll(scratch1, wordoffset, 2);
973
+ __ addu(scratch1, dst, scratch1);
974
+ __ sw(fval, MemOperand(scratch1, 0));
975
+ }
976
+ }
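A software rendering of the non-FPU path above: encode an int32 as an IEEE 754 single via sign extraction, a leading-zero count, and a mantissa shift. A sketch under the standard binary32 constants (like the assembly, it truncates rather than rounds):

#include <cstdint>
uint32_t Int32ToBinary32(int32_t ival) {
  uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
  uint32_t mag = sign ? 0u - static_cast<uint32_t>(ival)
                      : static_cast<uint32_t>(ival);
  if (mag == 0) return sign;                  // +/-0
  if (mag == 1) return sign | (127u << 23);   // 1 and -1: biased exponent 127
  int zeros = __builtin_clz(mag);             // GCC/Clang builtin
  uint32_t exponent = (31u - zeros) + 127u;   // biased exponent
  mag <<= zeros + 1;                          // drop the implicit top bit
  return sign | (exponent << 23) | (mag >> 9);  // keep top 23 mantissa bits
}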
977
+
978
+
979
+ // Convert an unsigned integer with the specified number of leading zeroes
+ // in its binary representation to an IEEE 754 double.
+ // The integer to convert is passed in register hiword.
+ // The resulting double is returned in registers hiword:loword.
+ // This function does not work correctly for 0.
984
+ static void GenerateUInt2Double(MacroAssembler* masm,
985
+ Register hiword,
986
+ Register loword,
987
+ Register scratch,
988
+ int leading_zeroes) {
989
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
990
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
991
+
992
+ const int mantissa_shift_for_hi_word =
993
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
994
+
995
+ const int mantissa_shift_for_lo_word =
996
+ kBitsPerInt - mantissa_shift_for_hi_word;
997
+
998
+ __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
999
+ if (mantissa_shift_for_hi_word > 0) {
1000
+ __ sll(loword, hiword, mantissa_shift_for_lo_word);
1001
+ __ srl(hiword, hiword, mantissa_shift_for_hi_word);
1002
+ __ or_(hiword, scratch, hiword);
1003
+ } else {
1004
+ __ mov(loword, zero_reg);
1005
+ __ sll(hiword, hiword, mantissa_shift_for_hi_word);
1006
+ __ or_(hiword, scratch, hiword);
1007
+ }
1008
+
1009
+ // If least significant bit of biased exponent was not 1 it was corrupted
1010
+ // by most significant bit of mantissa so we should fix that.
1011
+ if (!(biased_exponent & 1)) {
1012
+ __ li(scratch, 1 << HeapNumber::kExponentShift);
1013
+ __ nor(scratch, scratch, scratch);
1014
+ __ and_(hiword, hiword, scratch);
1015
+ }
1016
+ }
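The same encoding in plain C++ for the double case above, as a sketch (standard binary64 constants; like the original, it is not correct for value == 0):

#include <cstdint>
void UInt2DoubleSketch(uint32_t value, int leading_zeroes,
                       uint32_t* hiword, uint32_t* loword) {
  const int kMantissaBitsInTopWord = 20;       // binary64 hi-word mantissa
  int meaningful_bits = 32 - leading_zeroes - 1;
  uint32_t biased_exponent = 1023u + meaningful_bits;
  int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
  if (hi_shift > 0) {
    *loword = value << (32 - hi_shift);        // bits that spill into lo word
    *hiword = (biased_exponent << 20) | (value >> hi_shift);
  } else {
    *loword = 0;
    *hiword = (biased_exponent << 20) | (value << -hi_shift);
  }
  // The value's implicit top bit lands on the exponent's low bit; clear it
  // again when the biased exponent is even (the nor/and fixup above).
  if (!(biased_exponent & 1)) *hiword &= ~(1u << 20);
}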
1017
+
1018
+
1019
+ #undef __
1020
+ #define __ ACCESS_MASM(masm())
1021
+
1022
+
1023
+ Register StubCompiler::CheckPrototypes(JSObject* object,
1024
+ Register object_reg,
1025
+ JSObject* holder,
1026
+ Register holder_reg,
1027
+ Register scratch1,
1028
+ Register scratch2,
1029
+ String* name,
1030
+ int save_at_depth,
1031
+ Label* miss) {
1032
+ // Make sure there's no overlap between holder and object registers.
1033
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
1034
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
1035
+ && !scratch2.is(scratch1));
1036
+
1037
+ // Keep track of the current object in register reg.
1038
+ Register reg = object_reg;
1039
+ int depth = 0;
1040
+
1041
+ if (save_at_depth == depth) {
1042
+ __ sw(reg, MemOperand(sp));
1043
+ }
1044
+
1045
+ // Check the maps in the prototype chain.
1046
+ // Traverse the prototype chain from the object and do map checks.
1047
+ JSObject* current = object;
1048
+ while (current != holder) {
1049
+ depth++;
1050
+
1051
+ // Only global objects and objects that do not require access
1052
+ // checks are allowed in stubs.
1053
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
1054
+
1055
+ ASSERT(current->GetPrototype()->IsJSObject());
1056
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
1057
+ if (!current->HasFastProperties() &&
1058
+ !current->IsJSGlobalObject() &&
1059
+ !current->IsJSGlobalProxy()) {
1060
+ if (!name->IsSymbol()) {
1061
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
1062
+ Object* lookup_result = NULL; // Initialization to please compiler.
1063
+ if (!maybe_lookup_result->ToObject(&lookup_result)) {
1064
+ set_failure(Failure::cast(maybe_lookup_result));
1065
+ return reg;
1066
+ }
1067
+ name = String::cast(lookup_result);
1068
+ }
1069
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
1070
+ StringDictionary::kNotFound);
1071
+
1072
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
1073
+ miss,
1074
+ reg,
1075
+ name,
1076
+ scratch1,
1077
+ scratch2);
1078
+ if (negative_lookup->IsFailure()) {
1079
+ set_failure(Failure::cast(negative_lookup));
1080
+ return reg;
1081
+ }
1082
+
1083
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
1084
+ reg = holder_reg; // From now the object is in holder_reg.
1085
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
1086
+ } else if (heap()->InNewSpace(prototype)) {
1087
+ // Get the map of the current object.
1088
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
1089
+
1090
+ // Branch on the result of the map check.
1091
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
1092
+
1093
+ // Check access rights to the global object. This has to happen
1094
+ // after the map check so that we know that the object is
1095
+ // actually a global object.
1096
+ if (current->IsJSGlobalProxy()) {
1097
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
1098
+ // Restore scratch register to be the map of the object. In the
1099
+ // new space case below, we load the prototype from the map in
1100
+ // the scratch register.
1101
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
1102
+ }
1103
+
1104
+ reg = holder_reg; // From now the object is in holder_reg.
1105
+ // The prototype is in new space; we cannot store a reference
1106
+ // to it in the code. Load it from the map.
1107
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
1108
+ } else {
1109
+ // Check the map of the current object.
1110
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
1111
+ // Branch on the result of the map check.
1112
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
1113
+ // Check access rights to the global object. This has to happen
1114
+ // after the map check so that we know that the object is
1115
+ // actually a global object.
1116
+ if (current->IsJSGlobalProxy()) {
1117
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
1118
+ }
1119
+ // The prototype is in old space; load it directly.
1120
+ reg = holder_reg; // From now the object is in holder_reg.
1121
+ __ li(reg, Operand(Handle<JSObject>(prototype)));
1122
+ }
1123
+
1124
+ if (save_at_depth == depth) {
1125
+ __ sw(reg, MemOperand(sp));
1126
+ }
1127
+
1128
+ // Go to the next object in the prototype chain.
1129
+ current = prototype;
1130
+ }
1131
+
1132
+ // Check the holder map.
1133
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
1134
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
1135
+
1136
+ // Log the check depth.
1137
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
1138
+ // Perform security check for access to the global object.
1139
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
1140
+ if (holder->IsJSGlobalProxy()) {
1141
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
1142
+ }
1143
+
1144
+ // If we've skipped any global objects, it's not enough to verify
1145
+ // that their maps haven't changed. We also need to check that the
1146
+ // property cell for the property is still empty.
1147
+
1148
+ MaybeObject* result = GenerateCheckPropertyCells(masm(),
1149
+ object,
1150
+ holder,
1151
+ name,
1152
+ scratch1,
1153
+ miss);
1154
+ if (result->IsFailure()) set_failure(Failure::cast(result));
1155
+
1156
+ // Return the register containing the holder.
1157
+ return reg;
1158
+ }
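The loop above compiles one map check per prototype hop. A self-contained sketch of the traversal shape (toy types, not the real V8 classes):

struct ToyObject { const ToyObject* prototype; };

int CountPrototypeHops(const ToyObject* object, const ToyObject* holder) {
  int depth = 0;  // each hop costs one emitted map check in the stub
  for (const ToyObject* cur = object; cur != holder; cur = cur->prototype) {
    ++depth;
  }
  return depth;
}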
1159
+
1160
+
1161
+ void StubCompiler::GenerateLoadField(JSObject* object,
1162
+ JSObject* holder,
1163
+ Register receiver,
1164
+ Register scratch1,
1165
+ Register scratch2,
1166
+ Register scratch3,
1167
+ int index,
1168
+ String* name,
1169
+ Label* miss) {
1170
+ // Check that the receiver isn't a smi.
1171
+ __ And(scratch1, receiver, Operand(kSmiTagMask));
1172
+ __ Branch(miss, eq, scratch1, Operand(zero_reg));
1173
+
1174
+ // Check that the maps haven't changed.
1175
+ Register reg =
1176
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
1177
+ name, miss);
1178
+ GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
1179
+ __ Ret();
1180
+ }
1181
+
1182
+
1183
+ void StubCompiler::GenerateLoadConstant(JSObject* object,
1184
+ JSObject* holder,
1185
+ Register receiver,
1186
+ Register scratch1,
1187
+ Register scratch2,
1188
+ Register scratch3,
1189
+ Object* value,
1190
+ String* name,
1191
+ Label* miss) {
1192
+ // Check that the receiver isn't a smi.
1193
+ __ JumpIfSmi(receiver, miss, scratch1);
1194
+
1195
+ // Check that the maps haven't changed.
1196
+ Register reg =
1197
+ CheckPrototypes(object, receiver, holder,
1198
+ scratch1, scratch2, scratch3, name, miss);
1199
+
1200
+ // Return the constant value.
1201
+ __ li(v0, Operand(Handle<Object>(value)));
1202
+ __ Ret();
1203
+ }
1204
+
1205
+
1206
+ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
1207
+ JSObject* holder,
1208
+ Register receiver,
1209
+ Register name_reg,
1210
+ Register scratch1,
1211
+ Register scratch2,
1212
+ Register scratch3,
1213
+ AccessorInfo* callback,
1214
+ String* name,
1215
+ Label* miss) {
1216
+ // Check that the receiver isn't a smi.
1217
+ __ JumpIfSmi(receiver, miss, scratch1);
1218
+
1219
+ // Check that the maps haven't changed.
1220
+ Register reg =
1221
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
1222
+ name, miss);
1223
+
1224
+ // Build the AccessorInfo::args_ list on the stack and push the property
+ // name below the exit frame, to make the GC aware of them and to store
+ // pointers to them.
1226
+ __ push(receiver);
1227
+ __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
1228
+ Handle<AccessorInfo> callback_handle(callback);
1229
+ if (heap()->InNewSpace(callback_handle->data())) {
1230
+ __ li(scratch3, callback_handle);
1231
+ __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
1232
+ } else {
1233
+ __ li(scratch3, Handle<Object>(callback_handle->data()));
1234
+ }
1235
+ __ Push(reg, scratch3, name_reg);
1236
+ __ mov(a2, scratch2); // Saved in case scratch2 == a1.
1237
+ __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
1238
+
1239
+ Address getter_address = v8::ToCData<Address>(callback->getter());
1240
+ ApiFunction fun(getter_address);
1241
+
1242
+ // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
1243
+ // struct from the function (which is currently the case). This means we pass
1244
+ // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
1245
+ // will handle setting up a0.
1246
+
1247
+ const int kApiStackSpace = 1;
1248
+
1249
+ __ EnterExitFrame(false, kApiStackSpace);
1250
+ // Create AccessorInfo instance on the stack above the exit frame with
1251
+ // scratch2 (internal::Object **args_) as the data.
1252
+ __ sw(a2, MemOperand(sp, kPointerSize));
1253
+ // a2 (second argument - see note above) = AccessorInfo&
1254
+ __ Addu(a2, sp, kPointerSize);
1255
+
1256
+ // Emitting a stub call may try to allocate (if the code is not
1257
+ // already generated). Do not allow the assembler to perform a
1258
+ // garbage collection but instead return the allocation failure
1259
+ // object.
1260
+ ExternalReference ref =
1261
+ ExternalReference(&fun,
1262
+ ExternalReference::DIRECT_GETTER_CALL,
1263
+ masm()->isolate());
1264
+ // 4 args - will be freed later by LeaveExitFrame.
1265
+ return masm()->TryCallApiFunctionAndReturn(ref, 4);
1266
+ }
1267
+
1268
+
1269
+ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
1270
+ JSObject* interceptor_holder,
1271
+ LookupResult* lookup,
1272
+ Register receiver,
1273
+ Register name_reg,
1274
+ Register scratch1,
1275
+ Register scratch2,
1276
+ Register scratch3,
1277
+ String* name,
1278
+ Label* miss) {
1279
+ ASSERT(interceptor_holder->HasNamedInterceptor());
1280
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1281
+
1282
+ // Check that the receiver isn't a smi.
1283
+ __ JumpIfSmi(receiver, miss);
1284
+
1285
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added
+ // later.
1288
+ bool compile_followup_inline = false;
1289
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
1290
+ if (lookup->type() == FIELD) {
1291
+ compile_followup_inline = true;
1292
+ } else if (lookup->type() == CALLBACKS &&
1293
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
1294
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
1295
+ compile_followup_inline = true;
1296
+ }
1297
+ }
1298
+
1299
+ if (compile_followup_inline) {
1300
+ // Compile the interceptor call, followed by inline code to load the
1301
+ // property from further up the prototype chain if the call fails.
1302
+ // Check that the maps haven't changed.
1303
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
1304
+ scratch1, scratch2, scratch3,
1305
+ name, miss);
1306
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
1307
+
1308
+ // Save necessary data before invoking an interceptor.
1309
+ // Requires a frame to make GC aware of pushed pointers.
1310
+ __ EnterInternalFrame();
1311
+
1312
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
1313
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
1314
+ __ Push(receiver, holder_reg, name_reg);
1315
+ } else {
1316
+ __ Push(holder_reg, name_reg);
1317
+ }
1318
+
1319
+ // Invoke an interceptor. Note: the map checks from the receiver to the
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
1322
+ CompileCallLoadPropertyWithInterceptor(masm(),
1323
+ receiver,
1324
+ holder_reg,
1325
+ name_reg,
1326
+ interceptor_holder);
1327
+
1328
+ // Check if interceptor provided a value for property. If it's
1329
+ // the case, return immediately.
1330
+ Label interceptor_failed;
1331
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
1332
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
1333
+ __ LeaveInternalFrame();
1334
+ __ Ret();
1335
+
1336
+ __ bind(&interceptor_failed);
1337
+ __ pop(name_reg);
1338
+ __ pop(holder_reg);
1339
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
1340
+ __ pop(receiver);
1341
+ }
1342
+
1343
+ __ LeaveInternalFrame();
1344
+
1345
+ // Check that the maps from interceptor's holder to lookup's holder
1346
+ // haven't changed. And load lookup's holder into |holder| register.
1347
+ if (interceptor_holder != lookup->holder()) {
1348
+ holder_reg = CheckPrototypes(interceptor_holder,
1349
+ holder_reg,
1350
+ lookup->holder(),
1351
+ scratch1,
1352
+ scratch2,
1353
+ scratch3,
1354
+ name,
1355
+ miss);
1356
+ }
1357
+
1358
+ if (lookup->type() == FIELD) {
1359
+ // We found FIELD property in prototype chain of interceptor's holder.
1360
+ // Retrieve a field from field's holder.
1361
+ GenerateFastPropertyLoad(masm(), v0, holder_reg,
1362
+ lookup->holder(), lookup->GetFieldIndex());
1363
+ __ Ret();
1364
+ } else {
1365
+ // We found CALLBACKS property in prototype chain of interceptor's
1366
+ // holder.
1367
+ ASSERT(lookup->type() == CALLBACKS);
1368
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
1369
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
1370
+ ASSERT(callback != NULL);
1371
+ ASSERT(callback->getter() != NULL);
1372
+
1373
+ // Tail call to runtime.
1374
+ // Important invariant in CALLBACKS case: the code above must be
1375
+ // structured to never clobber |receiver| register.
1376
+ __ li(scratch2, Handle<AccessorInfo>(callback));
1377
+ // holder_reg is either receiver or scratch1.
1378
+ if (!receiver.is(holder_reg)) {
1379
+ ASSERT(scratch1.is(holder_reg));
1380
+ __ Push(receiver, holder_reg);
1381
+ __ lw(scratch3,
1382
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1383
+ __ Push(scratch3, scratch2, name_reg);
1384
+ } else {
1385
+ __ push(receiver);
1386
+ __ lw(scratch3,
1387
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1388
+ __ Push(holder_reg, scratch3, scratch2, name_reg);
1389
+ }
1390
+
1391
+ ExternalReference ref =
1392
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
1393
+ masm()->isolate());
1394
+ __ TailCallExternalReference(ref, 5, 1);
1395
+ }
1396
+ } else { // !compile_followup_inline
1397
+ // Call the runtime system to load the interceptor.
1398
+ // Check that the maps haven't changed.
1399
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
1400
+ scratch1, scratch2, scratch3,
1401
+ name, miss);
1402
+ PushInterceptorArguments(masm(), receiver, holder_reg,
1403
+ name_reg, interceptor_holder);
1404
+
1405
+ ExternalReference ref = ExternalReference(
1406
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
1407
+ __ TailCallExternalReference(ref, 5, 1);
1408
+ }
1409
+ }
1410
+
1411
+
1412
+ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
1413
+ if (kind_ == Code::KEYED_CALL_IC) {
1414
+ __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
1415
+ }
1416
+ }
1417
+
1418
+
1419
+ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
1420
+ JSObject* holder,
1421
+ String* name,
1422
+ Label* miss) {
1423
+ ASSERT(holder->IsGlobalObject());
1424
+
1425
+ // Get the number of arguments.
1426
+ const int argc = arguments().immediate();
1427
+
1428
+ // Get the receiver from the stack.
1429
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
1430
+
1431
+ // If the object is the holder then we know that it's a global
1432
+ // object which can only happen for contextual calls. In this case,
1433
+ // the receiver cannot be a smi.
1434
+ if (object != holder) {
1435
+ __ JumpIfSmi(a0, miss);
1436
+ }
1437
+
1438
+ // Check that the maps haven't changed.
1439
+ CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
1440
+ }
1441
+
1442
+
1443
+ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
1444
+ JSFunction* function,
1445
+ Label* miss) {
1446
+ // Get the value from the cell.
1447
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
1448
+ __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
1449
+
1450
+ // Check that the cell contains the same function.
1451
+ if (heap()->InNewSpace(function)) {
1452
+ // We can't embed a pointer to a function in new space so we have
1453
+ // to verify that the shared function info is unchanged. This has
1454
+ // the nice side effect that multiple closures based on the same
1455
+ // function can all use this call IC. Before we load through the
1456
+ // function, we have to verify that it still is a function.
1457
+ __ JumpIfSmi(a1, miss);
1458
+ __ GetObjectType(a1, a3, a3);
1459
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
1460
+
1461
+ // Check the shared function info. Make sure it hasn't changed.
1462
+ __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
1463
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1464
+ __ Branch(miss, ne, t0, Operand(a3));
1465
+ } else {
1466
+ __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
1467
+ }
1468
+ }
1469
+
1470
+
1471
+ MaybeObject* CallStubCompiler::GenerateMissBranch() {
1472
+ MaybeObject* maybe_obj =
1473
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
1474
+ kind_,
1475
+ extra_ic_state_);
1476
+ Object* obj;
1477
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
1478
+ __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
1479
+ return obj;
1480
+ }
1481
+
1482
+
1483
+ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
1484
+ JSObject* holder,
1485
+ int index,
1486
+ String* name) {
1487
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ const int argc = arguments().immediate();
+
+ // Get the receiver of the function from the stack into a0.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a0, &miss, t0);
+
+ // Do the right check and compute the holder register.
+ Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+ GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
+ }
+
+
+ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ Register receiver = a1;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), receiver,
+ holder, a3, v0, t0, name, &miss);
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ } else {
+ Label call_builtin;
+
+ Register elements = a3;
+ Register end_elements = t1;
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ if (argc == 1) { // Otherwise fall through to call the builtin.
+ Label exit, with_write_barrier, attempt_to_grow_elements;
+
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+ __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+ __ Addu(end_elements, end_elements, kPointerSize);
+
+ // Check for a smi.
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+ __ bind(&exit);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&with_write_barrier);
+ __ InNewSpace(elements, t0, eq, &exit);
+ __ RecordWriteHelper(elements, end_elements, t0);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&attempt_to_grow_elements);
+ // v0: array's length + 1.
+ // t0: elements' length.
+
+ if (!FLAG_inline_new) {
+ __ Branch(&call_builtin);
+ }
+
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(
+ masm()->isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(
+ masm()->isolate());
+
+ const int kAllocationDelta = 4;
+ // Load top and check if it is the end of elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+ __ li(t3, Operand(new_space_allocation_top));
+ __ lw(t2, MemOperand(t3));
+ __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+ __ li(t5, Operand(new_space_allocation_limit));
+ __ lw(t5, MemOperand(t5));
+ __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+ __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ sw(t2, MemOperand(t3));
+ // Push the argument.
+ __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ sw(t2, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+ }
+
+ // Update elements' and array's sizes.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+ __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
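The push fast path above leans on V8's smi (small integer) representation: with kSmiTag == 0 and kSmiTagSize == 1, array lengths are stored pre-shifted, so a tagged length can be turned into a byte offset with a single shift (the `sll(..., kPointerSizeLog2 - kSmiTagSize)` above). A minimal standalone sketch of that arithmetic, assuming those tag constants and 4-byte pointers:

    // Minimal sketch, assuming kSmiTag == 0, kSmiTagSize == 1 and 32-bit
    // pointers as in the stub above; these constants are stand-ins.
    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const int kPointerSizeLog2 = 2;  // 4-byte pointers

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

    // A tagged length becomes a byte offset with one shift, which is what
    // sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize) computes.
    int32_t ByteOffsetFromSmiLength(int32_t smi_length) {
      return smi_length << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      int32_t len = SmiFromInt(3);
      assert(SmiToInt(len) == 3);
      assert(ByteOffsetFromSmiLength(len) == 3 * 4);  // 3 elements * 4 bytes
      return 0;
    }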
+ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss, return_undefined, call_builtin;
+
+ Register receiver = a1;
+ Register elements = a3;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object),
+ receiver, holder, elements, t0, v0, name, &miss);
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into t0 and calculate new length.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+ __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+ // Get the last element.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ // We can't address the last element in one operation. Compute the more
+ // expensive shift first, and use an offset later on.
+ __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements, elements, t1);
+ __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+ // Set the array's length.
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Fill with the hole.
+ __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&return_undefined);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
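The pop fast path refuses to pop a hole: a hole in the elements array may shadow a real element further up the prototype chain, so the stub defers to the generic builtin rather than returning it. A simplified sketch of that policy, with an integer sentinel standing in for the hole value (both fallback paths are collapsed into one result here):

    // Simplified sketch; kTheHole is a stand-in for the real hole value and
    // the return_undefined/call_builtin paths are collapsed into one.
    #include <cassert>
    #include <vector>

    const int kTheHole = -1;

    bool FastArrayPop(std::vector<int>* elements, int* length, int* result) {
      if (*length == 0) return false;              // return_undefined path
      int last = (*elements)[*length - 1];
      if (last == kTheHole) return false;          // call_builtin path
      (*elements)[*length - 1] = kTheHole;         // fill the vacated slot
      *length -= 1;                                // store the new length
      *result = last;
      return true;
    }

    int main() {
      std::vector<int> elements(4, kTheHole);
      elements[0] = 7;
      int length = 1, popped = 0;
      assert(FastArrayPop(&elements, &length, &popped) && popped == 7);
      assert(!FastArrayPop(&elements, &length, &popped));  // now empty
      return 0;
    }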
+ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = a1;
+ Register index = t1;
+ Register scratch = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
+ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = v0;
+ Register index = t1;
+ Register scratch1 = a1;
+ Register scratch2 = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
+ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the char code argument.
+ Register code = a1;
+ __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the code is a smi.
+ Label slow;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(code, &slow);
+
+ // Convert the smi code to uint16.
+ __ And(code, code, Operand(Smi::FromInt(0xffff)));
+
+ StringCharFromCodeGenerator char_from_code_generator(code, v0);
+ char_from_code_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ }
+
+
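The `And(code, code, Operand(Smi::FromInt(0xffff)))` above truncates the char code to uint16 without untagging: because the smi tag occupies the low bit and is zero, masking a tagged value with a tagged mask yields a correctly tagged result. A small sketch, assuming the same tag layout:

    // Sketch, assuming kSmiTag == 0 and kSmiTagSize == 1 as above.
    #include <cassert>
    #include <cstdint>

    int32_t SmiFromInt(int32_t v) { return v << 1; }
    int32_t SmiToInt(int32_t s) { return s >> 1; }

    int main() {
      int32_t code = SmiFromInt(0x12345);
      // Equivalent of: __ And(code, code, Operand(Smi::FromInt(0xffff)));
      code &= SmiFromInt(0xffff);
      assert(SmiToInt(code) == (0x12345 & 0xffff));  // truncated, still a smi
      return 0;
    }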
+ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ if (!CpuFeatures::IsSupported(FPU))
+ return heap()->undefined_value();
+ CpuFeatures::Scope scope_fpu(FPU);
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss, slow;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // If the argument is a smi, just return.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+ __ Ret(eq, t0, Operand(zero_reg));
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+ Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+ // If fpu is enabled, we use the floor instruction.
+
+ // Load the HeapNumber value.
+ __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+ // Backup FCSR.
+ __ cfc1(a3, FCSR);
+ // Clearing FCSR clears the exception mask with no side-effects.
+ __ ctc1(zero_reg, FCSR);
+ // Convert the argument to an integer.
+ __ floor_w_d(f0, f0);
+
+ // Start checking for special cases.
+ // Get the argument exponent and clear the sign bit.
+ __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+ __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+ __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
+
+ // Retrieve FCSR and check for fpu errors.
+ __ cfc1(t5, FCSR);
+ __ srl(t5, t5, kFCSRFlagShift);
+ // Flag 1 marks an inaccurate but still good result so we ignore it.
+ __ And(t5, t5, Operand(kFCSRFlagMask ^ 1));
+ __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+ // Check for NaN, Infinity, and -Infinity.
+ // They are invariant through a Math.floor call, so just
+ // return the original argument.
+ __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+ >> HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+ // We had an overflow or underflow in the conversion. Check if we
+ // have a big exponent.
+ // If greater or equal, the argument is already rounded and in v0.
+ __ Branch(&restore_fcsr_and_return, ge, t3,
+ Operand(HeapNumber::kMantissaBits));
+ __ Branch(&wont_fit_smi);
+
+ __ bind(&no_fpu_error);
+ // Move the result back to v0.
+ __ mfc1(v0, f0);
+ // Check if the result fits into a smi.
+ __ Addu(a1, v0, Operand(0x40000000));
+ __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
+ // Tag the result.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(v0, v0, kSmiTagSize);
+
+ // Check for -0.
+ __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+ // t1 already holds the HeapNumber exponent.
+ __ And(t0, t1, Operand(HeapNumber::kSignMask));
+ // If our HeapNumber is negative it was -0, so load its address and return.
+ // Else v0 is loaded with 0, so we can also just return.
+ __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ __ bind(&restore_fcsr_and_return);
+ // Restore FCSR and return.
+ __ ctc1(a3, FCSR);
+
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&wont_fit_smi);
+ // Restore FCSR and fall through to the slow case.
+ __ ctc1(a3, FCSR);
+
+ __ bind(&slow);
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ }
+
+
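The smi-fit test above (`Addu(a1, v0, Operand(0x40000000))` followed by a branch on the sign) works because a 32-bit value fits a 31-bit smi exactly when it lies in [-2^30, 2^30 - 1]; adding 2^30 with wrap-around makes the sum negative precisely outside that range. A sketch of the check, assuming the two's-complement wrap-around that Addu provides:

    // Sketch of the range test; uses unsigned addition to get the same
    // wrap-around the MIPS Addu instruction provides.
    #include <cassert>
    #include <cstdint>

    bool FitsSmi(int32_t v) {
      int32_t probe = static_cast<int32_t>(static_cast<uint32_t>(v) +
                                           0x40000000u);  // like __ Addu
      return probe >= 0;  // negative => wont_fit_smi
    }

    int main() {
      assert(FitsSmi(0) && FitsSmi((1 << 30) - 1) && FitsSmi(-(1 << 30)));
      assert(!FitsSmi(1 << 30) && !FitsSmi(-(1 << 30) - 1));
      return 0;
    }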
+ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(v0, &not_smi);
+
+ // Do bitwise not or do nothing depending on the sign of the
+ // argument.
+ __ sra(t0, v0, kBitsPerInt - 1);
+ __ Xor(a1, v0, t0);
+
+ // Add 1 or do nothing depending on the sign of the argument.
+ __ Subu(v0, a1, t0);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+ // Smi case done.
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign.
+ __ bind(&not_smi);
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+ __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ And(t0, a1, Operand(HeapNumber::kSignMask));
+ __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+ __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+ __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ }
+
+
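The smi branch above is the classic branchless absolute value: an arithmetic right shift produces 0 or -1, the xor conditionally complements, and the subtract adds back the 1. The most negative value overflows and stays negative, which is exactly why the stub re-checks the sign and falls through to the slow case. A sketch (relying on arithmetic shift semantics, as `sra` guarantees on MIPS):

    // Sketch; assumes >> on a negative int is an arithmetic shift, which is
    // what the sra instruction guarantees.
    #include <cassert>
    #include <cstdint>

    int32_t BranchlessAbs(int32_t x) {
      int32_t sign = x >> 31;    // __ sra(t0, v0, kBitsPerInt - 1): 0 or -1
      return (x ^ sign) - sign;  // __ Xor then __ Subu
    }

    int main() {
      assert(BranchlessAbs(5) == 5);
      assert(BranchlessAbs(-5) == 5);
      assert(BranchlessAbs(INT32_MIN) == INT32_MIN);  // overflow: slow case
      return 0;
    }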
+ MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+
+ Counters* counters = isolate()->counters();
+
+ ASSERT(optimization.is_simple_api_call());
+ // Bail out if object is a global object as we don't want to
+ // repatch it to the global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+ __ IncrementCounter(counters->call_const(), 1, a0, a3);
+ __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+ ReserveSpaceForFastApiCall(masm(), a0);
+
+ // Check that the maps haven't changed and find the holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ depth, &miss);
+
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ FreeSpaceForFastApiCall(masm());
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
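The chain of early returns above gates the fast API path: it is only attempted for uncached calls on non-global JSObject receivers whose expected-type prototype depth is known. A condensed restatement of that policy, with hypothetical stand-in types rather than the real V8 API:

    // Condensed sketch; Obj and the sentinel are hypothetical stand-ins.
    #include <cassert>
    #include <cstddef>

    struct Obj { bool is_global; bool is_js_object; };
    const int kInvalidProtoDepth = -1;

    bool CanUseFastApiCall(const Obj& object, const void* cell, int depth) {
      if (object.is_global) return false;   // would need receiver repatching
      if (cell != NULL) return false;       // cached in a global property cell
      if (!object.is_js_object) return false;
      return depth != kInvalidProtoDepth;   // prototype depth must be known
    }

    int main() {
      Obj plain = { false, true };
      assert(CanUseFastApiCall(plain, NULL, 1));
      assert(!CanUseFastApiCall(plain, NULL, kInvalidProtoDepth));
      return 0;
    }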
+ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, NULL, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ if (check != NUMBER_CHECK) {
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t1, Operand(zero_reg));
+ }
+
+ // Make sure that it's okay not to patch the on-stack receiver
+ // unless we're doing a receiver map check.
+ ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+ SharedFunctionInfo* function_info = function->shared();
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+ 1, a0, a3);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+ break;
+
+ case STRING_CHECK:
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a string or a symbol.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+
+ case NUMBER_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&fast, eq, t1, Operand(zero_reg));
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ case BOOLEAN_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+ }
+
+
+ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ CallInterceptorCompiler compiler(this, arguments(), a2);
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ a1,
+ a3,
+ t0,
+ a0,
+ &miss);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // Move returned value, the function to call, to a1.
+ __ mov(a1, v0);
+ // Restore receiver.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+ }
+
+
+ MaybeObject* CallStubCompiler::CompileCallGlobal(
+ JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, cell, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+
+ // Setup the context (the function is already in a1).
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+ JUMP_FUNCTION, call_kind);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+ }
+
+
+ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Name register might be clobbered.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a1, a2, a3,
+ &miss);
+ __ bind(&miss);
+ __ li(a2, Operand(Handle<String>(name))); // Restore name.
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ }
+
+
+ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ push(a1); // Receiver.
+ __ li(a3, Operand(Handle<AccessorInfo>(callback))); // Callback info.
+ __ Push(a3, a2, a0);
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_callback_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ Push(a1, a2, a0); // Receiver, name, value.
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+ __ push(a0); // Strict mode.
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_ic_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+ }
+
+
+ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the global has not changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&miss, eq, t1, Operand(t2));
+
+ // Store the value in the cell.
+ __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ mov(v0, a0); // Stored value must be returned in v0.
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+ __ Ret();
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+ }
+
+
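The hole value doubles as a "deleted" marker for global property cells: a fast store into a holed cell must miss so the runtime can rebuild the property details in the global object's dictionary. A sketch of that policy, with an integer sentinel standing in for the hole:

    // Sketch; kTheHole is an integer stand-in for the real hole value.
    #include <cassert>

    const int kTheHole = -1;

    bool FastStoreGlobal(int* cell_value, int new_value) {
      if (*cell_value == kTheHole) return false;  // miss: property deleted
      *cell_value = new_value;                    // store straight to the cell
      return true;
    }

    int main() {
      int cell = 42;
      assert(FastStoreGlobal(&cell, 7) && cell == 7);
      cell = kTheHole;
      assert(!FastStoreGlobal(&cell, 9));  // falls back to the runtime
      return 0;
    }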
+ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+ JSObject* object,
+ JSObject* last) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the receiver is not a smi.
+ __ JumpIfSmi(a0, &miss);
+
+ // Check the maps of the full prototype chain.
+ CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (last->IsGlobalObject()) {
+ MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+ GlobalObject::cast(last),
+ name,
+ a1,
+ &miss);
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
+ }
+
+ // Return undefined if the maps of the full prototype chain are still the
+ // same.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NONEXISTENT, heap()->empty_string());
+ }
+
+
+ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ __ mov(v0, a0);
+
+ GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
+ }
+
+
+ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+ callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+ }
+
+
+ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(object,
+ holder,
+ &lookup,
+ a0,
+ a2,
+ a3,
+ a1,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+ }
+
+
+ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // If the object is the holder then we know that it's a global
+ // object which can only happen for contextual calls. In this case,
+ // the receiver cannot be a smi.
+ if (object != holder) {
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t0, Operand(zero_reg));
+ }
+
+ // Check that the map of the global has not changed.
+ CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+ // Get the value from the cell.
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
+ }
+
+ __ mov(v0, t0);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(FIELD, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+ t0, callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ a1,
+ a0,
+ a2,
+ a3,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(INTERCEPTOR, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadArrayLength(masm(), a1, a2, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ // Check that the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+ // Check that the name hasn't changed.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+ Code* stub;
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a1,
+ a2,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+ }
+
+
+ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a1, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ }
+
+
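The megamorphic stub above is a linear dispatch table: compare the receiver's map against each cached map in turn and tail-call the matching handler, or fall back to the generic miss stub. A sketch of the same shape, with opaque pointers standing in for maps and code objects:

    // Sketch of the dispatch shape; maps and handlers are opaque stand-ins.
    #include <cstddef>
    #include <vector>

    typedef void (*Handler)();

    void Dispatch(const void* receiver_map,
                  const std::vector<const void*>& maps,
                  const std::vector<Handler>& handlers,
                  Handler miss) {
      for (size_t i = 0; i < maps.size(); ++i) {
        if (maps[i] == receiver_map) {
          handlers[i]();  // __ Jump(code, ..., eq, a2, Operand(map))
          return;
        }
      }
      miss();  // __ Jump(miss_ic, RelocInfo::CODE_TARGET)
    }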
+ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+ // Check that the name has not changed.
+ __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+ // a3 is used as scratch register. a1 and a2 keep their values if a jump to
+ // the miss label is generated.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a2, a1, a3,
+ &miss);
+ __ bind(&miss);
+
+ __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+ Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ }
+
+
+ MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+ Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreFastElementStub(is_js_array).TryGetCode();
+ Code* stub;
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a2,
+ a3,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+ }
+
+
+ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ }
+
+
+ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+ // a0 : argc
+ // a1 : constructor
+ // ra : return address
+ // [sp] : last argument
+ Label generic_stub_call;
+
+ // Use t7 for holding undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ #ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check to see whether there are any break points in the function code. If
+ // there are, jump to the generic constructor stub which calls the actual
+ // code for the function, thereby hitting the break points.
+ __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+ #endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ // t7: undefined
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+ #ifdef DEBUG
+ // Cannot construct functions this way.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Check(ne, "Function constructed by construct stub.",
+ a3, Operand(JS_FUNCTION_TYPE));
+ #endif
+
+ // Now allocate the JSObject in new space.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3,
+ t4,
+ t5,
+ t6,
+ &generic_stub_call,
+ SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. The map is set to the
+ // initial map, and properties and elements are set to the empty fixed array.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t7: undefined
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Calculate the location of the first argument. The stack contains only the
+ // argc arguments.
+ __ sll(a1, a0, kPointerSizeLog2);
+ __ Addu(a1, a1, sp);
+
+ // Fill all the in-object properties with undefined.
+ // a0: argc
+ // a1: first argument
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: first in-object property of the JSObject (not tagged)
+ // t7: undefined
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ SharedFunctionInfo* shared = function->shared();
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed, next;
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+ // Argument passed - find it on the stack.
+ __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+ __ sw(a2, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ __ jmp(&next);
+ __ bind(&not_passed);
3331
+ // Set the property to undefined.
3332
+ __ sw(t7, MemOperand(t5));
3333
+ __ Addu(t5, t5, Operand(kPointerSize));
3334
+ __ bind(&next);
3335
+ } else {
3336
+ // Set the property to the constant value.
3337
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
3338
+ __ li(a2, Operand(constant));
3339
+ __ sw(a2, MemOperand(t5));
3340
+ __ Addu(t5, t5, kPointerSize);
3341
+ }
3342
+ }
3343
+
3344
+ // Fill the unused in-object property fields with undefined.
3345
+ ASSERT(function->has_initial_map());
3346
+ for (int i = shared->this_property_assignments_count();
3347
+ i < function->initial_map()->inobject_properties();
3348
+ i++) {
3349
+ __ sw(t7, MemOperand(t5));
3350
+ __ Addu(t5, t5, kPointerSize);
3351
+ }
3352
+
3353
+ // a0: argc
3354
+ // t4: JSObject (not tagged)
3355
+ // Move argc to a1 and the JSObject to return to v0 and tag it.
3356
+ __ mov(a1, a0);
3357
+ __ mov(v0, t4);
3358
+ __ Or(v0, v0, Operand(kHeapObjectTag));
3359
+
3360
+ // v0: JSObject
3361
+ // a1: argc
3362
+ // Remove caller arguments and receiver from the stack and return.
3363
+ __ sll(t0, a1, kPointerSizeLog2);
3364
+ __ Addu(sp, sp, t0);
3365
+ __ Addu(sp, sp, Operand(kPointerSize));
3366
+ Counters* counters = masm()->isolate()->counters();
3367
+ __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
3368
+ __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
3369
+ __ Ret();
3370
+
3371
+ // Jump to the generic stub in case the specialized code cannot handle the
3372
+ // construction.
3373
+ __ bind(&generic_stub_call);
3374
+ Handle<Code> generic_construct_stub =
3375
+ masm()->isolate()->builtins()->JSConstructStubGeneric();
3376
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3377
+
3378
+ // Return the generated code.
3379
+ return GetCode();
3380
+ }
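
As a worked example of the tagging and stack arithmetic at the end of this stub (a hedged sketch of the 32-bit scheme the constants imply, not V8 code): kSmiTag == 0 with kSmiTagSize == 1 means a smi is the integer shifted left once, kHeapObjectTag == 1 means a heap pointer carries its low bit set, and the two `Addu(sp, ...)` instructions pop argc arguments plus the receiver slot.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kHeapObjectTag = 1;   // low bit set on heap pointers
    constexpr int kSmiTagSize = 1;           // smis are value << 1, low bit clear
    constexpr int kPointerSize = 4;          // 32-bit MIPS
    constexpr int kPointerSizeLog2 = 2;

    uint32_t TagHeapObject(uint32_t addr) {
      return addr | kHeapObjectTag;          // mirrors `Or(v0, v0, kHeapObjectTag)`
    }
    int32_t TagSmi(int32_t v) {
      return v << kSmiTagSize;               // mirrors `sll(v0, value, kSmiTagSize)`
    }
    // Bytes popped on return: argc arguments plus the receiver slot.
    uint32_t StackBytesToPop(uint32_t argc) {
      return (argc << kPointerSizeLog2) + kPointerSize;
    }

    int main() {
      assert(TagHeapObject(0x10000) == 0x10001);
      assert(TagSmi(21) == 42);
      assert(StackBytesToPop(3) == 16);      // 3 args * 4 bytes + receiver
    }
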
+
+
+ MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+ JSObject* receiver, ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ MaybeObject* maybe_stub =
+ KeyedLoadExternalArrayStub(array_type).TryGetCode();
+ Code* stub;
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a1,
+ a2,
+ Handle<Map>(receiver->map()),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+ }
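
The `TryGetCode()` / `To(&stub)` pattern above threads allocation failures out of the compiler: if the stub could not be materialized, the failure object itself is returned to the caller. A rough analogue using a Result-style type (illustrative only; V8's real MaybeObject is a tagged pointer, not a template):

    #include <cassert>

    // Toy stand-ins: a "maybe" either holds a value or a failure marker.
    template <typename T>
    struct Maybe {
      T value;
      bool failed;
      // Mirrors MaybeObject::To(&out): true on success, false on failure.
      bool To(T* out) const { if (failed) return false; *out = value; return true; }
    };

    struct Code { int id; };

    Maybe<Code> TryGetCode(bool simulate_oom) {
      if (simulate_oom) return {Code{}, true};
      return {Code{42}, false};
    }

    Maybe<Code> CompileLoad(bool simulate_oom) {
      Maybe<Code> maybe_stub = TryGetCode(simulate_oom);
      Code stub;
      // Same shape as `if (!maybe_stub->To(&stub)) return maybe_stub;`
      if (!maybe_stub.To(&stub)) return maybe_stub;
      return {stub, false};
    }

    int main() {
      Code c;
      assert(CompileLoad(false).To(&c) && c.id == 42);  // success path
      assert(!CompileLoad(true).To(&c));                // failure propagated
    }
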
+
+
+ MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+ JSObject* receiver, ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : name
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ MaybeObject* maybe_stub =
+ KeyedStoreExternalArrayStub(array_type).TryGetCode();
+ Code* stub;
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a2,
+ a3,
+ Handle<Map>(receiver->map()),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ return GetCode();
+ }
+
+
+ #undef __
+ #define __ ACCESS_MASM(masm)
+
+
+ static bool IsElementTypeSigned(ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalShortArray:
+ case kExternalIntArray:
+ return true;
+
+ case kExternalUnsignedByteArray:
+ case kExternalUnsignedShortArray:
+ case kExternalUnsignedIntArray:
+ return false;
+
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
+
+ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow, failed_allocation;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // a3: elements array
+
+ // Check that the index is in range.
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ __ sra(t2, key, kSmiTagSize);
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Uless, t1, Operand(t2));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+ // a3: base pointer of external storage
+
+ // We do not untag the smi key; instead we work with it
+ // as if it were premultiplied by 2.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+ Register value = a2;
+ switch (array_type) {
+ case kExternalByteArray:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lb(value, MemOperand(t3, 0));
+ break;
+ case kExternalPixelArray:
+ case kExternalUnsignedByteArray:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lbu(value, MemOperand(t3, 0));
+ break;
+ case kExternalShortArray:
+ __ addu(t3, a3, key);
+ __ lh(value, MemOperand(t3, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ addu(t3, a3, key);
+ __ lhu(value, MemOperand(t3, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ sll(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lw(value, MemOperand(t3, 0));
+ break;
+ case kExternalFloatArray:
+ __ sll(t3, t2, 2); // t2 still holds the untagged key from the range check.
+ __ addu(t3, a3, t3);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ lwc1(f0, MemOperand(t3, 0));
+ } else {
+ __ lw(value, MemOperand(t3, 0));
+ }
+ break;
+ case kExternalDoubleArray:
+ __ sll(t2, key, 2);
+ __ addu(t3, a3, t2);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f0, MemOperand(t3, 0));
+ } else {
+ // t3: pointer to the beginning of the double we want to load.
+ __ lw(a2, MemOperand(t3, 0));
+ __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // a2: value
+ // For float array type:
+ // f0: value (if FPU is supported)
+ // a2: value (if FPU is not supported)
+ // For double array type:
+ // f0: value (if FPU is supported)
+ // a2/a3: value (if FPU is not supported)
+
+ if (array_type == kExternalIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
+ __ Branch(&box_int, lt, t3, Operand(zero_reg));
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion.
+ // The ARM version uses a temporary here to save r0, but we don't need to
+ // (a0 is not modified).
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(value, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ Register dst1 = t2;
+ Register dst2 = t3;
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ value,
+ dest,
+ f0,
+ dst1,
+ dst2,
+ t1,
+ f2);
+ __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+ } else if (array_type == kExternalUnsignedIntArray) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label pl_box_int;
+ __ And(t2, value, Operand(0xC0000000));
+ __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+ // It can fit in a Smi.
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&pl_box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+ // registers - also when jumping due to exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+ // This is replaced by a macro:
+ // __ mtc1(value, f0); // LS 32-bits.
+ // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
+ // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
+
+ __ Cvt_d_uw(f0, value);
+
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+ __ Ret();
+ } else {
+ // Check whether unsigned integer fits into smi.
+ Label box_int_0, box_int_1, done;
+ __ And(t2, value, Operand(0x80000000));
+ __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+ __ And(t2, value, Operand(0x40000000));
+ __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ Register hiword = value; // a2.
+ Register loword = a3;
+
+ __ bind(&box_int_0);
+ // Integer does not have leading zeros.
+ GenerateUInt2Double(masm, hiword, loword, t0, 0);
+ __ Branch(&done);
+
+ __ bind(&box_int_1);
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+ __ bind(&done);
+ // Integer was converted to double in registers hiword:loword.
+ // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+ // clobbers all registers - also when jumping due to exhausted young
+ // space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+ __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+ __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+ __ mov(v0, t2);
+ __ Ret();
+ }
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // The float (single) value is already in fpu reg f0 (if we use float).
+ __ cvt_d_s(f0, f0);
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // FPU is not available; do a manual single-to-double conversion.
+
+ // a2: floating point value (binary32).
+ // v0: heap number for result
+
+ // Extract mantissa to t4.
+ __ And(t4, value, Operand(kBinary32MantissaMask));
+
+ // Extract exponent to t5.
+ __ srl(t5, value, kBinary32MantissaBits);
+ __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+ __ li(t0, 0x7ff);
+ __ Xor(t1, t5, Operand(0xFF));
+ __ movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
+ __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg)); // Exponent was 0xff.
+
+ // Rebias exponent.
+ __ Addu(t5,
+ t5,
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ And(a2, value, Operand(kBinary32SignMask));
+ value = no_reg;
+ __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+ __ or_(a2, a2, t0);
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ srl(t0, t4, kMantissaShiftForHiWord);
+ __ or_(a2, a2, t0);
+ __ sll(a0, t4, kMantissaShiftForLoWord);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Ret();
+ }
+
+ } else if (array_type == kExternalDoubleArray) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // The double value is already in f0.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+
+ } else {
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+ }
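
The two smi-range tests above are worth spelling out (a sketch under the 32-bit smi layout, where a smi payload must fit in 31 signed bits, i.e. -2^30 .. 2^30-1). For a signed word, `value - 0xC0000000` computed modulo 2^32 (i.e. `value + 2^30`) is non-negative exactly when the value is in smi range; for an unsigned word, the value fits a non-negative smi exactly when the top two bits are clear:

    #include <cassert>
    #include <cstdint>

    // Signed test, mirroring `Subu(t3, value, 0xC0000000)` + branch on sign:
    // value + 2^30 stays in [0, 2^31) exactly for -2^30 <= value <= 2^30 - 1.
    bool SignedFitsSmi(int32_t value) {
      uint32_t diff = static_cast<uint32_t>(value) - 0xC0000000u;
      return static_cast<int32_t>(diff) >= 0;
    }

    // Unsigned test, mirroring `And(t2, value, 0xC0000000)` + branch on nonzero.
    bool UnsignedFitsSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }

    int main() {
      assert(SignedFitsSmi(0) && SignedFitsSmi((1 << 30) - 1));
      assert(SignedFitsSmi(-(1 << 30)));
      assert(!SignedFitsSmi(1 << 30) && !SignedFitsSmi(INT32_MIN));
      assert(UnsignedFitsSmi(0x3FFFFFFFu));
      assert(!UnsignedFitsSmi(0x40000000u) && !UnsignedFitsSmi(0x80000000u));
    }
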
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ }
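
The non-FPU float path above converts binary32 to binary64 purely with integer ops: re-bias the 8-bit exponent from 127 to 1023, split the 23-bit mantissa across the two result words, and copy the sign. A compact sketch of the normal-number case (subnormals, infinities and NaNs take the special branches above; names here are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Convert an IEEE binary32 bit pattern to binary64 bits, normal numbers only.
    uint64_t Binary32ToBinary64(uint32_t bits) {
      uint64_t sign = static_cast<uint64_t>(bits >> 31) << 63;
      uint64_t exponent = (bits >> 23) & 0xFF;   // biased by 127
      uint64_t mantissa = bits & 0x7FFFFF;       // 23 bits
      exponent = exponent - 127 + 1023;          // re-bias, as the stub does
      return sign | (exponent << 52) | (mantissa << (52 - 23));
    }

    int main() {
      const float tests[] = {3.5f, -0.25f, 1.0f};
      for (float f : tests) {
        uint32_t in;
        std::memcpy(&in, &f, sizeof in);
        double d = static_cast<double>(f);
        uint64_t expected;
        std::memcpy(&expected, &d, sizeof expected);
        assert(Binary32ToBinary64(in) == expected);
      }
    }
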
+
+
+ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, check_heap_number, miss_force_generic;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ // a3 mostly holds the elements array or the destination external array.
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ // Check that the index is in range.
+ __ SmiUntag(t0, key);
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Ugreater_equal, t0, Operand(t1));
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // a3: external array.
+ // t0: key (integer).
+
+ if (array_type == kExternalPixelArray) {
+ // Double to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(value, &slow);
+ } else {
+ __ JumpIfNotSmi(value, &check_heap_number);
+ }
+ __ SmiUntag(t1, value);
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+ // t1: value (integer).
+
+ switch (array_type) {
+ case kExternalPixelArray: {
+ // Clamp the value to [0..255].
+ // v0 is used as a scratch register here.
+ Label done;
+ __ li(v0, Operand(255));
+ // Normal branch: nop in delay slot.
+ __ Branch(&done, gt, t1, Operand(v0));
+ // Use delay slot in this branch.
+ __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+ __ mov(v0, zero_reg); // In delay slot.
+ __ mov(v0, t1); // Value is in range 0..255.
+ __ bind(&done);
+ __ mov(t1, v0);
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ }
+ break;
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t1, MemOperand(t8, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t1, MemOperand(t8, 0));
+ break;
+ case kExternalFloatArray:
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+ break;
+ case kExternalDoubleArray:
+ __ sll(t8, t0, 3);
+ __ addu(a3, a3, t8);
+ // a3: effective address of the double element
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, t1, destination,
+ f0, t2, t3, // These are: double_dst, dst1, dst2.
+ t0, f2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(a3, 0));
+ } else {
+ __ sw(t2, MemOperand(a3, 0));
+ __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Entry registers are intact; a0 holds the value, which is the return value.
+ __ mov(v0, value);
+ __ Ret();
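
The pixel-array case above clamps the untagged integer to [0..255] before the byte store, using conditional branches and their delay slots rather than arithmetic. The equivalent scalar logic, as a sketch:

    #include <cassert>
    #include <cstdint>

    // Same clamping the stub performs before `sb(t1, MemOperand(t8, 0))`.
    uint8_t ClampToPixel(int32_t value) {
      if (value > 255) return 255;   // `Branch(&done, gt, t1, Operand(v0))`
      if (value < 0) return 0;       // `Branch(..., lt, t1, Operand(zero_reg))`
      return static_cast<uint8_t>(value);
    }

    int main() {
      assert(ClampToPixel(-7) == 0);
      assert(ClampToPixel(128) == 128);
      assert(ClampToPixel(300) == 255);
    }
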
+
+ if (array_type != kExternalPixelArray) {
+ // a3: external array.
+ // t0: index (integer).
+ __ bind(&check_heap_number);
+ __ GetObjectType(value, t1, t2);
+ __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+ if (array_type == kExternalFloatArray) {
+ __ cvt_s_d(f0, f0);
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ swc1(f0, MemOperand(t8, 0));
+ } else if (array_type == kExternalDoubleArray) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ __ sdc1(f0, MemOperand(t8, 0));
+ } else {
+ Label done;
+
+ // Need to perform float-to-int conversion.
+ // Test whether the exponent equals 0x7FF (infinity or NaN).
+
+ __ mfc1(t3, f1); // Move exponent word of double to t3 (as raw bits).
+ __ li(t1, Operand(0x7FF00000));
+ __ And(t3, t3, Operand(t1));
+ __ Branch(USE_DELAY_SLOT, &done, eq, t3, Operand(t1));
+ __ mov(t3, zero_reg); // In delay slot.
+
+ // Not infinity or NaN; simply convert to int.
+ if (IsElementTypeSigned(array_type)) {
+ __ trunc_w_d(f0, f0);
+ __ mfc1(t3, f0);
+ } else {
+ __ Trunc_uw_d(f0, t3);
+ }
+
+ // t3: HeapNumber converted to integer.
+ __ bind(&done);
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // Entry registers are intact; a0 holds the value,
+ // which is the return value.
+ __ mov(v0, value);
+ __ Ret();
+ } else {
+ // FPU is not available; do manual conversions.
+
+ __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (array_type == kExternalFloatArray) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+ __ xor_(t1, t6, t5);
+ __ li(t2, kBinary32ExponentMask);
+ __ movz(t6, t2, t1); // Only if t6 is equal to t5.
+ __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg)); // Exponent was all ones.
+
+ // Rebias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Addu(t6,
+ t6,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ li(t1, Operand(kBinary32MaxExponent));
+ __ Slt(t1, t1, t6);
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ Or(t2, t2, Operand(kBinary32ExponentMask));
+ __ movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
+ __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+ __ Slt(t1, t6, Operand(kBinary32MinExponent));
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
+ __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
+
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t7, t7, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t7, t7, t4);
+ __ sll(t6, t6, kBinary32ExponentShift);
+ __ or_(t3, t7, t6);
+
+ __ bind(&done);
+ // t0 is the untagged key and a3 the external storage base (see above).
+ __ sll(t9, t0, 2);
+ __ addu(t9, a3, t9);
+ __ sw(t3, MemOperand(t9, 0));
+
+ // Entry registers are intact; a0 holds the value, which is the return
+ // value.
+ __ mov(v0, value);
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ or_(t6, t6, t7);
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t6, t6, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t3, t6, t4);
+ __ Branch(&done);
+ } else if (array_type == kExternalDoubleArray) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ // t8: effective address of destination element.
+ __ sw(t4, MemOperand(t8, 0));
+ __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+ __ Ret();
+ } else {
+ bool is_signed_type = IsElementTypeSigned(array_type);
+ int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
+ __ Branch(&done, eq, t6, Operand(zero_reg));
+
+ __ xor_(t2, t6, t5);
+ __ movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
+ __ Branch(&done, eq, t6, Operand(t5));
+
+ // Unbias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+ // If the exponent is negative, the result is 0.
+ __ slt(t2, t6, zero_reg);
+ __ movn(t3, zero_reg, t2); // Only if exponent is negative.
+ __ Branch(&done, lt, t6, Operand(zero_reg));
+
+ // If the exponent is too big, the result is the minimal value.
+ __ slti(t1, t6, meaningfull_bits - 1);
+ __ li(t2, min_value);
+ __ movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
+ __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
+
+ __ And(t5, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(t6, t9, t6);
+ __ slt(t1, t6, zero_reg);
+ __ srlv(t2, t3, t6);
+ __ movz(t3, t2, t1); // Only if t6 is non-negative.
+ __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+ __ subu(t6, zero_reg, t6);
+ __ sllv(t3, t3, t6);
+ __ li(t9, meaningfull_bits);
+ __ subu(t6, t9, t6);
+ __ srlv(t4, t4, t6);
+ __ or_(t3, t3, t4);
+
+ __ bind(&sign);
+ __ subu(t2, zero_reg, t3);
+ __ movn(t3, t2, t5); // Negate the result only if the sign bit (t5) is set.
+
+ __ bind(&done);
+
+ // Result is in t3.
+ // This switch block should be exactly the same as above (FPU mode).
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ }
+
+ // Slow case: value, key and receiver are still in a0, a1 and a2.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case: call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ }
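
Per the WebGL note above, the store path converts NaN and +/-Infinity to zero before truncating to an integer element; the exponent mask 0x7FF00000 in the high word is what identifies those values. A hedged C++ equivalent of that policy for a signed 32-bit element:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Truncate toward zero, mapping NaN and +/-Infinity to 0, as the stub does.
    int32_t DoubleToInt32ForTypedStore(double value) {
      if (!std::isfinite(value)) return 0;  // exponent == 0x7FF => NaN or Inf
      return static_cast<int32_t>(value);   // `trunc_w_d` semantics, in-range values
    }

    int main() {
      assert(DoubleToInt32ForTypedStore(2.9) == 2);
      assert(DoubleToInt32ForTypedStore(-2.9) == -2);
      assert(DoubleToInt32ForTypedStore(NAN) == 0);
      assert(DoubleToInt32ForTypedStore(INFINITY) == 0);
    }
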
+
+
+ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a0, &miss_force_generic);
+
+ // Get the elements array.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ AssertFastElements(a2);
+
+ // Check that the key is within bounds.
+ __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+ // Load the result and make sure it's not the hole.
+ __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, t0, a3);
+ __ lw(t0, MemOperand(t0));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+ __ mov(v0, t0);
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ }
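
Two idioms above recur in all the fast-element stubs: the bounds check compares the smi key against the smi length with an unsigned condition (`hs`), so a negative key, viewed as unsigned, is huge and fails too; and the element byte offset is formed with a single shift by kPointerSizeLog2 - kSmiTagSize, because the smi key already carries a factor of two from its tag. A small sketch:

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;
    constexpr int kPointerSizeLog2 = 2;   // 4-byte pointers on 32-bit MIPS

    // Unsigned compare of two smis: catches negative and too-large keys at once.
    bool KeyInBounds(int32_t key_smi, int32_t length_smi) {
      return static_cast<uint32_t>(key_smi) < static_cast<uint32_t>(length_smi);
    }

    // Byte offset of element `key` given the key is a smi (value << 1):
    // shifting by (log2(pointer size) - smi tag size) yields index * 4.
    uint32_t ElementByteOffset(int32_t key_smi) {
      return static_cast<uint32_t>(key_smi) << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      int32_t length_smi = 10 << kSmiTagSize;              // array length 10
      assert(KeyInBounds(3 << kSmiTagSize, length_smi));
      assert(!KeyInBounds(10 << kSmiTagSize, length_smi)); // == length
      int32_t negative_key_smi = -1 * (1 << kSmiTagSize);  // smi encoding of -1
      assert(!KeyInBounds(negative_key_smi, length_smi));  // negative key
      assert(ElementByteOffset(3 << kSmiTagSize) == 12);   // index 3 -> 12 bytes
    }
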
+
+
+ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -- t0 : scratch (elements)
+ // -----------------------------------
+ Label miss_force_generic;
+
+ Register value_reg = a0;
+ Register key_reg = a1;
+ Register receiver_reg = a2;
+ Register scratch = a3;
+ Register elements_reg = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not 'cow'.
+ __ lw(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+ // Compare smis.
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+ __ Addu(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch3, scratch2, scratch);
+ __ sw(value_reg, MemOperand(scratch3));
+ __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
+ // value_reg (a0) is preserved.
+ // Done.
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+ }
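
The `RecordWrite` after the store is the generational write barrier: storing a possibly new-space value into an old-space elements array must record the slot so the scavenger can find the cross-generation pointer. A toy sketch of the idea with a remembered set (purely illustrative; V8's actual barrier marks pages, not a std::set):

    #include <cassert>
    #include <set>

    struct Object { bool in_new_space; };

    // Toy remembered set: slots in old space that may point into new space.
    std::set<Object**> remembered_slots;

    void WriteWithBarrier(Object** slot, Object* value, bool slot_in_new_space) {
      *slot = value;                                   // the `sw` above
      if (!slot_in_new_space && value->in_new_space)   // old -> new pointer created
        remembered_slots.insert(slot);                 // the `RecordWrite` above
    }

    int main() {
      Object young{true}, old_obj{false};
      Object* elements[1] = {nullptr};
      WriteWithBarrier(&elements[0], &old_obj, false);
      assert(remembered_slots.empty());                // old -> old: no record
      WriteWithBarrier(&elements[0], &young, false);
      assert(remembered_slots.count(&elements[0]));    // old -> new: recorded
    }
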
+
+
+ #undef __
+
+ } } // namespace v8::internal
+
+ #endif // V8_TARGET_ARCH_MIPS