therubyracer 0.7.4 → 0.7.5

Potentially problematic release: this version of therubyracer might be problematic.

Files changed (482)
  1. data/History.txt +11 -0
  2. data/Rakefile +1 -1
  3. data/ext/v8/extconf.rb +0 -18
  4. data/ext/v8/rr.cpp +2 -2
  5. data/ext/v8/upstream/{2.1.10 → 2.3.3}/AUTHORS +1 -0
  6. data/ext/v8/upstream/{2.1.10 → 2.3.3}/ChangeLog +239 -0
  7. data/ext/v8/upstream/{2.1.10 → 2.3.3}/LICENSE +0 -0
  8. data/ext/v8/upstream/{2.1.10 → 2.3.3}/SConstruct +29 -17
  9. data/ext/v8/upstream/{2.1.10 → 2.3.3}/include/v8-debug.h +61 -3
  10. data/ext/v8/upstream/{2.1.10 → 2.3.3}/include/v8-profiler.h +182 -5
  11. data/ext/v8/upstream/{2.1.10 → 2.3.3}/include/v8.h +458 -257
  12. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/SConscript +2 -5
  13. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/accessors.cc +2 -2
  14. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/accessors.h +0 -0
  15. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/allocation.cc +0 -0
  16. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/allocation.h +0 -0
  17. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/api.cc +574 -30
  18. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/api.h +12 -10
  19. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/apinatives.js +0 -0
  20. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/apiutils.h +0 -0
  21. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arguments.h +0 -0
  22. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/assembler-arm-inl.h +38 -15
  23. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/assembler-arm.cc +646 -101
  24. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/assembler-arm.h +174 -15
  25. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/builtins-arm.cc +56 -47
  26. data/ext/v8/upstream/2.3.3/src/arm/codegen-arm-inl.h +48 -0
  27. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/codegen-arm.cc +2957 -1448
  28. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/codegen-arm.h +230 -74
  29. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/constants-arm.cc +25 -1
  30. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/constants-arm.h +16 -1
  31. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/cpu-arm.cc +4 -0
  32. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/debug-arm.cc +76 -6
  33. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/disasm-arm.cc +168 -20
  34. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/fast-codegen-arm.cc +5 -2
  35. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/frames-arm.cc +4 -4
  36. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/frames-arm.h +0 -0
  37. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/full-codegen-arm.cc +1558 -248
  38. data/ext/v8/upstream/2.3.3/src/arm/ic-arm.cc +2258 -0
  39. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/jump-target-arm.cc +55 -103
  40. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/macro-assembler-arm.cc +358 -185
  41. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/macro-assembler-arm.h +136 -41
  42. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/regexp-macro-assembler-arm.cc +26 -5
  43. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/regexp-macro-assembler-arm.h +0 -0
  44. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/register-allocator-arm-inl.h +0 -0
  45. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/register-allocator-arm.cc +4 -0
  46. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/register-allocator-arm.h +0 -0
  47. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/simulator-arm.cc +203 -22
  48. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/simulator-arm.h +7 -0
  49. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/stub-cache-arm.cc +531 -324
  50. data/ext/v8/upstream/2.3.3/src/arm/virtual-frame-arm-inl.h +59 -0
  51. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/virtual-frame-arm.cc +247 -81
  52. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/arm/virtual-frame-arm.h +99 -83
  53. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/array.js +2 -2
  54. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/assembler.cc +6 -13
  55. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/assembler.h +36 -10
  56. data/ext/v8/upstream/2.3.3/src/ast-inl.h +81 -0
  57. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ast.cc +14 -0
  58. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ast.h +20 -35
  59. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/bootstrapper.cc +32 -1
  60. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/bootstrapper.h +0 -4
  61. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/builtins.cc +50 -33
  62. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/builtins.h +2 -0
  63. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/bytecodes-irregexp.h +0 -0
  64. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/cached-powers.h +0 -0
  65. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/char-predicates-inl.h +0 -0
  66. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/char-predicates.h +0 -0
  67. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/checks.cc +0 -0
  68. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/checks.h +8 -6
  69. data/ext/v8/upstream/2.3.3/src/circular-queue-inl.h +53 -0
  70. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/circular-queue.cc +0 -0
  71. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/circular-queue.h +0 -26
  72. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/code-stubs.cc +2 -4
  73. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/code-stubs.h +1 -0
  74. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/code.h +0 -0
  75. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/codegen-inl.h +0 -0
  76. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/codegen.cc +44 -13
  77. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/codegen.h +310 -31
  78. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/compilation-cache.cc +28 -0
  79. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/compilation-cache.h +3 -0
  80. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/compiler.cc +45 -14
  81. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/compiler.h +0 -0
  82. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/contexts.cc +11 -11
  83. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/contexts.h +0 -0
  84. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/conversions-inl.h +0 -0
  85. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/conversions.cc +25 -11
  86. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/conversions.h +0 -0
  87. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/counters.cc +0 -0
  88. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/counters.h +0 -0
  89. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/cpu-profiler-inl.h +2 -1
  90. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/cpu-profiler.cc +68 -24
  91. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/cpu-profiler.h +19 -11
  92. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/cpu.h +0 -0
  93. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8-debug.cc +0 -0
  94. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8-debug.h +0 -0
  95. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8-posix.cc +0 -0
  96. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8-readline.cc +0 -0
  97. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8-windows.cc +0 -0
  98. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8.cc +3 -0
  99. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8.h +0 -0
  100. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/d8.js +55 -2
  101. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/data-flow.cc +3 -0
  102. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/data-flow.h +0 -0
  103. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/date.js +68 -137
  104. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/dateparser-inl.h +0 -0
  105. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/dateparser.cc +2 -8
  106. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/dateparser.h +0 -0
  107. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/debug-agent.cc +3 -3
  108. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/debug-agent.h +0 -0
  109. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/debug-debugger.js +81 -23
  110. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/debug.cc +275 -81
  111. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/debug.h +85 -6
  112. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/disasm.h +0 -0
  113. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/disassembler.cc +1 -1
  114. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/disassembler.h +0 -0
  115. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/diy-fp.cc +0 -0
  116. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/diy-fp.h +0 -0
  117. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/double.h +0 -0
  118. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/dtoa-config.c +0 -0
  119. data/ext/v8/upstream/2.3.3/src/dtoa.cc +77 -0
  120. data/ext/v8/upstream/2.3.3/src/dtoa.h +81 -0
  121. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/execution.cc +111 -3
  122. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/execution.h +12 -1
  123. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/factory.cc +25 -3
  124. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/factory.h +16 -9
  125. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/fast-codegen.cc +0 -0
  126. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/fast-codegen.h +0 -0
  127. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/fast-dtoa.cc +2 -9
  128. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/fast-dtoa.h +1 -2
  129. data/ext/v8/upstream/2.3.3/src/fixed-dtoa.cc +405 -0
  130. data/ext/v8/upstream/{2.1.10/src/jump-target-light.cc → 2.3.3/src/fixed-dtoa.h} +22 -53
  131. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/flag-definitions.h +14 -6
  132. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/flags.cc +5 -9
  133. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/flags.h +0 -0
  134. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/flow-graph.cc +0 -0
  135. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/flow-graph.h +0 -0
  136. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/frame-element.cc +0 -0
  137. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/frame-element.h +0 -0
  138. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/frames-inl.h +0 -0
  139. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/frames.cc +5 -2
  140. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/frames.h +1 -0
  141. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/full-codegen.cc +387 -20
  142. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/full-codegen.h +102 -5
  143. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/func-name-inferrer.cc +0 -0
  144. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/func-name-inferrer.h +0 -0
  145. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/global-handles.cc +8 -4
  146. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/global-handles.h +0 -0
  147. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/globals.h +44 -7
  148. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/handles-inl.h +0 -0
  149. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/handles.cc +19 -0
  150. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/handles.h +8 -0
  151. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/hashmap.cc +0 -0
  152. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/hashmap.h +0 -0
  153. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/heap-inl.h +56 -14
  154. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/heap-profiler.cc +85 -1
  155. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/heap-profiler.h +45 -1
  156. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/heap.cc +994 -396
  157. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/heap.h +220 -65
  158. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/assembler-ia32-inl.h +41 -12
  159. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/assembler-ia32.cc +94 -24
  160. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/assembler-ia32.h +32 -4
  161. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/builtins-ia32.cc +42 -30
  162. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/codegen-ia32-inl.h +0 -0
  163. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/codegen-ia32.cc +1758 -916
  164. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/codegen-ia32.h +67 -74
  165. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/cpu-ia32.cc +4 -0
  166. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/debug-ia32.cc +46 -0
  167. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/disasm-ia32.cc +37 -6
  168. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/fast-codegen-ia32.cc +4 -0
  169. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/fast-codegen-ia32.h +0 -0
  170. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/frames-ia32.cc +4 -0
  171. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/frames-ia32.h +0 -0
  172. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/full-codegen-ia32.cc +1465 -198
  173. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/ic-ia32.cc +688 -367
  174. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/jump-target-ia32.cc +4 -0
  175. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/macro-assembler-ia32.cc +82 -180
  176. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/macro-assembler-ia32.h +41 -25
  177. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/regexp-macro-assembler-ia32.cc +68 -24
  178. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/regexp-macro-assembler-ia32.h +1 -2
  179. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/register-allocator-ia32-inl.h +0 -0
  180. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/register-allocator-ia32.cc +4 -0
  181. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/register-allocator-ia32.h +0 -0
  182. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/simulator-ia32.cc +0 -0
  183. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/simulator-ia32.h +0 -0
  184. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/stub-cache-ia32.cc +649 -302
  185. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/virtual-frame-ia32.cc +23 -1
  186. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/virtual-frame-ia32.h +18 -27
  187. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ic-inl.h +30 -3
  188. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ic.cc +384 -66
  189. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ic.h +65 -24
  190. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/interpreter-irregexp.cc +0 -0
  191. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/interpreter-irregexp.h +0 -0
  192. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/json.js +3 -3
  193. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jsregexp.cc +20 -4
  194. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jsregexp.h +0 -0
  195. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jump-target-heavy-inl.h +0 -0
  196. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jump-target-heavy.cc +79 -13
  197. data/ext/v8/upstream/{2.1.10/src/jump-target.h → 2.3.3/src/jump-target-heavy.h} +5 -47
  198. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jump-target-inl.h +0 -0
  199. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jump-target-light-inl.h +16 -2
  200. data/ext/v8/upstream/2.3.3/src/jump-target-light.cc +110 -0
  201. data/ext/v8/upstream/2.3.3/src/jump-target-light.h +192 -0
  202. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/jump-target.cc +0 -64
  203. data/ext/v8/upstream/2.3.3/src/jump-target.h +90 -0
  204. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/list-inl.h +0 -0
  205. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/list.h +0 -0
  206. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/liveedit-debugger.js +141 -28
  207. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/liveedit.cc +19 -7
  208. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/liveedit.h +0 -0
  209. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/log-inl.h +0 -0
  210. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/log-utils.cc +0 -0
  211. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/log-utils.h +0 -0
  212. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/log.cc +12 -11
  213. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/log.h +12 -0
  214. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/macro-assembler.h +0 -16
  215. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/macros.py +21 -0
  216. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mark-compact.cc +120 -109
  217. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mark-compact.h +25 -37
  218. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/math.js +0 -0
  219. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/memory.h +0 -0
  220. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/messages.cc +8 -3
  221. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/messages.h +2 -1
  222. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/messages.js +15 -7
  223. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/assembler-mips-inl.h +0 -0
  224. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/assembler-mips.cc +12 -1
  225. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/assembler-mips.h +4 -1
  226. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/builtins-mips.cc +3 -0
  227. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/codegen-mips-inl.h +0 -0
  228. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/codegen-mips.cc +9 -0
  229. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/codegen-mips.h +1 -0
  230. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/constants-mips.cc +5 -0
  231. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/constants-mips.h +0 -0
  232. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/cpu-mips.cc +4 -0
  233. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/debug-mips.cc +3 -0
  234. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/disasm-mips.cc +3 -0
  235. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/fast-codegen-mips.cc +3 -0
  236. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/frames-mips.cc +3 -0
  237. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/frames-mips.h +0 -0
  238. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/full-codegen-mips.cc +5 -1
  239. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/ic-mips.cc +3 -0
  240. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/jump-target-mips.cc +3 -0
  241. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/macro-assembler-mips.cc +3 -0
  242. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/macro-assembler-mips.h +0 -0
  243. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/register-allocator-mips-inl.h +0 -0
  244. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/register-allocator-mips.cc +3 -0
  245. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/register-allocator-mips.h +0 -0
  246. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/simulator-mips.cc +3 -0
  247. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/simulator-mips.h +0 -0
  248. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/stub-cache-mips.cc +3 -0
  249. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/virtual-frame-mips.cc +3 -0
  250. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mips/virtual-frame-mips.h +0 -0
  251. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mirror-debugger.js +46 -4
  252. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/mksnapshot.cc +0 -0
  253. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/natives.h +0 -0
  254. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/objects-debug.cc +8 -1
  255. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/objects-inl.h +235 -62
  256. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/objects.cc +497 -231
  257. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/objects.h +355 -149
  258. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/oprofile-agent.cc +0 -0
  259. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/oprofile-agent.h +0 -0
  260. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/parser.cc +31 -6
  261. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/parser.h +1 -1
  262. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-freebsd.cc +9 -6
  263. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-linux.cc +26 -6
  264. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-macos.cc +11 -6
  265. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-nullos.cc +0 -0
  266. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-openbsd.cc +6 -0
  267. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-posix.cc +0 -0
  268. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-solaris.cc +69 -23
  269. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform-win32.cc +15 -11
  270. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/platform.h +10 -6
  271. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/powers-ten.h +0 -0
  272. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/prettyprinter.cc +0 -0
  273. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/prettyprinter.h +0 -0
  274. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/profile-generator-inl.h +26 -2
  275. data/ext/v8/upstream/2.3.3/src/profile-generator.cc +1830 -0
  276. data/ext/v8/upstream/2.3.3/src/profile-generator.h +853 -0
  277. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/property.cc +0 -0
  278. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/property.h +0 -0
  279. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler-irregexp-inl.h +0 -0
  280. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler-irregexp.cc +0 -0
  281. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler-irregexp.h +0 -0
  282. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler-tracer.cc +0 -0
  283. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler-tracer.h +0 -0
  284. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler.cc +1 -3
  285. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-macro-assembler.h +0 -0
  286. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-stack.cc +0 -0
  287. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp-stack.h +0 -0
  288. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/regexp.js +25 -4
  289. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/register-allocator-inl.h +0 -0
  290. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/register-allocator.cc +4 -3
  291. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/register-allocator.h +0 -0
  292. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/rewriter.cc +85 -8
  293. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/rewriter.h +0 -0
  294. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/runtime.cc +547 -221
  295. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/runtime.h +5 -1
  296. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/runtime.js +23 -31
  297. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scanner.cc +12 -6
  298. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scanner.h +60 -53
  299. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scopeinfo.cc +156 -168
  300. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scopeinfo.h +58 -62
  301. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scopes.cc +0 -0
  302. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/scopes.h +0 -0
  303. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/serialize.cc +320 -242
  304. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/serialize.h +81 -48
  305. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/shell.h +0 -0
  306. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/simulator.h +0 -0
  307. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/smart-pointer.h +0 -0
  308. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/snapshot-common.cc +0 -0
  309. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/snapshot-empty.cc +0 -0
  310. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/snapshot.h +0 -0
  311. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/spaces-inl.h +177 -74
  312. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/spaces.cc +138 -315
  313. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/spaces.h +155 -124
  314. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/splay-tree-inl.h +0 -0
  315. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/splay-tree.h +0 -0
  316. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/string-stream.cc +0 -0
  317. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/string-stream.h +0 -0
  318. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/string.js +113 -119
  319. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/stub-cache.cc +242 -97
  320. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/stub-cache.h +118 -55
  321. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/third_party/dtoa/COPYING +0 -0
  322. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/third_party/dtoa/dtoa.c +4 -0
  323. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/third_party/valgrind/valgrind.h +0 -0
  324. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/token.cc +0 -0
  325. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/token.h +0 -0
  326. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/top.cc +107 -26
  327. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/top.h +9 -4
  328. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/type-info.cc +0 -0
  329. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/type-info.h +2 -2
  330. data/ext/v8/upstream/2.3.3/src/unbound-queue-inl.h +95 -0
  331. data/ext/v8/upstream/2.3.3/src/unbound-queue.h +67 -0
  332. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/unicode-inl.h +0 -0
  333. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/unicode.cc +0 -0
  334. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/unicode.h +0 -0
  335. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/uri.js +0 -0
  336. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/utils.cc +0 -0
  337. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/utils.h +83 -1
  338. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8-counters.cc +0 -0
  339. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8-counters.h +20 -0
  340. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8.cc +5 -1
  341. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8.h +0 -0
  342. data/ext/v8/upstream/2.3.3/src/v8dll-main.cc +39 -0
  343. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8natives.js +210 -33
  344. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8threads.cc +1 -1
  345. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/v8threads.h +1 -1
  346. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/variables.cc +0 -0
  347. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/variables.h +0 -0
  348. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/version.cc +3 -3
  349. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/version.h +0 -0
  350. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame-heavy-inl.h +40 -0
  351. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame-heavy.cc +0 -0
  352. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame-inl.h +0 -0
  353. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame-light-inl.h +106 -5
  354. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame-light.cc +4 -1
  355. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame.cc +0 -0
  356. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/virtual-frame.h +0 -0
  357. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/vm-state-inl.h +6 -3
  358. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/vm-state.cc +1 -1
  359. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/vm-state.h +6 -4
  360. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/assembler-x64-inl.h +42 -5
  361. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/assembler-x64.cc +285 -53
  362. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/assembler-x64.h +54 -18
  363. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/builtins-x64.cc +31 -33
  364. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/codegen-x64-inl.h +0 -0
  365. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/codegen-x64.cc +9787 -8722
  366. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/codegen-x64.h +82 -47
  367. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/cpu-x64.cc +4 -0
  368. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/debug-x64.cc +55 -6
  369. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/disasm-x64.cc +42 -19
  370. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/fast-codegen-x64.cc +4 -0
  371. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/frames-x64.cc +4 -0
  372. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/frames-x64.h +4 -0
  373. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/full-codegen-x64.cc +1487 -210
  374. data/ext/v8/upstream/2.3.3/src/x64/ic-x64.cc +1907 -0
  375. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/jump-target-x64.cc +4 -0
  376. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/macro-assembler-x64.cc +366 -338
  377. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/macro-assembler-x64.h +83 -38
  378. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/regexp-macro-assembler-x64.cc +82 -23
  379. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/regexp-macro-assembler-x64.h +1 -2
  380. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/register-allocator-x64-inl.h +6 -5
  381. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/register-allocator-x64.cc +4 -0
  382. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/register-allocator-x64.h +1 -1
  383. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/simulator-x64.cc +0 -0
  384. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/simulator-x64.h +0 -0
  385. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/stub-cache-x64.cc +556 -377
  386. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/virtual-frame-x64.cc +197 -98
  387. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/x64/virtual-frame-x64.h +37 -28
  388. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/zone-inl.h +0 -0
  389. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/zone.cc +0 -0
  390. data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/zone.h +0 -0
  391. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/codemap.js +0 -0
  392. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/consarray.js +0 -0
  393. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/csvparser.js +0 -0
  394. data/ext/v8/upstream/2.3.3/tools/gc-nvp-trace-processor.py +317 -0
  395. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/generate-ten-powers.scm +0 -0
  396. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/gyp/v8.gyp +87 -20
  397. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/js2c.py +19 -15
  398. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/jsmin.py +0 -0
  399. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/linux-tick-processor +0 -0
  400. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/linux-tick-processor.py +0 -0
  401. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/logreader.js +0 -0
  402. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/mac-nm +0 -0
  403. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/mac-tick-processor +0 -0
  404. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/annotate +0 -0
  405. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/common +0 -0
  406. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/dump +0 -0
  407. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/report +0 -0
  408. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/reset +0 -0
  409. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/run +0 -0
  410. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/shutdown +0 -0
  411. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/oprofile/start +0 -0
  412. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/presubmit.py +0 -0
  413. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/process-heap-prof.py +0 -0
  414. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/profile.js +0 -0
  415. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/profile_view.js +0 -0
  416. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/run-valgrind.py +0 -0
  417. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/splaytree.js +0 -0
  418. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/splaytree.py +0 -0
  419. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/stats-viewer.py +25 -13
  420. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/test.py +0 -0
  421. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/tickprocessor-driver.js +0 -0
  422. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/tickprocessor.js +0 -0
  423. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/tickprocessor.py +0 -0
  424. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/utils.py +0 -0
  425. data/ext/v8/upstream/2.3.3/tools/v8.xcodeproj/project.pbxproj +1855 -0
  426. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/README.txt +0 -0
  427. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/arm.vsprops +0 -0
  428. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/common.vsprops +0 -0
  429. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/d8.vcproj +0 -0
  430. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/d8_arm.vcproj +0 -0
  431. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/d8_x64.vcproj +0 -0
  432. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/d8js2c.cmd +0 -0
  433. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/debug.vsprops +0 -0
  434. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/ia32.vsprops +0 -0
  435. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/js2c.cmd +0 -0
  436. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/release.vsprops +0 -0
  437. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8.sln +0 -0
  438. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8.vcproj +0 -0
  439. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_arm.sln +0 -0
  440. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_arm.vcproj +0 -0
  441. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_base.vcproj +40 -0
  442. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_base_arm.vcproj +20 -0
  443. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_base_x64.vcproj +16 -0
  444. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_cctest.vcproj +4 -0
  445. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_cctest_arm.vcproj +0 -0
  446. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_cctest_x64.vcproj +0 -0
  447. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_mksnapshot.vcproj +0 -0
  448. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_mksnapshot_x64.vcproj +0 -0
  449. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_process_sample.vcproj +0 -0
  450. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_process_sample_arm.vcproj +0 -0
  451. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_process_sample_x64.vcproj +0 -0
  452. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_shell_sample.vcproj +0 -0
  453. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_shell_sample_arm.vcproj +0 -0
  454. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_shell_sample_x64.vcproj +0 -0
  455. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_snapshot.vcproj +0 -0
  456. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_snapshot_cc.vcproj +0 -0
  457. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_snapshot_cc_x64.vcproj +0 -0
  458. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_snapshot_x64.vcproj +0 -0
  459. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_x64.sln +0 -0
  460. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/v8_x64.vcproj +0 -0
  461. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/visual_studio/x64.vsprops +0 -0
  462. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/windows-tick-processor.bat +0 -0
  463. data/ext/v8/upstream/{2.1.10 → 2.3.3}/tools/windows-tick-processor.py +0 -0
  464. data/ext/v8/upstream/Makefile +1 -1
  465. data/ext/v8/v8_template.cpp +94 -2
  466. data/ext/v8/v8_try_catch.cpp +2 -2
  467. data/lib/v8.rb +1 -1
  468. data/lib/v8/access.rb +93 -40
  469. data/lib/v8/cli.rb +1 -1
  470. data/lib/v8/function.rb +14 -2
  471. data/spec/redjs/jsapi_spec.rb +231 -42
  472. data/therubyracer.gemspec +3 -3
  473. metadata +463 -453
  474. data/ext/v8/upstream/2.1.10/src/arm/assembler-thumb2-inl.h +0 -263
  475. data/ext/v8/upstream/2.1.10/src/arm/assembler-thumb2.cc +0 -1878
  476. data/ext/v8/upstream/2.1.10/src/arm/assembler-thumb2.h +0 -1036
  477. data/ext/v8/upstream/2.1.10/src/arm/codegen-arm-inl.h +0 -72
  478. data/ext/v8/upstream/2.1.10/src/arm/ic-arm.cc +0 -1833
  479. data/ext/v8/upstream/2.1.10/src/circular-queue-inl.h +0 -101
  480. data/ext/v8/upstream/2.1.10/src/profile-generator.cc +0 -583
  481. data/ext/v8/upstream/2.1.10/src/profile-generator.h +0 -364
  482. data/ext/v8/upstream/2.1.10/src/x64/ic-x64.cc +0 -1621
data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/builtins-ia32.cc

@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 
 namespace v8 {

@@ -224,8 +226,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   // edx: number of elements
   // ecx: start of next object
   __ mov(eax, Factory::fixed_array_map());
-  __ mov(Operand(edi, JSObject::kMapOffset), eax);  // setup the map
-  __ mov(Operand(edi, Array::kLengthOffset), edx);  // and length
+  __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
+  __ SmiTag(edx);
+  __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
 
   // Initialize the fields to undefined.
   // ebx: JSObject
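
The `__ SmiTag(edx)` added above exists because V8 2.3.3 stores FixedArray lengths as smis (tagged small integers) where 2.1.10 stored raw values. As a rough standalone sketch of the tagging scheme, not V8 code: on ia32 a smi keeps a 31-bit payload shifted past a one-bit zero tag (per the `kSmiTag == 0`, `kSmiTagSize == 1`, and `kSmiValueSize == 31` asserts visible later in this diff), so tag, untag, and the `kSmiTagMask` test each cost one instruction:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;      // one tag bit, per the diff's asserts
    const int32_t kSmiTagMask = 1;  // mask selecting the tag bit (kSmiTag == 0)

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }  // what __ SmiTag emits
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }    // what __ SmiUntag emits
    bool IsSmi(int32_t word) { return (word & kSmiTagMask) == 0; }  // the tag-mask test

    int main() {
      assert(IsSmi(SmiTag(42)));
      assert(SmiUntag(SmiTag(42)) == 42);
      return 0;
    }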
@@ -328,10 +331,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(greater_equal, &exit, not_taken);
+  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+  __ j(above_equal, &exit, not_taken);
 
   // Throw away the result of the constructor invocation and use the
   // on-stack receiver as the result.

@@ -466,11 +467,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ cmp(ebx, Factory::undefined_value());
   __ j(equal, &use_global_receiver);
 
+  // We don't use IsObjectJSObjectType here because we jump on success.
   __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &convert_to_object);
-  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
   __ j(below_equal, &shift_arguments);
 
   __ bind(&convert_to_object);

@@ -546,6 +547,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(ebx);
   __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
   __ cmp(eax, Operand(ebx));

@@ -613,12 +615,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 
   // If given receiver is already a JavaScript object then there's no
   // reason for converting it.
+  // We don't use IsObjectJSObjectType here because we jump on success.
   __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &call_to_object);
-  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
-  __ j(less_equal, &push_receiver);
+  __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+  __ j(below_equal, &push_receiver);
 
   // Convert the receiver to an object.
   __ bind(&call_to_object);
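
Both receiver checks above (in Generate_FunctionCall and Generate_FunctionApply) replace a pair of compares and branches with a subtract plus one unsigned compare. A minimal standalone sketch of that range-check trick, using hypothetical type values rather than V8's real constants:

    #include <cassert>
    #include <cstdint>

    // first <= type && type <= last as a single unsigned compare: when
    // type < first, the subtraction wraps around to a large unsigned value.
    bool InRange(uint8_t type, uint8_t first, uint8_t last) {
      return static_cast<uint8_t>(type - first) <= static_cast<uint8_t>(last - first);
    }

    int main() {
      assert(InRange(5, 3, 7));   // inside the range
      assert(!InRange(2, 3, 7));  // below: wraps to 255, fails the compare
      assert(!InRange(9, 3, 7));  // above the range
      return 0;
    }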
@@ -750,15 +752,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
   __ lea(scratch1, Operand(result, JSArray::kSize));
   __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
 
-  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // Initialize the FixedArray and fill it with holes. FixedArray length is
   // stored as a smi.
   // result: JSObject
   // scratch1: elements array
   // scratch2: start of next object
-  __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+  __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
          Factory::fixed_array_map());
-  __ mov(FieldOperand(scratch1, Array::kLengthOffset),
-         Immediate(initial_capacity));
+  __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
+         Immediate(Smi::FromInt(initial_capacity)));
 
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.

@@ -806,6 +808,7 @@ static void AllocateJSArray(MacroAssembler* masm,
                             Label* gc_required) {
   ASSERT(scratch.is(edi));  // rep stos destination
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
+  ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
   // Load the initial map from the array function.
   __ mov(elements_array,

@@ -844,34 +847,40 @@ static void AllocateJSArray(MacroAssembler* masm,
   __ lea(elements_array, Operand(result, JSArray::kSize));
   __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
 
-  // Initialize the fixed array. FixedArray length is not stored as a smi.
+  // Initialize the fixed array. FixedArray length is stored as a smi.
   // result: JSObject
   // elements_array: elements array
   // elements_array_end: start of next object
   // array_size: size of array (smi)
-  ASSERT(kSmiTag == 0);
-  __ SmiUntag(array_size);  // Convert from smi to value.
-  __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+  __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
          Factory::fixed_array_map());
   // For non-empty JSArrays the length of the FixedArray and the JSArray is the
   // same.
-  __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+  __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
 
   // Fill the allocated FixedArray with the hole value if requested.
   // result: JSObject
   // elements_array: elements array
   if (fill_with_hole) {
+    __ SmiUntag(array_size);
     __ lea(edi, Operand(elements_array,
                         FixedArray::kHeaderSize - kHeapObjectTag));
-
-    __ push(eax);
     __ mov(eax, Factory::the_hole_value());
-
     __ cld();
+    // Do not use rep stos when filling less than kRepStosThreshold
+    // words.
+    const int kRepStosThreshold = 16;
+    Label loop, entry, done;
+    __ cmp(ecx, kRepStosThreshold);
+    __ j(below, &loop);  // Note: ecx > 0.
     __ rep_stos();
-
-    // Restore saved registers.
-    __ pop(eax);
+    __ jmp(&done);
+    __ bind(&loop);
+    __ stos();
+    __ bind(&entry);
+    __ cmp(edi, Operand(elements_array_end));
+    __ j(below, &loop);
+    __ bind(&done);
   }
 }
 
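
The rewritten fill path above only uses `rep stos` when at least kRepStosThreshold words need filling, falling back to a plain store loop for short fills. A standalone C++ sketch of the same strategy, not V8 code (the threshold value comes from the hunk; `std::fill` stands in for `cld` + `rep stos`):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    void FillWithHole(uint32_t* dst, uint32_t* end, uint32_t hole_value) {
      const ptrdiff_t kRepStosThreshold = 16;   // value taken from the diff above
      if (end - dst < kRepStosThreshold) {
        while (dst < end) *dst++ = hole_value;  // the stos/cmp/j(below) loop
      } else {
        std::fill(dst, end, hole_value);        // stands in for cld + rep stos
      }
    }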
@@ -970,13 +979,14 @@ static void ArrayNativeCode(MacroAssembler* masm,
   AllocateJSArray(masm,
                   edi,
                   ecx,
-                  eax,
                   ebx,
+                  eax,
                   edx,
                   edi,
                   true,
                   &prepare_generic_code_call);
   __ IncrementCounter(&Counters::array_function_native, 1);
+  __ mov(eax, ebx);
   __ pop(ebx);
   if (construct_call) {
     __ pop(edi);

@@ -1067,7 +1077,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   // -- esp[0] : return address
   // -- esp[4] : last argument
   // -----------------------------------
-  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+  Label generic_array_code;
 
   // Get the Array function.
   GenerateLoadArrayFunction(masm, edi);

@@ -1247,3 +1257,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
data/ext/v8/upstream/{2.1.10 → 2.3.3}/src/ia32/codegen-ia32.cc

@@ -27,29 +27,28 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
 #include "ic-inl.h"
-#include "jsregexp.h"
 #include "parser.h"
 #include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
 #include "register-allocator-inl.h"
-#include "runtime.h"
 #include "scopes.h"
 #include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm)
 
 // -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
+// Platform-specific FrameRegisterState functions.
 
-void DeferredCode::SaveRegisters() {
+void FrameRegisterState::Save(MacroAssembler* masm) const {
   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
     int action = registers_[i];
     if (action == kPush) {

@@ -61,7 +60,7 @@ void DeferredCode::SaveRegisters() {
 }
 
 
-void DeferredCode::RestoreRegisters() {
+void FrameRegisterState::Restore(MacroAssembler* masm) const {
   // Restore registers in reverse order due to the stack.
   for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
     int action = registers_[i];
@@ -75,6 +74,45 @@ void DeferredCode::RestoreRegisters() {
 }
 
 
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+  frame_state_.Save(masm_);
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  frame_state_.Restore(masm_);
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  frame_state_->Save(masm);
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  frame_state_->Restore(masm);
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  masm->LeaveInternalFrame();
+}
+
+
 // -------------------------------------------------------------------------
 // CodeGenState implementation.
 
@@ -102,7 +140,7 @@ CodeGenState::~CodeGenState() {
 
 
 // -------------------------------------------------------------------------
-// CodeGenerator implementation
+// CodeGenerator implementation.
 
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),

@@ -333,12 +371,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
   }
 
   // Adjust for function-level loop nesting.
-  ASSERT_EQ(info->loop_nesting(), loop_nesting_);
+  ASSERT_EQ(loop_nesting_, info->loop_nesting());
   loop_nesting_ = 0;
 
   // Code generation state must be reset.
   ASSERT(state_ == NULL);
-  ASSERT(loop_nesting() == 0);
   ASSERT(!function_return_is_shadowed_);
   function_return_.Unuse();
   DeleteFrame();

@@ -563,6 +600,10 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
   RegisterFile empty_regs;
   SetFrame(clone, &empty_regs);
   __ bind(&allocation_failed);
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    // Pop the value from the floating point stack.
+    __ fstp(0);
+  }
   unsafe_bailout_->Jump();
 
   done.Bind(value);

@@ -601,7 +642,6 @@ void CodeGenerator::Load(Expression* expr) {
   } else {
     JumpTarget true_target;
     JumpTarget false_target;
-
     ControlDestination dest(&true_target, &false_target, true);
     LoadCondition(expr, &dest, false);
 

@@ -693,9 +733,7 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    Result result =
-        LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -741,10 +779,11 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
   JumpTarget done;
   bool skip_arguments = false;
   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has already
-    // been written to. This can happen if the a function has a local
-    // variable named 'arguments'.
-    Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    // We have to skip storing into the arguments slot if it has
+    // already been written to. This can happen if the a function
+    // has a local variable named 'arguments'.
+    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
       // been assigned a proper value.

@@ -829,14 +868,6 @@ void CodeGenerator::LoadReference(Reference* ref) {
 }
 
 
-void CodeGenerator::UnloadReference(Reference* ref) {
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
-  frame_->Nip(ref->size());
-  ref->set_unloaded();
-}
-
-
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.

@@ -874,7 +905,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
     __ AbortIfNotNumber(value.reg());
   }
   // Smi => false iff zero.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(value.reg(), Operand(value.reg()));
   dest->false_target()->Branch(zero);
   __ test(value.reg(), Immediate(kSmiTagMask));

@@ -899,7 +930,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
   dest->false_target()->Branch(equal);
 
   // Smi => false iff zero.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(value.reg(), Operand(value.reg()));
   dest->false_target()->Branch(zero);
   __ test(value.reg(), Immediate(kSmiTagMask));

@@ -1138,7 +1169,7 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
                                   const Result& left) {
   // Set TypeInfo of result according to the operation performed.
   // Rely on the fact that smis have a 31 bit payload on ia32.
-  ASSERT(kSmiValueSize == 31);
+  STATIC_ASSERT(kSmiValueSize == 31);
   switch (op) {
     case Token::COMMA:
       return right.type_info();

@@ -1398,8 +1429,8 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
       } else {
        unsigned_left >>= shift_amount;
       }
-      ASSERT(Smi::IsValid(unsigned_left));  // Converted to signed.
-      answer_object = Smi::FromInt(unsigned_left);  // Converted to signed.
+      ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+      answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
       break;
     }
     default:
@@ -1414,10 +1445,89 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
 }
 
 
-static void CheckTwoForSminess(MacroAssembler* masm,
-                               Register left, Register right, Register scratch,
-                               TypeInfo left_info, TypeInfo right_info,
-                               DeferredInlineBinaryOperation* deferred);
+void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
+                                               Result* right,
+                                               JumpTarget* both_smi) {
+  TypeInfo left_info = left->type_info();
+  TypeInfo right_info = right->type_info();
+  if (left_info.IsDouble() || left_info.IsString() ||
+      right_info.IsDouble() || right_info.IsString()) {
+    // We know that left and right are not both smi. Don't do any tests.
+    return;
+  }
+
+  if (left->reg().is(right->reg())) {
+    if (!left_info.IsSmi()) {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  } else if (!left_info.IsSmi()) {
+    if (!right_info.IsSmi()) {
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), left->reg());
+      __ or_(temp.reg(), Operand(right->reg()));
+      __ test(temp.reg(), Immediate(kSmiTagMask));
+      temp.Unuse();
+      both_smi->Branch(zero);
+    } else {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    }
+  } else {
+    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+    if (!right_info.IsSmi()) {
+      __ test(right->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  }
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+                                                  Register right,
+                                                  Register scratch,
+                                                  TypeInfo left_info,
+                                                  TypeInfo right_info,
+                                                  DeferredCode* deferred) {
+  if (left.is(right)) {
+    if (!left_info.IsSmi()) {
+      __ test(left, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left);
+    }
+  } else if (!left_info.IsSmi()) {
+    if (!right_info.IsSmi()) {
+      __ mov(scratch, left);
+      __ or_(scratch, Operand(right));
+      __ test(scratch, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      __ test(left, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      if (FLAG_debug_code) __ AbortIfNotSmi(right);
+    }
+  } else {
+    if (FLAG_debug_code) __ AbortIfNotSmi(left);
+    if (!right_info.IsSmi()) {
+      __ test(right, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right);
+    }
+  }
+}
 
 
 // Implements a binary operation using a deferred code object and some
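
The two helpers added above use the statically known TypeInfo to skip smi tag tests that type inference has already settled, and fold the remaining pair of tests into one OR. A C-level sketch of the decision logic only, not the macro-assembler code; the boolean parameters here are illustrative stand-ins for `TypeInfo::IsSmi()`:

    #include <cstdint>

    // True when both words carry the smi tag, doing only the tests the
    // static type information cannot answer in advance.
    bool BothSmi(int32_t left, int32_t right,
                 bool left_known_smi, bool right_known_smi) {
      const int32_t kSmiTagMask = 1;                       // low tag bit, kSmiTag == 0
      if (left_known_smi && right_known_smi) return true;  // no runtime test emitted
      if (left_known_smi) return (right & kSmiTagMask) == 0;
      if (right_known_smi) return (left & kSmiTagMask) == 0;
      return ((left | right) & kSmiTagMask) == 0;          // one combined or + test
    }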
@@ -1426,6 +1536,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                                Result* left,
                                                Result* right,
                                                OverwriteMode overwrite_mode) {
+  // Copy the type info because left and right may be overwritten.
+  TypeInfo left_type_info = left->type_info();
+  TypeInfo right_type_info = right->type_info();
   Token::Value op = expr->op();
   Result answer;
   // Special handling of div and mod because they use fixed registers.

@@ -1501,22 +1614,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                  (op == Token::DIV) ? eax : edx,
                                  left->reg(),
                                  right->reg(),
-                                 left->type_info(),
-                                 right->type_info(),
+                                 left_type_info,
+                                 right_type_info,
                                  overwrite_mode);
-    if (left->reg().is(right->reg())) {
-      __ test(left->reg(), Immediate(kSmiTagMask));
-    } else {
-      // Use the quotient register as a scratch for the tag check.
-      if (!left_is_in_eax) __ mov(eax, left->reg());
-      left_is_in_eax = false;  // About to destroy the value in eax.
-      __ or_(eax, Operand(right->reg()));
-      ASSERT(kSmiTag == 0);  // Adjust test if not the case.
-      __ test(eax, Immediate(kSmiTagMask));
+    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
+                                  left_type_info, right_type_info, deferred);
+    if (!left_is_in_eax) {
+      __ mov(eax, left->reg());
     }
-    deferred->Branch(not_zero);
-
-    if (!left_is_in_eax) __ mov(eax, left->reg());
     // Sign extend eax into edx:eax.
     __ cdq();
     // Check for 0 divisor.

@@ -1543,7 +1648,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
     // Check for the corner case of dividing the most negative smi by
     // -1. We cannot use the overflow flag, since it is not set by
     // idiv instruction.
-    ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
     __ cmp(eax, 0x40000000);
     deferred->Branch(equal);
     // Check that the remainder is zero.
@@ -1605,18 +1710,18 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                           answer.reg(),
                                           left->reg(),
                                           ecx,
-                                          left->type_info(),
-                                          right->type_info(),
+                                          left_type_info,
+                                          right_type_info,
                                           overwrite_mode);
 
     Label do_op, left_nonsmi;
     // If right is a smi we make a fast case if left is either a smi
     // or a heapnumber.
-    if (CpuFeatures::IsSupported(SSE2) && right->type_info().IsSmi()) {
+    if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
       CpuFeatures::Scope use_sse2(SSE2);
       __ mov(answer.reg(), left->reg());
       // Fast case - both are actually smis.
-      if (!left->type_info().IsSmi()) {
+      if (!left_type_info.IsSmi()) {
         __ test(answer.reg(), Immediate(kSmiTagMask));
         __ j(not_zero, &left_nonsmi);
       } else {
@@ -1639,8 +1744,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
       __ cmp(answer.reg(), 0xc0000000);
       deferred->Branch(negative);
     } else {
-      CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
-                         left->type_info(), right->type_info(), deferred);
+      JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+                                    left_type_info, right_type_info, deferred);
 
       // Untag both operands.
       __ mov(answer.reg(), left->reg());
@@ -1713,11 +1818,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                           answer.reg(),
                                           left->reg(),
                                           right->reg(),
-                                          left->type_info(),
-                                          right->type_info(),
+                                          left_type_info,
+                                          right_type_info,
                                           overwrite_mode);
-    CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
-                       left->type_info(), right->type_info(), deferred);
+    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+                                  left_type_info, right_type_info, deferred);
 
     __ mov(answer.reg(), left->reg());
     switch (op) {
@@ -1733,7 +1838,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
 
       case Token::MUL: {
         // If the smi tag is 0 we can just leave the tag on one operand.
-        ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
         // Remove smi tag from the left operand (but keep sign).
         // Left-hand operand has been copied into answer.
         __ SmiUntag(answer.reg());
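The comment in the MUL case is pure tag arithmetic: a smi is stored as value << 1, so after untagging only one factor the product comes out correctly tagged, since a * (b << 1) == (a * b) << 1. A plain-integer sketch (overflow handling, which the real code routes to deferred code, is omitted):

    #include <cstdint>

    intptr_t TaggedSmiMul(intptr_t tagged_a, intptr_t tagged_b) {
      intptr_t a = tagged_a >> 1;  // SmiUntag: arithmetic shift keeps the sign
      return a * tagged_b;         // == ((a * b) << 1), already a tagged smi
    }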
@@ -1858,12 +1963,12 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
 
 
 void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub igostub(
+  GenericBinaryOpStub stub(
       op_,
      overwrite_mode_,
       NO_SMI_CODE_IN_STUB,
       TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, value_, src_);
+  stub.GenerateCall(masm_, value_, src_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1988,18 +2093,13 @@ void DeferredInlineSmiSub::Generate() {
 }
 
 
-Result CodeGenerator::ConstantSmiBinaryOperation(
-    BinaryOperation* expr,
-    Result* operand,
-    Handle<Object> value,
-    bool reversed,
-    OverwriteMode overwrite_mode) {
-  // NOTE: This is an attempt to inline (a bit) more of the code for
-  // some possible smi operations (like + and -) when (at least) one
-  // of the operands is a constant smi.
-  // Consumes the argument "operand".
-  // TODO(199): Optimize some special cases of operations involving a
-  // smi literal (multiply by 2, shift by 0, etc.).
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
+                                                 Result* operand,
+                                                 Handle<Object> value,
+                                                 bool reversed,
+                                                 OverwriteMode overwrite_mode) {
+  // Generate inline code for a binary operation when one of the
+  // operands is a constant smi. Consumes the argument "operand".
   if (IsUnsafeSmi(value)) {
     Result unsafe_operand(value);
     if (reversed) {
@@ -2245,13 +2345,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
           __ AbortIfNotSmi(operand->reg());
         }
         __ mov(answer.reg(), operand->reg());
-        ASSERT(kSmiTag == 0);  // adjust code if not the case
+        STATIC_ASSERT(kSmiTag == 0);  // adjust code if not the case
         // We do no shifts, only the Smi conversion, if shift_value is 1.
         if (shift_value > 1) {
           __ shl(answer.reg(), shift_value - 1);
         }
         // Convert int result to Smi, checking that it is in int range.
-        ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+        STATIC_ASSERT(kSmiTagSize == 1);  // adjust code if not the case
         __ add(answer.reg(), Operand(answer.reg()));
         deferred->Branch(overflow);
         deferred->BindExit();
@@ -2319,8 +2419,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
                                        overwrite_mode);
         // Check that lowest log2(value) bits of operand are zero, and test
         // smi tag at the same time.
-        ASSERT_EQ(0, kSmiTag);
-        ASSERT_EQ(1, kSmiTagSize);
+        STATIC_ASSERT(kSmiTag == 0);
+        STATIC_ASSERT(kSmiTagSize == 1);
         __ test(operand->reg(), Immediate(3));
         deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
         __ sar(operand->reg(), 1);
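The test against the immediate 3 checks two things at once: bit 0 is the smi tag and bit 1 is the lowest payload bit, so one instruction rejects both non-smis and odd smis. For an even smi x == 2m the tagged word is 4m, and the single sar then produces 2m, i.e. m already tagged. A sketch of this divide-by-2 fast path:

    #include <cstdint>
    #include <optional>

    std::optional<intptr_t> TaggedDivByTwo(intptr_t tagged) {
      if ((tagged & 3) != 0) return std::nullopt;  // deferred->Branch(not_zero)
      return tagged >> 1;                          // __ sar(operand, 1)
    }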
@@ -2368,6 +2468,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
         break;
       }
       // Fall through if we did not find a power of 2 on the right hand side!
+      // The next case must be the default.
 
       default: {
         Result constant_operand(value);
@@ -2431,8 +2532,7 @@ void CodeGenerator::Comparison(AstNode* node,
   }
   ASSERT(cc == less || cc == equal || cc == greater_equal);
 
-  // If either side is a constant of some sort, we can probably optimize the
-  // comparison.
+  // If either side is a constant smi, optimize the comparison.
   bool left_side_constant_smi = false;
   bool left_side_constant_null = false;
   bool left_side_constant_1_char_string = false;
@@ -2457,112 +2557,11 @@ void CodeGenerator::Comparison(AstNode* node,
   }
 
   if (left_side_constant_smi || right_side_constant_smi) {
-    if (left_side_constant_smi && right_side_constant_smi) {
-      // Trivial case, comparing two constants.
-      int left_value = Smi::cast(*left_side.handle())->value();
-      int right_value = Smi::cast(*right_side.handle())->value();
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant Smi.
-      // If left side is a constant Smi, reverse the operands.
-      // Since one side is a constant Smi, conversion order does not matter.
-      if (left_side_constant_smi) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may re-introduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant Smi, inlining the case
-      // where both sides are Smis.
-      left_side.ToRegister();
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination. We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split. We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-
-      if (left_side.is_smi()) {
-        if (FLAG_debug_code) __ AbortIfNotSmi(left_side.reg());
-      } else {
-        JumpTarget is_smi;
-        __ test(left_side.reg(), Immediate(kSmiTagMask));
-        is_smi.Branch(zero, taken);
-
-        bool is_loop_condition = (node->AsExpression() != NULL) &&
-            node->AsExpression()->is_loop_condition();
-        if (!is_loop_condition &&
-            CpuFeatures::IsSupported(SSE2) &&
-            right_val->IsSmi()) {
-          // Right side is a constant smi and left side has been checked
-          // not to be a smi.
-          CpuFeatures::Scope use_sse2(SSE2);
-          JumpTarget not_number;
-          __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-                 Immediate(Factory::heap_number_map()));
-          not_number.Branch(not_equal, &left_side);
-          __ movdbl(xmm1,
-                    FieldOperand(left_reg, HeapNumber::kValueOffset));
-          int value = Smi::cast(*right_val)->value();
-          if (value == 0) {
-            __ xorpd(xmm0, xmm0);
-          } else {
-            Result temp = allocator()->Allocate();
-            __ mov(temp.reg(), Immediate(value));
-            __ cvtsi2sd(xmm0, Operand(temp.reg()));
-            temp.Unuse();
-          }
-          __ comisd(xmm1, xmm0);
-          // Jump to builtin for NaN.
-          not_number.Branch(parity_even, &left_side);
-          left_side.Unuse();
-          dest->true_target()->Branch(DoubleCondition(cc));
-          dest->false_target()->Jump();
-          not_number.Bind(&left_side);
-        }
-
-        // Setup and call the compare stub.
-        CompareStub stub(cc, strict, kCantBothBeNaN);
-        Result result = frame_->CallStub(&stub, &left_side, &right_side);
-        result.ToRegister();
-        __ cmp(result.reg(), 0);
-        result.Unuse();
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        is_smi.Bind();
-      }
-
-      left_side = Result(left_reg);
-      right_side = Result(right_val);
-      // Test smi equality and comparison by signed int comparison.
-      if (IsUnsafeSmi(right_side.handle())) {
-        right_side.ToRegister();
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-      } else {
-        __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
-      }
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-
+    bool is_loop_condition = (node->AsExpression() != NULL) &&
+        node->AsExpression()->is_loop_condition();
+    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
+                          left_side_constant_smi, right_side_constant_smi,
+                          is_loop_condition);
   } else if (cc == equal &&
              (left_side_constant_null || right_side_constant_null)) {
     // To make null checks efficient, we check if either the left side or
@@ -2592,9 +2591,8 @@ void CodeGenerator::Comparison(AstNode* node,
       ASSERT(temp.is_valid());
       __ mov(temp.reg(),
              FieldOperand(operand.reg(), HeapObject::kMapOffset));
-      __ movzx_b(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kBitFieldOffset));
-      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+                1 << Map::kIsUndetectable);
       temp.Unuse();
       operand.Unuse();
       dest->Split(not_zero);
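The rewritten sequence reads the flag straight from memory with test_b instead of loading and widening the byte first. The flag itself marks "undetectable" objects (such as document.all), which must compare equal to null and undefined. In plain C++ the predicate is a one-liner; the bit index below is a made-up stand-in for V8's Map::kIsUndetectable:

    #include <cstdint>

    constexpr int kIsUndetectableBit = 4;  // assumed value, for illustration

    bool IsUndetectable(uint8_t map_bit_field) {
      return (map_bit_field & (1 << kIsUndetectableBit)) != 0;
    }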
@@ -2656,9 +2654,9 @@ void CodeGenerator::Comparison(AstNode* node,
       // side (which is always a symbol).
       if (cc == equal) {
         Label not_a_symbol;
-        ASSERT(kSymbolTag != 0);
+        STATIC_ASSERT(kSymbolTag != 0);
         // Ensure that no non-strings have the symbol bit set.
-        ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
         __ test(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
         __ j(zero, &not_a_symbol);
         // They are symbols, so do identity compare.
@@ -2688,11 +2686,9 @@ void CodeGenerator::Comparison(AstNode* node,
       // left_side is a sequential ASCII string.
       left_side = Result(left_reg);
       right_side = Result(right_val);
-      Result temp2 = allocator_->Allocate();
-      ASSERT(temp2.is_valid());
       // Test string equality and comparison.
+      Label comparison_done;
       if (cc == equal) {
-        Label comparison_done;
         __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
                Immediate(Smi::FromInt(1)));
         __ j(not_equal, &comparison_done);
@@ -2700,47 +2696,39 @@ void CodeGenerator::Comparison(AstNode* node,
             static_cast<uint8_t>(String::cast(*right_val)->Get(0));
         __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
                 char_value);
-        __ bind(&comparison_done);
       } else {
-        __ mov(temp2.reg(),
-               FieldOperand(left_side.reg(), String::kLengthOffset));
-        __ SmiUntag(temp2.reg());
-        __ sub(Operand(temp2.reg()), Immediate(1));
-        Label comparison;
-        // If the length is 0 then the subtraction gave -1 which compares less
-        // than any character.
-        __ j(negative, &comparison);
-        // Otherwise load the first character.
-        __ movzx_b(temp2.reg(),
-                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
-        __ bind(&comparison);
+        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+               Immediate(Smi::FromInt(1)));
+        // If the length is 0 then the jump is taken and the flags
+        // correctly represent being less than the one-character string.
+        __ j(below, &comparison_done);
         // Compare the first character of the string with the
         // constant 1-character string.
         uint8_t char_value =
             static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmp(Operand(temp2.reg()), Immediate(char_value));
-        Label characters_were_different;
-        __ j(not_equal, &characters_were_different);
+        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+                char_value);
+        __ j(not_equal, &comparison_done);
         // If the first character is the same then the long string sorts after
         // the short one.
         __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
                Immediate(Smi::FromInt(1)));
-        __ bind(&characters_were_different);
       }
-      temp2.Unuse();
+      __ bind(&comparison_done);
       left_side.Unuse();
      right_side.Unuse();
       dest->Split(cc);
     }
   } else {
     // Neither side is a constant Smi, constant 1-char string or constant null.
-    // If either side is a non-smi constant, or known to be a heap number skip
-    // the smi check.
+    // If either side is a non-smi constant, or known to be a heap number,
+    // skip the smi check.
     bool known_non_smi =
         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
         (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
         left_side.type_info().IsDouble() ||
         right_side.type_info().IsDouble();
+
     NaNInformation nan_info =
         (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
         kBothCouldBeNaN :
@@ -2765,14 +2753,15 @@ void CodeGenerator::Comparison(AstNode* node,
     right_side.ToRegister();
 
     if (known_non_smi) {
-      // Inline the equality check if both operands can't be a NaN. If both
-      // objects are the same they are equal.
+      // Inlined equality check:
+      // If at least one of the objects is not NaN, then if the objects
+      // are identical, they are equal.
       if (nan_info == kCantBothBeNaN && cc == equal) {
         __ cmp(left_side.reg(), Operand(right_side.reg()));
         dest->true_target()->Branch(equal);
       }
 
-      // Inline number comparison.
+      // Inlined number comparison:
       if (inline_number_compare) {
         GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
       }
@@ -2795,46 +2784,177 @@ void CodeGenerator::Comparison(AstNode* node,
       Register right_reg = right_side.reg();
 
       // In-line check for comparing two smis.
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), left_side.reg());
-      __ or_(temp.reg(), Operand(right_side.reg()));
-      __ test(temp.reg(), Immediate(kSmiTagMask));
-      temp.Unuse();
-      is_smi.Branch(zero, taken);
+      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
 
-      // Inline the equality check if both operands can't be a NaN. If both
-      // objects are the same they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-        dest->true_target()->Branch(equal);
+      if (has_valid_frame()) {
+        // Inline the equality check if both operands can't be a NaN. If both
+        // objects are the same they are equal.
+        if (nan_info == kCantBothBeNaN && cc == equal) {
+          __ cmp(left_side.reg(), Operand(right_side.reg()));
+          dest->true_target()->Branch(equal);
+        }
+
+        // Inlined number comparison:
+        if (inline_number_compare) {
+          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+        }
+
+        // End of in-line compare, call out to the compare stub. Don't include
+        // number comparison in the stub if it was inlined.
+        CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+        __ test(answer.reg(), Operand(answer.reg()));
+        answer.Unuse();
+        if (is_smi.is_linked()) {
+          dest->true_target()->Branch(cc);
+          dest->false_target()->Jump();
+        } else {
+          dest->Split(cc);
+        }
       }
 
-      // Inline number comparison.
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      if (is_smi.is_linked()) {
+        is_smi.Bind();
+        left_side = Result(left_reg);
+        right_side = Result(right_reg);
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+        right_side.Unuse();
+        left_side.Unuse();
+        dest->Split(cc);
       }
+    }
+  }
+}
 
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+
+void CodeGenerator::ConstantSmiComparison(Condition cc,
+                                          bool strict,
+                                          ControlDestination* dest,
+                                          Result* left_side,
+                                          Result* right_side,
+                                          bool left_side_constant_smi,
+                                          bool right_side_constant_smi,
+                                          bool is_loop_condition) {
+  if (left_side_constant_smi && right_side_constant_smi) {
+    // Trivial case, comparing two constants.
+    int left_value = Smi::cast(*left_side->handle())->value();
+    int right_value = Smi::cast(*right_side->handle())->value();
+    switch (cc) {
+      case less:
+        dest->Goto(left_value < right_value);
+        break;
+      case equal:
+        dest->Goto(left_value == right_value);
+        break;
+      case greater_equal:
+        dest->Goto(left_value >= right_value);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Only one side is a constant Smi.
+    // If left side is a constant Smi, reverse the operands.
+    // Since one side is a constant Smi, conversion order does not matter.
+    if (left_side_constant_smi) {
+      Result* temp = left_side;
+      left_side = right_side;
+      right_side = temp;
+      cc = ReverseCondition(cc);
+      // This may re-introduce greater or less_equal as the value of cc.
+      // CompareStub and the inline code both support all values of cc.
+    }
+    // Implement comparison against a constant Smi, inlining the case
+    // where both sides are Smis.
+    left_side->ToRegister();
+    Register left_reg = left_side->reg();
+    Handle<Object> right_val = right_side->handle();
+
+    if (left_side->is_smi()) {
+      if (FLAG_debug_code) {
+        __ AbortIfNotSmi(left_reg);
+      }
+      // Test smi equality and comparison by signed int comparison.
+      if (IsUnsafeSmi(right_side->handle())) {
+        right_side->ToRegister();
+        __ cmp(left_reg, Operand(right_side->reg()));
+      } else {
+        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+      }
+      left_side->Unuse();
+      right_side->Unuse();
+      dest->Split(cc);
+    } else {
+      // Only the case where the left side could possibly be a non-smi is left.
+      JumpTarget is_smi;
       if (cc == equal) {
-        __ test(answer.reg(), Operand(answer.reg()));
+        // We can do the equality comparison before the smi check.
+        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+        dest->true_target()->Branch(equal);
+        __ test(left_reg, Immediate(kSmiTagMask));
+        dest->false_target()->Branch(zero);
       } else {
-        __ cmp(answer.reg(), 0);
+        // Do the smi check, then the comparison.
+        JumpTarget is_not_smi;
+        __ test(left_reg, Immediate(kSmiTagMask));
+        is_smi.Branch(zero, left_side, right_side);
+      }
+
+      // Jump or fall through to here if we are comparing a non-smi to a
+      // constant smi. If the non-smi is a heap number and this is not
+      // a loop condition, inline the floating point code.
+      if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
+        // Right side is a constant smi and left side has been checked
+        // not to be a smi.
+        CpuFeatures::Scope use_sse2(SSE2);
+        JumpTarget not_number;
+        __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+               Immediate(Factory::heap_number_map()));
+        not_number.Branch(not_equal, left_side);
+        __ movdbl(xmm1,
+                  FieldOperand(left_reg, HeapNumber::kValueOffset));
+        int value = Smi::cast(*right_val)->value();
+        if (value == 0) {
+          __ xorpd(xmm0, xmm0);
+        } else {
+          Result temp = allocator()->Allocate();
+          __ mov(temp.reg(), Immediate(value));
+          __ cvtsi2sd(xmm0, Operand(temp.reg()));
+          temp.Unuse();
+        }
+        __ ucomisd(xmm1, xmm0);
+        // Jump to builtin for NaN.
+        not_number.Branch(parity_even, left_side);
+        left_side->Unuse();
+        dest->true_target()->Branch(DoubleCondition(cc));
+        dest->false_target()->Jump();
+        not_number.Bind(left_side);
       }
-      answer.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
 
-      is_smi.Bind();
-      left_side = Result(left_reg);
-      right_side = Result(right_reg);
-      __ cmp(left_side.reg(), Operand(right_side.reg()));
-      right_side.Unuse();
-      left_side.Unuse();
-      dest->Split(cc);
+      // Setup and call the compare stub.
+      CompareStub stub(cc, strict, kCantBothBeNaN);
+      Result result = frame_->CallStub(&stub, left_side, right_side);
+      result.ToRegister();
+      __ test(result.reg(), Operand(result.reg()));
+      result.Unuse();
+      if (cc == equal) {
+        dest->Split(cc);
+      } else {
+        dest->true_target()->Branch(cc);
+        dest->false_target()->Jump();
+
+        // It is important for performance for this case to be at the end.
+        is_smi.Bind(left_side, right_side);
+        if (IsUnsafeSmi(right_side->handle())) {
+          right_side->ToRegister();
+          __ cmp(left_reg, Operand(right_side->reg()));
+        } else {
+          __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+        }
+        left_side->Unuse();
+        right_side->Unuse();
+        dest->Split(cc);
+      }
     }
   }
 }
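The cc == equal branch of the new ConstantSmiComparison is the interesting one: it compares against the constant before checking the smi tag. Bit equality with a tagged-smi immediate already proves equality, and a smi that differs from the constant can never be equal, so only genuine heap objects fall through to the heavier number/stub path. A sketch of that decision order (hypothetical helper, not V8 API):

    #include <cstdint>

    enum class Answer { kTrue, kFalse, kNeedSlowPath };

    Answer EqualsConstantSmi(intptr_t word, intptr_t tagged_constant) {
      if (word == tagged_constant) return Answer::kTrue;  // dest->true_target()
      if ((word & 1) == 0) return Answer::kFalse;         // a different smi
      return Answer::kNeedSlowPath;                       // heap object
    }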
@@ -2899,19 +3019,19 @@ static void LoadComparisonOperand(MacroAssembler* masm_,
 // target passing the left and right result if the operand is not a number.
 static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
                                       Result* operand,
-                                      XMMRegister reg,
+                                      XMMRegister xmm_reg,
                                       Result* left_side,
                                       Result* right_side,
                                       JumpTarget* not_numbers) {
   Label done;
   if (operand->type_info().IsDouble()) {
     // Operand is known to be a heap number, just load it.
-    __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
   } else if (operand->type_info().IsSmi()) {
     // Operand is known to be a smi. Convert it to double and keep the original
     // smi.
     __ SmiUntag(operand->reg());
-    __ cvtsi2sd(reg, Operand(operand->reg()));
+    __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
     __ SmiTag(operand->reg());
   } else {
     // Operand type not known, check for smi or heap number.
@@ -2923,13 +3043,13 @@ static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
            Immediate(Factory::heap_number_map()));
     not_numbers->Branch(not_equal, left_side, right_side, taken);
   }
-  __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
   __ jmp(&done);
 
   __ bind(&smi);
   // Convert smi to float and keep the original smi.
   __ SmiUntag(operand->reg());
-  __ cvtsi2sd(reg, Operand(operand->reg()));
+  __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
   __ SmiTag(operand->reg());
   __ jmp(&done);
 }
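The comisd → ucomisd swaps in this area don't change the flag results: both instructions set the parity flag when an operand is NaN (an "unordered" compare), which callers route to the runtime via parity_even; ucomisd merely avoids raising an exception on quiet NaNs. The logic the flag dance implements, in portable C++:

    #include <cmath>

    enum class CompareResult { kLess, kEqual, kGreater, kUnordered };

    CompareResult Compare(double lhs, double rhs) {
      if (std::isunordered(lhs, rhs)) return CompareResult::kUnordered;  // PF set
      if (lhs < rhs) return CompareResult::kLess;
      if (lhs > rhs) return CompareResult::kGreater;
      return CompareResult::kEqual;
    }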
@@ -2953,7 +3073,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
                               &not_numbers);
     LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
                               &not_numbers);
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
   } else {
     Label check_right, compare;
 
@@ -2991,6 +3111,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
+    frame_->SpillTop();
   }
 
   // Record the position for debugging purposes.
@@ -3035,9 +3156,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  Result existing_args =
-      LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
-  frame()->Push(&existing_args);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -3083,8 +3202,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
     // JS_FUNCTION_TYPE is the last instance type and it is right
     // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
     // bound.
-    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-    ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
     __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
     __ j(below, &build_args);
 
@@ -3241,6 +3360,9 @@ void CodeGenerator::VisitAndSpill(Statement* statement) {
 
 
 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
   ASSERT(in_spilled_code());
   set_in_spilled_code(false);
   VisitStatements(statements);
@@ -3248,14 +3370,20 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
     frame_->SpillAll();
   }
   set_in_spilled_code(true);
+
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
   ASSERT(!in_spilled_code());
   for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
     Visit(statements->at(i));
   }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
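The #ifdef DEBUG bookkeeping added here pins down a virtual-frame invariant: visiting statements must be height-neutral unless the frame was abandoned on a dead-code path. A toy RAII version of the same check, with a hypothetical Frame type standing in for the code generator's virtual frame:

    #include <cassert>

    struct Frame {
      int height = 0;     // stand-in for frame_->height()
      bool valid = true;  // stand-in for has_valid_frame()
    };

    class HeightInvariant {
     public:
      explicit HeightInvariant(const Frame* frame)
          : frame_(frame), original_height_(frame->height) {}
      ~HeightInvariant() {
        // Mirrors ASSERT(!has_valid_frame() || height == original_height).
        assert(!frame_->valid || frame_->height == original_height_);
      }
     private:
      const Frame* frame_;
      int original_height_;
    };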
 
 
@@ -3518,8 +3646,10 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
   return_value->ToRegister(eax);
 
   // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
   Label check_exit_codesize;
   masm_->bind(&check_exit_codesize);
+#endif
 
   // Leave the frame and return popping the arguments and the
   // receiver.
@@ -3640,7 +3770,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
     }
   }
 
-
   // The last instruction emitted was a jump, either to the default
   // clause or the break target, or else to a case body from the loop
   // that compiles the tests.
@@ -3728,8 +3857,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   // Compile the test.
   switch (info) {
     case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back to
-      // the top and bind the break target at the exit.
+      // If control flow can fall off the end of the body, jump back
+      // to the top and bind the break target at the exit.
       if (has_valid_frame()) {
         node->continue_target()->Jump();
       }
@@ -3765,6 +3894,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   }
 
   DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
 }
 
 
@@ -3849,8 +3980,8 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
       break;
     case DONT_KNOW:
       if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom, then
-        // it is the continue target.
+        // If we have chosen to recompile the test at the bottom,
+        // then it is the continue target.
         if (node->continue_target()->is_linked()) {
           node->continue_target()->Bind();
         }
@@ -3966,6 +4097,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
       node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
       loop.Bind();
     }
+
     // Compile the test with the body as the true target and preferred
     // fall-through and with the break target as the false target.
     ControlDestination dest(&body, node->break_target(), true);
@@ -4075,8 +4207,8 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
       break;
   }
 
-  // The break target may be already bound (by the condition), or
-  // there may not be a valid frame. Bind it only if needed.
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame. Bind it only if needed.
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
@@ -4121,9 +4253,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   // eax: value to be iterated over
   __ test(eax, Immediate(kSmiTagMask));
   primitive.Branch(zero);
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
   jsobject.Branch(above_equal);
 
   primitive.Bind();
@@ -4210,7 +4340,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   frame_->EmitPush(eax);  // <- slot 3
   frame_->EmitPush(edx);  // <- slot 2
   __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
-  __ SmiTag(eax);
   frame_->EmitPush(eax);  // <- slot 1
   frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
   entry.Jump();
@@ -4222,7 +4351,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
 
   // Push the length of the array and the initial index onto the stack.
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
-  __ SmiTag(eax);
   frame_->EmitPush(eax);  // <- slot 1
   frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
 
@@ -4239,8 +4367,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
 
   // Get the i'th entry of the array.
   __ mov(edx, frame_->ElementAt(2));
-  __ mov(ebx, Operand(edx, eax, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, FixedArrayElementOperand(edx, eax));
 
   // Get the expected map from the stack or a zero map in the
   // permanent slow case eax: current iteration count ebx: i'th entry
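FixedArrayElementOperand packages the addressing trick visible in the removed operand: the loop index in eax is still a tagged smi, i.e. i << 1, so the hardware times_2 scale multiplies by a further 2 and lands on i * 4 == i * kPointerSize on ia32, while the header size and heap-object tag fold into the displacement. As plain integer arithmetic (the constants are stand-ins for the V8 values):

    #include <cstdint>

    constexpr intptr_t kPointerSize = 4;    // ia32
    constexpr intptr_t kHeapObjectTag = 1;  // heap pointers carry a 1 tag
    constexpr intptr_t kHeaderSize = 8;     // stand-in for FixedArray::kHeaderSize

    intptr_t ElementAddress(intptr_t tagged_array, intptr_t tagged_index) {
      // tagged_index == i << 1, so "* 2" scales it to i * kPointerSize.
      return tagged_array - kHeapObjectTag + kHeaderSize + tagged_index * 2;
    }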
@@ -4400,7 +4527,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
     // The next handler address is on top of the frame. Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
     if (has_unlinks) {
@@ -4431,7 +4558,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
     __ mov(esp, Operand::StaticVariable(handler_address));
     frame_->Forget(frame_->height() - handler_height);
 
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4517,7 +4644,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4556,7 +4683,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
     frame_->Forget(frame_->height() - handler_height);
 
     // Unlink this handler and drop it from the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4729,49 +4856,21 @@ void CodeGenerator::VisitConditional(Conditional* node) {
 }
 
 
-Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  Result result;
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
     JumpTarget slow;
     JumpTarget done;
+    Result value;
 
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables. Eval is used a lot without
-    // introducing variables. In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
-    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
-      // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) return result;
-      done.Jump(&result);
-
-    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads.
-      if (potential_slot != NULL) {
-        // Allocate a fresh register to use as a temp in
-        // ContextSlotOperandCheckExtensions and to hold the result
-        // value.
-        result = allocator()->Allocate();
-        ASSERT(result.is_valid());
-        __ mov(result.reg(),
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 result,
-                                                 &slow));
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(result.reg(), Factory::the_hole_value());
-          done.Branch(not_equal, &result);
-          __ mov(result.reg(), Factory::undefined_value());
-        }
-        // There is always control flow to slow from
-        // ContextSlotOperandCheckExtensions so we have to jump around
-        // it.
-        done.Jump(&result);
-      }
-    }
+    // Generate fast case for loading from slots that correspond to
+    // local/global variables or arguments unless they are shadowed by
+    // eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(slot,
+                                    typeof_state,
+                                    &value,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // A runtime call is inevitable. We eagerly sync frame elements
@@ -4781,14 +4880,14 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
     frame()->EmitPush(esi);
     frame()->EmitPush(Immediate(slot->var()->name()));
     if (typeof_state == INSIDE_TYPEOF) {
-      result =
+      value =
           frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
+      value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    done.Bind(&result);
-    return result;
+    done.Bind(&value);
+    frame_->Push(&value);
 
   } else if (slot->var()->mode() == Variable::CONST) {
     // Const slots may contain 'the hole' value (the constant hasn't been
@@ -4805,15 +4904,13 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
     __ j(not_equal, &exit);
     __ mov(ecx, Factory::undefined_value());
     __ bind(&exit);
-    return Result(ecx);
+    frame()->EmitPush(ecx);
 
   } else if (slot->type() == Slot::PARAMETER) {
     frame()->PushParameterAt(slot->index());
-    return frame()->Pop();
 
   } else if (slot->type() == Slot::LOCAL) {
     frame()->PushLocalAt(slot->index());
-    return frame()->Pop();
 
   } else {
     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4822,46 +4919,48 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
     // The use of SlotOperand below is safe for an unspilled frame
     // because it will always be a context slot.
     ASSERT(slot->type() == Slot::CONTEXT);
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-    __ mov(result.reg(), SlotOperand(slot, result.reg()));
-    return result;
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame()->Push(&temp);
   }
 }
 
 
-Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
                                                     TypeofState state) {
-  Result result = LoadFromSlot(slot, state);
+  LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
 
   // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
 
   // If the loaded value is a constant, we know if the arguments
   // object has been lazily loaded yet.
+  Result result = frame()->Pop();
   if (result.is_constant()) {
     if (result.handle()->IsTheHole()) {
-      result.Unuse();
-      return StoreArgumentsObject(false);
-    } else {
-      return result;
+      result = StoreArgumentsObject(false);
     }
+    frame()->Push(&result);
+    return;
   }
-
+  ASSERT(result.is_register());
   // The loaded value is in a register. If it is the sentinel that
   // indicates that we haven't loaded the arguments object yet, we
   // need to do it now.
   JumpTarget exit;
   __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
-  exit.Branch(not_equal, &result);
+  frame()->Push(&result);
+  exit.Branch(not_equal);
 
-  result.Unuse();
   result = StoreArgumentsObject(false);
-  exit.Bind(&result);
-  return result;
+  frame()->SetElementAt(0, &result);
+  result.Unuse();
+  exit.Bind();
+  return;
 }
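The hole check above is a classic sentinel pattern: the arguments slot holds a distinguished "hole" value until the arguments object is really needed, and the first genuine load materializes it. A toy model (the type and the allocation call are hypothetical stand-ins for Factory::the_hole_value() and StoreArgumentsObject):

    struct ArgumentsObject {};

    static ArgumentsObject* const kTheHole = new ArgumentsObject();  // unique sentinel

    ArgumentsObject* LoadArguments(ArgumentsObject*& slot) {
      if (slot == kTheHole) {
        slot = new ArgumentsObject();  // materialize lazily on first use
      }
      return slot;
    }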
 
 
@@ -4940,6 +5039,68 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
 }
 
 
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                                    TypeofState typeof_state,
+                                                    Result* result,
+                                                    JumpTarget* slow,
+                                                    JumpTarget* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables. Eval is used a lot without
+  // introducing variables. In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    done->Jump(result);
+
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      // Allocate a fresh register to use as a temp in
+      // ContextSlotOperandCheckExtensions and to hold the result
+      // value.
+      *result = allocator()->Allocate();
+      ASSERT(result->is_valid());
+      __ mov(result->reg(),
+             ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
+        __ cmp(result->reg(), Factory::the_hole_value());
+        done->Branch(not_equal, result);
+        __ mov(result->reg(), Factory::undefined_value());
+      }
+      done->Jump(result);
+    } else if (rewrite != NULL) {
+      // Generate fast case for calls of an argument function.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          Result arguments = allocator()->Allocate();
+          ASSERT(arguments.is_valid());
+          __ mov(arguments.reg(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                                                   arguments,
+                                                   slow));
+          frame_->Push(&arguments);
+          frame_->Push(key_literal->handle());
+          *result = EmitKeyedLoad();
+          done->Jump(result);
+        }
+      }
+    }
+  }
+}
+
+
 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
@@ -5049,8 +5210,7 @@ void CodeGenerator::VisitSlot(Slot* slot) {
       UNREACHABLE();
     }
   } else {
-    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
   }
 }
 
@@ -5230,8 +5390,13 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
         // Duplicate the object as the IC receiver.
         frame_->Dup();
         Load(property->value());
-        Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
-        dummy.Unuse();
+        Result ignored =
+            frame_->CallStoreIC(Handle<String>::cast(key), false);
+        // A test eax instruction following the store IC call would
+        // indicate the presence of an inlined version of the
+        // store. Add a nop to indicate that there is no such
+        // inlined version.
+        __ nop();
         break;
       }
       // Fall through
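The nop added after the store IC call is a marker for the IC machinery, which inspects the instruction that follows a call site: a test eax there flags an inlined fast-path store that can be patched, while a nop says there is none. A sketch of that convention (the byte values are the x86 opcodes; treat the helper as illustrative, not V8's actual patching code):

    #include <cstdint>

    constexpr uint8_t kTestEaxOpcode = 0xA9;  // test eax, imm32
    constexpr uint8_t kNopOpcode = 0x90;      // nop

    bool HasInlinedStoreCode(const uint8_t* instruction_after_call) {
      return *instruction_after_call == kTestEaxOpcode;
    }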
@@ -5371,8 +5536,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
     if (node->is_compound()) {
       // For a compound assignment the right-hand side is a binary operation
       // between the current property value and the actual right-hand side.
-      Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      frame()->Push(&result);
+      LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       Load(node->value());
 
       // Perform the binary operation.
@@ -5706,25 +5870,66 @@ void CodeGenerator::VisitCall(Call* node) {
 
     // Allocate a frame slot for the receiver.
     frame_->Push(Factory::undefined_value());
+
+    // Load the arguments.
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
-    // Prepare the stack for the call to ResolvePossiblyDirectEval.
+    // Result to hold the result of the function resolution and the
+    // final result of the eval call.
+    Result result;
+
+    // If we know that eval can only be shadowed by eval-introduced
+    // variables we attempt to load the global eval function directly
+    // in generated code. If we succeed, there is no need to perform a
+    // context lookup in the runtime system.
+    JumpTarget done;
+    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      ASSERT(var->slot()->type() == Slot::LOOKUP);
+      JumpTarget slow;
+      // Prepare the stack for the call to
+      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+      // function, the first argument to the eval call and the
+      // receiver.
+      Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+                                                     NOT_INSIDE_TYPEOF,
+                                                     &slow);
+      frame_->Push(&fun);
+      if (arg_count > 0) {
+        frame_->PushElementAt(arg_count);
+      } else {
+        frame_->Push(Factory::undefined_value());
+      }
+      frame_->PushParameterAt(-1);
+
+      // Resolve the call.
+      result =
+          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+
+      done.Jump(&result);
+      slow.Bind();
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval by
+    // pushing the loaded function, the first argument to the eval
+    // call and the receiver.
     frame_->PushElementAt(arg_count + 1);
     if (arg_count > 0) {
       frame_->PushElementAt(arg_count);
     } else {
       frame_->Push(Factory::undefined_value());
     }
-
-    // Push the receiver.
     frame_->PushParameterAt(-1);
 
     // Resolve the call.
-    Result result =
-        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // If we generated fast-case code bind the jump-target where fast
+    // and slow case merge.
+    if (done.is_linked()) done.Bind(&result);
 
     // The runtime call returns a pair of values in eax (function) and
     // edx (receiver). Touch up the stack with the right values.
@@ -5758,6 +5963,7 @@ void CodeGenerator::VisitCall(Call* node) {
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
     // Push the name of the function onto the frame.
@@ -5774,11 +5980,33 @@ void CodeGenerator::VisitCall(Call* node) {
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
     // ----------------------------------
-    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
+    // JavaScript examples:
+    //
+    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
+    //
+    //  function f() {};
+    //  function g() {
+    //    eval(...);
+    //    f();  // f could be in extension object.
+    //  }
     // ----------------------------------
 
-    // Load the function from the context. Sync the frame so we can
-    // push the arguments directly into place.
+    JumpTarget slow, done;
+    Result function;
+
+    // Generate fast case for loading functions from slots that
+    // correspond to local/global variables or arguments unless they
+    // are shadowed by eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(var->slot(),
+                                    NOT_INSIDE_TYPEOF,
+                                    &function,
+                                    &slow,
+                                    &done);
+
+    slow.Bind();
+    // Enter the runtime system to load the function from the context.
+    // Sync the frame so we can push the arguments directly into
+    // place.
     frame_->SyncRange(0, frame_->element_count() - 1);
     frame_->EmitPush(esi);
     frame_->EmitPush(Immediate(var->name()));
@@ -5795,6 +6023,18 @@ void CodeGenerator::VisitCall(Call* node) {
     ASSERT(!allocator()->is_used(edx));
     frame_->EmitPush(edx);
 
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      JumpTarget call;
+      call.Jump();
+      done.Bind(&function);
+      frame_->Push(&function);
+      LoadGlobalReceiver();
+      call.Bind();
+    }
+
     // Call the function.
     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
 
@@ -5829,6 +6069,7 @@ void CodeGenerator::VisitCall(Call* node) {
       int arg_count = args->length();
       for (int i = 0; i < arg_count; i++) {
         Load(args->at(i));
+        frame_->SpillTop();
       }
 
       // Push the name of the function onto the frame.
@@ -5856,18 +6097,31 @@ void CodeGenerator::VisitCall(Call* node) {
       ref.GetValue();
       // Use global object as receiver.
       LoadGlobalReceiver();
+      // Call the function.
+      CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
     } else {
+      // Push the receiver onto the frame.
       Load(property->obj());
-      frame()->Dup();
+
+      // Load the arguments.
+      int arg_count = args->length();
+      for (int i = 0; i < arg_count; i++) {
+        Load(args->at(i));
+        frame_->SpillTop();
+      }
+
+      // Load the name of the function.
       Load(property->key());
-      Result function = EmitKeyedLoad();
-      Result receiver = frame_->Pop();
-      frame_->Push(&function);
-      frame_->Push(&receiver);
-    }
 
-    // Call the function.
-    CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+      // Call the IC initialization code.
+      CodeForSourcePosition(node->position());
+      Result result =
+          frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+                                  arg_count,
+                                  loop_nesting());
+      frame_->RestoreContextRegister();
+      frame_->Push(&result);
+    }
   }
 
   } else {
@@ -5964,29 +6218,67 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
 }
 
 
-// This generates code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat, 8 and 16 bit characters and cons strings where the
-// answer is found in the left hand branch of the cons. The slow case will
-// flatten the string, which will ensure that the answer is in the left hand
-// side the next time around.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateFastCharCodeAt");
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+  DeferredStringCharCodeAt(Register object,
+                           Register index,
+                           Register scratch,
+                           Register result)
+      : result_(result),
+        char_code_at_generator_(object,
+                                index,
+                                scratch,
+                                result,
+                                &need_conversion_,
+                                &need_conversion_,
+                                &index_out_of_range_,
+                                STRING_INDEX_IS_NUMBER) {}
+
+  StringCharCodeAtGenerator* fast_case_generator() {
+    return &char_code_at_generator_;
+  }
+
+  virtual void Generate() {
+    VirtualFrameRuntimeCallHelper call_helper(frame_state());
+    char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+    __ bind(&need_conversion_);
+    // Move the undefined value into the result register, which will
+    // trigger conversion.
+    __ Set(result_, Immediate(Factory::undefined_value()));
+    __ jmp(exit_label());
+
+    __ bind(&index_out_of_range_);
+    // When the index is out of range, the spec requires us to return
+    // NaN.
+    __ Set(result_, Immediate(Factory::nan_value()));
+    __ jmp(exit_label());
+  }
+
+ private:
+  Register result_;
+
+  Label need_conversion_;
+  Label index_out_of_range_;
+
+  StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+  Comment(masm_, "[ GenerateStringCharCodeAt");
   ASSERT(args->length() == 2);
 
   Load(args->at(0));
   Load(args->at(1));
   Result index = frame_->Pop();
   Result object = frame_->Pop();
-
-  // We will mutate the index register and possibly the object register.
-  // The case where they are somehow the same register is handled
-  // because we only mutate them in the case where the receiver is a
-  // heap object and the index is not.
   object.ToRegister();
   index.ToRegister();
+  // We might mutate the object register.
   frame_->Spill(object.reg());
-  frame_->Spill(index.reg());
 
   // We need two extra registers.
   Result result = allocator()->Allocate();
@@ -5994,33 +6286,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
5994
6286
  Result scratch = allocator()->Allocate();
5995
6287
  ASSERT(scratch.is_valid());
5996
6288
 
5997
- // There is no virtual frame effect from here up to the final result
5998
- // push.
5999
- Label slow_case;
6000
- Label exit;
6001
- StringHelper::GenerateFastCharCodeAt(masm_,
6002
- object.reg(),
6003
- index.reg(),
6004
- scratch.reg(),
6005
- result.reg(),
6006
- &slow_case,
6007
- &slow_case,
6008
- &slow_case,
6009
- &slow_case);
6010
- __ jmp(&exit);
6011
-
6012
- __ bind(&slow_case);
6013
- // Move the undefined value into the result register, which will
6014
- // trigger the slow case.
6015
- __ Set(result.reg(), Immediate(Factory::undefined_value()));
6016
-
6017
- __ bind(&exit);
6289
+ DeferredStringCharCodeAt* deferred =
6290
+ new DeferredStringCharCodeAt(object.reg(),
6291
+ index.reg(),
6292
+ scratch.reg(),
6293
+ result.reg());
6294
+ deferred->fast_case_generator()->GenerateFast(masm_);
6295
+ deferred->BindExit();
6018
6296
  frame_->Push(&result);
6019
6297
  }
6020
6298
 
6021
6299
 
6022
- void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
6023
- Comment(masm_, "[ GenerateCharFromCode");
6300
+ class DeferredStringCharFromCode : public DeferredCode {
6301
+ public:
6302
+ DeferredStringCharFromCode(Register code,
6303
+ Register result)
6304
+ : char_from_code_generator_(code, result) {}
6305
+
6306
+ StringCharFromCodeGenerator* fast_case_generator() {
6307
+ return &char_from_code_generator_;
6308
+ }
6309
+
6310
+ virtual void Generate() {
6311
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
6312
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
6313
+ }
6314
+
6315
+ private:
6316
+ StringCharFromCodeGenerator char_from_code_generator_;
6317
+ };
6318
+
6319
+
6320
+ // Generates code for creating a one-char string from a char code.
6321
+ void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
6322
+ Comment(masm_, "[ GenerateStringCharFromCode");
6024
6323
  ASSERT(args->length() == 1);
6025
6324
 
6026
6325
  Load(args->at(0));
@@ -6029,16 +6328,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
6029
6328
  code.ToRegister();
6030
6329
  ASSERT(code.is_valid());
6031
6330
 
6032
- // StringHelper::GenerateCharFromCode may do a runtime call.
6033
- frame_->SpillAll();
6034
-
6035
6331
  Result result = allocator()->Allocate();
6036
6332
  ASSERT(result.is_valid());
6037
6333
 
6038
- StringHelper::GenerateCharFromCode(masm_,
6039
- code.reg(),
6040
- result.reg(),
6041
- CALL_FUNCTION);
6334
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
6335
+ code.reg(), result.reg());
6336
+ deferred->fast_case_generator()->GenerateFast(masm_);
6337
+ deferred->BindExit();
6338
+ frame_->Push(&result);
6339
+ }
6340
+
6341
+
6342
+ class DeferredStringCharAt : public DeferredCode {
6343
+ public:
6344
+ DeferredStringCharAt(Register object,
6345
+ Register index,
6346
+ Register scratch1,
6347
+ Register scratch2,
6348
+ Register result)
6349
+ : result_(result),
6350
+ char_at_generator_(object,
6351
+ index,
6352
+ scratch1,
6353
+ scratch2,
6354
+ result,
6355
+ &need_conversion_,
6356
+ &need_conversion_,
6357
+ &index_out_of_range_,
6358
+ STRING_INDEX_IS_NUMBER) {}
6359
+
6360
+ StringCharAtGenerator* fast_case_generator() {
6361
+ return &char_at_generator_;
6362
+ }
6363
+
6364
+ virtual void Generate() {
6365
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
6366
+ char_at_generator_.GenerateSlow(masm(), call_helper);
6367
+
6368
+ __ bind(&need_conversion_);
6369
+ // Move smi zero into the result register, which will trigger
6370
+ // conversion.
6371
+ __ Set(result_, Immediate(Smi::FromInt(0)));
6372
+ __ jmp(exit_label());
6373
+
6374
+ __ bind(&index_out_of_range_);
6375
+ // When the index is out of range, the spec requires us to return
6376
+ // the empty string.
6377
+ __ Set(result_, Immediate(Factory::empty_string()));
6378
+ __ jmp(exit_label());
6379
+ }
6380
+
6381
+ private:
6382
+ Register result_;
6383
+
6384
+ Label need_conversion_;
6385
+ Label index_out_of_range_;
6386
+
6387
+ StringCharAtGenerator char_at_generator_;
6388
+ };
6389
+
6390
+
6391
+ // This generates code that performs a String.prototype.charAt() call
6392
+ // or returns a smi in order to trigger conversion.
6393
+ void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
6394
+ Comment(masm_, "[ GenerateStringCharAt");
6395
+ ASSERT(args->length() == 2);
6396
+
6397
+ Load(args->at(0));
6398
+ Load(args->at(1));
6399
+ Result index = frame_->Pop();
6400
+ Result object = frame_->Pop();
6401
+ object.ToRegister();
6402
+ index.ToRegister();
6403
+ // We might mutate the object register.
6404
+ frame_->Spill(object.reg());
6405
+
6406
+ // We need three extra registers.
6407
+ Result result = allocator()->Allocate();
6408
+ ASSERT(result.is_valid());
6409
+ Result scratch1 = allocator()->Allocate();
6410
+ ASSERT(scratch1.is_valid());
6411
+ Result scratch2 = allocator()->Allocate();
6412
+ ASSERT(scratch2.is_valid());
6413
+
6414
+ DeferredStringCharAt* deferred =
6415
+ new DeferredStringCharAt(object.reg(),
6416
+ index.reg(),
6417
+ scratch1.reg(),
6418
+ scratch2.reg(),
6419
+ result.reg());
6420
+ deferred->fast_case_generator()->GenerateFast(masm_);
6421
+ deferred->BindExit();
6042
6422
  frame_->Push(&result);
6043
6423
  }
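
Note: all three string helpers above follow the same DeferredCode shape: the fast case is emitted inline and falls through, while conversion and out-of-range handling are parked in an out-of-line block that rejoins at BindExit(). A runnable caricature in ordinary C++, with the generated control flow modeled as function calls (names hypothetical):

    #include <cmath>
    #include <optional>
    #include <string>

    // Fast case: flat 8-bit string, index in range.
    std::optional<int> CharCodeAtFast(const std::string& s, size_t i) {
      if (i < s.size()) return static_cast<unsigned char>(s[i]);
      return std::nullopt;  // punt to the deferred block
    }

    double CharCodeAtSlow(const std::string&, size_t) {
      return std::nan("");  // spec: an out-of-range index yields NaN
    }

    double CharCodeAt(const std::string& s, size_t i) {
      if (auto fast = CharCodeAtFast(s, i)) return *fast;  // common path
      return CharCodeAtSlow(s, i);  // deferred slow case, shared exit
    }
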
 
@@ -6098,17 +6478,39 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
  ASSERT(map.is_valid());
  __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
  // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
  __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(less);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
  obj.Unuse();
  map.Unuse();
- destination()->Split(less_equal);
+ destination()->Split(below_equal);
+ }
+
+
+ void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+ // typeof(arg) == function).
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // Check that this is an object.
+ frame_->Spill(value.reg());
+ __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
+ value.Unuse();
+ destination()->Split(above_equal);
  }
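
Note: the rewritten type test uses the classic unsigned range-check idiom. Subtracting FIRST_JS_OBJECT_TYPE biases the range to start at zero, so one unsigned compare replaces two signed ones; anything below the lower bound wraps to a huge unsigned value. That is also why the branch conditions change from less/less_equal to below/below_equal, the unsigned condition codes. The equivalence, spelled out:

    #include <cassert>

    bool InRangeTwoCompares(int x, int lo, int hi) {
      return lo <= x && x <= hi;
    }

    // One subtract plus one unsigned compare; values below lo wrap around.
    bool InRangeOneCompare(int x, int lo, int hi) {
      return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
    }

    int main() {
      for (int x = -8; x < 32; ++x)
        assert(InRangeTwoCompares(x, 3, 7) == InRangeOneCompare(x, 3, 7));
    }
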
 
 
@@ -6141,9 +6543,8 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
  ASSERT(temp.is_valid());
  __ mov(temp.reg(),
  FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  obj.Unuse();
  temp.Unuse();
  destination()->Split(not_zero);
@@ -6217,20 +6618,16 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
 
  // Check that the object is a JS object but take special care of JS
  // functions to make sure they have 'Function' as their class.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
- __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
- null.Branch(less);
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
 
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
- }
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
 
  // Check if the constructor in the map is a function.
  { Result tmp = allocator()->Allocate();
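
Note: a pattern that recurs throughout this diff is runtime ASSERTs over compile-time constants becoming STATIC_ASSERTs, which fail the build instead of firing (debug-only) at execution time and cost nothing in release code. The distinction in modern C++ terms:

    #include <cassert>

    const int kSmiTag = 0;
    const int kSmiTagSize = 1;

    void RuntimeCheck() {
      assert(kSmiTag == 0);  // checked on every execution, debug builds only
    }

    // Rejected at compile time if ever false; V8's STATIC_ASSERT macro
    // predates C++11 but has the same effect as static_assert.
    static_assert(kSmiTag == 0, "smi tag must be zero");
    static_assert(kSmiTagSize == 1, "smi tag is one bit");
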
@@ -6369,7 +6766,7 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
 
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);
- ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
+ STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
  Result ebp_as_smi = allocator_->Allocate();
  ASSERT(ebp_as_smi.is_valid());
  __ mov(ebp_as_smi.reg(), Operand(ebp));
@@ -6389,11 +6786,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
  __ jmp(&heapnumber_allocated);
 
  __ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ push(Immediate(Smi::FromInt(0)));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
  __ mov(edi, eax);
 
  __ bind(&heapnumber_allocated);
@@ -6544,9 +6938,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
  Immediate(Factory::fixed_array_map()));
  // Set length.
- __ SmiUntag(ecx);
  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
  // Fill contents of fixed-array with the-hole.
+ __ SmiUntag(ecx);
  __ mov(edx, Immediate(Factory::the_hole_value()));
  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
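
Note: the moved SmiUntag reflects FixedArray lengths now being stored smi-tagged in 2.3.3: the tagged length is written to the length field as-is, and only the loop counter needs untagging. On ia32 a smi is the integer shifted left one bit with a zero tag, so tagging is a doubling and untagging an arithmetic shift right:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      assert(SmiTag(42) == 84);       // tagged value is 2 * untagged
      assert((SmiTag(42) & 1) == 0);  // tag bit is zero
      assert(SmiUntag(SmiTag(42)) == 42);
    }
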
@@ -6582,14 +6976,108 @@ class DeferredSearchCache: public DeferredCode {
  virtual void Generate();
 
  private:
- Register dst_, cache_, key_;
+ Register dst_; // on invocation Smi index of finger, on exit
+ // holds value being looked up.
+ Register cache_; // instance of JSFunctionResultCache.
+ Register key_; // key being looked up.
 };
 
 
 void DeferredSearchCache::Generate() {
- __ push(cache_);
+ Label first_loop, search_further, second_loop, cache_miss;
+
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
+ Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
+
+ // Check the cache from finger to start of the cache.
+ __ bind(&first_loop);
+ __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
+ __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
+ __ j(less, &search_further);
+
+ __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
+ __ j(not_equal, &first_loop);
+
+ __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
+ __ jmp(exit_label());
+
+ __ bind(&search_further);
+
+ // Check the cache from end of cache up to finger.
+ __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
+
+ __ bind(&second_loop);
+ __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
+ // Consider prefetching into some reg.
+ __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
+ __ j(less_equal, &cache_miss);
+
+ __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
+ __ j(not_equal, &second_loop);
+
+ __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
+ __ jmp(exit_label());
+
+ __ bind(&cache_miss);
+ __ push(cache_); // store a reference to cache
+ __ push(key_); // store a key
+ __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ push(key_);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ // On ia32 function must be in edi.
+ __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
+ ParameterCount expected(1);
+ __ InvokeFunction(edi, expected, CALL_FUNCTION);
+
+ // Find a place to put new cached value into.
+ Label add_new_entry, update_cache;
+ __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
+ // Possible optimization: cache size is constant for the given cache
+ // so technically we could use a constant here. However, if we have
+ // cache miss this optimization would hardly matter much.
+
+ // Check if we could add new entry to cache.
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
+ __ j(greater, &add_new_entry);
+
+ // Check if we could evict entry after finger.
+ __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
+ __ add(Operand(edx), Immediate(kEntrySizeSmi));
+ __ cmp(ebx, Operand(edx));
+ __ j(greater, &update_cache);
+
+ // Need to wrap over the cache.
+ __ mov(edx, Immediate(kEntriesIndexSmi));
+ __ jmp(&update_cache);
+
+ __ bind(&add_new_entry);
+ __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
+ __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
+ __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
+
+ // Update the cache itself.
+ // edx holds the index.
+ __ bind(&update_cache);
+ __ pop(ebx); // restore the key
+ __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
+ // Store key.
+ __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
+ __ RecordWrite(ecx, 0, ebx, edx);
+
+ // Store value.
+ __ pop(ecx); // restore the cache.
+ __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
+ __ add(Operand(edx), Immediate(Smi::FromInt(1)));
+ __ mov(ebx, eax);
+ __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
+ __ RecordWrite(ecx, 0, ebx, edx);
+
  if (!dst_.is(eax)) {
  __ mov(dst_, eax);
  }
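
Note: the rewritten deferred case searches the JSFunctionResultCache inline instead of immediately calling the runtime. Entries are scanned backwards from the finger (the index of the last hit) to the start, then from the end of the cache back down to the finger, and only a full miss invokes the factory function. A high-level model of that lookup; the flat key/value layout here is a simplification of the real array layout:

    #include <optional>
    #include <utility>
    #include <vector>

    struct ResultCache {
      std::vector<std::pair<int, int>> entries;  // (key, value) pairs
      size_t finger = 0;                         // index of the last hit

      std::optional<int> Lookup(int key) {
        // First loop: from the finger down to the start of the cache.
        for (size_t i = finger; i-- > 0;)
          if (entries[i].first == key) { finger = i; return entries[i].second; }
        // Second loop: from the end of the cache down to the finger.
        for (size_t i = entries.size(); i-- > finger + 1;)
          if (entries[i].first == key) { finger = i; return entries[i].second; }
        return std::nullopt;  // miss: caller runs the factory and caches
      }
    };
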
@@ -6631,21 +7119,14 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
  cache.reg(),
  key.reg());
 
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
  // tmp.reg() now holds finger offset as a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmp(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), // as smi
- times_half_pointer_size,
- FixedArray::kHeaderSize));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ mov(tmp.reg(), FieldOperand(cache.reg(),
+ JSFunctionResultCache::kFingerOffset));
+ __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
  deferred->Branch(not_equal);
 
- __ mov(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), // as smi
- times_half_pointer_size,
- kPointerSize + FixedArray::kHeaderSize));
+ __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
 
  deferred->BindExit();
  frame_->Push(&tmp);
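
Note: the repeated FieldOperand(..., index, times_half_pointer_size, FixedArray::kHeaderSize) pattern is factored into a FixedArrayElementOperand helper, and it works on smi indices directly: a smi is the index times two, so scaling it by half a pointer yields the same byte offset as scaling the raw index by a full pointer. The arithmetic checked in plain C++; constants are the ia32 values and kHeaderSize is a stand-in:

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;
    const int kHeaderSize = 8;  // stand-in for FixedArray::kHeaderSize

    uint32_t ElementOffset(int32_t smi_index, int additional_elements = 0) {
      // times_half_pointer_size is scale factor 2 on ia32.
      return kHeaderSize + smi_index * (kPointerSize / 2) +
             additional_elements * kPointerSize;
    }

    int main() {
      int32_t index = 5;
      int32_t smi = index << 1;  // smi-tagged index
      assert(ElementOffset(smi) == kHeaderSize + index * kPointerSize);
      assert(ElementOffset(smi, 1) == kHeaderSize + (index + 1) * kPointerSize);
    }
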
@@ -6722,9 +7203,9 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  // Check that object doesn't require security checks and
  // has no indexed interceptor.
  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(less);
- __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
- __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(below);
+ __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ KeyedLoadIC::kSlowCaseBitFieldMask);
  deferred->Branch(not_zero);
 
  // Check the object's elements are in fast case.
@@ -6744,14 +7225,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  deferred->Branch(not_zero);
 
  // Bring addresses into index1 and index2.
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_half_pointer_size, // index1 is Smi
- FixedArray::kHeaderSize));
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_half_pointer_size, // index2 is Smi
- FixedArray::kHeaderSize));
+ __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
+ __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
 
  // Swap elements.
  __ mov(object.reg(), Operand(index1.reg(), 0));
@@ -6765,12 +7240,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  // (or them and test against Smi mask.)
 
  __ mov(tmp2.reg(), tmp1.reg());
- RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
- __ CallStub(&recordWrite1);
-
- RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
- __ CallStub(&recordWrite2);
-
+ __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
+ __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
  __ bind(&done);
 
  deferred->BindExit();
 
@@ -6944,7 +7415,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
  __ addsd(xmm2, xmm3);
  // xmm2 now has 0.5.
- __ comisd(xmm2, xmm1);
+ __ ucomisd(xmm2, xmm1);
  call_runtime.Branch(not_equal);
  // Calculates square root.
  __ movsd(xmm1, xmm0);
@@ -7217,9 +7688,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
  frame_->Push(&value);
  } else {
  Load(node->expression());
- bool overwrite =
+ bool can_overwrite =
  (node->expression()->AsBinaryOperation() != NULL &&
  node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ bool no_negative_zero = node->expression()->no_negative_zero();
  switch (op) {
  case Token::NOT:
  case Token::DELETE:
@@ -7228,7 +7702,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
  break;
 
  case Token::SUB: {
- GenericUnaryOpStub stub(Token::SUB, overwrite);
+ GenericUnaryOpStub stub(
+ Token::SUB,
+ overwrite,
+ no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
  Result operand = frame_->Pop();
  Result answer = frame_->CallStub(&stub, &operand);
  answer.set_type_info(TypeInfo::Number());
@@ -7988,10 +8465,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
  Result temp = allocator()->Allocate();
  ASSERT(temp.is_valid());
  __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
- __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  destination()->false_target()->Branch(not_zero);
- __ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
+ __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
  temp.Unuse();
  answer.Unuse();
  destination()->Split(below);
@@ -8013,9 +8490,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
  // It can be an undetectable object.
  frame_->Spill(answer.reg());
  __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movzx_b(answer.reg(),
- FieldOperand(answer.reg(), Map::kBitFieldOffset));
- __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  answer.Unuse();
  destination()->Split(not_zero);
 
@@ -8042,17 +8518,18 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
  destination()->false_target()->Branch(equal);
 
  // It can be an undetectable object.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
- __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  destination()->false_target()->Branch(not_zero);
- __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ // Do a range test for JSObject type. We can't use
+ // MacroAssembler::IsInstanceJSObjectType, because we are using a
+ // ControlDestination, so we copy its implementation here.
  __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(less);
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
  answer.Unuse();
  map.Unuse();
- destination()->Split(less_equal);
+ destination()->Split(below_equal);
  } else {
  // Uncommon case: typeof testing against a string literal that is
  // never returned from the typeof operator.
@@ -8440,7 +8917,102 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
  #ifdef DEBUG
  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
  #endif
- Result result = frame()->CallStoreIC(name, is_contextual);
+
+ Result result;
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ result = frame()->CallStoreIC(name, is_contextual);
+ // A test eax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test eax
+ // instruction here.
+ __ nop();
+ } else {
+ // Inline the in-object property case.
+ JumpTarget slow, done;
+ Label patch_site;
+
+ // Get the value and receiver from the stack.
+ Result value = frame()->Pop();
+ value.ToRegister();
+ Result receiver = frame()->Pop();
+ receiver.ToRegister();
+
+ // Allocate result register.
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
+
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ slow.Branch(zero, &value, &receiver);
+
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ __ bind(&patch_site);
+ masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ // This branch is always a forwards branch so it's always a fixed size
+ // which allows the assert below to succeed and patching to work.
+ slow.Branch(not_equal, &value, &receiver);
+
+ // The delta from the patch label to the store offset must be
+ // statically known.
+ ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
+ StoreIC::kOffsetToStoreInstruction);
+
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ __ mov(FieldOperand(receiver.reg(), offset), value.reg());
+ __ mov(result.reg(), Operand(value.reg()));
+
+ // Allocate scratch register for write barrier.
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ // The write barrier clobbers all input registers, so spill the
+ // receiver and the value.
+ frame_->Spill(receiver.reg());
+ frame_->Spill(value.reg());
+
+ // If the receiver and the value share a register allocate a new
+ // register for the receiver.
+ if (receiver.reg().is(value.reg())) {
+ receiver = allocator()->Allocate();
+ ASSERT(receiver.is_valid());
+ __ mov(receiver.reg(), Operand(value.reg()));
+ }
+
+ // Update the write barrier. To save instructions in the inlined
+ // version we do not filter smis.
+ Label skip_write_barrier;
+ __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
+ int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ lea(scratch.reg(), Operand(receiver.reg(), offset));
+ __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
+ if (FLAG_debug_code) {
+ __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ }
+ __ bind(&skip_write_barrier);
+ value.Unuse();
+ scratch.Unuse();
+ receiver.Unuse();
+ done.Jump(&result);
+
+ slow.Bind(&value, &receiver);
+ frame()->Push(&receiver);
+ frame()->Push(&value);
+ result = frame()->CallStoreIC(name, is_contextual);
+ // Encode the offset to the map check instruction and the offset
+ // to the write barrier store address computation in a test eax
+ // instruction.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ test(eax,
+ Immediate((delta_to_record_write << 16) | delta_to_patch_site));
+ done.Bind(&result);
+ }
 
  ASSERT_EQ(expected_height, frame()->height());
  return result;
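
Note: the inlined named store above ends with a curious trick: the test eax after the IC call is never used for its flags, it is a data carrier. Its 32-bit immediate packs two code offsets the IC patcher later reads back, the delta to the map-check site in the low half and the delta to the write-barrier address computation in the high half, exactly as the (delta_to_record_write << 16) | delta_to_patch_site expression builds it. A model of the packing:

    #include <cassert>
    #include <cstdint>

    uint32_t EncodeDeltas(uint16_t delta_to_patch_site,
                          uint16_t delta_to_record_write) {
      return (uint32_t(delta_to_record_write) << 16) | delta_to_patch_site;
    }

    uint16_t PatchSiteDelta(uint32_t imm) { return imm & 0xFFFF; }
    uint16_t RecordWriteDelta(uint32_t imm) { return imm >> 16; }

    int main() {
      uint32_t imm = EncodeDeltas(42, 17);
      assert(PatchSiteDelta(imm) == 42);
      assert(RecordWriteDelta(imm) == 17);
    }
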
@@ -8459,36 +9031,35 @@ Result CodeGenerator::EmitKeyedLoad() {
  if (loop_nesting() > 0) {
  Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
  // Use a fresh temporary to load the elements without destroying
  // the receiver which is needed for the deferred slow case.
  Result elements = allocator()->Allocate();
  ASSERT(elements.is_valid());
 
- // Use a fresh temporary for the index and later the loaded
- // value.
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
 
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is in elements, which is guaranteed non-shared.
  DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(result.reg(),
+ new DeferredReferenceGetKeyedValue(elements.reg(),
  receiver.reg(),
  key.reg());
 
  __ test(receiver.reg(), Immediate(kSmiTagMask));
  deferred->Branch(zero);
 
+ // Check that the receiver has the expected map.
  // Initially, use an invalid map. The map is patched in the IC
  // initialization code.
  __ bind(deferred->patch_site());
  // Use masm-> here instead of the double underscore macro since extra
  // coverage code can interfere with the patching.
  masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(Factory::null_value()));
  deferred->Branch(not_equal);
 
  // Check that the key is a smi.
@@ -8503,24 +9074,26 @@ Result CodeGenerator::EmitKeyedLoad() {
  // is not a dictionary.
  __ mov(elements.reg(),
  FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
+ if (FLAG_debug_code) {
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ Assert(equal, "JSObject with fast elements map has slow elements");
+ }
 
- // Shift the key to get the actual index value and check that
- // it is within bounds. Use unsigned comparison to handle negative keys.
- __ mov(result.reg(), key.reg());
- __ SmiUntag(result.reg());
- __ cmp(result.reg(),
+ // Check that the key is within bounds.
+ __ cmp(key.reg(),
  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
  deferred->Branch(above_equal);
 
  // Load and check that the result is not the hole.
- __ mov(result.reg(), Operand(elements.reg(),
- result.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
+ // Key holds a smi.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ mov(elements.reg(),
+ FieldOperand(elements.reg(),
+ key.reg(),
+ times_2,
+ FixedArray::kHeaderSize));
+ result = elements;
  __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
  deferred->Branch(equal);
  __ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -8605,7 +9178,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
 
  // Check whether it is possible to omit the write barrier. If the elements
  // array is in new space or the value written is a smi we can safely update
- // the elements array without updating the remembered set.
+ // the elements array without write barrier.
  Label in_new_space;
  __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
  if (!value_is_constant) {
@@ -8624,11 +9197,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
  deferred->Branch(not_equal);
 
  // Store the value.
- __ mov(Operand(tmp.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize - kHeapObjectTag),
- result.reg());
+ __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
  __ IncrementCounter(&Counters::keyed_store_inline, 1);
 
  deferred->BindExit();
@@ -8649,40 +9218,6 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
  #define __ ACCESS_MASM(masm)
 
 
- static void CheckTwoForSminess(MacroAssembler* masm,
- Register left, Register right, Register scratch,
- TypeInfo left_info, TypeInfo right_info,
- DeferredInlineBinaryOperation* deferred) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
- }
-
-
 Handle<String> Reference::GetName() {
  ASSERT(type_ == NAMED);
  Property* property = expression_->AsProperty();
@@ -8717,10 +9252,8 @@ void Reference::GetValue() {
  Comment cmnt(masm, "[ Load from Slot");
  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
  ASSERT(slot != NULL);
- Result result =
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
  if (!persist_after_get_) set_unloaded();
- cgen_->frame()->Push(&result);
  break;
  }
 
@@ -8881,7 +9414,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
 
  // Setup the object header.
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
 
  // Setup the fixed slots.
  __ xor_(ebx, Operand(ebx)); // Set to NULL.
@@ -8929,8 +9463,10 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  Label slow_case;
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
- ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
  __ cmp(ecx, Factory::undefined_value());
  __ j(equal, &slow_case);
 
@@ -8982,20 +9518,19 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
 
  // Undetectable => false.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ebx, 1 << Map::kIsUndetectable);
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
  __ j(not_zero, &false_result);
 
  // JavaScript object => true.
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
  __ j(above_equal, &true_result);
 
  // String value => false iff empty.
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
  __ j(above_equal, &not_string);
- __ mov(edx, FieldOperand(eax, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
- __ test(edx, Operand(edx));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
  __ j(zero, &false_result);
  __ jmp(&true_result);
 
@@ -9244,7 +9779,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  }
 
  // 3. Perform the smi check of the operands.
- ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
  __ test(combined, Immediate(kSmiTagMask));
  __ j(not_zero, &not_smis, not_taken);
 
@@ -9325,7 +9860,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
 
  case Token::MUL:
  // If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
  // We can't revert the multiplication if the result is not a smi
  // so save the right operand.
  __ mov(ebx, right);
@@ -9353,7 +9888,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  // Check for the corner case of dividing the most negative smi by
  // -1. We cannot use the overflow flag, since it is not set by idiv
  // instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ cmp(eax, 0x40000000);
  __ j(equal, &use_fp_on_smis);
  // Check for negative zero result. Use combined = left | right.
@@ -9533,6 +10068,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
  // the four basic operations. The stub stays in the DEFAULT state
  // forever for all other operations (also if smi code is skipped).
  GenerateTypeTransition(masm);
+ break;
  }
 
  Label not_floats;
@@ -9880,51 +10416,28 @@ void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
 
 
 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- // Keep a copy of operands on the stack and make sure they are also in
- // edx, eax.
+ // Ensure the operands are on the stack.
  if (HasArgsInRegisters()) {
  GenerateRegisterArgsPush(masm);
- } else {
- GenerateLoadArguments(masm);
  }
 
- // Internal frame is necessary to handle exceptions properly.
- __ EnterInternalFrame();
+ __ pop(ecx); // Save return address.
 
- // Push arguments on stack if the stub expects them there.
- if (!HasArgsInRegisters()) {
- __ push(edx);
- __ push(eax);
- }
- // Call the stub proper to get the result in eax.
- __ call(&get_result);
- __ LeaveInternalFrame();
-
- __ pop(ecx); // Return address.
 // Left and right arguments are now on top.
- // Push the operation result. The tail call to BinaryOp_Patch will
- // return it to the original caller.
- __ push(eax);
 // Push this stub's key. Although the operation and the type info are
 // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
 
- __ push(ecx); // Return address.
+ __ push(ecx); // Push return address.
 
- // Patch the caller to an appropriate specialized stub
- // and return the operation result.
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
  __ TailCallExternalReference(
  ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 6,
+ 5,
  1);
-
- // The entry point for the result calculation is assumed to be immediately
- // after this sequence.
- __ bind(&get_result);
 }
 
 
@@ -9948,7 +10461,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  __ j(not_zero, &input_not_smi);
  // Input is a smi. Untag and load it onto the FPU stack.
  // Then load the low and high words of the double into ebx, edx.
- ASSERT_EQ(1, kSmiTagSize);
+ STATIC_ASSERT(kSmiTagSize == 1);
  __ sar(eax, 1);
  __ sub(Operand(esp), Immediate(2 * kPointerSize));
  __ mov(Operand(esp, 0), eax);
@@ -9972,7 +10485,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // ST[0] == double value
  // ebx = low 32 bits of double value
  // edx = high 32 bits of double value
- // Compute hash:
+ // Compute hash (the shifts are arithmetic):
  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
  __ mov(ecx, ebx);
  __ xor_(ecx, Operand(edx));
@@ -9984,6 +10497,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  __ xor_(ecx, Operand(eax));
  ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
  __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
 // ST[0] == double value.
 // ebx = low 32 bits of double value.
 // edx = high 32 bits of double value.
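
Note: the cache-index hash annotated above, written out in C++. The added "(the shifts are arithmetic)" comment matters because the emitted shifts are sar; a signed type reproduces that below. kCacheSize stands in for TranscendentalCache::kCacheSize and must be a power of two:

    #include <cstdint>
    #include <cstring>

    const uint32_t kCacheSize = 512;  // illustrative power of two

    uint32_t CacheIndex(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof bits);
      int32_t low = static_cast<int32_t>(bits);
      int32_t high = static_cast<int32_t>(bits >> 32);
      int32_t h = low ^ high;
      h ^= h >> 16;  // arithmetic shifts, matching the emitted sar
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (kCacheSize - 1);
    }
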
@@ -10152,6 +10666,11 @@ void IntegerConvert(MacroAssembler* masm,
  Label done, right_exponent, normal_exponent;
  Register scratch = ebx;
  Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
  if (!type_info.IsInteger32() || !use_sse3) {
  // Get exponent word.
  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
@@ -10601,10 +11120,12 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, &try_float, not_taken);
 
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
+ if (negative_zero_ == kStrictNegativeZero) {
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+ }
 
  // The value of the expression is a smi that is not zero. Try
  // optimistic subtraction '0 - value'.
@@ -10612,11 +11133,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  __ mov(edx, Operand(eax));
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
-
- // If result is a smi we are done.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, taken);
+ __ j(no_overflow, &done, taken);
 
  // Restore eax and go slow case.
  __ bind(&undo);
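
Note: two things are going on in the smi fast path above. First, the new kStrictNegativeZero mode: a smi cannot represent -0, so negating a zero must take the slow path and materialize a -0.0 heap number, except when analysis proves the context ignores negative zero. Second, the optimistic 0 - value now branches on no_overflow straight to done, since the only smi whose negation overflows is the most negative one. The -0 asymmetry in IEEE doubles, for reference:

    #include <cassert>
    #include <cmath>

    int main() {
      double pos = 0.0;
      double neg = -pos;
      assert(pos == neg);         // +0 and -0 compare equal...
      assert(std::signbit(neg));  // ...but -0 keeps its sign bit
      assert(1.0 / neg < 0.0);    // and divides to -infinity
    }
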
@@ -10628,7 +11145,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
10628
11145
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
10629
11146
  __ cmp(edx, Factory::heap_number_map());
10630
11147
  __ j(not_equal, &slow);
10631
- if (overwrite_) {
11148
+ if (overwrite_ == UNARY_OVERWRITE) {
10632
11149
  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
10633
11150
  __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
10634
11151
  __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
@@ -10663,13 +11180,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
10663
11180
  __ j(sign, &try_float, not_taken);
10664
11181
 
10665
11182
  // Tag the result as a smi and we're done.
10666
- ASSERT(kSmiTagSize == 1);
11183
+ STATIC_ASSERT(kSmiTagSize == 1);
10667
11184
  __ lea(eax, Operand(ecx, times_2, kSmiTag));
10668
11185
  __ jmp(&done);
10669
11186
 
10670
11187
  // Try to store the result in a heap number.
10671
11188
  __ bind(&try_float);
10672
- if (!overwrite_) {
11189
+ if (overwrite_ == UNARY_NO_OVERWRITE) {
10673
11190
  // Allocate a fresh heap number, but don't overwrite eax until
10674
11191
  // we're sure we can do it without going through the slow case
10675
11192
  // that needs the value in eax.
@@ -10739,7 +11256,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
10739
11256
  __ j(above_equal, &slow, not_taken);
10740
11257
 
10741
11258
  // Read the argument from the stack and return it.
10742
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
11259
+ STATIC_ASSERT(kSmiTagSize == 1);
11260
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
10743
11261
  __ lea(ebx, Operand(ebp, eax, times_2, 0));
10744
11262
  __ neg(edx);
10745
11263
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -10754,7 +11272,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
10754
11272
  __ j(above_equal, &slow, not_taken);
10755
11273
 
10756
11274
  // Read the argument from the stack and return it.
10757
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
11275
+ STATIC_ASSERT(kSmiTagSize == 1);
11276
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
10758
11277
  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
10759
11278
  __ neg(edx);
10760
11279
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -10825,12 +11344,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
10825
11344
  }
10826
11345
 
10827
11346
  // Setup the callee in-object property.
10828
- ASSERT(Heap::arguments_callee_index == 0);
11347
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
10829
11348
  __ mov(ebx, Operand(esp, 3 * kPointerSize));
10830
11349
  __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
10831
11350
 
10832
11351
  // Get the length (smi tagged) and set that as an in-object property too.
10833
- ASSERT(Heap::arguments_length_index == 1);
11352
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
10834
11353
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
10835
11354
  __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
10836
11355
 
@@ -10839,9 +11358,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
10839
11358
  __ test(ecx, Operand(ecx));
10840
11359
  __ j(zero, &done);
10841
11360
 
10842
- // Get the parameters pointer from the stack and untag the length.
11361
+ // Get the parameters pointer from the stack.
10843
11362
  __ mov(edx, Operand(esp, 2 * kPointerSize));
10844
- __ SmiUntag(ecx);
10845
11363
 
10846
11364
  // Setup the elements pointer in the allocated arguments object and
10847
11365
  // initialize the header in the elements fixed array.
@@ -10850,6 +11368,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
10850
11368
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
10851
11369
  Immediate(Factory::fixed_array_map()));
10852
11370
  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
11371
+ // Untag the length for the loop below.
11372
+ __ SmiUntag(ecx);
10853
11373
 
10854
11374
  // Copy the fixed array slots.
10855
11375
  Label loop;
@@ -10908,7 +11428,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
10908
11428
 
10909
11429
  // Check that the first argument is a JSRegExp object.
10910
11430
  __ mov(eax, Operand(esp, kJSRegExpOffset));
10911
- ASSERT_EQ(0, kSmiTag);
11431
+ STATIC_ASSERT(kSmiTag == 0);
10912
11432
  __ test(eax, Immediate(kSmiTagMask));
10913
11433
  __ j(zero, &runtime);
10914
11434
  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
@@ -10933,8 +11453,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
10933
11453
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
10934
11454
  // Calculate number of capture registers (number_of_captures + 1) * 2. This
10935
11455
  // uses the asumption that smis are 2 * their untagged value.
10936
- ASSERT_EQ(0, kSmiTag);
10937
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
11456
+ STATIC_ASSERT(kSmiTag == 0);
11457
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
10938
11458
  __ add(Operand(edx), Immediate(2)); // edx was a smi.
10939
11459
  // Check that the static offsets vector buffer is large enough.
10940
11460
  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
@@ -10958,7 +11478,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
10958
11478
  // string length. A negative value will be greater (unsigned comparison).
10959
11479
  __ mov(eax, Operand(esp, kPreviousIndexOffset));
10960
11480
  __ test(eax, Immediate(kSmiTagMask));
10961
- __ j(zero, &runtime);
11481
+ __ j(not_zero, &runtime);
10962
11482
  __ cmp(eax, Operand(ebx));
10963
11483
  __ j(above_equal, &runtime);
10964
11484
 
@@ -10978,64 +11498,65 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
10978
11498
  // Check that the last match info has space for the capture registers and the
10979
11499
  // additional information.
10980
11500
  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
11501
+ __ SmiUntag(eax);
10981
11502
  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
10982
11503
  __ cmp(edx, Operand(eax));
10983
11504
  __ j(greater, &runtime);
10984
11505
 
10985
11506
  // ecx: RegExp data (FixedArray)
10986
11507
  // Check the representation and encoding of the subject string.
10987
- Label seq_string, seq_two_byte_string, check_code;
10988
- const int kStringRepresentationEncodingMask =
10989
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
11508
+ Label seq_ascii_string, seq_two_byte_string, check_code;
10990
11509
  __ mov(eax, Operand(esp, kSubjectOffset));
10991
11510
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
10992
11511
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
10993
- __ and_(ebx, kStringRepresentationEncodingMask);
10994
- // First check for sequential string.
10995
- ASSERT_EQ(0, kStringTag);
10996
- ASSERT_EQ(0, kSeqStringTag);
11512
+ // First check for flat two byte string.
11513
+ __ and_(ebx,
11514
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
11515
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
11516
+ __ j(zero, &seq_two_byte_string);
11517
+ // Any other flat string must be a flat ascii string.
10997
11518
  __ test(Operand(ebx),
10998
11519
  Immediate(kIsNotStringMask | kStringRepresentationMask));
10999
- __ j(zero, &seq_string);
11520
+ __ j(zero, &seq_ascii_string);
11000
11521
 
11001
11522
  // Check for flat cons string.
11002
11523
  // A flat cons string is a cons string where the second part is the empty
11003
11524
  // string. In that case the subject string is just the first part of the cons
11004
11525
  // string. Also in this case the first part of the cons string is known to be
11005
11526
  // a sequential string or an external string.
11006
- __ and_(ebx, kStringRepresentationMask);
11007
- __ cmp(ebx, kConsStringTag);
11008
- __ j(not_equal, &runtime);
11527
+ STATIC_ASSERT(kExternalStringTag != 0);
11528
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
11529
+ __ test(Operand(ebx),
11530
+ Immediate(kIsNotStringMask | kExternalStringTag));
11531
+ __ j(not_zero, &runtime);
11532
+ // String is a cons string.
11009
11533
  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
11010
11534
  __ cmp(Operand(edx), Factory::empty_string());
11011
11535
  __ j(not_equal, &runtime);
11012
11536
  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
11013
11537
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
11014
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
11015
- ASSERT_EQ(0, kSeqStringTag);
11016
- __ test(ebx, Immediate(kStringRepresentationMask));
11538
+ // String is a cons string with empty second part.
11539
+ // eax: first part of cons string.
11540
+ // ebx: map of first part of cons string.
11541
+ // Is first part a flat two byte string?
11542
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
11543
+ kStringRepresentationMask | kStringEncodingMask);
11544
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
11545
+ __ j(zero, &seq_two_byte_string);
11546
+ // Any other flat string must be ascii.
11547
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
11548
+ kStringRepresentationMask);
11017
11549
  __ j(not_zero, &runtime);
11018
- __ and_(ebx, kStringRepresentationEncodingMask);
11019
11550
 
11020
- __ bind(&seq_string);
11021
- // eax: subject string (sequential either ascii to two byte)
11022
- // ebx: suject string type & kStringRepresentationEncodingMask
11551
+ __ bind(&seq_ascii_string);
11552
+ // eax: subject string (flat ascii)
11023
11553
  // ecx: RegExp data (FixedArray)
11024
- // Check that the irregexp code has been generated for an ascii string. If
11025
- // it has, the field contains a code object otherwise it contains the hole.
11026
- const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
11027
- __ cmp(ebx, kSeqTwoByteString);
11028
- __ j(equal, &seq_two_byte_string);
11029
- if (FLAG_debug_code) {
11030
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
11031
- __ Check(equal, "Expected sequential ascii string");
11032
- }
11033
11554
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
11034
11555
  __ Set(edi, Immediate(1)); // Type is ascii.
11035
11556
  __ jmp(&check_code);
11036
11557
 
11037
11558
  __ bind(&seq_two_byte_string);
11038
- // eax: subject string
11559
+ // eax: subject string (flat two byte)
11039
11560
  // ecx: RegExp data (FixedArray)
11040
11561
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
11041
11562
  __ Set(edi, Immediate(0)); // Type is two byte.
@@ -11091,7 +11612,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  __ jmp(&setup_rest);

  __ bind(&setup_two_byte);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // edi is smi (powered by 2).
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
@@ -11139,8 +11661,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(Operand(edx), Immediate(2)); // edx was a smi.

  // edx: Number of capture registers
@@ -11221,7 +11743,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, 1); // Divide length by two (length is not a smi).
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
  __ sub(Operand(mask), Immediate(1)); // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
@@ -11235,7 +11757,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  __ SmiUntag(scratch);
  } else {
  Label not_smi, hash_calculated;
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
  __ test(object, Immediate(kSmiTagMask));
  __ j(not_zero, &not_smi);
  __ mov(scratch, object);
@@ -11245,7 +11767,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  __ cmp(FieldOperand(object, HeapObject::kMapOffset),
  Factory::heap_number_map());
  __ j(not_equal, not_found);
- ASSERT_EQ(8, kDoubleSize);
+ STATIC_ASSERT(8 == kDoubleSize);
  __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
@@ -11263,7 +11785,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  CpuFeatures::Scope fscope(SSE2);
  __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
  } else {
  __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
  __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
@@ -11312,12 +11834,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
  }


- void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
- }
-
-
  static int NegativeComparisonResult(Condition cc) {
  ASSERT(cc != equal);
  ASSERT((cc == less) || (cc == less_equal)
@@ -11327,7 +11843,9 @@ static int NegativeComparisonResult(Condition cc) {


  void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.
@@ -11357,13 +11875,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
  } else {
- Label return_equal;
  Label heap_number;
- // If it's not a heap number, then return equal.
  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
  Immediate(Factory::heap_number_map()));
  __ j(equal, &heap_number);
- __ bind(&return_equal);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

@@ -11378,7 +11898,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
  // all bits in the mask are set. We only need to check the word
  // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
  __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
  __ xor_(eax, Operand(eax));
  // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
@@ -11386,7 +11906,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ add(edx, Operand(edx));
  __ cmp(edx, kQuietNaNHighBitsMask << 1);
  if (cc_ == equal) {
- ASSERT_NE(1, EQUAL);
+ STATIC_ASSERT(EQUAL != 1);
  __ setcc(above_equal, eax);
  __ ret(0);
  } else {
@@ -11403,94 +11923,78 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ bind(&not_identical);
  }

- if (cc_ == equal) { // Both strict and non-strict.
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
  Label slow; // Fallthrough label.
-
+ Label not_smis;
  // If we're doing a strict equality comparison, we don't have to do
  // type conversion, so we generate code to do fast comparison for objects
  // and oddballs. Non-smi numbers and strings still go through the usual
  // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- ASSERT_EQ(1, kSmiTagMask);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-
- // If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &first_non_object);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+ // ecx still holds eax & kSmiTag, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);

- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);

- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);

- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(greater_equal, &return_not_equal);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);

- // Check for oddballs: true, false, null, undefined.
- __ cmp(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);

- // Fall through to the general case.
- }
+ // Fall through to the general case.
  __ bind(&slow);
  }

- // Push arguments below the return address.
- __ pop(ecx);
- __ push(eax);
- __ push(edx);
- __ push(ecx);
-
  // Generate the number comparison code.
  if (include_number_compare_) {
  Label non_number_comparison;
@@ -11500,7 +12004,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
  CpuFeatures::Scope use_cmov(CMOV);

  FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);

  // Don't base result on EFLAGS when a NaN is involved.
  __ j(parity_even, &unordered, not_taken);
@@ -11510,33 +12014,32 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ cmov(above, eax, Operand(ecx));
  __ mov(ecx, Immediate(Smi::FromInt(-1)));
  __ cmov(below, eax, Operand(ecx));
- __ ret(2 * kPointerSize);
+ __ ret(0);
  } else {
  FloatingPointHelper::CheckFloatOperands(
  masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
  __ FCmp();

  // Don't base result on EFLAGS when a NaN is involved.
  __ j(parity_even, &unordered, not_taken);

  Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove
- // two arguments from the stack as they have been pushed in preparation
- // of a possible runtime call.
+ // Return a result of -1, 0, or 1, based on EFLAGS.
  __ j(below, &below_label, not_taken);
  __ j(above, &above_label, not_taken);

  __ xor_(eax, Operand(eax));
- __ ret(2 * kPointerSize);
+ __ ret(0);

  __ bind(&below_label);
  __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(2 * kPointerSize);
+ __ ret(0);

  __ bind(&above_label);
  __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
  }

  // If one of the numbers was NaN, then the result is always false.
@@ -11548,7 +12051,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
  } else {
  __ mov(eax, Immediate(Smi::FromInt(-1)));
  }
- __ ret(2 * kPointerSize); // eax, edx were pushed
+ __ ret(0);

  // The number comparison code did not provide a valid result.
  __ bind(&non_number_comparison);
@@ -11563,12 +12066,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // We've already checked for object identity, so if both operands
  // are symbols they aren't equal. Register eax already holds a
  // non-zero value, which indicates not equal, so just return.
- __ ret(2 * kPointerSize);
+ __ ret(0);
  }

  __ bind(&check_for_strings);

- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);

  // Inline comparison of ascii strings.
  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -11581,11 +12085,46 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ Abort("Unexpected fall-through from string comparison");
  #endif

- __ bind(&call_builtin);
- // must swap argument order
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0); // rax, rdx were pushed
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address.
  __ pop(ecx);
- __ pop(edx);
- __ pop(eax);
  __ push(edx);
  __ push(eax);

@@ -11697,16 +12236,16 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  // eax holds the exception.

  // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  ExternalReference handler_address(Top::k_handler_address);
  __ mov(esp, Operand::StaticVariable(handler_address));

  // Restore next handler and frame pointer, discard handler state.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  __ pop(Operand::StaticVariable(handler_address));
- ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
  __ pop(ebp);
  __ pop(edx); // Remove state.

@@ -11720,7 +12259,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  __ ret(0);
  }

@@ -11740,7 +12279,7 @@ void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
  Label prologue;
  Label promote_scheduled_exception;
  __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- ASSERT_EQ(kArgc, 4);
+ STATIC_ASSERT(kArgc == 4);
  if (kPassHandlesDirectly) {
  // When handles as passed directly we don't have to allocate extra
  // space for and pass an out parameter.
@@ -11855,7 +12394,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,

  // Check for failure result.
  Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  __ lea(ecx, Operand(eax, 1));
  // Lower 2 bits of ecx are 0 iff eax has failure tag.
  __ test(ecx, Immediate(kFailureTagMask));
@@ -11870,7 +12409,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,

  Label retry;
  // If the returned exception is RETRY_AFTER_GC continue at retry label
- ASSERT(Failure::RETRY_AFTER_GC == 0);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ j(zero, &retry, taken);

@@ -11901,7 +12440,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
  void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
  UncatchableExceptionType type) {
  // Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop sp to the top stack handler.
  ExternalReference handler_address(Top::k_handler_address);
@@ -11921,7 +12460,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
  __ bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  __ pop(Operand::StaticVariable(handler_address));

  if (type == OUT_OF_MEMORY) {
@@ -11940,11 +12479,11 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
  __ xor_(esi, Operand(esi));

  // Restore fp from handler and discard handler state.
- ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
  __ pop(ebp);
  __ pop(edx); // State.

- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  __ ret(0);
  }

@@ -12119,28 +12658,42 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
  __ j(zero, &slow, not_taken);

  // Check that the left hand is a JS object.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
- __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(greater, &slow, not_taken);
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);

  // Get the prototype of the function.
  __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // edx is function, eax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);

  // Check that the function prototype is a JS object.
  __ test(ebx, Immediate(kSmiTagMask));
  __ j(zero, &slow, not_taken);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &slow, not_taken);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(greater, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+
+ // Register mapping:
+ // eax is object map.
+ // edx is function.
+ // ebx is function prototype.
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);

- // Register mapping: eax is object map and ebx is function prototype.
  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));

  // Loop through the prototype chain looking for the function prototype.
@@ -12156,10 +12709,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {

  __ bind(&is_instance);
  __ Set(eax, Immediate(0));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
  __ ret(2 * kPointerSize);

  __ bind(&is_not_instance);
  __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
  __ ret(2 * kPointerSize);

  // Slow-case: Go through the JavaScript implementation.
@@ -12172,8 +12729,10 @@ int CompareStub::MinorKey() {
  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
  // stubs the never NaN NaN condition is only taken into account if the
  // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
  return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
  | StrictField::encode(strict_)
  | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
  | IncludeNumberCompareField::encode(include_number_compare_);
@@ -12183,6 +12742,8 @@ int CompareStub::MinorKey() {
  // Unfortunately you have to run without snapshots to see most of these
  // names in the profile since most compare stubs end up in the snapshot.
  const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
@@ -12224,152 +12785,205 @@ const char* CompareStub::GetName() {
  }


- void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+ // -------------------------------------------------------------------------
+ // StringCharCodeAtGenerator
+
+ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
  Label ascii_string;
  Label got_char_code;

  // If the receiver is a smi trigger the non-string case.
- ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
- __ test(result, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string);
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);

  // If the index is non-smi trigger the non-smi case.
- ASSERT(kSmiTag == 0);
- __ test(index, Immediate(kSmiTagMask));
- __ j(not_zero, index_not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);

- // Check for index out of range.
- __ cmp(index, FieldOperand(object, String::kLengthOffset));
- __ j(above_equal, index_out_of_range);
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);

- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);

  // We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
-
- // Check for 1-byte or 2-byte string.
- ASSERT(kAsciiStringTag != 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // index is smi (powered by 2).
- __ movzx_w(result, FieldOperand(object,
- index, times_1,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);

  // Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, kStringRepresentationMask);
- __ cmp(result, kConsStringTag);
- __ j(not_equal, slow_case);
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
- __ mov(result, FieldOperand(object, ConsString::kSecondOffset));
- __ cmp(Operand(result), Factory::empty_string());
- __ j(not_equal, slow_case);
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
  // Get the first of the two strings and load its instance type.
- __ mov(object, FieldOperand(object, ConsString::kFirstOffset));
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);

- // ASCII string.
- __ bind(&ascii_string);
- // Put untagged index into scratch register.
- __ mov(scratch, index);
- __ SmiUntag(scratch);
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);

+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
  // Load the byte into the result register.
- __ movzx_b(result, FieldOperand(object,
- scratch, times_1,
- SeqAsciiString::kHeaderSize));
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
  __ bind(&got_char_code);
- __ SmiTag(result);
+ __ SmiTag(result_);
+ __ bind(&exit_);
  }


- void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+ void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");

- Label slow_case;
- Label exit;
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);

+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ }
+
+
+ // -------------------------------------------------------------------------
+ // StringCharFromCodeGenerator
+
+ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code,
+ __ test(code_,
  Immediate(kSmiTagMask |
  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case, not_taken);
+ __ j(not_zero, &slow_case_, not_taken);

- __ Set(result, Immediate(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
  // At this point code register contains smi tagged ascii char code.
- __ mov(result, FieldOperand(result,
- code, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, Factory::undefined_value());
- __ j(equal, &slow_case, not_taken);
- __ jmp(&exit);
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+ }

- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(eax)) {
- __ mov(result, eax);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(eax));
- __ pop(eax); // Save return address.
- __ push(code);
- __ push(eax); // Restore return address.
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }

- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(eax));
- __ ret(0);
+ void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
  }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ }
+
+
+ // -------------------------------------------------------------------------
+ // StringCharAtGenerator
+
+ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+ }
+
+
+ void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
  }


@@ -12400,7 +13014,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // Check if either of the strings are empty. In that case return the other.
  Label second_not_zero_length, both_not_zero_length;
  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
  __ test(ecx, Operand(ecx));
  __ j(not_zero, &second_not_zero_length);
  // Second string is empty, result is first string which is already in eax.
@@ -12408,7 +13022,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ ret(2 * kPointerSize);
  __ bind(&second_not_zero_length);
  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
  __ test(ebx, Operand(ebx));
  __ j(not_zero, &both_not_zero_length);
  // First string is empty, result is second string which is in edx.
@@ -12425,7 +13039,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_flat_result, longer_than_two;
  __ bind(&both_not_zero_length);
  __ add(ebx, Operand(ecx));
- ASSERT(Smi::kMaxValue == String::kMaxLength);
+ STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
  // Handle exceptionally long strings in the runtime system.
  __ j(overflow, &string_add_runtime);
  // Use the runtime system when adding two one character strings, as it
@@ -12460,15 +13074,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {

  // If result is not supposed to be flat allocate a cons string object. If both
  // strings are ascii the result is an ascii cons string.
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ and_(ecx, Operand(edi));
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
  __ test(ecx, Immediate(kAsciiStringTag));
  __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
  // Allocate an acsii cons string.
  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
  __ bind(&allocated);
@@ -12483,6 +13098,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ IncrementCounter(&Counters::string_add_native, 1);
  __ ret(2 * kPointerSize);
  __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
  // Allocate a two byte cons string.
  __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
  __ jmp(&allocated);
@@ -12508,18 +13136,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // ebx: length of resulting flat string as a smi
  // edx: second string
  Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(zero, &non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ test(ecx, Immediate(kAsciiStringTag));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(zero, &string_add_runtime);

  __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
+ // Both strings are ascii strings. As they are short they are both flat.
  // ebx: length of resulting flat string as a smi
  __ SmiUntag(ebx);
  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
@@ -12556,8 +13182,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // edx: second string
  __ bind(&non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kAsciiStringTag);
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(not_zero, &string_add_runtime);
  // Both strings are two byte strings. As they are short they are both
  // flat.
@@ -12630,9 +13255,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
  Register count,
  Register scratch,
  bool ascii) {
- // Copy characters using rep movs of doublewords. Align destination on 4 byte
- // boundary before starting rep movs. Copy remaining characters after running
- // rep movs.
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
  ASSERT(dest.is(edi)); // rep movs destination
  ASSERT(src.is(esi)); // rep movs source
  ASSERT(count.is(ecx)); // rep movs count
@@ -12753,9 +13378,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
  }
  __ and_(scratch, Operand(mask));

- // Load the entry from the symble table.
+ // Load the entry from the symbol table.
  Register candidate = scratch; // Scratch register contains candidate.
- ASSERT_EQ(1, SymbolTable::kEntrySize);
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
  __ mov(candidate,
  FieldOperand(symbol_table,
  scratch,
@@ -12798,7 +13423,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
  // Scratch register contains result when we fall through to here.
  Register result = scratch;
  __ bind(&found_in_symbol_table);
- __ pop(mask); // Pop temporally saved mask from the stack.
+ __ pop(mask); // Pop saved mask from the stack.
  if (!result.is(eax)) {
  __ mov(eax, result);
  }
@@ -12873,7 +13498,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -12881,6 +13506,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {

  // eax: string
  // ebx: instance type
+
  // Calculate length of sub string using the smi values.
  Label result_longer_than_two;
  __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
@@ -12890,6 +13516,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  __ test(edx, Immediate(kSmiTagMask));
  __ j(not_zero, &runtime);
  __ sub(ecx, Operand(edx));
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label return_eax;
+ __ j(equal, &return_eax);
  // Special handling of sub-strings of length 1 and 2. One character strings
  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked for in the symbol cache.
@@ -12983,8 +13612,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
  // As from is a smi it is 2 times the value which matches the size of a two
  // byte character.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(esi, Operand(ebx));

  // eax: result string
@@ -12994,6 +13623,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx); // Restore esi.
+
+ __ bind(&return_eax);
  __ IncrementCounter(&Counters::sub_string_native, 1);
  __ ret(3 * kPointerSize);

@@ -13068,22 +13699,22 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
  __ j(not_zero, &result_not_equal);

  // Result is EQUAL.
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(2 * kPointerSize);
+ __ ret(0);

  __ bind(&result_not_equal);
  __ j(greater, &result_greater);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(2 * kPointerSize);
+ __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
  }


@@ -13101,8 +13732,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
  Label not_same;
  __ cmp(edx, Operand(eax));
  __ j(not_equal, &not_same);
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(&Counters::string_compare_native, 1);
  __ ret(2 * kPointerSize);
@@ -13113,6 +13744,10 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
@@ -13123,4 +13758,211 @@ void StringCompareStub::Generate(MacroAssembler* masm) {

  #undef __

+ #define __ masm.
+
+ MemCopyFunction CreateMemCopyFunction() {
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler masm(buffer, static_cast<int>(actual_size));
+
+ // Generated code is put into a fixed, unmovable, buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g. the JavaScript nan-object).
+
+ // 32-bit C declaration function calls pass arguments on stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, kSizeOffset + stack_offset),
+ Immediate(kMinComplexMemCopy));
+ Label ok;
+ __ j(greater_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope enable(SSE2);
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(16));
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+
+ // edi is now aligned. Check if esi is also aligned.
+ Label unaligned_source;
+ __ test(Operand(src), Immediate(0x0F));
+ __ j(not_zero, &unaligned_source);
+ {
+ __ IncrementCounter(&Counters::memcopy_aligned, 1);
+ // Copy loop for aligned source and destination.
+ __ mov(edx, count);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+ // Main copy loop.
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqa(xmm0, Operand(src, 0x00));
+ __ movdqa(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqa(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+ // At most 15 bytes to copy. Copy 16 bytes at end of string.
+ __ and_(count, 0xF);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+ __ Align(16);
+ {
+ // Copy loop for unaligned source and aligned destination.
+ // If source is not aligned, we can't read it as efficiently.
+ __ bind(&unaligned_source);
+ __ IncrementCounter(&Counters::memcopy_unaligned, 1);
+ __ mov(edx, ecx);
+ Register loop_count = ecx;
+ Register count = edx;
+ __ shr(loop_count, 5);
+ {
+ // Main copy loop
+ Label loop;
+ __ bind(&loop);
+ __ prefetch(Operand(src, 0x20), 1);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ add(Operand(src), Immediate(0x20));
+
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ add(Operand(dst), Immediate(0x20));
+
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ }
+
+ // At most 31 bytes to copy.
+ Label move_less_16;
+ __ test(Operand(count), Immediate(0x10));
+ __ j(zero, &move_less_16);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ add(Operand(src), Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ __ add(Operand(dst), Immediate(0x10));
+ __ bind(&move_less_16);
+
+ // At most 15 bytes to copy. Copy 16 bytes at end of string.
+ __ and_(count, 0x0F);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ } else {
+ __ IncrementCounter(&Counters::memcopy_noxmm, 1);
+ // SSE2 not supported. Unlikely to happen in practice.
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ __ cld();
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ // Copy the first word.
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+
+ // Increment src,dst so that dst is aligned.
+ __ mov(edx, dst);
+ __ and_(edx, 0x03);
+ __ neg(edx);
+ __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
+ // edi is now aligned, ecx holds number of remaning bytes to copy.
+
+ __ mov(edx, count);
+ count = edx;
+ __ shr(ecx, 2); // Make word count instead of byte count.
+ __ rep_movs();
+
+ // At most 3 bytes left to copy. Copy 4 bytes at end of string.
+ __ and_(count, 3);
+ __ mov(eax, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, count, times_1, -4), eax);
+
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ // Call the function from C++.
+ return FUNCTION_CAST<MemCopyFunction>(buffer);
+ }
+
+ #undef __
+
  } } // namespace v8::internal
+
+ #endif // V8_TARGET_ARCH_IA32