libv8 3.10.8.0 → 3.11.8.0

Files changed (215)
  1. data/Rakefile +10 -3
  2. data/ext/libv8/compiler.rb +46 -0
  3. data/ext/libv8/extconf.rb +5 -1
  4. data/ext/libv8/make.rb +13 -0
  5. data/lib/libv8/version.rb +1 -1
  6. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +11 -0
  7. data/patches/src_platform-freebsd.cc.patch +10 -0
  8. data/vendor/v8/ChangeLog +124 -0
  9. data/vendor/v8/DEPS +27 -0
  10. data/vendor/v8/Makefile +7 -0
  11. data/vendor/v8/SConstruct +15 -2
  12. data/vendor/v8/build/common.gypi +129 -157
  13. data/vendor/v8/build/gyp_v8 +11 -25
  14. data/vendor/v8/build/standalone.gypi +9 -3
  15. data/vendor/v8/include/v8.h +5 -3
  16. data/vendor/v8/src/SConscript +1 -0
  17. data/vendor/v8/src/api.cc +4 -33
  18. data/vendor/v8/src/api.h +2 -2
  19. data/vendor/v8/src/arm/builtins-arm.cc +5 -4
  20. data/vendor/v8/src/arm/code-stubs-arm.cc +21 -14
  21. data/vendor/v8/src/arm/codegen-arm.cc +2 -2
  22. data/vendor/v8/src/arm/debug-arm.cc +3 -1
  23. data/vendor/v8/src/arm/full-codegen-arm.cc +3 -102
  24. data/vendor/v8/src/arm/ic-arm.cc +30 -33
  25. data/vendor/v8/src/arm/lithium-arm.cc +20 -7
  26. data/vendor/v8/src/arm/lithium-arm.h +10 -4
  27. data/vendor/v8/src/arm/lithium-codegen-arm.cc +106 -60
  28. data/vendor/v8/src/arm/macro-assembler-arm.cc +49 -39
  29. data/vendor/v8/src/arm/macro-assembler-arm.h +5 -4
  30. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +115 -55
  31. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +7 -6
  32. data/vendor/v8/src/arm/simulator-arm.h +6 -6
  33. data/vendor/v8/src/arm/stub-cache-arm.cc +64 -19
  34. data/vendor/v8/src/array.js +7 -3
  35. data/vendor/v8/src/ast.cc +11 -6
  36. data/vendor/v8/src/bootstrapper.cc +9 -11
  37. data/vendor/v8/src/builtins.cc +61 -31
  38. data/vendor/v8/src/code-stubs.cc +23 -9
  39. data/vendor/v8/src/code-stubs.h +1 -0
  40. data/vendor/v8/src/codegen.h +3 -3
  41. data/vendor/v8/src/compiler.cc +1 -1
  42. data/vendor/v8/src/contexts.h +2 -18
  43. data/vendor/v8/src/d8.cc +94 -93
  44. data/vendor/v8/src/d8.h +1 -1
  45. data/vendor/v8/src/debug-agent.cc +3 -3
  46. data/vendor/v8/src/debug.cc +41 -1
  47. data/vendor/v8/src/debug.h +50 -0
  48. data/vendor/v8/src/elements-kind.cc +134 -0
  49. data/vendor/v8/src/elements-kind.h +210 -0
  50. data/vendor/v8/src/elements.cc +356 -190
  51. data/vendor/v8/src/elements.h +36 -28
  52. data/vendor/v8/src/factory.cc +44 -4
  53. data/vendor/v8/src/factory.h +11 -7
  54. data/vendor/v8/src/flag-definitions.h +3 -0
  55. data/vendor/v8/src/frames.h +3 -0
  56. data/vendor/v8/src/full-codegen.cc +2 -1
  57. data/vendor/v8/src/func-name-inferrer.h +2 -0
  58. data/vendor/v8/src/globals.h +3 -0
  59. data/vendor/v8/src/heap-inl.h +16 -4
  60. data/vendor/v8/src/heap.cc +38 -32
  61. data/vendor/v8/src/heap.h +3 -17
  62. data/vendor/v8/src/hydrogen-instructions.cc +28 -5
  63. data/vendor/v8/src/hydrogen-instructions.h +142 -44
  64. data/vendor/v8/src/hydrogen.cc +160 -55
  65. data/vendor/v8/src/hydrogen.h +2 -0
  66. data/vendor/v8/src/ia32/assembler-ia32.h +3 -0
  67. data/vendor/v8/src/ia32/builtins-ia32.cc +5 -4
  68. data/vendor/v8/src/ia32/code-stubs-ia32.cc +22 -16
  69. data/vendor/v8/src/ia32/codegen-ia32.cc +2 -2
  70. data/vendor/v8/src/ia32/debug-ia32.cc +29 -2
  71. data/vendor/v8/src/ia32/full-codegen-ia32.cc +8 -101
  72. data/vendor/v8/src/ia32/ic-ia32.cc +23 -19
  73. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +126 -80
  74. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +2 -1
  75. data/vendor/v8/src/ia32/lithium-ia32.cc +15 -9
  76. data/vendor/v8/src/ia32/lithium-ia32.h +14 -6
  77. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +50 -40
  78. data/vendor/v8/src/ia32/macro-assembler-ia32.h +5 -4
  79. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +113 -43
  80. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +9 -4
  81. data/vendor/v8/src/ia32/simulator-ia32.h +4 -4
  82. data/vendor/v8/src/ia32/stub-cache-ia32.cc +52 -14
  83. data/vendor/v8/src/ic.cc +77 -20
  84. data/vendor/v8/src/ic.h +18 -2
  85. data/vendor/v8/src/incremental-marking-inl.h +21 -5
  86. data/vendor/v8/src/incremental-marking.cc +35 -8
  87. data/vendor/v8/src/incremental-marking.h +12 -3
  88. data/vendor/v8/src/isolate.cc +12 -2
  89. data/vendor/v8/src/isolate.h +1 -1
  90. data/vendor/v8/src/jsregexp.cc +66 -26
  91. data/vendor/v8/src/jsregexp.h +60 -31
  92. data/vendor/v8/src/list-inl.h +8 -0
  93. data/vendor/v8/src/list.h +3 -0
  94. data/vendor/v8/src/lithium.cc +5 -2
  95. data/vendor/v8/src/liveedit.cc +57 -5
  96. data/vendor/v8/src/mark-compact-inl.h +17 -11
  97. data/vendor/v8/src/mark-compact.cc +100 -143
  98. data/vendor/v8/src/mark-compact.h +44 -20
  99. data/vendor/v8/src/messages.js +131 -99
  100. data/vendor/v8/src/mips/builtins-mips.cc +5 -4
  101. data/vendor/v8/src/mips/code-stubs-mips.cc +23 -15
  102. data/vendor/v8/src/mips/codegen-mips.cc +2 -2
  103. data/vendor/v8/src/mips/debug-mips.cc +3 -1
  104. data/vendor/v8/src/mips/full-codegen-mips.cc +4 -102
  105. data/vendor/v8/src/mips/ic-mips.cc +34 -36
  106. data/vendor/v8/src/mips/lithium-codegen-mips.cc +116 -68
  107. data/vendor/v8/src/mips/lithium-mips.cc +20 -7
  108. data/vendor/v8/src/mips/lithium-mips.h +11 -4
  109. data/vendor/v8/src/mips/macro-assembler-mips.cc +50 -39
  110. data/vendor/v8/src/mips/macro-assembler-mips.h +5 -4
  111. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +110 -50
  112. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +6 -5
  113. data/vendor/v8/src/mips/simulator-mips.h +5 -5
  114. data/vendor/v8/src/mips/stub-cache-mips.cc +66 -20
  115. data/vendor/v8/src/mksnapshot.cc +5 -1
  116. data/vendor/v8/src/objects-debug.cc +103 -6
  117. data/vendor/v8/src/objects-inl.h +215 -116
  118. data/vendor/v8/src/objects-printer.cc +13 -8
  119. data/vendor/v8/src/objects.cc +608 -331
  120. data/vendor/v8/src/objects.h +129 -94
  121. data/vendor/v8/src/parser.cc +16 -4
  122. data/vendor/v8/src/platform-freebsd.cc +1 -0
  123. data/vendor/v8/src/platform-linux.cc +9 -30
  124. data/vendor/v8/src/platform-posix.cc +28 -7
  125. data/vendor/v8/src/platform-win32.cc +15 -3
  126. data/vendor/v8/src/platform.h +2 -1
  127. data/vendor/v8/src/profile-generator-inl.h +25 -2
  128. data/vendor/v8/src/profile-generator.cc +300 -822
  129. data/vendor/v8/src/profile-generator.h +97 -214
  130. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +2 -1
  131. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +2 -2
  132. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +6 -5
  133. data/vendor/v8/src/regexp-macro-assembler-tracer.h +1 -1
  134. data/vendor/v8/src/regexp-macro-assembler.cc +7 -3
  135. data/vendor/v8/src/regexp-macro-assembler.h +10 -2
  136. data/vendor/v8/src/regexp.js +6 -0
  137. data/vendor/v8/src/runtime.cc +265 -212
  138. data/vendor/v8/src/runtime.h +6 -5
  139. data/vendor/v8/src/scopes.cc +20 -0
  140. data/vendor/v8/src/scopes.h +6 -3
  141. data/vendor/v8/src/spaces.cc +0 -2
  142. data/vendor/v8/src/string-stream.cc +2 -2
  143. data/vendor/v8/src/v8-counters.h +0 -2
  144. data/vendor/v8/src/v8natives.js +2 -2
  145. data/vendor/v8/src/v8utils.h +6 -3
  146. data/vendor/v8/src/version.cc +1 -1
  147. data/vendor/v8/src/x64/assembler-x64.h +2 -1
  148. data/vendor/v8/src/x64/builtins-x64.cc +5 -4
  149. data/vendor/v8/src/x64/code-stubs-x64.cc +25 -16
  150. data/vendor/v8/src/x64/codegen-x64.cc +2 -2
  151. data/vendor/v8/src/x64/debug-x64.cc +14 -1
  152. data/vendor/v8/src/x64/disasm-x64.cc +1 -1
  153. data/vendor/v8/src/x64/full-codegen-x64.cc +10 -106
  154. data/vendor/v8/src/x64/ic-x64.cc +20 -16
  155. data/vendor/v8/src/x64/lithium-codegen-x64.cc +156 -79
  156. data/vendor/v8/src/x64/lithium-codegen-x64.h +2 -1
  157. data/vendor/v8/src/x64/lithium-x64.cc +18 -8
  158. data/vendor/v8/src/x64/lithium-x64.h +7 -2
  159. data/vendor/v8/src/x64/macro-assembler-x64.cc +50 -40
  160. data/vendor/v8/src/x64/macro-assembler-x64.h +5 -4
  161. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +122 -51
  162. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +17 -8
  163. data/vendor/v8/src/x64/simulator-x64.h +4 -4
  164. data/vendor/v8/src/x64/stub-cache-x64.cc +55 -17
  165. data/vendor/v8/test/cctest/cctest.status +1 -0
  166. data/vendor/v8/test/cctest/test-api.cc +24 -0
  167. data/vendor/v8/test/cctest/test-func-name-inference.cc +38 -0
  168. data/vendor/v8/test/cctest/test-heap-profiler.cc +21 -77
  169. data/vendor/v8/test/cctest/test-heap.cc +164 -3
  170. data/vendor/v8/test/cctest/test-list.cc +12 -0
  171. data/vendor/v8/test/cctest/test-mark-compact.cc +5 -5
  172. data/vendor/v8/test/cctest/test-regexp.cc +14 -8
  173. data/vendor/v8/test/cctest/testcfg.py +2 -0
  174. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +176 -0
  175. data/vendor/v8/test/mjsunit/array-construct-transition.js +3 -3
  176. data/vendor/v8/test/mjsunit/array-literal-transitions.js +10 -10
  177. data/vendor/v8/test/mjsunit/big-array-literal.js +3 -0
  178. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +4 -2
  179. data/vendor/v8/test/mjsunit/debug-liveedit-stack-padding.js +88 -0
  180. data/vendor/v8/test/mjsunit/elements-kind.js +4 -4
  181. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +2 -2
  182. data/vendor/v8/test/mjsunit/elements-transition.js +5 -5
  183. data/vendor/v8/test/mjsunit/error-constructors.js +68 -33
  184. data/vendor/v8/test/mjsunit/harmony/proxies.js +14 -6
  185. data/vendor/v8/test/mjsunit/mjsunit.status +1 -0
  186. data/vendor/v8/test/mjsunit/packed-elements.js +112 -0
  187. data/vendor/v8/test/mjsunit/regexp-capture-3.js +6 -0
  188. data/vendor/v8/test/mjsunit/regexp-global.js +132 -0
  189. data/vendor/v8/test/mjsunit/regexp.js +11 -0
  190. data/vendor/v8/test/mjsunit/regress/regress-117409.js +52 -0
  191. data/vendor/v8/test/mjsunit/regress/regress-126412.js +33 -0
  192. data/vendor/v8/test/mjsunit/regress/regress-128018.js +35 -0
  193. data/vendor/v8/test/mjsunit/regress/regress-128146.js +33 -0
  194. data/vendor/v8/test/mjsunit/regress/regress-1639-2.js +4 -1
  195. data/vendor/v8/test/mjsunit/regress/regress-1639.js +14 -8
  196. data/vendor/v8/test/mjsunit/regress/regress-1849.js +3 -3
  197. data/vendor/v8/test/mjsunit/regress/regress-1878.js +2 -2
  198. data/vendor/v8/test/mjsunit/regress/regress-2071.js +79 -0
  199. data/vendor/v8/test/mjsunit/regress/regress-2153.js +32 -0
  200. data/vendor/v8/test/mjsunit/regress/regress-crbug-122271.js +4 -4
  201. data/vendor/v8/test/mjsunit/regress/regress-crbug-126414.js +32 -0
  202. data/vendor/v8/test/mjsunit/regress/regress-smi-only-concat.js +2 -2
  203. data/vendor/v8/test/mjsunit/regress/regress-transcendental.js +49 -0
  204. data/vendor/v8/test/mjsunit/stack-traces.js +14 -0
  205. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +4 -3
  206. data/vendor/v8/test/test262/testcfg.py +6 -1
  207. data/vendor/v8/tools/check-static-initializers.sh +11 -3
  208. data/vendor/v8/tools/fuzz-harness.sh +92 -0
  209. data/vendor/v8/tools/grokdump.py +658 -67
  210. data/vendor/v8/tools/gyp/v8.gyp +21 -39
  211. data/vendor/v8/tools/js2c.py +3 -3
  212. data/vendor/v8/tools/jsmin.py +2 -2
  213. data/vendor/v8/tools/presubmit.py +2 -1
  214. data/vendor/v8/tools/test-wrapper-gypbuild.py +25 -11
  215. metadata +624 -612
data/vendor/v8/src/mips/code-stubs-mips.cc
@@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
                          1, a0, a2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  const int kRegExpExecuteArguments = 8;
+  const int kRegExpExecuteArguments = 9;
   const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
 
@@ -5054,27 +5054,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // allocating space for the c argument slots, we don't need to calculate
   // that into the argument positions on the stack. This is how the stack will
   // look (sp meaning the value of sp at this moment):
+  // [sp + 5] - Argument 9
   // [sp + 4] - Argument 8
   // [sp + 3] - Argument 7
   // [sp + 2] - Argument 6
   // [sp + 1] - Argument 5
   // [sp + 0] - saved ra
 
-  // Argument 8: Pass current isolate address.
+  // Argument 9: Pass current isolate address.
   // CFunctionArgumentOperand handles MIPS stack argument slots.
   __ li(a0, Operand(ExternalReference::isolate_address()));
-  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
 
-  // Argument 7: Indicate that this is a direct call from JavaScript.
+  // Argument 8: Indicate that this is a direct call from JavaScript.
   __ li(a0, Operand(1));
-  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
 
-  // Argument 6: Start (high end) of backtracking stack memory area.
+  // Argument 7: Start (high end) of backtracking stack memory area.
   __ li(a0, Operand(address_of_regexp_stack_memory_address));
   __ lw(a0, MemOperand(a0, 0));
   __ li(a2, Operand(address_of_regexp_stack_memory_size));
   __ lw(a2, MemOperand(a2, 0));
   __ addu(a0, a0, a2);
+  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+  // Argument 6: Set the number of capture registers to zero to force global
+  // regexps to behave as non-global. This does not affect non-global regexps.
+  __ mov(a0, zero_reg);
   __ sw(a0, MemOperand(sp, 2 * kPointerSize));
 
   // Argument 5: static offsets vector buffer.
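Note: the slot arithmetic above follows the MIPS O32 convention the stub relies on: the first four C arguments travel in registers a0-a3, and arguments 5 through 9 land in stack slots above the saved ra, which is why adding a ninth argument shifts every slot up by one. A standalone sketch of the layout (not V8 code; kPointerSize = 4 assumes 32-bit MIPS):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;         // 32-bit MIPS word size
      const int kParameterRegisters = 4;  // a0-a3 carry arguments 1-4
      // Arguments 5..9 occupy [sp + 1]..[sp + 5]; [sp + 0] holds the saved ra.
      for (int arg = 5; arg <= 9; ++arg) {
        int slot = arg - kParameterRegisters;
        std::printf("Argument %d -> MemOperand(sp, %d)\n", arg, slot * kPointerSize);
      }
      return 0;
    }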
@@ -5125,7 +5131,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
 
   Label success;
-  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ Branch(&success, eq, v0, Operand(1));
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
   Label failure;
   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   // If not exception it can only be retry. Handle that in the runtime system.
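Note: comparing v0 against a literal 1 instead of NativeRegExpMacroAssembler::SUCCESS works because, with the new global-regexp support, the entry code returns the number of matches rather than a bare status code, and SUCCESS has the value 1. A sketch of the result codes from src/regexp-macro-assembler.h (values reconstructed from the surrounding checks, so treat as an assumption):

    // NativeRegExpMacroAssembler::Result (sketch).
    enum Result {
      RETRY = -2,      // Bail out to the runtime system and retry.
      EXCEPTION = -1,  // An exception was thrown (e.g. stack overflow).
      FAILURE = 0,     // No match found.
      SUCCESS = 1      // Here: exactly one match, since global behavior is forced off.
    };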
@@ -7362,8 +7370,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
   { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+  // and ElementsTransitionGenerator::GenerateSmiToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
   { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@@ -7629,9 +7637,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   Label fast_elements;
 
   __ CheckFastElements(a2, t1, &double_elements);
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
   __ JumpIfSmi(a0, &smi_element);
-  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+  __ CheckFastSmiElements(a2, t1, &fast_elements);
 
   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7643,7 +7651,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Push(t1, t0);
   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
 
-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7656,8 +7664,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
 
-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
+  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+  // and value is Smi.
   __ bind(&smi_element);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7666,7 +7674,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
 
-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
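Note: the FAST_* renames in the hunks above come from the new src/elements-kind.h/.cc (files 48-49 in the list), which splits every fast ElementsKind into a packed and a holey variant. A sketch of the fast portion of the enum (names are taken from these diffs; the exact ordering and trailing kinds are an assumption):

    enum ElementsKind {
      FAST_SMI_ELEMENTS,           // only smis, no holes (was FAST_SMI_ONLY_ELEMENTS)
      FAST_HOLEY_SMI_ELEMENTS,     // only smis, may contain holes
      FAST_ELEMENTS,               // arbitrary objects, no holes
      FAST_HOLEY_ELEMENTS,         // arbitrary objects, may contain holes
      FAST_DOUBLE_ELEMENTS,        // unboxed doubles, no holes
      FAST_HOLEY_DOUBLE_ELEMENTS,  // unboxed doubles, may contain holes
      DICTIONARY_ELEMENTS,
      NON_STRICT_ARGUMENTS_ELEMENTS
      // ... external array kinds follow.
    };

    // The most general fast transition target; boilerplates converted this
    // far need no further check (assumed alias, per the hunks further down).
    const ElementsKind TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS;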
data/vendor/v8/src/mips/codegen-mips.cc
@@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 // -------------------------------------------------------------------------
 // Code generators
 
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : value
@@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
 }
 
 
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- a0    : value
data/vendor/v8/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,6 +116,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
                               Assembler::kDebugBreakSlotInstructions);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
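Note: kIsSupported belongs to the new Debug::FramePaddingLayout machinery declared in src/debug.h (file 47, +50 lines) and exercised by the new test/mjsunit/debug-liveedit-stack-padding.js: supporting ports pre-pad debug-broken frames so LiveEdit can grow a frame in place, and the MIPS port opts out here. A rough sketch of the declaration (members beyond kIsSupported are an assumption; the real class carries the padding constants):

    class FramePaddingLayout : public AllStatic {
     public:
      // True on ports that reserve padding words below debug-broken frames
      // so LiveEdit can enlarge a frame without relocating it.
      static const bool kIsSupported;
      // ... padding size/value constants elided.
    };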
 
data/vendor/v8/src/mips/full-codegen-mips.cc
@@ -1711,7 +1711,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  bool has_fast_elements =
+      IsFastObjectElementsKind(constant_elements_kind);
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1733,8 +1734,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
            FLAG_smi_only_arrays);
     FastCloneShallowArrayStub::Mode mode = has_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1763,7 +1763,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 
     VisitForAccumulatorValue(subexpr);
 
-    if (constant_elements_kind == FAST_ELEMENTS) {
+    if (IsFastObjectElementsKind(constant_elements_kind)) {
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ lw(t2, MemOperand(sp));  // Copy of array literal.
       __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
@@ -3500,104 +3500,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = a0;
-  Register index1 = a1;
-  Register index2 = a2;
-  Register elements = a3;
-  Register scratch1 = t0;
-  Register scratch2 = t1;
-
-  __ lw(object, MemOperand(sp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ GetObjectType(object, scratch1, scratch2);
-  __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
-  // Map is now in scratch1.
-
-  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
-
-  // Check the object's elements are in fast case and writable.
-  __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
-
-  // Check that both indices are smis.
-  __ lw(index1, MemOperand(sp, 1 * kPointerSize));
-  __ lw(index2, MemOperand(sp, 0));
-  __ JumpIfNotBothSmi(index1, index2, &slow_case);
-
-  // Check that both indices are valid.
-  Label not_hi;
-  __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
-  __ Branch(&slow_case, ls, scratch1, Operand(index1));
-  __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
-  __ Branch(&slow_case, ls, scratch1, Operand(index2));
-  __ bind(&not_hi);
-
-  // Bring the address of the elements into index1 and index2.
-  __ Addu(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(index1, scratch1, index1);
-  __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(index2, scratch1, index2);
-
-  // Swap elements.
-  __ lw(scratch1, MemOperand(index1, 0));
-  __ lw(scratch2, MemOperand(index2, 0));
-  __ sw(scratch1, MemOperand(index2, 0));
-  __ sw(scratch2, MemOperand(index1, 0));
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   scratch1,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   ne,
-                   &no_remembered_set);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask).
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object. Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index1,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index2,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-  // We are done. Drop elements from the stack, and return undefined.
-  __ Drop(3);
-  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
data/vendor/v8/src/mips/ic-mips.cc
@@ -767,7 +767,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
 
   // Check that the key is a positive smi.
-  __ And(scratch1, key, Operand(0x8000001));
+  __ And(scratch1, key, Operand(0x80000001));
   __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
 
   // Load the elements into scratch1 and check its map.
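Note: this is a genuine bug fix, not a rename: 0x8000001 is missing a digit, so the old mask tested bit 27 rather than the sign bit. With V8's 32-bit smi tagging (tag bit 0 must be clear), one AND against 0x80000001 checks "is a smi" and "is non-negative" at once. A standalone model (not V8 code):

    #include <cassert>
    #include <cstdint>

    // A tagged word is a positive smi iff the smi-tag bit (bit 0) and the
    // sign bit (bit 31) are both clear.
    bool IsPositiveSmi(uint32_t tagged) {
      return (tagged & 0x80000001u) == 0;
    }

    int main() {
      assert(IsPositiveSmi(42u << 1));            // positive smi
      assert(!IsPositiveSmi(uint32_t(-2) << 1));  // negative smi: sign bit set
      assert(!IsPositiveSmi((42u << 1) | 1));     // tag bit set: not a smi
      assert(IsPositiveSmi(0x08000000u));         // bit 27 set: old mask wrongly rejected this
      return 0;
    }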
@@ -1347,34 +1347,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   __ Branch(&non_double_value, ne, t0, Operand(at));
 
-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+  // and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          receiver_map,
                                          t0,
                                          &slow);
   ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
 
   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          t0,
                                          &slow);
   ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
 
   __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+  // Elements are double, but value is an Object that's not a HeapNumber. Make
+  // sure that the receiver is a Array with Object elements and transition array
+  // from double elements to Object elements.
   __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
@@ -1471,7 +1472,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in v0.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a2);
     __ bind(&fail);
@@ -1688,12 +1689,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1727,33 +1728,30 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   andi at, rx, 0
+  //   Branch <target>, eq, at, Operand(zero_reg)
+  // to:
+  //   andi at, rx, #kSmiTagMask
+  //   Branch <target>, ne, at, Operand(zero_reg)
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    patcher.masm()->andi(at, reg, 0);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::IsBeq(branch_instr)) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
    patcher.ChangeBranchCondition(ne);
   } else {
     ASSERT(Assembler::IsBne(branch_instr));
-    // This is patching a "jump if smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
    patcher.ChangeBranchCondition(eq);
   }
 }
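Note: the new second argument is declared next to PatchInlinedSmiCode; a sketch of the declarations from src/ic.h (file 84 above):

    enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
    void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);

With a direction flag, the same patcher can both activate an inlined smi check and put a patched site back into its dormant form, the reverse direction the old one-way code could not express.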
data/vendor/v8/src/mips/lithium-codegen-mips.cc
@@ -2139,8 +2139,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
@@ -2344,40 +2343,37 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
+
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ li(a2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    if (last && !need_generic) {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
       __ Branch(&next, ne, scratch, Operand(map));
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ Branch(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ Branch(&generic, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&generic);
-      __ li(a2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
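Note: the rewrite above folds three special cases (empty type list, last-map-with-deopt, generic tail) into one loop plus an optional fallback. A standalone model of the new dispatch shape (not V8 code):

    #include <cstdio>

    int main() {
      const int map_count = 3;
      const bool need_generic = false;
      if (map_count == 0 && !need_generic) {
        std::printf("no maps, no fallback: unconditional deopt\n");
        return 0;
      }
      for (int i = 0; i < map_count; ++i) {
        bool last = (i == map_count - 1);
        if (last && !need_generic) {
          std::printf("map %d: deopt unless receiver map matches, then load\n", i);
        } else {
          std::printf("map %d: compare map, load on match, else fall through\n", i);
        }
      }
      if (need_generic) std::printf("tail: generic LoadIC call\n");
      return 0;
    }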
 
@@ -2452,8 +2448,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
     __ Ext(scratch, scratch, Map::kElementsKindShift,
            Map::kElementsKindBitCount);
-    __ Branch(&done, eq, scratch,
-              Operand(FAST_ELEMENTS));
+    __ Branch(&fail, lt, scratch,
+              Operand(GetInitialFastElementsKind()));
+    __ Branch(&done, le, scratch,
+              Operand(TERMINAL_FAST_ELEMENTS_KIND));
     __ Branch(&fail, lt, scratch,
               Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
     __ Branch(&done, le, scratch,
@@ -2506,7 +2504,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   // Load the result.
   __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
   __ addu(scratch, elements, scratch);
-  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  uint32_t offset = FixedArray::kHeaderSize +
+                    (instr->additional_index() << kPointerSizeLog2);
+  __ lw(result, FieldMemOperand(scratch, offset));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
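Note: additional_index() is new on the keyed load/store Lithium instructions in this release; it carries a compile-time constant addend for accesses of the form a[i + n], which the code above folds into the FixedArray header offset instead of materializing an add on the key register. A standalone model of the address math (FixedArray::kHeaderSize = 8 assumes the 32-bit layout of map word plus length word):

    #include <cstdio>

    int main() {
      const int kPointerSizeLog2 = 2;  // 4-byte pointers
      const int kHeaderSize = 8;       // FixedArray: map + length
      int key = 7;                     // dynamic, lives in a register
      int additional_index = 2;        // static, known to the compiler
      // scratch = elements + (key << kPointerSizeLog2)
      // result  = [scratch + kHeaderSize + (additional_index << kPointerSizeLog2)]
      int folded_offset = kHeaderSize + (additional_index << kPointerSizeLog2);
      std::printf("effective address: elements + %d + %d\n",
                  key << kPointerSizeLog2, folded_offset);
      return 0;
    }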
@@ -2537,17 +2537,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   }
 
   if (key_is_constant) {
-    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
-        FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+    __ Addu(elements, elements,
+        Operand(((constant_key + instr->additional_index()) << shift_size) +
+        FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
     __ sll(scratch, key, shift_size);
     __ Addu(elements, elements, Operand(scratch));
     __ Addu(elements, elements,
-        Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+        Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+        (instr->additional_index() << shift_size)));
   }
 
-  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+  }
 
   __ ldc1(result, MemOperand(elements));
 }
@@ -2569,32 +2573,41 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     FPURegister result = ToDoubleRegister(instr->result());
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+      __ Addu(scratch0(), external_pointer, constant_key << shift_size);
     } else {
       __ sll(scratch0(), key, shift_size);
       __ Addu(scratch0(), scratch0(), external_pointer);
     }
 
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ lwc1(result, MemOperand(scratch0()));
+      __ lwc1(result, MemOperand(scratch0(), additional_offset));
       __ cvt_d_s(result, result);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ ldc1(result, MemOperand(scratch0()));
+      __ ldc1(result, MemOperand(scratch0(), additional_offset));
     }
   } else {
     Register result = ToRegister(instr->result());
     Register scratch = scratch0();
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ Addu(scratch, key, instr->additional_index());
+    }
     MemOperand mem_operand(zero_reg);
     if (key_is_constant) {
-      mem_operand = MemOperand(external_pointer,
-                               constant_key * (1 << shift_size));
+      mem_operand =
+          MemOperand(external_pointer,
+                     (constant_key << shift_size) + additional_offset);
     } else {
-      __ sll(scratch, key, shift_size);
+      if (instr->additional_index() == 0) {
+        __ sll(scratch, key, shift_size);
+      } else {
+        __ sll(scratch, scratch, shift_size);
+      }
       __ Addu(scratch, scratch, external_pointer);
       mem_operand = MemOperand(scratch);
     }
@@ -2627,7 +2640,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -2772,7 +2788,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ sll(scratch, length, 2);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -3226,7 +3242,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   __ And(a3, a1, Operand(0xFFFF));
   __ li(t0, Operand(18273));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a1, a1, 16);
   __ Addu(a1, a3, a1);
   // Save state[0].
@@ -3235,7 +3251,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   __ And(a3, a0, Operand(0xFFFF));
   __ li(t0, Operand(36969));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a0, a0, 16),
   __ Addu(a0, a3, a0);
   // Save state[1].
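Note: the mul -> Mul change swaps the bare instruction for a macro-assembler helper, presumably so MIPS variants without a usable mul instruction can fall back to mult/mflo. The routine itself is a pair of 16-bit multiply-with-carry generators (Marsaglia's MWC); a standalone model, with the final combine step borrowed from the other ports (an assumption, since it lies outside this hunk):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t state0 = 0xBEEF, state1 = 0xCAFE;  // arbitrary nonzero seeds
      for (int i = 0; i < 3; ++i) {
        // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
        state0 = 18273u * (state0 & 0xFFFFu) + (state0 >> 16);
        // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
        state1 = 36969u * (state1 & 0xFFFFu) + (state1 >> 16);
        uint32_t random = (state0 << 14) + (state1 & 0x3FFFFu);  // assumed combine
        std::printf("%u\n", random);
      }
      return 0;
    }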
@@ -3336,7 +3352,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
 
   if (instr->known_function().is_null()) {
     LPointerMap* pointers = instr->pointer_map();
@@ -3440,6 +3455,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   if (!instr->transition().is_null()) {
     __ li(scratch, Operand(instr->transition()));
     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->TempAt(0));
+      // Update the write barrier for the map field.
+      __ RecordWriteField(object,
+                          HeapObject::kMapOffset,
+                          scratch,
+                          temp,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          OMIT_REMEMBERED_SET,
+                          OMIT_SMI_CHECK);
+    }
   }
 
   // Do the store.
@@ -3510,11 +3537,17 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+        (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+        + FixedArray::kHeaderSize;
     __ sw(value, FieldMemOperand(elements, offset));
   } else {
     __ sll(scratch, key, kPointerSizeLog2);
     __ addu(scratch, elements, scratch);
+    if (instr->additional_index() != 0) {
+      __ Addu(scratch,
+              scratch,
+              instr->additional_index() << kPointerSizeLog2);
+    }
     __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   }
 
@@ -3557,7 +3590,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
   int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   if (key_is_constant) {
-    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+    __ Addu(scratch, elements, Operand((constant_key << shift_size) +
         FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
     __ sll(scratch, key, shift_size);
@@ -3578,7 +3611,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
 
   __ bind(&not_nan);
-  __ sdc1(value, MemOperand(scratch));
+  __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
 }
 
@@ -3599,12 +3632,13 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     FPURegister value(ToDoubleRegister(instr->value()));
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+      __ Addu(scratch0(), external_pointer, constant_key << shift_size);
     } else {
       __ sll(scratch0(), key, shift_size);
       __ Addu(scratch0(), scratch0(), external_pointer);
@@ -3612,19 +3646,27 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
 
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ cvt_s_d(double_scratch0(), value);
-      __ swc1(double_scratch0(), MemOperand(scratch0()));
+      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ sdc1(value, MemOperand(scratch0()));
+      __ sdc1(value, MemOperand(scratch0(), additional_offset));
     }
   } else {
     Register value(ToRegister(instr->value()));
-    MemOperand mem_operand(zero_reg);
     Register scratch = scratch0();
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ Addu(scratch, key, instr->additional_index());
+    }
+    MemOperand mem_operand(zero_reg);
    if (key_is_constant) {
       mem_operand = MemOperand(external_pointer,
-                               constant_key * (1 << shift_size));
+                               ((constant_key + instr->additional_index())
+                                << shift_size));
     } else {
-      __ sll(scratch, key, shift_size);
+      if (instr->additional_index() == 0) {
+        __ sll(scratch, key, shift_size);
+      } else {
+        __ sll(scratch, scratch, shift_size);
+      }
       __ Addu(scratch, scratch, external_pointer);
       mem_operand = MemOperand(scratch);
     }
@@ -3646,7 +3688,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3684,20 +3729,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
 
   __ li(new_map_reg, Operand(to_map));
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+  if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kRAHasBeenSaved, kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-             to_kind == FAST_DOUBLE_ELEMENTS) {
+  } else if (IsFastSmiElementsKind(from_kind) &&
+             IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(a2));
     ASSERT(new_map_reg.is(a3));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+  } else if (IsFastDoubleElementsKind(from_kind) &&
+             IsFastObjectElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(a2));
     ASSERT(new_map_reg.is(a3));
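Note: the IsFast*ElementsKind predicates replace direct equality tests so that each transition arm also accepts the holey variant of its kind. Their likely shape, given the enum split sketched earlier (treat as a sketch, not a quote of elements-kind.h):

    bool IsFastSmiElementsKind(ElementsKind kind) {
      return kind == FAST_SMI_ELEMENTS || kind == FAST_HOLEY_SMI_ELEMENTS;
    }
    bool IsFastObjectElementsKind(ElementsKind kind) {
      return kind == FAST_ELEMENTS || kind == FAST_HOLEY_ELEMENTS;
    }
    bool IsFastDoubleElementsKind(ElementsKind kind) {
      return kind == FAST_DOUBLE_ELEMENTS || kind == FAST_HOLEY_DOUBLE_ELEMENTS;
    }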
@@ -4452,8 +4498,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
 
   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+  if (CanTransitionToMoreGeneralFastElementsKind(
+          boilerplate_elements_kind, true)) {
     __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
     // Load map into a2.
     __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4606,10 +4653,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate()->GetElementsKind();
 
-  // Deopt if the literal boilerplate ElementsKind is of a type different than
-  // the expected one. The check isn't necessary if the boilerplate has already
-  // been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+  // Deopt if the array literal boilerplate ElementsKind is of a type different
+  // than the expected one. The check isn't necessary if the boilerplate has
+  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+  if (CanTransitionToMoreGeneralFastElementsKind(
+          boilerplate_elements_kind, true)) {
     __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
     // Load map into a2.
     __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4955,7 +5003,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   Register strict = scratch0();
   __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
   __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -4968,7 +5016,7 @@ void LCodeGen::DoIn(LIn* instr) {
   Register obj = ToRegister(instr->object());
   Register key = ToRegister(instr->key());
   __ Push(key, obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);