mustang 0.0.1 → 0.1.0

Files changed (560)
  1. data/.rspec +1 -0
  2. data/Isolate +9 -0
  3. data/README.md +6 -12
  4. data/Rakefile +30 -4
  5. data/TODO.md +9 -0
  6. data/ext/v8/extconf.rb +56 -0
  7. data/ext/v8/v8.cpp +37 -0
  8. data/ext/v8/v8_array.cpp +161 -0
  9. data/ext/v8/v8_array.h +17 -0
  10. data/ext/v8/v8_base.cpp +147 -0
  11. data/ext/v8/v8_base.h +23 -0
  12. data/ext/v8/v8_cast.cpp +151 -0
  13. data/ext/v8/v8_cast.h +64 -0
  14. data/ext/v8/v8_context.cpp +174 -0
  15. data/ext/v8/v8_context.h +12 -0
  16. data/ext/v8/v8_date.cpp +61 -0
  17. data/ext/v8/v8_date.h +16 -0
  18. data/ext/v8/v8_errors.cpp +147 -0
  19. data/ext/v8/v8_errors.h +19 -0
  20. data/ext/v8/v8_external.cpp +66 -0
  21. data/ext/v8/v8_external.h +16 -0
  22. data/ext/v8/v8_function.cpp +182 -0
  23. data/ext/v8/v8_function.h +14 -0
  24. data/ext/v8/v8_integer.cpp +70 -0
  25. data/ext/v8/v8_integer.h +16 -0
  26. data/ext/v8/v8_macros.h +30 -0
  27. data/ext/v8/v8_main.cpp +53 -0
  28. data/ext/v8/v8_main.h +13 -0
  29. data/ext/v8/v8_number.cpp +62 -0
  30. data/ext/v8/v8_number.h +16 -0
  31. data/ext/v8/v8_object.cpp +172 -0
  32. data/ext/v8/v8_object.h +17 -0
  33. data/ext/v8/v8_ref.cpp +72 -0
  34. data/ext/v8/v8_ref.h +43 -0
  35. data/ext/v8/v8_regexp.cpp +148 -0
  36. data/ext/v8/v8_regexp.h +16 -0
  37. data/ext/v8/v8_string.cpp +78 -0
  38. data/ext/v8/v8_string.h +16 -0
  39. data/ext/v8/v8_value.cpp +370 -0
  40. data/ext/v8/v8_value.h +19 -0
  41. data/gemspec.yml +2 -1
  42. data/lib/core_ext/class.rb +14 -0
  43. data/lib/core_ext/object.rb +12 -0
  44. data/lib/core_ext/symbol.rb +23 -0
  45. data/lib/mustang.rb +44 -0
  46. data/lib/mustang/context.rb +69 -0
  47. data/lib/mustang/errors.rb +36 -0
  48. data/lib/support/delegated.rb +25 -0
  49. data/lib/v8/array.rb +21 -0
  50. data/lib/v8/context.rb +13 -0
  51. data/lib/v8/date.rb +20 -0
  52. data/lib/v8/error.rb +15 -0
  53. data/lib/v8/external.rb +16 -0
  54. data/lib/v8/function.rb +11 -0
  55. data/lib/v8/integer.rb +16 -0
  56. data/lib/v8/number.rb +16 -0
  57. data/lib/v8/object.rb +66 -0
  58. data/lib/v8/regexp.rb +23 -0
  59. data/lib/v8/string.rb +27 -0
  60. data/mustang.gemspec +3 -0
  61. data/spec/core_ext/class_spec.rb +19 -0
  62. data/spec/core_ext/object_spec.rb +19 -0
  63. data/spec/core_ext/symbol_spec.rb +27 -0
  64. data/spec/fixtures/test1.js +2 -0
  65. data/spec/fixtures/test2.js +2 -0
  66. data/spec/spec_helper.rb +20 -0
  67. data/spec/v8/array_spec.rb +88 -0
  68. data/spec/v8/cast_spec.rb +151 -0
  69. data/spec/v8/context_spec.rb +78 -0
  70. data/spec/v8/data_spec.rb +39 -0
  71. data/spec/v8/date_spec.rb +45 -0
  72. data/spec/v8/empty_spec.rb +27 -0
  73. data/spec/v8/errors_spec.rb +142 -0
  74. data/spec/v8/external_spec.rb +44 -0
  75. data/spec/v8/function_spec.rb +170 -0
  76. data/spec/v8/integer_spec.rb +41 -0
  77. data/spec/v8/main_spec.rb +18 -0
  78. data/spec/v8/null_spec.rb +27 -0
  79. data/spec/v8/number_spec.rb +40 -0
  80. data/spec/v8/object_spec.rb +79 -0
  81. data/spec/v8/primitive_spec.rb +9 -0
  82. data/spec/v8/regexp_spec.rb +65 -0
  83. data/spec/v8/string_spec.rb +48 -0
  84. data/spec/v8/undefined_spec.rb +27 -0
  85. data/spec/v8/value_spec.rb +215 -0
  86. data/vendor/v8/.gitignore +2 -0
  87. data/vendor/v8/AUTHORS +3 -1
  88. data/vendor/v8/ChangeLog +117 -0
  89. data/vendor/v8/SConstruct +334 -53
  90. data/vendor/v8/include/v8-debug.h +21 -11
  91. data/vendor/v8/include/v8-preparser.h +1 -1
  92. data/vendor/v8/include/v8-profiler.h +122 -43
  93. data/vendor/v8/include/v8-testing.h +5 -0
  94. data/vendor/v8/include/v8.h +171 -17
  95. data/vendor/v8/preparser/SConscript +38 -0
  96. data/vendor/v8/preparser/preparser-process.cc +77 -114
  97. data/vendor/v8/samples/shell.cc +232 -46
  98. data/vendor/v8/src/SConscript +29 -5
  99. data/vendor/v8/src/accessors.cc +70 -211
  100. data/vendor/v8/{test/cctest/test-mips.cc → src/allocation-inl.h} +15 -18
  101. data/vendor/v8/src/allocation.cc +0 -82
  102. data/vendor/v8/src/allocation.h +9 -42
  103. data/vendor/v8/src/api.cc +1645 -1156
  104. data/vendor/v8/src/api.h +76 -12
  105. data/vendor/v8/src/apiutils.h +0 -7
  106. data/vendor/v8/src/arguments.h +15 -4
  107. data/vendor/v8/src/arm/assembler-arm-inl.h +10 -9
  108. data/vendor/v8/src/arm/assembler-arm.cc +62 -23
  109. data/vendor/v8/src/arm/assembler-arm.h +76 -11
  110. data/vendor/v8/src/arm/builtins-arm.cc +39 -33
  111. data/vendor/v8/src/arm/code-stubs-arm.cc +1182 -402
  112. data/vendor/v8/src/arm/code-stubs-arm.h +20 -54
  113. data/vendor/v8/src/arm/codegen-arm.cc +159 -106
  114. data/vendor/v8/src/arm/codegen-arm.h +6 -6
  115. data/vendor/v8/src/arm/constants-arm.h +16 -1
  116. data/vendor/v8/src/arm/cpu-arm.cc +7 -5
  117. data/vendor/v8/src/arm/debug-arm.cc +6 -4
  118. data/vendor/v8/src/arm/deoptimizer-arm.cc +51 -14
  119. data/vendor/v8/src/arm/disasm-arm.cc +47 -15
  120. data/vendor/v8/src/arm/frames-arm.h +1 -1
  121. data/vendor/v8/src/arm/full-codegen-arm.cc +724 -408
  122. data/vendor/v8/src/arm/ic-arm.cc +90 -85
  123. data/vendor/v8/src/arm/lithium-arm.cc +140 -69
  124. data/vendor/v8/src/arm/lithium-arm.h +161 -46
  125. data/vendor/v8/src/arm/lithium-codegen-arm.cc +567 -297
  126. data/vendor/v8/src/arm/lithium-codegen-arm.h +21 -9
  127. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +2 -0
  128. data/vendor/v8/src/arm/macro-assembler-arm.cc +457 -96
  129. data/vendor/v8/src/arm/macro-assembler-arm.h +115 -18
  130. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +20 -13
  131. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +1 -0
  132. data/vendor/v8/src/arm/simulator-arm.cc +184 -101
  133. data/vendor/v8/src/arm/simulator-arm.h +26 -21
  134. data/vendor/v8/src/arm/stub-cache-arm.cc +450 -467
  135. data/vendor/v8/src/arm/virtual-frame-arm.cc +14 -12
  136. data/vendor/v8/src/arm/virtual-frame-arm.h +11 -8
  137. data/vendor/v8/src/array.js +35 -18
  138. data/vendor/v8/src/assembler.cc +186 -92
  139. data/vendor/v8/src/assembler.h +106 -69
  140. data/vendor/v8/src/ast-inl.h +5 -0
  141. data/vendor/v8/src/ast.cc +46 -35
  142. data/vendor/v8/src/ast.h +107 -50
  143. data/vendor/v8/src/atomicops.h +2 -0
  144. data/vendor/v8/src/atomicops_internals_mips_gcc.h +169 -0
  145. data/vendor/v8/src/bootstrapper.cc +649 -399
  146. data/vendor/v8/src/bootstrapper.h +94 -27
  147. data/vendor/v8/src/builtins.cc +359 -227
  148. data/vendor/v8/src/builtins.h +157 -123
  149. data/vendor/v8/src/checks.cc +2 -2
  150. data/vendor/v8/src/checks.h +4 -0
  151. data/vendor/v8/src/code-stubs.cc +27 -17
  152. data/vendor/v8/src/code-stubs.h +38 -17
  153. data/vendor/v8/src/codegen-inl.h +5 -1
  154. data/vendor/v8/src/codegen.cc +27 -17
  155. data/vendor/v8/src/codegen.h +9 -9
  156. data/vendor/v8/src/compilation-cache.cc +92 -206
  157. data/vendor/v8/src/compilation-cache.h +205 -30
  158. data/vendor/v8/src/compiler.cc +107 -120
  159. data/vendor/v8/src/compiler.h +17 -2
  160. data/vendor/v8/src/contexts.cc +22 -15
  161. data/vendor/v8/src/contexts.h +14 -8
  162. data/vendor/v8/src/conversions.cc +86 -30
  163. data/vendor/v8/src/counters.cc +19 -4
  164. data/vendor/v8/src/counters.h +28 -16
  165. data/vendor/v8/src/cpu-profiler-inl.h +4 -3
  166. data/vendor/v8/src/cpu-profiler.cc +123 -72
  167. data/vendor/v8/src/cpu-profiler.h +33 -19
  168. data/vendor/v8/src/cpu.h +2 -0
  169. data/vendor/v8/src/d8-debug.cc +3 -3
  170. data/vendor/v8/src/d8-debug.h +7 -6
  171. data/vendor/v8/src/d8-posix.cc +2 -0
  172. data/vendor/v8/src/d8.cc +22 -12
  173. data/vendor/v8/src/d8.gyp +3 -0
  174. data/vendor/v8/src/d8.js +618 -0
  175. data/vendor/v8/src/data-flow.h +3 -3
  176. data/vendor/v8/src/dateparser.h +4 -2
  177. data/vendor/v8/src/debug-agent.cc +10 -9
  178. data/vendor/v8/src/debug-agent.h +9 -11
  179. data/vendor/v8/src/debug-debugger.js +121 -0
  180. data/vendor/v8/src/debug.cc +331 -227
  181. data/vendor/v8/src/debug.h +248 -219
  182. data/vendor/v8/src/deoptimizer.cc +173 -62
  183. data/vendor/v8/src/deoptimizer.h +119 -19
  184. data/vendor/v8/src/disasm.h +3 -0
  185. data/vendor/v8/src/disassembler.cc +10 -9
  186. data/vendor/v8/src/execution.cc +185 -129
  187. data/vendor/v8/src/execution.h +47 -78
  188. data/vendor/v8/src/extensions/experimental/break-iterator.cc +250 -0
  189. data/vendor/v8/src/extensions/experimental/break-iterator.h +89 -0
  190. data/vendor/v8/src/extensions/experimental/experimental.gyp +2 -0
  191. data/vendor/v8/src/extensions/experimental/i18n-extension.cc +22 -2
  192. data/vendor/v8/src/extensions/externalize-string-extension.cc +2 -2
  193. data/vendor/v8/src/extensions/gc-extension.cc +1 -1
  194. data/vendor/v8/src/factory.cc +261 -154
  195. data/vendor/v8/src/factory.h +162 -158
  196. data/vendor/v8/src/flag-definitions.h +17 -11
  197. data/vendor/v8/src/frame-element.cc +0 -5
  198. data/vendor/v8/src/frame-element.h +9 -13
  199. data/vendor/v8/src/frames-inl.h +7 -0
  200. data/vendor/v8/src/frames.cc +56 -46
  201. data/vendor/v8/src/frames.h +36 -25
  202. data/vendor/v8/src/full-codegen.cc +15 -24
  203. data/vendor/v8/src/full-codegen.h +13 -41
  204. data/vendor/v8/src/func-name-inferrer.cc +7 -6
  205. data/vendor/v8/src/func-name-inferrer.h +1 -1
  206. data/vendor/v8/src/gdb-jit.cc +1 -0
  207. data/vendor/v8/src/global-handles.cc +118 -56
  208. data/vendor/v8/src/global-handles.h +98 -40
  209. data/vendor/v8/src/globals.h +2 -2
  210. data/vendor/v8/src/handles-inl.h +106 -9
  211. data/vendor/v8/src/handles.cc +220 -157
  212. data/vendor/v8/src/handles.h +38 -59
  213. data/vendor/v8/src/hashmap.h +3 -3
  214. data/vendor/v8/src/heap-inl.h +141 -25
  215. data/vendor/v8/src/heap-profiler.cc +117 -63
  216. data/vendor/v8/src/heap-profiler.h +38 -21
  217. data/vendor/v8/src/heap.cc +805 -564
  218. data/vendor/v8/src/heap.h +640 -594
  219. data/vendor/v8/src/hydrogen-instructions.cc +216 -73
  220. data/vendor/v8/src/hydrogen-instructions.h +259 -124
  221. data/vendor/v8/src/hydrogen.cc +996 -1171
  222. data/vendor/v8/src/hydrogen.h +163 -144
  223. data/vendor/v8/src/ia32/assembler-ia32-inl.h +12 -11
  224. data/vendor/v8/src/ia32/assembler-ia32.cc +85 -39
  225. data/vendor/v8/src/ia32/assembler-ia32.h +82 -16
  226. data/vendor/v8/src/ia32/builtins-ia32.cc +64 -58
  227. data/vendor/v8/src/ia32/code-stubs-ia32.cc +248 -324
  228. data/vendor/v8/src/ia32/code-stubs-ia32.h +3 -44
  229. data/vendor/v8/src/ia32/codegen-ia32.cc +217 -165
  230. data/vendor/v8/src/ia32/codegen-ia32.h +3 -0
  231. data/vendor/v8/src/ia32/cpu-ia32.cc +6 -5
  232. data/vendor/v8/src/ia32/debug-ia32.cc +8 -5
  233. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +124 -14
  234. data/vendor/v8/src/ia32/disasm-ia32.cc +85 -62
  235. data/vendor/v8/src/ia32/frames-ia32.h +1 -1
  236. data/vendor/v8/src/ia32/full-codegen-ia32.cc +348 -435
  237. data/vendor/v8/src/ia32/ic-ia32.cc +91 -91
  238. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +500 -255
  239. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +13 -4
  240. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +6 -0
  241. data/vendor/v8/src/ia32/lithium-ia32.cc +122 -45
  242. data/vendor/v8/src/ia32/lithium-ia32.h +128 -41
  243. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +109 -84
  244. data/vendor/v8/src/ia32/macro-assembler-ia32.h +18 -9
  245. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +26 -15
  246. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +1 -0
  247. data/vendor/v8/src/ia32/register-allocator-ia32.cc +30 -30
  248. data/vendor/v8/src/ia32/simulator-ia32.h +4 -4
  249. data/vendor/v8/src/ia32/stub-cache-ia32.cc +383 -400
  250. data/vendor/v8/src/ia32/virtual-frame-ia32.cc +36 -13
  251. data/vendor/v8/src/ia32/virtual-frame-ia32.h +11 -5
  252. data/vendor/v8/src/ic-inl.h +12 -2
  253. data/vendor/v8/src/ic.cc +304 -221
  254. data/vendor/v8/src/ic.h +115 -58
  255. data/vendor/v8/src/interpreter-irregexp.cc +25 -21
  256. data/vendor/v8/src/interpreter-irregexp.h +2 -1
  257. data/vendor/v8/src/isolate.cc +883 -0
  258. data/vendor/v8/src/isolate.h +1304 -0
  259. data/vendor/v8/src/json.js +10 -10
  260. data/vendor/v8/src/jsregexp.cc +111 -80
  261. data/vendor/v8/src/jsregexp.h +6 -7
  262. data/vendor/v8/src/jump-target-heavy.cc +5 -8
  263. data/vendor/v8/src/jump-target-heavy.h +0 -6
  264. data/vendor/v8/src/jump-target-inl.h +1 -1
  265. data/vendor/v8/src/jump-target-light.cc +3 -3
  266. data/vendor/v8/src/lithium-allocator-inl.h +2 -0
  267. data/vendor/v8/src/lithium-allocator.cc +42 -30
  268. data/vendor/v8/src/lithium-allocator.h +8 -22
  269. data/vendor/v8/src/lithium.cc +1 -0
  270. data/vendor/v8/src/liveedit.cc +141 -99
  271. data/vendor/v8/src/liveedit.h +7 -2
  272. data/vendor/v8/src/liveobjectlist-inl.h +90 -0
  273. data/vendor/v8/src/liveobjectlist.cc +2537 -1
  274. data/vendor/v8/src/liveobjectlist.h +245 -35
  275. data/vendor/v8/src/log-utils.cc +122 -35
  276. data/vendor/v8/src/log-utils.h +33 -36
  277. data/vendor/v8/src/log.cc +299 -241
  278. data/vendor/v8/src/log.h +177 -110
  279. data/vendor/v8/src/mark-compact.cc +612 -470
  280. data/vendor/v8/src/mark-compact.h +153 -80
  281. data/vendor/v8/src/messages.cc +16 -14
  282. data/vendor/v8/src/messages.js +30 -7
  283. data/vendor/v8/src/mips/assembler-mips-inl.h +155 -35
  284. data/vendor/v8/src/mips/assembler-mips.cc +1093 -219
  285. data/vendor/v8/src/mips/assembler-mips.h +552 -153
  286. data/vendor/v8/src/mips/builtins-mips.cc +43 -100
  287. data/vendor/v8/src/mips/code-stubs-mips.cc +752 -0
  288. data/vendor/v8/src/mips/code-stubs-mips.h +511 -0
  289. data/vendor/v8/src/mips/codegen-mips-inl.h +8 -14
  290. data/vendor/v8/src/mips/codegen-mips.cc +672 -896
  291. data/vendor/v8/src/mips/codegen-mips.h +271 -69
  292. data/vendor/v8/src/mips/constants-mips.cc +44 -20
  293. data/vendor/v8/src/mips/constants-mips.h +238 -40
  294. data/vendor/v8/src/mips/cpu-mips.cc +20 -3
  295. data/vendor/v8/src/mips/debug-mips.cc +35 -7
  296. data/vendor/v8/src/mips/deoptimizer-mips.cc +91 -0
  297. data/vendor/v8/src/mips/disasm-mips.cc +329 -93
  298. data/vendor/v8/src/mips/frames-mips.cc +2 -50
  299. data/vendor/v8/src/mips/frames-mips.h +24 -9
  300. data/vendor/v8/src/mips/full-codegen-mips.cc +473 -23
  301. data/vendor/v8/src/mips/ic-mips.cc +81 -45
  302. data/vendor/v8/src/mips/jump-target-mips.cc +11 -106
  303. data/vendor/v8/src/mips/lithium-codegen-mips.h +65 -0
  304. data/vendor/v8/src/mips/lithium-mips.h +304 -0
  305. data/vendor/v8/src/mips/macro-assembler-mips.cc +2391 -390
  306. data/vendor/v8/src/mips/macro-assembler-mips.h +718 -121
  307. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +478 -0
  308. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +250 -0
  309. data/vendor/v8/src/mips/register-allocator-mips-inl.h +0 -3
  310. data/vendor/v8/src/mips/register-allocator-mips.h +3 -2
  311. data/vendor/v8/src/mips/simulator-mips.cc +1009 -221
  312. data/vendor/v8/src/mips/simulator-mips.h +119 -36
  313. data/vendor/v8/src/mips/stub-cache-mips.cc +331 -148
  314. data/vendor/v8/src/mips/{fast-codegen-mips.cc → virtual-frame-mips-inl.h} +11 -30
  315. data/vendor/v8/src/mips/virtual-frame-mips.cc +137 -149
  316. data/vendor/v8/src/mips/virtual-frame-mips.h +294 -312
  317. data/vendor/v8/src/mirror-debugger.js +9 -8
  318. data/vendor/v8/src/mksnapshot.cc +2 -2
  319. data/vendor/v8/src/objects-debug.cc +16 -16
  320. data/vendor/v8/src/objects-inl.h +421 -195
  321. data/vendor/v8/src/objects-printer.cc +7 -7
  322. data/vendor/v8/src/objects-visiting.cc +1 -1
  323. data/vendor/v8/src/objects-visiting.h +33 -12
  324. data/vendor/v8/src/objects.cc +935 -658
  325. data/vendor/v8/src/objects.h +234 -139
  326. data/vendor/v8/src/parser.cc +484 -439
  327. data/vendor/v8/src/parser.h +35 -14
  328. data/vendor/v8/src/platform-cygwin.cc +173 -107
  329. data/vendor/v8/src/platform-freebsd.cc +224 -72
  330. data/vendor/v8/src/platform-linux.cc +234 -95
  331. data/vendor/v8/src/platform-macos.cc +215 -82
  332. data/vendor/v8/src/platform-nullos.cc +9 -3
  333. data/vendor/v8/src/platform-openbsd.cc +22 -7
  334. data/vendor/v8/src/platform-posix.cc +30 -5
  335. data/vendor/v8/src/platform-solaris.cc +120 -38
  336. data/vendor/v8/src/platform-tls-mac.h +62 -0
  337. data/vendor/v8/src/platform-tls-win32.h +62 -0
  338. data/vendor/v8/src/platform-tls.h +50 -0
  339. data/vendor/v8/src/platform-win32.cc +195 -97
  340. data/vendor/v8/src/platform.h +72 -15
  341. data/vendor/v8/src/preparse-data.cc +2 -0
  342. data/vendor/v8/src/preparser-api.cc +8 -2
  343. data/vendor/v8/src/preparser.cc +1 -1
  344. data/vendor/v8/src/prettyprinter.cc +43 -52
  345. data/vendor/v8/src/prettyprinter.h +1 -1
  346. data/vendor/v8/src/profile-generator-inl.h +0 -28
  347. data/vendor/v8/src/profile-generator.cc +942 -685
  348. data/vendor/v8/src/profile-generator.h +210 -176
  349. data/vendor/v8/src/property.cc +6 -0
  350. data/vendor/v8/src/property.h +14 -3
  351. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +1 -1
  352. data/vendor/v8/src/regexp-macro-assembler.cc +28 -19
  353. data/vendor/v8/src/regexp-macro-assembler.h +11 -6
  354. data/vendor/v8/src/regexp-stack.cc +18 -10
  355. data/vendor/v8/src/regexp-stack.h +45 -21
  356. data/vendor/v8/src/regexp.js +3 -3
  357. data/vendor/v8/src/register-allocator-inl.h +3 -3
  358. data/vendor/v8/src/register-allocator.cc +1 -7
  359. data/vendor/v8/src/register-allocator.h +5 -15
  360. data/vendor/v8/src/rewriter.cc +2 -1
  361. data/vendor/v8/src/runtime-profiler.cc +158 -128
  362. data/vendor/v8/src/runtime-profiler.h +131 -15
  363. data/vendor/v8/src/runtime.cc +2409 -1692
  364. data/vendor/v8/src/runtime.h +93 -17
  365. data/vendor/v8/src/safepoint-table.cc +3 -0
  366. data/vendor/v8/src/safepoint-table.h +9 -3
  367. data/vendor/v8/src/scanner-base.cc +21 -28
  368. data/vendor/v8/src/scanner-base.h +22 -11
  369. data/vendor/v8/src/scanner.cc +3 -5
  370. data/vendor/v8/src/scanner.h +4 -2
  371. data/vendor/v8/src/scopeinfo.cc +11 -16
  372. data/vendor/v8/src/scopeinfo.h +26 -15
  373. data/vendor/v8/src/scopes.cc +67 -37
  374. data/vendor/v8/src/scopes.h +26 -12
  375. data/vendor/v8/src/serialize.cc +193 -154
  376. data/vendor/v8/src/serialize.h +41 -36
  377. data/vendor/v8/src/small-pointer-list.h +163 -0
  378. data/vendor/v8/src/snapshot-common.cc +1 -1
  379. data/vendor/v8/src/snapshot.h +3 -1
  380. data/vendor/v8/src/spaces-inl.h +30 -25
  381. data/vendor/v8/src/spaces.cc +263 -370
  382. data/vendor/v8/src/spaces.h +178 -166
  383. data/vendor/v8/src/string-search.cc +4 -3
  384. data/vendor/v8/src/string-search.h +21 -20
  385. data/vendor/v8/src/string-stream.cc +32 -24
  386. data/vendor/v8/src/string.js +7 -7
  387. data/vendor/v8/src/stub-cache.cc +324 -248
  388. data/vendor/v8/src/stub-cache.h +181 -155
  389. data/vendor/v8/src/token.cc +3 -3
  390. data/vendor/v8/src/token.h +3 -3
  391. data/vendor/v8/src/top.cc +218 -390
  392. data/vendor/v8/src/type-info.cc +98 -32
  393. data/vendor/v8/src/type-info.h +10 -3
  394. data/vendor/v8/src/unicode.cc +1 -1
  395. data/vendor/v8/src/unicode.h +1 -1
  396. data/vendor/v8/src/utils.h +3 -0
  397. data/vendor/v8/src/v8-counters.cc +18 -11
  398. data/vendor/v8/src/v8-counters.h +34 -13
  399. data/vendor/v8/src/v8.cc +66 -121
  400. data/vendor/v8/src/v8.h +7 -4
  401. data/vendor/v8/src/v8globals.h +18 -12
  402. data/vendor/v8/src/{memory.h → v8memory.h} +0 -0
  403. data/vendor/v8/src/v8natives.js +59 -18
  404. data/vendor/v8/src/v8threads.cc +127 -114
  405. data/vendor/v8/src/v8threads.h +42 -35
  406. data/vendor/v8/src/v8utils.h +2 -39
  407. data/vendor/v8/src/variables.h +1 -1
  408. data/vendor/v8/src/version.cc +26 -5
  409. data/vendor/v8/src/version.h +4 -0
  410. data/vendor/v8/src/virtual-frame-heavy-inl.h +2 -4
  411. data/vendor/v8/src/virtual-frame-light-inl.h +5 -4
  412. data/vendor/v8/src/vm-state-inl.h +21 -17
  413. data/vendor/v8/src/vm-state.h +7 -5
  414. data/vendor/v8/src/win32-headers.h +1 -0
  415. data/vendor/v8/src/x64/assembler-x64-inl.h +12 -11
  416. data/vendor/v8/src/x64/assembler-x64.cc +80 -40
  417. data/vendor/v8/src/x64/assembler-x64.h +67 -17
  418. data/vendor/v8/src/x64/builtins-x64.cc +34 -33
  419. data/vendor/v8/src/x64/code-stubs-x64.cc +636 -377
  420. data/vendor/v8/src/x64/code-stubs-x64.h +14 -48
  421. data/vendor/v8/src/x64/codegen-x64-inl.h +1 -1
  422. data/vendor/v8/src/x64/codegen-x64.cc +158 -136
  423. data/vendor/v8/src/x64/codegen-x64.h +4 -1
  424. data/vendor/v8/src/x64/cpu-x64.cc +7 -5
  425. data/vendor/v8/src/x64/debug-x64.cc +8 -6
  426. data/vendor/v8/src/x64/deoptimizer-x64.cc +195 -20
  427. data/vendor/v8/src/x64/disasm-x64.cc +42 -23
  428. data/vendor/v8/src/x64/frames-x64.cc +1 -1
  429. data/vendor/v8/src/x64/frames-x64.h +2 -2
  430. data/vendor/v8/src/x64/full-codegen-x64.cc +780 -218
  431. data/vendor/v8/src/x64/ic-x64.cc +77 -79
  432. data/vendor/v8/src/x64/jump-target-x64.cc +1 -1
  433. data/vendor/v8/src/x64/lithium-codegen-x64.cc +698 -181
  434. data/vendor/v8/src/x64/lithium-codegen-x64.h +31 -6
  435. data/vendor/v8/src/x64/lithium-x64.cc +136 -54
  436. data/vendor/v8/src/x64/lithium-x64.h +142 -51
  437. data/vendor/v8/src/x64/macro-assembler-x64.cc +456 -187
  438. data/vendor/v8/src/x64/macro-assembler-x64.h +166 -34
  439. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +44 -28
  440. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +8 -4
  441. data/vendor/v8/src/x64/register-allocator-x64-inl.h +3 -3
  442. data/vendor/v8/src/x64/register-allocator-x64.cc +12 -8
  443. data/vendor/v8/src/x64/simulator-x64.h +5 -5
  444. data/vendor/v8/src/x64/stub-cache-x64.cc +299 -344
  445. data/vendor/v8/src/x64/virtual-frame-x64.cc +37 -13
  446. data/vendor/v8/src/x64/virtual-frame-x64.h +13 -7
  447. data/vendor/v8/src/zone-inl.h +49 -3
  448. data/vendor/v8/src/zone.cc +42 -41
  449. data/vendor/v8/src/zone.h +37 -34
  450. data/vendor/v8/test/benchmarks/testcfg.py +100 -0
  451. data/vendor/v8/test/cctest/SConscript +5 -4
  452. data/vendor/v8/test/cctest/cctest.h +3 -2
  453. data/vendor/v8/test/cctest/cctest.status +6 -11
  454. data/vendor/v8/test/cctest/test-accessors.cc +3 -3
  455. data/vendor/v8/test/cctest/test-alloc.cc +39 -33
  456. data/vendor/v8/test/cctest/test-api.cc +1092 -205
  457. data/vendor/v8/test/cctest/test-assembler-arm.cc +39 -25
  458. data/vendor/v8/test/cctest/test-assembler-ia32.cc +36 -37
  459. data/vendor/v8/test/cctest/test-assembler-mips.cc +1098 -40
  460. data/vendor/v8/test/cctest/test-assembler-x64.cc +32 -25
  461. data/vendor/v8/test/cctest/test-ast.cc +1 -0
  462. data/vendor/v8/test/cctest/test-circular-queue.cc +8 -5
  463. data/vendor/v8/test/cctest/test-compiler.cc +24 -24
  464. data/vendor/v8/test/cctest/test-cpu-profiler.cc +140 -5
  465. data/vendor/v8/test/cctest/test-dataflow.cc +1 -0
  466. data/vendor/v8/test/cctest/test-debug.cc +136 -77
  467. data/vendor/v8/test/cctest/test-decls.cc +1 -1
  468. data/vendor/v8/test/cctest/test-deoptimization.cc +25 -24
  469. data/vendor/v8/test/cctest/test-disasm-arm.cc +9 -4
  470. data/vendor/v8/test/cctest/test-disasm-ia32.cc +10 -8
  471. data/vendor/v8/test/cctest/test-func-name-inference.cc +10 -4
  472. data/vendor/v8/test/cctest/test-heap-profiler.cc +226 -164
  473. data/vendor/v8/test/cctest/test-heap.cc +240 -217
  474. data/vendor/v8/test/cctest/test-liveedit.cc +1 -0
  475. data/vendor/v8/test/cctest/test-log-stack-tracer.cc +18 -20
  476. data/vendor/v8/test/cctest/test-log.cc +114 -108
  477. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +247 -177
  478. data/vendor/v8/test/cctest/test-mark-compact.cc +129 -90
  479. data/vendor/v8/test/cctest/test-parsing.cc +15 -14
  480. data/vendor/v8/test/cctest/test-platform-linux.cc +1 -0
  481. data/vendor/v8/test/cctest/test-platform-tls.cc +66 -0
  482. data/vendor/v8/test/cctest/test-platform-win32.cc +1 -0
  483. data/vendor/v8/test/cctest/test-profile-generator.cc +1 -1
  484. data/vendor/v8/test/cctest/test-regexp.cc +53 -41
  485. data/vendor/v8/test/cctest/test-reloc-info.cc +18 -11
  486. data/vendor/v8/test/cctest/test-serialize.cc +44 -43
  487. data/vendor/v8/test/cctest/test-sockets.cc +8 -3
  488. data/vendor/v8/test/cctest/test-spaces.cc +47 -29
  489. data/vendor/v8/test/cctest/test-strings.cc +20 -20
  490. data/vendor/v8/test/cctest/test-thread-termination.cc +8 -3
  491. data/vendor/v8/test/cctest/test-threads.cc +5 -3
  492. data/vendor/v8/test/cctest/test-utils.cc +5 -4
  493. data/vendor/v8/test/cctest/testcfg.py +7 -3
  494. data/vendor/v8/test/es5conform/es5conform.status +2 -77
  495. data/vendor/v8/test/es5conform/testcfg.py +1 -1
  496. data/vendor/v8/test/message/testcfg.py +1 -1
  497. data/vendor/v8/test/mjsunit/accessors-on-global-object.js +3 -3
  498. data/vendor/v8/test/mjsunit/array-concat.js +43 -1
  499. data/vendor/v8/test/mjsunit/array-join.js +25 -0
  500. data/vendor/v8/test/mjsunit/bitops-info.js +7 -1
  501. data/vendor/v8/test/mjsunit/compiler/array-length.js +2 -2
  502. data/vendor/v8/test/mjsunit/compiler/global-accessors.js +47 -0
  503. data/vendor/v8/test/mjsunit/compiler/pic.js +1 -1
  504. data/vendor/v8/test/mjsunit/compiler/regress-loadfield.js +65 -0
  505. data/vendor/v8/test/mjsunit/math-sqrt.js +5 -1
  506. data/vendor/v8/test/mjsunit/mjsunit.js +59 -8
  507. data/vendor/v8/test/mjsunit/mjsunit.status +0 -12
  508. data/vendor/v8/test/mjsunit/mul-exhaustive.js +129 -11
  509. data/vendor/v8/test/mjsunit/negate-zero.js +1 -1
  510. data/vendor/v8/test/mjsunit/object-freeze.js +5 -13
  511. data/vendor/v8/test/mjsunit/object-prevent-extensions.js +9 -50
  512. data/vendor/v8/test/mjsunit/object-seal.js +4 -13
  513. data/vendor/v8/test/mjsunit/override-eval-with-non-function.js +36 -0
  514. data/vendor/v8/test/mjsunit/regress/regress-1145.js +54 -0
  515. data/vendor/v8/test/mjsunit/regress/regress-1172-bis.js +37 -0
  516. data/vendor/v8/test/mjsunit/regress/regress-1181.js +54 -0
  517. data/vendor/v8/test/mjsunit/regress/regress-1207.js +35 -0
  518. data/vendor/v8/test/mjsunit/regress/regress-1209.js +34 -0
  519. data/vendor/v8/test/mjsunit/regress/regress-1210.js +48 -0
  520. data/vendor/v8/test/mjsunit/regress/regress-1213.js +43 -0
  521. data/vendor/v8/test/mjsunit/regress/regress-1218.js +29 -0
  522. data/vendor/v8/test/mjsunit/regress/regress-1229.js +79 -0
  523. data/vendor/v8/test/mjsunit/regress/regress-1233.js +47 -0
  524. data/vendor/v8/test/mjsunit/regress/regress-1236.js +34 -0
  525. data/vendor/v8/test/mjsunit/regress/regress-1237.js +36 -0
  526. data/vendor/v8/test/mjsunit/regress/regress-1240.js +39 -0
  527. data/vendor/v8/test/mjsunit/regress/regress-1257.js +58 -0
  528. data/vendor/v8/test/mjsunit/regress/regress-1278.js +69 -0
  529. data/vendor/v8/test/mjsunit/regress/regress-create-exception.js +1 -0
  530. data/vendor/v8/test/mjsunit/regress/regress-lazy-deopt-reloc.js +52 -0
  531. data/vendor/v8/test/mjsunit/sin-cos.js +15 -10
  532. data/vendor/v8/test/mjsunit/smi-negative-zero.js +2 -2
  533. data/vendor/v8/test/mjsunit/str-to-num.js +1 -1
  534. data/vendor/v8/test/mjsunit/strict-mode.js +435 -0
  535. data/vendor/v8/test/mjsunit/testcfg.py +23 -6
  536. data/vendor/v8/test/mozilla/mozilla.status +0 -2
  537. data/vendor/v8/test/mozilla/testcfg.py +1 -1
  538. data/vendor/v8/test/preparser/empty.js +28 -0
  539. data/vendor/v8/test/preparser/functions-only.js +38 -0
  540. data/vendor/v8/test/preparser/non-alphanum.js +34 -0
  541. data/vendor/v8/test/preparser/symbols-only.js +49 -0
  542. data/vendor/v8/test/preparser/testcfg.py +90 -0
  543. data/vendor/v8/test/sputnik/testcfg.py +1 -1
  544. data/vendor/v8/test/test262/README +16 -0
  545. data/vendor/v8/test/test262/harness-adapt.js +80 -0
  546. data/vendor/v8/test/test262/test262.status +1506 -0
  547. data/vendor/v8/test/test262/testcfg.py +123 -0
  548. data/vendor/v8/tools/freebsd-tick-processor +10 -0
  549. data/vendor/v8/tools/gyp/v8.gyp +8 -33
  550. data/vendor/v8/tools/linux-tick-processor +5 -3
  551. data/vendor/v8/tools/test.py +37 -14
  552. data/vendor/v8/tools/tickprocessor.js +22 -8
  553. data/vendor/v8/tools/visual_studio/v8_base.vcproj +13 -1
  554. data/vendor/v8/tools/visual_studio/v8_base_arm.vcproj +5 -1
  555. data/vendor/v8/tools/visual_studio/v8_base_x64.vcproj +5 -1
  556. data/vendor/v8/tools/visual_studio/x64.vsprops +1 -0
  557. metadata +1495 -1341
  558. data/ext/extconf.rb +0 -22
  559. data/ext/mustang.cpp +0 -58
  560. data/vendor/v8/src/top.h +0 -608
data/vendor/v8/src/messages.cc

@@ -32,7 +32,6 @@
 #include "execution.h"
 #include "messages.h"
 #include "spaces-inl.h"
-#include "top.h"

 namespace v8 {
 namespace internal {
@@ -68,18 +67,18 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
     Vector< Handle<Object> > args,
     Handle<String> stack_trace,
     Handle<JSArray> stack_frames) {
-  Handle<String> type_handle = Factory::LookupAsciiSymbol(type);
+  Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
   Handle<FixedArray> arguments_elements =
-      Factory::NewFixedArray(args.length());
+      FACTORY->NewFixedArray(args.length());
   for (int i = 0; i < args.length(); i++) {
     arguments_elements->set(i, *args[i]);
   }
   Handle<JSArray> arguments_handle =
-      Factory::NewJSArrayWithElements(arguments_elements);
+      FACTORY->NewJSArrayWithElements(arguments_elements);

   int start = 0;
   int end = 0;
-  Handle<Object> script_handle = Factory::undefined_value();
+  Handle<Object> script_handle = FACTORY->undefined_value();
   if (loc) {
     start = loc->start_pos();
     end = loc->end_pos();
@@ -87,15 +86,15 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
   }

   Handle<Object> stack_trace_handle = stack_trace.is_null()
-      ? Factory::undefined_value()
+      ? FACTORY->undefined_value()
       : Handle<Object>::cast(stack_trace);

   Handle<Object> stack_frames_handle = stack_frames.is_null()
-      ? Factory::undefined_value()
+      ? FACTORY->undefined_value()
       : Handle<Object>::cast(stack_frames);

   Handle<JSMessageObject> message =
-      Factory::NewJSMessageObject(type_handle,
+      FACTORY->NewJSMessageObject(type_handle,
                                   arguments_handle,
                                   start,
                                   end,
@@ -111,7 +110,7 @@ void MessageHandler::ReportMessage(MessageLocation* loc,
                                    Handle<Object> message) {
   v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);

-  v8::NeanderArray global_listeners(Factory::message_listeners());
+  v8::NeanderArray global_listeners(FACTORY->message_listeners());
   int global_length = global_listeners.length();
   if (global_length == 0) {
     DefaultMessageReport(loc, message);
@@ -131,18 +130,21 @@ void MessageHandler::ReportMessage(MessageLocation* loc,


 Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
-  Handle<String> fmt_str = Factory::LookupAsciiSymbol("FormatMessage");
+  Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
   Handle<JSFunction> fun =
-      Handle<JSFunction>(JSFunction::cast(
-          Top::builtins()->GetPropertyNoExceptionThrown(*fmt_str)));
+      Handle<JSFunction>(
+          JSFunction::cast(
+              Isolate::Current()->js_builtins_object()->
+                  GetPropertyNoExceptionThrown(*fmt_str)));
   Object** argv[1] = { data.location() };

   bool caught_exception;
   Handle<Object> result =
-      Execution::TryCall(fun, Top::builtins(), 1, argv, &caught_exception);
+      Execution::TryCall(fun,
+          Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);

   if (caught_exception || !result->IsString()) {
-    return Factory::LookupAsciiSymbol("<error>");
+    return FACTORY->LookupAsciiSymbol("<error>");
   }
   Handle<String> result_string = Handle<String>::cast(result);
   // A string that has been obtained from JS code in this way is
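The messages.cc hunks above are part of V8's isolate migration: process-wide statics such as Factory:: and Top:: become state reached through the current Isolate, with FACTORY resolving to that isolate's factory (as the hunks use it). A standalone sketch of the pattern; the classes below are illustrative, not V8's:

    // Sketch: a process-wide static interface becomes per-thread isolate
    // state; call sites change from Factory::F(...) to FACTORY->F(...).
    #include <cstdio>

    class Factory {
     public:
      void LookupAsciiSymbol(const char* s) { std::printf("symbol %s\n", s); }
    };

    class Isolate {
     public:
      static Isolate* Current() {
        static thread_local Isolate current;  // one isolate per thread
        return &current;
      }
      Factory* factory() { return &factory_; }
     private:
      Factory factory_;
    };

    #define FACTORY (Isolate::Current()->factory())

    int main() {
      FACTORY->LookupAsciiSymbol("FormatMessage");  // was Factory::LookupAsciiSymbol(...)
    }

Every Factory-to-FACTORY hunk above is this same mechanical rewrite, while Top::builtins() becomes an explicit Isolate::Current() lookup.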
data/vendor/v8/src/messages.js

@@ -226,6 +226,15 @@ function FormatMessage(message) {
   strict_reserved_word: ["Use of future reserved word in strict mode"],
   strict_delete: ["Delete of an unqualified identifier in strict mode."],
   strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
+  strict_const: ["Use of const in strict mode."],
+  strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
+  strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+  strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
+  strict_arguments_callee: ["Cannot access property 'callee' of strict mode arguments"],
+  strict_arguments_caller: ["Cannot access property 'caller' of strict mode arguments"],
+  strict_function_caller: ["Cannot access property 'caller' of a strict mode function"],
+  strict_function_arguments: ["Cannot access property 'arguments' of a strict mode function"],
+  strict_caller: ["Illegal access to a strict mode caller function."],
   };
 }
 var message_type = %MessageGetType(message);
@@ -487,10 +496,24 @@ Script.prototype.nameOrSourceURL = function() {
   // because this file is being processed by js2c whose handling of spaces
   // in regexps is broken. Also, ['"] are excluded from allowed URLs to
   // avoid matches against sources that invoke evals with sourceURL.
-  var sourceUrlPattern =
-      /\/\/@[\040\t]sourceURL=[\040\t]*([^\s'"]*)[\040\t]*$/m;
-  var match = sourceUrlPattern.exec(this.source);
-  return match ? match[1] : this.name;
+  // A better solution would be to detect these special comments in
+  // the scanner/parser.
+  var source = ToString(this.source);
+  var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
+  if (sourceUrlPos > 4) {
+    var sourceUrlPattern =
+        /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
+    // Don't reuse lastMatchInfo here, so we create a new array with room
+    // for four captures (array with length one longer than the index
+    // of the fourth capture, where the numbering is zero-based).
+    var matchInfo = new InternalArray(CAPTURE(3) + 1);
+    var match =
+        %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
+    if (match) {
+      return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+    }
+  }
+  return this.name;
 }

@@ -1059,9 +1082,9 @@ function errorToString() {
   }
 }

-%FunctionSetName(errorToString, 'toString');
-%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
+
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);

 // Boilerplate for exceptions for stack overflows. Used from
-// Top::StackOverflow().
+// Isolate::StackOverflow().
 const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
data/vendor/v8/src/mips/assembler-mips-inl.h

@@ -38,20 +38,12 @@

 #include "mips/assembler-mips.h"
 #include "cpu.h"
+#include "debug.h"


 namespace v8 {
 namespace internal {

-// -----------------------------------------------------------------------------
-// Condition
-
-Condition NegateCondition(Condition cc) {
-  ASSERT(cc != cc_always);
-  return static_cast<Condition>(cc ^ 1);
-}
-
-
 // -----------------------------------------------------------------------------
 // Operand and MemOperand

@@ -61,17 +53,13 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
   rmode_ = rmode;
 }

+
 Operand::Operand(const ExternalReference& f) {
   rm_ = no_reg;
   imm32_ = reinterpret_cast<int32_t>(f.address());
   rmode_ = RelocInfo::EXTERNAL_REFERENCE;
 }

-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}

 Operand::Operand(Smi* value) {
   rm_ = no_reg;
@@ -79,10 +67,12 @@ Operand::Operand(Smi* value) {
   rmode_ = RelocInfo::NONE;
 }

+
 Operand::Operand(Register rm) {
   rm_ = rm;
 }

+
 bool Operand::is_reg() const {
   return rm_.is_valid();
 }
@@ -105,8 +95,29 @@ Address RelocInfo::target_address() {


 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return reinterpret_cast<Address>(pc_);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For an instructions like LUI/ORI where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written. In this case the target_address_address function should
+  // return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target. In our case, that is the
+  // address of the instruction that follows LUI/ORI instruction pair.
+  return reinterpret_cast<Address>(
+      pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+}
+
+
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
 }
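The long comment above turns on how MIPS materializes a 32-bit target address: as a LUI/ORI pair whose two 16-bit immediates jointly hold the value, so the patchable region spans both instructions and target_address_address() has to point past the pair. A minimal sketch of that encoding, using the architectural MIPS32 opcodes (the helper names are ours, not V8's):

    #include <cstdint>
    #include <cstdio>

    const uint32_t kLui = 0x0Fu << 26;  // lui rt, imm16
    const uint32_t kOri = 0x0Du << 26;  // ori rt, rs, imm16

    // Emit "lui rt, hi(value); ori rt, rt, lo(value)".
    void EmitLoad32(uint32_t* pc, unsigned rt, uint32_t value) {
      pc[0] = kLui | (rt << 16) | (value >> 16);
      pc[1] = kOri | (rt << 21) | (rt << 16) | (value & 0xFFFFu);
    }

    // Patching a new target rewrites both 16-bit immediates, so the
    // "address of the target" really spans the whole instruction pair.
    uint32_t ReadLoad32(const uint32_t* pc) {
      return ((pc[0] & 0xFFFFu) << 16) | (pc[1] & 0xFFFFu);
    }

    int main() {
      uint32_t code[2];
      EmitLoad32(code, 1, 0xDEADBEEFu);         // rt = $at, register 1
      std::printf("%#x\n", ReadLoad32(code));   // prints 0xdeadbeef
    }

The same LUI/ORI pair (followed by JALR) is what IsPatchedReturnSequence() further down matches to recognize a patched return site.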
@@ -130,8 +141,15 @@ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {


 Object** RelocInfo::target_object_address() {
+  // Provide a "natural pointer" to the embedded object,
+  // which can be de-referenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(pc_);
+  // TODO(mips): Commenting out, to simplify arch-independent changes.
+  // GC won't work like this, but this commit is for asm/disasm/sim.
+  // reconstructed_obj_ptr_ =
+  //     reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  // return &reconstructed_obj_ptr_;
+  return NULL;
 }

@@ -143,23 +161,55 @@ void RelocInfo::set_target_object(Object* target) {

 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address*>(pc_);
+  // TODO(mips): Commenting out, to simplify arch-independent changes.
+  // GC won't work like this, but this commit is for asm/disasm/sim.
+  // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  // return &reconstructed_adr_ptr_;
+  return NULL;
+}
+
+
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
 }


 Address RelocInfo::call_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes mips patched return sequence per
+  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  return Assembler::target_address_at(pc_);
 }


 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes mips patched return sequence per
+  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  Assembler::set_target_address_at(pc_, target);
 }

@@ -169,9 +219,8 @@ Object* RelocInfo::call_object() {


 Object** RelocInfo::call_object_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }

@@ -182,13 +231,76 @@ void RelocInfo::set_call_object(Object* target) {


 bool RelocInfo::IsPatchedReturnSequence() {
-#ifdef DEBUG
-  PrintF("%s - %d - %s : Checking for jal(r)",
-      __FILE__, __LINE__, __func__);
+  Instr instr0 = Assembler::instr_at(pc_);
+  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+                         (instr1 & kOpcodeMask) == ORI &&
+                         (instr2 & kOpcodeMask) == SPECIAL &&
+                         (instr2 & kFunctionFieldMask) == JALR);
+  return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+  Instr current_instr = Assembler::instr_at(pc_);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    // RelocInfo is needed when pointer must be updated/serialized, such as
+    // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
+    // It is ignored by visitors that do not need it.
+    // Commenting out, to simplify arch-independednt changes.
+    // GC won't work like this, but this commit is for asm/disasm/sim.
+    // visitor->VisitPointer(target_object_address(), this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    // RelocInfo is needed when external-references must be serialized by
+    // Serializer Visitor in serialize.cc. It is ignored by visitors that
+    // do not need it.
+    // Commenting out, to simplify arch-independednt changes.
+    // Serializer won't work like this, but this commit is for asm/disasm/sim.
+    // visitor->VisitExternalReference(target_reference_address(), this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // TODO(isolates): Get a cached isolate below.
+  } else if (((RelocInfo::IsJSReturn(mode) &&
+               IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence())) &&
+             Isolate::Current()->debug()->has_break_points()) {
+    visitor->VisitDebugTarget(this);
 #endif
-  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
-         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
-          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitPointer(heap, target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (heap->isolate()->debug()->has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
+               IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
 }

@@ -203,10 +315,18 @@ void Assembler::CheckBuffer() {
 }


+void Assembler::CheckTrampolinePoolQuick() {
+  if (pc_offset() >= next_buffer_check_) {
+    CheckTrampolinePool();
+  }
+}
+
+
 void Assembler::emit(Instr x) {
   CheckBuffer();
   *reinterpret_cast<Instr*>(pc_) = x;
   pc_ += kInstrSize;
+  CheckTrampolinePoolQuick();
 }

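CheckTrampolinePoolQuick() exists because a MIPS branch encodes a signed 16-bit offset counted in 4-byte instructions (an 18-bit byte offset after shifting), so a branch can only reach roughly +/-128 KB; once enough code has been emitted, the assembler must plant trampolines for branches whose labels are still unbound. The range arithmetic, as a standalone sketch (constant names echo the diff; the derivation is ours):

    #include <cstdio>

    int main() {
      const int kInstrSize = 4;   // bytes per MIPS instruction
      const int kImmBits = 16;    // signed branch-offset field, in words
      // Largest forward reach in bytes: (2^15 - 1) words of 4 bytes each.
      const int kMaxBranchOffset = ((1 << (kImmBits - 1)) - 1) * kInstrSize;
      std::printf("max forward branch: %d bytes\n", kMaxBranchOffset);  // 131068
    }

This is also why the Assembler constructor hunk further down initializes next_buffer_check_ to kMaxBranchOffset - kTrampolineSize: the pool has to be emitted before any pending branch goes out of range.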
data/vendor/v8/src/mips/assembler-mips.cc

@@ -40,82 +40,40 @@
 #include "mips/assembler-mips-inl.h"
 #include "serialize.h"

-
 namespace v8 {
 namespace internal {

+CpuFeatures::CpuFeatures()
+    : supported_(0),
+      enabled_(0),
+      found_by_runtime_probing_(0) {
+}

+void CpuFeatures::Probe(bool portable) {
+  // If the compiler is allowed to use fpu then we can use fpu too in our
+  // code generation.
+#if !defined(__mips__)
+  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+  if (FLAG_enable_fpu) {
+    supported_ |= 1u << FPU;
+  }
+#else
+  if (portable && Serializer::enabled()) {
+    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    return;  // No features if we might serialize.
+  }
+
+  if (OS::MipsCpuHasFeature(FPU)) {
+    // This implementation also sets the FPU flags if
+    // runtime detection of FPU returns true.
+    supported_ |= 1u << FPU;
+    found_by_runtime_probing_ |= 1u << FPU;
+  }
+
+  if (!portable) found_by_runtime_probing_ = 0;
+#endif
+}

-const Register no_reg = { -1 };
-
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
-
-
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 };
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 };
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 };
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };

 int ToNumber(Register reg) {
   ASSERT(reg.is_valid());
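CpuFeatures::Probe() above records each detected feature as a bit in supported_ and separately tracks which bits came from runtime probing rather than being implied by the platform. A toy version of that bitmask bookkeeping (the feature numbering below is arbitrary, not V8's):

    #include <cstdio>

    enum CpuFeature { FPU = 0 };  // arbitrary bit position for the sketch

    class CpuFeatures {
     public:
      void Probe(bool fpu_present) {
        if (fpu_present) {
          supported_ |= 1u << FPU;                 // usable by generated code
          found_by_runtime_probing_ |= 1u << FPU;  // not implied by platform
        }
      }
      bool IsSupported(CpuFeature f) const {
        return (supported_ & (1u << f)) != 0;
      }
     private:
      unsigned supported_ = 0;
      unsigned found_by_runtime_probing_ = 0;
    };

    int main() {
      CpuFeatures features;
      features.Probe(true);
      std::printf("FPU: %d\n", features.IsSupported(FPU));  // FPU: 1
    }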
@@ -156,6 +114,7 @@ int ToNumber(Register reg) {
   return kNumbers[reg.code()];
 }

+
 Register ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
   const Register kRegisters[] = {
@@ -181,6 +140,15 @@ Register ToRegister(int num) {

 const int RelocInfo::kApplyMask = 0;

+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded. Being
+  // specially coded on MIPS means that it is a lui/ori instruction, and that is
+  // always the case inside code objects.
+  return true;
+}
+
+
 // Patch the code at the current address with the supplied instructions.
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -210,7 +178,7 @@ Operand::Operand(Handle<Object> handle) {
   rm_ = no_reg;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!Heap::InNewSpace(obj));
+  ASSERT(!HEAP->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -221,26 +189,66 @@ Operand::Operand(Handle<Object> handle) {
   }
 }

-MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
   offset_ = offset;
 }


 // -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
+// Specific instructions, constants, and masks.
+
+static const int kNegOffset = 0x00008000;
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+      | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+      | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+// sw(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+      | (0 & kImm16Mask);
+// lw(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+      | (0 & kImm16Mask);
+
+const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+      | (0 & kImm16Mask);
+
+const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+      | (0 & kImm16Mask);
+
+const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+      | (kNegOffset & kImm16Mask);
+
+const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+      | (kNegOffset & kImm16Mask);
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(void* buffer, int buffer_size)
+    : AssemblerBase(Isolate::Current()),
+      positions_recorder_(this),
+      allow_peephole_optimization_(false) {
+  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;

-      if (spare_buffer_ != NULL) {
-        buffer = spare_buffer_;
-        spare_buffer_ = NULL;
+      if (isolate()->assembler_spare_buffer() != NULL) {
+        buffer = isolate()->assembler_spare_buffer();
+        isolate()->set_assembler_spare_buffer(NULL);
       }
     }
     if (buffer == NULL) {
@@ -263,17 +271,19 @@ Assembler::Assembler(void* buffer, int buffer_size) {
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  current_statement_position_ = RelocInfo::kNoPosition;
-  current_position_ = RelocInfo::kNoPosition;
-  written_statement_position_ = current_statement_position_;
-  written_position_ = current_position_;
+
+  last_trampoline_pool_end_ = 0;
+  no_trampoline_pool_before_ = 0;
+  trampoline_pool_blocked_nesting_ = 0;
+  next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
 }


 Assembler::~Assembler() {
   if (own_buffer_) {
-    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
-      spare_buffer_ = buffer_;
+    if (isolate()->assembler_spare_buffer() == NULL &&
+        buffer_size_ == kMinimalBufferSize) {
+      isolate()->set_assembler_spare_buffer(buffer_);
     } else {
       DeleteArray(buffer_);
     }
@@ -282,7 +292,7 @@ Assembler::~Assembler() {


 void Assembler::GetCode(CodeDesc* desc) {
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -291,6 +301,60 @@ void Assembler::GetCode(CodeDesc* desc) {
291
301
  }
292
302
 
293
303
 
304
+ void Assembler::Align(int m) {
305
+ ASSERT(m >= 4 && IsPowerOf2(m));
306
+ while ((pc_offset() & (m - 1)) != 0) {
307
+ nop();
308
+ }
309
+ }
310
+
311
+
312
+ void Assembler::CodeTargetAlign() {
313
+ // No advantage to aligning branch/call targets to more than
314
+ // single instruction, that I am aware of.
315
+ Align(4);
316
+ }
317
+
318
+
319
+ Register Assembler::GetRt(Instr instr) {
320
+ Register rt;
321
+ rt.code_ = (instr & kRtMask) >> kRtShift;
322
+ return rt;
323
+ }
324
+
325
+
326
+ bool Assembler::IsPop(Instr instr) {
327
+ return (instr & ~kRtMask) == kPopRegPattern;
328
+ }
329
+
330
+
331
+ bool Assembler::IsPush(Instr instr) {
332
+ return (instr & ~kRtMask) == kPushRegPattern;
333
+ }
334
+
335
+
336
+ bool Assembler::IsSwRegFpOffset(Instr instr) {
337
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
338
+ }
339
+
340
+
341
+ bool Assembler::IsLwRegFpOffset(Instr instr) {
342
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
343
+ }
344
+
345
+
346
+ bool Assembler::IsSwRegFpNegOffset(Instr instr) {
347
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
348
+ kSwRegFpNegOffsetPattern);
349
+ }
350
+
351
+
352
+ bool Assembler::IsLwRegFpNegOffset(Instr instr) {
353
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
354
+ kLwRegFpNegOffsetPattern);
355
+ }
356
+
357
+
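The IsPush/IsPop/IsLwRegFpOffset recognizers added above classify an already-emitted instruction word purely by masking: clear the fields that may legitimately vary (here the rt register field) and compare what remains against a fixed pattern. A minimal standalone sketch of the same idea, using the architectural MIPS field layout but a locally computed stand-in for kPushRegPattern (the real constant lives in assembler-mips.h):

#include <cstdint>
#include <cstdio>

typedef uint32_t Instr;

// Architectural MIPS layout: opcode[31:26] rs[25:21] rt[20:16] imm[15:0].
const Instr kRtFieldMask = 0x1Fu << 16;
const Instr kRtMask = kRtFieldMask;

// Stand-in for kPushRegPattern: "sw <rt>, 0(sp)" with the rt field zeroed
// (SW opcode = 0x2B, sp = register 29).
const Instr kPushRegPattern = (0x2Bu << 26) | (29u << 21);

bool IsPush(Instr instr) {
  // Ignore which register is stored; everything else must match exactly.
  return (instr & ~kRtMask) == kPushRegPattern;
}

int main() {
  Instr sw_t0_sp = kPushRegPattern | (8u << 16);              // sw t0, 0(sp)
  Instr sw_t0_fp = (0x2Bu << 26) | (30u << 21) | (8u << 16);  // sw t0, 0(fp)
  printf("sw t0, 0(sp) is push: %d\n", IsPush(sw_t0_sp));     // 1
  printf("sw t0, 0(fp) is push: %d\n", IsPush(sw_t0_fp));     // 0
}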
  // Labels refer to positions in the (to be) generated code.
  // There are bound, linked, and unused labels.
  //
@@ -301,14 +365,19 @@ void Assembler::GetCode(CodeDesc* desc) {
  // to be generated; pos() is the position of the last
  // instruction using the label.
 
+ // The link chain is terminated by a value of -1 in the instruction,
+ // which is an otherwise illegal value (branch -1 is an infinite loop).
+ // The instruction's 16-bit offset field addresses 32-bit words, but in
+ // code it is converted to an 18-bit value addressing bytes, hence the -4.
 
- // The link chain is terminated by a negative code position (must be aligned).
  const int kEndOfChain = -4;
 
- bool Assembler::is_branch(Instr instr) {
+
+ bool Assembler::IsBranch(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt_field = ((instr & kRtFieldMask));
    uint32_t rs_field = ((instr & kRsFieldMask));
+   uint32_t label_constant = (instr & ~kImm16Mask);
    // Checks if the instruction is a branch.
    return opcode == BEQ ||
        opcode == BNE ||
@@ -320,7 +389,79 @@ bool Assembler::is_branch(Instr instr) {
        opcode == BGTZL ||
        (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                              rt_field == BLTZAL || rt_field == BGEZAL)) ||
-       (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+       (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
+       label_constant == 0;  // Emitted label const in reg-exp engine.
+ }
+
+
+ bool Assembler::IsNop(Instr instr, unsigned int type) {
+   // See Assembler::nop(type).
+   ASSERT(type < 32);
+   uint32_t opcode = ((instr & kOpcodeMask));
+   uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+   uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+   uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+   // nop(type) == sll(zero_reg, zero_reg, type);
+   // Technically all these values will be 0 but
+   // this makes more sense to the reader.
+
+   bool ret = (opcode == SLL &&
+               rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+               rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+               sa == type);
+
+   return ret;
+ }
+
+
+ int32_t Assembler::GetBranchOffset(Instr instr) {
+   ASSERT(IsBranch(instr));
+   return ((int16_t)(instr & kImm16Mask)) << 2;
+ }
+
+
+ bool Assembler::IsLw(Instr instr) {
+   return ((instr & kOpcodeMask) == LW);
+ }
+
+
+ int16_t Assembler::GetLwOffset(Instr instr) {
+   ASSERT(IsLw(instr));
+   return ((instr & kImm16Mask));
+ }
+
+
+ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+   ASSERT(IsLw(instr));
+
+   // We actually create a new lw instruction based on the original one.
+   Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+       | (offset & kImm16Mask);
+
+   return temp_instr;
+ }
+
+
+ bool Assembler::IsSw(Instr instr) {
+   return ((instr & kOpcodeMask) == SW);
+ }
+
+
+ Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+   ASSERT(IsSw(instr));
+   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+ }
+
+
+ bool Assembler::IsAddImmediate(Instr instr) {
+   return ((instr & kOpcodeMask) == ADDIU);
+ }
+
+
+ Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+   ASSERT(IsAddImmediate(instr));
+   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
  }
 
 
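GetBranchOffset above turns the stored 16-bit immediate back into a byte offset by sign-extending it and shifting left by two, since the field counts 32-bit words rather than bytes. A self-contained round-trip sketch of that encoding; the helper names here are illustrative, not V8's:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;
const Instr kImm16Mask = 0xFFFF;

// Encode a byte offset (a multiple of 4) into the 16-bit immediate field.
Instr EncodeBranchOffset(int32_t byte_offset, Instr instr) {
  assert((byte_offset & 3) == 0);
  int32_t imm16 = byte_offset >> 2;           // The field counts words.
  assert(imm16 >= -32768 && imm16 <= 32767);  // Must fit in int16.
  return (instr & ~kImm16Mask) | (imm16 & kImm16Mask);
}

// Decode it back, mirroring Assembler::GetBranchOffset: sign-extend the
// low 16 bits, then multiply by 4 with a left shift.
int32_t DecodeBranchOffset(Instr instr) {
  int32_t imm16 = static_cast<int16_t>(instr & kImm16Mask);
  return imm16 << 2;
}

int main() {
  Instr instr = EncodeBranchOffset(-8, 0x10000000);  // Some branch opcode.
  assert(DecodeBranchOffset(instr) == -8);           // Round-trips exactly.
}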
@@ -328,16 +469,25 @@ int Assembler::target_at(int32_t pos) {
    Instr instr = instr_at(pos);
    if ((instr & ~kImm16Mask) == 0) {
      // Emitted label constant, not part of a branch.
-     return instr - (Code::kHeaderSize - kHeapObjectTag);
+     if (instr == 0) {
+       return kEndOfChain;
+     } else {
+       int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+       return (imm18 + pos);
+     }
    }
    // Check we have a branch instruction.
-   ASSERT(is_branch(instr));
+   ASSERT(IsBranch(instr));
    // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
    // the compiler uses arithmetic shifts for signed integers.
-   int32_t imm18 = ((instr &
-       static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+   int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
 
-   return pos + kBranchPCOffset + imm18;
+   if (imm18 == kEndOfChain) {
+     // EndOfChain sentinel is returned directly, not relative to pc or pos.
+     return kEndOfChain;
+   } else {
+     return pos + kBranchPCOffset + imm18;
+   }
  }
 
 
@@ -351,7 +501,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
      return;
    }
 
-   ASSERT(is_branch(instr));
+   ASSERT(IsBranch(instr));
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);
 
@@ -388,10 +538,28 @@ void Assembler::print(Label* L) {
 
 
  void Assembler::bind_to(Label* L, int pos) {
-   ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+   ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
    while (L->is_linked()) {
      int32_t fixup_pos = L->pos();
-     next(L);  // call next before overwriting link with target at fixup_pos
+     int32_t dist = pos - fixup_pos;
+     next(L);  // Call next before overwriting link with target at fixup_pos.
+     if (dist > kMaxBranchOffset) {
+       do {
+         int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+         ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+         target_at_put(fixup_pos, trampoline_pos);
+         fixup_pos = trampoline_pos;
+         dist = pos - fixup_pos;
+       } while (dist > kMaxBranchOffset);
+     } else if (dist < -kMaxBranchOffset) {
+       do {
+         int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+         ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
+         target_at_put(fixup_pos, trampoline_pos);
+         fixup_pos = trampoline_pos;
+         dist = pos - fixup_pos;
+       } while (dist < -kMaxBranchOffset);
+     }
      target_at_put(fixup_pos, pos);
    }
    L->bind_to(pos);
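The loops added to bind_to handle uses of a label that sit farther away than one branch can reach: the out-of-range branch is retargeted at an intermediate trampoline slot, and the walk continues from there until the remaining distance fits. A toy sketch of that hop arithmetic, with an assumed reach and a made-up fixed pool stride (the real assembler looks slots up in trampolines_ instead):

#include <cstdio>

// Assumed reach of one MIPS branch: a signed 16-bit word offset covers
// roughly +/- 128 KB of code (compare kMaxBranchOffset in the real header).
const int kReach = (1 << 17) - 1;

// Hypothetical pool spacing, for illustration only.
const int kPoolStride = 64 * 1024;

int main() {
  int fixup_pos = 0;        // Branch that needs patching.
  int target = 400 * 1024;  // Label bound far beyond a single branch's reach.
  while (target - fixup_pos > kReach) {
    // Retarget the branch at the next trampoline slot, then continue from it.
    int trampoline_pos = fixup_pos + kPoolStride;
    printf("patch %d -> trampoline at %d\n", fixup_pos, trampoline_pos);
    fixup_pos = trampoline_pos;
  }
  printf("final hop %d -> %d\n", fixup_pos, target);
}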
@@ -416,16 +584,16 @@ void Assembler::link_to(Label* L, Label* appendix) {
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
-     // L is empty, simply use appendix
+     // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
- appendix->Unuse();  // appendix should not be used anymore
+ appendix->Unuse();  // Appendix should not be used anymore.
 }
 
 
 void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // label can only be bound once
+  ASSERT(!L->is_bound());  // Label can only be bound once.
   bind_to(L, pc_offset());
 }
 
@@ -433,11 +601,11 @@ void Assembler::bind(Label* L) {
 void Assembler::next(Label* L) {
   ASSERT(L->is_linked());
   int link = target_at(L->pos());
-  if (link > 0) {
-    L->link_to(link);
-  } else {
-    ASSERT(link == kEndOfChain);
+  ASSERT(link > 0 || link == kEndOfChain);
+  if (link == kEndOfChain) {
     L->Unuse();
+  } else if (link > 0) {
+    L->link_to(link);
   }
 }
 
@@ -446,13 +614,8 @@ void Assembler::next(Label* L) {
 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
 // space. There is no guarantee that the relocated location can be similarly
 // encoded.
- bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
-   if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-     return Serializer::enabled();
-   } else if (rmode == RelocInfo::NONE) {
-     return false;
-   }
-   return true;
+ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+   return rmode != RelocInfo::NONE;
 }
 
 
@@ -469,6 +632,19 @@ void Assembler::GenInstrRegister(Opcode opcode,
 }
 
 
+ void Assembler::GenInstrRegister(Opcode opcode,
+                                  Register rs,
+                                  Register rt,
+                                  uint16_t msb,
+                                  uint16_t lsb,
+                                  SecondaryField func) {
+   ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+       | (msb << kRdShift) | (lsb << kSaShift) | func;
+   emit(instr);
+ }
+
+
  void Assembler::GenInstrRegister(Opcode opcode,
                                   SecondaryField fmt,
                                   FPURegister ft,
@@ -476,8 +652,9 @@ void Assembler::GenInstrRegister(Opcode opcode,
                                   FPURegister fd,
                                   SecondaryField func) {
    ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
-   Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
-       | (fd.code() << 6) | func;
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+       | (fd.code() << kFdShift) | func;
    emit(instr);
  }
 
@@ -489,8 +666,22 @@ void Assembler::GenInstrRegister(Opcode opcode,
                                   FPURegister fd,
                                   SecondaryField func) {
    ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
    Instr instr = opcode | fmt | (rt.code() << kRtShift)
-       | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
    emit(instr);
  }
 
 
+ void Assembler::GenInstrRegister(Opcode opcode,
+                                  SecondaryField fmt,
+                                  Register rt,
+                                  FPUControlRegister fs,
+                                  SecondaryField func) {
+   ASSERT(fs.is_valid() && rt.is_valid());
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+   Instr instr =
+       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
    emit(instr);
  }
 
@@ -523,6 +714,7 @@ void Assembler::GenInstrImmediate(Opcode opcode,
                                   FPURegister ft,
                                   int32_t j) {
    ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
    Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
        | (j & kImm16Mask);
    emit(instr);
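Each GenInstrRegister overload simply ORs shifted fields into one 32-bit word. A standalone sketch using the architectural MIPS R-type shift amounts; the asserted encoding for `addu v0, a0, a1` checks the packing:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

// MIPS R-type layout: opcode[31:26] rs[25:21] rt[20:16] rd[15:11]
// sa[10:6] funct[5:0].
const int kRsShift = 21, kRtShift = 16, kRdShift = 11, kSaShift = 6;
const Instr SPECIAL = 0;  // R-type opcode group.
const Instr ADDU = 0x21;  // funct code for addu.

Instr GenInstrRegister(Instr opcode, int rs, int rt, int rd, int sa,
                       Instr funct) {
  return opcode | (rs << kRsShift) | (rt << kRtShift) | (rd << kRdShift) |
         (sa << kSaShift) | funct;
}

int main() {
  // addu v0, a0, a1  ->  v0 = $2, a0 = $4, a1 = $5.
  Instr instr = GenInstrRegister(SPECIAL, 4, 5, 2, 0, ADDU);
  assert(instr == 0x00851021);  // The canonical encoding of this instruction.
}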
@@ -532,26 +724,122 @@ void Assembler::GenInstrImmediate(Opcode opcode,
  // Registers are in the order of the instruction encoding, from left to right.
  void Assembler::GenInstrJump(Opcode opcode,
                               uint32_t address) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    ASSERT(is_uint26(address));
    Instr instr = opcode | address;
    emit(instr);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
+ }
+
+
+ // Returns the next free label entry from the next trampoline pool.
+ int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
+   int trampoline_count = trampolines_.length();
+   int32_t label_entry = 0;
+   ASSERT(trampoline_count > 0);
+
+   if (next_pool) {
+     for (int i = 0; i < trampoline_count; i++) {
+       if (trampolines_[i].start() > pos) {
+         label_entry = trampolines_[i].take_label();
+         break;
+       }
+     }
+   } else {  // Caller needs a label entry from the previous pool.
+     for (int i = trampoline_count - 1; i >= 0; i--) {
+       if (trampolines_[i].end() < pos) {
+         label_entry = trampolines_[i].take_label();
+         break;
+       }
+     }
+   }
+   return label_entry;
+ }
+
+
+ // Returns the next free trampoline entry from the next trampoline pool.
+ int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
+   int trampoline_count = trampolines_.length();
+   int32_t trampoline_entry = 0;
+   ASSERT(trampoline_count > 0);
+
+   if (next_pool) {
+     for (int i = 0; i < trampoline_count; i++) {
+       if (trampolines_[i].start() > pos) {
+         trampoline_entry = trampolines_[i].take_slot();
+         break;
+       }
+     }
+   } else {  // Caller needs a trampoline entry from the previous pool.
+     for (int i = trampoline_count - 1; i >= 0; i--) {
+       if (trampolines_[i].end() < pos) {
+         trampoline_entry = trampolines_[i].take_slot();
+         break;
+       }
+     }
+   }
+   return trampoline_entry;
  }
 
 
  int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
    int32_t target_pos;
+   int32_t pc_offset_v = pc_offset();
+
    if (L->is_bound()) {
      target_pos = L->pos();
+     int32_t dist = pc_offset_v - target_pos;
+     if (dist > kMaxBranchOffset) {
+       do {
+         int32_t trampoline_pos = get_trampoline_entry(target_pos);
+         ASSERT((trampoline_pos - target_pos) > 0);
+         ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
+         target_at_put(trampoline_pos, target_pos);
+         target_pos = trampoline_pos;
+         dist = pc_offset_v - target_pos;
+       } while (dist > kMaxBranchOffset);
+     } else if (dist < -kMaxBranchOffset) {
+       do {
+         int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+         ASSERT((target_pos - trampoline_pos) > 0);
+         ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
+         target_at_put(trampoline_pos, target_pos);
+         target_pos = trampoline_pos;
+         dist = pc_offset_v - target_pos;
+       } while (dist < -kMaxBranchOffset);
+     }
    } else {
      if (L->is_linked()) {
-       target_pos = L->pos();  // L's link
+       target_pos = L->pos();  // L's link.
+       int32_t dist = pc_offset_v - target_pos;
+       if (dist > kMaxBranchOffset) {
+         do {
+           int32_t label_pos = get_label_entry(target_pos);
+           ASSERT((label_pos - target_pos) < kMaxBranchOffset);
+           label_at_put(L, label_pos);
+           target_pos = label_pos;
+           dist = pc_offset_v - target_pos;
+         } while (dist > kMaxBranchOffset);
+       } else if (dist < -kMaxBranchOffset) {
+         do {
+           int32_t label_pos = get_label_entry(target_pos, false);
+           ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
+           label_at_put(L, label_pos);
+           target_pos = label_pos;
+           dist = pc_offset_v - target_pos;
+         } while (dist < -kMaxBranchOffset);
+       }
+       L->link_to(pc_offset());
      } else {
-       target_pos = kEndOfChain;
+       L->link_to(pc_offset());
+       return kEndOfChain;
      }
-     L->link_to(pc_offset());
    }
 
    int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+   ASSERT((offset & 3) == 0);
+   ASSERT(is_int16(offset >> 2));
+
    return offset;
  }
 
@@ -560,14 +848,20 @@ void Assembler::label_at_put(Label* L, int at_offset) {
    int target_pos;
    if (L->is_bound()) {
      target_pos = L->pos();
+     instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    } else {
      if (L->is_linked()) {
-       target_pos = L->pos();  // L's link
+       target_pos = L->pos();  // L's link.
+       int32_t imm18 = target_pos - at_offset;
+       ASSERT((imm18 & 3) == 0);
+       int32_t imm16 = imm18 >> 2;
+       ASSERT(is_int16(imm16));
+       instr_at_put(at_offset, (imm16 & kImm16Mask));
      } else {
        target_pos = kEndOfChain;
+       instr_at_put(at_offset, 0);
      }
      L->link_to(at_offset);
-     instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    }
  }
 
@@ -580,47 +874,66 @@ void Assembler::b(int16_t offset) {
 
 
  void Assembler::bal(int16_t offset) {
+   positions_recorder()->WriteRecordedPositions();
    bgezal(zero_reg, offset);
  }
 
 
  void Assembler::beq(Register rs, Register rt, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(BEQ, rs, rt, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bgez(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bgezal(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
+   positions_recorder()->WriteRecordedPositions();
    GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bgtz(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::blez(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bltz(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bltzal(Register rs, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
+   positions_recorder()->WriteRecordedPositions();
    GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::bne(Register rs, Register rt, int16_t offset) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrImmediate(BNE, rs, rt, offset);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
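Every branch emitter above wraps emission in a BlockTrampolinePoolScope and then blocks the pool for one more instruction, so a trampoline pool can never be inserted between a branch and its delay slot. A minimal sketch of that blocking discipline, with simplified stand-ins rather than the real classes:

#include <cstdio>

struct PoolState {
  int blocked_nesting = 0;  // Raised while a protected sequence is emitted.
  int no_pool_before = 0;   // pc offset before which no pool may be emitted.
  int pc = 0;
  bool CanEmitPool() const {
    return blocked_nesting == 0 && pc >= no_pool_before;
  }
};

struct BlockTrampolinePoolScope {
  explicit BlockTrampolinePoolScope(PoolState* s) : s_(s) {
    s_->blocked_nesting++;
  }
  ~BlockTrampolinePoolScope() { s_->blocked_nesting--; }
  PoolState* s_;
};

int main() {
  PoolState state;
  {
    BlockTrampolinePoolScope scope(&state);
    state.pc += 4;                            // Emit the branch itself.
    state.no_pool_before = state.pc + 1 * 4;  // Protect the delay slot too.
  }
  printf("pool allowed right after branch? %s\n",
         state.CanEmitPool() ? "yes" : "no");  // "no": delay slot pending.
  state.pc += 4;                               // Emit the delay-slot instr.
  printf("pool allowed now? %s\n", state.CanEmitPool() ? "yes" : "no");
}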
@@ -631,18 +944,27 @@ void Assembler::j(int32_t target) {
 
 
  void Assembler::jr(Register rs) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
+   if (rs.is(ra)) {
+     positions_recorder()->WriteRecordedPositions();
+   }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
 
  void Assembler::jal(int32_t target) {
+   positions_recorder()->WriteRecordedPositions();
    ASSERT(is_uint28(target) && ((target & 3) == 0));
    GenInstrJump(JAL, target >> 2);
  }
 
 
  void Assembler::jalr(Register rs, Register rd) {
+   BlockTrampolinePoolScope block_trampoline_pool(this);
+   positions_recorder()->WriteRecordedPositions();
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+   BlockTrampolinePoolFor(1);  // For associated delay slot.
  }
 
@@ -650,28 +972,164 @@ void Assembler::jalr(Register rs, Register rd) {
 
  // Arithmetic.
 
- void Assembler::add(Register rd, Register rs, Register rt) {
-   GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
- }
-
-
  void Assembler::addu(Register rd, Register rs, Register rt) {
    GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
  }
 
 
- void Assembler::addi(Register rd, Register rs, int32_t j) {
-   GenInstrImmediate(ADDI, rs, rd, j);
- }
-
-
  void Assembler::addiu(Register rd, Register rs, int32_t j) {
    GenInstrImmediate(ADDIU, rs, rd, j);
- }
 
+   // Eliminate pattern: push(r), pop().
+   //   addiu(sp, sp, Operand(-kPointerSize));
+   //   sw(src, MemOperand(sp, 0));
+   //   addiu(sp, sp, Operand(kPointerSize));
+   // Both instructions can be eliminated.
+   if (can_peephole_optimize(3) &&
+       // Pattern.
+       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+       (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
+       (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
+     pc_ -= 3 * kInstrSize;
+     if (FLAG_print_peephole_optimization) {
+       PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+     }
+   }
+
+   // Eliminate pattern: push(ry), pop(rx).
+   //   addiu(sp, sp, -kPointerSize);
+   //   sw(ry, MemOperand(sp, 0));
+   //   lw(rx, MemOperand(sp, 0));
+   //   addiu(sp, sp, kPointerSize);
+   // Both instructions can be eliminated if ry = rx.
+   // If ry != rx, a register copy from ry to rx is inserted
+   // after eliminating the push and the pop instructions.
+   if (can_peephole_optimize(4)) {
+     Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
+     Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
+     Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
+     Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+     if (IsPush(push_instr) &&
+         IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
+         post_pop_sp_set == kPopInstruction) {
+       if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
+         // For consecutive push and pop on different registers,
+         // we delete both the push & pop and insert a register move.
+         // push ry, pop rx --> mov rx, ry.
+         Register reg_pushed, reg_popped;
+         reg_pushed = GetRt(push_instr);
+         reg_popped = GetRt(pop_instr);
+         pc_ -= 4 * kInstrSize;
+         // Insert a mov instruction, which is better than a pair of push & pop.
+         or_(reg_popped, reg_pushed, zero_reg);
+         if (FLAG_print_peephole_optimization) {
+           PrintF("%x push/pop (diff reg) replaced by a reg move\n",
+                  pc_offset());
+         }
+       } else {
+         // For consecutive push and pop on the same register,
+         // both the push and the pop can be deleted.
+         pc_ -= 4 * kInstrSize;
+         if (FLAG_print_peephole_optimization) {
+           PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+         }
+       }
+     }
+   }
 
- void Assembler::sub(Register rd, Register rs, Register rt) {
-   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+   if (can_peephole_optimize(5)) {
+     Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
+     Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
+     Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
+     Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
+     Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+     if (IsPush(mem_write_instr) &&
+         pre_push_sp_set == kPushInstruction &&
+         IsPop(mem_read_instr) &&
+         post_pop_sp_set == kPopInstruction) {
+       if ((IsLwRegFpOffset(lw_instr) ||
+            IsLwRegFpNegOffset(lw_instr))) {
+         if ((mem_write_instr & kRtMask) ==
+             (mem_read_instr & kRtMask)) {
+           // Pattern: push & pop from/to same register,
+           // with a fp+offset lw in between.
+           //
+           // The following:
+           //   addiu sp, sp, -4
+           //   sw rx, [sp, #0]!
+           //   lw rz, [fp, #-24]
+           //   lw rx, [sp, 0]
+           //   addiu sp, sp, 4
+           //
+           // Becomes:
+           //   if(rx == rz)
+           //     delete all
+           //   else
+           //     lw rz, [fp, #-24]
+
+           if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
+             pc_ -= 5 * kInstrSize;
+           } else {
+             pc_ -= 5 * kInstrSize;
+             // Reinsert back the lw rz.
+             emit(lw_instr);
+           }
+           if (FLAG_print_peephole_optimization) {
+             PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+           }
+         } else {
+           // Pattern: push & pop from/to different registers
+           // with a fp + offset lw in between.
+           //
+           // The following:
+           //   addiu sp, sp, -4
+           //   sw rx, [sp, 0]
+           //   lw rz, [fp, #-24]
+           //   lw ry, [sp, 0]
+           //   addiu sp, sp, 4
+           //
+           // Becomes:
+           //   if(ry == rz)
+           //     mov ry, rx;
+           //   else if(rx != rz)
+           //     lw rz, [fp, #-24]
+           //     mov ry, rx
+           //   else if((ry != rz) || (rx == rz)) becomes:
+           //     mov ry, rx
+           //     lw rz, [fp, #-24]
+
+           Register reg_pushed, reg_popped;
+           if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
+             reg_pushed = GetRt(mem_write_instr);
+             reg_popped = GetRt(mem_read_instr);
+             pc_ -= 5 * kInstrSize;
+             or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+           } else if ((mem_write_instr & kRtMask)
+                      != (lw_instr & kRtMask)) {
+             reg_pushed = GetRt(mem_write_instr);
+             reg_popped = GetRt(mem_read_instr);
+             pc_ -= 5 * kInstrSize;
+             emit(lw_instr);
+             or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+           } else if (((mem_read_instr & kRtMask)
+                       != (lw_instr & kRtMask)) ||
+                      ((mem_write_instr & kRtMask)
+                       == (lw_instr & kRtMask))) {
+             reg_pushed = GetRt(mem_write_instr);
+             reg_popped = GetRt(mem_read_instr);
+             pc_ -= 5 * kInstrSize;
+             or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+             emit(lw_instr);
+           }
+           if (FLAG_print_peephole_optimization) {
+             PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+           }
+         }
+       }
+     }
+   }
  }
 
 
@@ -743,7 +1201,15 @@ void Assembler::nor(Register rd, Register rs, Register rt) {
 
 
  // Shifts.
- void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+ void Assembler::sll(Register rd,
+                     Register rt,
+                     uint16_t sa,
+                     bool coming_from_nop) {
+   // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+   // generated using the sll instruction. They must be generated using
+   // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+   // instructions.
+   ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
    GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
  }
 
@@ -773,30 +1239,199 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
  }
 
 
+ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+   // Should be called via MacroAssembler::Ror.
+   ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+   ASSERT(mips32r2);
+   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+   emit(instr);
+ }
+
+
+ void Assembler::rotrv(Register rd, Register rt, Register rs) {
+   // Should be called via MacroAssembler::Ror.
+   ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
+   ASSERT(mips32r2);
+   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+       | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+   emit(instr);
+ }
+
+
  //------------Memory-instructions-------------
 
+ // Helper for base-reg + offset, when offset is larger than int16.
+ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+   ASSERT(!src.rm().is(at));
+   lui(at, src.offset_ >> kLuiShift);
+   ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+   addu(at, at, src.rm());  // Add base register.
+ }
+
+
  void Assembler::lb(Register rd, const MemOperand& rs) {
-   GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to load.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
+   }
  }
 
 
  void Assembler::lbu(Register rd, const MemOperand& rs) {
-   GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to load.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
+   }
+ }
+
+
+ void Assembler::lh(Register rd, const MemOperand& rs) {
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to load.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
+   }
+ }
+
+
+ void Assembler::lhu(Register rd, const MemOperand& rs) {
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to load.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
+   }
  }
 
 
  void Assembler::lw(Register rd, const MemOperand& rs) {
-   GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to load.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+   }
+
+   if (can_peephole_optimize(2)) {
+     Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
+     Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
+
+     if ((IsSwRegFpOffset(sw_instr) &&
+          IsLwRegFpOffset(lw_instr)) ||
+         (IsSwRegFpNegOffset(sw_instr) &&
+          IsLwRegFpNegOffset(lw_instr))) {
+       if ((lw_instr & kLwSwInstrArgumentMask) ==
+           (sw_instr & kLwSwInstrArgumentMask)) {
+         // Pattern: Lw/sw same fp+offset, same register.
+         //
+         // The following:
+         //   sw rx, [fp, #-12]
+         //   lw rx, [fp, #-12]
+         //
+         // Becomes:
+         //   sw rx, [fp, #-12]
+
+         pc_ -= 1 * kInstrSize;
+         if (FLAG_print_peephole_optimization) {
+           PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
+         }
+       } else if ((lw_instr & kLwSwOffsetMask) ==
+                  (sw_instr & kLwSwOffsetMask)) {
+         // Pattern: Lw/sw same fp+offset, different register.
+         //
+         // The following:
+         //   sw rx, [fp, #-12]
+         //   lw ry, [fp, #-12]
+         //
+         // Becomes:
+         //   sw rx, [fp, #-12]
+         //   mov ry, rx
+
+         Register reg_stored, reg_loaded;
+         reg_stored = GetRt(sw_instr);
+         reg_loaded = GetRt(lw_instr);
+         pc_ -= 1 * kInstrSize;
+         // Insert a mov instruction, which is better than lw.
+         or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
+         if (FLAG_print_peephole_optimization) {
+           PrintF("%x sw/lw (fp + same offset), diff reg\n", pc_offset());
+         }
+       }
+     }
+   }
+ }
+
+
+ void Assembler::lwl(Register rd, const MemOperand& rs) {
+   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+ }
+
+
+ void Assembler::lwr(Register rd, const MemOperand& rs) {
+   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
  }
 
 
  void Assembler::sb(Register rd, const MemOperand& rs) {
-   GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to store.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
+   }
+ }
+
+
+ void Assembler::sh(Register rd, const MemOperand& rs) {
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to store.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
+   }
  }
 
 
  void Assembler::sw(Register rd, const MemOperand& rs) {
-   GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+   if (is_int16(rs.offset_)) {
+     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+   } else {  // Offset > 16 bits, use multiple instructions to store.
+     LoadRegPlusOffsetToAt(rs);
+     GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+   }
+
+   // Eliminate pattern: pop(), push(r).
+   //   addiu sp, sp, Operand(kPointerSize);
+   //   addiu sp, sp, Operand(-kPointerSize);
+   //   -> sw r, MemOperand(sp, 0);
+   if (can_peephole_optimize(3) &&
+       // Pattern.
+       instr_at(pc_ - 1 * kInstrSize) ==
+           (kPushRegPattern | (rd.code() << kRtShift)) &&
+       instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
+       instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
+     pc_ -= 3 * kInstrSize;
+     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+     if (FLAG_print_peephole_optimization) {
+       PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+     }
+   }
+ }
+
+
+ void Assembler::swl(Register rd, const MemOperand& rs) {
+   GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+ }
+
+
+ void Assembler::swr(Register rd, const MemOperand& rs) {
+   GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
  }
 
 
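When an offset does not fit the signed 16-bit displacement field, the loads and stores above fall back to materializing the full 32-bit offset in the `at` scratch register (lui for the high half, ori for the low half) and indexing with displacement zero. A sketch of just that split, with illustrative helper names:

#include <cassert>
#include <cstdint>

const int kLuiShift = 16;
const uint32_t kImm16Mask = 0xFFFF;

bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }

// Mirrors what LoadRegPlusOffsetToAt feeds to lui/ori: the upper half goes
// to lui (which zeroes the low bits), the lower half is ORed in by ori.
void SplitOffset(int32_t offset, uint32_t* hi, uint32_t* lo) {
  *hi = static_cast<uint32_t>(offset) >> kLuiShift;  // lui at, hi
  *lo = static_cast<uint32_t>(offset) & kImm16Mask;  // ori at, at, lo
}

int main() {
  int32_t offset = 0x12348;  // Does not fit in a signed 16-bit field.
  assert(!is_int16(offset));
  uint32_t hi, lo;
  SplitOffset(offset, &hi, &lo);
  // lui writes the upper half; ori (bitwise OR) fills in the lower half.
  assert(((hi << 16) | lo) == static_cast<uint32_t>(offset));
}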
@@ -841,7 +1476,8 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
 
  void Assembler::tltu(Register rs, Register rt, uint16_t code) {
    ASSERT(is_uint10(code));
-   Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+   Instr instr =
+       SPECIAL | TLTU | rs.code() << kRsShift
        | rt.code() << kRtShift | code << 6;
    emit(instr);
  }
@@ -896,6 +1532,54 @@ void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  }
 
 
+ // Conditional move.
+ void Assembler::movz(Register rd, Register rs, Register rt) {
+   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+ }
+
+
+ void Assembler::movn(Register rd, Register rs, Register rt) {
+   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+ }
+
+
+ void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+   Register rt;
+   rt.code_ = (cc & 0x0003) << 2 | 1;
+   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+ }
+
+
+ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+   Register rt;
+   rt.code_ = (cc & 0x0003) << 2 | 0;
+   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+ }
+
+
+ // Bit twiddling.
+ void Assembler::clz(Register rd, Register rs) {
+   // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+   GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ }
+
+
+ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+   // Should be called via MacroAssembler::Ins.
+   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+   ASSERT(mips32r2);
+   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+ }
+
+
+ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+   // Should be called via MacroAssembler::Ext.
+   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+   ASSERT(mips32r2);
+   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+ }
+
+
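ins_ and ext_ reuse the rd and sa instruction fields to carry msb and lsb (msb = pos + size - 1 for ins, size - 1 for ext). What the MIPS32R2 ext operation computes, as a plain C++ sketch:

#include <cassert>
#include <cstdint>

// ext takes `size` bits of rs starting at bit `pos`, right-aligned into rt.
uint32_t Ext(uint32_t rs, unsigned pos, unsigned size) {
  assert(pos < 32 && size >= 1 && pos + size <= 32);
  uint32_t mask = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
  return (rs >> pos) & mask;
}

int main() {
  assert(Ext(0xABCD1234, 8, 8) == 0x12);  // Bits 8..15.
  assert(Ext(0xABCD1234, 28, 4) == 0xA);  // Top nibble.
}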
  //--------Coprocessor-instructions----------------
 
  // Load, store, move.
@@ -905,7 +1589,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
 
 
  void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
-   GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+   // load to two 32-bit loads.
+   GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+   FPURegister nextfpreg;
+   nextfpreg.setcode(fd.code() + 1);
+   GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
  }
 
 
915
1604
 
916
1605
 
917
1606
  void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
918
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
1607
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1608
+ // store to two 32-bit stores.
1609
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1610
+ FPURegister nextfpreg;
1611
+ nextfpreg.setcode(fd.code() + 1);
1612
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
919
1613
  }
920
1614
 
921
1615
 
922
- void Assembler::mtc1(FPURegister fs, Register rt) {
1616
+ void Assembler::mtc1(Register rt, FPURegister fs) {
923
1617
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
924
1618
  }
925
1619
 
926
1620
 
927
- void Assembler::mthc1(FPURegister fs, Register rt) {
928
- GenInstrRegister(COP1, MTHC1, rt, fs, f0);
1621
+ void Assembler::mfc1(Register rt, FPURegister fs) {
1622
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
929
1623
  }
930
1624
 
931
1625
 
932
- void Assembler::mfc1(FPURegister fs, Register rt) {
933
- GenInstrRegister(COP1, MFC1, rt, fs, f0);
1626
+ void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1627
+ GenInstrRegister(COP1, CTC1, rt, fs);
1628
+ }
1629
+
1630
+
1631
+ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1632
+ GenInstrRegister(COP1, CFC1, rt, fs);
1633
+ }
1634
+
1635
+
1636
+ // Arithmetic.
1637
+
1638
+ void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1639
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1640
+ }
1641
+
1642
+
1643
+ void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1644
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1645
+ }
1646
+
1647
+
1648
+ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1649
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1650
+ }
1651
+
1652
+
1653
+ void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1654
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1655
+ }
1656
+
1657
+
1658
+ void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1659
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
934
1660
  }
935
1661
 
936
1662
 
937
- void Assembler::mfhc1(FPURegister fs, Register rt) {
938
- GenInstrRegister(COP1, MFHC1, rt, fs, f0);
1663
+ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1664
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1665
+ }
1666
+
1667
+
1668
+ void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1669
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1670
+ }
1671
+
1672
+
1673
+ void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1674
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
939
1675
  }
940
1676
 
941
1677
 
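The ldc1/sdc1 workaround above splits one 64-bit FPU access into two word accesses on the even/odd register pair f(n)/f(n+1); on a little-endian MIPS32 target the low word of the double sits at the lower address, and the pair avoids the 8-byte alignment a real ldc1 would require. A sketch of the addressing it produces, with made-up register numbers and offsets for illustration:

#include <cstdio>

struct MemOp { int base_reg; int offset; };

int main() {
  MemOp src = {30, -16};  // e.g. some field at fp - 16 (hypothetical).
  int fd = 4;             // Destination pair f4/f5.
  // Two word loads replace one doubleword load:
  printf("lwc1 f%d, %d($%d)\n", fd, src.offset, src.base_reg);
  printf("lwc1 f%d, %d($%d)\n", fd + 1, src.offset + 4, src.base_reg);
}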
@@ -951,22 +1687,107 @@ void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  }
 
 
+ void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+ }
+
+
+ void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+ }
+
+
+ void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+ }
+
+
+ void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+ }
+
+
+ void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+ }
+
+
+ void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+ }
+
+
+ void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+ }
+
+
+ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+ }
+
+
  void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
    GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
  }
 
 
  void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
    GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
  }
 
 
+ void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
+   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+ }
+
+
+ void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
+   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+ }
+
+
+ void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+ }
+
+
+ void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+ }
+
+
+ void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+ }
+
+
+ void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+ }
+
+
+ void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+ }
+
+
+ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+ }
+
+
  void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
    GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
  }
 
 
  void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
    GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
  }
 
@@ -982,6 +1803,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
 
 
  void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+   ASSERT(mips32r2);
    GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
  }
 
@@ -993,7 +1815,8 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
 
  // Conditions.
  void Assembler::c(FPUCondition cond, SecondaryField fmt,
-                   FPURegister ft, FPURegister fs, uint16_t cc) {
+                   FPURegister fs, FPURegister ft, uint16_t cc) {
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
    ASSERT(is_uint3(cc));
    ASSERT((fmt & ~(31 << kRsShift)) == 0);
    Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1002,7 +1825,18 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
  }
 
 
+ void Assembler::fcmp(FPURegister src1, const double src2,
+                      FPUCondition cond) {
+   ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+   ASSERT(src2 == 0.0);
+   mtc1(zero_reg, f14);
+   cvt_d_w(f14, f14);
+   c(cond, D, src1, f14, 0);
+ }
+
+
  void Assembler::bc1f(int16_t offset, uint16_t cc) {
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
    ASSERT(is_uint3(cc));
    Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
    emit(instr);
@@ -1010,6 +1844,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
 
 
  void Assembler::bc1t(int16_t offset, uint16_t cc) {
+   ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
    ASSERT(is_uint3(cc));
    Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
    emit(instr);
@@ -1018,58 +1853,24 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
 
  // Debugging.
  void Assembler::RecordJSReturn() {
-   WriteRecordedPositions();
+   positions_recorder()->WriteRecordedPositions();
    CheckBuffer();
    RecordRelocInfo(RelocInfo::JS_RETURN);
  }
 
 
- void Assembler::RecordComment(const char* msg) {
-   if (FLAG_debug_code) {
-     CheckBuffer();
-     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
-   }
- }
-
-
- void Assembler::RecordPosition(int pos) {
-   if (pos == RelocInfo::kNoPosition) return;
-   ASSERT(pos >= 0);
-   current_position_ = pos;
- }
-
-
- void Assembler::RecordStatementPosition(int pos) {
-   if (pos == RelocInfo::kNoPosition) return;
-   ASSERT(pos >= 0);
-   current_statement_position_ = pos;
+ void Assembler::RecordDebugBreakSlot() {
+   positions_recorder()->WriteRecordedPositions();
+   CheckBuffer();
+   RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
  }
 
 
- bool Assembler::WriteRecordedPositions() {
-   bool written = false;
-
-   // Write the statement position if it is different from what was written last
-   // time.
-   if (current_statement_position_ != written_statement_position_) {
-     CheckBuffer();
-     RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
-     written_statement_position_ = current_statement_position_;
-     written = true;
-   }
-
-   // Write the position if it is different from what was written last time and
-   // also different from the written statement position.
-   if (current_position_ != written_position_ &&
-       current_position_ != written_statement_position_) {
+ void Assembler::RecordComment(const char* msg) {
+   if (FLAG_code_comments) {
      CheckBuffer();
-     RecordRelocInfo(RelocInfo::POSITION, current_position_);
-     written_position_ = current_position_;
-     written = true;
+     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
    }
-
-   // Return whether something was written.
-   return written;
  }
 
 
@@ -1077,7 +1878,7 @@ void Assembler::GrowBuffer() {
    if (!own_buffer_) FATAL("external code buffer is too small");
 
    // Compute new buffer size.
-   CodeDesc desc;  // the new buffer
+   CodeDesc desc;  // The new buffer.
    if (buffer_size_ < 4*KB) {
      desc.buffer_size = 4*KB;
    } else if (buffer_size_ < 1*MB) {
@@ -1085,7 +1886,7 @@ void Assembler::GrowBuffer() {
    } else {
      desc.buffer_size = buffer_size_ + 1*MB;
    }
-   CHECK_GT(desc.buffer_size, 0);  // no overflow
+   CHECK_GT(desc.buffer_size, 0);  // No overflow.
 
    // Setup new buffer.
    desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -1108,7 +1909,6 @@ void Assembler::GrowBuffer() {
    reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                 reloc_info_writer.last_pc() + pc_delta);
 
-
    // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
    // shift by pc_delta. But on MIPS the target address is directly loaded, so
    // we do not need to relocate here.
@@ -1117,11 +1917,26 @@  }
 
 
+ void Assembler::db(uint8_t data) {
+   CheckBuffer();
+   *reinterpret_cast<uint8_t*>(pc_) = data;
+   pc_ += sizeof(uint8_t);
+ }
+
+
+ void Assembler::dd(uint32_t data) {
+   CheckBuffer();
+   *reinterpret_cast<uint32_t*>(pc_) = data;
+   pc_ += sizeof(uint32_t);
+ }
+
+
  void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+   RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
+   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
      // Adjust code for new modes.
-     ASSERT(RelocInfo::IsJSReturn(rmode)
+     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+            || RelocInfo::IsJSReturn(rmode)
             || RelocInfo::IsComment(rmode)
             || RelocInfo::IsPosition(rmode));
@@ -1133,12 +1948,72 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
         !FLAG_debug_code) {
       return;
     }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     reloc_info_writer.Write(&rinfo);
   }
 }
 
 
 
1957
+ void Assembler::BlockTrampolinePoolFor(int instructions) {
1958
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
1959
+ }
1960
+
1961
+
1962
+ void Assembler::CheckTrampolinePool(bool force_emit) {
1963
+ // Calculate the offset of the next check.
1964
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
1965
+
1966
+ int dist = pc_offset() - last_trampoline_pool_end_;
1967
+
1968
+ if (dist <= kMaxDistBetweenPools && !force_emit) {
1969
+ return;
1970
+ }
1971
+
1972
+ // Some small sequences of instructions must not be broken up by the
1973
+ // insertion of a trampoline pool; such sequences are protected by setting
1974
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
1975
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
1976
+ // are blocked by trampoline_pool_blocked_nesting_.
1977
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
1978
+ (pc_offset() < no_trampoline_pool_before_)) {
1979
+ // Emission is currently blocked; make sure we try again as soon as
1980
+ // possible.
1981
+ if (trampoline_pool_blocked_nesting_ > 0) {
1982
+ next_buffer_check_ = pc_offset() + kInstrSize;
1983
+ } else {
1984
+ next_buffer_check_ = no_trampoline_pool_before_;
1985
+ }
1986
+ return;
1987
+ }
1988
+
1989
+ // First we emit jump (2 instructions), then we emit trampoline pool.
1990
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
1991
+ Label after_pool;
1992
+ b(&after_pool);
1993
+ nop();
1994
+
1995
+ int pool_start = pc_offset();
1996
+ for (int i = 0; i < kSlotsPerTrampoline; i++) {
1997
+ b(&after_pool);
1998
+ nop();
1999
+ }
2000
+ for (int i = 0; i < kLabelsPerTrampoline; i++) {
2001
+ emit(0);
2002
+ }
2003
+ last_trampoline_pool_end_ = pc_offset() - kInstrSize;
2004
+ bind(&after_pool);
2005
+ trampolines_.Add(Trampoline(pool_start,
2006
+ kSlotsPerTrampoline,
2007
+ kLabelsPerTrampoline));
2008
+
2009
+ // Since a trampoline pool was just emitted,
2010
+ // move the check offset forward by the standard interval.
2011
+ next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
2012
+ }
2013
+ return;
2014
+ }
2015
+
2016
+
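CheckTrampolinePool above emits a jump over the pool (branch plus delay-slot nop), then one branch/nop pair per trampoline slot, then the raw label words. A sketch of the resulting size arithmetic, with hypothetical parameter values (the real constants live in assembler-mips.h):

#include <cstdio>

// Hypothetical pool parameters, for the arithmetic only.
const int kInstrSize = 4;
const int kSlotsPerTrampoline = 2304;
const int kLabelsPerTrampoline = 8;

int main() {
  // Layout: jump-over pair, then a branch/nop pair per slot, then label words.
  int pool_instrs = 2 + 2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
  printf("pool size: %d bytes\n", pool_instrs * kInstrSize);
}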
 Address Assembler::target_address_at(Address pc) {
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
@@ -1157,7 +2032,7 @@ Address Assembler::target_address_at(Address pc) {
       return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
     }
   } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
-    // 32 bits value.
+    // 32 bit value.
     return reinterpret_cast<Address>(
         (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
   }
@@ -1176,38 +2051,37 @@ void Assembler::set_target_address_at(Address pc, Address target) {
 #ifdef DEBUG
   Instr instr1 = instr_at(pc);
 
-  // Check we have indeed the result from a li with MustUseAt true.
+  // Check we have indeed the result from a li with MustUseReg true.
   CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
         ((instr1 == 0) && ((instr2 & kOpcodeMask) == ADDIU ||
                            (instr2 & kOpcodeMask) == ORI ||
                            (instr2 & kOpcodeMask) == LUI)));
 #endif
 
-
   uint32_t rt_code = (instr2 & kRtFieldMask);
   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
   uint32_t itarget = reinterpret_cast<uint32_t>(target);
 
   if (is_int16(itarget)) {
-    // nop
-    // addiu rt zero_reg j
+    // nop.
+    // addiu rt zero_reg j.
     *p = nopInstr;
-    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
-  } else if (!(itarget & HIMask)) {
-    // nop
-    // ori rt zero_reg j
+    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
+  } else if (!(itarget & kHiMask)) {
+    // nop.
+    // ori rt zero_reg j.
     *p = nopInstr;
-    *(p+1) = ORI | rt_code | (itarget & LOMask);
-  } else if (!(itarget & LOMask)) {
-    // nop
-    // lui rt (HIMask & itarget)>>16
+    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
+  } else if (!(itarget & kImm16Mask)) {
+    // nop.
+    // lui rt (kHiMask & itarget) >> kLuiShift.
     *p = nopInstr;
-    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
   } else {
-    // lui rt (HIMask & itarget)>>16
-    // ori rt rt, (LOMask & itarget)
-    *p = LUI | rt_code | ((itarget & HIMask)>>16);
-    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+    // lui rt (kHiMask & itarget) >> kLuiShift.
+    // ori rt rt, (kImm16Mask & itarget).
+    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
  }
 
   CPU::FlushICache(pc, 2 * sizeof(int32_t));
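set_target_address_at always patches exactly two words, padding with a nop when one instruction suffices so every patch site keeps a fixed size; which sequence it picks depends on which halves of the target are non-zero. A sketch of just the selection logic, printing the chosen sequence rather than encoding it:

#include <cstdint>
#include <cstdio>

const uint32_t kImm16Mask = 0xFFFF;
const uint32_t kHiMask = 0xFFFF0000;

bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }

// Mirrors the four-way selection above: addiu for small signed targets,
// ori when the high half is zero, lui when the low half is zero, else both.
void DescribeSequence(uint32_t itarget) {
  if (is_int16(static_cast<int32_t>(itarget))) {
    printf("nop; addiu rt, zero_reg, 0x%x\n", itarget & kImm16Mask);
  } else if (!(itarget & kHiMask)) {
    printf("nop; ori rt, zero_reg, 0x%x\n", itarget & kImm16Mask);
  } else if (!(itarget & kImm16Mask)) {
    printf("nop; lui rt, 0x%x\n", itarget >> 16);
  } else {
    printf("lui rt, 0x%x; ori rt, rt, 0x%x\n", itarget >> 16,
           itarget & kImm16Mask);
  }
}

int main() {
  DescribeSequence(0x00001234);  // Fits int16: addiu.
  DescribeSequence(0x56780000);  // Low half zero: lui.
  DescribeSequence(0x12345678);  // General case: lui + ori.
}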