libv8-sgonyea 3.3.10

Files changed (500)
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +76 -0
  5. data/Rakefile +113 -0
  6. data/ext/libv8/extconf.rb +28 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +30 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/v8/.gitignore +35 -0
  12. data/lib/libv8/v8/AUTHORS +44 -0
  13. data/lib/libv8/v8/ChangeLog +2839 -0
  14. data/lib/libv8/v8/LICENSE +52 -0
  15. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  16. data/lib/libv8/v8/LICENSE.v8 +26 -0
  17. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  18. data/lib/libv8/v8/SConstruct +1478 -0
  19. data/lib/libv8/v8/build/README.txt +49 -0
  20. data/lib/libv8/v8/build/all.gyp +18 -0
  21. data/lib/libv8/v8/build/armu.gypi +32 -0
  22. data/lib/libv8/v8/build/common.gypi +144 -0
  23. data/lib/libv8/v8/build/gyp_v8 +145 -0
  24. data/lib/libv8/v8/include/v8-debug.h +395 -0
  25. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  26. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  27. data/lib/libv8/v8/include/v8-testing.h +104 -0
  28. data/lib/libv8/v8/include/v8.h +4124 -0
  29. data/lib/libv8/v8/include/v8stdint.h +53 -0
  30. data/lib/libv8/v8/preparser/SConscript +38 -0
  31. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  32. data/lib/libv8/v8/src/SConscript +368 -0
  33. data/lib/libv8/v8/src/accessors.cc +767 -0
  34. data/lib/libv8/v8/src/accessors.h +123 -0
  35. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  36. data/lib/libv8/v8/src/allocation.cc +122 -0
  37. data/lib/libv8/v8/src/allocation.h +143 -0
  38. data/lib/libv8/v8/src/api.cc +5845 -0
  39. data/lib/libv8/v8/src/api.h +574 -0
  40. data/lib/libv8/v8/src/apinatives.js +110 -0
  41. data/lib/libv8/v8/src/apiutils.h +73 -0
  42. data/lib/libv8/v8/src/arguments.h +118 -0
  43. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  44. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  45. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  46. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  47. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  48. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  49. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  50. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  51. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  52. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  53. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  54. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  55. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  56. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  57. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  58. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  59. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  60. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  61. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  62. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  63. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  64. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  65. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  66. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  67. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  68. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  69. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  70. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  71. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  72. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  73. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  74. data/lib/libv8/v8/src/array.js +1366 -0
  75. data/lib/libv8/v8/src/assembler.cc +1207 -0
  76. data/lib/libv8/v8/src/assembler.h +858 -0
  77. data/lib/libv8/v8/src/ast-inl.h +112 -0
  78. data/lib/libv8/v8/src/ast.cc +1146 -0
  79. data/lib/libv8/v8/src/ast.h +2188 -0
  80. data/lib/libv8/v8/src/atomicops.h +167 -0
  81. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  82. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  83. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  84. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  85. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  86. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  87. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  88. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  89. data/lib/libv8/v8/src/bignum.cc +768 -0
  90. data/lib/libv8/v8/src/bignum.h +140 -0
  91. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  92. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  93. data/lib/libv8/v8/src/builtins.cc +1707 -0
  94. data/lib/libv8/v8/src/builtins.h +371 -0
  95. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  96. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  97. data/lib/libv8/v8/src/cached-powers.h +65 -0
  98. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  99. data/lib/libv8/v8/src/char-predicates.h +67 -0
  100. data/lib/libv8/v8/src/checks.cc +110 -0
  101. data/lib/libv8/v8/src/checks.h +296 -0
  102. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  103. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  104. data/lib/libv8/v8/src/circular-queue.h +103 -0
  105. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  106. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  107. data/lib/libv8/v8/src/code.h +70 -0
  108. data/lib/libv8/v8/src/codegen.cc +231 -0
  109. data/lib/libv8/v8/src/codegen.h +84 -0
  110. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  111. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  112. data/lib/libv8/v8/src/compiler.cc +786 -0
  113. data/lib/libv8/v8/src/compiler.h +312 -0
  114. data/lib/libv8/v8/src/contexts.cc +347 -0
  115. data/lib/libv8/v8/src/contexts.h +391 -0
  116. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  117. data/lib/libv8/v8/src/conversions.cc +1131 -0
  118. data/lib/libv8/v8/src/conversions.h +135 -0
  119. data/lib/libv8/v8/src/counters.cc +93 -0
  120. data/lib/libv8/v8/src/counters.h +254 -0
  121. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  122. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  123. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  124. data/lib/libv8/v8/src/cpu.h +69 -0
  125. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  126. data/lib/libv8/v8/src/d8-debug.h +158 -0
  127. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  128. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  129. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  130. data/lib/libv8/v8/src/d8.cc +803 -0
  131. data/lib/libv8/v8/src/d8.gyp +91 -0
  132. data/lib/libv8/v8/src/d8.h +235 -0
  133. data/lib/libv8/v8/src/d8.js +2798 -0
  134. data/lib/libv8/v8/src/data-flow.cc +66 -0
  135. data/lib/libv8/v8/src/data-flow.h +205 -0
  136. data/lib/libv8/v8/src/date.js +1103 -0
  137. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  138. data/lib/libv8/v8/src/dateparser.cc +178 -0
  139. data/lib/libv8/v8/src/dateparser.h +266 -0
  140. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  141. data/lib/libv8/v8/src/debug-agent.h +129 -0
  142. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  143. data/lib/libv8/v8/src/debug.cc +3165 -0
  144. data/lib/libv8/v8/src/debug.h +1057 -0
  145. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  146. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  147. data/lib/libv8/v8/src/disasm.h +80 -0
  148. data/lib/libv8/v8/src/disassembler.cc +343 -0
  149. data/lib/libv8/v8/src/disassembler.h +58 -0
  150. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  151. data/lib/libv8/v8/src/diy-fp.h +117 -0
  152. data/lib/libv8/v8/src/double.h +238 -0
  153. data/lib/libv8/v8/src/dtoa.cc +103 -0
  154. data/lib/libv8/v8/src/dtoa.h +85 -0
  155. data/lib/libv8/v8/src/execution.cc +849 -0
  156. data/lib/libv8/v8/src/execution.h +297 -0
  157. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  158. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  159. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  160. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  161. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  162. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  163. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  164. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  165. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  166. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  167. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  168. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  169. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  170. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  171. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  172. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  173. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  174. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  175. data/lib/libv8/v8/src/factory.cc +1222 -0
  176. data/lib/libv8/v8/src/factory.h +442 -0
  177. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  178. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  179. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  180. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  181. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  182. data/lib/libv8/v8/src/flags.cc +551 -0
  183. data/lib/libv8/v8/src/flags.h +79 -0
  184. data/lib/libv8/v8/src/frames-inl.h +247 -0
  185. data/lib/libv8/v8/src/frames.cc +1243 -0
  186. data/lib/libv8/v8/src/frames.h +870 -0
  187. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  188. data/lib/libv8/v8/src/full-codegen.h +771 -0
  189. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  190. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  191. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  192. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  193. data/lib/libv8/v8/src/global-handles.cc +665 -0
  194. data/lib/libv8/v8/src/global-handles.h +284 -0
  195. data/lib/libv8/v8/src/globals.h +325 -0
  196. data/lib/libv8/v8/src/handles-inl.h +177 -0
  197. data/lib/libv8/v8/src/handles.cc +987 -0
  198. data/lib/libv8/v8/src/handles.h +382 -0
  199. data/lib/libv8/v8/src/hashmap.cc +230 -0
  200. data/lib/libv8/v8/src/hashmap.h +123 -0
  201. data/lib/libv8/v8/src/heap-inl.h +704 -0
  202. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  203. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  204. data/lib/libv8/v8/src/heap.cc +5930 -0
  205. data/lib/libv8/v8/src/heap.h +2268 -0
  206. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  207. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  208. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  209. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  210. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  211. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  212. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  213. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  214. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  215. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  216. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  217. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  218. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  219. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  220. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  221. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  222. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  223. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  224. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  225. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  226. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  227. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  228. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  229. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  230. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  231. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  232. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  233. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  234. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  235. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  236. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  237. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  238. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  239. data/lib/libv8/v8/src/ic-inl.h +130 -0
  240. data/lib/libv8/v8/src/ic.cc +2577 -0
  241. data/lib/libv8/v8/src/ic.h +736 -0
  242. data/lib/libv8/v8/src/inspector.cc +63 -0
  243. data/lib/libv8/v8/src/inspector.h +62 -0
  244. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  245. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  246. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  247. data/lib/libv8/v8/src/isolate.cc +1869 -0
  248. data/lib/libv8/v8/src/isolate.h +1382 -0
  249. data/lib/libv8/v8/src/json-parser.cc +504 -0
  250. data/lib/libv8/v8/src/json-parser.h +161 -0
  251. data/lib/libv8/v8/src/json.js +342 -0
  252. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  253. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  254. data/lib/libv8/v8/src/list-inl.h +212 -0
  255. data/lib/libv8/v8/src/list.h +174 -0
  256. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  257. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  258. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  259. data/lib/libv8/v8/src/lithium.cc +190 -0
  260. data/lib/libv8/v8/src/lithium.h +597 -0
  261. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  262. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  263. data/lib/libv8/v8/src/liveedit.h +180 -0
  264. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  265. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  266. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  267. data/lib/libv8/v8/src/log-inl.h +59 -0
  268. data/lib/libv8/v8/src/log-utils.cc +428 -0
  269. data/lib/libv8/v8/src/log-utils.h +231 -0
  270. data/lib/libv8/v8/src/log.cc +1993 -0
  271. data/lib/libv8/v8/src/log.h +476 -0
  272. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  273. data/lib/libv8/v8/src/macros.py +178 -0
  274. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  275. data/lib/libv8/v8/src/mark-compact.h +506 -0
  276. data/lib/libv8/v8/src/math.js +264 -0
  277. data/lib/libv8/v8/src/messages.cc +179 -0
  278. data/lib/libv8/v8/src/messages.h +113 -0
  279. data/lib/libv8/v8/src/messages.js +1096 -0
  280. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  281. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  282. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  283. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  284. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  285. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  286. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  287. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  288. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  289. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  290. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  291. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  292. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  293. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  294. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  295. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  296. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  297. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  298. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  299. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  300. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  301. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  302. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  303. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  304. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  305. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  306. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  307. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  308. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  309. data/lib/libv8/v8/src/natives.h +64 -0
  310. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  311. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  312. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  313. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  314. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  315. data/lib/libv8/v8/src/objects.cc +10585 -0
  316. data/lib/libv8/v8/src/objects.h +6838 -0
  317. data/lib/libv8/v8/src/parser.cc +4997 -0
  318. data/lib/libv8/v8/src/parser.h +765 -0
  319. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  320. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  321. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  322. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  323. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  324. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  325. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  326. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  327. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  328. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  329. data/lib/libv8/v8/src/platform-tls.h +50 -0
  330. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  331. data/lib/libv8/v8/src/platform.h +667 -0
  332. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  333. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  334. data/lib/libv8/v8/src/preparse-data.h +225 -0
  335. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  336. data/lib/libv8/v8/src/preparser.cc +1450 -0
  337. data/lib/libv8/v8/src/preparser.h +493 -0
  338. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  339. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  340. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  341. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  342. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  343. data/lib/libv8/v8/src/property.cc +105 -0
  344. data/lib/libv8/v8/src/property.h +365 -0
  345. data/lib/libv8/v8/src/proxy.js +83 -0
  346. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  347. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  348. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  349. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  350. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  351. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  352. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  353. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  354. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  355. data/lib/libv8/v8/src/regexp.js +483 -0
  356. data/lib/libv8/v8/src/rewriter.cc +360 -0
  357. data/lib/libv8/v8/src/rewriter.h +50 -0
  358. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  359. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  360. data/lib/libv8/v8/src/runtime.cc +12227 -0
  361. data/lib/libv8/v8/src/runtime.h +652 -0
  362. data/lib/libv8/v8/src/runtime.js +649 -0
  363. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  364. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  365. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  366. data/lib/libv8/v8/src/scanner-base.h +670 -0
  367. data/lib/libv8/v8/src/scanner.cc +345 -0
  368. data/lib/libv8/v8/src/scanner.h +146 -0
  369. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  370. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  371. data/lib/libv8/v8/src/scopes.cc +1150 -0
  372. data/lib/libv8/v8/src/scopes.h +507 -0
  373. data/lib/libv8/v8/src/serialize.cc +1574 -0
  374. data/lib/libv8/v8/src/serialize.h +589 -0
  375. data/lib/libv8/v8/src/shell.h +55 -0
  376. data/lib/libv8/v8/src/simulator.h +43 -0
  377. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  378. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  379. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  380. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  381. data/lib/libv8/v8/src/snapshot.h +91 -0
  382. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  383. data/lib/libv8/v8/src/spaces.cc +3145 -0
  384. data/lib/libv8/v8/src/spaces.h +2369 -0
  385. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  386. data/lib/libv8/v8/src/splay-tree.h +205 -0
  387. data/lib/libv8/v8/src/string-search.cc +41 -0
  388. data/lib/libv8/v8/src/string-search.h +568 -0
  389. data/lib/libv8/v8/src/string-stream.cc +592 -0
  390. data/lib/libv8/v8/src/string-stream.h +191 -0
  391. data/lib/libv8/v8/src/string.js +994 -0
  392. data/lib/libv8/v8/src/strtod.cc +440 -0
  393. data/lib/libv8/v8/src/strtod.h +40 -0
  394. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  395. data/lib/libv8/v8/src/stub-cache.h +924 -0
  396. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  397. data/lib/libv8/v8/src/token.cc +63 -0
  398. data/lib/libv8/v8/src/token.h +288 -0
  399. data/lib/libv8/v8/src/type-info.cc +507 -0
  400. data/lib/libv8/v8/src/type-info.h +272 -0
  401. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  402. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  403. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  404. data/lib/libv8/v8/src/unicode.cc +1624 -0
  405. data/lib/libv8/v8/src/unicode.h +280 -0
  406. data/lib/libv8/v8/src/uri.js +408 -0
  407. data/lib/libv8/v8/src/utils-inl.h +48 -0
  408. data/lib/libv8/v8/src/utils.cc +371 -0
  409. data/lib/libv8/v8/src/utils.h +800 -0
  410. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  411. data/lib/libv8/v8/src/v8-counters.h +314 -0
  412. data/lib/libv8/v8/src/v8.cc +213 -0
  413. data/lib/libv8/v8/src/v8.h +131 -0
  414. data/lib/libv8/v8/src/v8checks.h +64 -0
  415. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  416. data/lib/libv8/v8/src/v8globals.h +512 -0
  417. data/lib/libv8/v8/src/v8memory.h +82 -0
  418. data/lib/libv8/v8/src/v8natives.js +1310 -0
  419. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  420. data/lib/libv8/v8/src/v8threads.cc +464 -0
  421. data/lib/libv8/v8/src/v8threads.h +165 -0
  422. data/lib/libv8/v8/src/v8utils.h +319 -0
  423. data/lib/libv8/v8/src/variables.cc +114 -0
  424. data/lib/libv8/v8/src/variables.h +167 -0
  425. data/lib/libv8/v8/src/version.cc +116 -0
  426. data/lib/libv8/v8/src/version.h +68 -0
  427. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  428. data/lib/libv8/v8/src/vm-state.h +71 -0
  429. data/lib/libv8/v8/src/win32-headers.h +96 -0
  430. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  431. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  432. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  433. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  434. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  435. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  436. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  437. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  438. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  439. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  440. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  441. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  442. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  443. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  444. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  445. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  446. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  447. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  448. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  449. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  450. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  451. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  452. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  453. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  454. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  455. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  456. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  457. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  458. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  459. data/lib/libv8/v8/src/zone-inl.h +140 -0
  460. data/lib/libv8/v8/src/zone.cc +196 -0
  461. data/lib/libv8/v8/src/zone.h +240 -0
  462. data/lib/libv8/v8/tools/codemap.js +265 -0
  463. data/lib/libv8/v8/tools/consarray.js +93 -0
  464. data/lib/libv8/v8/tools/csvparser.js +78 -0
  465. data/lib/libv8/v8/tools/disasm.py +92 -0
  466. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  467. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  468. data/lib/libv8/v8/tools/gcmole/README +62 -0
  469. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  470. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  471. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  472. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  473. data/lib/libv8/v8/tools/grokdump.py +841 -0
  474. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  475. data/lib/libv8/v8/tools/js2c.py +364 -0
  476. data/lib/libv8/v8/tools/jsmin.py +280 -0
  477. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  478. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  479. data/lib/libv8/v8/tools/logreader.js +185 -0
  480. data/lib/libv8/v8/tools/mac-nm +18 -0
  481. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  482. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  483. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  484. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  485. data/lib/libv8/v8/tools/presubmit.py +305 -0
  486. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  487. data/lib/libv8/v8/tools/profile.js +751 -0
  488. data/lib/libv8/v8/tools/profile_view.js +219 -0
  489. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  490. data/lib/libv8/v8/tools/splaytree.js +316 -0
  491. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  492. data/lib/libv8/v8/tools/test.py +1510 -0
  493. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  494. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  495. data/lib/libv8/v8/tools/utils.py +96 -0
  496. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  497. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  498. data/lib/libv8/version.rb +5 -0
  499. data/libv8.gemspec +36 -0
  500. metadata +578 -0
data/lib/libv8/v8/src/mips/assembler-mips-inl.h
@@ -0,0 +1,312 @@
+ // Copyright (c) 1994-2006 Sun Microsystems Inc.
+ // All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // - Redistributions of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // - Redistribution in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ //
+ // - Neither the name of Sun Microsystems or the names of contributors may
+ // be used to endorse or promote products derived from this software without
+ // specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // The original source code covered by the above license above has been
+ // modified significantly by Google Inc.
+ // Copyright 2011 the V8 project authors. All rights reserved.
+
+
+ #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+ #define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+ #include "mips/assembler-mips.h"
+ #include "cpu.h"
+ #include "debug.h"
+
+
+ namespace v8 {
+ namespace internal {
+
+ // -----------------------------------------------------------------------------
+ // Operand and MemOperand.
+
+ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+ }
+
+
+ Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+ }
+
+
+ Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE;
+ }
+
+
+ Operand::Operand(Register rm) {
+ rm_ = rm;
+ }
+
+
+ bool Operand::is_reg() const {
+ return rm_.is_valid();
+ }
+
+
+
+ // -----------------------------------------------------------------------------
+ // RelocInfo.
+
+ void RelocInfo::apply(intptr_t delta) {
+ // On MIPS we do not use pc relative addressing, so we don't need to patch the
+ // code here.
+ }
+
+
+ Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return Assembler::target_address_at(pc_);
+ }
+
+
+ Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return reinterpret_cast<Address>(pc_);
+ }
+
+
+ int RelocInfo::target_address_size() {
+ return Assembler::kExternalTargetSize;
+ }
+
+
+ void RelocInfo::set_target_address(Address target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+ }
+
+
+ Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ }
+
+
+ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_)));
+ }
+
+
+ Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return &reconstructed_obj_ptr_;
+ }
+
+
+ void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ }
+
+
+ Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ return &reconstructed_adr_ptr_;
+ }
+
+
+ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<JSGlobalPropertyCell>(
+ reinterpret_cast<JSGlobalPropertyCell**>(address));
+ }
+
+
+ JSGlobalPropertyCell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ Object* object = HeapObject::FromAddress(
+ address - JSGlobalPropertyCell::kValueOffset);
+ return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ }
+
+
+ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ }
+
+
+ Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_);
+ }
+
+
+ void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ Assembler::set_target_address_at(pc_, target);
+ }
+
+
+ Object* RelocInfo::call_object() {
+ return *call_object_address();
+ }
+
+
+ Object** RelocInfo::call_object_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+ }
+
+
+ void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+ }
+
+
+ bool RelocInfo::IsPatchedReturnSequence() {
+ Instr instr0 = Assembler::instr_at(pc_);
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+ bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+ (instr1 & kOpcodeMask) == ORI &&
+ (instr2 & kOpcodeMask) == SPECIAL &&
+ (instr2 & kFunctionFieldMask) == JALR);
+ return patched_return;
+ }
+
+
+ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+ }
+
+
+ void RelocInfo::Visit(ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Object** p = target_object_address();
+ Object* orig = *p;
+ visitor->VisitPointer(p);
+ if (*p != orig) {
+ set_target_object(*p);
+ }
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(target_reference_address());
+ #ifdef ENABLE_DEBUGGER_SUPPORT
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ #endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ visitor->VisitRuntimeEntry(this);
+ }
+ }
+
+
+ template<typename StaticVisitor>
+ void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(heap, target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+ #ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ #endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+ }
+
+
+ // -----------------------------------------------------------------------------
+ // Assembler.
+
+
+ void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ }
+
+
+ void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+ }
+
+
+ void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+ }
+
+
+ } } // namespace v8::internal
+
+ #endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
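
The RelocInfo helpers in the header above (target_address(), set_target_address(), target_object(), and friends) all defer to Assembler::target_address_at() / set_target_address_at(), which on MIPS read and rewrite the two-instruction lui/ori pair that materializes a 32-bit constant; this is the same pattern IsPatchedReturnSequence() tests for with its LUI/ORI/JALR opcode checks. Those assembler routines are not part of this diff, so the following standalone sketch only illustrates the encoding idea: register fields are omitted, and the LUI (0x0F) and ORI (0x0D) opcode values are the architectural MIPS32 encodings, assumed here rather than taken from this gem's headers.

// Illustration only -- not code from the gem.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kOpcodeShift = 26;
constexpr uint32_t kLuiOpcode = 0x0Fu << kOpcodeShift;  // lui
constexpr uint32_t kOriOpcode = 0x0Du << kOpcodeShift;  // ori
constexpr uint32_t kImm16Mask = 0xFFFFu;

// Split a 32-bit address across a lui/ori pair (register fields omitted).
void EncodeAddress(uint32_t address, uint32_t pair[2]) {
  pair[0] = kLuiOpcode | (address >> 16);          // lui rt, hi16
  pair[1] = kOriOpcode | (address & kImm16Mask);   // ori rt, rt, lo16
}

// Recover the 32-bit target from the immediates of the pair.
uint32_t DecodeAddress(const uint32_t pair[2]) {
  return ((pair[0] & kImm16Mask) << 16) | (pair[1] & kImm16Mask);
}

int main() {
  uint32_t pair[2];
  EncodeAddress(0xDEADBEEFu, pair);
  std::printf("0x%08X\n", DecodeAddress(pair));  // prints 0xDEADBEEF
  return 0;
}

Patching a target address in place then amounts to rewriting the two immediate fields and flushing the instruction cache, which is why the RelocInfo setters above can work directly on pc_.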
data/lib/libv8/v8/src/mips/assembler-mips.cc
@@ -0,0 +1,1960 @@
+ // Copyright (c) 1994-2006 Sun Microsystems Inc.
+ // All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // - Redistributions of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // - Redistribution in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ //
+ // - Neither the name of Sun Microsystems or the names of contributors may
+ // be used to endorse or promote products derived from this software without
+ // specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // The original source code covered by the above license above has been
+ // modified significantly by Google Inc.
+ // Copyright 2011 the V8 project authors. All rights reserved.
+
+
+ #include "v8.h"
+
+ #if defined(V8_TARGET_ARCH_MIPS)
+
+ #include "mips/assembler-mips-inl.h"
+ #include "serialize.h"
+
+ namespace v8 {
+ namespace internal {
+
+ #ifdef DEBUG
+ bool CpuFeatures::initialized_ = false;
+ #endif
+ unsigned CpuFeatures::supported_ = 0;
+ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+ void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+ #ifdef DEBUG
+ initialized_ = true;
+ #endif
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
+ #if !defined(__mips__)
+ // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+ if (FLAG_enable_fpu) {
+ supported_ |= 1u << FPU;
+ }
+ #else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::MipsCpuHasFeature(FPU)) {
+ // This implementation also sets the FPU flags if
+ // runtime detection of FPU returns true.
+ supported_ |= 1u << FPU;
+ found_by_runtime_probing_ |= 1u << FPU;
+ }
+ #endif
+ }
+
+
+ int ToNumber(Register reg) {
+ ASSERT(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // t0
+ 9, // t1
+ 10, // t2
+ 11, // t3
+ 12, // t4
+ 13, // t5
+ 14, // t6
+ 15, // t7
+ 16, // s0
+ 17, // s1
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // t8
+ 25, // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // s8_fp
+ 31, // ra
+ };
+ return kNumbers[reg.code()];
+ }
+
+
+ Register ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8_fp,
+ ra
+ };
+ return kRegisters[num];
+ }
+
+
+ // -----------------------------------------------------------------------------
+ // Implementation of RelocInfo.
+
+ const int RelocInfo::kApplyMask = 0;
+
+
+ bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on MIPS means that it is a lui/ori instruction, and that is
+ // always the case inside code objects.
+ return true;
+ }
+
+
+ // Patch the code at the current address with the supplied instructions.
+ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+ }
+
+
+ // Patch the code at the current PC with a call to the target address.
+ // Additional guard instructions can be added if required.
+ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED_MIPS();
+ }
+
+
+ // -----------------------------------------------------------------------------
+ // Implementation of Operand and MemOperand.
+ // See assembler-mips-inl.h for inlined constructors.
+
+ Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!HEAP->InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // No relocation needed.
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE;
+ }
+ }
+
+
+ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
+ offset_ = offset;
+ }
+
+
+ // -----------------------------------------------------------------------------
+ // Specific instructions, constants, and masks.
+
+ static const int kNegOffset = 0x00008000;
+ // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+ // operations as post-increment of sp.
+ const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+ // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+ const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+ // sw(r, MemOperand(sp, 0))
+ const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+ // lw(r, MemOperand(sp, 0))
+ const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+ const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+ const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+ const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+
+ const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+ // A mask for the Rt register for push, pop, lw, sw instructions.
+ const Instr kRtMask = kRtFieldMask;
+ const Instr kLwSwInstrTypeMask = 0xffe00000;
+ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+ const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+ // Spare buffer.
+ static const int kMinimalBufferSize = 4 * KB;
+
+
+ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Setup buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ ast_id_for_reloc_info_ = kNoASTId;
+ }
+
+
+ Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+ }
+
+
+ void Assembler::GetCode(CodeDesc* desc) {
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Setup code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ }
+
+
+ void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+ }
+
+
+ void Assembler::CodeTargetAlign() {
+ // No advantage to aligning branch/call targets to more than
+ // single instruction, that I am aware of.
+ Align(4);
+ }
+
+
+ Register Assembler::GetRtReg(Instr instr) {
+ Register rt;
+ rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ return rt;
+ }
+
+
+ Register Assembler::GetRsReg(Instr instr) {
+ Register rs;
+ rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ return rs;
+ }
+
+
+ Register Assembler::GetRdReg(Instr instr) {
+ Register rd;
+ rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ return rd;
+ }
+
+
+ uint32_t Assembler::GetRt(Instr instr) {
+ return (instr & kRtFieldMask) >> kRtShift;
+ }
+
+
+ uint32_t Assembler::GetRtField(Instr instr) {
+ return instr & kRtFieldMask;
+ }
+
+
+ uint32_t Assembler::GetRs(Instr instr) {
+ return (instr & kRsFieldMask) >> kRsShift;
+ }
+
+
+ uint32_t Assembler::GetRsField(Instr instr) {
+ return instr & kRsFieldMask;
+ }
+
+
+ uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+ }
+
+
+ uint32_t Assembler::GetRdField(Instr instr) {
+ return instr & kRdFieldMask;
+ }
+
+
+ uint32_t Assembler::GetSa(Instr instr) {
+ return (instr & kSaFieldMask) >> kSaShift;
+ }
+
+
+ uint32_t Assembler::GetSaField(Instr instr) {
+ return instr & kSaFieldMask;
+ }
+
+
+ uint32_t Assembler::GetOpcodeField(Instr instr) {
+ return instr & kOpcodeMask;
+ }
+
+
+ uint32_t Assembler::GetImmediate16(Instr instr) {
+ return instr & kImm16Mask;
+ }
+
+
+ uint32_t Assembler::GetLabelConst(Instr instr) {
+ return instr & ~kImm16Mask;
+ }
+
+
+ bool Assembler::IsPop(Instr instr) {
+ return (instr & ~kRtMask) == kPopRegPattern;
+ }
+
+
+ bool Assembler::IsPush(Instr instr) {
+ return (instr & ~kRtMask) == kPushRegPattern;
+ }
+
+
+ bool Assembler::IsSwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+ }
+
+
+ bool Assembler::IsLwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+ }
+
+
+ bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kSwRegFpNegOffsetPattern);
+ }
+
+
+ bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kLwRegFpNegOffsetPattern);
+ }
+
+
+ // Labels refer to positions in the (to be) generated code.
+ // There are bound, linked, and unused labels.
+ //
+ // Bound labels refer to known positions in the already
+ // generated code. pos() is the position the label refers to.
+ //
+ // Linked labels refer to unknown positions in the code
+ // to be generated; pos() is the position of the last
+ // instruction using the label.
+
+ // The link chain is terminated by a value in the instruction of -1,
+ // which is an otherwise illegal value (branch -1 is inf loop).
+ // The instruction 16-bit offset field addresses 32-bit words, but in
+ // code is conv to an 18-bit value addressing bytes, hence the -4 value.
+
+ const int kEndOfChain = -4;
+
+
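The comment above explains why kEndOfChain is -4: the 16-bit branch field counts 32-bit words, -1 is an otherwise illegal branch (an infinite loop), and converting that word offset to bytes gives -4. Assembler::target_at() further down recovers the byte offset with the expression ((instr & kImm16Mask) << 16) >> 14, which relies on an arithmetic right shift, as its own comment warns. The following standalone check of that trick is illustration only, not code from the gem:

// Illustration only -- not code from the gem.
#include <cassert>
#include <cstdint>

// Sign-extend the 16-bit word offset and scale it to bytes in one step:
// place the field in the top half of the word, then arithmetic-shift back
// down by 14 (16 for sign extension minus 2 for the *4 word-to-byte scale).
// Assumes the compiler uses arithmetic right shifts for signed integers,
// the same assumption the V8 comment calls out.
int32_t BranchByteOffset(uint32_t instr) {
  const uint32_t kImm16Mask = 0xFFFFu;
  return static_cast<int32_t>((instr & kImm16Mask) << 16) >> 14;
}

int main() {
  assert(BranchByteOffset(0x00000001u) == 4);           // +1 word -> +4 bytes
  assert(BranchByteOffset(0x0000FFFFu) == -4);          // -1 word -> -4 bytes (kEndOfChain)
  assert(BranchByteOffset(0x00007FFFu) == 0x7FFF * 4);  // largest forward offset
  return 0;
}
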
449
+ bool Assembler::IsBranch(Instr instr) {
450
+ uint32_t opcode = GetOpcodeField(instr);
451
+ uint32_t rt_field = GetRtField(instr);
452
+ uint32_t rs_field = GetRsField(instr);
453
+ uint32_t label_constant = GetLabelConst(instr);
454
+ // Checks if the instruction is a branch.
455
+ return opcode == BEQ ||
456
+ opcode == BNE ||
457
+ opcode == BLEZ ||
458
+ opcode == BGTZ ||
459
+ opcode == BEQL ||
460
+ opcode == BNEL ||
461
+ opcode == BLEZL ||
462
+ opcode == BGTZL ||
463
+ (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
464
+ rt_field == BLTZAL || rt_field == BGEZAL)) ||
465
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
466
+ label_constant == 0; // Emitted label const in reg-exp engine.
467
+ }
468
+
469
+
470
+ bool Assembler::IsBeq(Instr instr) {
471
+ return GetOpcodeField(instr) == BEQ;
472
+ }
473
+
474
+
475
+ bool Assembler::IsBne(Instr instr) {
476
+ return GetOpcodeField(instr) == BNE;
477
+ }
478
+
479
+
480
+ bool Assembler::IsNop(Instr instr, unsigned int type) {
481
+ // See Assembler::nop(type).
482
+ ASSERT(type < 32);
483
+ uint32_t opcode = GetOpcodeField(instr);
484
+ uint32_t rt = GetRt(instr);
485
+ uint32_t rs = GetRs(instr);
486
+ uint32_t sa = GetSa(instr);
487
+
488
+ // nop(type) == sll(zero_reg, zero_reg, type);
489
+ // Technically all these values will be 0 but
490
+ // this makes more sense to the reader.
491
+
492
+ bool ret = (opcode == SLL &&
493
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
494
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
495
+ sa == type);
496
+
497
+ return ret;
498
+ }
499
+
500
+
501
+ int32_t Assembler::GetBranchOffset(Instr instr) {
502
+ ASSERT(IsBranch(instr));
503
+ return ((int16_t)(instr & kImm16Mask)) << 2;
504
+ }
505
+
506
+
507
+ bool Assembler::IsLw(Instr instr) {
508
+ return ((instr & kOpcodeMask) == LW);
509
+ }
510
+
511
+
512
+ int16_t Assembler::GetLwOffset(Instr instr) {
513
+ ASSERT(IsLw(instr));
514
+ return ((instr & kImm16Mask));
515
+ }
516
+
517
+
518
+ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
519
+ ASSERT(IsLw(instr));
520
+
521
+ // We actually create a new lw instruction based on the original one.
522
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
523
+ | (offset & kImm16Mask);
524
+
525
+ return temp_instr;
526
+ }
527
+
528
+
529
+ bool Assembler::IsSw(Instr instr) {
530
+ return ((instr & kOpcodeMask) == SW);
531
+ }
532
+
533
+
534
+ Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
535
+ ASSERT(IsSw(instr));
536
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
537
+ }
538
+
539
+
540
+ bool Assembler::IsAddImmediate(Instr instr) {
541
+ return ((instr & kOpcodeMask) == ADDIU);
542
+ }
543
+
544
+
545
+ Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
546
+ ASSERT(IsAddImmediate(instr));
547
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
548
+ }
549
+
550
+
551
+ bool Assembler::IsAndImmediate(Instr instr) {
552
+ return GetOpcodeField(instr) == ANDI;
553
+ }
554
+
555
+
556
+ int Assembler::target_at(int32_t pos) {
557
+ Instr instr = instr_at(pos);
558
+ if ((instr & ~kImm16Mask) == 0) {
559
+ // Emitted label constant, not part of a branch.
560
+ if (instr == 0) {
561
+ return kEndOfChain;
562
+ } else {
563
+ int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
564
+ return (imm18 + pos);
565
+ }
566
+ }
567
+ // Check we have a branch instruction.
568
+ ASSERT(IsBranch(instr));
569
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
570
+ // the compiler uses arithmectic shifts for signed integers.
571
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
572
+
573
+ if (imm18 == kEndOfChain) {
574
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
575
+ return kEndOfChain;
576
+ } else {
577
+ return pos + kBranchPCOffset + imm18;
578
+ }
579
+ }
580
+
581
+
582
+ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
583
+ Instr instr = instr_at(pos);
584
+ if ((instr & ~kImm16Mask) == 0) {
585
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
586
+ // Emitted label constant, not part of a branch.
587
+ // Make label relative to Code* of generated Code object.
588
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
589
+ return;
590
+ }
591
+
592
+ ASSERT(IsBranch(instr));
593
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
594
+ ASSERT((imm18 & 3) == 0);
595
+
596
+ instr &= ~kImm16Mask;
597
+ int32_t imm16 = imm18 >> 2;
598
+ ASSERT(is_int16(imm16));
599
+
600
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
601
+ }
602
+
603
+
604
+ void Assembler::print(Label* L) {
605
+ if (L->is_unused()) {
606
+ PrintF("unused label\n");
607
+ } else if (L->is_bound()) {
608
+ PrintF("bound label to %d\n", L->pos());
609
+ } else if (L->is_linked()) {
610
+ Label l = *L;
611
+ PrintF("unbound label");
612
+ while (l.is_linked()) {
613
+ PrintF("@ %d ", l.pos());
614
+ Instr instr = instr_at(l.pos());
615
+ if ((instr & ~kImm16Mask) == 0) {
616
+ PrintF("value\n");
617
+ } else {
618
+ PrintF("%d\n", instr);
619
+ }
620
+ next(&l);
621
+ }
622
+ } else {
623
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
624
+ }
625
+ }
626
+
627
+
628
+ void Assembler::bind_to(Label* L, int pos) {
629
+ ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
630
+ while (L->is_linked()) {
631
+ int32_t fixup_pos = L->pos();
632
+ int32_t dist = pos - fixup_pos;
633
+ next(L); // Call next before overwriting link with target at fixup_pos.
634
+ if (dist > kMaxBranchOffset) {
635
+ do {
636
+ int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
637
+ if (kInvalidSlotPos == trampoline_pos) {
638
+ // Internal error.
639
+ return;
640
+ }
641
+ ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
642
+ target_at_put(fixup_pos, trampoline_pos);
643
+ fixup_pos = trampoline_pos;
644
+ dist = pos - fixup_pos;
645
+ } while (dist > kMaxBranchOffset);
646
+ } else if (dist < -kMaxBranchOffset) {
647
+ do {
648
+ int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
649
+ if (kInvalidSlotPos == trampoline_pos) {
650
+ // Internal error.
651
+ return;
652
+ }
653
+ ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
654
+ target_at_put(fixup_pos, trampoline_pos);
655
+ fixup_pos = trampoline_pos;
656
+ dist = pos - fixup_pos;
657
+ } while (dist < -kMaxBranchOffset);
658
+ }
659
+ target_at_put(fixup_pos, pos);
660
+ }
661
+ L->bind_to(pos);
662
+
663
+ // Keep track of the last bound label so we don't eliminate any instructions
664
+ // before a bound label.
665
+ if (pos > last_bound_pos_)
666
+ last_bound_pos_ = pos;
667
+ }
668
+
669
+
670
+ void Assembler::link_to(Label* L, Label* appendix) {
671
+ if (appendix->is_linked()) {
672
+ if (L->is_linked()) {
673
+ // Append appendix to L's list.
674
+ int fixup_pos;
675
+ int link = L->pos();
676
+ do {
677
+ fixup_pos = link;
678
+ link = target_at(fixup_pos);
679
+ } while (link > 0);
680
+ ASSERT(link == kEndOfChain);
681
+ target_at_put(fixup_pos, appendix->pos());
682
+ } else {
683
+ // L is empty, simply use appendix.
684
+ *L = *appendix;
685
+ }
686
+ }
687
+ appendix->Unuse(); // Appendix should not be used anymore.
688
+ }
689
+
690
+
691
+ void Assembler::bind(Label* L) {
692
+ ASSERT(!L->is_bound()); // Label can only be bound once.
693
+ bind_to(L, pc_offset());
694
+ }
695
+
696
+
697
+ void Assembler::next(Label* L) {
698
+ ASSERT(L->is_linked());
699
+ int link = target_at(L->pos());
700
+ ASSERT(link > 0 || link == kEndOfChain);
701
+ if (link == kEndOfChain) {
702
+ L->Unuse();
703
+ } else if (link > 0) {
704
+ L->link_to(link);
705
+ }
706
+ }
707
+
708
+
709
+ // We have to use a temporary register for things that can be relocated even
710
+ // if they can be encoded in MIPS's 16 bits of immediate-offset instruction
711
+ // space. There is no guarantee that the relocated location can be similarly
712
+ // encoded.
713
+ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
714
+ return rmode != RelocInfo::NONE;
715
+ }
716
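The comment above boils down to a one-line predicate on the relocation mode. A stripped-down sketch of that rule, using a stand-in enum rather than V8's RelocInfo::Mode:

#include <cassert>

// Stand-in for RelocInfo::Mode; the names here are assumptions for the sketch.
enum class RelocModeSketch { NONE, CODE_TARGET, EXTERNAL_REFERENCE };

// Only values with no relocation mode may be folded into a 16-bit immediate;
// anything relocatable must be materialized in a register (lui/ori), because
// the relocated value might no longer fit.
static bool MustUseRegSketch(RelocModeSketch rmode) {
  return rmode != RelocModeSketch::NONE;
}

int main() {
  assert(!MustUseRegSketch(RelocModeSketch::NONE));               // Immediate is fine.
  assert(MustUseRegSketch(RelocModeSketch::EXTERNAL_REFERENCE));  // Load via register.
  return 0;
}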
+
717
+
718
+ void Assembler::GenInstrRegister(Opcode opcode,
719
+ Register rs,
720
+ Register rt,
721
+ Register rd,
722
+ uint16_t sa,
723
+ SecondaryField func) {
724
+ ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
725
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
726
+ | (rd.code() << kRdShift) | (sa << kSaShift) | func;
727
+ emit(instr);
728
+ }
729
+
730
+
731
+ void Assembler::GenInstrRegister(Opcode opcode,
732
+ Register rs,
733
+ Register rt,
734
+ uint16_t msb,
735
+ uint16_t lsb,
736
+ SecondaryField func) {
737
+ ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
738
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
739
+ | (msb << kRdShift) | (lsb << kSaShift) | func;
740
+ emit(instr);
741
+ }
742
+
743
+
744
+ void Assembler::GenInstrRegister(Opcode opcode,
745
+ SecondaryField fmt,
746
+ FPURegister ft,
747
+ FPURegister fs,
748
+ FPURegister fd,
749
+ SecondaryField func) {
750
+ ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
751
+ ASSERT(CpuFeatures::IsEnabled(FPU));
752
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
753
+ | (fd.code() << kFdShift) | func;
754
+ emit(instr);
755
+ }
756
+
757
+
758
+ void Assembler::GenInstrRegister(Opcode opcode,
759
+ SecondaryField fmt,
760
+ Register rt,
761
+ FPURegister fs,
762
+ FPURegister fd,
763
+ SecondaryField func) {
764
+ ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
765
+ ASSERT(CpuFeatures::IsEnabled(FPU));
766
+ Instr instr = opcode | fmt | (rt.code() << kRtShift)
767
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
768
+ emit(instr);
769
+ }
770
+
771
+
772
+ void Assembler::GenInstrRegister(Opcode opcode,
773
+ SecondaryField fmt,
774
+ Register rt,
775
+ FPUControlRegister fs,
776
+ SecondaryField func) {
777
+ ASSERT(fs.is_valid() && rt.is_valid());
778
+ ASSERT(CpuFeatures::IsEnabled(FPU));
779
+ Instr instr =
780
+ opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
781
+ emit(instr);
782
+ }
783
+
784
+
785
+ // Instructions with immediate value.
786
+ // Registers are in the order of the instruction encoding, from left to right.
787
+ void Assembler::GenInstrImmediate(Opcode opcode,
788
+ Register rs,
789
+ Register rt,
790
+ int32_t j) {
791
+ ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
792
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
793
+ | (j & kImm16Mask);
794
+ emit(instr);
795
+ }
796
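An I-type word is simply the OR of its bit fields, which is all GenInstrImmediate() does before emit(). A self-contained sketch for addiu (the field shifts and opcode value are assumptions mirroring constants-mips.h):

#include <cassert>
#include <cstdint>

// Assumed field positions and opcode.
static const int      kRsShiftSketch     = 21;
static const int      kRtShiftSketch     = 16;
static const uint32_t kImm16MaskSketch   = 0xffff;
static const uint32_t kAddiuOpcodeSketch = 0x9u << 26;  // addiu (001001).

// Mirrors what GenInstrImmediate(ADDIU, rs, rt, j) assembles.
static uint32_t EncodeAddiu(uint32_t rs, uint32_t rt, int32_t imm) {
  return kAddiuOpcodeSketch | (rs << kRsShiftSketch) | (rt << kRtShiftSketch)
      | (static_cast<uint32_t>(imm) & kImm16MaskSketch);
}

int main() {
  // addiu $t0, $t1, 8   ($t0 = reg 8, $t1 = reg 9).
  assert(EncodeAddiu(9, 8, 8) == 0x25280008u);
  // Negative immediates are truncated to their low 16 bits by the mask.
  assert((EncodeAddiu(9, 8, -1) & kImm16MaskSketch) == 0xffffu);
  return 0;
}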
+
797
+
798
+ void Assembler::GenInstrImmediate(Opcode opcode,
799
+ Register rs,
800
+ SecondaryField SF,
801
+ int32_t j) {
802
+ ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
803
+ Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
804
+ emit(instr);
805
+ }
806
+
807
+
808
+ void Assembler::GenInstrImmediate(Opcode opcode,
809
+ Register rs,
810
+ FPURegister ft,
811
+ int32_t j) {
812
+ ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
813
+ ASSERT(CpuFeatures::IsEnabled(FPU));
814
+ Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
815
+ | (j & kImm16Mask);
816
+ emit(instr);
817
+ }
818
+
819
+
820
+ // Registers are in the order of the instruction encoding, from left to right.
821
+ void Assembler::GenInstrJump(Opcode opcode,
822
+ uint32_t address) {
823
+ BlockTrampolinePoolScope block_trampoline_pool(this);
824
+ ASSERT(is_uint26(address));
825
+ Instr instr = opcode | address;
826
+ emit(instr);
827
+ BlockTrampolinePoolFor(1); // For associated delay slot.
828
+ }
829
+
830
+
831
+ // Returns the next free label entry from the next (or previous) trampoline pool.
832
+ int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
833
+ int trampoline_count = trampolines_.length();
834
+ int32_t label_entry = 0;
835
+ ASSERT(trampoline_count > 0);
836
+
837
+ if (next_pool) {
838
+ for (int i = 0; i < trampoline_count; i++) {
839
+ if (trampolines_[i].start() > pos) {
840
+ label_entry = trampolines_[i].take_label();
841
+ break;
842
+ }
843
+ }
844
+ } else { // Caller needs a label entry from the previous pool.
845
+ for (int i = trampoline_count-1; i >= 0; i--) {
846
+ if (trampolines_[i].end() < pos) {
847
+ label_entry = trampolines_[i].take_label();
848
+ break;
849
+ }
850
+ }
851
+ }
852
+ return label_entry;
853
+ }
854
+
855
+
856
+ // Returns the next free trampoline entry from the next (or previous) trampoline pool.
857
+ int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
858
+ int trampoline_count = trampolines_.length();
859
+ int32_t trampoline_entry = kInvalidSlotPos;
860
+ ASSERT(trampoline_count > 0);
861
+
862
+ if (!internal_trampoline_exception_) {
863
+ if (next_pool) {
864
+ for (int i = 0; i < trampoline_count; i++) {
865
+ if (trampolines_[i].start() > pos) {
866
+ trampoline_entry = trampolines_[i].take_slot();
867
+ break;
868
+ }
869
+ }
870
+ } else { // Caller needs a trampoline entry from the previous pool.
871
+ for (int i = trampoline_count-1; i >= 0; i--) {
872
+ if (trampolines_[i].end() < pos) {
873
+ trampoline_entry = trampolines_[i].take_slot();
874
+ break;
875
+ }
876
+ }
877
+ }
878
+ if (kInvalidSlotPos == trampoline_entry) {
879
+ internal_trampoline_exception_ = true;
880
+ }
881
+ }
882
+ return trampoline_entry;
883
+ }
884
+
885
+
886
+ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
887
+ int32_t target_pos;
888
+ int32_t pc_offset_v = pc_offset();
889
+
890
+ if (L->is_bound()) {
891
+ target_pos = L->pos();
892
+ int32_t dist = pc_offset_v - target_pos;
893
+ if (dist > kMaxBranchOffset) {
894
+ do {
895
+ int32_t trampoline_pos = get_trampoline_entry(target_pos);
896
+ if (kInvalidSlotPos == trampoline_pos) {
897
+ // Internal error.
898
+ return 0;
899
+ }
900
+ ASSERT((trampoline_pos - target_pos) > 0);
901
+ ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
902
+ target_at_put(trampoline_pos, target_pos);
903
+ target_pos = trampoline_pos;
904
+ dist = pc_offset_v - target_pos;
905
+ } while (dist > kMaxBranchOffset);
906
+ } else if (dist < -kMaxBranchOffset) {
907
+ do {
908
+ int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
909
+ if (kInvalidSlotPos == trampoline_pos) {
910
+ // Internal error.
911
+ return 0;
912
+ }
913
+ ASSERT((target_pos - trampoline_pos) > 0);
914
+ ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
915
+ target_at_put(trampoline_pos, target_pos);
916
+ target_pos = trampoline_pos;
917
+ dist = pc_offset_v - target_pos;
918
+ } while (dist < -kMaxBranchOffset);
919
+ }
920
+ } else {
921
+ if (L->is_linked()) {
922
+ target_pos = L->pos(); // L's link.
923
+ int32_t dist = pc_offset_v - target_pos;
924
+ if (dist > kMaxBranchOffset) {
925
+ do {
926
+ int32_t label_pos = get_label_entry(target_pos);
927
+ ASSERT((label_pos - target_pos) < kMaxBranchOffset);
928
+ label_at_put(L, label_pos);
929
+ target_pos = label_pos;
930
+ dist = pc_offset_v - target_pos;
931
+ } while (dist > kMaxBranchOffset);
932
+ } else if (dist < -kMaxBranchOffset) {
933
+ do {
934
+ int32_t label_pos = get_label_entry(target_pos, false);
935
+ ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
936
+ label_at_put(L, label_pos);
937
+ target_pos = label_pos;
938
+ dist = pc_offset_v - target_pos;
939
+ } while (dist < -kMaxBranchOffset);
940
+ }
941
+ L->link_to(pc_offset());
942
+ } else {
943
+ L->link_to(pc_offset());
944
+ return kEndOfChain;
945
+ }
946
+ }
947
+
948
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
949
+ ASSERT((offset & 3) == 0);
950
+ ASSERT(is_int16(offset >> 2));
951
+
952
+ return offset;
953
+ }
954
+
955
+
956
+ void Assembler::label_at_put(Label* L, int at_offset) {
957
+ int target_pos;
958
+ if (L->is_bound()) {
959
+ target_pos = L->pos();
960
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
961
+ } else {
962
+ if (L->is_linked()) {
963
+ target_pos = L->pos(); // L's link.
964
+ int32_t imm18 = target_pos - at_offset;
965
+ ASSERT((imm18 & 3) == 0);
966
+ int32_t imm16 = imm18 >> 2;
967
+ ASSERT(is_int16(imm16));
968
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
969
+ } else {
970
+ target_pos = kEndOfChain;
971
+ instr_at_put(at_offset, 0);
972
+ }
973
+ L->link_to(at_offset);
974
+ }
975
+ }
976
+
977
+
978
+ //------- Branch and jump instructions --------
979
+
980
+ void Assembler::b(int16_t offset) {
981
+ beq(zero_reg, zero_reg, offset);
982
+ }
983
+
984
+
985
+ void Assembler::bal(int16_t offset) {
986
+ positions_recorder()->WriteRecordedPositions();
987
+ bgezal(zero_reg, offset);
988
+ }
989
+
990
+
991
+ void Assembler::beq(Register rs, Register rt, int16_t offset) {
992
+ BlockTrampolinePoolScope block_trampoline_pool(this);
993
+ GenInstrImmediate(BEQ, rs, rt, offset);
994
+ BlockTrampolinePoolFor(1); // For associated delay slot.
995
+ }
996
+
997
+
998
+ void Assembler::bgez(Register rs, int16_t offset) {
999
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1000
+ GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1001
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1002
+ }
1003
+
1004
+
1005
+ void Assembler::bgezal(Register rs, int16_t offset) {
1006
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1007
+ positions_recorder()->WriteRecordedPositions();
1008
+ GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1009
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1010
+ }
1011
+
1012
+
1013
+ void Assembler::bgtz(Register rs, int16_t offset) {
1014
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1015
+ GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1016
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1017
+ }
1018
+
1019
+
1020
+ void Assembler::blez(Register rs, int16_t offset) {
1021
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1022
+ GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1023
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1024
+ }
1025
+
1026
+
1027
+ void Assembler::bltz(Register rs, int16_t offset) {
1028
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1029
+ GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1030
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1031
+ }
1032
+
1033
+
1034
+ void Assembler::bltzal(Register rs, int16_t offset) {
1035
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1036
+ positions_recorder()->WriteRecordedPositions();
1037
+ GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1038
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1039
+ }
1040
+
1041
+
1042
+ void Assembler::bne(Register rs, Register rt, int16_t offset) {
1043
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1044
+ GenInstrImmediate(BNE, rs, rt, offset);
1045
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1046
+ }
1047
+
1048
+
1049
+ void Assembler::j(int32_t target) {
1050
+ ASSERT(is_uint28(target) && ((target & 3) == 0));
1051
+ GenInstrJump(J, target >> 2);
1052
+ }
1053
+
1054
+
1055
+ void Assembler::jr(Register rs) {
1056
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1057
+ if (rs.is(ra)) {
1058
+ positions_recorder()->WriteRecordedPositions();
1059
+ }
1060
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1061
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1062
+ }
1063
+
1064
+
1065
+ void Assembler::jal(int32_t target) {
1066
+ positions_recorder()->WriteRecordedPositions();
1067
+ ASSERT(is_uint28(target) && ((target & 3) == 0));
1068
+ GenInstrJump(JAL, target >> 2);
1069
+ }
1070
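j() and jal() accept a 28-bit, word-aligned target and store only the 26-bit word index; on MIPS the top four address bits are taken from the PC of the delay slot, which is why anything further away has to go through jr/jalr. A small sketch of the encoding constraint:

#include <cassert>
#include <cstdint>

// J-type targets: 4-byte aligned, within the current 256 MB region, stored >> 2.
static uint32_t EncodeJumpIndex(uint32_t target) {
  assert((target & 3u) == 0);   // Word aligned.
  assert(target < (1u << 28));  // Fits the 256 MB region.
  return target >> 2;           // 26-bit word index.
}

int main() {
  assert(EncodeJumpIndex(0x00000004u) == 1u);
  assert(EncodeJumpIndex(0x0ffffffcu) == 0x03ffffffu);  // Largest encodable target.
  return 0;
}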
+
1071
+
1072
+ void Assembler::jalr(Register rs, Register rd) {
1073
+ BlockTrampolinePoolScope block_trampoline_pool(this);
1074
+ positions_recorder()->WriteRecordedPositions();
1075
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1076
+ BlockTrampolinePoolFor(1); // For associated delay slot.
1077
+ }
1078
+
1079
+
1080
+ //-------Data-processing-instructions---------
1081
+
1082
+ // Arithmetic.
1083
+
1084
+ void Assembler::addu(Register rd, Register rs, Register rt) {
1085
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1086
+ }
1087
+
1088
+
1089
+ void Assembler::addiu(Register rd, Register rs, int32_t j) {
1090
+ GenInstrImmediate(ADDIU, rs, rd, j);
1091
+ }
1092
+
1093
+
1094
+ void Assembler::subu(Register rd, Register rs, Register rt) {
1095
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1096
+ }
1097
+
1098
+
1099
+ void Assembler::mul(Register rd, Register rs, Register rt) {
1100
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1101
+ }
1102
+
1103
+
1104
+ void Assembler::mult(Register rs, Register rt) {
1105
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1106
+ }
1107
+
1108
+
1109
+ void Assembler::multu(Register rs, Register rt) {
1110
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1111
+ }
1112
+
1113
+
1114
+ void Assembler::div(Register rs, Register rt) {
1115
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1116
+ }
1117
+
1118
+
1119
+ void Assembler::divu(Register rs, Register rt) {
1120
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1121
+ }
1122
+
1123
+
1124
+ // Logical.
1125
+
1126
+ void Assembler::and_(Register rd, Register rs, Register rt) {
1127
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1128
+ }
1129
+
1130
+
1131
+ void Assembler::andi(Register rt, Register rs, int32_t j) {
1132
+ GenInstrImmediate(ANDI, rs, rt, j);
1133
+ }
1134
+
1135
+
1136
+ void Assembler::or_(Register rd, Register rs, Register rt) {
1137
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1138
+ }
1139
+
1140
+
1141
+ void Assembler::ori(Register rt, Register rs, int32_t j) {
1142
+ GenInstrImmediate(ORI, rs, rt, j);
1143
+ }
1144
+
1145
+
1146
+ void Assembler::xor_(Register rd, Register rs, Register rt) {
1147
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1148
+ }
1149
+
1150
+
1151
+ void Assembler::xori(Register rt, Register rs, int32_t j) {
1152
+ GenInstrImmediate(XORI, rs, rt, j);
1153
+ }
1154
+
1155
+
1156
+ void Assembler::nor(Register rd, Register rs, Register rt) {
1157
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1158
+ }
1159
+
1160
+
1161
+ // Shifts.
1162
+ void Assembler::sll(Register rd,
1163
+ Register rt,
1164
+ uint16_t sa,
1165
+ bool coming_from_nop) {
1166
+ // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1167
+ // generated using the sll instruction. They must be generated using
1168
+ // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1169
+ // instructions.
1170
+ ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1171
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1172
+ }
1173
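sll() guards against the zero_reg/zero_reg form because "sll zero_reg, zero_reg, sa" is how MIPS spells nop and the code markers built on it, and those must go through the dedicated pseudo-instructions. A sketch of the encoding (field shifts are assumptions mirroring constants-mips.h; SPECIAL and SLL both encode as zero):

#include <cassert>
#include <cstdint>

// Assumed field positions: rt at bit 16, rd at bit 11, sa at bit 6.
static uint32_t EncodeSllSketch(uint32_t rt, uint32_t rd, uint32_t sa) {
  return (rt << 16) | (rd << 11) | (sa << 6);  // Opcode and function fields are 0.
}

int main() {
  assert(EncodeSllSketch(0, 0, 0) == 0u);           // The all-zero word is nop.
  assert(EncodeSllSketch(8, 8, 2) == 0x00084080u);  // sll $t0, $t0, 2.
  return 0;
}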
+
1174
+
1175
+ void Assembler::sllv(Register rd, Register rt, Register rs) {
1176
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1177
+ }
1178
+
1179
+
1180
+ void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1181
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1182
+ }
1183
+
1184
+
1185
+ void Assembler::srlv(Register rd, Register rt, Register rs) {
1186
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1187
+ }
1188
+
1189
+
1190
+ void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1191
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1192
+ }
1193
+
1194
+
1195
+ void Assembler::srav(Register rd, Register rt, Register rs) {
1196
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1197
+ }
1198
+
1199
+
1200
+ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1201
+ // Should be called via MacroAssembler::Ror.
1202
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1203
+ ASSERT(mips32r2);
1204
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1205
+ | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1206
+ emit(instr);
1207
+ }
1208
+
1209
+
1210
+ void Assembler::rotrv(Register rd, Register rt, Register rs) {
1211
+ // Should be called via MacroAssembler::Ror.
1212
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
1213
+ ASSERT(mips32r2);
1214
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1215
+ | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1216
+ emit(instr);
1217
+ }
1218
+
1219
+
1220
+ //------------Memory-instructions-------------
1221
+
1222
+ // Helper for base-reg + offset, when offset is larger than int16.
1223
+ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1224
+ ASSERT(!src.rm().is(at));
1225
+ lui(at, src.offset_ >> kLuiShift);
1226
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1227
+ addu(at, at, src.rm()); // Add base register.
1228
+ }
1229
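LoadRegPlusOffsetToAt() splits an offset that does not fit 16 bits into a lui of the upper half and an ori of the lower half. A minimal sketch checking that the split reproduces any 32-bit offset, negative ones included (lui keeps only the low 16 bits of its immediate; ori zero-extends):

#include <cassert>
#include <cstdint>

static uint32_t LuiOriReassemble(int32_t offset) {
  uint32_t hi = (static_cast<uint32_t>(offset) >> 16) & 0xffff;  // lui immediate.
  uint32_t lo = static_cast<uint32_t>(offset) & 0xffff;          // ori immediate.
  return (hi << 16) | lo;
}

int main() {
  assert(LuiOriReassemble(0x12345678) == 0x12345678u);
  assert(LuiOriReassemble(-4) == 0xfffffffcu);  // Negative offsets survive the split.
  assert(LuiOriReassemble(0x7fff8000) == 0x7fff8000u);
  return 0;
}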
+
1230
+
1231
+ void Assembler::lb(Register rd, const MemOperand& rs) {
1232
+ if (is_int16(rs.offset_)) {
1233
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1234
+ } else { // Offset > 16 bits, use multiple instructions to load.
1235
+ LoadRegPlusOffsetToAt(rs);
1236
+ GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1237
+ }
1238
+ }
1239
+
1240
+
1241
+ void Assembler::lbu(Register rd, const MemOperand& rs) {
1242
+ if (is_int16(rs.offset_)) {
1243
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1244
+ } else { // Offset > 16 bits, use multiple instructions to load.
1245
+ LoadRegPlusOffsetToAt(rs);
1246
+ GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1247
+ }
1248
+ }
1249
+
1250
+
1251
+ void Assembler::lh(Register rd, const MemOperand& rs) {
1252
+ if (is_int16(rs.offset_)) {
1253
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1254
+ } else { // Offset > 16 bits, use multiple instructions to load.
1255
+ LoadRegPlusOffsetToAt(rs);
1256
+ GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1257
+ }
1258
+ }
1259
+
1260
+
1261
+ void Assembler::lhu(Register rd, const MemOperand& rs) {
1262
+ if (is_int16(rs.offset_)) {
1263
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1264
+ } else { // Offset > 16 bits, use multiple instructions to load.
1265
+ LoadRegPlusOffsetToAt(rs);
1266
+ GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1267
+ }
1268
+ }
1269
+
1270
+
1271
+ void Assembler::lw(Register rd, const MemOperand& rs) {
1272
+ if (is_int16(rs.offset_)) {
1273
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1274
+ } else { // Offset > 16 bits, use multiple instructions to load.
1275
+ LoadRegPlusOffsetToAt(rs);
1276
+ GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1277
+ }
1278
+ }
1279
+
1280
+
1281
+ void Assembler::lwl(Register rd, const MemOperand& rs) {
1282
+ GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1283
+ }
1284
+
1285
+
1286
+ void Assembler::lwr(Register rd, const MemOperand& rs) {
1287
+ GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1288
+ }
1289
+
1290
+
1291
+ void Assembler::sb(Register rd, const MemOperand& rs) {
1292
+ if (is_int16(rs.offset_)) {
1293
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1294
+ } else { // Offset > 16 bits, use multiple instructions to store.
1295
+ LoadRegPlusOffsetToAt(rs);
1296
+ GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1297
+ }
1298
+ }
1299
+
1300
+
1301
+ void Assembler::sh(Register rd, const MemOperand& rs) {
1302
+ if (is_int16(rs.offset_)) {
1303
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1304
+ } else { // Offset > 16 bits, use multiple instructions to store.
1305
+ LoadRegPlusOffsetToAt(rs);
1306
+ GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1307
+ }
1308
+ }
1309
+
1310
+
1311
+ void Assembler::sw(Register rd, const MemOperand& rs) {
1312
+ if (is_int16(rs.offset_)) {
1313
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1314
+ } else { // Offset > 16 bits, use multiple instructions to store.
1315
+ LoadRegPlusOffsetToAt(rs);
1316
+ GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1317
+ }
1318
+ }
1319
+
1320
+
1321
+ void Assembler::swl(Register rd, const MemOperand& rs) {
1322
+ GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1323
+ }
1324
+
1325
+
1326
+ void Assembler::swr(Register rd, const MemOperand& rs) {
1327
+ GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1328
+ }
1329
+
1330
+
1331
+ void Assembler::lui(Register rd, int32_t j) {
1332
+ GenInstrImmediate(LUI, zero_reg, rd, j);
1333
+ }
1334
+
1335
+
1336
+ //-------------Misc-instructions--------------
1337
+
1338
+ // Break / Trap instructions.
1339
+ void Assembler::break_(uint32_t code) {
1340
+ ASSERT((code & ~0xfffff) == 0);
1341
+ Instr break_instr = SPECIAL | BREAK | (code << 6);
1342
+ emit(break_instr);
1343
+ }
1344
+
1345
+
1346
+ void Assembler::tge(Register rs, Register rt, uint16_t code) {
1347
+ ASSERT(is_uint10(code));
1348
+ Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1349
+ | rt.code() << kRtShift | code << 6;
1350
+ emit(instr);
1351
+ }
1352
+
1353
+
1354
+ void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1355
+ ASSERT(is_uint10(code));
1356
+ Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1357
+ | rt.code() << kRtShift | code << 6;
1358
+ emit(instr);
1359
+ }
1360
+
1361
+
1362
+ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
1363
+ ASSERT(is_uint10(code));
1364
+ Instr instr =
1365
+ SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1366
+ emit(instr);
1367
+ }
1368
+
1369
+
1370
+ void Assembler::tltu(Register rs, Register rt, uint16_t code) {
1371
+ ASSERT(is_uint10(code));
1372
+ Instr instr =
1373
+ SPECIAL | TLTU | rs.code() << kRsShift
1374
+ | rt.code() << kRtShift | code << 6;
1375
+ emit(instr);
1376
+ }
1377
+
1378
+
1379
+ void Assembler::teq(Register rs, Register rt, uint16_t code) {
1380
+ ASSERT(is_uint10(code));
1381
+ Instr instr =
1382
+ SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1383
+ emit(instr);
1384
+ }
1385
+
1386
+
1387
+ void Assembler::tne(Register rs, Register rt, uint16_t code) {
1388
+ ASSERT(is_uint10(code));
1389
+ Instr instr =
1390
+ SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1391
+ emit(instr);
1392
+ }
1393
+
1394
+
1395
+ // Move from HI/LO register.
1396
+
1397
+ void Assembler::mfhi(Register rd) {
1398
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1399
+ }
1400
+
1401
+
1402
+ void Assembler::mflo(Register rd) {
1403
+ GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1404
+ }
1405
+
1406
+
1407
+ // Set on less than instructions.
1408
+ void Assembler::slt(Register rd, Register rs, Register rt) {
1409
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1410
+ }
1411
+
1412
+
1413
+ void Assembler::sltu(Register rd, Register rs, Register rt) {
1414
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1415
+ }
1416
+
1417
+
1418
+ void Assembler::slti(Register rt, Register rs, int32_t j) {
1419
+ GenInstrImmediate(SLTI, rs, rt, j);
1420
+ }
1421
+
1422
+
1423
+ void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1424
+ GenInstrImmediate(SLTIU, rs, rt, j);
1425
+ }
1426
+
1427
+
1428
+ // Conditional move.
1429
+ void Assembler::movz(Register rd, Register rs, Register rt) {
1430
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1431
+ }
1432
+
1433
+
1434
+ void Assembler::movn(Register rd, Register rs, Register rt) {
1435
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1436
+ }
1437
+
1438
+
1439
+ void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1440
+ Register rt;
1441
+ rt.code_ = (cc & 0x0007) << 2 | 1;
1442
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1443
+ }
1444
+
1445
+
1446
+ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1447
+ Register rt;
1448
+ rt.code_ = (cc & 0x0007) << 2 | 0;
1449
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1450
+ }
1451
+
1452
+
1453
+ // Bit twiddling.
1454
+ void Assembler::clz(Register rd, Register rs) {
1455
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1456
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1457
+ }
1458
+
1459
+
1460
+ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1461
+ // Should be called via MacroAssembler::Ins.
1462
+ // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1463
+ ASSERT(mips32r2);
1464
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1465
+ }
1466
+
1467
+
1468
+ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1469
+ // Should be called via MacroAssembler::Ext.
1470
+ // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1471
+ ASSERT(mips32r2);
1472
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1473
+ }
1474
+
1475
+
1476
+ //--------Coprocessor-instructions----------------
1477
+
1478
+ // Load, store, move.
1479
+ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1480
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1481
+ }
1482
+
1483
+
1484
+ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1485
+ // Workaround for non-8-byte alignment of HeapNumber: convert 64-bit
1486
+ // load to two 32-bit loads.
1487
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1488
+ FPURegister nextfpreg;
1489
+ nextfpreg.setcode(fd.code() + 1);
1490
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
1491
+ }
1492
+
1493
+
1494
+ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1495
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1496
+ }
1497
+
1498
+
1499
+ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1500
+ // Workaround for non-8-byte alignment of HeapNumber: convert 64-bit
1501
+ // store to two 32-bit stores.
1502
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1503
+ FPURegister nextfpreg;
1504
+ nextfpreg.setcode(fd.code() + 1);
1505
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
1506
+ }
1507
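Because a HeapNumber payload is only guaranteed 4-byte alignment, ldc1() and sdc1() above fall back to two 32-bit word accesses. The sketch below shows the two-word copy pattern in isolation; it deliberately does not model which half ends up in which FPU register, since that pairing depends on endianness:

#include <cassert>
#include <cstdint>
#include <cstring>

// Move a doubleword in two independent 4-byte copies, so only 4-byte
// alignment of src/dst is required.
static void CopyDoubleAsTwoWords(const void* src, void* dst) {
  uint32_t word0, word1;
  std::memcpy(&word0, static_cast<const char*>(src) + 0, 4);
  std::memcpy(&word1, static_cast<const char*>(src) + 4, 4);
  std::memcpy(static_cast<char*>(dst) + 0, &word0, 4);
  std::memcpy(static_cast<char*>(dst) + 4, &word1, 4);
}

int main() {
  double in = 3.141592653589793;
  double out = 0.0;
  CopyDoubleAsTwoWords(&in, &out);
  assert(out == in);  // Bit pattern preserved across the two word moves.
  return 0;
}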
+
1508
+
1509
+ void Assembler::mtc1(Register rt, FPURegister fs) {
1510
+ GenInstrRegister(COP1, MTC1, rt, fs, f0);
1511
+ }
1512
+
1513
+
1514
+ void Assembler::mfc1(Register rt, FPURegister fs) {
1515
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
1516
+ }
1517
+
1518
+
1519
+ void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1520
+ GenInstrRegister(COP1, CTC1, rt, fs);
1521
+ }
1522
+
1523
+
1524
+ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1525
+ GenInstrRegister(COP1, CFC1, rt, fs);
1526
+ }
1527
+
1528
+
1529
+ // Arithmetic.
1530
+
1531
+ void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1532
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1533
+ }
1534
+
1535
+
1536
+ void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1537
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1538
+ }
1539
+
1540
+
1541
+ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1542
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1543
+ }
1544
+
1545
+
1546
+ void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1547
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1548
+ }
1549
+
1550
+
1551
+ void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1552
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1553
+ }
1554
+
1555
+
1556
+ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1557
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1558
+ }
1559
+
1560
+
1561
+ void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1562
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1563
+ }
1564
+
1565
+
1566
+ void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1567
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1568
+ }
1569
+
1570
+
1571
+ // Conversions.
1572
+
1573
+ void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1574
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1575
+ }
1576
+
1577
+
1578
+ void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1579
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1580
+ }
1581
+
1582
+
1583
+ void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1584
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1585
+ }
1586
+
1587
+
1588
+ void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1589
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1590
+ }
1591
+
1592
+
1593
+ void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1594
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1595
+ }
1596
+
1597
+
1598
+ void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1599
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1600
+ }
1601
+
1602
+
1603
+ void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1604
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1605
+ }
1606
+
1607
+
1608
+ void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1609
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1610
+ }
1611
+
1612
+
1613
+ void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1614
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1615
+ }
1616
+
1617
+
1618
+ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1619
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1620
+ }
1621
+
1622
+
1623
+ void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1624
+ ASSERT(mips32r2);
1625
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1626
+ }
1627
+
1628
+
1629
+ void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1630
+ ASSERT(mips32r2);
1631
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1632
+ }
1633
+
1634
+
1635
+ void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1636
+ ASSERT(mips32r2);
1637
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1638
+ }
1639
+
1640
+
1641
+ void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1642
+ ASSERT(mips32r2);
1643
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1644
+ }
1645
+
1646
+
1647
+ void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1648
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1649
+ }
1650
+
1651
+
1652
+ void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1653
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1654
+ }
1655
+
1656
+
1657
+ void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1658
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1659
+ }
1660
+
1661
+
1662
+ void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1663
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1664
+ }
1665
+
1666
+
1667
+ void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1668
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1669
+ }
1670
+
1671
+
1672
+ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1673
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1674
+ }
1675
+
1676
+
1677
+ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1678
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1679
+ }
1680
+
1681
+
1682
+ void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1683
+ ASSERT(mips32r2);
1684
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1685
+ }
1686
+
1687
+
1688
+ void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1689
+ GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1690
+ }
1691
+
1692
+
1693
+ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1694
+ GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1695
+ }
1696
+
1697
+
1698
+ void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1699
+ ASSERT(mips32r2);
1700
+ GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1701
+ }
1702
+
1703
+
1704
+ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1705
+ GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1706
+ }
1707
+
1708
+
1709
+ // Conditions.
1710
+ void Assembler::c(FPUCondition cond, SecondaryField fmt,
1711
+ FPURegister fs, FPURegister ft, uint16_t cc) {
1712
+ ASSERT(CpuFeatures::IsEnabled(FPU));
1713
+ ASSERT(is_uint3(cc));
1714
+ ASSERT((fmt & ~(31 << kRsShift)) == 0);
1715
+ Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
1716
+ | cc << 8 | 3 << 4 | cond;
1717
+ emit(instr);
1718
+ }
1719
+
1720
+
1721
+ void Assembler::fcmp(FPURegister src1, const double src2,
1722
+ FPUCondition cond) {
1723
+ ASSERT(CpuFeatures::IsEnabled(FPU));
1724
+ ASSERT(src2 == 0.0);
1725
+ mtc1(zero_reg, f14);
1726
+ cvt_d_w(f14, f14);
1727
+ c(cond, D, src1, f14, 0);
1728
+ }
1729
+
1730
+
1731
+ void Assembler::bc1f(int16_t offset, uint16_t cc) {
1732
+ ASSERT(CpuFeatures::IsEnabled(FPU));
1733
+ ASSERT(is_uint3(cc));
1734
+ Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
1735
+ emit(instr);
1736
+ }
1737
+
1738
+
1739
+ void Assembler::bc1t(int16_t offset, uint16_t cc) {
1740
+ ASSERT(CpuFeatures::IsEnabled(FPU));
1741
+ ASSERT(is_uint3(cc));
1742
+ Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
1743
+ emit(instr);
1744
+ }
1745
+
1746
+
1747
+ // Debugging.
1748
+ void Assembler::RecordJSReturn() {
1749
+ positions_recorder()->WriteRecordedPositions();
1750
+ CheckBuffer();
1751
+ RecordRelocInfo(RelocInfo::JS_RETURN);
1752
+ }
1753
+
1754
+
1755
+ void Assembler::RecordDebugBreakSlot() {
1756
+ positions_recorder()->WriteRecordedPositions();
1757
+ CheckBuffer();
1758
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1759
+ }
1760
+
1761
+
1762
+ void Assembler::RecordComment(const char* msg) {
1763
+ if (FLAG_code_comments) {
1764
+ CheckBuffer();
1765
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1766
+ }
1767
+ }
1768
+
1769
+
1770
+ void Assembler::GrowBuffer() {
1771
+ if (!own_buffer_) FATAL("external code buffer is too small");
1772
+
1773
+ // Compute new buffer size.
1774
+ CodeDesc desc; // The new buffer.
1775
+ if (buffer_size_ < 4*KB) {
1776
+ desc.buffer_size = 4*KB;
1777
+ } else if (buffer_size_ < 1*MB) {
1778
+ desc.buffer_size = 2*buffer_size_;
1779
+ } else {
1780
+ desc.buffer_size = buffer_size_ + 1*MB;
1781
+ }
1782
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
1783
+
1784
+ // Set up new buffer.
1785
+ desc.buffer = NewArray<byte>(desc.buffer_size);
1786
+
1787
+ desc.instr_size = pc_offset();
1788
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
1789
+
1790
+ // Copy the data.
1791
+ int pc_delta = desc.buffer - buffer_;
1792
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
1793
+ memmove(desc.buffer, buffer_, desc.instr_size);
1794
+ memmove(reloc_info_writer.pos() + rc_delta,
1795
+ reloc_info_writer.pos(), desc.reloc_size);
1796
+
1797
+ // Switch buffers.
1798
+ DeleteArray(buffer_);
1799
+ buffer_ = desc.buffer;
1800
+ buffer_size_ = desc.buffer_size;
1801
+ pc_ += pc_delta;
1802
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
1803
+ reloc_info_writer.last_pc() + pc_delta);
1804
+
1805
+ // On ia32 and ARM pc-relative addressing is used, and we thus need to apply a
1806
+ // shift by pc_delta. But on MIPS the target address is directly loaded, so
1807
+ // we do not need to relocate here.
1808
+
1809
+ ASSERT(!overflow());
1810
+ }
1811
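GrowBuffer()'s sizing policy in isolation: never smaller than 4 KB, double while under 1 MB, then grow linearly by 1 MB to avoid over-allocation on large code objects. A sketch (names are stand-ins, not V8's):

#include <cassert>

static const int kKBSketch = 1024;
static const int kMBSketch = 1024 * 1024;

static int NextBufferSize(int buffer_size) {
  if (buffer_size < 4 * kKBSketch) return 4 * kKBSketch;
  if (buffer_size < 1 * kMBSketch) return 2 * buffer_size;
  return buffer_size + 1 * kMBSketch;
}

int main() {
  assert(NextBufferSize(0) == 4 * kKBSketch);
  assert(NextBufferSize(64 * kKBSketch) == 128 * kKBSketch);
  assert(NextBufferSize(2 * kMBSketch) == 3 * kMBSketch);
  return 0;
}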
+
1812
+
1813
+ void Assembler::db(uint8_t data) {
1814
+ CheckBuffer();
1815
+ *reinterpret_cast<uint8_t*>(pc_) = data;
1816
+ pc_ += sizeof(uint8_t);
1817
+ }
1818
+
1819
+
1820
+ void Assembler::dd(uint32_t data) {
1821
+ CheckBuffer();
1822
+ *reinterpret_cast<uint32_t*>(pc_) = data;
1823
+ pc_ += sizeof(uint32_t);
1824
+ }
1825
+
1826
+
1827
+ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
1828
+ RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
1829
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
1830
+ // Adjust code for new modes.
1831
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
1832
+ || RelocInfo::IsJSReturn(rmode)
1833
+ || RelocInfo::IsComment(rmode)
1834
+ || RelocInfo::IsPosition(rmode));
1835
+ // These modes do not need an entry in the constant pool.
1836
+ }
1837
+ if (rinfo.rmode() != RelocInfo::NONE) {
1838
+ // Don't record external references unless the heap will be serialized.
1839
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
1840
+ !Serializer::enabled() &&
1841
+ !FLAG_debug_code) {
1842
+ return;
1843
+ }
1844
+ ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
1845
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
1846
+ ASSERT(ast_id_for_reloc_info_ != kNoASTId);
1847
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
1848
+ ast_id_for_reloc_info_ = kNoASTId;
1849
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
1850
+ } else {
1851
+ reloc_info_writer.Write(&rinfo);
1852
+ }
1853
+ }
1854
+ }
1855
+
1856
+
1857
+ void Assembler::BlockTrampolinePoolFor(int instructions) {
1858
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
1859
+ }
1860
+
1861
+
1862
+ void Assembler::CheckTrampolinePool(bool force_emit) {
1863
+ // Calculate the offset of the next check.
1864
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
1865
+
1866
+ int dist = pc_offset() - last_trampoline_pool_end_;
1867
+
1868
+ if (dist <= kMaxDistBetweenPools && !force_emit) {
1869
+ return;
1870
+ }
1871
+
1872
+ // Some small sequences of instructions must not be broken up by the
1873
+ // insertion of a trampoline pool; such sequences are protected by setting
1874
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
1875
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
1876
+ // are blocked by trampoline_pool_blocked_nesting_.
1877
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
1878
+ (pc_offset() < no_trampoline_pool_before_)) {
1879
+ // Emission is currently blocked; make sure we try again as soon as
1880
+ // possible.
1881
+ if (trampoline_pool_blocked_nesting_ > 0) {
1882
+ next_buffer_check_ = pc_offset() + kInstrSize;
1883
+ } else {
1884
+ next_buffer_check_ = no_trampoline_pool_before_;
1885
+ }
1886
+ return;
1887
+ }
1888
+
1889
+ // First we emit a jump (2 instructions), then we emit the trampoline pool.
1890
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
1891
+ Label after_pool;
1892
+ b(&after_pool);
1893
+ nop();
1894
+
1895
+ int pool_start = pc_offset();
1896
+ for (int i = 0; i < kSlotsPerTrampoline; i++) {
1897
+ b(&after_pool);
1898
+ nop();
1899
+ }
1900
+ for (int i = 0; i < kLabelsPerTrampoline; i++) {
1901
+ emit(0);
1902
+ }
1903
+ last_trampoline_pool_end_ = pc_offset() - kInstrSize;
1904
+ bind(&after_pool);
1905
+ trampolines_.Add(Trampoline(pool_start,
1906
+ kSlotsPerTrampoline,
1907
+ kLabelsPerTrampoline));
1908
+
1909
+ // Since a trampoline pool was just emitted,
1910
+ // move the check offset forward by the standard interval.
1911
+ next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
1912
+ }
1913
+ return;
1914
+ }
1915
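The control flow at the top of CheckTrampolinePool() reduces to two questions: is a pool overdue (or forced), and is emission currently blocked? A sketch of just that decision (the distance threshold is a placeholder, not V8's kMaxDistBetweenPools):

#include <cassert>

static const int kMaxDistBetweenPoolsSketch = 32 * 1024;  // Assumed threshold.

static bool ShouldEmitTrampolinePool(int pc_offset, int last_pool_end,
                                     int blocked_nesting, int no_pool_before,
                                     bool force_emit) {
  if ((pc_offset - last_pool_end) <= kMaxDistBetweenPoolsSketch && !force_emit) {
    return false;  // Not due yet.
  }
  if (blocked_nesting > 0 || pc_offset < no_pool_before) {
    return false;  // A protected instruction sequence is in flight; retry later.
  }
  return true;
}

int main() {
  assert(!ShouldEmitTrampolinePool(100, 0, 0, 0, false));    // Too close to the last pool.
  assert(ShouldEmitTrampolinePool(40000, 0, 0, 0, false));   // Overdue and unblocked.
  assert(!ShouldEmitTrampolinePool(40000, 0, 1, 0, false));  // Blocked by a scope.
  assert(ShouldEmitTrampolinePool(100, 0, 0, 0, true));      // Forced emission.
  return 0;
}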
+
1916
+
1917
+ Address Assembler::target_address_at(Address pc) {
1918
+ Instr instr1 = instr_at(pc);
1919
+ Instr instr2 = instr_at(pc + kInstrSize);
1920
+ // Interpret 2 instructions generated by li: lui/ori
1921
+ if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
1922
+ // Assemble the 32 bit value.
1923
+ return reinterpret_cast<Address>(
1924
+ (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
1925
+ }
1926
+
1927
+ // We should never get here, force a bad address if we do.
1928
+ UNREACHABLE();
1929
+ return (Address)0x0;
1930
+ }
1931
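target_address_at() reassembles the 32-bit value that an earlier li() spread across a lui/ori pair, and set_target_address_at() below rewrites that pair in place. A standalone sketch of the round trip (opcode values and field positions are assumptions mirroring the MIPS encoding):

#include <cassert>
#include <cstdint>

static const uint32_t kLuiOpcodeSketch = 0x0fu << 26;  // lui (001111).
static const uint32_t kOriOpcodeSketch = 0x0du << 26;  // ori (001101).
static const uint32_t kImm16MaskSketch = 0xffff;

// Recover the address materialized by a lui/ori pair, as target_address_at()
// does after checking the opcodes.
static uint32_t DecodeLuiOriPair(uint32_t lui_instr, uint32_t ori_instr) {
  return ((lui_instr & kImm16MaskSketch) << 16) | (ori_instr & kImm16MaskSketch);
}

int main() {
  const uint32_t target = 0xdeadbeefu;
  // lui $t9, 0xdead ; ori $t9, $t9, 0xbeef   ($t9 = reg 25).
  uint32_t lui = kLuiOpcodeSketch | (25u << 16) | (target >> 16);
  uint32_t ori = kOriOpcodeSketch | (25u << 21) | (25u << 16) | (target & kImm16MaskSketch);
  assert(DecodeLuiOriPair(lui, ori) == target);
  return 0;
}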
+
1932
+
1933
+ void Assembler::set_target_address_at(Address pc, Address target) {
1934
+ // On MIPS we patch the address into lui/ori instruction pair.
1935
+
1936
+ // First check we have an li (lui/ori pair).
1937
+ Instr instr2 = instr_at(pc + kInstrSize);
1938
+ #ifdef DEBUG
1939
+ Instr instr1 = instr_at(pc);
1940
+
1941
+ // Check we have indeed the result from a li with MustUseReg true.
1942
+ CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
1943
+ #endif
1944
+
1945
+ uint32_t rt_code = GetRtField(instr2);
1946
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
1947
+ uint32_t itarget = reinterpret_cast<uint32_t>(target);
1948
+
1949
+ // lui rt, high-16.
1950
+ // ori rt, rt, low-16.
1951
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
1952
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
1953
+
1954
+ CPU::FlushICache(pc, 2 * sizeof(int32_t));
1955
+ }
1956
+
1957
+
1958
+ } } // namespace v8::internal
1959
+
1960
+ #endif // V8_TARGET_ARCH_MIPS