chipper 0.4.2

Files changed (134)
  1. data/README.rdoc +51 -0
  2. data/ext/extconf.rb +58 -0
  3. data/ext/libstemmer_c/Makefile +10 -0
  4. data/ext/libstemmer_c/examples/stemwords.c +209 -0
  5. data/ext/libstemmer_c/include/libstemmer.h +79 -0
  6. data/ext/libstemmer_c/libstemmer/libstemmer.c +95 -0
  7. data/ext/libstemmer_c/libstemmer/libstemmer_utf8.c +95 -0
  8. data/ext/libstemmer_c/libstemmer/modules.h +190 -0
  9. data/ext/libstemmer_c/libstemmer/modules_utf8.h +121 -0
  10. data/ext/libstemmer_c/mkinc.mak +82 -0
  11. data/ext/libstemmer_c/mkinc_utf8.mak +52 -0
  12. data/ext/libstemmer_c/runtime/api.c +66 -0
  13. data/ext/libstemmer_c/runtime/api.h +26 -0
  14. data/ext/libstemmer_c/runtime/header.h +58 -0
  15. data/ext/libstemmer_c/runtime/utilities.c +478 -0
  16. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_danish.c +337 -0
  17. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_danish.h +16 -0
  18. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_dutch.c +624 -0
  19. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_dutch.h +16 -0
  20. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_english.c +1117 -0
  21. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_english.h +16 -0
  22. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_finnish.c +762 -0
  23. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_finnish.h +16 -0
  24. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_french.c +1246 -0
  25. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_french.h +16 -0
  26. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_german.c +521 -0
  27. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_german.h +16 -0
  28. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_hungarian.c +1230 -0
  29. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_hungarian.h +16 -0
  30. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_italian.c +1065 -0
  31. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_italian.h +16 -0
  32. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_norwegian.c +297 -0
  33. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_norwegian.h +16 -0
  34. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_porter.c +749 -0
  35. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_porter.h +16 -0
  36. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_portuguese.c +1017 -0
  37. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_portuguese.h +16 -0
  38. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_spanish.c +1093 -0
  39. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_spanish.h +16 -0
  40. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_swedish.c +307 -0
  41. data/ext/libstemmer_c/src_c/stem_ISO_8859_1_swedish.h +16 -0
  42. data/ext/libstemmer_c/src_c/stem_ISO_8859_2_romanian.c +998 -0
  43. data/ext/libstemmer_c/src_c/stem_ISO_8859_2_romanian.h +16 -0
  44. data/ext/libstemmer_c/src_c/stem_KOI8_R_russian.c +700 -0
  45. data/ext/libstemmer_c/src_c/stem_KOI8_R_russian.h +16 -0
  46. data/ext/libstemmer_c/src_c/stem_UTF_8_danish.c +339 -0
  47. data/ext/libstemmer_c/src_c/stem_UTF_8_danish.h +16 -0
  48. data/ext/libstemmer_c/src_c/stem_UTF_8_dutch.c +634 -0
  49. data/ext/libstemmer_c/src_c/stem_UTF_8_dutch.h +16 -0
  50. data/ext/libstemmer_c/src_c/stem_UTF_8_english.c +1125 -0
  51. data/ext/libstemmer_c/src_c/stem_UTF_8_english.h +16 -0
  52. data/ext/libstemmer_c/src_c/stem_UTF_8_finnish.c +768 -0
  53. data/ext/libstemmer_c/src_c/stem_UTF_8_finnish.h +16 -0
  54. data/ext/libstemmer_c/src_c/stem_UTF_8_french.c +1256 -0
  55. data/ext/libstemmer_c/src_c/stem_UTF_8_french.h +16 -0
  56. data/ext/libstemmer_c/src_c/stem_UTF_8_german.c +527 -0
  57. data/ext/libstemmer_c/src_c/stem_UTF_8_german.h +16 -0
  58. data/ext/libstemmer_c/src_c/stem_UTF_8_hungarian.c +1234 -0
  59. data/ext/libstemmer_c/src_c/stem_UTF_8_hungarian.h +16 -0
  60. data/ext/libstemmer_c/src_c/stem_UTF_8_italian.c +1073 -0
  61. data/ext/libstemmer_c/src_c/stem_UTF_8_italian.h +16 -0
  62. data/ext/libstemmer_c/src_c/stem_UTF_8_norwegian.c +299 -0
  63. data/ext/libstemmer_c/src_c/stem_UTF_8_norwegian.h +16 -0
  64. data/ext/libstemmer_c/src_c/stem_UTF_8_porter.c +755 -0
  65. data/ext/libstemmer_c/src_c/stem_UTF_8_porter.h +16 -0
  66. data/ext/libstemmer_c/src_c/stem_UTF_8_portuguese.c +1023 -0
  67. data/ext/libstemmer_c/src_c/stem_UTF_8_portuguese.h +16 -0
  68. data/ext/libstemmer_c/src_c/stem_UTF_8_romanian.c +1004 -0
  69. data/ext/libstemmer_c/src_c/stem_UTF_8_romanian.h +16 -0
  70. data/ext/libstemmer_c/src_c/stem_UTF_8_russian.c +694 -0
  71. data/ext/libstemmer_c/src_c/stem_UTF_8_russian.h +16 -0
  72. data/ext/libstemmer_c/src_c/stem_UTF_8_spanish.c +1097 -0
  73. data/ext/libstemmer_c/src_c/stem_UTF_8_spanish.h +16 -0
  74. data/ext/libstemmer_c/src_c/stem_UTF_8_swedish.c +309 -0
  75. data/ext/libstemmer_c/src_c/stem_UTF_8_swedish.h +16 -0
  76. data/ext/libstemmer_c/src_c/stem_UTF_8_turkish.c +2205 -0
  77. data/ext/libstemmer_c/src_c/stem_UTF_8_turkish.h +16 -0
  78. data/ext/re2/bitstate.cc +378 -0
  79. data/ext/re2/compile.cc +1138 -0
  80. data/ext/re2/dfa.cc +2086 -0
  81. data/ext/re2/filtered_re2.cc +100 -0
  82. data/ext/re2/filtered_re2.h +99 -0
  83. data/ext/re2/hash.cc +231 -0
  84. data/ext/re2/mimics_pcre.cc +185 -0
  85. data/ext/re2/nfa.cc +709 -0
  86. data/ext/re2/onepass.cc +614 -0
  87. data/ext/re2/parse.cc +2202 -0
  88. data/ext/re2/perl_groups.cc +119 -0
  89. data/ext/re2/prefilter.cc +671 -0
  90. data/ext/re2/prefilter.h +105 -0
  91. data/ext/re2/prefilter_tree.cc +398 -0
  92. data/ext/re2/prefilter_tree.h +130 -0
  93. data/ext/re2/prog.cc +341 -0
  94. data/ext/re2/prog.h +376 -0
  95. data/ext/re2/re2.cc +1180 -0
  96. data/ext/re2/re2.h +837 -0
  97. data/ext/re2/regexp.cc +920 -0
  98. data/ext/re2/regexp.h +632 -0
  99. data/ext/re2/rune.cc +258 -0
  100. data/ext/re2/set.cc +113 -0
  101. data/ext/re2/set.h +55 -0
  102. data/ext/re2/simplify.cc +393 -0
  103. data/ext/re2/stringpiece.cc +87 -0
  104. data/ext/re2/stringpiece.h +182 -0
  105. data/ext/re2/tostring.cc +341 -0
  106. data/ext/re2/unicode_casefold.cc +469 -0
  107. data/ext/re2/unicode_casefold.h +75 -0
  108. data/ext/re2/unicode_groups.cc +4851 -0
  109. data/ext/re2/unicode_groups.h +64 -0
  110. data/ext/re2/valgrind.cc +24 -0
  111. data/ext/re2/variadic_function.h +346 -0
  112. data/ext/re2/walker-inl.h +244 -0
  113. data/ext/src/chipper.cc +626 -0
  114. data/ext/src/version.h +1 -0
  115. data/ext/stemmer.rb +40 -0
  116. data/ext/util/arena.h +103 -0
  117. data/ext/util/atomicops.h +79 -0
  118. data/ext/util/benchmark.h +41 -0
  119. data/ext/util/flags.h +27 -0
  120. data/ext/util/logging.h +78 -0
  121. data/ext/util/mutex.h +190 -0
  122. data/ext/util/pcre.h +679 -0
  123. data/ext/util/random.h +29 -0
  124. data/ext/util/sparse_array.h +451 -0
  125. data/ext/util/sparse_set.h +177 -0
  126. data/ext/util/test.h +57 -0
  127. data/ext/util/thread.h +26 -0
  128. data/ext/util/utf.h +43 -0
  129. data/ext/util/util.h +127 -0
  130. data/ext/util/valgrind.h +4517 -0
  131. data/test/helper.rb +5 -0
  132. data/test/test_entities.rb +57 -0
  133. data/test/test_tokens.rb +118 -0
  134. metadata +199 -0
data/ext/util/valgrind.h
@@ -0,0 +1,4517 @@
1
+ /* -*- c -*-
2
+ ----------------------------------------------------------------
3
+
4
+ Notice that the following BSD-style license applies to this one
5
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
6
+ terms of the GNU General Public License, version 2, unless
7
+ otherwise indicated. See the COPYING file in the source
8
+ distribution for details.
9
+
10
+ ----------------------------------------------------------------
11
+
12
+ This file is part of Valgrind, a dynamic binary instrumentation
13
+ framework.
14
+
15
+ Copyright (C) 2000-2009 Julian Seward. All rights reserved.
16
+
17
+ Redistribution and use in source and binary forms, with or without
18
+ modification, are permitted provided that the following conditions
19
+ are met:
20
+
21
+ 1. Redistributions of source code must retain the above copyright
22
+ notice, this list of conditions and the following disclaimer.
23
+
24
+ 2. The origin of this software must not be misrepresented; you must
25
+ not claim that you wrote the original software. If you use this
26
+ software in a product, an acknowledgment in the product
27
+ documentation would be appreciated but is not required.
28
+
29
+ 3. Altered source versions must be plainly marked as such, and must
30
+ not be misrepresented as being the original software.
31
+
32
+ 4. The name of the author may not be used to endorse or promote
33
+ products derived from this software without specific prior written
34
+ permission.
35
+
36
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
37
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
38
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
40
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
42
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
43
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
44
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
45
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
46
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47
+
48
+ ----------------------------------------------------------------
49
+
50
+ Notice that the above BSD-style license applies to this one file
51
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
52
+ the terms of the GNU General Public License, version 2. See the
53
+ COPYING file in the source distribution for details.
54
+
55
+ ----------------------------------------------------------------
56
+ */
57
+
58
+
59
+ /* This file is for inclusion into client (your!) code.
60
+
61
+ You can use these macros to manipulate and query Valgrind's
62
+ execution inside your own programs.
63
+
64
+ The resulting executables will still run without Valgrind, just a
65
+ little bit more slowly than they otherwise would, but otherwise
66
+ unchanged. When not running on valgrind, each client request
67
+ consumes very few (eg. 7) instructions, so the resulting performance
68
+ loss is negligible unless you plan to execute client requests
69
+ millions of times per second. Nevertheless, if that is still a
70
+ problem, you can compile with the NVALGRIND symbol defined (gcc
71
+ -DNVALGRIND) so that client requests are not even compiled in. */
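
The comment above describes how client code is meant to consume this header. As a minimal editorial sketch (not part of the vendored file or of this diff), a program that includes valgrind.h can ask whether it is currently running under Valgrind via the RUNNING_ON_VALGRIND macro, which is defined further down in the header, outside the excerpt shown here:

    #include <stdio.h>
    #include "valgrind.h"

    int main(void)
    {
        /* RUNNING_ON_VALGRIND expands to a client request: it evaluates to
           non-zero under Valgrind, to 0 on a real CPU, and to 0 when the
           program is built with -DNVALGRIND (requests compiled out). */
        if (RUNNING_ON_VALGRIND)
            printf("running under Valgrind\n");
        else
            printf("running natively\n");
        return 0;
    }
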
72
+
73
+ #ifndef __VALGRIND_H
74
+ #define __VALGRIND_H
75
+
76
+ #include <stdarg.h>
77
+
78
+ /* Nb: this file might be included in a file compiled with -ansi. So
79
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
80
+ use "__asm__"). */
81
+
82
+ /* Derive some tags indicating what the target platform is. Note
83
+ that in this file we're using the compiler's CPP symbols for
84
+ identifying architectures, which are different to the ones we use
85
+ within the rest of Valgrind. Note, __powerpc__ is active for both
86
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
87
+ latter (on Linux, that is).
88
+
89
+ Misc note: how to find out what's predefined in gcc by default:
90
+ gcc -Wp,-dM somefile.c
91
+ */
92
+ #undef PLAT_ppc64_aix5
93
+ #undef PLAT_ppc32_aix5
94
+ #undef PLAT_x86_darwin
95
+ #undef PLAT_amd64_darwin
96
+ #undef PLAT_x86_linux
97
+ #undef PLAT_amd64_linux
98
+ #undef PLAT_ppc32_linux
99
+ #undef PLAT_ppc64_linux
100
+ #undef PLAT_arm_linux
101
+
102
+ #if defined(_AIX) && defined(__64BIT__)
103
+ # define PLAT_ppc64_aix5 1
104
+ #elif defined(_AIX) && !defined(__64BIT__)
105
+ # define PLAT_ppc32_aix5 1
106
+ #elif defined(__APPLE__) && defined(__i386__)
107
+ # define PLAT_x86_darwin 1
108
+ #elif defined(__APPLE__) && defined(__x86_64__)
109
+ # define PLAT_amd64_darwin 1
110
+ #elif defined(__linux__) && defined(__i386__)
111
+ # define PLAT_x86_linux 1
112
+ #elif defined(__linux__) && defined(__x86_64__)
113
+ # define PLAT_amd64_linux 1
114
+ #elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
115
+ # define PLAT_ppc32_linux 1
116
+ #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
117
+ # define PLAT_ppc64_linux 1
118
+ #elif defined(__linux__) && defined(__arm__)
119
+ # define PLAT_arm_linux 1
120
+ #else
121
+ /* If we're not compiling for our target platform, don't generate
122
+ any inline asms. */
123
+ # if !defined(NVALGRIND)
124
+ # define NVALGRIND 1
125
+ # endif
126
+ #endif
127
+
128
+
129
+ /* ------------------------------------------------------------------ */
130
+ /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
131
+ /* in here of use to end-users -- skip to the next section. */
132
+ /* ------------------------------------------------------------------ */
133
+
134
+ #if defined(NVALGRIND)
135
+
136
+ /* Define NVALGRIND to completely remove the Valgrind magic sequence
137
+ from the compiled code (analogous to NDEBUG's effects on
138
+ assert()) */
139
+ #define VALGRIND_DO_CLIENT_REQUEST( \
140
+ _zzq_rlval, _zzq_default, _zzq_request, \
141
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
142
+ { \
143
+ (_zzq_rlval) = (_zzq_default); \
144
+ }
145
+
146
+ #else /* ! NVALGRIND */
147
+
148
+ /* The following defines the magic code sequences which the JITter
149
+ spots and handles magically. Don't look too closely at them as
150
+ they will rot your brain.
151
+
152
+ The assembly code sequences for all architectures is in this one
153
+ file. This is because this file must be stand-alone, and we don't
154
+ want to have multiple files.
155
+
156
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
157
+ value gets put in the return slot, so that everything works when
158
+ this is executed not under Valgrind. Args are passed in a memory
159
+ block, and so there's no intrinsic limit to the number that could
160
+ be passed, but it's currently five.
161
+
162
+ The macro args are:
163
+ _zzq_rlval result lvalue
164
+ _zzq_default default value (result returned when running on real CPU)
165
+ _zzq_request request code
166
+ _zzq_arg1..5 request params
167
+
168
+ The other two macros are used to support function wrapping, and are
169
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
170
+ guest's NRADDR pseudo-register and whatever other information is
171
+ needed to safely run the call original from the wrapper: on
172
+ ppc64-linux, the R2 value at the divert point is also needed. This
173
+ information is abstracted into a user-visible type, OrigFn.
174
+
175
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
176
+ guest, but guarantees that the branch instruction will not be
177
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
178
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
179
+ complete inline asm, since it needs to be combined with more magic
180
+ inline asm stuff to be useful.
181
+ */
182
+
183
+ /* ------------------------- x86-{linux,darwin} ---------------- */
184
+
185
+ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
186
+
187
+ typedef
188
+ struct {
189
+ unsigned int nraddr; /* where's the code? */
190
+ }
191
+ OrigFn;
192
+
193
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
194
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
195
+ "roll $29, %%edi ; roll $19, %%edi\n\t"
196
+
197
+ #define VALGRIND_DO_CLIENT_REQUEST( \
198
+ _zzq_rlval, _zzq_default, _zzq_request, \
199
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
200
+ { volatile unsigned int _zzq_args[6]; \
201
+ volatile unsigned int _zzq_result; \
202
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
203
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
204
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
205
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
206
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
207
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
208
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
209
+ /* %EDX = client_request ( %EAX ) */ \
210
+ "xchgl %%ebx,%%ebx" \
211
+ : "=d" (_zzq_result) \
212
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
213
+ : "cc", "memory" \
214
+ ); \
215
+ _zzq_rlval = _zzq_result; \
216
+ }
217
+
218
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
219
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
220
+ volatile unsigned int __addr; \
221
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
222
+ /* %EAX = guest_NRADDR */ \
223
+ "xchgl %%ecx,%%ecx" \
224
+ : "=a" (__addr) \
225
+ : \
226
+ : "cc", "memory" \
227
+ ); \
228
+ _zzq_orig->nraddr = __addr; \
229
+ }
230
+
231
+ #define VALGRIND_CALL_NOREDIR_EAX \
232
+ __SPECIAL_INSTRUCTION_PREAMBLE \
233
+ /* call-noredir *%EAX */ \
234
+ "xchgl %%edx,%%edx\n\t"
235
+ #endif /* PLAT_x86_linux || PLAT_x86_darwin */
236
+
237
+ /* ------------------------ amd64-{linux,darwin} --------------- */
238
+
239
+ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
240
+
241
+ typedef
242
+ struct {
243
+ unsigned long long int nraddr; /* where's the code? */
244
+ }
245
+ OrigFn;
246
+
247
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
248
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
249
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
250
+
251
+ #define VALGRIND_DO_CLIENT_REQUEST( \
252
+ _zzq_rlval, _zzq_default, _zzq_request, \
253
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
254
+ { volatile unsigned long long int _zzq_args[6]; \
255
+ volatile unsigned long long int _zzq_result; \
256
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
257
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
258
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
259
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
260
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
261
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
262
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
263
+ /* %RDX = client_request ( %RAX ) */ \
264
+ "xchgq %%rbx,%%rbx" \
265
+ : "=d" (_zzq_result) \
266
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
267
+ : "cc", "memory" \
268
+ ); \
269
+ _zzq_rlval = _zzq_result; \
270
+ }
271
+
272
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
273
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
274
+ volatile unsigned long long int __addr; \
275
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
276
+ /* %RAX = guest_NRADDR */ \
277
+ "xchgq %%rcx,%%rcx" \
278
+ : "=a" (__addr) \
279
+ : \
280
+ : "cc", "memory" \
281
+ ); \
282
+ _zzq_orig->nraddr = __addr; \
283
+ }
284
+
285
+ #define VALGRIND_CALL_NOREDIR_RAX \
286
+ __SPECIAL_INSTRUCTION_PREAMBLE \
287
+ /* call-noredir *%RAX */ \
288
+ "xchgq %%rdx,%%rdx\n\t"
289
+ #endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
290
+
291
+ /* ------------------------ ppc32-linux ------------------------ */
292
+
293
+ #if defined(PLAT_ppc32_linux)
294
+
295
+ typedef
296
+ struct {
297
+ unsigned int nraddr; /* where's the code? */
298
+ }
299
+ OrigFn;
300
+
301
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
302
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
303
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
304
+
305
+ #define VALGRIND_DO_CLIENT_REQUEST( \
306
+ _zzq_rlval, _zzq_default, _zzq_request, \
307
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
308
+ \
309
+ { unsigned int _zzq_args[6]; \
310
+ unsigned int _zzq_result; \
311
+ unsigned int* _zzq_ptr; \
312
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
313
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
314
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
315
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
316
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
317
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
318
+ _zzq_ptr = _zzq_args; \
319
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
320
+ "mr 4,%2\n\t" /*ptr*/ \
321
+ __SPECIAL_INSTRUCTION_PREAMBLE \
322
+ /* %R3 = client_request ( %R4 ) */ \
323
+ "or 1,1,1\n\t" \
324
+ "mr %0,3" /*result*/ \
325
+ : "=b" (_zzq_result) \
326
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
327
+ : "cc", "memory", "r3", "r4"); \
328
+ _zzq_rlval = _zzq_result; \
329
+ }
330
+
331
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
332
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
333
+ unsigned int __addr; \
334
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
335
+ /* %R3 = guest_NRADDR */ \
336
+ "or 2,2,2\n\t" \
337
+ "mr %0,3" \
338
+ : "=b" (__addr) \
339
+ : \
340
+ : "cc", "memory", "r3" \
341
+ ); \
342
+ _zzq_orig->nraddr = __addr; \
343
+ }
344
+
345
+ #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
346
+ __SPECIAL_INSTRUCTION_PREAMBLE \
347
+ /* branch-and-link-to-noredir *%R11 */ \
348
+ "or 3,3,3\n\t"
349
+ #endif /* PLAT_ppc32_linux */
350
+
351
+ /* ------------------------ ppc64-linux ------------------------ */
352
+
353
+ #if defined(PLAT_ppc64_linux)
354
+
355
+ typedef
356
+ struct {
357
+ unsigned long long int nraddr; /* where's the code? */
358
+ unsigned long long int r2; /* what tocptr do we need? */
359
+ }
360
+ OrigFn;
361
+
362
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
363
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
364
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
365
+
366
+ #define VALGRIND_DO_CLIENT_REQUEST( \
367
+ _zzq_rlval, _zzq_default, _zzq_request, \
368
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
369
+ \
370
+ { unsigned long long int _zzq_args[6]; \
371
+ register unsigned long long int _zzq_result __asm__("r3"); \
372
+ register unsigned long long int* _zzq_ptr __asm__("r4"); \
373
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
374
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
375
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
376
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
377
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
378
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
379
+ _zzq_ptr = _zzq_args; \
380
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
381
+ /* %R3 = client_request ( %R4 ) */ \
382
+ "or 1,1,1" \
383
+ : "=r" (_zzq_result) \
384
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
385
+ : "cc", "memory"); \
386
+ _zzq_rlval = _zzq_result; \
387
+ }
388
+
389
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
390
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
391
+ register unsigned long long int __addr __asm__("r3"); \
392
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
393
+ /* %R3 = guest_NRADDR */ \
394
+ "or 2,2,2" \
395
+ : "=r" (__addr) \
396
+ : \
397
+ : "cc", "memory" \
398
+ ); \
399
+ _zzq_orig->nraddr = __addr; \
400
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
401
+ /* %R3 = guest_NRADDR_GPR2 */ \
402
+ "or 4,4,4" \
403
+ : "=r" (__addr) \
404
+ : \
405
+ : "cc", "memory" \
406
+ ); \
407
+ _zzq_orig->r2 = __addr; \
408
+ }
409
+
410
+ #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
411
+ __SPECIAL_INSTRUCTION_PREAMBLE \
412
+ /* branch-and-link-to-noredir *%R11 */ \
413
+ "or 3,3,3\n\t"
414
+
415
+ #endif /* PLAT_ppc64_linux */
416
+
417
+ /* ------------------------- arm-linux ------------------------- */
418
+
419
+ #if defined(PLAT_arm_linux)
420
+
421
+ typedef
422
+ struct {
423
+ unsigned int nraddr; /* where's the code? */
424
+ }
425
+ OrigFn;
426
+
427
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
428
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
429
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
430
+
431
+ #define VALGRIND_DO_CLIENT_REQUEST( \
432
+ _zzq_rlval, _zzq_default, _zzq_request, \
433
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
434
+ \
435
+ { volatile unsigned int _zzq_args[6]; \
436
+ volatile unsigned int _zzq_result; \
437
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
438
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
439
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
440
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
441
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
442
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
443
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
444
+ "mov r4, %2\n\t" /*ptr*/ \
445
+ __SPECIAL_INSTRUCTION_PREAMBLE \
446
+ /* R3 = client_request ( R4 ) */ \
447
+ "orr r10, r10, r10\n\t" \
448
+ "mov %0, r3" /*result*/ \
449
+ : "=r" (_zzq_result) \
450
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
451
+ : "cc","memory", "r3", "r4"); \
452
+ _zzq_rlval = _zzq_result; \
453
+ }
454
+
455
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
456
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
457
+ unsigned int __addr; \
458
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
459
+ /* R3 = guest_NRADDR */ \
460
+ "orr r11, r11, r11\n\t" \
461
+ "mov %0, r3" \
462
+ : "=r" (__addr) \
463
+ : \
464
+ : "cc", "memory", "r3" \
465
+ ); \
466
+ _zzq_orig->nraddr = __addr; \
467
+ }
468
+
469
+ #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
470
+ __SPECIAL_INSTRUCTION_PREAMBLE \
471
+ /* branch-and-link-to-noredir *%R4 */ \
472
+ "orr r12, r12, r12\n\t"
473
+
474
+ #endif /* PLAT_arm_linux */
475
+
476
+ /* ------------------------ ppc32-aix5 ------------------------- */
477
+
478
+ #if defined(PLAT_ppc32_aix5)
479
+
480
+ typedef
481
+ struct {
482
+ unsigned int nraddr; /* where's the code? */
483
+ unsigned int r2; /* what tocptr do we need? */
484
+ }
485
+ OrigFn;
486
+
487
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
488
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
489
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
490
+
491
+ #define VALGRIND_DO_CLIENT_REQUEST( \
492
+ _zzq_rlval, _zzq_default, _zzq_request, \
493
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
494
+ \
495
+ { unsigned int _zzq_args[7]; \
496
+ register unsigned int _zzq_result; \
497
+ register unsigned int* _zzq_ptr; \
498
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
499
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
500
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
501
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
502
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
503
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
504
+ _zzq_args[6] = (unsigned int)(_zzq_default); \
505
+ _zzq_ptr = _zzq_args; \
506
+ __asm__ volatile("mr 4,%1\n\t" \
507
+ "lwz 3, 24(4)\n\t" \
508
+ __SPECIAL_INSTRUCTION_PREAMBLE \
509
+ /* %R3 = client_request ( %R4 ) */ \
510
+ "or 1,1,1\n\t" \
511
+ "mr %0,3" \
512
+ : "=b" (_zzq_result) \
513
+ : "b" (_zzq_ptr) \
514
+ : "r3", "r4", "cc", "memory"); \
515
+ _zzq_rlval = _zzq_result; \
516
+ }
517
+
518
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
519
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
520
+ register unsigned int __addr; \
521
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
522
+ /* %R3 = guest_NRADDR */ \
523
+ "or 2,2,2\n\t" \
524
+ "mr %0,3" \
525
+ : "=b" (__addr) \
526
+ : \
527
+ : "r3", "cc", "memory" \
528
+ ); \
529
+ _zzq_orig->nraddr = __addr; \
530
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
531
+ /* %R3 = guest_NRADDR_GPR2 */ \
532
+ "or 4,4,4\n\t" \
533
+ "mr %0,3" \
534
+ : "=b" (__addr) \
535
+ : \
536
+ : "r3", "cc", "memory" \
537
+ ); \
538
+ _zzq_orig->r2 = __addr; \
539
+ }
540
+
541
+ #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
542
+ __SPECIAL_INSTRUCTION_PREAMBLE \
543
+ /* branch-and-link-to-noredir *%R11 */ \
544
+ "or 3,3,3\n\t"
545
+
546
+ #endif /* PLAT_ppc32_aix5 */
547
+
548
+ /* ------------------------ ppc64-aix5 ------------------------- */
549
+
550
+ #if defined(PLAT_ppc64_aix5)
551
+
552
+ typedef
553
+ struct {
554
+ unsigned long long int nraddr; /* where's the code? */
555
+ unsigned long long int r2; /* what tocptr do we need? */
556
+ }
557
+ OrigFn;
558
+
559
+ #define __SPECIAL_INSTRUCTION_PREAMBLE \
560
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
561
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
562
+
563
+ #define VALGRIND_DO_CLIENT_REQUEST( \
564
+ _zzq_rlval, _zzq_default, _zzq_request, \
565
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
566
+ \
567
+ { unsigned long long int _zzq_args[7]; \
568
+ register unsigned long long int _zzq_result; \
569
+ register unsigned long long int* _zzq_ptr; \
570
+ _zzq_args[0] = (unsigned int long long)(_zzq_request); \
571
+ _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
572
+ _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
573
+ _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
574
+ _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
575
+ _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
576
+ _zzq_args[6] = (unsigned int long long)(_zzq_default); \
577
+ _zzq_ptr = _zzq_args; \
578
+ __asm__ volatile("mr 4,%1\n\t" \
579
+ "ld 3, 48(4)\n\t" \
580
+ __SPECIAL_INSTRUCTION_PREAMBLE \
581
+ /* %R3 = client_request ( %R4 ) */ \
582
+ "or 1,1,1\n\t" \
583
+ "mr %0,3" \
584
+ : "=b" (_zzq_result) \
585
+ : "b" (_zzq_ptr) \
586
+ : "r3", "r4", "cc", "memory"); \
587
+ _zzq_rlval = _zzq_result; \
588
+ }
589
+
590
+ #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
591
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
592
+ register unsigned long long int __addr; \
593
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
594
+ /* %R3 = guest_NRADDR */ \
595
+ "or 2,2,2\n\t" \
596
+ "mr %0,3" \
597
+ : "=b" (__addr) \
598
+ : \
599
+ : "r3", "cc", "memory" \
600
+ ); \
601
+ _zzq_orig->nraddr = __addr; \
602
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
603
+ /* %R3 = guest_NRADDR_GPR2 */ \
604
+ "or 4,4,4\n\t" \
605
+ "mr %0,3" \
606
+ : "=b" (__addr) \
607
+ : \
608
+ : "r3", "cc", "memory" \
609
+ ); \
610
+ _zzq_orig->r2 = __addr; \
611
+ }
612
+
613
+ #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
614
+ __SPECIAL_INSTRUCTION_PREAMBLE \
615
+ /* branch-and-link-to-noredir *%R11 */ \
616
+ "or 3,3,3\n\t"
617
+
618
+ #endif /* PLAT_ppc64_aix5 */
619
+
620
+ /* Insert assembly code for other platforms here... */
621
+
622
+ #endif /* NVALGRIND */
623
+
624
+
625
+ /* ------------------------------------------------------------------ */
626
+ /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
627
+ /* ugly. It's the least-worst tradeoff I can think of. */
628
+ /* ------------------------------------------------------------------ */
629
+
630
+ /* This section defines magic (a.k.a appalling-hack) macros for doing
631
+ guaranteed-no-redirection macros, so as to get from function
632
+ wrappers to the functions they are wrapping. The whole point is to
633
+ construct standard call sequences, but to do the call itself with a
634
+ special no-redirect call pseudo-instruction that the JIT
635
+ understands and handles specially. This section is long and
636
+ repetitious, and I can't see a way to make it shorter.
637
+
638
+ The naming scheme is as follows:
639
+
640
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
641
+
642
+ 'W' stands for "word" and 'v' for "void". Hence there are
643
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
644
+ and for each, the possibility of returning a word-typed result, or
645
+ no result.
646
+ */
647
+
648
+ /* Use these to write the name of your wrapper. NOTE: duplicates
649
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
650
+
651
+ /* Use an extra level of macroisation so as to ensure the soname/fnname
652
+ args are fully macro-expanded before pasting them together. */
653
+ #define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
654
+
655
+ #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
656
+ VG_CONCAT4(_vgwZU_,soname,_,fnname)
657
+
658
+ #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
659
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname)
660
+
661
+ /* Use this macro from within a wrapper function to collect the
662
+ context (address and possibly other info) of the original function.
663
+ Once you have that you can then use it in one of the CALL_FN_
664
+ macros. The type of the argument _lval is OrigFn. */
665
+ #define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
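
To make the wrapping machinery concrete, here is a hedged sketch (editorial, not part of valgrind.h or of this diff) of a wrapper for a hypothetical function int foo(int, int) living in the main executable; per the Valgrind manual's convention, the soname "NONE" stands for code that is not in a shared object:

    #include <stdio.h>
    #include "valgrind.h"

    /* Wrapper for foo() in the main executable (soname "NONE"). */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
    {
        int    result;
        OrigFn fn;
        VALGRIND_GET_ORIG_FN(fn);          /* capture the original foo */
        printf("foo wrapper: args %d %d\n", x, y);
        CALL_FN_W_WW(result, fn, x, y);    /* call foo without re-redirection */
        printf("foo wrapper: result %d\n", result);
        return result;
    }
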
666
+
667
+ /* Derivatives of the main macros below, for calling functions
668
+ returning void. */
669
+
670
+ #define CALL_FN_v_v(fnptr) \
671
+ do { volatile unsigned long _junk; \
672
+ CALL_FN_W_v(_junk,fnptr); } while (0)
673
+
674
+ #define CALL_FN_v_W(fnptr, arg1) \
675
+ do { volatile unsigned long _junk; \
676
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
677
+
678
+ #define CALL_FN_v_WW(fnptr, arg1,arg2) \
679
+ do { volatile unsigned long _junk; \
680
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
681
+
682
+ #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
683
+ do { volatile unsigned long _junk; \
684
+ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
685
+
686
+ #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
687
+ do { volatile unsigned long _junk; \
688
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
689
+
690
+ #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
691
+ do { volatile unsigned long _junk; \
692
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
693
+
694
+ #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
695
+ do { volatile unsigned long _junk; \
696
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
697
+
698
+ #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
699
+ do { volatile unsigned long _junk; \
700
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
701
+
702
+ /* ------------------------- x86-{linux,darwin} ---------------- */
703
+
704
+ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
705
+
706
+ /* These regs are trashed by the hidden call. No need to mention eax
707
+ as gcc can already see that, plus causes gcc to bomb. */
708
+ #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
709
+
710
+ /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
711
+ long) == 4. */
712
+
713
+ #define CALL_FN_W_v(lval, orig) \
714
+ do { \
715
+ volatile OrigFn _orig = (orig); \
716
+ volatile unsigned long _argvec[1]; \
717
+ volatile unsigned long _res; \
718
+ _argvec[0] = (unsigned long)_orig.nraddr; \
719
+ __asm__ volatile( \
720
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
721
+ VALGRIND_CALL_NOREDIR_EAX \
722
+ : /*out*/ "=a" (_res) \
723
+ : /*in*/ "a" (&_argvec[0]) \
724
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
725
+ ); \
726
+ lval = (__typeof__(lval)) _res; \
727
+ } while (0)
728
+
729
+ #define CALL_FN_W_W(lval, orig, arg1) \
730
+ do { \
731
+ volatile OrigFn _orig = (orig); \
732
+ volatile unsigned long _argvec[2]; \
733
+ volatile unsigned long _res; \
734
+ _argvec[0] = (unsigned long)_orig.nraddr; \
735
+ _argvec[1] = (unsigned long)(arg1); \
736
+ __asm__ volatile( \
737
+ "pushl 4(%%eax)\n\t" \
738
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
739
+ VALGRIND_CALL_NOREDIR_EAX \
740
+ "addl $4, %%esp\n" \
741
+ : /*out*/ "=a" (_res) \
742
+ : /*in*/ "a" (&_argvec[0]) \
743
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
744
+ ); \
745
+ lval = (__typeof__(lval)) _res; \
746
+ } while (0)
747
+
748
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
749
+ do { \
750
+ volatile OrigFn _orig = (orig); \
751
+ volatile unsigned long _argvec[3]; \
752
+ volatile unsigned long _res; \
753
+ _argvec[0] = (unsigned long)_orig.nraddr; \
754
+ _argvec[1] = (unsigned long)(arg1); \
755
+ _argvec[2] = (unsigned long)(arg2); \
756
+ __asm__ volatile( \
757
+ "pushl 8(%%eax)\n\t" \
758
+ "pushl 4(%%eax)\n\t" \
759
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
760
+ VALGRIND_CALL_NOREDIR_EAX \
761
+ "addl $8, %%esp\n" \
762
+ : /*out*/ "=a" (_res) \
763
+ : /*in*/ "a" (&_argvec[0]) \
764
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
765
+ ); \
766
+ lval = (__typeof__(lval)) _res; \
767
+ } while (0)
768
+
769
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
770
+ do { \
771
+ volatile OrigFn _orig = (orig); \
772
+ volatile unsigned long _argvec[4]; \
773
+ volatile unsigned long _res; \
774
+ _argvec[0] = (unsigned long)_orig.nraddr; \
775
+ _argvec[1] = (unsigned long)(arg1); \
776
+ _argvec[2] = (unsigned long)(arg2); \
777
+ _argvec[3] = (unsigned long)(arg3); \
778
+ __asm__ volatile( \
779
+ "pushl 12(%%eax)\n\t" \
780
+ "pushl 8(%%eax)\n\t" \
781
+ "pushl 4(%%eax)\n\t" \
782
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
783
+ VALGRIND_CALL_NOREDIR_EAX \
784
+ "addl $12, %%esp\n" \
785
+ : /*out*/ "=a" (_res) \
786
+ : /*in*/ "a" (&_argvec[0]) \
787
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
788
+ ); \
789
+ lval = (__typeof__(lval)) _res; \
790
+ } while (0)
791
+
792
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
793
+ do { \
794
+ volatile OrigFn _orig = (orig); \
795
+ volatile unsigned long _argvec[5]; \
796
+ volatile unsigned long _res; \
797
+ _argvec[0] = (unsigned long)_orig.nraddr; \
798
+ _argvec[1] = (unsigned long)(arg1); \
799
+ _argvec[2] = (unsigned long)(arg2); \
800
+ _argvec[3] = (unsigned long)(arg3); \
801
+ _argvec[4] = (unsigned long)(arg4); \
802
+ __asm__ volatile( \
803
+ "pushl 16(%%eax)\n\t" \
804
+ "pushl 12(%%eax)\n\t" \
805
+ "pushl 8(%%eax)\n\t" \
806
+ "pushl 4(%%eax)\n\t" \
807
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
808
+ VALGRIND_CALL_NOREDIR_EAX \
809
+ "addl $16, %%esp\n" \
810
+ : /*out*/ "=a" (_res) \
811
+ : /*in*/ "a" (&_argvec[0]) \
812
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
813
+ ); \
814
+ lval = (__typeof__(lval)) _res; \
815
+ } while (0)
816
+
817
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
818
+ do { \
819
+ volatile OrigFn _orig = (orig); \
820
+ volatile unsigned long _argvec[6]; \
821
+ volatile unsigned long _res; \
822
+ _argvec[0] = (unsigned long)_orig.nraddr; \
823
+ _argvec[1] = (unsigned long)(arg1); \
824
+ _argvec[2] = (unsigned long)(arg2); \
825
+ _argvec[3] = (unsigned long)(arg3); \
826
+ _argvec[4] = (unsigned long)(arg4); \
827
+ _argvec[5] = (unsigned long)(arg5); \
828
+ __asm__ volatile( \
829
+ "pushl 20(%%eax)\n\t" \
830
+ "pushl 16(%%eax)\n\t" \
831
+ "pushl 12(%%eax)\n\t" \
832
+ "pushl 8(%%eax)\n\t" \
833
+ "pushl 4(%%eax)\n\t" \
834
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
835
+ VALGRIND_CALL_NOREDIR_EAX \
836
+ "addl $20, %%esp\n" \
837
+ : /*out*/ "=a" (_res) \
838
+ : /*in*/ "a" (&_argvec[0]) \
839
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
840
+ ); \
841
+ lval = (__typeof__(lval)) _res; \
842
+ } while (0)
843
+
844
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
845
+ do { \
846
+ volatile OrigFn _orig = (orig); \
847
+ volatile unsigned long _argvec[7]; \
848
+ volatile unsigned long _res; \
849
+ _argvec[0] = (unsigned long)_orig.nraddr; \
850
+ _argvec[1] = (unsigned long)(arg1); \
851
+ _argvec[2] = (unsigned long)(arg2); \
852
+ _argvec[3] = (unsigned long)(arg3); \
853
+ _argvec[4] = (unsigned long)(arg4); \
854
+ _argvec[5] = (unsigned long)(arg5); \
855
+ _argvec[6] = (unsigned long)(arg6); \
856
+ __asm__ volatile( \
857
+ "pushl 24(%%eax)\n\t" \
858
+ "pushl 20(%%eax)\n\t" \
859
+ "pushl 16(%%eax)\n\t" \
860
+ "pushl 12(%%eax)\n\t" \
861
+ "pushl 8(%%eax)\n\t" \
862
+ "pushl 4(%%eax)\n\t" \
863
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
864
+ VALGRIND_CALL_NOREDIR_EAX \
865
+ "addl $24, %%esp\n" \
866
+ : /*out*/ "=a" (_res) \
867
+ : /*in*/ "a" (&_argvec[0]) \
868
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
869
+ ); \
870
+ lval = (__typeof__(lval)) _res; \
871
+ } while (0)
872
+
873
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
874
+ arg7) \
875
+ do { \
876
+ volatile OrigFn _orig = (orig); \
877
+ volatile unsigned long _argvec[8]; \
878
+ volatile unsigned long _res; \
879
+ _argvec[0] = (unsigned long)_orig.nraddr; \
880
+ _argvec[1] = (unsigned long)(arg1); \
881
+ _argvec[2] = (unsigned long)(arg2); \
882
+ _argvec[3] = (unsigned long)(arg3); \
883
+ _argvec[4] = (unsigned long)(arg4); \
884
+ _argvec[5] = (unsigned long)(arg5); \
885
+ _argvec[6] = (unsigned long)(arg6); \
886
+ _argvec[7] = (unsigned long)(arg7); \
887
+ __asm__ volatile( \
888
+ "pushl 28(%%eax)\n\t" \
889
+ "pushl 24(%%eax)\n\t" \
890
+ "pushl 20(%%eax)\n\t" \
891
+ "pushl 16(%%eax)\n\t" \
892
+ "pushl 12(%%eax)\n\t" \
893
+ "pushl 8(%%eax)\n\t" \
894
+ "pushl 4(%%eax)\n\t" \
895
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
896
+ VALGRIND_CALL_NOREDIR_EAX \
897
+ "addl $28, %%esp\n" \
898
+ : /*out*/ "=a" (_res) \
899
+ : /*in*/ "a" (&_argvec[0]) \
900
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
901
+ ); \
902
+ lval = (__typeof__(lval)) _res; \
903
+ } while (0)
904
+
905
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
906
+ arg7,arg8) \
907
+ do { \
908
+ volatile OrigFn _orig = (orig); \
909
+ volatile unsigned long _argvec[9]; \
910
+ volatile unsigned long _res; \
911
+ _argvec[0] = (unsigned long)_orig.nraddr; \
912
+ _argvec[1] = (unsigned long)(arg1); \
913
+ _argvec[2] = (unsigned long)(arg2); \
914
+ _argvec[3] = (unsigned long)(arg3); \
915
+ _argvec[4] = (unsigned long)(arg4); \
916
+ _argvec[5] = (unsigned long)(arg5); \
917
+ _argvec[6] = (unsigned long)(arg6); \
918
+ _argvec[7] = (unsigned long)(arg7); \
919
+ _argvec[8] = (unsigned long)(arg8); \
920
+ __asm__ volatile( \
921
+ "pushl 32(%%eax)\n\t" \
922
+ "pushl 28(%%eax)\n\t" \
923
+ "pushl 24(%%eax)\n\t" \
924
+ "pushl 20(%%eax)\n\t" \
925
+ "pushl 16(%%eax)\n\t" \
926
+ "pushl 12(%%eax)\n\t" \
927
+ "pushl 8(%%eax)\n\t" \
928
+ "pushl 4(%%eax)\n\t" \
929
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
930
+ VALGRIND_CALL_NOREDIR_EAX \
931
+ "addl $32, %%esp\n" \
932
+ : /*out*/ "=a" (_res) \
933
+ : /*in*/ "a" (&_argvec[0]) \
934
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
935
+ ); \
936
+ lval = (__typeof__(lval)) _res; \
937
+ } while (0)
938
+
939
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
940
+ arg7,arg8,arg9) \
941
+ do { \
942
+ volatile OrigFn _orig = (orig); \
943
+ volatile unsigned long _argvec[10]; \
944
+ volatile unsigned long _res; \
945
+ _argvec[0] = (unsigned long)_orig.nraddr; \
946
+ _argvec[1] = (unsigned long)(arg1); \
947
+ _argvec[2] = (unsigned long)(arg2); \
948
+ _argvec[3] = (unsigned long)(arg3); \
949
+ _argvec[4] = (unsigned long)(arg4); \
950
+ _argvec[5] = (unsigned long)(arg5); \
951
+ _argvec[6] = (unsigned long)(arg6); \
952
+ _argvec[7] = (unsigned long)(arg7); \
953
+ _argvec[8] = (unsigned long)(arg8); \
954
+ _argvec[9] = (unsigned long)(arg9); \
955
+ __asm__ volatile( \
956
+ "pushl 36(%%eax)\n\t" \
957
+ "pushl 32(%%eax)\n\t" \
958
+ "pushl 28(%%eax)\n\t" \
959
+ "pushl 24(%%eax)\n\t" \
960
+ "pushl 20(%%eax)\n\t" \
961
+ "pushl 16(%%eax)\n\t" \
962
+ "pushl 12(%%eax)\n\t" \
963
+ "pushl 8(%%eax)\n\t" \
964
+ "pushl 4(%%eax)\n\t" \
965
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
966
+ VALGRIND_CALL_NOREDIR_EAX \
967
+ "addl $36, %%esp\n" \
968
+ : /*out*/ "=a" (_res) \
969
+ : /*in*/ "a" (&_argvec[0]) \
970
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
971
+ ); \
972
+ lval = (__typeof__(lval)) _res; \
973
+ } while (0)
974
+
975
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
976
+ arg7,arg8,arg9,arg10) \
977
+ do { \
978
+ volatile OrigFn _orig = (orig); \
979
+ volatile unsigned long _argvec[11]; \
980
+ volatile unsigned long _res; \
981
+ _argvec[0] = (unsigned long)_orig.nraddr; \
982
+ _argvec[1] = (unsigned long)(arg1); \
983
+ _argvec[2] = (unsigned long)(arg2); \
984
+ _argvec[3] = (unsigned long)(arg3); \
985
+ _argvec[4] = (unsigned long)(arg4); \
986
+ _argvec[5] = (unsigned long)(arg5); \
987
+ _argvec[6] = (unsigned long)(arg6); \
988
+ _argvec[7] = (unsigned long)(arg7); \
989
+ _argvec[8] = (unsigned long)(arg8); \
990
+ _argvec[9] = (unsigned long)(arg9); \
991
+ _argvec[10] = (unsigned long)(arg10); \
992
+ __asm__ volatile( \
993
+ "pushl 40(%%eax)\n\t" \
994
+ "pushl 36(%%eax)\n\t" \
995
+ "pushl 32(%%eax)\n\t" \
996
+ "pushl 28(%%eax)\n\t" \
997
+ "pushl 24(%%eax)\n\t" \
998
+ "pushl 20(%%eax)\n\t" \
999
+ "pushl 16(%%eax)\n\t" \
1000
+ "pushl 12(%%eax)\n\t" \
1001
+ "pushl 8(%%eax)\n\t" \
1002
+ "pushl 4(%%eax)\n\t" \
1003
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1004
+ VALGRIND_CALL_NOREDIR_EAX \
1005
+ "addl $40, %%esp\n" \
1006
+ : /*out*/ "=a" (_res) \
1007
+ : /*in*/ "a" (&_argvec[0]) \
1008
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1009
+ ); \
1010
+ lval = (__typeof__(lval)) _res; \
1011
+ } while (0)
1012
+
1013
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
1014
+ arg6,arg7,arg8,arg9,arg10, \
1015
+ arg11) \
1016
+ do { \
1017
+ volatile OrigFn _orig = (orig); \
1018
+ volatile unsigned long _argvec[12]; \
1019
+ volatile unsigned long _res; \
1020
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1021
+ _argvec[1] = (unsigned long)(arg1); \
1022
+ _argvec[2] = (unsigned long)(arg2); \
1023
+ _argvec[3] = (unsigned long)(arg3); \
1024
+ _argvec[4] = (unsigned long)(arg4); \
1025
+ _argvec[5] = (unsigned long)(arg5); \
1026
+ _argvec[6] = (unsigned long)(arg6); \
1027
+ _argvec[7] = (unsigned long)(arg7); \
1028
+ _argvec[8] = (unsigned long)(arg8); \
1029
+ _argvec[9] = (unsigned long)(arg9); \
1030
+ _argvec[10] = (unsigned long)(arg10); \
1031
+ _argvec[11] = (unsigned long)(arg11); \
1032
+ __asm__ volatile( \
1033
+ "pushl 44(%%eax)\n\t" \
1034
+ "pushl 40(%%eax)\n\t" \
1035
+ "pushl 36(%%eax)\n\t" \
1036
+ "pushl 32(%%eax)\n\t" \
1037
+ "pushl 28(%%eax)\n\t" \
1038
+ "pushl 24(%%eax)\n\t" \
1039
+ "pushl 20(%%eax)\n\t" \
1040
+ "pushl 16(%%eax)\n\t" \
1041
+ "pushl 12(%%eax)\n\t" \
1042
+ "pushl 8(%%eax)\n\t" \
1043
+ "pushl 4(%%eax)\n\t" \
1044
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1045
+ VALGRIND_CALL_NOREDIR_EAX \
1046
+ "addl $44, %%esp\n" \
1047
+ : /*out*/ "=a" (_res) \
1048
+ : /*in*/ "a" (&_argvec[0]) \
1049
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1050
+ ); \
1051
+ lval = (__typeof__(lval)) _res; \
1052
+ } while (0)
1053
+
1054
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
1055
+ arg6,arg7,arg8,arg9,arg10, \
1056
+ arg11,arg12) \
1057
+ do { \
1058
+ volatile OrigFn _orig = (orig); \
1059
+ volatile unsigned long _argvec[13]; \
1060
+ volatile unsigned long _res; \
1061
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1062
+ _argvec[1] = (unsigned long)(arg1); \
1063
+ _argvec[2] = (unsigned long)(arg2); \
1064
+ _argvec[3] = (unsigned long)(arg3); \
1065
+ _argvec[4] = (unsigned long)(arg4); \
1066
+ _argvec[5] = (unsigned long)(arg5); \
1067
+ _argvec[6] = (unsigned long)(arg6); \
1068
+ _argvec[7] = (unsigned long)(arg7); \
1069
+ _argvec[8] = (unsigned long)(arg8); \
1070
+ _argvec[9] = (unsigned long)(arg9); \
1071
+ _argvec[10] = (unsigned long)(arg10); \
1072
+ _argvec[11] = (unsigned long)(arg11); \
1073
+ _argvec[12] = (unsigned long)(arg12); \
1074
+ __asm__ volatile( \
1075
+ "pushl 48(%%eax)\n\t" \
1076
+ "pushl 44(%%eax)\n\t" \
1077
+ "pushl 40(%%eax)\n\t" \
1078
+ "pushl 36(%%eax)\n\t" \
1079
+ "pushl 32(%%eax)\n\t" \
1080
+ "pushl 28(%%eax)\n\t" \
1081
+ "pushl 24(%%eax)\n\t" \
1082
+ "pushl 20(%%eax)\n\t" \
1083
+ "pushl 16(%%eax)\n\t" \
1084
+ "pushl 12(%%eax)\n\t" \
1085
+ "pushl 8(%%eax)\n\t" \
1086
+ "pushl 4(%%eax)\n\t" \
1087
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1088
+ VALGRIND_CALL_NOREDIR_EAX \
1089
+ "addl $48, %%esp\n" \
1090
+ : /*out*/ "=a" (_res) \
1091
+ : /*in*/ "a" (&_argvec[0]) \
1092
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1093
+ ); \
1094
+ lval = (__typeof__(lval)) _res; \
1095
+ } while (0)
1096
+
1097
+ #endif /* PLAT_x86_linux || PLAT_x86_darwin */
1098
+
1099
+ /* ------------------------ amd64-{linux,darwin} --------------- */
1100
+
1101
+ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
1102
+
1103
+ /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
1104
+
1105
+ /* These regs are trashed by the hidden call. */
1106
+ #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
1107
+ "rdi", "r8", "r9", "r10", "r11"
1108
+
1109
+ /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
1110
+ long) == 8. */
1111
+
1112
+ /* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
1113
+ macros. In order not to trash the stack redzone, we need to drop
1114
+ %rsp by 128 before the hidden call, and restore afterwards. The
1115
+ nastyness is that it is only by luck that the stack still appears
1116
+ to be unwindable during the hidden call - since then the behaviour
1117
+ of any routine using this macro does not match what the CFI data
1118
+ says. Sigh.
1119
+
1120
+ Why is this important? Imagine that a wrapper has a stack
1121
+ allocated local, and passes to the hidden call, a pointer to it.
1122
+ Because gcc does not know about the hidden call, it may allocate
1123
+ that local in the redzone. Unfortunately the hidden call may then
1124
+ trash it before it comes to use it. So we must step clear of the
1125
+ redzone, for the duration of the hidden call, to make it safe.
1126
+
1127
+ Probably the same problem afflicts the other redzone-style ABIs too
1128
+ (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
1129
+ self describing (none of this CFI nonsense) so at least messing
1130
+ with the stack pointer doesn't give a danger of non-unwindable
1131
+ stack. */
1132
+
1133
+ #define CALL_FN_W_v(lval, orig) \
1134
+ do { \
1135
+ volatile OrigFn _orig = (orig); \
1136
+ volatile unsigned long _argvec[1]; \
1137
+ volatile unsigned long _res; \
1138
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1139
+ __asm__ volatile( \
1140
+ "subq $128,%%rsp\n\t" \
1141
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1142
+ VALGRIND_CALL_NOREDIR_RAX \
1143
+ "addq $128,%%rsp\n\t" \
1144
+ : /*out*/ "=a" (_res) \
1145
+ : /*in*/ "a" (&_argvec[0]) \
1146
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1147
+ ); \
1148
+ lval = (__typeof__(lval)) _res; \
1149
+ } while (0)
1150
+
1151
+ #define CALL_FN_W_W(lval, orig, arg1) \
1152
+ do { \
1153
+ volatile OrigFn _orig = (orig); \
1154
+ volatile unsigned long _argvec[2]; \
1155
+ volatile unsigned long _res; \
1156
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1157
+ _argvec[1] = (unsigned long)(arg1); \
1158
+ __asm__ volatile( \
1159
+ "subq $128,%%rsp\n\t" \
1160
+ "movq 8(%%rax), %%rdi\n\t" \
1161
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1162
+ VALGRIND_CALL_NOREDIR_RAX \
1163
+ "addq $128,%%rsp\n\t" \
1164
+ : /*out*/ "=a" (_res) \
1165
+ : /*in*/ "a" (&_argvec[0]) \
1166
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1167
+ ); \
1168
+ lval = (__typeof__(lval)) _res; \
1169
+ } while (0)
1170
+
1171
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
1172
+ do { \
1173
+ volatile OrigFn _orig = (orig); \
1174
+ volatile unsigned long _argvec[3]; \
1175
+ volatile unsigned long _res; \
1176
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1177
+ _argvec[1] = (unsigned long)(arg1); \
1178
+ _argvec[2] = (unsigned long)(arg2); \
1179
+ __asm__ volatile( \
1180
+ "subq $128,%%rsp\n\t" \
1181
+ "movq 16(%%rax), %%rsi\n\t" \
1182
+ "movq 8(%%rax), %%rdi\n\t" \
1183
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1184
+ VALGRIND_CALL_NOREDIR_RAX \
1185
+ "addq $128,%%rsp\n\t" \
1186
+ : /*out*/ "=a" (_res) \
1187
+ : /*in*/ "a" (&_argvec[0]) \
1188
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1189
+ ); \
1190
+ lval = (__typeof__(lval)) _res; \
1191
+ } while (0)
1192
+
1193
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
1194
+ do { \
1195
+ volatile OrigFn _orig = (orig); \
1196
+ volatile unsigned long _argvec[4]; \
1197
+ volatile unsigned long _res; \
1198
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1199
+ _argvec[1] = (unsigned long)(arg1); \
1200
+ _argvec[2] = (unsigned long)(arg2); \
1201
+ _argvec[3] = (unsigned long)(arg3); \
1202
+ __asm__ volatile( \
1203
+ "subq $128,%%rsp\n\t" \
1204
+ "movq 24(%%rax), %%rdx\n\t" \
1205
+ "movq 16(%%rax), %%rsi\n\t" \
1206
+ "movq 8(%%rax), %%rdi\n\t" \
1207
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1208
+ VALGRIND_CALL_NOREDIR_RAX \
1209
+ "addq $128,%%rsp\n\t" \
1210
+ : /*out*/ "=a" (_res) \
1211
+ : /*in*/ "a" (&_argvec[0]) \
1212
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1213
+ ); \
1214
+ lval = (__typeof__(lval)) _res; \
1215
+ } while (0)
1216
+
1217
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
1218
+ do { \
1219
+ volatile OrigFn _orig = (orig); \
1220
+ volatile unsigned long _argvec[5]; \
1221
+ volatile unsigned long _res; \
1222
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1223
+ _argvec[1] = (unsigned long)(arg1); \
1224
+ _argvec[2] = (unsigned long)(arg2); \
1225
+ _argvec[3] = (unsigned long)(arg3); \
1226
+ _argvec[4] = (unsigned long)(arg4); \
1227
+ __asm__ volatile( \
1228
+ "subq $128,%%rsp\n\t" \
1229
+ "movq 32(%%rax), %%rcx\n\t" \
1230
+ "movq 24(%%rax), %%rdx\n\t" \
1231
+ "movq 16(%%rax), %%rsi\n\t" \
1232
+ "movq 8(%%rax), %%rdi\n\t" \
1233
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1234
+ VALGRIND_CALL_NOREDIR_RAX \
1235
+ "addq $128,%%rsp\n\t" \
1236
+ : /*out*/ "=a" (_res) \
1237
+ : /*in*/ "a" (&_argvec[0]) \
1238
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1239
+ ); \
1240
+ lval = (__typeof__(lval)) _res; \
1241
+ } while (0)
1242
+
1243
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
1244
+ do { \
1245
+ volatile OrigFn _orig = (orig); \
1246
+ volatile unsigned long _argvec[6]; \
1247
+ volatile unsigned long _res; \
1248
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1249
+ _argvec[1] = (unsigned long)(arg1); \
1250
+ _argvec[2] = (unsigned long)(arg2); \
1251
+ _argvec[3] = (unsigned long)(arg3); \
1252
+ _argvec[4] = (unsigned long)(arg4); \
1253
+ _argvec[5] = (unsigned long)(arg5); \
1254
+ __asm__ volatile( \
1255
+ "subq $128,%%rsp\n\t" \
1256
+ "movq 40(%%rax), %%r8\n\t" \
1257
+ "movq 32(%%rax), %%rcx\n\t" \
1258
+ "movq 24(%%rax), %%rdx\n\t" \
1259
+ "movq 16(%%rax), %%rsi\n\t" \
1260
+ "movq 8(%%rax), %%rdi\n\t" \
1261
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1262
+ VALGRIND_CALL_NOREDIR_RAX \
1263
+ "addq $128,%%rsp\n\t" \
1264
+ : /*out*/ "=a" (_res) \
1265
+ : /*in*/ "a" (&_argvec[0]) \
1266
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1267
+ ); \
1268
+ lval = (__typeof__(lval)) _res; \
1269
+ } while (0)
1270
+
1271
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
1272
+ do { \
1273
+ volatile OrigFn _orig = (orig); \
1274
+ volatile unsigned long _argvec[7]; \
1275
+ volatile unsigned long _res; \
1276
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1277
+ _argvec[1] = (unsigned long)(arg1); \
1278
+ _argvec[2] = (unsigned long)(arg2); \
1279
+ _argvec[3] = (unsigned long)(arg3); \
1280
+ _argvec[4] = (unsigned long)(arg4); \
1281
+ _argvec[5] = (unsigned long)(arg5); \
1282
+ _argvec[6] = (unsigned long)(arg6); \
1283
+ __asm__ volatile( \
1284
+ "subq $128,%%rsp\n\t" \
1285
+ "movq 48(%%rax), %%r9\n\t" \
1286
+ "movq 40(%%rax), %%r8\n\t" \
1287
+ "movq 32(%%rax), %%rcx\n\t" \
1288
+ "movq 24(%%rax), %%rdx\n\t" \
1289
+ "movq 16(%%rax), %%rsi\n\t" \
1290
+ "movq 8(%%rax), %%rdi\n\t" \
1291
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1292
+ "addq $128,%%rsp\n\t" \
1293
+ VALGRIND_CALL_NOREDIR_RAX \
1294
+ : /*out*/ "=a" (_res) \
1295
+ : /*in*/ "a" (&_argvec[0]) \
1296
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1297
+ ); \
1298
+ lval = (__typeof__(lval)) _res; \
1299
+ } while (0)
1300
+
1301
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1302
+ arg7) \
1303
+ do { \
1304
+ volatile OrigFn _orig = (orig); \
1305
+ volatile unsigned long _argvec[8]; \
1306
+ volatile unsigned long _res; \
1307
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1308
+ _argvec[1] = (unsigned long)(arg1); \
1309
+ _argvec[2] = (unsigned long)(arg2); \
1310
+ _argvec[3] = (unsigned long)(arg3); \
1311
+ _argvec[4] = (unsigned long)(arg4); \
1312
+ _argvec[5] = (unsigned long)(arg5); \
1313
+ _argvec[6] = (unsigned long)(arg6); \
1314
+ _argvec[7] = (unsigned long)(arg7); \
1315
+ __asm__ volatile( \
1316
+ "subq $128,%%rsp\n\t" \
1317
+ "pushq 56(%%rax)\n\t" \
1318
+ "movq 48(%%rax), %%r9\n\t" \
1319
+ "movq 40(%%rax), %%r8\n\t" \
1320
+ "movq 32(%%rax), %%rcx\n\t" \
1321
+ "movq 24(%%rax), %%rdx\n\t" \
1322
+ "movq 16(%%rax), %%rsi\n\t" \
1323
+ "movq 8(%%rax), %%rdi\n\t" \
1324
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1325
+ VALGRIND_CALL_NOREDIR_RAX \
1326
+ "addq $8, %%rsp\n" \
1327
+ "addq $128,%%rsp\n\t" \
1328
+ : /*out*/ "=a" (_res) \
1329
+ : /*in*/ "a" (&_argvec[0]) \
1330
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1331
+ ); \
1332
+ lval = (__typeof__(lval)) _res; \
1333
+ } while (0)
1334
+
1335
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1336
+ arg7,arg8) \
1337
+ do { \
1338
+ volatile OrigFn _orig = (orig); \
1339
+ volatile unsigned long _argvec[9]; \
1340
+ volatile unsigned long _res; \
1341
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1342
+ _argvec[1] = (unsigned long)(arg1); \
1343
+ _argvec[2] = (unsigned long)(arg2); \
1344
+ _argvec[3] = (unsigned long)(arg3); \
1345
+ _argvec[4] = (unsigned long)(arg4); \
1346
+ _argvec[5] = (unsigned long)(arg5); \
1347
+ _argvec[6] = (unsigned long)(arg6); \
1348
+ _argvec[7] = (unsigned long)(arg7); \
1349
+ _argvec[8] = (unsigned long)(arg8); \
1350
+ __asm__ volatile( \
1351
+ "subq $128,%%rsp\n\t" \
1352
+ "pushq 64(%%rax)\n\t" \
1353
+ "pushq 56(%%rax)\n\t" \
1354
+ "movq 48(%%rax), %%r9\n\t" \
1355
+ "movq 40(%%rax), %%r8\n\t" \
1356
+ "movq 32(%%rax), %%rcx\n\t" \
1357
+ "movq 24(%%rax), %%rdx\n\t" \
1358
+ "movq 16(%%rax), %%rsi\n\t" \
1359
+ "movq 8(%%rax), %%rdi\n\t" \
1360
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1361
+ VALGRIND_CALL_NOREDIR_RAX \
1362
+ "addq $16, %%rsp\n" \
1363
+ "addq $128,%%rsp\n\t" \
1364
+ : /*out*/ "=a" (_res) \
1365
+ : /*in*/ "a" (&_argvec[0]) \
1366
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1367
+ ); \
1368
+ lval = (__typeof__(lval)) _res; \
1369
+ } while (0)
1370
+
1371
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1372
+ arg7,arg8,arg9) \
1373
+ do { \
1374
+ volatile OrigFn _orig = (orig); \
1375
+ volatile unsigned long _argvec[10]; \
1376
+ volatile unsigned long _res; \
1377
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1378
+ _argvec[1] = (unsigned long)(arg1); \
1379
+ _argvec[2] = (unsigned long)(arg2); \
1380
+ _argvec[3] = (unsigned long)(arg3); \
1381
+ _argvec[4] = (unsigned long)(arg4); \
1382
+ _argvec[5] = (unsigned long)(arg5); \
1383
+ _argvec[6] = (unsigned long)(arg6); \
1384
+ _argvec[7] = (unsigned long)(arg7); \
1385
+ _argvec[8] = (unsigned long)(arg8); \
1386
+ _argvec[9] = (unsigned long)(arg9); \
1387
+ __asm__ volatile( \
1388
+ "subq $128,%%rsp\n\t" \
1389
+ "pushq 72(%%rax)\n\t" \
1390
+ "pushq 64(%%rax)\n\t" \
1391
+ "pushq 56(%%rax)\n\t" \
1392
+ "movq 48(%%rax), %%r9\n\t" \
1393
+ "movq 40(%%rax), %%r8\n\t" \
1394
+ "movq 32(%%rax), %%rcx\n\t" \
1395
+ "movq 24(%%rax), %%rdx\n\t" \
1396
+ "movq 16(%%rax), %%rsi\n\t" \
1397
+ "movq 8(%%rax), %%rdi\n\t" \
1398
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1399
+ VALGRIND_CALL_NOREDIR_RAX \
1400
+ "addq $24, %%rsp\n" \
1401
+ "addq $128,%%rsp\n\t" \
1402
+ : /*out*/ "=a" (_res) \
1403
+ : /*in*/ "a" (&_argvec[0]) \
1404
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1405
+ ); \
1406
+ lval = (__typeof__(lval)) _res; \
1407
+ } while (0)
1408
+
1409
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1410
+ arg7,arg8,arg9,arg10) \
1411
+ do { \
1412
+ volatile OrigFn _orig = (orig); \
1413
+ volatile unsigned long _argvec[11]; \
1414
+ volatile unsigned long _res; \
1415
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1416
+ _argvec[1] = (unsigned long)(arg1); \
1417
+ _argvec[2] = (unsigned long)(arg2); \
1418
+ _argvec[3] = (unsigned long)(arg3); \
1419
+ _argvec[4] = (unsigned long)(arg4); \
1420
+ _argvec[5] = (unsigned long)(arg5); \
1421
+ _argvec[6] = (unsigned long)(arg6); \
1422
+ _argvec[7] = (unsigned long)(arg7); \
1423
+ _argvec[8] = (unsigned long)(arg8); \
1424
+ _argvec[9] = (unsigned long)(arg9); \
1425
+ _argvec[10] = (unsigned long)(arg10); \
1426
+ __asm__ volatile( \
1427
+ "subq $128,%%rsp\n\t" \
1428
+ "pushq 80(%%rax)\n\t" \
1429
+ "pushq 72(%%rax)\n\t" \
1430
+ "pushq 64(%%rax)\n\t" \
1431
+ "pushq 56(%%rax)\n\t" \
1432
+ "movq 48(%%rax), %%r9\n\t" \
1433
+ "movq 40(%%rax), %%r8\n\t" \
1434
+ "movq 32(%%rax), %%rcx\n\t" \
1435
+ "movq 24(%%rax), %%rdx\n\t" \
1436
+ "movq 16(%%rax), %%rsi\n\t" \
1437
+ "movq 8(%%rax), %%rdi\n\t" \
1438
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1439
+ VALGRIND_CALL_NOREDIR_RAX \
1440
+ "addq $32, %%rsp\n" \
1441
+ "addq $128,%%rsp\n\t" \
1442
+ : /*out*/ "=a" (_res) \
1443
+ : /*in*/ "a" (&_argvec[0]) \
1444
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1445
+ ); \
1446
+ lval = (__typeof__(lval)) _res; \
1447
+ } while (0)
1448
+
1449
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1450
+ arg7,arg8,arg9,arg10,arg11) \
1451
+ do { \
1452
+ volatile OrigFn _orig = (orig); \
1453
+ volatile unsigned long _argvec[12]; \
1454
+ volatile unsigned long _res; \
1455
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1456
+ _argvec[1] = (unsigned long)(arg1); \
1457
+ _argvec[2] = (unsigned long)(arg2); \
1458
+ _argvec[3] = (unsigned long)(arg3); \
1459
+ _argvec[4] = (unsigned long)(arg4); \
1460
+ _argvec[5] = (unsigned long)(arg5); \
1461
+ _argvec[6] = (unsigned long)(arg6); \
1462
+ _argvec[7] = (unsigned long)(arg7); \
1463
+ _argvec[8] = (unsigned long)(arg8); \
1464
+ _argvec[9] = (unsigned long)(arg9); \
1465
+ _argvec[10] = (unsigned long)(arg10); \
1466
+ _argvec[11] = (unsigned long)(arg11); \
1467
+ __asm__ volatile( \
1468
+ "subq $128,%%rsp\n\t" \
1469
+ "pushq 88(%%rax)\n\t" \
1470
+ "pushq 80(%%rax)\n\t" \
1471
+ "pushq 72(%%rax)\n\t" \
1472
+ "pushq 64(%%rax)\n\t" \
1473
+ "pushq 56(%%rax)\n\t" \
1474
+ "movq 48(%%rax), %%r9\n\t" \
1475
+ "movq 40(%%rax), %%r8\n\t" \
1476
+ "movq 32(%%rax), %%rcx\n\t" \
1477
+ "movq 24(%%rax), %%rdx\n\t" \
1478
+ "movq 16(%%rax), %%rsi\n\t" \
1479
+ "movq 8(%%rax), %%rdi\n\t" \
1480
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1481
+ VALGRIND_CALL_NOREDIR_RAX \
1482
+ "addq $40, %%rsp\n" \
1483
+ "addq $128,%%rsp\n\t" \
1484
+ : /*out*/ "=a" (_res) \
1485
+ : /*in*/ "a" (&_argvec[0]) \
1486
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1487
+ ); \
1488
+ lval = (__typeof__(lval)) _res; \
1489
+ } while (0)
1490
+
1491
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1492
+ arg7,arg8,arg9,arg10,arg11,arg12) \
1493
+ do { \
1494
+ volatile OrigFn _orig = (orig); \
1495
+ volatile unsigned long _argvec[13]; \
1496
+ volatile unsigned long _res; \
1497
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1498
+ _argvec[1] = (unsigned long)(arg1); \
1499
+ _argvec[2] = (unsigned long)(arg2); \
1500
+ _argvec[3] = (unsigned long)(arg3); \
1501
+ _argvec[4] = (unsigned long)(arg4); \
1502
+ _argvec[5] = (unsigned long)(arg5); \
1503
+ _argvec[6] = (unsigned long)(arg6); \
1504
+ _argvec[7] = (unsigned long)(arg7); \
1505
+ _argvec[8] = (unsigned long)(arg8); \
1506
+ _argvec[9] = (unsigned long)(arg9); \
1507
+ _argvec[10] = (unsigned long)(arg10); \
1508
+ _argvec[11] = (unsigned long)(arg11); \
1509
+ _argvec[12] = (unsigned long)(arg12); \
1510
+ __asm__ volatile( \
1511
+ "subq $128,%%rsp\n\t" \
1512
+ "pushq 96(%%rax)\n\t" \
1513
+ "pushq 88(%%rax)\n\t" \
1514
+ "pushq 80(%%rax)\n\t" \
1515
+ "pushq 72(%%rax)\n\t" \
1516
+ "pushq 64(%%rax)\n\t" \
1517
+ "pushq 56(%%rax)\n\t" \
1518
+ "movq 48(%%rax), %%r9\n\t" \
1519
+ "movq 40(%%rax), %%r8\n\t" \
1520
+ "movq 32(%%rax), %%rcx\n\t" \
1521
+ "movq 24(%%rax), %%rdx\n\t" \
1522
+ "movq 16(%%rax), %%rsi\n\t" \
1523
+ "movq 8(%%rax), %%rdi\n\t" \
1524
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1525
+ VALGRIND_CALL_NOREDIR_RAX \
1526
+ "addq $48, %%rsp\n" \
1527
+ "addq $128,%%rsp\n\t" \
1528
+ : /*out*/ "=a" (_res) \
1529
+ : /*in*/ "a" (&_argvec[0]) \
1530
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1531
+ ); \
1532
+ lval = (__typeof__(lval)) _res; \
1533
+ } while (0)
1534
+
1535
+ #endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
1536
+
1537
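For orientation: the CALL_FN_W_* macros defined above are intended to be used from a Valgrind function wrapper, along the lines of the wrapping pattern described in the Valgrind manual. A minimal sketch follows; the wrapped function foo and the NONE soname tag are placeholders for illustration only, not anything defined in this header or in this gem.

#include "valgrind.h"

/* Intercepts a hypothetical int foo(int, int) and re-enters the real
   foo without Valgrind's redirection.  OrigFn, VALGRIND_GET_ORIG_FN,
   I_WRAP_SONAME_FN_ZU and CALL_FN_W_WW all come from valgrind.h. */
int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
{
   OrigFn fn;
   int    result;
   VALGRIND_GET_ORIG_FN(fn);         /* capture the original entry point */
   CALL_FN_W_WW(result, fn, x, y);   /* word-sized result, two word args */
   return result;
}

The wrapper source stays the same on every architecture; each platform block in this header only has to supply the matching CALL_FN_W_* bodies.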
+ /* ------------------------ ppc32-linux ------------------------ */
1538
+
1539
+ #if defined(PLAT_ppc32_linux)
1540
+
1541
+ /* This is useful for finding out about the on-stack stuff:
1542
+
1543
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
1544
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
1545
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
1546
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
1547
+
1548
+ int g9 ( void ) {
1549
+ return f9(11,22,33,44,55,66,77,88,99);
1550
+ }
1551
+ int g10 ( void ) {
1552
+ return f10(11,22,33,44,55,66,77,88,99,110);
1553
+ }
1554
+ int g11 ( void ) {
1555
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
1556
+ }
1557
+ int g12 ( void ) {
1558
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
1559
+ }
1560
+ */
1561
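As an aside, the probe in the comment above works as a standalone file: compile it for ppc32 and read the generated assembly, and the stw instructions before each call show which stack offsets (relative to r1) receive the ninth and later arguments. Those are the offsets the CALL_FN_W_9W .. CALL_FN_W_12W macros below hard-code (8, 12, 16 and 20 bytes above r1). A copy-and-paste version, assuming a ppc32 compiler is available:

/* probe.c: build with a ppc32 compiler using -O -S and inspect the .s
   output to see where arguments 9..12 are stored on the stack. */
extern int f9  ( int,int,int,int,int,int,int,int,int );
extern int f10 ( int,int,int,int,int,int,int,int,int,int );
extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );

int g9  ( void ) { return f9 (11,22,33,44,55,66,77,88,99); }
int g10 ( void ) { return f10(11,22,33,44,55,66,77,88,99,110); }
int g11 ( void ) { return f11(11,22,33,44,55,66,77,88,99,110,121); }
int g12 ( void ) { return f12(11,22,33,44,55,66,77,88,99,110,121,132); }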
+
1562
+ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
1563
+
1564
+ /* These regs are trashed by the hidden call. */
1565
+ #define __CALLER_SAVED_REGS \
1566
+ "lr", "ctr", "xer", \
1567
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
1568
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
1569
+ "r11", "r12", "r13"
1570
+
1571
+ /* These CALL_FN_ macros assume that on ppc32-linux,
1572
+ sizeof(unsigned long) == 4. */
1573
+
1574
+ #define CALL_FN_W_v(lval, orig) \
1575
+ do { \
1576
+ volatile OrigFn _orig = (orig); \
1577
+ volatile unsigned long _argvec[1]; \
1578
+ volatile unsigned long _res; \
1579
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1580
+ __asm__ volatile( \
1581
+ "mr 11,%1\n\t" \
1582
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1583
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1584
+ "mr %0,3" \
1585
+ : /*out*/ "=r" (_res) \
1586
+ : /*in*/ "r" (&_argvec[0]) \
1587
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1588
+ ); \
1589
+ lval = (__typeof__(lval)) _res; \
1590
+ } while (0)
1591
+
1592
+ #define CALL_FN_W_W(lval, orig, arg1) \
1593
+ do { \
1594
+ volatile OrigFn _orig = (orig); \
1595
+ volatile unsigned long _argvec[2]; \
1596
+ volatile unsigned long _res; \
1597
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1598
+ _argvec[1] = (unsigned long)arg1; \
1599
+ __asm__ volatile( \
1600
+ "mr 11,%1\n\t" \
1601
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1602
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1603
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1604
+ "mr %0,3" \
1605
+ : /*out*/ "=r" (_res) \
1606
+ : /*in*/ "r" (&_argvec[0]) \
1607
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1608
+ ); \
1609
+ lval = (__typeof__(lval)) _res; \
1610
+ } while (0)
1611
+
1612
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
1613
+ do { \
1614
+ volatile OrigFn _orig = (orig); \
1615
+ volatile unsigned long _argvec[3]; \
1616
+ volatile unsigned long _res; \
1617
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1618
+ _argvec[1] = (unsigned long)arg1; \
1619
+ _argvec[2] = (unsigned long)arg2; \
1620
+ __asm__ volatile( \
1621
+ "mr 11,%1\n\t" \
1622
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1623
+ "lwz 4,8(11)\n\t" \
1624
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1625
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1626
+ "mr %0,3" \
1627
+ : /*out*/ "=r" (_res) \
1628
+ : /*in*/ "r" (&_argvec[0]) \
1629
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1630
+ ); \
1631
+ lval = (__typeof__(lval)) _res; \
1632
+ } while (0)
1633
+
1634
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
1635
+ do { \
1636
+ volatile OrigFn _orig = (orig); \
1637
+ volatile unsigned long _argvec[4]; \
1638
+ volatile unsigned long _res; \
1639
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1640
+ _argvec[1] = (unsigned long)arg1; \
1641
+ _argvec[2] = (unsigned long)arg2; \
1642
+ _argvec[3] = (unsigned long)arg3; \
1643
+ __asm__ volatile( \
1644
+ "mr 11,%1\n\t" \
1645
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1646
+ "lwz 4,8(11)\n\t" \
1647
+ "lwz 5,12(11)\n\t" \
1648
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1649
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1650
+ "mr %0,3" \
1651
+ : /*out*/ "=r" (_res) \
1652
+ : /*in*/ "r" (&_argvec[0]) \
1653
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1654
+ ); \
1655
+ lval = (__typeof__(lval)) _res; \
1656
+ } while (0)
1657
+
1658
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
1659
+ do { \
1660
+ volatile OrigFn _orig = (orig); \
1661
+ volatile unsigned long _argvec[5]; \
1662
+ volatile unsigned long _res; \
1663
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1664
+ _argvec[1] = (unsigned long)arg1; \
1665
+ _argvec[2] = (unsigned long)arg2; \
1666
+ _argvec[3] = (unsigned long)arg3; \
1667
+ _argvec[4] = (unsigned long)arg4; \
1668
+ __asm__ volatile( \
1669
+ "mr 11,%1\n\t" \
1670
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1671
+ "lwz 4,8(11)\n\t" \
1672
+ "lwz 5,12(11)\n\t" \
1673
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1674
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1675
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1676
+ "mr %0,3" \
1677
+ : /*out*/ "=r" (_res) \
1678
+ : /*in*/ "r" (&_argvec[0]) \
1679
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1680
+ ); \
1681
+ lval = (__typeof__(lval)) _res; \
1682
+ } while (0)
1683
+
1684
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
1685
+ do { \
1686
+ volatile OrigFn _orig = (orig); \
1687
+ volatile unsigned long _argvec[6]; \
1688
+ volatile unsigned long _res; \
1689
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1690
+ _argvec[1] = (unsigned long)arg1; \
1691
+ _argvec[2] = (unsigned long)arg2; \
1692
+ _argvec[3] = (unsigned long)arg3; \
1693
+ _argvec[4] = (unsigned long)arg4; \
1694
+ _argvec[5] = (unsigned long)arg5; \
1695
+ __asm__ volatile( \
1696
+ "mr 11,%1\n\t" \
1697
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1698
+ "lwz 4,8(11)\n\t" \
1699
+ "lwz 5,12(11)\n\t" \
1700
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1701
+ "lwz 7,20(11)\n\t" \
1702
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1703
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1704
+ "mr %0,3" \
1705
+ : /*out*/ "=r" (_res) \
1706
+ : /*in*/ "r" (&_argvec[0]) \
1707
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1708
+ ); \
1709
+ lval = (__typeof__(lval)) _res; \
1710
+ } while (0)
1711
+
1712
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
1713
+ do { \
1714
+ volatile OrigFn _orig = (orig); \
1715
+ volatile unsigned long _argvec[7]; \
1716
+ volatile unsigned long _res; \
1717
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1718
+ _argvec[1] = (unsigned long)arg1; \
1719
+ _argvec[2] = (unsigned long)arg2; \
1720
+ _argvec[3] = (unsigned long)arg3; \
1721
+ _argvec[4] = (unsigned long)arg4; \
1722
+ _argvec[5] = (unsigned long)arg5; \
1723
+ _argvec[6] = (unsigned long)arg6; \
1724
+ __asm__ volatile( \
1725
+ "mr 11,%1\n\t" \
1726
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1727
+ "lwz 4,8(11)\n\t" \
1728
+ "lwz 5,12(11)\n\t" \
1729
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1730
+ "lwz 7,20(11)\n\t" \
1731
+ "lwz 8,24(11)\n\t" \
1732
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1733
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1734
+ "mr %0,3" \
1735
+ : /*out*/ "=r" (_res) \
1736
+ : /*in*/ "r" (&_argvec[0]) \
1737
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1738
+ ); \
1739
+ lval = (__typeof__(lval)) _res; \
1740
+ } while (0)
1741
+
1742
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1743
+ arg7) \
1744
+ do { \
1745
+ volatile OrigFn _orig = (orig); \
1746
+ volatile unsigned long _argvec[8]; \
1747
+ volatile unsigned long _res; \
1748
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1749
+ _argvec[1] = (unsigned long)arg1; \
1750
+ _argvec[2] = (unsigned long)arg2; \
1751
+ _argvec[3] = (unsigned long)arg3; \
1752
+ _argvec[4] = (unsigned long)arg4; \
1753
+ _argvec[5] = (unsigned long)arg5; \
1754
+ _argvec[6] = (unsigned long)arg6; \
1755
+ _argvec[7] = (unsigned long)arg7; \
1756
+ __asm__ volatile( \
1757
+ "mr 11,%1\n\t" \
1758
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1759
+ "lwz 4,8(11)\n\t" \
1760
+ "lwz 5,12(11)\n\t" \
1761
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1762
+ "lwz 7,20(11)\n\t" \
1763
+ "lwz 8,24(11)\n\t" \
1764
+ "lwz 9,28(11)\n\t" \
1765
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1766
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1767
+ "mr %0,3" \
1768
+ : /*out*/ "=r" (_res) \
1769
+ : /*in*/ "r" (&_argvec[0]) \
1770
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1771
+ ); \
1772
+ lval = (__typeof__(lval)) _res; \
1773
+ } while (0)
1774
+
1775
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1776
+ arg7,arg8) \
1777
+ do { \
1778
+ volatile OrigFn _orig = (orig); \
1779
+ volatile unsigned long _argvec[9]; \
1780
+ volatile unsigned long _res; \
1781
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1782
+ _argvec[1] = (unsigned long)arg1; \
1783
+ _argvec[2] = (unsigned long)arg2; \
1784
+ _argvec[3] = (unsigned long)arg3; \
1785
+ _argvec[4] = (unsigned long)arg4; \
1786
+ _argvec[5] = (unsigned long)arg5; \
1787
+ _argvec[6] = (unsigned long)arg6; \
1788
+ _argvec[7] = (unsigned long)arg7; \
1789
+ _argvec[8] = (unsigned long)arg8; \
1790
+ __asm__ volatile( \
1791
+ "mr 11,%1\n\t" \
1792
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1793
+ "lwz 4,8(11)\n\t" \
1794
+ "lwz 5,12(11)\n\t" \
1795
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1796
+ "lwz 7,20(11)\n\t" \
1797
+ "lwz 8,24(11)\n\t" \
1798
+ "lwz 9,28(11)\n\t" \
1799
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1800
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1801
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1802
+ "mr %0,3" \
1803
+ : /*out*/ "=r" (_res) \
1804
+ : /*in*/ "r" (&_argvec[0]) \
1805
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1806
+ ); \
1807
+ lval = (__typeof__(lval)) _res; \
1808
+ } while (0)
1809
+
1810
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1811
+ arg7,arg8,arg9) \
1812
+ do { \
1813
+ volatile OrigFn _orig = (orig); \
1814
+ volatile unsigned long _argvec[10]; \
1815
+ volatile unsigned long _res; \
1816
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1817
+ _argvec[1] = (unsigned long)arg1; \
1818
+ _argvec[2] = (unsigned long)arg2; \
1819
+ _argvec[3] = (unsigned long)arg3; \
1820
+ _argvec[4] = (unsigned long)arg4; \
1821
+ _argvec[5] = (unsigned long)arg5; \
1822
+ _argvec[6] = (unsigned long)arg6; \
1823
+ _argvec[7] = (unsigned long)arg7; \
1824
+ _argvec[8] = (unsigned long)arg8; \
1825
+ _argvec[9] = (unsigned long)arg9; \
1826
+ __asm__ volatile( \
1827
+ "mr 11,%1\n\t" \
1828
+ "addi 1,1,-16\n\t" \
1829
+ /* arg9 */ \
1830
+ "lwz 3,36(11)\n\t" \
1831
+ "stw 3,8(1)\n\t" \
1832
+ /* args1-8 */ \
1833
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1834
+ "lwz 4,8(11)\n\t" \
1835
+ "lwz 5,12(11)\n\t" \
1836
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1837
+ "lwz 7,20(11)\n\t" \
1838
+ "lwz 8,24(11)\n\t" \
1839
+ "lwz 9,28(11)\n\t" \
1840
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1841
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1842
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1843
+ "addi 1,1,16\n\t" \
1844
+ "mr %0,3" \
1845
+ : /*out*/ "=r" (_res) \
1846
+ : /*in*/ "r" (&_argvec[0]) \
1847
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1848
+ ); \
1849
+ lval = (__typeof__(lval)) _res; \
1850
+ } while (0)
1851
+
1852
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1853
+ arg7,arg8,arg9,arg10) \
1854
+ do { \
1855
+ volatile OrigFn _orig = (orig); \
1856
+ volatile unsigned long _argvec[11]; \
1857
+ volatile unsigned long _res; \
1858
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1859
+ _argvec[1] = (unsigned long)arg1; \
1860
+ _argvec[2] = (unsigned long)arg2; \
1861
+ _argvec[3] = (unsigned long)arg3; \
1862
+ _argvec[4] = (unsigned long)arg4; \
1863
+ _argvec[5] = (unsigned long)arg5; \
1864
+ _argvec[6] = (unsigned long)arg6; \
1865
+ _argvec[7] = (unsigned long)arg7; \
1866
+ _argvec[8] = (unsigned long)arg8; \
1867
+ _argvec[9] = (unsigned long)arg9; \
1868
+ _argvec[10] = (unsigned long)arg10; \
1869
+ __asm__ volatile( \
1870
+ "mr 11,%1\n\t" \
1871
+ "addi 1,1,-16\n\t" \
1872
+ /* arg10 */ \
1873
+ "lwz 3,40(11)\n\t" \
1874
+ "stw 3,12(1)\n\t" \
1875
+ /* arg9 */ \
1876
+ "lwz 3,36(11)\n\t" \
1877
+ "stw 3,8(1)\n\t" \
1878
+ /* args1-8 */ \
1879
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1880
+ "lwz 4,8(11)\n\t" \
1881
+ "lwz 5,12(11)\n\t" \
1882
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1883
+ "lwz 7,20(11)\n\t" \
1884
+ "lwz 8,24(11)\n\t" \
1885
+ "lwz 9,28(11)\n\t" \
1886
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1887
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1888
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1889
+ "addi 1,1,16\n\t" \
1890
+ "mr %0,3" \
1891
+ : /*out*/ "=r" (_res) \
1892
+ : /*in*/ "r" (&_argvec[0]) \
1893
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1894
+ ); \
1895
+ lval = (__typeof__(lval)) _res; \
1896
+ } while (0)
1897
+
1898
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1899
+ arg7,arg8,arg9,arg10,arg11) \
1900
+ do { \
1901
+ volatile OrigFn _orig = (orig); \
1902
+ volatile unsigned long _argvec[12]; \
1903
+ volatile unsigned long _res; \
1904
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1905
+ _argvec[1] = (unsigned long)arg1; \
1906
+ _argvec[2] = (unsigned long)arg2; \
1907
+ _argvec[3] = (unsigned long)arg3; \
1908
+ _argvec[4] = (unsigned long)arg4; \
1909
+ _argvec[5] = (unsigned long)arg5; \
1910
+ _argvec[6] = (unsigned long)arg6; \
1911
+ _argvec[7] = (unsigned long)arg7; \
1912
+ _argvec[8] = (unsigned long)arg8; \
1913
+ _argvec[9] = (unsigned long)arg9; \
1914
+ _argvec[10] = (unsigned long)arg10; \
1915
+ _argvec[11] = (unsigned long)arg11; \
1916
+ __asm__ volatile( \
1917
+ "mr 11,%1\n\t" \
1918
+ "addi 1,1,-32\n\t" \
1919
+ /* arg11 */ \
1920
+ "lwz 3,44(11)\n\t" \
1921
+ "stw 3,16(1)\n\t" \
1922
+ /* arg10 */ \
1923
+ "lwz 3,40(11)\n\t" \
1924
+ "stw 3,12(1)\n\t" \
1925
+ /* arg9 */ \
1926
+ "lwz 3,36(11)\n\t" \
1927
+ "stw 3,8(1)\n\t" \
1928
+ /* args1-8 */ \
1929
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1930
+ "lwz 4,8(11)\n\t" \
1931
+ "lwz 5,12(11)\n\t" \
1932
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1933
+ "lwz 7,20(11)\n\t" \
1934
+ "lwz 8,24(11)\n\t" \
1935
+ "lwz 9,28(11)\n\t" \
1936
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1937
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1938
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1939
+ "addi 1,1,32\n\t" \
1940
+ "mr %0,3" \
1941
+ : /*out*/ "=r" (_res) \
1942
+ : /*in*/ "r" (&_argvec[0]) \
1943
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1944
+ ); \
1945
+ lval = (__typeof__(lval)) _res; \
1946
+ } while (0)
1947
+
1948
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
1949
+ arg7,arg8,arg9,arg10,arg11,arg12) \
1950
+ do { \
1951
+ volatile OrigFn _orig = (orig); \
1952
+ volatile unsigned long _argvec[13]; \
1953
+ volatile unsigned long _res; \
1954
+ _argvec[0] = (unsigned long)_orig.nraddr; \
1955
+ _argvec[1] = (unsigned long)arg1; \
1956
+ _argvec[2] = (unsigned long)arg2; \
1957
+ _argvec[3] = (unsigned long)arg3; \
1958
+ _argvec[4] = (unsigned long)arg4; \
1959
+ _argvec[5] = (unsigned long)arg5; \
1960
+ _argvec[6] = (unsigned long)arg6; \
1961
+ _argvec[7] = (unsigned long)arg7; \
1962
+ _argvec[8] = (unsigned long)arg8; \
1963
+ _argvec[9] = (unsigned long)arg9; \
1964
+ _argvec[10] = (unsigned long)arg10; \
1965
+ _argvec[11] = (unsigned long)arg11; \
1966
+ _argvec[12] = (unsigned long)arg12; \
1967
+ __asm__ volatile( \
1968
+ "mr 11,%1\n\t" \
1969
+ "addi 1,1,-32\n\t" \
1970
+ /* arg12 */ \
1971
+ "lwz 3,48(11)\n\t" \
1972
+ "stw 3,20(1)\n\t" \
1973
+ /* arg11 */ \
1974
+ "lwz 3,44(11)\n\t" \
1975
+ "stw 3,16(1)\n\t" \
1976
+ /* arg10 */ \
1977
+ "lwz 3,40(11)\n\t" \
1978
+ "stw 3,12(1)\n\t" \
1979
+ /* arg9 */ \
1980
+ "lwz 3,36(11)\n\t" \
1981
+ "stw 3,8(1)\n\t" \
1982
+ /* args1-8 */ \
1983
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1984
+ "lwz 4,8(11)\n\t" \
1985
+ "lwz 5,12(11)\n\t" \
1986
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1987
+ "lwz 7,20(11)\n\t" \
1988
+ "lwz 8,24(11)\n\t" \
1989
+ "lwz 9,28(11)\n\t" \
1990
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1991
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
1992
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
1993
+ "addi 1,1,32\n\t" \
1994
+ "mr %0,3" \
1995
+ : /*out*/ "=r" (_res) \
1996
+ : /*in*/ "r" (&_argvec[0]) \
1997
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
1998
+ ); \
1999
+ lval = (__typeof__(lval)) _res; \
2000
+ } while (0)
2001
+
2002
+ #endif /* PLAT_ppc32_linux */
2003
+
2004
+ /* ------------------------ ppc64-linux ------------------------ */
2005
+
2006
+ #if defined(PLAT_ppc64_linux)
2007
+
2008
+ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
2009
+
2010
+ /* These regs are trashed by the hidden call. */
2011
+ #define __CALLER_SAVED_REGS \
2012
+ "lr", "ctr", "xer", \
2013
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
2014
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
2015
+ "r11", "r12", "r13"
2016
+
2017
+ /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
2018
+ long) == 8. */
2019
+
2020
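The ld/std offsets used throughout the ppc64 macros below follow from one layout decision: %1 (and hence r11) is loaded with &_argvec[2], not &_argvec[0]. Summarised as a comment, based only on the assignments visible in this section:

/* With 8-byte longs and r11 = &_argvec[2]:
 *
 *   _argvec[0]   scratch: caller's r2 saved here        std 2,-16(11)
 *   _argvec[1]   callee's TOC pointer (_orig.r2)        ld  2, -8(11)
 *   _argvec[2]   callee's entry point (_orig.nraddr)    ld 11,  0(11)
 *   _argvec[3..] arg1, arg2, ...                        ld  3,  8(11), ...
 *
 * After the branch-and-link, r11 is reloaded from %1 and the caller's
 * r2 is restored from -16(11).
 */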
+ #define CALL_FN_W_v(lval, orig) \
2021
+ do { \
2022
+ volatile OrigFn _orig = (orig); \
2023
+ volatile unsigned long _argvec[3+0]; \
2024
+ volatile unsigned long _res; \
2025
+ /* _argvec[0] holds current r2 across the call */ \
2026
+ _argvec[1] = (unsigned long)_orig.r2; \
2027
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2028
+ __asm__ volatile( \
2029
+ "mr 11,%1\n\t" \
2030
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2031
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2032
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2033
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2034
+ "mr 11,%1\n\t" \
2035
+ "mr %0,3\n\t" \
2036
+ "ld 2,-16(11)" /* restore tocptr */ \
2037
+ : /*out*/ "=r" (_res) \
2038
+ : /*in*/ "r" (&_argvec[2]) \
2039
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2040
+ ); \
2041
+ lval = (__typeof__(lval)) _res; \
2042
+ } while (0)
2043
+
2044
+ #define CALL_FN_W_W(lval, orig, arg1) \
2045
+ do { \
2046
+ volatile OrigFn _orig = (orig); \
2047
+ volatile unsigned long _argvec[3+1]; \
2048
+ volatile unsigned long _res; \
2049
+ /* _argvec[0] holds current r2 across the call */ \
2050
+ _argvec[1] = (unsigned long)_orig.r2; \
2051
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2052
+ _argvec[2+1] = (unsigned long)arg1; \
2053
+ __asm__ volatile( \
2054
+ "mr 11,%1\n\t" \
2055
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2056
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2057
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2058
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2059
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2060
+ "mr 11,%1\n\t" \
2061
+ "mr %0,3\n\t" \
2062
+ "ld 2,-16(11)" /* restore tocptr */ \
2063
+ : /*out*/ "=r" (_res) \
2064
+ : /*in*/ "r" (&_argvec[2]) \
2065
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2066
+ ); \
2067
+ lval = (__typeof__(lval)) _res; \
2068
+ } while (0)
2069
+
2070
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
2071
+ do { \
2072
+ volatile OrigFn _orig = (orig); \
2073
+ volatile unsigned long _argvec[3+2]; \
2074
+ volatile unsigned long _res; \
2075
+ /* _argvec[0] holds current r2 across the call */ \
2076
+ _argvec[1] = (unsigned long)_orig.r2; \
2077
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2078
+ _argvec[2+1] = (unsigned long)arg1; \
2079
+ _argvec[2+2] = (unsigned long)arg2; \
2080
+ __asm__ volatile( \
2081
+ "mr 11,%1\n\t" \
2082
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2083
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2084
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2085
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2086
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2087
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2088
+ "mr 11,%1\n\t" \
2089
+ "mr %0,3\n\t" \
2090
+ "ld 2,-16(11)" /* restore tocptr */ \
2091
+ : /*out*/ "=r" (_res) \
2092
+ : /*in*/ "r" (&_argvec[2]) \
2093
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2094
+ ); \
2095
+ lval = (__typeof__(lval)) _res; \
2096
+ } while (0)
2097
+
2098
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
2099
+ do { \
2100
+ volatile OrigFn _orig = (orig); \
2101
+ volatile unsigned long _argvec[3+3]; \
2102
+ volatile unsigned long _res; \
2103
+ /* _argvec[0] holds current r2 across the call */ \
2104
+ _argvec[1] = (unsigned long)_orig.r2; \
2105
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2106
+ _argvec[2+1] = (unsigned long)arg1; \
2107
+ _argvec[2+2] = (unsigned long)arg2; \
2108
+ _argvec[2+3] = (unsigned long)arg3; \
2109
+ __asm__ volatile( \
2110
+ "mr 11,%1\n\t" \
2111
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2112
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2113
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2114
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2115
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2116
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2117
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2118
+ "mr 11,%1\n\t" \
2119
+ "mr %0,3\n\t" \
2120
+ "ld 2,-16(11)" /* restore tocptr */ \
2121
+ : /*out*/ "=r" (_res) \
2122
+ : /*in*/ "r" (&_argvec[2]) \
2123
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2124
+ ); \
2125
+ lval = (__typeof__(lval)) _res; \
2126
+ } while (0)
2127
+
2128
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
2129
+ do { \
2130
+ volatile OrigFn _orig = (orig); \
2131
+ volatile unsigned long _argvec[3+4]; \
2132
+ volatile unsigned long _res; \
2133
+ /* _argvec[0] holds current r2 across the call */ \
2134
+ _argvec[1] = (unsigned long)_orig.r2; \
2135
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2136
+ _argvec[2+1] = (unsigned long)arg1; \
2137
+ _argvec[2+2] = (unsigned long)arg2; \
2138
+ _argvec[2+3] = (unsigned long)arg3; \
2139
+ _argvec[2+4] = (unsigned long)arg4; \
2140
+ __asm__ volatile( \
2141
+ "mr 11,%1\n\t" \
2142
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2143
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2144
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2145
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2146
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2147
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2148
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2149
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2150
+ "mr 11,%1\n\t" \
2151
+ "mr %0,3\n\t" \
2152
+ "ld 2,-16(11)" /* restore tocptr */ \
2153
+ : /*out*/ "=r" (_res) \
2154
+ : /*in*/ "r" (&_argvec[2]) \
2155
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2156
+ ); \
2157
+ lval = (__typeof__(lval)) _res; \
2158
+ } while (0)
2159
+
2160
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
2161
+ do { \
2162
+ volatile OrigFn _orig = (orig); \
2163
+ volatile unsigned long _argvec[3+5]; \
2164
+ volatile unsigned long _res; \
2165
+ /* _argvec[0] holds current r2 across the call */ \
2166
+ _argvec[1] = (unsigned long)_orig.r2; \
2167
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2168
+ _argvec[2+1] = (unsigned long)arg1; \
2169
+ _argvec[2+2] = (unsigned long)arg2; \
2170
+ _argvec[2+3] = (unsigned long)arg3; \
2171
+ _argvec[2+4] = (unsigned long)arg4; \
2172
+ _argvec[2+5] = (unsigned long)arg5; \
2173
+ __asm__ volatile( \
2174
+ "mr 11,%1\n\t" \
2175
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2176
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2177
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2178
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2179
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2180
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2181
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2182
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2183
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2184
+ "mr 11,%1\n\t" \
2185
+ "mr %0,3\n\t" \
2186
+ "ld 2,-16(11)" /* restore tocptr */ \
2187
+ : /*out*/ "=r" (_res) \
2188
+ : /*in*/ "r" (&_argvec[2]) \
2189
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2190
+ ); \
2191
+ lval = (__typeof__(lval)) _res; \
2192
+ } while (0)
2193
+
2194
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
2195
+ do { \
2196
+ volatile OrigFn _orig = (orig); \
2197
+ volatile unsigned long _argvec[3+6]; \
2198
+ volatile unsigned long _res; \
2199
+ /* _argvec[0] holds current r2 across the call */ \
2200
+ _argvec[1] = (unsigned long)_orig.r2; \
2201
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2202
+ _argvec[2+1] = (unsigned long)arg1; \
2203
+ _argvec[2+2] = (unsigned long)arg2; \
2204
+ _argvec[2+3] = (unsigned long)arg3; \
2205
+ _argvec[2+4] = (unsigned long)arg4; \
2206
+ _argvec[2+5] = (unsigned long)arg5; \
2207
+ _argvec[2+6] = (unsigned long)arg6; \
2208
+ __asm__ volatile( \
2209
+ "mr 11,%1\n\t" \
2210
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2211
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2212
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2213
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2214
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2215
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2216
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2217
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2218
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2219
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2220
+ "mr 11,%1\n\t" \
2221
+ "mr %0,3\n\t" \
2222
+ "ld 2,-16(11)" /* restore tocptr */ \
2223
+ : /*out*/ "=r" (_res) \
2224
+ : /*in*/ "r" (&_argvec[2]) \
2225
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2226
+ ); \
2227
+ lval = (__typeof__(lval)) _res; \
2228
+ } while (0)
2229
+
2230
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2231
+ arg7) \
2232
+ do { \
2233
+ volatile OrigFn _orig = (orig); \
2234
+ volatile unsigned long _argvec[3+7]; \
2235
+ volatile unsigned long _res; \
2236
+ /* _argvec[0] holds current r2 across the call */ \
2237
+ _argvec[1] = (unsigned long)_orig.r2; \
2238
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2239
+ _argvec[2+1] = (unsigned long)arg1; \
2240
+ _argvec[2+2] = (unsigned long)arg2; \
2241
+ _argvec[2+3] = (unsigned long)arg3; \
2242
+ _argvec[2+4] = (unsigned long)arg4; \
2243
+ _argvec[2+5] = (unsigned long)arg5; \
2244
+ _argvec[2+6] = (unsigned long)arg6; \
2245
+ _argvec[2+7] = (unsigned long)arg7; \
2246
+ __asm__ volatile( \
2247
+ "mr 11,%1\n\t" \
2248
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2249
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2250
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2251
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2252
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2253
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2254
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2255
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2256
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2257
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2258
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2259
+ "mr 11,%1\n\t" \
2260
+ "mr %0,3\n\t" \
2261
+ "ld 2,-16(11)" /* restore tocptr */ \
2262
+ : /*out*/ "=r" (_res) \
2263
+ : /*in*/ "r" (&_argvec[2]) \
2264
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2265
+ ); \
2266
+ lval = (__typeof__(lval)) _res; \
2267
+ } while (0)
2268
+
2269
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2270
+ arg7,arg8) \
2271
+ do { \
2272
+ volatile OrigFn _orig = (orig); \
2273
+ volatile unsigned long _argvec[3+8]; \
2274
+ volatile unsigned long _res; \
2275
+ /* _argvec[0] holds current r2 across the call */ \
2276
+ _argvec[1] = (unsigned long)_orig.r2; \
2277
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2278
+ _argvec[2+1] = (unsigned long)arg1; \
2279
+ _argvec[2+2] = (unsigned long)arg2; \
2280
+ _argvec[2+3] = (unsigned long)arg3; \
2281
+ _argvec[2+4] = (unsigned long)arg4; \
2282
+ _argvec[2+5] = (unsigned long)arg5; \
2283
+ _argvec[2+6] = (unsigned long)arg6; \
2284
+ _argvec[2+7] = (unsigned long)arg7; \
2285
+ _argvec[2+8] = (unsigned long)arg8; \
2286
+ __asm__ volatile( \
2287
+ "mr 11,%1\n\t" \
2288
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2289
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2290
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2291
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2292
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2293
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2294
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2295
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2296
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2297
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2298
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2299
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2300
+ "mr 11,%1\n\t" \
2301
+ "mr %0,3\n\t" \
2302
+ "ld 2,-16(11)" /* restore tocptr */ \
2303
+ : /*out*/ "=r" (_res) \
2304
+ : /*in*/ "r" (&_argvec[2]) \
2305
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2306
+ ); \
2307
+ lval = (__typeof__(lval)) _res; \
2308
+ } while (0)
2309
+
2310
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2311
+ arg7,arg8,arg9) \
2312
+ do { \
2313
+ volatile OrigFn _orig = (orig); \
2314
+ volatile unsigned long _argvec[3+9]; \
2315
+ volatile unsigned long _res; \
2316
+ /* _argvec[0] holds current r2 across the call */ \
2317
+ _argvec[1] = (unsigned long)_orig.r2; \
2318
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2319
+ _argvec[2+1] = (unsigned long)arg1; \
2320
+ _argvec[2+2] = (unsigned long)arg2; \
2321
+ _argvec[2+3] = (unsigned long)arg3; \
2322
+ _argvec[2+4] = (unsigned long)arg4; \
2323
+ _argvec[2+5] = (unsigned long)arg5; \
2324
+ _argvec[2+6] = (unsigned long)arg6; \
2325
+ _argvec[2+7] = (unsigned long)arg7; \
2326
+ _argvec[2+8] = (unsigned long)arg8; \
2327
+ _argvec[2+9] = (unsigned long)arg9; \
2328
+ __asm__ volatile( \
2329
+ "mr 11,%1\n\t" \
2330
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2331
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2332
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
2333
+ /* arg9 */ \
2334
+ "ld 3,72(11)\n\t" \
2335
+ "std 3,112(1)\n\t" \
2336
+ /* args1-8 */ \
2337
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2338
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2339
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2340
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2341
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2342
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2343
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2344
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2345
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2346
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2347
+ "mr 11,%1\n\t" \
2348
+ "mr %0,3\n\t" \
2349
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
2350
+ "addi 1,1,128" /* restore frame */ \
2351
+ : /*out*/ "=r" (_res) \
2352
+ : /*in*/ "r" (&_argvec[2]) \
2353
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2354
+ ); \
2355
+ lval = (__typeof__(lval)) _res; \
2356
+ } while (0)
2357
+
2358
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2359
+ arg7,arg8,arg9,arg10) \
2360
+ do { \
2361
+ volatile OrigFn _orig = (orig); \
2362
+ volatile unsigned long _argvec[3+10]; \
2363
+ volatile unsigned long _res; \
2364
+ /* _argvec[0] holds current r2 across the call */ \
2365
+ _argvec[1] = (unsigned long)_orig.r2; \
2366
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2367
+ _argvec[2+1] = (unsigned long)arg1; \
2368
+ _argvec[2+2] = (unsigned long)arg2; \
2369
+ _argvec[2+3] = (unsigned long)arg3; \
2370
+ _argvec[2+4] = (unsigned long)arg4; \
2371
+ _argvec[2+5] = (unsigned long)arg5; \
2372
+ _argvec[2+6] = (unsigned long)arg6; \
2373
+ _argvec[2+7] = (unsigned long)arg7; \
2374
+ _argvec[2+8] = (unsigned long)arg8; \
2375
+ _argvec[2+9] = (unsigned long)arg9; \
2376
+ _argvec[2+10] = (unsigned long)arg10; \
2377
+ __asm__ volatile( \
2378
+ "mr 11,%1\n\t" \
2379
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2380
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2381
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
2382
+ /* arg10 */ \
2383
+ "ld 3,80(11)\n\t" \
2384
+ "std 3,120(1)\n\t" \
2385
+ /* arg9 */ \
2386
+ "ld 3,72(11)\n\t" \
2387
+ "std 3,112(1)\n\t" \
2388
+ /* args1-8 */ \
2389
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2390
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2391
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2392
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2393
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2394
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2395
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2396
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2397
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2398
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2399
+ "mr 11,%1\n\t" \
2400
+ "mr %0,3\n\t" \
2401
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
2402
+ "addi 1,1,128" /* restore frame */ \
2403
+ : /*out*/ "=r" (_res) \
2404
+ : /*in*/ "r" (&_argvec[2]) \
2405
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2406
+ ); \
2407
+ lval = (__typeof__(lval)) _res; \
2408
+ } while (0)
2409
+
2410
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2411
+ arg7,arg8,arg9,arg10,arg11) \
2412
+ do { \
2413
+ volatile OrigFn _orig = (orig); \
2414
+ volatile unsigned long _argvec[3+11]; \
2415
+ volatile unsigned long _res; \
2416
+ /* _argvec[0] holds current r2 across the call */ \
2417
+ _argvec[1] = (unsigned long)_orig.r2; \
2418
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2419
+ _argvec[2+1] = (unsigned long)arg1; \
2420
+ _argvec[2+2] = (unsigned long)arg2; \
2421
+ _argvec[2+3] = (unsigned long)arg3; \
2422
+ _argvec[2+4] = (unsigned long)arg4; \
2423
+ _argvec[2+5] = (unsigned long)arg5; \
2424
+ _argvec[2+6] = (unsigned long)arg6; \
2425
+ _argvec[2+7] = (unsigned long)arg7; \
2426
+ _argvec[2+8] = (unsigned long)arg8; \
2427
+ _argvec[2+9] = (unsigned long)arg9; \
2428
+ _argvec[2+10] = (unsigned long)arg10; \
2429
+ _argvec[2+11] = (unsigned long)arg11; \
2430
+ __asm__ volatile( \
2431
+ "mr 11,%1\n\t" \
2432
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2433
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2434
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
2435
+ /* arg11 */ \
2436
+ "ld 3,88(11)\n\t" \
2437
+ "std 3,128(1)\n\t" \
2438
+ /* arg10 */ \
2439
+ "ld 3,80(11)\n\t" \
2440
+ "std 3,120(1)\n\t" \
2441
+ /* arg9 */ \
2442
+ "ld 3,72(11)\n\t" \
2443
+ "std 3,112(1)\n\t" \
2444
+ /* args1-8 */ \
2445
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2446
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2447
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2448
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2449
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2450
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2451
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2452
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2453
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2454
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2455
+ "mr 11,%1\n\t" \
2456
+ "mr %0,3\n\t" \
2457
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
2458
+ "addi 1,1,144" /* restore frame */ \
2459
+ : /*out*/ "=r" (_res) \
2460
+ : /*in*/ "r" (&_argvec[2]) \
2461
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2462
+ ); \
2463
+ lval = (__typeof__(lval)) _res; \
2464
+ } while (0)
2465
+
2466
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2467
+ arg7,arg8,arg9,arg10,arg11,arg12) \
2468
+ do { \
2469
+ volatile OrigFn _orig = (orig); \
2470
+ volatile unsigned long _argvec[3+12]; \
2471
+ volatile unsigned long _res; \
2472
+ /* _argvec[0] holds current r2 across the call */ \
2473
+ _argvec[1] = (unsigned long)_orig.r2; \
2474
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2475
+ _argvec[2+1] = (unsigned long)arg1; \
2476
+ _argvec[2+2] = (unsigned long)arg2; \
2477
+ _argvec[2+3] = (unsigned long)arg3; \
2478
+ _argvec[2+4] = (unsigned long)arg4; \
2479
+ _argvec[2+5] = (unsigned long)arg5; \
2480
+ _argvec[2+6] = (unsigned long)arg6; \
2481
+ _argvec[2+7] = (unsigned long)arg7; \
2482
+ _argvec[2+8] = (unsigned long)arg8; \
2483
+ _argvec[2+9] = (unsigned long)arg9; \
2484
+ _argvec[2+10] = (unsigned long)arg10; \
2485
+ _argvec[2+11] = (unsigned long)arg11; \
2486
+ _argvec[2+12] = (unsigned long)arg12; \
2487
+ __asm__ volatile( \
2488
+ "mr 11,%1\n\t" \
2489
+ "std 2,-16(11)\n\t" /* save tocptr */ \
2490
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2491
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
2492
+ /* arg12 */ \
2493
+ "ld 3,96(11)\n\t" \
2494
+ "std 3,136(1)\n\t" \
2495
+ /* arg11 */ \
2496
+ "ld 3,88(11)\n\t" \
2497
+ "std 3,128(1)\n\t" \
2498
+ /* arg10 */ \
2499
+ "ld 3,80(11)\n\t" \
2500
+ "std 3,120(1)\n\t" \
2501
+ /* arg9 */ \
2502
+ "ld 3,72(11)\n\t" \
2503
+ "std 3,112(1)\n\t" \
2504
+ /* args1-8 */ \
2505
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2506
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2507
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2508
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2509
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2510
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2511
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2512
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2513
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
2514
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2515
+ "mr 11,%1\n\t" \
2516
+ "mr %0,3\n\t" \
2517
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
2518
+ "addi 1,1,144" /* restore frame */ \
2519
+ : /*out*/ "=r" (_res) \
2520
+ : /*in*/ "r" (&_argvec[2]) \
2521
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2522
+ ); \
2523
+ lval = (__typeof__(lval)) _res; \
2524
+ } while (0)
2525
+
2526
+ #endif /* PLAT_ppc64_linux */
2527
+
2528
+ /* ------------------------- arm-linux ------------------------- */
2529
+
2530
+ #if defined(PLAT_arm_linux)
2531
+
2532
+ /* These regs are trashed by the hidden call. */
2533
+ #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
2534
+
2535
+ /* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
2536
+ long) == 4. */
2537
+
2538
+ #define CALL_FN_W_v(lval, orig) \
2539
+ do { \
2540
+ volatile OrigFn _orig = (orig); \
2541
+ volatile unsigned long _argvec[1]; \
2542
+ volatile unsigned long _res; \
2543
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2544
+ __asm__ volatile( \
2545
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2546
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2547
+ "mov %0, r0\n" \
2548
+ : /*out*/ "=r" (_res) \
2549
+ : /*in*/ "0" (&_argvec[0]) \
2550
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2551
+ ); \
2552
+ lval = (__typeof__(lval)) _res; \
2553
+ } while (0)
2554
+
2555
+ #define CALL_FN_W_W(lval, orig, arg1) \
2556
+ do { \
2557
+ volatile OrigFn _orig = (orig); \
2558
+ volatile unsigned long _argvec[2]; \
2559
+ volatile unsigned long _res; \
2560
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2561
+ _argvec[1] = (unsigned long)(arg1); \
2562
+ __asm__ volatile( \
2563
+ "ldr r0, [%1, #4] \n\t" \
2564
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2565
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2566
+ "mov %0, r0\n" \
2567
+ : /*out*/ "=r" (_res) \
2568
+ : /*in*/ "0" (&_argvec[0]) \
2569
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2570
+ ); \
2571
+ lval = (__typeof__(lval)) _res; \
2572
+ } while (0)
2573
+
2574
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
2575
+ do { \
2576
+ volatile OrigFn _orig = (orig); \
2577
+ volatile unsigned long _argvec[3]; \
2578
+ volatile unsigned long _res; \
2579
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2580
+ _argvec[1] = (unsigned long)(arg1); \
2581
+ _argvec[2] = (unsigned long)(arg2); \
2582
+ __asm__ volatile( \
2583
+ "ldr r0, [%1, #4] \n\t" \
2584
+ "ldr r1, [%1, #8] \n\t" \
2585
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2586
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2587
+ "mov %0, r0\n" \
2588
+ : /*out*/ "=r" (_res) \
2589
+ : /*in*/ "0" (&_argvec[0]) \
2590
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2591
+ ); \
2592
+ lval = (__typeof__(lval)) _res; \
2593
+ } while (0)
2594
+
2595
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
2596
+ do { \
2597
+ volatile OrigFn _orig = (orig); \
2598
+ volatile unsigned long _argvec[4]; \
2599
+ volatile unsigned long _res; \
2600
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2601
+ _argvec[1] = (unsigned long)(arg1); \
2602
+ _argvec[2] = (unsigned long)(arg2); \
2603
+ _argvec[3] = (unsigned long)(arg3); \
2604
+ __asm__ volatile( \
2605
+ "ldr r0, [%1, #4] \n\t" \
2606
+ "ldr r1, [%1, #8] \n\t" \
2607
+ "ldr r2, [%1, #12] \n\t" \
2608
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2609
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2610
+ "mov %0, r0\n" \
2611
+ : /*out*/ "=r" (_res) \
2612
+ : /*in*/ "0" (&_argvec[0]) \
2613
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2614
+ ); \
2615
+ lval = (__typeof__(lval)) _res; \
2616
+ } while (0)
2617
+
2618
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
2619
+ do { \
2620
+ volatile OrigFn _orig = (orig); \
2621
+ volatile unsigned long _argvec[5]; \
2622
+ volatile unsigned long _res; \
2623
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2624
+ _argvec[1] = (unsigned long)(arg1); \
2625
+ _argvec[2] = (unsigned long)(arg2); \
2626
+ _argvec[3] = (unsigned long)(arg3); \
2627
+ _argvec[4] = (unsigned long)(arg4); \
2628
+ __asm__ volatile( \
2629
+ "ldr r0, [%1, #4] \n\t" \
2630
+ "ldr r1, [%1, #8] \n\t" \
2631
+ "ldr r2, [%1, #12] \n\t" \
2632
+ "ldr r3, [%1, #16] \n\t" \
2633
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2634
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2635
+ "mov %0, r0" \
2636
+ : /*out*/ "=r" (_res) \
2637
+ : /*in*/ "0" (&_argvec[0]) \
2638
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2639
+ ); \
2640
+ lval = (__typeof__(lval)) _res; \
2641
+ } while (0)
2642
+
2643
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
2644
+ do { \
2645
+ volatile OrigFn _orig = (orig); \
2646
+ volatile unsigned long _argvec[6]; \
2647
+ volatile unsigned long _res; \
2648
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2649
+ _argvec[1] = (unsigned long)(arg1); \
2650
+ _argvec[2] = (unsigned long)(arg2); \
2651
+ _argvec[3] = (unsigned long)(arg3); \
2652
+ _argvec[4] = (unsigned long)(arg4); \
2653
+ _argvec[5] = (unsigned long)(arg5); \
2654
+ __asm__ volatile( \
2655
+ "ldr r0, [%1, #20] \n\t" \
2656
+ "push {r0} \n\t" \
2657
+ "ldr r0, [%1, #4] \n\t" \
2658
+ "ldr r1, [%1, #8] \n\t" \
2659
+ "ldr r2, [%1, #12] \n\t" \
2660
+ "ldr r3, [%1, #16] \n\t" \
2661
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2662
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2663
+ "add sp, sp, #4 \n\t" \
2664
+ "mov %0, r0" \
2665
+ : /*out*/ "=r" (_res) \
2666
+ : /*in*/ "0" (&_argvec[0]) \
2667
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2668
+ ); \
2669
+ lval = (__typeof__(lval)) _res; \
2670
+ } while (0)
2671
+
2672
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
2673
+ do { \
2674
+ volatile OrigFn _orig = (orig); \
2675
+ volatile unsigned long _argvec[7]; \
2676
+ volatile unsigned long _res; \
2677
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2678
+ _argvec[1] = (unsigned long)(arg1); \
2679
+ _argvec[2] = (unsigned long)(arg2); \
2680
+ _argvec[3] = (unsigned long)(arg3); \
2681
+ _argvec[4] = (unsigned long)(arg4); \
2682
+ _argvec[5] = (unsigned long)(arg5); \
2683
+ _argvec[6] = (unsigned long)(arg6); \
2684
+ __asm__ volatile( \
2685
+ "ldr r0, [%1, #20] \n\t" \
2686
+ "ldr r1, [%1, #24] \n\t" \
2687
+ "push {r0, r1} \n\t" \
2688
+ "ldr r0, [%1, #4] \n\t" \
2689
+ "ldr r1, [%1, #8] \n\t" \
2690
+ "ldr r2, [%1, #12] \n\t" \
2691
+ "ldr r3, [%1, #16] \n\t" \
2692
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2693
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2694
+ "add sp, sp, #8 \n\t" \
2695
+ "mov %0, r0" \
2696
+ : /*out*/ "=r" (_res) \
2697
+ : /*in*/ "0" (&_argvec[0]) \
2698
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2699
+ ); \
2700
+ lval = (__typeof__(lval)) _res; \
2701
+ } while (0)
2702
+
2703
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2704
+ arg7) \
2705
+ do { \
2706
+ volatile OrigFn _orig = (orig); \
2707
+ volatile unsigned long _argvec[8]; \
2708
+ volatile unsigned long _res; \
2709
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2710
+ _argvec[1] = (unsigned long)(arg1); \
2711
+ _argvec[2] = (unsigned long)(arg2); \
2712
+ _argvec[3] = (unsigned long)(arg3); \
2713
+ _argvec[4] = (unsigned long)(arg4); \
2714
+ _argvec[5] = (unsigned long)(arg5); \
2715
+ _argvec[6] = (unsigned long)(arg6); \
2716
+ _argvec[7] = (unsigned long)(arg7); \
2717
+ __asm__ volatile( \
2718
+ "ldr r0, [%1, #20] \n\t" \
2719
+ "ldr r1, [%1, #24] \n\t" \
2720
+ "ldr r2, [%1, #28] \n\t" \
2721
+ "push {r0, r1, r2} \n\t" \
2722
+ "ldr r0, [%1, #4] \n\t" \
2723
+ "ldr r1, [%1, #8] \n\t" \
2724
+ "ldr r2, [%1, #12] \n\t" \
2725
+ "ldr r3, [%1, #16] \n\t" \
2726
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2727
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2728
+ "add sp, sp, #12 \n\t" \
2729
+ "mov %0, r0" \
2730
+ : /*out*/ "=r" (_res) \
2731
+ : /*in*/ "0" (&_argvec[0]) \
2732
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2733
+ ); \
2734
+ lval = (__typeof__(lval)) _res; \
2735
+ } while (0)
2736
+
2737
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2738
+ arg7,arg8) \
2739
+ do { \
2740
+ volatile OrigFn _orig = (orig); \
2741
+ volatile unsigned long _argvec[9]; \
2742
+ volatile unsigned long _res; \
2743
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2744
+ _argvec[1] = (unsigned long)(arg1); \
2745
+ _argvec[2] = (unsigned long)(arg2); \
2746
+ _argvec[3] = (unsigned long)(arg3); \
2747
+ _argvec[4] = (unsigned long)(arg4); \
2748
+ _argvec[5] = (unsigned long)(arg5); \
2749
+ _argvec[6] = (unsigned long)(arg6); \
2750
+ _argvec[7] = (unsigned long)(arg7); \
2751
+ _argvec[8] = (unsigned long)(arg8); \
2752
+ __asm__ volatile( \
2753
+ "ldr r0, [%1, #20] \n\t" \
2754
+ "ldr r1, [%1, #24] \n\t" \
2755
+ "ldr r2, [%1, #28] \n\t" \
2756
+ "ldr r3, [%1, #32] \n\t" \
2757
+ "push {r0, r1, r2, r3} \n\t" \
2758
+ "ldr r0, [%1, #4] \n\t" \
2759
+ "ldr r1, [%1, #8] \n\t" \
2760
+ "ldr r2, [%1, #12] \n\t" \
2761
+ "ldr r3, [%1, #16] \n\t" \
2762
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2763
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2764
+ "add sp, sp, #16 \n\t" \
2765
+ "mov %0, r0" \
2766
+ : /*out*/ "=r" (_res) \
2767
+ : /*in*/ "0" (&_argvec[0]) \
2768
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2769
+ ); \
2770
+ lval = (__typeof__(lval)) _res; \
2771
+ } while (0)
2772
+
2773
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2774
+ arg7,arg8,arg9) \
2775
+ do { \
2776
+ volatile OrigFn _orig = (orig); \
2777
+ volatile unsigned long _argvec[10]; \
2778
+ volatile unsigned long _res; \
2779
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2780
+ _argvec[1] = (unsigned long)(arg1); \
2781
+ _argvec[2] = (unsigned long)(arg2); \
2782
+ _argvec[3] = (unsigned long)(arg3); \
2783
+ _argvec[4] = (unsigned long)(arg4); \
2784
+ _argvec[5] = (unsigned long)(arg5); \
2785
+ _argvec[6] = (unsigned long)(arg6); \
2786
+ _argvec[7] = (unsigned long)(arg7); \
2787
+ _argvec[8] = (unsigned long)(arg8); \
2788
+ _argvec[9] = (unsigned long)(arg9); \
2789
+ __asm__ volatile( \
2790
+ "ldr r0, [%1, #20] \n\t" \
2791
+ "ldr r1, [%1, #24] \n\t" \
2792
+ "ldr r2, [%1, #28] \n\t" \
2793
+ "ldr r3, [%1, #32] \n\t" \
2794
+ "ldr r4, [%1, #36] \n\t" \
2795
+ "push {r0, r1, r2, r3, r4} \n\t" \
2796
+ "ldr r0, [%1, #4] \n\t" \
2797
+ "ldr r1, [%1, #8] \n\t" \
2798
+ "ldr r2, [%1, #12] \n\t" \
2799
+ "ldr r3, [%1, #16] \n\t" \
2800
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2801
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2802
+ "add sp, sp, #20 \n\t" \
2803
+ "mov %0, r0" \
2804
+ : /*out*/ "=r" (_res) \
2805
+ : /*in*/ "0" (&_argvec[0]) \
2806
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2807
+ ); \
2808
+ lval = (__typeof__(lval)) _res; \
2809
+ } while (0)
2810
+
2811
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
2812
+ arg7,arg8,arg9,arg10) \
2813
+ do { \
2814
+ volatile OrigFn _orig = (orig); \
2815
+ volatile unsigned long _argvec[11]; \
2816
+ volatile unsigned long _res; \
2817
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2818
+ _argvec[1] = (unsigned long)(arg1); \
2819
+ _argvec[2] = (unsigned long)(arg2); \
2820
+ _argvec[3] = (unsigned long)(arg3); \
2821
+ _argvec[4] = (unsigned long)(arg4); \
2822
+ _argvec[5] = (unsigned long)(arg5); \
2823
+ _argvec[6] = (unsigned long)(arg6); \
2824
+ _argvec[7] = (unsigned long)(arg7); \
2825
+ _argvec[8] = (unsigned long)(arg8); \
2826
+ _argvec[9] = (unsigned long)(arg9); \
2827
+ _argvec[10] = (unsigned long)(arg10); \
2828
+ __asm__ volatile( \
2829
+ "ldr r0, [%1, #40] \n\t" \
2830
+ "push {r0} \n\t" \
2831
+ "ldr r0, [%1, #20] \n\t" \
2832
+ "ldr r1, [%1, #24] \n\t" \
2833
+ "ldr r2, [%1, #28] \n\t" \
2834
+ "ldr r3, [%1, #32] \n\t" \
2835
+ "ldr r4, [%1, #36] \n\t" \
2836
+ "push {r0, r1, r2, r3, r4} \n\t" \
2837
+ "ldr r0, [%1, #4] \n\t" \
2838
+ "ldr r1, [%1, #8] \n\t" \
2839
+ "ldr r2, [%1, #12] \n\t" \
2840
+ "ldr r3, [%1, #16] \n\t" \
2841
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2842
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2843
+ "add sp, sp, #24 \n\t" \
2844
+ "mov %0, r0" \
2845
+ : /*out*/ "=r" (_res) \
2846
+ : /*in*/ "0" (&_argvec[0]) \
2847
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2848
+ ); \
2849
+ lval = (__typeof__(lval)) _res; \
2850
+ } while (0)
2851
+
2852
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
2853
+ arg6,arg7,arg8,arg9,arg10, \
2854
+ arg11) \
2855
+ do { \
2856
+ volatile OrigFn _orig = (orig); \
2857
+ volatile unsigned long _argvec[12]; \
2858
+ volatile unsigned long _res; \
2859
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2860
+ _argvec[1] = (unsigned long)(arg1); \
2861
+ _argvec[2] = (unsigned long)(arg2); \
2862
+ _argvec[3] = (unsigned long)(arg3); \
2863
+ _argvec[4] = (unsigned long)(arg4); \
2864
+ _argvec[5] = (unsigned long)(arg5); \
2865
+ _argvec[6] = (unsigned long)(arg6); \
2866
+ _argvec[7] = (unsigned long)(arg7); \
2867
+ _argvec[8] = (unsigned long)(arg8); \
2868
+ _argvec[9] = (unsigned long)(arg9); \
2869
+ _argvec[10] = (unsigned long)(arg10); \
2870
+ _argvec[11] = (unsigned long)(arg11); \
2871
+ __asm__ volatile( \
2872
+ "ldr r0, [%1, #40] \n\t" \
2873
+ "ldr r1, [%1, #44] \n\t" \
2874
+ "push {r0, r1} \n\t" \
2875
+ "ldr r0, [%1, #20] \n\t" \
2876
+ "ldr r1, [%1, #24] \n\t" \
2877
+ "ldr r2, [%1, #28] \n\t" \
2878
+ "ldr r3, [%1, #32] \n\t" \
2879
+ "ldr r4, [%1, #36] \n\t" \
2880
+ "push {r0, r1, r2, r3, r4} \n\t" \
2881
+ "ldr r0, [%1, #4] \n\t" \
2882
+ "ldr r1, [%1, #8] \n\t" \
2883
+ "ldr r2, [%1, #12] \n\t" \
2884
+ "ldr r3, [%1, #16] \n\t" \
2885
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2886
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2887
+ "add sp, sp, #28 \n\t" \
2888
+ "mov %0, r0" \
2889
+ : /*out*/ "=r" (_res) \
2890
+ : /*in*/ "0" (&_argvec[0]) \
2891
+ : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
2892
+ ); \
2893
+ lval = (__typeof__(lval)) _res; \
2894
+ } while (0)
2895
+
2896
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
2897
+ arg6,arg7,arg8,arg9,arg10, \
2898
+ arg11,arg12) \
2899
+ do { \
2900
+ volatile OrigFn _orig = (orig); \
2901
+ volatile unsigned long _argvec[13]; \
2902
+ volatile unsigned long _res; \
2903
+ _argvec[0] = (unsigned long)_orig.nraddr; \
2904
+ _argvec[1] = (unsigned long)(arg1); \
2905
+ _argvec[2] = (unsigned long)(arg2); \
2906
+ _argvec[3] = (unsigned long)(arg3); \
2907
+ _argvec[4] = (unsigned long)(arg4); \
2908
+ _argvec[5] = (unsigned long)(arg5); \
2909
+ _argvec[6] = (unsigned long)(arg6); \
2910
+ _argvec[7] = (unsigned long)(arg7); \
2911
+ _argvec[8] = (unsigned long)(arg8); \
2912
+ _argvec[9] = (unsigned long)(arg9); \
2913
+ _argvec[10] = (unsigned long)(arg10); \
2914
+ _argvec[11] = (unsigned long)(arg11); \
2915
+ _argvec[12] = (unsigned long)(arg12); \
2916
+ __asm__ volatile( \
2917
+ "ldr r0, [%1, #40] \n\t" \
2918
+ "ldr r1, [%1, #44] \n\t" \
2919
+ "ldr r2, [%1, #48] \n\t" \
2920
+ "push {r0, r1, r2} \n\t" \
2921
+ "ldr r0, [%1, #20] \n\t" \
2922
+ "ldr r1, [%1, #24] \n\t" \
2923
+ "ldr r2, [%1, #28] \n\t" \
2924
+ "ldr r3, [%1, #32] \n\t" \
2925
+ "ldr r4, [%1, #36] \n\t" \
2926
+ "push {r0, r1, r2, r3, r4} \n\t" \
2927
+ "ldr r0, [%1, #4] \n\t" \
2928
+ "ldr r1, [%1, #8] \n\t" \
2929
+ "ldr r2, [%1, #12] \n\t" \
2930
+ "ldr r3, [%1, #16] \n\t" \
2931
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
2932
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
2933
+ "add sp, sp, #32 \n\t" \
2934
+ "mov %0, r0" \
2935
+ : /*out*/ "=r" (_res) \
2936
+ : /*in*/ "0" (&_argvec[0]) \
2937
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2938
+ ); \
2939
+ lval = (__typeof__(lval)) _res; \
2940
+ } while (0)
2941
+
2942
+ #endif /* PLAT_arm_linux */
2943
+
2944
+ /* ------------------------ ppc32-aix5 ------------------------- */
2945
+
2946
+ #if defined(PLAT_ppc32_aix5)
2947
+
2948
+ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
2949
+
2950
+ /* These regs are trashed by the hidden call. */
2951
+ #define __CALLER_SAVED_REGS \
2952
+ "lr", "ctr", "xer", \
2953
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
2954
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
2955
+ "r11", "r12", "r13"
2956
+
2957
+ /* Expand the stack frame, copying enough info that unwinding
2958
+ still works. Trashes r3. */
2959
+
2960
+ #define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
2961
+ "addi 1,1,-" #_n_fr "\n\t" \
2962
+ "lwz 3," #_n_fr "(1)\n\t" \
2963
+ "stw 3,0(1)\n\t"
2964
+
2965
+ #define VG_CONTRACT_FRAME_BY(_n_fr) \
2966
+ "addi 1,1," #_n_fr "\n\t"
2967
+
2968
+ /* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
2969
+ long) == 4. */
2970
+
2971
+ #define CALL_FN_W_v(lval, orig) \
2972
+ do { \
2973
+ volatile OrigFn _orig = (orig); \
2974
+ volatile unsigned long _argvec[3+0]; \
2975
+ volatile unsigned long _res; \
2976
+ /* _argvec[0] holds current r2 across the call */ \
2977
+ _argvec[1] = (unsigned long)_orig.r2; \
2978
+ _argvec[2] = (unsigned long)_orig.nraddr; \
2979
+ __asm__ volatile( \
2980
+ "mr 11,%1\n\t" \
2981
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
2982
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
2983
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
2984
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
2985
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
2986
+ "mr 11,%1\n\t" \
2987
+ "mr %0,3\n\t" \
2988
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
2989
+ VG_CONTRACT_FRAME_BY(512) \
2990
+ : /*out*/ "=r" (_res) \
2991
+ : /*in*/ "r" (&_argvec[2]) \
2992
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
2993
+ ); \
2994
+ lval = (__typeof__(lval)) _res; \
2995
+ } while (0)
2996
+
2997
+ #define CALL_FN_W_W(lval, orig, arg1) \
2998
+ do { \
2999
+ volatile OrigFn _orig = (orig); \
3000
+ volatile unsigned long _argvec[3+1]; \
3001
+ volatile unsigned long _res; \
3002
+ /* _argvec[0] holds current r2 across the call */ \
3003
+ _argvec[1] = (unsigned long)_orig.r2; \
3004
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3005
+ _argvec[2+1] = (unsigned long)arg1; \
3006
+ __asm__ volatile( \
3007
+ "mr 11,%1\n\t" \
3008
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3009
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3010
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3011
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3012
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3013
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3014
+ "mr 11,%1\n\t" \
3015
+ "mr %0,3\n\t" \
3016
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3017
+ VG_CONTRACT_FRAME_BY(512) \
3018
+ : /*out*/ "=r" (_res) \
3019
+ : /*in*/ "r" (&_argvec[2]) \
3020
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3021
+ ); \
3022
+ lval = (__typeof__(lval)) _res; \
3023
+ } while (0)
3024
+
3025
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
3026
+ do { \
3027
+ volatile OrigFn _orig = (orig); \
3028
+ volatile unsigned long _argvec[3+2]; \
3029
+ volatile unsigned long _res; \
3030
+ /* _argvec[0] holds current r2 across the call */ \
3031
+ _argvec[1] = (unsigned long)_orig.r2; \
3032
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3033
+ _argvec[2+1] = (unsigned long)arg1; \
3034
+ _argvec[2+2] = (unsigned long)arg2; \
3035
+ __asm__ volatile( \
3036
+ "mr 11,%1\n\t" \
3037
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3038
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3039
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3040
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3041
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3042
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3043
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3044
+ "mr 11,%1\n\t" \
3045
+ "mr %0,3\n\t" \
3046
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3047
+ VG_CONTRACT_FRAME_BY(512) \
3048
+ : /*out*/ "=r" (_res) \
3049
+ : /*in*/ "r" (&_argvec[2]) \
3050
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3051
+ ); \
3052
+ lval = (__typeof__(lval)) _res; \
3053
+ } while (0)
3054
+
3055
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
3056
+ do { \
3057
+ volatile OrigFn _orig = (orig); \
3058
+ volatile unsigned long _argvec[3+3]; \
3059
+ volatile unsigned long _res; \
3060
+ /* _argvec[0] holds current r2 across the call */ \
3061
+ _argvec[1] = (unsigned long)_orig.r2; \
3062
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3063
+ _argvec[2+1] = (unsigned long)arg1; \
3064
+ _argvec[2+2] = (unsigned long)arg2; \
3065
+ _argvec[2+3] = (unsigned long)arg3; \
3066
+ __asm__ volatile( \
3067
+ "mr 11,%1\n\t" \
3068
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3069
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3070
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3071
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3072
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3073
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3074
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3075
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3076
+ "mr 11,%1\n\t" \
3077
+ "mr %0,3\n\t" \
3078
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3079
+ VG_CONTRACT_FRAME_BY(512) \
3080
+ : /*out*/ "=r" (_res) \
3081
+ : /*in*/ "r" (&_argvec[2]) \
3082
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3083
+ ); \
3084
+ lval = (__typeof__(lval)) _res; \
3085
+ } while (0)
3086
+
3087
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
3088
+ do { \
3089
+ volatile OrigFn _orig = (orig); \
3090
+ volatile unsigned long _argvec[3+4]; \
3091
+ volatile unsigned long _res; \
3092
+ /* _argvec[0] holds current r2 across the call */ \
3093
+ _argvec[1] = (unsigned long)_orig.r2; \
3094
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3095
+ _argvec[2+1] = (unsigned long)arg1; \
3096
+ _argvec[2+2] = (unsigned long)arg2; \
3097
+ _argvec[2+3] = (unsigned long)arg3; \
3098
+ _argvec[2+4] = (unsigned long)arg4; \
3099
+ __asm__ volatile( \
3100
+ "mr 11,%1\n\t" \
3101
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3102
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3103
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3104
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3105
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3106
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3107
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3108
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3109
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3110
+ "mr 11,%1\n\t" \
3111
+ "mr %0,3\n\t" \
3112
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3113
+ VG_CONTRACT_FRAME_BY(512) \
3114
+ : /*out*/ "=r" (_res) \
3115
+ : /*in*/ "r" (&_argvec[2]) \
3116
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3117
+ ); \
3118
+ lval = (__typeof__(lval)) _res; \
3119
+ } while (0)
3120
+
3121
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
3122
+ do { \
3123
+ volatile OrigFn _orig = (orig); \
3124
+ volatile unsigned long _argvec[3+5]; \
3125
+ volatile unsigned long _res; \
3126
+ /* _argvec[0] holds current r2 across the call */ \
3127
+ _argvec[1] = (unsigned long)_orig.r2; \
3128
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3129
+ _argvec[2+1] = (unsigned long)arg1; \
3130
+ _argvec[2+2] = (unsigned long)arg2; \
3131
+ _argvec[2+3] = (unsigned long)arg3; \
3132
+ _argvec[2+4] = (unsigned long)arg4; \
3133
+ _argvec[2+5] = (unsigned long)arg5; \
3134
+ __asm__ volatile( \
3135
+ "mr 11,%1\n\t" \
3136
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3137
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3138
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3139
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3140
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3141
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3142
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3143
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3144
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3145
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3146
+ "mr 11,%1\n\t" \
3147
+ "mr %0,3\n\t" \
3148
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3149
+ VG_CONTRACT_FRAME_BY(512) \
3150
+ : /*out*/ "=r" (_res) \
3151
+ : /*in*/ "r" (&_argvec[2]) \
3152
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3153
+ ); \
3154
+ lval = (__typeof__(lval)) _res; \
3155
+ } while (0)
3156
+
3157
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
3158
+ do { \
3159
+ volatile OrigFn _orig = (orig); \
3160
+ volatile unsigned long _argvec[3+6]; \
3161
+ volatile unsigned long _res; \
3162
+ /* _argvec[0] holds current r2 across the call */ \
3163
+ _argvec[1] = (unsigned long)_orig.r2; \
3164
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3165
+ _argvec[2+1] = (unsigned long)arg1; \
3166
+ _argvec[2+2] = (unsigned long)arg2; \
3167
+ _argvec[2+3] = (unsigned long)arg3; \
3168
+ _argvec[2+4] = (unsigned long)arg4; \
3169
+ _argvec[2+5] = (unsigned long)arg5; \
3170
+ _argvec[2+6] = (unsigned long)arg6; \
3171
+ __asm__ volatile( \
3172
+ "mr 11,%1\n\t" \
3173
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3174
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3175
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3176
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3177
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3178
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3179
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3180
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3181
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3182
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3183
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3184
+ "mr 11,%1\n\t" \
3185
+ "mr %0,3\n\t" \
3186
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3187
+ VG_CONTRACT_FRAME_BY(512) \
3188
+ : /*out*/ "=r" (_res) \
3189
+ : /*in*/ "r" (&_argvec[2]) \
3190
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3191
+ ); \
3192
+ lval = (__typeof__(lval)) _res; \
3193
+ } while (0)
3194
+
3195
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3196
+ arg7) \
3197
+ do { \
3198
+ volatile OrigFn _orig = (orig); \
3199
+ volatile unsigned long _argvec[3+7]; \
3200
+ volatile unsigned long _res; \
3201
+ /* _argvec[0] holds current r2 across the call */ \
3202
+ _argvec[1] = (unsigned long)_orig.r2; \
3203
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3204
+ _argvec[2+1] = (unsigned long)arg1; \
3205
+ _argvec[2+2] = (unsigned long)arg2; \
3206
+ _argvec[2+3] = (unsigned long)arg3; \
3207
+ _argvec[2+4] = (unsigned long)arg4; \
3208
+ _argvec[2+5] = (unsigned long)arg5; \
3209
+ _argvec[2+6] = (unsigned long)arg6; \
3210
+ _argvec[2+7] = (unsigned long)arg7; \
3211
+ __asm__ volatile( \
3212
+ "mr 11,%1\n\t" \
3213
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3214
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3215
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3216
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3217
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3218
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3219
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3220
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3221
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3222
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3223
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3224
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3225
+ "mr 11,%1\n\t" \
3226
+ "mr %0,3\n\t" \
3227
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3228
+ VG_CONTRACT_FRAME_BY(512) \
3229
+ : /*out*/ "=r" (_res) \
3230
+ : /*in*/ "r" (&_argvec[2]) \
3231
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3232
+ ); \
3233
+ lval = (__typeof__(lval)) _res; \
3234
+ } while (0)
3235
+
3236
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3237
+ arg7,arg8) \
3238
+ do { \
3239
+ volatile OrigFn _orig = (orig); \
3240
+ volatile unsigned long _argvec[3+8]; \
3241
+ volatile unsigned long _res; \
3242
+ /* _argvec[0] holds current r2 across the call */ \
3243
+ _argvec[1] = (unsigned long)_orig.r2; \
3244
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3245
+ _argvec[2+1] = (unsigned long)arg1; \
3246
+ _argvec[2+2] = (unsigned long)arg2; \
3247
+ _argvec[2+3] = (unsigned long)arg3; \
3248
+ _argvec[2+4] = (unsigned long)arg4; \
3249
+ _argvec[2+5] = (unsigned long)arg5; \
3250
+ _argvec[2+6] = (unsigned long)arg6; \
3251
+ _argvec[2+7] = (unsigned long)arg7; \
3252
+ _argvec[2+8] = (unsigned long)arg8; \
3253
+ __asm__ volatile( \
3254
+ "mr 11,%1\n\t" \
3255
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3256
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3257
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3258
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3259
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3260
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3261
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3262
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3263
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3264
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3265
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
3266
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3267
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3268
+ "mr 11,%1\n\t" \
3269
+ "mr %0,3\n\t" \
3270
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3271
+ VG_CONTRACT_FRAME_BY(512) \
3272
+ : /*out*/ "=r" (_res) \
3273
+ : /*in*/ "r" (&_argvec[2]) \
3274
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3275
+ ); \
3276
+ lval = (__typeof__(lval)) _res; \
3277
+ } while (0)
3278
+
3279
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3280
+ arg7,arg8,arg9) \
3281
+ do { \
3282
+ volatile OrigFn _orig = (orig); \
3283
+ volatile unsigned long _argvec[3+9]; \
3284
+ volatile unsigned long _res; \
3285
+ /* _argvec[0] holds current r2 across the call */ \
3286
+ _argvec[1] = (unsigned long)_orig.r2; \
3287
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3288
+ _argvec[2+1] = (unsigned long)arg1; \
3289
+ _argvec[2+2] = (unsigned long)arg2; \
3290
+ _argvec[2+3] = (unsigned long)arg3; \
3291
+ _argvec[2+4] = (unsigned long)arg4; \
3292
+ _argvec[2+5] = (unsigned long)arg5; \
3293
+ _argvec[2+6] = (unsigned long)arg6; \
3294
+ _argvec[2+7] = (unsigned long)arg7; \
3295
+ _argvec[2+8] = (unsigned long)arg8; \
3296
+ _argvec[2+9] = (unsigned long)arg9; \
3297
+ __asm__ volatile( \
3298
+ "mr 11,%1\n\t" \
3299
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3300
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3301
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3302
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
3303
+ /* arg9 */ \
3304
+ "lwz 3,36(11)\n\t" \
3305
+ "stw 3,56(1)\n\t" \
3306
+ /* args1-8 */ \
3307
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3308
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3309
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3310
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3311
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3312
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3313
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3314
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
3315
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3316
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3317
+ "mr 11,%1\n\t" \
3318
+ "mr %0,3\n\t" \
3319
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3320
+ VG_CONTRACT_FRAME_BY(64) \
3321
+ VG_CONTRACT_FRAME_BY(512) \
3322
+ : /*out*/ "=r" (_res) \
3323
+ : /*in*/ "r" (&_argvec[2]) \
3324
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3325
+ ); \
3326
+ lval = (__typeof__(lval)) _res; \
3327
+ } while (0)
3328
+
3329
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3330
+ arg7,arg8,arg9,arg10) \
3331
+ do { \
3332
+ volatile OrigFn _orig = (orig); \
3333
+ volatile unsigned long _argvec[3+10]; \
3334
+ volatile unsigned long _res; \
3335
+ /* _argvec[0] holds current r2 across the call */ \
3336
+ _argvec[1] = (unsigned long)_orig.r2; \
3337
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3338
+ _argvec[2+1] = (unsigned long)arg1; \
3339
+ _argvec[2+2] = (unsigned long)arg2; \
3340
+ _argvec[2+3] = (unsigned long)arg3; \
3341
+ _argvec[2+4] = (unsigned long)arg4; \
3342
+ _argvec[2+5] = (unsigned long)arg5; \
3343
+ _argvec[2+6] = (unsigned long)arg6; \
3344
+ _argvec[2+7] = (unsigned long)arg7; \
3345
+ _argvec[2+8] = (unsigned long)arg8; \
3346
+ _argvec[2+9] = (unsigned long)arg9; \
3347
+ _argvec[2+10] = (unsigned long)arg10; \
3348
+ __asm__ volatile( \
3349
+ "mr 11,%1\n\t" \
3350
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3351
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3352
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3353
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
3354
+ /* arg10 */ \
3355
+ "lwz 3,40(11)\n\t" \
3356
+ "stw 3,60(1)\n\t" \
3357
+ /* arg9 */ \
3358
+ "lwz 3,36(11)\n\t" \
3359
+ "stw 3,56(1)\n\t" \
3360
+ /* args1-8 */ \
3361
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3362
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3363
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3364
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3365
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3366
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3367
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3368
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
3369
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3370
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3371
+ "mr 11,%1\n\t" \
3372
+ "mr %0,3\n\t" \
3373
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3374
+ VG_CONTRACT_FRAME_BY(64) \
3375
+ VG_CONTRACT_FRAME_BY(512) \
3376
+ : /*out*/ "=r" (_res) \
3377
+ : /*in*/ "r" (&_argvec[2]) \
3378
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3379
+ ); \
3380
+ lval = (__typeof__(lval)) _res; \
3381
+ } while (0)
3382
+
3383
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3384
+ arg7,arg8,arg9,arg10,arg11) \
3385
+ do { \
3386
+ volatile OrigFn _orig = (orig); \
3387
+ volatile unsigned long _argvec[3+11]; \
3388
+ volatile unsigned long _res; \
3389
+ /* _argvec[0] holds current r2 across the call */ \
3390
+ _argvec[1] = (unsigned long)_orig.r2; \
3391
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3392
+ _argvec[2+1] = (unsigned long)arg1; \
3393
+ _argvec[2+2] = (unsigned long)arg2; \
3394
+ _argvec[2+3] = (unsigned long)arg3; \
3395
+ _argvec[2+4] = (unsigned long)arg4; \
3396
+ _argvec[2+5] = (unsigned long)arg5; \
3397
+ _argvec[2+6] = (unsigned long)arg6; \
3398
+ _argvec[2+7] = (unsigned long)arg7; \
3399
+ _argvec[2+8] = (unsigned long)arg8; \
3400
+ _argvec[2+9] = (unsigned long)arg9; \
3401
+ _argvec[2+10] = (unsigned long)arg10; \
3402
+ _argvec[2+11] = (unsigned long)arg11; \
3403
+ __asm__ volatile( \
3404
+ "mr 11,%1\n\t" \
3405
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3406
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3407
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3408
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
3409
+ /* arg11 */ \
3410
+ "lwz 3,44(11)\n\t" \
3411
+ "stw 3,64(1)\n\t" \
3412
+ /* arg10 */ \
3413
+ "lwz 3,40(11)\n\t" \
3414
+ "stw 3,60(1)\n\t" \
3415
+ /* arg9 */ \
3416
+ "lwz 3,36(11)\n\t" \
3417
+ "stw 3,56(1)\n\t" \
3418
+ /* args1-8 */ \
3419
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3420
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3421
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3422
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3423
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3424
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3425
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3426
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
3427
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3428
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3429
+ "mr 11,%1\n\t" \
3430
+ "mr %0,3\n\t" \
3431
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3432
+ VG_CONTRACT_FRAME_BY(72) \
3433
+ VG_CONTRACT_FRAME_BY(512) \
3434
+ : /*out*/ "=r" (_res) \
3435
+ : /*in*/ "r" (&_argvec[2]) \
3436
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3437
+ ); \
3438
+ lval = (__typeof__(lval)) _res; \
3439
+ } while (0)
3440
+
3441
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3442
+ arg7,arg8,arg9,arg10,arg11,arg12) \
3443
+ do { \
3444
+ volatile OrigFn _orig = (orig); \
3445
+ volatile unsigned long _argvec[3+12]; \
3446
+ volatile unsigned long _res; \
3447
+ /* _argvec[0] holds current r2 across the call */ \
3448
+ _argvec[1] = (unsigned long)_orig.r2; \
3449
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3450
+ _argvec[2+1] = (unsigned long)arg1; \
3451
+ _argvec[2+2] = (unsigned long)arg2; \
3452
+ _argvec[2+3] = (unsigned long)arg3; \
3453
+ _argvec[2+4] = (unsigned long)arg4; \
3454
+ _argvec[2+5] = (unsigned long)arg5; \
3455
+ _argvec[2+6] = (unsigned long)arg6; \
3456
+ _argvec[2+7] = (unsigned long)arg7; \
3457
+ _argvec[2+8] = (unsigned long)arg8; \
3458
+ _argvec[2+9] = (unsigned long)arg9; \
3459
+ _argvec[2+10] = (unsigned long)arg10; \
3460
+ _argvec[2+11] = (unsigned long)arg11; \
3461
+ _argvec[2+12] = (unsigned long)arg12; \
3462
+ __asm__ volatile( \
3463
+ "mr 11,%1\n\t" \
3464
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3465
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
3466
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
3467
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
3468
+ /* arg12 */ \
3469
+ "lwz 3,48(11)\n\t" \
3470
+ "stw 3,68(1)\n\t" \
3471
+ /* arg11 */ \
3472
+ "lwz 3,44(11)\n\t" \
3473
+ "stw 3,64(1)\n\t" \
3474
+ /* arg10 */ \
3475
+ "lwz 3,40(11)\n\t" \
3476
+ "stw 3,60(1)\n\t" \
3477
+ /* arg9 */ \
3478
+ "lwz 3,36(11)\n\t" \
3479
+ "stw 3,56(1)\n\t" \
3480
+ /* args1-8 */ \
3481
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
3482
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
3483
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
3484
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
3485
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
3486
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
3487
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
3488
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
3489
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
3490
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3491
+ "mr 11,%1\n\t" \
3492
+ "mr %0,3\n\t" \
3493
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
3494
+ VG_CONTRACT_FRAME_BY(72) \
3495
+ VG_CONTRACT_FRAME_BY(512) \
3496
+ : /*out*/ "=r" (_res) \
3497
+ : /*in*/ "r" (&_argvec[2]) \
3498
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3499
+ ); \
3500
+ lval = (__typeof__(lval)) _res; \
3501
+ } while (0)
3502
+
3503
+ #endif /* PLAT_ppc32_aix5 */
3504
+
3505
+ /* ------------------------ ppc64-aix5 ------------------------- */
3506
+
3507
+ #if defined(PLAT_ppc64_aix5)
3508
+
3509
+ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
3510
+
3511
+ /* These regs are trashed by the hidden call. */
3512
+ #define __CALLER_SAVED_REGS \
3513
+ "lr", "ctr", "xer", \
3514
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
3515
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
3516
+ "r11", "r12", "r13"
3517
+
3518
+ /* Expand the stack frame, copying enough info that unwinding
3519
+ still works. Trashes r3. */
3520
+
3521
+ #define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
3522
+ "addi 1,1,-" #_n_fr "\n\t" \
3523
+ "ld 3," #_n_fr "(1)\n\t" \
3524
+ "std 3,0(1)\n\t"
3525
+
3526
+ #define VG_CONTRACT_FRAME_BY(_n_fr) \
3527
+ "addi 1,1," #_n_fr "\n\t"
3528
+
3529
+ /* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
3530
+ long) == 8. */
3531
+
3532
+ #define CALL_FN_W_v(lval, orig) \
3533
+ do { \
3534
+ volatile OrigFn _orig = (orig); \
3535
+ volatile unsigned long _argvec[3+0]; \
3536
+ volatile unsigned long _res; \
3537
+ /* _argvec[0] holds current r2 across the call */ \
3538
+ _argvec[1] = (unsigned long)_orig.r2; \
3539
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3540
+ __asm__ volatile( \
3541
+ "mr 11,%1\n\t" \
3542
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3543
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3544
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3545
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3546
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3547
+ "mr 11,%1\n\t" \
3548
+ "mr %0,3\n\t" \
3549
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3550
+ VG_CONTRACT_FRAME_BY(512) \
3551
+ : /*out*/ "=r" (_res) \
3552
+ : /*in*/ "r" (&_argvec[2]) \
3553
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3554
+ ); \
3555
+ lval = (__typeof__(lval)) _res; \
3556
+ } while (0)
3557
+
3558
+ #define CALL_FN_W_W(lval, orig, arg1) \
3559
+ do { \
3560
+ volatile OrigFn _orig = (orig); \
3561
+ volatile unsigned long _argvec[3+1]; \
3562
+ volatile unsigned long _res; \
3563
+ /* _argvec[0] holds current r2 across the call */ \
3564
+ _argvec[1] = (unsigned long)_orig.r2; \
3565
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3566
+ _argvec[2+1] = (unsigned long)arg1; \
3567
+ __asm__ volatile( \
3568
+ "mr 11,%1\n\t" \
3569
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3570
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3571
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3572
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3573
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3574
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3575
+ "mr 11,%1\n\t" \
3576
+ "mr %0,3\n\t" \
3577
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3578
+ VG_CONTRACT_FRAME_BY(512) \
3579
+ : /*out*/ "=r" (_res) \
3580
+ : /*in*/ "r" (&_argvec[2]) \
3581
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3582
+ ); \
3583
+ lval = (__typeof__(lval)) _res; \
3584
+ } while (0)
3585
+
3586
+ #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
3587
+ do { \
3588
+ volatile OrigFn _orig = (orig); \
3589
+ volatile unsigned long _argvec[3+2]; \
3590
+ volatile unsigned long _res; \
3591
+ /* _argvec[0] holds current r2 across the call */ \
3592
+ _argvec[1] = (unsigned long)_orig.r2; \
3593
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3594
+ _argvec[2+1] = (unsigned long)arg1; \
3595
+ _argvec[2+2] = (unsigned long)arg2; \
3596
+ __asm__ volatile( \
3597
+ "mr 11,%1\n\t" \
3598
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3599
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3600
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3601
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3602
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3603
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3604
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3605
+ "mr 11,%1\n\t" \
3606
+ "mr %0,3\n\t" \
3607
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3608
+ VG_CONTRACT_FRAME_BY(512) \
3609
+ : /*out*/ "=r" (_res) \
3610
+ : /*in*/ "r" (&_argvec[2]) \
3611
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3612
+ ); \
3613
+ lval = (__typeof__(lval)) _res; \
3614
+ } while (0)
3615
+
3616
+ #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
3617
+ do { \
3618
+ volatile OrigFn _orig = (orig); \
3619
+ volatile unsigned long _argvec[3+3]; \
3620
+ volatile unsigned long _res; \
3621
+ /* _argvec[0] holds current r2 across the call */ \
3622
+ _argvec[1] = (unsigned long)_orig.r2; \
3623
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3624
+ _argvec[2+1] = (unsigned long)arg1; \
3625
+ _argvec[2+2] = (unsigned long)arg2; \
3626
+ _argvec[2+3] = (unsigned long)arg3; \
3627
+ __asm__ volatile( \
3628
+ "mr 11,%1\n\t" \
3629
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3630
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3631
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3632
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3633
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3634
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3635
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3636
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3637
+ "mr 11,%1\n\t" \
3638
+ "mr %0,3\n\t" \
3639
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3640
+ VG_CONTRACT_FRAME_BY(512) \
3641
+ : /*out*/ "=r" (_res) \
3642
+ : /*in*/ "r" (&_argvec[2]) \
3643
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3644
+ ); \
3645
+ lval = (__typeof__(lval)) _res; \
3646
+ } while (0)
3647
+
3648
+ #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
3649
+ do { \
3650
+ volatile OrigFn _orig = (orig); \
3651
+ volatile unsigned long _argvec[3+4]; \
3652
+ volatile unsigned long _res; \
3653
+ /* _argvec[0] holds current r2 across the call */ \
3654
+ _argvec[1] = (unsigned long)_orig.r2; \
3655
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3656
+ _argvec[2+1] = (unsigned long)arg1; \
3657
+ _argvec[2+2] = (unsigned long)arg2; \
3658
+ _argvec[2+3] = (unsigned long)arg3; \
3659
+ _argvec[2+4] = (unsigned long)arg4; \
3660
+ __asm__ volatile( \
3661
+ "mr 11,%1\n\t" \
3662
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3663
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3664
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3665
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3666
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3667
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3668
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3669
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3670
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3671
+ "mr 11,%1\n\t" \
3672
+ "mr %0,3\n\t" \
3673
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3674
+ VG_CONTRACT_FRAME_BY(512) \
3675
+ : /*out*/ "=r" (_res) \
3676
+ : /*in*/ "r" (&_argvec[2]) \
3677
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3678
+ ); \
3679
+ lval = (__typeof__(lval)) _res; \
3680
+ } while (0)
3681
+
3682
+ #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
3683
+ do { \
3684
+ volatile OrigFn _orig = (orig); \
3685
+ volatile unsigned long _argvec[3+5]; \
3686
+ volatile unsigned long _res; \
3687
+ /* _argvec[0] holds current r2 across the call */ \
3688
+ _argvec[1] = (unsigned long)_orig.r2; \
3689
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3690
+ _argvec[2+1] = (unsigned long)arg1; \
3691
+ _argvec[2+2] = (unsigned long)arg2; \
3692
+ _argvec[2+3] = (unsigned long)arg3; \
3693
+ _argvec[2+4] = (unsigned long)arg4; \
3694
+ _argvec[2+5] = (unsigned long)arg5; \
3695
+ __asm__ volatile( \
3696
+ "mr 11,%1\n\t" \
3697
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3698
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3699
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3700
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3701
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3702
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3703
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3704
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3705
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3706
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3707
+ "mr 11,%1\n\t" \
3708
+ "mr %0,3\n\t" \
3709
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3710
+ VG_CONTRACT_FRAME_BY(512) \
3711
+ : /*out*/ "=r" (_res) \
3712
+ : /*in*/ "r" (&_argvec[2]) \
3713
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3714
+ ); \
3715
+ lval = (__typeof__(lval)) _res; \
3716
+ } while (0)
3717
+
3718
+ #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
3719
+ do { \
3720
+ volatile OrigFn _orig = (orig); \
3721
+ volatile unsigned long _argvec[3+6]; \
3722
+ volatile unsigned long _res; \
3723
+ /* _argvec[0] holds current r2 across the call */ \
3724
+ _argvec[1] = (unsigned long)_orig.r2; \
3725
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3726
+ _argvec[2+1] = (unsigned long)arg1; \
3727
+ _argvec[2+2] = (unsigned long)arg2; \
3728
+ _argvec[2+3] = (unsigned long)arg3; \
3729
+ _argvec[2+4] = (unsigned long)arg4; \
3730
+ _argvec[2+5] = (unsigned long)arg5; \
3731
+ _argvec[2+6] = (unsigned long)arg6; \
3732
+ __asm__ volatile( \
3733
+ "mr 11,%1\n\t" \
3734
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3735
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3736
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3737
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3738
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3739
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3740
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3741
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3742
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3743
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3744
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3745
+ "mr 11,%1\n\t" \
3746
+ "mr %0,3\n\t" \
3747
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3748
+ VG_CONTRACT_FRAME_BY(512) \
3749
+ : /*out*/ "=r" (_res) \
3750
+ : /*in*/ "r" (&_argvec[2]) \
3751
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3752
+ ); \
3753
+ lval = (__typeof__(lval)) _res; \
3754
+ } while (0)
3755
+
3756
+ #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3757
+ arg7) \
3758
+ do { \
3759
+ volatile OrigFn _orig = (orig); \
3760
+ volatile unsigned long _argvec[3+7]; \
3761
+ volatile unsigned long _res; \
3762
+ /* _argvec[0] holds current r2 across the call */ \
3763
+ _argvec[1] = (unsigned long)_orig.r2; \
3764
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3765
+ _argvec[2+1] = (unsigned long)arg1; \
3766
+ _argvec[2+2] = (unsigned long)arg2; \
3767
+ _argvec[2+3] = (unsigned long)arg3; \
3768
+ _argvec[2+4] = (unsigned long)arg4; \
3769
+ _argvec[2+5] = (unsigned long)arg5; \
3770
+ _argvec[2+6] = (unsigned long)arg6; \
3771
+ _argvec[2+7] = (unsigned long)arg7; \
3772
+ __asm__ volatile( \
3773
+ "mr 11,%1\n\t" \
3774
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3775
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3776
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3777
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3778
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3779
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3780
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3781
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3782
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3783
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
3784
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3785
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3786
+ "mr 11,%1\n\t" \
3787
+ "mr %0,3\n\t" \
3788
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3789
+ VG_CONTRACT_FRAME_BY(512) \
3790
+ : /*out*/ "=r" (_res) \
3791
+ : /*in*/ "r" (&_argvec[2]) \
3792
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3793
+ ); \
3794
+ lval = (__typeof__(lval)) _res; \
3795
+ } while (0)
3796
+
3797
+ #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3798
+ arg7,arg8) \
3799
+ do { \
3800
+ volatile OrigFn _orig = (orig); \
3801
+ volatile unsigned long _argvec[3+8]; \
3802
+ volatile unsigned long _res; \
3803
+ /* _argvec[0] holds current r2 across the call */ \
3804
+ _argvec[1] = (unsigned long)_orig.r2; \
3805
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3806
+ _argvec[2+1] = (unsigned long)arg1; \
3807
+ _argvec[2+2] = (unsigned long)arg2; \
3808
+ _argvec[2+3] = (unsigned long)arg3; \
3809
+ _argvec[2+4] = (unsigned long)arg4; \
3810
+ _argvec[2+5] = (unsigned long)arg5; \
3811
+ _argvec[2+6] = (unsigned long)arg6; \
3812
+ _argvec[2+7] = (unsigned long)arg7; \
3813
+ _argvec[2+8] = (unsigned long)arg8; \
3814
+ __asm__ volatile( \
3815
+ "mr 11,%1\n\t" \
3816
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3817
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3818
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3819
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3820
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3821
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3822
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3823
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3824
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3825
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
3826
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
3827
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3828
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3829
+ "mr 11,%1\n\t" \
3830
+ "mr %0,3\n\t" \
3831
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3832
+ VG_CONTRACT_FRAME_BY(512) \
3833
+ : /*out*/ "=r" (_res) \
3834
+ : /*in*/ "r" (&_argvec[2]) \
3835
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3836
+ ); \
3837
+ lval = (__typeof__(lval)) _res; \
3838
+ } while (0)
3839
+
3840
+ #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3841
+ arg7,arg8,arg9) \
3842
+ do { \
3843
+ volatile OrigFn _orig = (orig); \
3844
+ volatile unsigned long _argvec[3+9]; \
3845
+ volatile unsigned long _res; \
3846
+ /* _argvec[0] holds current r2 across the call */ \
3847
+ _argvec[1] = (unsigned long)_orig.r2; \
3848
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3849
+ _argvec[2+1] = (unsigned long)arg1; \
3850
+ _argvec[2+2] = (unsigned long)arg2; \
3851
+ _argvec[2+3] = (unsigned long)arg3; \
3852
+ _argvec[2+4] = (unsigned long)arg4; \
3853
+ _argvec[2+5] = (unsigned long)arg5; \
3854
+ _argvec[2+6] = (unsigned long)arg6; \
3855
+ _argvec[2+7] = (unsigned long)arg7; \
3856
+ _argvec[2+8] = (unsigned long)arg8; \
3857
+ _argvec[2+9] = (unsigned long)arg9; \
3858
+ __asm__ volatile( \
3859
+ "mr 11,%1\n\t" \
3860
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3861
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3862
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3863
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
3864
+ /* arg9 */ \
3865
+ "ld 3,72(11)\n\t" \
3866
+ "std 3,112(1)\n\t" \
3867
+ /* args1-8 */ \
3868
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3869
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3870
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3871
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3872
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3873
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3874
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
3875
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
3876
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3877
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3878
+ "mr 11,%1\n\t" \
3879
+ "mr %0,3\n\t" \
3880
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3881
+ VG_CONTRACT_FRAME_BY(128) \
3882
+ VG_CONTRACT_FRAME_BY(512) \
3883
+ : /*out*/ "=r" (_res) \
3884
+ : /*in*/ "r" (&_argvec[2]) \
3885
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3886
+ ); \
3887
+ lval = (__typeof__(lval)) _res; \
3888
+ } while (0)
3889
+
3890
+ #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3891
+ arg7,arg8,arg9,arg10) \
3892
+ do { \
3893
+ volatile OrigFn _orig = (orig); \
3894
+ volatile unsigned long _argvec[3+10]; \
3895
+ volatile unsigned long _res; \
3896
+ /* _argvec[0] holds current r2 across the call */ \
3897
+ _argvec[1] = (unsigned long)_orig.r2; \
3898
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3899
+ _argvec[2+1] = (unsigned long)arg1; \
3900
+ _argvec[2+2] = (unsigned long)arg2; \
3901
+ _argvec[2+3] = (unsigned long)arg3; \
3902
+ _argvec[2+4] = (unsigned long)arg4; \
3903
+ _argvec[2+5] = (unsigned long)arg5; \
3904
+ _argvec[2+6] = (unsigned long)arg6; \
3905
+ _argvec[2+7] = (unsigned long)arg7; \
3906
+ _argvec[2+8] = (unsigned long)arg8; \
3907
+ _argvec[2+9] = (unsigned long)arg9; \
3908
+ _argvec[2+10] = (unsigned long)arg10; \
3909
+ __asm__ volatile( \
3910
+ "mr 11,%1\n\t" \
3911
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3912
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3913
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3914
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
3915
+ /* arg10 */ \
3916
+ "ld 3,80(11)\n\t" \
3917
+ "std 3,120(1)\n\t" \
3918
+ /* arg9 */ \
3919
+ "ld 3,72(11)\n\t" \
3920
+ "std 3,112(1)\n\t" \
3921
+ /* args1-8 */ \
3922
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3923
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3924
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3925
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3926
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3927
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3928
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
3929
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
3930
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3931
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3932
+ "mr 11,%1\n\t" \
3933
+ "mr %0,3\n\t" \
3934
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3935
+ VG_CONTRACT_FRAME_BY(128) \
3936
+ VG_CONTRACT_FRAME_BY(512) \
3937
+ : /*out*/ "=r" (_res) \
3938
+ : /*in*/ "r" (&_argvec[2]) \
3939
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3940
+ ); \
3941
+ lval = (__typeof__(lval)) _res; \
3942
+ } while (0)
3943
+
3944
+ #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
3945
+ arg7,arg8,arg9,arg10,arg11) \
3946
+ do { \
3947
+ volatile OrigFn _orig = (orig); \
3948
+ volatile unsigned long _argvec[3+11]; \
3949
+ volatile unsigned long _res; \
3950
+ /* _argvec[0] holds current r2 across the call */ \
3951
+ _argvec[1] = (unsigned long)_orig.r2; \
3952
+ _argvec[2] = (unsigned long)_orig.nraddr; \
3953
+ _argvec[2+1] = (unsigned long)arg1; \
3954
+ _argvec[2+2] = (unsigned long)arg2; \
3955
+ _argvec[2+3] = (unsigned long)arg3; \
3956
+ _argvec[2+4] = (unsigned long)arg4; \
3957
+ _argvec[2+5] = (unsigned long)arg5; \
3958
+ _argvec[2+6] = (unsigned long)arg6; \
3959
+ _argvec[2+7] = (unsigned long)arg7; \
3960
+ _argvec[2+8] = (unsigned long)arg8; \
3961
+ _argvec[2+9] = (unsigned long)arg9; \
3962
+ _argvec[2+10] = (unsigned long)arg10; \
3963
+ _argvec[2+11] = (unsigned long)arg11; \
3964
+ __asm__ volatile( \
3965
+ "mr 11,%1\n\t" \
3966
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
3967
+ "std 2,-16(11)\n\t" /* save tocptr */ \
3968
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
3969
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
3970
+ /* arg11 */ \
3971
+ "ld 3,88(11)\n\t" \
3972
+ "std 3,128(1)\n\t" \
3973
+ /* arg10 */ \
3974
+ "ld 3,80(11)\n\t" \
3975
+ "std 3,120(1)\n\t" \
3976
+ /* arg9 */ \
3977
+ "ld 3,72(11)\n\t" \
3978
+ "std 3,112(1)\n\t" \
3979
+ /* args1-8 */ \
3980
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
3981
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
3982
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
3983
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
3984
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
3985
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
3986
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
3987
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
3988
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
3989
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
3990
+ "mr 11,%1\n\t" \
3991
+ "mr %0,3\n\t" \
3992
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
3993
+ VG_CONTRACT_FRAME_BY(144) \
3994
+ VG_CONTRACT_FRAME_BY(512) \
3995
+ : /*out*/ "=r" (_res) \
3996
+ : /*in*/ "r" (&_argvec[2]) \
3997
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
3998
+ ); \
3999
+ lval = (__typeof__(lval)) _res; \
4000
+ } while (0)
4001
+
4002
+ #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
4003
+ arg7,arg8,arg9,arg10,arg11,arg12) \
4004
+ do { \
4005
+ volatile OrigFn _orig = (orig); \
4006
+ volatile unsigned long _argvec[3+12]; \
4007
+ volatile unsigned long _res; \
4008
+ /* _argvec[0] holds current r2 across the call */ \
4009
+ _argvec[1] = (unsigned long)_orig.r2; \
4010
+ _argvec[2] = (unsigned long)_orig.nraddr; \
4011
+ _argvec[2+1] = (unsigned long)arg1; \
4012
+ _argvec[2+2] = (unsigned long)arg2; \
4013
+ _argvec[2+3] = (unsigned long)arg3; \
4014
+ _argvec[2+4] = (unsigned long)arg4; \
4015
+ _argvec[2+5] = (unsigned long)arg5; \
4016
+ _argvec[2+6] = (unsigned long)arg6; \
4017
+ _argvec[2+7] = (unsigned long)arg7; \
4018
+ _argvec[2+8] = (unsigned long)arg8; \
4019
+ _argvec[2+9] = (unsigned long)arg9; \
4020
+ _argvec[2+10] = (unsigned long)arg10; \
4021
+ _argvec[2+11] = (unsigned long)arg11; \
4022
+ _argvec[2+12] = (unsigned long)arg12; \
4023
+ __asm__ volatile( \
4024
+ "mr 11,%1\n\t" \
4025
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
4026
+ "std 2,-16(11)\n\t" /* save tocptr */ \
4027
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
4028
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
4029
+ /* arg12 */ \
4030
+ "ld 3,96(11)\n\t" \
4031
+ "std 3,136(1)\n\t" \
4032
+ /* arg11 */ \
4033
+ "ld 3,88(11)\n\t" \
4034
+ "std 3,128(1)\n\t" \
4035
+ /* arg10 */ \
4036
+ "ld 3,80(11)\n\t" \
4037
+ "std 3,120(1)\n\t" \
4038
+ /* arg9 */ \
4039
+ "ld 3,72(11)\n\t" \
4040
+ "std 3,112(1)\n\t" \
4041
+ /* args1-8 */ \
4042
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
4043
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
4044
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
4045
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
4046
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
4047
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
4048
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
4049
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
4050
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
4051
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
4052
+ "mr 11,%1\n\t" \
4053
+ "mr %0,3\n\t" \
4054
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
4055
+ VG_CONTRACT_FRAME_BY(144) \
4056
+ VG_CONTRACT_FRAME_BY(512) \
4057
+ : /*out*/ "=r" (_res) \
4058
+ : /*in*/ "r" (&_argvec[2]) \
4059
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
4060
+ ); \
4061
+ lval = (__typeof__(lval)) _res; \
4062
+ } while (0)
4063
+
4064
+ #endif /* PLAT_ppc64_aix5 */
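For orientation (not part of this header's diff): the CALL_FN_W_* macros above are normally reached from function-wrapping code rather than called directly. A minimal wrapper sketch, assuming the I_WRAP_SONAME_FNNAME_ZU, OrigFn and VALGRIND_GET_ORIG_FN definitions that appear earlier in valgrind.h, and a hypothetical function foo in the main executable:

    #include <stdio.h>
    #include "valgrind.h"   /* include path depends on where the header is vendored */

    /* Hypothetical wrapper for foo(int,int) in the main executable (soname NONE).
       CALL_FN_W_WW forwards the two word-sized arguments to the real function,
       without redirection, and stores its return value in 'result'. */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
    {
       int    result;
       OrigFn fn;
       VALGRIND_GET_ORIG_FN(fn);         /* address of the real foo */
       printf("foo wrapper: before\n");
       CALL_FN_W_WW(result, fn, x, y);   /* call the original on the real entry point */
       printf("foo wrapper: after\n");
       return result;
    }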
4065
+
4066
+
4067
+ /* ------------------------------------------------------------------ */
4068
+ /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
4069
+ /* */
4070
+ /* ------------------------------------------------------------------ */
4071
+
4072
+ /* Some request codes. There are many more of these, but most are not
4073
+ exposed to end-user view. These are the public ones, all of the
4074
+ form 0x1000 + small_number.
4075
+
4076
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
4077
+ ones start at 0x2000.
4078
+ */
4079
+
4080
+ /* These macros are used by tools -- they must be public, but don't
4081
+ embed them into other programs. */
4082
+ #define VG_USERREQ_TOOL_BASE(a,b) \
4083
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
4084
+ #define VG_IS_TOOL_USERREQ(a, b, v) \
4085
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
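As an illustration (a hedged sketch, not part of the diff): Memcheck derives its request codes from the two characters 'M','C', so its base is 0x4D430000 and VG_IS_TOOL_USERREQ simply checks the top 16 bits. The request name below is hypothetical.

    /* 'M' == 0x4D and 'C' == 0x43, so VG_USERREQ_TOOL_BASE('M','C') == 0x4D430000. */
    enum {
       MY_TOOL_REQ_EXAMPLE = VG_USERREQ_TOOL_BASE('M','C') + 0x01
    };
    /* VG_IS_TOOL_USERREQ('M','C', MY_TOOL_REQ_EXAMPLE) is then true, while it is
       false for the core requests defined below (the 0x1xxx range). */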
4086
+
4087
+ /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
4088
+ This enum comprises an ABI exported by Valgrind to programs
4089
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
4090
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
4091
+ typedef
4092
+ enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
4093
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
4094
+
4095
+ /* These allow any function to be called from the simulated
4096
+ CPU but run on the real CPU. Nb: the first arg passed to
4097
+ the function is always the ThreadId of the running
4098
+ thread! So CLIENT_CALL0 actually requires a 1 arg
4099
+ function, etc. */
4100
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
4101
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
4102
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
4103
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
4104
+
4105
+ /* Can be useful in regression testing suites -- eg. can
4106
+ send Valgrind's output to /dev/null and still count
4107
+ errors. */
4108
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
4109
+
4110
+ /* These are useful and can be interpreted by any tool that
4111
+ tracks malloc() et al, by using vg_replace_malloc.c. */
4112
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
4113
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
4114
+ /* Memory pool support. */
4115
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
4116
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
4117
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
4118
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
4119
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
4120
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
4121
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
4122
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
4123
+
4124
+ /* Allow printfs to valgrind log. */
4125
+ /* The first two pass the va_list argument by value, which
4126
+ assumes it is the same size as or smaller than a UWord,
4127
+ which generally isn't the case. Hence they are deprecated.
4128
+ The second two pass the vargs by reference and so are
4129
+ immune to this problem. */
4130
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
4131
+ VG_USERREQ__PRINTF = 0x1401,
4132
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
4133
+ /* both :: char* fmt, va_list* vargs */
4134
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
4135
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
4136
+
4137
+ /* Stack support. */
4138
+ VG_USERREQ__STACK_REGISTER = 0x1501,
4139
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
4140
+ VG_USERREQ__STACK_CHANGE = 0x1503,
4141
+
4142
+ /* Wine support */
4143
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601
4144
+ } Vg_ClientRequest;
4145
+
4146
+ #if !defined(__GNUC__)
4147
+ # define __extension__ /* */
4148
+ #endif
4149
+
4150
+ /* Returns the number of Valgrinds this code is running under. That
4151
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
4152
+ running under Valgrind which is running under another Valgrind,
4153
+ etc. */
4154
+ #define RUNNING_ON_VALGRIND __extension__ \
4155
+ ({unsigned int _qzz_res; \
4156
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
4157
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
4158
+ 0, 0, 0, 0, 0); \
4159
+ _qzz_res; \
4160
+ })
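A minimal usage sketch (the helper name and iteration counts are made up): since programs run much slower under Valgrind, this value can be used to shrink a workload accordingly.

    #include "valgrind.h"

    /* 0 natively, 1 under Valgrind, 2 under Valgrind-on-Valgrind, and so on. */
    static unsigned pick_iterations(void)
    {
       return RUNNING_ON_VALGRIND ? 1000u : 1000000u;
    }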
4161
+
4162
+
4163
+ /* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
4164
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
4165
+ since it provides a way to make sure valgrind will retranslate the
4166
+ invalidated area. Returns no value. */
4167
+ #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
4168
+ {unsigned int _qzz_res; \
4169
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
4170
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
4171
+ _qzz_addr, _qzz_len, 0, 0, 0); \
4172
+ }
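A hedged sketch of the JIT use case mentioned above (the function name and buffer layout are hypothetical): after patching generated code in place, discard the stale translations so Valgrind retranslates the new bytes.

    #include <string.h>
    #include "valgrind.h"

    static void patch_jit_code(unsigned char* code,
                               const unsigned char* newbytes,
                               unsigned long len)
    {
       memcpy(code, newbytes, len);                 /* rewrite the generated code */
       VALGRIND_DISCARD_TRANSLATIONS(code, len);    /* force retranslation of that range */
    }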
4173
+
4174
+
4175
+ /* These requests are for getting Valgrind itself to print something.
4176
+ Possibly with a backtrace. This is a really ugly hack. The return value
4177
+ is the number of characters printed, excluding the "**<pid>** " part at the
4178
+ start and the backtrace (if present). */
4179
+
4180
+ #if defined(NVALGRIND)
4181
+
4182
+ # define VALGRIND_PRINTF(...)
4183
+ # define VALGRIND_PRINTF_BACKTRACE(...)
4184
+
4185
+ #else /* NVALGRIND */
4186
+
4187
+ /* Modern GCC will optimize the static routine out if unused,
4188
+ and the 'unused' attribute will suppress warnings about it. */
4189
+ static int VALGRIND_PRINTF(const char *format, ...)
4190
+ __attribute__((format(__printf__, 1, 2), __unused__));
4191
+ static int
4192
+ VALGRIND_PRINTF(const char *format, ...)
4193
+ {
4194
+ unsigned long _qzz_res;
4195
+ va_list vargs;
4196
+ va_start(vargs, format);
4197
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
4198
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
4199
+ (unsigned long)format,
4200
+ (unsigned long)&vargs,
4201
+ 0, 0, 0);
4202
+ va_end(vargs);
4203
+ return (int)_qzz_res;
4204
+ }
4205
+
4206
+ static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
4207
+ __attribute__((format(__printf__, 1, 2), __unused__));
4208
+ static int
4209
+ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
4210
+ {
4211
+ unsigned long _qzz_res;
4212
+ va_list vargs;
4213
+ va_start(vargs, format);
4214
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
4215
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
4216
+ (unsigned long)format,
4217
+ (unsigned long)&vargs,
4218
+ 0, 0, 0);
4219
+ va_end(vargs);
4220
+ return (int)_qzz_res;
4221
+ }
4222
+
4223
+ #endif /* NVALGRIND */
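A small usage sketch (the helper and its arguments are illustrative): both calls format like printf but write into Valgrind's own log, and they compile away to nothing when NVALGRIND is defined.

    #include "valgrind.h"

    static void report_state(int n_live, const char* where)
    {
       VALGRIND_PRINTF("allocator: %d live blocks\n", n_live);
       VALGRIND_PRINTF_BACKTRACE("unexpected state in %s\n", where);
    }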
4224
+
4225
+
4226
+ /* These requests allow control to move from the simulated CPU to the
4227
+ real CPU, calling an arbitrary function.
4228
+
4229
+ Note that the current ThreadId is inserted as the first argument.
4230
+ So this call:
4231
+
4232
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
4233
+
4234
+ requires f to have this signature:
4235
+
4236
+ Word f(Word tid, Word arg1, Word arg2)
4237
+
4238
+ where "Word" is a word-sized type.
4239
+
4240
+ Note that these client requests are not entirely reliable. For example,
4241
+ if you call a function with them that subsequently calls printf(),
4242
+ there's a high chance Valgrind will crash. Generally, your prospects of
4243
+ these working are made higher if the called function does not refer to
4244
+ any global variables, and does not refer to any libc or other functions
4245
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
4246
+ likely to have a bad outcome, for tricky reasons which we've grappled
4247
+ with a lot in the past.
4248
+ */
4249
+ #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
4250
+ __extension__ \
4251
+ ({unsigned long _qyy_res; \
4252
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
4253
+ VG_USERREQ__CLIENT_CALL0, \
4254
+ _qyy_fn, \
4255
+ 0, 0, 0, 0); \
4256
+ _qyy_res; \
4257
+ })
4258
+
4259
+ #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
4260
+ __extension__ \
4261
+ ({unsigned long _qyy_res; \
4262
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
4263
+ VG_USERREQ__CLIENT_CALL1, \
4264
+ _qyy_fn, \
4265
+ _qyy_arg1, 0, 0, 0); \
4266
+ _qyy_res; \
4267
+ })
4268
+
4269
+ #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
4270
+ __extension__ \
4271
+ ({unsigned long _qyy_res; \
4272
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
4273
+ VG_USERREQ__CLIENT_CALL2, \
4274
+ _qyy_fn, \
4275
+ _qyy_arg1, _qyy_arg2, 0, 0); \
4276
+ _qyy_res; \
4277
+ })
4278
+
4279
+ #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
4280
+ __extension__ \
4281
+ ({unsigned long _qyy_res; \
4282
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
4283
+ VG_USERREQ__CLIENT_CALL3, \
4284
+ _qyy_fn, \
4285
+ _qyy_arg1, _qyy_arg2, \
4286
+ _qyy_arg3, 0); \
4287
+ _qyy_res; \
4288
+ })
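A usage sketch of the signature rule described above (names are hypothetical; 'long' stands in for the word-sized Word type): a CALL2 needs a function taking three word arguments, because the ThreadId is prepended by Valgrind.

    #include "valgrind.h"

    /* The ThreadId arrives as the first argument; the caller only passed 3 and 4. */
    static long add_on_real_cpu(long tid, long a, long b)
    {
       (void)tid;
       return a + b;
    }

    static long demo(void)
    {
       return (long) VALGRIND_NON_SIMD_CALL2(add_on_real_cpu, 3, 4);
    }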
4289
+
4290
+
4291
+ /* Counts the number of errors that have been recorded by a tool. Nb:
4292
+ the tool must record the errors with VG_(maybe_record_error)() or
4293
+ VG_(unique_error)() for them to be counted. */
4294
+ #define VALGRIND_COUNT_ERRORS \
4295
+ __extension__ \
4296
+ ({unsigned int _qyy_res; \
4297
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
4298
+ VG_USERREQ__COUNT_ERRORS, \
4299
+ 0, 0, 0, 0, 0); \
4300
+ _qyy_res; \
4301
+ })
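A sketch of the regression-suite idea from the comment above (the harness function is hypothetical): snapshot the error count around a test case, which works even when Valgrind's own output has been sent to /dev/null.

    #include "valgrind.h"

    static unsigned errors_from(void (*testcase)(void))
    {
       unsigned before = VALGRIND_COUNT_ERRORS;
       testcase();
       return VALGRIND_COUNT_ERRORS - before;
    }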
4302
+
4303
+ /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
4304
+ when heap blocks are allocated in order to give accurate results. This
4305
+ happens automatically for the standard allocator functions such as
4306
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
4307
+ delete[], etc.
4308
+
4309
+ But if your program uses a custom allocator, this doesn't automatically
4310
+ happen, and Valgrind will not do as well. For example, if you allocate
4311
+ superblocks with mmap() and then allocate chunks of the superblocks, all
4312
+ Valgrind's observations will be at the mmap() level and it won't know that
4313
+ the chunks should be considered separate entities. In Memcheck's case,
4314
+ that means you probably won't get heap block overrun detection (because
4315
+ there won't be redzones marked as unaddressable) and you definitely won't
4316
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
+
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ In many cases, these two client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
+ has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+
+ Ignored if addr == 0.
+ */
+ #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ }
+
+ /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+ */
+ #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ }
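/* Illustrative sketch (not part of valgrind.h): a toy bump allocator that
   carves fixed-size chunks out of an mmap()'d superblock and annotates each
   chunk so Memcheck can track it, as described in the comment above.
   my_alloc(), my_free(), CHUNK_SIZE and REDZONE are hypothetical names;
   error handling and block recycling are omitted. */
#include <sys/mman.h>
#include <stddef.h>
#include "valgrind.h"

#define CHUNK_SIZE 64
#define REDZONE    16                  /* padding kept around each chunk */

static char  *superblock = NULL;
static size_t next_off   = 0;

void *my_alloc(void)
{
   if (superblock == NULL)
      superblock = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   char *p = superblock + next_off + REDZONE;
   next_off += CHUNK_SIZE + 2 * REDZONE;
   /* Announce a heap-like block at p: addressable but undefined
      (is_zeroed == 0), with REDZONE bytes of padding on either side. */
   VALGRIND_MALLOCLIKE_BLOCK(p, CHUNK_SIZE, REDZONE, 0);
   return p;
}

void my_free(void *p)
{
   /* Make the block unaddressable again; rzB must match the MALLOCLIKE call. */
   VALGRIND_FREELIKE_BLOCK(p, REDZONE);
}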
+
+ /* Create a memory pool. */
+ #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+ /* Destroy a memory pool. */
+ #define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+ /* Associate a piece of memory with a memory pool. */
+ #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+ /* Disassociate a piece of memory from a memory pool. */
+ #define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+ /* Disassociate any pieces outside a particular range. */
+ #define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+ /* Move a memory pool from anchor address poolA to poolB. */
+ #define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+ /* Resize and/or move a piece associated with a memory pool. */
+ #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+ /* Return 1 if a mempool exists, else 0. */
+ #define VALGRIND_MEMPOOL_EXISTS(pool) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_EXISTS, \
+ pool, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
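/* Illustrative sketch (not part of valgrind.h): annotating a simple region
   ("pool") allocator with the mempool client requests above.  pool_t,
   pool_create, pool_alloc and pool_destroy are hypothetical names; bounds
   checks and error handling are omitted. */
#include <stdlib.h>
#include "valgrind.h"

typedef struct { char *base; size_t used, cap; } pool_t;

pool_t *pool_create(size_t cap)
{
   pool_t *p = malloc(sizeof *p);
   p->base = malloc(cap);
   p->used = 0;
   p->cap  = cap;
   VALGRIND_CREATE_MEMPOOL(p, 0, 0);     /* no redzones, not pre-zeroed */
   return p;
}

void *pool_alloc(pool_t *p, size_t n)
{
   void *chunk = p->base + p->used;
   p->used += n;
   VALGRIND_MEMPOOL_ALLOC(p, chunk, n);  /* associate chunk with the pool */
   return chunk;
}

void pool_destroy(pool_t *p)
{
   VALGRIND_DESTROY_MEMPOOL(p);          /* all associated chunks go away */
   free(p->base);
   free(p);
}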
+
+ /* Mark a piece of memory as being a stack. Returns a stack id. */
+ #define VALGRIND_STACK_REGISTER(start, end) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+ /* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+ #define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+ /* Change the start and end address of the stack id. */
+ #define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
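/* Illustrative sketch (not part of valgrind.h): registering the stack of a
   hypothetical user-level coroutine so Valgrind can follow switches onto it.
   coroutine_t, make_coroutine and destroy_coroutine are made-up names. */
#include <stdlib.h>
#include "valgrind.h"

#define CO_STACK_SIZE (64 * 1024)

typedef struct {
   char        *stack;
   unsigned int valgrind_id;
} coroutine_t;

coroutine_t *make_coroutine(void)
{
   coroutine_t *co = malloc(sizeof *co);
   co->stack = malloc(CO_STACK_SIZE);
   /* Tell Valgrind this region will be used as a stack. */
   co->valgrind_id =
      VALGRIND_STACK_REGISTER(co->stack, co->stack + CO_STACK_SIZE);
   return co;
}

void destroy_coroutine(coroutine_t *co)
{
   VALGRIND_STACK_DEREGISTER(co->valgrind_id);   /* undo the registration */
   free(co->stack);
   free(co);
}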
+
+ /* Load PDB debug info for Wine PE image_map. */
+ #define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0); \
+ }
+
+
+ #undef PLAT_x86_linux
+ #undef PLAT_amd64_linux
+ #undef PLAT_ppc32_linux
+ #undef PLAT_ppc64_linux
+ #undef PLAT_arm_linux
+ #undef PLAT_ppc32_aix5
+ #undef PLAT_ppc64_aix5
+
+ #endif /* __VALGRIND_H */