gccxml_gem 0.9.2-x86-mingw32

Files changed (274)
  1. data/Rakefile +84 -0
  2. data/bin/gccxml.exe +0 -0
  3. data/bin/gccxml_cc1plus.exe +0 -0
  4. data/bin/gccxml_vcconfig.bat +52 -0
  5. data/bin/gccxml_vcconfig.exe +0 -0
  6. data/gccxml.rb +57 -0
  7. data/share/doc/gccxml-0.9/Copyright.txt +55 -0
  8. data/share/doc/gccxml-0.9/gccxml.html +168 -0
  9. data/share/doc/gccxml-0.9/gccxml.txt +293 -0
  10. data/share/gccxml-0.9/Borland/5.5/Wrappers/algorithm +7 -0
  11. data/share/gccxml-0.9/Borland/5.5/Wrappers/algorithm.h +7 -0
  12. data/share/gccxml-0.9/Borland/5.5/Wrappers/bitset +7 -0
  13. data/share/gccxml-0.9/Borland/5.5/Wrappers/bitset.h +7 -0
  14. data/share/gccxml-0.9/Borland/5.5/Wrappers/cctype +1 -0
  15. data/share/gccxml-0.9/Borland/5.5/Wrappers/cfloat +1 -0
  16. data/share/gccxml-0.9/Borland/5.5/Wrappers/climits +1 -0
  17. data/share/gccxml-0.9/Borland/5.5/Wrappers/cmath +1 -0
  18. data/share/gccxml-0.9/Borland/5.5/Wrappers/complex +7 -0
  19. data/share/gccxml-0.9/Borland/5.5/Wrappers/complex.h +7 -0
  20. data/share/gccxml-0.9/Borland/5.5/Wrappers/cstddef +1 -0
  21. data/share/gccxml-0.9/Borland/5.5/Wrappers/cstdio +1 -0
  22. data/share/gccxml-0.9/Borland/5.5/Wrappers/cstdlib +1 -0
  23. data/share/gccxml-0.9/Borland/5.5/Wrappers/cstring +7 -0
  24. data/share/gccxml-0.9/Borland/5.5/Wrappers/cstring.h +7 -0
  25. data/share/gccxml-0.9/Borland/5.5/Wrappers/ctime +1 -0
  26. data/share/gccxml-0.9/Borland/5.5/Wrappers/cwchar +1 -0
  27. data/share/gccxml-0.9/Borland/5.5/Wrappers/cwctype +1 -0
  28. data/share/gccxml-0.9/Borland/5.5/Wrappers/deque +7 -0
  29. data/share/gccxml-0.9/Borland/5.5/Wrappers/deque.h +7 -0
  30. data/share/gccxml-0.9/Borland/5.5/Wrappers/exception +7 -0
  31. data/share/gccxml-0.9/Borland/5.5/Wrappers/exception.h +7 -0
  32. data/share/gccxml-0.9/Borland/5.5/Wrappers/fstream +7 -0
  33. data/share/gccxml-0.9/Borland/5.5/Wrappers/fstream.h +7 -0
  34. data/share/gccxml-0.9/Borland/5.5/Wrappers/function +7 -0
  35. data/share/gccxml-0.9/Borland/5.5/Wrappers/function.h +7 -0
  36. data/share/gccxml-0.9/Borland/5.5/Wrappers/functional +7 -0
  37. data/share/gccxml-0.9/Borland/5.5/Wrappers/gccxml_typeinfo.h +146 -0
  38. data/share/gccxml-0.9/Borland/5.5/Wrappers/iomanip +7 -0
  39. data/share/gccxml-0.9/Borland/5.5/Wrappers/iomanip.h +7 -0
  40. data/share/gccxml-0.9/Borland/5.5/Wrappers/ios +7 -0
  41. data/share/gccxml-0.9/Borland/5.5/Wrappers/ios.h +7 -0
  42. data/share/gccxml-0.9/Borland/5.5/Wrappers/iosfwd +7 -0
  43. data/share/gccxml-0.9/Borland/5.5/Wrappers/iosfwd.h +7 -0
  44. data/share/gccxml-0.9/Borland/5.5/Wrappers/iostream +7 -0
  45. data/share/gccxml-0.9/Borland/5.5/Wrappers/iostream.h +7 -0
  46. data/share/gccxml-0.9/Borland/5.5/Wrappers/istream +7 -0
  47. data/share/gccxml-0.9/Borland/5.5/Wrappers/istream.h +7 -0
  48. data/share/gccxml-0.9/Borland/5.5/Wrappers/iterator +7 -0
  49. data/share/gccxml-0.9/Borland/5.5/Wrappers/iterator.h +7 -0
  50. data/share/gccxml-0.9/Borland/5.5/Wrappers/limits +7 -0
  51. data/share/gccxml-0.9/Borland/5.5/Wrappers/limits.h +7 -0
  52. data/share/gccxml-0.9/Borland/5.5/Wrappers/list +7 -0
  53. data/share/gccxml-0.9/Borland/5.5/Wrappers/list.h +7 -0
  54. data/share/gccxml-0.9/Borland/5.5/Wrappers/locale +7 -0
  55. data/share/gccxml-0.9/Borland/5.5/Wrappers/locale.h +7 -0
  56. data/share/gccxml-0.9/Borland/5.5/Wrappers/map +7 -0
  57. data/share/gccxml-0.9/Borland/5.5/Wrappers/map.h +7 -0
  58. data/share/gccxml-0.9/Borland/5.5/Wrappers/memory +7 -0
  59. data/share/gccxml-0.9/Borland/5.5/Wrappers/memory.h +7 -0
  60. data/share/gccxml-0.9/Borland/5.5/Wrappers/new +7 -0
  61. data/share/gccxml-0.9/Borland/5.5/Wrappers/new.h +7 -0
  62. data/share/gccxml-0.9/Borland/5.5/Wrappers/numeric +7 -0
  63. data/share/gccxml-0.9/Borland/5.5/Wrappers/numeric.h +7 -0
  64. data/share/gccxml-0.9/Borland/5.5/Wrappers/ostream +7 -0
  65. data/share/gccxml-0.9/Borland/5.5/Wrappers/ostream.h +7 -0
  66. data/share/gccxml-0.9/Borland/5.5/Wrappers/queue +7 -0
  67. data/share/gccxml-0.9/Borland/5.5/Wrappers/queue.h +7 -0
  68. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/codecvt +1 -0
  69. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/collate +1 -0
  70. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/ctype +1 -0
  71. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/iosbase +1 -0
  72. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/iotraits +1 -0
  73. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/iterator +1 -0
  74. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/lochelp +1 -0
  75. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/locimpl +1 -0
  76. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/locvector +1 -0
  77. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/messages +1 -0
  78. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/money +1 -0
  79. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/numbrw +1 -0
  80. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/numeral +1 -0
  81. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/rwdispatch.h +1 -0
  82. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/rwlocale +1 -0
  83. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/rwstderr_macros.h +64 -0
  84. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/string_ref +1 -0
  85. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/time +1 -0
  86. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/traits +1 -0
  87. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/tree +1 -0
  88. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/usefacet +1 -0
  89. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/valimp +1 -0
  90. data/share/gccxml-0.9/Borland/5.5/Wrappers/rw/vendor +1 -0
  91. data/share/gccxml-0.9/Borland/5.5/Wrappers/set +7 -0
  92. data/share/gccxml-0.9/Borland/5.5/Wrappers/set.h +7 -0
  93. data/share/gccxml-0.9/Borland/5.5/Wrappers/sstream +7 -0
  94. data/share/gccxml-0.9/Borland/5.5/Wrappers/sstream.h +7 -0
  95. data/share/gccxml-0.9/Borland/5.5/Wrappers/stack +7 -0
  96. data/share/gccxml-0.9/Borland/5.5/Wrappers/stack.h +7 -0
  97. data/share/gccxml-0.9/Borland/5.5/Wrappers/stdexcept +7 -0
  98. data/share/gccxml-0.9/Borland/5.5/Wrappers/stdexcept.h +7 -0
  99. data/share/gccxml-0.9/Borland/5.5/Wrappers/streambuf +7 -0
  100. data/share/gccxml-0.9/Borland/5.5/Wrappers/streambuf.h +7 -0
  101. data/share/gccxml-0.9/Borland/5.5/Wrappers/string +7 -0
  102. data/share/gccxml-0.9/Borland/5.5/Wrappers/string.h +7 -0
  103. data/share/gccxml-0.9/Borland/5.5/Wrappers/strstream +7 -0
  104. data/share/gccxml-0.9/Borland/5.5/Wrappers/strstream.h +7 -0
  105. data/share/gccxml-0.9/Borland/5.5/Wrappers/typeinfo +7 -0
  106. data/share/gccxml-0.9/Borland/5.5/Wrappers/typeinfo.h +7 -0
  107. data/share/gccxml-0.9/Borland/5.5/Wrappers/utility +7 -0
  108. data/share/gccxml-0.9/Borland/5.5/Wrappers/utility.h +7 -0
  109. data/share/gccxml-0.9/Borland/5.5/Wrappers/valarray +7 -0
  110. data/share/gccxml-0.9/Borland/5.5/Wrappers/valarray.h +7 -0
  111. data/share/gccxml-0.9/Borland/5.5/Wrappers/vector +7 -0
  112. data/share/gccxml-0.9/Borland/5.5/Wrappers/vector.h +7 -0
  113. data/share/gccxml-0.9/Borland/5.5/except.h +154 -0
  114. data/share/gccxml-0.9/Borland/5.5/gccxml_fstream.h +431 -0
  115. data/share/gccxml-0.9/Borland/5.5/gccxml_new.h +166 -0
  116. data/share/gccxml-0.9/Borland/5.5/gccxml_valarray.h +786 -0
  117. data/share/gccxml-0.9/Borland/5.5/gccxml_vector.h +1457 -0
  118. data/share/gccxml-0.9/Borland/5.5/io.h +352 -0
  119. data/share/gccxml-0.9/Borland/5.5/oaidl.h +7006 -0
  120. data/share/gccxml-0.9/Borland/5.5/propidl.h +1477 -0
  121. data/share/gccxml-0.9/Borland/5.5/rw/locimpl.h +477 -0
  122. data/share/gccxml-0.9/Borland/5.5/rw/rwlocale.h +619 -0
  123. data/share/gccxml-0.9/Borland/5.5/stdarg.h +55 -0
  124. data/share/gccxml-0.9/Borland/5.5/valarray.cc +2883 -0
  125. data/share/gccxml-0.9/GCC/2.95/algorithm +76 -0
  126. data/share/gccxml-0.9/GCC/2.95/bitset +17 -0
  127. data/share/gccxml-0.9/GCC/2.95/cctype +24 -0
  128. data/share/gccxml-0.9/GCC/2.95/clocale +14 -0
  129. data/share/gccxml-0.9/GCC/2.95/cmath +33 -0
  130. data/share/gccxml-0.9/GCC/2.95/complex +38 -0
  131. data/share/gccxml-0.9/GCC/2.95/csetjmp +13 -0
  132. data/share/gccxml-0.9/GCC/2.95/csignal +14 -0
  133. data/share/gccxml-0.9/GCC/2.95/cstdarg +12 -0
  134. data/share/gccxml-0.9/GCC/2.95/cstddef +13 -0
  135. data/share/gccxml-0.9/GCC/2.95/cstdio +55 -0
  136. data/share/gccxml-0.9/GCC/2.95/cstdlib +66 -0
  137. data/share/gccxml-0.9/GCC/2.95/cstring +34 -0
  138. data/share/gccxml-0.9/GCC/2.95/ctime +24 -0
  139. data/share/gccxml-0.9/GCC/2.95/cwchar +65 -0
  140. data/share/gccxml-0.9/GCC/2.95/cwctype +31 -0
  141. data/share/gccxml-0.9/GCC/2.95/deque +19 -0
  142. data/share/gccxml-0.9/GCC/2.95/exception +20 -0
  143. data/share/gccxml-0.9/GCC/2.95/fstream +23 -0
  144. data/share/gccxml-0.9/GCC/2.95/functional +64 -0
  145. data/share/gccxml-0.9/GCC/2.95/gccxml_bitset +1066 -0
  146. data/share/gccxml-0.9/GCC/2.95/iomanip +20 -0
  147. data/share/gccxml-0.9/GCC/2.95/iosfwd +20 -0
  148. data/share/gccxml-0.9/GCC/2.95/iostream +27 -0
  149. data/share/gccxml-0.9/GCC/2.95/iterator +39 -0
  150. data/share/gccxml-0.9/GCC/2.95/list +19 -0
  151. data/share/gccxml-0.9/GCC/2.95/map +20 -0
  152. data/share/gccxml-0.9/GCC/2.95/memory +21 -0
  153. data/share/gccxml-0.9/GCC/2.95/new +13 -0
  154. data/share/gccxml-0.9/GCC/2.95/numeric +15 -0
  155. data/share/gccxml-0.9/GCC/2.95/pthread.h +16 -0
  156. data/share/gccxml-0.9/GCC/2.95/queue +20 -0
  157. data/share/gccxml-0.9/GCC/2.95/set +20 -0
  158. data/share/gccxml-0.9/GCC/2.95/sstream +24 -0
  159. data/share/gccxml-0.9/GCC/2.95/stack +19 -0
  160. data/share/gccxml-0.9/GCC/2.95/std/bastring.cc +524 -0
  161. data/share/gccxml-0.9/GCC/2.95/std/complext.h +397 -0
  162. data/share/gccxml-0.9/GCC/2.95/std/dcomplex.h +92 -0
  163. data/share/gccxml-0.9/GCC/2.95/std/fcomplex.h +88 -0
  164. data/share/gccxml-0.9/GCC/2.95/std/gslice_array.h +170 -0
  165. data/share/gccxml-0.9/GCC/2.95/std/indirect_array.h +157 -0
  166. data/share/gccxml-0.9/GCC/2.95/std/ldcomplex.h +96 -0
  167. data/share/gccxml-0.9/GCC/2.95/std/mask_array.h +154 -0
  168. data/share/gccxml-0.9/GCC/2.95/std/slice_array.h +156 -0
  169. data/share/gccxml-0.9/GCC/2.95/std/std_valarray.h +728 -0
  170. data/share/gccxml-0.9/GCC/2.95/std/straits.h +162 -0
  171. data/share/gccxml-0.9/GCC/2.95/std/valarray_meta.h +1035 -0
  172. data/share/gccxml-0.9/GCC/2.95/stdexcept +17 -0
  173. data/share/gccxml-0.9/GCC/2.95/stl_alloc.h +1057 -0
  174. data/share/gccxml-0.9/GCC/2.95/stl_bvector.h +836 -0
  175. data/share/gccxml-0.9/GCC/2.95/stl_deque.h +1699 -0
  176. data/share/gccxml-0.9/GCC/2.95/stl_list.h +843 -0
  177. data/share/gccxml-0.9/GCC/2.95/stl_tree.h +1331 -0
  178. data/share/gccxml-0.9/GCC/2.95/stl_vector.h +828 -0
  179. data/share/gccxml-0.9/GCC/2.95/string +26 -0
  180. data/share/gccxml-0.9/GCC/2.95/strstream +23 -0
  181. data/share/gccxml-0.9/GCC/2.95/typeinfo +11 -0
  182. data/share/gccxml-0.9/GCC/2.95/utility +25 -0
  183. data/share/gccxml-0.9/GCC/2.95/valarray +52 -0
  184. data/share/gccxml-0.9/GCC/2.95/vector +19 -0
  185. data/share/gccxml-0.9/GCC/2.96/sstream +305 -0
  186. data/share/gccxml-0.9/GCC/3.0/pthread.h +16 -0
  187. data/share/gccxml-0.9/GCC/3.1/pthread.h +16 -0
  188. data/share/gccxml-0.9/GCC/3.2/bits/fstream.tcc +500 -0
  189. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +581 -0
  190. data/share/gccxml-0.9/GCC/3.2/bits/istream.tcc +1207 -0
  191. data/share/gccxml-0.9/GCC/3.2/bits/locale_facets.h +1810 -0
  192. data/share/gccxml-0.9/GCC/3.2/bits/locale_facets.tcc +2397 -0
  193. data/share/gccxml-0.9/GCC/3.2/bits/messages_members.h +108 -0
  194. data/share/gccxml-0.9/GCC/3.2/bits/ostream.tcc +713 -0
  195. data/share/gccxml-0.9/GCC/3.2/bits/sstream.tcc +241 -0
  196. data/share/gccxml-0.9/GCC/3.2/bits/stl_deque.h +1682 -0
  197. data/share/gccxml-0.9/GCC/3.2/bits/stl_list.h +989 -0
  198. data/share/gccxml-0.9/GCC/3.2/bits/stl_tree.h +1462 -0
  199. data/share/gccxml-0.9/GCC/3.2/bits/stl_vector.h +1085 -0
  200. data/share/gccxml-0.9/GCC/3.2/bits/valarray_meta.h +1063 -0
  201. data/share/gccxml-0.9/GCC/3.2/fstream +579 -0
  202. data/share/gccxml-0.9/GCC/3.2/pthread.h +16 -0
  203. data/share/gccxml-0.9/GCC/3.2/sstream +384 -0
  204. data/share/gccxml-0.9/GCC/3.3/bits/fstream.tcc +530 -0
  205. data/share/gccxml-0.9/GCC/3.3/bits/list.tcc +378 -0
  206. data/share/gccxml-0.9/GCC/3.3/bits/locale_facets.h +2050 -0
  207. data/share/gccxml-0.9/GCC/3.3/bits/messages_members.h +108 -0
  208. data/share/gccxml-0.9/GCC/3.3/bits/sstream.tcc +243 -0
  209. data/share/gccxml-0.9/GCC/3.3/bits/stl_deque.h +1603 -0
  210. data/share/gccxml-0.9/GCC/3.3/bits/stl_list.h +1167 -0
  211. data/share/gccxml-0.9/GCC/3.3/bits/stl_tree.h +1462 -0
  212. data/share/gccxml-0.9/GCC/3.3/bits/stl_vector.h +992 -0
  213. data/share/gccxml-0.9/GCC/3.3/bits/valarray_meta.h +1135 -0
  214. data/share/gccxml-0.9/GCC/3.3/fstream +842 -0
  215. data/share/gccxml-0.9/GCC/3.3/gccxml_builtins.h +22 -0
  216. data/share/gccxml-0.9/GCC/3.3/sstream +638 -0
  217. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +664 -0
  218. data/share/gccxml-0.9/GCC/3.4/gccxml_builtins.h +91 -0
  219. data/share/gccxml-0.9/GCC/4.0/gccxml_builtins.h +128 -0
  220. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +618 -0
  221. data/share/gccxml-0.9/GCC/4.1/gccxml_builtins.h +131 -0
  222. data/share/gccxml-0.9/GCC/4.2/gccxml_builtins.h +131 -0
  223. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +182 -0
  224. data/share/gccxml-0.9/IBM/8.0/adapt_headers.sh +34 -0
  225. data/share/gccxml-0.9/IBM/8.0/stdlib.h.patch +27 -0
  226. data/share/gccxml-0.9/IBM/8.0/xstring.patch +19 -0
  227. data/share/gccxml-0.9/IBM/README +8 -0
  228. data/share/gccxml-0.9/IBM/find_flags +51 -0
  229. data/share/gccxml-0.9/IBM/find_flags_common +48 -0
  230. data/share/gccxml-0.9/Intel/find_flags +56 -0
  231. data/share/gccxml-0.9/Intel/pthread.h +16 -0
  232. data/share/gccxml-0.9/MIPSpro/7.3/exception +9 -0
  233. data/share/gccxml-0.9/MIPSpro/7.3/gccxml_mpro_internals.h +21 -0
  234. data/share/gccxml-0.9/MIPSpro/7.3/iomanip +161 -0
  235. data/share/gccxml-0.9/MIPSpro/7.3/ostream +9 -0
  236. data/share/gccxml-0.9/MIPSpro/7.3/stddef.h +9 -0
  237. data/share/gccxml-0.9/MIPSpro/7.3/stl_config.h +9 -0
  238. data/share/gccxml-0.9/MIPSpro/7.3/stl_locale.h +17 -0
  239. data/share/gccxml-0.9/MIPSpro/7.3/stl_monetary.h +14 -0
  240. data/share/gccxml-0.9/MIPSpro/7.3/stl_numeric_facets.h +13 -0
  241. data/share/gccxml-0.9/MIPSpro/7.3/stl_threads.h +11 -0
  242. data/share/gccxml-0.9/MIPSpro/7.3/string +18 -0
  243. data/share/gccxml-0.9/MIPSpro/find_flags +70 -0
  244. data/share/gccxml-0.9/MIPSpro/mipspro_defs.cxx +63 -0
  245. data/share/gccxml-0.9/Sun/5.8/Cstd.patch +156 -0
  246. data/share/gccxml-0.9/Sun/5.8/adapt_headers.sh +32 -0
  247. data/share/gccxml-0.9/Sun/5.8/std-5.10.patch +22 -0
  248. data/share/gccxml-0.9/Sun/README +8 -0
  249. data/share/gccxml-0.9/Sun/find_flags +51 -0
  250. data/share/gccxml-0.9/Sun/find_flags_common +42 -0
  251. data/share/gccxml-0.9/VcInstall/vc6Include.patch +4560 -0
  252. data/share/gccxml-0.9/VcInstall/vc71Include.patch +921 -0
  253. data/share/gccxml-0.9/VcInstall/vc71PlatformSDK.patch +146 -0
  254. data/share/gccxml-0.9/VcInstall/vc7Include.patch +1139 -0
  255. data/share/gccxml-0.9/VcInstall/vc7PlatformSDK.patch +146 -0
  256. data/share/gccxml-0.9/VcInstall/vc8ExpressInclude.patch +1887 -0
  257. data/share/gccxml-0.9/VcInstall/vc8ExpressPlatformSDK.patch +252 -0
  258. data/share/gccxml-0.9/VcInstall/vc8Include.patch +1887 -0
  259. data/share/gccxml-0.9/VcInstall/vc8PlatformSDK.patch +252 -0
  260. data/share/gccxml-0.9/VcInstall/vc8sp1ExpressInclude.patch +1941 -0
  261. data/share/gccxml-0.9/VcInstall/vc8sp1Include.patch +1970 -0
  262. data/share/gccxml-0.9/VcInstall/vc8sp1PlatformSDK.patch +252 -0
  263. data/share/gccxml-0.9/VcInstall/vc9Include.patch +2047 -0
  264. data/share/gccxml-0.9/VcInstall/vc9PlatformSDK.patch +383 -0
  265. data/share/gccxml-0.9/VcInstall/vc9sp1Include.patch +2565 -0
  266. data/share/gccxml-0.9/VcInstall/vcCat.exe +0 -0
  267. data/share/gccxml-0.9/VcInstall/vcCat.readme.txt +3 -0
  268. data/share/gccxml-0.9/VcInstall/vcPatch.exe +0 -0
  269. data/share/gccxml-0.9/VcInstall/vcPatch.readme.txt +27 -0
  270. data/share/gccxml-0.9/VcInstall/vc_helper +1 -0
  271. data/share/gccxml-0.9/gccxml_config +2 -0
  272. data/share/gccxml-0.9/gccxml_identify_compiler.cc +13 -0
  273. data/share/man/man1/gccxml.1 +246 -0
  274. metadata +331 -0
data/share/gccxml-0.9/GCC/2.95/stl_alloc.h
@@ -0,0 +1,1057 @@
+ /*
+ * Copyright (c) 1996-1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ */
+
+ /* NOTE: This is an internal header file, included by other STL headers.
+ * You should not attempt to use it directly.
+ */
+
+ #ifndef __SGI_STL_INTERNAL_ALLOC_H
+ #define __SGI_STL_INTERNAL_ALLOC_H
+
+ #ifdef __SUNPRO_CC
+ # define __PRIVATE public
+ // Extra access restrictions prevent us from really making some things
+ // private.
+ #else
+ # define __PRIVATE private
+ #endif
+
+ #ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
+ # define __USE_MALLOC
+ #endif
+
+
+ // This implements some standard node allocators. These are
+ // NOT the same as the allocators in the C++ draft standard or in
+ // in the original STL. They do not encapsulate different pointer
+ // types; indeed we assume that there is only one pointer type.
+ // The allocation primitives are intended to allocate individual objects,
+ // not larger arenas as with the original STL allocators.
+
+ #ifdef __STL_USE_EXCEPTIONS
+ # include <new>
+ # define __THROW_BAD_ALLOC throw bad_alloc()
+ #elif !defined(__THROW_BAD_ALLOC)
+ # include <iostream.h>
+ # define __THROW_BAD_ALLOC cerr << "out of memory" << endl; exit(1)
+ #endif
+
+ #ifdef __STL_WIN32THREADS
+ # include <windows.h>
+ #endif
+
+ #include <stddef.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <assert.h>
+ #ifndef __RESTRICT
+ # define __RESTRICT
+ #endif
+
+ #if !defined(__STL_PTHREADS) && !defined(__STL_SOLTHREADS) \
+ && !defined(_NOTHREADS) \
+ && !defined(__STL_SGI_THREADS) && !defined(__STL_WIN32THREADS)
+ # define _NOTHREADS
+ #endif
+
+ # ifdef __STL_PTHREADS
+ // POSIX Threads
+ // This is dubious, since this is likely to be a high contention
+ // lock. Performance may not be adequate.
+ # include <pthread.h>
+ # define __NODE_ALLOCATOR_LOCK \
+ if (threads) pthread_mutex_lock(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_UNLOCK \
+ if (threads) pthread_mutex_unlock(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_THREADS true
+ # define __VOLATILE volatile // Needed at -O3 on SGI
+ # endif
+ # ifdef __STL_SOLTHREADS
+ # include <thread.h>
+ # define __NODE_ALLOCATOR_LOCK \
+ if (threads) mutex_lock(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_UNLOCK \
+ if (threads) mutex_unlock(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_THREADS true
+ # define __VOLATILE
+ # endif
+ # ifdef __STL_WIN32THREADS
+ // The lock needs to be initialized by constructing an allocator
+ // objects of the right type. We do that here explicitly for alloc.
+ # define __NODE_ALLOCATOR_LOCK \
+ EnterCriticalSection(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_UNLOCK \
+ LeaveCriticalSection(&_S_node_allocator_lock)
+ # define __NODE_ALLOCATOR_THREADS true
+ # define __VOLATILE volatile // may not be needed
+ # endif /* WIN32THREADS */
+ # ifdef __STL_SGI_THREADS
+ // This should work without threads, with sproc threads, or with
+ // pthreads. It is suboptimal in all cases.
+ // It is unlikely to even compile on nonSGI machines.
+
+ extern "C" {
+ extern int __us_rsthread_malloc;
+ }
+ // The above is copied from malloc.h. Including <malloc.h>
+ // would be cleaner but fails with certain levels of standard
+ // conformance.
+ # define __NODE_ALLOCATOR_LOCK if (threads && __us_rsthread_malloc) \
+ { _S_lock(&_S_node_allocator_lock); }
+ # define __NODE_ALLOCATOR_UNLOCK if (threads && __us_rsthread_malloc) \
+ { _S_unlock(&_S_node_allocator_lock); }
+ # define __NODE_ALLOCATOR_THREADS true
+ # define __VOLATILE volatile // Needed at -O3 on SGI
+ # endif
+ # ifdef _NOTHREADS
+ // Thread-unsafe
+ # define __NODE_ALLOCATOR_LOCK
+ # define __NODE_ALLOCATOR_UNLOCK
+ # define __NODE_ALLOCATOR_THREADS false
+ # define __VOLATILE
+ # endif
+
+ __STL_BEGIN_NAMESPACE
+
+ #if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+ #pragma set woff 1174
+ #endif
+
+ // Malloc-based allocator. Typically slower than default alloc below.
+ // Typically thread-safe and more storage efficient.
+ #ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
+ # ifdef __DECLARE_GLOBALS_HERE
+ void (* __malloc_alloc_oom_handler)() = 0;
+ // g++ 2.7.2 does not handle static template data members.
+ # else
+ extern void (* __malloc_alloc_oom_handler)();
+ # endif
+ #endif
+
+ template <int __inst>
+ class __malloc_alloc_template {
+
+ private:
+
+ static void* _S_oom_malloc(size_t);
+ static void* _S_oom_realloc(void*, size_t);
+
+ #ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
+ static void (* __malloc_alloc_oom_handler)();
+ #endif
+
+ public:
+
+ static void* allocate(size_t __n)
+ {
+ void* __result = malloc(__n);
+ if (0 == __result) __result = _S_oom_malloc(__n);
+ return __result;
+ }
+
+ static void deallocate(void* __p, size_t /* __n */)
+ {
+ free(__p);
+ }
+
+ static void* reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
+ {
+ void* __result = realloc(__p, __new_sz);
+ if (0 == __result) __result = _S_oom_realloc(__p, __new_sz);
+ return __result;
+ }
+
+ static void (* __set_malloc_handler(void (*__f)()))()
+ {
+ void (* __old)() = __malloc_alloc_oom_handler;
+ __malloc_alloc_oom_handler = __f;
+ return(__old);
+ }
+
+ };
+
+ // malloc_alloc out-of-memory handling
+
+ #ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
+ template <int __inst>
+ void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
+ #endif
+
+ template <int __inst>
+ void*
+ __malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
+ {
+ void (* __my_malloc_handler)();
+ void* __result;
+
+ for (;;) {
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
+ (*__my_malloc_handler)();
+ __result = malloc(__n);
+ if (__result) return(__result);
+ }
+ }
+
+ template <int __inst>
+ void* __malloc_alloc_template<__inst>::_S_oom_realloc(void* __p, size_t __n)
+ {
+ void (* __my_malloc_handler)();
+ void* __result;
+
+ for (;;) {
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
+ (*__my_malloc_handler)();
+ __result = realloc(__p, __n);
+ if (__result) return(__result);
+ }
+ }
+
+ typedef __malloc_alloc_template<0> malloc_alloc;
+
+ template<class _Tp, class _Alloc>
+ class simple_alloc {
+
+ public:
+ static _Tp* allocate(size_t __n)
+ { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
+ static _Tp* allocate(void)
+ { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }
+ static void deallocate(_Tp* __p, size_t __n)
+ { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }
+ static void deallocate(_Tp* __p)
+ { _Alloc::deallocate(__p, sizeof (_Tp)); }
+ };
+
+ // Allocator adaptor to check size arguments for debugging.
+ // Reports errors using assert. Checking can be disabled with
+ // NDEBUG, but it's far better to just use the underlying allocator
+ // instead when no checking is desired.
+ // There is some evidence that this can confuse Purify.
+ template <class _Alloc>
+ class debug_alloc {
+
+ private:
+
+ enum {_S_extra = 8}; // Size of space used to store size. Note
+ // that this must be large enough to preserve
+ // alignment.
+
+ public:
+
+ static void* allocate(size_t __n)
+ {
+ char* __result = (char*)_Alloc::allocate(__n + _S_extra);
+ *(size_t*)__result = __n;
+ return __result + _S_extra;
+ }
+
+ static void deallocate(void* __p, size_t __n)
+ {
+ char* __real_p = (char*)__p - _S_extra;
+ assert(*(size_t*)__real_p == __n);
+ _Alloc::deallocate(__real_p, __n + _S_extra);
+ }
+
+ static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz)
+ {
+ char* __real_p = (char*)__p - _S_extra;
+ assert(*(size_t*)__real_p == __old_sz);
+ char* __result = (char*)
+ _Alloc::reallocate(__real_p, __old_sz + _S_extra, __new_sz + _S_extra);
+ *(size_t*)__result = __new_sz;
+ return __result + _S_extra;
+ }
+
+ };
+
+
+ # ifdef __USE_MALLOC
+
+ typedef malloc_alloc alloc;
+ typedef malloc_alloc single_client_alloc;
+
+ # else
+
+
+ // Default node allocator.
+ // With a reasonable compiler, this should be roughly as fast as the
+ // original STL class-specific allocators, but with less fragmentation.
+ // Default_alloc_template parameters are experimental and MAY
+ // DISAPPEAR in the future. Clients should just use alloc for now.
+ //
+ // Important implementation properties:
+ // 1. If the client request an object of size > _MAX_BYTES, the resulting
+ // object will be obtained directly from malloc.
+ // 2. In all other cases, we allocate an object of size exactly
+ // _S_round_up(requested_size). Thus the client has enough size
+ // information that we can return the object to the proper free list
+ // without permanently losing part of the object.
+ //
+
+ // The first template parameter specifies whether more than one thread
+ // may use this allocator. It is safe to allocate an object from
+ // one instance of a default_alloc and deallocate it with another
+ // one. This effectively transfers its ownership to the second one.
+ // This may have undesirable effects on reference locality.
+ // The second parameter is unreferenced and serves only to allow the
+ // creation of multiple default_alloc instances.
+ // Node that containers built on different allocator instances have
+ // different types, limiting the utility of this approach.
+ #ifdef __SUNPRO_CC
+ // breaks if we make these template class members:
+ enum {_ALIGN = 8};
+ enum {_MAX_BYTES = 128};
+ enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
+ #endif
+
+ template <bool threads, int inst>
+ class __default_alloc_template {
+
+ private:
+ // Really we should use static const int x = N
+ // instead of enum { x = N }, but few compilers accept the former.
+ # ifndef __SUNPRO_CC
+ enum {_ALIGN = 8};
+ enum {_MAX_BYTES = 128};
+ enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
+ # endif
+ static size_t
+ _S_round_up(size_t __bytes)
+ { return (((__bytes) + _ALIGN-1) & ~(_ALIGN - 1)); }
+
+ __PRIVATE:
+ union _Obj {
+ union _Obj* _M_free_list_link;
+ char _M_client_data[1]; /* The client sees this. */
+ };
+ private:
+ # ifdef __SUNPRO_CC
+ static _Obj* __VOLATILE _S_free_list[];
+ // Specifying a size results in duplicate def for 4.1
+ # else
+ static _Obj* __VOLATILE _S_free_list[_NFREELISTS];
+ # endif
+ static size_t _S_freelist_index(size_t __bytes) {
+ return (((__bytes) + _ALIGN-1)/_ALIGN - 1);
+ }
+
+ // Returns an object of size __n, and optionally adds to size __n free list.
+ static void* _S_refill(size_t __n);
+ // Allocates a chunk for nobjs of size "size". nobjs may be reduced
+ // if it is inconvenient to allocate the requested number.
+ static char* _S_chunk_alloc(size_t __size, int& __nobjs);
+
+ // Chunk allocation state.
+ static char* _S_start_free;
+ static char* _S_end_free;
+ static size_t _S_heap_size;
+
+ # ifdef __STL_SGI_THREADS
+ static volatile unsigned long _S_node_allocator_lock;
+ static void _S_lock(volatile unsigned long*);
+ static inline void _S_unlock(volatile unsigned long*);
+ # endif
+
+ # ifdef __STL_PTHREADS
+ static pthread_mutex_t _S_node_allocator_lock;
+ # endif
+
+ # ifdef __STL_SOLTHREADS
+ static mutex_t _S_node_allocator_lock;
+ # endif
+
+ # ifdef __STL_WIN32THREADS
+ static CRITICAL_SECTION _S_node_allocator_lock;
+ static bool _S_node_allocator_lock_initialized;
+
+ public:
+ __default_alloc_template() {
+ // This assumes the first constructor is called before threads
+ // are started.
+ if (!_S_node_allocator_lock_initialized) {
+ InitializeCriticalSection(&_S_node_allocator_lock);
+ _S_node_allocator_lock_initialized = true;
+ }
+ }
+ private:
+ # endif
+
+ class _Lock {
+ public:
+ _Lock() { __NODE_ALLOCATOR_LOCK; }
+ ~_Lock() { __NODE_ALLOCATOR_UNLOCK; }
+ };
+ friend class _Lock;
+
+ public:
+
+ /* __n must be > 0 */
+ static void* allocate(size_t __n)
+ {
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __RESTRICT __result;
+
+ if (__n > (size_t) _MAX_BYTES) {
+ return(malloc_alloc::allocate(__n));
+ }
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
+ // Acquire the lock here with a constructor call.
+ // This ensures that it is released in exit or during stack
+ // unwinding.
+ # ifndef _NOTHREADS
+ /*REFERENCED*/
+ _Lock __lock_instance;
+ # endif
+ __result = *__my_free_list;
+ if (__result == 0) {
+ void* __r = _S_refill(_S_round_up(__n));
+ return __r;
+ }
+ *__my_free_list = __result -> _M_free_list_link;
+ return (__result);
+ };
+
+ /* __p may not be 0 */
+ static void deallocate(void* __p, size_t __n)
+ {
+ _Obj* __q = (_Obj*)__p;
+ _Obj* __VOLATILE* __my_free_list;
+
+ if (__n > (size_t) _MAX_BYTES) {
+ malloc_alloc::deallocate(__p, __n);
+ return;
+ }
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
+ // acquire lock
+ # ifndef _NOTHREADS
+ /*REFERENCED*/
+ _Lock __lock_instance;
+ # endif /* _NOTHREADS */
+ __q -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = __q;
+ // lock is released here
+ }
+
+ static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);
+
+ } ;
+
+ typedef __default_alloc_template<__NODE_ALLOCATOR_THREADS, 0> alloc;
+ typedef __default_alloc_template<false, 0> single_client_alloc;
+
+
+
+ /* We allocate memory in large chunks in order to avoid fragmenting */
+ /* the malloc heap too much. */
+ /* We assume that size is properly aligned. */
+ /* We hold the allocation lock. */
+ template <bool __threads, int __inst>
+ char*
+ __default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
+ int& __nobjs)
+ {
+ char* __result;
+ size_t __total_bytes = __size * __nobjs;
+ size_t __bytes_left = _S_end_free - _S_start_free;
+
+ if (__bytes_left >= __total_bytes) {
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
+ } else if (__bytes_left >= __size) {
+ __nobjs = (int)(__bytes_left/__size);
+ __total_bytes = __size * __nobjs;
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
+ } else {
+ size_t __bytes_to_get =
+ 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
+ // Try to make use of the left-over piece.
+ if (__bytes_left > 0) {
+ _Obj* __VOLATILE* __my_free_list =
+ _S_free_list + _S_freelist_index(__bytes_left);
+
+ ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = (_Obj*)_S_start_free;
+ }
+ _S_start_free = (char*)malloc(__bytes_to_get);
+ if (0 == _S_start_free) {
+ size_t __i;
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __p;
+ // Try to make do with what we have. That can't
+ // hurt. We do not try smaller requests, since that tends
+ // to result in disaster on multi-process machines.
+ for (__i = __size; __i <= _MAX_BYTES; __i += _ALIGN) {
+ __my_free_list = _S_free_list + _S_freelist_index(__i);
+ __p = *__my_free_list;
+ if (0 != __p) {
+ *__my_free_list = __p -> _M_free_list_link;
+ _S_start_free = (char*)__p;
+ _S_end_free = _S_start_free + __i;
+ return(_S_chunk_alloc(__size, __nobjs));
+ // Any leftover piece will eventually make it to the
+ // right free list.
+ }
+ }
+ _S_end_free = 0; // In case of exception.
+ _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
+ // This should either throw an
+ // exception or remedy the situation. Thus we assume it
+ // succeeded.
+ }
+ _S_heap_size += __bytes_to_get;
+ _S_end_free = _S_start_free + __bytes_to_get;
+ return(_S_chunk_alloc(__size, __nobjs));
+ }
+ }
+
+
+ /* Returns an object of size __n, and optionally adds to size __n free list.*/
+ /* We assume that __n is properly aligned. */
+ /* We hold the allocation lock. */
+ template <bool __threads, int __inst>
+ void*
+ __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
+ {
+ int __nobjs = 20;
+ char* __chunk = _S_chunk_alloc(__n, __nobjs);
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __result;
+ _Obj* __current_obj;
+ _Obj* __next_obj;
+ int __i;
+
+ if (1 == __nobjs) return(__chunk);
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
+
+ /* Build free list in chunk */
+ __result = (_Obj*)__chunk;
+ *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
+ for (__i = 1; ; __i++) {
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i) {
+ __current_obj -> _M_free_list_link = 0;
+ break;
+ } else {
+ __current_obj -> _M_free_list_link = __next_obj;
+ }
+ }
+ return(__result);
+ }
+
+ template <bool threads, int inst>
+ void*
+ __default_alloc_template<threads, inst>::reallocate(void* __p,
+ size_t __old_sz,
+ size_t __new_sz)
+ {
+ void* __result;
+ size_t __copy_sz;
+
+ if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
+ return(realloc(__p, __new_sz));
+ }
+ if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
+ __result = allocate(__new_sz);
+ __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
+ memcpy(__result, __p, __copy_sz);
+ deallocate(__p, __old_sz);
+ return(__result);
+ }
+
+ #ifdef __STL_PTHREADS
+ template <bool __threads, int __inst>
+ pthread_mutex_t
+ __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
+ = PTHREAD_MUTEX_INITIALIZER;
+ #endif
+
+ #ifdef __STL_SOLTHREADS
+ template <bool __threads, int __inst>
+ mutex_t
+ __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
+ = DEFAULTMUTEX;
+ #endif
+
+ #ifdef __STL_WIN32THREADS
+ template <bool __threads, int __inst>
+ CRITICAL_SECTION
+ __default_alloc_template<__threads, __inst>::
+ _S_node_allocator_lock;
+
+ template <bool __threads, int __inst>
+ bool
+ __default_alloc_template<__threads, __inst>::
+ _S_node_allocator_lock_initialized
+ = false;
+ #endif
+
+ #ifdef __STL_SGI_THREADS
+ __STL_END_NAMESPACE
+ #include <mutex.h>
+ #include <time.h> /* XXX should use <ctime> */
+ __STL_BEGIN_NAMESPACE
+ // Somewhat generic lock implementations. We need only test-and-set
+ // and some way to sleep. These should work with both SGI pthreads
+ // and sproc threads. They may be useful on other systems.
+ template <bool __threads, int __inst>
+ volatile unsigned long
+ __default_alloc_template<__threads, __inst>::_S_node_allocator_lock = 0;
+
+ #if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
+ # define __test_and_set(l,v) test_and_set(l,v)
+ #endif
+
+ template <bool __threads, int __inst>
+ void
+ __default_alloc_template<__threads, __inst>::
+ _S_lock(volatile unsigned long* __lock)
+ {
+ const unsigned __low_spin_max = 30; // spins if we suspect uniprocessor
+ const unsigned __high_spin_max = 1000; // spins for multiprocessor
+ static unsigned __spin_max = __low_spin_max;
+ unsigned __my_spin_max;
+ static unsigned __last_spins = 0;
+ unsigned __my_last_spins;
+ unsigned __junk;
+ # define __ALLOC_PAUSE \
+ __junk *= __junk; __junk *= __junk; __junk *= __junk; __junk *= __junk
+ int __i;
+
+ if (!__test_and_set((unsigned long*)__lock, 1)) {
+ return;
+ }
+ __my_spin_max = __spin_max;
+ __my_last_spins = __last_spins;
+ for (__i = 0; __i < __my_spin_max; __i++) {
+ if (__i < __my_last_spins/2 || *__lock) {
+ __ALLOC_PAUSE;
+ continue;
+ }
+ if (!__test_and_set((unsigned long*)__lock, 1)) {
+ // got it!
+ // Spinning worked. Thus we're probably not being scheduled
+ // against the other process with which we were contending.
+ // Thus it makes sense to spin longer the next time.
+ __last_spins = __i;
+ __spin_max = __high_spin_max;
+ return;
+ }
+ }
+ // We are probably being scheduled against the other process. Sleep.
+ __spin_max = __low_spin_max;
+ for (__i = 0 ;; ++__i) {
+ struct timespec __ts;
+ int __log_nsec = __i + 6;
+
+ if (!__test_and_set((unsigned long *)__lock, 1)) {
+ return;
+ }
+ if (__log_nsec > 27) __log_nsec = 27;
+ /* Max sleep is 2**27nsec ~ 60msec */
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 1 << __log_nsec;
+ nanosleep(&__ts, 0);
+ }
+ }
+
+ template <bool __threads, int __inst>
+ inline void
+ __default_alloc_template<__threads, __inst>::_S_unlock(
+ volatile unsigned long* __lock)
+ {
+ # if defined(__GNUC__) && __mips >= 3
+ asm("sync");
+ *__lock = 0;
+ # elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
+ __lock_release(__lock);
+ # else
+ *__lock = 0;
+ // This is not sufficient on many multiprocessors, since
+ // writes to protected variables and the lock may be reordered.
+ # endif
+ }
+ #endif
+
+ template <bool __threads, int __inst>
+ char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;
+
+ template <bool __threads, int __inst>
+ char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;
+
+ template <bool __threads, int __inst>
+ size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;
+
+ template <bool __threads, int __inst>
+ typename __default_alloc_template<__threads, __inst>::_Obj* __VOLATILE
+ __default_alloc_template<__threads, __inst> ::_S_free_list[
+ _NFREELISTS
+ ] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+ // The 16 zeros are necessary to make version 4.1 of the SunPro
+ // compiler happy. Otherwise it appears to allocate too little
+ // space for the array.
+
+ # ifdef __STL_WIN32THREADS
+ // Create one to get critical section initialized.
+ // We do this onece per file, but only the first constructor
+ // does anything.
+ static alloc __node_allocator_dummy_instance;
+ # endif
+
+ #endif /* ! __USE_MALLOC */
+
+ // This implements allocators as specified in the C++ standard.
+ //
+ // Note that standard-conforming allocators use many language features
+ // that are not yet widely implemented. In particular, they rely on
+ // member templates, partial specialization, partial ordering of function
+ // templates, the typename keyword, and the use of the template keyword
+ // to refer to a template member of a dependent type.
+
+ #ifdef __STL_USE_STD_ALLOCATORS
+
+ template <class _Tp>
+ class allocator {
+ typedef alloc _Alloc; // The underlying allocator.
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef allocator<_Tp1> other;
+ };
+
+ allocator() __STL_NOTHROW {}
+ allocator(const allocator&) __STL_NOTHROW {}
+ template <class _Tp1> allocator(const allocator<_Tp1>&) __STL_NOTHROW {}
+ ~allocator() __STL_NOTHROW {}
+
+ pointer address(reference __x) const { return &__x; }
+ const_pointer address(const_reference __x) const { return &__x; }
+
+ // __n is permitted to be 0. The C++ standard says nothing about what
+ // the return value is when __n == 0.
+ _Tp* allocate(size_type __n, const void* = 0) {
+ return __n != 0 ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)))
+ : 0;
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type __n)
+ { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }
+
+ size_type max_size() const __STL_NOTHROW
+ { return size_t(-1) / sizeof(_Tp); }
+
+ void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+ void destroy(pointer __p) { __p->~_Tp(); }
+ };
+
+ template<>
+ class allocator<void> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef allocator<_Tp1> other;
+ };
+ };
+
+
+ template <class _T1, class _T2>
+ inline bool operator==(const allocator<_T1>&, const allocator<_T2>&)
+ {
+ return true;
+ }
+
+ template <class _T1, class _T2>
+ inline bool operator!=(const allocator<_T1>&, const allocator<_T2>&)
+ {
+ return false;
+ }
+
+ // Allocator adaptor to turn an SGI-style allocator (e.g. alloc, malloc_alloc)
+ // into a standard-conforming allocator. Note that this adaptor does
+ // *not* assume that all objects of the underlying alloc class are
+ // identical, nor does it assume that all of the underlying alloc's
+ // member functions are static member functions. Note, also, that
+ // __allocator<_Tp, alloc> is essentially the same thing as allocator<_Tp>.
+
+ template <class _Tp, class _Alloc>
+ struct __allocator {
+ _Alloc __underlying_alloc;
+
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef __allocator<_Tp1, _Alloc> other;
+ };
+
+ __allocator() __STL_NOTHROW {}
+ __allocator(const __allocator& __a) __STL_NOTHROW
+ : __underlying_alloc(__a.__underlying_alloc) {}
+ template <class _Tp1>
+ __allocator(const __allocator<_Tp1, _Alloc>& __a) __STL_NOTHROW
+ : __underlying_alloc(__a.__underlying_alloc) {}
+ ~__allocator() __STL_NOTHROW {}
+
+ pointer address(reference __x) const { return &__x; }
+ const_pointer address(const_reference __x) const { return &__x; }
+
+ // __n is permitted to be 0.
+ _Tp* allocate(size_type __n, const void* = 0) {
+ return __n != 0
+ ? static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)))
+ : 0;
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type __n)
+ { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }
+
+ size_type max_size() const __STL_NOTHROW
+ { return size_t(-1) / sizeof(_Tp); }
+
+ void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+ void destroy(pointer __p) { __p->~_Tp(); }
+ };
+
+ template <class _Alloc>
+ class __allocator<void, _Alloc> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef __allocator<_Tp1, _Alloc> other;
+ };
+ };
+
+ template <class _Tp, class _Alloc>
+ inline bool operator==(const __allocator<_Tp, _Alloc>& __a1,
+ const __allocator<_Tp, _Alloc>& __a2)
+ {
+ return __a1.__underlying_alloc == __a2.__underlying_alloc;
+ }
+
+ #ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+ template <class _Tp, class _Alloc>
+ inline bool operator!=(const __allocator<_Tp, _Alloc>& __a1,
+ const __allocator<_Tp, _Alloc>& __a2)
+ {
+ return __a1.__underlying_alloc != __a2.__underlying_alloc;
+ }
+ #endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+ // Comparison operators for all of the predifined SGI-style allocators.
+ // This ensures that __allocator<malloc_alloc> (for example) will
+ // work correctly.
+
+ template <int inst>
+ inline bool operator==(const __malloc_alloc_template<inst>&,
+ const __malloc_alloc_template<inst>&)
+ {
+ return true;
+ }
+
+ #ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+ template <int __inst>
+ inline bool operator!=(const __malloc_alloc_template<__inst>&,
+ const __malloc_alloc_template<__inst>&)
+ {
+ return false;
+ }
+ #endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+ #ifndef __USE_MALLOC
+ template <bool __threads, int __inst>
+ inline bool operator==(const __default_alloc_template<__threads, __inst>&,
+ const __default_alloc_template<__threads, __inst>&)
+ {
+ return true;
+ }
+
+ # ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+ template <bool __threads, int __inst>
+ inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
+ const __default_alloc_template<__threads, __inst>&)
+ {
+ return false;
+ }
+ # endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+ #endif
+
+ template <class _Alloc>
+ inline bool operator==(const debug_alloc<_Alloc>&,
+ const debug_alloc<_Alloc>&) {
+ return true;
+ }
+
+ #ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+ template <class _Alloc>
+ inline bool operator!=(const debug_alloc<_Alloc>&,
+ const debug_alloc<_Alloc>&) {
+ return false;
+ }
+ #endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+ // Another allocator adaptor: _Alloc_traits. This serves two
+ // purposes. First, make it possible to write containers that can use
+ // either SGI-style allocators or standard-conforming allocator.
+ // Second, provide a mechanism so that containers can query whether or
+ // not the allocator has distinct instances. If not, the container
+ // can avoid wasting a word of memory to store an empty object.
+
+ // This adaptor uses partial specialization. The general case of
+ // _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
+ // standard-conforming allocator, possibly with non-equal instances
+ // and non-static members. (It still behaves correctly even if _Alloc
+ // has static member and if all instances are equal. Refinements
+ // affect performance, not correctness.)
+
+ // There are always two members: allocator_type, which is a standard-
+ // conforming allocator type for allocating objects of type _Tp, and
+ // _S_instanceless, a static const member of type bool. If
+ // _S_instanceless is true, this means that there is no difference
+ // between any two instances of type allocator_type. Furthermore, if
+ // _S_instanceless is true, then _Alloc_traits has one additional
+ // member: _Alloc_type. This type encapsulates allocation and
+ // deallocation of objects of type _Tp through a static interface; it
+ // has two member functions, whose signatures are
+ // static _Tp* allocate(size_t)
+ // static void deallocate(_Tp*, size_t)
+
+ // The fully general version.
+
+ template <class _Tp, class _Allocator>
+ struct _Alloc_traits
+ {
+ static const bool _S_instanceless = false;
+ typedef typename _Allocator::template rebind<_Tp>::other
+ allocator_type;
+ };
+
+ template <class _Tp, class _Allocator>
+ const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
+
+ // The version for the default allocator.
+
+ template <class _Tp, class _Tp1>
+ struct _Alloc_traits<_Tp, allocator<_Tp1> >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, alloc> _Alloc_type;
+ typedef allocator<_Tp> allocator_type;
+ };
+
+ // Versions for the predefined SGI-style allocators.
+
+ template <class _Tp, int __inst>
+ struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+ };
+
+ #ifndef __USE_MALLOC
+ template <class _Tp, bool __threads, int __inst>
+ struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
+ _Alloc_type;
+ typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
+ allocator_type;
+ };
+ #endif
+
+ template <class _Tp, class _Alloc>
+ struct _Alloc_traits<_Tp, debug_alloc<_Alloc> >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
+ typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
+ };
+
+ // Versions for the __allocator adaptor used with the predefined
+ // SGI-style allocators.
+
+ template <class _Tp, class _Tp1, int __inst>
+ struct _Alloc_traits<_Tp,
+ __allocator<_Tp1, __malloc_alloc_template<__inst> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+ };
+
+ #ifndef __USE_MALLOC
+ template <class _Tp, class _Tp1, bool __thr, int __inst>
+ struct _Alloc_traits<_Tp,
+ __allocator<_Tp1,
+ __default_alloc_template<__thr, __inst> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
+ _Alloc_type;
+ typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
+ allocator_type;
+ };
+ #endif
+
+ template <class _Tp, class _Tp1, class _Alloc>
+ struct _Alloc_traits<_Tp, __allocator<_Tp1, debug_alloc<_Alloc> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
+ typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
+ };
+
+
+ #endif /* __STL_USE_STD_ALLOCATORS */
+
+ #if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+ #pragma reset woff 1174
+ #endif
+
+ __STL_END_NAMESPACE
+
+ #undef __PRIVATE
+
+ #endif /* __SGI_STL_INTERNAL_ALLOC_H */
+
+ // Local Variables:
+ // mode:C++
+ // End:
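The header above exposes the standard-conforming allocator interface (allocate, deallocate, construct, destroy) on top of the SGI node allocator. The following minimal sketch is not part of the gem; it only illustrates how that pre-C++11 allocator interface is typically exercised, assuming a toolchain where construct/destroy are still available on std::allocator (they were removed in C++20).

    #include <memory>

    int main()
    {
        std::allocator<int> a;
        int* p = a.allocate(4);           // raw storage for 4 ints; no constructors run
        for (int i = 0; i < 4; ++i)
            a.construct(p + i, i * i);    // placement-new each element
        for (int i = 0; i < 4; ++i)
            a.destroy(p + i);             // run destructors (trivial for int)
        a.deallocate(p, 4);               // return the storage to the underlying alloc
        return 0;
    }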