passenger 6.0.16 → 6.0.18

Sign up to get free protection for your applications and to get access to all the features.
Files changed (290) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +46 -2
  3. data/CONTRIBUTORS +2 -0
  4. data/build/integration_tests.rb +6 -6
  5. data/build/ruby_tests.rb +1 -1
  6. data/build/test_basics.rb +0 -1
  7. data/src/agent/Core/Config.h +1 -1
  8. data/src/agent/Core/Controller/Config.h +1 -1
  9. data/src/agent/Core/SpawningKit/PipeWatcher.h +18 -3
  10. data/src/agent/Watchdog/Config.h +1 -1
  11. data/src/cxx_supportlib/Constants.h +1 -1
  12. data/src/cxx_supportlib/vendor-modified/boost/align/aligned_alloc.hpp +1 -1
  13. data/src/cxx_supportlib/vendor-modified/boost/array.hpp +1 -1
  14. data/src/cxx_supportlib/vendor-modified/boost/asio/any_completion_executor.hpp +344 -0
  15. data/src/cxx_supportlib/vendor-modified/boost/asio/any_completion_handler.hpp +686 -0
  16. data/src/cxx_supportlib/vendor-modified/boost/asio/any_io_executor.hpp +56 -0
  17. data/src/cxx_supportlib/vendor-modified/boost/asio/associated_allocator.hpp +81 -25
  18. data/src/cxx_supportlib/vendor-modified/boost/asio/associated_cancellation_slot.hpp +68 -8
  19. data/src/cxx_supportlib/vendor-modified/boost/asio/associated_executor.hpp +46 -11
  20. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_datagram_socket.hpp +4 -4
  21. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_deadline_timer.hpp +2 -2
  22. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_file.hpp +1 -1
  23. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_random_access_file.hpp +2 -2
  24. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_raw_socket.hpp +4 -4
  25. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_readable_pipe.hpp +2 -2
  26. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_seq_packet_socket.hpp +2 -2
  27. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_serial_port.hpp +3 -3
  28. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_signal_set.hpp +2 -2
  29. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_socket.hpp +3 -3
  30. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_socket_acceptor.hpp +4 -4
  31. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_stream_file.hpp +2 -2
  32. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_stream_socket.hpp +2 -2
  33. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_waitable_timer.hpp +2 -2
  34. data/src/cxx_supportlib/vendor-modified/boost/asio/basic_writable_pipe.hpp +2 -2
  35. data/src/cxx_supportlib/vendor-modified/boost/asio/bind_allocator.hpp +13 -3
  36. data/src/cxx_supportlib/vendor-modified/boost/asio/bind_cancellation_slot.hpp +14 -3
  37. data/src/cxx_supportlib/vendor-modified/boost/asio/bind_executor.hpp +13 -3
  38. data/src/cxx_supportlib/vendor-modified/boost/asio/compose.hpp +30 -305
  39. data/src/cxx_supportlib/vendor-modified/boost/asio/consign.hpp +90 -0
  40. data/src/cxx_supportlib/vendor-modified/boost/asio/defer.hpp +17 -18
  41. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/bind_handler.hpp +98 -24
  42. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/composed_work.hpp +330 -0
  43. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/config.hpp +81 -15
  44. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/handler_alloc_helpers.hpp +4 -4
  45. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/handler_work.hpp +19 -11
  46. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/impl/descriptor_ops.ipp +37 -0
  47. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/impl/handler_tracking.ipp +3 -1
  48. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/impl/select_reactor.ipp +1 -1
  49. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/impl/socket_ops.ipp +10 -2
  50. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/impl/strand_executor_service.hpp +14 -1
  51. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/memory.hpp +18 -0
  52. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/utility.hpp +1 -2
  53. data/src/cxx_supportlib/vendor-modified/boost/asio/detail/work_dispatcher.hpp +7 -3
  54. data/src/cxx_supportlib/vendor-modified/boost/asio/dispatch.hpp +4 -14
  55. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/allocator.hpp +22 -3
  56. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/any_executor.hpp +447 -142
  57. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/blocking.hpp +57 -8
  58. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/blocking_adaptation.hpp +51 -6
  59. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/bulk_execute.hpp +5 -0
  60. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/bulk_guarantee.hpp +41 -4
  61. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/connect.hpp +5 -0
  62. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/context.hpp +13 -2
  63. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/context_as.hpp +13 -2
  64. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/detail/as_operation.hpp +4 -0
  65. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/execute.hpp +9 -2
  66. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/executor.hpp +11 -1
  67. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/mapping.hpp +52 -8
  68. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/occupancy.hpp +13 -2
  69. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/operation_state.hpp +5 -0
  70. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/outstanding_work.hpp +39 -6
  71. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/receiver.hpp +5 -0
  72. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/relationship.hpp +39 -6
  73. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/schedule.hpp +5 -0
  74. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/scheduler.hpp +5 -0
  75. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/sender.hpp +5 -0
  76. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/set_done.hpp +5 -0
  77. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/set_error.hpp +5 -0
  78. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/set_value.hpp +5 -0
  79. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/start.hpp +5 -0
  80. data/src/cxx_supportlib/vendor-modified/boost/asio/execution/submit.hpp +5 -0
  81. data/src/cxx_supportlib/vendor-modified/boost/asio/executor_work_guard.hpp +6 -9
  82. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/basic_channel.hpp +3 -3
  83. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/basic_concurrent_channel.hpp +3 -3
  84. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/channel_traits.hpp +70 -0
  85. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/co_composed.hpp +146 -0
  86. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/coro.hpp +35 -5
  87. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/channel_handler.hpp +13 -3
  88. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/channel_operation.hpp +7 -0
  89. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/channel_service.hpp +180 -0
  90. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/coro_completion_handler.hpp +171 -0
  91. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/coro_promise_allocator.hpp +89 -66
  92. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/partial_promise.hpp +66 -45
  93. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/as_single.hpp +13 -3
  94. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/co_composed.hpp +1134 -0
  95. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/coro.hpp +130 -106
  96. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/parallel_group.hpp +377 -3
  97. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/promise.hpp +168 -29
  98. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/use_coro.hpp +149 -203
  99. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/impl/use_promise.hpp +68 -0
  100. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/parallel_group.hpp +256 -0
  101. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/promise.hpp +76 -80
  102. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/use_coro.hpp +40 -14
  103. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/use_promise.hpp +113 -0
  104. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/any_completion_executor.ipp +132 -0
  105. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/any_io_executor.ipp +12 -0
  106. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/append.hpp +12 -3
  107. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/as_tuple.hpp +12 -3
  108. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/buffered_read_stream.hpp +25 -6
  109. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/buffered_write_stream.hpp +25 -6
  110. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/co_spawn.hpp +34 -45
  111. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/connect.hpp +28 -6
  112. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/consign.hpp +204 -0
  113. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/defer.hpp +33 -1
  114. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/deferred.hpp +13 -3
  115. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/dispatch.hpp +21 -9
  116. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/handler_alloc_hook.ipp +1 -1
  117. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/post.hpp +33 -1
  118. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/prepend.hpp +12 -3
  119. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/read.hpp +40 -9
  120. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/read_at.hpp +27 -6
  121. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/read_until.hpp +104 -24
  122. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/redirect_error.hpp +12 -3
  123. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/spawn.hpp +70 -15
  124. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/src.hpp +1 -0
  125. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/use_future.hpp +25 -0
  126. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/write.hpp +38 -30
  127. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/write_at.hpp +27 -6
  128. data/src/cxx_supportlib/vendor-modified/boost/asio/io_context.hpp +0 -13
  129. data/src/cxx_supportlib/vendor-modified/boost/asio/ip/impl/network_v4.ipp +3 -1
  130. data/src/cxx_supportlib/vendor-modified/boost/asio/ip/impl/network_v6.ipp +3 -1
  131. data/src/cxx_supportlib/vendor-modified/boost/asio/posix/basic_descriptor.hpp +2 -2
  132. data/src/cxx_supportlib/vendor-modified/boost/asio/posix/basic_stream_descriptor.hpp +2 -2
  133. data/src/cxx_supportlib/vendor-modified/boost/asio/post.hpp +17 -18
  134. data/src/cxx_supportlib/vendor-modified/boost/asio/spawn.hpp +3 -1
  135. data/src/cxx_supportlib/vendor-modified/boost/asio/ssl/detail/io.hpp +13 -3
  136. data/src/cxx_supportlib/vendor-modified/boost/asio/strand.hpp +11 -7
  137. data/src/cxx_supportlib/vendor-modified/boost/asio/system_executor.hpp +0 -13
  138. data/src/cxx_supportlib/vendor-modified/boost/asio/thread_pool.hpp +23 -18
  139. data/src/cxx_supportlib/vendor-modified/boost/asio/version.hpp +1 -1
  140. data/src/cxx_supportlib/vendor-modified/boost/asio/windows/basic_object_handle.hpp +2 -2
  141. data/src/cxx_supportlib/vendor-modified/boost/asio/windows/basic_overlapped_handle.hpp +1 -1
  142. data/src/cxx_supportlib/vendor-modified/boost/asio/windows/basic_random_access_handle.hpp +2 -2
  143. data/src/cxx_supportlib/vendor-modified/boost/asio/windows/basic_stream_handle.hpp +2 -2
  144. data/src/cxx_supportlib/vendor-modified/boost/asio.hpp +4 -0
  145. data/src/cxx_supportlib/vendor-modified/boost/bind/bind.hpp +1 -1
  146. data/src/cxx_supportlib/vendor-modified/boost/chrono/config.hpp +1 -0
  147. data/src/cxx_supportlib/vendor-modified/boost/chrono/detail/inlined/mac/chrono.hpp +4 -4
  148. data/src/cxx_supportlib/vendor-modified/boost/chrono/detail/scan_keyword.hpp +4 -2
  149. data/src/cxx_supportlib/vendor-modified/boost/chrono/duration.hpp +2 -2
  150. data/src/cxx_supportlib/vendor-modified/boost/chrono/io/duration_io.hpp +3 -3
  151. data/src/cxx_supportlib/vendor-modified/boost/chrono/io/duration_put.hpp +5 -5
  152. data/src/cxx_supportlib/vendor-modified/boost/chrono/io/duration_units.hpp +2 -2
  153. data/src/cxx_supportlib/vendor-modified/boost/chrono/io/time_point_io.hpp +2 -2
  154. data/src/cxx_supportlib/vendor-modified/boost/chrono/io/utility/ios_base_state_ptr.hpp +7 -7
  155. data/src/cxx_supportlib/vendor-modified/boost/chrono/time_point.hpp +1 -1
  156. data/src/cxx_supportlib/vendor-modified/boost/circular_buffer/details.hpp +5 -1
  157. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/clang.hpp +10 -3
  158. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/clang_version.hpp +9 -3
  159. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/gcc.hpp +9 -3
  160. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/sunpro_cc.hpp +6 -0
  161. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/visualc.hpp +8 -0
  162. data/src/cxx_supportlib/vendor-modified/boost/config/compiler/xlcpp.hpp +4 -0
  163. data/src/cxx_supportlib/vendor-modified/boost/config/detail/suffix.hpp +33 -0
  164. data/src/cxx_supportlib/vendor-modified/boost/config/header_deprecated.hpp +1 -1
  165. data/src/cxx_supportlib/vendor-modified/boost/config/stdlib/libcpp.hpp +9 -0
  166. data/src/cxx_supportlib/vendor-modified/boost/config/stdlib/libstdcpp3.hpp +4 -2
  167. data/src/cxx_supportlib/vendor-modified/boost/container/allocator.hpp +1 -1
  168. data/src/cxx_supportlib/vendor-modified/boost/container/container_fwd.hpp +29 -0
  169. data/src/cxx_supportlib/vendor-modified/boost/container/deque.hpp +13 -13
  170. data/src/cxx_supportlib/vendor-modified/boost/container/detail/advanced_insert_int.hpp +80 -95
  171. data/src/cxx_supportlib/vendor-modified/boost/container/detail/config_begin.hpp +10 -0
  172. data/src/cxx_supportlib/vendor-modified/boost/container/detail/config_end.hpp +3 -0
  173. data/src/cxx_supportlib/vendor-modified/boost/container/detail/copy_move_algo.hpp +738 -34
  174. data/src/cxx_supportlib/vendor-modified/boost/container/detail/destroyers.hpp +38 -10
  175. data/src/cxx_supportlib/vendor-modified/boost/container/detail/flat_tree.hpp +1 -0
  176. data/src/cxx_supportlib/vendor-modified/boost/container/detail/iterator.hpp +2 -0
  177. data/src/cxx_supportlib/vendor-modified/boost/container/detail/workaround.hpp +3 -4
  178. data/src/cxx_supportlib/vendor-modified/boost/container/devector.hpp +1150 -1213
  179. data/src/cxx_supportlib/vendor-modified/boost/container/node_allocator.hpp +1 -1
  180. data/src/cxx_supportlib/vendor-modified/boost/container/options.hpp +104 -12
  181. data/src/cxx_supportlib/vendor-modified/boost/container/stable_vector.hpp +1 -0
  182. data/src/cxx_supportlib/vendor-modified/boost/container/string.hpp +11 -1
  183. data/src/cxx_supportlib/vendor-modified/boost/container/vector.hpp +31 -331
  184. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/hash_mix.hpp +113 -0
  185. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/hash_range.hpp +173 -0
  186. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/hash_tuple.hpp +133 -0
  187. data/src/cxx_supportlib/vendor-modified/boost/container_hash/hash.hpp +461 -566
  188. data/src/cxx_supportlib/vendor-modified/boost/container_hash/hash_fwd.hpp +24 -24
  189. data/src/cxx_supportlib/vendor-modified/boost/container_hash/is_contiguous_range.hpp +91 -0
  190. data/src/cxx_supportlib/vendor-modified/boost/container_hash/is_described_class.hpp +38 -0
  191. data/src/cxx_supportlib/vendor-modified/boost/container_hash/is_range.hpp +73 -0
  192. data/src/cxx_supportlib/vendor-modified/boost/container_hash/is_unordered_range.hpp +39 -0
  193. data/src/cxx_supportlib/vendor-modified/boost/core/bit.hpp +32 -4
  194. data/src/cxx_supportlib/vendor-modified/boost/core/detail/string_view.hpp +16 -0
  195. data/src/cxx_supportlib/vendor-modified/boost/core/empty_value.hpp +16 -16
  196. data/src/cxx_supportlib/vendor-modified/boost/core/fclose_deleter.hpp +46 -0
  197. data/src/cxx_supportlib/vendor-modified/boost/date_time/date.hpp +1 -1
  198. data/src/cxx_supportlib/vendor-modified/boost/date_time/gregorian/formatters.hpp +3 -3
  199. data/src/cxx_supportlib/vendor-modified/boost/date_time/gregorian/formatters_limited.hpp +3 -3
  200. data/src/cxx_supportlib/vendor-modified/boost/date_time/gregorian/greg_date.hpp +12 -13
  201. data/src/cxx_supportlib/vendor-modified/boost/date_time/gregorian/parsers.hpp +2 -2
  202. data/src/cxx_supportlib/vendor-modified/boost/date_time/gregorian_calendar.ipp +2 -2
  203. data/src/cxx_supportlib/vendor-modified/boost/date_time/iso_format.hpp +13 -13
  204. data/src/cxx_supportlib/vendor-modified/boost/date_time/local_time/local_date_time.hpp +2 -2
  205. data/src/cxx_supportlib/vendor-modified/boost/date_time/posix_time/time_formatters.hpp +4 -4
  206. data/src/cxx_supportlib/vendor-modified/boost/date_time/posix_time/time_formatters_limited.hpp +2 -2
  207. data/src/cxx_supportlib/vendor-modified/boost/date_time/special_values_parser.hpp +1 -1
  208. data/src/cxx_supportlib/vendor-modified/boost/date_time/time_facet.hpp +1 -1
  209. data/src/cxx_supportlib/vendor-modified/boost/date_time/time_parsing.hpp +2 -2
  210. data/src/cxx_supportlib/vendor-modified/boost/describe/bases.hpp +50 -0
  211. data/src/cxx_supportlib/vendor-modified/boost/describe/detail/config.hpp +40 -0
  212. data/src/cxx_supportlib/vendor-modified/boost/describe/detail/cx_streq.hpp +30 -0
  213. data/src/cxx_supportlib/vendor-modified/boost/describe/detail/void_t.hpp +32 -0
  214. data/src/cxx_supportlib/vendor-modified/boost/describe/members.hpp +159 -0
  215. data/src/cxx_supportlib/vendor-modified/boost/describe/modifiers.hpp +33 -0
  216. data/src/cxx_supportlib/vendor-modified/boost/intrusive/avltree_algorithms.hpp +9 -9
  217. data/src/cxx_supportlib/vendor-modified/boost/intrusive/bstree_algorithms.hpp +45 -45
  218. data/src/cxx_supportlib/vendor-modified/boost/intrusive/detail/any_node_and_algorithms.hpp +8 -8
  219. data/src/cxx_supportlib/vendor-modified/boost/intrusive/detail/bstree_algorithms_base.hpp +37 -38
  220. data/src/cxx_supportlib/vendor-modified/boost/intrusive/detail/iterator.hpp +16 -0
  221. data/src/cxx_supportlib/vendor-modified/boost/intrusive/detail/workaround.hpp +1 -1
  222. data/src/cxx_supportlib/vendor-modified/boost/intrusive/hashtable.hpp +145 -90
  223. data/src/cxx_supportlib/vendor-modified/boost/intrusive/pack_options.hpp +2 -0
  224. data/src/cxx_supportlib/vendor-modified/boost/intrusive/rbtree_algorithms.hpp +7 -7
  225. data/src/cxx_supportlib/vendor-modified/boost/intrusive/sgtree_algorithms.hpp +5 -5
  226. data/src/cxx_supportlib/vendor-modified/boost/intrusive/splaytree_algorithms.hpp +11 -9
  227. data/src/cxx_supportlib/vendor-modified/boost/intrusive/treap_algorithms.hpp +7 -7
  228. data/src/cxx_supportlib/vendor-modified/boost/iterator/iterator_facade.hpp +106 -25
  229. data/src/cxx_supportlib/vendor-modified/boost/lexical_cast/detail/converter_lexical_streams.hpp +1 -1
  230. data/src/cxx_supportlib/vendor-modified/boost/lexical_cast/detail/converter_numeric.hpp +8 -5
  231. data/src/cxx_supportlib/vendor-modified/boost/lexical_cast/detail/lcast_unsigned_converters.hpp +1 -1
  232. data/src/cxx_supportlib/vendor-modified/boost/move/algo/detail/adaptive_sort_merge.hpp +4 -2
  233. data/src/cxx_supportlib/vendor-modified/boost/move/algo/detail/pdqsort.hpp +2 -1
  234. data/src/cxx_supportlib/vendor-modified/boost/move/detail/type_traits.hpp +8 -4
  235. data/src/cxx_supportlib/vendor-modified/boost/mp11/bind.hpp +111 -0
  236. data/src/cxx_supportlib/vendor-modified/boost/mp11/version.hpp +1 -1
  237. data/src/cxx_supportlib/vendor-modified/boost/numeric/conversion/detail/int_float_mixture.hpp +5 -5
  238. data/src/cxx_supportlib/vendor-modified/boost/numeric/conversion/detail/sign_mixture.hpp +5 -5
  239. data/src/cxx_supportlib/vendor-modified/boost/numeric/conversion/detail/udt_builtin_mixture.hpp +5 -5
  240. data/src/cxx_supportlib/vendor-modified/boost/preprocessor/variadic/has_opt.hpp +6 -2
  241. data/src/cxx_supportlib/vendor-modified/boost/smart_ptr/detail/spinlock_gcc_atomic.hpp +11 -2
  242. data/src/cxx_supportlib/vendor-modified/boost/smart_ptr/intrusive_ptr.hpp +1 -1
  243. data/src/cxx_supportlib/vendor-modified/boost/system/detail/config.hpp +7 -1
  244. data/src/cxx_supportlib/vendor-modified/boost/system/detail/error_category.hpp +2 -2
  245. data/src/cxx_supportlib/vendor-modified/boost/system/detail/error_category_impl.hpp +10 -1
  246. data/src/cxx_supportlib/vendor-modified/boost/system/detail/error_code.hpp +38 -43
  247. data/src/cxx_supportlib/vendor-modified/boost/system/detail/error_condition.hpp +52 -0
  248. data/src/cxx_supportlib/vendor-modified/boost/throw_exception.hpp +1 -1
  249. data/src/cxx_supportlib/vendor-modified/boost/type_traits/detail/is_function_ptr_helper.hpp +27 -27
  250. data/src/cxx_supportlib/vendor-modified/boost/type_traits/detail/is_mem_fun_pointer_impl.hpp +27 -27
  251. data/src/cxx_supportlib/vendor-modified/boost/type_traits/intrinsics.hpp +22 -8
  252. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/fca.hpp +37 -7
  253. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/foa.hpp +1921 -0
  254. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/implementation.hpp +66 -82
  255. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/prime_fmod.hpp +6 -0
  256. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/type_traits.hpp +109 -0
  257. data/src/cxx_supportlib/vendor-modified/boost/unordered/detail/xmx.hpp +75 -0
  258. data/src/cxx_supportlib/vendor-modified/boost/unordered/hash_traits.hpp +45 -0
  259. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_flat_map.hpp +732 -0
  260. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_flat_map_fwd.hpp +49 -0
  261. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_flat_set.hpp +586 -0
  262. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_flat_set_fwd.hpp +49 -0
  263. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_map.hpp +166 -66
  264. data/src/cxx_supportlib/vendor-modified/boost/unordered/unordered_set.hpp +145 -28
  265. data/src/cxx_supportlib/vendor-modified/boost/version.hpp +2 -2
  266. data/src/cxx_supportlib/vendor-modified/websocketpp/websocketpp/common/md5.hpp +1 -1
  267. data/src/ruby_native_extension/extconf.rb +1 -1
  268. data/src/ruby_supportlib/phusion_passenger/native_support.rb +4 -2
  269. data/src/ruby_supportlib/phusion_passenger/platform_info/operating_system.rb +1 -1
  270. data/src/ruby_supportlib/phusion_passenger/public_api.rb +3 -0
  271. data/src/ruby_supportlib/phusion_passenger/utils.rb +1 -0
  272. data/src/ruby_supportlib/phusion_passenger.rb +5 -5
  273. metadata +36 -19
  274. data/src/cxx_supportlib/vendor-modified/boost/align/align.hpp +0 -19
  275. data/src/cxx_supportlib/vendor-modified/boost/align/alignment_of.hpp +0 -54
  276. data/src/cxx_supportlib/vendor-modified/boost/align/alignment_of_forward.hpp +0 -20
  277. data/src/cxx_supportlib/vendor-modified/boost/align/detail/align_cxx11.hpp +0 -21
  278. data/src/cxx_supportlib/vendor-modified/boost/align/detail/aligned_alloc.hpp +0 -52
  279. data/src/cxx_supportlib/vendor-modified/boost/align/detail/alignment_of.hpp +0 -31
  280. data/src/cxx_supportlib/vendor-modified/boost/align/detail/alignment_of_cxx11.hpp +0 -23
  281. data/src/cxx_supportlib/vendor-modified/boost/align/detail/element_type.hpp +0 -91
  282. data/src/cxx_supportlib/vendor-modified/boost/align/detail/integral_constant.hpp +0 -53
  283. data/src/cxx_supportlib/vendor-modified/boost/align/detail/min_size.hpp +0 -26
  284. data/src/cxx_supportlib/vendor-modified/boost/asio/experimental/detail/completion_handler_erasure.hpp +0 -196
  285. data/src/cxx_supportlib/vendor-modified/boost/asio/impl/compose.hpp +0 -709
  286. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/float_functions.hpp +0 -336
  287. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/hash_float.hpp +0 -271
  288. data/src/cxx_supportlib/vendor-modified/boost/container_hash/detail/limits.hpp +0 -62
  289. data/src/cxx_supportlib/vendor-modified/boost/container_hash/extensions.hpp +0 -361
  290. data/src/cxx_supportlib/vendor-modified/boost/detail/container_fwd.hpp +0 -157
@@ -0,0 +1,1921 @@
1
+ /* Fast open-addressing hash table.
2
+ *
3
+ * Copyright 2022 Joaquin M Lopez Munoz.
4
+ * Distributed under the Boost Software License, Version 1.0.
5
+ * (See accompanying file LICENSE_1_0.txt or copy at
6
+ * http://www.boost.org/LICENSE_1_0.txt)
7
+ *
8
+ * See https://www.boost.org/libs/unordered for library home page.
9
+ */
10
+
11
+ #ifndef BOOST_UNORDERED_DETAIL_FOA_HPP
12
+ #define BOOST_UNORDERED_DETAIL_FOA_HPP
13
+
14
+ #include <boost/assert.hpp>
15
+ #include <boost/config.hpp>
16
+ #include <boost/config/workaround.hpp>
17
+ #include <boost/core/allocator_traits.hpp>
18
+ #include <boost/core/bit.hpp>
19
+ #include <boost/core/empty_value.hpp>
20
+ #include <boost/core/no_exceptions_support.hpp>
21
+ #include <boost/core/pointer_traits.hpp>
22
+ #include <boost/cstdint.hpp>
23
+ #include <boost/predef.h>
24
+ #include <boost/type_traits/is_nothrow_swappable.hpp>
25
+ #include <boost/unordered/detail/xmx.hpp>
26
+ #include <boost/unordered/hash_traits.hpp>
27
+ #include <climits>
28
+ #include <cmath>
29
+ #include <cstddef>
30
+ #include <cstring>
31
+ #include <iterator>
32
+ #include <limits>
33
+ #include <tuple>
34
+ #include <type_traits>
35
+ #include <utility>
36
+
37
+ #if defined(__SSE2__)||\
38
+ defined(_M_X64)||(defined(_M_IX86_FP)&&_M_IX86_FP>=2)
39
+ #define BOOST_UNORDERED_SSE2
40
+ #include <emmintrin.h>
41
+ #elif defined(__ARM_NEON)&&!defined(__ARM_BIG_ENDIAN)
42
+ #define BOOST_UNORDERED_LITTLE_ENDIAN_NEON
43
+ #include <arm_neon.h>
44
+ #endif
45
+
46
+ #ifdef __has_builtin
47
+ #define BOOST_UNORDERED_HAS_BUILTIN(x) __has_builtin(x)
48
+ #else
49
+ #define BOOST_UNORDERED_HAS_BUILTIN(x) 0
50
+ #endif
51
+
52
+ #if !defined(NDEBUG)
53
+ #define BOOST_UNORDERED_ASSUME(cond) BOOST_ASSERT(cond)
54
+ #elif BOOST_UNORDERED_HAS_BUILTIN(__builtin_assume)
55
+ #define BOOST_UNORDERED_ASSUME(cond) __builtin_assume(cond)
56
+ #elif defined(__GNUC__) || BOOST_UNORDERED_HAS_BUILTIN(__builtin_unreachable)
57
+ #define BOOST_UNORDERED_ASSUME(cond) \
58
+ do{ \
59
+ if(!(cond))__builtin_unreachable(); \
60
+ }while(0)
61
+ #elif defined(_MSC_VER)
62
+ #define BOOST_UNORDERED_ASSUME(cond) __assume(cond)
63
+ #else
64
+ #define BOOST_UNORDERED_ASSUME(cond) \
65
+ do{ \
66
+ static_cast<void>(false&&(cond)); \
67
+ }while(0)
68
+ #endif
69
+
70
+ namespace boost{
71
+ namespace unordered{
72
+ namespace detail{
73
+ namespace foa{
74
+
75
+ static const std::size_t default_bucket_count = 0;
76
+
77
+ /* foa::table is an open-addressing hash table serving as the foundational core
78
+ * of boost::unordered_flat_[map|set]. Its main internal design aspects are:
79
+ *
80
+ * - Element slots are logically split into groups of size N=15. The number
81
+ * of groups is always a power of two, so the number of allocated slots
82
+ * is of the form (N*2^n)-1 (final slot reserved for a sentinel mark).
83
+ * - Positioning is done at the group level rather than the slot level, that
84
+ * is, for any given element its hash value is used to locate a group and
85
+ * insertion is performed on the first available element of that group;
86
+ * if the group is full (overflow), further groups are tried using
87
+ * quadratic probing.
88
+ * - Each group has an associated 16B metadata word holding reduced hash
89
+ * values and overflow information. Reduced hash values are used to
90
+ * accelerate lookup within the group by using 128-bit SIMD or 64-bit word
91
+ * operations.
92
+ */
93
+
94
+ /* group15 controls metadata information of a group of N=15 element slots.
95
+ * The 16B metadata word is organized as follows (LSB depicted rightmost):
96
+ *
97
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
98
+ * |ofw|h14|h13|h12|h11|h10|h09|h08|h07|h06|h05|h04|h03|h02|h01|h00|
99
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
100
+ *
101
+ * hi is 0 if the i-th element slot is available, 1 to mark a sentinel and,
102
+ * when the slot is occupied, a value in the range [2,255] obtained from the
103
+ * element's original hash value.
104
+ * ofw is the so-called overflow byte. If insertion of an element with hash
105
+ * value h is tried on a full group, then the (h%8)-th bit of the overflow
106
+ * byte is set to 1 and a further group is probed. Having an overflow byte
107
+ * brings two advantages:
108
+ *
109
+ * - There's no need to reserve a special value of hi to mark tombstone
110
+ * slots; each reduced hash value keeps then log2(254)=7.99 bits of the
111
+ * original hash (alternative approaches reserve one full bit to mark
112
+ * if the slot is available/deleted, so their reduced hash values are 7 bit
113
+ * strong only).
114
+ * - When doing an unsuccessful lookup (i.e. the element is not present in
115
+ * the table), probing stops at the first non-overflowed group. Having 8
116
+ * bits for signalling overflow makes it very likely that we stop at the
117
+ * current group (this happens when no element with the same (h%8) value
118
+ * has overflowed in the group), saving us an additional group check even
119
+ * under high-load/high-erase conditions. It is critical that hash
120
+ * reduction is invariant under modulo 8 (see maybe_caused_overflow).
121
+ *
122
+ * When looking for an element with hash value h, match(h) returns a bitmask
123
+ * signalling which slots have the same reduced hash value. If available,
124
+ * match uses SSE2 or (little endian) Neon 128-bit SIMD operations. On non-SIMD
125
+ * scenarios, the logical layout described above is physically mapped to two
126
+ * 64-bit words with *bit interleaving*, i.e. the least significant 16 bits of
127
+ * the first 64-bit word contain the least significant bits of each byte in the
128
+ * "logical" 128-bit word, and so forth. With this layout, match can be
129
+ * implemented with 4 ANDs, 3 shifts, 2 XORs, 1 OR and 1 NOT.
130
+ *
131
+ * group15 has no user-defined ctor so that it's a trivial type and can be
132
+ * initialized via memset etc. Where needed, group15::initialize sets the
133
+ * metadata to all zeros.
134
+ */
135
+
136
+ #if defined(BOOST_UNORDERED_SSE2)
137
+
138
+ struct group15
139
+ {
140
+ static constexpr int N=15;
141
+
142
+ struct dummy_group_type
143
+ {
144
+ alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
145
+ };
146
+
147
+ inline void initialize(){m=_mm_setzero_si128();}
148
+
149
+ inline void set(std::size_t pos,std::size_t hash)
150
+ {
151
+ BOOST_ASSERT(pos<N);
152
+ at(pos)=reduced_hash(hash);
153
+ }
154
+
155
+ inline void set_sentinel()
156
+ {
157
+ at(N-1)=sentinel_;
158
+ }
159
+
160
+ inline bool is_sentinel(std::size_t pos)const
161
+ {
162
+ BOOST_ASSERT(pos<N);
163
+ return at(pos)==sentinel_;
164
+ }
165
+
166
+ inline void reset(std::size_t pos)
167
+ {
168
+ BOOST_ASSERT(pos<N);
169
+ at(pos)=available_;
170
+ }
171
+
172
+ static inline void reset(unsigned char* pc)
173
+ {
174
+ *pc=available_;
175
+ }
176
+
177
+ inline int match(std::size_t hash)const
178
+ {
179
+ return _mm_movemask_epi8(
180
+ _mm_cmpeq_epi8(m,_mm_set1_epi32(match_word(hash))))&0x7FFF;
181
+ }
182
+
183
+ inline bool is_not_overflowed(std::size_t hash)const
184
+ {
185
+ static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
186
+
187
+ return !(overflow()&shift[hash%8]);
188
+ }
189
+
190
+ inline void mark_overflow(std::size_t hash)
191
+ {
192
+ #if BOOST_WORKAROUND(BOOST_GCC, >= 50000 && BOOST_GCC < 60000)
193
+ overflow() = static_cast<unsigned char>( overflow() | static_cast<unsigned char>(1<<(hash%8)) );
194
+ #else
195
+ overflow()|=static_cast<unsigned char>(1<<(hash%8));
196
+ #endif
197
+ }
198
+
199
+ static inline bool maybe_caused_overflow(unsigned char* pc)
200
+ {
201
+ std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
202
+ group15 *pg=reinterpret_cast<group15*>(pc-pos);
203
+ return !pg->is_not_overflowed(*pc);
204
+ };
205
+
206
+ inline int match_available()const
207
+ {
208
+ return _mm_movemask_epi8(
209
+ _mm_cmpeq_epi8(m,_mm_setzero_si128()))&0x7FFF;
210
+ }
211
+
212
+ inline int match_occupied()const
213
+ {
214
+ return (~match_available())&0x7FFF;
215
+ }
216
+
217
+ inline int match_really_occupied()const /* excluding sentinel */
218
+ {
219
+ return at(N-1)==sentinel_?match_occupied()&0x3FFF:match_occupied();
220
+ }
221
+
222
+ private:
223
+ static constexpr unsigned char available_=0,
224
+ sentinel_=1;
225
+
226
+ inline static int match_word(std::size_t hash)
227
+ {
228
+ static constexpr boost::uint32_t word[]=
229
+ {
230
+ 0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u,0x06060606u,0x07070707u,
231
+ 0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu,0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,
232
+ 0x10101010u,0x11111111u,0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u,
233
+ 0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du,0x1E1E1E1Eu,0x1F1F1F1Fu,
234
+ 0x20202020u,0x21212121u,0x22222222u,0x23232323u,0x24242424u,0x25252525u,0x26262626u,0x27272727u,
235
+ 0x28282828u,0x29292929u,0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu,
236
+ 0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u,0x36363636u,0x37373737u,
237
+ 0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu,0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,
238
+ 0x40404040u,0x41414141u,0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u,
239
+ 0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du,0x4E4E4E4Eu,0x4F4F4F4Fu,
240
+ 0x50505050u,0x51515151u,0x52525252u,0x53535353u,0x54545454u,0x55555555u,0x56565656u,0x57575757u,
241
+ 0x58585858u,0x59595959u,0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu,
242
+ 0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u,0x66666666u,0x67676767u,
243
+ 0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu,0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,
244
+ 0x70707070u,0x71717171u,0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u,
245
+ 0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du,0x7E7E7E7Eu,0x7F7F7F7Fu,
246
+ 0x80808080u,0x81818181u,0x82828282u,0x83838383u,0x84848484u,0x85858585u,0x86868686u,0x87878787u,
247
+ 0x88888888u,0x89898989u,0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu,
248
+ 0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u,0x96969696u,0x97979797u,
249
+ 0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu,0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,
250
+ 0xA0A0A0A0u,0xA1A1A1A1u,0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u,
251
+ 0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu,0xAEAEAEAEu,0xAFAFAFAFu,
252
+ 0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u,0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,
253
+ 0xB8B8B8B8u,0xB9B9B9B9u,0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu,
254
+ 0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u,0xC6C6C6C6u,0xC7C7C7C7u,
255
+ 0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu,0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,
256
+ 0xD0D0D0D0u,0xD1D1D1D1u,0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u,
257
+ 0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu,0xDEDEDEDEu,0xDFDFDFDFu,
258
+ 0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u,0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,
259
+ 0xE8E8E8E8u,0xE9E9E9E9u,0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu,
260
+ 0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u,0xF6F6F6F6u,0xF7F7F7F7u,
261
+ 0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu,0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu,
262
+ };
263
+
264
+ #if defined(__MSVC_RUNTIME_CHECKS)
265
+ return (int)word[hash&0xffu];
266
+ #else
267
+ return (int)word[(unsigned char)hash];
268
+ #endif
269
+ }
270
+
271
+ inline static unsigned char reduced_hash(std::size_t hash)
272
+ {
273
+ #if defined(__MSVC_RUNTIME_CHECKS)
274
+ return match_word(hash)&0xffu;
275
+ #else
276
+ return (unsigned char)match_word(hash);
277
+ #endif
278
+ }
279
+
280
+ inline unsigned char& at(std::size_t pos)
281
+ {
282
+ return reinterpret_cast<unsigned char*>(&m)[pos];
283
+ }
284
+
285
+ inline unsigned char at(std::size_t pos)const
286
+ {
287
+ return reinterpret_cast<const unsigned char*>(&m)[pos];
288
+ }
289
+
290
+ inline unsigned char& overflow()
291
+ {
292
+ return at(N);
293
+ }
294
+
295
+ inline unsigned char overflow()const
296
+ {
297
+ return at(N);
298
+ }
299
+
300
+ alignas(16) __m128i m;
301
+ };
302
+
303
+ #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON)
304
+
305
+ struct group15
306
+ {
307
+ static constexpr int N=15;
308
+
309
+ struct dummy_group_type
310
+ {
311
+ alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
312
+ };
313
+
314
+ inline void initialize(){m=vdupq_n_s8(0);}
315
+
316
+ inline void set(std::size_t pos,std::size_t hash)
317
+ {
318
+ BOOST_ASSERT(pos<N);
319
+ at(pos)=reduced_hash(hash);
320
+ }
321
+
322
+ inline void set_sentinel()
323
+ {
324
+ at(N-1)=sentinel_;
325
+ }
326
+
327
+ inline bool is_sentinel(std::size_t pos)const
328
+ {
329
+ BOOST_ASSERT(pos<N);
330
+ return pos==N-1&&at(N-1)==sentinel_;
331
+ }
332
+
333
+ inline void reset(std::size_t pos)
334
+ {
335
+ BOOST_ASSERT(pos<N);
336
+ at(pos)=available_;
337
+ }
338
+
339
+ static inline void reset(unsigned char* pc)
340
+ {
341
+ *pc=available_;
342
+ }
343
+
344
+ inline int match(std::size_t hash)const
345
+ {
346
+ return simde_mm_movemask_epi8(
347
+ vceqq_s8(m,vdupq_n_s8(reduced_hash(hash))))&0x7FFF;
348
+ }
349
+
350
+ inline bool is_not_overflowed(std::size_t hash)const
351
+ {
352
+ static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
353
+
354
+ return !(overflow()&shift[hash%8]);
355
+ }
356
+
357
+ inline void mark_overflow(std::size_t hash)
358
+ {
359
+ overflow()|=static_cast<unsigned char>(1<<(hash%8));
360
+ }
361
+
362
+ static inline bool maybe_caused_overflow(unsigned char* pc)
363
+ {
364
+ std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
365
+ group15 *pg=reinterpret_cast<group15*>(pc-pos);
366
+ return !pg->is_not_overflowed(*pc);
367
+ };
368
+
369
+ inline int match_available()const
370
+ {
371
+ return simde_mm_movemask_epi8(vceqq_s8(m,vdupq_n_s8(0)))&0x7FFF;
372
+ }
373
+
374
+ inline int match_occupied()const
375
+ {
376
+ return simde_mm_movemask_epi8(
377
+ vcgtq_u8(vreinterpretq_u8_s8(m),vdupq_n_u8(0)))&0x7FFF;
378
+ }
379
+
380
+ inline int match_really_occupied()const /* excluding sentinel */
381
+ {
382
+ return at(N-1)==sentinel_?match_occupied()&0x3FFF:match_occupied();
383
+ }
384
+
385
+ private:
386
+ static constexpr unsigned char available_=0,
387
+ sentinel_=1;
388
+
389
+ inline static unsigned char reduced_hash(std::size_t hash)
390
+ {
391
+ static constexpr unsigned char table[]={
392
+ 8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
393
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
394
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
395
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
396
+ 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
397
+ 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
398
+ 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
399
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
400
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
401
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
402
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
403
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
404
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
405
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
406
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
407
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
408
+ };
409
+
410
+ return table[(unsigned char)hash];
411
+ }
412
+
413
+ /* Copied from
414
+ * https://github.com/simd-everywhere/simde/blob/master/simde/x86/sse2.h#L3763
415
+ */
416
+
417
+ static inline int simde_mm_movemask_epi8(uint8x16_t a)
418
+ {
419
+ static constexpr uint8_t md[16]={
420
+ 1 << 0, 1 << 1, 1 << 2, 1 << 3,
421
+ 1 << 4, 1 << 5, 1 << 6, 1 << 7,
422
+ 1 << 0, 1 << 1, 1 << 2, 1 << 3,
423
+ 1 << 4, 1 << 5, 1 << 6, 1 << 7,
424
+ };
425
+
426
+ uint8x16_t masked=vandq_u8(vld1q_u8(md),a);
427
+ uint8x8x2_t tmp=vzip_u8(vget_low_u8(masked),vget_high_u8(masked));
428
+ uint16x8_t x=vreinterpretq_u16_u8(vcombine_u8(tmp.val[0],tmp.val[1]));
429
+
430
+ #if defined(__ARM_ARCH_ISA_A64)
431
+ return vaddvq_u16(x);
432
+ #else
433
+ uint64x2_t t64=vpaddlq_u32(vpaddlq_u16(x));
434
+ return int(vgetq_lane_u64(t64,0))+int(vgetq_lane_u64(t64,1));
435
+ #endif
436
+ }
437
+
438
+ inline unsigned char& at(std::size_t pos)
439
+ {
440
+ return reinterpret_cast<unsigned char*>(&m)[pos];
441
+ }
442
+
443
+ inline unsigned char at(std::size_t pos)const
444
+ {
445
+ return reinterpret_cast<const unsigned char*>(&m)[pos];
446
+ }
447
+
448
+ inline unsigned char& overflow()
449
+ {
450
+ return at(N);
451
+ }
452
+
453
+ inline unsigned char overflow()const
454
+ {
455
+ return at(N);
456
+ }
457
+
458
+ alignas(16) int8x16_t m;
459
+ };
460
+
461
+ #else /* non-SIMD */
462
+
463
+ struct group15
464
+ {
465
+ static constexpr int N=15;
466
+
467
+ struct dummy_group_type
468
+ {
469
+ alignas(16) boost::uint64_t m[2]=
470
+ {0x0000000000004000ull,0x0000000000000000ull};
471
+ };
472
+
473
+ inline void initialize(){m[0]=0;m[1]=0;}
474
+
475
+ inline void set(std::size_t pos,std::size_t hash)
476
+ {
477
+ BOOST_ASSERT(pos<N);
478
+ set_impl(pos,reduced_hash(hash));
479
+ }
480
+
481
+ inline void set_sentinel()
482
+ {
483
+ set_impl(N-1,sentinel_);
484
+ }
485
+
486
+ inline bool is_sentinel(std::size_t pos)const
487
+ {
488
+ BOOST_ASSERT(pos<N);
489
+ return
490
+ pos==N-1&&
491
+ (m[0] & boost::uint64_t(0x4000400040004000ull))==boost::uint64_t(0x4000ull)&&
492
+ (m[1] & boost::uint64_t(0x4000400040004000ull))==0;
493
+ }
494
+
495
+ inline void reset(std::size_t pos)
496
+ {
497
+ BOOST_ASSERT(pos<N);
498
+ set_impl(pos,available_);
499
+ }
500
+
501
+ static inline void reset(unsigned char* pc)
502
+ {
503
+ std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
504
+ pc-=pos;
505
+ reinterpret_cast<group15*>(pc)->reset(pos);
506
+ }
507
+
508
+ inline int match(std::size_t hash)const
509
+ {
510
+ return match_impl(reduced_hash(hash));
511
+ }
512
+
513
+ inline bool is_not_overflowed(std::size_t hash)const
514
+ {
515
+ return !(reinterpret_cast<const boost::uint16_t*>(m)[hash%8] & 0x8000u);
516
+ }
517
+
518
+ inline void mark_overflow(std::size_t hash)
519
+ {
520
+ reinterpret_cast<boost::uint16_t*>(m)[hash%8]|=0x8000u;
521
+ }
522
+
523
+ static inline bool maybe_caused_overflow(unsigned char* pc)
524
+ {
525
+ std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
526
+ group15 *pg=reinterpret_cast<group15*>(pc-pos);
527
+ boost::uint64_t x=((pg->m[0])>>pos)&0x000100010001ull;
528
+ #if defined(__MSVC_RUNTIME_CHECKS)
529
+ boost::uint32_t y=(x|(x>>15)|(x>>30))&0xffffffffu;
530
+ #else
531
+ boost::uint32_t y=static_cast<boost::uint32_t>(x|(x>>15)|(x>>30));
532
+ #endif
533
+ return !pg->is_not_overflowed(y);
534
+ };
535
+
536
+ inline int match_available()const
537
+ {
538
+ boost::uint64_t x=~(m[0]|m[1]);
539
+ boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
540
+ y&=y>>16;
541
+ return y&0x7FFF;
542
+ }
543
+
544
+ inline int match_occupied()const
545
+ {
546
+ boost::uint64_t x=m[0]|m[1];
547
+ #if defined(__MSVC_RUNTIME_CHECKS)
548
+ boost::uint32_t y=(x|(x>>32))&0xffffffffu;
549
+ #else
550
+ boost::uint32_t y=static_cast<boost::uint32_t>(x|(x>>32));
551
+ #endif
552
+ y|=y>>16;
553
+ return y&0x7FFF;
554
+ }
555
+
556
+ inline int match_really_occupied()const /* excluding sentinel */
557
+ {
558
+ return ~(match_impl(0)|match_impl(1))&0x7FFF;
559
+ }
560
+
561
+ private:
562
+ static constexpr unsigned char available_=0,
563
+ sentinel_=1;
564
+
565
+ inline static unsigned char reduced_hash(std::size_t hash)
566
+ {
567
+ static constexpr unsigned char table[]={
568
+ 8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
569
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
570
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
571
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
572
+ 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
573
+ 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
574
+ 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
575
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
576
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
577
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
578
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
579
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
580
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
581
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
582
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
583
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
584
+ };
585
+
586
+ #if defined(__MSVC_RUNTIME_CHECKS)
587
+ return table[hash&0xffu];
588
+ #else
589
+ return table[(unsigned char)hash];
590
+ #endif
591
+ }
592
+
593
+ inline void set_impl(std::size_t pos,std::size_t n)
594
+ {
595
+ BOOST_ASSERT(n<256);
596
+ set_impl(m[0],pos,n&0xFu);
597
+ set_impl(m[1],pos,n>>4);
598
+ }
599
+
600
+ static inline void set_impl(boost::uint64_t& x,std::size_t pos,std::size_t n)
601
+ {
602
+ static constexpr boost::uint64_t mask[]=
603
+ {
604
+ 0x0000000000000000ull,0x0000000000000001ull,0x0000000000010000ull,
605
+ 0x0000000000010001ull,0x0000000100000000ull,0x0000000100000001ull,
606
+ 0x0000000100010000ull,0x0000000100010001ull,0x0001000000000000ull,
607
+ 0x0001000000000001ull,0x0001000000010000ull,0x0001000000010001ull,
608
+ 0x0001000100000000ull,0x0001000100000001ull,0x0001000100010000ull,
609
+ 0x0001000100010001ull,
610
+ };
611
+ static constexpr boost::uint64_t imask[]=
612
+ {
613
+ 0x0001000100010001ull,0x0001000100010000ull,0x0001000100000001ull,
614
+ 0x0001000100000000ull,0x0001000000010001ull,0x0001000000010000ull,
615
+ 0x0001000000000001ull,0x0001000000000000ull,0x0000000100010001ull,
616
+ 0x0000000100010000ull,0x0000000100000001ull,0x0000000100000000ull,
617
+ 0x0000000000010001ull,0x0000000000010000ull,0x0000000000000001ull,
618
+ 0x0000000000000000ull,
619
+ };
620
+
621
+ BOOST_ASSERT(pos<16&&n<16);
622
+ x|= mask[n]<<pos;
623
+ x&=~(imask[n]<<pos);
624
+ }
625
+
626
+ inline int match_impl(std::size_t n)const
627
+ {
628
+ static constexpr boost::uint64_t mask[]=
629
+ {
630
+ 0x0000000000000000ull,0x000000000000ffffull,0x00000000ffff0000ull,
631
+ 0x00000000ffffffffull,0x0000ffff00000000ull,0x0000ffff0000ffffull,
632
+ 0x0000ffffffff0000ull,0x0000ffffffffffffull,0xffff000000000000ull,
633
+ 0xffff00000000ffffull,0xffff0000ffff0000ull,0xffff0000ffffffffull,
634
+ 0xffffffff00000000ull,0xffffffff0000ffffull,0xffffffffffff0000ull,
635
+ 0xffffffffffffffffull,
636
+ };
637
+
638
+ BOOST_ASSERT(n<256);
639
+ boost::uint64_t x=m[0]^mask[n&0xFu];
640
+ x=~((m[1]^mask[n>>4])|x);
641
+ boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
642
+ y&=y>>16;
643
+ return y&0x7FFF;
644
+ }
645
+
646
+ alignas(16) boost::uint64_t m[2];
647
+ };
648
+
649
+ #endif
650
+
651
+ /* foa::table uses a size policy to obtain the permissible sizes of the group
652
+ * array (and, by implication, the element array) and to do the hash->group
653
+ * mapping.
654
+ *
655
+ * - size_index(n) returns an unspecified "index" number used in other policy
656
+ * operations.
657
+ * - size(size_index_) returns the number of groups for the given index. It is
658
+ * guaranteed that size(size_index(n)) >= n.
659
+ * - min_size() is the minimum number of groups permissible, i.e.
660
+ * size(size_index(0)).
661
+ * - position(hash,size_index_) maps hash to a position in the range
662
+ * [0,size(size_index_)).
663
+ *
664
+ * The reason we're introducing the intermediate index value for calculating
665
+ * sizes and positions is that it allows us to optimize the implementation of
666
+ * position, which is in the hot path of lookup and insertion operations:
667
+ * pow2_size_policy, the actual size policy used by foa::table, returns 2^n
668
+ * (n>0) as permissible sizes and returns the n most significant bits
669
+ * of the hash value as the position in the group array; using a size index
670
+ * defined as i = (bits in std::size_t) - n, we have an unbeatable
671
+ * implementation of position(hash) as hash>>i.
672
+ * There's a twofold reason for choosing the high bits of hash for positioning:
673
+ * - Multiplication-based mixing tends to yield better entropy in the high
674
+ * part of its result.
675
+ * - group15 reduced-hash values take the *low* bits of hash, and we want
676
+ * these values and positioning to be as uncorrelated as possible.
677
+ */
678
+
679
+ struct pow2_size_policy
680
+ {
681
+ static inline std::size_t size_index(std::size_t n)
682
+ {
683
+ // TODO: min size is 2, see if we can bring it down to 1 without loss
684
+ // of performance
685
+
686
+ return sizeof(std::size_t)*CHAR_BIT-
687
+ (n<=2?1:((std::size_t)(boost::core::bit_width(n-1))));
688
+ }
689
+
690
+ static inline std::size_t size(std::size_t size_index_)
691
+ {
692
+ return std::size_t(1)<<(sizeof(std::size_t)*CHAR_BIT-size_index_);
693
+ }
694
+
695
+ static constexpr std::size_t min_size(){return 2;}
696
+
697
+ static inline std::size_t position(std::size_t hash,std::size_t size_index_)
698
+ {
699
+ return hash>>size_index_;
700
+ }
701
+ };
702
+
703
+ /* size index of a group array for a given *element* capacity */
704
+
705
+ template<typename Group,typename SizePolicy>
706
+ static inline std::size_t size_index_for(std::size_t n)
707
+ {
708
+ /* n/N+1 == ceil((n+1)/N) (extra +1 for the sentinel) */
709
+ return SizePolicy::size_index(n/Group::N+1);
710
+ }
711
+
712
+ /* Quadratic prober over a power-of-two range using triangular numbers.
713
+ * mask in next(mask) must be the range size minus one (and since size is 2^n,
714
+ * mask has exactly its n first bits set to 1).
715
+ */
716
+
717
+ struct pow2_quadratic_prober
718
+ {
719
+ pow2_quadratic_prober(std::size_t pos_):pos{pos_}{}
720
+
721
+ inline std::size_t get()const{return pos;}
722
+
723
+ /* next returns false when the whole array has been traversed, which ends
724
+ * probing (in practice, full-table probing will only happen with very small
725
+ * arrays).
726
+ */
727
+
728
+ inline bool next(std::size_t mask)
729
+ {
730
+ step+=1;
731
+ pos=(pos+step)&mask;
732
+ return step<=mask;
733
+ }
734
+
735
+ private:
736
+ std::size_t pos,step=0;
737
+ };
738
+
739
+ /* Mixing policies: no_mix is the identity function and xmx_mix uses the
740
+ * xmx function defined in <boost/unordered/detail/xmx.hpp>.
741
+ * foa::table mixes hash results with xmx_mix unless the hash is marked as
742
+ * avalanching, i.e. of good quality (see <boost/unordered/hash_traits.hpp>).
743
+ */
744
+
745
+ struct no_mix
746
+ {
747
+ template<typename Hash,typename T>
748
+ static inline std::size_t mix(const Hash& h,const T& x)
749
+ {
750
+ return h(x);
751
+ }
752
+ };
753
+
754
+ struct xmx_mix
755
+ {
756
+ template<typename Hash,typename T>
757
+ static inline std::size_t mix(const Hash& h,const T& x)
758
+ {
759
+ return xmx(h(x));
760
+ }
761
+ };
762
+
763
+ /* boost::core::countr_zero has a potentially costly check for
764
+ * the case x==0.
765
+ */
766
+
767
+ inline unsigned int unchecked_countr_zero(int x)
768
+ {
769
+ #if defined(BOOST_MSVC)
770
+ unsigned long r;
771
+ _BitScanForward(&r,(unsigned long)x);
772
+ return (unsigned int)r;
773
+ #else
774
+ BOOST_UNORDERED_ASSUME(x!=0);
775
+ return (unsigned int)boost::core::countr_zero((unsigned int)x);
776
+ #endif
777
+ }
778
+
779
+ template<typename,typename,typename,typename>
780
+ class table;
781
+
782
+ /* table_iterator keeps two pointers:
783
+ *
784
+ * - A pointer p to the element slot.
785
+ * - A pointer pc to the n-th byte of the associated group metadata, where n
786
+ * is the position of the element in the group.
787
+ *
788
+ * A simpler solution would have been to keep a pointer p to the element, a
789
+ * pointer pg to the group, and the position n, but that would increase
790
+ * sizeof(table_iterator) by 4/8 bytes. In order to make this compact
791
+ * representation feasible, it is required that group objects are aligned
792
+ * to their size, so that we can recover pg and n as
793
+ *
794
+ * - n = pc%sizeof(group)
795
+ * - pg = pc-n
796
+ *
797
+ * (for explanatory purposes pg and pc are treated above as if they were memory
798
+ * addresses rather than pointers).The main drawback of this two-pointer
799
+ * representation is that iterator increment is relatively slow.
800
+ *
801
+ * p = nullptr is conventionally used to mark end() iterators.
802
+ */
803
+
804
+ /* internal conversion from const_iterator to iterator */
805
+ class const_iterator_cast_tag {};
806
+
807
+ template<typename Value,typename Group,bool Const>
808
+ class table_iterator
809
+ {
810
+ public:
811
+ using difference_type=std::ptrdiff_t;
812
+ using value_type=Value;
813
+ using pointer=
814
+ typename std::conditional<Const,value_type const*,value_type*>::type;
815
+ using reference=
816
+ typename std::conditional<Const,value_type const&,value_type&>::type;
817
+ using iterator_category=std::forward_iterator_tag;
818
+ using element_type=
819
+ typename std::conditional<Const,value_type const,value_type>::type;
820
+
821
+ table_iterator()=default;
822
+ template<bool Const2,typename std::enable_if<!Const2>::type* =nullptr>
823
+ table_iterator(const table_iterator<Value,Group,Const2>& x):
824
+ pc{x.pc},p{x.p}{}
825
+ table_iterator(
826
+ const_iterator_cast_tag, const table_iterator<Value,Group,true>& x):
827
+ pc{x.pc},p{x.p}{}
828
+
829
+ inline reference operator*()const noexcept{return *p;}
830
+ inline pointer operator->()const noexcept{return p;}
831
+ inline table_iterator& operator++()noexcept{increment();return *this;}
832
+ inline table_iterator operator++(int)noexcept
833
+ {auto x=*this;increment();return x;}
834
+ friend inline bool operator==(
835
+ const table_iterator& x,const table_iterator& y)
836
+ {return x.p==y.p;}
837
+ friend inline bool operator!=(
838
+ const table_iterator& x,const table_iterator& y)
839
+ {return !(x==y);}
840
+
841
+ private:
842
+ template<typename,typename,bool> friend class table_iterator;
843
+ template<typename,typename,typename,typename> friend class table;
844
+
845
+ table_iterator(Group* pg,std::size_t n,const Value* p_):
846
+ pc{reinterpret_cast<unsigned char*>(const_cast<Group*>(pg))+n},
847
+ p{const_cast<Value*>(p_)}
848
+ {}
849
+
850
+ inline std::size_t rebase() noexcept
851
+ {
852
+ std::size_t off=reinterpret_cast<uintptr_t>(pc)%sizeof(Group);
853
+ pc-=off;
854
+ return off;
855
+ }
856
+
857
+ inline void increment()noexcept
858
+ {
859
+ std::size_t n0=rebase();
860
+
861
+ int mask=(reinterpret_cast<Group*>(pc)->match_occupied()>>(n0+1))<<(n0+1);
862
+ if(!mask){
863
+ do{
864
+ pc+=sizeof(Group);
865
+ p+=Group::N;
866
+ }
867
+ while((mask=reinterpret_cast<Group*>(pc)->match_occupied())==0);
868
+ }
869
+
870
+ auto n=unchecked_countr_zero(mask);
871
+ if(BOOST_UNLIKELY(reinterpret_cast<Group*>(pc)->is_sentinel(n))){
872
+ p=nullptr;
873
+ }
874
+ else{
875
+ pc+=n;
876
+ p-=n0;
877
+ p+=n;
878
+ }
879
+ }
880
+
881
+ unsigned char *pc=nullptr;
882
+ Value *p=nullptr;
883
+ };
884
+
885
+ /* table_arrays controls allocation, initialization and deallocation of
886
+ * paired arrays of groups and element slots. Only one chunk of memory is
887
+ * allocated to place both arrays: this is not done for efficiency reasons,
888
+ * but in order to be able to properly align the group array without storing
889
+ * additional offset information --the alignment required (16B) is usually
890
+ * greater than alignof(std::max_align_t) and thus not guaranteed by
891
+ * allocators.
892
+ */
893
+
894
+ template<typename Group,std::size_t Size>
895
+ Group* dummy_groups()
896
+ {
897
+ /* Dummy storage initialized as if in an empty container (actually, each
898
+ * of its groups is initialized like a separate empty container).
899
+ * We make table_arrays::groups point to this when capacity()==0, so that
900
+ * we are not allocating any dynamic memory and yet lookup can be implemented
901
+ * without checking for groups==nullptr. This space won't ever be used for
902
+ * insertion as the container's capacity is precisely zero.
903
+ */
904
+
905
+ static constexpr typename Group::dummy_group_type
906
+ storage[Size]={typename Group::dummy_group_type(),};
907
+
908
+ return reinterpret_cast<Group*>(
909
+ const_cast<typename Group::dummy_group_type*>(storage));
910
+ }
911
+
912
+ template<typename Value,typename Group,typename SizePolicy>
913
+ struct table_arrays
914
+ {
915
+ using value_type=Value;
916
+ using group_type=Group;
917
+ static constexpr auto N=group_type::N;
918
+ using size_policy=SizePolicy;
919
+
920
+ template<typename Allocator>
921
+ static table_arrays new_(Allocator& al,std::size_t n)
922
+ {
923
+ using alloc_traits=boost::allocator_traits<Allocator>;
924
+
925
+ auto groups_size_index=size_index_for<group_type,size_policy>(n);
926
+ auto groups_size=size_policy::size(groups_size_index);
927
+ table_arrays arrays{groups_size_index,groups_size-1,nullptr,nullptr};
928
+
929
+ if(!n){
930
+ arrays.groups=dummy_groups<group_type,size_policy::min_size()>();
931
+ }
932
+ else{
933
+ arrays.elements=
934
+ boost::to_address(alloc_traits::allocate(al,buffer_size(groups_size)));
935
+
936
+ /* Align arrays.groups to sizeof(group_type). table_iterator critically
937
+ * depends on such alignment for its increment operation.
938
+ */
939
+
940
+ auto p=reinterpret_cast<unsigned char*>(arrays.elements+groups_size*N-1);
941
+ p+=(uintptr_t(sizeof(group_type))-
942
+ reinterpret_cast<uintptr_t>(p))%sizeof(group_type);
943
+ arrays.groups=reinterpret_cast<group_type*>(p);
944
+
945
+ /* memset is faster/not slower than initializing groups individually.
946
+ * This assumes all zeros is group_type's default layout.
947
+ */
948
+
949
+ std::memset(arrays.groups,0,sizeof(group_type)*groups_size);
950
+ arrays.groups[groups_size-1].set_sentinel();
951
+ }
952
+ return arrays;
953
+ }
954
+
955
+ template<typename Allocator>
956
+ static void delete_(Allocator& al,table_arrays& arrays)noexcept
957
+ {
958
+ using alloc_traits=boost::allocator_traits<Allocator>;
959
+ using pointer=typename alloc_traits::pointer;
960
+ using pointer_traits=boost::pointer_traits<pointer>;
961
+
962
+ if(arrays.elements){
963
+ alloc_traits::deallocate(
964
+ al,pointer_traits::pointer_to(*arrays.elements),
965
+ buffer_size(arrays.groups_size_mask+1));
966
+ }
967
+ }
968
+
969
+ /* combined space for elements and groups measured in sizeof(value_type)s */
970
+
971
+ static std::size_t buffer_size(std::size_t groups_size)
972
+ {
973
+ auto buffer_bytes=
974
+ /* space for elements (we subtract 1 because of the sentinel) */
975
+ sizeof(value_type)*(groups_size*N-1)+
976
+ /* space for groups + padding for group alignment */
977
+ sizeof(group_type)*(groups_size+1)-1;
978
+
979
+ /* ceil(buffer_bytes/sizeof(value_type)) */
980
+ return (buffer_bytes+sizeof(value_type)-1)/sizeof(value_type);
981
+ }
982
+
983
+ std::size_t groups_size_index;
984
+ std::size_t groups_size_mask;
985
+ group_type *groups;
986
+ value_type *elements;
987
+ };
988
+
989
+ struct if_constexpr_void_else{void operator()()const{}};
990
+
991
+ template<bool B,typename F,typename G=if_constexpr_void_else>
992
+ void if_constexpr(F f,G g={})
993
+ {
994
+ std::get<B?0:1>(std::forward_as_tuple(f,g))();
995
+ }
996
+
997
+ template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
998
+ void copy_assign_if(T& x,const T& y){x=y;}
999
+
1000
+ template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1001
+ void copy_assign_if(T&,const T&){}
1002
+
1003
+ template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
1004
+ void move_assign_if(T& x,T& y){x=std::move(y);}
1005
+
1006
+ template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1007
+ void move_assign_if(T&,T&){}
1008
+
1009
+ template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
1010
+ void swap_if(T& x,T& y){using std::swap; swap(x,y);}
1011
+
1012
+ template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1013
+ void swap_if(T&,T&){}
1014
+
1015
+ inline void prefetch(const void* p)
1016
+ {
1017
+ (void) p;
1018
+ #if defined(BOOST_GCC)||defined(BOOST_CLANG)
1019
+ __builtin_prefetch((const char*)p);
1020
+ #elif defined(BOOST_UNORDERED_SSE2)
1021
+ _mm_prefetch((const char*)p,_MM_HINT_T0);
1022
+ #endif
1023
+ }
1024
+
1025
+ #if defined(BOOST_GCC)
1026
+ /* GCC's -Wshadow triggers at scenarios like this:
1027
+ *
1028
+ * struct foo{};
1029
+ * template<typename Base>
1030
+ * struct derived:Base
1031
+ * {
1032
+ * void f(){int foo;}
1033
+ * };
1034
+ *
1035
+ * derived<foo>x;
1036
+ * x.f(); // declaration of "foo" in derived::f shadows base type "foo"
1037
+ *
1038
+ * This makes shadowing warnings unavoidable in general when a class template
1039
+ * derives from user-provided classes, as is the case with table and
1040
+ * empty_value's below.
1041
+ */
1042
+
1043
+ #pragma GCC diagnostic push
1044
+ #pragma GCC diagnostic ignored "-Wshadow"
1045
+ #endif
1046
+
1047
+ #if defined(BOOST_MSVC)
1048
+ #pragma warning(push)
1049
+ #pragma warning(disable:4714) /* marked as __forceinline not inlined */
1050
+ #endif
1051
+
1052
+ #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
1053
+ /* VS2015 marks as unreachable generic catch clauses around non-throwing
1054
+ * code.
1055
+ */
1056
+ #pragma warning(push)
1057
+ #pragma warning(disable:4702)
1058
+ #endif
1059
+
1060
+ /* foa::table interface departs in a number of ways from that of C++ unordered
1061
+ * associative containers because it's not for end-user consumption
1062
+ * (boost::unordered_flat_[map|set] wrappers complete it as appropriate) and,
1063
+ * more importantly, because of fundamental restrictions imposed by open
1064
+ * addressing:
1065
+ *
1066
+ * - value_type must be moveable.
1067
+ * - Pointer stability is not kept under rehashing.
1068
+ * - begin() is not O(1).
1069
+ * - No bucket API.
1070
+ * - Load factor is fixed and can't be set by the user.
1071
+ * - No extract API.
1072
+ *
1073
+ * The TypePolicy template parameter is used to generate instantiations
1074
+ * suitable for either maps or sets, and introduces non-standard init_type:
1075
+ *
1076
+ * - TypePolicy::key_type and TypePolicy::value_type have the obvious
1077
+ * meaning.
1078
+ * - TypePolicy::init_type is the type implicitly converted to when
1079
+ * writing x.insert({...}). For maps, this is std::pair<Key,T> rather
1080
+ * than std::pair<const Key,T> so that, for instance, x.insert({"hello",0})
1081
+ * produces a cheaply moveable std::string&& ("hello") rather than
1082
+ * a copyable const std::string&&. foa::table::insert is extended to accept
1083
+ * both init_type and value_type references.
1084
+ * - TypePolicy::move(value_type&) returns a temporary object for value
1085
+ * transfer on rehashing, move copy/assignment, and merge. For maps, this
1086
+ * object is a std::pair<Key&&,T&&>, which is generally cheaper to move
1087
+ * than std::pair<const Key,T>&& because of the constness in Key.
1088
+ * - TypePolicy::extract returns a const reference to the key part of
1089
+ * a value of type value_type, init_type or
1090
+ * decltype(TypePolicy::move(...)).
1091
+ *
1092
+ * try_emplace, erase and find support heterogeneous lookup by default, that is,
1093
+ * without checking for any ::is_transparent typedefs --the checking is done by
1094
+ * boost::unordered_flat_[map|set].
1095
+ *
1096
+ * At the moment, we're not supporting allocators with fancy pointers.
1097
+ * Allocator::pointer must be convertible to/from regular pointers.
1098
+ */
1099
+
1100
/* We pull this out so the tests don't have to rely on a magic constant or
 * instantiate the table class template as it can be quite gory.
 * 0.875 (7/8) is the fixed maximum load factor of foa::table; it is not
 * user-configurable (see max_load_factor() below).
 */
constexpr static float const mlf = 0.875f;
1104
+
1105
/* Open-addressing hash table core shared by boost::unordered_flat_{map,set}.
 * See the long commentary above for the TypePolicy contract and the ways
 * this interface departs from the standard unordered containers.
 */
template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
class

#if defined(_MSC_VER)&&_MSC_FULL_VER>=190023918
__declspec(empty_bases) /* activate EBO with multiple inheritance */
#endif

/* private empty_value bases compress stateless Hash/Pred/Allocator to
 * zero size (empty-base optimization)
 */
table:empty_value<Hash,0>,empty_value<Pred,1>,empty_value<Allocator,2>
{
  using hash_base=empty_value<Hash,0>;
  using pred_base=empty_value<Pred,1>;
  using allocator_base=empty_value<Allocator,2>;
  using type_policy=TypePolicy;
  using group_type=group15;
  static constexpr auto N=group_type::N; /* element slots per metadata group */
  using size_policy=pow2_size_policy;
  using prober=pow2_quadratic_prober;
  /* if the hash is tagged as avalanching, its bits are used as-is;
   * otherwise an extra mixing step (xmx_mix) is applied
   */
  using mix_policy=typename std::conditional<
    hash_is_avalanching<Hash>::value,
    no_mix,
    xmx_mix
  >::type;
  using alloc_traits=boost::allocator_traits<Allocator>;

public:
  using key_type=typename type_policy::key_type;
  using init_type=typename type_policy::init_type;
  using value_type=typename type_policy::value_type;

private:
  /* sets (key_type==value_type) only get const iterators */
  static constexpr bool has_mutable_iterator=
    !std::is_same<key_type,value_type>::value;

public:
  using hasher=Hash;
  using key_equal=Pred;
  using allocator_type=Allocator;
  using pointer=value_type*;
  using const_pointer=const value_type*;
  using reference=value_type&;
  using const_reference=const value_type&;
  using size_type=std::size_t;
  using difference_type=std::ptrdiff_t;
  using const_iterator=table_iterator<value_type,group_type,true>;
  using iterator=typename std::conditional<
    has_mutable_iterator,
    table_iterator<value_type,group_type,false>,
    const_iterator>::type;

  table(
    std::size_t n=0,const Hash& h_=Hash(),const Pred& pred_=Pred(),
    const Allocator& al_=Allocator()):
    hash_base{empty_init,h_},pred_base{empty_init,pred_},
    allocator_base{empty_init,al_},size_{0},arrays(new_arrays(n)),
    ml{initial_max_load()}
  {}

  table(const table& x):
    table{x,alloc_traits::select_on_container_copy_construction(x.al())}{}

  /* move construction steals the arrays and leaves x empty but valid */
  table(table&& x)
    noexcept(
      std::is_nothrow_move_constructible<Hash>::value&&
      std::is_nothrow_move_constructible<Pred>::value&&
      std::is_nothrow_move_constructible<Allocator>::value):
    hash_base{empty_init,std::move(x.h())},
    pred_base{empty_init,std::move(x.pred())},
    allocator_base{empty_init,std::move(x.al())},
    size_{x.size_},arrays(x.arrays),ml{x.ml}
  {
    x.size_=0;
    x.arrays=x.new_arrays(0);
    x.ml=x.initial_max_load();
  }

  /* sized so that all of x's elements fit without rehashing */
  table(const table& x,const Allocator& al_):
    table{std::size_t(std::ceil(float(x.size())/mlf)),x.h(),x.pred(),al_}
  {
    x.for_all_elements([this](value_type* p){
      unchecked_insert(*p);
    });
  }

  table(table&& x,const Allocator& al_):
    table{0,std::move(x.h()),std::move(x.pred()),al_}
  {
    if(al()==x.al()){
      /* equal allocators: cheap wholesale steal of x's storage */
      std::swap(size_,x.size_);
      std::swap(arrays,x.arrays);
      std::swap(ml,x.ml);
    }
    else{
      /* unequal allocators: element-by-element move into our own storage */
      reserve(x.size());
      clear_on_exit c{x};
      (void)c; /* unused var warning */

      /* This works because subsequent x.clear() does not depend on the
       * elements' values.
       */
      x.for_all_elements([this](value_type* p){
        unchecked_insert(type_policy::move(*p));
      });
    }
  }

  ~table()noexcept
  {
    for_all_elements([this](value_type* p){
      destroy_element(p);
    });
    delete_arrays(arrays);
  }

  table& operator=(const table& x)
  {
    static constexpr auto pocca=
      alloc_traits::propagate_on_container_copy_assignment::value;

    if(this!=std::addressof(x)){
      clear();
      h()=x.h();
      pred()=x.pred();
      if_constexpr<pocca>([&,this]{
        /* propagating an unequal allocator invalidates our storage first */
        if(al()!=x.al())reserve(0);
        copy_assign_if<pocca>(al(),x.al());
      });
      /* noshrink: favor memory reuse over tightness */
      noshrink_reserve(x.size());
      x.for_all_elements([this](value_type* p){
        unchecked_insert(*p);
      });
    }
    return *this;
  }

#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable:4127) /* conditional expression is constant */
#endif

  table& operator=(table&& x)
    noexcept(
      alloc_traits::is_always_equal::value&&
      std::is_nothrow_move_assignable<Hash>::value&&
      std::is_nothrow_move_assignable<Pred>::value)
  {
    static constexpr auto pocma=
      alloc_traits::propagate_on_container_move_assignment::value;

    if(this!=std::addressof(x)){
      clear();
      h()=std::move(x.h());
      pred()=std::move(x.pred());
      if(pocma||al()==x.al()){
        /* allocator propagates or compares equal: steal x's storage */
        using std::swap;
        reserve(0);
        move_assign_if<pocma>(al(),x.al());
        swap(size_,x.size_);
        swap(arrays,x.arrays);
        swap(ml,x.ml);
      }
      else{
        /* noshrink: favor memory reuse over tightness */
        noshrink_reserve(x.size());
        clear_on_exit c{x};
        (void)c; /* unused var warning */

        /* This works because subsequent x.clear() does not depend on the
         * elements' values.
         */
        x.for_all_elements([this](value_type* p){
          unchecked_insert(type_policy::move(*p));
        });
      }
    }
    return *this;
  }

#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4127 */
#endif

  allocator_type get_allocator()const noexcept{return al();}

  /* not O(1): must land on the first occupied slot (see class-level notes) */
  iterator begin()noexcept
  {
    iterator it{arrays.groups,0,arrays.elements};
    if(!(arrays.groups[0].match_occupied()&0x1))++it;
    return it;
  }

  const_iterator begin()const noexcept
  {return const_cast<table*>(this)->begin();}
  iterator end()noexcept{return {};}
  const_iterator end()const noexcept{return const_cast<table*>(this)->end();}
  const_iterator cbegin()const noexcept{return begin();}
  const_iterator cend()const noexcept{return end();}

  bool empty()const noexcept{return size()==0;}
  std::size_t size()const noexcept{return size_;}
  std::size_t max_size()const noexcept{return SIZE_MAX;}

  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace(Args&&... args)
  {
    /* construct from Args... as init_type when possible (cheaper for maps),
     * else as value_type
     */
    using emplace_type = typename std::conditional<
      std::is_constructible<
        init_type, Args...
      >::value,
      init_type,
      value_type
    >::type;
    return emplace_impl(emplace_type(std::forward<Args>(args)...));
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> try_emplace(
    Key&& k,Args&&... args)
  {
    return emplace_impl(
      std::piecewise_construct,
      std::forward_as_tuple(std::forward<Key>(k)),
      std::forward_as_tuple(std::forward<Args>(args)...));
  }

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const init_type& x){return emplace_impl(x);}

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(init_type&& x){return emplace_impl(std::move(x));}

  /* template<typename=void> tilts call ambiguities in favor of init_type */

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const value_type& x){return emplace_impl(x);}

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(value_type&& x){return emplace_impl(std::move(x));}

  /* only available when iterator differs from const_iterator (maps) */
  template<
    bool dependent_value=false,
    typename std::enable_if<
      has_mutable_iterator||dependent_value>::type* =nullptr
  >
  void erase(iterator pos)noexcept{return erase(const_iterator(pos));}

  BOOST_FORCEINLINE
  void erase(const_iterator pos)noexcept
  {
    destroy_element(pos.p);
    recover_slot(pos.pc);
  }

  /* disabled for iterator-convertible keys so the overloads above win */
  template<typename Key>
  BOOST_FORCEINLINE
  auto erase(Key&& x) -> typename std::enable_if<
    !std::is_convertible<Key,iterator>::value&&
    !std::is_convertible<Key,const_iterator>::value, std::size_t>::type
  {
    auto it=find(x);
    if(it!=end()){
      erase(it);
      return 1;
    }
    else return 0;
  }

  void swap(table& x)
    noexcept(
      alloc_traits::is_always_equal::value&&
      boost::is_nothrow_swappable<Hash>::value&&
      boost::is_nothrow_swappable<Pred>::value)
  {
    static constexpr auto pocs=
      alloc_traits::propagate_on_container_swap::value;

    using std::swap;
    swap(h(),x.h());
    swap(pred(),x.pred());
    if_constexpr<pocs>([&,this]{
      swap_if<pocs>(al(),x.al());
    },
    [&,this]{ /* else */
      /* non-propagating swap with unequal allocators is UB per the standard */
      BOOST_ASSERT(al()==x.al());
      (void)this; /* makes sure captured this is used */
    });
    swap(size_,x.size_);
    swap(arrays,x.arrays);
    swap(ml,x.ml);
  }

  /* destroys all elements; keeps the current capacity */
  void clear()noexcept
  {
    auto p=arrays.elements;
    if(p){
      for(auto pg=arrays.groups,last=pg+arrays.groups_size_mask+1;
          pg!=last;++pg,p+=N){
        auto mask=pg->match_really_occupied();
        while(mask){
          destroy_element(p+unchecked_countr_zero(mask));
          mask&=mask-1;
        }
        /* we wipe the entire metadata to reset the overflow byte as well */
        pg->initialize();
      }
      arrays.groups[arrays.groups_size_mask].set_sentinel();
      size_=0;
      ml=initial_max_load();
    }
  }

  // TODO: should we accept different allocator too?
  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>& x)
  {
    /* elements whose key already exists here stay in x */
    x.for_all_elements([&,this](group_type* pg,unsigned int n,value_type* p){
      if(emplace_impl(type_policy::move(*p)).second){
        x.erase(iterator{pg,n,p});
      }
    });
  }

  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}

  hasher hash_function()const{return h();}
  key_equal key_eq()const{return pred();}

  /* heterogeneous lookup: Key need not be key_type (see class-level notes) */
  template<typename Key>
  BOOST_FORCEINLINE iterator find(const Key& x)
  {
    auto hash=hash_for(x);
    return find_impl(x,position_for(hash),hash);
  }

  template<typename Key>
  BOOST_FORCEINLINE const_iterator find(const Key& x)const
  {
    return const_cast<table*>(this)->find(x);
  }

  /* one slot per group is reserved (sentinel), hence the -1 */
  std::size_t capacity()const noexcept
  {
    return arrays.elements?(arrays.groups_size_mask+1)*N-1:0;
  }

  float load_factor()const noexcept
  {
    if (capacity() == 0) { return 0; }
    return float(size())/float(capacity());
  }

  float max_load_factor()const noexcept{return mlf;}

  std::size_t max_load()const noexcept{return ml;}

  void rehash(std::size_t n)
  {
    auto m=size_t(std::ceil(float(size())/mlf));
    if(m>n)n=m;
    if(n)n=capacity_for(n); /* exact resulting capacity */

    if(n!=capacity())unchecked_rehash(n);
  }

  void reserve(std::size_t n)
  {
    rehash(std::size_t(std::ceil(float(n)/mlf)));
  }

  template<typename Predicate>
  friend std::size_t erase_if(table& x,Predicate pr)
  {
    return x.erase_if_impl(pr);
  }

private:
  /* needed by merge() across differently-hashed instantiations */
  template<typename,typename,typename,typename> friend class table;
  using arrays_type=table_arrays<value_type,group_type,size_policy>;

  /* RAII guard: clears the referenced table on scope exit */
  struct clear_on_exit
  {
    ~clear_on_exit(){x.clear();}
    table& x;
  };

  /* accessors for the EBO-compressed function objects and allocator */
  Hash& h(){return hash_base::get();}
  const Hash& h()const{return hash_base::get();}
  Pred& pred(){return pred_base::get();}
  const Pred& pred()const{return pred_base::get();}
  Allocator& al(){return allocator_base::get();}
  const Allocator& al()const{return allocator_base::get();}

  arrays_type new_arrays(std::size_t n)
  {
    return arrays_type::new_(al(),n);
  }

  void delete_arrays(arrays_type& arrays_)noexcept
  {
    arrays_type::delete_(al(),arrays_);
  }

  template<typename... Args>
  void construct_element(value_type* p,Args&&... args)
  {
    alloc_traits::construct(al(),p,std::forward<Args>(args)...);
  }

  void destroy_element(value_type* p)noexcept
  {
    alloc_traits::destroy(al(),p);
  }

  /* RAII guard: destroys *p on scope exit even if an exception is thrown */
  struct destroy_element_on_exit
  {
    ~destroy_element_on_exit(){this_->destroy_element(p);}
    table *this_;
    value_type *p;
  };

  void recover_slot(unsigned char* pc)
  {
    /* If this slot potentially caused overflow, we decrease the maximum load
     * so that average probe length won't increase unboundedly in repeated
     * insert/erase cycles (drift).
     */
    ml-=group_type::maybe_caused_overflow(pc);
    group_type::reset(pc);
    --size_;
  }

  void recover_slot(group_type* pg,std::size_t pos)
  {
    recover_slot(reinterpret_cast<unsigned char*>(pg)+pos);
  }

  std::size_t initial_max_load()const
  {
    static constexpr std::size_t small_capacity=2*N-1;

    auto capacity_=capacity();
    if(capacity_<=small_capacity){
      return capacity_; /* we allow 100% usage */
    }
    else{
      return (std::size_t)(mlf*(float)(capacity_));
    }
  }

  /* key extraction for value_type/init_type/moved values */
  template<typename T>
  static inline auto key_from(const T& x)
    ->decltype(type_policy::extract(x))
  {
    return type_policy::extract(x);
  }

  /* key extraction for try_emplace's piecewise-construct argument form */
  template<typename Arg1,typename Arg2>
  static inline auto key_from(
    std::piecewise_construct_t,const Arg1& k,const Arg2&)
    ->decltype(std::get<0>(k))
  {
    return std::get<0>(k);
  }

  template<typename Key>
  inline std::size_t hash_for(const Key& x)const
  {
    return mix_policy::mix(h(),x);
  }

  inline std::size_t position_for(std::size_t hash)const
  {
    return position_for(hash,arrays);
  }

  static inline std::size_t position_for(
    std::size_t hash,const arrays_type& arrays_)
  {
    return size_policy::position(hash,arrays_.groups_size_index);
  }

  static inline void prefetch_elements(const value_type* p)
  {
    /* We have experimentally confirmed that ARM architectures get a higher
     * speedup when around the first half of the element slots in a group are
     * prefetched, whereas for Intel just the first cache line is best.
     * Please report back if you find better tunings for some particular
     * architectures.
     */

#if BOOST_ARCH_ARM
    /* Cache line size can't be known at compile time, so we settle on
     * the very frequent value of 64B.
     */
    constexpr int cache_line=64;
    const char *p0=reinterpret_cast<const char*>(p),
               *p1=p0+sizeof(value_type)*N/2;
    for(;p0<p1;p0+=cache_line)prefetch(p0);
#else
    prefetch(p);
#endif
  }

#if defined(BOOST_MSVC)
  /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
#pragma warning(push)
#pragma warning(disable:4800)
#endif

  /* core probing loop: quadratic probe over groups, SIMD-style match within
   * a group, stop early when a group reports no overflow for this hash
   */
  template<typename Key>
  BOOST_FORCEINLINE iterator find_impl(
    const Key& x,std::size_t pos0,std::size_t hash)const
  {
    prober pb(pos0);
    do{
      auto pos=pb.get();
      auto pg=arrays.groups+pos;
      auto mask=pg->match(hash);
      if(mask){
        auto p=arrays.elements+pos*N;
        prefetch_elements(p);
        do{
          auto n=unchecked_countr_zero(mask);
          if(BOOST_LIKELY(bool(pred()(x,key_from(p[n]))))){
            return {pg,n,p+n};
          }
          mask&=mask-1;
        }while(mask);
      }
      if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
        return {}; /* end() */
      }
    }
    while(BOOST_LIKELY(pb.next(arrays.groups_size_mask)));
    return {}; /* end() */
  }

#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4800 */
#endif

  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace_impl(Args&&... args)
  {
    const auto &k=key_from(std::forward<Args>(args)...);
    auto hash=hash_for(k);
    auto pos0=position_for(hash);
    auto it=find_impl(k,pos0,hash);

    if(it!=end()){
      return {it,false};
    }
    if(BOOST_LIKELY(size_<ml)){
      return {
        unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...),
        true
      };
    }
    else{
      return {
        unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...),
        true
      };
    }
  }

  static std::size_t capacity_for(std::size_t n)
  {
    return size_policy::size(size_index_for<group_type,size_policy>(n))*N-1;
  }

  template<typename... Args>
  BOOST_NOINLINE iterator
  unchecked_emplace_with_rehash(std::size_t hash,Args&&... args)
  {
    /* Due to the anti-drift mechanism (see recover_slot), new_arrays_ may be
     * of the same size as the old arrays; in the limit, erasing one element
     * at full load and then inserting could bring us back to the same
     * capacity after a costly rehash. To avoid this, we jump to the next
     * capacity level when the number of erased elements is <= 10% of total
     * elements at full load, which is implemented by requesting additional
     * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the
     * probability of an element having caused overflow; P has been measured
     * as ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61.
     */
    auto new_arrays_=new_arrays(std::size_t(
      std::ceil(static_cast<float>(size_+size_/61+1)/mlf)));
    iterator it;
    BOOST_TRY{
      /* strong exception guarantee -> try insertion before rehash */
      it=nosize_unchecked_emplace_at(
        new_arrays_,position_for(hash,new_arrays_),
        hash,std::forward<Args>(args)...);
    }
    BOOST_CATCH(...){
      delete_arrays(new_arrays_);
      BOOST_RETHROW
    }
    BOOST_CATCH_END

    /* new_arrays_ lifetime taken care of by unchecked_rehash */
    unchecked_rehash(new_arrays_);
    ++size_;
    return it;
  }

  BOOST_NOINLINE void unchecked_rehash(std::size_t n)
  {
    auto new_arrays_=new_arrays(n);
    unchecked_rehash(new_arrays_);
  }

  /* moves (or copies, if moving may throw) all elements into new_arrays_,
   * then installs it; rolls everything back on exception
   */
  BOOST_NOINLINE void unchecked_rehash(arrays_type& new_arrays_)
  {
    std::size_t num_destroyed=0;
    BOOST_TRY{
      for_all_elements([&,this](value_type* p){
        nosize_transfer_element(p,new_arrays_,num_destroyed);
      });
    }
    BOOST_CATCH(...){
      if(num_destroyed){
        /* release the metadata of the already-moved-from (destroyed) slots */
        for(auto pg=arrays.groups;;++pg){
          auto mask=pg->match_occupied();
          while(mask){
            auto nz=unchecked_countr_zero(mask);
            recover_slot(pg,nz);
            if(!(--num_destroyed))goto continue_;
            mask&=mask-1;
          }
        }
      }
    continue_:
      for_all_elements(new_arrays_,[this](value_type* p){
        destroy_element(p);
      });
      delete_arrays(new_arrays_);
      BOOST_RETHROW
    }
    BOOST_CATCH_END

    /* either all moved and destroyed or all copied */
    BOOST_ASSERT(num_destroyed==size()||num_destroyed==0);
    if(num_destroyed!=size()){
      for_all_elements([this](value_type* p){
        destroy_element(p);
      });
    }
    delete_arrays(arrays);
    arrays=new_arrays_;
    ml=initial_max_load();
  }

  void noshrink_reserve(std::size_t n)
  {
    /* used only on assignment after element clearance */
    BOOST_ASSERT(empty());

    if(n){
      n=std::size_t(std::ceil(float(n)/mlf)); /* elements -> slots */
      n=capacity_for(n); /* exact resulting capacity */

      if(n>capacity()){
        auto new_arrays_=new_arrays(n);
        delete_arrays(arrays);
        arrays=new_arrays_;
        ml=initial_max_load();
      }
    }
  }

  /* insertion that skips the duplicate-key lookup; caller guarantees the
   * key is not present and there is room
   */
  template<typename Value>
  void unchecked_insert(Value&& x)
  {
    auto hash=hash_for(key_from(x));
    unchecked_emplace_at(position_for(hash),hash,std::forward<Value>(x));
  }

  void nosize_transfer_element(
    value_type* p,const arrays_type& arrays_,std::size_t& num_destroyed)
  {
    nosize_transfer_element(
      p,hash_for(key_from(*p)),arrays_,num_destroyed,
      std::integral_constant< /* std::move_if_noexcept semantics */
        bool,
        std::is_nothrow_move_constructible<init_type>::value||
        !std::is_copy_constructible<init_type>::value>{});
  }

  void nosize_transfer_element(
    value_type* p,std::size_t hash,const arrays_type& arrays_,
    std::size_t& num_destroyed,std::true_type /* ->move */)
  {
    /* Destroy p even if an exception is thrown in the middle of move
     * construction, which could leave the source half-moved.
     */
    ++num_destroyed;
    destroy_element_on_exit d{this,p};
    (void)d; /* unused var warning */
    nosize_unchecked_emplace_at(
      arrays_,position_for(hash,arrays_),hash,type_policy::move(*p));
  }

  void nosize_transfer_element(
    value_type* p,std::size_t hash,const arrays_type& arrays_,
    std::size_t& /*num_destroyed*/,std::false_type /* ->copy */)
  {
    nosize_unchecked_emplace_at(
      arrays_,position_for(hash,arrays_),hash,
      const_cast<const value_type&>(*p));
  }

  template<typename... Args>
  iterator unchecked_emplace_at(
    std::size_t pos0,std::size_t hash,Args&&... args)
  {
    auto res=nosize_unchecked_emplace_at(
      arrays,pos0,hash,std::forward<Args>(args)...);
    ++size_;
    return res;
  }

  /* probes until a group with a free slot is found, marking overflow on
   * every full group passed along the way; does not update size_
   */
  template<typename... Args>
  iterator nosize_unchecked_emplace_at(
    const arrays_type& arrays_,std::size_t pos0,std::size_t hash,
    Args&&... args)
  {
    for(prober pb(pos0);;pb.next(arrays_.groups_size_mask)){
      auto pos=pb.get();
      auto pg=arrays_.groups+pos;
      auto mask=pg->match_available();
      if(BOOST_LIKELY(mask!=0)){
        auto n=unchecked_countr_zero(mask);
        auto p=arrays_.elements+pos*N+n;
        construct_element(p,std::forward<Args>(args)...);
        pg->set(n,hash);
        return {pg,n,p};
      }
      else pg->mark_overflow(hash);
    }
  }

  template<typename Predicate>
  std::size_t erase_if_impl(Predicate pr)
  {
    std::size_t s=size();
    for_all_elements([&,this](group_type* pg,unsigned int n,value_type* p){
      if(pr(*p)) erase(iterator{pg,n,p});
    });
    return std::size_t(s-size());
  }

  template<typename F>
  void for_all_elements(F f)const
  {
    for_all_elements(arrays,f);
  }

  /* overload selected when f only wants the element pointer */
  template<typename F>
  static auto for_all_elements(const arrays_type& arrays_,F f)
    ->decltype(f(nullptr),void())
  {
    for_all_elements(
      arrays_,[&](group_type*,unsigned int,value_type* p){return f(p);});
  }

  /* overload selected when f wants (group, slot index, element) */
  template<typename F>
  static auto for_all_elements(const arrays_type& arrays_,F f)
    ->decltype(f(nullptr,0,nullptr),void())
  {
    auto p=arrays_.elements;
    if(!p){return;}
    for(auto pg=arrays_.groups,last=pg+arrays_.groups_size_mask+1;
        pg!=last;++pg,p+=N){
      auto mask=pg->match_really_occupied();
      while(mask){
        auto n=unchecked_countr_zero(mask);
        f(pg,n,p+n);
        mask&=mask-1;
      }
    }
  }

  std::size_t size_;   /* number of elements */
  arrays_type arrays;  /* metadata groups + element slots (table_arrays) */
  std::size_t ml;      /* current maximum load before the next rehash */
};
1895
+
1896
+ #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
1897
+ #pragma warning(pop) /* C4702 */
1898
+ #endif
1899
+
1900
+ #if defined(BOOST_MSVC)
1901
+ #pragma warning(pop) /* C4714 */
1902
+ #endif
1903
+
1904
+ #if defined(BOOST_GCC)
1905
+ #pragma GCC diagnostic pop /* ignored "-Wshadow" */
1906
+ #endif
1907
+
1908
+ } /* namespace foa */
1909
+ } /* namespace detail */
1910
+ } /* namespace unordered */
1911
+ } /* namespace boost */
1912
+
1913
+ #undef BOOST_UNORDERED_ASSUME
1914
+ #undef BOOST_UNORDERED_HAS_BUILTIN
1915
+ #ifdef BOOST_UNORDERED_LITTLE_ENDIAN_NEON
1916
+ #undef BOOST_UNORDERED_LITTLE_ENDIAN_NEON
1917
+ #endif
1918
+ #ifdef BOOST_UNORDERED_SSE2
1919
+ #undef BOOST_UNORDERED_SSE2
1920
+ #endif
1921
+ #endif