rocksdb-native 2.6.2 → 2.6.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (956)
  1. package/CMakeLists.txt +29 -17
  2. package/binding.js +1 -1
  3. package/package.json +7 -4
  4. package/prebuilds/darwin-arm64/rocksdb-native.bare +0 -0
  5. package/prebuilds/darwin-arm64/rocksdb-native.node +0 -0
  6. package/prebuilds/darwin-x64/rocksdb-native.bare +0 -0
  7. package/prebuilds/darwin-x64/rocksdb-native.node +0 -0
  8. package/prebuilds/ios-arm64/rocksdb-native.bare +0 -0
  9. package/prebuilds/ios-arm64-simulator/rocksdb-native.bare +0 -0
  10. package/prebuilds/ios-x64-simulator/rocksdb-native.bare +0 -0
  11. package/prebuilds/linux-arm64/rocksdb-native.bare +0 -0
  12. package/prebuilds/linux-arm64/rocksdb-native.node +0 -0
  13. package/prebuilds/linux-x64/rocksdb-native.bare +0 -0
  14. package/prebuilds/linux-x64/rocksdb-native.node +0 -0
  15. package/prebuilds/win32-arm64/rocksdb-native.bare +0 -0
  16. package/prebuilds/win32-arm64/rocksdb-native.node +0 -0
  17. package/prebuilds/win32-x64/rocksdb-native.bare +0 -0
  18. package/prebuilds/win32-x64/rocksdb-native.node +0 -0
  19. package/vendor/librocksdb/CMakeLists.txt +0 -94
  20. package/vendor/librocksdb/LICENSE +0 -201
  21. package/vendor/librocksdb/NOTICE +0 -13
  22. package/vendor/librocksdb/README.md +0 -11
  23. package/vendor/librocksdb/include/rocksdb.h +0 -294
  24. package/vendor/librocksdb/src/rocksdb.cc +0 -714
  25. package/vendor/librocksdb/vendor/rocksdb/CMakeLists.txt +0 -1642
  26. package/vendor/librocksdb/vendor/rocksdb/cache/cache.cc +0 -193
  27. package/vendor/librocksdb/vendor/rocksdb/cache/cache_bench.cc +0 -20
  28. package/vendor/librocksdb/vendor/rocksdb/cache/cache_bench_tool.cc +0 -1177
  29. package/vendor/librocksdb/vendor/rocksdb/cache/cache_entry_roles.cc +0 -104
  30. package/vendor/librocksdb/vendor/rocksdb/cache/cache_entry_roles.h +0 -20
  31. package/vendor/librocksdb/vendor/rocksdb/cache/cache_entry_stats.h +0 -182
  32. package/vendor/librocksdb/vendor/rocksdb/cache/cache_helpers.cc +0 -41
  33. package/vendor/librocksdb/vendor/rocksdb/cache/cache_helpers.h +0 -139
  34. package/vendor/librocksdb/vendor/rocksdb/cache/cache_key.cc +0 -364
  35. package/vendor/librocksdb/vendor/rocksdb/cache/cache_key.h +0 -143
  36. package/vendor/librocksdb/vendor/rocksdb/cache/cache_reservation_manager.cc +0 -184
  37. package/vendor/librocksdb/vendor/rocksdb/cache/cache_reservation_manager.h +0 -318
  38. package/vendor/librocksdb/vendor/rocksdb/cache/charged_cache.cc +0 -111
  39. package/vendor/librocksdb/vendor/rocksdb/cache/charged_cache.h +0 -61
  40. package/vendor/librocksdb/vendor/rocksdb/cache/clock_cache.cc +0 -3657
  41. package/vendor/librocksdb/vendor/rocksdb/cache/clock_cache.h +0 -1165
  42. package/vendor/librocksdb/vendor/rocksdb/cache/compressed_secondary_cache.cc +0 -414
  43. package/vendor/librocksdb/vendor/rocksdb/cache/compressed_secondary_cache.h +0 -151
  44. package/vendor/librocksdb/vendor/rocksdb/cache/lru_cache.cc +0 -726
  45. package/vendor/librocksdb/vendor/rocksdb/cache/lru_cache.h +0 -467
  46. package/vendor/librocksdb/vendor/rocksdb/cache/secondary_cache.cc +0 -12
  47. package/vendor/librocksdb/vendor/rocksdb/cache/secondary_cache_adapter.cc +0 -743
  48. package/vendor/librocksdb/vendor/rocksdb/cache/secondary_cache_adapter.h +0 -103
  49. package/vendor/librocksdb/vendor/rocksdb/cache/sharded_cache.cc +0 -147
  50. package/vendor/librocksdb/vendor/rocksdb/cache/sharded_cache.h +0 -322
  51. package/vendor/librocksdb/vendor/rocksdb/cache/tiered_secondary_cache.cc +0 -125
  52. package/vendor/librocksdb/vendor/rocksdb/cache/tiered_secondary_cache.h +0 -158
  53. package/vendor/librocksdb/vendor/rocksdb/cache/typed_cache.h +0 -380
  54. package/vendor/librocksdb/vendor/rocksdb/cmake/RocksDBConfig.cmake.in +0 -54
  55. package/vendor/librocksdb/vendor/rocksdb/db/arena_wrapped_db_iter.cc +0 -182
  56. package/vendor/librocksdb/vendor/rocksdb/db/arena_wrapped_db_iter.h +0 -128
  57. package/vendor/librocksdb/vendor/rocksdb/db/attribute_group_iterator_impl.cc +0 -20
  58. package/vendor/librocksdb/vendor/rocksdb/db/attribute_group_iterator_impl.h +0 -83
  59. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_constants.h +0 -16
  60. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_contents.cc +0 -42
  61. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_contents.h +0 -60
  62. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_counting_iterator.h +0 -150
  63. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_fetcher.cc +0 -34
  64. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_fetcher.h +0 -37
  65. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_addition.cc +0 -156
  66. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_addition.h +0 -67
  67. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_builder.cc +0 -429
  68. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_builder.h +0 -113
  69. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_cache.cc +0 -101
  70. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_cache.h +0 -56
  71. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_completion_callback.h +0 -84
  72. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_garbage.cc +0 -134
  73. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_garbage.h +0 -57
  74. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_meta.cc +0 -62
  75. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_meta.h +0 -170
  76. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_reader.cc +0 -622
  77. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_file_reader.h +0 -111
  78. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_garbage_meter.cc +0 -100
  79. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_garbage_meter.h +0 -102
  80. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_index.h +0 -187
  81. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_format.cc +0 -143
  82. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_format.h +0 -164
  83. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_sequential_reader.cc +0 -133
  84. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_sequential_reader.h +0 -83
  85. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_writer.cc +0 -207
  86. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_log_writer.h +0 -87
  87. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_read_request.h +0 -58
  88. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_source.cc +0 -459
  89. package/vendor/librocksdb/vendor/rocksdb/db/blob/blob_source.h +0 -161
  90. package/vendor/librocksdb/vendor/rocksdb/db/blob/prefetch_buffer_collection.cc +0 -23
  91. package/vendor/librocksdb/vendor/rocksdb/db/blob/prefetch_buffer_collection.h +0 -38
  92. package/vendor/librocksdb/vendor/rocksdb/db/builder.cc +0 -517
  93. package/vendor/librocksdb/vendor/rocksdb/db/builder.h +0 -76
  94. package/vendor/librocksdb/vendor/rocksdb/db/c.cc +0 -7133
  95. package/vendor/librocksdb/vendor/rocksdb/db/c_test.c +0 -4052
  96. package/vendor/librocksdb/vendor/rocksdb/db/coalescing_iterator.cc +0 -47
  97. package/vendor/librocksdb/vendor/rocksdb/db/coalescing_iterator.h +0 -79
  98. package/vendor/librocksdb/vendor/rocksdb/db/column_family.cc +0 -1859
  99. package/vendor/librocksdb/vendor/rocksdb/db/column_family.h +0 -918
  100. package/vendor/librocksdb/vendor/rocksdb/db/compaction/clipping_iterator.h +0 -281
  101. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction.cc +0 -995
  102. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction.h +0 -602
  103. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_iteration_stats.h +0 -56
  104. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_iterator.cc +0 -1523
  105. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_iterator.h +0 -557
  106. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_job.cc +0 -2155
  107. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_job.h +0 -520
  108. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_outputs.cc +0 -802
  109. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_outputs.h +0 -411
  110. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker.cc +0 -1245
  111. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker.h +0 -322
  112. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_fifo.cc +0 -478
  113. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_fifo.h +0 -61
  114. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_level.cc +0 -977
  115. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_level.h +0 -32
  116. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_universal.cc +0 -1578
  117. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_picker_universal.h +0 -29
  118. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_service_job.cc +0 -835
  119. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_state.cc +0 -46
  120. package/vendor/librocksdb/vendor/rocksdb/db/compaction/compaction_state.h +0 -42
  121. package/vendor/librocksdb/vendor/rocksdb/db/compaction/file_pri.h +0 -94
  122. package/vendor/librocksdb/vendor/rocksdb/db/compaction/sst_partitioner.cc +0 -83
  123. package/vendor/librocksdb/vendor/rocksdb/db/compaction/subcompaction_state.cc +0 -106
  124. package/vendor/librocksdb/vendor/rocksdb/db/compaction/subcompaction_state.h +0 -220
  125. package/vendor/librocksdb/vendor/rocksdb/db/convenience.cc +0 -101
  126. package/vendor/librocksdb/vendor/rocksdb/db/convenience_impl.h +0 -15
  127. package/vendor/librocksdb/vendor/rocksdb/db/db_filesnapshot.cc +0 -506
  128. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/compacted_db_impl.cc +0 -275
  129. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/compacted_db_impl.h +0 -147
  130. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl.cc +0 -6767
  131. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl.h +0 -3056
  132. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_compaction_flush.cc +0 -4390
  133. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_debug.cc +0 -327
  134. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_experimental.cc +0 -164
  135. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_files.cc +0 -1032
  136. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_follower.cc +0 -348
  137. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_follower.h +0 -54
  138. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_open.cc +0 -2325
  139. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_readonly.cc +0 -376
  140. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_readonly.h +0 -179
  141. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_secondary.cc +0 -1025
  142. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_secondary.h +0 -322
  143. package/vendor/librocksdb/vendor/rocksdb/db/db_impl/db_impl_write.cc +0 -2624
  144. package/vendor/librocksdb/vendor/rocksdb/db/db_info_dumper.cc +0 -165
  145. package/vendor/librocksdb/vendor/rocksdb/db/db_info_dumper.h +0 -15
  146. package/vendor/librocksdb/vendor/rocksdb/db/db_iter.cc +0 -1774
  147. package/vendor/librocksdb/vendor/rocksdb/db/db_iter.h +0 -444
  148. package/vendor/librocksdb/vendor/rocksdb/db/db_test2.cc +0 -7839
  149. package/vendor/librocksdb/vendor/rocksdb/db/db_test_util.cc +0 -1800
  150. package/vendor/librocksdb/vendor/rocksdb/db/db_test_util.h +0 -1427
  151. package/vendor/librocksdb/vendor/rocksdb/db/db_with_timestamp_test_util.cc +0 -96
  152. package/vendor/librocksdb/vendor/rocksdb/db/db_with_timestamp_test_util.h +0 -126
  153. package/vendor/librocksdb/vendor/rocksdb/db/dbformat.cc +0 -282
  154. package/vendor/librocksdb/vendor/rocksdb/db/dbformat.h +0 -1101
  155. package/vendor/librocksdb/vendor/rocksdb/db/error_handler.cc +0 -806
  156. package/vendor/librocksdb/vendor/rocksdb/db/error_handler.h +0 -149
  157. package/vendor/librocksdb/vendor/rocksdb/db/event_helpers.cc +0 -332
  158. package/vendor/librocksdb/vendor/rocksdb/db/event_helpers.h +0 -78
  159. package/vendor/librocksdb/vendor/rocksdb/db/experimental.cc +0 -1212
  160. package/vendor/librocksdb/vendor/rocksdb/db/external_sst_file_ingestion_job.cc +0 -1249
  161. package/vendor/librocksdb/vendor/rocksdb/db/external_sst_file_ingestion_job.h +0 -268
  162. package/vendor/librocksdb/vendor/rocksdb/db/file_indexer.cc +0 -216
  163. package/vendor/librocksdb/vendor/rocksdb/db/file_indexer.h +0 -140
  164. package/vendor/librocksdb/vendor/rocksdb/db/flush_job.cc +0 -1217
  165. package/vendor/librocksdb/vendor/rocksdb/db/flush_job.h +0 -240
  166. package/vendor/librocksdb/vendor/rocksdb/db/flush_scheduler.cc +0 -86
  167. package/vendor/librocksdb/vendor/rocksdb/db/flush_scheduler.h +0 -55
  168. package/vendor/librocksdb/vendor/rocksdb/db/forward_iterator.cc +0 -1082
  169. package/vendor/librocksdb/vendor/rocksdb/db/forward_iterator.h +0 -166
  170. package/vendor/librocksdb/vendor/rocksdb/db/forward_iterator_bench.cc +0 -378
  171. package/vendor/librocksdb/vendor/rocksdb/db/history_trimming_iterator.h +0 -95
  172. package/vendor/librocksdb/vendor/rocksdb/db/import_column_family_job.cc +0 -463
  173. package/vendor/librocksdb/vendor/rocksdb/db/import_column_family_job.h +0 -91
  174. package/vendor/librocksdb/vendor/rocksdb/db/internal_stats.cc +0 -2198
  175. package/vendor/librocksdb/vendor/rocksdb/db/internal_stats.h +0 -896
  176. package/vendor/librocksdb/vendor/rocksdb/db/job_context.h +0 -254
  177. package/vendor/librocksdb/vendor/rocksdb/db/kv_checksum.h +0 -484
  178. package/vendor/librocksdb/vendor/rocksdb/db/log_format.h +0 -55
  179. package/vendor/librocksdb/vendor/rocksdb/db/log_reader.cc +0 -958
  180. package/vendor/librocksdb/vendor/rocksdb/db/log_reader.h +0 -245
  181. package/vendor/librocksdb/vendor/rocksdb/db/log_writer.cc +0 -355
  182. package/vendor/librocksdb/vendor/rocksdb/db/log_writer.h +0 -151
  183. package/vendor/librocksdb/vendor/rocksdb/db/logs_with_prep_tracker.cc +0 -67
  184. package/vendor/librocksdb/vendor/rocksdb/db/logs_with_prep_tracker.h +0 -62
  185. package/vendor/librocksdb/vendor/rocksdb/db/lookup_key.h +0 -68
  186. package/vendor/librocksdb/vendor/rocksdb/db/malloc_stats.cc +0 -52
  187. package/vendor/librocksdb/vendor/rocksdb/db/malloc_stats.h +0 -22
  188. package/vendor/librocksdb/vendor/rocksdb/db/memtable.cc +0 -1684
  189. package/vendor/librocksdb/vendor/rocksdb/db/memtable.h +0 -712
  190. package/vendor/librocksdb/vendor/rocksdb/db/memtable_list.cc +0 -1028
  191. package/vendor/librocksdb/vendor/rocksdb/db/memtable_list.h +0 -514
  192. package/vendor/librocksdb/vendor/rocksdb/db/merge_context.h +0 -150
  193. package/vendor/librocksdb/vendor/rocksdb/db/merge_helper.cc +0 -699
  194. package/vendor/librocksdb/vendor/rocksdb/db/merge_helper.h +0 -318
  195. package/vendor/librocksdb/vendor/rocksdb/db/merge_operator.cc +0 -167
  196. package/vendor/librocksdb/vendor/rocksdb/db/multi_cf_iterator_impl.h +0 -296
  197. package/vendor/librocksdb/vendor/rocksdb/db/output_validator.cc +0 -29
  198. package/vendor/librocksdb/vendor/rocksdb/db/output_validator.h +0 -45
  199. package/vendor/librocksdb/vendor/rocksdb/db/periodic_task_scheduler.cc +0 -110
  200. package/vendor/librocksdb/vendor/rocksdb/db/periodic_task_scheduler.h +0 -108
  201. package/vendor/librocksdb/vendor/rocksdb/db/pinned_iterators_manager.h +0 -92
  202. package/vendor/librocksdb/vendor/rocksdb/db/post_memtable_callback.h +0 -25
  203. package/vendor/librocksdb/vendor/rocksdb/db/pre_release_callback.h +0 -37
  204. package/vendor/librocksdb/vendor/rocksdb/db/range_del_aggregator.cc +0 -553
  205. package/vendor/librocksdb/vendor/rocksdb/db/range_del_aggregator.h +0 -481
  206. package/vendor/librocksdb/vendor/rocksdb/db/range_del_aggregator_bench.cc +0 -280
  207. package/vendor/librocksdb/vendor/rocksdb/db/range_tombstone_fragmenter.cc +0 -515
  208. package/vendor/librocksdb/vendor/rocksdb/db/range_tombstone_fragmenter.h +0 -361
  209. package/vendor/librocksdb/vendor/rocksdb/db/read_callback.h +0 -54
  210. package/vendor/librocksdb/vendor/rocksdb/db/repair.cc +0 -864
  211. package/vendor/librocksdb/vendor/rocksdb/db/seqno_to_time_mapping.cc +0 -573
  212. package/vendor/librocksdb/vendor/rocksdb/db/seqno_to_time_mapping.h +0 -307
  213. package/vendor/librocksdb/vendor/rocksdb/db/snapshot_checker.h +0 -58
  214. package/vendor/librocksdb/vendor/rocksdb/db/snapshot_impl.cc +0 -25
  215. package/vendor/librocksdb/vendor/rocksdb/db/snapshot_impl.h +0 -239
  216. package/vendor/librocksdb/vendor/rocksdb/db/table_cache.cc +0 -745
  217. package/vendor/librocksdb/vendor/rocksdb/db/table_cache.h +0 -298
  218. package/vendor/librocksdb/vendor/rocksdb/db/table_cache_sync_and_async.h +0 -135
  219. package/vendor/librocksdb/vendor/rocksdb/db/table_properties_collector.cc +0 -74
  220. package/vendor/librocksdb/vendor/rocksdb/db/table_properties_collector.h +0 -185
  221. package/vendor/librocksdb/vendor/rocksdb/db/transaction_log_impl.cc +0 -296
  222. package/vendor/librocksdb/vendor/rocksdb/db/transaction_log_impl.h +0 -128
  223. package/vendor/librocksdb/vendor/rocksdb/db/trim_history_scheduler.cc +0 -54
  224. package/vendor/librocksdb/vendor/rocksdb/db/trim_history_scheduler.h +0 -46
  225. package/vendor/librocksdb/vendor/rocksdb/db/version_builder.cc +0 -1431
  226. package/vendor/librocksdb/vendor/rocksdb/db/version_builder.h +0 -93
  227. package/vendor/librocksdb/vendor/rocksdb/db/version_edit.cc +0 -1119
  228. package/vendor/librocksdb/vendor/rocksdb/db/version_edit.h +0 -769
  229. package/vendor/librocksdb/vendor/rocksdb/db/version_edit_handler.cc +0 -1264
  230. package/vendor/librocksdb/vendor/rocksdb/db/version_edit_handler.h +0 -390
  231. package/vendor/librocksdb/vendor/rocksdb/db/version_set.cc +0 -7562
  232. package/vendor/librocksdb/vendor/rocksdb/db/version_set.h +0 -1799
  233. package/vendor/librocksdb/vendor/rocksdb/db/version_set_sync_and_async.h +0 -169
  234. package/vendor/librocksdb/vendor/rocksdb/db/version_util.h +0 -77
  235. package/vendor/librocksdb/vendor/rocksdb/db/wal_edit.cc +0 -211
  236. package/vendor/librocksdb/vendor/rocksdb/db/wal_edit.h +0 -177
  237. package/vendor/librocksdb/vendor/rocksdb/db/wal_manager.cc +0 -539
  238. package/vendor/librocksdb/vendor/rocksdb/db/wal_manager.h +0 -138
  239. package/vendor/librocksdb/vendor/rocksdb/db/wide/wide_column_serialization.cc +0 -166
  240. package/vendor/librocksdb/vendor/rocksdb/db/wide/wide_column_serialization.h +0 -57
  241. package/vendor/librocksdb/vendor/rocksdb/db/wide/wide_columns.cc +0 -22
  242. package/vendor/librocksdb/vendor/rocksdb/db/wide/wide_columns_helper.cc +0 -52
  243. package/vendor/librocksdb/vendor/rocksdb/db/wide/wide_columns_helper.h +0 -40
  244. package/vendor/librocksdb/vendor/rocksdb/db/write_batch.cc +0 -3394
  245. package/vendor/librocksdb/vendor/rocksdb/db/write_batch_base.cc +0 -94
  246. package/vendor/librocksdb/vendor/rocksdb/db/write_batch_internal.h +0 -408
  247. package/vendor/librocksdb/vendor/rocksdb/db/write_callback.h +0 -27
  248. package/vendor/librocksdb/vendor/rocksdb/db/write_controller.cc +0 -121
  249. package/vendor/librocksdb/vendor/rocksdb/db/write_controller.h +0 -148
  250. package/vendor/librocksdb/vendor/rocksdb/db/write_stall_stats.cc +0 -179
  251. package/vendor/librocksdb/vendor/rocksdb/db/write_stall_stats.h +0 -47
  252. package/vendor/librocksdb/vendor/rocksdb/db/write_thread.cc +0 -931
  253. package/vendor/librocksdb/vendor/rocksdb/db/write_thread.h +0 -498
  254. package/vendor/librocksdb/vendor/rocksdb/env/composite_env.cc +0 -534
  255. package/vendor/librocksdb/vendor/rocksdb/env/composite_env_wrapper.h +0 -399
  256. package/vendor/librocksdb/vendor/rocksdb/env/emulated_clock.h +0 -114
  257. package/vendor/librocksdb/vendor/rocksdb/env/env.cc +0 -1253
  258. package/vendor/librocksdb/vendor/rocksdb/env/env_chroot.cc +0 -149
  259. package/vendor/librocksdb/vendor/rocksdb/env/env_chroot.h +0 -55
  260. package/vendor/librocksdb/vendor/rocksdb/env/env_encryption.cc +0 -1192
  261. package/vendor/librocksdb/vendor/rocksdb/env/env_encryption_ctr.h +0 -97
  262. package/vendor/librocksdb/vendor/rocksdb/env/env_posix.cc +0 -530
  263. package/vendor/librocksdb/vendor/rocksdb/env/file_system.cc +0 -278
  264. package/vendor/librocksdb/vendor/rocksdb/env/file_system_tracer.cc +0 -564
  265. package/vendor/librocksdb/vendor/rocksdb/env/file_system_tracer.h +0 -461
  266. package/vendor/librocksdb/vendor/rocksdb/env/fs_on_demand.cc +0 -331
  267. package/vendor/librocksdb/vendor/rocksdb/env/fs_on_demand.h +0 -139
  268. package/vendor/librocksdb/vendor/rocksdb/env/fs_posix.cc +0 -1285
  269. package/vendor/librocksdb/vendor/rocksdb/env/fs_readonly.h +0 -105
  270. package/vendor/librocksdb/vendor/rocksdb/env/fs_remap.cc +0 -341
  271. package/vendor/librocksdb/vendor/rocksdb/env/fs_remap.h +0 -137
  272. package/vendor/librocksdb/vendor/rocksdb/env/io_posix.cc +0 -1738
  273. package/vendor/librocksdb/vendor/rocksdb/env/io_posix.h +0 -517
  274. package/vendor/librocksdb/vendor/rocksdb/env/mock_env.cc +0 -1058
  275. package/vendor/librocksdb/vendor/rocksdb/env/mock_env.h +0 -144
  276. package/vendor/librocksdb/vendor/rocksdb/env/unique_id_gen.cc +0 -243
  277. package/vendor/librocksdb/vendor/rocksdb/env/unique_id_gen.h +0 -119
  278. package/vendor/librocksdb/vendor/rocksdb/file/delete_scheduler.cc +0 -513
  279. package/vendor/librocksdb/vendor/rocksdb/file/delete_scheduler.h +0 -200
  280. package/vendor/librocksdb/vendor/rocksdb/file/file_prefetch_buffer.cc +0 -992
  281. package/vendor/librocksdb/vendor/rocksdb/file/file_prefetch_buffer.h +0 -629
  282. package/vendor/librocksdb/vendor/rocksdb/file/file_util.cc +0 -308
  283. package/vendor/librocksdb/vendor/rocksdb/file/file_util.h +0 -123
  284. package/vendor/librocksdb/vendor/rocksdb/file/filename.cc +0 -538
  285. package/vendor/librocksdb/vendor/rocksdb/file/filename.h +0 -186
  286. package/vendor/librocksdb/vendor/rocksdb/file/line_file_reader.cc +0 -73
  287. package/vendor/librocksdb/vendor/rocksdb/file/line_file_reader.h +0 -60
  288. package/vendor/librocksdb/vendor/rocksdb/file/random_access_file_reader.cc +0 -639
  289. package/vendor/librocksdb/vendor/rocksdb/file/random_access_file_reader.h +0 -197
  290. package/vendor/librocksdb/vendor/rocksdb/file/read_write_util.cc +0 -33
  291. package/vendor/librocksdb/vendor/rocksdb/file/read_write_util.h +0 -31
  292. package/vendor/librocksdb/vendor/rocksdb/file/readahead_file_info.h +0 -33
  293. package/vendor/librocksdb/vendor/rocksdb/file/readahead_raf.cc +0 -169
  294. package/vendor/librocksdb/vendor/rocksdb/file/readahead_raf.h +0 -29
  295. package/vendor/librocksdb/vendor/rocksdb/file/sequence_file_reader.cc +0 -324
  296. package/vendor/librocksdb/vendor/rocksdb/file/sequence_file_reader.h +0 -127
  297. package/vendor/librocksdb/vendor/rocksdb/file/sst_file_manager_impl.cc +0 -525
  298. package/vendor/librocksdb/vendor/rocksdb/file/sst_file_manager_impl.h +0 -220
  299. package/vendor/librocksdb/vendor/rocksdb/file/writable_file_writer.cc +0 -1007
  300. package/vendor/librocksdb/vendor/rocksdb/file/writable_file_writer.h +0 -370
  301. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/advanced_cache.h +0 -665
  302. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/advanced_options.h +0 -1101
  303. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/attribute_groups.h +0 -114
  304. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/block_cache_trace_writer.h +0 -149
  305. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/c.h +0 -3122
  306. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/cache.h +0 -579
  307. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/cache_bench_tool.h +0 -14
  308. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/cleanable.h +0 -128
  309. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/compaction_filter.h +0 -374
  310. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/compaction_job_stats.h +0 -112
  311. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/comparator.h +0 -231
  312. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/compression_type.h +0 -186
  313. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/concurrent_task_limiter.h +0 -51
  314. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/configurable.h +0 -390
  315. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/convenience.h +0 -466
  316. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/customizable.h +0 -229
  317. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/data_structure.h +0 -186
  318. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/db.h +0 -2174
  319. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/db_bench_tool.h +0 -11
  320. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/db_dump_tool.h +0 -43
  321. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/db_stress_tool.h +0 -11
  322. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/env.h +0 -1920
  323. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/env_encryption.h +0 -363
  324. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/experimental.h +0 -492
  325. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/file_checksum.h +0 -146
  326. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/file_system.h +0 -1961
  327. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/filter_policy.h +0 -211
  328. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/flush_block_policy.h +0 -75
  329. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/functor_wrapper.h +0 -56
  330. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/io_status.h +0 -244
  331. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/iostats_context.h +0 -98
  332. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/iterator.h +0 -104
  333. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/iterator_base.h +0 -90
  334. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/ldb_tool.h +0 -42
  335. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/listener.h +0 -869
  336. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/memory_allocator.h +0 -87
  337. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/memtablerep.h +0 -421
  338. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/merge_operator.h +0 -337
  339. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/metadata.h +0 -258
  340. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/options.h +0 -2339
  341. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/perf_context.h +0 -319
  342. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/perf_level.h +0 -39
  343. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/persistent_cache.h +0 -74
  344. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/port_defs.h +0 -26
  345. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/rate_limiter.h +0 -172
  346. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/rocksdb_namespace.h +0 -16
  347. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/secondary_cache.h +0 -220
  348. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/slice.h +0 -264
  349. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/slice_transform.h +0 -135
  350. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/snapshot.h +0 -53
  351. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/sst_dump_tool.h +0 -17
  352. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/sst_file_manager.h +0 -139
  353. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/sst_file_reader.h +0 -61
  354. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/sst_file_writer.h +0 -203
  355. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/sst_partitioner.h +0 -142
  356. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/statistics.h +0 -794
  357. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/stats_history.h +0 -70
  358. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/status.h +0 -609
  359. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/system_clock.h +0 -129
  360. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/table.h +0 -938
  361. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/table_properties.h +0 -380
  362. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/table_reader_caller.h +0 -41
  363. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/thread_status.h +0 -197
  364. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/threadpool.h +0 -67
  365. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/trace_reader_writer.h +0 -52
  366. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/trace_record.h +0 -248
  367. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/trace_record_result.h +0 -187
  368. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/transaction_log.h +0 -128
  369. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/types.h +0 -113
  370. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/unique_id.h +0 -55
  371. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/universal_compaction.h +0 -127
  372. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/user_write_callback.h +0 -29
  373. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/agg_merge.h +0 -138
  374. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/backup_engine.h +0 -689
  375. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/cache_dump_load.h +0 -144
  376. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/checkpoint.h +0 -65
  377. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/convenience.h +0 -10
  378. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/customizable_util.h +0 -321
  379. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/db_ttl.h +0 -70
  380. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/debug.h +0 -46
  381. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/env_mirror.h +0 -179
  382. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/info_log_finder.h +0 -19
  383. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/ldb_cmd.h +0 -338
  384. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h +0 -75
  385. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/leveldb_options.h +0 -145
  386. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h +0 -43
  387. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h +0 -55
  388. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/memory_util.h +0 -48
  389. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/object_registry.h +0 -583
  390. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h +0 -129
  391. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/option_change_migration.h +0 -24
  392. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/options_type.h +0 -1222
  393. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/options_util.h +0 -105
  394. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/replayer.h +0 -85
  395. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/sim_cache.h +0 -92
  396. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/stackable_db.h +0 -593
  397. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/table_properties_collectors.h +0 -133
  398. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/transaction.h +0 -765
  399. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/transaction_db.h +0 -510
  400. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h +0 -89
  401. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/types_util.h +0 -36
  402. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/utilities/write_batch_with_index.h +0 -402
  403. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/version.h +0 -43
  404. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/wal_filter.h +0 -111
  405. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/wide_columns.h +0 -303
  406. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/write_batch.h +0 -518
  407. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/write_batch_base.h +0 -165
  408. package/vendor/librocksdb/vendor/rocksdb/include/rocksdb/write_buffer_manager.h +0 -183
  409. package/vendor/librocksdb/vendor/rocksdb/logging/auto_roll_logger.cc +0 -366
  410. package/vendor/librocksdb/vendor/rocksdb/logging/auto_roll_logger.h +0 -165
  411. package/vendor/librocksdb/vendor/rocksdb/logging/env_logger.h +0 -195
  412. package/vendor/librocksdb/vendor/rocksdb/logging/event_logger.cc +0 -68
  413. package/vendor/librocksdb/vendor/rocksdb/logging/event_logger.h +0 -202
  414. package/vendor/librocksdb/vendor/rocksdb/logging/log_buffer.cc +0 -91
  415. package/vendor/librocksdb/vendor/rocksdb/logging/log_buffer.h +0 -57
  416. package/vendor/librocksdb/vendor/rocksdb/logging/logging.h +0 -62
  417. package/vendor/librocksdb/vendor/rocksdb/memory/allocator.h +0 -58
  418. package/vendor/librocksdb/vendor/rocksdb/memory/arena.cc +0 -170
  419. package/vendor/librocksdb/vendor/rocksdb/memory/arena.h +0 -146
  420. package/vendor/librocksdb/vendor/rocksdb/memory/concurrent_arena.cc +0 -45
  421. package/vendor/librocksdb/vendor/rocksdb/memory/concurrent_arena.h +0 -215
  422. package/vendor/librocksdb/vendor/rocksdb/memory/jemalloc_nodump_allocator.cc +0 -304
  423. package/vendor/librocksdb/vendor/rocksdb/memory/jemalloc_nodump_allocator.h +0 -99
  424. package/vendor/librocksdb/vendor/rocksdb/memory/memkind_kmem_allocator.cc +0 -44
  425. package/vendor/librocksdb/vendor/rocksdb/memory/memkind_kmem_allocator.h +0 -43
  426. package/vendor/librocksdb/vendor/rocksdb/memory/memory_allocator.cc +0 -81
  427. package/vendor/librocksdb/vendor/rocksdb/memory/memory_allocator_impl.h +0 -47
  428. package/vendor/librocksdb/vendor/rocksdb/memory/memory_usage.h +0 -38
  429. package/vendor/librocksdb/vendor/rocksdb/memtable/alloc_tracker.cc +0 -63
  430. package/vendor/librocksdb/vendor/rocksdb/memtable/hash_linklist_rep.cc +0 -925
  431. package/vendor/librocksdb/vendor/rocksdb/memtable/hash_skiplist_rep.cc +0 -392
  432. package/vendor/librocksdb/vendor/rocksdb/memtable/inlineskiplist.h +0 -1051
  433. package/vendor/librocksdb/vendor/rocksdb/memtable/memtablerep_bench.cc +0 -687
  434. package/vendor/librocksdb/vendor/rocksdb/memtable/skiplist.h +0 -498
  435. package/vendor/librocksdb/vendor/rocksdb/memtable/skiplistrep.cc +0 -368
  436. package/vendor/librocksdb/vendor/rocksdb/memtable/stl_wrappers.h +0 -33
  437. package/vendor/librocksdb/vendor/rocksdb/memtable/vectorrep.cc +0 -307
  438. package/vendor/librocksdb/vendor/rocksdb/memtable/write_buffer_manager.cc +0 -185
  439. package/vendor/librocksdb/vendor/rocksdb/monitoring/file_read_sample.h +0 -23
  440. package/vendor/librocksdb/vendor/rocksdb/monitoring/histogram.cc +0 -280
  441. package/vendor/librocksdb/vendor/rocksdb/monitoring/histogram.h +0 -143
  442. package/vendor/librocksdb/vendor/rocksdb/monitoring/histogram_windowing.cc +0 -198
  443. package/vendor/librocksdb/vendor/rocksdb/monitoring/histogram_windowing.h +0 -84
  444. package/vendor/librocksdb/vendor/rocksdb/monitoring/in_memory_stats_history.cc +0 -50
  445. package/vendor/librocksdb/vendor/rocksdb/monitoring/in_memory_stats_history.h +0 -74
  446. package/vendor/librocksdb/vendor/rocksdb/monitoring/instrumented_mutex.cc +0 -90
  447. package/vendor/librocksdb/vendor/rocksdb/monitoring/instrumented_mutex.h +0 -126
  448. package/vendor/librocksdb/vendor/rocksdb/monitoring/iostats_context.cc +0 -78
  449. package/vendor/librocksdb/vendor/rocksdb/monitoring/iostats_context_imp.h +0 -62
  450. package/vendor/librocksdb/vendor/rocksdb/monitoring/perf_context.cc +0 -317
  451. package/vendor/librocksdb/vendor/rocksdb/monitoring/perf_context_imp.h +0 -103
  452. package/vendor/librocksdb/vendor/rocksdb/monitoring/perf_level.cc +0 -23
  453. package/vendor/librocksdb/vendor/rocksdb/monitoring/perf_level_imp.h +0 -14
  454. package/vendor/librocksdb/vendor/rocksdb/monitoring/perf_step_timer.h +0 -77
  455. package/vendor/librocksdb/vendor/rocksdb/monitoring/persistent_stats_history.cc +0 -173
  456. package/vendor/librocksdb/vendor/rocksdb/monitoring/persistent_stats_history.h +0 -83
  457. package/vendor/librocksdb/vendor/rocksdb/monitoring/statistics.cc +0 -561
  458. package/vendor/librocksdb/vendor/rocksdb/monitoring/statistics_impl.h +0 -143
  459. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_impl.cc +0 -163
  460. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_updater.cc +0 -328
  461. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_updater.h +0 -226
  462. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_updater_debug.cc +0 -43
  463. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_util.cc +0 -214
  464. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_util.h +0 -139
  465. package/vendor/librocksdb/vendor/rocksdb/monitoring/thread_status_util_debug.cc +0 -60
  466. package/vendor/librocksdb/vendor/rocksdb/options/cf_options.cc +0 -1218
  467. package/vendor/librocksdb/vendor/rocksdb/options/cf_options.h +0 -352
  468. package/vendor/librocksdb/vendor/rocksdb/options/configurable.cc +0 -720
  469. package/vendor/librocksdb/vendor/rocksdb/options/configurable_helper.h +0 -185
  470. package/vendor/librocksdb/vendor/rocksdb/options/configurable_test.h +0 -116
  471. package/vendor/librocksdb/vendor/rocksdb/options/customizable.cc +0 -133
  472. package/vendor/librocksdb/vendor/rocksdb/options/db_options.cc +0 -1113
  473. package/vendor/librocksdb/vendor/rocksdb/options/db_options.h +0 -160
  474. package/vendor/librocksdb/vendor/rocksdb/options/offpeak_time_info.cc +0 -59
  475. package/vendor/librocksdb/vendor/rocksdb/options/offpeak_time_info.h +0 -37
  476. package/vendor/librocksdb/vendor/rocksdb/options/options.cc +0 -717
  477. package/vendor/librocksdb/vendor/rocksdb/options/options_helper.cc +0 -1438
  478. package/vendor/librocksdb/vendor/rocksdb/options/options_helper.h +0 -115
  479. package/vendor/librocksdb/vendor/rocksdb/options/options_parser.cc +0 -745
  480. package/vendor/librocksdb/vendor/rocksdb/options/options_parser.h +0 -151
  481. package/vendor/librocksdb/vendor/rocksdb/port/jemalloc_helper.h +0 -107
  482. package/vendor/librocksdb/vendor/rocksdb/port/lang.h +0 -97
  483. package/vendor/librocksdb/vendor/rocksdb/port/likely.h +0 -18
  484. package/vendor/librocksdb/vendor/rocksdb/port/malloc.h +0 -17
  485. package/vendor/librocksdb/vendor/rocksdb/port/mmap.cc +0 -98
  486. package/vendor/librocksdb/vendor/rocksdb/port/mmap.h +0 -90
  487. package/vendor/librocksdb/vendor/rocksdb/port/port.h +0 -21
  488. package/vendor/librocksdb/vendor/rocksdb/port/port_dirent.h +0 -44
  489. package/vendor/librocksdb/vendor/rocksdb/port/port_example.h +0 -101
  490. package/vendor/librocksdb/vendor/rocksdb/port/port_posix.cc +0 -300
  491. package/vendor/librocksdb/vendor/rocksdb/port/port_posix.h +0 -246
  492. package/vendor/librocksdb/vendor/rocksdb/port/stack_trace.cc +0 -418
  493. package/vendor/librocksdb/vendor/rocksdb/port/stack_trace.h +0 -31
  494. package/vendor/librocksdb/vendor/rocksdb/port/sys_time.h +0 -63
  495. package/vendor/librocksdb/vendor/rocksdb/port/util_logger.h +0 -18
  496. package/vendor/librocksdb/vendor/rocksdb/port/win/env_default.cc +0 -45
  497. package/vendor/librocksdb/vendor/rocksdb/port/win/env_win.cc +0 -1436
  498. package/vendor/librocksdb/vendor/rocksdb/port/win/env_win.h +0 -305
  499. package/vendor/librocksdb/vendor/rocksdb/port/win/io_win.cc +0 -1101
  500. package/vendor/librocksdb/vendor/rocksdb/port/win/io_win.h +0 -504
  501. package/vendor/librocksdb/vendor/rocksdb/port/win/port_win.cc +0 -305
  502. package/vendor/librocksdb/vendor/rocksdb/port/win/port_win.h +0 -382
  503. package/vendor/librocksdb/vendor/rocksdb/port/win/win_jemalloc.cc +0 -80
  504. package/vendor/librocksdb/vendor/rocksdb/port/win/win_logger.cc +0 -192
  505. package/vendor/librocksdb/vendor/rocksdb/port/win/win_logger.h +0 -64
  506. package/vendor/librocksdb/vendor/rocksdb/port/win/win_thread.cc +0 -170
  507. package/vendor/librocksdb/vendor/rocksdb/port/win/win_thread.h +0 -117
  508. package/vendor/librocksdb/vendor/rocksdb/port/win/xpress_win.cc +0 -210
  509. package/vendor/librocksdb/vendor/rocksdb/port/win/xpress_win.h +0 -26
  510. package/vendor/librocksdb/vendor/rocksdb/port/xpress.h +0 -17
  511. package/vendor/librocksdb/vendor/rocksdb/rocksdb.pc.in +0 -10
  512. package/vendor/librocksdb/vendor/rocksdb/table/adaptive/adaptive_table_factory.cc +0 -119
  513. package/vendor/librocksdb/vendor/rocksdb/table/adaptive/adaptive_table_factory.h +0 -56
  514. package/vendor/librocksdb/vendor/rocksdb/table/block_based/binary_search_index_reader.cc +0 -73
  515. package/vendor/librocksdb/vendor/rocksdb/table/block_based/binary_search_index_reader.h +0 -48
  516. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block.cc +0 -1341
  517. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block.h +0 -969
  518. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_builder.cc +0 -2148
  519. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_builder.h +0 -208
  520. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_factory.cc +0 -980
  521. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_factory.h +0 -102
  522. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_iterator.cc +0 -893
  523. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_iterator.h +0 -445
  524. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_reader.cc +0 -3296
  525. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_reader.h +0 -785
  526. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_reader_impl.h +0 -205
  527. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +0 -819
  528. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_builder.cc +0 -266
  529. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_builder.h +0 -128
  530. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_cache.cc +0 -108
  531. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_cache.h +0 -190
  532. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_prefetcher.cc +0 -158
  533. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_prefetcher.h +0 -74
  534. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_prefix_index.cc +0 -226
  535. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_prefix_index.h +0 -70
  536. package/vendor/librocksdb/vendor/rocksdb/table/block_based/block_type.h +0 -34
  537. package/vendor/librocksdb/vendor/rocksdb/table/block_based/cachable_entry.h +0 -249
  538. package/vendor/librocksdb/vendor/rocksdb/table/block_based/data_block_footer.cc +0 -59
  539. package/vendor/librocksdb/vendor/rocksdb/table/block_based/data_block_footer.h +0 -25
  540. package/vendor/librocksdb/vendor/rocksdb/table/block_based/data_block_hash_index.cc +0 -94
  541. package/vendor/librocksdb/vendor/rocksdb/table/block_based/data_block_hash_index.h +0 -137
  542. package/vendor/librocksdb/vendor/rocksdb/table/block_based/filter_block.h +0 -189
  543. package/vendor/librocksdb/vendor/rocksdb/table/block_based/filter_block_reader_common.cc +0 -169
  544. package/vendor/librocksdb/vendor/rocksdb/table/block_based/filter_block_reader_common.h +0 -79
  545. package/vendor/librocksdb/vendor/rocksdb/table/block_based/filter_policy.cc +0 -1989
  546. package/vendor/librocksdb/vendor/rocksdb/table/block_based/filter_policy_internal.h +0 -341
  547. package/vendor/librocksdb/vendor/rocksdb/table/block_based/flush_block_policy.cc +0 -132
  548. package/vendor/librocksdb/vendor/rocksdb/table/block_based/flush_block_policy_impl.h +0 -40
  549. package/vendor/librocksdb/vendor/rocksdb/table/block_based/full_filter_block.cc +0 -297
  550. package/vendor/librocksdb/vendor/rocksdb/table/block_based/full_filter_block.h +0 -142
  551. package/vendor/librocksdb/vendor/rocksdb/table/block_based/hash_index_reader.cc +0 -146
  552. package/vendor/librocksdb/vendor/rocksdb/table/block_based/hash_index_reader.h +0 -49
  553. package/vendor/librocksdb/vendor/rocksdb/table/block_based/index_builder.cc +0 -305
  554. package/vendor/librocksdb/vendor/rocksdb/table/block_based/index_builder.h +0 -534
  555. package/vendor/librocksdb/vendor/rocksdb/table/block_based/index_reader_common.cc +0 -62
  556. package/vendor/librocksdb/vendor/rocksdb/table/block_based/index_reader_common.h +0 -94
  557. package/vendor/librocksdb/vendor/rocksdb/table/block_based/mock_block_based_table.h +0 -62
  558. package/vendor/librocksdb/vendor/rocksdb/table/block_based/parsed_full_filter_block.cc +0 -23
  559. package/vendor/librocksdb/vendor/rocksdb/table/block_based/parsed_full_filter_block.h +0 -47
  560. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_filter_block.cc +0 -610
  561. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_filter_block.h +0 -188
  562. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_index_iterator.cc +0 -164
  563. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_index_iterator.h +0 -160
  564. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_index_reader.cc +0 -264
  565. package/vendor/librocksdb/vendor/rocksdb/table/block_based/partitioned_index_reader.h +0 -58
  566. package/vendor/librocksdb/vendor/rocksdb/table/block_based/reader_common.cc +0 -64
  567. package/vendor/librocksdb/vendor/rocksdb/table/block_based/reader_common.h +0 -36
  568. package/vendor/librocksdb/vendor/rocksdb/table/block_based/uncompression_dict_reader.cc +0 -118
  569. package/vendor/librocksdb/vendor/rocksdb/table/block_based/uncompression_dict_reader.h +0 -60
  570. package/vendor/librocksdb/vendor/rocksdb/table/block_fetcher.cc +0 -458
  571. package/vendor/librocksdb/vendor/rocksdb/table/block_fetcher.h +0 -168
  572. package/vendor/librocksdb/vendor/rocksdb/table/compaction_merging_iterator.cc +0 -370
  573. package/vendor/librocksdb/vendor/rocksdb/table/compaction_merging_iterator.h +0 -45
  574. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_builder.cc +0 -555
  575. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_builder.h +0 -136
  576. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_factory.cc +0 -100
  577. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_factory.h +0 -80
  578. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_reader.cc +0 -416
  579. package/vendor/librocksdb/vendor/rocksdb/table/cuckoo/cuckoo_table_reader.h +0 -100
  580. package/vendor/librocksdb/vendor/rocksdb/table/format.cc +0 -708
  581. package/vendor/librocksdb/vendor/rocksdb/table/format.h +0 -439
  582. package/vendor/librocksdb/vendor/rocksdb/table/get_context.cc +0 -622
  583. package/vendor/librocksdb/vendor/rocksdb/table/get_context.h +0 -259
  584. package/vendor/librocksdb/vendor/rocksdb/table/internal_iterator.h +0 -242
  585. package/vendor/librocksdb/vendor/rocksdb/table/iter_heap.h +0 -44
  586. package/vendor/librocksdb/vendor/rocksdb/table/iterator.cc +0 -134
  587. package/vendor/librocksdb/vendor/rocksdb/table/iterator_wrapper.h +0 -225
  588. package/vendor/librocksdb/vendor/rocksdb/table/merging_iterator.cc +0 -1755
  589. package/vendor/librocksdb/vendor/rocksdb/table/merging_iterator.h +0 -100
  590. package/vendor/librocksdb/vendor/rocksdb/table/meta_blocks.cc +0 -590
  591. package/vendor/librocksdb/vendor/rocksdb/table/meta_blocks.h +0 -181
  592. package/vendor/librocksdb/vendor/rocksdb/table/mock_table.cc +0 -355
  593. package/vendor/librocksdb/vendor/rocksdb/table/mock_table.h +0 -92
  594. package/vendor/librocksdb/vendor/rocksdb/table/multiget_context.h +0 -405
  595. package/vendor/librocksdb/vendor/rocksdb/table/persistent_cache_helper.cc +0 -110
  596. package/vendor/librocksdb/vendor/rocksdb/table/persistent_cache_helper.h +0 -46
  597. package/vendor/librocksdb/vendor/rocksdb/table/persistent_cache_options.h +0 -34
  598. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_bloom.cc +0 -78
  599. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_bloom.h +0 -132
  600. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_builder.cc +0 -348
  601. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_builder.h +0 -151
  602. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_factory.cc +0 -295
  603. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_factory.h +0 -180
  604. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_index.cc +0 -211
  605. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_index.h +0 -246
  606. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_key_coding.cc +0 -508
  607. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_key_coding.h +0 -199
  608. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_reader.cc +0 -778
  609. package/vendor/librocksdb/vendor/rocksdb/table/plain/plain_table_reader.h +0 -243
  610. package/vendor/librocksdb/vendor/rocksdb/table/sst_file_dumper.cc +0 -601
  611. package/vendor/librocksdb/vendor/rocksdb/table/sst_file_dumper.h +0 -104
  612. package/vendor/librocksdb/vendor/rocksdb/table/sst_file_reader.cc +0 -166
  613. package/vendor/librocksdb/vendor/rocksdb/table/sst_file_writer.cc +0 -536
  614. package/vendor/librocksdb/vendor/rocksdb/table/sst_file_writer_collectors.h +0 -97
  615. package/vendor/librocksdb/vendor/rocksdb/table/table_builder.h +0 -239
  616. package/vendor/librocksdb/vendor/rocksdb/table/table_factory.cc +0 -52
  617. package/vendor/librocksdb/vendor/rocksdb/table/table_iterator.h +0 -69
  618. package/vendor/librocksdb/vendor/rocksdb/table/table_properties.cc +0 -357
  619. package/vendor/librocksdb/vendor/rocksdb/table/table_properties_internal.h +0 -14
  620. package/vendor/librocksdb/vendor/rocksdb/table/table_reader.h +0 -202
  621. package/vendor/librocksdb/vendor/rocksdb/table/table_reader_bench.cc +0 -341
  622. package/vendor/librocksdb/vendor/rocksdb/table/two_level_iterator.cc +0 -222
  623. package/vendor/librocksdb/vendor/rocksdb/table/two_level_iterator.h +0 -43
  624. package/vendor/librocksdb/vendor/rocksdb/table/unique_id.cc +0 -223
  625. package/vendor/librocksdb/vendor/rocksdb/table/unique_id_impl.h +0 -93
  626. package/vendor/librocksdb/vendor/rocksdb/test_util/mock_time_env.cc +0 -38
  627. package/vendor/librocksdb/vendor/rocksdb/test_util/mock_time_env.h +0 -109
  628. package/vendor/librocksdb/vendor/rocksdb/test_util/secondary_cache_test_util.cc +0 -93
  629. package/vendor/librocksdb/vendor/rocksdb/test_util/secondary_cache_test_util.h +0 -131
  630. package/vendor/librocksdb/vendor/rocksdb/test_util/sync_point.cc +0 -82
  631. package/vendor/librocksdb/vendor/rocksdb/test_util/sync_point.h +0 -182
  632. package/vendor/librocksdb/vendor/rocksdb/test_util/sync_point_impl.cc +0 -152
  633. package/vendor/librocksdb/vendor/rocksdb/test_util/sync_point_impl.h +0 -96
  634. package/vendor/librocksdb/vendor/rocksdb/test_util/testharness.cc +0 -105
  635. package/vendor/librocksdb/vendor/rocksdb/test_util/testharness.h +0 -124
  636. package/vendor/librocksdb/vendor/rocksdb/test_util/testutil.cc +0 -776
  637. package/vendor/librocksdb/vendor/rocksdb/test_util/testutil.h +0 -908
  638. package/vendor/librocksdb/vendor/rocksdb/test_util/transaction_test_util.cc +0 -400
  639. package/vendor/librocksdb/vendor/rocksdb/test_util/transaction_test_util.h +0 -147
  640. package/vendor/librocksdb/vendor/rocksdb/tools/CMakeLists.txt +0 -30
  641. package/vendor/librocksdb/vendor/rocksdb/tools/blob_dump.cc +0 -103
  642. package/vendor/librocksdb/vendor/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer.cc +0 -2331
  643. package/vendor/librocksdb/vendor/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer.h +0 -398
  644. package/vendor/librocksdb/vendor/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer_tool.cc +0 -17
  645. package/vendor/librocksdb/vendor/rocksdb/tools/db_bench.cc +0 -21
  646. package/vendor/librocksdb/vendor/rocksdb/tools/db_bench_tool.cc +0 -8743
  647. package/vendor/librocksdb/vendor/rocksdb/tools/db_repl_stress.cc +0 -132
  648. package/vendor/librocksdb/vendor/rocksdb/tools/dump/db_dump_tool.cc +0 -264
  649. package/vendor/librocksdb/vendor/rocksdb/tools/dump/rocksdb_dump.cc +0 -60
  650. package/vendor/librocksdb/vendor/rocksdb/tools/dump/rocksdb_undump.cc +0 -59
  651. package/vendor/librocksdb/vendor/rocksdb/tools/io_tracer_parser.cc +0 -17
  652. package/vendor/librocksdb/vendor/rocksdb/tools/io_tracer_parser_tool.cc +0 -142
  653. package/vendor/librocksdb/vendor/rocksdb/tools/io_tracer_parser_tool.h +0 -38
  654. package/vendor/librocksdb/vendor/rocksdb/tools/ldb.cc +0 -13
  655. package/vendor/librocksdb/vendor/rocksdb/tools/ldb_cmd.cc +0 -5044
  656. package/vendor/librocksdb/vendor/rocksdb/tools/ldb_cmd_impl.h +0 -814
  657. package/vendor/librocksdb/vendor/rocksdb/tools/ldb_tool.cc +0 -192
  658. package/vendor/librocksdb/vendor/rocksdb/tools/simulated_hybrid_file_system.cc +0 -244
  659. package/vendor/librocksdb/vendor/rocksdb/tools/simulated_hybrid_file_system.h +0 -124
  660. package/vendor/librocksdb/vendor/rocksdb/tools/sst_dump.cc +0 -12
  661. package/vendor/librocksdb/vendor/rocksdb/tools/sst_dump_tool.cc +0 -588
  662. package/vendor/librocksdb/vendor/rocksdb/tools/trace_analyzer.cc +0 -17
  663. package/vendor/librocksdb/vendor/rocksdb/tools/trace_analyzer_tool.cc +0 -1935
  664. package/vendor/librocksdb/vendor/rocksdb/tools/trace_analyzer_tool.h +0 -329
  665. package/vendor/librocksdb/vendor/rocksdb/tools/write_stress.cc +0 -305
  666. package/vendor/librocksdb/vendor/rocksdb/trace_replay/block_cache_tracer.cc +0 -509
  667. package/vendor/librocksdb/vendor/rocksdb/trace_replay/block_cache_tracer.h +0 -239
  668. package/vendor/librocksdb/vendor/rocksdb/trace_replay/io_tracer.cc +0 -303
  669. package/vendor/librocksdb/vendor/rocksdb/trace_replay/io_tracer.h +0 -185
  670. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_record.cc +0 -206
  671. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_record_handler.cc +0 -190
  672. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_record_handler.h +0 -46
  673. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_record_result.cc +0 -146
  674. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_replay.cc +0 -632
  675. package/vendor/librocksdb/vendor/rocksdb/trace_replay/trace_replay.h +0 -184
  676. package/vendor/librocksdb/vendor/rocksdb/util/aligned_buffer.h +0 -235
  677. package/vendor/librocksdb/vendor/rocksdb/util/aligned_storage.h +0 -24
  678. package/vendor/librocksdb/vendor/rocksdb/util/async_file_reader.cc +0 -84
  679. package/vendor/librocksdb/vendor/rocksdb/util/async_file_reader.h +0 -144
  680. package/vendor/librocksdb/vendor/rocksdb/util/atomic.h +0 -111
  681. package/vendor/librocksdb/vendor/rocksdb/util/autovector.h +0 -397
  682. package/vendor/librocksdb/vendor/rocksdb/util/bloom_impl.h +0 -489
  683. package/vendor/librocksdb/vendor/rocksdb/util/build_version.cc.in +0 -79
  684. package/vendor/librocksdb/vendor/rocksdb/util/cast_util.h +0 -88
  685. package/vendor/librocksdb/vendor/rocksdb/util/channel.h +0 -69
  686. package/vendor/librocksdb/vendor/rocksdb/util/cleanable.cc +0 -181
  687. package/vendor/librocksdb/vendor/rocksdb/util/coding.cc +0 -90
  688. package/vendor/librocksdb/vendor/rocksdb/util/coding.h +0 -385
  689. package/vendor/librocksdb/vendor/rocksdb/util/coding_lean.h +0 -101
  690. package/vendor/librocksdb/vendor/rocksdb/util/compaction_job_stats_impl.cc +0 -94
  691. package/vendor/librocksdb/vendor/rocksdb/util/comparator.cc +0 -443
  692. package/vendor/librocksdb/vendor/rocksdb/util/compression.cc +0 -122
  693. package/vendor/librocksdb/vendor/rocksdb/util/compression.h +0 -1879
  694. package/vendor/librocksdb/vendor/rocksdb/util/compression_context_cache.cc +0 -106
  695. package/vendor/librocksdb/vendor/rocksdb/util/compression_context_cache.h +0 -47
  696. package/vendor/librocksdb/vendor/rocksdb/util/concurrent_task_limiter_impl.cc +0 -64
  697. package/vendor/librocksdb/vendor/rocksdb/util/concurrent_task_limiter_impl.h +0 -67
  698. package/vendor/librocksdb/vendor/rocksdb/util/core_local.h +0 -85
  699. package/vendor/librocksdb/vendor/rocksdb/util/coro_utils.h +0 -112
  700. package/vendor/librocksdb/vendor/rocksdb/util/crc32c.cc +0 -1295
  701. package/vendor/librocksdb/vendor/rocksdb/util/crc32c.h +0 -56
  702. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_arm64.cc +0 -213
  703. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_arm64.h +0 -51
  704. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_ppc.c +0 -94
  705. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_ppc.h +0 -21
  706. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_ppc_asm.S +0 -756
  707. package/vendor/librocksdb/vendor/rocksdb/util/crc32c_ppc_constants.h +0 -900
  708. package/vendor/librocksdb/vendor/rocksdb/util/data_structure.cc +0 -16
  709. package/vendor/librocksdb/vendor/rocksdb/util/defer.h +0 -82
  710. package/vendor/librocksdb/vendor/rocksdb/util/distributed_mutex.h +0 -50
  711. package/vendor/librocksdb/vendor/rocksdb/util/duplicate_detector.h +0 -69
  712. package/vendor/librocksdb/vendor/rocksdb/util/dynamic_bloom.cc +0 -70
  713. package/vendor/librocksdb/vendor/rocksdb/util/dynamic_bloom.h +0 -214
  714. package/vendor/librocksdb/vendor/rocksdb/util/fastrange.h +0 -114
  715. package/vendor/librocksdb/vendor/rocksdb/util/file_checksum_helper.cc +0 -170
  716. package/vendor/librocksdb/vendor/rocksdb/util/file_checksum_helper.h +0 -101
  717. package/vendor/librocksdb/vendor/rocksdb/util/filter_bench.cc +0 -840
  718. package/vendor/librocksdb/vendor/rocksdb/util/gflags_compat.h +0 -29
  719. package/vendor/librocksdb/vendor/rocksdb/util/hash.cc +0 -201
  720. package/vendor/librocksdb/vendor/rocksdb/util/hash.h +0 -141
  721. package/vendor/librocksdb/vendor/rocksdb/util/hash128.h +0 -26
  722. package/vendor/librocksdb/vendor/rocksdb/util/hash_containers.h +0 -51
  723. package/vendor/librocksdb/vendor/rocksdb/util/hash_map.h +0 -67
  724. package/vendor/librocksdb/vendor/rocksdb/util/heap.h +0 -174
  725. package/vendor/librocksdb/vendor/rocksdb/util/kv_map.h +0 -33
  726. package/vendor/librocksdb/vendor/rocksdb/util/log_write_bench.cc +0 -88
  727. package/vendor/librocksdb/vendor/rocksdb/util/math.h +0 -351
  728. package/vendor/librocksdb/vendor/rocksdb/util/math128.h +0 -338
  729. package/vendor/librocksdb/vendor/rocksdb/util/murmurhash.cc +0 -196
  730. package/vendor/librocksdb/vendor/rocksdb/util/murmurhash.h +0 -43
  731. package/vendor/librocksdb/vendor/rocksdb/util/mutexlock.h +0 -189
  732. package/vendor/librocksdb/vendor/rocksdb/util/overload.h +0 -23
  733. package/vendor/librocksdb/vendor/rocksdb/util/ppc-opcode.h +0 -27
  734. package/vendor/librocksdb/vendor/rocksdb/util/random.cc +0 -63
  735. package/vendor/librocksdb/vendor/rocksdb/util/random.h +0 -190
  736. package/vendor/librocksdb/vendor/rocksdb/util/rate_limiter.cc +0 -391
  737. package/vendor/librocksdb/vendor/rocksdb/util/rate_limiter_impl.h +0 -156
  738. package/vendor/librocksdb/vendor/rocksdb/util/repeatable_thread.h +0 -149
  739. package/vendor/librocksdb/vendor/rocksdb/util/ribbon_alg.h +0 -1225
  740. package/vendor/librocksdb/vendor/rocksdb/util/ribbon_config.cc +0 -498
  741. package/vendor/librocksdb/vendor/rocksdb/util/ribbon_config.h +0 -182
  742. package/vendor/librocksdb/vendor/rocksdb/util/ribbon_impl.h +0 -1137
  743. package/vendor/librocksdb/vendor/rocksdb/util/set_comparator.h +0 -24
  744. package/vendor/librocksdb/vendor/rocksdb/util/single_thread_executor.h +0 -57
  745. package/vendor/librocksdb/vendor/rocksdb/util/slice.cc +0 -366
  746. package/vendor/librocksdb/vendor/rocksdb/util/status.cc +0 -163
  747. package/vendor/librocksdb/vendor/rocksdb/util/stderr_logger.cc +0 -62
  748. package/vendor/librocksdb/vendor/rocksdb/util/stderr_logger.h +0 -41
  749. package/vendor/librocksdb/vendor/rocksdb/util/stop_watch.h +0 -136
  750. package/vendor/librocksdb/vendor/rocksdb/util/string_util.cc +0 -554
  751. package/vendor/librocksdb/vendor/rocksdb/util/string_util.h +0 -185
  752. package/vendor/librocksdb/vendor/rocksdb/util/thread_guard.h +0 -41
  753. package/vendor/librocksdb/vendor/rocksdb/util/thread_local.cc +0 -521
  754. package/vendor/librocksdb/vendor/rocksdb/util/thread_local.h +0 -100
  755. package/vendor/librocksdb/vendor/rocksdb/util/thread_operation.h +0 -122
  756. package/vendor/librocksdb/vendor/rocksdb/util/threadpool_imp.cc +0 -550
  757. package/vendor/librocksdb/vendor/rocksdb/util/threadpool_imp.h +0 -120
  758. package/vendor/librocksdb/vendor/rocksdb/util/timer.h +0 -340
  759. package/vendor/librocksdb/vendor/rocksdb/util/timer_queue.h +0 -231
  760. package/vendor/librocksdb/vendor/rocksdb/util/udt_util.cc +0 -418
  761. package/vendor/librocksdb/vendor/rocksdb/util/udt_util.h +0 -275
  762. package/vendor/librocksdb/vendor/rocksdb/util/user_comparator_wrapper.h +0 -64
  763. package/vendor/librocksdb/vendor/rocksdb/util/vector_iterator.h +0 -114
  764. package/vendor/librocksdb/vendor/rocksdb/util/work_queue.h +0 -150
  765. package/vendor/librocksdb/vendor/rocksdb/util/write_batch_util.cc +0 -25
  766. package/vendor/librocksdb/vendor/rocksdb/util/write_batch_util.h +0 -90
  767. package/vendor/librocksdb/vendor/rocksdb/util/xxhash.cc +0 -48
  768. package/vendor/librocksdb/vendor/rocksdb/util/xxhash.h +0 -6364
  769. package/vendor/librocksdb/vendor/rocksdb/util/xxph3.h +0 -1760
  770. package/vendor/librocksdb/vendor/rocksdb/utilities/agg_merge/agg_merge.cc +0 -237
  771. package/vendor/librocksdb/vendor/rocksdb/utilities/agg_merge/agg_merge_impl.h +0 -49
  772. package/vendor/librocksdb/vendor/rocksdb/utilities/agg_merge/test_agg_merge.cc +0 -103
  773. package/vendor/librocksdb/vendor/rocksdb/utilities/agg_merge/test_agg_merge.h +0 -47
  774. package/vendor/librocksdb/vendor/rocksdb/utilities/backup/backup_engine.cc +0 -3357
  775. package/vendor/librocksdb/vendor/rocksdb/utilities/backup/backup_engine_impl.h +0 -34
  776. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_compaction_filter.cc +0 -490
  777. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_compaction_filter.h +0 -202
  778. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db.cc +0 -109
  779. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db.h +0 -231
  780. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_gc_stats.h +0 -54
  781. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_impl.cc +0 -2269
  782. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_impl.h +0 -514
  783. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_impl_filesnapshot.cc +0 -127
  784. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_iterator.h +0 -148
  785. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_db_listener.h +0 -71
  786. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_dump_tool.cc +0 -276
  787. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_dump_tool.h +0 -56
  788. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_file.cc +0 -311
  789. package/vendor/librocksdb/vendor/rocksdb/utilities/blob_db/blob_file.h +0 -245
  790. package/vendor/librocksdb/vendor/rocksdb/utilities/cache_dump_load.cc +0 -67
  791. package/vendor/librocksdb/vendor/rocksdb/utilities/cache_dump_load_impl.cc +0 -389
  792. package/vendor/librocksdb/vendor/rocksdb/utilities/cache_dump_load_impl.h +0 -368
  793. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/cassandra_compaction_filter.cc +0 -104
  794. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/cassandra_compaction_filter.h +0 -57
  795. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/cassandra_options.h +0 -41
  796. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/format.cc +0 -365
  797. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/format.h +0 -183
  798. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/merge_operator.cc +0 -76
  799. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/merge_operator.h +0 -43
  800. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/serialize.h +0 -81
  801. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/test_utils.cc +0 -67
  802. package/vendor/librocksdb/vendor/rocksdb/utilities/cassandra/test_utils.h +0 -42
  803. package/vendor/librocksdb/vendor/rocksdb/utilities/checkpoint/checkpoint_impl.cc +0 -472
  804. package/vendor/librocksdb/vendor/rocksdb/utilities/checkpoint/checkpoint_impl.h +0 -64
  805. package/vendor/librocksdb/vendor/rocksdb/utilities/compaction_filters/layered_compaction_filter_base.h +0 -41
  806. package/vendor/librocksdb/vendor/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc +0 -24
  807. package/vendor/librocksdb/vendor/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h +0 -26
  808. package/vendor/librocksdb/vendor/rocksdb/utilities/compaction_filters.cc +0 -52
  809. package/vendor/librocksdb/vendor/rocksdb/utilities/convenience/info_log_finder.cc +0 -26
  810. package/vendor/librocksdb/vendor/rocksdb/utilities/counted_fs.cc +0 -379
  811. package/vendor/librocksdb/vendor/rocksdb/utilities/counted_fs.h +0 -158
  812. package/vendor/librocksdb/vendor/rocksdb/utilities/debug.cc +0 -134
  813. package/vendor/librocksdb/vendor/rocksdb/utilities/env_mirror.cc +0 -280
  814. package/vendor/librocksdb/vendor/rocksdb/utilities/env_timed.cc +0 -181
  815. package/vendor/librocksdb/vendor/rocksdb/utilities/env_timed.h +0 -95
  816. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_env.cc +0 -555
  817. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_env.h +0 -252
  818. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_fs.cc +0 -1507
  819. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_fs.h +0 -760
  820. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_secondary_cache.cc +0 -138
  821. package/vendor/librocksdb/vendor/rocksdb/utilities/fault_injection_secondary_cache.h +0 -115
  822. package/vendor/librocksdb/vendor/rocksdb/utilities/leveldb_options/leveldb_options.cc +0 -57
  823. package/vendor/librocksdb/vendor/rocksdb/utilities/memory/memory_util.cc +0 -50
  824. package/vendor/librocksdb/vendor/rocksdb/utilities/memory_allocators.h +0 -103
  825. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/bytesxor.cc +0 -57
  826. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/bytesxor.h +0 -39
  827. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/max.cc +0 -64
  828. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/max_operator.h +0 -35
  829. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/put.cc +0 -74
  830. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/put_operator.h +0 -56
  831. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/sortlist.cc +0 -97
  832. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/sortlist.h +0 -42
  833. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/string_append/stringappend.cc +0 -76
  834. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/string_append/stringappend.h +0 -32
  835. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/string_append/stringappend2.cc +0 -129
  836. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/string_append/stringappend2.h +0 -51
  837. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/uint64add.cc +0 -56
  838. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators/uint64add.h +0 -35
  839. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators.cc +0 -115
  840. package/vendor/librocksdb/vendor/rocksdb/utilities/merge_operators.h +0 -36
  841. package/vendor/librocksdb/vendor/rocksdb/utilities/object_registry.cc +0 -381
  842. package/vendor/librocksdb/vendor/rocksdb/utilities/option_change_migration/option_change_migration.cc +0 -169
  843. package/vendor/librocksdb/vendor/rocksdb/utilities/options/options_util.cc +0 -117
  844. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier.cc +0 -420
  845. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier.h +0 -154
  846. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier_file.cc +0 -607
  847. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier_file.h +0 -291
  848. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier_file_buffer.h +0 -127
  849. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.cc +0 -84
  850. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.h +0 -122
  851. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/hash_table.h +0 -237
  852. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/hash_table_bench.cc +0 -310
  853. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/hash_table_evictable.h +0 -166
  854. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/lrulist.h +0 -172
  855. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/persistent_cache_bench.cc +0 -355
  856. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/persistent_cache_test.h +0 -284
  857. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/persistent_cache_tier.cc +0 -165
  858. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/persistent_cache_tier.h +0 -340
  859. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/persistent_cache_util.h +0 -67
  860. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/volatile_tier_impl.cc +0 -138
  861. package/vendor/librocksdb/vendor/rocksdb/utilities/persistent_cache/volatile_tier_impl.h +0 -139
  862. package/vendor/librocksdb/vendor/rocksdb/utilities/simulator_cache/cache_simulator.cc +0 -287
  863. package/vendor/librocksdb/vendor/rocksdb/utilities/simulator_cache/cache_simulator.h +0 -231
  864. package/vendor/librocksdb/vendor/rocksdb/utilities/simulator_cache/sim_cache.cc +0 -375
  865. package/vendor/librocksdb/vendor/rocksdb/utilities/table_properties_collectors/compact_for_tiering_collector.cc +0 -144
  866. package/vendor/librocksdb/vendor/rocksdb/utilities/table_properties_collectors/compact_for_tiering_collector.h +0 -45
  867. package/vendor/librocksdb/vendor/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.cc +0 -233
  868. package/vendor/librocksdb/vendor/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.h +0 -65
  869. package/vendor/librocksdb/vendor/rocksdb/utilities/trace/file_trace_reader_writer.cc +0 -132
  870. package/vendor/librocksdb/vendor/rocksdb/utilities/trace/file_trace_reader_writer.h +0 -48
  871. package/vendor/librocksdb/vendor/rocksdb/utilities/trace/replayer_impl.cc +0 -313
  872. package/vendor/librocksdb/vendor/rocksdb/utilities/trace/replayer_impl.h +0 -84
  873. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/lock_manager.cc +0 -27
  874. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/lock_manager.h +0 -80
  875. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/lock_tracker.h +0 -207
  876. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/point/point_lock_manager.cc +0 -718
  877. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/point/point_lock_manager.h +0 -222
  878. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/point/point_lock_manager_test.h +0 -324
  879. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/point/point_lock_tracker.cc +0 -255
  880. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/point/point_lock_tracker.h +0 -97
  881. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_lock_manager.h +0 -34
  882. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/db.h +0 -81
  883. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/ft/comparator.h +0 -138
  884. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/ft/ft-status.h +0 -102
  885. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc +0 -137
  886. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.h +0 -174
  887. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc +0 -220
  888. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.h +0 -141
  889. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc +0 -525
  890. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.h +0 -255
  891. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc +0 -1021
  892. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/locktree.h +0 -580
  893. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc +0 -525
  894. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc +0 -263
  895. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.h +0 -178
  896. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc +0 -518
  897. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.h +0 -302
  898. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc +0 -118
  899. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.h +0 -92
  900. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc +0 -211
  901. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/locktree/wfg.h +0 -124
  902. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/memory.h +0 -215
  903. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_assert_subst.h +0 -43
  904. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_atomic.h +0 -130
  905. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_external_pthread.h +0 -87
  906. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_instrumentation.h +0 -286
  907. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_portability.h +0 -87
  908. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_pthread.h +0 -520
  909. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_race_tools.h +0 -179
  910. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_time.h +0 -197
  911. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/txn_subst.h +0 -31
  912. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc +0 -139
  913. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc +0 -165
  914. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/dbt.h +0 -98
  915. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/growable_array.h +0 -144
  916. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc +0 -199
  917. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/memarena.h +0 -141
  918. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/omt.h +0 -794
  919. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/omt_impl.h +0 -1295
  920. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/partitioned_counter.h +0 -165
  921. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/lib/util/status.h +0 -76
  922. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc +0 -501
  923. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.h +0 -135
  924. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc +0 -156
  925. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.h +0 -146
  926. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/optimistic_transaction.cc +0 -208
  927. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/optimistic_transaction.h +0 -99
  928. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/optimistic_transaction_db_impl.cc +0 -111
  929. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/optimistic_transaction_db_impl.h +0 -110
  930. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/pessimistic_transaction.cc +0 -1278
  931. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/pessimistic_transaction.h +0 -345
  932. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/pessimistic_transaction_db.cc +0 -845
  933. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/pessimistic_transaction_db.h +0 -345
  934. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/snapshot_checker.cc +0 -37
  935. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_base.cc +0 -912
  936. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_base.h +0 -455
  937. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_db_mutex_impl.cc +0 -133
  938. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_db_mutex_impl.h +0 -24
  939. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_test.h +0 -589
  940. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_util.cc +0 -207
  941. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/transaction_util.h +0 -86
  942. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_prepared_txn.cc +0 -548
  943. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_prepared_txn.h +0 -118
  944. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_prepared_txn_db.cc +0 -1100
  945. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_prepared_txn_db.h +0 -1149
  946. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_unprepared_txn.cc +0 -1089
  947. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_unprepared_txn.h +0 -333
  948. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_unprepared_txn_db.cc +0 -486
  949. package/vendor/librocksdb/vendor/rocksdb/utilities/transactions/write_unprepared_txn_db.h +0 -105
  950. package/vendor/librocksdb/vendor/rocksdb/utilities/ttl/db_ttl_impl.cc +0 -638
  951. package/vendor/librocksdb/vendor/rocksdb/utilities/ttl/db_ttl_impl.h +0 -239
  952. package/vendor/librocksdb/vendor/rocksdb/utilities/types_util.cc +0 -88
  953. package/vendor/librocksdb/vendor/rocksdb/utilities/wal_filter.cc +0 -22
  954. package/vendor/librocksdb/vendor/rocksdb/utilities/write_batch_with_index/write_batch_with_index.cc +0 -1134
  955. package/vendor/librocksdb/vendor/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.cc +0 -934
  956. package/vendor/librocksdb/vendor/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.h +0 -468
@@ -1,2624 +0,0 @@
1
- // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2
- // This source code is licensed under both the GPLv2 (found in the
3
- // COPYING file in the root directory) and Apache 2.0 License
4
- // (found in the LICENSE.Apache file in the root directory).
5
- //
6
- // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7
- // Use of this source code is governed by a BSD-style license that can be
8
- // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
- #include <cinttypes>
10
-
11
- #include "db/db_impl/db_impl.h"
12
- #include "db/error_handler.h"
13
- #include "db/event_helpers.h"
14
- #include "logging/logging.h"
15
- #include "monitoring/perf_context_imp.h"
16
- #include "options/options_helper.h"
17
- #include "test_util/sync_point.h"
18
- #include "util/cast_util.h"
19
-
20
- namespace ROCKSDB_NAMESPACE {
21
- // Convenience methods
22
- Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
23
- const Slice& key, const Slice& val) {
24
- const Status s = FailIfCfHasTs(column_family);
25
- if (!s.ok()) {
26
- return s;
27
- }
28
- return DB::Put(o, column_family, key, val);
29
- }
30
-
31
- Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
32
- const Slice& key, const Slice& ts, const Slice& val) {
33
- const Status s = FailIfTsMismatchCf(column_family, ts);
34
- if (!s.ok()) {
35
- return s;
36
- }
37
- return DB::Put(o, column_family, key, ts, val);
38
- }
39
-
40
- Status DBImpl::PutEntity(const WriteOptions& options,
41
- ColumnFamilyHandle* column_family, const Slice& key,
42
- const WideColumns& columns) {
43
- const Status s = FailIfCfHasTs(column_family);
44
- if (!s.ok()) {
45
- return s;
46
- }
47
-
48
- return DB::PutEntity(options, column_family, key, columns);
49
- }
50
-
51
- Status DBImpl::PutEntity(const WriteOptions& options, const Slice& key,
52
- const AttributeGroups& attribute_groups) {
53
- for (const AttributeGroup& ag : attribute_groups) {
54
- const Status s = FailIfCfHasTs(ag.column_family());
55
- if (!s.ok()) {
56
- return s;
57
- }
58
- }
59
- return DB::PutEntity(options, key, attribute_groups);
60
- }
61
-
62
- Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
63
- const Slice& key, const Slice& val) {
64
- const Status s = FailIfCfHasTs(column_family);
65
- if (!s.ok()) {
66
- return s;
67
- }
68
- auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
69
- if (!cfh->cfd()->ioptions()->merge_operator) {
70
- return Status::NotSupported("Provide a merge_operator when opening DB");
71
- } else {
72
- return DB::Merge(o, column_family, key, val);
73
- }
74
- }
75
-
76
- Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
77
- const Slice& key, const Slice& ts, const Slice& val) {
78
- const Status s = FailIfTsMismatchCf(column_family, ts);
79
- if (!s.ok()) {
80
- return s;
81
- }
82
- return DB::Merge(o, column_family, key, ts, val);
83
- }
84
-
85
- Status DBImpl::Delete(const WriteOptions& write_options,
86
- ColumnFamilyHandle* column_family, const Slice& key) {
87
- const Status s = FailIfCfHasTs(column_family);
88
- if (!s.ok()) {
89
- return s;
90
- }
91
- return DB::Delete(write_options, column_family, key);
92
- }
93
-
94
- Status DBImpl::Delete(const WriteOptions& write_options,
95
- ColumnFamilyHandle* column_family, const Slice& key,
96
- const Slice& ts) {
97
- const Status s = FailIfTsMismatchCf(column_family, ts);
98
- if (!s.ok()) {
99
- return s;
100
- }
101
- return DB::Delete(write_options, column_family, key, ts);
102
- }
103
-
104
- Status DBImpl::SingleDelete(const WriteOptions& write_options,
105
- ColumnFamilyHandle* column_family,
106
- const Slice& key) {
107
- const Status s = FailIfCfHasTs(column_family);
108
- if (!s.ok()) {
109
- return s;
110
- }
111
- return DB::SingleDelete(write_options, column_family, key);
112
- }
113
-
114
- Status DBImpl::SingleDelete(const WriteOptions& write_options,
115
- ColumnFamilyHandle* column_family, const Slice& key,
116
- const Slice& ts) {
117
- const Status s = FailIfTsMismatchCf(column_family, ts);
118
- if (!s.ok()) {
119
- return s;
120
- }
121
- return DB::SingleDelete(write_options, column_family, key, ts);
122
- }
123
-
124
- Status DBImpl::DeleteRange(const WriteOptions& write_options,
125
- ColumnFamilyHandle* column_family,
126
- const Slice& begin_key, const Slice& end_key) {
127
- const Status s = FailIfCfHasTs(column_family);
128
- if (!s.ok()) {
129
- return s;
130
- }
131
- return DB::DeleteRange(write_options, column_family, begin_key, end_key);
132
- }
133
-
134
- Status DBImpl::DeleteRange(const WriteOptions& write_options,
135
- ColumnFamilyHandle* column_family,
136
- const Slice& begin_key, const Slice& end_key,
137
- const Slice& ts) {
138
- const Status s = FailIfTsMismatchCf(column_family, ts);
139
- if (!s.ok()) {
140
- return s;
141
- }
142
- return DB::DeleteRange(write_options, column_family, begin_key, end_key, ts);
143
- }
144
-
145
- void DBImpl::SetRecoverableStatePreReleaseCallback(
146
- PreReleaseCallback* callback) {
147
- recoverable_state_pre_release_callback_.reset(callback);
148
- }
149
-
150
- Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
151
- Status s;
152
- if (write_options.protection_bytes_per_key > 0) {
153
- s = WriteBatchInternal::UpdateProtectionInfo(
154
- my_batch, write_options.protection_bytes_per_key);
155
- }
156
- if (s.ok()) {
157
- s = WriteImpl(write_options, my_batch, /*callback=*/nullptr,
158
- /*user_write_cb=*/nullptr,
159
- /*log_used=*/nullptr);
160
- }
161
- return s;
162
- }
163
-
164
- Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
165
- WriteBatch* my_batch, WriteCallback* callback,
166
- UserWriteCallback* user_write_cb) {
167
- Status s;
168
- if (write_options.protection_bytes_per_key > 0) {
169
- s = WriteBatchInternal::UpdateProtectionInfo(
170
- my_batch, write_options.protection_bytes_per_key);
171
- }
172
- if (s.ok()) {
173
- s = WriteImpl(write_options, my_batch, callback, user_write_cb);
174
- }
175
- return s;
176
- }
177
-
178
- Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
179
- WriteBatch* my_batch,
180
- UserWriteCallback* user_write_cb) {
181
- Status s;
182
- if (write_options.protection_bytes_per_key > 0) {
183
- s = WriteBatchInternal::UpdateProtectionInfo(
184
- my_batch, write_options.protection_bytes_per_key);
185
- }
186
- if (s.ok()) {
187
- s = WriteImpl(write_options, my_batch, /*callback=*/nullptr, user_write_cb);
188
- }
189
- return s;
190
- }
191
-
192
- // The main write queue. This is the only write queue that updates LastSequence.
193
- // When using one write queue, the same sequence also indicates the last
194
- // published sequence.
195
- Status DBImpl::WriteImpl(const WriteOptions& write_options,
196
- WriteBatch* my_batch, WriteCallback* callback,
197
- UserWriteCallback* user_write_cb, uint64_t* log_used,
198
- uint64_t log_ref, bool disable_memtable,
199
- uint64_t* seq_used, size_t batch_cnt,
200
- PreReleaseCallback* pre_release_callback,
201
- PostMemTableCallback* post_memtable_callback) {
202
- assert(!seq_per_batch_ || batch_cnt != 0);
203
- assert(my_batch == nullptr || my_batch->Count() == 0 ||
204
- write_options.protection_bytes_per_key == 0 ||
205
- write_options.protection_bytes_per_key ==
206
- my_batch->GetProtectionBytesPerKey());
207
- if (my_batch == nullptr) {
208
- return Status::InvalidArgument("Batch is nullptr!");
209
- } else if (!disable_memtable &&
210
- WriteBatchInternal::TimestampsUpdateNeeded(*my_batch)) {
211
- // If writing to memtable, then we require the caller to set/update the
212
- // timestamps for the keys in the write batch.
213
- // Otherwise, it means we are just writing to the WAL, and we allow
214
- // timestamps unset for the keys in the write batch. This can happen if we
215
- // use TransactionDB with write-committed policy, and we currently do not
216
- // support user-defined timestamp with other policies.
217
- // In the prepare phase, a transaction can write the batch to the WAL
218
- // without inserting to memtable. The keys in the batch do not have to be
219
- // assigned timestamps because they will be used only during recovery if
220
- // there is a commit marker which includes their commit timestamp.
221
- return Status::InvalidArgument("write batch must have timestamp(s) set");
222
- } else if (write_options.rate_limiter_priority != Env::IO_TOTAL &&
223
- write_options.rate_limiter_priority != Env::IO_USER) {
224
- return Status::InvalidArgument(
225
- "WriteOptions::rate_limiter_priority only allows "
226
- "Env::IO_TOTAL and Env::IO_USER due to implementation constraints");
227
- } else if (write_options.rate_limiter_priority != Env::IO_TOTAL &&
228
- (write_options.disableWAL || manual_wal_flush_)) {
229
- return Status::InvalidArgument(
230
- "WriteOptions::rate_limiter_priority currently only supports "
231
- "rate-limiting automatic WAL flush, which requires "
232
- "`WriteOptions::disableWAL` and "
233
- "`DBOptions::manual_wal_flush` both set to false");
234
- } else if (write_options.protection_bytes_per_key != 0 &&
235
- write_options.protection_bytes_per_key != 8) {
236
- return Status::InvalidArgument(
237
- "`WriteOptions::protection_bytes_per_key` must be zero or eight");
238
- } else if (write_options.disableWAL &&
239
- immutable_db_options_.recycle_log_file_num > 0 &&
240
- !(two_write_queues_ && disable_memtable)) {
241
- // Corruption detection in recycled WALs relies on sequential sequence
242
- // numbers, but WritePreparedTxnDB uses disableWAL internally for split
243
- // writes
244
- return Status::InvalidArgument(
245
- "WriteOptions::disableWAL option is not supported if "
246
- "DBOptions::recycle_log_file_num > 0");
247
- }
248
- // TODO: this use of operator bool on `tracer_` can avoid unnecessary lock
249
- // grabs but does not seem thread-safe.
250
- if (tracer_) {
251
- InstrumentedMutexLock lock(&trace_mutex_);
252
- if (tracer_ && !tracer_->IsWriteOrderPreserved()) {
253
- // We don't have to preserve write order so can trace anywhere. It's more
254
- // efficient to trace here than to add latency to a phase of the log/apply
255
- // pipeline.
256
- // TODO: maybe handle the tracing status?
257
- tracer_->Write(my_batch).PermitUncheckedError();
258
- }
259
- }
260
- if (write_options.sync && write_options.disableWAL) {
261
- return Status::InvalidArgument("Sync writes has to enable WAL.");
262
- }
263
- if (two_write_queues_ && immutable_db_options_.enable_pipelined_write) {
264
- return Status::NotSupported(
265
- "pipelined_writes is not compatible with concurrent prepares");
266
- }
267
- if (seq_per_batch_ && immutable_db_options_.enable_pipelined_write) {
268
- // TODO(yiwu): update pipeline write with seq_per_batch and batch_cnt
269
- return Status::NotSupported(
270
- "pipelined_writes is not compatible with seq_per_batch");
271
- }
272
- if (immutable_db_options_.unordered_write &&
273
- immutable_db_options_.enable_pipelined_write) {
274
- return Status::NotSupported(
275
- "pipelined_writes is not compatible with unordered_write");
276
- }
277
- if (immutable_db_options_.enable_pipelined_write &&
278
- post_memtable_callback != nullptr) {
279
- return Status::NotSupported(
280
- "pipelined write currently does not honor post_memtable_callback");
281
- }
282
- if (seq_per_batch_ && post_memtable_callback != nullptr) {
283
- return Status::NotSupported(
284
- "seq_per_batch currently does not honor post_memtable_callback");
285
- }
286
- if (my_batch->HasDeleteRange() && immutable_db_options_.row_cache) {
287
- return Status::NotSupported(
288
- "DeleteRange is not compatible with row cache.");
289
- }
290
- // Otherwise IsLatestPersistentState optimization does not make sense
291
- assert(!WriteBatchInternal::IsLatestPersistentState(my_batch) ||
292
- disable_memtable);
293
-
294
- if (write_options.low_pri) {
295
- Status s = ThrottleLowPriWritesIfNeeded(write_options, my_batch);
296
- if (!s.ok()) {
297
- return s;
298
- }
299
- }
300
-
301
- if (two_write_queues_ && disable_memtable) {
302
- AssignOrder assign_order =
303
- seq_per_batch_ ? kDoAssignOrder : kDontAssignOrder;
304
- // Otherwise it is WAL-only Prepare batches in WriteCommitted policy and
305
- // they don't consume sequence.
306
- return WriteImplWALOnly(
307
- &nonmem_write_thread_, write_options, my_batch, callback, user_write_cb,
308
- log_used, log_ref, seq_used, batch_cnt, pre_release_callback,
309
- assign_order, kDontPublishLastSeq, disable_memtable);
310
- }
311
-
312
- if (immutable_db_options_.unordered_write) {
313
- const size_t sub_batch_cnt = batch_cnt != 0
314
- ? batch_cnt
315
- // every key is a sub-batch consuming a seq
316
- : WriteBatchInternal::Count(my_batch);
317
- uint64_t seq = 0;
318
- // Use a write thread to i) optimize for WAL write, ii) publish last
319
- // sequence in in increasing order, iii) call pre_release_callback serially
320
- Status status = WriteImplWALOnly(
321
- &write_thread_, write_options, my_batch, callback, user_write_cb,
322
- log_used, log_ref, &seq, sub_batch_cnt, pre_release_callback,
323
- kDoAssignOrder, kDoPublishLastSeq, disable_memtable);
324
- TEST_SYNC_POINT("DBImpl::WriteImpl:UnorderedWriteAfterWriteWAL");
325
- if (!status.ok()) {
326
- return status;
327
- }
328
- if (seq_used) {
329
- *seq_used = seq;
330
- }
331
- if (!disable_memtable) {
332
- TEST_SYNC_POINT("DBImpl::WriteImpl:BeforeUnorderedWriteMemtable");
333
- status = UnorderedWriteMemtable(write_options, my_batch, callback,
334
- log_ref, seq, sub_batch_cnt);
335
- }
336
- return status;
337
- }
338
-
339
- if (immutable_db_options_.enable_pipelined_write) {
340
- return PipelinedWriteImpl(write_options, my_batch, callback, user_write_cb,
341
- log_used, log_ref, disable_memtable, seq_used);
342
- }
343
-
344
- PERF_TIMER_GUARD(write_pre_and_post_process_time);
345
- WriteThread::Writer w(write_options, my_batch, callback, user_write_cb,
346
- log_ref, disable_memtable, batch_cnt,
347
- pre_release_callback, post_memtable_callback);
348
- StopWatch write_sw(immutable_db_options_.clock, stats_, DB_WRITE);
349
-
350
- write_thread_.JoinBatchGroup(&w);
351
- if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_CALLER) {
352
- write_thread_.SetMemWritersEachStride(&w);
353
- }
354
- if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
355
- // we are a non-leader in a parallel group
356
-
357
- if (w.ShouldWriteToMemtable()) {
358
- PERF_TIMER_STOP(write_pre_and_post_process_time);
359
- PERF_TIMER_FOR_WAIT_GUARD(write_memtable_time);
360
-
361
- ColumnFamilyMemTablesImpl column_family_memtables(
362
- versions_->GetColumnFamilySet());
363
- w.status = WriteBatchInternal::InsertInto(
364
- &w, w.sequence, &column_family_memtables, &flush_scheduler_,
365
- &trim_history_scheduler_,
366
- write_options.ignore_missing_column_families, 0 /*log_number*/, this,
367
- true /*concurrent_memtable_writes*/, seq_per_batch_, w.batch_cnt,
368
- batch_per_txn_, write_options.memtable_insert_hint_per_batch);
369
-
370
- PERF_TIMER_START(write_pre_and_post_process_time);
371
- }
372
-
373
- if (write_thread_.CompleteParallelMemTableWriter(&w)) {
374
- // we're responsible for exit batch group
375
- // TODO(myabandeh): propagate status to write_group
376
- auto last_sequence = w.write_group->last_sequence;
377
- for (auto* tmp_w : *(w.write_group)) {
378
- assert(tmp_w);
379
- if (tmp_w->post_memtable_callback) {
380
- Status tmp_s =
381
- (*tmp_w->post_memtable_callback)(last_sequence, disable_memtable);
382
- // TODO: propagate the execution status of post_memtable_callback to
383
- // caller.
384
- assert(tmp_s.ok());
385
- }
386
- }
387
- versions_->SetLastSequence(last_sequence);
388
- MemTableInsertStatusCheck(w.status);
389
- write_thread_.ExitAsBatchGroupFollower(&w);
390
- }
391
- assert(w.state == WriteThread::STATE_COMPLETED);
392
- // STATE_COMPLETED conditional below handles exit
393
- }
394
- if (w.state == WriteThread::STATE_COMPLETED) {
395
- if (log_used != nullptr) {
396
- *log_used = w.log_used;
397
- }
398
- if (seq_used != nullptr) {
399
- *seq_used = w.sequence;
400
- }
401
- // write is complete and leader has updated sequence
402
- return w.FinalStatus();
403
- }
404
- // else we are the leader of the write batch group
405
- assert(w.state == WriteThread::STATE_GROUP_LEADER);
406
- Status status;
407
- // Once reaches this point, the current writer "w" will try to do its write
408
- // job. It may also pick up some of the remaining writers in the "writers_"
409
- // when it finds suitable, and finish them in the same write batch.
410
- // This is how a write job could be done by the other writer.
411
- WriteContext write_context;
412
- LogContext log_context(write_options.sync);
413
- WriteThread::WriteGroup write_group;
414
- bool in_parallel_group = false;
415
- uint64_t last_sequence = kMaxSequenceNumber;
416
-
417
- assert(!two_write_queues_ || !disable_memtable);
418
- {
419
- // With concurrent writes we do preprocess only in the write thread that
420
- // also does write to memtable to avoid sync issue on shared data structure
421
- // with the other thread
422
-
423
- // PreprocessWrite does its own perf timing.
424
- PERF_TIMER_STOP(write_pre_and_post_process_time);
425
-
426
- status = PreprocessWrite(write_options, &log_context, &write_context);
427
- if (!two_write_queues_) {
428
- // Assign it after ::PreprocessWrite since the sequence might advance
429
- // inside it by WriteRecoverableState
430
- last_sequence = versions_->LastSequence();
431
- }
432
-
433
- PERF_TIMER_START(write_pre_and_post_process_time);
434
- }
435
-
436
- // Add to log and apply to memtable. We can release the lock
437
- // during this phase since &w is currently responsible for logging
438
- // and protects against concurrent loggers and concurrent writes
439
- // into memtables
440
-
441
- TEST_SYNC_POINT("DBImpl::WriteImpl:BeforeLeaderEnters");
442
- last_batch_group_size_ =
443
- write_thread_.EnterAsBatchGroupLeader(&w, &write_group);
444
-
445
- IOStatus io_s;
446
- Status pre_release_cb_status;
447
- if (status.ok()) {
448
- // Rules for when we can update the memtable concurrently
449
- // 1. supported by memtable
450
- // 2. Puts are not okay if inplace_update_support
451
- // 3. Merges are not okay
452
- //
453
- // Rules 1..2 are enforced by checking the options
454
- // during startup (CheckConcurrentWritesSupported), so if
455
- // options.allow_concurrent_memtable_write is true then they can be
456
- // assumed to be true. Rule 3 is checked for each batch. We could
457
- // relax rules 2 if we could prevent write batches from referring
458
- // more than once to a particular key.
459
- bool parallel = immutable_db_options_.allow_concurrent_memtable_write &&
460
- write_group.size > 1;
461
- size_t total_count = 0;
462
- size_t valid_batches = 0;
463
- size_t total_byte_size = 0;
464
- size_t pre_release_callback_cnt = 0;
465
- for (auto* writer : write_group) {
466
- assert(writer);
467
- if (writer->CheckCallback(this)) {
468
- valid_batches += writer->batch_cnt;
469
- if (writer->ShouldWriteToMemtable()) {
470
- total_count += WriteBatchInternal::Count(writer->batch);
471
- total_byte_size = WriteBatchInternal::AppendedByteSize(
472
- total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
473
- parallel = parallel && !writer->batch->HasMerge();
474
- }
475
- if (writer->pre_release_callback) {
476
- pre_release_callback_cnt++;
477
- }
478
- }
479
- }
480
- // TODO: this use of operator bool on `tracer_` can avoid unnecessary lock
481
- // grabs but does not seem thread-safe.
482
- if (tracer_) {
483
- InstrumentedMutexLock lock(&trace_mutex_);
484
- if (tracer_ && tracer_->IsWriteOrderPreserved()) {
485
- for (auto* writer : write_group) {
486
- if (writer->CallbackFailed()) {
487
- continue;
488
- }
489
- // TODO: maybe handle the tracing status?
490
- tracer_->Write(writer->batch).PermitUncheckedError();
491
- }
492
- }
493
- }
494
- // Note about seq_per_batch_: either disableWAL is set for the entire write
495
- // group or not. In either case we inc seq for each write batch with no
496
- // failed callback. This means that there could be a batch with
497
- // disalbe_memtable in between; although we do not write this batch to
498
- // memtable it still consumes a seq. Otherwise, if !seq_per_batch_, we inc
499
- // the seq per valid written key to mem.
500
- size_t seq_inc = seq_per_batch_ ? valid_batches : total_count;
501
-
502
- const bool concurrent_update = two_write_queues_;
503
- // Update stats while we are an exclusive group leader, so we know
504
- // that nobody else can be writing to these particular stats.
505
- // We're optimistic, updating the stats before we successfully
506
- // commit. That lets us release our leader status early.
507
- auto stats = default_cf_internal_stats_;
508
- stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count,
509
- concurrent_update);
510
- RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
511
- stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size,
512
- concurrent_update);
513
- RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
514
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1,
515
- concurrent_update);
516
- RecordTick(stats_, WRITE_DONE_BY_SELF);
517
- auto write_done_by_other = write_group.size - 1;
518
- if (write_done_by_other > 0) {
519
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
520
- write_done_by_other, concurrent_update);
521
- RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
522
- }
523
- RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
524
-
525
- if (write_options.disableWAL) {
526
- has_unpersisted_data_.store(true, std::memory_order_relaxed);
527
- }
528
-
529
- PERF_TIMER_STOP(write_pre_and_post_process_time);
530
-
531
- if (!two_write_queues_) {
532
- if (status.ok() && !write_options.disableWAL) {
533
- assert(log_context.log_file_number_size);
534
- LogFileNumberSize& log_file_number_size =
535
- *(log_context.log_file_number_size);
536
- PERF_TIMER_GUARD(write_wal_time);
537
- io_s =
538
- WriteToWAL(write_group, log_context.writer, log_used,
539
- log_context.need_log_sync, log_context.need_log_dir_sync,
540
- last_sequence + 1, log_file_number_size);
541
- }
542
- } else {
543
- if (status.ok() && !write_options.disableWAL) {
544
- PERF_TIMER_GUARD(write_wal_time);
545
- // LastAllocatedSequence is increased inside WriteToWAL under
546
- // wal_write_mutex_ to ensure ordered events in WAL
547
- io_s = ConcurrentWriteToWAL(write_group, log_used, &last_sequence,
548
- seq_inc);
549
- } else {
550
- // Otherwise we inc seq number for memtable writes
551
- last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
552
- }
553
- }
554
- status = io_s;
555
- assert(last_sequence != kMaxSequenceNumber);
556
- const SequenceNumber current_sequence = last_sequence + 1;
557
- last_sequence += seq_inc;
558
-
559
- if (log_context.need_log_sync) {
560
- VersionEdit synced_wals;
561
- log_write_mutex_.Lock();
562
- if (status.ok()) {
563
- MarkLogsSynced(logfile_number_, log_context.need_log_dir_sync,
564
- &synced_wals);
565
- } else {
566
- MarkLogsNotSynced(logfile_number_);
567
- }
568
- log_write_mutex_.Unlock();
569
- if (status.ok() && synced_wals.IsWalAddition()) {
570
- InstrumentedMutexLock l(&mutex_);
571
- // TODO: plumb Env::IOActivity, Env::IOPriority
572
- const ReadOptions read_options;
573
- status = ApplyWALToManifest(read_options, write_options, &synced_wals);
574
- }
575
-
576
- // Requesting sync with two_write_queues_ is expected to be very rare. We
577
- // hence provide a simple implementation that is not necessarily
578
- // efficient.
579
- if (status.ok() && two_write_queues_) {
580
- if (manual_wal_flush_) {
581
- status = FlushWAL(true);
582
- } else {
583
- status = SyncWAL();
584
- }
585
- }
586
- }
587
-
588
- // PreReleaseCallback is called after WAL write and before memtable write
589
- if (status.ok()) {
590
- SequenceNumber next_sequence = current_sequence;
591
- size_t index = 0;
592
- // Note: the logic for advancing seq here must be consistent with the
593
- // logic in WriteBatchInternal::InsertInto(write_group...) as well as
594
- // with WriteBatchInternal::InsertInto(write_batch...) that is called on
595
- // the merged batch during recovery from the WAL.
596
- for (auto* writer : write_group) {
597
- if (writer->CallbackFailed()) {
598
- continue;
599
- }
600
- writer->sequence = next_sequence;
601
- if (writer->pre_release_callback) {
602
- Status ws = writer->pre_release_callback->Callback(
603
- writer->sequence, disable_memtable, writer->log_used, index++,
604
- pre_release_callback_cnt);
605
- if (!ws.ok()) {
606
- status = pre_release_cb_status = ws;
607
- break;
608
- }
609
- }
610
- if (seq_per_batch_) {
611
- assert(writer->batch_cnt);
612
- next_sequence += writer->batch_cnt;
613
- } else if (writer->ShouldWriteToMemtable()) {
614
- next_sequence += WriteBatchInternal::Count(writer->batch);
615
- }
616
- }
617
- }
618
-
619
- if (status.ok()) {
620
- PERF_TIMER_FOR_WAIT_GUARD(write_memtable_time);
621
-
622
- if (!parallel) {
623
- // w.sequence will be set inside InsertInto
624
- w.status = WriteBatchInternal::InsertInto(
625
- write_group, current_sequence, column_family_memtables_.get(),
626
- &flush_scheduler_, &trim_history_scheduler_,
627
- write_options.ignore_missing_column_families,
628
- 0 /*recovery_log_number*/, this, parallel, seq_per_batch_,
629
- batch_per_txn_);
630
- } else {
631
- write_group.last_sequence = last_sequence;
632
- write_thread_.LaunchParallelMemTableWriters(&write_group);
633
- in_parallel_group = true;
634
-
635
- // Each parallel follower is doing each own writes. The leader should
636
- // also do its own.
637
- if (w.ShouldWriteToMemtable()) {
638
- ColumnFamilyMemTablesImpl column_family_memtables(
639
- versions_->GetColumnFamilySet());
640
- assert(w.sequence == current_sequence);
641
- w.status = WriteBatchInternal::InsertInto(
642
- &w, w.sequence, &column_family_memtables, &flush_scheduler_,
643
- &trim_history_scheduler_,
644
- write_options.ignore_missing_column_families, 0 /*log_number*/,
645
- this, true /*concurrent_memtable_writes*/, seq_per_batch_,
646
- w.batch_cnt, batch_per_txn_,
647
- write_options.memtable_insert_hint_per_batch);
648
- }
649
- }
650
- if (seq_used != nullptr) {
651
- *seq_used = w.sequence;
652
- }
653
- }
654
- }
655
- PERF_TIMER_START(write_pre_and_post_process_time);
656
-
657
- if (!io_s.ok()) {
658
- // Check WriteToWAL status
659
- IOStatusCheck(io_s);
660
- }
661
- if (!w.CallbackFailed()) {
662
- if (!io_s.ok()) {
663
- assert(pre_release_cb_status.ok());
664
- } else {
665
- WriteStatusCheck(pre_release_cb_status);
666
- }
667
- } else {
668
- assert(pre_release_cb_status.ok());
669
- }
670
-
671
- bool should_exit_batch_group = true;
672
- if (in_parallel_group) {
673
- // CompleteParallelWorker returns true if this thread should
674
- // handle exit, false means somebody else did
675
- should_exit_batch_group = write_thread_.CompleteParallelMemTableWriter(&w);
676
- }
677
- if (should_exit_batch_group) {
678
- if (status.ok()) {
679
- for (auto* tmp_w : write_group) {
680
- assert(tmp_w);
681
- if (tmp_w->post_memtable_callback) {
682
- Status tmp_s =
683
- (*tmp_w->post_memtable_callback)(last_sequence, disable_memtable);
684
- // TODO: propagate the execution status of post_memtable_callback to
685
- // caller.
686
- assert(tmp_s.ok());
687
- }
688
- }
689
- // Note: if we are to resume after non-OK statuses we need to revisit how
690
- // we reacts to non-OK statuses here.
691
- versions_->SetLastSequence(last_sequence);
692
- }
693
- MemTableInsertStatusCheck(w.status);
694
- write_thread_.ExitAsBatchGroupLeader(write_group, status);
695
- }
696
-
697
- if (status.ok()) {
698
- status = w.FinalStatus();
699
- }
700
- return status;
701
- }
702
-
703
- Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
704
- WriteBatch* my_batch, WriteCallback* callback,
705
- UserWriteCallback* user_write_cb,
706
- uint64_t* log_used, uint64_t log_ref,
707
- bool disable_memtable, uint64_t* seq_used) {
708
- PERF_TIMER_GUARD(write_pre_and_post_process_time);
709
- StopWatch write_sw(immutable_db_options_.clock, stats_, DB_WRITE);
710
-
711
- WriteContext write_context;
712
-
713
- WriteThread::Writer w(write_options, my_batch, callback, user_write_cb,
714
- log_ref, disable_memtable, /*_batch_cnt=*/0,
715
- /*_pre_release_callback=*/nullptr);
716
- write_thread_.JoinBatchGroup(&w);
717
- TEST_SYNC_POINT("DBImplWrite::PipelinedWriteImpl:AfterJoinBatchGroup");
718
- if (w.state == WriteThread::STATE_GROUP_LEADER) {
719
- WriteThread::WriteGroup wal_write_group;
720
- if (w.callback && !w.callback->AllowWriteBatching()) {
721
- write_thread_.WaitForMemTableWriters();
722
- }
723
- LogContext log_context(!write_options.disableWAL && write_options.sync);
724
- // PreprocessWrite does its own perf timing.
725
- PERF_TIMER_STOP(write_pre_and_post_process_time);
726
- w.status = PreprocessWrite(write_options, &log_context, &write_context);
727
- PERF_TIMER_START(write_pre_and_post_process_time);
728
-
729
- // This can set non-OK status if callback fail.
730
- last_batch_group_size_ =
731
- write_thread_.EnterAsBatchGroupLeader(&w, &wal_write_group);
732
- const SequenceNumber current_sequence =
733
- write_thread_.UpdateLastSequence(versions_->LastSequence()) + 1;
734
- size_t total_count = 0;
735
- size_t total_byte_size = 0;
736
-
737
- if (w.status.ok()) {
738
- // TODO: this use of operator bool on `tracer_` can avoid unnecessary lock
739
- // grabs but does not seem thread-safe.
740
- if (tracer_) {
741
- InstrumentedMutexLock lock(&trace_mutex_);
742
- if (tracer_ != nullptr && tracer_->IsWriteOrderPreserved()) {
743
- for (auto* writer : wal_write_group) {
744
- // TODO: maybe handle the tracing status?
745
- tracer_->Write(writer->batch).PermitUncheckedError();
746
- }
747
- }
748
- }
749
- SequenceNumber next_sequence = current_sequence;
750
- for (auto* writer : wal_write_group) {
751
- assert(writer);
752
- if (writer->CheckCallback(this)) {
753
- if (writer->ShouldWriteToMemtable()) {
754
- writer->sequence = next_sequence;
755
- size_t count = WriteBatchInternal::Count(writer->batch);
756
- total_byte_size = WriteBatchInternal::AppendedByteSize(
757
- total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
758
- next_sequence += count;
759
- total_count += count;
760
- }
761
- }
762
- }
763
- if (w.disable_wal) {
764
- has_unpersisted_data_.store(true, std::memory_order_relaxed);
765
- }
766
- write_thread_.UpdateLastSequence(current_sequence + total_count - 1);
767
- }
768
-
769
- auto stats = default_cf_internal_stats_;
770
- stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count);
771
- RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
772
- stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size);
773
- RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
774
- RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
775
-
776
- PERF_TIMER_STOP(write_pre_and_post_process_time);
777
-
778
- IOStatus io_s;
779
- io_s.PermitUncheckedError(); // Allow io_s to be uninitialized
780
-
781
- if (w.status.ok() && !write_options.disableWAL) {
782
- PERF_TIMER_GUARD(write_wal_time);
783
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
784
- RecordTick(stats_, WRITE_DONE_BY_SELF, 1);
785
- if (wal_write_group.size > 1) {
786
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
787
- wal_write_group.size - 1);
788
- RecordTick(stats_, WRITE_DONE_BY_OTHER, wal_write_group.size - 1);
789
- }
790
- assert(log_context.log_file_number_size);
791
- LogFileNumberSize& log_file_number_size =
792
- *(log_context.log_file_number_size);
793
- io_s =
794
- WriteToWAL(wal_write_group, log_context.writer, log_used,
795
- log_context.need_log_sync, log_context.need_log_dir_sync,
796
- current_sequence, log_file_number_size);
797
- w.status = io_s;
798
- }
799
-
800
- if (!io_s.ok()) {
801
- // Check WriteToWAL status
802
- IOStatusCheck(io_s);
803
- } else if (!w.CallbackFailed()) {
804
- WriteStatusCheck(w.status);
805
- }
806
-
807
- VersionEdit synced_wals;
808
- if (log_context.need_log_sync) {
809
- InstrumentedMutexLock l(&log_write_mutex_);
810
- if (w.status.ok()) {
811
- MarkLogsSynced(logfile_number_, log_context.need_log_dir_sync,
812
- &synced_wals);
813
- } else {
814
- MarkLogsNotSynced(logfile_number_);
815
- }
816
- }
817
- if (w.status.ok() && synced_wals.IsWalAddition()) {
818
- InstrumentedMutexLock l(&mutex_);
819
- // TODO: plumb Env::IOActivity, Env::IOPriority
820
- const ReadOptions read_options;
821
- w.status = ApplyWALToManifest(read_options, write_options, &synced_wals);
822
- }
823
- write_thread_.ExitAsBatchGroupLeader(wal_write_group, w.status);
824
- }
825
-
826
- // NOTE: the memtable_write_group is declared before the following
827
- // `if` statement because its lifetime needs to be longer
828
- // that the inner context of the `if` as a reference to it
829
- // may be used further below within the outer _write_thread
830
- WriteThread::WriteGroup memtable_write_group;
831
-
832
- if (w.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) {
833
- PERF_TIMER_FOR_WAIT_GUARD(write_memtable_time);
834
- assert(w.ShouldWriteToMemtable());
835
- write_thread_.EnterAsMemTableWriter(&w, &memtable_write_group);
836
- if (memtable_write_group.size > 1 &&
837
- immutable_db_options_.allow_concurrent_memtable_write) {
838
- write_thread_.LaunchParallelMemTableWriters(&memtable_write_group);
839
- } else {
840
- memtable_write_group.status = WriteBatchInternal::InsertInto(
841
- memtable_write_group, w.sequence, column_family_memtables_.get(),
842
- &flush_scheduler_, &trim_history_scheduler_,
843
- write_options.ignore_missing_column_families, 0 /*log_number*/, this,
844
- false /*concurrent_memtable_writes*/, seq_per_batch_, batch_per_txn_);
845
- versions_->SetLastSequence(memtable_write_group.last_sequence);
846
- write_thread_.ExitAsMemTableWriter(&w, memtable_write_group);
847
- }
848
- } else {
849
- // NOTE: the memtable_write_group is never really used,
850
- // so we need to set its status to pass ASSERT_STATUS_CHECKED
851
- memtable_write_group.status.PermitUncheckedError();
852
- }
853
- if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_CALLER) {
854
- write_thread_.SetMemWritersEachStride(&w);
855
- }
856
- if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
857
- PERF_TIMER_STOP(write_pre_and_post_process_time);
858
- PERF_TIMER_FOR_WAIT_GUARD(write_memtable_time);
859
-
860
- assert(w.ShouldWriteToMemtable());
861
- ColumnFamilyMemTablesImpl column_family_memtables(
862
- versions_->GetColumnFamilySet());
863
- w.status = WriteBatchInternal::InsertInto(
864
- &w, w.sequence, &column_family_memtables, &flush_scheduler_,
865
- &trim_history_scheduler_, write_options.ignore_missing_column_families,
866
- 0 /*log_number*/, this, true /*concurrent_memtable_writes*/,
867
- false /*seq_per_batch*/, 0 /*batch_cnt*/, true /*batch_per_txn*/,
868
- write_options.memtable_insert_hint_per_batch);
869
-
870
- PERF_TIMER_STOP(write_memtable_time);
871
- PERF_TIMER_START(write_pre_and_post_process_time);
872
-
873
- if (write_thread_.CompleteParallelMemTableWriter(&w)) {
874
- MemTableInsertStatusCheck(w.status);
875
- versions_->SetLastSequence(w.write_group->last_sequence);
876
- write_thread_.ExitAsMemTableWriter(&w, *w.write_group);
877
- }
878
- }
879
- if (seq_used != nullptr) {
880
- *seq_used = w.sequence;
881
- }
882
-
883
- assert(w.state == WriteThread::STATE_COMPLETED);
884
- return w.FinalStatus();
885
- }
886
-
887
- Status DBImpl::UnorderedWriteMemtable(const WriteOptions& write_options,
888
- WriteBatch* my_batch,
889
- WriteCallback* callback, uint64_t log_ref,
890
- SequenceNumber seq,
891
- const size_t sub_batch_cnt) {
892
- PERF_TIMER_GUARD(write_pre_and_post_process_time);
893
- StopWatch write_sw(immutable_db_options_.clock, stats_, DB_WRITE);
894
-
895
- WriteThread::Writer w(write_options, my_batch, callback,
896
- /*user_write_cb=*/nullptr, log_ref,
897
- false /*disable_memtable*/);
898
-
899
- if (w.CheckCallback(this) && w.ShouldWriteToMemtable()) {
900
- w.sequence = seq;
901
- size_t total_count = WriteBatchInternal::Count(my_batch);
902
- InternalStats* stats = default_cf_internal_stats_;
903
- stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count);
904
- RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
905
-
906
- PERF_TIMER_STOP(write_pre_and_post_process_time);
907
- PERF_TIMER_FOR_WAIT_GUARD(write_memtable_time);
908
-
909
- ColumnFamilyMemTablesImpl column_family_memtables(
910
- versions_->GetColumnFamilySet());
911
- w.status = WriteBatchInternal::InsertInto(
912
- &w, w.sequence, &column_family_memtables, &flush_scheduler_,
913
- &trim_history_scheduler_, write_options.ignore_missing_column_families,
914
- 0 /*log_number*/, this, true /*concurrent_memtable_writes*/,
915
- seq_per_batch_, sub_batch_cnt, true /*batch_per_txn*/,
916
- write_options.memtable_insert_hint_per_batch);
917
- if (write_options.disableWAL) {
918
- has_unpersisted_data_.store(true, std::memory_order_relaxed);
919
- }
920
-
921
- PERF_TIMER_START(write_pre_and_post_process_time);
922
- }
923
-
924
- size_t pending_cnt = pending_memtable_writes_.fetch_sub(1) - 1;
925
- if (pending_cnt == 0) {
926
- // switch_cv_ waits until pending_memtable_writes_ = 0. Locking its mutex
927
- // before notify ensures that cv is in waiting state when it is notified
928
- // thus not missing the update to pending_memtable_writes_ even though it is
929
- // not modified under the mutex.
930
- std::lock_guard<std::mutex> lck(switch_mutex_);
931
- switch_cv_.notify_all();
932
- }
933
- WriteStatusCheck(w.status);
934
-
935
- if (!w.FinalStatus().ok()) {
936
- return w.FinalStatus();
937
- }
938
- return Status::OK();
939
- }
940
-
941
- // The 2nd write queue. If enabled it will be used only for WAL-only writes.
942
- // This is the only queue that updates LastPublishedSequence which is only
943
- // applicable in a two-queue setting.
944
- Status DBImpl::WriteImplWALOnly(
945
- WriteThread* write_thread, const WriteOptions& write_options,
946
- WriteBatch* my_batch, WriteCallback* callback,
947
- UserWriteCallback* user_write_cb, uint64_t* log_used,
948
- const uint64_t log_ref, uint64_t* seq_used, const size_t sub_batch_cnt,
949
- PreReleaseCallback* pre_release_callback, const AssignOrder assign_order,
950
- const PublishLastSeq publish_last_seq, const bool disable_memtable) {
951
- PERF_TIMER_GUARD(write_pre_and_post_process_time);
952
- WriteThread::Writer w(write_options, my_batch, callback, user_write_cb,
953
- log_ref, disable_memtable, sub_batch_cnt,
954
- pre_release_callback);
955
- StopWatch write_sw(immutable_db_options_.clock, stats_, DB_WRITE);
956
-
957
- write_thread->JoinBatchGroup(&w);
958
- assert(w.state != WriteThread::STATE_PARALLEL_MEMTABLE_WRITER);
959
- if (w.state == WriteThread::STATE_COMPLETED) {
960
- if (log_used != nullptr) {
961
- *log_used = w.log_used;
962
- }
963
- if (seq_used != nullptr) {
964
- *seq_used = w.sequence;
965
- }
966
- return w.FinalStatus();
967
- }
968
- // else we are the leader of the write batch group
969
- assert(w.state == WriteThread::STATE_GROUP_LEADER);
970
-
971
- if (publish_last_seq == kDoPublishLastSeq) {
972
- // Currently we only use kDoPublishLastSeq in unordered_write
973
- assert(immutable_db_options_.unordered_write);
974
-
975
- // TODO(myabandeh): Make preliminary checks thread-safe so we could do them
976
- // without paying the cost of obtaining the mutex.
977
- LogContext log_context;
978
- WriteContext write_context;
979
- Status status =
980
- PreprocessWrite(write_options, &log_context, &write_context);
981
- WriteStatusCheckOnLocked(status);
982
-
983
- if (!status.ok()) {
984
- WriteThread::WriteGroup write_group;
985
- write_thread->EnterAsBatchGroupLeader(&w, &write_group);
986
- write_thread->ExitAsBatchGroupLeader(write_group, status);
987
- return status;
988
- }
989
- } else {
990
- PERF_TIMER_STOP(write_pre_and_post_process_time);
991
- PERF_TIMER_FOR_WAIT_GUARD(write_delay_time);
992
- InstrumentedMutexLock lock(&mutex_);
993
- Status status =
994
- DelayWrite(/*num_bytes=*/0ull, *write_thread, write_options);
995
- PERF_TIMER_STOP(write_delay_time);
996
- PERF_TIMER_START(write_pre_and_post_process_time);
997
- if (!status.ok()) {
998
- WriteThread::WriteGroup write_group;
999
- write_thread->EnterAsBatchGroupLeader(&w, &write_group);
1000
- write_thread->ExitAsBatchGroupLeader(write_group, status);
1001
- return status;
1002
- }
1003
- }
1004
-
1005
- WriteThread::WriteGroup write_group;
1006
- uint64_t last_sequence;
1007
- write_thread->EnterAsBatchGroupLeader(&w, &write_group);
1008
- // Note: no need to update last_batch_group_size_ here since the batch writes
1009
- // to WAL only
1010
- // TODO: this use of operator bool on `tracer_` can avoid unnecessary lock
1011
- // grabs but does not seem thread-safe.
1012
- if (tracer_) {
1013
- InstrumentedMutexLock lock(&trace_mutex_);
1014
- if (tracer_ != nullptr && tracer_->IsWriteOrderPreserved()) {
1015
- for (auto* writer : write_group) {
1016
- // TODO: maybe handle the tracing status?
1017
- tracer_->Write(writer->batch).PermitUncheckedError();
1018
- }
1019
- }
1020
- }
1021
-
1022
- size_t pre_release_callback_cnt = 0;
1023
- size_t total_byte_size = 0;
1024
- for (auto* writer : write_group) {
1025
- assert(writer);
1026
- if (writer->CheckCallback(this)) {
1027
- total_byte_size = WriteBatchInternal::AppendedByteSize(
1028
- total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
1029
- if (writer->pre_release_callback) {
1030
- pre_release_callback_cnt++;
1031
- }
1032
- }
1033
- }
1034
-
1035
- const bool concurrent_update = true;
1036
- // Update stats while we are an exclusive group leader, so we know
1037
- // that nobody else can be writing to these particular stats.
1038
- // We're optimistic, updating the stats before we successfully
1039
- // commit. That lets us release our leader status early.
1040
- auto stats = default_cf_internal_stats_;
1041
- stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size,
1042
- concurrent_update);
1043
- RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
1044
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1,
1045
- concurrent_update);
1046
- RecordTick(stats_, WRITE_DONE_BY_SELF);
1047
- auto write_done_by_other = write_group.size - 1;
1048
- if (write_done_by_other > 0) {
1049
- stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
1050
- write_done_by_other, concurrent_update);
1051
- RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
1052
- }
1053
- RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
1054
-
1055
- PERF_TIMER_STOP(write_pre_and_post_process_time);
1056
-
1057
- PERF_TIMER_GUARD(write_wal_time);
1058
- // LastAllocatedSequence is increased inside WriteToWAL under
1059
- // wal_write_mutex_ to ensure ordered events in WAL
1060
- size_t seq_inc = 0 /* total_count */;
1061
- if (assign_order == kDoAssignOrder) {
1062
- size_t total_batch_cnt = 0;
1063
- for (auto* writer : write_group) {
1064
- assert(writer->batch_cnt || !seq_per_batch_);
1065
- if (!writer->CallbackFailed()) {
1066
- total_batch_cnt += writer->batch_cnt;
1067
- }
1068
- }
1069
- seq_inc = total_batch_cnt;
1070
- }
1071
- Status status;
1072
- if (!write_options.disableWAL) {
1073
- IOStatus io_s =
1074
- ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
1075
- status = io_s;
1076
- // last_sequence may not be set if there is an error
1077
- // This error checking and return is moved up to avoid using uninitialized
1078
- // last_sequence.
1079
- if (!io_s.ok()) {
1080
- IOStatusCheck(io_s);
1081
- write_thread->ExitAsBatchGroupLeader(write_group, status);
1082
- return status;
1083
- }
1084
- } else {
1085
- // Otherwise we inc seq number to do solely the seq allocation
1086
- last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
1087
- }
1088
-
1089
- size_t memtable_write_cnt = 0;
1090
- auto curr_seq = last_sequence + 1;
1091
- for (auto* writer : write_group) {
1092
- if (writer->CallbackFailed()) {
1093
- continue;
1094
- }
1095
- writer->sequence = curr_seq;
1096
- if (assign_order == kDoAssignOrder) {
1097
- assert(writer->batch_cnt || !seq_per_batch_);
1098
- curr_seq += writer->batch_cnt;
1099
- }
1100
- if (!writer->disable_memtable) {
1101
- memtable_write_cnt++;
1102
- }
1103
- // else seq advances only by memtable writes
1104
- }
1105
- if (status.ok() && write_options.sync) {
1106
- assert(!write_options.disableWAL);
1107
- // Requesting sync with two_write_queues_ is expected to be very rare. We
1108
- // hance provide a simple implementation that is not necessarily efficient.
1109
- if (manual_wal_flush_) {
1110
- status = FlushWAL(true);
1111
- } else {
1112
- status = SyncWAL();
1113
- }
1114
- }
1115
- PERF_TIMER_START(write_pre_and_post_process_time);
1116
-
1117
- if (!w.CallbackFailed()) {
1118
- WriteStatusCheck(status);
1119
- }
1120
- if (status.ok()) {
1121
- size_t index = 0;
1122
- for (auto* writer : write_group) {
1123
- if (!writer->CallbackFailed() && writer->pre_release_callback) {
1124
- assert(writer->sequence != kMaxSequenceNumber);
1125
- Status ws = writer->pre_release_callback->Callback(
1126
- writer->sequence, disable_memtable, writer->log_used, index++,
1127
- pre_release_callback_cnt);
1128
- if (!ws.ok()) {
1129
- status = ws;
1130
- break;
1131
- }
1132
- }
1133
- }
1134
- }
1135
- if (publish_last_seq == kDoPublishLastSeq) {
1136
- versions_->SetLastSequence(last_sequence + seq_inc);
1137
- // Currently we only use kDoPublishLastSeq in unordered_write
1138
- assert(immutable_db_options_.unordered_write);
1139
- }
1140
- if (immutable_db_options_.unordered_write && status.ok()) {
1141
- pending_memtable_writes_ += memtable_write_cnt;
1142
- }
1143
- write_thread->ExitAsBatchGroupLeader(write_group, status);
1144
- if (status.ok()) {
1145
- status = w.FinalStatus();
1146
- }
1147
- if (seq_used != nullptr) {
1148
- *seq_used = w.sequence;
1149
- }
1150
- return status;
1151
- }
1152
-
1153
- void DBImpl::WriteStatusCheckOnLocked(const Status& status) {
1154
- // Is setting bg_error_ enough here? This will at least stop
1155
- // compaction and fail any further writes.
1156
- InstrumentedMutexLock l(&mutex_);
1157
- assert(!status.IsIOFenced() || !error_handler_.GetBGError().ok());
1158
- if (immutable_db_options_.paranoid_checks && !status.ok() &&
1159
- !status.IsBusy() && !status.IsIncomplete()) {
1160
- // Maybe change the return status to void?
1161
- error_handler_.SetBGError(status, BackgroundErrorReason::kWriteCallback);
1162
- }
1163
- }
1164
-
1165
- void DBImpl::WriteStatusCheck(const Status& status) {
1166
- // Is setting bg_error_ enough here? This will at least stop
1167
- // compaction and fail any further writes.
1168
- assert(!status.IsIOFenced() || !error_handler_.GetBGError().ok());
1169
- if (immutable_db_options_.paranoid_checks && !status.ok() &&
1170
- !status.IsBusy() && !status.IsIncomplete()) {
1171
- mutex_.Lock();
1172
- // Maybe change the return status to void?
1173
- error_handler_.SetBGError(status, BackgroundErrorReason::kWriteCallback);
1174
- mutex_.Unlock();
1175
- }
1176
- }
1177
-
1178
- void DBImpl::IOStatusCheck(const IOStatus& io_status) {
1179
- // Is setting bg_error_ enough here? This will at least stop
1180
- // compaction and fail any further writes.
1181
- if ((immutable_db_options_.paranoid_checks && !io_status.ok() &&
1182
- !io_status.IsBusy() && !io_status.IsIncomplete()) ||
1183
- io_status.IsIOFenced()) {
1184
- mutex_.Lock();
1185
- // Maybe change the return status to void?
1186
- error_handler_.SetBGError(io_status, BackgroundErrorReason::kWriteCallback);
1187
- mutex_.Unlock();
1188
- } else {
1189
- // Force writable file to be continue writable.
1190
- logs_.back().writer->file()->reset_seen_error();
1191
- }
1192
- }
1193
-
1194
- void DBImpl::MemTableInsertStatusCheck(const Status& status) {
1195
- // A non-OK status here indicates that the state implied by the
1196
- // WAL has diverged from the in-memory state. This could be
1197
- // because of a corrupt write_batch (very bad), or because the
1198
- // client specified an invalid column family and didn't specify
1199
- // ignore_missing_column_families.
1200
- if (!status.ok()) {
1201
- mutex_.Lock();
1202
- assert(!error_handler_.IsBGWorkStopped());
1203
- // Maybe change the return status to void?
1204
- error_handler_.SetBGError(status, BackgroundErrorReason::kMemTable);
1205
- mutex_.Unlock();
1206
- }
1207
- }
1208
-
1209
- Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
1210
- LogContext* log_context,
1211
- WriteContext* write_context) {
1212
- assert(write_context != nullptr && log_context != nullptr);
1213
- Status status;
1214
-
1215
- if (error_handler_.IsDBStopped()) {
1216
- InstrumentedMutexLock l(&mutex_);
1217
- status = error_handler_.GetBGError();
1218
- }
1219
-
1220
- PERF_TIMER_GUARD(write_scheduling_flushes_compactions_time);
1221
-
1222
- if (UNLIKELY(status.ok() && total_log_size_ > GetMaxTotalWalSize())) {
1223
- assert(versions_);
1224
- InstrumentedMutexLock l(&mutex_);
1225
- const ColumnFamilySet* const column_families =
1226
- versions_->GetColumnFamilySet();
1227
- assert(column_families);
1228
- size_t num_cfs = column_families->NumberOfColumnFamilies();
1229
- assert(num_cfs >= 1);
1230
- if (num_cfs > 1) {
1231
- WaitForPendingWrites();
1232
- status = SwitchWAL(write_context);
1233
- }
1234
- }
1235
-
1236
- if (UNLIKELY(status.ok() && write_buffer_manager_->ShouldFlush())) {
1237
- // Before a new memtable is added in SwitchMemtable(),
1238
- // write_buffer_manager_->ShouldFlush() will keep returning true. If another
1239
- // thread is writing to another DB with the same write buffer, they may also
1240
- // be flushed. We may end up with flushing much more DBs than needed. It's
1241
- // suboptimal but still correct.
1242
- InstrumentedMutexLock l(&mutex_);
1243
- WaitForPendingWrites();
1244
- status = HandleWriteBufferManagerFlush(write_context);
1245
- }
1246
-
1247
- if (UNLIKELY(status.ok() && !trim_history_scheduler_.Empty())) {
1248
- InstrumentedMutexLock l(&mutex_);
1249
- status = TrimMemtableHistory(write_context);
1250
- }
1251
-
1252
- if (UNLIKELY(status.ok() && !flush_scheduler_.Empty())) {
1253
- InstrumentedMutexLock l(&mutex_);
1254
- WaitForPendingWrites();
1255
- status = ScheduleFlushes(write_context);
1256
- }
1257
-
1258
- PERF_TIMER_STOP(write_scheduling_flushes_compactions_time);
1259
- PERF_TIMER_GUARD(write_pre_and_post_process_time);
1260
-
1261
- if (UNLIKELY(status.ok() && (write_controller_.IsStopped() ||
1262
- write_controller_.NeedsDelay()))) {
1263
- PERF_TIMER_STOP(write_pre_and_post_process_time);
1264
- PERF_TIMER_FOR_WAIT_GUARD(write_delay_time);
1265
- // We don't know size of curent batch so that we always use the size
1266
- // for previous one. It might create a fairness issue that expiration
1267
- // might happen for smaller writes but larger writes can go through.
1268
- // Can optimize it if it is an issue.
1269
- InstrumentedMutexLock l(&mutex_);
1270
- status = DelayWrite(last_batch_group_size_, write_thread_, write_options);
1271
- PERF_TIMER_START(write_pre_and_post_process_time);
1272
- }
1273
-
1274
- // If memory usage exceeded beyond a certain threshold,
1275
- // write_buffer_manager_->ShouldStall() returns true to all threads writing to
1276
- // all DBs and writers will be stalled.
1277
- // It does soft checking because WriteBufferManager::buffer_limit_ has already
1278
- // exceeded at this point so no new write (including current one) will go
1279
- // through until memory usage is decreased.
1280
- if (UNLIKELY(status.ok() && write_buffer_manager_->ShouldStall())) {
1281
- default_cf_internal_stats_->AddDBStats(
1282
- InternalStats::kIntStatsWriteBufferManagerLimitStopsCounts, 1,
1283
- true /* concurrent */);
1284
- if (write_options.no_slowdown) {
1285
- status = Status::Incomplete("Write stall");
1286
- } else {
1287
- InstrumentedMutexLock l(&mutex_);
1288
- WriteBufferManagerStallWrites();
1289
- }
1290
- }
1291
- InstrumentedMutexLock l(&log_write_mutex_);
1292
- if (status.ok() && log_context->need_log_sync) {
1293
- // Wait until the parallel syncs are finished. Any sync process has to sync
1294
- // the front log too so it is enough to check the status of front()
1295
- // We do a while loop since log_sync_cv_ is signalled when any sync is
1296
- // finished
1297
- // Note: there does not seem to be a reason to wait for parallel sync at
1298
- // this early step but it is not important since parallel sync (SyncWAL) and
1299
- // need_log_sync are usually not used together.
1300
- while (logs_.front().IsSyncing()) {
1301
- log_sync_cv_.Wait();
1302
- }
1303
- for (auto& log : logs_) {
1304
- // This is just to prevent the logs to be synced by a parallel SyncWAL
1305
- // call. We will do the actual syncing later after we will write to the
1306
- // WAL.
1307
- // Note: there does not seem to be a reason to set this early before we
1308
- // actually write to the WAL
1309
- log.PrepareForSync();
1310
- }
1311
- } else {
1312
- log_context->need_log_sync = false;
1313
- }
1314
- log_context->writer = logs_.back().writer;
1315
- log_context->need_log_dir_sync =
1316
- log_context->need_log_dir_sync && !log_dir_synced_;
1317
- log_context->log_file_number_size = std::addressof(alive_log_files_.back());
1318
-
1319
- return status;
1320
- }
1321
-
1322
- Status DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
1323
- WriteBatch* tmp_batch, WriteBatch** merged_batch,
1324
- size_t* write_with_wal,
1325
- WriteBatch** to_be_cached_state) {
1326
- assert(write_with_wal != nullptr);
1327
- assert(tmp_batch != nullptr);
1328
- assert(*to_be_cached_state == nullptr);
1329
- *write_with_wal = 0;
1330
- auto* leader = write_group.leader;
1331
- assert(!leader->disable_wal); // Same holds for all in the batch group
1332
- if (write_group.size == 1 && !leader->CallbackFailed() &&
1333
- leader->batch->GetWalTerminationPoint().is_cleared()) {
1334
- // we simply write the first WriteBatch to WAL if the group only
1335
- // contains one batch, that batch should be written to the WAL,
1336
- // and the batch is not wanting to be truncated
1337
- *merged_batch = leader->batch;
1338
- if (WriteBatchInternal::IsLatestPersistentState(*merged_batch)) {
1339
- *to_be_cached_state = *merged_batch;
1340
- }
1341
- *write_with_wal = 1;
1342
- } else {
1343
- // WAL needs all of the batches flattened into a single batch.
1344
- // We could avoid copying here with an iov-like AddRecord
1345
- // interface
1346
- *merged_batch = tmp_batch;
1347
- for (auto writer : write_group) {
1348
- if (!writer->CallbackFailed()) {
1349
- Status s = WriteBatchInternal::Append(*merged_batch, writer->batch,
1350
- /*WAL_only*/ true);
1351
- if (!s.ok()) {
1352
- tmp_batch->Clear();
1353
- return s;
1354
- }
1355
- if (WriteBatchInternal::IsLatestPersistentState(writer->batch)) {
1356
- // We only need to cache the last of such write batch
1357
- *to_be_cached_state = writer->batch;
1358
- }
1359
- (*write_with_wal)++;
1360
- }
1361
- }
1362
- }
1363
- // return merged_batch;
1364
- return Status::OK();
1365
- }
1366
-
1367
- // When two_write_queues_ is disabled, this function is called from the only
1368
- // write thread. Otherwise this must be called holding log_write_mutex_.
1369
- IOStatus DBImpl::WriteToWAL(const WriteBatch& merged_batch,
1370
- const WriteOptions& write_options,
1371
- log::Writer* log_writer, uint64_t* log_used,
1372
- uint64_t* log_size,
1373
- LogFileNumberSize& log_file_number_size) {
1374
- assert(log_size != nullptr);
1375
-
1376
- Slice log_entry = WriteBatchInternal::Contents(&merged_batch);
1377
- TEST_SYNC_POINT_CALLBACK("DBImpl::WriteToWAL:log_entry", &log_entry);
1378
- auto s = merged_batch.VerifyChecksum();
1379
- if (!s.ok()) {
1380
- return status_to_io_status(std::move(s));
1381
- }
1382
- *log_size = log_entry.size();
1383
- // When two_write_queues_ WriteToWAL has to be protected from concurretn calls
1384
- // from the two queues anyway and log_write_mutex_ is already held. Otherwise
1385
- // if manual_wal_flush_ is enabled we need to protect log_writer->AddRecord
1386
- // from possible concurrent calls via the FlushWAL by the application.
1387
- const bool needs_locking = manual_wal_flush_ && !two_write_queues_;
1388
- // Due to performance cocerns of missed branch prediction penalize the new
1389
- // manual_wal_flush_ feature (by UNLIKELY) instead of the more common case
1390
- // when we do not need any locking.
1391
- if (UNLIKELY(needs_locking)) {
1392
- log_write_mutex_.Lock();
1393
- }
1394
- IOStatus io_s = log_writer->MaybeAddUserDefinedTimestampSizeRecord(
1395
- write_options, versions_->GetColumnFamiliesTimestampSizeForRecord());
1396
- if (!io_s.ok()) {
1397
- return io_s;
1398
- }
1399
- io_s = log_writer->AddRecord(write_options, log_entry);
1400
-
1401
- if (UNLIKELY(needs_locking)) {
1402
- log_write_mutex_.Unlock();
1403
- }
1404
- if (log_used != nullptr) {
1405
- *log_used = logfile_number_;
1406
- }
1407
- total_log_size_ += log_entry.size();
1408
- log_file_number_size.AddSize(*log_size);
1409
- log_empty_ = false;
1410
-
1411
- return io_s;
1412
- }
1413
-
1414
- IOStatus DBImpl::WriteToWAL(const WriteThread::WriteGroup& write_group,
1415
- log::Writer* log_writer, uint64_t* log_used,
1416
- bool need_log_sync, bool need_log_dir_sync,
1417
- SequenceNumber sequence,
1418
- LogFileNumberSize& log_file_number_size) {
1419
- IOStatus io_s;
1420
- assert(!two_write_queues_);
1421
- assert(!write_group.leader->disable_wal);
1422
- // Same holds for all in the batch group
1423
- size_t write_with_wal = 0;
1424
- WriteBatch* to_be_cached_state = nullptr;
1425
- WriteBatch* merged_batch;
1426
- io_s = status_to_io_status(MergeBatch(write_group, &tmp_batch_, &merged_batch,
1427
- &write_with_wal, &to_be_cached_state));
1428
- if (UNLIKELY(!io_s.ok())) {
1429
- return io_s;
1430
- }
1431
-
1432
- if (merged_batch == write_group.leader->batch) {
1433
- write_group.leader->log_used = logfile_number_;
1434
- } else if (write_with_wal > 1) {
1435
- for (auto writer : write_group) {
1436
- writer->log_used = logfile_number_;
1437
- }
1438
- }
1439
-
1440
- WriteBatchInternal::SetSequence(merged_batch, sequence);
1441
-
1442
- uint64_t log_size;
1443
-
1444
- // TODO: plumb Env::IOActivity, Env::IOPriority
1445
- WriteOptions write_options;
1446
- write_options.rate_limiter_priority =
1447
- write_group.leader->rate_limiter_priority;
1448
- io_s = WriteToWAL(*merged_batch, write_options, log_writer, log_used,
1449
- &log_size, log_file_number_size);
1450
- if (to_be_cached_state) {
1451
- cached_recoverable_state_ = *to_be_cached_state;
1452
- cached_recoverable_state_empty_ = false;
1453
- }
1454
-
1455
- if (io_s.ok() && need_log_sync) {
1456
- StopWatch sw(immutable_db_options_.clock, stats_, WAL_FILE_SYNC_MICROS);
1457
- // It's safe to access logs_ with unlocked mutex_ here because:
1458
- // - we've set getting_synced=true for all logs,
1459
- // so other threads won't pop from logs_ while we're here,
1460
- // - only writer thread can push to logs_, and we're in
1461
- // writer thread, so no one will push to logs_,
1462
- // - as long as other threads don't modify it, it's safe to read
1463
- // from std::deque from multiple threads concurrently.
1464
- //
1465
- // Sync operation should work with locked log_write_mutex_, because:
1466
- // when DBOptions.manual_wal_flush_ is set,
1467
- // FlushWAL function will be invoked by another thread.
1468
- // if without locked log_write_mutex_, the log file may get data
1469
- // corruption
1470
-
1471
- const bool needs_locking = manual_wal_flush_ && !two_write_queues_;
1472
- if (UNLIKELY(needs_locking)) {
1473
- log_write_mutex_.Lock();
1474
- }
1475
-
1476
- if (io_s.ok()) {
1477
- for (auto& log : logs_) {
1478
- IOOptions opts;
1479
- io_s = WritableFileWriter::PrepareIOOptions(write_options, opts);
1480
- if (!io_s.ok()) {
1481
- break;
1482
- }
1483
- // If last sync failed on a later WAL, this could be a fully synced
1484
- // and closed WAL that just needs to be recorded as synced in the
1485
- // manifest.
1486
- if (auto* f = log.writer->file()) {
1487
- io_s = f->Sync(opts, immutable_db_options_.use_fsync);
1488
- if (!io_s.ok()) {
1489
- break;
1490
- }
1491
- }
1492
- }
1493
- }
1494
-
1495
- if (UNLIKELY(needs_locking)) {
1496
- log_write_mutex_.Unlock();
1497
- }
1498
-
1499
- if (io_s.ok() && need_log_dir_sync) {
1500
- // We only sync WAL directory the first time WAL syncing is
1501
- // requested, so that in case users never turn on WAL sync,
1502
- // we can avoid the disk I/O in the write code path.
1503
- io_s = directories_.GetWalDir()->FsyncWithDirOptions(
1504
- IOOptions(), nullptr,
1505
- DirFsyncOptions(DirFsyncOptions::FsyncReason::kNewFileSynced));
1506
- }
1507
- }
1508
-
1509
- if (merged_batch == &tmp_batch_) {
1510
- tmp_batch_.Clear();
1511
- }
1512
- if (io_s.ok()) {
1513
- auto stats = default_cf_internal_stats_;
1514
- if (need_log_sync) {
1515
- stats->AddDBStats(InternalStats::kIntStatsWalFileSynced, 1);
1516
- RecordTick(stats_, WAL_FILE_SYNCED);
1517
- }
1518
- stats->AddDBStats(InternalStats::kIntStatsWalFileBytes, log_size);
1519
- RecordTick(stats_, WAL_FILE_BYTES, log_size);
1520
- stats->AddDBStats(InternalStats::kIntStatsWriteWithWal, write_with_wal);
1521
- RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
1522
- for (auto* writer : write_group) {
1523
- if (!writer->CallbackFailed()) {
1524
- writer->CheckPostWalWriteCallback();
1525
- }
1526
- }
1527
- }
1528
- return io_s;
1529
- }
1530
-
1531
- IOStatus DBImpl::ConcurrentWriteToWAL(
1532
- const WriteThread::WriteGroup& write_group, uint64_t* log_used,
1533
- SequenceNumber* last_sequence, size_t seq_inc) {
1534
- IOStatus io_s;
1535
-
1536
- assert(two_write_queues_ || immutable_db_options_.unordered_write);
1537
- assert(!write_group.leader->disable_wal);
1538
- // Same holds for all in the batch group
1539
- WriteBatch tmp_batch;
1540
- size_t write_with_wal = 0;
1541
- WriteBatch* to_be_cached_state = nullptr;
1542
- WriteBatch* merged_batch;
1543
- io_s = status_to_io_status(MergeBatch(write_group, &tmp_batch, &merged_batch,
1544
- &write_with_wal, &to_be_cached_state));
1545
- if (UNLIKELY(!io_s.ok())) {
1546
- return io_s;
1547
- }
1548
-
1549
- // We need to lock log_write_mutex_ since logs_ and alive_log_files might be
1550
- // pushed back concurrently
1551
- log_write_mutex_.Lock();
1552
- if (merged_batch == write_group.leader->batch) {
1553
- write_group.leader->log_used = logfile_number_;
1554
- } else if (write_with_wal > 1) {
1555
- for (auto writer : write_group) {
1556
- writer->log_used = logfile_number_;
1557
- }
1558
- }
1559
- *last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
1560
- auto sequence = *last_sequence + 1;
1561
- WriteBatchInternal::SetSequence(merged_batch, sequence);
1562
-
1563
- log::Writer* log_writer = logs_.back().writer;
1564
- LogFileNumberSize& log_file_number_size = alive_log_files_.back();
1565
-
1566
- assert(log_writer->get_log_number() == log_file_number_size.number);
1567
-
1568
- uint64_t log_size;
1569
-
1570
- // TODO: plumb Env::IOActivity, Env::IOPriority
1571
- WriteOptions write_options;
1572
- write_options.rate_limiter_priority =
1573
- write_group.leader->rate_limiter_priority;
1574
- io_s = WriteToWAL(*merged_batch, write_options, log_writer, log_used,
1575
- &log_size, log_file_number_size);
1576
- if (to_be_cached_state) {
1577
- cached_recoverable_state_ = *to_be_cached_state;
1578
- cached_recoverable_state_empty_ = false;
1579
- }
1580
- log_write_mutex_.Unlock();
1581
-
1582
- if (io_s.ok()) {
1583
- const bool concurrent = true;
1584
- auto stats = default_cf_internal_stats_;
1585
- stats->AddDBStats(InternalStats::kIntStatsWalFileBytes, log_size,
1586
- concurrent);
1587
- RecordTick(stats_, WAL_FILE_BYTES, log_size);
1588
- stats->AddDBStats(InternalStats::kIntStatsWriteWithWal, write_with_wal,
1589
- concurrent);
1590
- RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
1591
- for (auto* writer : write_group) {
1592
- if (!writer->CallbackFailed()) {
1593
- writer->CheckPostWalWriteCallback();
1594
- }
1595
- }
1596
- }
1597
- return io_s;
1598
- }
1599
-
1600
- Status DBImpl::WriteRecoverableState() {
1601
- mutex_.AssertHeld();
1602
- if (!cached_recoverable_state_empty_) {
1603
- bool dont_care_bool;
1604
- SequenceNumber next_seq;
1605
- if (two_write_queues_) {
1606
- log_write_mutex_.Lock();
1607
- }
1608
- SequenceNumber seq;
1609
- if (two_write_queues_) {
1610
- seq = versions_->FetchAddLastAllocatedSequence(0);
1611
- } else {
1612
- seq = versions_->LastSequence();
1613
- }
1614
- WriteBatchInternal::SetSequence(&cached_recoverable_state_, seq + 1);
1615
- auto status = WriteBatchInternal::InsertInto(
1616
- &cached_recoverable_state_, column_family_memtables_.get(),
1617
- &flush_scheduler_, &trim_history_scheduler_, true,
1618
- 0 /*recovery_log_number*/, this, false /* concurrent_memtable_writes */,
1619
- &next_seq, &dont_care_bool, seq_per_batch_);
1620
- auto last_seq = next_seq - 1;
1621
- if (two_write_queues_) {
1622
- versions_->FetchAddLastAllocatedSequence(last_seq - seq);
1623
- versions_->SetLastPublishedSequence(last_seq);
1624
- }
1625
- versions_->SetLastSequence(last_seq);
1626
- if (two_write_queues_) {
1627
- log_write_mutex_.Unlock();
1628
- }
1629
- if (status.ok() && recoverable_state_pre_release_callback_) {
1630
- const bool DISABLE_MEMTABLE = true;
1631
- for (uint64_t sub_batch_seq = seq + 1;
1632
- sub_batch_seq < next_seq && status.ok(); sub_batch_seq++) {
1633
- uint64_t const no_log_num = 0;
1634
- // Unlock it since the callback might end up locking mutex. e.g.,
1635
- // AddCommitted -> AdvanceMaxEvictedSeq -> GetSnapshotListFromDB
1636
- mutex_.Unlock();
1637
- status = recoverable_state_pre_release_callback_->Callback(
1638
- sub_batch_seq, !DISABLE_MEMTABLE, no_log_num, 0, 1);
1639
- mutex_.Lock();
1640
- }
1641
- }
1642
- if (status.ok()) {
1643
- cached_recoverable_state_.Clear();
1644
- cached_recoverable_state_empty_ = true;
1645
- } else {
1646
- // FIXME: !ok status is untested
1647
- }
1648
- return status;
1649
- }
1650
- return Status::OK();
1651
- }
1652
-
1653
- void DBImpl::SelectColumnFamiliesForAtomicFlush(
1654
- autovector<ColumnFamilyData*>* selected_cfds,
1655
- const autovector<ColumnFamilyData*>& provided_candidate_cfds,
1656
- FlushReason flush_reason) {
1657
- mutex_.AssertHeld();
1658
- assert(selected_cfds);
1659
-
1660
- autovector<ColumnFamilyData*> candidate_cfds;
1661
-
1662
- // Generate candidate cfds if not provided
1663
- if (provided_candidate_cfds.empty()) {
1664
- for (ColumnFamilyData* cfd : *versions_->GetColumnFamilySet()) {
1665
- if (!cfd->IsDropped() && cfd->initialized()) {
1666
- cfd->Ref();
1667
- candidate_cfds.push_back(cfd);
1668
- }
1669
- }
1670
- } else {
1671
- candidate_cfds = provided_candidate_cfds;
1672
- }
1673
-
1674
- for (ColumnFamilyData* cfd : candidate_cfds) {
1675
- if (cfd->IsDropped()) {
1676
- continue;
1677
- }
1678
- if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
1679
- !cached_recoverable_state_empty_.load() ||
1680
- IsRecoveryFlush(flush_reason)) {
1681
- selected_cfds->push_back(cfd);
1682
- }
1683
- }
1684
-
1685
- // Unref the newly generated candidate cfds (when not provided) in
1686
- // `candidate_cfds`
1687
- if (provided_candidate_cfds.empty()) {
1688
- for (auto candidate_cfd : candidate_cfds) {
1689
- candidate_cfd->UnrefAndTryDelete();
1690
- }
1691
- }
1692
- }
1693
-
1694
- // Assign sequence number for atomic flush.
1695
- void DBImpl::AssignAtomicFlushSeq(const autovector<ColumnFamilyData*>& cfds) {
1696
- assert(immutable_db_options_.atomic_flush);
1697
- auto seq = versions_->LastSequence();
1698
- for (auto cfd : cfds) {
1699
- cfd->imm()->AssignAtomicFlushSeq(seq);
1700
- }
1701
- }
1702
-
1703
- Status DBImpl::SwitchWAL(WriteContext* write_context) {
1704
- mutex_.AssertHeld();
1705
- assert(write_context != nullptr);
1706
- Status status;
1707
-
1708
- if (alive_log_files_.begin()->getting_flushed) {
1709
- return status;
1710
- }
1711
-
1712
- auto oldest_alive_log = alive_log_files_.begin()->number;
1713
- bool flush_wont_release_oldest_log = false;
1714
- if (allow_2pc()) {
1715
- auto oldest_log_with_uncommitted_prep =
1716
- logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep();
1717
-
1718
- assert(oldest_log_with_uncommitted_prep == 0 ||
1719
- oldest_log_with_uncommitted_prep >= oldest_alive_log);
1720
- if (oldest_log_with_uncommitted_prep > 0 &&
1721
- oldest_log_with_uncommitted_prep == oldest_alive_log) {
1722
- if (unable_to_release_oldest_log_) {
1723
- // we already attempted to flush all column families dependent on
1724
- // the oldest alive log but the log still contained uncommitted
1725
- // transactions so there is still nothing that we can do.
1726
- return status;
1727
- } else {
1728
- ROCKS_LOG_WARN(
1729
- immutable_db_options_.info_log,
1730
- "Unable to release oldest log due to uncommitted transaction");
1731
- unable_to_release_oldest_log_ = true;
1732
- flush_wont_release_oldest_log = true;
1733
- }
1734
- }
1735
- }
1736
- if (!flush_wont_release_oldest_log) {
1737
- // we only mark this log as getting flushed if we have successfully
1738
- // flushed all data in this log. If this log contains outstanding prepared
1739
- // transactions then we cannot flush this log until those transactions are
1740
- // commited.
1741
- unable_to_release_oldest_log_ = false;
1742
- alive_log_files_.begin()->getting_flushed = true;
1743
- }
1744
-
1745
- ROCKS_LOG_INFO(
1746
- immutable_db_options_.info_log,
1747
- "Flushing all column families with data in WAL number %" PRIu64
1748
- ". Total log size is %" PRIu64 " while max_total_wal_size is %" PRIu64,
1749
- oldest_alive_log, total_log_size_.load(), GetMaxTotalWalSize());
1750
- // no need to refcount because drop is happening in write thread, so can't
1751
- // happen while we're in the write thread
1752
- autovector<ColumnFamilyData*> cfds;
1753
- if (immutable_db_options_.atomic_flush) {
1754
- SelectColumnFamiliesForAtomicFlush(&cfds);
1755
- } else {
1756
- for (auto cfd : *versions_->GetColumnFamilySet()) {
1757
- if (cfd->IsDropped()) {
1758
- continue;
1759
- }
1760
- if (cfd->OldestLogToKeep() <= oldest_alive_log) {
1761
- cfds.push_back(cfd);
1762
- }
1763
- }
1764
- MaybeFlushStatsCF(&cfds);
1765
- }
1766
- WriteThread::Writer nonmem_w;
1767
- if (two_write_queues_) {
1768
- nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
1769
- }
1770
-
1771
- for (const auto cfd : cfds) {
1772
- cfd->Ref();
1773
- status = SwitchMemtable(cfd, write_context);
1774
- cfd->UnrefAndTryDelete();
1775
- if (!status.ok()) {
1776
- break;
1777
- }
1778
- }
1779
- if (two_write_queues_) {
1780
- nonmem_write_thread_.ExitUnbatched(&nonmem_w);
1781
- }
1782
-
1783
- if (status.ok()) {
1784
- if (immutable_db_options_.atomic_flush) {
1785
- AssignAtomicFlushSeq(cfds);
1786
- }
1787
- for (auto cfd : cfds) {
1788
- cfd->imm()->FlushRequested();
1789
- if (!immutable_db_options_.atomic_flush) {
1790
- FlushRequest flush_req;
1791
- GenerateFlushRequest({cfd}, FlushReason::kWalFull, &flush_req);
1792
- SchedulePendingFlush(flush_req);
1793
- }
1794
- }
1795
- if (immutable_db_options_.atomic_flush) {
1796
- FlushRequest flush_req;
1797
- GenerateFlushRequest(cfds, FlushReason::kWalFull, &flush_req);
1798
- SchedulePendingFlush(flush_req);
1799
- }
1800
- MaybeScheduleFlushOrCompaction();
1801
- }
1802
- return status;
1803
- }
1804
-
1805
- Status DBImpl::HandleWriteBufferManagerFlush(WriteContext* write_context) {
1806
- mutex_.AssertHeld();
1807
- assert(write_context != nullptr);
1808
- Status status;
1809
-
1810
- // Before a new memtable is added in SwitchMemtable(),
1811
- // write_buffer_manager_->ShouldFlush() will keep returning true. If another
1812
- // thread is writing to another DB with the same write buffer, they may also
1813
- // be flushed. We may end up with flushing much more DBs than needed. It's
1814
- // suboptimal but still correct.
1815
- // no need to refcount because drop is happening in write thread, so can't
1816
- // happen while we're in the write thread
1817
- autovector<ColumnFamilyData*> cfds;
1818
- if (immutable_db_options_.atomic_flush) {
1819
- SelectColumnFamiliesForAtomicFlush(&cfds);
1820
- } else {
1821
- ColumnFamilyData* cfd_picked = nullptr;
1822
- SequenceNumber seq_num_for_cf_picked = kMaxSequenceNumber;
1823
-
1824
- for (auto cfd : *versions_->GetColumnFamilySet()) {
1825
- if (cfd->IsDropped()) {
1826
- continue;
1827
- }
1828
- if (!cfd->mem()->IsEmpty() && !cfd->imm()->IsFlushPendingOrRunning()) {
1829
- // We only consider flush on CFs with bytes in the mutable memtable,
1830
- // and no immutable memtables for which flush has yet to finish. If
1831
- // we triggered flush on CFs already trying to flush, we would risk
1832
- // creating too many immutable memtables leading to write stalls.
1833
- uint64_t seq = cfd->mem()->GetCreationSeq();
1834
- if (cfd_picked == nullptr || seq < seq_num_for_cf_picked) {
1835
- cfd_picked = cfd;
1836
- seq_num_for_cf_picked = seq;
1837
- }
1838
- }
1839
- }
1840
- if (cfd_picked != nullptr) {
1841
- cfds.push_back(cfd_picked);
1842
- }
1843
- MaybeFlushStatsCF(&cfds);
1844
- }
1845
- if (!cfds.empty()) {
1846
- ROCKS_LOG_INFO(
1847
- immutable_db_options_.info_log,
1848
- "Flushing triggered to alleviate write buffer memory usage. Write "
1849
- "buffer is using %" ROCKSDB_PRIszt
1850
- " bytes out of a total of %" ROCKSDB_PRIszt ".",
1851
- write_buffer_manager_->memory_usage(),
1852
- write_buffer_manager_->buffer_size());
1853
- }
1854
-
1855
- WriteThread::Writer nonmem_w;
1856
- if (two_write_queues_) {
1857
- nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
1858
- }
1859
- for (const auto cfd : cfds) {
1860
- if (cfd->mem()->IsEmpty()) {
1861
- continue;
1862
- }
1863
- cfd->Ref();
1864
- status = SwitchMemtable(cfd, write_context);
1865
- cfd->UnrefAndTryDelete();
1866
- if (!status.ok()) {
1867
- break;
1868
- }
1869
- }
1870
- if (two_write_queues_) {
1871
- nonmem_write_thread_.ExitUnbatched(&nonmem_w);
1872
- }
1873
-
1874
- if (status.ok()) {
1875
- if (immutable_db_options_.atomic_flush) {
1876
- AssignAtomicFlushSeq(cfds);
1877
- }
1878
- for (const auto cfd : cfds) {
1879
- cfd->imm()->FlushRequested();
1880
- if (!immutable_db_options_.atomic_flush) {
1881
- FlushRequest flush_req;
1882
- GenerateFlushRequest({cfd}, FlushReason::kWriteBufferManager,
1883
- &flush_req);
1884
- SchedulePendingFlush(flush_req);
1885
- }
1886
- }
1887
- if (immutable_db_options_.atomic_flush) {
1888
- FlushRequest flush_req;
1889
- GenerateFlushRequest(cfds, FlushReason::kWriteBufferManager, &flush_req);
1890
- SchedulePendingFlush(flush_req);
1891
- }
1892
- MaybeScheduleFlushOrCompaction();
1893
- }
1894
- return status;
1895
- }
1896
-
1897
- uint64_t DBImpl::GetMaxTotalWalSize() const {
1898
- uint64_t max_total_wal_size =
1899
- max_total_wal_size_.load(std::memory_order_acquire);
1900
- if (max_total_wal_size > 0) {
1901
- return max_total_wal_size;
1902
- }
1903
- return 4 * max_total_in_memory_state_.load(std::memory_order_acquire);
1904
- }
1905
-
1906
- // REQUIRES: mutex_ is held
1907
- // REQUIRES: this thread is currently at the leader for write_thread
1908
- Status DBImpl::DelayWrite(uint64_t num_bytes, WriteThread& write_thread,
1909
- const WriteOptions& write_options) {
1910
- mutex_.AssertHeld();
1911
- uint64_t start_time = 0;
1912
- bool delayed = false;
1913
- {
1914
- // To avoid parallel timed delays (bad throttling), only support them
1915
- // on the primary write queue.
1916
- uint64_t delay;
1917
- if (&write_thread == &write_thread_) {
1918
- delay =
1919
- write_controller_.GetDelay(immutable_db_options_.clock, num_bytes);
1920
- } else {
1921
- assert(num_bytes == 0);
1922
- delay = 0;
1923
- }
1924
- TEST_SYNC_POINT("DBImpl::DelayWrite:Start");
1925
- start_time = immutable_db_options_.clock->NowMicros();
1926
-
1927
- if (delay > 0) {
1928
- if (write_options.no_slowdown) {
1929
- return Status::Incomplete("Write stall");
1930
- }
1931
- TEST_SYNC_POINT("DBImpl::DelayWrite:Sleep");
1932
-
1933
- // Notify write_thread about the stall so it can setup a barrier and
1934
- // fail any pending writers with no_slowdown
1935
- write_thread.BeginWriteStall();
1936
- mutex_.Unlock();
1937
- TEST_SYNC_POINT("DBImpl::DelayWrite:BeginWriteStallDone");
1938
- // We will delay the write until we have slept for `delay` microseconds
1939
- // or we don't need a delay anymore. We check for cancellation every 1ms
1940
- // (slightly longer because WriteController minimum delay is 1ms, in
1941
- // case of sleep imprecision, rounding, etc.)
1942
- const uint64_t kDelayInterval = 1001;
1943
- uint64_t stall_end = start_time + delay;
1944
- while (write_controller_.NeedsDelay()) {
1945
- if (immutable_db_options_.clock->NowMicros() >= stall_end) {
1946
- // We already delayed this write `delay` microseconds
1947
- break;
1948
- }
1949
-
1950
- delayed = true;
1951
- // Sleep for 0.001 seconds
1952
- immutable_db_options_.clock->SleepForMicroseconds(kDelayInterval);
1953
- }
1954
- mutex_.Lock();
1955
- write_thread.EndWriteStall();
1956
- }
1957
-
1958
- // Don't wait if there's a background error that is not pending recovery
1959
- // since recovery might never be attempted.
1960
- while ((error_handler_.GetBGError().ok() ||
1961
- error_handler_.IsRecoveryInProgress()) &&
1962
- write_controller_.IsStopped() &&
1963
- !shutting_down_.load(std::memory_order_relaxed)) {
1964
- if (write_options.no_slowdown) {
1965
- return Status::Incomplete("Write stall");
1966
- }
1967
- delayed = true;
1968
-
1969
- // Notify write_thread about the stall so it can setup a barrier and
1970
- // fail any pending writers with no_slowdown
1971
- write_thread.BeginWriteStall();
1972
- if (&write_thread == &write_thread_) {
1973
- TEST_SYNC_POINT("DBImpl::DelayWrite:Wait");
1974
- } else {
1975
- TEST_SYNC_POINT("DBImpl::DelayWrite:NonmemWait");
1976
- }
1977
- bg_cv_.Wait();
1978
- TEST_SYNC_POINT_CALLBACK("DBImpl::DelayWrite:AfterWait", &mutex_);
1979
- write_thread.EndWriteStall();
1980
- }
1981
- }
1982
- assert(!delayed || !write_options.no_slowdown);
1983
- if (delayed) {
1984
- auto time_delayed = immutable_db_options_.clock->NowMicros() - start_time;
1985
- default_cf_internal_stats_->AddDBStats(
1986
- InternalStats::kIntStatsWriteStallMicros, time_delayed);
1987
- RecordTick(stats_, STALL_MICROS, time_delayed);
1988
- RecordInHistogram(stats_, WRITE_STALL, time_delayed);
1989
- }
1990
-
1991
- // If DB is not in read-only mode and write_controller is not stopping
1992
- // writes, we can ignore any background errors and allow the write to
1993
- // proceed
1994
- Status s;
1995
- if (write_controller_.IsStopped()) {
1996
- if (!shutting_down_.load(std::memory_order_relaxed)) {
1997
- // If writes are still stopped and db not shutdown, it means we bailed
1998
- // due to a background error
1999
- s = Status::Incomplete(error_handler_.GetBGError().ToString());
2000
- } else {
2001
- s = Status::ShutdownInProgress("stalled writes");
2002
- }
2003
- }
2004
- if (error_handler_.IsDBStopped()) {
2005
- s = error_handler_.GetBGError();
2006
- }
2007
- return s;
2008
- }
2009
-
2010
- // REQUIRES: mutex_ is held
2011
- // REQUIRES: this thread is currently at the front of the writer queue
2012
- void DBImpl::WriteBufferManagerStallWrites() {
2013
- mutex_.AssertHeld();
2014
- // First block future writer threads who want to add themselves to the queue
2015
- // of WriteThread.
2016
- write_thread_.BeginWriteStall();
2017
- mutex_.Unlock();
2018
-
2019
- // Change the state to State::Blocked.
2020
- static_cast<WBMStallInterface*>(wbm_stall_.get())
2021
- ->SetState(WBMStallInterface::State::BLOCKED);
2022
- // Then WriteBufferManager will add DB instance to its queue
2023
- // and block this thread by calling WBMStallInterface::Block().
2024
- write_buffer_manager_->BeginWriteStall(wbm_stall_.get());
2025
- wbm_stall_->Block();
2026
-
2027
- mutex_.Lock();
2028
- // Stall has ended. Signal writer threads so that they can add
2029
- // themselves to the WriteThread queue for writes.
2030
- write_thread_.EndWriteStall();
2031
- }
2032
-
2033
- Status DBImpl::ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
2034
- WriteBatch* my_batch) {
2035
- assert(write_options.low_pri);
2036
- // This is called outside the DB mutex. Although it is safe to make the call,
2037
- // the consistency condition is not guaranteed to hold. It's OK to live with
2038
- // it in this case.
2039
- // If we need to speed compaction, it means the compaction is left behind
2040
- // and we start to limit low pri writes to a limit.
2041
- if (write_controller_.NeedSpeedupCompaction()) {
2042
- if (allow_2pc() && (my_batch->HasCommit() || my_batch->HasRollback())) {
2043
- // For 2PC, we only rate limit prepare, not commit.
2044
- return Status::OK();
2045
- }
2046
- if (write_options.no_slowdown) {
2047
- return Status::Incomplete("Low priority write stall");
2048
- } else {
2049
- assert(my_batch != nullptr);
2050
- // Rate limit those writes. The reason that we don't completely wait
2051
- // is that in case the write is heavy, low pri writes may never have
2052
- // a chance to run. Now we guarantee we are still slowly making
2053
- // progress.
2054
- PERF_TIMER_FOR_WAIT_GUARD(write_delay_time);
2055
- auto data_size = my_batch->GetDataSize();
2056
- while (data_size > 0) {
2057
- size_t allowed = write_controller_.low_pri_rate_limiter()->RequestToken(
2058
- data_size, 0 /* alignment */, Env::IO_HIGH, nullptr /* stats */,
2059
- RateLimiter::OpType::kWrite);
2060
- data_size -= allowed;
2061
- }
2062
- }
2063
- }
2064
- return Status::OK();
2065
- }
2066
-
2067
- void DBImpl::MaybeFlushStatsCF(autovector<ColumnFamilyData*>* cfds) {
2068
- assert(cfds != nullptr);
2069
- if (!cfds->empty() && immutable_db_options_.persist_stats_to_disk) {
2070
- ColumnFamilyData* cfd_stats =
2071
- versions_->GetColumnFamilySet()->GetColumnFamily(
2072
- kPersistentStatsColumnFamilyName);
2073
- if (cfd_stats != nullptr && !cfd_stats->mem()->IsEmpty()) {
2074
- for (ColumnFamilyData* cfd : *cfds) {
2075
- if (cfd == cfd_stats) {
2076
- // stats CF already included in cfds
2077
- return;
2078
- }
2079
- }
2080
- // force flush stats CF when its log number is less than all other CF's
2081
- // log numbers
2082
- bool force_flush_stats_cf = true;
2083
- for (auto* loop_cfd : *versions_->GetColumnFamilySet()) {
2084
- if (loop_cfd == cfd_stats) {
2085
- continue;
2086
- }
2087
- if (loop_cfd->GetLogNumber() <= cfd_stats->GetLogNumber()) {
2088
- force_flush_stats_cf = false;
2089
- }
2090
- }
2091
- if (force_flush_stats_cf) {
2092
- cfds->push_back(cfd_stats);
2093
- ROCKS_LOG_INFO(immutable_db_options_.info_log,
2094
- "Force flushing stats CF with automated flush "
2095
- "to avoid holding old logs");
2096
- }
2097
- }
2098
- }
2099
- }
2100
-
2101
- Status DBImpl::TrimMemtableHistory(WriteContext* context) {
2102
- autovector<ColumnFamilyData*> cfds;
2103
- ColumnFamilyData* tmp_cfd;
2104
- while ((tmp_cfd = trim_history_scheduler_.TakeNextColumnFamily()) !=
2105
- nullptr) {
2106
- cfds.push_back(tmp_cfd);
2107
- }
2108
- for (auto& cfd : cfds) {
2109
- autovector<MemTable*> to_delete;
2110
- bool trimmed = cfd->imm()->TrimHistory(&context->memtables_to_free_,
2111
- cfd->mem()->MemoryAllocatedBytes());
2112
- if (trimmed) {
2113
- context->superversion_context.NewSuperVersion();
2114
- assert(context->superversion_context.new_superversion.get() != nullptr);
2115
- cfd->InstallSuperVersion(&context->superversion_context, &mutex_);
2116
- }
2117
-
2118
- if (cfd->UnrefAndTryDelete()) {
2119
- cfd = nullptr;
2120
- }
2121
- }
2122
- return Status::OK();
2123
- }
2124
-
2125
- Status DBImpl::ScheduleFlushes(WriteContext* context) {
2126
- autovector<ColumnFamilyData*> cfds;
2127
- if (immutable_db_options_.atomic_flush) {
2128
- SelectColumnFamiliesForAtomicFlush(&cfds);
2129
- for (auto cfd : cfds) {
2130
- cfd->Ref();
2131
- }
2132
- flush_scheduler_.Clear();
2133
- } else {
2134
- ColumnFamilyData* tmp_cfd;
2135
- while ((tmp_cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) {
2136
- cfds.push_back(tmp_cfd);
2137
- }
2138
- MaybeFlushStatsCF(&cfds);
2139
- }
2140
- Status status;
2141
- WriteThread::Writer nonmem_w;
2142
- if (two_write_queues_) {
2143
- nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
2144
- }
2145
-
2146
- TEST_SYNC_POINT_CALLBACK("DBImpl::ScheduleFlushes:PreSwitchMemtable",
2147
- nullptr);
2148
- for (auto& cfd : cfds) {
2149
- if (status.ok() && !cfd->mem()->IsEmpty()) {
2150
- status = SwitchMemtable(cfd, context);
2151
- }
2152
- if (cfd->UnrefAndTryDelete()) {
2153
- cfd = nullptr;
2154
- }
2155
- }
2156
-
2157
- if (two_write_queues_) {
2158
- nonmem_write_thread_.ExitUnbatched(&nonmem_w);
2159
- }
2160
-
2161
- if (status.ok()) {
2162
- if (immutable_db_options_.atomic_flush) {
2163
- AssignAtomicFlushSeq(cfds);
2164
- FlushRequest flush_req;
2165
- GenerateFlushRequest(cfds, FlushReason::kWriteBufferFull, &flush_req);
2166
- SchedulePendingFlush(flush_req);
2167
- } else {
2168
- for (auto* cfd : cfds) {
2169
- FlushRequest flush_req;
2170
- GenerateFlushRequest({cfd}, FlushReason::kWriteBufferFull, &flush_req);
2171
- SchedulePendingFlush(flush_req);
2172
- }
2173
- }
2174
- MaybeScheduleFlushOrCompaction();
2175
- }
2176
- return status;
2177
- }
2178
-
2179
- void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
2180
- const MemTableInfo& mem_table_info) {
2181
- if (immutable_db_options_.listeners.size() == 0U) {
2182
- return;
2183
- }
2184
- if (shutting_down_.load(std::memory_order_acquire)) {
2185
- return;
2186
- }
2187
-
2188
- mutex_.Unlock();
2189
- for (const auto& listener : immutable_db_options_.listeners) {
2190
- listener->OnMemTableSealed(mem_table_info);
2191
- }
2192
- mutex_.Lock();
2193
- }
2194
-
2195
- // REQUIRES: mutex_ is held
2196
- // REQUIRES: this thread is currently at the front of the writer queue
2197
- // REQUIRES: this thread is currently at the front of the 2nd writer queue if
2198
- // two_write_queues_ is true (This is to simplify the reasoning.)
2199
- Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
2200
- mutex_.AssertHeld();
2201
- assert(lock_wal_count_ == 0);
2202
-
2203
- // TODO: plumb Env::IOActivity, Env::IOPriority
2204
- const ReadOptions read_options;
2205
- const WriteOptions write_options;
2206
-
2207
- log::Writer* new_log = nullptr;
2208
- MemTable* new_mem = nullptr;
2209
- IOStatus io_s;
2210
-
2211
- // Recoverable state is persisted in WAL. After memtable switch, WAL might
2212
- // be deleted, so we write the state to memtable to be persisted as well.
2213
- Status s = WriteRecoverableState();
2214
- if (!s.ok()) {
2215
- return s;
2216
- }
2217
-
2218
- // Attempt to switch to a new memtable and trigger flush of old.
2219
- // Do this without holding the dbmutex lock.
2220
- assert(versions_->prev_log_number() == 0);
2221
- if (two_write_queues_) {
2222
- log_write_mutex_.Lock();
2223
- }
2224
- bool creating_new_log = !log_empty_;
2225
- if (two_write_queues_) {
2226
- log_write_mutex_.Unlock();
2227
- }
2228
- uint64_t recycle_log_number = 0;
2229
- // If file deletion is disabled, don't recycle logs since it'll result in
2230
- // the file getting renamed
2231
- if (creating_new_log && immutable_db_options_.recycle_log_file_num &&
2232
- !log_recycle_files_.empty() && IsFileDeletionsEnabled()) {
2233
- recycle_log_number = log_recycle_files_.front();
2234
- }
2235
- uint64_t new_log_number =
2236
- creating_new_log ? versions_->NewFileNumber() : logfile_number_;
2237
- const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
2238
-
2239
- // Set memtable_info for memtable sealed callback
2240
- MemTableInfo memtable_info;
2241
- memtable_info.cf_name = cfd->GetName();
2242
- memtable_info.first_seqno = cfd->mem()->GetFirstSequenceNumber();
2243
- memtable_info.earliest_seqno = cfd->mem()->GetEarliestSequenceNumber();
2244
- memtable_info.num_entries = cfd->mem()->num_entries();
2245
- memtable_info.num_deletes = cfd->mem()->num_deletes();
2246
- if (!cfd->ioptions()->persist_user_defined_timestamps &&
2247
- cfd->user_comparator()->timestamp_size() > 0) {
2248
- const Slice& newest_udt = cfd->mem()->GetNewestUDT();
2249
- memtable_info.newest_udt.assign(newest_udt.data(), newest_udt.size());
2250
- }
2251
- // Log this later after lock release. It may be outdated, e.g., if background
2252
- // flush happens before logging, but that should be ok.
2253
- int num_imm_unflushed = cfd->imm()->NumNotFlushed();
2254
- const auto preallocate_block_size =
2255
- GetWalPreallocateBlockSize(mutable_cf_options.write_buffer_size);
2256
- mutex_.Unlock();
2257
- if (creating_new_log) {
2258
- // TODO: Write buffer size passed in should be max of all CF's instead
2259
- // of mutable_cf_options.write_buffer_size.
2260
- io_s = CreateWAL(write_options, new_log_number, recycle_log_number,
2261
- preallocate_block_size, &new_log);
2262
- if (s.ok()) {
2263
- s = io_s;
2264
- }
2265
- }
2266
- if (s.ok()) {
2267
- SequenceNumber seq = versions_->LastSequence();
2268
- new_mem = cfd->ConstructNewMemtable(mutable_cf_options, seq);
2269
- context->superversion_context.NewSuperVersion();
2270
-
2271
- ROCKS_LOG_INFO(immutable_db_options_.info_log,
2272
- "[%s] New memtable created with log file: #%" PRIu64
2273
- ". Immutable memtables: %d.\n",
2274
- cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
2275
- // There should be no concurrent write as the thread is at the front of
2276
- // writer queue
2277
- cfd->mem()->ConstructFragmentedRangeTombstones();
2278
- }
2279
-
2280
- mutex_.Lock();
2281
- if (recycle_log_number != 0) {
2282
- // Since renaming the file is done outside DB mutex, we need to ensure
2283
- // concurrent full purges don't delete the file while we're recycling it.
2284
- // To achieve that we hold the old log number in the recyclable list until
2285
- // after it has been renamed.
2286
- assert(log_recycle_files_.front() == recycle_log_number);
2287
- log_recycle_files_.pop_front();
2288
- }
2289
- if (s.ok() && creating_new_log) {
2290
- InstrumentedMutexLock l(&log_write_mutex_);
2291
- assert(new_log != nullptr);
2292
- if (!logs_.empty()) {
2293
- // Alway flush the buffer of the last log before switching to a new one
2294
- log::Writer* cur_log_writer = logs_.back().writer;
2295
- if (error_handler_.IsRecoveryInProgress()) {
2296
- // In recovery path, we force another try of writing WAL buffer.
2297
- cur_log_writer->file()->reset_seen_error();
2298
- }
2299
- io_s = cur_log_writer->WriteBuffer(write_options);
2300
- if (s.ok()) {
2301
- s = io_s;
2302
- }
2303
- if (!s.ok()) {
2304
- ROCKS_LOG_WARN(immutable_db_options_.info_log,
2305
- "[%s] Failed to switch from #%" PRIu64 " to #%" PRIu64
2306
- " WAL file\n",
2307
- cfd->GetName().c_str(), cur_log_writer->get_log_number(),
2308
- new_log_number);
2309
- }
2310
- }
2311
- if (s.ok()) {
2312
- logfile_number_ = new_log_number;
2313
- log_empty_ = true;
2314
- log_dir_synced_ = false;
2315
- logs_.emplace_back(logfile_number_, new_log);
2316
- alive_log_files_.emplace_back(logfile_number_);
2317
- }
2318
- }
2319
-
2320
- if (!s.ok()) {
2321
- // how do we fail if we're not creating new log?
2322
- assert(creating_new_log);
2323
- delete new_mem;
2324
- delete new_log;
2325
- context->superversion_context.new_superversion.reset();
2326
- // We may have lost data from the WritableFileBuffer in-memory buffer for
2327
- // the current log, so treat it as a fatal error and set bg_error
2328
- if (!io_s.ok()) {
2329
- error_handler_.SetBGError(io_s, BackgroundErrorReason::kMemTable);
2330
- } else {
2331
- error_handler_.SetBGError(s, BackgroundErrorReason::kMemTable);
2332
- }
2333
- // Read back bg_error in order to get the right severity
2334
- s = error_handler_.GetBGError();
2335
- return s;
2336
- }
2337
-
2338
- bool empty_cf_updated = false;
2339
- if (immutable_db_options_.track_and_verify_wals_in_manifest &&
2340
- !immutable_db_options_.allow_2pc && creating_new_log) {
2341
- // In non-2pc mode, WALs become obsolete if they do not contain unflushed
2342
- // data. Updating the empty CF's log number might cause some WALs to become
2343
- // obsolete. So we should track the WAL obsoletion event before actually
2344
- // updating the empty CF's log number.
2345
- uint64_t min_wal_number_to_keep =
2346
- versions_->PreComputeMinLogNumberWithUnflushedData(logfile_number_);
2347
- if (min_wal_number_to_keep >
2348
- versions_->GetWalSet().GetMinWalNumberToKeep()) {
2349
- // Get a snapshot of the empty column families.
2350
- // LogAndApply may release and reacquire db
2351
- // mutex, during that period, column family may become empty (e.g. its
2352
- // flush succeeds), then it affects the computed min_log_number_to_keep,
2353
- // so we take a snapshot for consistency of column family data
2354
- // status. If a column family becomes non-empty afterwards, its active log
2355
- // should still be the created new log, so the min_log_number_to_keep is
2356
- // not affected.
2357
- autovector<ColumnFamilyData*> empty_cfs;
2358
- for (auto cf : *versions_->GetColumnFamilySet()) {
2359
- if (cf->IsEmpty()) {
2360
- empty_cfs.push_back(cf);
2361
- }
2362
- }
2363
-
2364
- VersionEdit wal_deletion;
2365
- wal_deletion.DeleteWalsBefore(min_wal_number_to_keep);
2366
- s = versions_->LogAndApplyToDefaultColumnFamily(
2367
- read_options, write_options, &wal_deletion, &mutex_,
2368
- directories_.GetDbDir());
2369
- if (!s.ok() && versions_->io_status().IsIOError()) {
2370
- error_handler_.SetBGError(versions_->io_status(),
2371
- BackgroundErrorReason::kManifestWrite);
2372
- }
2373
- if (!s.ok()) {
2374
- return s;
2375
- }
2376
-
2377
- for (auto cf : empty_cfs) {
2378
- if (cf->IsEmpty()) {
2379
- cf->SetLogNumber(logfile_number_);
2380
- // MEMPURGE: No need to change this, because new adds
2381
- // should still receive new sequence numbers.
2382
- cf->mem()->SetCreationSeq(versions_->LastSequence());
2383
- } // cf may become non-empty.
2384
- }
2385
- empty_cf_updated = true;
2386
- }
2387
- }
2388
- if (!empty_cf_updated) {
2389
- for (auto cf : *versions_->GetColumnFamilySet()) {
2390
- // all this is just optimization to delete logs that
2391
- // are no longer needed -- if CF is empty, that means it
2392
- // doesn't need that particular log to stay alive, so we just
2393
- // advance the log number. no need to persist this in the manifest
2394
- if (cf->IsEmpty()) {
2395
- if (creating_new_log) {
2396
- cf->SetLogNumber(logfile_number_);
2397
- }
2398
- cf->mem()->SetCreationSeq(versions_->LastSequence());
2399
- }
2400
- }
2401
- }
2402
-
2403
- cfd->mem()->SetNextLogNumber(logfile_number_);
2404
- assert(new_mem != nullptr);
2405
- cfd->imm()->Add(cfd->mem(), &context->memtables_to_free_);
2406
- new_mem->Ref();
2407
- cfd->SetMemtable(new_mem);
2408
- InstallSuperVersionAndScheduleWork(cfd, &context->superversion_context,
2409
- mutable_cf_options);
2410
-
2411
- // Notify client that memtable is sealed, now that we have successfully
2412
- // installed a new memtable
2413
- NotifyOnMemTableSealed(cfd, memtable_info);
2414
- // It is possible that we got here without checking the value of i_os, but
2415
- // that is okay. If we did, it most likely means that s was already an error.
2416
- // In any case, ignore any unchecked error for i_os here.
2417
- io_s.PermitUncheckedError();
2418
- return s;
2419
- }
2420
-
2421
- size_t DBImpl::GetWalPreallocateBlockSize(uint64_t write_buffer_size) const {
2422
- mutex_.AssertHeld();
2423
- size_t bsize =
2424
- static_cast<size_t>(write_buffer_size / 10 + write_buffer_size);
2425
- // Some users might set very high write_buffer_size and rely on
2426
- // max_total_wal_size or other parameters to control the WAL size.
2427
- if (mutable_db_options_.max_total_wal_size > 0) {
2428
- bsize = std::min<size_t>(
2429
- bsize, static_cast<size_t>(mutable_db_options_.max_total_wal_size));
2430
- }
2431
- if (immutable_db_options_.db_write_buffer_size > 0) {
2432
- bsize = std::min<size_t>(bsize, immutable_db_options_.db_write_buffer_size);
2433
- }
2434
- if (immutable_db_options_.write_buffer_manager &&
2435
- immutable_db_options_.write_buffer_manager->enabled()) {
2436
- bsize = std::min<size_t>(
2437
- bsize, immutable_db_options_.write_buffer_manager->buffer_size());
2438
- }
2439
-
2440
- return bsize;
2441
- }
2442
-
2443
- // Default implementations of convenience methods that subclasses of DB
2444
- // can call if they wish
2445
- Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2446
- const Slice& key, const Slice& value) {
2447
- // Pre-allocate size of write batch conservatively.
2448
- // 8 bytes are taken by header, 4 bytes for count, 1 byte for type,
2449
- // and we allocate 11 extra bytes for key length, as well as value length.
2450
- WriteBatch batch(key.size() + value.size() + 24, 0 /* max_bytes */,
2451
- opt.protection_bytes_per_key, 0 /* default_cf_ts_sz */);
2452
- Status s = batch.Put(column_family, key, value);
2453
- if (!s.ok()) {
2454
- return s;
2455
- }
2456
- return Write(opt, &batch);
2457
- }
2458
-
2459
- Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2460
- const Slice& key, const Slice& ts, const Slice& value) {
2461
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2462
- assert(default_cf);
2463
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2464
- assert(default_cf_ucmp);
2465
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2466
- opt.protection_bytes_per_key,
2467
- default_cf_ucmp->timestamp_size());
2468
- Status s = batch.Put(column_family, key, ts, value);
2469
- if (!s.ok()) {
2470
- return s;
2471
- }
2472
- return Write(opt, &batch);
2473
- }
2474
-
2475
- Status DB::PutEntity(const WriteOptions& options,
2476
- ColumnFamilyHandle* column_family, const Slice& key,
2477
- const WideColumns& columns) {
2478
- const ColumnFamilyHandle* const default_cf = DefaultColumnFamily();
2479
- assert(default_cf);
2480
-
2481
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2482
- assert(default_cf_ucmp);
2483
-
2484
- WriteBatch batch(/* reserved_bytes */ 0, /* max_bytes */ 0,
2485
- options.protection_bytes_per_key,
2486
- default_cf_ucmp->timestamp_size());
2487
-
2488
- const Status s = batch.PutEntity(column_family, key, columns);
2489
- if (!s.ok()) {
2490
- return s;
2491
- }
2492
-
2493
- return Write(options, &batch);
2494
- }
2495
-
2496
- Status DB::PutEntity(const WriteOptions& options, const Slice& key,
2497
- const AttributeGroups& attribute_groups) {
2498
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2499
- assert(default_cf);
2500
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2501
- assert(default_cf_ucmp);
2502
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2503
- options.protection_bytes_per_key,
2504
- default_cf_ucmp->timestamp_size());
2505
- const Status s = batch.PutEntity(key, attribute_groups);
2506
- if (!s.ok()) {
2507
- return s;
2508
- }
2509
- return Write(options, &batch);
2510
- }
2511
-
2512
- Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2513
- const Slice& key) {
2514
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2515
- opt.protection_bytes_per_key, 0 /* default_cf_ts_sz */);
2516
- Status s = batch.Delete(column_family, key);
2517
- if (!s.ok()) {
2518
- return s;
2519
- }
2520
- return Write(opt, &batch);
2521
- }
2522
-
2523
- Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2524
- const Slice& key, const Slice& ts) {
2525
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2526
- assert(default_cf);
2527
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2528
- assert(default_cf_ucmp);
2529
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2530
- opt.protection_bytes_per_key,
2531
- default_cf_ucmp->timestamp_size());
2532
- Status s = batch.Delete(column_family, key, ts);
2533
- if (!s.ok()) {
2534
- return s;
2535
- }
2536
- return Write(opt, &batch);
2537
- }
2538
-
2539
- Status DB::SingleDelete(const WriteOptions& opt,
2540
- ColumnFamilyHandle* column_family, const Slice& key) {
2541
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2542
- opt.protection_bytes_per_key, 0 /* default_cf_ts_sz */);
2543
- Status s = batch.SingleDelete(column_family, key);
2544
- if (!s.ok()) {
2545
- return s;
2546
- }
2547
- return Write(opt, &batch);
2548
- }
2549
-
2550
- Status DB::SingleDelete(const WriteOptions& opt,
2551
- ColumnFamilyHandle* column_family, const Slice& key,
2552
- const Slice& ts) {
2553
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2554
- assert(default_cf);
2555
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2556
- assert(default_cf_ucmp);
2557
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2558
- opt.protection_bytes_per_key,
2559
- default_cf_ucmp->timestamp_size());
2560
- Status s = batch.SingleDelete(column_family, key, ts);
2561
- if (!s.ok()) {
2562
- return s;
2563
- }
2564
- return Write(opt, &batch);
2565
- }
2566
-
2567
- Status DB::DeleteRange(const WriteOptions& opt,
2568
- ColumnFamilyHandle* column_family,
2569
- const Slice& begin_key, const Slice& end_key) {
2570
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2571
- opt.protection_bytes_per_key, 0 /* default_cf_ts_sz */);
2572
- Status s = batch.DeleteRange(column_family, begin_key, end_key);
2573
- if (!s.ok()) {
2574
- return s;
2575
- }
2576
- return Write(opt, &batch);
2577
- }
2578
-
2579
- Status DB::DeleteRange(const WriteOptions& opt,
2580
- ColumnFamilyHandle* column_family,
2581
- const Slice& begin_key, const Slice& end_key,
2582
- const Slice& ts) {
2583
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2584
- assert(default_cf);
2585
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2586
- assert(default_cf_ucmp);
2587
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2588
- opt.protection_bytes_per_key,
2589
- default_cf_ucmp->timestamp_size());
2590
- Status s = batch.DeleteRange(column_family, begin_key, end_key, ts);
2591
- if (!s.ok()) {
2592
- return s;
2593
- }
2594
- return Write(opt, &batch);
2595
- }
2596
-
2597
- Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2598
- const Slice& key, const Slice& value) {
2599
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2600
- opt.protection_bytes_per_key, 0 /* default_cf_ts_sz */);
2601
- Status s = batch.Merge(column_family, key, value);
2602
- if (!s.ok()) {
2603
- return s;
2604
- }
2605
- return Write(opt, &batch);
2606
- }
2607
-
2608
- Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family,
2609
- const Slice& key, const Slice& ts, const Slice& value) {
2610
- ColumnFamilyHandle* default_cf = DefaultColumnFamily();
2611
- assert(default_cf);
2612
- const Comparator* const default_cf_ucmp = default_cf->GetComparator();
2613
- assert(default_cf_ucmp);
2614
- WriteBatch batch(0 /* reserved_bytes */, 0 /* max_bytes */,
2615
- opt.protection_bytes_per_key,
2616
- default_cf_ucmp->timestamp_size());
2617
- Status s = batch.Merge(column_family, key, ts, value);
2618
- if (!s.ok()) {
2619
- return s;
2620
- }
2621
- return Write(opt, &batch);
2622
- }
2623
-
2624
- } // namespace ROCKSDB_NAMESPACE