@nxtedition/rocksdb 7.0.0 → 7.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/binding.cc +33 -40
  2. package/deps/rocksdb/rocksdb/CMakeLists.txt +1 -1
  3. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +3 -1
  4. package/deps/rocksdb/rocksdb/cache/cache_entry_roles.cc +2 -0
  5. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.cc +1 -0
  6. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.h +28 -0
  7. package/deps/rocksdb/rocksdb/cache/cache_test.cc +5 -2
  8. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.cc +48 -60
  9. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.h +18 -20
  10. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +2 -2
  11. package/deps/rocksdb/rocksdb/db/c.cc +5 -0
  12. package/deps/rocksdb/rocksdb/db/column_family.cc +20 -0
  13. package/deps/rocksdb/rocksdb/db/column_family.h +9 -0
  14. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +44 -26
  15. package/deps/rocksdb/rocksdb/db/comparator_db_test.cc +32 -14
  16. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +73 -44
  17. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +3 -1
  18. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +6 -1
  19. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +10 -5
  20. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_files.cc +47 -35
  21. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +2 -1
  22. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +54 -32
  23. package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +426 -61
  24. package/deps/rocksdb/rocksdb/db/db_options_test.cc +1 -0
  25. package/deps/rocksdb/rocksdb/db/db_test.cc +102 -24
  26. package/deps/rocksdb/rocksdb/db/db_test2.cc +159 -30
  27. package/deps/rocksdb/rocksdb/db/db_test_util.cc +1 -0
  28. package/deps/rocksdb/rocksdb/db/dbformat.h +1 -1
  29. package/deps/rocksdb/rocksdb/db/version_builder.cc +39 -10
  30. package/deps/rocksdb/rocksdb/db/version_builder.h +4 -1
  31. package/deps/rocksdb/rocksdb/db/version_edit.h +20 -0
  32. package/deps/rocksdb/rocksdb/db/version_set.cc +2 -1
  33. package/deps/rocksdb/rocksdb/db/version_set.h +17 -2
  34. package/deps/rocksdb/rocksdb/db/version_set_test.cc +119 -0
  35. package/deps/rocksdb/rocksdb/db/write_batch.cc +96 -0
  36. package/deps/rocksdb/rocksdb/db/write_batch_internal.h +4 -0
  37. package/deps/rocksdb/rocksdb/db/write_thread.cc +1 -0
  38. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +3 -0
  39. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +9 -0
  40. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +18 -2
  41. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.h +4 -0
  42. package/deps/rocksdb/rocksdb/db_stress_tool/multi_ops_txns_stress.cc +12 -0
  43. package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +1 -1
  44. package/deps/rocksdb/rocksdb/env/fs_posix.cc +96 -6
  45. package/deps/rocksdb/rocksdb/env/io_posix.cc +51 -18
  46. package/deps/rocksdb/rocksdb/env/io_posix.h +2 -0
  47. package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.cc +12 -5
  48. package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.h +22 -6
  49. package/deps/rocksdb/rocksdb/file/prefetch_test.cc +99 -8
  50. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +9 -1
  51. package/deps/rocksdb/rocksdb/include/rocksdb/c.h +3 -0
  52. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +3 -0
  53. package/deps/rocksdb/rocksdb/include/rocksdb/comparator.h +4 -0
  54. package/deps/rocksdb/rocksdb/include/rocksdb/file_system.h +1 -1
  55. package/deps/rocksdb/rocksdb/include/rocksdb/io_status.h +7 -0
  56. package/deps/rocksdb/rocksdb/include/rocksdb/options.h +11 -1
  57. package/deps/rocksdb/rocksdb/include/rocksdb/slice_transform.h +4 -1
  58. package/deps/rocksdb/rocksdb/include/rocksdb/table.h +14 -1
  59. package/deps/rocksdb/rocksdb/include/rocksdb/write_batch.h +6 -0
  60. package/deps/rocksdb/rocksdb/options/cf_options.cc +12 -1
  61. package/deps/rocksdb/rocksdb/options/cf_options.h +2 -0
  62. package/deps/rocksdb/rocksdb/options/options.cc +8 -1
  63. package/deps/rocksdb/rocksdb/options/options_helper.cc +1 -0
  64. package/deps/rocksdb/rocksdb/options/options_parser.cc +2 -1
  65. package/deps/rocksdb/rocksdb/options/options_settable_test.cc +7 -2
  66. package/deps/rocksdb/rocksdb/options/options_test.cc +52 -0
  67. package/deps/rocksdb/rocksdb/port/port_posix.h +10 -1
  68. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +1 -1
  69. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.cc +1 -1
  70. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +1 -1
  71. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +5 -5
  72. package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.cc +16 -10
  73. package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.h +1 -1
  74. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.cc +1 -1
  75. package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_iterator.cc +4 -4
  76. package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_reader.cc +1 -1
  77. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +39 -12
  78. package/deps/rocksdb/rocksdb/util/comparator.cc +10 -0
  79. package/deps/rocksdb/rocksdb/util/ribbon_alg.h +1 -1
  80. package/deps/rocksdb/rocksdb/util/xxhash.h +2 -1
  81. package/index.js +2 -2
  82. package/package.json +1 -1
  83. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  84. package/prebuilds/linux-x64/node.napi.node +0 -0
  85. package/prebuilds/darwin-x64/node.napi.node +0 -0
@@ -533,15 +533,18 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
533
533
  }
534
534
  PERF_TIMER_START(write_pre_and_post_process_time);
535
535
 
536
+ if (!io_s.ok()) {
537
+ // Check WriteToWAL status
538
+ IOStatusCheck(io_s);
539
+ }
536
540
  if (!w.CallbackFailed()) {
537
541
  if (!io_s.ok()) {
538
542
  assert(pre_release_cb_status.ok());
539
- IOStatusCheck(io_s);
540
543
  } else {
541
544
  WriteStatusCheck(pre_release_cb_status);
542
545
  }
543
546
  } else {
544
- assert(io_s.ok() && pre_release_cb_status.ok());
547
+ assert(pre_release_cb_status.ok());
545
548
  }
546
549
 
547
550
  if (need_log_sync) {
@@ -695,12 +698,11 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
695
698
  w.status = io_s;
696
699
  }
697
700
 
698
- if (!w.CallbackFailed()) {
699
- if (!io_s.ok()) {
700
- IOStatusCheck(io_s);
701
- } else {
702
- WriteStatusCheck(w.status);
703
- }
701
+ if (!io_s.ok()) {
702
+ // Check WriteToWAL status
703
+ IOStatusCheck(io_s);
704
+ } else if (!w.CallbackFailed()) {
705
+ WriteStatusCheck(w.status);
704
706
  }
705
707
 
706
708
  if (need_log_sync) {
@@ -936,11 +938,18 @@ Status DBImpl::WriteImplWALOnly(
936
938
  seq_inc = total_batch_cnt;
937
939
  }
938
940
  Status status;
939
- IOStatus io_s;
940
- io_s.PermitUncheckedError(); // Allow io_s to be uninitialized
941
941
  if (!write_options.disableWAL) {
942
- io_s = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
942
+ IOStatus io_s =
943
+ ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
943
944
  status = io_s;
945
+ // last_sequence may not be set if there is an error
946
+ // This error checking and return is moved up to avoid using uninitialized
947
+ // last_sequence.
948
+ if (!io_s.ok()) {
949
+ IOStatusCheck(io_s);
950
+ write_thread->ExitAsBatchGroupLeader(write_group, status);
951
+ return status;
952
+ }
944
953
  } else {
945
954
  // Otherwise we inc seq number to do solely the seq allocation
946
955
  last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
@@ -975,11 +984,7 @@ Status DBImpl::WriteImplWALOnly(
975
984
  PERF_TIMER_START(write_pre_and_post_process_time);
976
985
 
977
986
  if (!w.CallbackFailed()) {
978
- if (!io_s.ok()) {
979
- IOStatusCheck(io_s);
980
- } else {
981
- WriteStatusCheck(status);
982
- }
987
+ WriteStatusCheck(status);
983
988
  }
984
989
  if (status.ok()) {
985
990
  size_t index = 0;
@@ -1171,13 +1176,13 @@ Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
1171
1176
  return status;
1172
1177
  }
1173
1178
 
1174
- WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
1175
- WriteBatch* tmp_batch, size_t* write_with_wal,
1176
- WriteBatch** to_be_cached_state) {
1179
+ Status DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
1180
+ WriteBatch* tmp_batch, WriteBatch** merged_batch,
1181
+ size_t* write_with_wal,
1182
+ WriteBatch** to_be_cached_state) {
1177
1183
  assert(write_with_wal != nullptr);
1178
1184
  assert(tmp_batch != nullptr);
1179
1185
  assert(*to_be_cached_state == nullptr);
1180
- WriteBatch* merged_batch = nullptr;
1181
1186
  *write_with_wal = 0;
1182
1187
  auto* leader = write_group.leader;
1183
1188
  assert(!leader->disable_wal); // Same holds for all in the batch group
@@ -1186,22 +1191,24 @@ WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
1186
1191
  // we simply write the first WriteBatch to WAL if the group only
1187
1192
  // contains one batch, that batch should be written to the WAL,
1188
1193
  // and the batch is not wanting to be truncated
1189
- merged_batch = leader->batch;
1190
- if (WriteBatchInternal::IsLatestPersistentState(merged_batch)) {
1191
- *to_be_cached_state = merged_batch;
1194
+ *merged_batch = leader->batch;
1195
+ if (WriteBatchInternal::IsLatestPersistentState(*merged_batch)) {
1196
+ *to_be_cached_state = *merged_batch;
1192
1197
  }
1193
1198
  *write_with_wal = 1;
1194
1199
  } else {
1195
1200
  // WAL needs all of the batches flattened into a single batch.
1196
1201
  // We could avoid copying here with an iov-like AddRecord
1197
1202
  // interface
1198
- merged_batch = tmp_batch;
1203
+ *merged_batch = tmp_batch;
1199
1204
  for (auto writer : write_group) {
1200
1205
  if (!writer->CallbackFailed()) {
1201
- Status s = WriteBatchInternal::Append(merged_batch, writer->batch,
1206
+ Status s = WriteBatchInternal::Append(*merged_batch, writer->batch,
1202
1207
  /*WAL_only*/ true);
1203
- // Always returns Status::OK.
1204
- assert(s.ok());
1208
+ if (!s.ok()) {
1209
+ tmp_batch->Clear();
1210
+ return s;
1211
+ }
1205
1212
  if (WriteBatchInternal::IsLatestPersistentState(writer->batch)) {
1206
1213
  // We only need to cache the last of such write batch
1207
1214
  *to_be_cached_state = writer->batch;
@@ -1210,7 +1217,8 @@ WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
1210
1217
  }
1211
1218
  }
1212
1219
  }
1213
- return merged_batch;
1220
+ // return merged_batch;
1221
+ return Status::OK();
1214
1222
  }
1215
1223
 
1216
1224
  // When two_write_queues_ is disabled, this function is called from the only
@@ -1223,6 +1231,11 @@ IOStatus DBImpl::WriteToWAL(const WriteBatch& merged_batch,
1223
1231
  assert(log_size != nullptr);
1224
1232
 
1225
1233
  Slice log_entry = WriteBatchInternal::Contents(&merged_batch);
1234
+ TEST_SYNC_POINT_CALLBACK("DBImpl::WriteToWAL:log_entry", &log_entry);
1235
+ auto s = merged_batch.VerifyChecksum();
1236
+ if (!s.ok()) {
1237
+ return status_to_io_status(std::move(s));
1238
+ }
1226
1239
  *log_size = log_entry.size();
1227
1240
  // When two_write_queues_ WriteToWAL has to be protected from concurretn calls
1228
1241
  // from the two queues anyway and log_write_mutex_ is already held. Otherwise
@@ -1260,8 +1273,13 @@ IOStatus DBImpl::WriteToWAL(const WriteThread::WriteGroup& write_group,
1260
1273
  // Same holds for all in the batch group
1261
1274
  size_t write_with_wal = 0;
1262
1275
  WriteBatch* to_be_cached_state = nullptr;
1263
- WriteBatch* merged_batch = MergeBatch(write_group, &tmp_batch_,
1264
- &write_with_wal, &to_be_cached_state);
1276
+ WriteBatch* merged_batch;
1277
+ io_s = status_to_io_status(MergeBatch(write_group, &tmp_batch_, &merged_batch,
1278
+ &write_with_wal, &to_be_cached_state));
1279
+ if (UNLIKELY(!io_s.ok())) {
1280
+ return io_s;
1281
+ }
1282
+
1265
1283
  if (merged_batch == write_group.leader->batch) {
1266
1284
  write_group.leader->log_used = logfile_number_;
1267
1285
  } else if (write_with_wal > 1) {
@@ -1351,8 +1369,12 @@ IOStatus DBImpl::ConcurrentWriteToWAL(
1351
1369
  WriteBatch tmp_batch;
1352
1370
  size_t write_with_wal = 0;
1353
1371
  WriteBatch* to_be_cached_state = nullptr;
1354
- WriteBatch* merged_batch =
1355
- MergeBatch(write_group, &tmp_batch, &write_with_wal, &to_be_cached_state);
1372
+ WriteBatch* merged_batch;
1373
+ io_s = status_to_io_status(MergeBatch(write_group, &tmp_batch, &merged_batch,
1374
+ &write_with_wal, &to_be_cached_state));
1375
+ if (UNLIKELY(!io_s.ok())) {
1376
+ return io_s;
1377
+ }
1356
1378
 
1357
1379
  // We need to lock log_write_mutex_ since logs_ and alive_log_files might be
1358
1380
  // pushed back concurrently