@nxtedition/rocksdb 5.2.35 → 5.2.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/binding.cc CHANGED
@@ -101,7 +101,7 @@ static napi_value GetProperty(napi_env env, napi_value obj, const std::string_vi
101
101
  return value;
102
102
  }
103
103
 
104
- static bool BooleanProperty(napi_env env, napi_value obj, const std::string_view& key, bool defaultValue) {
104
+ static std::optional<bool> BooleanProperty(napi_env env, napi_value obj, const std::string_view& key) {
105
105
  if (HasProperty(env, obj, key.data())) {
106
106
  const auto value = GetProperty(env, obj, key.data());
107
107
  bool result;
@@ -109,7 +109,7 @@ static bool BooleanProperty(napi_env env, napi_value obj, const std::string_view
109
109
  return result;
110
110
  }
111
111
 
112
- return defaultValue;
112
+ return {};
113
113
  }
114
114
 
115
115
  static bool EncodingIsBuffer(napi_env env, napi_value obj, const std::string_view& option) {
@@ -125,7 +125,7 @@ static bool EncodingIsBuffer(napi_env env, napi_value obj, const std::string_vie
125
125
  return false;
126
126
  }
127
127
 
128
- static uint32_t Uint32Property(napi_env env, napi_value obj, const std::string_view& key, uint32_t defaultValue) {
128
+ static std::optional<uint32_t> Uint32Property(napi_env env, napi_value obj, const std::string_view& key) {
129
129
  if (HasProperty(env, obj, key.data())) {
130
130
  const auto value = GetProperty(env, obj, key.data());
131
131
  uint32_t result;
@@ -133,10 +133,10 @@ static uint32_t Uint32Property(napi_env env, napi_value obj, const std::string_v
133
133
  return result;
134
134
  }
135
135
 
136
- return defaultValue;
136
+ return {};
137
137
  }
138
138
 
139
- static int Int32Property(napi_env env, napi_value obj, const std::string_view& key, int defaultValue) {
139
+ static std::optional<int> Int32Property(napi_env env, napi_value obj, const std::string_view& key) {
140
140
  if (HasProperty(env, obj, key.data())) {
141
141
  const auto value = GetProperty(env, obj, key.data());
142
142
  int result;
@@ -144,7 +144,7 @@ static int Int32Property(napi_env env, napi_value obj, const std::string_view& k
144
144
  return result;
145
145
  }
146
146
 
147
- return defaultValue;
147
+ return {};
148
148
  }
149
149
 
150
150
  static std::string ToString(napi_env env, napi_value from, const std::string& defaultValue = "") {
@@ -164,26 +164,11 @@ static std::string ToString(napi_env env, napi_value from, const std::string& de
164
164
  return defaultValue;
165
165
  }
166
166
 
167
- static std::string StringProperty(napi_env env,
168
- napi_value obj,
169
- const std::string_view& key,
170
- const std::string& defaultValue = "") {
171
- if (HasProperty(env, obj, key)) {
172
- napi_value value = GetProperty(env, obj, key);
173
- if (IsString(env, value)) {
174
- return ToString(env, value);
175
- }
176
- }
177
-
178
- return defaultValue;
179
- }
180
-
181
- static std::optional<std::string> RangeOption(napi_env env, napi_value opts, const std::string_view& name) {
167
+ static std::optional<std::string> StringProperty(napi_env env, napi_value opts, const std::string_view& name) {
182
168
  if (HasProperty(env, opts, name)) {
183
169
  const auto value = GetProperty(env, opts, name);
184
170
  return ToString(env, value);
185
171
  }
186
-
187
172
  return {};
188
173
  }
189
174
 
@@ -238,6 +223,16 @@ napi_status Convert(napi_env env, std::string s, bool asBuffer, napi_value& resu
238
223
  }
239
224
  }
240
225
 
226
+ napi_status Convert(napi_env env, rocksdb::PinnableSlice s, bool asBuffer, napi_value& result) {
227
+ if (asBuffer) {
228
+ auto ptr = new rocksdb::PinnableSlice(std::move(s));
229
+ return napi_create_external_buffer(env, ptr->size(), const_cast<char*>(ptr->data()), Finalize<std::string>, ptr,
230
+ &result);
231
+ } else {
232
+ return napi_create_string_utf8(env, s.data(), s.size(), &result);
233
+ }
234
+ }
235
+
241
236
  struct NapiSlice : public rocksdb::Slice {
242
237
  NapiSlice(napi_env env, napi_value from) {
243
238
  if (IsString(env, from)) {
@@ -383,21 +378,21 @@ struct BaseIterator {
383
378
  limit_(limit),
384
379
  fillCache_(fillCache) {
385
380
  if (lte) {
386
- upper_bound_ = std::make_unique<rocksdb::PinnableSlice>();
381
+ upper_bound_ = rocksdb::PinnableSlice();
387
382
  *upper_bound_->GetSelf() = std::move(*lte) + '\0';
388
383
  upper_bound_->PinSelf();
389
384
  } else if (lt) {
390
- upper_bound_ = std::make_unique<rocksdb::PinnableSlice>();
385
+ upper_bound_ = rocksdb::PinnableSlice();
391
386
  *upper_bound_->GetSelf() = std::move(*lt);
392
387
  upper_bound_->PinSelf();
393
388
  }
394
389
 
395
390
  if (gte) {
396
- lower_bound_ = std::make_unique<rocksdb::PinnableSlice>();
391
+ lower_bound_ = rocksdb::PinnableSlice();
397
392
  *lower_bound_->GetSelf() = std::move(*gte);
398
393
  lower_bound_->PinSelf();
399
394
  } else if (gt) {
400
- lower_bound_ = std::make_unique<rocksdb::PinnableSlice>();
395
+ lower_bound_ = rocksdb::PinnableSlice();
401
396
  *lower_bound_->GetSelf() = std::move(*gt) + '\0';
402
397
  lower_bound_->PinSelf();
403
398
  }
@@ -477,8 +472,8 @@ struct BaseIterator {
477
472
  iterator_.reset(database_->db_->NewIterator(options));
478
473
  }
479
474
 
480
- std::unique_ptr<rocksdb::PinnableSlice> lower_bound_;
481
- std::unique_ptr<rocksdb::PinnableSlice> upper_bound_;
475
+ std::optional<rocksdb::PinnableSlice> lower_bound_;
476
+ std::optional<rocksdb::PinnableSlice> upper_bound_;
482
477
  std::shared_ptr<const rocksdb::Snapshot> snapshot_;
483
478
  std::unique_ptr<rocksdb::Iterator> iterator_;
484
479
  const bool reverse_;
@@ -506,9 +501,7 @@ struct Iterator final : public BaseIterator {
506
501
  values_(values),
507
502
  keyAsBuffer_(keyAsBuffer),
508
503
  valueAsBuffer_(valueAsBuffer),
509
- highWaterMarkBytes_(highWaterMarkBytes),
510
- first_(true),
511
- ref_(nullptr) {}
504
+ highWaterMarkBytes_(highWaterMarkBytes) {}
512
505
 
513
506
  void Attach(napi_env env, napi_value context) {
514
507
  napi_create_reference(env, context, 1, &ref_);
@@ -527,10 +520,10 @@ struct Iterator final : public BaseIterator {
527
520
  const bool keyAsBuffer_;
528
521
  const bool valueAsBuffer_;
529
522
  const uint32_t highWaterMarkBytes_;
530
- bool first_;
523
+ bool first_ = true;
531
524
 
532
525
  private:
533
- napi_ref ref_;
526
+ napi_ref ref_ = nullptr;
534
527
  };
535
528
 
536
529
  /**
@@ -593,11 +586,10 @@ struct OpenWorker final : public Worker {
593
586
  readOnly_(readOnly),
594
587
  location_(location) {}
595
588
 
596
- rocksdb::Status Execute(Database& database) override {
589
+ rocksdb::Status Execute(Database& database) override {
597
590
  rocksdb::DB* db;
598
- const auto status = readOnly_
599
- ? rocksdb::DB::OpenForReadOnly(options_, location_, &db)
600
- : rocksdb::DB::Open(options_, location_, &db);
591
+ const auto status = readOnly_ ? rocksdb::DB::OpenForReadOnly(options_, location_, &db)
592
+ : rocksdb::DB::Open(options_, location_, &db);
601
593
  database.db_.reset(db);
602
594
  return status;
603
595
  }
@@ -613,19 +605,16 @@ NAPI_METHOD(db_open) {
613
605
 
614
606
  rocksdb::Options options;
615
607
 
616
- options.IncreaseParallelism(Uint32Property(env, argv[2], "parallelism", 4));
608
+ options.IncreaseParallelism(Uint32Property(env, argv[2], "parallelism").value_or(4));
617
609
 
618
610
  const auto location = ToString(env, argv[1]);
619
- options.create_if_missing = BooleanProperty(env, argv[2], "createIfMissing", true);
620
- options.error_if_exists = BooleanProperty(env, argv[2], "errorIfExists", false);
621
- options.compression =
622
- BooleanProperty(env, argv[2], "compression", true) ? rocksdb::kSnappyCompression : rocksdb::kNoCompression;
623
- options.max_open_files = Uint32Property(env, argv[2], "maxOpenFiles", 1000);
624
- options.max_log_file_size = Uint32Property(env, argv[2], "maxFileSize", 2 << 20);
625
- options.write_buffer_size = Uint32Property(env, argv[2], "writeBufferSize", 4 << 20);
611
+ options.create_if_missing = BooleanProperty(env, argv[2], "createIfMissing").value_or(true);
612
+ options.error_if_exists = BooleanProperty(env, argv[2], "errorIfExists").value_or(false);
613
+ options.compression = BooleanProperty(env, argv[2], "compression").value_or((true)) ? rocksdb::kSnappyCompression
614
+ : rocksdb::kNoCompression;
626
615
  options.use_adaptive_mutex = true;
627
616
 
628
- const auto infoLogLevel = StringProperty(env, argv[2], "infoLogLevel");
617
+ const auto infoLogLevel = StringProperty(env, argv[2], "infoLogLevel").value_or("");
629
618
  if (infoLogLevel.size() > 0) {
630
619
  rocksdb::InfoLogLevel lvl = {};
631
620
 
@@ -652,8 +641,8 @@ NAPI_METHOD(db_open) {
652
641
  options.info_log.reset(new NullLogger());
653
642
  }
654
643
 
655
- const auto readOnly = BooleanProperty(env, argv[2], "readOnly", false);
656
- const auto cacheSize = Uint32Property(env, argv[2], "cacheSize", 8 << 20);
644
+ const auto readOnly = BooleanProperty(env, argv[2], "readOnly").value_or(false);
645
+ const auto cacheSize = Uint32Property(env, argv[2], "cacheSize").value_or(8 << 20);
657
646
 
658
647
  rocksdb::BlockBasedTableOptions tableOptions;
659
648
 
@@ -663,8 +652,8 @@ NAPI_METHOD(db_open) {
663
652
  tableOptions.no_block_cache = true;
664
653
  }
665
654
 
666
- tableOptions.block_size = Uint32Property(env, argv[2], "blockSize", 4096);
667
- tableOptions.block_restart_interval = Uint32Property(env, argv[2], "blockRestartInterval", 16);
655
+ tableOptions.block_size = Uint32Property(env, argv[2], "blockSize").value_or(4096);
656
+ tableOptions.block_restart_interval = Uint32Property(env, argv[2], "blockRestartInterval").value_or(16);
668
657
  tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
669
658
  tableOptions.format_version = 5;
670
659
  tableOptions.checksum = rocksdb::kxxHash64;
@@ -778,7 +767,7 @@ NAPI_METHOD(db_get) {
778
767
  const auto key = ToString(env, argv[1]);
779
768
  const auto options = argv[2];
780
769
  const auto asBuffer = EncodingIsBuffer(env, options, "valueEncoding");
781
- const auto fillCache = BooleanProperty(env, options, "fillCache", true);
770
+ const auto fillCache = BooleanProperty(env, options, "fillCache").value_or(true);
782
771
  const auto callback = argv[3];
783
772
 
784
773
  NAPI_PENDING_EXCEPTION();
@@ -810,12 +799,24 @@ struct GetManyWorker final : public Worker {
810
799
  options.fill_cache = fillCache_;
811
800
  options.snapshot = snapshot_.get();
812
801
 
813
- status_ = database.db_->MultiGet(options, std::vector<rocksdb::Slice>(keys_.begin(), keys_.end()), &values_);
802
+ const auto numKeys = keys_.size();
803
+
804
+ std::vector<rocksdb::Slice> keys;
805
+ keys.reserve(keys_.size());
806
+ for (const auto& key : keys_) {
807
+ keys.emplace_back(key);
808
+ }
809
+
810
+ statuses_.resize(numKeys);
811
+ values_.resize(numKeys);
812
+
813
+ database.db_->MultiGet(options, database.db_->DefaultColumnFamily(), numKeys, keys.data(), values_.data(),
814
+ statuses_.data());
814
815
 
815
816
  keys_.clear();
816
817
  snapshot_ = nullptr;
817
818
 
818
- for (auto status : status_) {
819
+ for (auto status : statuses_) {
819
820
  if (!status.ok() && !status.IsNotFound()) {
820
821
  return status;
821
822
  }
@@ -832,7 +833,7 @@ struct GetManyWorker final : public Worker {
832
833
 
833
834
  for (size_t idx = 0; idx < size; idx++) {
834
835
  napi_value element;
835
- if (status_[idx].ok()) {
836
+ if (statuses_[idx].ok()) {
836
837
  NAPI_STATUS_RETURN(Convert(env, std::move(values_[idx]), valueAsBuffer_, element));
837
838
  } else {
838
839
  NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
@@ -841,7 +842,7 @@ struct GetManyWorker final : public Worker {
841
842
  }
842
843
 
843
844
  values_.clear();
844
- status_.clear();
845
+ statuses_.clear();
845
846
 
846
847
  napi_value argv[2];
847
848
  NAPI_STATUS_RETURN(napi_get_null(env, &argv[0]));
@@ -856,8 +857,8 @@ struct GetManyWorker final : public Worker {
856
857
 
857
858
  private:
858
859
  std::vector<std::string> keys_;
859
- std::vector<std::string> values_;
860
- std::vector<rocksdb::Status> status_;
860
+ std::vector<rocksdb::PinnableSlice> values_;
861
+ std::vector<rocksdb::Status> statuses_;
861
862
  const bool valueAsBuffer_;
862
863
  const bool fillCache_;
863
864
  std::shared_ptr<const rocksdb::Snapshot> snapshot_;
@@ -884,7 +885,7 @@ NAPI_METHOD(db_get_many) {
884
885
 
885
886
  const auto options = argv[2];
886
887
  const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding");
887
- const bool fillCache = BooleanProperty(env, options, "fillCache", true);
888
+ const bool fillCache = BooleanProperty(env, options, "fillCache").value_or(true);
888
889
  const auto callback = argv[3];
889
890
 
890
891
  NAPI_PENDING_EXCEPTION();
@@ -911,15 +912,13 @@ NAPI_METHOD(db_clear) {
911
912
  NAPI_ARGV(2);
912
913
  NAPI_DB_CONTEXT();
913
914
 
914
- const auto reverse = BooleanProperty(env, argv[1], "reverse", false);
915
- const auto limit = Int32Property(env, argv[1], "limit", -1);
915
+ const auto reverse = BooleanProperty(env, argv[1], "reverse").value_or(false);
916
+ const auto limit = Int32Property(env, argv[1], "limit").value_or(-1);
916
917
 
917
- const auto lt = RangeOption(env, argv[1], "lt");
918
- const auto lte = RangeOption(env, argv[1], "lte");
919
- const auto gt = RangeOption(env, argv[1], "gt");
920
- const auto gte = RangeOption(env, argv[1], "gte");
921
-
922
- NAPI_PENDING_EXCEPTION();
918
+ const auto lt = StringProperty(env, argv[1], "lt");
919
+ const auto lte = StringProperty(env, argv[1], "lte");
920
+ const auto gt = StringProperty(env, argv[1], "gt");
921
+ const auto gte = StringProperty(env, argv[1], "gte");
923
922
 
924
923
  // TODO (perf): Use DeleteRange.
925
924
 
@@ -984,24 +983,22 @@ NAPI_METHOD(iterator_init) {
984
983
  NAPI_DB_CONTEXT();
985
984
 
986
985
  const auto options = argv[1];
987
- const auto reverse = BooleanProperty(env, options, "reverse", false);
988
- const auto keys = BooleanProperty(env, options, "keys", true);
989
- const auto values = BooleanProperty(env, options, "values", true);
990
- const auto fillCache = BooleanProperty(env, options, "fillCache", false);
986
+ const auto reverse = BooleanProperty(env, options, "reverse").value_or(false);
987
+ const auto keys = BooleanProperty(env, options, "keys").value_or(true);
988
+ const auto values = BooleanProperty(env, options, "values").value_or(true);
989
+ const auto fillCache = BooleanProperty(env, options, "fillCache").value_or(false);
991
990
  const bool keyAsBuffer = EncodingIsBuffer(env, options, "keyEncoding");
992
991
  const bool valueAsBuffer = EncodingIsBuffer(env, options, "valueEncoding");
993
- const auto limit = Int32Property(env, options, "limit", -1);
994
- const auto highWaterMarkBytes = Uint32Property(env, options, "highWaterMarkBytes", 16 * 1024);
992
+ const auto limit = Int32Property(env, options, "limit").value_or(-1);
993
+ const auto highWaterMarkBytes = Uint32Property(env, options, "highWaterMarkBytes").value_or(16 * 1024);
995
994
 
996
- const auto lt = RangeOption(env, options, "lt");
997
- const auto lte = RangeOption(env, options, "lte");
998
- const auto gt = RangeOption(env, options, "gt");
999
- const auto gte = RangeOption(env, options, "gte");
1000
-
1001
- NAPI_PENDING_EXCEPTION();
995
+ const auto lt = StringProperty(env, options, "lt");
996
+ const auto lte = StringProperty(env, options, "lte");
997
+ const auto gt = StringProperty(env, options, "gt");
998
+ const auto gte = StringProperty(env, options, "gte");
1002
999
 
1003
- auto iterator = std::make_unique<Iterator>(database, reverse, keys, values, limit, lt, lte, gt, gte, fillCache, keyAsBuffer,
1004
- valueAsBuffer, highWaterMarkBytes);
1000
+ auto iterator = std::make_unique<Iterator>(database, reverse, keys, values, limit, lt, lte, gt, gte, fillCache,
1001
+ keyAsBuffer, valueAsBuffer, highWaterMarkBytes);
1005
1002
 
1006
1003
  napi_value result;
1007
1004
  NAPI_STATUS_THROWS(napi_create_external(env, iterator.get(), Finalize<Iterator>, iterator.get(), &result));
@@ -1158,6 +1155,8 @@ NAPI_METHOD(batch_do) {
1158
1155
 
1159
1156
  const auto key = NapiSlice(env, GetProperty(env, element, "key"));
1160
1157
 
1158
+ NAPI_PENDING_EXCEPTION();
1159
+
1161
1160
  batch.Delete(key);
1162
1161
  } else if (type == "put") {
1163
1162
  if (!HasProperty(env, element, "key"))
@@ -1168,12 +1167,12 @@ NAPI_METHOD(batch_do) {
1168
1167
  const auto key = NapiSlice(env, GetProperty(env, element, "key"));
1169
1168
  const auto value = NapiSlice(env, GetProperty(env, element, "value"));
1170
1169
 
1170
+ NAPI_PENDING_EXCEPTION();
1171
+
1171
1172
  batch.Put(key, value);
1172
1173
  }
1173
1174
  }
1174
1175
 
1175
- NAPI_PENDING_EXCEPTION();
1176
-
1177
1176
  rocksdb::WriteOptions options;
1178
1177
  return ToError(env, database->db_->Write(options, &batch));
1179
1178
  }
package/binding.gyp CHANGED
@@ -62,7 +62,7 @@
62
62
  ],
63
63
  "OTHER_CPLUSPLUSFLAGS": [
64
64
  "-mmacosx-version-min=10.15",
65
- "-std=c++17",
65
+ "-std=c++20",
66
66
  "-fno-omit-frame-pointer",
67
67
  "-momit-leaf-frame-pointer",
68
68
  "-arch x86_64",
@@ -0,0 +1,32 @@
1
+ ## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage
2
+
3
+ [![CircleCI Status](https://circleci.com/gh/facebook/rocksdb.svg?style=svg)](https://circleci.com/gh/facebook/rocksdb)
4
+ [![TravisCI Status](https://api.travis-ci.com/facebook/rocksdb.svg?branch=main)](https://travis-ci.com/github/facebook/rocksdb)
5
+ [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/main?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/main)
6
+ [![PPC64le Build Status](http://140-211-168-68-openstack.osuosl.org:8080/buildStatus/icon?job=rocksdb&style=plastic)](http://140-211-168-68-openstack.osuosl.org:8080/job/rocksdb)
7
+
8
+ RocksDB is developed and maintained by Facebook Database Engineering Team.
9
+ It is built on earlier work on [LevelDB](https://github.com/google/leveldb) by Sanjay Ghemawat (sanjay@google.com)
10
+ and Jeff Dean (jeff@google.com)
11
+
12
+ This code is a library that forms the core building block for a fast
13
+ key-value server, especially suited for storing data on flash drives.
14
+ It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
15
+ between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
16
+ and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
17
+ making it especially suitable for storing multiple terabytes of data in a
18
+ single database.
19
+
20
+ Start with example usage here: https://github.com/facebook/rocksdb/tree/main/examples
21
+
22
+ See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.
23
+
24
+ The public interface is in `include/`. Callers should not include or
25
+ rely on the details of any other header files in this package. Those
26
+ internal APIs may be changed without warning.
27
+
28
+ Questions and discussions are welcome on the [RocksDB Developers Public](https://www.facebook.com/groups/rocksdb.dev/) Facebook group and [email list](https://groups.google.com/g/rocksdb) on Google Groups.
29
+
30
+ ## License
31
+
32
+ RocksDB is dual-licensed under both the GPLv2 (found in the COPYING file in the root directory) and Apache 2.0 License (found in the LICENSE.Apache file in the root directory). You may select, at your option, one of the above-listed licenses.
@@ -0,0 +1,60 @@
1
+ # RocksDB Micro-Benchmark
2
+
3
+ ## Overview
4
+
5
+ RocksDB micro-benchmark is a set of tests for benchmarking a single component or simple DB operations. The test artificially generates input data and executes the same operation with it to collect and report performance metrics. As it's focusing on testing a single, well-defined operation, the result is more precise and reproducible, which also has its limitation of not representing a real production use case. The test author needs to carefully design the microbench to represent its true purpose.
6
+
7
+ The tests are based on [Google Benchmark](https://github.com/google/benchmark) library, which provides a standard framework for writing benchmarks.
8
+
9
+ ## How to Run
10
+ ### Prerequisite
11
+ Install the [Google Benchmark](https://github.com/google/benchmark) version `1.6.0` or above.
12
+
13
+ *Note: Google Benchmark `1.6.x` is incompatible with previous versions like `1.5.x`, please make sure you're using the newer version.*
14
+
15
+ ### Build and Run
16
+ With `Makefile`:
17
+ ```bash
18
+ $ DEBUG_LEVEL=0 make run_microbench
19
+ ```
20
+ Or with cmake:
21
+ ```bash
22
+ $ mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_BENCHMARK
23
+ $ make run_microbench
24
+ ```
25
+
26
+ *Note: Please run the benchmark code in release build.*
27
+ ### Run Single Test
28
+ Example:
29
+ ```bash
30
+ $ make db_basic_bench
31
+ $ ./db_basic_bench --benchmark_filter=<TEST_NAME>
32
+ ```
33
+
34
+ ## Best Practices
35
+ #### * Use the Same Test Directory Setting as Unittest
36
+ Most of the Micro-benchmark tests use the same test directory setup as unittest, so it could be overridden by:
37
+ ```bash
38
+ $ TEST_TMPDIR=/mydata/tmp/ ./db_basic_bench --benchmark_filter=<TEST_NAME>
39
+ ```
40
+ Please also follow that when designing new tests.
41
+
42
+ #### * Avoid Using Debug API
43
+ Even though micro-benchmark is a test, avoid using internal Debug API like TEST_WaitForRun() which is designed for unittest. As benchmark tests are designed for release build, don't use any of that.
44
+
45
+ #### * Pay Attention to Local Optimization
46
+ As a micro-benchmark focuses on a single component or area, make sure that component is a key contributor to overall application performance.
47
+
48
+ The compiler might optimize the code differently than it would within the whole application, and if the test input data is simple and small, it may all fit in the CPU cache, which leads to a misleading metric. Take these into consideration when designing the tests.
49
+
50
+ #### * Names of user-defined counters/metrics has to be `[A-Za-z0-9_]`
51
+ It's a restriction of the metrics collecting and reporting system RocksDB is using internally. It will also help integrate with more systems.
52
+
53
+ #### * Minimize the Metrics Variation
54
+ Try to reduce the variation in test results. One way to check this is to run the test multiple times and check the CV (Coefficient of Variation) reported by gbenchmark.
55
+ ```bash
56
+ $ ./db_basic_bench --benchmark_filter=<TEST_NAME> --benchmark_repetitions=10
57
+ ...
58
+ <TEST_NAME>_cv 3.2%
59
+ ```
60
+ RocksDB has background compaction jobs which may cause the test result to vary a lot. If the micro-benchmark is not purposely testing the operation while compaction is in progress, it should wait for the compaction to finish (`db_impl->WaitForCompact()`) or disable auto-compaction.
@@ -0,0 +1,43 @@
1
+ ## Building external plugins together with RocksDB
2
+
3
+ RocksDB offers several plugin interfaces for developers to customize its behavior. One difficulty developers face is how to make their plugin available to end users. The approach discussed here involves building the external code together with the RocksDB code into a single binary. Note another approach we plan to support involves loading plugins dynamically from shared libraries.
4
+
5
+ ### Discovery
6
+
7
+ We hope developers will mention their work in "PLUGINS.md" so users can easily discover and reuse solutions for customizing RocksDB.
8
+
9
+ ### Directory organization
10
+
11
+ External plugins will be linked according to their name into a subdirectory of "plugin/". For example, a plugin called "dedupfs" would be linked into "plugin/dedupfs/".
12
+
13
+ ### Build standard
14
+
15
+ Currently the only supported build systems are make and cmake.
16
+
17
+ For make, files in the plugin directory ending in the .mk extension can define the following variables.
18
+
19
+ * `$(PLUGIN_NAME)_SOURCES`: these files will be compiled and linked with RocksDB. They can access RocksDB public header files.
20
+ * `$(PLUGIN_NAME)_HEADERS`: these files will be installed in the RocksDB header directory. Their paths will be prefixed by "rocksdb/plugin/$(PLUGIN_NAME)/".
21
+ * `$(PLUGIN_NAME)_LDFLAGS`: these flags will be passed to the final link step. For example, library dependencies can be propagated here, or symbols can be forcibly included, e.g., for static registration.
22
+ * `$(PLUGIN_NAME)_CXXFLAGS`: these flags will be passed to the compiler. For example, they can specify locations of header files in non-standard locations.
23
+
24
+ Users will run the usual make commands from the RocksDB directory, specifying the plugins to include in a space-separated list in the variable `ROCKSDB_PLUGINS`.
25
+
26
+ For CMake, the CMakeLists.txt file in the plugin directory can define the following variables.
27
+
28
+ * `${PLUGIN_NAME}_SOURCES`: these files will be compiled and linked with RocksDB. They can access RocksDB public header files.
29
+ * `${PLUGIN_NAME}_COMPILE_FLAGS`: these flags will be passed to the compiler. For example, they can specify locations of header files in non-standard locations.
30
+ * `${PLUGIN_NAME}_INCLUDE_PATHS`: paths to directories to search for plugin-specific header files during compilation.
31
+ * `${PLUGIN_NAME}_LIBS`: list of library names required to build the plugin, e.g. `dl`, `java`, `jvm`, `rados`, etc. CMake will generate proper flags for linking.
32
+ * `${PLUGIN_NAME}_LINK_PATHS`: list of paths for the linker to search for required libraries in addition to standard locations.
33
+ * `${PLUGIN_NAME}_CMAKE_SHARED_LINKER_FLAGS`: additional linker flags used to generate shared libraries. For example, symbols can be forcibly included, e.g., for static registration.
34
+ * `${PLUGIN_NAME}_CMAKE_EXE_LINKER_FLAGS`: additional linker flags used to generate executables. For example, symbols can be forcibly included, e.g., for static registration.
35
+
36
+ Users will run the usual cmake commands, specifying the plugins to include in a space-separated list in the command line variable `ROCKSDB_PLUGINS` when invoking cmake.
37
+ ```
38
+ cmake .. -DROCKSDB_PLUGINS="dedupfs hdfs rados"
39
+ ```
40
+
41
+ ### Example
42
+
43
+ For a working example, see [Dedupfs](https://github.com/ajkr/dedupfs).
@@ -0,0 +1,10 @@
1
+ This directory contains interfaces and implementations that isolate the
2
+ rest of the package from platform details.
3
+
4
+ Code in the rest of the package includes "port.h" from this directory.
5
+ "port.h" in turn includes a platform specific "port_<platform>.h" file
6
+ that provides the platform specific implementation.
7
+
8
+ See port_posix.h for an example of what must be provided in a platform
9
+ specific header file.
10
+
@@ -0,0 +1,13 @@
1
+ The files in this directory originally come from
2
+ https://github.com/percona/PerconaFT/.
3
+
4
+ This directory only includes the "locktree" part of PerconaFT, and its
5
+ dependencies.
6
+
7
+ The following modifications were made:
8
+ - Make locktree usable outside of PerconaFT library
9
+ - Add shared read-only lock support
10
+
11
+ The files named *_subst.* are substitutes of the PerconaFT's files, they
12
+ contain replacements of PerconaFT's functionality.
13
+
@@ -118,7 +118,7 @@
118
118
  "xcode_settings": {
119
119
  "OTHER_CPLUSPLUSFLAGS": [
120
120
  "-mmacosx-version-min=10.15",
121
- "-std=c++17",
121
+ "-std=c++20",
122
122
  "-fno-omit-frame-pointer",
123
123
  "-momit-leaf-frame-pointer",
124
124
  "-arch x86_64",