@nxtedition/rocksdb 10.0.1 → 10.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
package/binding.cc CHANGED
@@ -50,51 +50,62 @@ struct Closable {
50
50
  };
51
51
 
52
52
  struct Database final {
53
- Database(std::string location) : location(std::move(location)) {}
53
+ Database(std::string location) : location(std::move(location)) {}
54
54
  ~Database() { assert(!db); }
55
55
 
56
56
  rocksdb::Status Close() {
57
+ std::lock_guard<std::mutex> lock(mutex_);
58
+
57
59
  if (!db) {
58
60
  return rocksdb::Status::OK();
59
61
  }
60
62
 
61
- for (auto closable : closables) {
63
+ for (auto closable : closables_) {
62
64
  closable->Close();
63
65
  }
64
- closables.clear();
66
+ closables_.clear();
65
67
 
66
68
  db->FlushWAL(true);
67
69
 
68
- for (auto& [id, column] : columns) {
69
- db->DestroyColumnFamilyHandle(column.handle);
70
- }
71
- columns.clear();
70
+ for (auto& [id, column] : columns) {
71
+ db->DestroyColumnFamilyHandle(column.handle);
72
+ }
73
+ columns.clear();
74
+
75
+ auto db2 = std::move(db);
76
+ return db2->Close();
77
+ }
78
+
79
+ void Ref() { refs_++; }
72
80
 
73
- auto db2 = std::move(db);
74
- return db2->Close();
81
+ void Unref() {
82
+ if (--refs_ == 0) {
83
+ Close();
84
+ delete this;
85
+ }
75
86
  }
76
87
 
77
- void Ref() {
78
- std::lock_guard<std::mutex> lock(mutex);
88
+ void Attach(Closable* closable) {
89
+ std::lock_guard<std::mutex> lock(mutex_);
79
90
 
80
- refs++;
81
- }
91
+ closables_.insert(closable);
92
+ }
93
+
94
+ void Detach(Closable* closable) {
95
+ std::lock_guard<std::mutex> lock(mutex_);
82
96
 
83
- void Unref() {
84
- std::lock_guard<std::mutex> lock(mutex);
97
+ closables_.erase(closable);
98
+ }
85
99
 
86
- if (--refs == 0) {
87
- Close();
88
- delete this;
89
- }
90
- }
100
+ const std::string location;
91
101
 
92
- std::mutex mutex;
93
- int refs = 0;
94
- std::string location;
95
102
  std::unique_ptr<rocksdb::DB> db;
96
- std::set<Closable*> closables;
97
103
  std::map<int32_t, ColumnFamily> columns;
104
+
105
+ private:
106
+ mutable std::mutex mutex_;
107
+ std::set<Closable*> closables_;
108
+ std::atomic<int> refs_ = 0;
98
109
  };
99
110
 
100
111
  enum BatchOp { Empty, Put, Delete, Merge, Data };
@@ -443,12 +454,12 @@ struct Iterator final : public BaseIterator {
443
454
 
444
455
  napi_status Attach(napi_env env, napi_value context) {
445
456
  NAPI_STATUS_RETURN(napi_create_reference(env, context, 1, &ref_));
446
- database_->closables.insert(this);
457
+ database_->Attach(this);
447
458
  return napi_ok;
448
459
  }
449
460
 
450
461
  napi_status Detach(napi_env env) {
451
- database_->closables.erase(this);
462
+ database_->Detach(this);
452
463
  if (ref_) {
453
464
  NAPI_STATUS_RETURN(napi_delete_reference(env, ref_));
454
465
  }
@@ -466,42 +477,65 @@ struct Iterator final : public BaseIterator {
466
477
  napi_ref ref_ = nullptr;
467
478
  };
468
479
 
480
+ /**
481
+ * Hook for when the environment exits. This hook will be called after
482
+ * already-scheduled napi_async_work items have finished, which gives us
483
+ * the guarantee that no db operations will be in-flight at this time.
484
+ */
485
+ static void env_cleanup_hook(void* data) {
486
+ auto database = reinterpret_cast<Database*>(data);
487
+
488
+ // Do everything that db_close() does but synchronously. We're expecting that GC
489
+ // did not (yet) collect the database because that would be a user mistake (not
490
+ // closing their db) made during the lifetime of the environment. That's different
491
+ // from an environment being torn down (like the main process or a worker thread)
492
+ // where it's our responsibility to clean up. Note also, the following code must
493
+ // be a safe noop if called before db_open() or after db_close().
494
+ if (database) {
495
+ database->Unref();
496
+ }
497
+ }
498
+
469
499
  static void FinalizeDatabase(napi_env env, void* data, void* hint) {
470
- if (data) {
471
- reinterpret_cast<Database*>(data)->Unref();
500
+ auto database = reinterpret_cast<Database*>(data);
501
+ if (database) {
502
+ napi_remove_env_cleanup_hook(env, env_cleanup_hook, database);
503
+ database->Unref();
472
504
  }
473
505
  }
474
506
 
475
507
  NAPI_METHOD(db_init) {
476
508
  NAPI_ARGV(1);
477
509
 
478
- Database* database = nullptr;
510
+ Database* database = nullptr;
479
511
 
480
512
  napi_valuetype type;
481
513
  NAPI_STATUS_THROWS(napi_typeof(env, argv[0], &type));
482
514
 
483
515
  if (type == napi_string) {
484
- std::string location;
516
+ std::string location;
485
517
  size_t length = 0;
486
518
  NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], nullptr, 0, &length));
487
519
  location.resize(length, '\0');
488
520
  NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], &location[0], length + 1, &length));
489
521
 
490
- database = new Database(location);
522
+ database = new Database(location);
491
523
  } else if (type == napi_bigint) {
492
- int64_t value;
493
- bool lossless;
494
- NAPI_STATUS_THROWS(napi_get_value_bigint_int64(env, argv[0], &value, &lossless));
524
+ int64_t value;
525
+ bool lossless;
526
+ NAPI_STATUS_THROWS(napi_get_value_bigint_int64(env, argv[0], &value, &lossless));
495
527
 
496
- database = reinterpret_cast<Database*>(value);
528
+ database = reinterpret_cast<Database*>(value);
497
529
  } else {
498
- NAPI_STATUS_THROWS(napi_invalid_arg);
499
- }
530
+ NAPI_STATUS_THROWS(napi_invalid_arg);
531
+ }
532
+
533
+ napi_add_env_cleanup_hook(env, env_cleanup_hook, database);
500
534
 
501
535
  napi_value result;
502
536
  NAPI_STATUS_THROWS(napi_create_external(env, database, FinalizeDatabase, nullptr, &result));
503
537
 
504
- database->Ref();
538
+ database->Ref();
505
539
 
506
540
  return result;
507
541
  }
@@ -641,7 +675,7 @@ napi_status InitOptions(napi_env env, T& columnOptions, const U& options) {
641
675
  rocksdb::BlockBasedTableOptions tableOptions;
642
676
 
643
677
  if (cacheSize) {
644
- tableOptions.block_cache = rocksdb::NewLRUCache(cacheSize);
678
+ tableOptions.block_cache = rocksdb::HyperClockCacheOptions(cacheSize, 0).MakeSharedCache();
645
679
  tableOptions.cache_index_and_filter_blocks = true;
646
680
  NAPI_STATUS_RETURN(
647
681
  GetProperty(env, options, "cacheIndexAndFilterBlocks", tableOptions.cache_index_and_filter_blocks));
@@ -767,152 +801,151 @@ NAPI_METHOD(db_open) {
767
801
  Database* database;
768
802
  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
769
803
 
770
- if (database->db) {
771
- napi_value columns;
772
- NAPI_STATUS_THROWS(napi_create_object(env, &columns));
773
- for (auto& [id, column] : database->columns) {
774
- napi_value val;
775
- NAPI_STATUS_THROWS(napi_create_external(env, column.handle, nullptr, nullptr, &val));
776
- NAPI_STATUS_THROWS(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
777
- }
778
- return columns;
779
- } else {
780
- rocksdb::Options dbOptions;
781
-
782
- const auto options = argv[1];
783
-
784
- int parallelism = std::max<int>(1, std::thread::hardware_concurrency() / 2);
785
- NAPI_STATUS_THROWS(GetProperty(env, options, "parallelism", parallelism));
786
- dbOptions.IncreaseParallelism(parallelism);
787
-
788
- uint32_t walTTL = 0;
789
- NAPI_STATUS_THROWS(GetProperty(env, options, "walTTL", walTTL));
790
- dbOptions.WAL_ttl_seconds = walTTL / 1e3;
791
-
792
- uint32_t walSizeLimit = 0;
793
- NAPI_STATUS_THROWS(GetProperty(env, options, "walSizeLimit", walSizeLimit));
794
- dbOptions.WAL_size_limit_MB = walSizeLimit / 1e6;
795
-
796
- uint32_t maxTotalWalSize = 0;
797
- NAPI_STATUS_THROWS(GetProperty(env, options, "walTotalSizeLimit", walSizeLimit));
798
- dbOptions.max_total_wal_size = maxTotalWalSize / 1e6;
799
-
800
- bool walCompression = false;
801
- NAPI_STATUS_THROWS(GetProperty(env, options, "walCompression", walCompression));
802
- dbOptions.wal_compression =
803
- walCompression ? rocksdb::CompressionType::kZSTD : rocksdb::CompressionType::kNoCompression;
804
-
805
- dbOptions.avoid_unnecessary_blocking_io = true;
806
- dbOptions.write_dbid_to_manifest = true;
807
- dbOptions.enable_pipelined_write = true; // We only write in the main thread...
808
- dbOptions.create_missing_column_families = true;
809
- dbOptions.fail_if_options_file_error = true;
810
-
811
- NAPI_STATUS_THROWS(GetProperty(env, options, "createIfMissing", dbOptions.create_if_missing));
812
- NAPI_STATUS_THROWS(GetProperty(env, options, "errorIfExists", dbOptions.error_if_exists));
813
- NAPI_STATUS_THROWS(GetProperty(env, options, "pipelinedWrite", dbOptions.enable_pipelined_write));
814
-
815
- // TODO (feat): dbOptions.listeners
816
-
817
- std::string infoLogLevel;
818
- NAPI_STATUS_THROWS(GetProperty(env, options, "infoLogLevel", infoLogLevel));
819
- if (infoLogLevel.size() > 0) {
820
- rocksdb::InfoLogLevel lvl = {};
821
-
822
- if (infoLogLevel == "debug")
823
- lvl = rocksdb::InfoLogLevel::DEBUG_LEVEL;
824
- else if (infoLogLevel == "info")
825
- lvl = rocksdb::InfoLogLevel::INFO_LEVEL;
826
- else if (infoLogLevel == "warn")
827
- lvl = rocksdb::InfoLogLevel::WARN_LEVEL;
828
- else if (infoLogLevel == "error")
829
- lvl = rocksdb::InfoLogLevel::ERROR_LEVEL;
830
- else if (infoLogLevel == "fatal")
831
- lvl = rocksdb::InfoLogLevel::FATAL_LEVEL;
832
- else if (infoLogLevel == "header")
833
- lvl = rocksdb::InfoLogLevel::HEADER_LEVEL;
834
- else
835
- napi_throw_error(env, nullptr, "invalid log level");
836
-
837
- dbOptions.info_log_level = lvl;
838
- } else {
839
- // In some places RocksDB checks this option to see if it should prepare
840
- // debug information (ahead of logging), so set it to the highest level.
841
- dbOptions.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL;
842
- dbOptions.info_log.reset(new NullLogger());
843
- }
844
-
845
- NAPI_STATUS_THROWS(InitOptions(env, dbOptions, options));
846
-
847
- std::vector<rocksdb::ColumnFamilyDescriptor> descriptors;
848
-
849
- bool hasColumns;
850
- NAPI_STATUS_THROWS(napi_has_named_property(env, options, "columns", &hasColumns));
851
-
852
- if (hasColumns) {
853
- napi_value columns;
854
- NAPI_STATUS_THROWS(napi_get_named_property(env, options, "columns", &columns));
855
-
856
- napi_value keys;
857
- NAPI_STATUS_THROWS(napi_get_property_names(env, columns, &keys));
858
-
859
- uint32_t len;
860
- NAPI_STATUS_THROWS(napi_get_array_length(env, keys, &len));
861
-
862
- descriptors.resize(len);
863
- for (uint32_t n = 0; n < len; ++n) {
864
- napi_value key;
865
- NAPI_STATUS_THROWS(napi_get_element(env, keys, n, &key));
866
-
867
- napi_value column;
868
- NAPI_STATUS_THROWS(napi_get_property(env, columns, key, &column));
869
-
870
- NAPI_STATUS_THROWS(InitOptions(env, descriptors[n].options, column));
871
-
872
- NAPI_STATUS_THROWS(GetValue(env, key, descriptors[n].name));
873
- }
874
- }
875
-
876
- auto callback = argv[2];
877
-
878
- runAsync<std::vector<rocksdb::ColumnFamilyHandle*>>(
879
- "leveldown.open", env, callback,
880
- [=](auto& handles) {
881
- assert(!database->db);
882
-
883
- rocksdb::DB* db = nullptr;
884
-
885
- const auto status = descriptors.empty()
886
- ? rocksdb::DB::Open(dbOptions, database->location, &db)
887
- : rocksdb::DB::Open(dbOptions, database->location, descriptors, &handles, &db);
888
-
889
- database->db.reset(db);
890
-
891
- return status;
892
- },
893
- [=](auto& handles, auto env, auto& argv) {
894
- argv.resize(2);
895
-
896
- NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));
897
-
898
- for (size_t n = 0; n < handles.size(); ++n) {
899
- ColumnFamily column;
900
- column.handle = handles[n];
901
- column.descriptor = descriptors[n];
902
- database->columns[column.handle->GetID()] = column;
903
- }
904
-
905
- napi_value columns;
906
- NAPI_STATUS_RETURN(napi_create_object(env, &columns));
907
- for (auto& [id, column] : database->columns) {
908
- napi_value val;
909
- NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &val));
910
- NAPI_STATUS_RETURN(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
911
- }
912
-
913
- return napi_ok;
914
- });
915
- }
804
+ if (database->db) {
805
+ napi_value columns;
806
+ NAPI_STATUS_THROWS(napi_create_object(env, &columns));
807
+ for (auto& [id, column] : database->columns) {
808
+ napi_value val;
809
+ NAPI_STATUS_THROWS(napi_create_external(env, column.handle, nullptr, nullptr, &val));
810
+ NAPI_STATUS_THROWS(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
811
+ }
812
+ return columns;
813
+ } else {
814
+ rocksdb::Options dbOptions;
815
+
816
+ const auto options = argv[1];
817
+
818
+ int parallelism = std::max<int>(1, std::thread::hardware_concurrency() / 2);
819
+ NAPI_STATUS_THROWS(GetProperty(env, options, "parallelism", parallelism));
820
+ dbOptions.IncreaseParallelism(parallelism);
821
+
822
+ uint32_t walTTL = 0;
823
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walTTL", walTTL));
824
+ dbOptions.WAL_ttl_seconds = walTTL / 1e3;
825
+
826
+ uint32_t walSizeLimit = 0;
827
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walSizeLimit", walSizeLimit));
828
+ dbOptions.WAL_size_limit_MB = walSizeLimit / 1e6;
829
+
830
+ uint32_t maxTotalWalSize = 0;
831
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walTotalSizeLimit", walSizeLimit));
832
+ dbOptions.max_total_wal_size = maxTotalWalSize / 1e6;
833
+
834
+ bool walCompression = false;
835
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walCompression", walCompression));
836
+ dbOptions.wal_compression =
837
+ walCompression ? rocksdb::CompressionType::kZSTD : rocksdb::CompressionType::kNoCompression;
838
+
839
+ dbOptions.avoid_unnecessary_blocking_io = true;
840
+ dbOptions.write_dbid_to_manifest = true;
841
+ dbOptions.enable_pipelined_write = true; // We only write in the main thread...
842
+ dbOptions.create_missing_column_families = true;
843
+ dbOptions.fail_if_options_file_error = true;
844
+
845
+ NAPI_STATUS_THROWS(GetProperty(env, options, "createIfMissing", dbOptions.create_if_missing));
846
+ NAPI_STATUS_THROWS(GetProperty(env, options, "errorIfExists", dbOptions.error_if_exists));
847
+ NAPI_STATUS_THROWS(GetProperty(env, options, "pipelinedWrite", dbOptions.enable_pipelined_write));
848
+
849
+ // TODO (feat): dbOptions.listeners
850
+
851
+ std::string infoLogLevel;
852
+ NAPI_STATUS_THROWS(GetProperty(env, options, "infoLogLevel", infoLogLevel));
853
+ if (infoLogLevel.size() > 0) {
854
+ rocksdb::InfoLogLevel lvl = {};
855
+
856
+ if (infoLogLevel == "debug")
857
+ lvl = rocksdb::InfoLogLevel::DEBUG_LEVEL;
858
+ else if (infoLogLevel == "info")
859
+ lvl = rocksdb::InfoLogLevel::INFO_LEVEL;
860
+ else if (infoLogLevel == "warn")
861
+ lvl = rocksdb::InfoLogLevel::WARN_LEVEL;
862
+ else if (infoLogLevel == "error")
863
+ lvl = rocksdb::InfoLogLevel::ERROR_LEVEL;
864
+ else if (infoLogLevel == "fatal")
865
+ lvl = rocksdb::InfoLogLevel::FATAL_LEVEL;
866
+ else if (infoLogLevel == "header")
867
+ lvl = rocksdb::InfoLogLevel::HEADER_LEVEL;
868
+ else
869
+ napi_throw_error(env, nullptr, "invalid log level");
870
+
871
+ dbOptions.info_log_level = lvl;
872
+ } else {
873
+ // In some places RocksDB checks this option to see if it should prepare
874
+ // debug information (ahead of logging), so set it to the highest level.
875
+ dbOptions.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL;
876
+ dbOptions.info_log.reset(new NullLogger());
877
+ }
878
+
879
+ NAPI_STATUS_THROWS(InitOptions(env, dbOptions, options));
880
+
881
+ std::vector<rocksdb::ColumnFamilyDescriptor> descriptors;
882
+
883
+ bool hasColumns;
884
+ NAPI_STATUS_THROWS(napi_has_named_property(env, options, "columns", &hasColumns));
885
+
886
+ if (hasColumns) {
887
+ napi_value columns;
888
+ NAPI_STATUS_THROWS(napi_get_named_property(env, options, "columns", &columns));
889
+
890
+ napi_value keys;
891
+ NAPI_STATUS_THROWS(napi_get_property_names(env, columns, &keys));
892
+
893
+ uint32_t len;
894
+ NAPI_STATUS_THROWS(napi_get_array_length(env, keys, &len));
895
+
896
+ descriptors.resize(len);
897
+ for (uint32_t n = 0; n < len; ++n) {
898
+ napi_value key;
899
+ NAPI_STATUS_THROWS(napi_get_element(env, keys, n, &key));
900
+
901
+ napi_value column;
902
+ NAPI_STATUS_THROWS(napi_get_property(env, columns, key, &column));
903
+
904
+ NAPI_STATUS_THROWS(InitOptions(env, descriptors[n].options, column));
905
+
906
+ NAPI_STATUS_THROWS(GetValue(env, key, descriptors[n].name));
907
+ }
908
+ }
909
+
910
+ auto callback = argv[2];
911
+
912
+ runAsync<std::vector<rocksdb::ColumnFamilyHandle*>>(
913
+ "leveldown.open", env, callback,
914
+ [=](auto& handles) {
915
+ assert(!database->db);
916
+
917
+ rocksdb::DB* db = nullptr;
918
+
919
+ const auto status = descriptors.empty()
920
+ ? rocksdb::DB::Open(dbOptions, database->location, &db)
921
+ : rocksdb::DB::Open(dbOptions, database->location, descriptors, &handles, &db);
922
+
923
+ database->db.reset(db);
924
+
925
+ return status;
926
+ },
927
+ [=](auto& handles, auto env, auto& argv) {
928
+ argv.resize(2);
929
+
930
+ NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));
931
+
932
+ for (size_t n = 0; n < handles.size(); ++n) {
933
+ ColumnFamily column;
934
+ column.handle = handles[n];
935
+ column.descriptor = descriptors[n];
936
+ database->columns[column.handle->GetID()] = column;
937
+ }
938
+
939
+ napi_value columns = argv[1];
940
+ for (auto& [id, column] : database->columns) {
941
+ napi_value val;
942
+ NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &val));
943
+ NAPI_STATUS_RETURN(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
944
+ }
945
+
946
+ return napi_ok;
947
+ });
948
+ }
916
949
 
917
950
  return 0;
918
951
  }
package/build.sh ADDED
@@ -0,0 +1,17 @@
1
+ #!/bin/bash
2
+ set -e
3
+ export DOCKER_HOST=${DOCKER_HOST:-ssh://root@test-srv3.hq.bmux}
4
+
5
+ echo "Building image..."
6
+ docker build --iidfile prebuilds.iid .
7
+
8
+ echo "Extracting prebuilds from image..."
9
+ IMG=$(cat prebuilds.iid)
10
+ ID=$(docker create $IMG)
11
+ docker cp "$ID:/rocks-level/prebuilds" ./
12
+
13
+ echo "Cleaning up..."
14
+ docker rm $ID > /dev/null
15
+ rm prebuilds.iid
16
+
17
+ echo "All done!"
@@ -87,14 +87,8 @@
87
87
  "ROCKSDB_PTHREAD_ADAPTIVE_MUTEX=1",
88
88
  "ROCKSDB_RANGESYNC_PRESENT=1",
89
89
  "ROCKSDB_SCHED_GETCPU_PRESENT=1",
90
- # "ROCKSDB_IOURING_PRESENT=1",
90
+ "ROCKSDB_IOURING_PRESENT=1",
91
91
  "USE_FOLLY=1",
92
- "FOLLY_NO_CONFIG=1"
93
- "HAVE_SSE42=1",
94
- "HAVE_BMI=1",
95
- "HAVE_LZCNT=1",
96
- "HAVE_AVX2=1",
97
- "HAVE_PCLMUL=1",
98
92
  "HAVE_UINT128_EXTENSION=1",
99
93
  "HAVE_ALIGNED_NEW=1",
100
94
  # "HAVE_FULLFSYNC=1",
@@ -107,6 +101,7 @@
107
101
  "libraries": [
108
102
  "/usr/lib/x86_64-linux-gnu/libzstd.a",
109
103
  "/usr/lib/x86_64-linux-gnu/libfolly.a",
104
+ "/usr/lib/x86_64-linux-gnu/liburing.a",
110
105
  # "/usr/lib/x86_64-linux-gnu/libjemalloc.a",
111
106
  ],
112
107
  },
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/rocksdb",
3
- "version": "10.0.1",
3
+ "version": "10.0.3",
4
4
  "description": "A low-level Node.js RocksDB binding",
5
5
  "license": "MIT",
6
6
  "main": "index.js",