@nxtedition/rocksdb 7.0.39 → 7.0.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/binding.cc +59 -30
  2. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +27 -11
  3. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +310 -337
  4. package/deps/rocksdb/rocksdb/cache/clock_cache.h +394 -352
  5. package/deps/rocksdb/rocksdb/db/blob/blob_file_reader.cc +1 -1
  6. package/deps/rocksdb/rocksdb/db/column_family.cc +2 -2
  7. package/deps/rocksdb/rocksdb/db/column_family_test.cc +1 -1
  8. package/deps/rocksdb/rocksdb/db/compaction/compaction.cc +13 -3
  9. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +273 -134
  10. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.h +33 -2
  11. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.cc +11 -3
  12. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.h +2 -1
  13. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.cc +2 -2
  14. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_level.cc +133 -5
  15. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_test.cc +130 -1
  16. package/deps/rocksdb/rocksdb/db/compaction/compaction_service_job.cc +8 -4
  17. package/deps/rocksdb/rocksdb/db/compaction/subcompaction_state.h +11 -9
  18. package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +209 -12
  19. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +54 -39
  20. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +102 -19
  21. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_compaction_flush.cc +30 -11
  22. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_debug.cc +1 -1
  23. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_files.cc +28 -25
  24. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +0 -14
  25. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +63 -54
  26. package/deps/rocksdb/rocksdb/db/db_test.cc +6 -6
  27. package/deps/rocksdb/rocksdb/db/error_handler.cc +7 -0
  28. package/deps/rocksdb/rocksdb/db/error_handler.h +10 -9
  29. package/deps/rocksdb/rocksdb/db/log_test.cc +13 -6
  30. package/deps/rocksdb/rocksdb/db/perf_context_test.cc +1 -1
  31. package/deps/rocksdb/rocksdb/db/table_cache.cc +21 -0
  32. package/deps/rocksdb/rocksdb/db/table_cache.h +5 -0
  33. package/deps/rocksdb/rocksdb/db/version_set.cc +3 -2
  34. package/deps/rocksdb/rocksdb/db/version_set.h +6 -4
  35. package/deps/rocksdb/rocksdb/db/version_set_test.cc +8 -6
  36. package/deps/rocksdb/rocksdb/db/wal_edit.cc +22 -15
  37. package/deps/rocksdb/rocksdb/db/wal_edit.h +10 -0
  38. package/deps/rocksdb/rocksdb/db/wal_edit_test.cc +4 -5
  39. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.cc +0 -36
  40. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_driver.cc +1 -12
  41. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +23 -29
  42. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.h +0 -5
  43. package/deps/rocksdb/rocksdb/db_stress_tool/multi_ops_txns_stress.cc +7 -0
  44. package/deps/rocksdb/rocksdb/env/env_test.cc +0 -5
  45. package/deps/rocksdb/rocksdb/env/io_posix.cc +1 -7
  46. package/deps/rocksdb/rocksdb/memtable/hash_linklist_rep.cc +100 -78
  47. package/deps/rocksdb/rocksdb/options/options_test.cc +16 -0
  48. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +51 -0
  49. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +3 -0
  50. package/deps/rocksdb/rocksdb/table/table_reader.h +14 -0
  51. package/deps/rocksdb/rocksdb/table/table_test.cc +52 -0
  52. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +8 -38
  53. package/deps/rocksdb/rocksdb/util/rate_limiter.cc +27 -21
  54. package/deps/rocksdb/rocksdb/util/rate_limiter.h +12 -10
  55. package/deps/rocksdb/rocksdb/util/rate_limiter_test.cc +11 -8
  56. package/deps/rocksdb/rocksdb/utilities/backup/backup_engine_test.cc +2 -1
  57. package/deps/rocksdb/rocksdb/utilities/transactions/pessimistic_transaction_db.cc +59 -0
  58. package/deps/rocksdb/rocksdb/utilities/transactions/pessimistic_transaction_db.h +12 -0
  59. package/deps/rocksdb/rocksdb/utilities/transactions/transaction_test.cc +31 -0
  60. package/deps/rocksdb/rocksdb/utilities/transactions/write_prepared_transaction_test.cc +0 -3
  61. package/max_rev_operator.h +101 -0
  62. package/package.json +1 -1
  63. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  64. package/prebuilds/linux-x64/node.napi.node +0 -0
@@ -390,7 +390,7 @@ void ProfileQueries(bool enabled_time = false) {
390
390
  EXPECT_GT(hist_write_scheduling_time.Average(), 0);
391
391
 
392
392
  #ifndef NDEBUG
393
- ASSERT_GT(total_db_mutex_nanos, 2000U);
393
+ ASSERT_LT(total_db_mutex_nanos, 100U);
394
394
  #endif
395
395
  }
396
396
 
@@ -527,6 +527,27 @@ Status TableCache::GetTableProperties(
527
527
  return s;
528
528
  }
529
529
 
530
+ Status TableCache::ApproximateKeyAnchors(
531
+ const ReadOptions& ro, const InternalKeyComparator& internal_comparator,
532
+ const FileDescriptor& fd, std::vector<TableReader::Anchor>& anchors) {
533
+ Status s;
534
+ TableReader* t = fd.table_reader;
535
+ Cache::Handle* handle = nullptr;
536
+ if (t == nullptr) {
537
+ s = FindTable(ro, file_options_, internal_comparator, fd, &handle);
538
+ if (s.ok()) {
539
+ t = GetTableReaderFromHandle(handle);
540
+ }
541
+ }
542
+ if (s.ok() && t != nullptr) {
543
+ s = t->ApproximateKeyAnchors(ro, anchors);
544
+ }
545
+ if (handle != nullptr) {
546
+ ReleaseHandle(handle);
547
+ }
548
+ return s;
549
+ }
550
+
530
551
  size_t TableCache::GetMemoryUsageByTableReader(
531
552
  const FileOptions& file_options,
532
553
  const InternalKeyComparator& internal_comparator, const FileDescriptor& fd,
@@ -165,6 +165,11 @@ class TableCache {
165
165
  const std::shared_ptr<const SliceTransform>& prefix_extractor = nullptr,
166
166
  bool no_io = false);
167
167
 
168
+ Status ApproximateKeyAnchors(const ReadOptions& ro,
169
+ const InternalKeyComparator& internal_comparator,
170
+ const FileDescriptor& file_meta,
171
+ std::vector<TableReader::Anchor>& anchors);
172
+
168
173
  // Return total memory usage of the table reader of the file.
169
174
  // 0 if table reader of the file is not loaded.
170
175
  size_t GetMemoryUsageByTableReader(
@@ -3209,7 +3209,7 @@ void SortFileByRoundRobin(const InternalKeyComparator& icmp,
3209
3209
  }
3210
3210
 
3211
3211
  bool should_move_files =
3212
- compact_cursor->at(level).Valid() && temp->size() > 1;
3212
+ compact_cursor->at(level).size() > 0 && temp->size() > 1;
3213
3213
 
3214
3214
  // The iterator points to the Fsize with smallest key larger than or equal to
3215
3215
  // the given cursor
@@ -3225,7 +3225,8 @@ void SortFileByRoundRobin(const InternalKeyComparator& icmp,
3225
3225
  return icmp.Compare(cursor, f.file->smallest) > 0;
3226
3226
  });
3227
3227
 
3228
- should_move_files = current_file_iter != temp->end();
3228
+ should_move_files =
3229
+ current_file_iter != temp->end() && current_file_iter != temp->begin();
3229
3230
  }
3230
3231
  if (should_move_files) {
3231
3232
  // Construct a local temporary vector
@@ -146,10 +146,12 @@ class VersionStorageInfo {
146
146
  }
147
147
 
148
148
  // REQUIRES: lock is held
149
- // Update the compact cursor and advance the file index so that it can point
150
- // to the next cursor
151
- const InternalKey& GetNextCompactCursor(int level) {
152
- int cmp_idx = next_file_to_compact_by_size_[level] + 1;
149
+ // Update the compact cursor and advance the file index using increment
150
+ // so that it can point to the next cursor (increment means the number of
151
+ // input files in this level of the last compaction)
152
+ const InternalKey& GetNextCompactCursor(int level, size_t increment) {
153
+ int cmp_idx = next_file_to_compact_by_size_[level] + (int)increment;
154
+ assert(cmp_idx <= (int)files_by_compaction_pri_[level].size());
153
155
  // TODO(zichen): may need to update next_file_to_compact_by_size_
154
156
  // for parallel compaction.
155
157
  InternalKey new_cursor;
@@ -1978,6 +1978,7 @@ TEST_F(VersionSetTest, WalCreateAfterClose) {
1978
1978
 
1979
1979
  TEST_F(VersionSetTest, AddWalWithSmallerSize) {
1980
1980
  NewDB();
1981
+ assert(versions_);
1981
1982
 
1982
1983
  constexpr WalNumber kLogNumber = 10;
1983
1984
  constexpr uint64_t kSizeInBytes = 111;
@@ -1990,6 +1991,9 @@ TEST_F(VersionSetTest, AddWalWithSmallerSize) {
1990
1991
 
1991
1992
  ASSERT_OK(LogAndApplyToDefaultCF(edit));
1992
1993
  }
1994
+ // Copy for future comparison.
1995
+ const std::map<WalNumber, WalMetadata> wals1 =
1996
+ versions_->GetWalSet().GetWals();
1993
1997
 
1994
1998
  {
1995
1999
  // Add the same WAL with smaller synced size.
@@ -1998,13 +2002,11 @@ TEST_F(VersionSetTest, AddWalWithSmallerSize) {
1998
2002
  edit.AddWal(kLogNumber, wal);
1999
2003
 
2000
2004
  Status s = LogAndApplyToDefaultCF(edit);
2001
- ASSERT_TRUE(s.IsCorruption());
2002
- ASSERT_TRUE(
2003
- s.ToString().find(
2004
- "WAL 10 must not have smaller synced size than previous one") !=
2005
- std::string::npos)
2006
- << s.ToString();
2005
+ ASSERT_OK(s);
2007
2006
  }
2007
+ const std::map<WalNumber, WalMetadata> wals2 =
2008
+ versions_->GetWalSet().GetWals();
2009
+ ASSERT_EQ(wals1, wals2);
2008
2010
  }
2009
2011
 
2010
2012
  TEST_F(VersionSetTest, DeleteWalsBeforeNonExistingWalNumber) {
@@ -112,26 +112,33 @@ Status WalSet::AddWal(const WalAddition& wal) {
112
112
 
113
113
  auto it = wals_.lower_bound(wal.GetLogNumber());
114
114
  bool existing = it != wals_.end() && it->first == wal.GetLogNumber();
115
- if (existing && !wal.GetMetadata().HasSyncedSize()) {
116
- std::stringstream ss;
117
- ss << "WAL " << wal.GetLogNumber() << " is created more than once";
118
- return Status::Corruption("WalSet::AddWal", ss.str());
115
+
116
+ if (!existing) {
117
+ wals_.insert(it, {wal.GetLogNumber(), wal.GetMetadata()});
118
+ return Status::OK();
119
119
  }
120
- // If the WAL has synced size, it must >= the previous size.
121
- if (wal.GetMetadata().HasSyncedSize() && existing &&
122
- it->second.HasSyncedSize() &&
123
- wal.GetMetadata().GetSyncedSizeInBytes() <
124
- it->second.GetSyncedSizeInBytes()) {
120
+
121
+ assert(existing);
122
+ if (!wal.GetMetadata().HasSyncedSize()) {
125
123
  std::stringstream ss;
126
- ss << "WAL " << wal.GetLogNumber()
127
- << " must not have smaller synced size than previous one";
124
+ ss << "WAL " << wal.GetLogNumber() << " is created more than once";
128
125
  return Status::Corruption("WalSet::AddWal", ss.str());
129
126
  }
130
- if (existing) {
131
- it->second.SetSyncedSizeInBytes(wal.GetMetadata().GetSyncedSizeInBytes());
132
- } else {
133
- wals_.insert(it, {wal.GetLogNumber(), wal.GetMetadata()});
127
+
128
+ assert(wal.GetMetadata().HasSyncedSize());
129
+ if (it->second.HasSyncedSize() && wal.GetMetadata().GetSyncedSizeInBytes() <=
130
+ it->second.GetSyncedSizeInBytes()) {
131
+ // This is possible because version edits with different synced WAL sizes
132
+ // for the same WAL can be committed out-of-order. For example, thread
133
+ // 1 synces the first 10 bytes of 1.log, while thread 2 synces the first 20
134
+ // bytes of 1.log. It's possible that thread 1 calls LogAndApply() after
135
+ // thread 2.
136
+ // In this case, just return ok.
137
+ return Status::OK();
134
138
  }
139
+
140
+ // Update synced size for the given WAL.
141
+ it->second.SetSyncedSizeInBytes(wal.GetMetadata().GetSyncedSizeInBytes());
135
142
  return Status::OK();
136
143
  }
137
144
 
@@ -42,6 +42,8 @@ class WalMetadata {
42
42
  uint64_t GetSyncedSizeInBytes() const { return synced_size_bytes_; }
43
43
 
44
44
  private:
45
+ friend bool operator==(const WalMetadata& lhs, const WalMetadata& rhs);
46
+ friend bool operator!=(const WalMetadata& lhs, const WalMetadata& rhs);
45
47
  // The size of WAL is unknown, used when the WAL is not synced yet or is
46
48
  // empty.
47
49
  constexpr static uint64_t kUnknownWalSize =
@@ -51,6 +53,14 @@ class WalMetadata {
51
53
  uint64_t synced_size_bytes_ = kUnknownWalSize;
52
54
  };
53
55
 
56
+ inline bool operator==(const WalMetadata& lhs, const WalMetadata& rhs) {
57
+ return lhs.synced_size_bytes_ == rhs.synced_size_bytes_;
58
+ }
59
+
60
+ inline bool operator!=(const WalMetadata& lhs, const WalMetadata& rhs) {
61
+ return !(lhs == rhs);
62
+ }
63
+
54
64
  // These tags are persisted to MANIFEST, so it's part of the user API.
55
65
  enum class WalAdditionTag : uint32_t {
56
66
  // Indicates that there are no more tags.
@@ -54,12 +54,11 @@ TEST(WalSet, SmallerSyncedSize) {
54
54
  constexpr uint64_t kBytes = 100;
55
55
  WalSet wals;
56
56
  ASSERT_OK(wals.AddWal(WalAddition(kNumber, WalMetadata(kBytes))));
57
+ const auto wals1 = wals.GetWals();
57
58
  Status s = wals.AddWal(WalAddition(kNumber, WalMetadata(0)));
58
- ASSERT_TRUE(s.IsCorruption());
59
- ASSERT_TRUE(
60
- s.ToString().find(
61
- "WAL 100 must not have smaller synced size than previous one") !=
62
- std::string::npos);
59
+ const auto wals2 = wals.GetWals();
60
+ ASSERT_OK(s);
61
+ ASSERT_EQ(wals1, wals2);
63
62
  }
64
63
 
65
64
  TEST(WalSet, CreateTwice) {
@@ -148,42 +148,6 @@ void DbVerificationThread(void* v) {
148
148
  }
149
149
  }
150
150
 
151
- void TimestampedSnapshotsThread(void* v) {
152
- assert(FLAGS_create_timestamped_snapshot_one_in > 0);
153
- auto* thread = reinterpret_cast<ThreadState*>(v);
154
- assert(thread);
155
- SharedState* shared = thread->shared;
156
- assert(shared);
157
- StressTest* stress_test = shared->GetStressTest();
158
- assert(stress_test);
159
- while (true) {
160
- {
161
- MutexLock l(shared->GetMutex());
162
- if (shared->ShouldStopBgThread()) {
163
- shared->IncBgThreadsFinished();
164
- if (shared->BgThreadsFinished()) {
165
- shared->GetCondVar()->SignalAll();
166
- }
167
- return;
168
- }
169
- }
170
-
171
- uint64_t now = db_stress_env->NowNanos();
172
- std::pair<Status, std::shared_ptr<const Snapshot>> res =
173
- stress_test->CreateTimestampedSnapshot(now);
174
- if (res.first.ok()) {
175
- assert(res.second);
176
- assert(res.second->GetTimestamp() == now);
177
- } else {
178
- assert(!res.second);
179
- }
180
- constexpr uint64_t time_diff = static_cast<uint64_t>(1000) * 1000 * 1000;
181
- stress_test->ReleaseOldTimestampedSnapshots(now - time_diff);
182
-
183
- db_stress_env->SleepForMicroseconds(1000 * 1000);
184
- }
185
- }
186
-
187
151
  void PrintKeyValue(int cf, uint64_t key, const char* value, size_t sz) {
188
152
  if (!FLAGS_verbose) {
189
153
  return;
@@ -84,10 +84,6 @@ bool RunStressTest(StressTest* stress) {
84
84
  shared.IncBgThreads();
85
85
  }
86
86
 
87
- if (FLAGS_create_timestamped_snapshot_one_in > 0) {
88
- shared.IncBgThreads();
89
- }
90
-
91
87
  std::vector<ThreadState*> threads(n);
92
88
  for (uint32_t i = 0; i < n; i++) {
93
89
  threads[i] = new ThreadState(i, &shared);
@@ -105,12 +101,6 @@ bool RunStressTest(StressTest* stress) {
105
101
  &continuous_verification_thread);
106
102
  }
107
103
 
108
- ThreadState timestamped_snapshots_thread(0, &shared);
109
- if (FLAGS_create_timestamped_snapshot_one_in > 0) {
110
- db_stress_env->StartThread(TimestampedSnapshotsThread,
111
- &timestamped_snapshots_thread);
112
- }
113
-
114
104
  // Each thread goes through the following states:
115
105
  // initializing -> wait for others to init -> read/populate/depopulate
116
106
  // wait for others to operate -> verify -> done
@@ -179,8 +169,7 @@ bool RunStressTest(StressTest* stress) {
179
169
  stress->PrintStatistics();
180
170
 
181
171
  if (FLAGS_compaction_thread_pool_adjust_interval > 0 ||
182
- FLAGS_continuous_verification_interval > 0 ||
183
- FLAGS_create_timestamped_snapshot_one_in > 0) {
172
+ FLAGS_continuous_verification_interval > 0) {
184
173
  MutexLock l(shared.GetMutex());
185
174
  shared.SetShouldStopBgThread();
186
175
  while (!shared.BgThreadsFinished()) {
@@ -421,35 +421,6 @@ void StressTest::PrintStatistics() {
421
421
  }
422
422
  }
423
423
 
424
- void StressTest::ReleaseOldTimestampedSnapshots(uint64_t ts) {
425
- #ifndef ROCKSDB_LITE
426
- if (!txn_db_) {
427
- return;
428
- }
429
- assert(txn_db_);
430
- txn_db_->ReleaseTimestampedSnapshotsOlderThan(ts);
431
- #else
432
- (void)ts;
433
- fprintf(stderr, "timestamped snapshots not supported in LITE mode\n");
434
- exit(1);
435
- #endif // ROCKSDB_LITE
436
- }
437
-
438
- std::pair<Status, std::shared_ptr<const Snapshot>>
439
- StressTest::CreateTimestampedSnapshot(uint64_t ts) {
440
- #ifndef ROCKSDB_LITE
441
- if (!txn_db_) {
442
- return std::make_pair(Status::InvalidArgument(), nullptr);
443
- }
444
- assert(txn_db_);
445
- return txn_db_->CreateTimestampedSnapshot(ts);
446
- #else
447
- (void)ts;
448
- fprintf(stderr, "timestamped snapshots not supported in LITE mode\n");
449
- exit(1);
450
- #endif // ROCKSDB_LITE
451
- }
452
-
453
424
  // Currently PreloadDb has to be single-threaded.
454
425
  void StressTest::PreloadDbAndReopenAsReadOnly(int64_t number_of_keys,
455
426
  SharedState* shared) {
@@ -594,6 +565,7 @@ Status StressTest::CommitTxn(Transaction* txn, ThreadState* thread) {
594
565
  if (!FLAGS_use_txn) {
595
566
  return Status::InvalidArgument("CommitTxn when FLAGS_use_txn is not set");
596
567
  }
568
+ assert(txn_db_);
597
569
  Status s = txn->Prepare();
598
570
  std::shared_ptr<const Snapshot> timestamped_snapshot;
599
571
  if (s.ok()) {
@@ -602,10 +574,32 @@ Status StressTest::CommitTxn(Transaction* txn, ThreadState* thread) {
602
574
  uint64_t ts = db_stress_env->NowNanos();
603
575
  s = txn->CommitAndTryCreateSnapshot(/*notifier=*/nullptr, ts,
604
576
  &timestamped_snapshot);
577
+
578
+ std::pair<Status, std::shared_ptr<const Snapshot>> res;
579
+ if (thread->tid == 0) {
580
+ uint64_t now = db_stress_env->NowNanos();
581
+ res = txn_db_->CreateTimestampedSnapshot(now);
582
+ if (res.first.ok()) {
583
+ assert(res.second);
584
+ assert(res.second->GetTimestamp() == now);
585
+ if (timestamped_snapshot) {
586
+ assert(res.second->GetTimestamp() >
587
+ timestamped_snapshot->GetTimestamp());
588
+ }
589
+ } else {
590
+ assert(!res.second);
591
+ }
592
+ }
605
593
  } else {
606
594
  s = txn->Commit();
607
595
  }
608
596
  }
597
+ if (thread && FLAGS_create_timestamped_snapshot_one_in > 0 &&
598
+ thread->rand.OneInOpt(50000)) {
599
+ uint64_t now = db_stress_env->NowNanos();
600
+ constexpr uint64_t time_diff = static_cast<uint64_t>(1000) * 1000 * 1000;
601
+ txn_db_->ReleaseTimestampedSnapshotsOlderThan(now - time_diff);
602
+ }
609
603
  delete txn;
610
604
  return s;
611
605
  }
@@ -43,11 +43,6 @@ class StressTest {
43
43
 
44
44
  void PrintStatistics();
45
45
 
46
- void ReleaseOldTimestampedSnapshots(uint64_t ts);
47
-
48
- std::pair<Status, std::shared_ptr<const Snapshot>> CreateTimestampedSnapshot(
49
- uint64_t ts);
50
-
51
46
  protected:
52
47
  Status AssertSame(DB* db, ColumnFamilyHandle* cf,
53
48
  ThreadState::SnapshotState& snap_state);
@@ -1382,6 +1382,13 @@ Status MultiOpsTxnsStressTest::CommitAndCreateTimestampedSnapshotIfNeeded(
1382
1382
  } else {
1383
1383
  s = txn.Commit();
1384
1384
  }
1385
+ assert(txn_db_);
1386
+ if (FLAGS_create_timestamped_snapshot_one_in > 0 &&
1387
+ thread->rand.OneInOpt(50000)) {
1388
+ uint64_t now = db_stress_env->NowNanos();
1389
+ constexpr uint64_t time_diff = static_cast<uint64_t>(1000) * 1000 * 1000;
1390
+ txn_db_->ReleaseTimestampedSnapshotsOlderThan(now - time_diff);
1391
+ }
1385
1392
  return s;
1386
1393
  }
1387
1394
 
@@ -969,10 +969,6 @@ TEST_P(EnvPosixTestWithParam, ReserveThreads) {
969
969
  }
970
970
 
971
971
  #if (defined OS_LINUX || defined OS_WIN)
972
- // Travis doesn't support fallocate or getting unique ID from files for whatever
973
- // reason.
974
- #ifndef TRAVIS
975
-
976
972
  namespace {
977
973
  bool IsSingleVarint(const std::string& s) {
978
974
  Slice slice(s);
@@ -1780,7 +1776,6 @@ TEST_P(EnvPosixTestWithParam, InvalidateCache) {
1780
1776
  ASSERT_OK(env_->DeleteFile(fname));
1781
1777
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
1782
1778
  }
1783
- #endif // not TRAVIS
1784
1779
  #endif // OS_LINUX || OS_WIN
1785
1780
 
1786
1781
  class TestLogger : public Logger {
@@ -1354,8 +1354,7 @@ IOStatus PosixWritableFile::Close(const IOOptions& /*opts*/,
1354
1354
  // but it will be nice to log these errors.
1355
1355
  int dummy __attribute__((__unused__));
1356
1356
  dummy = ftruncate(fd_, filesize_);
1357
- #if defined(ROCKSDB_FALLOCATE_PRESENT) && defined(FALLOC_FL_PUNCH_HOLE) && \
1358
- !defined(TRAVIS)
1357
+ #if defined(ROCKSDB_FALLOCATE_PRESENT) && defined(FALLOC_FL_PUNCH_HOLE)
1359
1358
  // in some file systems, ftruncate only trims trailing space if the
1360
1359
  // new file size is smaller than the current size. Calling fallocate
1361
1360
  // with FALLOC_FL_PUNCH_HOLE flag to explicitly release these unused
@@ -1367,11 +1366,6 @@ IOStatus PosixWritableFile::Close(const IOOptions& /*opts*/,
1367
1366
  // tmpfs (since Linux 3.5)
1368
1367
  // We ignore error since failure of this operation does not affect
1369
1368
  // correctness.
1370
- // TRAVIS - this code does not work on TRAVIS filesystems.
1371
- // the FALLOC_FL_KEEP_SIZE option is expected to not change the size
1372
- // of the file, but it does. Simple strace report will show that.
1373
- // While we work with Travis-CI team to figure out if this is a
1374
- // quirk of Docker/AUFS, we will comment this out.
1375
1369
  struct stat file_stats;
1376
1370
  int result = fstat(fd_, &file_stats);
1377
1371
  // After ftruncate, we check whether ftruncate has the correct behavior.