rubcask 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (47) hide show
  1. checksums.yaml +7 -0
  2. data/.standard.yml +3 -0
  3. data/Gemfile +20 -0
  4. data/Gemfile.lock +74 -0
  5. data/LICENSE.txt +21 -0
  6. data/README.md +111 -0
  7. data/Rakefile +14 -0
  8. data/benchmark/benchmark_io.rb +49 -0
  9. data/benchmark/benchmark_server.rb +10 -0
  10. data/benchmark/benchmark_server_pipeline.rb +24 -0
  11. data/benchmark/benchmark_worker.rb +46 -0
  12. data/benchmark/op_times.rb +32 -0
  13. data/benchmark/profile.rb +15 -0
  14. data/benchmark/server_benchmark_helper.rb +138 -0
  15. data/example/server_runner.rb +15 -0
  16. data/lib/rubcask/bytes.rb +11 -0
  17. data/lib/rubcask/concurrency/fake_atomic_fixnum.rb +34 -0
  18. data/lib/rubcask/concurrency/fake_lock.rb +41 -0
  19. data/lib/rubcask/concurrency/fake_monitor_mixin.rb +21 -0
  20. data/lib/rubcask/config.rb +55 -0
  21. data/lib/rubcask/data_entry.rb +9 -0
  22. data/lib/rubcask/data_file.rb +91 -0
  23. data/lib/rubcask/directory.rb +437 -0
  24. data/lib/rubcask/expirable_entry.rb +9 -0
  25. data/lib/rubcask/hint_entry.rb +9 -0
  26. data/lib/rubcask/hint_file.rb +56 -0
  27. data/lib/rubcask/hinted_file.rb +148 -0
  28. data/lib/rubcask/keydir_entry.rb +9 -0
  29. data/lib/rubcask/merge_directory.rb +75 -0
  30. data/lib/rubcask/protocol.rb +74 -0
  31. data/lib/rubcask/server/abstract_server.rb +113 -0
  32. data/lib/rubcask/server/async.rb +78 -0
  33. data/lib/rubcask/server/client.rb +131 -0
  34. data/lib/rubcask/server/config.rb +31 -0
  35. data/lib/rubcask/server/pipeline.rb +49 -0
  36. data/lib/rubcask/server/runner/config.rb +43 -0
  37. data/lib/rubcask/server/runner.rb +107 -0
  38. data/lib/rubcask/server/threaded.rb +171 -0
  39. data/lib/rubcask/task/clean_directory.rb +19 -0
  40. data/lib/rubcask/tombstone.rb +40 -0
  41. data/lib/rubcask/version.rb +5 -0
  42. data/lib/rubcask/worker/direct_worker.rb +23 -0
  43. data/lib/rubcask/worker/factory.rb +42 -0
  44. data/lib/rubcask/worker/ractor_worker.rb +40 -0
  45. data/lib/rubcask/worker/thread_worker.rb +40 -0
  46. data/lib/rubcask.rb +19 -0
  47. metadata +102 -0
@@ -0,0 +1,21 @@
1
# frozen_string_literal: true

module Rubcask
  module Concurrency
    # Drop-in stand-in for MonitorMixin that implements the subset of its
    # API used by Rubcask while performing no synchronization at all.
    # Intended for single-threaded configurations (config.threadsafe == false).
    module FakeMonitorMixin
      # Runs the block directly; provides no mutual exclusion.
      # @return the block's return value
      def mon_synchronize
        yield
      end
      alias_method :synchronize, :mon_synchronize

      # No-op counterpart of MonitorMixin#mon_enter.
      def mon_enter
      end

      # No-op counterpart of MonitorMixin#mon_exit.
      def mon_exit
      end
    end
  end
end
@@ -0,0 +1,55 @@
1
# frozen_string_literal: true

require_relative "bytes"
module Rubcask
  # @!attribute max_file_size
  #   Maximum single file size.
  #
  #   New file is created after the current file is larger than this field
  #
  #   Default: Bytes::GIGABYTE * 2
  #   @return [Integer]
  # @!attribute io_strategy
  #   Guarantees; listed fastest first
  #
  #   :ruby is safe as long as you exit gracefully
  #
  #   :os is safe as long as no os or hardware failures occur
  #
  #   :os_sync is always safe
  #
  #   Default :ruby
  #   @return [:ruby, :os, :os_sync]
  # @!attribute threadsafe
  #   Set to true if you want to use Rubcask with many threads concurrently
  #
  #   Default: true
  #   @return [boolean]
  # @!attribute worker
  #   Type of worker used for async jobs
  #
  #   Currently it is only used for deleting files after merge
  #
  #   Default: :direct
  #   @return [:direct, :ractor, :thread]
  # Configuration of a Rubcask directory (see Rubcask::Directory).
  Config = Struct.new(:max_file_size, :io_strategy, :threadsafe, :worker) do
    # NOTE: overrides Struct's positional constructor — members can only be
    # customized through the configuration block, not positional arguments.
    # @yieldparam [self] config
    def initialize
      self.max_file_size = Bytes::GIGABYTE * 2
      self.io_strategy = :ruby
      self.threadsafe = true
      self.worker = :direct

      yield(self) if block_given?
    end

    # Builds a frozen config via the same block interface as #initialize.
    # @return [Config] a frozen configuration
    def self.configure(&block)
      new(&block).freeze
    end
  end

  # Reopened to define the default used by the server (requires `configure`
  # to already be defined on the Struct subclass above).
  class Config
    DEFAULT_SERVER_CONFIG = configure { |c| c.io_strategy = :os }
  end
end
@@ -0,0 +1,9 @@
1
# frozen_string_literal: true

require_relative "expirable_entry"

module Rubcask
  # A single key/value record as stored in a data file.
  # expire_timestamp is compared against Time.now by ExpirableEntry#expired?;
  # NO_EXPIRE_TIMESTAMP marks an entry that never expires.
  DataEntry = Struct.new(:expire_timestamp, :key, :value) do
    include ExpirableEntry
  end
end
@@ -0,0 +1,91 @@
1
# frozen_string_literal: true

require "forwardable"
require "stringio"
require "zlib"

module Rubcask
  # DataFile is a file where the keys and values are actually stored.
  #
  # On-disk record layout: an 18-byte header
  # (crc32 "N", expire_timestamp "Q>", key size "n", value size "N")
  # followed by the raw key bytes and the raw value bytes.
  class DataFile
    extend Forwardable
    def_delegators :@file, :seek, :pos, :close, :sync=, :flush

    attr_reader :write_pos

    HEADER_FORMAT = "NQ>nN"
    HEADER_WITHOUT_CRC_FORMAT = "Q>nN"

    # @param [File] file File with the data
    # @param [Integer] file_size Current size of `file` in bytes
    def initialize(file, file_size)
      @file = file
      @write_pos = file_size
    end

    # Fetches the entry stored at the given offset.
    # When the record size is known, passing it saves one I/O call:
    # the whole record is read at once and parsed from memory.
    # @param [Integer] offset File offset in bytes
    # @param [Integer, nil] size Record size in bytes
    def [](offset, size = nil)
      seek(offset)
      read(size)
    end

    # Yields each record in the file, in file order.
    # @return [Enumerator] if no block given
    # @yieldparam [DataEntry]
    def each
      return to_enum(__method__) unless block_given?

      seek(0)

      while (entry = read)
        yield entry
      end
    end

    # Reads the entry at the current file position.
    # @return [DataEntry]
    # @return [nil] if at the end of file
    # @raise [ChecksumError] if the entry has an incorrect checksum
    def read(size = nil)
      io = size.nil? ? @file : StringIO.new(@file.read(size))
      header = io.read(18)

      return nil unless header

      crc, expire_timestamp, key_size, value_size = header.unpack(HEADER_FORMAT)
      key = io.read(key_size)
      value = io.read(value_size)

      # The CRC covers everything after the checksum itself:
      # the header tail (bytes 4..) plus key and value.
      actual = Zlib.crc32(header[4..] + key + value)
      raise ChecksumError, "Checksums do not match" if crc != actual

      DataEntry.new(expire_timestamp, key, value)
    end

    AppendResult = Struct.new(:value_pos, :value_size)
    # Appends a record at the end of the file.
    # @param [DataEntry] entry Entry to write to the file
    # @return [AppendResult] struct containing position and size of the record
    def append(entry)
      record_start = @write_pos

      key_bytes = entry.key.bytesize
      value_bytes = entry.value.bytesize

      checksum = Zlib.crc32(
        [entry.expire_timestamp, key_bytes, value_bytes].pack(HEADER_WITHOUT_CRC_FORMAT) +
          entry.key + entry.value
      )
      written = @file.write(
        [checksum, entry.expire_timestamp, key_bytes, value_bytes].pack(HEADER_FORMAT),
        entry.key,
        entry.value
      )
      @write_pos += written
      @file.flush
      AppendResult.new(record_start, @write_pos - record_start)
    end
  end
end
@@ -0,0 +1,437 @@
1
# frozen_string_literal: true

require "concurrent"

require "fileutils"
require "forwardable"
require "logger"
require "monitor"
require "tmpdir"

require_relative "concurrency/fake_lock"
require_relative "concurrency/fake_atomic_fixnum"
require_relative "concurrency/fake_monitor_mixin"

require_relative "task/clean_directory"

require_relative "worker/factory"

require_relative "config"
require_relative "data_entry"
require_relative "data_file"
require_relative "hint_entry"
require_relative "hint_file"
require_relative "hinted_file"
require_relative "keydir_entry"
require_relative "merge_directory"
require_relative "tombstone"
27
+
28
+ module Rubcask
29
+ class Directory
30
+ extend Forwardable
31
+
32
+ # yields directory to the block and closes it after the block is terminated
33
+ # @see #initialize
34
+ # @yieldparam [Directory] directory
35
+ # @return [void]
36
# Opens a directory, yields it, and guarantees it is closed afterwards.
# @see #initialize
# @yieldparam [Directory] directory
# @return [void]
def self.with_directory(dir, config: Config.new)
  opened = new(dir, config: config)
  begin
    yield opened
  ensure
    # Close only what was successfully opened; `new` raising skips this.
    opened.close
  end
end
44
+
45
+ # @!macro [new] key_is_bytearray
46
+ # @note key is always treated as byte array, encoding is ignored
47
+
48
+ # @!macro [new] deleted_keys
49
+ # @note It might include deleted keys
50
+
51
+ # @!macro [new] lock_block_for_iteration
52
+ # @note This method blocks writes for the entire iteration
53
+
54
+ # @!macro [new] key_any_order
55
+ # @note Keys might be in any order
56
+
57
+ # @param [String] dir Path to the directory where data is stored
58
+ # @param [Config] config Config of the directory
59
# Opens (or creates) a Rubcask directory.
# Scans existing *.data files, reuses the newest non-full file as the
# active (writable) file, and builds the in-memory keydir.
# @param [String] dir Path to the directory where data is stored
# @param [Config] config Config of the directory
def initialize(dir, config: Config.new)
  @dir = dir
  @config = check_config(config)

  max_id = 0
  files = dir_data_files
  @files = files.each_with_object({}) do |file, hash|
    # Executable bit marks files already merged and awaiting cleanup.
    next if File.executable?(file)
    if file.equal?(files.last) && File.size(file) < config.max_file_size
      hinted_file = open_write_file(file)
      @active = hinted_file
    else
      hinted_file = open_read_only_file(file)
    end

    id = hinted_file.id

    hash[id] = hinted_file
    max_id = id # dir_data_files returns an already sorted collection
  end
  @max_id = (config.threadsafe ? Concurrent::AtomicFixnum : Concurrency::FakeAtomicFixnum).new(max_id)
  @lock = config.threadsafe ? Concurrent::ReentrantReadWriteLock.new : Concurrency::FakeLock.new
  @worker = Worker::Factory.new_worker(@config.worker)

  # FIX: previously logged to $stdin; a Logger must write to an output stream.
  @logger = Logger.new($stdout)
  @logger.level = Logger::INFO

  @merge_mutex = Thread::Mutex.new

  load_keydir!
  create_new_file! unless @active
end
91
+
92
+ # Set value associated with given key.
93
+ # @macro key_is_bytearray
94
+ # @param [String] key
95
+ # @param [String] value
96
+ # @return [String] the value provided by the user
97
def []=(key, value)
  # Entries written through []= never expire.
  put(key, value, NO_EXPIRE_TIMESTAMP)
  value # rubocop:disable Lint/Void
end
101
+
102
+ # Set value associated with given key with given ttl
103
+ # @macro key_is_bytearray
104
+ # @param [String] key
105
+ # @param [String] value
106
+ # @param [Integer] ttl Time to live
107
+ # @return [String] the value provided by the user
108
109
+ # @raise [ArgumentError] if ttl is negative
110
def set_with_ttl(key, value, ttl)
  raise ArgumentError, "Negative ttl" if ttl.negative?
  # Expiration is stored as an absolute unix timestamp, not a duration.
  put(key, value, Time.now.to_i + ttl)
  value # rubocop:disable Lint/Void
end
115
+
116
+ # Gets value associated with the key
117
+ # @macro key_is_bytearray
118
+ # @param [String] key
119
+ # @return [String] value associated with the key
120
+ # @return [nil] If no value associated with the key
121
def [](key)
  key = normalize_key(key)
  entry = nil
  data_file = nil
  @lock.with_read_lock do
    entry = @keydir[key]
    return nil unless entry

    # Expired entries are treated as absent; cleanup happens on delete/merge.
    if entry.expired?
      return nil
    end

    data_file = @files[entry.file_id]
    # Per-file monitor: guards against concurrent seek/read on the same file.
    # The early `return`s release both the monitor and the read lock.
    data_file.synchronize do
      value = data_file[entry.value_pos, entry.value_size].value
      return nil if Tombstone.is_tombstone?(value)
      return value
    end
  end
end
141
+
142
+ # Remove entry associated with the key.
143
+ # @param [String] key
144
+ # @macro key_is_bytearray
145
+ # @return false if no value exists for the key
146
+ # @return true if the delete was successful
147
def delete(key)
  key = normalize_key(key)
  @lock.with_write_lock do
    prev_val = @keydir[key]
    if prev_val.nil?
      return false
    end
    if prev_val.expired?
      # Already expired: just drop the keydir entry, no tombstone written.
      @keydir.delete(key)
      return false
    end
    # Writes a tombstone record so the delete survives restarts.
    do_delete(key, prev_val.file_id)
    true
  end
end
162
+
163
+ # Starts the merge operation.
164
+ # @raise [MergeAlreadyInProgressError] if another merge operation is in progress
165
def merge
  # try_lock keeps at most one merge running; losers raise instead of waiting.
  unless @merge_mutex.try_lock
    raise MergeAlreadyInProgressError, "Merge is already in progress"
  end
  begin
    non_synced_merge
  rescue => ex
    # NOTE(review): merge failures are swallowed after logging — callers
    # cannot distinguish a failed merge from a successful one.
    logger.error("Error while merging #{ex}")
  ensure
    @merge_mutex.unlock
  end
end
177
+
178
+ # Closes all the files and the worker
179
def close
  @lock.with_write_lock do
    @files.each_value(&:close)
    # Remove the active file if nothing was ever written to it.
    if active.write_pos == 0
      File.delete(active.path)
    end
  end
  worker.close
end
188
+
189
+ # @yieldparam [String] key
190
+ # @yieldparam [String] value
191
+ # @macro lock_block_for_iteration
192
+ # @macro key_any_order
193
+ # @return Enumerator if block not given
194
def each
  return to_enum(__method__) unless block_given?

  @lock.with_read_lock do
    @keydir.each do |key, entry|
      file = @files[entry.file_id]
      # Manual mon_enter/mon_exit (rather than synchronize) with ensure,
      # so `next` on a tombstone still releases the per-file monitor.
      file.mon_enter
      begin
        value = file[entry.value_pos, entry.value_size].value
        next if Tombstone.is_tombstone?(value)
        yield [key, value]
      ensure
        file.mon_exit
      end
    end
  end
end
211
+
212
+ # @yieldparam [String] key
213
+ # @macro deleted_keys
214
+ # @macro key_any_order
215
+ # @macro lock_block_for_iteration
216
+ # @return Enumerator if block not given
217
# Yields every key currently present in the keydir.
# @yieldparam [String] key
# @return [Enumerator] if no block given
def each_key(&block)
  return to_enum(__method__) unless block

  @lock.with_read_lock { @keydir.each_key(&block) }
end
224
+
225
+ # Generate hint files for data files that do not have hint files
226
def generate_missing_hint_files!
  @lock.with_read_lock do
    @files.each_value do |data_file|
      # Skip files whose hint file already exists and is still up to date.
      next if data_file.has_hint_file? && !data_file.dirty?
      data_file.synchronize do
        data_file.save_hint_file
      end
    end
  end
end
236
+
237
+ # Generate hint files for all the data files
238
# Rewrites the hint file of every data file, regardless of freshness.
def regenerate_hint_files!
  @lock.with_read_lock do
    @files.each_value do |data_file|
      data_file.synchronize { data_file.save_hint_file }
    end
  end
end
247
+
248
+ # Removes files that are not needed after the merge
249
def clear_files
  # Hands cleanup to the async worker; presumably CleanDirectory removes
  # the merged files marked executable — verify against Task::CleanDirectory.
  worker.push(Rubcask::Task::CleanDirectory.new(@dir))
end
252
+
253
+ # Returns number of keys in the store
254
+ # @note It might count some deleted keys
255
+ # @return [Integer]
256
# Number of entries currently in the keydir.
# @note Deleted-but-not-yet-compacted keys may be included.
# @return [Integer]
def key_count
  @lock.with_read_lock { @keydir.size }
end
261
+
262
+ # Returns array of keys in store
263
+ # @macro deleted_keys
264
+ # @macro key_any_order
265
+ # @return [Array<String>]
266
# Snapshot of all keys in the keydir, in no particular order.
# @return [Array<String>]
def keys
  @lock.with_read_lock { @keydir.keys }
end
271
+
272
+ private
273
+
274
+ attr_reader :config, :active, :worker, :logger
275
+
276
# Appends the entry to the active file and records it in the keydir.
# @note Takes the write lock; key must not yet be normalized.
def put(key, value, expire_timestamp)
  key = normalize_key(key)
  @lock.with_write_lock do
    @keydir[key] = active.append(
      DataEntry.new(expire_timestamp, key, value)
    )
    # Roll over to a fresh active file once the size limit is reached.
    if active.write_pos >= @config.max_file_size
      create_new_file!
    end
  end
  value # rubocop:disable Lint/Void
end
288
+
289
+ # @note This method assumes write lock and normalized key
290
# @note This method assumes write lock and normalized key
def do_delete(key, prev_file_id)
  # Persist a tombstone so the deletion is replayed on restart/merge.
  active.append(
    DataEntry.new(NO_EXPIRE_TIMESTAMP, key, Tombstone.new_tombstone(active.id, prev_file_id))
  )
  @keydir.delete(key)
  if active.write_pos >= @config.max_file_size
    create_new_file!
  end
end
299
+
300
+ # This methods does not provide synchronization and should be run with write lock
301
# Closes every file except the active one.
# Not synchronized — must be called while holding the write lock.
def close_not_active
  @files.each_value do |hinted_file|
    hinted_file.close unless hinted_file == active
  end
end
307
+
308
# Mixes the appropriate monitor implementation into the file object:
# a real MonitorMixin when threadsafe, otherwise the no-op fake.
def synchronize_hinted_file!(file)
  mixin = @config.threadsafe ? MonitorMixin : Concurrency::FakeMonitorMixin
  file.extend(mixin)
end
313
+
314
# Merge implementation without the merge-mutex guard (see #merge).
# Phases: snapshot paths under write lock, compact into a tmpdir without
# the lock, move results in, then swap file handles under write lock.
def non_synced_merge
  merging_paths = nil
  @lock.with_write_lock do
    # Snapshot [id, path] pairs sorted by id; a fresh active file ensures
    # the snapshot is immutable while we merge without the lock.
    merging_paths = @files.sort_by(&:first).map! { |k, v| [k, v.path] }
    create_new_file!
  end

  Dir.mktmpdir do |tmpdir|
    out = MergeDirectory.new(tmpdir, config: @config, max_id_ref: @max_id)

    merging_paths.each do |id, path|
      merge_single_file(out, id, path)
    end

    out.close

    # Move compacted files next to the live ones.
    Dir.each_child(tmpdir) do |child|
      FileUtils.mv(File.join(tmpdir, child), @dir)
    end
  end

  @lock.with_write_lock do
    close_not_active
    # Executable bit flags the old files for cleanup; reload! skips them.
    merging_paths.each { |_id, path| FileUtils.chmod("+x", path) }
    reload!
  end
  clear_files
end
342
+
343
# Copies the still-live records of one data file into the merge output.
# @param [MergeDirectory] out merge destination
# @param [Integer] id id of the source data file
# @param [String] path path of the source data file
def merge_single_file(out, id, path)
  File.open(path, "rb") do |io|
    pos = 0
    file = DataFile.new(io, 0)
    file.each do |entry|
      # Record the offset this entry started at before each advances it.
      start_pos = pos
      pos = file.pos

      next if entry.expired?
      next if Tombstone.is_tombstone?(entry.value)

      # Manual acquire/release so the `next`s below still release the
      # lock via ensure before skipping to the following entry.
      @lock.acquire_read_lock
      begin
        keydir_entry = @keydir[entry.key]
        next unless keydir_entry

        # Ignore records overwritten in a new file
        next if keydir_entry.file_id > id
        # Ignore records overwritten in the data file
        next if keydir_entry.file_id == id && keydir_entry.value_pos > start_pos
      ensure
        @lock.release_read_lock
      end
      out.append(entry)
    end
  end
end
370
+
371
# Rebuilds @files and the keydir from disk after a merge.
# Assumes the write lock is held.
def reload!
  @files = dir_data_files.each_with_object({}) do |file, hash|
    # Skip files flagged for cleanup (executable) and the active file,
    # whose open handle is preserved below.
    next if File.executable?(file) || file == @active.path
    hinted_file = open_read_only_file(file)

    id = hinted_file.id

    hash[id] = hinted_file
  end
  @files[@active.id] = @active
  load_keydir!
end
383
+
384
# Rebuilds the in-memory key directory from every data/hint file.
def load_keydir!
  @keydir = {}

  # Note that file iteration is oldest to newest, so newer entries
  # for the same key overwrite older ones.
  @files.each_value do |file|
    file.each_keydir_entry do |key, entry|
      if entry.expired?
        @keydir.delete(key)
      else
        @keydir[key] = entry
      end
    end
  end
end
398
+
399
# Opens a data file for appending, honoring the configured I/O strategy,
# and mixes in the appropriate monitor.
def open_write_file(file)
  hinted = HintedFile.new(
    file,
    os_sync: config.io_strategy == :os_sync,
    read_only: false,
    ruby_sync: config.io_strategy != :ruby
  )
  synchronize_hinted_file!(hinted)
  hinted
end
407
+
408
# Opens a data file read-only and mixes in the appropriate monitor.
def open_read_only_file(file)
  hinted = HintedFile.new(file, read_only: true)
  synchronize_hinted_file!(hinted)
  hinted
end
414
+
415
# Allocates the next file id and makes a fresh data file the active one.
# Assumes the write lock is held (or single-threaded startup).
def create_new_file!
  id = @max_id.increment
  file = open_write_file(File.join(@dir, "#{id}.data"))
  @active = file
  @files[id] = file
end
421
+
422
# All *.data paths in the directory, sorted by their numeric file id
# (lexicographic order would put "10.data" before "2.data").
def dir_data_files
  paths = Dir.glob(File.join(@dir, "*.data"))
  paths.sort_by { |path| File.basename(path).to_i }
end
425
+
426
# Hook for validating the user-supplied config.
# Currently a pass-through; returns the config unchanged.
def check_config(config)
  config
end
429
+
430
# Coerces the key to a binary (ASCII-8BIT) string so that lookups are
# byte-based and independent of source encoding. Returns the same object
# when it is already binary (String#to_s and #b avoid needless copies).
def normalize_key(key)
  str = key.to_s
  str.encoding == Encoding::ASCII_8BIT ? str : str.b
end
436
+ end
437
+ end
@@ -0,0 +1,9 @@
1
# frozen_string_literal: true

module Rubcask
  # Mixin for Struct-based entries that carry an +expire_timestamp+ member.
  module ExpirableEntry
    # @return [Boolean] whether the entry's expiration time is in the past.
    #   Entries stamped with NO_EXPIRE_TIMESTAMP never expire.
    def expired?
      return false if expire_timestamp == Rubcask::NO_EXPIRE_TIMESTAMP

      expire_timestamp < Time.now.to_i
    end
  end
end
@@ -0,0 +1,9 @@
1
# frozen_string_literal: true

require_relative "expirable_entry"

module Rubcask
  # One record of a hint file: a key plus the position and size of its
  # value record in the companion data file (see HintFile).
  HintEntry = Struct.new(:expire_timestamp, :key, :value_pos, :value_size) do
    include ExpirableEntry
  end
end
@@ -0,0 +1,56 @@
1
# frozen_string_literal: true

# FIX: this file used Forwardable without requiring it, so it could not be
# loaded standalone (it only worked when another file had required it first).
require "forwardable"

module Rubcask
  # HintFile stores only keys, and information on where the value of the key
  # is located in the companion data file.
  #
  # Record layout: a 22-byte header
  # (expire_timestamp "Q>", key size "n", value size "N", value pos "Q>")
  # followed by the raw key bytes.
  class HintFile
    extend Forwardable
    def_delegators :@file, :seek, :close, :flush

    HEADER_FORMAT = "Q>nNQ>"

    # @param [File] file An already opened file
    def initialize(file)
      @file = file
    end

    # Yields each hint entry from the file, in file order.
    # @yieldparam [HintEntry] hint_entry
    # @return [Enumerator] if no block given
    def each
      return to_enum(__method__) unless block_given?

      seek(0)

      loop do
        val = read
        break unless val
        yield val
      end
    end

    # Reads the hint entry at the current offset.
    # @return [HintEntry]
    # @return [nil] If at the end of file
    def read
      header = @file.read(22)

      return nil unless header

      expire_timestamp, key_size, value_size, value_pos = header.unpack(HEADER_FORMAT)
      key = @file.read(key_size)

      HintEntry.new(expire_timestamp, key, value_pos, value_size)
    end

    # Appends an entry to the file.
    # @param [HintEntry] entry
    # @return [Integer] Number of bytes written
    def append(entry)
      @file.write(
        [entry.expire_timestamp, entry.key.bytesize, entry.value_size, entry.value_pos].pack(HEADER_FORMAT),
        entry.key
      )
    end
  end
end