daybreak 0.2.1 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README +5 -5
- data/Rakefile +1 -4
- data/lib/daybreak.rb +1 -0
- data/lib/daybreak/db.rb +119 -236
- data/lib/daybreak/format.rb +13 -2
- data/lib/daybreak/journal.rb +205 -0
- data/lib/daybreak/queue/mri.rb +10 -6
- data/lib/daybreak/queue/threaded.rb +5 -2
- data/lib/daybreak/serializer.rb +10 -1
- data/lib/daybreak/version.rb +1 -1
- data/script/bench +3 -3
- data/test/test.rb +60 -30
- metadata +50 -61
data/README
CHANGED
@@ -1,7 +1,7 @@
-
-
-
-~^~ ^ ^~^~ ~^~ ~ ~^~~^~^-=~=~=-~^~^~^~
+                                   ^^    |
+    daybreak          ^^        \ _ /
+                              -=  / \  =-
+~^~ ^ ^~^~ ~^~ ~ ~^~~^~^-=~=~=-~^~^~^~

 Daybreak is a simple key value store for ruby. It has user defined persistence,
 and all data is stored in a table in memory so ruby niceties are available.
@@ -9,4 +9,4 @@ Daybreak is faster than any other ruby options like pstore and dbm.

 $ gem install daybreak

-
+You find a detailed documentation at http://propublica.github.com/daybreak.
data/Rakefile
CHANGED
data/lib/daybreak.rb
CHANGED
data/lib/daybreak/db.rb
CHANGED
@@ -5,88 +5,75 @@ module Daybreak
   class DB
     include Enumerable

-    # Database file name
-    attr_reader :file
-
-    # Counter of how many records are in
-    attr_reader :logsize
-
     # Set default value, can be a callable
     attr_writer :default

-    @@databases = []
-    @@databases_mutex = Mutex.new
-
-    # A handler that will ensure that databases are closed and synced when the
-    # current process exits.
-    # @api private
-    def self.exit_handler
-      loop do
-        db = @@databases_mutex.synchronize { @@databases.first }
-        break unless db
-        warn "Daybreak database #{db.file} was not closed, state might be inconsistent"
-        begin
-          db.close
-        rescue Exception => ex
-          warn "Failed to close daybreak database: #{ex.message}"
-        end
-      end
-    end
-
-    at_exit { Daybreak::DB.exit_handler }
-
     # Create a new Daybreak::DB. The second argument is the default value
     # to store when accessing a previously unset key, this follows the
     # Hash standard.
     # @param [String] file the path to the db file
-    # @param [Hash] options a hash that contains the options for creating a new
-    #
+    # @param [Hash] options a hash that contains the options for creating a new database
+    # @option options [Class] :serializer Serializer class
+    # @option options [Class] :format Format class
+    # @option options [Object] :default Default value
     # @yield [key] a block that will return the default value to store.
     # @yieldparam [String] key the key to be stored.
     def initialize(file, options = {}, &block)
-      @file = file
       @serializer = (options[:serializer] || Serializer::Default).new
-      @
-      @
-
+      @table = Hash.new &method(:hash_default)
+      @journal = Journal.new(file, (options[:format] || Format).new, @serializer) do |record|
+        if !record
+          @table.clear
+        elsif record.size == 1
+          @table.delete(record.first)
+        else
+          @table[record.first] = @serializer.load(record.last)
+        end
+      end
       @default = block ? block : options[:default]
-
-      @mutex = Mutex.new # Mutex to make #lock thread safe
-      @worker = Thread.new(&method(:worker))
-      @worker.priority = -1
-      load
+      @mutex = Mutex.new # Mutex used by #synchronize and #lock
       @@databases_mutex.synchronize { @@databases << self }
     end

+    # Database file name
+    # @return [String] database file name
+    def file
+      @journal.file
+    end
+
     # Return default value belonging to key
-    # @param key the default value to retrieve.
+    # @param [Object] key the default value to retrieve.
+    # @return [Object] value the default value
     def default(key = nil)
-      @table.default(key)
+      @table.default(@serializer.key_for(key))
     end

     # Retrieve a value at key from the database. If the default value was specified
     # when this database was created, that value will be set and returned. Aliased
     # as <tt>get</tt>.
-    # @param key the value to retrieve from the database.
+    # @param [Object] key the value to retrieve from the database.
+    # @return [Object] the value
     def [](key)
       @table[@serializer.key_for(key)]
     end
-    alias_method :get,
+    alias_method :get, '[]'

     # Set a key in the database to be written at some future date. If the data
     # needs to be persisted immediately, call <tt>db.set(key, value, true)</tt>.
-    # @param [
-    # @param value the value to store
+    # @param [Object] key the key of the storage slot in the database
+    # @param [Object] value the value to store
+    # @return [Object] the value
     def []=(key, value)
       key = @serializer.key_for(key)
-      @
+      @journal << [key, value]
       @table[key] = value
     end
-    alias_method :set,
+    alias_method :set, '[]='

     # set! flushes data immediately to disk.
-    # @param key the key of the storage slot in the database
-    # @param value the value to store
+    # @param [Object] key the key of the storage slot in the database
+    # @param [Object] value the value to store
+    # @return [Object] the value
     def set!(key, value)
       set(key, value)
       flush
@@ -94,15 +81,17 @@ module Daybreak
     end

     # Delete a key from the database
-    # @param key the key of the storage slot in the database
+    # @param [Object] key the key of the storage slot in the database
+    # @return [Object] the value
     def delete(key)
       key = @serializer.key_for(key)
-      @
+      @journal << [key]
       @table.delete(key)
     end

     # Immediately delete the key on disk.
-    # @param key the key of the storage slot in the database
+    # @param [Object] key the key of the storage slot in the database
+    # @return [Object] the value
     def delete!(key)
       value = delete(key)
       flush
@@ -110,24 +99,29 @@ module Daybreak
     end

     # Update database with hash (Fast batch update)
+    # @param [Hash] hash the key/value hash
+    # @return [DB] self
     def update(hash)
       shash = {}
       hash.each do |key, value|
         shash[@serializer.key_for(key)] = value
       end
-      @
+      @journal << shash
       @table.update(shash)
       self
     end

     # Updata database and flush data to disk.
+    # @param [Hash] hash the key/value hash
+    # @return [DB] self
     def update!(hash)
       update(hash)
-      flush
+      @journal.flush
     end

-    # Does this db have
-    # @param key the key to check if the DB has
+    # Does this db have this key?
+    # @param [Object] key the key to check if the DB has it
+    # @return [Boolean]
     def has_key?(key)
       @table.has_key?(@serializer.key_for(key))
     end
@@ -135,13 +129,16 @@ module Daybreak
     alias_method :include?, :has_key?
     alias_method :member?, :has_key?

+    # Does this db have this value?
+    # @param [Object] value the value to check if the DB has it
+    # @return [Boolean]
     def has_value?(value)
       @table.has_value?(value)
     end
     alias_method :value?, :has_value?

     # Return the number of stored items.
-    # @return [
+    # @return [Fixnum]
     def size
       @table.size
     end
@@ -149,8 +146,15 @@ module Daybreak

     # Utility method that will return the size of the database in bytes,
     # useful for determining when to compact
+    # @return [Fixnum]
     def bytesize
-      @
+      @journal.bytesize
+    end
+
+    # Counter of how many records are in the journal
+    # @return [Fixnum]
+    def logsize
+      @journal.size
     end

     # Return true if database is empty.
@@ -168,232 +172,111 @@ module Daybreak
     end

     # Return the keys in the db.
-    # @return [Array]
+    # @return [Array<String>]
     def keys
       @table.keys
     end

     # Flush all changes to disk.
+    # @return [DB] self
     def flush
-      @
+      @journal.flush
       self
     end

     # Sync the database with what is on disk, by first flushing changes, and
-    # then
-
-
-      load
+    # then loading the new records if necessary.
+    # @return [DB] self
+    def load
+      @journal.load
+      self
     end
+    alias_method :sunrise, :load

-    # Lock the database for an exclusive commit
+    # Lock the database for an exclusive commit across processes and threads
+    # @note This method performs an expensive locking over process boundaries.
+    #   If you want to synchronize only between threads, use #synchronize.
+    # @see #synchronize
     # @yield a block where every change to the database is synced
+    # @yieldparam [DB] db
+    # @return result of the block
     def lock
-      @mutex.synchronize
-
-
-
-
-
-
-
-
-
-
-
+      @mutex.synchronize { @journal.lock { yield self } }
+    end
+
+    # Synchronize access to the database from multiple threads
+    # @note Daybreak is not thread safe, if you want to access it from
+    #   multiple threads, all accesses have to be in the #synchronize block.
+    # @see #lock
+    # @yield a block where every change to the database is synced
+    # @yieldparam [DB] db
+    # @return result of the block
+    def synchronize
+      @mutex.synchronize { yield self }
     end

     # Remove all keys and values from the database.
+    # @return [DB] self
     def clear
-      flush
-      with_tmpfile do |path, file|
-        file.write(@format.header)
-        file.close
-        # Clear acts like a compactification
-        File.rename(path, @file)
-      end
       @table.clear
-
+      @journal.clear
       self
     end

     # Compact the database to remove stale commits and reduce the file size.
+    # @return [DB] self
     def compact
-
-
-        # Compactified database has the same size -> return
-        return self if @pos == file.write(dump)
-        with_flock(File::LOCK_EX) do
-          # Database was compactified in the meantime
-          if @pos != nil
-            # Append changed journal records if the database changed during compactification
-            file.write(read)
-            file.close
-            File.rename(path, @file)
-          end
-        end
-      end
-      open
-      load
+      @journal.compact { @table }
+      self
     end

     # Close the database for reading and writing.
+    # @return nil
     def close
-      @
-      @worker.join
-      @fd.close
-      @queue.stop if @queue.respond_to?(:stop)
+      @journal.close
       @@databases_mutex.synchronize { @@databases.delete(self) }
       nil
     end

     # Check to see if we've already closed the database.
+    # @return [Boolean]
     def closed?
-      @
+      @journal.closed?
     end

     private

-    #
-
-      if @default != nil
-        value = @default.respond_to?(:call) ? @default.call(key) : @default
-        @queue << [key, value]
-        @table[key] = value
-      end
-    end
-
-    # Update the @table with records
-    def load
-      buf = read
-      until buf.empty?
-        record = @format.parse(buf)
-        if record.size == 1
-          @table.delete(record.first)
-        else
-          @table[record.first] = @serializer.load(record.last)
-        end
-        @logsize += 1
-      end
-      self
-    end
-
-    # Open or reopen file
-    def open
-      @fd.close if @fd
-      @fd = File.open(@file, 'ab+')
-      @fd.advise(:sequential) if @fd.respond_to? :advise
-      stat = @fd.stat
-      @inode = stat.ino
-      @logsize = 0
-      write(@format.header) if stat.size == 0
-      @pos = nil
-    end
-
-    # Read new file content
-    def read
-      with_flock(File::LOCK_SH) do
-        # File was opened
-        unless @pos
-          @fd.pos = 0
-          @format.read_header(@fd)
-        else
-          @fd.pos = @pos
-        end
-        buf = @fd.read
-        @pos = @fd.pos
-        buf
-      end
-    end
+    # @private
+    @@databases = []

-    #
-
-      dump = @format.header
-      # each is faster than inject
-      @table.each do |record|
-        record[1] = @serializer.dump(record.last)
-        dump << @format.dump(record)
-      end
-      dump
-    end
+    # @private
+    @@databases_mutex = Mutex.new

-    #
-
+    # A handler that will ensure that databases are closed and synced when the
+    # current process exits.
+    # @private
+    def self.exit_handler
       loop do
-
-
-
-
-
-
-
-        write_record(record)
+        db = @@databases_mutex.synchronize { @@databases.first }
+        break unless db
+        warn "Daybreak database #{db.file} was not closed, state might be inconsistent"
+        begin
+          db.close
+        rescue Exception => ex
+          warn "Failed to close daybreak database: #{ex.message}"
         end
-        @queue.pop
       end
-    rescue Exception => ex
-      warn "Daybreak worker: #{ex.message}"
-      retry
     end

-
-    def write_batch(records)
-      dump = ''
-      records.each do |record|
-        record[1] = @serializer.dump(record.last)
-        dump << @format.dump(record)
-      end
-      write(dump)
-      @logsize += records.size
-    end
+    at_exit { Daybreak::DB.exit_handler }

-    #
-    def
-
-
-
-
-
-    # Write data to output stream and advance @pos
-    def write(dump)
-      with_flock(File::LOCK_EX) do
-        @fd.write(dump)
-        # Flush to make sure the file is really updated
-        @fd.flush
-      end
-      @pos = @fd.pos if @pos && @fd.pos == @pos + dump.bytesize
-    end
-
-    # Block with file lock
-    def with_flock(mode)
-      return yield if @locked
-      begin
-        loop do
-          # HACK: JRuby returns false if the process is already hold by the same process
-          # see https://github.com/jruby/jruby/issues/496
-          Thread.pass until @fd.flock(mode)
-          # Check if database was compactified in the meantime
-          # break if not
-          stat = @fd.stat
-          break if stat.nlink > 0 && stat.ino == @inode
-          open
-        end
-        @locked = true
-        yield
-      ensure
-        @fd.flock(File::LOCK_UN)
-        @locked = false
+    # The block used in @table for new records
+    def hash_default(_, key)
+      if @default != nil
+        value = @default.respond_to?(:call) ? @default.call(key) : @default
+        @journal << [key, value]
+        @table[key] = value
       end
     end
-
-    # Open temporary file and pass it to the block
-    def with_tmpfile
-      path = [@file, $$.to_s(36), Thread.current.object_id.to_s(36)].join
-      file = File.open(path, 'wb')
-      yield(path, file)
-    ensure
-      file.close unless file.closed?
-      File.unlink(path) if File.exists?(path)
-    end
   end
 end
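The refactoring above moves all file handling out of DB and into the new Journal while the public API stays small. For orientation, a minimal usage sketch of that API as it stands in 0.2.2 (the path and the values are illustrative, not taken from the diff):

  require 'daybreak'

  db = Daybreak::DB.new('example.db', :default => 0)
  db['visits'] = 1            # queued; the journal's background worker persists it
  db.set!('flushed', true)    # set! writes through to disk immediately
  db.flush                    # block until every queued record is on disk
  db.sunrise                  # alias of #load: pick up records written by other processes

  # Exclusive cross-process commit vs. cheap in-process synchronization:
  db.lock        {|d| d['visits'] += 1 }   # flocks the journal file
  db.synchronize {|d| d['visits'] += 1 }   # only takes the in-process mutex

  db.compact                  # rewrite the journal so it holds only the live table
  db.close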
data/lib/daybreak/format.rb
CHANGED
@@ -6,6 +6,7 @@ module Daybreak
   class Format
     # Read database header from input stream
     # @param [#read] input the input stream
+    # @return void
     def read_header(input)
       raise 'Not a Daybreak database' if input.read(MAGIC.bytesize) != MAGIC
       ver = input.read(2).unpack('n').first
@@ -13,13 +14,14 @@ module Daybreak
     end

     # Return database header as string
+    # @return [String] database file header
     def header
       MAGIC + [VERSION].pack('n')
     end

     # Serialize record and return string
-    # @param [Array] record an array with [key, value] or [key] if the record is
-    #
+    # @param [Array] record an array with [key, value] or [key] if the record is deleted
+    # @return [String] serialized record
     def dump(record)
       data =
         if record.size == 1
@@ -32,6 +34,7 @@ module Daybreak

     # Deserialize record from buffer
     # @param [String] buf the buffer to read from
+    # @return [Array] deserialized record [key, value] or [key] if the record is deleted
     def parse(buf)
       key_size, value_size = buf[0, 8].unpack('NN')
       data = buf.slice!(0, 8 + key_size + (value_size == DELETE ? 0 : value_size))
@@ -41,10 +44,18 @@ module Daybreak

     protected

+    # Magic string of the file header
     MAGIC = 'DAYBREAK'
+
+    # Database file format version
     VERSION = 1
+
+    # Special value size used for deleted records
     DELETE = (1 << 32) - 1

+    # Compute crc32 of string
+    # @param [String] s a string
+    # @return [Fixnum]
     def crc32(s)
       [Zlib.crc32(s, 0)].pack('N')
     end
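The constants and the dump/parse signatures above pin down the record layout: two 32-bit big-endian sizes, then the key and value bytes, with the DELETE sentinel in the value-size field marking a deletion. A sketch of how two records would be packed, assuming the 4-byte CRC trailer implied by the crc32 helper (exactly what the checksum covers is outside the hunks shown here):

  require 'zlib'

  MAGIC   = 'DAYBREAK'
  VERSION = 1
  DELETE  = (1 << 32) - 1

  header = MAGIC + [VERSION].pack('n')              # file header: magic string + 16-bit version

  key, value = 'greeting', 'hello'
  put = [key.bytesize, value.bytesize].pack('NN') + key + value   # put record
  del = [key.bytesize, DELETE].pack('NN') + key                   # delete record: no value bytes

  # Assumed 4-byte CRC trailer per record, mirroring the crc32 helper above.
  put << [Zlib.crc32(put, 0)].pack('N')
  del << [Zlib.crc32(del, 0)].pack('N')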
data/lib/daybreak/journal.rb
ADDED
@@ -0,0 +1,205 @@
+module Daybreak
+  # Daybreak::Journal handles background io, compaction and is the arbiter
+  # of multiprocess safety
+  # @api private
+  class Journal < Queue
+    attr_reader :size, :file
+
+    def initialize(file, format, serializer, &block)
+      super()
+      @file, @format, @serializer, @emit = file, format, serializer, block
+      open
+      @worker = Thread.new(&method(:worker))
+      @worker.priority = -1
+      load
+    end
+
+    # Is the journal closed?
+    def closed?
+      @fd.closed?
+    end
+
+    # Clear the queue and close the file handler
+    def close
+      self << nil
+      @worker.join
+      @fd.close
+      super
+    end
+
+    # Load new journal entries
+    def load
+      flush
+      replay
+    end
+
+    # Lock the logfile across thread and process boundaries
+    def lock
+      # Flush everything to start with a clean state
+      # and to protect the @locked variable
+      flush
+
+      with_flock(File::LOCK_EX) do
+        replay
+        result = yield
+        flush
+        result
+      end
+    end
+
+    # Clear the database log and yield
+    def clear
+      flush
+      with_tmpfile do |path, file|
+        file.write(@format.header)
+        file.close
+        # Clear replaces the database file like a compactification does
+        with_flock(File::LOCK_EX) do
+          File.rename(path, @file)
+        end
+      end
+      open
+    end
+
+    # Compact the logfile to represent the in-memory state
+    def compact
+      load
+      with_tmpfile do |path, file|
+        # Compactified database has the same size -> return
+        return self if @pos == file.write(dump(yield, @format.header))
+        with_flock(File::LOCK_EX) do
+          # Database was replaced (cleared or compactified) in the meantime
+          if @pos != nil
+            # Append changed journal records if the database changed during compactification
+            file.write(read)
+            file.close
+            File.rename(path, @file)
+          end
+        end
+      end
+      open
+      replay
+    end
+
+    # Return byte size of journal
+    def bytesize
+      @fd.stat.size
+    end
+
+    private
+
+    # Emit records as we parse them
+    def replay
+      buf = read
+      until buf.empty?
+        @emit.call(@format.parse(buf))
+        @size += 1
+      end
+    end
+
+    # Open or reopen file
+    def open
+      @fd.close if @fd
+      @fd = File.open(@file, 'ab+')
+      @fd.advise(:sequential) if @fd.respond_to? :advise
+      stat = @fd.stat
+      @inode = stat.ino
+      write(@format.header) if stat.size == 0
+      @pos = nil
+    end
+
+    # Read new file content
+    def read
+      with_flock(File::LOCK_SH) do
+        # File was opened
+        unless @pos
+          @fd.pos = 0
+          @format.read_header(@fd)
+          @size = 0
+          @emit.call(nil)
+        else
+          @fd.pos = @pos
+        end
+        buf = @fd.read
+        @pos = @fd.pos
+        buf
+      end
+    end
+
+    # Return database dump as string
+    def dump(records, dump = '')
+      # each is faster than inject
+      records.each do |record|
+        record[1] = @serializer.dump(record.last)
+        dump << @format.dump(record)
+      end
+      dump
+    end
+
+    # Worker thread
+    def worker
+      loop do
+        case record = first
+        when Hash
+          # Write batch update
+          write(dump(record))
+          @size += record.size
+        when nil
+          pop
+          break
+        else
+          # Write single record
+          record[1] = @serializer.dump(record.last) if record.size > 1
+          write(@format.dump(record))
+          @size += 1
+        end
+        pop
+      end
+    rescue Exception => ex
+      warn "Daybreak worker: #{ex.message}"
+      retry
+    end
+
+    # Write data to output stream and advance @pos
+    def write(dump)
+      with_flock(File::LOCK_EX) do
+        @fd.write(dump)
+        # Flush to make sure the file is really updated
+        @fd.flush
+      end
+      @pos = @fd.pos if @pos && @fd.pos == @pos + dump.bytesize
+    end
+
+    # Block with file lock
+    def with_flock(mode)
+      return yield if @locked
+      begin
+        loop do
+          # HACK: JRuby returns false if the process is already hold by the same process
+          # see https://github.com/jruby/jruby/issues/496
+          Thread.pass until @fd.flock(mode)
+          # Check if database was replaced (cleared or compactified) in the meantime
+          # break if not
+          stat = @fd.stat
+          break if stat.nlink > 0 && stat.ino == @inode
+          open
+        end
+        @locked = true
+        yield
+      ensure
+        @fd.flock(File::LOCK_UN)
+        @locked = false
+      end
+    end
+
+    # Open temporary file and pass it to the block
+    def with_tmpfile
+      path = [@file, $$.to_s(36), Thread.current.object_id.to_s(36)].join
+      file = File.open(path, 'wb')
+      yield(path, file)
+    ensure
+      file.close unless file.closed?
+      File.unlink(path) if File.exists?(path)
+    end
+  end
+end
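The block passed to Journal.new is the entire contract between Journal and DB: nil means the header was (re)read and the table should start from scratch, a one-element record is a deletion, and a two-element record is a put. Journal is marked @api private, so the direct construction below is only a sketch to illustrate that protocol (the path is illustrative):

  require 'daybreak'

  table = {}
  serializer = Daybreak::Serializer::Default.new
  journal = Daybreak::Journal.new('example.db', Daybreak::Format.new, serializer) do |record|
    if !record                  # header re-read: forget everything
      table.clear
    elsif record.size == 1      # delete record
      table.delete(record.first)
    else                        # put record
      table[record.first] = serializer.load(record.last)
    end
  end

  journal << ['greeting', 'hello']   # queued; the worker thread serializes and appends it
  journal.flush                      # block until it is on disk
  journal.close

  # Note that DB updates its in-memory table directly on write; the block above mainly
  # replays records found on disk, e.g. at startup or after another process wrote them.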
data/lib/daybreak/queue/mri.rb
CHANGED
@@ -1,8 +1,12 @@
 module Daybreak
+  # A queue for ruby implementations with a GIL
+  #
+  # HACK: Dangerous optimization on MRI which has a
+  # global interpreter lock and makes the @queue array
+  # thread safe.
+  #
+  # @api private
   class Queue
-    # HACK: Dangerous optimization on MRI which has a
-    # global interpreter lock and makes the @queue array
-    # thread safe.
     def initialize
       @queue, @full, @empty = [], [], []
       @stop = false
@@ -24,7 +28,7 @@ module Daybreak
       end
     end

-    def
+    def first
       while @queue.empty?
         begin
           @full << Thread.current
@@ -49,7 +53,7 @@ module Daybreak
       end
     end

-    def
+    def close
       @stop = true
       @heartbeat.join
     end
@@ -66,4 +70,4 @@ module Daybreak
       end
     end
   end
-end
+end
data/lib/daybreak/queue/threaded.rb
CHANGED
@@ -23,7 +23,7 @@ module Daybreak
       end
     end

-    def
+    def first
       @mutex.synchronize do
         @full.wait(@mutex) while @queue.empty?
         @queue.first
@@ -35,5 +35,8 @@ module Daybreak
         @empty.wait(@mutex) until @queue.empty?
       end
     end
+
+    def close
+    end
   end
-end
+end
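Both queue variants now expose the same small contract the journal worker relies on: << pushes a record, first blocks until one is available without removing it, pop removes it, flush blocks until the queue has drained, and close shuts the queue down. Because the worker only pops after a record has been written out, flush doubles as a durability barrier. A toy consumer in that style, not Daybreak's own worker (Queue is internal, and the puts line stands in for the real durable write):

  require 'daybreak'

  queue = Daybreak::Queue.new
  consumer = Thread.new do
    while (record = queue.first)       # blocks until something is queued; nil ends the loop
      puts record.inspect              # stand-in for writing the record to disk
      queue.pop                        # drop it only after it has been handled
    end
    queue.pop                          # consume the terminating nil
  end

  queue << ['key', 'value']
  queue.flush                          # returns only once the record above was consumed
  queue << nil                         # ask the consumer to stop
  consumer.join
  queue.close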
data/lib/daybreak/serializer.rb
CHANGED
@@ -4,17 +4,23 @@ module Daybreak
   # keys to strings and marshalls values
   # @api public
   class Default
-    #
+    # Transform the key to a string
+    # @param [Object] key
+    # @return [String] key transformed to string
     def key_for(key)
       key.to_s
     end

     # Serialize a value
+    # @param [Object] value
+    # @return [String] value transformed to string
     def dump(value)
       Marshal.dump(value)
     end

     # Parse a value
+    # @param [String] value
+    # @return [Object] deserialized value
     def load(value)
       Marshal.load(value)
     end
@@ -23,14 +29,17 @@ module Daybreak
   # Serializer which does nothing
   # @api public
   class None
+    # (see Daybreak::Serializer::Default#key_for)
     def key_for(key)
       key
     end

+    # (see Daybreak::Serializer::Default#dump)
     def dump(value)
       value
     end

+    # (see Daybreak::Serializer::Default#load)
     def load(value)
       value
     end
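The Default/None pair documents the whole serializer contract: key_for turns a key into the string stored on disk, dump and load convert values to and from bytes. Any class with those three methods can be handed to DB via the :serializer option (DB instantiates it). A hypothetical YAML-based variant, purely as an illustration:

  require 'daybreak'
  require 'yaml'

  # Hypothetical serializer following the three-method contract shown above.
  class YamlSerializer
    def key_for(key)
      key.to_s
    end

    def dump(value)
      YAML.dump(value)
    end

    def load(value)
      YAML.load(value)
    end
  end

  db = Daybreak::DB.new('example.db', :serializer => YamlSerializer)  # pass the class, not an instance
  db['config'] = { 'theme' => 'dark' }
  db.close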
data/lib/daybreak/version.rb
CHANGED
data/script/bench
CHANGED
@@ -85,14 +85,14 @@ run db, 'with lock' do
 end

 db = Daybreak::DB.new DB_PATH
-run db, 'with
+run db, 'with load' do
   DATA.each do |i|
     db[i] = i
-    db.
+    db.sunrise
   end
   DATA.each do |i|
     $errors += 1 unless db[i] == i
-    db.
+    db.sunrise
   end
 end

data/test/test.rb
CHANGED
@@ -31,22 +31,22 @@ describe Daybreak::DB do
   it 'should persist values' do
     @db['1'] = '4'
     @db['4'] = '1'
-    assert_equal @db.
+    assert_equal @db.sunrise, @db

     assert_equal @db['1'], '4'
-
-    assert_equal
-    assert_equal
-    assert_equal
+    db = Daybreak::DB.new DB_PATH
+    assert_equal db['1'], '4'
+    assert_equal db['4'], '1'
+    assert_equal db.close, nil
   end

   it 'should persist after batch update' do
     @db.update!(1 => :a, 2 => :b)

-
-    assert_equal
-    assert_equal
-    assert_equal
+    db = Daybreak::DB.new DB_PATH
+    assert_equal db[1], :a
+    assert_equal db[2], :b
+    assert_equal db.close, nil
   end

   it 'should persist after clear' do
@@ -84,7 +84,7 @@ describe Daybreak::DB do
     @db['4'] = '1'
     assert_equal @db.flush, @db

-    db.
+    db.sunrise
     assert_equal db['1'], '4'
     assert_equal db['4'], '1'
     db.close
@@ -100,7 +100,7 @@ describe Daybreak::DB do
     @db['4'] = '1'
     @db.flush

-    db.
+    db.sunrise
     assert_equal db['1'], '4'
     assert_equal db['4'], '1'
     db.close
@@ -109,7 +109,7 @@ describe Daybreak::DB do
   it 'should compact cleanly' do
     @db[1] = 1
     @db[1] = 1
-    @db.
+    @db.sunrise

     size = File.stat(DB_PATH).size
     @db.compact
@@ -119,6 +119,7 @@ describe Daybreak::DB do

   it 'should allow for default values' do
     db = Daybreak::DB.new(DB_PATH, :default => 0)
+    assert_equal db.default(1), 0
     assert_equal db[1], 0
     assert db.include? '1'
     db[1] = 1
@@ -130,6 +131,7 @@ describe Daybreak::DB do

   it 'should handle default values that are procs' do
     db = Daybreak::DB.new(DB_PATH) {|key| set = Set.new; set << key }
+    assert db.default(:test).include? 'test'
     assert db['foo'].is_a? Set
     assert db.include? 'foo'
     assert db['bar'].include? 'bar'
@@ -141,42 +143,56 @@ describe Daybreak::DB do

   it 'should be able to sync competing writes' do
     @db.set! '1', 4
-
-
-    @db.
+    db = Daybreak::DB.new DB_PATH
+    db.set! '1', 5
+    @db.sunrise
     assert_equal @db['1'], 5
-
+    db.close
   end

   it 'should be able to handle another process\'s call to compact' do
     @db.lock { 20.times {|i| @db[i] = i } }
-
+    db = Daybreak::DB.new DB_PATH
     @db.lock { 20.times {|i| @db[i] = i } }
     @db.compact
-
-    assert_equal 19,
-
+    db.sunrise
+    assert_equal 19, db['19']
+    db.close
   end

   it 'can empty the database' do
     20.times {|i| @db[i] = i }
     @db.clear
-
-    assert_equal nil,
-
+    db = Daybreak::DB.new DB_PATH
+    assert_equal nil, db['19']
+    db.close
   end

   it 'should handle deletions' do
-    @db[
-    @db[
+    @db['one'] = 1
+    @db['two'] = 2
     @db.delete! 'two'
     assert !@db.has_key?('two')
     assert_equal @db['two'], nil

-
-    assert !
-    assert_equal
-
+    db = Daybreak::DB.new DB_PATH
+    assert !db.has_key?('two')
+    assert_equal db['two'], nil
+    db.close
+  end
+
+  it 'should synchronize deletions after compact' do
+    @db['one'] = 1
+    @db['two'] = 2
+    @db.flush
+    db = Daybreak::DB.new DB_PATH
+    assert db.has_key?('two')
+    @db.delete! 'two'
+    @db.compact
+    db.sunrise
+    assert !db.has_key?('two')
+    assert_equal db['two'], nil
+    db.close
   end

   it 'should close and reopen the file when clearing the database' do
@@ -189,7 +205,17 @@ describe Daybreak::DB do

   it 'should have threadsafe lock' do
     @db[1] = 0
-    inc = proc { 1000.times { @db.lock {
+    inc = proc { 1000.times { @db.lock {|d| d[1] += 1 } } }
+    a = Thread.new &inc
+    b = Thread.new &inc
+    a.join
+    b.join
+    assert_equal @db[1], 2000
+  end
+
+  it 'should have threadsafe synchronize' do
+    @db[1] = 0
+    inc = proc { 1000.times { @db.synchronize {|d| d[1] += 1 } } }
     a = Thread.new &inc
     b = Thread.new &inc
     a.join
@@ -358,6 +384,10 @@ describe Daybreak::DB do
     db.close
   end

+  it 'should report the bytesize' do
+    assert @db.bytesize > 0
+  end
+
   after do
     @db.clear
     @db.close
metadata
CHANGED
@@ -1,62 +1,57 @@
---- !ruby/object:Gem::Specification
+--- !ruby/object:Gem::Specification
 name: daybreak
-version: !ruby/object:Gem::Version
-
+version: !ruby/object:Gem::Version
+  version: 0.2.2
   prerelease:
-  segments:
-  - 0
-  - 2
-  - 1
-  version: 0.2.1
 platform: ruby
-authors:
+authors:
 - Jeff Larson
 - Daniel Mendler
 autorequire:
 bindir: bin
 cert_chain: []
-
-
-
-- !ruby/object:Gem::Dependency
+date: 2013-01-18 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
   name: rake
-
-  requirement: &id001 !ruby/object:Gem::Requirement
+  requirement: !ruby/object:Gem::Requirement
     none: false
-    requirements:
-    - -
-      - !ruby/object:Gem::Version
-
-        segments:
-        - 0
-        version: "0"
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
   type: :development
-  version_requirements: *id001
-- !ruby/object:Gem::Dependency
-  name: minitest
   prerelease: false
-
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: minitest
+  requirement: !ruby/object:Gem::Requirement
     none: false
-    requirements:
-    - -
-      - !ruby/object:Gem::Version
-
-        segments:
-        - 0
-        version: "0"
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
   type: :development
-
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
 description: Incredibly fast pure-ruby key-value store
-email:
+email:
 - thejefflarson@gmail.com
 - mail@daniel-mendler.de
 executables: []
-
 extensions: []
-
 extra_rdoc_files: []
-
-files:
+files:
 - .gitignore
 - .travis.yml
 - .yardopts
@@ -69,6 +64,7 @@ files:
 - lib/daybreak.rb
 - lib/daybreak/db.rb
 - lib/daybreak/format.rb
+- lib/daybreak/journal.rb
 - lib/daybreak/queue.rb
 - lib/daybreak/queue/mri.rb
 - lib/daybreak/queue/threaded.rb
@@ -80,39 +76,32 @@ files:
 - test/test.rb
 - test/test_helper.rb
 homepage: http://propublica.github.com/daybreak/
-licenses:
+licenses:
 - MIT
 post_install_message:
 rdoc_options: []
-
-require_paths:
+require_paths:
 - lib
-required_ruby_version: !ruby/object:Gem::Requirement
+required_ruby_version: !ruby/object:Gem::Requirement
   none: false
-  requirements:
-  - -
-    - !ruby/object:Gem::Version
-
-
-      - 0
-      version: "0"
-required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
-  requirements:
-  - -
-    - !ruby/object:Gem::Version
-
-      segments:
-      - 0
-      version: "0"
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
 requirements: []
-
 rubyforge_project:
 rubygems_version: 1.8.24
 signing_key:
 specification_version: 3
-summary: Daybreak provides an incredibly fast pure-ruby in memory key-value store,
-
+summary: Daybreak provides an incredibly fast pure-ruby in memory key-value store,
+  which is multi-process safe and uses a journal log to store the data.
+test_files:
 - test/prof.rb
 - test/test.rb
 - test/test_helper.rb