strokedb 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CONTRIBUTORS +7 -0
- data/CREDITS +13 -0
- data/README +44 -0
- data/bin/sdbc +2 -0
- data/lib/config/config.rb +161 -0
- data/lib/data_structures/inverted_list.rb +297 -0
- data/lib/data_structures/point_query.rb +24 -0
- data/lib/data_structures/skiplist.rb +302 -0
- data/lib/document/associations.rb +107 -0
- data/lib/document/callback.rb +11 -0
- data/lib/document/coercions.rb +57 -0
- data/lib/document/delete.rb +28 -0
- data/lib/document/document.rb +684 -0
- data/lib/document/meta.rb +261 -0
- data/lib/document/slot.rb +199 -0
- data/lib/document/util.rb +27 -0
- data/lib/document/validations.rb +704 -0
- data/lib/document/versions.rb +106 -0
- data/lib/document/virtualize.rb +82 -0
- data/lib/init.rb +57 -0
- data/lib/stores/chainable_storage.rb +57 -0
- data/lib/stores/inverted_list_index/inverted_list_file_storage.rb +56 -0
- data/lib/stores/inverted_list_index/inverted_list_index.rb +49 -0
- data/lib/stores/remote_store.rb +172 -0
- data/lib/stores/skiplist_store/chunk.rb +119 -0
- data/lib/stores/skiplist_store/chunk_storage.rb +21 -0
- data/lib/stores/skiplist_store/file_chunk_storage.rb +44 -0
- data/lib/stores/skiplist_store/memory_chunk_storage.rb +37 -0
- data/lib/stores/skiplist_store/skiplist_store.rb +217 -0
- data/lib/stores/store.rb +5 -0
- data/lib/sync/chain_sync.rb +38 -0
- data/lib/sync/diff.rb +126 -0
- data/lib/sync/lamport_timestamp.rb +81 -0
- data/lib/sync/store_sync.rb +79 -0
- data/lib/sync/stroke_diff/array.rb +102 -0
- data/lib/sync/stroke_diff/default.rb +21 -0
- data/lib/sync/stroke_diff/hash.rb +186 -0
- data/lib/sync/stroke_diff/string.rb +116 -0
- data/lib/sync/stroke_diff/stroke_diff.rb +9 -0
- data/lib/util/blankslate.rb +42 -0
- data/lib/util/ext/blank.rb +50 -0
- data/lib/util/ext/enumerable.rb +36 -0
- data/lib/util/ext/fixnum.rb +16 -0
- data/lib/util/ext/hash.rb +22 -0
- data/lib/util/ext/object.rb +8 -0
- data/lib/util/ext/string.rb +35 -0
- data/lib/util/inflect.rb +217 -0
- data/lib/util/java_util.rb +9 -0
- data/lib/util/lazy_array.rb +54 -0
- data/lib/util/lazy_mapping_array.rb +64 -0
- data/lib/util/lazy_mapping_hash.rb +46 -0
- data/lib/util/serialization.rb +29 -0
- data/lib/util/trigger_partition.rb +136 -0
- data/lib/util/util.rb +38 -0
- data/lib/util/xml.rb +6 -0
- data/lib/view/view.rb +55 -0
- data/script/console +70 -0
- data/strokedb.rb +75 -0
- metadata +148 -0
data/lib/stores/skiplist_store/chunk_storage.rb
ADDED
@@ -0,0 +1,21 @@
module StrokeDB
  class ChunkStorage
    include ChainableStorage

    attr_accessor :authoritative_source

    def initialize(opts={})
    end

    def find(uuid)
      unless result = read(chunk_path(uuid))
        if authoritative_source
          result = authoritative_source.find(uuid)
          save!(result, authoritative_source) if result
        end
      end
      result
    end

  end
end
data/lib/stores/skiplist_store/file_chunk_storage.rb
ADDED
@@ -0,0 +1,44 @@
module StrokeDB
  class FileChunkStorage < ChunkStorage
    attr_accessor :path

    def initialize(opts={})
      opts = opts.stringify_keys
      @path = opts['path']
    end

    def delete!(chunk_uuid)
      FileUtils.rm_rf(chunk_path(chunk_uuid))
    end

    def clear!
      FileUtils.rm_rf @path
    end

    private

    def perform_save!(chunk)
      FileUtils.mkdir_p @path
      write(chunk_path(chunk.uuid), chunk)
    end

    def read(path)
      return nil unless File.exist?(path)
      raw_chunk = StrokeDB.deserialize(IO.read(path))
      Chunk.from_raw(raw_chunk) do |chunk|
        chunk.next_chunk = find(chunk.next_chunk_uuid) if chunk.next_chunk_uuid
      end
    end

    def write(path, chunk)
      File.open path, "w+" do |f|
        f.write StrokeDB.serialize(chunk.to_raw)
      end
    end

    def chunk_path(uuid)
      "#{@path}/#{uuid}"
    end

  end
end
data/lib/stores/skiplist_store/memory_chunk_storage.rb
ADDED
@@ -0,0 +1,37 @@
module StrokeDB
  class MemoryChunkStorage < ChunkStorage
    attr_accessor :chunks_cache

    def initialize(opts={})
      @chunks_cache = {}
    end

    def delete!(chunk_uuid)
      write(chunk_path(chunk_uuid), nil)
    end

    def clear!
      @chunks_cache.clear
    end

    private

    def perform_save!(chunk)
      write(chunk_path(chunk.uuid), chunk)
    end

    def read(path)
      @chunks_cache[path]
    end

    def write(path, chunk)
      @chunks_cache[path] = chunk
    end

    def chunk_path(uuid)
      uuid
    end

  end
end
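The two storages above implement the read/write/chunk_path contract expected by ChunkStorage. A minimal usage sketch (not part of the gem sources; the path and chunk UUID are placeholders) of chaining an in-memory cache in front of a file store via authoritative_source, so that a miss in memory falls through to disk and is written back by ChunkStorage#find:

require 'strokedb'
include StrokeDB

file_storage   = FileChunkStorage.new(:path => "/tmp/strokedb.chunks")
memory_storage = MemoryChunkStorage.new
memory_storage.authoritative_source = file_storage

# A miss in the memory cache is looked up in the file storage; if the chunk
# is found there, ChunkStorage#find saves it back into the memory storage.
chunk = memory_storage.find("f2b08bbc-0000-4000-8000-000000000000")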
data/lib/stores/skiplist_store/skiplist_store.rb
ADDED
@@ -0,0 +1,217 @@
module StrokeDB
  class SkiplistStore < Store
    include Enumerable
    attr_accessor :chunk_storage, :cut_level, :index_store

    def initialize(opts={})
      opts = opts.stringify_keys
      @chunk_storage = opts['storage']
      @cut_level = opts['cut_level'] || 4
      @index_store = opts['index']
      autosync! unless opts['noautosync']
      raise "Missing chunk storage" unless @chunk_storage
    end

    def find(uuid, version=nil, opts = {})
      uuid_version = uuid + (version ? ".#{version}" : "")
      master_chunk = @chunk_storage.find('MASTER')
      return nil unless master_chunk   # no master chunk yet
      chunk_uuid = master_chunk.find_nearest(uuid_version, nil)
      return nil unless chunk_uuid     # no chunks in master chunk yet
      chunk = @chunk_storage.find(chunk_uuid)
      return nil unless chunk

      raw_doc = chunk.find(uuid_version)

      if raw_doc
        return raw_doc if opts[:no_instantiation]
        doc = Document.from_raw(self, raw_doc.freeze)
        doc.extend(VersionedDocument) if version
        return doc
      end
      nil
    end

    def search(*args)
      return [] unless @index_store
      @index_store.find(*args)
    end

    def exists?(uuid, version=nil)
      !!find(uuid, version, :no_instantiation => true)
    end

    def head_version(uuid)
      raw_doc = find(uuid, nil, :no_instantiation => true)
      return raw_doc['version'] if raw_doc
      nil
    end

    def save!(doc)
      master_chunk = find_or_create_master_chunk
      next_timestamp

      insert_with_cut(doc.uuid, doc, master_chunk) unless doc.is_a?(VersionedDocument)
      insert_with_cut("#{doc.uuid}.#{doc.version}", doc, master_chunk)

      update_master_chunk!(doc, master_chunk)
    end

    def save_as_head!(doc)
      master_chunk = find_or_create_master_chunk
      insert_with_cut(doc.uuid, doc, master_chunk)
      update_master_chunk!(doc, master_chunk)
    end

    def full_dump
      puts "Full storage dump:"
      m = @chunk_storage.find('MASTER')
      puts "No master!" unless m
      m.each do |node|
        puts "[chunk: #{node.key}]"
        chunk = @chunk_storage.find(node.value)
        if chunk
          chunk.each do |node|
            puts "  [doc: #{node.key}] => {uuid: #{node.value['__uuid__']}, version: #{node.value['version']}, previous_version: #{node.value['previous_version']}"
          end
        else
          puts "  nil! (but in MASTER somehow?...)"
        end
      end
    end

    def each(options = {})
      return nil unless m = @chunk_storage.find('MASTER') # no master chunk yet
      after = options[:after_timestamp]
      include_versions = options[:include_versions]
      m.each do |node|
        chunk = @chunk_storage.find(node.value)
        next unless chunk
        next if after && chunk.timestamp <= after

        chunk.each do |node|
          next if after && (node.timestamp <= after)
          if uuid_match = node.key.match(/^#{UUID_RE}$/) || (include_versions && uuid_match = node.key.match(/#{UUID_RE}./))
            yield Document.from_raw(self, node.value)
          end
        end
      end
    end

    def timestamp
      @timestamp ||= (lts = find_or_create_master_chunk.timestamp) ? LTS.from_raw(lts) : LTS.zero(uuid)
    end

    def next_timestamp
      @timestamp = timestamp.next
    end

    def uuid
      return @uuid if @uuid
      master_chunk = @chunk_storage.find('MASTER')
      unless master_chunk
        @uuid = Util.random_uuid
      else
        @uuid = master_chunk.store_uuid
      end
      @uuid
    end

    def document
      find(uuid) || StoreInfo.create!(self, :kind => 'skiplist', :uuid => uuid)
    end

    def empty?
      !@chunk_storage.find('MASTER')
    end

    def inspect
      "#<Skiplist store #{uuid}#{empty? ? " (empty)" : ""}>"
    end

    def autosync!
      @autosync_mutex ||= Mutex.new
      @autosync = nil if @autosync && !@autosync.status
      at_exit { stop_autosync! }
      @autosync ||= Thread.new do
        until @stop_autosync
          @autosync_mutex.synchronize { chunk_storage.sync_chained_storages! }
          sleep(1)
        end
      end
    end

    def stop_autosync!
      if @autosync_mutex
        @autosync_mutex.synchronize { @stop_autosync = true; chunk_storage.sync_chained_storages! }
      end
    end

    private

    def insert_with_cut(uuid, doc, master_chunk)
      chunk_uuid = master_chunk.find_nearest(uuid)
      unless chunk_uuid && chunk = @chunk_storage.find(chunk_uuid)
        chunk = Chunk.new(@cut_level)
      end
      a, b = chunk.insert(uuid, doc.to_raw, nil, timestamp.counter)
      [a, b].compact.each do |chunk|
        chunk.store_uuid = self.uuid
        chunk.timestamp = timestamp.counter
      end
      # if split
      if b
        # rename chunk if the first chunk inconsistency detected
        if a.uuid != a.first_uuid
          old_uuid = a.uuid
          a.uuid = a.first_uuid
          @chunk_storage.save!(a)
          master_chunk.insert(a.uuid, a.uuid)
          # remove old chunk
          @chunk_storage.delete!(old_uuid)
          master_chunk.delete(old_uuid)
        else
          @chunk_storage.save!(a)
          master_chunk.insert(a.uuid, a.uuid)
        end
        @chunk_storage.save!(b)
        master_chunk.insert(b.uuid, b.uuid)
      else
        @chunk_storage.save!(a)
        master_chunk.insert(a.uuid, a.uuid)
      end
    end

    def find_or_create_master_chunk
      if master_chunk = @chunk_storage.find('MASTER')
        return master_chunk
      end
      master_chunk = Chunk.new(999)
      master_chunk.uuid = 'MASTER'
      master_chunk.store_uuid = uuid
      @chunk_storage.save!(master_chunk)
      master_chunk
    end

    def update_master_chunk!(doc, master_chunk)
      @chunk_storage.save!(master_chunk)

      # Update index
      if @index_store
        if doc.previous_version
          raw_pdoc = find(doc.uuid, doc.previous_version, :no_instantiation => true)
          pdoc = Document.from_raw(self, raw_pdoc.freeze, :skip_callbacks => true)
          pdoc.extend(VersionedDocument)
          @index_store.delete(pdoc)
        end
        @index_store.insert(doc)
        @index_store.save!
      end
    end

  end
end
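A hedged sketch of standing up a SkiplistStore over one of the chunk storages above, using only the API shown (the UUID is a placeholder; :noautosync => true keeps the background sync thread out of the example):

require 'strokedb'
include StrokeDB

store = SkiplistStore.new(:storage => MemoryChunkStorage.new,
                          :cut_level => 4,
                          :noautosync => true)

store.empty?                                            # => true, no MASTER chunk yet
store.exists?("f2b08bbc-0000-4000-8000-000000000000")   # => false
store.inspect                                           # => "#<Skiplist store <random uuid> (empty)>"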
data/lib/sync/chain_sync.rb
ADDED
@@ -0,0 +1,38 @@
module StrokeDB
  # You may mix this into specific sync implementations.
  module ChainSync
    # We take two chains as input: our chain ("to") and
    # a foreign chain ("from"). We calculate the difference
    # between those chains to decide how to synchronize.
    #
    # There are three cases:
    # 1) from is a subset of to -> nothing to sync
    # 2) to is a subset of from -> fast-forward merge
    # 3) else: merge case: return base, head_from & head_to
    def sync_chains(from, to)
      common = from & to
      raise NonMatchingChains, "no common element found" if common.empty?
      base = common[common.size - 1]
      ifrom = from.index(base)
      ito = to.index(base)

      # from:  -------------base
      # to:    -----base----head
      if ifrom == from.size - 1
        :up_to_date

      # from:  -----base----head
      # to:    -------------base
      elsif ito == to.size - 1
        [ :fast_forward, from[ifrom..-1] ]

      # from:  -----base--------head
      # to:    --------base-----head
      else
        [ :merge, from[ifrom..-1], to[ito..-1] ]
      end
    end
    class NonMatchingChains < Exception; end
  end
end
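Because sync_chains relies only on Array operations, its outcomes can be illustrated with plain version-chain arrays (a sketch; v1..v4 and v9 stand for version identifiers):

include StrokeDB::ChainSync

to = ["v1", "v2", "v3"]

sync_chains(["v1", "v2"], to)              # => :up_to_date ("from" is behind "to")
sync_chains(["v1", "v2", "v3", "v4"], to)  # => [:fast_forward, ["v3", "v4"]]
sync_chains(["v1", "v2", "v4"], to)        # => [:merge, ["v2", "v4"], ["v2", "v3"]]
sync_chains(["v9"], to)                    # raises NonMatchingChains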
data/lib/sync/diff.rb
ADDED
@@ -0,0 +1,126 @@
require 'diff/lcs'
module StrokeDB

  class SlotDiffStrategy
    def self.diff(from, to)
      to
    end
  end

  class DefaultSlotDiff < SlotDiffStrategy
    def self.diff(from, to)
      unless from.class == to.class # if value types are not the same
        to # then return new value
      else
        case to
        when /@##{UUID_RE}/, /@##{UUID_RE}.#{VERSION_RE}/
          to
        when Array, String
          ::Diff::LCS.diff(from, to).map do |d|
            d.map do |change|
              change.to_a
            end
          end
        when Hash
          ::Diff::LCS.diff(from.sort_by{|e| e.to_s}, to.sort_by{|e| e.to_s}).map do |d|
            d.map do |change|
              [change.to_a.first, {change.to_a.last.first => change.to_a.last.last}]
            end
          end
        else
          to
        end
      end
    end

    def self.patch(from, patch)
      case from
      when /@##{UUID_RE}/, /@##{UUID_RE}.#{VERSION_RE}/
        patch
      when String, Array
        lcs_patch = patch.map do |d|
          d.map do |change|
            ::Diff::LCS::Change.from_a(change)
          end
        end
        ::Diff::LCS.patch!(from, lcs_patch)
      when Hash
        lcs_patch = patch.map do |d|
          d.map_with_index do |change, index|
            ::Diff::LCS::Change.from_a([change.first, index, [change.last.keys.first, change.last.values.first]])
          end
        end
        diff = ::Diff::LCS.patch!(from.sort_by{|e| e.to_s}, lcs_patch)
        hash = {}
        diff.each do |v|
          hash[v.first] = v.last
        end
        hash
      else
        patch
      end
    end
  end

  Diff = Meta.new(:uuid => DIFF_UUID) do

    on_initialization do |diff|
      diff.added_slots = {} unless diff[:added_slots]
      diff.removed_slots = {} unless diff[:removed_slots]
      diff.updated_slots = {} unless diff[:updated_slots]
      diff.send!(:compute_diff) if diff.new?
    end

    def different?
      !updated_slots.empty? || !removed_slots.empty? || !added_slots.empty?
    end

    def patch!(document)
      added_slots.each_pair do |addition, value|
        document[addition] = value
      end
      removed_slots.keys.each do |removal|
        document.remove_slot!(removal)
      end
      updated_slots.each_pair do |update, value|
        if sk = strategy_class_for(update)
          document[update] = sk.patch(document[update], value)
        else
          document[update] = value
        end
      end
    end

    protected

    def compute_diff
      additions = to.slotnames - from.slotnames
      additions.each do |addition|
        self.added_slots[addition] = to[addition]
      end
      removals = from.slotnames - to.slotnames
      removals.each do |removal|
        self.removed_slots[removal] = from[removal]
      end
      updates = (to.slotnames - additions - ['version']).select {|slotname| to[slotname] != from[slotname]}
      updates.each do |update|
        unless sk = strategy_class_for(update)
          self.updated_slots[update] = to[update]
        else
          self.updated_slots[update] = sk.diff(from[update], to[update])
        end
      end
    end

    def strategy_class_for(slotname)
      if from.meta && strategy = from.meta["diff_strategy_#{slotname}"]
        _strategy_class = strategy.camelize.constantize rescue nil
        return _strategy_class if _strategy_class && _strategy_class.ancestors.include?(SlotDiffStrategy)
      end
      false
    end

  end

end
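DefaultSlotDiff delegates to the diff-lcs gem and serializes each change with Change#to_a, so a slot delta is expected to round-trip through patch. A hedged sketch for a String slot (assuming diff/lcs behaves as the code above expects):

require 'strokedb'
include StrokeDB

from = "skiplist store"
to   = "skip list stores"

delta   = DefaultSlotDiff.diff(from, to)   # nested arrays of [action, position, element]
patched = DefaultSlotDiff.patch(from, delta)
patched == to                              # expected => true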
data/lib/sync/lamport_timestamp.rb
ADDED
@@ -0,0 +1,81 @@
module StrokeDB
  class LamportTimestamp
    MAX_COUNTER = 2**64
    BASE = 16
    BASE_LENGTH = 16

    attr_reader :counter, :uuid

    def initialize(c = 0, __uuid = Util.random_uuid)
      if c > MAX_COUNTER
        raise CounterOverflow.new, "Max counter value is 2**64"
      end
      @counter = c
      @uuid = __uuid
    end

    def next
      LamportTimestamp.new(@counter + 1, @uuid)
    end

    def next!
      @counter += 1
      self
    end

    def dup
      LamportTimestamp.new(@counter, @uuid)
    end

    def marshal_dump
      @counter.to_s(BASE).rjust(BASE_LENGTH, '0') + @uuid
    end

    def marshal_load(dumped)
      @counter = dumped[0, BASE_LENGTH].to_i(BASE)
      @uuid = dumped[BASE_LENGTH, 36]
      self
    end

    def to_json
      marshal_dump.to_json
    end

    # Raw format
    def self.from_raw(raw_string)
      new.marshal_load(raw_string)
    end

    def to_raw
      marshal_dump
    end

    def to_s
      marshal_dump
    end

    def <=>(other)
      primary = (@counter <=> other.counter)
      primary == 0 ? (@uuid <=> other.uuid) : primary
    end

    def ==(other)
      @counter == other.counter && @uuid == other.uuid
    end

    def <(other)
      (self <=> other) < 0
    end

    def <=(other)
      (self <=> other) <= 0
    end

    def >(other)
      (self <=> other) > 0
    end

    def >=(other)
      (self <=> other) >= 0
    end

    def self.zero(__uuid = Util.random_uuid)
      ts = new(0)
      ts.instance_variable_set(:@uuid, __uuid)
      ts
    end

    def self.zero_string
      "0"*BASE_LENGTH + NIL_UUID
    end

    class CounterOverflow < Exception; end
  end

  LTS = LamportTimestamp
end
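A short sketch of the timestamp arithmetic and the raw round trip (the UUID literal is a placeholder):

require 'strokedb'
include StrokeDB

a = LamportTimestamp.new(1, "aaaaaaaa-aaaa-4aaa-8aaa-aaaaaaaaaaaa")
b = a.next                    # same UUID, counter bumped to 2

a < b                         # => true (counter is compared first, UUID breaks ties)
raw = b.to_raw                # 16 hex digits of the counter followed by the UUID
LTS.from_raw(raw) == b        # => true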
data/lib/sync/store_sync.rb
ADDED
@@ -0,0 +1,79 @@
module StrokeDB

  SynchronizationReport = Meta.new(:uuid => SYNCHRONIZATION_REPORT_UUID) do
    on_new_document do |report|
      report.conflicts = []
      report.added_documents = []
      report.fast_forwarded_documents = []
      report.non_matching_documents = []
    end
  end

  SynchronizationConflict = Meta.new(:uuid => SYNCHRONIZATION_CONFLICT_UUID) do
    def resolve!
      # by default, do nothing
    end
  end

  class Store
    def sync!(docs, _timestamp=nil)
      _timestamp_counter = timestamp.counter
      report = SynchronizationReport.new(self, :store_document => document, :timestamp => _timestamp_counter)
      existing_chain = {}
      docs.group_by {|doc| doc.uuid}.each_pair do |uuid, versions|
        doc = find(uuid)
        existing_chain[uuid] = doc.versions.all_versions if doc
      end
      case _timestamp
      when Numeric
        @timestamp = LTS.new(_timestamp, timestamp.uuid)
      when LamportTimestamp
        @timestamp = LTS.new(_timestamp.counter, timestamp.uuid)
      else
      end
      docs.each {|doc| save!(doc) unless exists?(doc.uuid, doc.version)}
      docs.group_by {|doc| doc.uuid}.each_pair do |uuid, versions|
        incoming_chain = find(uuid, versions.last.version).versions.all_versions
        if existing_chain[uuid].nil? or existing_chain[uuid].empty? # It is a new document
          added_doc = find(uuid, versions.last.version)
          save_as_head!(added_doc)
          report.added_documents << added_doc
        else
          begin
            sync = sync_chains(incoming_chain.reverse, existing_chain[uuid].reverse)
          rescue NonMatchingChains
            # raise NonMatchingDocumentCondition.new(uuid) # that will definitely leave garbage in the store (FIXME?)
            non_matching_doc = find(uuid)
            report.non_matching_documents << non_matching_doc
            next
          end
          resolution = sync.is_a?(Array) ? sync.first : sync
          case resolution
          when :up_to_date
            # nothing to do
          when :merge
            report.conflicts << SynchronizationConflict.create!(self, :document => find(uuid), :rev1 => sync[1], :rev2 => sync[2])
          when :fast_forward
            fast_forwarded_doc = find(uuid, sync[1].last)
            save_as_head!(fast_forwarded_doc)
            report.fast_forwarded_documents << fast_forwarded_doc
          else
            raise "Invalid sync resolution #{resolution}"
          end
        end
      end
      report.conflicts.each do |conflict|
        if resolution_strategy = conflict.document.meta[:resolution_strategy]
          conflict.metas << resolution_strategy
          conflict.save!
        end
        conflict.resolve!
      end
      report.save!
    end

    private

    include ChainSync
  end
end
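A hedged end-to-end sketch of pulling documents from one store into another (both stores and their documents are assumed to already exist; whether sync! returns the saved report depends on Document#save!, so treat that line as an assumption):

docs = []
remote_store.each(:include_versions => true) { |doc| docs << doc }

report = local_store.sync!(docs, remote_store.timestamp)

report.added_documents           # documents the local store had never seen
report.fast_forwarded_documents  # documents whose head simply moved forward
report.conflicts                 # SynchronizationConflict documents, already resolve!-d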