pontoon 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/lib/pontoon.rb +536 -0
- data/lib/pontoon/goliath.rb +248 -0
- metadata +115 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA1:
|
3
|
+
metadata.gz: 3d547ad0a54b22d03d8862391c8dfacdffb59fa0
|
4
|
+
data.tar.gz: f0140570f9bc1ad3c756748bf3e2d56ccb8972d7
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 4dfe51bc72db1c90793fd6da0f69bdbca28a2b71360134dbb222bc27b8deed8960b2fde0a71f37222b55154d4f3dbf2260f753cd4ebc27a91005b452f6440ad9
|
7
|
+
data.tar.gz: ed25f5d79a2f4d5526424c3c38fb1a9c59519c42461ac82755f86e8b640714288f633ca5479dfff4c544f9e966041a977daf7ee670acda569734ff563dcccee6
|
data/lib/pontoon.rb
ADDED
@@ -0,0 +1,536 @@
|
|
1
|
+
require 'delegate'
|
2
|
+
|
3
|
+
module Pontoon
|
4
|
+
# Cluster-wide tunables shared by every Node: the transport (rpc_provider),
# the coroutine/await shim (async_provider), and the election/update/heartbeat
# timing parameters (seconds). election_splay adds per-node random jitter so
# elections don't fire in lock-step.
Config = Struct.new(:rpc_provider, :async_provider, :election_timeout, :election_splay, :update_interval, :heartbeat_interval)
|
5
|
+
|
6
|
+
# The set of node ids participating in consensus.
class Cluster
  attr_reader :node_ids

  def initialize(*node_ids)
    @node_ids = node_ids
  end

  # Size of a majority: strictly more than half the members.
  def quorum
    (@node_ids.size / 2) + 1 # integer division rounds down
  end
end
|
17
|
+
|
18
|
+
# A single replicated-log entry: the election term it was created in, its
# position in the log, and the client command it carries.
class LogEntry
  attr_reader :term, :index, :command

  def initialize(term, index, command)
    @term, @index, @command = term, index, command
  end

  # Value equality: two entries are equal when term, index and command all match.
  def ==(other)
    [:term, :index, :command].all? do |attr|
      self.send(attr) == other.send(attr)
    end
  end

  def eql?(other)
    self == other
  end

  # Hash consistent with #==/#eql? so entries work as Hash/Set keys.
  # The previous implementation XOR-ed the raw attribute values
  # (h ^ command), which raised TypeError whenever command was a String
  # or any attribute was nil — e.g. the all-nil sentinel entry returned
  # by Log#last on an empty log. Hashing the attribute tuple is safe for
  # any attribute types and equal entries still hash equally.
  def hash
    [term, index, command].hash
  end
end
|
41
|
+
|
42
|
+
# An Array-backed Raft log. Differs from a bare Array in one way: #last on
# an empty (or all-falsy) log returns an all-nil sentinel LogEntry rather
# than nil, so callers may read .term/.index without nil checks.
class Log < DelegateClass(Array)
  def last(*args)
    return LogEntry.new(nil, nil, nil) unless self.any?
    super(*args)
  end
end
|
47
|
+
|
48
|
+
# Durable Raft state: the node's current term, the candidate it voted for
# in that term, and its replicated log. Setters enforce Raft's rules:
# terms only increase, and a term's vote is cast at most once.
class PersistentState #= Struct.new(:current_term, :voted_for, :log)
  attr_reader :current_term, :voted_for, :log

  def initialize
    @current_term = 0
    @voted_for = nil
    @log = Log.new([])
  end

  # Terms are monotonic; advancing to a new term clears the recorded vote.
  def current_term=(new_term)
    raise 'cannot restart an old term' if new_term <= @current_term
    @current_term = new_term
    @voted_for = nil
  end

  # A vote may only be recorded while no vote exists for the current term.
  def voted_for=(new_votee)
    raise 'cannot change vote for this term' unless @voted_for.nil?
    @voted_for = new_votee
  end

  # Replaces the log wholesale, wrapping the given collection in a Log.
  def log=(new_log)
    @log = Log.new(new_log)
  end
end
|
72
|
+
|
73
|
+
# Volatile per-node state: the highest log index known to be committed and
# the id of the currently-known leader. The commit index can only advance.
class TemporaryState
  attr_reader :commit_index
  attr_accessor :leader_id

  def initialize(commit_index, leader_id)
    @commit_index = commit_index
    @leader_id = leader_id
  end

  # Guards against moving the commit point backwards (uncommitting entries).
  def commit_index=(new_commit_index)
    if !@commit_index.nil? && @commit_index > new_commit_index
      raise 'cannot uncommit log entries'
    end
    @commit_index = new_commit_index
  end
end
|
86
|
+
|
87
|
+
# Leader-only bookkeeping: a lazily-built map of follower replication
# state plus the timer that paces heartbeat/update rounds.
class LeadershipState
  attr_reader :update_timer

  def initialize(update_interval)
    @update_timer = Timer.new(update_interval)
  end

  # node_id => FollowerState, created on first access.
  def followers
    @followers ||= {}
  end
end
|
98
|
+
|
99
|
+
# Per-follower replication state tracked by the leader: the next log index
# to send, and whether the last append round succeeded.
FollowerState = Struct.new(:next_index, :succeeded)

# RPC message shapes exchanged between nodes (see Node#handle_* and
# RpcProvider). Kept as bare Structs so they marshal trivially to/from JSON.
RequestVoteRequest = Struct.new(:term, :candidate_id, :last_log_index, :last_log_term)

#class RequestVoteRequest < Struct.new(:term, :candidate_id, :last_log_index, :last_log_term)
#  def term; @term.to_i; end
#  def last_log_index; @last_log_index.to_i; end
#  def last_log_term; @last_log_term.to_i; end
#end

RequestVoteResponse = Struct.new(:term, :vote_granted)

#class RequestVoteResponse < Struct.new(:term, :vote_granted)
#  def term; @term.to_i; end
#end

AppendEntriesRequest = Struct.new(:term, :leader_id, :prev_log_index, :prev_log_term, :entries, :commit_index)

AppendEntriesResponse = Struct.new(:term, :success)

# Client-facing command submission and its success/failure reply.
CommandRequest = Struct.new(:command)

CommandResponse = Struct.new(:success)
|
122
|
+
|
123
|
+
# Abstract transport interface. A concrete subclass (e.g. the Goliath
# HTTP/JSON provider) delivers Raft RPCs to the other nodes in the cluster.
class RpcProvider
  # Broadcast a RequestVoteRequest to every other node; yields
  # (voter_id, request, response) per reply.
  def request_votes(request, cluster)
    raise "Your RpcProvider subclass must implement #request_votes"
  end

  # Broadcast an AppendEntriesRequest (heartbeat/replication) to the cluster;
  # yields (node_id, response) per reply.
  def append_entries(request, cluster)
    raise "Your RpcProvider subclass must implement #append_entries"
  end

  # Send an AppendEntriesRequest to a single follower.
  def append_entries_to_follower(request, node_id)
    raise "Your RpcProvider subclass must implement #append_entries_to_follower"
  end

  # Forward a client CommandRequest to node_id and return its CommandResponse.
  def command(request, node_id)
    raise "Your RpcProvider subclass must implement #command"
  end
end
|
140
|
+
|
141
|
+
# Abstract cooperative-wait interface. A concrete subclass blocks the
# caller (without blocking the reactor) until the given block is truthy.
class AsyncProvider
  def await
    raise "Your AsyncProvider subclass must implement #await"
  end
end
|
146
|
+
|
147
|
+
# Interval timer with an optional random "splay" component used to
# de-synchronize election timeouts across nodes.
class Timer
  def initialize(interval, splay=0.0)
    @interval = interval.to_f
    @splay = splay.to_f
    # Start one interval in the past (plus random splay) so a freshly
    # constructed timer generally reads as already timed out.
    @start = Time.now - @interval + (rand * @splay)
  end

  # The base interval plus a fresh random splay offset.
  def splayed_interval
    (@interval + (rand * @splay))
  end

  # NOTE(review): this pushes @start a full splayed_interval into the
  # future, so #timeout (= @start + @interval) lands roughly two intervals
  # (+ splay) from now rather than one — confirm the doubling is intended.
  def reset!
    @start = Time.now + splayed_interval
  end

  # The absolute Time at which the timer elapses.
  def timeout
    @start + @interval
  end

  def timed_out?
    Time.now > timeout
  end
end
|
172
|
+
|
173
|
+
# A single Raft participant. A Node cycles through the follower, candidate
# and leader roles, driven by periodic #update calls and by the RPC
# handlers (#handle_request_vote, #handle_append_entries, #handle_command)
# invoked by the transport layer.
class Node
  attr_reader :id
  attr_reader :role
  attr_reader :config
  attr_reader :cluster
  attr_reader :persistent_state
  attr_reader :temporary_state
  attr_reader :election_timer

  FOLLOWER_ROLE = 0
  CANDIDATE_ROLE = 1
  LEADER_ROLE = 2

  # commit_handler (or the given block) is invoked with each command as it
  # becomes committed; it is how the application applies the log.
  def initialize(id, config, cluster, commit_handler=nil, &block)
    @id = id
    @role = FOLLOWER_ROLE
    @config = config
    @cluster = cluster
    @persistent_state = PersistentState.new
    @temporary_state = TemporaryState.new(nil, nil)
    @election_timer = Timer.new(config.election_timeout, config.election_splay)
    @commit_handler = commit_handler || (block.to_proc if block_given?)
  end

  # Periodic tick: dispatches to the role-specific update. @updating is a
  # re-entrancy guard (ticks can arrive while a previous one is mid-flight).
  def update
    return if @updating
    @updating = true
    indent = "\t" * (@id.to_i % 3)
    case @role
    when FOLLOWER_ROLE
      follower_update
    when CANDIDATE_ROLE
      candidate_update
    when LEADER_ROLE
      leader_update
    end
    @updating = false
  end

  # Follower tick: on election timeout, convert to candidate and run an
  # election immediately.
  def follower_update
    if @election_timer.timed_out?
      @role = CANDIDATE_ROLE
      candidate_update
    end
  end

  protected :follower_update

  # Candidate tick: on timeout, start a new term, vote for self, and
  # solicit votes. The vote callback returns true/false/nil to tell the
  # RPC provider whether a majority outcome has been reached.
  def candidate_update
    if @election_timer.timed_out?
      @persistent_state.current_term += 1
      @persistent_state.voted_for = @id
      reset_election_timeout
      last_log_entry = @persistent_state.log.last
      log_index = last_log_entry ? last_log_entry.index : nil
      log_term = last_log_entry ? last_log_entry.term : nil
      request = RequestVoteRequest.new(@persistent_state.current_term, @id, log_index, log_term)
      votes_for = 1 # candidate always votes for self
      votes_against = 0
      quorum = @cluster.quorum
      @config.rpc_provider.request_votes(request, @cluster) do |voter_id, request, response|
        elected = nil # no majority result yet
        if request.term != @persistent_state.current_term
          # this is a response to an out-of-date request, just ignore it
        elsif response.term > @persistent_state.current_term
          # a later term exists somewhere; abandon the candidacy
          @role = FOLLOWER_ROLE
          elected = false
        elsif response.vote_granted
          votes_for += 1
          elected = true if votes_for >= quorum
        else
          votes_against += 1
          elected = false if votes_against >= quorum
        end
        elected
      end
      if votes_for >= quorum
        @role = LEADER_ROLE
        establish_leadership
      else
        # not elected this round; a later timeout will retry
      end
    end
  end

  protected :candidate_update

  # Leader tick: send heartbeats when the update timer elapses, then
  # advance the commit index to the highest index replicated on a quorum
  # (the quorum-th smallest acknowledged index).
  def leader_update
    if @leadership_state.update_timer.timed_out?
      @leadership_state.update_timer.reset!
      send_heartbeats
    end
    if @leadership_state.followers.any?
      new_commit_index = @leadership_state.followers.values.
        select { |follower_state| follower_state.succeeded }.
        map { |follower_state| follower_state.next_index - 1 }.
        sort[@cluster.quorum - 1]
    else
      # single-node cluster: everything local is committed
      new_commit_index = @persistent_state.log.size - 1
    end
    handle_commits(new_commit_index)
  end

  protected :leader_update

  # Applies every newly committed command (in order) via the commit
  # handler and advances @temporary_state.commit_index as it goes.
  def handle_commits(new_commit_index)
    return if new_commit_index == @temporary_state.commit_index
    next_commit = @temporary_state.commit_index.nil? ? 0 : @temporary_state.commit_index + 1
    while next_commit <= new_commit_index
      @commit_handler.call(@persistent_state.log[next_commit].command) if @commit_handler
      @temporary_state.commit_index = next_commit
      next_commit += 1
    end
  end

  protected :handle_commits

  # Initializes leader bookkeeping (optimistically assuming followers are
  # up to date) and immediately announces leadership via heartbeats.
  def establish_leadership
    @leadership_state = LeadershipState.new(@config.update_interval)
    @temporary_state.leader_id = @id
    @cluster.node_ids.each do |node_id|
      next if node_id == @id
      follower_state = (@leadership_state.followers[node_id] ||= FollowerState.new)
      follower_state.next_index = @persistent_state.log.size
      follower_state.succeeded = false
    end
    send_heartbeats
  end

  protected :establish_leadership

  # Broadcasts an empty AppendEntriesRequest carrying the leader's latest
  # log position and commit index; replies funnel into
  # #append_entries_to_follower.
  def send_heartbeats
    last_log_entry = @persistent_state.log.last
    log_index = last_log_entry ? last_log_entry.index : nil
    log_term = last_log_entry ? last_log_entry.term : nil
    request = AppendEntriesRequest.new(
      @persistent_state.current_term,
      @id,
      log_index,
      log_term,
      [],
      @temporary_state.commit_index)

    @config.rpc_provider.append_entries(request, @cluster) do |node_id, response|
      append_entries_to_follower(node_id, request, response)
    end
  end

  protected :send_heartbeats

  # Processes one follower's append-entries reply. On success, records the
  # follower's new next_index; on failure (same/older term), backs up one
  # entry and retries with a longer log suffix, recursing until the
  # follower's log converges with the leader's.
  def append_entries_to_follower(node_id, request, response)
    if @role != LEADER_ROLE
      # we lost the leadership
    elsif response.success
      @leadership_state.followers[node_id].next_index = (request.prev_log_index || -1) + request.entries.count + 1
      @leadership_state.followers[node_id].succeeded = true
    elsif response.term <= @persistent_state.current_term
      @config.rpc_provider.append_entries_to_follower(request, node_id) do |node_id, response|
        if @role == LEADER_ROLE # make sure leadership wasn't lost since the request
          prev_log_index = (request.prev_log_index.nil? || request.prev_log_index <= 0) ? nil : request.prev_log_index - 1
          prev_log_term = nil
          entries = @persistent_state.log
          unless prev_log_index.nil?
            prev_log_term = @persistent_state.log[prev_log_index].term
            entries = @persistent_state.log.slice((prev_log_index + 1)..-1)
          end
          next_request = AppendEntriesRequest.new(
            @persistent_state.current_term,
            @id,
            prev_log_index,
            prev_log_term,
            entries,
            @temporary_state.commit_index)
          @config.rpc_provider.append_entries_to_follower(next_request, node_id) do |node_id, response|
            append_entries_to_follower(node_id, next_request, response)
          end
        end
      end
    end
  end

  protected :append_entries_to_follower

  # RPC: another node is soliciting our vote. Grants it only when the
  # request's term is current, we haven't voted for someone else, and the
  # candidate's log is at least as complete as ours.
  def handle_request_vote(request)
    response = RequestVoteResponse.new
    response.term = @persistent_state.current_term
    response.vote_granted = false

    return response if request.term < @persistent_state.current_term

    @temporary_state.leader_id = nil if request.term > @persistent_state.current_term

    step_down_if_new_term(request.term)

    if FOLLOWER_ROLE == @role
      if @persistent_state.voted_for == request.candidate_id
        # idempotent re-grant of the vote we already cast
        response.vote_granted = true
      elsif @persistent_state.voted_for.nil?
        if @persistent_state.log.empty?
          # this node has no log so it can't be ahead
          @persistent_state.voted_for = request.candidate_id
          response.vote_granted = true
        elsif request.last_log_term == @persistent_state.log.last.term &&
          (request.last_log_index || -1) < @persistent_state.log.last.index
          # candidate's log is incomplete compared to this node
        elsif (request.last_log_term || -1) < @persistent_state.log.last.term
          # candidate's log is incomplete compared to this node
        else
          @persistent_state.voted_for = request.candidate_id
          response.vote_granted = true
        end
      end
      reset_election_timeout if response.vote_granted
    end

    response
  end

  # RPC: the leader is replicating entries (or heartbeating). Validates the
  # term and the previous-entry match, truncates/appends the log, and
  # advances the commit index per the leader's word.
  def handle_append_entries(request)
    response = AppendEntriesResponse.new
    response.term = @persistent_state.current_term
    response.success = false

    return response if request.term < @persistent_state.current_term

    step_down_if_new_term(request.term)

    reset_election_timeout

    @temporary_state.leader_id = request.leader_id

    abs_log_index = abs_log_index_for(request.prev_log_index, request.prev_log_term)
    # no local entry matches (prev_log_index, prev_log_term): reject so the
    # leader backs up and retries with an earlier prefix
    return response if abs_log_index.nil? && !request.prev_log_index.nil? && !request.prev_log_term.nil?
    if @temporary_state.commit_index &&
      abs_log_index &&
      abs_log_index < @temporary_state.commit_index
      raise "Cannot truncate committed logs; @temporary_state.commit_index = #{@temporary_state.commit_index}; abs_log_index = #{abs_log_index}"
    end

    truncate_and_update_log(abs_log_index, request.entries)

    return response unless update_commit_index(request.commit_index)

    response.success = true
    response
  end

  # RPC: a client command. Followers/candidates wait for (or become) a
  # leader and forward/retry; the leader appends the command to its log and
  # blocks until the entry is committed on a quorum.
  def handle_command(request)
    response = CommandResponse.new(false)
    case @role
    when FOLLOWER_ROLE
      await_leader
      if @role == LEADER_ROLE
        handle_command(request)
      else
        # forward the command to the leader
        response = @config.rpc_provider.command(request, @temporary_state.leader_id)
      end
    when CANDIDATE_ROLE
      await_leader
      response = handle_command(request)
    when LEADER_ROLE
      last_log = @persistent_state.log.last
      log_entry = LogEntry.new(@persistent_state.current_term, last_log.index ? last_log.index + 1 : 0, request.command)
      @persistent_state.log << log_entry
      await_consensus(log_entry)
      response = CommandResponse.new(true)
    end
    response
  end

  # Cooperatively waits until the given entry is committed and still
  # present (same term and command) at its index — i.e. it survived and
  # was not overwritten by a competing leader.
  def await_consensus(log_entry)
    @config.async_provider.await do
      persisted_log_entry = @persistent_state.log[log_entry.index]
      !@temporary_state.commit_index.nil? &&
        @temporary_state.commit_index >= log_entry.index &&
        persisted_log_entry.term == log_entry.term &&
        persisted_log_entry.command == log_entry.command
    end
  end

  protected :await_consensus

  # Cooperatively waits until some leader is known (possibly ourselves,
  # after standing for election when no leader is known).
  def await_leader
    if @temporary_state.leader_id.nil?
      @role = CANDIDATE_ROLE
    end
    @config.async_provider.await do
      @role != CANDIDATE_ROLE && !@temporary_state.leader_id.nil?
    end
  end

  protected :await_leader

  # Seeing a newer term always demotes this node to follower.
  def step_down_if_new_term(request_term)
    if request_term > @persistent_state.current_term
      @persistent_state.current_term = request_term
      @role = FOLLOWER_ROLE
    end
  end

  protected :step_down_if_new_term

  def reset_election_timeout
    @election_timer.reset!
  end

  protected :reset_election_timeout

  # Maps the leader's (prev_log_index, prev_log_term) pair to an absolute
  # position in the local log array, or nil if no entry matches.
  def abs_log_index_for(prev_log_index, prev_log_term)
    @persistent_state.log.rindex { |log_entry| log_entry.index == prev_log_index && log_entry.term == prev_log_term }
  end

  protected :abs_log_index_for

  # Discards any local entries after abs_log_index (all of them when nil)
  # and appends the leader's entries.
  def truncate_and_update_log(abs_log_index, entries)
    log = @persistent_state.log
    if abs_log_index.nil?
      log = []
    elsif log.length == abs_log_index + 1
      # no truncation required, past log is the same
    else
      log = log.slice(0..abs_log_index)
    end
    log = log.concat(entries) unless entries.empty?
    @persistent_state.log = log
  end

  protected :truncate_and_update_log

  # Follower-side commit advance: refuses to move backwards, otherwise
  # applies everything up to the leader's commit index.
  def update_commit_index(new_commit_index)
    return false if @temporary_state.commit_index && @temporary_state.commit_index > new_commit_index
    handle_commits(new_commit_index)
    true
  end

  protected :update_commit_index
end
|
536
|
+
end
|
@@ -0,0 +1,248 @@
|
|
1
|
+
require_relative '../pontoon'
|
2
|
+
|
3
|
+
require 'goliath'
|
4
|
+
|
5
|
+
module Pontoon
|
6
|
+
class Goliath
|
7
|
+
|
8
|
+
# Debug logging hook used by the RPC provider. Output is currently
# disabled; uncomment the writes to trace RPC failures.
def self.log(message)
  #STDOUT.write("\n\n")
  #STDOUT.write(message)
  #STDOUT.write("\n\n")
end
|
13
|
+
|
14
|
+
# Goliath HTTP endpoint exposing a Node's three Raft RPCs as JSON POSTs:
# /request_vote, /append_entries, /command. Each handler unmarshals the
# JSON params into the matching Pontoon request struct, delegates to the
# node, and renders the response struct back as a JSON body.
class HttpJsonRpcResponder < ::Goliath::API
  use ::Goliath::Rack::Render, 'json'
  use ::Goliath::Rack::Validation::RequestMethod, %w(POST)
  use ::Goliath::Rack::Params

  def initialize(node)
    @node = node
  end

  HEADERS = { 'Content-Type' => 'application/json' }

  # Routes by request path; unknown paths get a 404.
  def response(env)
    case env['REQUEST_PATH']
    when '/request_vote'
      handle_errors { request_vote_response(env['params']) }
    when '/append_entries'
      handle_errors { append_entries_response(env['params']) }
    when '/command'
      handle_errors { command_response(env['params']) }
    else
      error_response(404, 'not found')
    end
  end

  def request_vote_response(params)
    request = Pontoon::RequestVoteRequest.new(
      params['term'],
      params['candidate_id'],
      params['last_log_index'],
      params['last_log_term'])
    response = @node.handle_request_vote(request)
    [200, HEADERS, { 'term' => response.term, 'vote_granted' => response.vote_granted }]
  end

  def append_entries_response(params)
    # Rehydrate each JSON entry hash into a LogEntry before handing off.
    entries = params['entries'].map {|entry| Pontoon::LogEntry.new(entry['term'], entry['index'], entry['command'])}
    request = Pontoon::AppendEntriesRequest.new(
      params['term'],
      params['leader_id'],
      params['prev_log_index'],
      params['prev_log_term'],
      entries,
      params['commit_index'])
    response = @node.handle_append_entries(request)
    [200, HEADERS, { 'term' => response.term, 'success' => response.success }]
  end

  # 200 when the command committed, 409 when it could not.
  def command_response(params)
    request = Pontoon::CommandRequest.new(params['command'])
    response = @node.handle_command(request)
    [response.success ? 200 : 409, HEADERS, { 'success' => response.success }]
  end

  # Maps handler failures to HTTP errors: 422 for expected (StandardError)
  # failures, 500 otherwise.
  # NOTE(review): rescuing Exception also swallows SignalException/SystemExit;
  # confirm that catching absolutely everything here is intentional.
  def handle_errors
    yield
  rescue StandardError => se
    error_response(422, se)
  rescue Exception => e
    error_response(500, e)
  end

  # Formats message + backtrace (and mirrors it to STDOUT as a side effect).
  def error_message(exception)
    "#{exception.message}\n\t#{exception.backtrace.join("\n\t")}".tap {|m| STDOUT.write("\n\n\t#{m}\n\n")}
  end

  def error_response(code, exception)
    [code, HEADERS, { 'error' => error_message(exception) }]
  end
end
|
87
|
+
|
88
|
+
# Converts between plain Hashes (JSON payloads) and attribute-bearing
# objects such as the Pontoon request/response structs.
module HashMarshalling
  # Builds an instance of klass and assigns every hash pair through the
  # corresponding "<key>=" writer.
  def self.hash_to_object(hash, klass)
    hash.each_pair.with_object(klass.new) do |(key, value), object|
      object.send("#{key}=", value)
    end
  end

  # Reads each named attribute off object into a Hash keyed by the
  # attribute name (as given, typically a String).
  def self.object_to_hash(object, attrs)
    attrs.each_with_object({}) do |attr, hash|
      hash[attr] = object.send(attr)
    end
  end
end
|
103
|
+
|
104
|
+
# RpcProvider implementation that ships Raft RPCs as JSON over HTTP with
# EventMachine / em-http-request. uri_generator is a callable mapping
# (node_id, rpc_name) to that node's endpoint URL.
class HttpJsonRpcProvider < Pontoon::RpcProvider
  attr_reader :uri_generator

  def initialize(uri_generator)
    @uri_generator = uri_generator
  end

  # POSTs the vote request to every node except the candidate itself and
  # yields (node_id, request, response) from each successful HTTP reply.
  # Failed replies are logged and dropped.
  def request_votes(request, cluster, &block)
    sent_hash = HashMarshalling.object_to_hash(request, %w(term candidate_id last_log_index last_log_term))
    sent_json = MultiJson.dump(sent_hash)
    deferred_calls = []
    EM.synchrony do
      cluster.node_ids.each do |node_id|
        next if node_id == request.candidate_id
        http = EventMachine::HttpRequest.new(uri_generator.call(node_id, 'request_vote')).apost(
          :body => sent_json,
          :head => { 'Content-Type' => 'application/json' })
        http.callback do
          if http.response_header.status == 200
            received_hash = MultiJson.load(http.response)
            response = HashMarshalling.hash_to_object(received_hash, Pontoon::RequestVoteResponse)
            yield node_id, request, response
          else
            Pontoon::Goliath.log("request_vote failed for node '#{node_id}' with code #{http.response_header.status}")
          end
        end
        deferred_calls << http
      end
    end
    # Block (cooperatively) until every outstanding request completes.
    deferred_calls.each do |http|
      EM::Synchrony.sync http
    end
  end

  # Broadcasts an append-entries request to every node except the leader
  # and waits for all replies.
  def append_entries(request, cluster, &block)
    deferred_calls = []
    EM.synchrony do
      cluster.node_ids.each do |node_id|
        next if node_id == request.leader_id
        deferred_calls << create_append_entries_to_follower_request(request, node_id, &block)
      end
    end
    deferred_calls.each do |http|
      EM::Synchrony.sync http
    end
  end

  # Fire-and-callback append-entries to a single follower (no sync wait).
  def append_entries_to_follower(request, node_id, &block)
    # EM.synchrony do
    create_append_entries_to_follower_request(request, node_id, &block)
    # end
  end

  # Serializes the request (log entries flattened to hashes), issues the
  # async POST, and yields (node_id, response) from the callback on a 200.
  # Returns the in-flight HTTP object so callers can sync on it.
  def create_append_entries_to_follower_request(request, node_id, &block)
    sent_hash = HashMarshalling.object_to_hash(request, %w(term leader_id prev_log_index prev_log_term entries commit_index))
    sent_hash['entries'] = sent_hash['entries'].map {|obj| HashMarshalling.object_to_hash(obj, %w(term index command))}
    sent_json = MultiJson.dump(sent_hash)
    raise "replicating to self!" if request.leader_id == node_id

    http = EventMachine::HttpRequest.new(uri_generator.call(node_id, 'append_entries')).apost(
      :body => sent_json,
      :head => { 'Content-Type' => 'application/json' })
    http.callback do
      if http.response_header.status == 200
        received_hash = MultiJson.load(http.response)
        response = HashMarshalling.hash_to_object(received_hash, Pontoon::AppendEntriesResponse)
        yield node_id, response
      else
        Pontoon::Goliath.log("append_entries failed for node '#{node_id}' with code #{http.response_header.status}")
      end
    end
    http
  end

  # Synchronously forwards a client command to node_id; returns the
  # unmarshalled CommandResponse, or a failed CommandResponse on HTTP error.
  def command(request, node_id)
    sent_hash = HashMarshalling.object_to_hash(request, %w(command))
    sent_json = MultiJson.dump(sent_hash)
    http = EventMachine::HttpRequest.new(uri_generator.call(node_id, 'command')).apost(
      :body => sent_json,
      :head => { 'Content-Type' => 'application/json' })
    http = EM::Synchrony.sync(http)
    if http.response_header.status == 200
      received_hash = MultiJson.load(http.response)
      HashMarshalling.hash_to_object(received_hash, Pontoon::CommandResponse)
    else
      Pontoon::Goliath.log("command failed for node '#{node_id}' with code #{http.response_header.status}")
      CommandResponse.new(false)
    end
  end
end
|
197
|
+
|
198
|
+
# AsyncProvider backed by EventMachine fibers: yields the current fiber
# and re-checks the condition on each reactor tick, so waiting does not
# block the event loop.
class EventMachineAsyncProvider < Pontoon::AsyncProvider
  def await
    f = Fiber.current
    until yield
      EM.next_tick {f.resume}
      Fiber.yield
    end
  end
end
|
207
|
+
|
208
|
+
# Factory for the HTTP/JSON transport.
def self.rpc_provider(uri_generator)
  HttpJsonRpcProvider.new(uri_generator)
end

# Factory for the EventMachine-based await shim.
def self.async_provider
  EventMachineAsyncProvider.new
end

def initialize(node)
  @node = node
end

attr_reader :node
attr_reader :update_fiber
attr_reader :running

# Boots a Goliath HTTP server fronting this node and schedules the node's
# periodic #update on the reactor.
# options: :address and :port override the Goliath runner defaults.
def start(options = {})
  @runner = ::Goliath::Runner.new(ARGV, nil)
  @runner.api = HttpJsonRpcResponder.new(node)
  @runner.app = ::Goliath::Rack::Builder.build(HttpJsonRpcResponder, @runner.api)
  @runner.address = options[:address] if options[:address]
  @runner.port = options[:port] if options[:port]
  @runner.run
  @running = true

  # Each tick runs the node update inside a synchrony block so it may
  # await (Fiber-yield) without blocking the reactor.
  update_proc = Proc.new do
    EM.synchrony do
      @node.update
    end
  end
  @update_timer = EventMachine.add_periodic_timer(node.config.update_interval, update_proc)
  # @node.update
end

# Stops the periodic update ticks (the HTTP server itself is not torn down).
def stop
  @update_timer.cancel
end
|
245
|
+
end
|
246
|
+
end
|
247
|
+
|
248
|
+
|
metadata
ADDED
@@ -0,0 +1,115 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: pontoon
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.1.0
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- Anthony Corletti
|
8
|
+
autorequire:
|
9
|
+
bindir: bin
|
10
|
+
cert_chain: []
|
11
|
+
date: 2016-04-20 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: goliath
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - "~>"
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '1.0'
|
20
|
+
type: :runtime
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - "~>"
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '1.0'
|
27
|
+
- !ruby/object:Gem::Dependency
|
28
|
+
name: multi_json
|
29
|
+
requirement: !ruby/object:Gem::Requirement
|
30
|
+
requirements:
|
31
|
+
- - "~>"
|
32
|
+
- !ruby/object:Gem::Version
|
33
|
+
version: '1.3'
|
34
|
+
type: :runtime
|
35
|
+
prerelease: false
|
36
|
+
version_requirements: !ruby/object:Gem::Requirement
|
37
|
+
requirements:
|
38
|
+
- - "~>"
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
version: '1.3'
|
41
|
+
- !ruby/object:Gem::Dependency
|
42
|
+
name: cucumber
|
43
|
+
requirement: !ruby/object:Gem::Requirement
|
44
|
+
requirements:
|
45
|
+
- - "~>"
|
46
|
+
- !ruby/object:Gem::Version
|
47
|
+
version: '1.0'
|
48
|
+
type: :development
|
49
|
+
prerelease: false
|
50
|
+
version_requirements: !ruby/object:Gem::Requirement
|
51
|
+
requirements:
|
52
|
+
- - "~>"
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: '1.0'
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: em-http-request
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - "~>"
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: '1.0'
|
62
|
+
type: :development
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - "~>"
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: '1.0'
|
69
|
+
- !ruby/object:Gem::Dependency
|
70
|
+
name: rspec
|
71
|
+
requirement: !ruby/object:Gem::Requirement
|
72
|
+
requirements:
|
73
|
+
- - "~>"
|
74
|
+
- !ruby/object:Gem::Version
|
75
|
+
version: '2.0'
|
76
|
+
type: :development
|
77
|
+
prerelease: false
|
78
|
+
version_requirements: !ruby/object:Gem::Requirement
|
79
|
+
requirements:
|
80
|
+
- - "~>"
|
81
|
+
- !ruby/object:Gem::Version
|
82
|
+
version: '2.0'
|
83
|
+
description: A simple Raft distributed consensus implementation
|
84
|
+
email: anthcor@gmail.com
|
85
|
+
executables: []
|
86
|
+
extensions: []
|
87
|
+
extra_rdoc_files: []
|
88
|
+
files:
|
89
|
+
- lib/pontoon.rb
|
90
|
+
- lib/pontoon/goliath.rb
|
91
|
+
homepage: http://github.com/anthcor/pontoon
|
92
|
+
licenses:
|
93
|
+
- MIT
|
94
|
+
metadata: {}
|
95
|
+
post_install_message:
|
96
|
+
rdoc_options: []
|
97
|
+
require_paths:
|
98
|
+
- lib
|
99
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
100
|
+
requirements:
|
101
|
+
- - ">="
|
102
|
+
- !ruby/object:Gem::Version
|
103
|
+
version: '0'
|
104
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
105
|
+
requirements:
|
106
|
+
- - ">="
|
107
|
+
- !ruby/object:Gem::Version
|
108
|
+
version: '0'
|
109
|
+
requirements: []
|
110
|
+
rubyforge_project:
|
111
|
+
rubygems_version: 2.4.8
|
112
|
+
signing_key:
|
113
|
+
specification_version: 4
|
114
|
+
summary: A simple Raft distributed consensus implementation
|
115
|
+
test_files: []
|