evinrude 0.0.1
- checksums.yaml +7 -0
- data/.editorconfig +23 -0
- data/.gitignore +6 -0
- data/.yardopts +1 -0
- data/CODE_OF_CONDUCT.md +49 -0
- data/CONTRIBUTING.md +10 -0
- data/LICENCE +674 -0
- data/README.md +410 -0
- data/evinrude.gemspec +42 -0
- data/lib/evinrude.rb +1233 -0
- data/lib/evinrude/backoff.rb +19 -0
- data/lib/evinrude/cluster_configuration.rb +162 -0
- data/lib/evinrude/config_change_queue_entry.rb +19 -0
- data/lib/evinrude/config_change_queue_entry/add_node.rb +13 -0
- data/lib/evinrude/config_change_queue_entry/remove_node.rb +14 -0
- data/lib/evinrude/freedom_patches/range.rb +5 -0
- data/lib/evinrude/log.rb +102 -0
- data/lib/evinrude/log_entries.rb +3 -0
- data/lib/evinrude/log_entry.rb +13 -0
- data/lib/evinrude/log_entry/cluster_configuration.rb +15 -0
- data/lib/evinrude/log_entry/null.rb +6 -0
- data/lib/evinrude/log_entry/state_machine_command.rb +13 -0
- data/lib/evinrude/logging_helpers.rb +40 -0
- data/lib/evinrude/message.rb +19 -0
- data/lib/evinrude/message/append_entries_reply.rb +13 -0
- data/lib/evinrude/message/append_entries_request.rb +18 -0
- data/lib/evinrude/message/command_reply.rb +13 -0
- data/lib/evinrude/message/command_request.rb +18 -0
- data/lib/evinrude/message/install_snapshot_reply.rb +13 -0
- data/lib/evinrude/message/install_snapshot_request.rb +18 -0
- data/lib/evinrude/message/join_reply.rb +13 -0
- data/lib/evinrude/message/join_request.rb +18 -0
- data/lib/evinrude/message/node_removal_reply.rb +13 -0
- data/lib/evinrude/message/node_removal_request.rb +18 -0
- data/lib/evinrude/message/read_reply.rb +13 -0
- data/lib/evinrude/message/read_request.rb +18 -0
- data/lib/evinrude/message/vote_reply.rb +13 -0
- data/lib/evinrude/message/vote_request.rb +18 -0
- data/lib/evinrude/messages.rb +14 -0
- data/lib/evinrude/metrics.rb +50 -0
- data/lib/evinrude/network.rb +69 -0
- data/lib/evinrude/network/connection.rb +144 -0
- data/lib/evinrude/network/protocol.rb +69 -0
- data/lib/evinrude/node_info.rb +35 -0
- data/lib/evinrude/peer.rb +50 -0
- data/lib/evinrude/resolver.rb +96 -0
- data/lib/evinrude/snapshot.rb +9 -0
- data/lib/evinrude/state_machine.rb +15 -0
- data/lib/evinrude/state_machine/register.rb +25 -0
- data/smoke_tests/001_single_node_cluster.rb +20 -0
- data/smoke_tests/002_three_node_cluster.rb +43 -0
- data/smoke_tests/003_spill.rb +25 -0
- data/smoke_tests/004_stale_read.rb +67 -0
- data/smoke_tests/005_sleepy_master.rb +28 -0
- data/smoke_tests/006_join_via_follower.rb +26 -0
- data/smoke_tests/007_snapshot_madness.rb +97 -0
- data/smoke_tests/008_downsizing.rb +43 -0
- data/smoke_tests/009_disaster_recovery.rb +46 -0
- data/smoke_tests/999_final_smoke_test.rb +279 -0
- data/smoke_tests/run +22 -0
- data/smoke_tests/smoke_test_helper.rb +199 -0
- metadata +318 -0
data/lib/evinrude/backoff.rb ADDED
@@ -0,0 +1,19 @@
+class Evinrude
+  class Backoff
+    def initialize(slot_time: 0.5, max_slots: 30)
+      @slot_time, @max_slots = slot_time, max_slots
+
+      @fail_count = 0
+    end
+
+    def wait_time
+      @fail_count += 1
+
+      [2 ** @fail_count, @max_slots].min * rand * @slot_time
+    end
+
+    def wait
+      sleep wait_time
+    end
+  end
+end
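The strategy above is randomised truncated binary exponential backoff: each failure doubles the ceiling on the wait (capped at `max_slots` slots of `slot_time` seconds each), and the actual sleep is a uniformly random fraction of that ceiling. A minimal usage sketch, assuming the gem is loaded — the retry loop and the `flaky_operation` it wraps are hypothetical, not part of the gem:

```ruby
require "evinrude"

backoff = Evinrude::Backoff.new(slot_time: 0.5, max_slots: 30)

begin
  flaky_operation  # hypothetical; anything that raises on transient failure
rescue StandardError
  backoff.wait     # 1st retry sleeps up to 1s, 2nd up to 2s, ... capped at 15s
  retry
end
```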
data/lib/evinrude/cluster_configuration.rb ADDED
@@ -0,0 +1,162 @@
+class Evinrude
+  class ClusterConfiguration
+    class TransitionInProgressError < StandardError; end
+
+    include Evinrude::LoggingHelpers
+
+    def initialize(logger:, metrics:)
+      @logger, @metrics = logger, metrics
+      @old = []
+      @new = []
+      @transition_in_progress = false
+      @metrics.joint_configuration.set(0)
+      @m = Mutex.new
+    end
+
+    def transitioning?
+      @transition_in_progress
+    end
+
+    def nodes
+      locked = false
+      unless @m.owned?
+        @m.lock
+        locked = true
+      end
+
+      (@old + @new).uniq
+    ensure
+      @m.unlock if locked
+    end
+
+    def add_node(node_info)
+      @m.synchronize do
+        if @transition_in_progress
+          raise TransitionInProgressError,
+                "Cannot add a node whilst a config transition is in progress (@old=#{@old.inspect}, @new=#{@new.inspect})"
+        end
+
+        logger.debug(logloc) { "Commencing addition of #{node_info.inspect} to cluster config" }
+
+        # Adding a new node with the same name but, presumably, a different
+        # address and/or port triggers a config change in which the old
+        # address/port is removed and the new address/port is added.
+        existing_node = @old.find { |n| n.name == node_info.name }
+
+        @new = @old + [node_info] - [existing_node].compact
+        if @metrics
+          @metrics.node_count&.set(nodes.length)
+          @metrics.joint_configuration.set(1)
+        end
+        @transition_in_progress = true
+      end
+    end
+
+    def remove_node(node_info, force: false)
+      @m.synchronize do
+        if @transition_in_progress && !force
+          raise TransitionInProgressError,
+                "Cannot remove a node whilst a config transition is in progress"
+        end
+
+        logger.debug(logloc) { "Commencing #{force ? "forced " : ""}removal of #{node_info.inspect} from cluster config" }
+
+        @new = @old - [node_info]
+        if @metrics
+          @metrics.node_count&.set(nodes.length)
+          @metrics.joint_configuration.set(1)
+        end
+        @transition_in_progress = true
+
+        if force
+          joint_configuration_replicated
+        end
+      end
+    end
+
+    def joint_configuration_replicated
+      unlock = false
+
+      unless @m.owned?
+        @m.lock
+        unlock = true
+      end
+
+      logger.debug(logloc) { "Joint configuration has been replicated" }
+      @old = @new
+      @new = []
+      @transition_in_progress = false
+      @metrics&.joint_configuration&.set(0)
+    ensure
+      @m.unlock if unlock
+    end
+
+    def quorum_met?(present_nodes)
+      @m.synchronize do
+        group_quorum?(@old, present_nodes) && group_quorum?(@new, present_nodes)
+      end
+    end
+
+    def [](id)
+      nodes.find { |n| n.id == id }
+    end
+
+    def encode_with(coder)
+      @m.synchronize do
+        instance_variables.each do |iv|
+          next if %i{@logger @metrics @m}.include?(iv)
+          coder[iv.to_s.sub(/^@/, '')] = instance_variable_get(iv)
+        end
+      end
+    end
+
+    def init_with(coder)
+      @m = Mutex.new
+
+      coder.map.each do |k, v|
+        instance_variable_set(:"@#{k}", v)
+      end
+    end
+
+    def inspect
+      @m.synchronize do
+        "#<#{self.class}:0x#{object_id.to_s(16)} " +
+          instance_variables.map do |iv|
+            next nil if iv == :@logger || iv == :@metrics
+            "#{iv}=#{instance_variable_get(iv).inspect}"
+          end.compact.join(" ")
+      end
+    end
+
+    def logger=(l)
+      if @logger
+        raise ArgumentError, "Logger cannot be changed once set"
+      end
+
+      @logger = l
+    end
+
+    def metrics=(m)
+      if @metrics
+        raise ArgumentError, "Metrics cannot be changed once set"
+      end
+
+      @metrics = m
+    end
+
+    private
+
+    attr_reader :old, :new
+
+    def group_quorum?(group, present_nodes)
+      if group.length < 2
+        # Quorum is automatically met if the group isn't in use (empty) or
+        # if the group is just one (which can only be "us")
+        true
+      else
+        logger.debug(logloc) { "Checking if #{present_nodes.inspect} meets quorum requirement for #{group.inspect}" }
+        (group.select { |m| present_nodes.include?(m) }.length.to_f / group.length.to_f > 0.5).tap { |v| logger.debug(logloc) { v ? "Quorum met" : "Quorum failed" } }
+      end
+    end
+  end
+end
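While a membership change is in flight, the configuration is "joint" in the Raft sense: `quorum_met?` demands a majority of the old node set *and* a majority of the new one, so neither configuration can unilaterally commit. A standalone sketch of that rule — plain strings stand in for `NodeInfo` objects, and `majority?` is an illustrative rewrite of `group_quorum?` above, not the gem's API:

```ruby
# Majority rule applied to each side of a joint configuration. Groups of
# zero or one node trivially pass, mirroring group_quorum? above.
def majority?(group, present)
  group.length < 2 || group.count { |n| present.include?(n) } > group.length / 2.0
end

old_nodes = %w{a b c}
new_nodes = %w{a b c d e}

majority?(old_nodes, %w{a b}) && majority?(new_nodes, %w{a b})
# => false: 2/3 of old passes, but 2/5 of new fails

majority?(old_nodes, %w{a b d e}) && majority?(new_nodes, %w{a b d e})
# => true: 2/3 of old and 4/5 of new both pass
```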
data/lib/evinrude/config_change_queue_entry.rb ADDED
@@ -0,0 +1,19 @@
+class Evinrude
+  class ConfigChangeQueueEntry
+    def initialize(msg, conn = nil)
+      @msg, @conn = msg, conn
+    end
+
+    def node_info
+      @msg.node_info
+    end
+
+    def send_successful_reply
+      @conn.send_reply(reply_class.new(success: true))
+    end
+
+    def send_redirect_reply(leader_info)
+      @conn.send_reply(reply_class.new(success: false, leader_info: leader_info))
+    end
+  end
+end
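`ConfigChangeQueueEntry` is an abstract base: it calls `reply_class`, which only its subclasses (`config_change_queue_entry/add_node.rb` and `remove_node.rb` in the file listing) define. A plausible shape for one such subclass — the reply constant chosen here is a guess; the real files may differ:

```ruby
class Evinrude
  class ConfigChangeQueueEntry
    class AddNode < ConfigChangeQueueEntry
      private

      # Assumed pairing for illustration; the real add_node.rb may choose
      # a different reply message type.
      def reply_class
        Evinrude::Message::JoinReply
      end
    end
  end
end
```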
data/lib/evinrude/log.rb ADDED
@@ -0,0 +1,102 @@
+require "yaml"
+
+class Evinrude
+  class Log
+    include Evinrude::LoggingHelpers
+
+    class TruncationUnderflowError < Error; end
+    class SnapshottedEntryError < Error; end
+
+    attr_reader :snapshot_last_term, :snapshot_last_index
+
+    def initialize(logger:, snapshot_last_term: 0, snapshot_last_index: 0)
+      @logger, @snapshot_last_term, @snapshot_last_index = logger, snapshot_last_term, snapshot_last_index
+
+      @entries = []
+    end
+
+    def append(entry)
+      logger.debug(logloc) { "Appending new entry #{entry.inspect} as ##{@snapshot_last_index + @entries.length + 1}" }
+      @entries << entry
+
+      if @entries.length > 1000
+        old_len = @entries.length
+        snapshotted_entries = @entries[0..499]
+        @entries = @entries[500..]
+        @snapshot_last_index += 500
+        @snapshot_last_term = snapshotted_entries.last.term
+      end
+    end
+
+    def new_snapshot(last_term, last_index)
+      @snapshot_last_term = last_term
+      @snapshot_last_index = last_index
+
+      @entries = []
+    end
+
+    def has_entry?(n)
+      n == 0 || n <= @snapshot_last_index + @entries.length
+    end
+
+    def snapshotted_entry?(n)
+      n > 0 && n <= @snapshot_last_index
+    end
+
+    def last_index
+      @entries.length + @snapshot_last_index
+    end
+
+    def last_entry_term
+      if @entries.empty?
+        @snapshot_last_term
+      else
+        @entries.last.term
+      end
+    end
+
+    def entry_term(n)
+      if n == @snapshot_last_index
+        @snapshot_last_term
+      else
+        self[n]&.term
+      end
+    end
+
+    def entries_from(n)
+      @entries[(n-@snapshot_last_index-1)..] || []
+    end
+
+    # Make the last entry kept in the log the nth.
+    def truncate_to(n)
+      if n > @snapshot_last_index
+        @entries = @entries[0..n-@snapshot_last_index-1]
+      elsif n == @snapshot_last_index
+        @entries = []
+      else
+        raise TruncationUnderflowError,
+              "Cannot truncate to log entry ##{n}; into the snapshot"
+      end
+    end
+
+    def [](n)
+      if n == 0
+        zeroth_log_entry
+      elsif n <= @snapshot_last_index
+        raise SnapshottedEntryError
+      else
+        @entries[n - @snapshot_last_index - 1]
+      end
+    end
+
+    private
+
+    def zeroth_log_entry
+      @zeroth_log_entry ||= LogEntry::Null.new(term: 0)
+    end
+
+    def snapshot_log_entry
+      @snapshot_log_entry ||= LogEntry::Null.new(term: @snapshot_last_term)
+    end
+  end
+end
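Log indices here are absolute and 1-based across the entire history, while `@entries` holds only the suffix that post-dates the snapshot, so entry `n` lives at array offset `n - snapshot_last_index - 1`. A quick illustration of the arithmetic, assuming the gem is loaded — `LogEntry::Null` is used purely because it is the simplest entry type:

```ruby
require "evinrude"
require "logger"

log = Evinrude::Log.new(logger: Logger.new($stderr))
log.new_snapshot(3, 500)  # snapshot absorbs entries 1..500; last covered term is 3

log.append(Evinrude::LogEntry::Null.new(term: 4))  # becomes absolute entry #501

log.last_index               # => 501
log.entry_term(500)          # => 3, answered from snapshot metadata alone
log[501]                     # => @entries[501 - 500 - 1], i.e. @entries[0]
log.snapshotted_entry?(250)  # => true; log[250] raises SnapshottedEntryError
```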
data/lib/evinrude/log_entry/state_machine_command.rb ADDED
@@ -0,0 +1,13 @@
+class Evinrude
+  class LogEntry
+    class StateMachineCommand < LogEntry
+      attr_reader :command, :id, :node_name
+
+      def initialize(term:, command:, id:, node_name:)
+        super(term: term)
+
+        @command, @id, @node_name = command, id, node_name
+      end
+    end
+  end
+end
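Each replicated command carries an `id` and the submitting node's name alongside the command payload (plausibly so resubmissions can be recognised; that logic would live in `evinrude.rb`, which is not shown here). Constructing one looks like the sketch below — the UUID id is an illustrative choice, not something the class mandates:

```ruby
require "evinrude"
require "securerandom"

entry = Evinrude::LogEntry::StateMachineCommand.new(
  term: 7,
  command: "set key=value",  # opaque payload for the state machine
  id: SecureRandom.uuid,     # illustrative; any unique id would do
  node_name: "node-1"
)

entry.command  # => "set key=value"
```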
data/lib/evinrude/logging_helpers.rb ADDED
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+require "logger"
+
+class Evinrude
+  module LoggingHelpers
+    private
+
+    def logger
+      @logger || Logger.new("/dev/null")
+    end
+
+    def logloc
+      loc = caller_locations.first
+      "#{self.class}##{loc.label}"
+    end
+
+    def log_exception(ex, progname = nil)
+      progname ||= "#{self.class.to_s}##{caller_locations(2, 1).first.label}"
+
+      logger.error(progname) do
+        explanation = if block_given?
+          yield
+        else
+          nil
+        end
+
+        format_backtrace("#{explanation}#{explanation ? ": " : ""}#{ex.message} (#{ex.class})", ex.backtrace)
+      end
+    end
+
+    def with_backtrace(msg)
+      format_backtrace(msg, caller[1..])
+    end
+
+    def format_backtrace(msg, backtrace)
+      ([msg] + backtrace).join("\n  ")
+    end
+  end
+end
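The helpers assume an optional `@logger` on the including class and degrade to a null logger when it is absent; `logloc` stamps each message with `Class#method`. A small usage sketch — the `Widget` class is hypothetical, not part of the gem:

```ruby
require "evinrude"
require "logger"

class Widget
  include Evinrude::LoggingHelpers

  def initialize(logger: nil)
    @logger = logger
  end

  def frob
    logger.debug(logloc) { "starting" }  # progname comes out as "Widget#frob"
    raise "boom"
  rescue => ex
    log_exception(ex) { "frobbing failed" }  # logs message, class, backtrace
  end
end

Widget.new(logger: Logger.new($stdout)).frob  # logs; omit logger: and it is silent
```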
data/lib/evinrude/message.rb ADDED
@@ -0,0 +1,19 @@
+require "yaml"
+
+class Evinrude
+  class Message
+    class ParseError < Evinrude::Error; end
+
+    def self.parse(m)
+      YAML.safe_load(m, permitted_classes: Evinrude::Message.permitted_classes, aliases: true)
+    end
+
+    def self.permitted_classes
+      Evinrude::Message.classes + Evinrude::LogEntry.classes + [Evinrude::NodeInfo, Evinrude::ClusterConfiguration, Symbol]
+    end
+
+    def self.classes
+      Evinrude::Message.constants.map { |c| Evinrude::Message.const_get(c) }.select { |c| Class === c }
+    end
+  end
+end
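`safe_load` with an explicit allow-list is what keeps untrusted YAML arriving off the network from instantiating arbitrary Ruby objects: only message, log-entry, and a handful of value classes can be revived. The mechanism in isolation — `Greeting` is a stand-in class, not part of the gem:

```ruby
require "yaml"

class Greeting
  attr_reader :text

  def initialize(text)
    @text = text
  end
end

doc = YAML.dump(Greeting.new("hi"))  # "--- !ruby/object:Greeting\ntext: hi\n"

begin
  YAML.safe_load(doc)                # no allow-list: object tags are rejected
rescue Psych::DisallowedClass => e
  puts e.message
end

YAML.safe_load(doc, permitted_classes: [Greeting]).text  # => "hi"
```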