big_brother 0.4.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/big_brother/app.rb +2 -4
- data/lib/big_brother/cluster.rb +73 -6
- data/lib/big_brother/cluster_collection.rb +34 -0
- data/lib/big_brother/configuration.rb +25 -9
- data/lib/big_brother/ipvs.rb +2 -2
- data/lib/big_brother/nagios.rb +30 -0
- data/lib/big_brother/node.rb +32 -7
- data/lib/big_brother/status_file.rb +1 -1
- data/lib/big_brother/ticker.rb +1 -1
- data/lib/big_brother/version.rb +1 -1
- data/lib/big_brother.rb +6 -3
- data/spec/big_brother/app_spec.rb +1 -1
- data/spec/big_brother/cluster_collection_spec.rb +83 -0
- data/spec/big_brother/cluster_spec.rb +150 -1
- data/spec/big_brother/configuration_spec.rb +71 -5
- data/spec/big_brother/node_spec.rb +58 -0
- data/spec/big_brother/status_file_spec.rb +9 -0
- data/spec/big_brother_spec.rb +52 -4
- data/spec/spec_helper.rb +2 -1
- data/spec/support/example_config.yml +10 -3
- data/spec/support/factories/cluster_factory.rb +6 -5
- data/spec/support/factories/node_factory.rb +5 -3
- data/spec/support/stub_executor.rb +4 -0
- metadata +35 -31
data/lib/big_brother/app.rb CHANGED
@@ -5,15 +5,13 @@ module BigBrother
     set :raise_errors, false
 
     get "/" do
-      running, stopped = BigBrother.clusters.values.partition(&:monitored?)
-
       [200, <<-CONTENT]
 Big Brother: #{BigBrother::VERSION}
 
 Running:
-#{running.map { |cluster| "+ #{cluster}\n" }.join}
+#{BigBrother.clusters.running.map { |cluster| "+ #{cluster}\n" }.join}
 Stopped:
-#{stopped.map { |cluster| "- #{cluster}\n" }.join}
+#{BigBrother.clusters.stopped.map { |cluster| "- #{cluster}\n" }.join}
 CONTENT
     end
 
data/lib/big_brother/cluster.rb CHANGED
@@ -1,17 +1,37 @@
 module BigBrother
   class Cluster
-    attr_reader :fwmark, :scheduler, :check_interval, :nodes, :name
+    attr_reader :fwmark, :scheduler, :check_interval, :nodes, :name, :persistent, :ramp_up_time, :nagios
 
     def initialize(name, attributes = {})
       @name = name
-      @fwmark = attributes[
-      @scheduler = attributes[
-      @
+      @fwmark = attributes[:fwmark]
+      @scheduler = attributes[:scheduler]
+      @persistent = attributes.fetch(:persistent, 300)
+      @check_interval = attributes.fetch(:check_interval, 1)
       @monitored = false
-      @nodes = attributes.fetch(
+      @nodes = attributes.fetch(:nodes, []).map { |node_config| _coerce_node(node_config) }
       @last_check = Time.new(0)
       @up_file = BigBrother::StatusFile.new('up', @name)
       @down_file = BigBrother::StatusFile.new('down', @name)
+      @ramp_up_time = attributes.fetch(:ramp_up_time, 60)
+      @has_downpage = attributes[:has_downpage]
+      @nagios = attributes[:nagios]
+    end
+
+    def _coerce_node(node_config)
+      node_config.is_a?(Node) ? node_config : Node.new(node_config)
+    end
+
+    def downpage_enabled?
+      @downpage_enabled
+    end
+
+    def find_node(address, port)
+      nodes.find{|node| node.address == address && node.port == port}
+    end
+
+    def has_downpage?
+      @has_downpage
     end
 
     def monitored?
@@ -20,7 +40,7 @@ module BigBrother
 
     def start_monitoring!
      BigBrother.logger.info "starting monitoring on cluster #{to_s}"
-      BigBrother.ipvs.start_cluster(@fwmark, @scheduler)
+      BigBrother.ipvs.start_cluster(@fwmark, @scheduler, @persistent)
       @nodes.each do |node|
         BigBrother.ipvs.start_node(@fwmark, node.address, 100)
       end
@@ -62,12 +82,19 @@ module BigBrother
     def monitor_nodes
       @last_check = Time.now
       @nodes.each { |node| node.monitor(self) }
+
+      _check_downpage if has_downpage?
+      _notify_nagios if nagios
     end
 
     def to_s
       "#{@name} (#{@fwmark})"
     end
 
+    def ==(other)
+      fwmark == other.fwmark
+    end
+
     def up_file_exists?
       @up_file.exists?
     end
@@ -76,6 +103,13 @@ module BigBrother
       @down_file.exists?
     end
 
+    def incorporate_state(another_cluster)
+      nodes.each do |node|
+        node.incorporate_state(another_cluster.find_node(node.address, node.port))
+      end
+      self
+    end
+
     def _add_nodes(addresses)
       addresses.each do |address|
         BigBrother.logger.info "adding #{address} to cluster #{self}"
@@ -83,6 +117,39 @@ module BigBrother
       end
     end
 
+    def _add_maintenance_node
+      BigBrother.logger.info "adding 127.0.0.1 to cluster #{self}"
+      BigBrother.ipvs.start_node(fwmark, '127.0.0.1', 1)
+    end
+
+    def _check_downpage
+      total_health = @nodes.collect{ |n| n.weight || 0 }.reduce(:+)
+      if total_health <= 0
+        _add_maintenance_node unless downpage_enabled?
+        @downpage_enabled = true
+      else
+        _remove_maintenance_node if downpage_enabled?
+        @downpage_enabled = false
+      end
+    end
+
+    def _notify_nagios
+      nodes_down = @nodes.count{|n| n.weight == 0}
+      return if @last_node_count == nodes_down
+      if ((nodes_down / @nodes.count.to_f) >= 0.5)
+        BigBrother.nagios.send_critical(nagios[:host], nagios[:check], "50% of nodes are down", nagios[:server])
+      elsif nodes_down > 0
+        BigBrother.nagios.send_warning(nagios[:host], nagios[:check], "a node is down", nagios[:server])
+      else
+        BigBrother.nagios.send_ok(nagios[:host], nagios[:check], "all nodes up", nagios[:server])
+      end
+      @last_node_count = nodes_down
+    end
+
+    def _remove_maintenance_node
+      BigBrother.ipvs.stop_node(fwmark, '127.0.0.1')
+    end
+
     def _remove_nodes(addresses)
       addresses.each do |address|
         BigBrother.logger.info "removing #{address} to cluster #{self}"
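The nagios notification above keys off node weight alone: a node counts as down when its weight is exactly 0, and the check result is only resent when the down-count changes. A minimal standalone sketch of the same threshold arithmetic (the alert_level helper is hypothetical, for illustration only):

    # Mirrors Cluster#_notify_nagios: >= 50% of nodes down is critical,
    # any node down is a warning, otherwise ok.
    def alert_level(weights)
      nodes_down = weights.count { |w| w == 0 }
      if (nodes_down / weights.count.to_f) >= 0.5
        :critical
      elsif nodes_down > 0
        :warning
      else
        :ok
      end
    end

    alert_level([0, 10])      # => :critical (1 of 2 down)
    alert_level([0, 10, 10])  # => :warning  (1 of 3 down)
    alert_level([10, 10])     # => :ok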
data/lib/big_brother/cluster_collection.rb ADDED
@@ -0,0 +1,34 @@
+require 'forwardable'
+
+module BigBrother
+  class ClusterCollection
+    extend Forwardable
+    def_delegators :@clusters, :[], :[]=, :size, :clear
+
+    def initialize
+      @clusters = {}
+    end
+
+    def config(new_clusters)
+      new_clusters.each do |cluster_name, cluster|
+        if @clusters.key?(cluster_name)
+          @clusters[cluster_name] = cluster.incorporate_state(@clusters[cluster_name])
+        else
+          @clusters[cluster_name] = cluster
+        end
+      end
+    end
+
+    def running
+      @clusters.values.select(&:monitored?)
+    end
+
+    def stopped
+      @clusters.values.reject(&:monitored?)
+    end
+
+    def ready_for_check
+      @clusters.values.select(&:needs_check?)
+    end
+  end
+end
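ClusterCollection replaces the plain Hash that BigBrother.clusters used to be, and def_delegators keeps the old []/[]= access working. A minimal usage sketch, using only the constructor arguments shown in the diffs above:

    collection = BigBrother::ClusterCollection.new
    collection.config('app' => BigBrother::Cluster.new('app', :fwmark => 1, :scheduler => 'wrr'))

    # A later config call for a known cluster name swaps in the new object but
    # carries over running state via Cluster#incorporate_state.
    collection.config('app' => BigBrother::Cluster.new('app', :fwmark => 1, :scheduler => 'wrr'))

    collection.running          # clusters where monitored? is true
    collection.stopped          # the rest
    collection.ready_for_check  # clusters whose needs_check? is true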
data/lib/big_brother/configuration.rb CHANGED
@@ -1,18 +1,34 @@
 module BigBrother
   class Configuration
-
-
-
-
-
+    GLOBAL_CONFIG_KEY = '_big_brother'
+
+    def self.from_file(config_file)
+      config = YAML.load_file(config_file)
+      defaults = config.delete(GLOBAL_CONFIG_KEY)
+
+      config.inject({}) do |clusters, (cluster_name, cluster_values)|
+        cluster_details = _apply_defaults(defaults, cluster_values)
+        clusters.merge(cluster_name => Cluster.new(cluster_name, _deeply_symbolize_keys(cluster_details)))
       end
+    end
 
-
+    def self._deeply_symbolize_keys(value)
+      if value.is_a?(Hash)
+        value.inject({}) do |symbolized_hash, (hash_key, hash_value)|
+          symbolized_hash[hash_key.to_sym] = _deeply_symbolize_keys(hash_value)
+          symbolized_hash
+        end
+      elsif value.is_a?(Array)
+        value.map { |item| _deeply_symbolize_keys(item) }
+      else
+        value
+      end
     end
 
-    def self.
-
-
+    def self._apply_defaults(defaults_hash, settings_hash)
+      return settings_hash unless defaults_hash
+      defaults_hash.merge(settings_hash) do |key, oldval, newval|
+        oldval.is_a?(Hash) && newval.is_a?(Hash) ? _apply_defaults(oldval, newval) : newval
       end
     end
   end
 end
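_apply_defaults is a recursive merge: each cluster's own settings win over the _big_brother defaults, except that nested hashes (such as nagios:) are merged key by key rather than replaced wholesale. The behavior, taken directly from the configuration_spec below:

    defaults = {:foo => {:bar => 1}}
    settings = {:foo => {:baz => 2}}
    BigBrother::Configuration._apply_defaults(defaults, settings)
    # => {:foo => {:bar => 1, :baz => 2}}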
data/lib/big_brother/ipvs.rb CHANGED
@@ -4,8 +4,8 @@ module BigBrother
       @executor = executor
     end
 
-    def start_cluster(fwmark, scheduler)
-      @executor.invoke("ipvsadm --add-service --fwmark-service #{fwmark} --scheduler #{scheduler}")
+    def start_cluster(fwmark, scheduler, persistent)
+      @executor.invoke("ipvsadm --add-service --fwmark-service #{fwmark} --scheduler #{scheduler} --persistent #{persistent}")
     end
 
     def stop_cluster(fwmark)
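Every IPVS service is now created with connection persistence; the 300-second default comes from Cluster#initialize and is overridable per cluster via the new persistent config key. For a fwmark-100 cluster on the wrr scheduler, the resulting shell command (as asserted in the specs below) is:

    BigBrother.ipvs.start_cluster(100, 'wrr', 300)
    # invokes: ipvsadm --add-service --fwmark-service 100 --scheduler wrr --persistent 300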
data/lib/big_brother/nagios.rb ADDED
@@ -0,0 +1,30 @@
+module BigBrother
+  class Nagios
+    module Code
+      Ok = 0
+      Warning = 1
+      Critical = 2
+      Unknown = 3
+    end
+
+    def initialize(executor = ShellExecutor.new)
+      @executor = executor
+    end
+
+    def send_critical(host, check, message, server)
+      _send_passive(host, check, Code::Critical, "CRITICAL #{message}", server)
+    end
+
+    def send_ok(host, check, message, server)
+      _send_passive(host, check, Code::Ok, "OK #{message}", server)
+    end
+
+    def send_warning(host, check, message, server)
+      _send_passive(host, check, Code::Warning, "WARNING #{message}", server)
+    end
+
+    def _send_passive(host, check, code, message, server)
+      @executor.invoke("echo '#{host},#{check},#{code},#{message}' | send_nsca -H #{server} -d ,")
+    end
+  end
+end
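Alerts go out as NSCA passive check results: host, check name, numeric state, and message are piped comma-delimited through send_nsca to the configured Nagios server. The command produced for a critical alert (matching the cluster_spec assertions):

    BigBrother.nagios.send_critical('prod.load', 'test1_check', '50% of nodes are down', 'server.foo')
    # invokes: echo 'prod.load,test1_check,2,CRITICAL 50% of nodes are down' | send_nsca -H server.foo -d ,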
data/lib/big_brother/node.rb CHANGED
@@ -2,13 +2,25 @@ require 'net/http'
 
 module BigBrother
   class Node
-    attr_reader :address, :port, :path
+    attr_reader :address, :port, :path, :start_time, :weight
 
-    def initialize(
-      @address = address
-      @port = port
-      @path = path
-      @weight =
+    def initialize(attributes={})
+      @address = attributes[:address]
+      @port = attributes[:port]
+      @path = attributes[:path]
+      @weight = attributes[:weight]
+      @start_time = attributes.fetch(:start_time, Time.now.to_i)
+    end
+
+    def age
+      Time.now.to_i - @start_time
+    end
+
+    def incorporate_state(another_node)
+      if another_node
+        @weight = another_node.weight
+        @start_time = another_node.start_time
+      end
     end
 
     def invalidate_weight!
@@ -24,13 +36,26 @@ module BigBrother
       end
     end
 
+    def ==(other)
+      address == other.address && port == other.port
+    end
+
     def _determine_weight(cluster)
       if cluster.up_file_exists?
         100
       elsif cluster.down_file_exists?
         0
       else
-        BigBrother::HealthFetcher.current_health(@address, @port, @path)
+        _weight_health(BigBrother::HealthFetcher.current_health(@address, @port, @path), cluster.ramp_up_time)
+      end
+    end
+
+    def _weight_health(health, ramp_up_time)
+      current_age = age
+      if current_age < ramp_up_time
+        (health * (current_age / ramp_up_time.to_f)).to_i
+      else
+        health
       end
     end
   end
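_weight_health scales a node's reported health linearly by its age until ramp_up_time elapses, so a freshly started backend takes traffic gradually. Checking the arithmetic against the node_spec expectations (health 100, ramp_up_time 60):

    health, ramp_up_time = 100, 60
    [30, 45, 120].map do |age|
      age < ramp_up_time ? (health * (age / ramp_up_time.to_f)).to_i : health
    end
    # => [50, 75, 100], the weights the spec expects at ages 30s, 45s, and 120s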
data/lib/big_brother/ticker.rb CHANGED
@@ -19,7 +19,7 @@ module BigBrother
 
     def self.tick
       @outstanding_ticks += 1
-      BigBrother.clusters.
+      BigBrother.clusters.ready_for_check.each do |cluster|
         BigBrother.logger.debug("Monitoring cluster #{cluster.name}")
         cluster.monitor_nodes
       end
data/lib/big_brother/version.rb CHANGED
data/lib/big_brother.rb CHANGED
@@ -9,10 +9,12 @@ require 'sinatra/synchrony'
 
 require 'big_brother/app'
 require 'big_brother/cluster'
+require 'big_brother/cluster_collection'
 require 'big_brother/configuration'
 require 'big_brother/health_fetcher'
 require 'big_brother/ipvs'
 require 'big_brother/logger'
+require 'big_brother/nagios'
 require 'big_brother/node'
 require 'big_brother/shell_executor'
 require 'big_brother/status_file'
@@ -25,16 +27,17 @@ require 'thin/callback_rack_handler'
 
 module BigBrother
   class << self
-    attr_accessor :ipvs, :clusters, :config_dir, :logger
+    attr_accessor :ipvs, :nagios, :clusters, :config_dir, :logger
   end
 
   self.ipvs = IPVS.new
-  self.
+  self.nagios = Nagios.new
+  self.clusters = BigBrother::ClusterCollection.new
   self.logger = BigBrother::Logger.new
 
   def self.configure(filename)
     @config_file = filename
-    @clusters
+    @clusters.config(BigBrother::Configuration.from_file(filename))
  end
 
   def self.start_ticker!
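With configure now routing through ClusterCollection#config, a reconfigure preserves the weight and start_time of nodes that survive the config change, while brand-new nodes begin a fresh ramp-up (exercised in big_brother_spec below). A minimal sketch; the config path here is illustrative, not from this package:

    BigBrother.configure('/etc/big_brother/config.yml')
    BigBrother.start_ticker!
    # ... edit the file, then:
    BigBrother.reconfigure  # surviving nodes keep their state; new nodes ramp up from zero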
data/spec/big_brother/app_spec.rb CHANGED
@@ -106,7 +106,7 @@ module BigBrother
       last_response.status.should == 200
       last_response.body.should == "OK"
       BigBrother.clusters['test'].should be_monitored
-      @stub_executor.commands.should include("ipvsadm --add-service --fwmark-service 100 --scheduler wrr")
+      @stub_executor.commands.should include("ipvsadm --add-service --fwmark-service 100 --scheduler wrr --persistent 300")
       @stub_executor.commands.should include("ipvsadm --add-server --fwmark-service 100 --real-server 127.0.0.1 --ipip --weight 100")
       @stub_executor.commands.should include("ipvsadm --add-server --fwmark-service 100 --real-server 127.0.0.2 --ipip --weight 100")
     end
data/spec/big_brother/cluster_collection_spec.rb ADDED
@@ -0,0 +1,83 @@
+require 'spec_helper'
+
+describe BigBrother::ClusterCollection do
+  describe "config" do
+    it "adds the provided clusters into its collection" do
+      clusters_from_config = {
+        'test1' => Factory.cluster(:name => 'test1', :fwmark => 101),
+        'test2' => Factory.cluster(:name => 'test2', :fwmark => 102),
+        'test3' => Factory.cluster(:name => 'test3', :fwmark => 103)
+      }
+      collection = BigBrother::ClusterCollection.new
+
+      collection.config(clusters_from_config)
+
+      collection['test1'].should == clusters_from_config['test1']
+      collection['test2'].should == clusters_from_config['test2']
+      collection['test3'].should == clusters_from_config['test3']
+    end
+
+    it "incorporates the state of clusters that exist" do
+      collection = BigBrother::ClusterCollection.new
+
+      collection['existing_cluster'] = Factory.cluster(:name => 'existing_cluster')
+
+      cluster_from_config = Factory.cluster(:name => 'existing_cluster')
+      cluster_from_config.should_receive(:incorporate_state).with(collection['existing_cluster'])
+
+      collection.config({'existing_cluster' => cluster_from_config})
+    end
+  end
+
+  describe "running" do
+    it "returns the clusters in the collection that are currently running" do
+      clusters_from_config = {
+        'test1' => Factory.cluster(:name => 'test1', :fwmark => 101),
+        'test2' => Factory.cluster(:name => 'test2', :fwmark => 102),
+        'test3' => Factory.cluster(:name => 'test3', :fwmark => 103)
+      }
+      collection = BigBrother::ClusterCollection.new
+
+      collection.config(clusters_from_config)
+      clusters_from_config['test1'].start_monitoring!
+      clusters_from_config['test2'].start_monitoring!
+
+      collection.running.should == [clusters_from_config['test1'], clusters_from_config['test2']]
+    end
+  end
+
+  describe "stopped" do
+    it "returns the clusters in the collection that are not running" do
+      clusters_from_config = {
+        'test1' => Factory.cluster(:name => 'test1', :fwmark => 101),
+        'test2' => Factory.cluster(:name => 'test2', :fwmark => 102),
+        'test3' => Factory.cluster(:name => 'test3', :fwmark => 103)
+      }
+      collection = BigBrother::ClusterCollection.new
+
+      collection.config(clusters_from_config)
+      clusters_from_config['test1'].start_monitoring!
+      clusters_from_config['test2'].start_monitoring!
+
+      collection.stopped.should == [clusters_from_config['test3']]
+    end
+  end
+
+  describe "ready_for_check" do
+    it "returns the clusters in the collection that need checking" do
+      clusters_from_config = {
+        'test1' => Factory.cluster(:name => 'test1', :fwmark => 101),
+        'test2' => Factory.cluster(:name => 'test2', :fwmark => 102),
+        'test3' => Factory.cluster(:name => 'test3', :fwmark => 103)
+      }
+      collection = BigBrother::ClusterCollection.new
+
+      collection.config(clusters_from_config)
+      clusters_from_config['test1'].stub(:needs_check?).and_return(true)
+      clusters_from_config['test2'].stub(:needs_check?).and_return(true)
+      clusters_from_config['test3'].stub(:needs_check?).and_return(false)
+
+      collection.ready_for_check.should == [clusters_from_config['test1'], clusters_from_config['test2']]
+    end
+  end
+end
data/spec/big_brother/cluster_spec.rb CHANGED
@@ -13,7 +13,7 @@ describe BigBrother::Cluster do
       cluster = Factory.cluster(:fwmark => 100, :scheduler => 'wrr')
 
       cluster.start_monitoring!
-      @stub_executor.commands.should include('ipvsadm --add-service --fwmark-service 100 --scheduler wrr')
+      @stub_executor.commands.should include('ipvsadm --add-service --fwmark-service 100 --scheduler wrr --persistent 300')
     end
   end
 
@@ -77,6 +77,124 @@ describe BigBrother::Cluster do
 
       cluster.monitor_nodes
     end
+
+    it "enables a downpage if none of the nodes have health > 0" do
+      node1 = Factory.node
+      node2 = Factory.node
+      cluster = Factory.cluster(:has_downpage => true, :nodes => [node1, node2])
+
+      BigBrother::HealthFetcher.stub(:current_health).and_return(0)
+
+      cluster.start_monitoring!
+      cluster.monitor_nodes
+      cluster.downpage_enabled?.should be_true
+    end
+
+    it "does not enable a downpage if the cluster does not have a downpage enabled" do
+      node1 = Factory.node
+      node2 = Factory.node
+      cluster = Factory.cluster(:has_downpage => false, :nodes => [node1, node2])
+
+      BigBrother::HealthFetcher.stub(:current_health).and_return(0)
+
+      cluster.start_monitoring!
+      cluster.monitor_nodes
+      cluster.downpage_enabled?.should be_false
+    end
+
+    it "adds a downpage node to IPVS when down" do
+      node1 = Factory.node
+      node2 = Factory.node
+      cluster = Factory.cluster(:has_downpage => true, :nodes => [node1, node2], :fwmark => 1)
+
+      BigBrother::HealthFetcher.stub(:current_health).and_return(0)
+
+      cluster.start_monitoring!
+      cluster.monitor_nodes
+
+      @stub_executor.commands.last.should == "ipvsadm --add-server --fwmark-service 1 --real-server 127.0.0.1 --ipip --weight 1"
+    end
+
+    it "removes downpage node from IPVS if it exists and cluster is up" do
+      node1 = Factory.node
+      node2 = Factory.node
+      cluster = Factory.cluster(:has_downpage => true, :nodes => [node1, node2], :fwmark => 1)
+
+      BigBrother::HealthFetcher.stub(:current_health).and_return(0)
+
+      cluster.start_monitoring!
+      cluster.monitor_nodes
+
+      BigBrother::HealthFetcher.stub(:current_health).and_return(10)
+      cluster.monitor_nodes
+
+      @stub_executor.commands.last.should == "ipvsadm --delete-server --fwmark-service 1 --real-server 127.0.0.1"
+    end
+
+    context "nagios" do
+      it "sends critical if at least half of all nodes are down" do
+        node1 = Factory.node(:address => '192.168.0.1')
+        node2 = Factory.node(:address => '192.168.0.2')
+        cluster = Factory.cluster(:nodes => [node1, node2], :nagios => {:host => "prod.load", :check => "test1_check", :server => "server.foo"})
+
+        node1.stub(:_determine_weight).and_return(0)
+        node2.stub(:_determine_weight).and_return(10)
+
+        cluster.start_monitoring!
+        cluster.monitor_nodes
+        @stub_executor.commands.should include("echo 'prod.load,test1_check,2,CRITICAL 50% of nodes are down' | send_nsca -H server.foo -d ,")
+      end
+
+      it "does not resend a Nagios check if the state does not change" do
+        node1 = Factory.node(:address => '192.168.0.1')
+        node2 = Factory.node(:address => '192.168.0.2')
+        cluster = Factory.cluster(:nodes => [node1, node2], :nagios => {:host => "prod.load", :check => "test1_check", :server => "server.foo"})
+
+        node1.stub(:_determine_weight).and_return(0)
+        node2.stub(:_determine_weight).and_return(10)
+
+        cluster.start_monitoring!
+        cluster.monitor_nodes
+        @stub_executor.clear_commands!
+
+        cluster.monitor_nodes
+        @stub_executor.commands.should_not include("echo 'prod.load,test1_check,2,CRITICAL 50% of nodes are down' | send_nsca -H server.foo -d ,")
+      end
+
+      it "sends info if at least half of one node is down" do
+        node1 = Factory.node(:address => '192.168.0.1')
+        node2 = Factory.node(:address => '192.168.0.2')
+        node3 = Factory.node(:address => '192.168.0.3')
+        cluster = Factory.cluster(:nodes => [node1, node2, node3], :nagios => {:host => "prod.load", :check => "test1_check", :server => "server.foo"})
+
+        node1.stub(:_determine_weight).and_return(0)
+        node2.stub(:_determine_weight).and_return(10)
+        node3.stub(:_determine_weight).and_return(10)
+
+        cluster.start_monitoring!
+        cluster.monitor_nodes
+        @stub_executor.commands.should include("echo 'prod.load,test1_check,1,WARNING a node is down' | send_nsca -H server.foo -d ,")
+      end
+
+      it "sends ok if all nodes back up" do
+        node1 = Factory.node(:address => '192.168.0.1')
+        node2 = Factory.node(:address => '192.168.0.2')
+        cluster = Factory.cluster(:nodes => [node1, node2], :nagios => {:host => "prod.load", :check => "test1_check", :server => "server.foo"})
+        node1.stub(:_determine_weight).and_return(0)
+        node2.stub(:_determine_weight).and_return(10)
+
+        cluster.start_monitoring!
+        cluster.monitor_nodes
+        @stub_executor.commands.should include("echo 'prod.load,test1_check,2,CRITICAL 50% of nodes are down' | send_nsca -H server.foo -d ,")
+        @stub_executor.clear_commands!
+
+        node1.stub(:_determine_weight).and_return(10)
+        node2.stub(:_determine_weight).and_return(10)
+        cluster.monitor_nodes
+        @stub_executor.commands.should include("echo 'prod.load,test1_check,0,OK all nodes up' | send_nsca -H server.foo -d ,")
+      end
+    end
+
   end
 
   describe "#resume_monitoring!" do
@@ -143,6 +261,18 @@ describe BigBrother::Cluster do
     end
   end
 
+  describe "#==" do
+    it "is true if two clusters have the same fwmark" do
+      cluster1 = Factory.cluster(:fwmark => '100')
+      cluster2 = Factory.cluster(:fwmark => '200')
+
+      cluster1.should_not == cluster2
+
+      cluster2 = Factory.cluster(:fwmark => '100')
+      cluster1.should == cluster2
+    end
+  end
+
   describe "#up_file_exists?" do
     it "returns true when an up file exists" do
       cluster = Factory.cluster(:name => 'name')
@@ -164,4 +294,23 @@ describe BigBrother::Cluster do
       cluster.down_file_exists?.should be_true
     end
   end
+
+  describe "incorporate_state" do
+    it "finds any equivalent nodes from the provided cluster, incorporates their state, and returns self" do
+      original_node1 = Factory.node(:address => '127.0.0.1')
+      original_node2 = Factory.node(:address => '127.0.1.1')
+      original_cluster = Factory.cluster(:nodes => [original_node1, original_node2])
+
+      config_node1 = Factory.node(:address => '127.0.0.1')
+      config_node2 = Factory.node(:address => '127.0.1.1')
+      config_cluster = Factory.cluster(:nodes => [config_node1, config_node2])
+
+      config_node1.should_receive(:incorporate_state).with(original_node1)
+      config_node2.should_receive(:incorporate_state).with(original_node2)
+
+      retval = config_cluster.incorporate_state(original_cluster)
+
+      retval.should == config_cluster
+    end
+  end
 end
data/spec/big_brother/configuration_spec.rb CHANGED
@@ -1,17 +1,24 @@
 require 'spec_helper'
 
 describe BigBrother::Configuration do
-  describe '.
-    it '
-      clusters = BigBrother::Configuration.
+  describe 'self.from_file' do
+    it 'maintain a collection of clusters' do
+      clusters = BigBrother::Configuration.from_file(TEST_CONFIG)
 
       clusters['test1'].check_interval.should == 1
       clusters['test1'].scheduler.should == 'wrr'
       clusters['test1'].fwmark.should == 1
+      clusters['test1'].persistent.should == 20
+      clusters['test1'].ramp_up_time.should == 120
+      clusters['test1'].has_downpage?.should == true
+      clusters['test1'].nagios[:check].should == 'test1_status'
+      clusters['test1'].nagios[:host].should == 'prod-load'
+      clusters['test1'].nagios[:server].should == 'nsca.host'
 
-      clusters['test2'].check_interval.should ==
+      clusters['test2'].check_interval.should == 2
       clusters['test2'].scheduler.should == 'wrr'
       clusters['test2'].fwmark.should == 2
+      clusters['test2'].ramp_up_time.should == 60
 
       clusters['test3'].check_interval.should == 1
       clusters['test3'].scheduler.should == 'wrr'
@@ -19,7 +26,7 @@ describe BigBrother::Configuration do
     end
 
     it 'populates a clusters nodes' do
-      clusters = BigBrother::Configuration.
+      clusters = BigBrother::Configuration.from_file(TEST_CONFIG)
 
       clusters['test1'].nodes.length.should == 2
 
@@ -31,5 +38,64 @@ describe BigBrother::Configuration do
       clusters['test1'].nodes[1].port == '9002'
       clusters['test1'].nodes[1].path == '/test/valid'
     end
+
+    it 'allows a default cluster configuration under the global config key' do
+      config_file = Tempfile.new('config.yml')
+      File.open(config_file, 'w') do |f|
+        f.puts(<<-EOF.gsub(/^ {10}/,''))
+          ---
+          _big_brother:
+            check_interval: 2
+            scheduler: wrr
+            nagios:
+              server: 127.0.0.2
+              host: ha-services
+          test_without_overrides:
+            fwmark: 2
+            nagios:
+              check: test_check
+            nodes:
+            - address: 127.0.0.1
+              port: 9001
+              path: /test/invalid
+          test_with_overrides:
+            fwmark: 3
+            scheduler: wlc
+            nagios:
+              host: override-host
+              check: test_overrides_check
+            nodes:
+            - address: 127.0.0.1
+              port: 9001
+              path: /test/invalid
+        EOF
+      end
+
+      clusters = BigBrother::Configuration.from_file(config_file)
+
+      clusters['test_without_overrides'].check_interval.should == 2
+      clusters['test_without_overrides'].scheduler.should == 'wrr'
+      clusters['test_without_overrides'].fwmark.should == 2
+      clusters['test_without_overrides'].nagios[:server].should == '127.0.0.2'
+      clusters['test_without_overrides'].nagios[:host].should == 'ha-services'
+      clusters['test_without_overrides'].nagios[:check].should == 'test_check'
+
+
+      clusters['test_with_overrides'].check_interval.should == 2
+      clusters['test_with_overrides'].scheduler.should == 'wlc'
+      clusters['test_with_overrides'].fwmark.should == 3
+      clusters['test_with_overrides'].nagios[:server].should == '127.0.0.2'
+      clusters['test_with_overrides'].nagios[:host].should == 'override-host'
+      clusters['test_with_overrides'].nagios[:check].should == 'test_overrides_check'
+    end
+  end
+
+  describe '_apply_defaults' do
+    it 'returns a new hash with the defaults hash and the settings hash merged recursively' do
+      defaults = {:foo => {:bar => 1}}
+      settings = {:foo => {:baz => 2}}
+      h = BigBrother::Configuration._apply_defaults(defaults, settings)
+      h.should == {:foo => {:bar => 1, :baz => 2}}
+    end
   end
 end
data/spec/big_brother/node_spec.rb CHANGED
@@ -15,6 +15,27 @@ describe BigBrother::Node do
       @stub_executor.commands.should include("ipvsadm --edit-server --fwmark-service 100 --real-server 127.0.0.1 --ipip --weight 56")
     end
 
+    it "a node's health should increase linearly over the specified ramp up time" do
+      BigBrother::HealthFetcher.stub(:current_health).and_return(100)
+      Time.stub(:now).and_return(1345043600)
+
+      node = Factory.node(:address => '127.0.0.1')
+      cluster = Factory.cluster(:ramp_up_time => 60, :fwmark => 100, :nodes => [node])
+      cluster.start_monitoring!
+
+      Time.stub(:now).and_return(1345043630)
+      node.monitor(cluster)
+      @stub_executor.commands.last.should == "ipvsadm --edit-server --fwmark-service 100 --real-server 127.0.0.1 --ipip --weight 50"
+
+      Time.stub(:now).and_return(1345043645)
+      node.monitor(cluster)
+      @stub_executor.commands.last.should == "ipvsadm --edit-server --fwmark-service 100 --real-server 127.0.0.1 --ipip --weight 75"
+
+      Time.stub(:now).and_return(1345043720)
+      node.monitor(cluster)
+      @stub_executor.commands.last.should == "ipvsadm --edit-server --fwmark-service 100 --real-server 127.0.0.1 --ipip --weight 100"
+    end
+
     it "sets the weight to 100 for each node if an up file exists" do
       BigBrother::HealthFetcher.stub(:current_health).and_return(56)
       node = Factory.node(:address => '127.0.0.1')
@@ -86,4 +107,41 @@ describe BigBrother::Node do
       @stub_executor.commands.should == []
     end
   end
+
+  describe "#==" do
+    it "is true when two nodes have the same address and port" do
+      node1 = Factory.node(:address => "127.0.0.1", :port => "8000")
+      node2 = Factory.node(:address => "127.0.0.1", :port => "8001")
+      node1.should_not == node2
+
+      node2 = Factory.node(:address => "127.0.0.2", :port => "8000")
+      node1.should_not == node2
+
+      node2 = Factory.node(:address => "127.0.0.1", :port => "8000")
+      node1.should == node2
+    end
+  end
+
+  describe "age" do
+    it "is the time in seconds since the node started" do
+      Time.stub(:now).and_return(1345043612)
+      node = Factory.node(:address => "127.0.0.1")
+
+      node.age.should == 0
+    end
+  end
+
+  describe "incorporate_state" do
+    it "takes the weight and the start time from the other node, but leaves rest of config" do
+      original_start_time = Time.now
+      node_with_state = Factory.node(:path => '/old/path', :start_time => original_start_time, :weight => 65)
+      node_from_config = Factory.node(:path => '/new/path', :start_time => Time.now, :weight => 100)
+
+      node_from_config.incorporate_state(node_with_state)
+
+      node_from_config.path.should == '/new/path'
+      node_from_config.start_time.should == original_start_time
+      node_from_config.weight.should == 65
+    end
+  end
 end
data/spec/big_brother/status_file_spec.rb CHANGED
@@ -1,6 +1,15 @@
 require 'spec_helper'
 
 describe BigBrother::StatusFile do
+  describe "initialize" do
+    it "accepts symbols as arguments, since that's how they will come from configuration" do
+      status_file = BigBrother::StatusFile.new(:foo)
+      status_file.create("for testing")
+
+      status_file.exists?.should == true
+    end
+  end
+
   describe "create" do
     it "creates a nested file" do
       status_file = BigBrother::StatusFile.new("foo", "bar")
data/spec/big_brother_spec.rb CHANGED
@@ -21,13 +21,14 @@ HTTP
     spec.run
     server.stop
   end
+
   it "reconfigures the clusters" do
     config_file = Tempfile.new('config.yml')
     File.open(config_file, 'w') do |f|
       f.puts(<<-EOF)
 ---
 test1:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 1
   nodes:
@@ -44,7 +45,7 @@ EOF
       f.puts(<<-EOF)
 ---
 test1:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 1
   nodes:
@@ -57,15 +58,62 @@ EOF
     BigBrother.clusters['test1'].nodes.first.path.should == "/test/another/path"
   end
 
+  it "maintains the start_time and weight of existing nodes after reconfiguring" do
+    Time.stub(:now).and_return(Time.at(1345043600))
+    config_file = Tempfile.new('config.yml')
+    File.open(config_file, 'w') do |f|
+      f.puts(<<-EOF)
+---
+test1:
+  check_interval: 1
+  scheduler: wrr
+  fwmark: 1
+  nodes:
+  - address: 127.0.0.1
+    port: 9001
+    path: /test/valid
+EOF
+    end
+    BigBrother.configure(config_file)
+    BigBrother.start_ticker!
+
+    Time.stub(:now).and_return(Time.at(1345043700))
+    start_time = BigBrother.clusters['test1'].nodes[0].start_time
+    weight = BigBrother.clusters['test1'].nodes[0].weight
+
+    File.open(config_file, 'w') do |f|
+      f.puts(<<-EOF)
+---
+test1:
+  check_interval: 1
+  scheduler: wrr
+  fwmark: 1
+  nodes:
+  - address: 127.0.0.1
+    port: 9001
+    path: /test/valid
+  - address: 127.0.0.2
+    port: 9001
+    path: /test/valid
+EOF
+    end
+    BigBrother.reconfigure
+    BigBrother.clusters['test1'].nodes[0].start_time.should == start_time
+    BigBrother.clusters['test1'].nodes[0].weight.should == weight
+    BigBrother.clusters['test1'].nodes[1].start_time.should == 1345043700
+    BigBrother.clusters['test1'].nodes[1].weight.should be_nil
+  end
+
   it "stops the ticker and reconfigures after it has finished all its ticks" do
     config_file = Tempfile.new('config.yml')
     File.open(config_file, 'w') do |f|
       f.puts(<<-EOF)
 ---
 test1:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 1
+  ramp_up_time: 0
   nodes:
   - address: 127.0.0.1
     port: 9001
@@ -84,7 +132,7 @@ EOF
       f.puts(<<-EOF)
 ---
 test1:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 1
   nodes:
data/spec/spec_helper.rb CHANGED
@@ -15,12 +15,13 @@ RSpec.configure do |config|
     ipvs = BigBrother.ipvs
     @stub_executor = StubExecutor.new
     BigBrother.ipvs = BigBrother::IPVS.new(@stub_executor)
+    BigBrother.nagios = BigBrother::Nagios.new(@stub_executor)
     spec.run
     BigBrother.ipvs = ipvs
   end
 
   config.before(:each) do
-    BigBrother.clusters.
+    BigBrother.clusters.clear
     FileUtils.rm_rf(BigBrother.config_dir)
     BigBrother.logger = NullLogger.new
   end
data/spec/support/example_config.yml CHANGED
@@ -1,8 +1,15 @@
 ---
 test1:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 1
+  persistent: 20
+  ramp_up_time: 120
+  has_downpage: true
+  nagios:
+    server: nsca.host
+    check: test1_status
+    host: prod-load
   nodes:
   - address: 127.0.0.1
     port: 9001
@@ -11,7 +18,7 @@ test1:
     port: 9002
     path: /test/valid
 test2:
-
+  check_interval: 2
   scheduler: wrr
   fwmark: 2
   nodes:
@@ -22,7 +29,7 @@ test2:
     port: 9002
     path: /test/invalid
 test3:
-
+  check_interval: 1
   scheduler: wrr
   fwmark: 3
   nodes:
data/spec/support/factories/cluster_factory.rb CHANGED
@@ -3,11 +3,12 @@ class Factory
     BigBrother::Cluster.new(
       overrides.fetch(:name, 'test'),
       {
-
-
-
-
-
+        :fwmark => 100,
+        :scheduler => 'wrr',
+        :check_interval => 1,
+        :nodes => [Factory.node],
+        :ramp_up_time => 0
+      }.merge(overrides)
     )
   end
 end
data/spec/support/factories/node_factory.rb CHANGED
@@ -1,9 +1,11 @@
 class Factory
   def self.node(overrides = {})
     BigBrother::Node.new(
-
-
-
+      {
+        :address => 'localhost',
+        :port => 8081,
+        :path => '/test/status'
+      }.merge(overrides)
     )
   end
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: big_brother
 version: !ruby/object:Gem::Version
-  version: 0.4.1
+  version: 0.5.0
 prerelease:
 platform: ruby
 authors:
@@ -9,11 +9,11 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-
+date: 2012-08-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thin
-  requirement: &
+  requirement: &70176230223860 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -21,10 +21,10 @@ dependencies:
       version: 1.3.1
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230223860
 - !ruby/object:Gem::Dependency
   name: async-rack
-  requirement: &
+  requirement: &70176230223060 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -32,10 +32,10 @@ dependencies:
       version: 0.5.1
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230223060
 - !ruby/object:Gem::Dependency
   name: sinatra
-  requirement: &
+  requirement: &70176230222360 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -43,10 +43,10 @@ dependencies:
       version: '1.0'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230222360
 - !ruby/object:Gem::Dependency
   name: rack-fiber_pool
-  requirement: &
+  requirement: &70176230221420 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -54,10 +54,10 @@ dependencies:
       version: '0.9'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230221420
 - !ruby/object:Gem::Dependency
   name: eventmachine
-  requirement: &
+  requirement: &70176230220360 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>'
@@ -68,10 +68,10 @@ dependencies:
       version: 1.0.0.beta.100
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230220360
 - !ruby/object:Gem::Dependency
   name: em-http-request
-  requirement: &
+  requirement: &70176230219060 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -79,10 +79,10 @@ dependencies:
       version: '1.0'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230219060
 - !ruby/object:Gem::Dependency
   name: em-synchrony
-  requirement: &
+  requirement: &70176230303940 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -90,10 +90,10 @@ dependencies:
       version: '1.0'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230303940
 - !ruby/object:Gem::Dependency
   name: em-resolv-replace
-  requirement: &
+  requirement: &70176230302240 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -101,10 +101,10 @@ dependencies:
       version: '1.1'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230302240
 - !ruby/object:Gem::Dependency
   name: em-syslog
-  requirement: &
+  requirement: &70176230301680 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -112,10 +112,10 @@ dependencies:
       version: 0.0.2
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230301680
 - !ruby/object:Gem::Dependency
   name: rspec
-  requirement: &
+  requirement: &70176230300780 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -123,10 +123,10 @@ dependencies:
       version: 2.9.0
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230300780
 - !ruby/object:Gem::Dependency
   name: rack-test
-  requirement: &
+  requirement: &70176230300280 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -134,10 +134,10 @@ dependencies:
       version: 0.6.1
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230300280
 - !ruby/object:Gem::Dependency
   name: rake
-  requirement: &
+  requirement: &70176230299880 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -145,10 +145,10 @@ dependencies:
       version: '0'
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230299880
 - !ruby/object:Gem::Dependency
   name: rake_commit
-  requirement: &
+  requirement: &70176230299300 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -156,10 +156,10 @@ dependencies:
       version: '0.13'
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230299300
 - !ruby/object:Gem::Dependency
   name: vagrant
-  requirement: &
+  requirement: &70176230298860 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -167,7 +167,7 @@ dependencies:
       version: '0'
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70176230298860
 description: IPVS backend supervisor
 email:
 - code@getbraintree.com
@@ -191,10 +191,12 @@ files:
 - lib/big_brother/app.rb
 - lib/big_brother/cli.rb
 - lib/big_brother/cluster.rb
+- lib/big_brother/cluster_collection.rb
 - lib/big_brother/configuration.rb
 - lib/big_brother/health_fetcher.rb
 - lib/big_brother/ipvs.rb
 - lib/big_brother/logger.rb
+- lib/big_brother/nagios.rb
 - lib/big_brother/node.rb
 - lib/big_brother/shell_executor.rb
 - lib/big_brother/status_file.rb
@@ -205,6 +207,7 @@ files:
 - lib/thin/callback_rack_handler.rb
 - lib/thin/callbacks.rb
 - spec/big_brother/app_spec.rb
+- spec/big_brother/cluster_collection_spec.rb
 - spec/big_brother/cluster_spec.rb
 - spec/big_brother/configuration_spec.rb
 - spec/big_brother/health_fetcher_spec.rb
@@ -245,12 +248,13 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.12
 signing_key:
 specification_version: 3
 summary: Process to monitor and update weights for servers in an IPVS pool
 test_files:
 - spec/big_brother/app_spec.rb
+- spec/big_brother/cluster_collection_spec.rb
 - spec/big_brother/cluster_spec.rb
 - spec/big_brother/configuration_spec.rb
 - spec/big_brother/health_fetcher_spec.rb