port-authority 0.3.11 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,15 +1,15 @@
  ---
  !binary "U0hBMQ==":
  metadata.gz: !binary |-
- ODI4MTA4MjMzMGNkNDVkN2YzMzg2Y2U2YzhiMTgwNTJhMTZiNzZkNw==
+ NjQxZGVlNDk3YzA1OTNkY2ZiYzM1ZjY3MmJkZGU0OTVjZGM1Y2JlMA==
  data.tar.gz: !binary |-
- NTdlMmYyYTVhYzM5MmI0NmFkNTk3MDQ2MGY1ZTI4N2YyODUyMWU3Mg==
+ YWMwYTk0NjYzYjI2OTAyYTc0OWQwOGJhZWY5MjE5M2U4MWZhMDMzMg==
  SHA512:
  metadata.gz: !binary |-
- MTNkYjYxZTk2YTc2YTRmNTYwYTNiZTI4ZGIzOTRiNjMxMTE3MGVhNDYzMDg4
- NTNjNTQzODAwMDcwZjM3NjU5OTQzMDE2MWM2ZWFlMTUxMTk1NGJlNzMxM2Nk
- NGM5ODgwNWQyNWNjZDMwNTk4OGY3ZmE5OGZjNGFlNGMzOTZlNTg=
+ NGRkYzljOGQ2ZmJkZjRlYTA1YjliNDZjNjhiYTcxYjNjNTNlY2E2MzFmYWQ4
+ NjRjYzNmZDU5ZWFlZDNlMTczY2I1YmFjMWI5MDdiZDhmNDVkNTc1NGRjY2M5
+ YzIyY2JjZmJiM2I5N2U5ODBjNDBkZmM1MmZjZDk0MjgwYjlhYWI=
  data.tar.gz: !binary |-
- YjQ0ZDBkNjA1ZjdkOTQ3NmE2YzI4ZDY1MmU3ODg4NDU4NDc3MTRkYjBhZjIz
- Y2ZlZjgwZWM0OWE3NjY1MjVmNTk5MWUwMTZiNTg5MjkyMDQzY2IwODc3NzU5
- NjJkOWUyZGM5OTZjODIyNmQ3YmFhMDU0OTg2MGE1OTk4Yzc3M2E=
+ NTY4MWVkODk1NzE5ZjU3ZTNlYTYwNzA0MjkxZDg3MDkxNjhjN2Y0YWQ4MGY3
+ MzNiNDM2ZWM0MjFjN2FhMmNmYTJhODkxMGFmMmE5ZmZjMzVkOTk0MDFiNDNh
+ ZGFlYzA3MGQ4MjMwOWUwNDYxNmZiOGQyZDEwMDk3NGQwMTI5OTg=
data/bin/pa-manager CHANGED
@@ -1,3 +1,3 @@
  #!/usr/bin/env ruby
  require 'port-authority/manager/app'
- PortAuthority::Manager::App.new.run
+ PortAuthority::Manager::App.new('pa-manager').run
@@ -1,4 +1,6 @@
+ # rubocop:disable MethodLength, CyclomaticComplexity, Metrics/BlockNesting, Metrics/LineLength, Metrics/AbcSize, Metrics/PerceivedComplexity
  require 'ipaddr'
+ require 'port-authority'
  require 'port-authority/util/vip'
  require 'port-authority/util/etcd'
  require 'port-authority/util/loadbalancer'
@@ -8,8 +10,10 @@ require 'port-authority/manager/threads/swarm'

  module PortAuthority
  module Manager
+ ##
+ # Port Authority Manager - manages floating VIP and lb placement
+ #
  class App < PortAuthority::Manager::Init
-
  include PortAuthority::Util::Etcd
  include PortAuthority::Util::Vip
  include PortAuthority::Util::LoadBalancer
@@ -18,25 +22,17 @@ module PortAuthority
  def run
  # exit if not root
  if Process.euid != 0
- $stderr.puts 'Must run under root user!'
+ alert 'must run under root user!'
  exit! 1
  end

- # set process name and nice level (default: -20)
- setup 'pa-manager'
+ Signal.trap('USR1') { @lb_update_hook = true }

  # prepare semaphores
- @semaphore = {
- log: Mutex.new,
- swarm: Mutex.new,
- icmp: Mutex.new
- }
+ @semaphore.merge!(swarm: Mutex.new, icmp: Mutex.new)

  # prepare threads
- @thread = {
- icmp: thread_icmp,
- swarm: thread_swarm
- }
+ @thread = {icmp: thread_icmp,swarm: thread_swarm}

  # prepare status vars
  @status_swarm = false
@@ -49,18 +45,24 @@ module PortAuthority
  lb_docker_setup! || @exit = true

  # prepare container with load-balancer
- lb_create || @exit = true
+ lb_create!

  # wait for threads to make sure they gather something
  debug 'waiting for threads to gather something...'
  sleep @config[:vip][:interval]
  first_cycle = true
+ status_time = Time.now.to_i - 60

  # main loop
  until @exit
  # initialize local state vars on first iteration
  status_swarm = status_icmp = false if first_cycle

+ if @lb_update_hook
+ notice 'updating LB image'
+ lb_update!
+ end
+
  # iteration interval
  sleep @config[:vip][:interval]

@@ -70,10 +72,11 @@ module PortAuthority

  # the logic (should be self-explanatory ;))
  if status_swarm
+ debug 'i am the leader'
  if got_vip?
- debug 'i am the leader with VIP, that is OK'
+ debug 'got VIP, that is OK'
  else
- info 'i am the leader without VIP, checking whether it is free'
+ info 'no VIP here, checking whether it is free'
  if status_icmp
  info 'VIP is still up! (ICMP)'
  # FIXME: notify by sensu client socket
@@ -89,68 +92,62 @@ module PortAuthority
  # info 'updating other hosts about change'
  # vip_update_arp!
  # end
+ notice 'VIP is free :) assigning'
+ vip_handle! status_swarm
+ notice 'updating other hosts about change'
+ vip_update_arp!
  end
- info 'VIP is free :) assigning'
- vip_handle! status_swarm
- info 'updating other hosts about change'
- vip_update_arp!
  end
  if lb_up?
- debug 'i am the leader and load-balancer is up, that is OK'
+ debug 'load-balancer is up, that is OK'
  else
- info 'i am the leader and load-balancer is down, starting'
+ notice 'load-balancer is down, starting'
  lb_start!
  end
  else
+ debug 'i am not the leader'
  if got_vip?
- info 'i got VIP and should not, removing'
+ notice 'i got VIP and should not, removing'
  vip_handle! status_swarm
- info 'updating other hosts about change'
+ notice 'updating other hosts about change'
  vip_update_arp!
  else
- debug 'i am not the leader and i do not have the VIP, that is OK'
+ debug 'no VIP here, that is OK'
  end
  if lb_up?
- info 'i am not the leader and load-balancer is up, stopping'
+ notice 'load-balancer is up, stopping'
  lb_stop!
  else
- debug 'i am not the leader and load-balancer is down, that is OK'
+ debug 'load-balancer is down, that is OK'
  end
  end

- next unless first_cycle
+ if status_time + 60 <= Time.now.to_i
+ info "STATUS_REPORT { leader: '#{status_swarm ? 'yes' : 'no'}', vip: '#{got_vip? ? 'yes' : 'no'}/#{status_icmp ? 'up' : 'down'}', lb: '#{lb_up? ? 'yes' : 'no'}' }"
+ status_time = Time.now.to_i
+ end

- # short report on first cycle
- info "i #{status_swarm ? 'AM' : 'am NOT'} the leader"
- info "i #{got_vip? ? 'DO' : 'do NOT'} have the VIP"
- info "i #{status_icmp ? 'CAN' : 'CANNOT'} see the VIP"
- info "i #{lb_up? ? 'AM' : 'am NOT'} running the LB"
- first_cycle = false
  end

  # this is triggerred on exit
- info 'SIGTERM received'
- info 'waiting for threads to finish...'
  @thread.each_value(&:join)

  # remove VIP on shutdown
  if got_vip?
- info 'removing VIP'
+ notice 'removing VIP'
  vip_handle! false
  vip_update_arp!
  end

  # stop LB on shutdown
  if lb_up?
- info 'stopping load-balancer'
+ notice 'stopping load-balancer'
  lb_stop!
  end

  info 'exiting...'
  exit 0
  end
-
-
  end
  end
  end
@@ -8,28 +8,33 @@ require 'port-authority/util/helpers'
  module PortAuthority
  module Manager
  class Init
-
  include PortAuthority::Util::Config
  include PortAuthority::Util::Logger
  include PortAuthority::Util::Helpers

- def initialize
- @config = { debug: false }
+ def initialize(proc_name='dummy')
  @config = config
  @exit = false
- @exit_sigs = ['INT', 'TERM']
+ @semaphore = { log: Mutex.new }
+ Thread.current[:name] = 'main'
+ syslog_init proc_name if @config[:syslog]
+ setup proc_name
+ info 'starting main thread'
+ debug 'setting signal handling'
+ @exit_sigs = %w(INT TERM)
  @exit_sigs.each { |sig| Signal.trap(sig) { @exit = true } }
- Signal.trap('USR1') { @config[:debug] = false }
- Signal.trap('USR2') { @config[:debug] = true }
+ Signal.trap('USR2') { @config[:debug] = !@config[:debug] }
  Signal.trap('HUP') { @config = config }
  end

  def setup(proc_name, nice = -20)
+ debug 'setting process name'
  if RUBY_VERSION >= '2.1'
  Process.setproctitle(proc_name)
  else
  $0 = proc_name
  end
+ debug 'setting process title'
  Process.setpriority(Process::PRIO_PROCESS, 0, nice)
  # FIXME: Process.daemon ...
  end
@@ -1,3 +1,4 @@
+ # rubocop:disable Metrics/MethodLength
  require 'net/ping'

  module PortAuthority
@@ -5,16 +6,23 @@ module PortAuthority
  module Threads
  def thread_icmp
  Thread.new do
- debug 'starting ICMP thread...'
- icmp = Net::Ping::ICMP.new(@config[:vip][:ip])
- until @exit
- debug 'checking state by ICMP echo'
- status = vip_alive? icmp
- @semaphore[:icmp].synchronize { @status_icmp = status }
- debug "VIP is #{status ? 'alive' : 'down' } according to ICMP"
- sleep @config[:icmp][:interval]
+ Thread.current[:name] = 'icmp'
+ begin
+ info 'starting ICMP thread...'
+ icmp = Net::Ping::ICMP.new(@config[:vip][:ip])
+ until @exit
+ debug 'checking state by ICMP echo'
+ status = vip_alive? icmp
+ @semaphore[:icmp].synchronize { @status_icmp = status }
+ debug "VIP is #{status ? 'alive' : 'down'} according to ICMP"
+ sleep @config[:icmp][:interval]
+ end
+ info 'ending ICMP thread...'
+ rescue StandardError => e
+ alert "#{e.class}: #{e.message}"
+ alert e.backtrace
+ @exit = true
  end
- info 'ending ICMP thread...'
  end
  end
  end
@@ -1,18 +1,40 @@
+ # rubocop:disable Metrics/MethodLength
  module PortAuthority
  module Manager
  module Threads
  def thread_swarm
  Thread.new do
- debug 'starting swarm thread...'
- etcd = etcd_connect!
- until @exit
- debug 'checking swarm state'
- status = am_i_leader? etcd
- @semaphore[:swarm].synchronize { @status_swarm = status }
- debug "i am #{status ? 'the leader' : 'not the leader' }"
+ Thread.current[:name] = 'swarm'
+ info 'starting swarm thread...'
+ begin
+ etcd = etcd_connect!
+ until @exit
+ debug 'checking ETCD state'
+ etcd_healthy? etcd
+ debug 'checking swarm state'
+ status = am_i_leader? etcd
+ @semaphore[:swarm].synchronize { @status_swarm = status }
+ debug "i am #{status ? 'the leader' : 'not the leader' }"
+ sleep @config[:etcd][:interval]
+ end
+ info 'ending swarm thread...'
+ rescue PortAuthority::Errors::ETCDIsSick => e
+ notice "#{e.class}: #{e.message}"
+ notice "connection: " + e.etcd.to_s
+ @semaphore[:swarm].synchronize { @status_swarm = false }
  sleep @config[:etcd][:interval]
+ retry unless @exit
+ rescue PortAuthority::Errors::ETCDConnectFailed => e
+ err "#{e.class}: #{e.message}"
+ err "connection: " + e.etcd.to_s
+ @semaphore[:swarm].synchronize { @status_swarm = false }
+ sleep @config[:etcd][:interval]
+ retry unless @exit
+ rescue StandardError => e
+ alert e.message
+ alert e.backtrace.to_s
+ @exit = true
  end
- info 'ending swarm thread...'
  end
  end
  end
@@ -4,19 +4,34 @@ require 'etcd-tools'
  module PortAuthority
  module Util
  module Config
+
+ def config
+ cfg = default_config
+ if File.exist? '/etc/port-authority.yaml'
+ cfg = cfg.deep_merge YAML.load_file('/etc/port-authority.yaml')
+ puts 'loaded config from /etc/port-authority.yaml'
+ elsif File.exist? './port-authority.yaml'
+ cfg = cfg.deep_merge YAML.load_file('./port-authority.yaml')
+ puts 'loaded config from ./port-authority.yaml'
+ else
+ puts 'no config file loaded, using defaults'
+ end
+ cfg
+ end
+
  private

  def default_config
  { debug: false,
  syslog: false,
  etcd: {
- endpoint: 'http://localhost:4001',
- interval: 1,
- timeout: 2
+ endpoints: ['http://localhost:2379'],
+ interval: 5,
+ timeout: 5
  },
  icmp: {
- count: 2,
- interval: 1
+ count: 5,
+ interval: 2
  },
  arping: {
  count: 1,
@@ -41,20 +56,6 @@ module PortAuthority
  }
  }
  end
-
- def config
- cfg = default_config
- if File.exist? '/etc/port-authority.yaml'
- cfg = cfg.deep_merge YAML.load_file('/etc/port-authority.yaml')
- puts 'loaded config from /etc/port-authority.yaml'
- elsif File.exist? './port-authority.yaml'
- cfg = cfg.deep_merge YAML.load_file('./port-authority.yaml')
- puts 'loaded config from ./port-authority.yaml'
- else
- puts 'no config file loaded, using defaults'
- end
- cfg
- end
  end
  end
  end
@@ -1,25 +1,21 @@
  require 'etcd'
+ require 'etcd-tools/mixins'

  module PortAuthority
  module Util
  module Etcd
  # connect to ETCD
  def etcd_connect!
- (host, port) = @config[:etcd][:endpoint].gsub(/^https?:\/\//, '').gsub(/\/$/, '').split(':')
- etcd = ::Etcd.client(host: host, port: port)
- begin
- versions = JSON.parse(etcd.version)
- info "conncted to ETCD at #{@config[:etcd][:endpoint]}"
- info "server version: #{versions['etcdserver']}"
- info "cluster version: #{versions['etcdcluster']}"
- info "healthy: #{etcd.healthy?}"
- return etcd
- rescue Exception => e
- err "couldn't connect to etcd at #{host}:#{port}"
- err "#{e.message}"
- @exit = true
- return nil
- end
+ endpoints = @config[:etcd][:endpoints].map { |e| e = e.gsub!(/^https?:\/\//, '').gsub(/\/$/, '').split(':'); { host: e[0], port: e[1].to_i } }
+ debug "parsed ETCD endpoints: #{endpoints.to_s}"
+ etcd = ::Etcd::Client.new(cluster: endpoints, read_timeout: @config[:etcd][:timeout])
+ etcd if etcd.version
+ rescue
+ raise PortAuthority::Errors::ETCDConnectFailed.new(@config[:etcd][:endpoints])
+ end
+
+ def etcd_healthy?(etcd)
+ raise PortAuthority::Errors::ETCDIsSick.new(@config[:etcd][:endpoints]) unless etcd.healthy?
  end

  def swarm_leader(etcd)
@@ -27,8 +23,8 @@ module PortAuthority
  end

  def am_i_leader?(etcd)
- Socket.ip_address_list.map(){|a| a.ip_address }.member?(swarm_leader(etcd).split(':').first)
- rescue Exception => e
+ Socket.ip_address_list.map(&:ip_address).member?(swarm_leader(etcd).split(':').first)
+ rescue StandardError => e
  false
  end

@@ -8,7 +8,7 @@ module PortAuthority
  end

  def my_ip
- @my_ip ||= Socket.ip_address_list.detect { |i| i.ipv4_private? }.ip_address
+ @my_ip ||= Socket.ip_address_list.detect(&:ipv4_private?).ip_address
  end

  def arping
@@ -12,7 +12,21 @@ module PortAuthority
  false
  end

- def lb_create
+ def lb_update!
+ lb_stop! if lb_up?
+ lb_remove!
+ lb_create!
+ @lb_update_hook = false
+ end
+
+ def lb_remove!
+ Docker::Container.get(@config[:lb][:name]).delete
+ rescue Docker::Error::NotFoundError
+ end
+
+
+ def lb_create!
+ lb_remove!
  img = Docker::Image.create('fromImage' => @config[:lb][:image])

  # setup port bindings hash
@@ -21,19 +35,12 @@ module PortAuthority
  port_bindings[port] = [ { 'HostPort' => "#{port.split('/').first}" } ]
  end

- begin
- Docker::Container.get(@config[:lb][:name]).delete
- info 'old LB removed'
- rescue Docker::Error::NotFoundError
- debug 'no LB found here, not removing'
- end
-
  # create container with
  @lb_container = Docker::Container.create(
  'Image' => img.json['Id'],
  'name' => @config[:lb][:name],
  'Hostname' => @config[:lb][:name],
- 'Env' => [ "ETCDCTL_ENDPOINT=http://#{@config[:vip][:ip]}:4001" ],
+ 'Env' => [ "ETCDCTL_ENDPOINT=#{@config[:etcd][:endpoints].join(',')}" ],
  'RestartPolicy' => { 'Name' => 'never' },
  'HostConfig' => {
  'PortBindings' => port_bindings,
@@ -1,42 +1,57 @@
- require 'logger'
+ # rubocop:disable Metrics/LineLength, Metrics/AbcSize, Metrics/MethodLength
+ require 'syslog'

  module PortAuthority
  module Util
  module Logger
+ def debug(message)
+ log :debug, message if @config[:debug]
+ end
+
  def info(message)
- if @config[:debug]
- @semaphore[:log].synchronize do
- $stdout.puts(Time.now.to_s + ' INFO (TID:' + Thread.current.object_id.to_s + ') ' + message.to_s)
- $stdout.flush
- end
- else
- @semaphore[:log].synchronize do
- $stdout.puts(Time.now.to_s + ' INFO ' + message.to_s)
- $stdout.flush
- end
- end
+ log :info, message
+ end
+
+ def notice(message)
+ log :notice, message
  end

  def err(message)
- if @config[:debug]
+ log :err, message
+ end
+
+ def alert(message)
+ log :alert, message if @config[:debug]
+ end
+
+ def syslog_init(proc_name)
+ Syslog.open(proc_name, Syslog::LOG_PID, Syslog::LOG_DAEMON)
+ end
+
+ def log(lvl, msg)
+ if @config[:syslog]
+ case lvl
+ when :debug
+ l = Syslog::LOG_DEBUG
+ when :info
+ l = Syslog::LOG_INFO
+ when :notice
+ l = Syslog::LOG_NOTICE
+ when :err
+ l = Syslog::LOG_ERR
+ when :alert
+ l = Syslog::LOG_ALERT
+ end
  @semaphore[:log].synchronize do
- $stdout.puts(Time.now.to_s + ' ERROR (TID:' + Thread.current.object_id.to_s + ') ' + message.to_s)
- $stdout.flush
+ Syslog.log(l, "(%s) %s", Thread.current[:name], msg.to_s)
  end
  else
  @semaphore[:log].synchronize do
- $stdout.puts(Time.now.to_s + ' ERROR ' + message.to_s)
+ $stdout.puts("#{Time.now.to_s} #{lvl.to_s[0].capitalize} (#{Thread.current[:name]} #{msg.to_s}")
  $stdout.flush
  end
  end
  end
-
- def debug(message)
- @semaphore[:log].synchronize do
- $stdout.puts(Time.now.to_s + ' DEBUG (TID:' + Thread.current.object_id.to_s + ') ' + message.to_s)
- $stdout.flush
- end if @config[:debug]
- end
  end
  end
  end
@@ -1,3 +1,4 @@
+ # rubocop:disable Metrics/LineLength, Metrics/AbcSize
  module PortAuthority
  module Util
  module Vip
@@ -6,16 +7,7 @@ module PortAuthority
  def vip_handle!(leader)
  ip = IPAddr.new(@config[:vip][:ip])
  mask = @config[:vip][:mask]
- cmd = [ iproute,
- 'address',
- '',
- "#{ip}/#{mask}",
- 'dev',
- @config[:vip][:interface],
- 'label',
- @config[:vip][:interface] + '-vip',
- '>/dev/null 2>&1'
- ]
+ cmd = [iproute, 'address', '', "#{ip}/#{mask}", 'dev', @config[:vip][:interface], 'label', @config[:vip][:interface] + '-vip', '>/dev/null 2>&1']
  leader ? cmd[2] = 'add' : cmd[2] = 'delete'
  debug "#{cmd.join(' ')}"
  if system(cmd.join(' '))
@@ -27,10 +19,7 @@ module PortAuthority

  # send gratuitous ARP to the network
  def vip_update_arp!
- cmd = [ arping, '-U', '-q',
- '-c', @config[:arping][:count],
- '-I', @config[:vip][:interface],
- @config[:vip][:ip] ]
+ cmd = [arping, '-U', '-q', '-c', @config[:arping][:count], '-I', @config[:vip][:interface], @config[:vip][:ip]]
  debug "#{cmd.join(' ')}"
  if system(cmd.join(' '))
  return true
@@ -41,23 +30,19 @@ module PortAuthority
  end

  # check whether VIP is assigned to me
  def got_vip?
- Socket.ip_address_list.map(){|a| a.ip_address }.member?(@config[:vip][:ip])
+ Socket.ip_address_list.map(&:ip_address).member?(@config[:vip][:ip])
  end

  # check reachability of VIP by ICMP echo
  def vip_alive?(icmp)
  (1..@config[:icmp][:count]).each { return true if icmp.ping }
- return false
+ false
  end

  # check whether the IP is registered anywhere
  def vip_dup?
- cmd_arp = [ arp, '-d', @config[:vip][:ip], '>/dev/null 2>&1' ]
- cmd_arping = [ arping, '-D', '-q',
- '-c', @config[:arping][:count],
- '-w', @config[:arping][:wait],
- '-I', @config[:vip][:interface],
- @config[:vip][:ip] ]
+ cmd_arp = [arp, '-d', @config[:vip][:ip], '>/dev/null 2>&1']
+ cmd_arping = [arping, '-D', '-q', '-c', @config[:arping][:count], '-w', @config[:arping][:wait], '-I', @config[:vip][:interface], @config[:vip][:ip]]
  debug "#{cmd_arp.join(' ')}"
  system(cmd_arp.join(' '))
  debug "#{cmd_arping.join(' ')}"
@@ -4,4 +4,25 @@ module PortAuthority

  module Util
  end
+
+ module Errors
+
+ class ETCDConnectFailed < StandardError
+ attr_reader :etcd, :message
+ def initialize(etcd, message = "Can't connect to ETCD")
+ @message = message
+ @etcd = etcd
+ end
+ end
+
+ class ETCDIsSick < StandardError
+ attr_reader :etcd, :message
+ def initialize(etcd, message = 'ETCD is not healthy')
+ @message = message
+ @etcd = etcd
+ end
+ end
+
+ end
+
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: port-authority
  version: !ruby/object:Gem::Version
- version: 0.3.11
+ version: 0.4.1
  platform: ruby
  authors:
  - Radek 'blufor' Slavicinsky
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-01-21 00:00:00.000000000 Z
+ date: 2016-01-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: etcd
@@ -36,20 +36,20 @@ dependencies:
  requirements:
  - - ~>
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: '0.4'
  - - ! '>='
  - !ruby/object:Gem::Version
- version: 0.3.0
+ version: 0.4.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ~>
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: '0.4'
  - - ! '>='
  - !ruby/object:Gem::Version
- version: 0.3.0
+ version: 0.4.0
  - !ruby/object:Gem::Dependency
  name: net-ping
  requirement: !ruby/object:Gem::Requirement