proxymgr 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +8 -0
- data/.rspec +2 -0
- data/.rubocop.yml +11 -0
- data/Gemfile +8 -0
- data/Gemfile.lock +52 -0
- data/README.md +99 -0
- data/Rakefile +5 -0
- data/bin/proxymgr +74 -0
- data/etc/haproxy.cfg.erb +11 -0
- data/examples/config.yml +5 -0
- data/lib/proxymgr.rb +20 -0
- data/lib/proxymgr/callbacks.rb +17 -0
- data/lib/proxymgr/config.rb +130 -0
- data/lib/proxymgr/haproxy.rb +51 -0
- data/lib/proxymgr/haproxy/control.rb +46 -0
- data/lib/proxymgr/haproxy/process.rb +107 -0
- data/lib/proxymgr/haproxy/server.rb +24 -0
- data/lib/proxymgr/haproxy/socket.rb +67 -0
- data/lib/proxymgr/haproxy/socket_manager.rb +62 -0
- data/lib/proxymgr/haproxy/state.rb +124 -0
- data/lib/proxymgr/haproxy/updater.rb +74 -0
- data/lib/proxymgr/logging.rb +26 -0
- data/lib/proxymgr/platform.rb +16 -0
- data/lib/proxymgr/platform/linux.rb +9 -0
- data/lib/proxymgr/process_manager.rb +101 -0
- data/lib/proxymgr/process_manager/signal_handler.rb +44 -0
- data/lib/proxymgr/service_config.rb +12 -0
- data/lib/proxymgr/service_config/base.rb +16 -0
- data/lib/proxymgr/service_config/zookeeper.rb +33 -0
- data/lib/proxymgr/service_manager.rb +53 -0
- data/lib/proxymgr/sink.rb +100 -0
- data/lib/proxymgr/watcher.rb +9 -0
- data/lib/proxymgr/watcher/base.rb +75 -0
- data/lib/proxymgr/watcher/campanja_zk.rb +20 -0
- data/lib/proxymgr/watcher/dns.rb +36 -0
- data/lib/proxymgr/watcher/file.rb +45 -0
- data/lib/proxymgr/watcher/zookeeper.rb +61 -0
- data/packaging/profile.sh +1 -0
- data/packaging/recipe.rb +35 -0
- data/proxymgr.gemspec +20 -0
- data/spec/spec_helper.rb +23 -0
- data/spec/support/dummy_watcher.rb +21 -0
- data/spec/support/fake_proxy.rb +15 -0
- data/spec/support/fake_zookeeper.rb +170 -0
- data/spec/support/mock_servers.rb +7 -0
- data/spec/unit/haproxy/socket_manager_spec.rb +40 -0
- data/spec/unit/haproxy/updater_spec.rb +123 -0
- data/spec/unit/service_manager_spec.rb +49 -0
- data/spec/unit/sink_spec.rb +41 -0
- data/spec/unit/watcher/base_spec.rb +27 -0
- metadata +188 -0
@@ -0,0 +1,74 @@
|
|
1
|
+
module ProxyMgr
  class Haproxy
    # Computes the set of enable/disable operations (or a full restart)
    # needed to bring a running haproxy in line with the desired state
    # reported by the service watchers.
    class Updater
      include Logging

      # socket: stats-socket client used to query haproxy's live server
      # state; may be nil or disconnected, in which case diffing is skipped.
      def initialize(socket)
        @socket = socket
        @old_watchers = {}
      end

      # Compare the desired state (+watchers+: name => watcher) against the
      # state reported over the haproxy stats socket and return a Set
      # changeset. Without a usable socket we cannot diff, so a restart is
      # always requested.
      def produce_changeset(watchers)
        # NOTE: was `@socket and @socket.connected?`; `&&` avoids the
        # low-precedence `and` pitfall.
        if @socket && @socket.connected?
          new_state = Hash[watchers.map do |name, watcher|
            [name, watcher.servers]
          end]
          proxy_state = haproxy_state
          # A restart is needed when a backend appears or disappears, or
          # when an existing watcher's configuration changed since the last
          # changeset was produced.
          restart_needed = (proxy_state.keys + new_state.keys).uniq.any? do |name|
            if @old_watchers[name] && watchers[name]
              @old_watchers[name] != watchers[name]
            else
              true
            end
          end
          changeset = Set.new(restart_needed, {}, {})
          # Only compute per-server enable/disable when a restart isn't
          # already unavoidable.
          diff(new_state, proxy_state, changeset) unless restart_needed
          @old_watchers = watchers
          changeset
        else
          logger.debug 'No socket, not doing diffing'
          Set.new(true, {}, {})
        end
      end

      private

      # Build {backend => {:disabled => [names], :enabled => [names]}} from
      # the servers haproxy reports over the stats socket.
      def haproxy_state
        @socket.servers.each_with_object({}) do |server, servers|
          backend = servers[server.backend] ||= { :disabled => [], :enabled => [] }
          if server.disabled?
            backend[:disabled] << server.name
          else
            backend[:enabled] << server.name
          end
        end
      end

      # Populate changeset.enable/disable for each backend present in both
      # the desired and the live state. If toggling existing servers cannot
      # reach the desired server list (i.e. brand-new servers are required),
      # flag a restart instead.
      def diff(new_state, proxy_state, changeset)
        new_state.each_with_object(changeset) do |(backend, servers), cs|
          if proxy_state[backend]
            enabled = proxy_state[backend][:enabled]
            to_disable = enabled - servers

            disabled = proxy_state[backend][:disabled]
            to_enable = (disabled & servers)
            if ((enabled - to_disable) + to_enable).sort != servers.sort
              cs.restart_needed = true
            end

            cs.disable[backend] = to_disable
            cs.enable[backend] = to_enable
          end
        end
      end

      # Value object describing the required changes: either a full restart,
      # or per-backend lists of server names to enable/disable.
      class Set < Struct.new(:restart_needed, :disable, :enable)
        def restart_needed?
          restart_needed
        end
      end
    end
  end
end
|
@@ -0,0 +1,26 @@
|
|
1
|
+
module ProxyMgr
  # Mixin that gives the including class a lazily created, memoized Logger
  # named after the class. Logging.disable! redirects all subsequently
  # created loggers into a throwaway StringIO; Logging.level sets the
  # severity used for loggers created afterwards.
  module Logging
    require 'logger'
    require 'stringio'

    # Memoized per-instance logger.
    def logger
      @logger ||= Logging.logger(self.class)
    end

    class << self
      # Severity for newly built loggers (defaults to Logger::INFO).
      attr_accessor :level

      # Silence logging: future loggers write into an in-memory buffer.
      def disable!
        @disable = true
      end

      # Build a Logger whose progname is +name+.
      def logger(name)
        destination = @disable ? StringIO.new : STDOUT
        Logger.new(destination).tap do |log|
          log.level    = @level || Logger::INFO
          log.progname = name
        end
      end
    end
  end
end
|
@@ -0,0 +1,16 @@
|
|
1
|
+
module ProxyMgr
  # Facade that forwards platform-specific calls (e.g. max_fd) to the
  # implementation matching RUBY_PLATFORM. Only Linux is currently supported.
  module Platform
    require 'proxymgr/platform/linux'

    def self.method_missing(sym, *args)
      case RUBY_PLATFORM
      when /linux/
        Platform::Linux.send(sym, *args)
      else
        # BUG FIX: was `fail UnsupportedPlatform "..."` (missing comma),
        # which parsed as a call to a nonexistent method and raised
        # NoMethodError instead of the intended exception.
        fail UnsupportedPlatform, 'Your platform is not supported'
      end
    end

    # Keep respond_to? consistent with the method_missing dispatch above.
    def self.respond_to_missing?(sym, include_private = false)
      RUBY_PLATFORM =~ /linux/ ? Platform::Linux.respond_to?(sym, include_private) : super
    end

    # Inherit from StandardError (not Exception) so a bare `rescue` catches
    # it; `rescue UnsupportedPlatform` and `rescue Exception` still work.
    class UnsupportedPlatform < StandardError; end
  end
end
|
@@ -0,0 +1,101 @@
|
|
1
|
+
module ProxyMgr
  # Spawns and supervises a child process: streams its stdout/stderr line by
  # line through callbacks, reports exit via SignalHandler, and performs a
  # TERM-then-KILL shutdown bounded by a timeout.
  class ProcessManager
    require 'timeout'
    require 'proxymgr/process_manager/signal_handler'

    include Callbacks

    attr_reader :exit_code, :pid

    # cmd/args: program and argv to exec in the child.
    # opts:
    #   :timeout - seconds to wait after TERM before sending KILL (default 10)
    #   :setsid  - run the child in its own session (default true)
    #   :fds     - file descriptors to leave open in the child (default [])
    def initialize(cmd, args = [], opts = {})
      @cmd = cmd
      @args = args
      @pid = nil
      @exit_code = nil

      @timeout = opts[:timeout] || 10
      # BUG FIX: `opts[:setsid] || true` always evaluated to true, so an
      # explicit `:setsid => false` was silently ignored. fetch honours it.
      @setsid = opts.fetch(:setsid, true)
      @fds = opts[:fds] || []

      @io_handler = nil

      callbacks :on_stdout, :on_stderr, :on_stop
    end

    # Fork and exec the child, wire up output capture, and return its pid.
    def start
      stdout_read, stdout_write = IO.pipe
      stderr_read, stderr_write = IO.pipe
      # Used to hold the child until the parent has registered its pid with
      # the SIGCHLD handler, avoiding a missed-exit race.
      sync_pipe = IO.pipe

      @pid = Process.fork do
        $stdout.reopen stdout_write
        $stderr.reopen stderr_write
        [stderr_read, stdout_read].each(&:close)
        begin
          Process.setsid if @setsid
        rescue Errno::EPERM
          # Already a session leader; nothing to do.
        end
        # Block until the parent signals that registration is complete.
        sync_pipe[0].read(1)
        # Close every inherited descriptor except stdio and the whitelisted
        # ones, so the child doesn't hold on to the parent's sockets/files.
        3.upto(Platform.max_fd).each do |fd|
          begin
            IO.for_fd(fd).close unless @fds.include? fd
          rescue ArgumentError, Errno::EBADF
            # fd not open / not representable as an IO; skip.
          end
        end
        Process.exec(*([@cmd] + @args), :close_others => false)
      end
      self.class.register(@pid) { |status| call(:on_stop, status) }
      # Release the child now that the exit callback is registered.
      sync_pipe[1].write(1)
      ([stdout_write, stderr_write] + sync_pipe).each(&:close)

      # Reader thread: select on both pipes, drain non-blockingly, and emit
      # one callback per line until the child closes its end (EOF).
      @thread = Thread.new do
        stop = false
        fdset = [stdout_read, stderr_read]
        until stop
          r = IO.select(fdset, [], fdset).first
          out = {}
          r.each do |pipe|
            stream = pipe == stdout_read ? :stdout : :stderr
            buf = out[stream] ||= ''
            begin
              loop { buf << pipe.read_nonblock(4096) }
            rescue Errno::EWOULDBLOCK
              # Drained for now; wait for the next select round.
            rescue EOFError
              stop = true
            end
          end
          out.each do |stream, buf|
            buf.split(/\n/).each { |line| call("on_#{stream}".to_sym, line) }
          end
        end
        fdset.each(&:close)
      end
      @thread.abort_on_exception = true

      @pid
    end

    # Ask the child to terminate; escalate to KILL after @timeout seconds.
    def stop
      Process.kill('TERM', @pid)
      begin
        Timeout.timeout(@timeout) { wait }
      rescue Timeout::Error
        Process.kill('KILL', @pid)
      end
      @thread.join if @thread
    end

    # Reap the child and record its exit code (or terminating signal).
    def wait
      begin
        _pid, result = Process.waitpid2(@pid)
        @exit_code = result.exitstatus || result.termsig
      rescue Errno::ECHILD
        # Already reaped elsewhere (e.g. by the SIGCHLD handler).
      end
    end

    # Register an on-exit callback for +pid+ with the shared SIGCHLD handler.
    def self.register(pid, &blk)
      @handler ||= SignalHandler.new
      @handler.register(pid, &blk)
    end
  end
end
|
@@ -0,0 +1,44 @@
|
|
1
|
+
module ProxyMgr
  class ProcessManager
    # Installs a single SIGCHLD trap that reaps exited children and invokes
    # the callback registered for each reaped pid. One instance is shared
    # process-wide (see ProcessManager.register).
    class SignalHandler
      include Logging

      def initialize
        # pid => callback to invoke with the child's exit code/signal.
        @pids = {}

        start
      end

      # Register +blk+ to be called once when +pid+ exits. Only one callback
      # per pid is kept; re-registering replaces the previous one.
      def register(pid, &blk)
        @pids[pid] = blk
      end

      private

      # Install the SIGCHLD trap. Multiple child exits may be coalesced into
      # one signal delivery, so we reap in a loop with WNOHANG, collect the
      # results first, and dispatch callbacks only after reaping is done.
      def start
        Signal.trap(:CHLD) do
          handled = {}
          begin
            loop do
              pid, status = Process.waitpid2(-1, Process::WNOHANG)
              # nil pid: children exist but none have exited yet.
              break unless pid
              handled[pid] = result(status)
            end
          rescue Errno::ECHILD
            # No children left to reap.
          end
          handled.each do |pid, result|
            handle(pid, result)
          end
        end
      end

      # Invoke and discard the callback for +pid+, if one was registered.
      # Unregistered pids (e.g. children spawned elsewhere) are ignored.
      def handle(pid, status = nil)
        @pids.delete(pid).call(status) if @pids[pid]
      end

      # Normalize a Process::Status into an integer: the exit status for a
      # normal exit, otherwise the terminating signal number.
      def result(status)
        status && (status.exitstatus || status.termsig)
      end
    end
  end
end
|
@@ -0,0 +1,12 @@
|
|
1
|
+
module ProxyMgr
  # Factory namespace for service-configuration sources (e.g. Zookeeper).
  module ServiceConfig
    require 'proxymgr/service_config/base'
    require 'proxymgr/service_config/zookeeper'

    # Instantiate the ServiceConfig implementation named by config['type']
    # (capitalized to a constant). The 'type' key is removed from +config+
    # before it is handed to the implementation.
    def self.create(manager, config)
      kind = config.delete('type')
      ServiceConfig.const_get(kind.capitalize).new(manager, config)
    end
  end
end
|
@@ -0,0 +1,33 @@
|
|
1
|
+
module ProxyMgr
  module ServiceConfig
    # Service-configuration source backed by Zookeeper: watches a path via
    # Zoology's PathCache and pushes updates/deletions into the manager.
    class Zookeeper < Base
      require 'yajl/json_gem'
      require 'zoology'

      include Logging

      # Connect to the configured Zookeeper ensemble and begin watching
      # @config['path'] for child updates.
      def start
        @services = {}

        @client = Zoology::Client.new(@config['servers'])
        @path_cache = Zoology::PathCache.new(@client,
                                             @config['path'],
                                             &method(:watch))
        @client.connect
      end

      private

      # PathCache callback: an :update event carries JSON service config;
      # any other event type means the node was removed.
      def watch(path, type, req)
        name = File.basename(path)
        case type
        when :update
          parsed = JSON.parse(req[:data])
          @manager.update_service(name, parsed)
        else
          logger.debug "deleting service #{name}"
          @manager.delete_service(name)
        end
      end
    end
  end
end
|
@@ -0,0 +1,53 @@
|
|
1
|
+
module ProxyMgr
  # Owns the set of active watchers (one per service) and forwards their
  # aggregate state to the sink. Watcher creation/removal is serialized by
  # @service_mutex; sink updates by @sink_mutex.
  class ServiceManager
    include Logging

    def initialize(sink)
      @services = {}
      @sink = sink

      @service_mutex = Mutex.new
      @sink_mutex = Mutex.new
    end

    # Create (or replace) the watcher for +name+ from +config+. The watcher
    # class is resolved from config['type']; unknown types are logged and
    # skipped.
    def update_service(name, config)
      logger.info "Received service: #{name}"

      type = config.delete('type')
      begin
        impl = watcher_class(type)
        @service_mutex.synchronize do
          previous = @services[name]
          previous.shutdown if previous
          watcher = @services[name] = impl.new(name, config, self)
          watcher.watch if watcher.valid?
        end
      rescue NameError
        logger.warn "Could not find implementation for #{type}. Not adding service #{name}"
        nil
      end
    end

    # Remove and shut down the watcher for +name+, then push the reduced
    # backend set to the sink.
    def delete_service(name)
      @service_mutex.synchronize do
        svc = @services.delete(name)
        svc.shutdown if svc
      end
      update_backends
    end

    # Push the current watcher set to the sink.
    def update_backends
      @sink_mutex.synchronize { @sink.update_backends @services }
    end

    # Stop the sink first, then every watcher.
    def shutdown
      @sink.shutdown
      @services.each { |_name, watcher| watcher.shutdown }
    end

    private

    # Map a type string (e.g. "zookeeper") to its Watcher class; raises
    # NameError for unknown types.
    def watcher_class(type)
      Watcher.const_get(type.capitalize)
    end
  end
end
|
@@ -0,0 +1,100 @@
|
|
1
|
+
module ProxyMgr
  # Debouncing sink between the service manager and haproxy: backend updates
  # are accumulated under a mutex and flushed to haproxy by a background
  # thread once a (growing, capped) quiet period has elapsed.
  class Sink
    require 'absolute_time'

    include Logging

    def initialize(haproxy, opts = {})
      # NOTE(review): @file is assigned but not used anywhere in this class
      # as shown — presumably consumed elsewhere; confirm.
      @file = opts[:haproxy_config_file] || '/tmp/haproxy.cfg'
      @default_timeout = opts[:default_timeout] || 2
      @max_timeout = opts[:max_timeout] || 10
      @haproxy = haproxy
      @timeout = nil
      @thread = nil
      # @cv/@mutex coordinate update_backends with the flush thread;
      # @start_cv/@start_mutex only gate initialize until the thread runs.
      @cv = ConditionVariable.new
      @mutex = Mutex.new
      @start_cv = ConditionVariable.new
      @start_mutex = Mutex.new
      @backends = nil
      @haproxy.start
      start
    end

    # Merge +backends+ (name => watcher) into the pending set and wake the
    # flush thread. Watchers with no servers are skipped (and, note, a
    # previously stored entry for that name is not removed here).
    def update_backends(backends)
      @mutex.synchronize do
        @backends ||= {}
        backends.each do |name, watcher|
          next if watcher.servers.empty?
          @backends[name] = watcher
        end
        signal
      end
    end

    # Kill the flush thread and shut haproxy down.
    def shutdown
      @thread.kill
      @thread.join
      @haproxy.shutdown
    end

    private

    # Start the flush thread. It holds @mutex for its whole life, releasing
    # it only inside @cv.wait; it flushes pending backends to haproxy once
    # at least @timeout seconds have passed since the last wakeup.
    def start
      @thread = Thread.new do
        # t1 is the monotonic timestamp of the previous loop iteration;
        # nil on the very first pass so no flush/timeout logic runs yet.
        t1 = nil
        @mutex.synchronize do
          loop do
            started! unless started?

            if @timeout && t1 && AbsoluteTime.now - t1 >= @timeout && @backends
              # Quiet period elapsed: push pending backends and reset.
              @haproxy.update_backends(@backends)

              @timeout = nil
              @backends = nil
            elsif t1
              # Woken before the timeout expired: back off and keep waiting.
              set_timeout
              logger.debug "Waiting for #{@timeout}s or signal"
            end

            t1 = AbsoluteTime.now
            logger.debug 'Waiting to be signalled'
            wait
          end
        end
      end
      @thread.abort_on_exception = true

      wait_for_started
    end

    # Wake the flush thread; callers must hold @mutex.
    def signal
      @cv.signal
    end

    # Sleep until signalled or @timeout seconds pass (nil = wait forever).
    def wait
      @cv.wait(@mutex, @timeout)
    end

    # Grow the debounce window, capped at @max_timeout.
    # NOTE(review): this SQUARES the previous timeout (2 -> 4 -> 16, capped)
    # rather than doubling it — looks like `@timeout * 2` may have been
    # intended; confirm before changing.
    def set_timeout
      @timeout = @timeout ? @timeout * @timeout : @default_timeout
      @timeout = @max_timeout if @timeout > @max_timeout
    end

    # The startup CV is discarded (set to nil) once the thread has run.
    def started?
      @start_cv == nil
    end

    # Signal initialize that the flush thread is live, then drop the CV so
    # this only ever happens once.
    def started!
      @start_mutex.synchronize do
        @start_cv.signal
        @start_cv = nil
      end
    end

    # Block the constructor until the flush thread has started.
    def wait_for_started
      @start_mutex.synchronize do
        @start_cv.wait(@start_mutex) if @start_cv
      end
    end
  end
end
|