synapse 0.15.1 → 0.16.2
- checksums.yaml +5 -13
- data/README.md +4 -2
- data/bin/synapse +7 -1
- data/config/synapse.conf.json +12 -1
- data/lib/synapse.rb +60 -20
- data/lib/synapse/config_generator/file_output.rb +10 -2
- data/lib/synapse/config_generator/haproxy.rb +42 -7
- data/lib/synapse/service_watcher/base.rb +2 -0
- data/lib/synapse/service_watcher/dns.rb +7 -1
- data/lib/synapse/service_watcher/zookeeper.rb +123 -72
- data/lib/synapse/statsd.rb +47 -0
- data/lib/synapse/version.rb +1 -1
- data/spec/bin/synapse_spec.rb +56 -0
- data/spec/lib/synapse/file_output_spec.rb +9 -2
- data/spec/lib/synapse/haproxy_spec.rb +158 -1
- data/spec/lib/synapse/service_watcher_dns_spec.rb +51 -0
- data/spec/lib/synapse/service_watcher_zookeeper_spec.rb +45 -0
- data/spec/support/configuration.rb +3 -1
- data/spec/support/minimum.conf.yaml +2 -0
- data/synapse.gemspec +1 -0
- metadata +52 -33
checksums.yaml
CHANGED
@@ -1,15 +1,7 @@
 ---
-  metadata.gz: !binary |-
-  data.tar.gz: !binary |-
-    MTVjZWRhYjM0YjJkNWVhY2Q2ZTY2NWU4NjBmZGI0YmNhZWNjNjI1OQ==
+SHA1:
+  metadata.gz: 849f3da2b6429321568cbe08a89e21f19a6a3413
+  data.tar.gz: b0bf173b22a7a7a158c346b325e25e7df4dc0f29
 SHA512:
-  metadata.gz: !binary |-
-    M2QwMjliOGVjOGVhYzBkYjMwYzliM2Y0Y2Q4ZDJjMDM4ODRmOTJiYjA5OWRl
-    ODc0N2ZmNzg2ZWE4OWMxNTQ3M2M3ZGJlMTY5MmZkYmViN2ZiZWQ=
-  data.tar.gz: !binary |-
-    ZGIxNDA5ZDJjZWU2MjY0YjU4NmI2ODE0YjlkNmFlNmM4OTM4OTQyMWNhZDJl
-    ZDM5NGRmOWVmYTljMmE0YmU3MTc0NDE5NTZiMWFjYTI1ZjM2Y2NkYjJmZDFi
-    YmM0Njc1NDQ1ZmNkMzkwMDZkMWRlM2IzNzczZGZhODI2MDJkNjU=
+  metadata.gz: b2ef9bab0eb75e9eba82eb66c2214cfd6e3e68120019c7f84dc7ee4f1152d67bee1fdc8e68e3b143b346b1784d5ad45898e9d0a6f9e16150e6d9c5a47edaaa07
+  data.tar.gz: cec2427ff960706bff57e38bb1bef434774698cc875bb9f32318bdba5c95c9035d04eface6e1d1502bcbdd0e5515281f486a256f9f8c1cdd3f832b30e1e30e91
data/README.md
CHANGED
@@ -230,7 +230,8 @@ It takes the following mandatory arguments:
 
 The watcher assumes that each node under `path` represents a service server.
 
-The watcher assumes that the data (if any) retrieved at znode `path` is a hash, where each key is named by a valid `config_generator` (e.g. `haproxy`) and the value is a hash that configs the generator.
+The watcher assumes that the data (if any) retrieved at znode `path` is a hash, where each key is named by a valid `config_generator` (e.g. `haproxy`) and the value is a hash that configs the generator. Alternatively, if a `generator_config_path` argument is specified, the watcher will attempt to read generator config from that znode instead.
+If `generator_config_path` has the value `disabled`, then generator config will not be read from zookeeper at all.
 
 The following arguments are optional:
 
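For illustration, a zookeeper watcher stanza using the new key could look like the sketch below (hostnames and znode paths are invented); `generator_config_path` sits next to `path` inside the `discovery` block, and the literal value `disabled` keeps the generator config purely local:

```yaml
service1:
  discovery:
    method: zookeeper
    hosts:
      - zk1.example.com:2181
    path: /nerve/services/service1/instances
    # read the haproxy/file_output config block from this znode instead of `path`;
    # set to "disabled" to skip zookeeper-based generator config entirely
    generator_config_path: /nerve/services/service1/generator_config
  haproxy:
    port: 3213
```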
@@ -338,7 +339,7 @@ different addresses (example: service1 listen on 127.0.0.2:443 and service2
 listen on 127.0.0.3:443) allows /etc/hosts entries to point to services.
 * `bind_options`: optional: default value is an empty string, specify additional bind parameters, such as ssl accept-proxy, crt, ciphers etc.
 * `server_port_override`: **DEPRECATED**. Renamed [`backend_port_override`](#backend_port_override) and moved to the top level hash. This will be removed in future versions.
-* `server_options`: the haproxy options for each `server` line of the service in HAProxy config; it may be left out.
+* `server_options`: the haproxy options for each `server` line of the service in HAProxy config; it may be left out. This field supports some basic templating: you can include `%{port}`, `%{host}`, or `%{name}` in this string, and those will be replaced with the appropriate values for the particular server being configured.
 * `frontend`: additional lines passed to the HAProxy config in the `frontend` stanza of this service
 * `backend`: additional lines passed to the HAProxy config in the `backend` stanza of this service
 * `backend_name`: The name of the generated HAProxy backend for this service
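As a rough illustration of the `server_options` templating described above (service name, address, and options invented), a per-service `haproxy` block could use the placeholders like this:

```yaml
haproxy:
  port: 3213
  server_options: "check port %{port} inter 2000 rise 3 fall 2"
  # for a backend discovered at 10.0.1.7:31045 and named i-0abc12, the generated
  # line would come out roughly as:
  #   server i-0abc12 10.0.1.7:31045 check port 31045 inter 2000 rise 3 fall 2
```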
@@ -352,6 +353,7 @@ listen on 127.0.0.3:443) allows /etc/hosts entries to point to services.
   ordering is deterministic across HAProxy reloads.
 * `shared_frontend`: optional: haproxy configuration directives for a shared http frontend (see below)
 * `cookie_value_method`: optional: default value is `name`, it defines the way your backends receive a cookie value in http mode. If equal to `hash`, synapse hashes backend names on cookie value assignation of your discovered backends, useful when you want to use haproxy cookie feature but you do not want that your end users receive a Set-Cookie with your server name and ip readable in clear.
+* `use_nerve_weights`: optional: this option enables reading the weights from nerve and applying them to the haproxy configuration. By default this is disabled, in case users already apply weights using `server_options` or `haproxy_server_options`. When enabled, this option also removes the `weight` parameter from `server_options` and `haproxy_server_options`.
 
 <a name="haproxy"/>
 
data/bin/synapse
CHANGED
@@ -2,6 +2,7 @@
 
 require 'yaml'
 require 'optparse'
+require 'erb'
 
 require 'synapse'
 
@@ -32,13 +33,18 @@ optparse.parse!
 def parseconfig(filename)
   # parse synapse config file
   begin
-    c = YAML::parse(File.read(filename))
+    c = YAML::parse(ERB.new(File.read(filename)).result)
   rescue Errno::ENOENT => e
     raise ArgumentError, "config file does not exist:\n#{e.inspect}"
   rescue Errno::EACCES => e
     raise ArgumentError, "could not open config file:\n#{e.inspect}"
   rescue YAML::SyntaxError => e
     raise "config file #{filename} is not yaml:\n#{e.inspect}"
+  rescue SyntaxError => e
+    raise SyntaxError, "ERB syntax error in config file #{filename}:\n#{e.inspect}"
+  rescue
+    puts "failed to parse config file #{filename}"
+    raise
   end
   return c.to_ruby
 end
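Because the config file is now run through ERB before the YAML parse, configs can embed Ruby that is evaluated at load time. A minimal sketch (the `SYNAPSE_ZK_HOSTS` environment variable and all paths are invented for illustration):

```yaml
haproxy:
  config_file_path: /etc/haproxy/haproxy.cfg
  reload_command: "service haproxy reload"
services:
  service1:
    discovery:
      method: zookeeper
      # assumes SYNAPSE_ZK_HOSTS holds something like "zk1:2181,zk2:2181"
      hosts: <%= ENV.fetch('SYNAPSE_ZK_HOSTS', 'localhost:2181').split(',') %>
      path: /nerve/services/service1/instances
```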
data/config/synapse.conf.json
CHANGED
@@ -25,7 +25,7 @@
           "http-check expect string OK"
         ]
       }
-
+
     },
     "service2": {
       "default_servers": [
@@ -87,5 +87,16 @@
         "stats refresh 5s"
       ]
     }
+  },
+  "statsd": {
+    "host": "localhost",
+    "port": 8125,
+    "sample_rate": {
+      "synapse.watcher.ping.count": 0.1,
+      "synapse.watcher.zk.discovery": 0.7,
+      "synapse.watcher.zk.discovery.elapsed_time": 0.7,
+      "synapse.watcher.zk.get.elapsed_time": 0.5,
+      "synapse.watcher.zk.watch.elapsed_time": 0.7
+    }
   }
 }
data/lib/synapse.rb
CHANGED
@@ -3,6 +3,7 @@ require 'json'
 
 require 'synapse/version'
 require 'synapse/log'
+require 'synapse/statsd'
 require 'synapse/config_generator'
 require 'synapse/service_watcher'
 
@@ -11,8 +12,11 @@ module Synapse
   class Synapse
 
     include Logging
+    include StatsD
 
     def initialize(opts={})
+      StatsD.configure_statsd(opts["statsd"] || {})
+
       # create objects that need to be notified of service changes
       @config_generators = create_config_generators(opts)
       raise "no config generators supplied" if @config_generators.empty?
@@ -29,47 +33,76 @@ module Synapse
       Thread.abort_on_exception = true
 
       log.debug "synapse: completed init"
+    rescue Exception => e
+      statsd && statsd_increment('synapse.stop', ['stop_avenue:abort', 'stop_location:init', "exception_name:#{e.class.name}", "exception_message:#{e.message}"])
+      raise e
     end
 
     # start all the watchers and enable haproxy configuration
     def run
       log.info "synapse: starting..."
+      statsd_increment('synapse.start')
 
       # start all the watchers
+      statsd_time('synapse.watchers.start.time') do
+        @service_watchers.map do |watcher|
+          begin
+            watcher.start
+            statsd_increment("synapse.watcher.start", ['start_result:success', "watcher_name:#{watcher.name}"])
+          rescue Exception => e
+            statsd_increment("synapse.watcher.start", ['start_result:fail', "watcher_name:#{watcher.name}", "exception_name:#{e.class.name}", "exception_message:#{e.message}"])
+            raise e
+          end
         end
+      end
+
+      statsd_time('synapse.main_loop.elapsed_time') do
+        # main loop
+        loops = 0
+        loop do
+          @service_watchers.each do |w|
+            alive = w.ping?
+            statsd_increment('synapse.watcher.ping.count', ["watcher_name:#{w.name}", "ping_result:#{alive ? "success" : "failure"}"])
+            raise "synapse: service watcher #{w.name} failed ping!" unless alive
+          end
 
+          if @config_updated
+            @config_updated = false
+            statsd_increment('synapse.config.update')
+            @config_generators.each do |config_generator|
+              log.info "synapse: configuring #{config_generator.name}"
+              config_generator.update_config(@service_watchers)
+            end
+          end
+
+          sleep 1
           @config_generators.each do |config_generator|
-            config_generator.update_config(@service_watchers)
+            config_generator.tick(@service_watchers)
           end
-        end
 
-          config_generator.tick(@service_watchers)
+          loops += 1
+          log.debug "synapse: still running at #{Time.now}" if (loops % 60) == 0
         end
-        loops += 1
-        log.debug "synapse: still running at #{Time.now}" if (loops % 60) == 0
       end
 
     rescue StandardError => e
+      statsd_increment('synapse.stop', ['stop_avenue:abort', 'stop_location:main_loop', "exception_name:#{e.class.name}", "exception_message:#{e.message}"])
       log.error "synapse: encountered unexpected exception #{e.inspect} in main thread"
       raise e
     ensure
       log.warn "synapse: exiting; sending stop signal to all watchers"
 
       # stop all the watchers
-      @service_watchers.map
+      @service_watchers.map do |w|
+        begin
+          w.stop
+          statsd_increment("synapse.watcher.stop", ['stop_avenue:clean', 'stop_location:main_loop', "watcher_name:#{w.name}"])
+        rescue Exception => e
+          statsd_increment("synapse.watcher.stop", ['stop_avenue:exception', 'stop_location:main_loop', "watcher_name:#{w.name}", "exception_name:#{e.class.name}", "exception_message:#{e.message}"])
+          raise e
+        end
+      end
+      statsd_increment('synapse.stop', ['stop_avenue:clean', 'stop_location:main_loop'])
     end
 
     def reconfigure!
@@ -91,11 +124,18 @@ module Synapse
     end
 
     private
+
+    WAIVED_CONFIG_SECTIONS = [
+      'services',
+      'service_conf_dir',
+      'statsd',
+    ].freeze
+
     def create_config_generators(opts={})
       config_generators = []
       opts.each do |type, generator_opts|
         # Skip the "services" top level key
-        next if
+        next if WAIVED_CONFIG_SECTIONS.include? type
         config_generators << ConfigGenerator.create(type, generator_opts)
       end
 
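In config terms, this means every top-level key except `services`, `service_conf_dir`, and `statsd` is still instantiated as a config generator. A hedged sketch of the resulting layout (values invented):

```yaml
haproxy:          # becomes the haproxy config generator
  config_file_path: /etc/haproxy/haproxy.cfg
file_output:      # becomes the file_output config generator
  output_directory: /var/run/synapse/services
statsd:           # consumed by Synapse itself, skipped when creating generators
  host: localhost
  port: 8125
services:         # likewise skipped
  service1: {}    # service definitions elided
```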
data/lib/synapse/config_generator/file_output.rb
CHANGED
@@ -31,7 +31,7 @@ class Synapse::ConfigGenerator
 
     def update_config(watchers)
       watchers.each do |watcher|
-        next if watcher
+        next if writer_disabled?(watcher)
 
         unless @watcher_revisions[watcher.name] == watcher.revision
           @watcher_revisions[watcher.name] = watcher.revision
@@ -68,12 +68,20 @@ class Synapse::ConfigGenerator
       # Cleanup old services that Synapse no longer manages
       FileUtils.cd(opts['output_directory']) do
         present_files = Dir.glob('*.json')
-        managed_watchers = current_watchers.reject
+        managed_watchers = current_watchers.reject do |watcher|
+          writer_disabled?(watcher)
+        end
         managed_files = managed_watchers.collect {|watcher| "#{watcher.name}.json"}
         files_to_purge = present_files.select {|svc| not managed_files.include?(svc)}
         log.info "synapse: purging unknown service files #{files_to_purge}" if files_to_purge.length > 0
         FileUtils.rm(files_to_purge)
       end
     end
+
+    private
+
+    def writer_disabled?(watcher)
+      watcher.config_for_generator[name].nil? || watcher.config_for_generator[name]['disabled']
+    end
   end
 end
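With `writer_disabled?` in place, a service can opt out of file output from its own config. A hypothetical service stanza (names and paths invented); a service that defines no `file_output` key at all is skipped the same way:

```yaml
service2:
  discovery:
    method: zookeeper
    hosts:
      - zk1.example.com:2181
    path: /nerve/services/service2/instances
  file_output:
    disabled: true   # this watcher is ignored by the file_output generator
```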
data/lib/synapse/config_generator/haproxy.rb
CHANGED
@@ -814,7 +814,6 @@ class Synapse::ConfigGenerator
       @opts['do_writes'] = true unless @opts.key?('do_writes')
       @opts['do_socket'] = true unless @opts.key?('do_socket')
       @opts['do_reloads'] = true unless @opts.key?('do_reloads')
-
       req_pairs = {
         'do_writes' => 'config_file_path',
         'do_socket' => 'socket_file_path',
@@ -1090,6 +1089,11 @@ class Synapse::ConfigGenerator
             log.info "synapse: restart required because haproxy_server_options changed for #{backend_name}"
             @restart_required = true
           end
+
+          if(@opts['use_nerve_weights'] && old_backend.fetch('weight', "") != backend.fetch('weight', ""))
+            log.info "synapse: restart required because weight changed for #{backend_name}"
+            @restart_required = true
+          end
         end
 
         backends[backend_name] = backend.merge('enabled' => true)
@@ -1099,8 +1103,8 @@ class Synapse::ConfigGenerator
         # does define an id, then we will write that out below, so that must be what
         # is in the id_map as well.
         @server_id_map[watcher.name][backend_name] = backend['haproxy_server_id'].to_i if backend['haproxy_server_id']
-        server_opts =
-        @server_id_map[watcher.name][backend_name] = server_opts[server_opts.index('id') + 1].to_i if server_opts.include?('id')
+        server_opts = backend['haproxy_server_options'].split(' ') if backend['haproxy_server_options'].is_a? String
+        @server_id_map[watcher.name][backend_name] = server_opts[server_opts.index('id') + 1].to_i if server_opts && server_opts.include?('id')
         @id_server_map[watcher.name][@server_id_map[watcher.name][backend_name]] = backend_name
       end
@@ -1137,10 +1141,15 @@ class Synapse::ConfigGenerator
         config.map {|c| "\t#{c}"},
         keys.map {|backend_name|
           backend = backends[backend_name]
+          backend_template_vars = {
+            :host => backend['host'],
+            :port => backend['port'],
+            :name => backend_name,
+          }
           b = "\tserver #{backend_name} #{backend['host']}:#{backend['port']}"
 
           # Again, if the registry defines an id, we can't set it.
-          has_id =
+          has_id = backend['haproxy_server_options'].split(' ').include?('id') if backend['haproxy_server_options'].is_a? String
           if (!has_id && @server_id_map[watcher.name][backend_name])
             b = "#{b} id #{@server_id_map[watcher.name][backend_name]}"
           end
@@ -1153,13 +1162,37 @@ class Synapse::ConfigGenerator
               b = "#{b} cookie #{backend_name}"
             end
           end
-
-
+
+          if @opts['use_nerve_weights'] && backend['weight'] && (backend['weight'].is_a? Integer)
+            clean_server_options = remove_weight_option watcher_config['server_options']
+            clean_haproxy_server_options = remove_weight_option backend['haproxy_server_options']
+            if clean_server_options != watcher_config['server_options']
+              log.warn "synapse: weight is defined in both server_options and nerve. nerve weight will take precedence"
+            end
+            if clean_haproxy_server_options != backend['haproxy_server_options']
+              log.warn "synapse: weight is defined in both haproxy_server_options and nerve. nerve weight will take precedence"
+            end
+            b = "#{b} #{clean_server_options % backend_template_vars}" if clean_server_options
+            b = "#{b} #{clean_haproxy_server_options % backend_template_vars}" if clean_haproxy_server_options
+
+            weight = backend['weight'].to_i
+            b = "#{b} weight #{weight}".squeeze(" ")
+          else
+            b = "#{b} #{watcher_config['server_options'] % backend_template_vars}" if watcher_config['server_options'].is_a? String
+            b = "#{b} #{backend['haproxy_server_options'] % backend_template_vars}" if backend['haproxy_server_options'].is_a? String
+          end
           b = "#{b} disabled" unless backend['enabled']
           b }
         ]
       end
 
+      def remove_weight_option(server_options)
+        if server_options.is_a? String
+          server_options = server_options.sub /weight +[0-9]+/,''
+        end
+        server_options
+      end
+
       def find_next_id(watcher_name, backend_name)
         probe = nil
         if @server_id_map[watcher_name].size >= @max_server_id
@@ -1286,7 +1319,9 @@ class Synapse::ConfigGenerator
       if old_config == new_config
         return false
       else
+        tmp_file_path = "#{opts['config_file_path']}.tmp"
+        File.write(tmp_file_path, new_config)
+        FileUtils.mv(tmp_file_path, opts['config_file_path'])
         return true
       end
     end
data/lib/synapse/service_watcher/dns.rb
CHANGED
@@ -59,10 +59,16 @@ class Synapse::ServiceWatcher
       end
     end
 
+    IP_REGEX = Regexp.union([Resolv::IPv4::Regex, Resolv::IPv6::Regex])
+
     def resolve_servers
       resolver.tap do |dns|
         resolution = discovery_servers.map do |server|
+          if server['host'] =~ IP_REGEX
+            addresses = [server['host']]
+          else
+            addresses = dns.getaddresses(server['host']).map(&:to_s)
+          end
           [server, addresses.sort]
         end
 
data/lib/synapse/service_watcher/zookeeper.rb
CHANGED
@@ -30,12 +30,14 @@ class Synapse::ServiceWatcher
     end
 
     def start
+      zk_host_list = @discovery['hosts'].sort
+      @zk_cluster = host_list_to_cluster(zk_host_list)
+      @zk_hosts = zk_host_list.join(',')
 
       @watcher = nil
       @zk = nil
 
-      log.info "synapse: starting ZK watcher #{@name} @
+      log.info "synapse: starting ZK watcher #{@name} @ cluster #{@zk_cluster} path: #{@discovery['path']}"
       zk_connect
     end
 
@@ -47,11 +49,22 @@ class Synapse::ServiceWatcher
     def ping?
       # @zk being nil implies no session *or* a lost session, do not remove
       # the check on @zk being truthy
+      # if the client is in any of the three states: associating, connecting, connected
+      # we consider it alive. this can avoid synapse restart on short network dis-connection
+      @zk && (@zk.associating? || @zk.connecting? || @zk.connected?)
     end
 
     private
 
+    def host_list_to_cluster(list)
+      first_host = list.sort.first
+      first_token = first_host.split('.').first
+      # extract cluster name by filtering name of first host
+      # remove domain extents and trailing numbers
+      last_non_number = first_token.rindex(/[^0-9]/)
+      last_non_number ? first_token[0..last_non_number] : first_host
+    end
+
     def validate_discovery_opts
       raise ArgumentError, "invalid discovery method #{@discovery['method']}" \
         unless @discovery['method'] == 'zookeeper'
@@ -123,49 +136,78 @@ class Synapse::ServiceWatcher
 
     # find the current backends at the discovery path
     def discover
+      statsd_increment('synapse.watcher.zk.discovery', ["zk_cluster:#{@zk_cluster}", "zk_path:#{@discovery['path']}", "service_name:#{@name}"])
+      statsd_time('synapse.watcher.zk.discovery.elapsed_time', ["zk_cluster:#{@zk_cluster}", "zk_path:#{@discovery['path']}", "service_name:#{@name}"]) do
+        log.info "synapse: discovering backends for service #{@name}"
+
+        new_backends = []
+        @zk.children(@discovery['path'], :watch => true).each do |id|
+          begin
+            node = statsd_time('synapse.watcher.zk.get.elapsed_time', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"]) do
+              @zk.get("#{@discovery['path']}/#{id}")
+            end
+          rescue ZK::Exceptions::NoNode => e
+            # This can happen when the registry unregisters a service node between
+            # the call to @zk.children and @zk.get(path). ZK does not guarantee
+            # a read to ``get`` of a child returned by ``children`` will succeed
+            log.error("synapse: #{@discovery['path']}/#{id} disappeared before it could be read: #{e}")
+            next
+          end
+
+          begin
+            # TODO: Do less munging, or refactor out this processing
+            host, port, name, weight, haproxy_server_options, labels = deserialize_service_instance(node.first)
+          rescue StandardError => e
+            log.error "synapse: invalid data in ZK node #{id} at #{@discovery['path']}: #{e}"
+          else
+            # find the numberic id in the node name; used for leader elections if enabled
+            numeric_id = id.split('_').last
+            numeric_id = NUMBERS_RE =~ numeric_id ? numeric_id.to_i : nil
+
+            log.debug "synapse: discovered backend #{name} at #{host}:#{port} for service #{@name}"
+            new_backends << {
+              'name' => name, 'host' => host, 'port' => port,
+              'id' => numeric_id, 'weight' => weight,
+              'haproxy_server_options' => haproxy_server_options,
+              'labels' => labels
+            }
+          end
         end
 
+        # support for a separate 'generator_config_path' key, for reading the
+        # generator config block, that may be different from the 'path' key where
+        # we discover service instances. if generator_config_path is present and
+        # the value is "disabled", then skip all zk-based discovery of the
+        # generator config (and use the values from the local config.json
+        # instead).
+        case @discovery.fetch('generator_config_path', nil)
+        when 'disabled'
+          discovery_key = nil
+        when nil
+          discovery_key = 'path'
         else
-          numeric_id = id.split('_').last
-          numeric_id = NUMBERS_RE =~ numeric_id ? numeric_id.to_i : nil
-
-          log.debug "synapse: discovered backend #{name} at #{host}:#{port} for service #{@name}"
-          new_backends << {
-            'name' => name, 'host' => host, 'port' => port,
-            'id' => numeric_id, 'weight' => weight,
-            'haproxy_server_options' => haproxy_server_options,
-            'labels' => labels
-          }
+          discovery_key = 'generator_config_path'
         end
-      end
 
+        if discovery_key
+          begin
+            node = statsd_time('synapse.watcher.zk.get.elapsed_time', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"]) do
+              @zk.get(@discovery[discovery_key], :watch => true)
+            end
+            new_config_for_generator = parse_service_config(node.first)
+          rescue ZK::Exceptions::NoNode => e
+            log.error "synapse: No ZK node for config data at #{@discovery[discovery_key]}: #{e}"
+            new_config_for_generator = {}
+          rescue StandardError => e
+            log.error "synapse: invalid config data in ZK node at #{@discovery[discovery_key]}: #{e}"
+            new_config_for_generator = {}
+          end
+        else
+          new_config_for_generator = {}
+        end
 
+        set_backends(new_backends, new_config_for_generator)
+      end
     end
 
     # sets up zookeeper callbacks if the data at the discovery path changes
@@ -173,12 +215,14 @@ class Synapse::ServiceWatcher
       return if @zk.nil?
       log.debug "synapse: setting watch at #{@discovery['path']}"
 
+      statsd_time('synapse.watcher.zk.watch.elapsed_time', ["zk_cluster:#{@zk_cluster}", "zk_path:#{@discovery['path']}", "service_name:#{@name}"]) do
+        @watcher = @zk.register(@discovery['path'], &watcher_callback) unless @watcher
 
+        # Verify that we actually set up the watcher.
+        unless @zk.exists?(@discovery['path'], :watch => true)
+          log.error "synapse: zookeeper watcher path #{@discovery['path']} does not exist!"
+          zk_cleanup
+        end
       end
       log.debug "synapse: set watch at #{@discovery['path']}"
     end
@@ -221,37 +265,44 @@ class Synapse::ServiceWatcher
     end
 
     def zk_connect
+      statsd_time('synapse.watcher.zk.connect.elapsed_time', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"]) do
+        log.info "synapse: zookeeper watcher connecting to ZK at #{@zk_hosts}"
+
+        # Ensure that all Zookeeper watcher re-use a single zookeeper
+        # connection to any given set of zk hosts.
+        @@zk_pool_lock.synchronize {
+          unless @@zk_pool.has_key?(@zk_hosts)
+            log.info "synapse: creating pooled connection to #{@zk_hosts}"
+            @@zk_pool[@zk_hosts] = ZK.new(@zk_hosts, :timeout => 5, :thread => :per_callback)
+            @@zk_pool_count[@zk_hosts] = 1
+            log.info "synapse: successfully created zk connection to #{@zk_hosts}"
+            statsd_increment('synapse.watcher.zk.client.created', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"])
+          else
+            @@zk_pool_count[@zk_hosts] += 1
+            log.info "synapse: re-using existing zookeeper connection to #{@zk_hosts}"
+            statsd_increment('synapse.watcher.zk.client.reused', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"])
+          end
+        }
 
+        @zk = @@zk_pool[@zk_hosts]
+        log.info "synapse: retrieved zk connection to #{@zk_hosts}"
 
+        # handle session expiry -- by cleaning up zk, this will make `ping?`
+        # fail and so synapse will exit
+        @zk.on_expired_session do
+          statsd_increment('synapse.watcher.zk.session.expired', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"])
+          log.warn "synapse: zookeeper watcher ZK session expired!"
+          zk_cleanup
+        end
 
+        # the path must exist, otherwise watch callbacks will not work
+        statsd_time('synapse.watcher.zk.create_path.elapsed_time', ["zk_cluster:#{@zk_cluster}", "service_name:#{@name}"]) do
+          create(@discovery['path'])
+        end
 
+        # call the callback to bootstrap the process
+        watcher_callback.call
+      end
     end
 
     # decode the data at a zookeeper endpoint
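One practical consequence of the connection pooling above: watchers whose `hosts` lists name the same ensemble share a single ZK connection, and because the list is sorted before being joined into the pool key, ordering does not matter. A hedged sketch (hostnames invented):

```yaml
service1:
  discovery:
    method: zookeeper
    hosts: [zk1.example.com:2181, zk2.example.com:2181, zk3.example.com:2181]
    path: /nerve/services/service1/instances
service2:
  discovery:
    method: zookeeper
    # same ensemble, different order: still re-uses service1's pooled connection
    hosts: [zk3.example.com:2181, zk1.example.com:2181, zk2.example.com:2181]
    path: /nerve/services/service2/instances
```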