synapse 0.14.6 → 0.14.7
- checksums.yaml +13 -5
- data/README.md +19 -1
- data/lib/synapse/config_generator/haproxy.rb +74 -2
- data/lib/synapse/service_watcher/zookeeper_dns.rb +8 -1
- data/lib/synapse/version.rb +1 -1
- data/spec/lib/synapse/haproxy_spec.rb +65 -14
- data/spec/support/minimum.conf.yaml +0 -2
- metadata +31 -31
checksums.yaml
CHANGED
@@ -1,7 +1,15 @@
 ---
-
-metadata.gz:
-
+!binary "U0hBMQ==":
+metadata.gz: !binary |-
+OTM5MmQ3MDE5NDg5MzU4MjVhZjEyNTNhOGM3NzhlYmQ1ZmIyM2JlNg==
+data.tar.gz: !binary |-
+ZTRlOTFjZjUxZTNjYTgzMmQxYzI3NTg5ZWY4NGU2ZGZmZmQ2OTJhYw==
 SHA512:
-metadata.gz:
-
+metadata.gz: !binary |-
+OGQyNjNmNjFmMWFkM2RhZjMzNzk3N2JmNmE5ZjM2ZTdjMmMzNjg5MmFiZDQ3
+NDU0ZmQyMmU2NWM3Mzc1NmRjZjZlZTM4NjVkMTQxYjAxZGUzYTBkMzY5OThm
+ZDNhMDM0NGY0ZjVjNjUyY2QzYzIwMzA1ZjNhNzk1ODg4N2I4NTg=
+data.tar.gz: !binary |-
+NjFhY2UyODI3NDA2MjU1NzIxYWUyOGJmODdhNDFhOWRhNjcxZDI2OTkwMDg3
+YzJjMTQ2ZmMzZTNjZjI0YmM2ZjE2ZDVjNmNmMTkwYWM4YTA0MTQxMGJmOGFm
+OWU4YjAxZDFmNjc4MGIyYzFkMzY4MTg4OTg1MWQyNWY3MmM1ZDY=
data/README.md
CHANGED
@@ -342,7 +342,12 @@ listen on 127.0.0.3:443) allows /etc/hosts entries to point to services.
 * `backend_name`: The name of the generated HAProxy backend for this service
   (defaults to the service's key in the `services` section)
 * `listen`: these lines will be parsed and placed in the correct `frontend`/`backend` section as applicable; you can put lines which are the same for the frontend and backend here.
-* `backend_order`: optional: how backends should be ordered in the `backend` stanza. (default is shuffling).
+* `backend_order`: optional: how backends should be ordered in the `backend` stanza. (default is shuffling).
+  Setting to `asc` means sorting backends in ascending alphabetical order before generating stanza.
+  `desc` means descending alphabetical order.
+  `no_shuffle` means no shuffling or sorting.
+  If you shuffle consider setting `server_order_seed` at the top level so that your backend
+  ordering is deterministic across HAProxy reloads.
 * `shared_frontend`: optional: haproxy configuration directives for a shared http frontend (see below)
 * `cookie_value_method`: optional: default value is `name`, it defines the way your backends receive a cookie value in http mode. If equal to `hash`, synapse hashes backend names on cookie value assignation of your discovered backends, useful when you want to use haproxy cookie feature but you do not want that your end users receive a Set-Cookie with your server name and ip readable in clear.

@@ -376,6 +381,19 @@ The top level `haproxy` section of the config file has the following options:
 * `state_file_ttl`: the number of seconds that backends should be kept in the
   state file cache. This only applies if `state_file_path` is provided.
   (default: 86400)
+* `server_order_seed`: A number to seed random actions with so that all orders are
+  deterministic. You can use this so that backend ordering is deterministic
+  but still shuffled, for example by setting this to the hash of your machine's
+  IP address you guarantee that HAProxy on different machines have different
+  orders, but within that machine you always choose the same order.
+  (default: ``rand(2000)``)
+* `max_server_id`: Synapse will try to ensure that server lines are written out
+  with HAProxy "id"s that are unique and associated 1:1 with a service backend
+  (host + port + name). To ensure these are unique Synapse internally counts
+  up from 1 until `max_server_id`, so you can have no more than this number
+  of servers in a backend. If the default (65k) is not enough, make this higher
+  but be wary that HAProxy internally uses an int to store this id, so ...
+  your mileage may vary trying to make this higher. (default: 65535)
 
 Note that a non-default `bind_address` can be dangerous.
 If you configure an `address:port` combination that is already in use on the system, haproxy will fail to start.
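The options documented above interact: `backend_order` is set per service, while `server_order_seed` and `max_server_id` live in the top-level `haproxy` section. A minimal sketch of how they might be combined, shown as the Ruby hash that results from loading the YAML config; the service name, paths, and values here are illustrative, not taken from the gem's examples:

```ruby
# Hypothetical synapse config excerpt (illustrative values only).
config = {
  'haproxy' => {
    'config_file_path'  => '/etc/haproxy/haproxy.cfg',
    'do_writes'         => true,
    'server_order_seed' => 1234,   # e.g. derived from a hash of the host's IP
    'max_server_id'     => 65_535, # the default; raise only if one backend needs more servers
  },
  'services' => {
    'example_service' => {
      'haproxy' => {
        'port'           => 3213,
        'backend_order'  => 'shuffle', # or 'asc', 'desc', 'no_shuffle'
        'server_options' => 'check inter 2000 rise 3 fall 2',
      },
    },
  },
}
```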
data/lib/synapse/config_generator/haproxy.rb
CHANGED
@@ -797,6 +797,9 @@ class Synapse::ConfigGenerator
 DEFAULT_STATE_FILE_TTL = (60 * 60 * 24).freeze # 24 hours
 STATE_FILE_UPDATE_INTERVAL = 60.freeze # iterations; not a unit of time
 DEFAULT_BIND_ADDRESS = 'localhost'
+# It's unclear how many servers HAProxy can have in one backend, but 65k
+# should be enough for anyone right (famous last words)?
+MAX_SERVER_ID = (2**16 - 1).freeze

 def initialize(opts)
 super(opts)
|
|
844
847
|
|
845
848
|
@state_file_path = @opts['state_file_path']
|
846
849
|
@state_file_ttl = @opts.fetch('state_file_ttl', DEFAULT_STATE_FILE_TTL).to_i
|
850
|
+
|
851
|
+
# For giving consistent orders, even if they are random
|
852
|
+
@server_order_seed = @opts.fetch('server_order_seed', rand(2000)).to_i
|
853
|
+
@max_server_id = @opts.fetch('max_server_id', MAX_SERVER_ID).to_i
|
854
|
+
# Map of backend names -> hash of HAProxy server names -> puids
|
855
|
+
# (server->id aka "name") to their proxy unique id (server->puid aka "id")
|
856
|
+
@server_id_map = Hash.new{|h,k| h[k] = {}}
|
857
|
+
# Map of backend names -> hash of HAProxy server puids -> names
|
858
|
+
# (server->puid aka "id") to their name (server->id aka "name")
|
859
|
+
@id_server_map = Hash.new{|h,k| h[k] = {}}
|
860
|
+
|
847
861
|
end
|
848
862
|
|
849
863
|
def normalize_watcher_provided_config(service_watcher_name, service_watcher_config)
|
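As a standalone illustration of the two maps initialized above (this is not code from the gem), the `Hash.new { |h, k| h[k] = {} }` default block creates an empty per-watcher sub-hash the first time an unknown watcher name is looked up:

```ruby
# Each map lazily creates an empty sub-hash per watcher name on first access.
server_id_map = Hash.new { |h, k| h[k] = {} }
id_server_map = Hash.new { |h, k| h[k] = {} }

server_id_map['example_service']['host1:5555'] = 1 # server name -> HAProxy id
id_server_map['example_service'][1] = 'host1:5555' # HAProxy id -> server name

server_id_map['brand_new_service'] # => {} rather than nil or a KeyError
```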
@@ -1039,6 +1053,11 @@ class Synapse::ConfigGenerator
 # disabled state...
 seen.fetch(watcher.name, []).each do |backend_name, backend|
 backends[backend_name] = backend.merge('enabled' => false)
+# We remember the haproxy_server_id from a previous reload here.
+# Note though that if live servers below define haproxy_server_id
+# that overrides the remembered value
+@server_id_map[watcher.name][backend_name] ||= backends[backend_name]['haproxy_server_id']
+@id_server_map[watcher.name][@server_id_map[watcher.name][backend_name]] = backend_name if @server_id_map[watcher.name][backend_name]
 end

 # ... and then we overwite any backends that the watchers know about,
@@ -1055,8 +1074,30 @@ class Synapse::ConfigGenerator
 @restart_required = true
 end
 end
+
 backends[backend_name] = backend.merge('enabled' => true)
+
+# If the the registry defines the haproxy_server_id that must be preferred.
+# Note that the order here is important, because if haproxy_server_options
+# does define an id, then we will write that out below, so that must be what
+# is in the id_map as well.
+@server_id_map[watcher.name][backend_name] = backend['haproxy_server_id'].to_i if backend['haproxy_server_id']
+server_opts = (backend['haproxy_server_options'] || 'no match').split(' ')
+@server_id_map[watcher.name][backend_name] = server_opts[server_opts.index('id') + 1].to_i if server_opts.include?('id')
+@id_server_map[watcher.name][@server_id_map[watcher.name][backend_name]] = backend_name
+end
+
+# Now that we know the maximum possible existing haproxy_server_id for
+# this backend, we can set any that don't exist yet.
+watcher.backends.each do |backend|
+backend_name = construct_name(backend)
+@server_id_map[watcher.name][backend_name] ||= find_next_id(watcher.name, backend_name)
+@id_server_map[watcher.name][@server_id_map[watcher.name][backend_name]] = backend_name
 end
+# Remove any servers that don't exist anymore from the server_id_map
+# to control memory growth
+@server_id_map[watcher.name].keep_if { |server_name| backends.has_key?(server_name) }
+@id_server_map[watcher.name].keep_if { |_, server_name| @server_id_map[watcher.name][server_name] }

 if watcher.backends.empty?
 log.debug "synapse: no backends found for watcher #{watcher.name}"
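The precedence above, where an `id` embedded in `haproxy_server_options` wins over a registry-provided `haproxy_server_id`, relies on a simple token scan. A standalone sketch of that parsing; the option string is made up:

```ruby
# Pull an explicit id out of an haproxy_server_options string, if present.
server_options = 'id 12 backup' # hypothetical value from the registry
tokens = server_options.split(' ')
explicit_id = tokens.include?('id') ? tokens[tokens.index('id') + 1].to_i : nil
explicit_id # => 12
```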
@@ -1071,7 +1112,7 @@ class Synapse::ConfigGenerator
 when 'no_shuffle'
 backends.keys
 else
-backends.keys.shuffle
+backends.keys.shuffle(random: Random.new(@server_order_seed))
 end

 stanza = [
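The one-line change above is what makes `server_order_seed` effective: a fresh `Random` seeded with the same number yields the same permutation on every call. A quick illustration with arbitrary values:

```ruby
keys = %w[host1:5555 host2:5555 host3:5555]
seed = 1234 # stands in for the configured server_order_seed

first  = keys.shuffle(random: Random.new(seed))
second = keys.shuffle(random: Random.new(seed))
first == second # => true, so the shuffled order survives HAProxy reloads
```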
@@ -1080,6 +1121,13 @@ class Synapse::ConfigGenerator
 keys.map {|backend_name|
 backend = backends[backend_name]
 b = "\tserver #{backend_name} #{backend['host']}:#{backend['port']}"
+
+# Again, if the registry defines an id, we can't set it.
+has_id = (backend['haproxy_server_options'] || 'no match').split(' ').include?('id')
+if (!has_id && @server_id_map[watcher.name][backend_name])
+b = "#{b} id #{@server_id_map[watcher.name][backend_name]}"
+end
+
 unless config.include?('mode tcp')
 b = case watcher_config['cookie_value_method']
 when 'hash'
@@ -1095,6 +1143,22 @@ class Synapse::ConfigGenerator
 ]
 end

+def find_next_id(watcher_name, backend_name)
+probe = nil
+if @server_id_map[watcher_name].size >= @max_server_id
+log.error "synapse: ran out of server ids for #{watcher_name}, if you need more increase the max_server_id option"
+return probe
+end
+
+probe = 1
+
+while @id_server_map[watcher_name].include?(probe)
+probe = (probe % @max_server_id) + 1
+end
+
+probe
+end
+
 def talk_to_socket(socket_file_path, command)
 s = UNIXSocket.new(socket_file_path)
 s.write(command)
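`find_next_id` above is a linear probe over the already-assigned ids, wrapping at `max_server_id` and giving up (after logging an error) once every slot is taken. A self-contained sketch of the same search, with local names standing in for the generator's instance state:

```ruby
# used_ids mirrors @id_server_map[watcher_name]: keys are ids already in use.
def next_free_id(used_ids, max_server_id)
  return nil if used_ids.size >= max_server_id # no slot left; caller logs an error

  probe = 1
  probe = (probe % max_server_id) + 1 while used_ids.include?(probe)
  probe
end

next_free_id({ 1 => 'host1:5555', 2 => 'host2:5555' }, 65_535) # => 3
next_free_id({}, 65_535)                                       # => 1
```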
@@ -1282,7 +1346,15 @@ class Synapse::ConfigGenerator

 watcher.backends.each do |backend|
 backend_name = construct_name(backend)
-
+data = {
+'timestamp' => timestamp,
+}
+server_id = @server_id_map[watcher.name][backend_name].to_i
+if server_id && server_id > 0 && server_id <= MAX_SERVER_ID
+data['haproxy_server_id'] = server_id
+end
+
+seen[watcher.name][backend_name] = data.merge(backend)
 end
 end

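With the change above, each state-file entry now records the assigned id alongside the backend so it can be restored after a restart. Roughly, an entry ends up shaped like this; the values are illustrative, not taken from the gem:

```ruby
backend   = { 'host' => 'host1', 'port' => 5555 }
data      = { 'timestamp' => Time.now.to_i }
server_id = 7 # whatever the id map held for this backend
data['haproxy_server_id'] = server_id if server_id > 0 && server_id <= 65_535
data.merge(backend)
# => {"timestamp"=>..., "haproxy_server_id"=>7, "host"=>"host1", "port"=>5555}
```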
data/lib/synapse/service_watcher/zookeeper_dns.rb
CHANGED
@@ -51,8 +51,9 @@ class Synapse::ServiceWatcher
 # Overrides the discovery_servers method on the parent class
 attr_accessor :discovery_servers

-def initialize(opts={}, synapse, message_queue)
+def initialize(opts={}, parent=nil, synapse, message_queue)
 @message_queue = message_queue
+@parent = parent

 super(opts, synapse)
 end

@@ -94,6 +95,11 @@ class Synapse::ServiceWatcher
 unless last_resolution == current_resolution
 last_resolution = current_resolution
 configure_backends(last_resolution)
+
+# Propagate revision updates down to ZookeeperDnsWatcher, so
+# that stanza cache can work properly.
+@revision += 1
+@parent.reconfigure! unless @parent.nil?
 end
 end
 end

@@ -147,6 +153,7 @@ class Synapse::ServiceWatcher

 @dns = Dns.new(
 mk_child_watcher_opts(dns_discovery_opts),
+self,
 @synapse,
 @message_queue
 )
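The changes above wire the child DNS watcher back to the composite ZookeeperDns watcher: the parent passes `self` when constructing the child, and the child bumps its revision and calls `reconfigure!` on the parent whenever a resolution changes. A simplified sketch of that wiring; these are stand-in classes, not the watchers themselves:

```ruby
class ChildWatcher
  def initialize(parent = nil)
    @parent   = parent
    @revision = 0
  end

  def resolution_changed!
    @revision += 1                           # bump so cached stanzas are invalidated
    @parent.reconfigure! unless @parent.nil? # nudge the composite watcher
  end
end

class CompositeWatcher
  def build_child
    ChildWatcher.new(self) # pass ourselves as the parent, as the diff does
  end

  def reconfigure!
    puts 'regenerating HAProxy configuration'
  end
end

CompositeWatcher.new.build_child.resolution_changed!
```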
data/lib/synapse/version.rb
CHANGED
data/spec/lib/synapse/haproxy_spec.rb
CHANGED
@@ -6,6 +6,10 @@ class MockWatcher; end;
 describe Synapse::ConfigGenerator::Haproxy do
 subject { Synapse::ConfigGenerator::Haproxy.new(config['haproxy']) }

+let(:maxid) do
+Synapse::ConfigGenerator::Haproxy::MAX_SERVER_ID
+end
+
 let(:mockwatcher) do
 mockWatcher = double(Synapse::ServiceWatcher)
 allow(mockWatcher).to receive(:name).and_return('example_service')
@@ -21,7 +25,7 @@ describe Synapse::ConfigGenerator::Haproxy do
 let(:mockwatcher_with_server_options) do
 mockWatcher = double(Synapse::ServiceWatcher)
 allow(mockWatcher).to receive(:name).and_return('example_service2')
-backends = [{ 'host' => 'somehost', 'port' => 5555, 'haproxy_server_options' => 'backup'}]
+backends = [{ 'host' => 'somehost', 'port' => 5555, 'haproxy_server_options' => 'id 12 backup'}]
 allow(mockWatcher).to receive(:backends).and_return(backends)
 allow(mockWatcher).to receive(:config_for_generator).and_return({
 'haproxy' => {'server_options' => "check inter 2000 rise 3 fall 2"}
@@ -29,6 +33,24 @@ describe Synapse::ConfigGenerator::Haproxy do
 mockWatcher
 end

+let(:mockwatcher_with_server_id) do
+mockWatcher = double(Synapse::ServiceWatcher)
+allow(mockWatcher).to receive(:name).and_return('server_id_svc')
+backends = [
+{'host' => 'host1', 'port' => 5555, 'haproxy_server_id' => 1},
+{'host' => 'host2', 'port' => 5555},
+{'host' => 'host3', 'port' => 5555, 'haproxy_server_options' => "id #{maxid}"},
+]
+allow(mockWatcher).to receive(:backends).and_return(backends)
+allow(mockWatcher).to receive(:config_for_generator).and_return({
+'haproxy' => {
+'server_options' => "check inter 2000 rise 3 fall 2",
+'backend_order' => 'asc',
+},
+})
+mockWatcher
+end
+
 let(:mockwatcher_with_cookie_value_method_hash) do
 mockWatcher = double(Synapse::ServiceWatcher)
 allow(mockWatcher).to receive(:name).and_return('example_service3')
@@ -367,7 +389,7 @@ describe Synapse::ConfigGenerator::Haproxy do

 it 'generates backend stanza' do
 mockConfig = []
-expect(subject.generate_backend_stanza(mockwatcher, mockConfig)).to eql(["\nbackend example_service", [], ["\tserver somehost:5555 somehost:5555 cookie somehost:5555 check inter 2000 rise 3 fall 2"]])
+expect(subject.generate_backend_stanza(mockwatcher, mockConfig)).to eql(["\nbackend example_service", [], ["\tserver somehost:5555 somehost:5555 id 1 cookie somehost:5555 check inter 2000 rise 3 fall 2"]])
 end

 describe 'generate backend stanza in correct order' do
@@ -376,25 +398,25 @@ describe Synapse::ConfigGenerator::Haproxy do
 'asc' => [
 "\nbackend example_service",
 [],
-["\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2"
+["\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 id 1 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 id 3 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 id 2 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2"
 ]
 ],
 'desc' => [
 "\nbackend example_service",
 [],
-["\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2"
+["\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 id 2 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 id 3 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 id 1 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2"
 ]
 ],
 'no_shuffle' => [
 "\nbackend example_service",
 [],
-["\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2",
-"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2"
+["\tserver somehost1_10.11.11.11:5555 10.11.11.11:5555 id 1 cookie somehost1_10.11.11.11:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost3_10.22.22.22:5555 10.22.22.22:5555 id 2 cookie somehost3_10.22.22.22:5555 check inter 2000 rise 3 fall 2",
+"\tserver somehost2_10.10.10.10:5555 10.10.10.10:5555 id 3 cookie somehost2_10.10.10.10:5555 check inter 2000 rise 3 fall 2"
 ]
 ]
 }
@@ -424,21 +446,50 @@ describe Synapse::ConfigGenerator::Haproxy do
 end
 end
 end
+
+context "when shuffle is specified for backend_order" do
+it 'generates backend stanza in reproducible order' do
+mockConfig = []
+allow(mockwatcher_with_multiple_backends).to receive(:config_for_generator).and_return({
+'haproxy' => {
+'server_options' => "check inter 2000 rise 3 fall 2",
+'backend_order' => 'shuffle',
+'server_order_seed' => 1234,
+}
+})
+runs = (1..5).collect { |_| subject.generate_backend_stanza(mockwatcher_with_multiple_backends, mockConfig) }
+expect(runs.length).to eq(5)
+expect(runs.uniq.length).to eq(1)
+end
+end
 end

 it 'hashes backend name as cookie value' do
 mockConfig = []
-expect(subject.generate_backend_stanza(mockwatcher_with_cookie_value_method_hash, mockConfig)).to eql(["\nbackend example_service3", [], ["\tserver somehost:5555 somehost:5555 cookie 9e736eef2f5a1d441e34ade3d2a8eb1e3abb1c92 check inter 2000 rise 3 fall 2"]])
+expect(subject.generate_backend_stanza(mockwatcher_with_cookie_value_method_hash, mockConfig)).to eql(["\nbackend example_service3", [], ["\tserver somehost:5555 somehost:5555 id 1 cookie 9e736eef2f5a1d441e34ade3d2a8eb1e3abb1c92 check inter 2000 rise 3 fall 2"]])
 end

 it 'generates backend stanza without cookies for tcp mode' do
 mockConfig = ['mode tcp']
-expect(subject.generate_backend_stanza(mockwatcher, mockConfig)).to eql(["\nbackend example_service", ["\tmode tcp"], ["\tserver somehost:5555 somehost:5555 check inter 2000 rise 3 fall 2"]])
+expect(subject.generate_backend_stanza(mockwatcher, mockConfig)).to eql(["\nbackend example_service", ["\tmode tcp"], ["\tserver somehost:5555 somehost:5555 id 1 check inter 2000 rise 3 fall 2"]])
 end

 it 'respects haproxy_server_options' do
 mockConfig = []
-expect(subject.generate_backend_stanza(mockwatcher_with_server_options, mockConfig)).to eql(["\nbackend example_service2", [], ["\tserver somehost:5555 somehost:5555 cookie somehost:5555 check inter 2000 rise 3 fall 2 backup"]])
+expect(subject.generate_backend_stanza(mockwatcher_with_server_options, mockConfig)).to eql(["\nbackend example_service2", [], ["\tserver somehost:5555 somehost:5555 cookie somehost:5555 check inter 2000 rise 3 fall 2 id 12 backup"]])
+end
+
+it 'respects haproxy_server_id' do
+mockConfig = []
+expect(subject.generate_backend_stanza(mockwatcher_with_server_id, mockConfig)).to eql(
+["\nbackend server_id_svc", [],
+[
+"\tserver host1:5555 host1:5555 id 1 cookie host1:5555 check inter 2000 rise 3 fall 2",
+"\tserver host2:5555 host2:5555 id 2 cookie host2:5555 check inter 2000 rise 3 fall 2",
+"\tserver host3:5555 host3:5555 cookie host3:5555 check inter 2000 rise 3 fall 2 id #{maxid}",
+]
+]
+)
 end

 it 'generates frontend stanza ' do
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: synapse
 version: !ruby/object:Gem::Version
-version: 0.14.6
+version: 0.14.7
 platform: ruby
 authors:
 - Martin Rhoads
@@ -10,160 +10,160 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-08-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: aws-sdk
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.39'
 type: :runtime
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.39'
 - !ruby/object:Gem::Dependency
 name: docker-api
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.7'
 type: :runtime
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.7'
 - !ruby/object:Gem::Dependency
 name: zk
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: 1.9.4
 type: :runtime
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: 1.9.4
 - !ruby/object:Gem::Dependency
 name: logging
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.8'
 type: :runtime
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: '1.8'
 - !ruby/object:Gem::Dependency
 name: rake
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 - !ruby/object:Gem::Dependency
 name: rspec
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: 3.1.0
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ~>
 - !ruby/object:Gem::Version
 version: 3.1.0
 - !ruby/object:Gem::Dependency
 name: factory_girl
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 - !ruby/object:Gem::Dependency
 name: pry
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 - !ruby/object:Gem::Dependency
 name: pry-nav
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 - !ruby/object:Gem::Dependency
 name: webmock
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 - !ruby/object:Gem::Dependency
 name: timecop
 requirement: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 type: :development
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 description: Synapse is a daemon used to dynamically configure and manage local instances
@@ -179,10 +179,10 @@ executables:
 extensions: []
 extra_rdoc_files: []
 files:
--
--
--
--
+- .gitignore
+- .mailmap
+- .rspec
+- .travis.yml
 - Gemfile
 - Gemfile.lock
 - LICENSE.txt
@@ -235,17 +235,17 @@ require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
 requirements:
-- -
+- - ! '>='
 - !ruby/object:Gem::Version
 version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.5.1
 signing_key:
 specification_version: 4
 summary: Dynamic HAProxy configuration daemon