synapse-aurora 0.11.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. data/.gitignore +23 -0
  2. data/.mailmap +3 -0
  3. data/.nix/Gemfile.nix +141 -0
  4. data/.nix/rubylibs.nix +42 -0
  5. data/.rspec +2 -0
  6. data/.travis.yml +5 -0
  7. data/Gemfile +4 -0
  8. data/LICENSE.txt +22 -0
  9. data/Makefile +6 -0
  10. data/README.md +339 -0
  11. data/Rakefile +8 -0
  12. data/bin/synapse +62 -0
  13. data/config/hostheader_test.json +71 -0
  14. data/config/svcdir_test.json +46 -0
  15. data/config/synapse.conf.json +90 -0
  16. data/config/synapse_services/service1.json +24 -0
  17. data/config/synapse_services/service2.json +24 -0
  18. data/default.nix +66 -0
  19. data/lib/synapse.rb +85 -0
  20. data/lib/synapse/base.rb +5 -0
  21. data/lib/synapse/haproxy.rb +797 -0
  22. data/lib/synapse/log.rb +24 -0
  23. data/lib/synapse/service_watcher.rb +36 -0
  24. data/lib/synapse/service_watcher/base.rb +109 -0
  25. data/lib/synapse/service_watcher/dns.rb +109 -0
  26. data/lib/synapse/service_watcher/docker.rb +120 -0
  27. data/lib/synapse/service_watcher/ec2tag.rb +133 -0
  28. data/lib/synapse/service_watcher/zookeeper.rb +153 -0
  29. data/lib/synapse/service_watcher/zookeeper_aurora.rb +76 -0
  30. data/lib/synapse/service_watcher/zookeeper_dns.rb +232 -0
  31. data/lib/synapse/version.rb +3 -0
  32. data/spec/lib/synapse/haproxy_spec.rb +32 -0
  33. data/spec/lib/synapse/service_watcher_base_spec.rb +55 -0
  34. data/spec/lib/synapse/service_watcher_docker_spec.rb +152 -0
  35. data/spec/lib/synapse/service_watcher_ec2tags_spec.rb +220 -0
  36. data/spec/spec_helper.rb +22 -0
  37. data/spec/support/configuration.rb +9 -0
  38. data/spec/support/minimum.conf.yaml +27 -0
  39. data/synapse.gemspec +33 -0
  40. metadata +227 -0
data/.gitignore ADDED
@@ -0,0 +1,23 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ test/tmp
+ test/version_tmp
+ tmp
+ *~
+ .vagrant
+ .*sw?
+ vendor/
+
+ synapse.jar
data/.mailmap ADDED
@@ -0,0 +1,3 @@
+ <igor.serebryany@airbedandbreakfast.com> <igor47@moomers.org>
+ <martin.rhoads@airbnb.com> <ermal14@gmail.com>
+ Pierre Carrier <pierre@gcarrier.fr>
data/.nix/Gemfile.nix ADDED
@@ -0,0 +1,141 @@
+ # WARNING: automatically generated file
+ # Generated by 'gem nix' command that comes from 'nix' gem
+ g: # Get dependencies from patched gems
+ {
+ aliases = {
+ archive_tar_minitar = g.archive_tar_minitar_0_5_2;
+ docker_api = g.docker_api_1_7_6;
+ excon = g.excon_0_42_1;
+ json = g.json_1_8_1;
+ little_plugger = g.little_plugger_1_1_3;
+ logging = g.logging_1_8_2;
+ multi_json = g.multi_json_1_10_1;
+ synapse = g.synapse_0_11_1;
+ zk = g.zk_1_9_4;
+ zookeeper = g.zookeeper_1_4_9;
+ };
+ gem_nix_args = [ ''synapse'' ];
+ gems = {
+ archive_tar_minitar_0_5_2 = {
+ basename = ''archive_tar_minitar'';
+ meta = {
+ description = ''Provides POSIX tarchive management from Ruby programs.'';
+ homepage = ''http://rubyforge.org/projects/ruwiki/'';
+ longDescription = ''Archive::Tar::Minitar is a pure-Ruby library and command-line utility that provides the ability to deal with POSIX tar(1) archive files. The implementation is based heavily on Mauricio Ferna'ndez's implementation in rpa-base, but has been reorganised to promote reuse in other projects.'';
+ };
+ name = ''archive-tar-minitar-0.5.2'';
+ requiredGems = [ ];
+ sha256 = ''1j666713r3cc3wb0042x0wcmq2v11vwwy5pcaayy5f0lnd26iqig'';
+ };
+ docker_api_1_7_6 = {
+ basename = ''docker_api'';
+ meta = {
+ description = ''A simple REST client for the Docker Remote API'';
+ homepage = ''https://github.com/swipely/docker-api'';
+ longDescription = ''A simple REST client for the Docker Remote API'';
+ };
+ name = ''docker-api-1.7.6'';
+ requiredGems = [ g.excon_0_42_1 g.json_1_8_1 g.archive_tar_minitar_0_5_2 ];
+ sha256 = ''1ari4f2rk9w5j5mci7wlqiabqispd2pr9m6qwbqq1ryrlqvksr28'';
+ };
+ excon_0_42_1 = {
+ basename = ''excon'';
+ meta = {
+ description = ''speed, persistence, http(s)'';
+ homepage = ''https://github.com/excon/excon'';
+ longDescription = ''EXtended http(s) CONnections'';
+ };
+ name = ''excon-0.42.1'';
+ requiredGems = [ ];
+ sha256 = ''1za1jmp83149qmykih2bfgxlwyyz3hrpaq4kxww7467fvgwh58xj'';
+ };
+ json_1_8_1 = {
+ basename = ''json'';
+ meta = {
+ description = ''JSON Implementation for Ruby'';
+ homepage = ''http://flori.github.com/json'';
+ longDescription = ''This is a JSON implementation as a Ruby extension in C.'';
+ };
+ name = ''json-1.8.1'';
+ requiredGems = [ ];
+ sha256 = ''0002bsycvizvkmk1jyv8px1hskk6wrjfk4f7x5byi8gxm6zzn6wn'';
+ };
+ little_plugger_1_1_3 = {
+ basename = ''little_plugger'';
+ meta = {
+ description = ''LittlePlugger is a module that provides Gem based plugin management.'';
+ homepage = ''http://gemcutter.org/gems/little-plugger'';
+ longDescription = ''LittlePlugger is a module that provides Gem based plugin management.
+ By extending your own class or module with LittlePlugger you can easily
+ manage the loading and initializing of plugins provided by other gems.'';
+ };
+ name = ''little-plugger-1.1.3'';
+ requiredGems = [ ];
+ sha256 = ''0crxv0yl5iwmqzj2y7hh9s7qbwr7s7305vgdbsanbzq059ca98yp'';
+ };
+ logging_1_8_2 = {
+ basename = ''logging'';
+ meta = {
+ description = ''A flexible and extendable logging library for Ruby'';
+ homepage = ''http://rubygems.org/gems/logging'';
+ longDescription = ''Logging is a flexible logging library for use in Ruby programs based on the
+ design of Java's log4j library. It features a hierarchical logging system,
+ custom level names, multiple output destinations per log event, custom
+ formatting, and more.'';
+ };
+ name = ''logging-1.8.2'';
+ requiredGems = [ g.little_plugger_1_1_3 g.multi_json_1_10_1 ];
+ sha256 = ''0vcckpk3sffhz9phpzkbbqzzcffsg2n292rmq5b4gx6dp9g4n86p'';
+ };
+ multi_json_1_10_1 = {
+ basename = ''multi_json'';
+ meta = {
+ description = ''A common interface to multiple JSON libraries.'';
+ homepage = ''http://github.com/intridea/multi_json'';
+ longDescription = ''A common interface to multiple JSON libraries, including Oj, Yajl, the JSON gem (with C-extensions), the pure-Ruby JSON gem, NSJSONSerialization, gson.rb, JrJackson, and OkJson.'';
+ };
+ name = ''multi_json-1.10.1'';
+ requiredGems = [ ];
+ sha256 = ''1ll21dz01jjiplr846n1c8yzb45kj5hcixgb72rz0zg8fyc9g61c'';
+ };
+ synapse_0_11_1 = {
+ basename = ''synapse'';
+ meta = {
+ description = '': Write a gem summary'';
+ longDescription = '': Write a gem description'';
+ };
+ name = ''synapse-0.11.1'';
+ requiredGems = [ g.zk_1_9_4 g.docker_api_1_7_6 ];
+ sha256 = ''121ls0ypbz7i24acrldq8dzp8z6a4brl4vxngy51asgjwpalrx52'';
+ };
+ zk_1_9_4 = {
+ basename = ''zk'';
+ meta = {
+ description = ''A high-level wrapper around the zookeeper driver'';
+ homepage = ''https://github.com/slyphon/zk'';
+ longDescription = ''A high-level wrapper around the zookeeper driver
+ '';
+ };
+ name = ''zk-1.9.4'';
+ requiredGems = [ g.zookeeper_1_4_9 g.logging_1_8_2 ];
+ sha256 = ''1rgghyhnbqp7lcn0vik3rn93msv51igsh5cwag88rp6hhnvd141j'';
+ };
+ zookeeper_1_4_9 = {
+ basename = ''zookeeper'';
+ meta = {
+ description = ''Apache ZooKeeper driver for Rubies'';
+ homepage = ''https://github.com/slyphon/zookeeper'';
+ longDescription = ''A low-level multi-Ruby wrapper around the ZooKeeper API bindings. For a
+ friendlier interface, see http://github.com/slyphon/zk. Currently supported:
+ MRI: {1.8.7, 1.9.2, 1.9.3}, JRuby: ~> 1.6.7, Rubinius: 2.0.testing, REE 1.8.7.
+
+ This library uses version 3.4.5 of zookeeper bindings.
+
+ '';
+ };
+ name = ''zookeeper-1.4.9'';
+ requiredGems = [ ];
+ sha256 = ''1zjb8sri15nqyqv1w9v34dv2d7q1lf3phr126mcfrj8v6vawhzcc'';
+ };
+ };
+ }
data/.nix/rubylibs.nix ADDED
@@ -0,0 +1,42 @@
+ { stdenv, config, fetchurl, callPackage
+ , gemfile ? ./Gemfile.nix
+ , patches ? null
+ , overrides ? null
+ }:
+
+ let
+ inherit (stdenv.lib) fold optional;
+ gemsMergeableFun = { generatedFuns ? [], patchFuns ? [], overrideFuns ? [] }:
+ let
+ generatedAttrs = map (f: f customGems) generatedFuns;
+ generatedGems = map (a: a.gems) generatedAttrs;
+ gem = callPackage <nixpkgs/pkgs/development/interpreters/ruby/gem.nix> {
+ patches = map (f: callPackage f { inherit gems; }) patchFuns;
+ overrides = map (f: callPackage f { }) overrideFuns;
+ };
+ customGems = stdenv.lib.mapAttrs gem (fold (x: y: x // y) { } generatedGems);
+ gems = fold (x: y: x // y) customGems (map (a: a.aliases) generatedAttrs);
+ in
+ gems // {
+ merge = { generated ? null, patches ? null, overrides ? null }:
+ gemsMergeableFun {
+ generatedFuns = generatedFuns ++ optional (generated != null) generated;
+ patchFuns = patchFuns ++ optional (patches != null) patches;
+ overrideFuns = overrideFuns ++ optional (overrides != null) overrides;
+ };
+ };
+ in
+ ((gemsMergeableFun { }).merge {
+ generated = import gemfile;
+ inherit patches;
+ inherit overrides;
+ }).merge (
+ let
+ getLocalGemFun = name: stdenv.lib.attrByPath [ "gems" name ] null config;
+ in
+ {
+ generated = getLocalGemFun "generated";
+ patches = getLocalGemFun "patches";
+ overrides = getLocalGemFun "overrides";
+ }
+ )
data/.rspec ADDED
@@ -0,0 +1,2 @@
+ --color
+ --format progress
data/.travis.yml ADDED
@@ -0,0 +1,5 @@
+ language: ruby
+ cache: bundler
+ rvm:
+   - 1.9.3
+
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'https://rubygems.org'
+
+ # Specify your gem's dependencies in synapse.gemspec
+ gemspec
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2013 Airbnb, Inc.
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/Makefile ADDED
@@ -0,0 +1,6 @@
+ build: synapse.jar
+
+ synapse.jar:
+ 	jruby -S warble jar
+
+ .PHONY: build push
data/README.md ADDED
@@ -0,0 +1,339 @@
+ [![Build Status](https://travis-ci.org/airbnb/synapse.png?branch=master)](https://travis-ci.org/airbnb/synapse)
+ [![Inline docs](http://inch-ci.org/github/airbnb/synapse.png)](http://inch-ci.org/github/airbnb/synapse)
+
+ # Synapse #
+
+ Synapse is Airbnb's new system for service discovery.
+ Synapse solves the problem of automated failover in the cloud, where failover via network re-configuration is impossible.
+ The end result is the ability to connect internal services together in a scalable, fault-tolerant way.
+
+ ## Motivation ##
+
+ Synapse emerged from the need to maintain high-availability applications in the cloud.
+ Traditional high-availability techniques, which involve using a CRM like [pacemaker](http://linux-ha.org/wiki/Pacemaker), do not work in environments where the end-user has no control over the networking.
+ In an environment like Amazon's EC2, all of the available workarounds are suboptimal:
+
+ * Round-robin DNS: slow to converge, and doesn't work when applications cache DNS lookups (which is frequent)
+ * Elastic IPs: slow to converge, limited in number, and public-facing only, which makes them less useful for internal services
+ * ELB: again, public-facing only, and only useful for HTTP
+
+ One solution to this problem is a discovery service, like [Apache Zookeeper](http://zookeeper.apache.org/).
+ However, Zookeeper and similar services have their own problems:
+
+ * Service discovery is embedded in all of your apps; often, integration is not simple
+ * The discovery layer itself is subject to failure
+ * Requires additional servers/instances
+
+ Synapse solves these difficulties in a simple and fault-tolerant way.
+
+ ## How Synapse Works ##
+
+ Synapse runs on your application servers; here at Airbnb, we just run it on every box we deploy.
+ The heart of synapse is actually [HAProxy](http://haproxy.1wt.eu/), a stable and proven routing component.
+ For every external service that your application talks to, we assign Synapse a local port on localhost.
+ Synapse creates a proxy from the local port to the service, and you reconfigure your application to talk to the proxy.
+
+ Synapse comes with a number of `watchers`, which are responsible for service discovery.
+ The synapse watchers take care of re-configuring the proxy so that it always points at available servers.
+ We've included a number of default watchers, including ones that query zookeeper and ones that use the AWS API.
+ It is easy to write your own watchers for your use case, and we encourage submitting them back to the project.
+
+ ## Example Migration ##
+
+ Let's suppose your Rails application depends on a Postgres database instance.
+ The `database.yml` file has the DB host and port hardcoded:
+
+ ```yaml
+ production:
+   database: mydb
+   host: mydb.example.com
+   port: 5432
+ ```
+
+ You would like to be able to fail over to a different database in case the original dies.
+ Let's suppose your instance is running in AWS and you're using the tag 'proddb' set to 'true' to indicate the prod DB.
+ You set up synapse to proxy the DB connection on `localhost:3219` in the `synapse.conf.yaml` file.
+ Add a hash under `services` that looks like this:
+
+ ```yaml
+ ---
+ services:
+   proddb:
+     default_servers:
+       -
+         name: "default-db"
+         host: "mydb.example.com"
+         port: 5432
+     discovery:
+       method: "ec2tag"
+       tag_name: "proddb"
+       tag_value: "true"
+     haproxy:
+       port: 3219
+       server_options: "check inter 2000 rise 3 fall 2"
+       frontend: mode tcp
+       backend: mode tcp
+ ```
+
+ And then change your `database.yml` file to look like this:
+
+ ```yaml
+ production:
+   database: mydb
+   host: localhost
+   port: 3219
+ ```
+
+ Start up synapse.
+ It will configure HAProxy with a proxy from `localhost:3219` to your DB.
+ It will attempt to find the DB using the AWS API; if that does not work, it will default to the DB given in `default_servers`.
+ In the worst case, if the AWS API is down and you need to change which DB your application talks to, simply edit the `synapse.conf.json` file, update the `default_servers`, and restart synapse.
+ HAProxy will be transparently reloaded, and your application will keep running without a hiccup.
+
+ ## Installation
+
+ Add this line to your application's Gemfile:
+
+     gem 'synapse'
+
+ And then execute:
+
+     $ bundle
+
+ Or install it yourself as:
+
+     $ gem install synapse
+
+
+ Don't forget to install HAProxy prior to installing Synapse.
+
+ ## Configuration ##
+
+ Synapse depends on a single config file in JSON format; it's usually called `synapse.conf.json`.
+ The file has two main sections.
+ The first is the `services` section, which lists the services you'd like to connect.
+ The second is the `haproxy` section, which specifies how to configure and interact with HAProxy.
+
+ ### Configuring a Service ###
+
+ The services are a hash, where the keys are the `name` of the service to be configured.
+ The name is just a human-readable string; it will be used in logs and notifications.
+ Each value in the services hash is also a hash, and should contain the following keys (a sketch of a complete entry follows the list):
+
+ * `discovery`: how synapse will discover hosts providing this service (see below)
+ * `default_servers`: the list of default servers providing this service; synapse uses these if no others can be discovered
+ * `haproxy`: how the haproxy section for this service will be configured
+
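For illustration only (the service name, zookeeper path, and hostnames below are hypothetical and not taken from this package's config files), a complete entry under `services` combining these keys might look like:

```yaml
services:
  myservice:
    discovery:
      method: "zookeeper"
      path: "/services/myservice"
      hosts:
        - "zk1.example.com:2181"
    default_servers:
      -
        name: "fallback"
        host: "myservice.example.com"
        port: 8080
    haproxy:
      port: 3213
      server_options: "check inter 2000 rise 3 fall 2"
```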
+ #### Service Discovery ####
+
+ We've included a number of `watchers` which provide service discovery.
+ Put these into the `discovery` section of the service hash, with these options:
+
+ ##### Stub #####
+
+ The stub watcher is useful in situations where you only want to use the servers in the `default_servers` list.
+ It has only one option:
+
+ * `method`: stub
+
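For example (a minimal sketch with placeholder values), a stub-discovered service relies entirely on its `default_servers`:

```yaml
discovery:
  method: "stub"
default_servers:
  -
    name: "static-backend"
    host: "10.1.2.3"
    port: 9000
```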
+ ##### Zookeeper #####
+
+ This watcher retrieves a list of servers from zookeeper.
+ It takes the following options:
+
+ * `method`: zookeeper
+ * `path`: the zookeeper path where ephemeral nodes will be created for each available service server
+ * `hosts`: the list of zookeeper servers to query
+
+ The watcher assumes that each node under `path` represents a service server.
+ Synapse attempts to decode the data in each of these nodes using JSON and also using Thrift under the standard Twitter service encoding.
+ We assume that the data contains a hostname and a port for service servers.
+
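As an illustrative guess at the payload (the exact fields written by your registration tooling may differ), a JSON-encoded node might contain:

```
{"host": "server1.example.com", "port": 8080}
```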
+ ##### Docker #####
+
+ This watcher retrieves a list of [docker](http://www.docker.io/) containers via docker's [HTTP API](http://docs.docker.io/en/latest/reference/api/docker_remote_api/).
+ It takes the following options:
+
+ * `method`: docker
+ * `servers`: a list of servers running docker as a daemon. Format is `{"name":"...", "host": "..."[, port: 4243]}`
+ * `image_name`: find containers running this image
+ * `container_port`: find containers forwarding this port
+ * `check_interval`: how often to poll the docker API on each server. Default is 15s.
+
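As a hedged sketch (the image name, port, and docker host below are placeholders), a docker-based `discovery` section might look like:

```yaml
discovery:
  method: "docker"
  servers:
    - name: "docker-host-1"
      host: "docker1.example.com"
      port: 4243
  image_name: "myservice"
  container_port: 8080
  check_interval: 15
```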
+ ##### AWS EC2 tags #####
+
+ This watcher retrieves a list of Amazon EC2 instances that have a tag
+ with a particular value, using the AWS API.
+ It takes the following options:
+
+ * `method`: ec2tag
+ * `tag_name`: the name of the tag to inspect. As per the AWS docs,
+   this is case-sensitive.
+ * `tag_value`: the value to match on. Case-sensitive.
+
+ Additionally, you MUST supply `server_port_override` in the `haproxy`
+ section of the configuration, as this watcher does not know which port
+ the backend service is listening on.
+
+ The following options are optional, provided the well-known `AWS_`
+ environment variables shown are set. If supplied, these options will
+ be used in preference to the `AWS_` environment variables.
+
+ * `aws_access_key_id`: AWS key, or set `AWS_ACCESS_KEY_ID` in the environment.
+ * `aws_secret_access_key`: AWS secret key, or set `AWS_SECRET_ACCESS_KEY` in the environment.
+ * `aws_region`: AWS region (e.g. `us-east-1`), or set `AWS_REGION` in the environment.
+
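As an illustrative sketch (the tag name, tag value, and ports are placeholders), an EC2-tag-discovered service would pair the watcher with `server_port_override`:

```yaml
myservice:
  discovery:
    method: "ec2tag"
    tag_name: "Role"
    tag_value: "myservice"
  haproxy:
    port: 3214
    server_port_override: 8080
```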
+ #### Listing Default Servers ####
+
+ You may list a number of default servers providing a service.
+ Each hash in that section has the following options:
+
+ * `name`: a human-readable name for the default server; must be unique
+ * `host`: the host or IP address of the server
+ * `port`: the port where the service runs on the `host`
+
+ The `default_servers` list is used only when service discovery returns no servers.
+ In that case, the service proxy will be created with the servers listed here.
+ If you do not list any default servers, no proxy will be created. The
+ `default_servers` will also be used in addition to discovered servers if the
+ `keep_default_servers` option is set.
+
+ #### The `haproxy` Section ####
+
+ This section is its own hash, which should contain the following keys:
+
+ * `port`: the port (on localhost) where HAProxy will listen for connections to the service. If this is omitted, only a backend stanza (and no frontend stanza) will be generated for this service; you'll need to get traffic to your service yourself via the `shared_frontend` or manual frontends in `extra_sections`
+ * `server_port_override`: the port that discovered servers listen on; you should specify this if your discovery mechanism only discovers names or addresses (like the DNS watcher). If the discovery method discovers a port along with hostnames (like the zookeeper watcher) this option may be left out, but will be used in preference if given.
+ * `server_options`: the haproxy options for each `server` line of the service in the HAProxy config; it may be left out.
+ * `frontend`: additional lines passed to the HAProxy config in the `frontend` stanza of this service
+ * `backend`: additional lines passed to the HAProxy config in the `backend` stanza of this service
+ * `listen`: these lines will be parsed and placed in the correct `frontend`/`backend` section as applicable; you can put lines which are the same for the frontend and backend here.
+ * `shared_frontend`: (optional) haproxy configuration directives for a shared HTTP frontend (see below)
+
+ ### Configuring HAProxy ###
+
+ The `haproxy` section of the config file has the following options:
+
+ * `reload_command`: the command Synapse will run to reload HAProxy
+ * `config_file_path`: where Synapse will write the HAProxy config file
+ * `do_writes`: whether or not the config file will be written (defaults to `true`)
+ * `do_reloads`: whether or not Synapse will reload HAProxy (defaults to `true`)
+ * `global`: options listed here will be written into the `global` section of the HAProxy config
+ * `defaults`: options listed here will be written into the `defaults` section of the HAProxy config
+ * `extra_sections`: additional, manually-configured `frontend`, `backend`, or `listen` stanzas
+ * `bind_address`: force HAProxy to listen on this address (default is localhost)
+ * `shared_frontend`: (optional) additional lines passed to the HAProxy config used to configure a shared HTTP frontend (see below)
+
+ Note that a non-default `bind_address` can be dangerous.
+ If you configure an `address:port` combination that is already in use on the system, haproxy will fail to start.
+
+ ### HAProxy shared HTTP Frontend ###
+
+ For HTTP-only services, it is not always necessary or desirable to dedicate a TCP port per service, since HAProxy can route traffic based on host headers.
+ To support this, the optional `shared_frontend` section can be added to both the `haproxy` section and each individual service definition.
+ Synapse will concatenate them all into a single frontend section in the generated haproxy.cfg file.
+ Note that synapse does not assemble the routing ACLs for you; you have to do that yourself based on your needs.
+ This is probably most useful in combination with the `service_conf_dir` directive in a case where the individual service config files are being distributed by a configuration manager such as Puppet or Chef, or bundled into service packages.
+ For example:
+
+ ```yaml
+ haproxy:
+   shared_frontend: "bind 127.0.0.1:8081"
+   reload_command: "service haproxy reload"
+   config_file_path: "/etc/haproxy/haproxy.cfg"
+   socket_file_path: "/var/run/haproxy.sock"
+   global:
+     - "daemon"
+     - "user haproxy"
+     - "group haproxy"
+     - "maxconn 4096"
+     - "log 127.0.0.1 local2 notice"
+     - "stats socket /var/run/haproxy.sock"
+   defaults:
+     - "log global"
+     - "balance roundrobin"
+ services:
+   service1:
+     discovery:
+       method: "zookeeper"
+       path: "/nerve/services/service1"
+       hosts: "0.zookeeper.example.com:2181"
+     haproxy:
+       server_options: "check inter 2s rise 3 fall 2"
+       shared_frontend:
+         - "acl is_service1 hdr_dom(host) -i service1.lb.example.com"
+         - "use_backend service1 if is_service1"
+       backend: "mode http"
+
+   service2:
+     discovery:
+       method: "zookeeper"
+       path: "/nerve/services/service2"
+       hosts: "0.zookeeper.example.com:2181"
+
+     haproxy:
+       server_options: "check inter 2s rise 3 fall 2"
+       shared_frontend:
+         - "acl is_service2 hdr_dom(host) -i service2.lb.example.com"
+         - "use_backend service2 if is_service2"
+       backend: "mode http"
+
+ ```
+
+ This would produce an haproxy.cfg much like the following:
+
+ ```
+ backend service1
+   mode http
+   server server1.example.net:80 server1.example.net:80 check inter 2s rise 3 fall 2
+
+ backend service2
+   mode http
+   server server2.example.net:80 server2.example.net:80 check inter 2s rise 3 fall 2
+
+ frontend shared-frontend
+   bind 127.0.0.1:8081
+   acl is_service1 hdr_dom(host) -i service1.lb.example.com
+   use_backend service1 if is_service1
+   acl is_service2 hdr_dom(host) -i service2.lb.example.com
+   use_backend service2 if is_service2
+ ```
+
+ Non-HTTP backends such as MySQL or RabbitMQ will obviously continue to need their own dedicated ports.
+
+ ## Contributing
+
+ 1. Fork it
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
+ 4. Push to the branch (`git push origin my-new-feature`)
+ 5. Create a new Pull Request
+
+ ### Creating a Service Watcher ###
+
+ If you'd like to create a new service watcher:
+
+ 1. Create a file for your watcher in the `service_watcher` dir
+ 2. Use the following template:
+ ```ruby
+ require 'synapse/service_watcher/base'
+
+ module Synapse
+   class NewWatcher < BaseWatcher
+     def start
+       # write code which begins running service discovery
+     end
+
+     private
+     def validate_discovery_opts
+       # here, validate any required options in @discovery
+     end
+   end
+ end
+ ```
+
+ 3. Implement the `start` and `validate_discovery_opts` methods
+ 4. Implement whatever additional methods your discovery requires
+
+ When your watcher detects a list of new backends, they should be written to `@backends`.
+ You should then call `@synapse.configure` to force synapse to update the HAProxy config.
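As a rough sketch only (the polling loop, the `discover_instances` helper, and the exact backend fields are hypothetical rather than part of this gem), a watcher's `start` method might poll a source of truth and push results into `@backends` like this:

```ruby
require 'synapse/service_watcher/base'

module Synapse
  class PollingWatcher < BaseWatcher
    def start
      # poll in a background thread so synapse's main loop is not blocked
      @thread = Thread.new do
        loop do
          backends = discover_instances
          unless backends == @backends
            @backends = backends
            # ask synapse to regenerate and reload the HAProxy config
            @synapse.configure
          end
          sleep 15
        end
      end
    end

    private

    def validate_discovery_opts
      raise ArgumentError, "invalid discovery method #{@discovery['method']}" \
        unless @discovery['method'] == 'polling'
    end

    def discover_instances
      # hypothetical helper: query your source of truth and return a list of
      # backend hashes, each with 'name', 'host', and 'port' keys
      []
    end
  end
end
```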