synapse 0.0.1 → 0.2.1

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,20 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
+   <component name="NewModuleRootManager" inherit-compiler-output="false">
+     <output url="file://$MODULE_DIR$/target/classes" />
+     <output-test url="file://$MODULE_DIR$/target/test-classes" />
+     <content url="file://$MODULE_DIR$">
+       <sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
+       <sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
+       <excludeFolder url="file://$MODULE_DIR$/target" />
+     </content>
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+     <orderEntry type="library" name="Maven: org.apache.httpcomponents:httpclient:4.2.1" level="project" />
+     <orderEntry type="library" name="Maven: org.apache.httpcomponents:httpcore:4.2.1" level="project" />
+     <orderEntry type="library" name="Maven: commons-logging:commons-logging:1.1.1" level="project" />
+     <orderEntry type="library" name="Maven: commons-codec:commons-codec:1.6" level="project" />
+     <orderEntry type="library" name="Maven: com.google.guava:guava:13.0-final" level="project" />
+   </component>
+ </module>
+
data/client/pom.xml ADDED
@@ -0,0 +1,45 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project xmlns="http://maven.apache.org/POM/4.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+   <modelVersion>4.0.0</modelVersion>
+
+   <groupId>benchmark-client</groupId>
+   <artifactId>benchmark-client</artifactId>
+   <version>1.0</version>
+
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.httpcomponents</groupId>
+       <artifactId>httpclient</artifactId>
+       <version>4.2.1</version>
+     </dependency>
+     <dependency>
+       <groupId>com.google.guava</groupId>
+       <artifactId>guava</artifactId>
+       <version>13.0-final</version>
+     </dependency>
+   </dependencies>
+
+   <build>
+     <plugins>
+       <plugin>
+         <groupId>org.apache.maven.plugins</groupId>
+         <artifactId>maven-shade-plugin</artifactId>
+         <version>1.6</version>
+         <configuration>
+           <!-- put your configurations here -->
+         </configuration>
+         <executions>
+           <execution>
+             <phase>package</phase>
+             <goals>
+               <goal>shade</goal>
+             </goals>
+           </execution>
+         </executions>
+       </plugin>
+     </plugins>
+   </build>
+
+ </project>
@@ -0,0 +1,68 @@
+ import org.apache.http.HttpEntity;
+ import org.apache.http.HttpResponse;
+ import org.apache.http.client.methods.HttpPost;
+ import org.apache.http.impl.client.DefaultHttpClient;
+ import org.apache.http.impl.conn.PoolingClientConnectionManager;
+
+ import java.io.IOException;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ScheduledExecutorService;
+ import java.util.concurrent.ScheduledFuture;
+ import java.util.concurrent.TimeUnit;
+
+ /**
+  * @author Tobi Knaup
+  */
+ public class ClientArsch {
+
+     public static final int NUM_THREADS = 10;
+     public static final int INTERVAL_MS = 25;
+
+     private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
+
+     public static void main(String[] args) {
+         new ClientArsch().run();
+     }
+
+     public void run() {
+
+         // final PoolingClientConnectionManager cm = new PoolingClientConnectionManager();
+         // cm.setMaxTotal(100);
+
+         for (int i = 0; i < NUM_THREADS; i++) {
+             scheduler.scheduleAtFixedRate(new PostThread(String.valueOf(i)), 0, INTERVAL_MS, TimeUnit.MILLISECONDS);
+         }
+
+         // final ScheduledFuture<?> clientHandle =
+     }
+
+     class PostThread implements Runnable {
+
+         DefaultHttpClient httpClient;
+         HttpPost httpPost;
+         String id;
+
+         public PostThread(String id) {
+             super();
+             this.id = id;
+             this.httpClient = new DefaultHttpClient();
+             this.httpPost = new HttpPost("http://localhost:8088/search/test");
+         }
+
+         public void run() {
+             try {
+                 Long tic = System.nanoTime();
+                 HttpResponse response = httpClient.execute(httpPost);
+                 Long toc = System.nanoTime();
+
+                 System.out.printf("%s\t%d\t%d\t%d\n", id, (toc / 1000L), (toc - tic) / 1000L, response.getStatusLine().getStatusCode());
+
+                 HttpEntity entity = response.getEntity();
+                 entity.getContent().close(); // release client
+             } catch (IOException e) {
+                 e.printStackTrace();
+             }
+         }
+     }
+
+ }
@@ -0,0 +1,3 @@
+ Manifest-Version: 1.0
+ Main-Class: ClientArsch
+
@@ -0,0 +1,96 @@
+ {
+   "services": [
+     {
+       "name": "service1",
+       "local_port": 3213,
+       "server_options": "check inter 2s rise 3 fall 2",
+       "default_servers": [
+         {
+           "name": "default1",
+           "host": "localhost",
+           "port": 8423
+         }
+       ],
+       "discovery": {
+         "method": "zookeeper",
+         "path": "/airbnb/service/service1",
+         "hosts": [
+           "zk0.airbnb.com:2181",
+           "zk1.airbnb.com:2181"
+         ]
+       },
+       "listen": [
+         "mode http",
+         "option httpchk /health",
+         "http-check expect string OK"
+       ]
+     },
+     {
+       "name": "service2",
+       "local_port": 3214,
+       "server_options": "check inter 2s rise 3 fall 2",
+       "default_servers": [
+         {
+           "name": "default1",
+           "host": "localhost",
+           "port": 8422
+         }
+       ],
+       "discovery": {
+         "method": "zookeeper",
+         "path": "/airbnb/service/service2",
+         "hosts": [
+           "zk0.airbnb.com:2181",
+           "zk1.airbnb.com:2181"
+         ]
+       },
+       "listen": [
+         "mode http",
+         "option httpchk /health"
+       ]
+     }
+   ],
+   "haproxy": {
+     "reload_command": "sudo service haproxy reload",
+     "config_file_path": "/etc/haproxy/haproxy.cfg",
+     "socket_file_path": "/var/haproxy/stats.sock",
+     "do_writes": false,
+     "do_reloads": false,
+     "do_socket": false,
+     "global": [
+       "daemon",
+       "user haproxy",
+       "group haproxy",
+       "maxconn 4096",
+       "log 127.0.0.1 local0",
+       "log 127.0.0.1 local1 notice",
+       "stats socket /var/haproxy/stats.sock mode 666 level admin"
+     ],
+     "defaults": [
+       "log global",
+       "option dontlognull",
+       "maxconn 2000",
+       "retries 3",
+       "timeout connect 5s",
+       "timeout client 1m",
+       "timeout server 1m",
+       "option redispatch",
+       "balance roundrobin"
+     ],
+     "extra_sections": {
+       "listen stats :3212": [
+         "mode http",
+         "stats enable",
+         "stats uri /",
+         "stats refresh 5s"
+       ]
+     }
+   },
+   "synapse": {
+     "self_check_port": 3210,
+     "self_check_proxy_port": 3211,
+     "zk_servers": [
+       "localhost:2181"
+     ]
+   }
+ }
data/haproxy.pid ADDED
@@ -0,0 +1 @@
+ 189
@@ -0,0 +1,65 @@
+ #
+ # Autogenerated by Thrift
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+
+ require 'set'
+ require 'thrift'
+
+ module Twitter
+   module Thrift
+     module Status
+       DEAD = 0
+       STARTING = 1
+       ALIVE = 2
+       STOPPING = 3
+       STOPPED = 4
+       WARNING = 5
+       VALUE_MAP = {0 => "DEAD", 1 => "STARTING", 2 => "ALIVE", 3 => "STOPPING", 4 => "STOPPED", 5 => "WARNING"}
+       VALID_VALUES = Set.new([DEAD, STARTING, ALIVE, STOPPING, STOPPED, WARNING]).freeze
+     end
+
+     class Endpoint
+       include ::Thrift::Struct, ::Thrift::Struct_Union
+       HOST = 1
+       PORT = 2
+
+       FIELDS = {
+         HOST => {:type => ::Thrift::Types::STRING, :name => 'host'},
+         PORT => {:type => ::Thrift::Types::I32, :name => 'port'}
+       }
+
+       def struct_fields; FIELDS; end
+
+       def validate
+       end
+
+       ::Thrift::Struct.generate_accessors self
+     end
+
+     class ServiceInstance
+       include ::Thrift::Struct, ::Thrift::Struct_Union
+       SERVICEENDPOINT = 1
+       ADDITIONALENDPOINTS = 2
+       STATUS = 3
+
+       FIELDS = {
+         SERVICEENDPOINT => {:type => ::Thrift::Types::STRUCT, :name => 'serviceEndpoint', :class => Twitter::Thrift::Endpoint},
+         ADDITIONALENDPOINTS => {:type => ::Thrift::Types::MAP, :name => 'additionalEndpoints', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRUCT, :class => Twitter::Thrift::Endpoint}},
+         STATUS => {:type => ::Thrift::Types::I32, :name => 'status', :enum_class => Twitter::Thrift::Status}
+       }
+
+       def struct_fields; FIELDS; end
+
+       def validate
+         unless @status.nil? || Twitter::Thrift::Status::VALID_VALUES.include?(@status)
+           raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field status!')
+         end
+       end
+
+       ::Thrift::Struct.generate_accessors self
+     end
+
+   end
+ end
@@ -0,0 +1,65 @@
+ #
+ # Autogenerated by Thrift
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+
+ require 'set'
+ require 'thrift'
+
+ module Twitter
+   module Thrift
+     module Status
+       DEAD = 0
+       STARTING = 1
+       ALIVE = 2
+       STOPPING = 3
+       STOPPED = 4
+       WARNING = 5
+       VALUE_MAP = {0 => "DEAD", 1 => "STARTING", 2 => "ALIVE", 3 => "STOPPING", 4 => "STOPPED", 5 => "WARNING"}
+       VALID_VALUES = Set.new([DEAD, STARTING, ALIVE, STOPPING, STOPPED, WARNING]).freeze
+     end
+
+     class Endpoint
+       include ::Thrift::Struct, ::Thrift::Struct_Union
+       HOST = 1
+       PORT = 2
+
+       FIELDS = {
+         HOST => {:type => ::Thrift::Types::STRING, :name => 'host'},
+         PORT => {:type => ::Thrift::Types::I32, :name => 'port'}
+       }
+
+       def struct_fields; FIELDS; end
+
+       def validate
+       end
+
+       ::Thrift::Struct.generate_accessors self
+     end
+
+     class ServiceInstance
+       include ::Thrift::Struct, ::Thrift::Struct_Union
+       SERVICEENDPOINT = 1
+       ADDITIONALENDPOINTS = 2
+       STATUS = 3
+
+       FIELDS = {
+         SERVICEENDPOINT => {:type => ::Thrift::Types::STRUCT, :name => 'serviceEndpoint', :class => Twitter::Thrift::Endpoint},
+         ADDITIONALENDPOINTS => {:type => ::Thrift::Types::MAP, :name => 'additionalEndpoints', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRUCT, :class => Twitter::Thrift::Endpoint}},
+         STATUS => {:type => ::Thrift::Types::I32, :name => 'status', :enum_class => Twitter::Thrift::Status}
+       }
+
+       def struct_fields; FIELDS; end
+
+       def validate
+         unless @status.nil? || Twitter::Thrift::Status::VALID_VALUES.include?(@status)
+           raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field status!')
+         end
+       end
+
+       ::Thrift::Struct.generate_accessors self
+     end
+
+   end
+ end
@@ -0,0 +1,5 @@
+ module Synapse
+   def log
+     @@log ||= Logger.new(STDOUT)
+   end
+ end
@@ -0,0 +1,200 @@
+ require 'socket'
+
+ module Synapse
+   class Haproxy
+     attr_reader :opts
+     def initialize(opts)
+       super()
+
+       %w{global defaults reload_command}.each do |req|
+         raise ArgumentError, "haproxy requires a #{req} section" if !opts.has_key?(req)
+       end
+
+       req_pairs = {
+         'do_writes' => 'config_file_path',
+         'do_socket' => 'socket_file_path',
+         'do_reloads' => 'reload_command'}
+
+       req_pairs.each do |cond, req|
+         if opts[cond]
+           raise ArgumentError, "the `#{req}` option is required when `#{cond}` is true" unless opts[req]
+         end
+       end
+
+       @opts = opts
+       @restart_required = true
+     end
+
+     def update_config(watchers)
+       # if we support updating backends, try that whenever possible
+       if @opts['do_socket']
+         update_backends(watchers) unless @restart_required
+       else
+         @restart_required = true
+       end
+
+       # generate a new config
+       new_config = generate_config(watchers)
+
+       # if we write config files, lets do that and then possibly restart
+       if @opts['do_writes']
+         write_config(new_config)
+         restart if @opts['do_reloads'] && @restart_required
+       end
+     end
+
+     # generates a new config based on the state of the watchers
+     def generate_config(watchers)
+       new_config = generate_base_config + "\n"
+       new_config << watchers.map {|w| generate_listen_stanza(w)}.join("\n")
+
+       log.debug "synapse: new haproxy config: #{new_config}"
+       return new_config
+     end
+
+     # generates the global and defaults sections of the config file
+     def generate_base_config
+       base_config = "# auto-generated by synapse at #{Time.now}\n"
+
+       %w{global defaults}.each do |section|
+         base_config << "\n#{section}\n"
+         @opts[section].each do |option|
+           base_config << "\t#{option}\n"
+         end
+       end
+
+       if @opts['extra_sections']
+         @opts['extra_sections'].each do |title, section|
+           base_config << "\n#{title}\n"
+           section.each do |option|
+             base_config << "\t#{option}\n"
+           end
+         end
+       end
+
+       return base_config
+     end
+
+     # generates an individual stanza for a particular watcher
+     def generate_listen_stanza(watcher)
+       if watcher.backends.empty?
+         log.warn "synapse: no backends found for watcher #{watcher.name}"
+         return ""
+       end
+
+       stanza = "listen #{watcher.name} localhost:#{watcher.local_port}\n"
+
+       watcher.listen.each do |line|
+         stanza << "\t#{line}\n"
+       end
+
+       watcher.backends.shuffle.each do |backend|
+         stanza << "\tserver #{backend['name']} #{backend['host']}:#{backend['port']} #{watcher.server_options}\n"
+       end
+
+       return stanza
+     end
+
+     # tries to set active backends via haproxy's stats socket
+     # because we can't add backends via the socket, we might still need to restart haproxy
+     def update_backends(watchers)
+       # first, get a list of existing servers for various backends
+       begin
+         s = UNIXSocket.new(@opts['socket_file_path'])
+         s.write('show stat;')
+         info = s.read()
+       rescue StandardError => e
+         log.warn "synapse: unhandled error reading stats socket: #{e.inspect}"
+         @restart_required = true
+         return
+       end
+
+       # parse the stats output to get current backends
+       cur_backends = {}
+       info.split("\n").each do |line|
+         next if line[0] == '#'
+
+         parts = line.split(',')
+         next if ['FRONTEND', 'BACKEND'].include?(parts[1])
+
+         cur_backends[parts[0]] ||= []
+         cur_backends[parts[0]] << parts[1]
+       end
+
+       # build a list of backends that should be enabled
+       enabled_backends = {}
+       watchers.each do |watcher|
+         enabled_backends[watcher.name] = []
+         next if watcher.backends.empty?
+
+         unless cur_backends.include? watcher.name
+           log.debug "synapse: restart required because we added new section #{watcher.name}"
+           @restart_required = true
+           return
+         end
+
+         watcher.backends.each do |backend|
+           unless cur_backends[watcher.name].include? backend['name']
+             log.debug "synapse: restart required because we have a new backend #{watcher.name}/#{backend['name']}"
+             @restart_required = true
+             return
+           end
+
+           enabled_backends[watcher.name] << backend['name']
+         end
+       end
+
+       # actually enable the enabled backends, and disable the disabled ones
+       cur_backends.each do |section, backends|
+         backends.each do |backend|
+           if enabled_backends[section].include? backend
+             command = "enable server #{section}/#{backend};"
+           else
+             command = "disable server #{section}/#{backend};"
+           end
+
+           # actually write the command to the socket
+           begin
+             s = UNIXSocket.new(@opts['socket_file_path'])
+             s.write(command)
+             output = s.read()
+           rescue StandardError => e
+             log.warn "synapse: unknown error writing to socket"
+             @restart_required = true
+             return
+           else
+             unless output == "\n"
+               log.warn "synapse: socket command #{command} failed: #{output}"
+               @restart_required = true
+               return
+             end
+           end
+         end
+       end
+     end
+
+     # writes the config
+     def write_config(new_config)
+       begin
+         old_config = File.read(@opts['config_file_path'])
+       rescue Errno::ENOENT => e
+         log.info "synapse: could not open haproxy config file at #{@opts['config_file_path']}"
+         old_config = ""
+       end
+
+       if old_config == new_config
+         return false
+       else
+         File.open(@opts['config_file_path'],'w') {|f| f.write(new_config)}
+         return true
+       end
+     end
+
+     # restarts haproxy
+     def restart
+       res = `#{opts['reload_command']}`.chomp
+       raise "failed to reload haproxy via #{opts['reload_command']}: #{res}" unless $?.success?
+       @restart_required = false
+     end
+   end
+ end
@@ -0,0 +1,44 @@
+
+ module Synapse
+   class BaseWatcher
+     attr_reader :backends, :name, :listen, :local_port, :server_options
+
+     def initialize(opts={}, synapse)
+       super()
+
+       @synapse = synapse
+
+       # set required service parameters
+       %w{name discovery local_port}.each do |req|
+         raise ArgumentError, "missing required option #{req}" unless opts[req]
+       end
+
+       @name = opts['name']
+       @discovery = opts['discovery']
+       @local_port = opts['local_port']
+
+       # optional service parameters
+       @listen = opts['listen'] || []
+       @server_options = opts['server_options'] || ""
+       @default_servers = opts['default_servers'] || []
+       @server_port_override = opts['server_port_override']
+
+       # set initial backends to default servers
+       @backends = @default_servers
+
+       validate_discovery_opts
+     end
+
+     def start
+       log.info "synapse: starting stub watcher; this means doing nothing at all!"
+     end
+
+     private
+     def validate_discovery_opts
+       raise ArgumentError, "invalid discovery method '#{@discovery['method']}' for base watcher" \
+         unless @discovery['method'] == 'base'
+
+       log.warn "synapse: warning: a stub watcher with no default servers is pretty useless" if @default_servers.empty?
+     end
+   end
+ end