swiftiply 0.6.1.1 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CONTRIBUTORS +2 -0
- data/README.md +62 -0
- data/bin/{mongrel_rails → evented_mongrel_rails} +6 -14
- data/bin/swiftiplied_mongrel_rails +246 -0
- data/bin/swiftiply +136 -116
- data/bin/swiftiply_mongrel_rails +2 -2
- data/bin/swiftiplyctl +283 -0
- data/cleanup.sh +5 -0
- data/ext/deque/extconf.rb +162 -0
- data/ext/deque/swiftcore/rubymain.cpp +435 -0
- data/ext/fastfilereader/extconf.rb +2 -2
- data/ext/fastfilereader/mapper.cpp +2 -0
- data/ext/map/extconf.rb +161 -0
- data/ext/map/rubymain.cpp +500 -0
- data/ext/splaytree/extconf.rb +161 -0
- data/ext/splaytree/swiftcore/rubymain.cpp +580 -0
- data/ext/splaytree/swiftcore/splay_map.h +635 -0
- data/ext/splaytree/swiftcore/splay_set.h +575 -0
- data/ext/splaytree/swiftcore/splay_tree.h +1127 -0
- data/external/httpclient.rb +231 -0
- data/external/package.rb +13 -13
- data/setup.rb +18 -2
- data/src/swiftcore/Swiftiply.rb +417 -773
- data/src/swiftcore/Swiftiply/backend_protocol.rb +213 -0
- data/src/swiftcore/Swiftiply/cache_base.rb +49 -0
- data/src/swiftcore/Swiftiply/cache_base_mixin.rb +52 -0
- data/src/swiftcore/Swiftiply/cluster_managers/rest_based_cluster_manager.rb +9 -0
- data/src/swiftcore/Swiftiply/cluster_protocol.rb +70 -0
- data/src/swiftcore/Swiftiply/config.rb +370 -0
- data/src/swiftcore/Swiftiply/config/rest_updater.rb +26 -0
- data/src/swiftcore/Swiftiply/constants.rb +101 -0
- data/src/swiftcore/Swiftiply/content_cache_entry.rb +44 -0
- data/src/swiftcore/Swiftiply/content_response.rb +45 -0
- data/src/swiftcore/Swiftiply/control_protocol.rb +49 -0
- data/src/swiftcore/Swiftiply/dynamic_request_cache.rb +41 -0
- data/src/swiftcore/Swiftiply/etag_cache.rb +64 -0
- data/src/swiftcore/Swiftiply/file_cache.rb +46 -0
- data/src/swiftcore/Swiftiply/hash_cache_base.rb +22 -0
- data/src/swiftcore/Swiftiply/http_recognizer.rb +267 -0
- data/src/swiftcore/Swiftiply/loggers/Analogger.rb +21 -0
- data/src/swiftcore/Swiftiply/loggers/stderror.rb +13 -0
- data/src/swiftcore/Swiftiply/mocklog.rb +10 -0
- data/src/swiftcore/Swiftiply/proxy.rb +15 -0
- data/src/swiftcore/Swiftiply/proxy_backends/keepalive.rb +286 -0
- data/src/swiftcore/Swiftiply/proxy_backends/traditional.rb +286 -0
- data/src/swiftcore/Swiftiply/proxy_backends/traditional/redis_directory.rb +87 -0
- data/src/swiftcore/Swiftiply/proxy_backends/traditional/static_directory.rb +69 -0
- data/src/swiftcore/Swiftiply/proxy_bag.rb +716 -0
- data/src/swiftcore/Swiftiply/rest_based_cluster_manager.rb +15 -0
- data/src/swiftcore/Swiftiply/splay_cache_base.rb +21 -0
- data/src/swiftcore/Swiftiply/support_pagecache.rb +6 -3
- data/src/swiftcore/Swiftiply/swiftiply_2_http_proxy.rb +7 -0
- data/src/swiftcore/Swiftiply/swiftiply_client.rb +20 -5
- data/src/swiftcore/Swiftiply/version.rb +5 -0
- data/src/swiftcore/evented_mongrel.rb +26 -8
- data/src/swiftcore/hash.rb +43 -0
- data/src/swiftcore/method_builder.rb +28 -0
- data/src/swiftcore/streamer.rb +46 -0
- data/src/swiftcore/swiftiplied_mongrel.rb +91 -23
- data/src/swiftcore/types.rb +20 -3
- data/swiftiply.gemspec +14 -8
- data/test/TC_Deque.rb +152 -0
- data/test/TC_ProxyBag.rb +147 -166
- data/test/TC_Swiftiply.rb +576 -169
- data/test/TC_Swiftiply/mongrel/evented_hello.rb +1 -1
- data/test/TC_Swiftiply/mongrel/swiftiplied_hello.rb +1 -1
- data/test/TC_Swiftiply/test_serve_static_file_xsendfile/sendfile_client.rb +27 -0
- data/test/TC_Swiftiply/test_ssl/bin/validate_ssl_capability.rb +21 -0
- data/test/TC_Swiftiply/test_ssl/test.cert +16 -0
- data/test/TC_Swiftiply/test_ssl/test.key +15 -0
- data/{bin → test/bin}/echo_client +0 -0
- metadata +136 -94
- data/README +0 -126
- data/ext/swiftiply_parse/parse.rl +0 -90
@@ -0,0 +1,286 @@
|
|
1
|
+
# Encoding:ascii-8bit
|
2
|
+
|
3
|
+
require 'swiftcore/Swiftiply/config'
|
4
|
+
# Standard style proxy.
|
5
|
+
module Swiftcore
|
6
|
+
module Swiftiply
|
7
|
+
module Proxies
|
8
|
+
class Traditional < EventMachine::Connection
|
9
|
+
Directories = {
|
10
|
+
'static' => ['swiftcore/Swiftiply/proxy_backends/traditional/static_directory.rb','::Swiftcore::Swiftiply::Proxies::TraditionalStaticDirectory'],
|
11
|
+
'redis' => ['swiftcore/Swiftiply/proxy_backends/traditional/redis_directory.rb','::Swiftcore::Swiftiply::Proxies::TraditionalRedisDirectory']
|
12
|
+
}
|
13
|
+
|
14
|
+
def self.is_a_server?
|
15
|
+
false
|
16
|
+
end
|
17
|
+
|
18
|
+
def self.parse_connection_params(config, directory)
|
19
|
+
{}
|
20
|
+
end
|
21
|
+
|
22
|
+
#
|
23
|
+
# directory: DIRECTORY_TYPE [static]
|
24
|
+
#
|
25
|
+
def self.config(conf, new_config)
|
26
|
+
directory = nil
|
27
|
+
if conf[Cdirectory]
|
28
|
+
require Directories[conf[Cdirectory]].first
|
29
|
+
directory = Directories[conf[Cdirectory]].last
|
30
|
+
end
|
31
|
+
unless directory && !directory.empty?
|
32
|
+
require Directories['static'].first
|
33
|
+
directory = Directories['static'].last
|
34
|
+
end
|
35
|
+
|
36
|
+
directory_class = Swiftcore::Swiftiply::class_by_name(directory)
|
37
|
+
|
38
|
+
owners = conf[Cincoming].sort.join('|')
|
39
|
+
hash = Digest::SHA256.hexdigest(owners).intern
|
40
|
+
config_data = {:hash => hash, :owners => owners}
|
41
|
+
|
42
|
+
Config.configure_logging(conf, config_data)
|
43
|
+
file_cache = Config.configure_file_cache(conf, config_data)
|
44
|
+
dynamic_request_cache = Config.configure_dynamic_request_cache(conf, config_data)
|
45
|
+
etag_cache = Config.configure_etag_cache(conf, config_data)
|
46
|
+
|
47
|
+
# For each incoming entry, do setup.
|
48
|
+
new_config[Cincoming] = {}
|
49
|
+
conf[Cincoming].each do |p_|
|
50
|
+
ProxyBag.logger.log(Cinfo,"Configuring incoming #{p_}") if Swiftcore::Swiftiply::log_level > 1
|
51
|
+
p = p_.intern
|
52
|
+
|
53
|
+
Config.setup_caches(new_config, config_data.merge({:p => p, :file_cache => file_cache, :dynamic_request_cache => dynamic_request_cache, :etag_cache => etag_cache}))
|
54
|
+
|
55
|
+
ProxyBag.add_backup_mapping(conf[Cbackup].intern,p) if conf.has_key?(Cbackup)
|
56
|
+
Config.configure_docroot(conf, p)
|
57
|
+
config_data[:permit_xsendfile] = Config.configure_sendfileroot(conf, p)
|
58
|
+
Config.configure_xforwardedfor(conf, p)
|
59
|
+
Config.configure_redeployable(conf, p)
|
60
|
+
Config.configure_key(conf, p, config_data)
|
61
|
+
Config.configure_staticmask(conf, p)
|
62
|
+
Config.configure_cache_extensions(conf,p)
|
63
|
+
Config.configure_cluster_manager(conf,p)
|
64
|
+
Config.configure_backends('groups', {
|
65
|
+
:config => conf,
|
66
|
+
:p => p,
|
67
|
+
:config_data => config_data,
|
68
|
+
:new_config => new_config,
|
69
|
+
:self => self,
|
70
|
+
:directory_class => directory_class,
|
71
|
+
:directory_args => [conf]})
|
72
|
+
Config.stop_unused_servers(new_config)
|
73
|
+
# directory_class.config(conf, new_config)
|
74
|
+
# Config.set_server_queue(config_data, directory_class, [conf])
|
75
|
+
end
|
76
|
+
end
|
77
|
+
|
78
|
+
# Here lies the protocol definition. A traditional proxy is super simple -- pass on what you get.
|
79
|
+
attr_accessor :associate, :id
|
80
|
+
|
81
|
+
C0rnrn = "0\r\n\r\n".freeze
|
82
|
+
Crnrn = "\r\n\r\n".freeze
|
83
|
+
|
84
|
+
def initialize(host=nil, port=nil)
|
85
|
+
@name = self.class.bname
|
86
|
+
@caching_enabled = self.class.caching
|
87
|
+
@permit_xsendfile = self.class.xsendfile
|
88
|
+
@enable_sendfile_404 = self.class.enable_sendfile_404
|
89
|
+
@host = host
|
90
|
+
@port = port
|
91
|
+
super
|
92
|
+
end
|
93
|
+
|
94
|
+
def name
|
95
|
+
@name
|
96
|
+
end
|
97
|
+
|
98
|
+
# Call setup() and add the backend to the ProxyBag queue.
|
99
|
+
|
100
|
+
def post_init
|
101
|
+
setup
|
102
|
+
end
|
103
|
+
|
104
|
+
# Setup the initial variables for receiving headers and content.
|
105
|
+
|
106
|
+
def setup
|
107
|
+
@headers = ''
|
108
|
+
@headers_completed = @dont_send_data = false
|
109
|
+
@content_sent = 0
|
110
|
+
@filter = self.class.filter
|
111
|
+
end
|
112
|
+
|
113
|
+
# Receive data from the backend process. Headers are parsed from
|
114
|
+
# the rest of the content. If a Content-Length header is present,
|
115
|
+
# that is used to determine how much data to expect. Otherwise,
|
116
|
+
# if 'Transfer-encoding: chunked' is present, assume chunked
|
117
|
+
# encoding. Otherwise just read until the connection is closed.
|
118
|
+
# SO MUCH functionality has to be duplicated and maintained between
|
119
|
+
# here and the keepalive protocol. That funcationality need to be
|
120
|
+
# refactored so that it's encapsulated better.
|
121
|
+
|
122
|
+
def receive_data data
|
123
|
+
unless @headers_completed
|
124
|
+
if data.include?(Crnrn)
|
125
|
+
@headers_completed = true
|
126
|
+
h,data = data.split(/\r\n\r\n/,2)
|
127
|
+
#@headers << h << Crnrn
|
128
|
+
if @headers.length > 0
|
129
|
+
@headers << h
|
130
|
+
else
|
131
|
+
@headers = h
|
132
|
+
end
|
133
|
+
|
134
|
+
if @headers =~ /Content-[Ll]ength: *([^\r]+)/
|
135
|
+
@content_length = $1.to_i
|
136
|
+
elsif @headers =~ /Transfer-encoding: *chunked/
|
137
|
+
@content_length = nil
|
138
|
+
else
|
139
|
+
@content_length = nil
|
140
|
+
end
|
141
|
+
|
142
|
+
if @caching_enabled && @associate && @associate.request_method == CGET && @headers =~ /Etag:/ && @headers !~ /Cache-Control: *no-/ # stupid granularity -- it's on or off, only
|
143
|
+
@do_caching = true
|
144
|
+
@cacheable_data = ''
|
145
|
+
end
|
146
|
+
|
147
|
+
if @permit_xsendfile && @headers =~ /X-[Ss]endfile: *([^\r]+)/
|
148
|
+
@associate.uri = $1
|
149
|
+
if ProxyBag.serve_static_file(@associate,ProxyBag.get_sendfileroot(@associate.name))
|
150
|
+
@dont_send_data = true
|
151
|
+
else
|
152
|
+
if @enable_sendfile_404
|
153
|
+
msg = "#{@associate.uri} could not be found."
|
154
|
+
@associate.send_data "HTTP/1.1 404 Not Found\r\nConnection: close\r\n\r\nContent-Type: text/html\r\nContent-Length: #{msg.length}\r\n\r\n#{msg}"
|
155
|
+
@associate.close_connection_after_writing
|
156
|
+
@dont_send_data = true
|
157
|
+
else
|
158
|
+
@associate.send_data (@headers + Crnrn)
|
159
|
+
end
|
160
|
+
end
|
161
|
+
else
|
162
|
+
@associate.send_data (@headers + Crnrn)
|
163
|
+
end
|
164
|
+
|
165
|
+
# If keepalive is turned on, the assumption is that it will stay
|
166
|
+
# on, unless the headers being returned indicate that the connection
|
167
|
+
# should be closed.
|
168
|
+
# So, check for a 'Connection: Closed' header.
|
169
|
+
if keepalive = @associate.keepalive
|
170
|
+
keepalive = false if @headers =~ /Connection: [Cc]lose/
|
171
|
+
if @associate_http_version == C1_0
|
172
|
+
keepalive = false unless @headers == /Connection: Keep-Alive/i
|
173
|
+
end
|
174
|
+
end
|
175
|
+
else
|
176
|
+
@headers << data
|
177
|
+
end
|
178
|
+
end
|
179
|
+
|
180
|
+
if @headers_completed
|
181
|
+
@associate.send_data data unless @dont_send_data
|
182
|
+
@cacheable_data << data if @do_caching
|
183
|
+
@content_sent += data.length
|
184
|
+
|
185
|
+
if @content_length and @content_sent >= @content_length or data[-6..-1] == C0rnrn
|
186
|
+
# If @dont_send_data is set, then the connection is going to be closed elsewhere.
|
187
|
+
unless @dont_send_data
|
188
|
+
# Check to see if keepalive is enabled.
|
189
|
+
if keepalive
|
190
|
+
@associate.reset_state
|
191
|
+
ProxyBag.remove_client(self) unless @associate
|
192
|
+
else
|
193
|
+
@associate.close_connection_after_writing
|
194
|
+
end
|
195
|
+
end
|
196
|
+
self.close_connection_after_writing
|
197
|
+
# add(path_info,path,data,etag,mtime,header)
|
198
|
+
|
199
|
+
if @do_caching && associate_name = @associate.name
|
200
|
+
ProxyBag.file_cache_map[associate_name].add(@associate.unparsed_uri,
|
201
|
+
'',
|
202
|
+
@cacheable_data,
|
203
|
+
'',
|
204
|
+
0,
|
205
|
+
@headers.scan(/^Set-Cookie:.*/).collect {|c| c =~ /: (.*)$/; $1},
|
206
|
+
@headers)
|
207
|
+
ProxyBag.dynamic_request_cache[associate_name].delete(@associate.uri)
|
208
|
+
end
|
209
|
+
end
|
210
|
+
end
|
211
|
+
# TODO: Log these errors!
|
212
|
+
rescue Exception => e
|
213
|
+
puts "Kaboom: #{e} -- #{e.backtrace.inspect}"
|
214
|
+
@associate.close_connection_after_writing if @associate
|
215
|
+
@associate = nil
|
216
|
+
self.close_connection_after_writing
|
217
|
+
end
|
218
|
+
|
219
|
+
# This is called when the backend disconnects from the proxy.
|
220
|
+
|
221
|
+
def unbind
|
222
|
+
associate_name = @associate.name
|
223
|
+
sq = ProxyBag.server_queue(ProxyBag.incoming_mapping(associate_name))
|
224
|
+
sq && sq.requeue(associate_name, @host, @port)
|
225
|
+
ProxyBag.check_for_queued_requests(@name)
|
226
|
+
if @associate
|
227
|
+
if !@associate.redeployable or @content_sent
|
228
|
+
@associate.close_connection_after_writing
|
229
|
+
else
|
230
|
+
@associate.associate = nil
|
231
|
+
@associate.setup_for_redeployment
|
232
|
+
ProxyBag.rebind_frontend_client(@associate)
|
233
|
+
end
|
234
|
+
else
|
235
|
+
# ProxyBag.remove_server(self)
|
236
|
+
end
|
237
|
+
# ProxyBag.remove_id(self)
|
238
|
+
end
|
239
|
+
|
240
|
+
def self.bname=(val)
|
241
|
+
@bname = val
|
242
|
+
end
|
243
|
+
|
244
|
+
def self.bname
|
245
|
+
@bname
|
246
|
+
end
|
247
|
+
|
248
|
+
def self.xsendfile=(val)
|
249
|
+
@xsendfile = val
|
250
|
+
end
|
251
|
+
|
252
|
+
def self.xsendfile
|
253
|
+
@xsendfile
|
254
|
+
end
|
255
|
+
|
256
|
+
def self.enable_sendfile_404=(val)
|
257
|
+
@enable_sendfile_404 = val
|
258
|
+
end
|
259
|
+
|
260
|
+
def self.enable_sendfile_404
|
261
|
+
@enable_sendfile_404
|
262
|
+
end
|
263
|
+
|
264
|
+
def self.filter=(val)
|
265
|
+
@filter = val
|
266
|
+
end
|
267
|
+
|
268
|
+
def self.filter
|
269
|
+
@filter
|
270
|
+
end
|
271
|
+
|
272
|
+
def filter
|
273
|
+
@filter
|
274
|
+
end
|
275
|
+
|
276
|
+
def self.caching
|
277
|
+
@caching
|
278
|
+
end
|
279
|
+
|
280
|
+
def self.caching=(val)
|
281
|
+
@caching = val
|
282
|
+
end
|
283
|
+
end
|
284
|
+
end
|
285
|
+
end
|
286
|
+
end
|
@@ -0,0 +1,87 @@
|
|
1
|
+
require 'redis'
|
2
|
+
|
3
|
+
# Lookup directory information from redis repo.
|
4
|
+
module Swiftcore
|
5
|
+
module Swiftiply
|
6
|
+
module Proxies
|
7
|
+
class TraditionalRedisDirectory
|
8
|
+
# servers:
|
9
|
+
# host: HOSTNAME (defaults to 127.0.0.1)
|
10
|
+
# port: PORT (defaults to 6379)
|
11
|
+
# db: Redis DB (defaults to 0)
|
12
|
+
# password: (defaults to none)
|
13
|
+
|
14
|
+
def self.config(conf,new_config)
|
15
|
+
redis_config = {}
|
16
|
+
(conf[Cservers] || {}).each {|k,v| redis_config[k.intern] = v}
|
17
|
+
@redis = Redis.new(redis_config)
|
18
|
+
rescue Exception => e
|
19
|
+
puts "Failed to connect to the Redis server using these parameters: #{redis_config.to_yaml}"
|
20
|
+
raise e
|
21
|
+
end
|
22
|
+
|
23
|
+
def self.redis
|
24
|
+
@redis
|
25
|
+
end
|
26
|
+
|
27
|
+
def self.backend_class
|
28
|
+
@backend_class
|
29
|
+
end
|
30
|
+
|
31
|
+
def self.backend_class=(val)
|
32
|
+
@backend_class = val
|
33
|
+
end
|
34
|
+
|
35
|
+
def initialize(*args)
|
36
|
+
@redis = self.class.redis
|
37
|
+
@backend_class = self.class.backend_class
|
38
|
+
end
|
39
|
+
|
40
|
+
def pop
|
41
|
+
key = ProxyBag.current_client_name
|
42
|
+
data = @redis.rpoplpush(key,"#{key}.inuse")
|
43
|
+
if data
|
44
|
+
host, port = data.split(C_colon,2)
|
45
|
+
host ||= C_localhost
|
46
|
+
port ||= C80
|
47
|
+
EventMachine.connect(host,port,@backend_class,host,port)
|
48
|
+
else
|
49
|
+
false
|
50
|
+
end
|
51
|
+
rescue Exception => e
|
52
|
+
false
|
53
|
+
end
|
54
|
+
|
55
|
+
def unshift(val);end
|
56
|
+
|
57
|
+
def push(val);end
|
58
|
+
|
59
|
+
def delete(val);end
|
60
|
+
|
61
|
+
def requeue(key, host, port)
|
62
|
+
hp = "#{host}:#{port}"
|
63
|
+
@redis.lrem("#{key}.inuse", 1, hp)
|
64
|
+
@redis.lpush(key, hp)
|
65
|
+
rescue Exception => e
|
66
|
+
# Use an EM timer to reschedule the requeue just a very short time into the future?
|
67
|
+
# The only time this will occur is if the redis server goes away.
|
68
|
+
end
|
69
|
+
|
70
|
+
def status
|
71
|
+
r = ''
|
72
|
+
keys = @redis.keys('*')
|
73
|
+
r << "#{@redis.dbsize} -- #{keys.to_yaml}"
|
74
|
+
keys.each do |k|
|
75
|
+
r << " #{k}(#{@redis.llen(k)})\n #{@redis.lrange(k,0,@redis.llen(k)).to_yaml}"
|
76
|
+
end
|
77
|
+
r
|
78
|
+
rescue Exception => e
|
79
|
+
r << self.inspect
|
80
|
+
r << e
|
81
|
+
r << e.backtrace.to_yaml
|
82
|
+
r
|
83
|
+
end
|
84
|
+
end
|
85
|
+
end
|
86
|
+
end
|
87
|
+
end
|
@@ -0,0 +1,69 @@
|
|
1
|
+
# Config looks for a list of backends which are defined statically and puts them into a queue.
|
2
|
+
module Swiftcore
|
3
|
+
module Swiftiply
|
4
|
+
module Proxies
|
5
|
+
class TraditionalStaticDirectory
|
6
|
+
# servers:
|
7
|
+
# - http://site.com:port/url
|
8
|
+
# - http://site2.com:port2/url2
|
9
|
+
def self.config(conf,new_config)
|
10
|
+
@queue = ::Swiftcore.const_defined?(:Deque) ? Swiftcore::Deque.new : []
|
11
|
+
servers = conf[Cservers]
|
12
|
+
if Array === servers
|
13
|
+
servers.each do |server|
|
14
|
+
queue.push server
|
15
|
+
end
|
16
|
+
elsif servers
|
17
|
+
queue.push servers
|
18
|
+
end
|
19
|
+
end
|
20
|
+
|
21
|
+
def self.queue
|
22
|
+
@queue
|
23
|
+
end
|
24
|
+
|
25
|
+
def self.backend_class
|
26
|
+
@backend_class
|
27
|
+
end
|
28
|
+
|
29
|
+
def self.backend_class=(val)
|
30
|
+
@backend_class = val
|
31
|
+
end
|
32
|
+
|
33
|
+
def initialize(*args)
|
34
|
+
@queue = self.class.queue
|
35
|
+
@backend_class = self.class.backend_class
|
36
|
+
end
|
37
|
+
|
38
|
+
# The queue is circular. Any element that is popped off the end is shifted back onto the front and vice versa.
|
39
|
+
def pop
|
40
|
+
server = @queue.pop
|
41
|
+
host, port = server.split(C_colon,2)
|
42
|
+
@queue.unshift server
|
43
|
+
host ||= C_localhost
|
44
|
+
port ||= C80
|
45
|
+
EventMachine.connect(host,port,@backend_class)
|
46
|
+
rescue Exception # In an ideal world, we do something useful with regard to logging/reporting this exception.
|
47
|
+
false
|
48
|
+
end
|
49
|
+
|
50
|
+
def unshift(val)
|
51
|
+
@queue.unshift val
|
52
|
+
end
|
53
|
+
|
54
|
+
def push(val)
|
55
|
+
@queue.push val
|
56
|
+
end
|
57
|
+
|
58
|
+
def delete(val)
|
59
|
+
@queue.delete val
|
60
|
+
end
|
61
|
+
|
62
|
+
def requeue(*args); end
|
63
|
+
|
64
|
+
def status
|
65
|
+
end
|
66
|
+
end
|
67
|
+
end
|
68
|
+
end
|
69
|
+
end
|
@@ -0,0 +1,716 @@
|
|
1
|
+
# Encoding:ascii-8bit
|
2
|
+
|
3
|
+
module Swiftcore
|
4
|
+
module Swiftiply
|
5
|
+
|
6
|
+
# The ProxyBag is a class that holds the client and the server queues,
|
7
|
+
# and that is responsible for managing them, matching them, and expiring
|
8
|
+
# them, if necessary.
|
9
|
+
|
10
|
+
class ProxyBag
|
11
|
+
|
12
|
+
attr_reader :keepalive_queue
|
13
|
+
|
14
|
+
@client_q = Hash.new {|h,k| h[k] = Deque.new}
|
15
|
+
#@client_q = Hash.new {|h,k| h[k] = []}
|
16
|
+
# @server_q = Hash.new {|h,k| h[k] = Deque.new}
|
17
|
+
@server_q = {}
|
18
|
+
@backup_map = {}
|
19
|
+
@worker_request_semaphores = {}
|
20
|
+
@keepalive_q = Deque.new
|
21
|
+
@logger = nil
|
22
|
+
@ctime = Time.now
|
23
|
+
@dateheader = "Date: #{@ctime.httpdate}\r\n\r\n"
|
24
|
+
@server_unavailable_timeout = 10
|
25
|
+
@id_map = {}
|
26
|
+
@reverse_id_map = {}
|
27
|
+
@incoming_map = {}
|
28
|
+
@docroot_map = {}
|
29
|
+
@sendfileroot_map = {}
|
30
|
+
@log_map = {}
|
31
|
+
@redeployable_map = {}
|
32
|
+
@file_cache_map = {}
|
33
|
+
@dynamic_request_map = {}
|
34
|
+
@etag_cache_map = {}
|
35
|
+
@x_forwarded_for = {}
|
36
|
+
@keepalive = {}
|
37
|
+
@static_mask = {}
|
38
|
+
@keys = {}
|
39
|
+
@filters = {}
|
40
|
+
@cluster_managers = {}
|
41
|
+
@demanding_clients = Hash.new {|h,k| h[k] = Deque.new}
|
42
|
+
@hitcounters = Hash.new {|h,k| h[k] = 0}
|
43
|
+
# Kids, don't do this at home. It's gross.
|
44
|
+
@typer = MIME::Types.instance_variable_get('@__types__')
|
45
|
+
|
46
|
+
MockLog = Swiftcore::Swiftiply::MockLog.new
|
47
|
+
|
48
|
+
class << self
|
49
|
+
|
50
|
+
def client_q
|
51
|
+
@client_q
|
52
|
+
end
|
53
|
+
|
54
|
+
def now
|
55
|
+
@ctime
|
56
|
+
end
|
57
|
+
|
58
|
+
# Setter and Getter accessors for the logger.
|
59
|
+
def logger=(val)
|
60
|
+
@logger = val
|
61
|
+
end
|
62
|
+
|
63
|
+
def logger
|
64
|
+
@logger
|
65
|
+
end
|
66
|
+
|
67
|
+
def log_level=(val)
|
68
|
+
@log_level = val
|
69
|
+
end
|
70
|
+
|
71
|
+
def log_level
|
72
|
+
@log_level
|
73
|
+
end
|
74
|
+
|
75
|
+
# Returns the access key. If an access key is set, then all new
|
76
|
+
# backend connections must send the correct access key before
|
77
|
+
# being added to the cluster as a valid backend.
|
78
|
+
|
79
|
+
def get_key(h)
|
80
|
+
@keys[h] || C_empty
|
81
|
+
end
|
82
|
+
|
83
|
+
def set_key(h,val)
|
84
|
+
@keys[h] = val
|
85
|
+
end
|
86
|
+
|
87
|
+
def add_id(who,what)
|
88
|
+
@id_map[who] = what
|
89
|
+
@reverse_id_map[what] = who
|
90
|
+
end
|
91
|
+
|
92
|
+
def remove_id(who)
|
93
|
+
what = @id_map.delete(who)
|
94
|
+
@reverse_id_map.delete(what)
|
95
|
+
end
|
96
|
+
|
97
|
+
def incoming_mapping(name)
|
98
|
+
@incoming_map[name]
|
99
|
+
end
|
100
|
+
|
101
|
+
def add_incoming_mapping(hashcode,name)
|
102
|
+
@incoming_map[name] = hashcode
|
103
|
+
end
|
104
|
+
|
105
|
+
def remove_incoming_mapping(name)
|
106
|
+
@incoming_map.delete(name)
|
107
|
+
end
|
108
|
+
|
109
|
+
def set_server_queue(hashcode, klass, data)
|
110
|
+
@server_q[hashcode] = klass.new(*data)
|
111
|
+
end
|
112
|
+
|
113
|
+
def server_queue(hashcode)
|
114
|
+
@server_q[hashcode]
|
115
|
+
end
|
116
|
+
|
117
|
+
def backup_mapping(name)
|
118
|
+
@backup_map[name]
|
119
|
+
end
|
120
|
+
|
121
|
+
def add_backup_mapping(backup,name)
|
122
|
+
@backup_map[name] = backup
|
123
|
+
end
|
124
|
+
|
125
|
+
def remove_backup_mapping(name)
|
126
|
+
@backup_map.delete(map)
|
127
|
+
end
|
128
|
+
|
129
|
+
def add_docroot(path,name)
|
130
|
+
@docroot_map[name] = File.expand_path(path)
|
131
|
+
end
|
132
|
+
|
133
|
+
def remove_docroot(name)
|
134
|
+
@docroot_map.delete(name)
|
135
|
+
end
|
136
|
+
|
137
|
+
def add_sendfileroot(path,name)
|
138
|
+
@sendfileroot_map[name] = path
|
139
|
+
end
|
140
|
+
|
141
|
+
def remove_sendfileroot(name)
|
142
|
+
@sendfileroot_map.delete(name)
|
143
|
+
end
|
144
|
+
|
145
|
+
def get_sendfileroot(name)
|
146
|
+
@sendfileroot_map[name]
|
147
|
+
end
|
148
|
+
|
149
|
+
def add_redeployable(limit,name)
|
150
|
+
@redeployable_map[name] = limit
|
151
|
+
end
|
152
|
+
|
153
|
+
def remove_redeployable(name)
|
154
|
+
@redeployable_map.delete(name)
|
155
|
+
end
|
156
|
+
|
157
|
+
def add_log(log,name)
|
158
|
+
@log_map[name] = [log,1]
|
159
|
+
end
|
160
|
+
|
161
|
+
def log(name)
|
162
|
+
(@log_map[name] && @log_map[name].first) || MockLog
|
163
|
+
end
|
164
|
+
|
165
|
+
def remove_log(name)
|
166
|
+
@log_map[name].close if @log_map[name].respond_to? :close
|
167
|
+
@log_map.delete(name)
|
168
|
+
end
|
169
|
+
|
170
|
+
def set_level(level,name)
|
171
|
+
@log_map[name][1] = level
|
172
|
+
end
|
173
|
+
|
174
|
+
def level(name)
|
175
|
+
(@log_map[name] && @log_map[name].last) || 0
|
176
|
+
end
|
177
|
+
|
178
|
+
def add_file_cache(cache,name)
|
179
|
+
@file_cache_map[name] = cache
|
180
|
+
end
|
181
|
+
|
182
|
+
def file_cache_map
|
183
|
+
@file_cache_map
|
184
|
+
end
|
185
|
+
|
186
|
+
def add_dynamic_request_cache(cache,name)
|
187
|
+
@dynamic_request_map[name] = cache
|
188
|
+
end
|
189
|
+
|
190
|
+
def dynamic_request_cache
|
191
|
+
@dynamic_request_map
|
192
|
+
end
|
193
|
+
|
194
|
+
def add_etag_cache(cache,name)
|
195
|
+
@etag_cache_map[name] = cache
|
196
|
+
end
|
197
|
+
|
198
|
+
def x_forwarded_for(name)
|
199
|
+
@x_forwarded_for[name]
|
200
|
+
end
|
201
|
+
|
202
|
+
def set_x_forwarded_for(name)
|
203
|
+
@x_forwarded_for[name] = true
|
204
|
+
end
|
205
|
+
|
206
|
+
def unset_x_forwarded_for(name)
|
207
|
+
@x_forwarded_for[name] = false
|
208
|
+
end
|
209
|
+
|
210
|
+
def add_static_mask(regexp, name)
|
211
|
+
@static_mask[name] = regexp
|
212
|
+
end
|
213
|
+
|
214
|
+
def static_mask(name)
|
215
|
+
@static_mask[name]
|
216
|
+
end
|
217
|
+
|
218
|
+
def remove_static_mask(name)
|
219
|
+
@static_mask.delete(name)
|
220
|
+
end
|
221
|
+
|
222
|
+
def add_filter(filter, name)
|
223
|
+
(@filters[name] ||= []) << filter
|
224
|
+
end
|
225
|
+
|
226
|
+
def filter(name)
|
227
|
+
@filters[name]
|
228
|
+
end
|
229
|
+
|
230
|
+
def remove_filters(name)
|
231
|
+
@filters[name].clear if @filters[name]
|
232
|
+
end
|
233
|
+
|
234
|
+
def set_server_queue_as_filter_queue(name,klass)
|
235
|
+
@server_q[name] = Hash.new {|h,k| h[k] = klass.new}
|
236
|
+
end
|
237
|
+
|
238
|
+
def worker_request_config(name)
|
239
|
+
@worker_request_config[name]
|
240
|
+
end
|
241
|
+
|
242
|
+
def add_worker_request_config(name, config)
|
243
|
+
@worker_request_config[name] = config
|
244
|
+
end
|
245
|
+
|
246
|
+
def remove_worker_request_config(name)
|
247
|
+
@worker_request_config.delete(name)
|
248
|
+
end
|
249
|
+
|
250
|
+
def add_keepalive(timeout, name)
|
251
|
+
@keepalive[name] = timeout == 0 ? false : timeout
|
252
|
+
end
|
253
|
+
|
254
|
+
def keepalive(name)
|
255
|
+
@keepalive[name]
|
256
|
+
end
|
257
|
+
|
258
|
+
def remove_keepalive(name)
|
259
|
+
@keepalive[name] = false
|
260
|
+
end
|
261
|
+
|
262
|
+
def add_cluster_manager(cluster_manager_params, name)
|
263
|
+
@cluster_managers[name] = cluster_manager_params
|
264
|
+
end
|
265
|
+
|
266
|
+
def cluster_manager(name)
|
267
|
+
@cluster_managers[name]
|
268
|
+
end
|
269
|
+
|
270
|
+
def remove_cluster_manager(name)
|
271
|
+
@cluster_managers.delete(name)
|
272
|
+
end
|
273
|
+
|
274
|
+
# Sets the default proxy destination, if requests are received
|
275
|
+
# which do not match a defined destination.
|
276
|
+
|
277
|
+
def default_name
|
278
|
+
@default_name
|
279
|
+
end
|
280
|
+
|
281
|
+
def default_name=(val)
|
282
|
+
@default_name = val
|
283
|
+
end
|
284
|
+
|
285
|
+
# This timeout is the amount of time a connection will sit in queue
|
286
|
+
# waiting for a backend to process it. A client connection that
|
287
|
+
# sits for longer than this timeout receives a 503 response and
|
288
|
+
# is dropped.
|
289
|
+
|
290
|
+
def server_unavailable_timeout
|
291
|
+
@server_unavailable_timeout
|
292
|
+
end
|
293
|
+
|
294
|
+
def server_unavailable_timeout=(val)
|
295
|
+
@server_unavailable_timeout = val
|
296
|
+
end
|
297
|
+
|
298
|
+
def current_client_name
|
299
|
+
@current_client_name
|
300
|
+
end
|
301
|
+
|
302
|
+
# The chunked_encoding_threshold is a file size limit. Files
|
303
|
+
# which fall below this limit are sent in one chunk of data.
|
304
|
+
# Files which hit or exceed this limit are delivered via chunked
|
305
|
+
# encoding. This enforces a maximum threshold of 32k.
|
306
|
+
|
307
|
+
def chunked_encoding_threshold
|
308
|
+
@chunked_enconding_threshold || 32768
|
309
|
+
end
|
310
|
+
|
311
|
+
def chunked_encoding_threshold=(val)
|
312
|
+
@chunked_encoding_threshold = val > 10485760 ? 10485760 : val
|
313
|
+
end
|
314
|
+
|
315
|
+
def cache_threshold
|
316
|
+
@cache_threshold || 32768
|
317
|
+
end
|
318
|
+
|
319
|
+
def cache_threshold=(val)
|
320
|
+
@cache_threshold = val > 256*1024 ? 256*1024 : val
|
321
|
+
end
|
322
|
+
|
323
|
+
# Swiftiply maintains caches of small static files, etags, and
|
324
|
+
# dynamnic request paths for each cluster of backends.
|
325
|
+
# A timer is created when each cache is created, to do the
|
326
|
+
# initial update. Thereafer, the verification method on the
|
327
|
+
# cache returns the number of seconds to wait before running
|
328
|
+
# again.
|
329
|
+
|
330
|
+
def verify_cache(cache)
|
331
|
+
log(cache.owner_hash).log(Cinfo,"Checking #{cache.class.name}(#{cache.vqlength}/#{cache.length}) for #{cache.owners}") if level(cache.owner_hash) > 2
|
332
|
+
new_interval = cache.check_verification_queue
|
333
|
+
log(cache.owner_hash).log(Cinfo," Next #{cache.class.name} check in #{new_interval} seconds") if level(cache.owner_hash) > 2
|
334
|
+
EventMachine.add_timer(new_interval) do
|
335
|
+
verify_cache(cache)
|
336
|
+
end
|
337
|
+
end
|
338
|
+
|
339
|
+
# Handle static files. It employs an extension to efficiently
|
340
|
+
# handle large files, and depends on an addition to
|
341
|
+
# EventMachine, send_file_data(), to efficiently handle small
|
342
|
+
# files. In my tests, it streams in excess of 120 megabytes of
|
343
|
+
# data per second for large files, and does as much as 25000
|
344
|
+
# requests per second with small files (i.e. under 4k). I think
|
345
|
+
# this can still be improved upon for small files.
|
346
|
+
#
|
347
|
+
# This code is damn ugly.
|
348
|
+
|
349
|
+
def serve_static_file(clnt,docroot = nil)
|
350
|
+
request_method = clnt.request_method
|
351
|
+
|
352
|
+
# Only GET and HEAD requests can return a file.
|
353
|
+
if request_method == CGET || request_method == CHEAD
|
354
|
+
path_info = clnt.uri
|
355
|
+
client_name = clnt.name
|
356
|
+
docroot ||= @docroot_map[client_name]
|
357
|
+
filecache = @file_cache_map[client_name]
|
358
|
+
|
359
|
+
# If it is in the file cache...
|
360
|
+
data = filecache[path_info] || filecache[clnt.unparsed_uri]
|
361
|
+
if data && (data[4].nil? || clnt.header_data == data[4])
|
362
|
+
none_match = clnt.none_match
|
363
|
+
same_response = case
|
364
|
+
when request_method == CHEAD then false
|
365
|
+
when none_match && none_match == C_asterisk then false
|
366
|
+
when none_match && !none_match.strip.split(/\s*,\s*/).include?(data[1]) then false
|
367
|
+
else none_match
|
368
|
+
end
|
369
|
+
if same_response
|
370
|
+
clnt.send_data "#{C_304}#{clnt.connection_header}Content-Length: 0\r\n#{@dateheader}"
|
371
|
+
owner_hash = filecache.owner_hash
|
372
|
+
log(owner_hash).log(Cinfo,"#{Socket::unpack_sockaddr_in(clnt.get_peername || UnknownSocket).last} \"GET #{path_info} HTTP/#{clnt.http_version}\" 304 -") if level(owner_hash) > 1
|
373
|
+
else
|
374
|
+
unless request_method == CHEAD
|
375
|
+
clnt.send_data "#{data.last}#{clnt.connection_header}#{@dateheader}#{data.first}"
|
376
|
+
owner_hash = filecache.owner_hash
|
377
|
+
log(owner_hash).log(Cinfo,"#{Socket::unpack_sockaddr_in(clnt.get_peername || UnknownSocket).last} \"GET #{path_info} HTTP/#{clnt.http_version}\" 200 #{data.first.length}") if level(owner_hash) > 1
|
378
|
+
else
|
379
|
+
clnt.send_data "#{data.last}#{clnt.connection_header}#{@dateheader}"
|
380
|
+
owner_hash = filecache.owner_hash
|
381
|
+
log(owner_hash).log(Cinfo,"#{Socket::unpack_sockaddr_in(clnt.get_peername || UnknownSocket).last} \"HEAD #{path_info} HTTP/#{clnt.http_version}\" 200 -") if level(owner_hash) > 1
|
382
|
+
end
|
383
|
+
end
|
384
|
+
|
385
|
+
unless clnt.keepalive
|
386
|
+
clnt.close_connection_after_writing
|
387
|
+
else
|
388
|
+
clnt.reset_state
|
389
|
+
end
|
390
|
+
|
391
|
+
true
|
392
|
+
elsif path = find_static_file(docroot,path_info,client_name)
|
393
|
+
#TODO: There is a race condition here between when we detect
|
394
|
+
# whether the file is there, and when we start to deliver it.
|
395
|
+
# It'd be nice to handle an exception when trying to read the file
|
396
|
+
# in a graceful way, by falling out as if no static file had been
|
397
|
+
# found. That way, if the file is deleted between detection and
|
398
|
+
# the start of delivery, such as might happen when delivering
|
399
|
+
# files out of some sort of page cache, it can be handled in a
|
400
|
+
# reasonable manner. This should be easily doable, so DO IT SOON!
|
401
|
+
none_match = clnt.none_match
|
402
|
+
etag,mtime = @etag_cache_map[client_name].etag_mtime(path)
|
403
|
+
same_response = nil
|
404
|
+
same_response = case
|
405
|
+
when request_method == CHEAD then false
|
406
|
+
when none_match && none_match == C_asterisk then false
|
407
|
+
when none_match && !none_match.strip.split(/\s*,\s*/).include?(etag) then false
|
408
|
+
else none_match
|
409
|
+
end
|
410
|
+
|
411
|
+
if same_response
|
412
|
+
clnt.send_data "#{C_304}#{clnt.connection_header}Content-Length: 0\r\n#{@dateheader}"
|
413
|
+
|
414
|
+
unless clnt.keepalive
|
415
|
+
clnt.close_connection_after_writing
|
416
|
+
else
|
417
|
+
clnt.reset_state
|
418
|
+
end
|
419
|
+
|
420
|
+
owner_hash = filecache.owner_hash
|
421
|
+
log(owner_hash).log(Cinfo,"#{Socket::unpack_sockaddr_in(clnt.get_peername || UnknownSocket).last} \"GET #{path_info} HTTP/#{clnt.http_version}\" 304 -") if level(owner_hash) > 1
|
422
|
+
else
|
423
|
+
ct = @typer.simple_type_for(path) || Caos
|
424
|
+
fsize = File.size(path)
|
425
|
+
|
426
|
+
header_line = "HTTP/1.1 200 OK\r\nETag: #{etag}\r\nContent-Type: #{ct}\r\nContent-Length: #{fsize}\r\n"
|
427
|
+
|
428
|
+
fd = nil
|
429
|
+
if fsize < @chunked_encoding_threshold
|
430
|
+
File.open(path) {|fh| fd = fh.sysread(fsize)}
|
431
|
+
clnt.send_data "#{header_line}#{clnt.connection_header}#{@dateheader}"
|
432
|
+
unless request_method == CHEAD
|
433
|
+
if fsize < 32768
|
434
|
+
clnt.send_file_data path
|
435
|
+
else
|
436
|
+
clnt.send_data fd
|
437
|
+
end
|
438
|
+
end
|
439
|
+
|
440
|
+
unless clnt.keepalive
|
441
|
+
clnt.close_connection_after_writing
|
442
|
+
else
|
443
|
+
clnt.reset_state
|
444
|
+
end
|
445
|
+
|
446
|
+
elsif clnt.http_version != C1_0 && fsize > @chunked_encoding_threshold
|
447
|
+
clnt.send_data "HTTP/1.1 200 OK\r\n#{clnt.connection_header}ETag: #{etag}\r\nContent-Type: #{ct}\r\nTransfer-Encoding: chunked\r\n#{@dateheader}"
|
448
|
+
EM::Deferrable.future(clnt.stream_file_data(path, :http_chunks=>true)) {clnt.close_connection_after_writing} unless request_method == CHEAD
|
449
|
+
else
|
450
|
+
clnt.send_data "#{header_line}#{clnt.connection_header}#{@dateheader}"
|
451
|
+
EM::Deferrable.future(clnt.stream_file_data(path, :http_chunks=>false)) {clnt.close_connection_after_writing} unless request_method == CHEAD
|
452
|
+
end
|
453
|
+
|
454
|
+
filecache.add(path_info, path, fd || File.read(path),etag,mtime,nil,header_line) if fsize < @cache_threshold
|
455
|
+
|
456
|
+
owner_hash = filecache.owner_hash
|
457
|
+
log(owner_hash).log(Cinfo,"#{Socket::unpack_sockaddr_in(clnt.get_peername || UnknownSocket).last} \"#{request_method} #{path_info} HTTP/#{clnt.http_version}\" 200 #{request_method == CHEAD ? C_empty : fsize}") if level(owner_hash) > 1
|
458
|
+
end
|
459
|
+
true
|
460
|
+
end
|
461
|
+
else
|
462
|
+
false
|
463
|
+
end
|
464
|
+
# The exception is going to be eaten here, because some
|
465
|
+
# dumb file IO error shouldn't take Swiftiply down.
|
466
|
+
rescue Object => e
|
467
|
+
puts "KABOOM: #{e}\n#{e.backtrace.inspect}"
|
468
|
+
@logger.log('error',"Failed request for #{docroot.inspect}/#{path.inspect} -- #{e} @ #{e.backtrace.inspect}") if @log_level > 0
|
469
|
+
|
470
|
+
# TODO: This is uncivilized; if there is an unexpected error, a reasonable response MUST be returned.
|
471
|
+
clnt.close_connection_after_writing
|
472
|
+
false
|
473
|
+
end
|
474
|
+
|
475
|
+
# Determine if the requested file, in the given docroot, exists
# and is a file (i.e. not a directory).
#
# Returns the joined filesystem path when the target exists, is a
# regular file, resolves inside the docroot, and is not excluded by
# the client's static mask; returns nil otherwise.
#
# If Rails style page caching is enabled, this method will be
# dynamically replaced by a more sophisticated version.
def find_static_file(docroot,path_info,client_name)
  return unless docroot
  path = File.join(docroot,path_info)
  # Must exist and be a regular file (not a directory).
  return nil unless FileTest.exist?(path) and FileTest.file?(path)
  # Containment check: the expanded path must start with the docroot,
  # defeating "../" traversal in path_info.
  # NOTE(review): a bare prefix test lets "/docrootevil" pass for
  # docroot "/docroot"; confirm docroot always ends with a separator,
  # or tighten this to a component-wise comparison.
  return nil unless File.expand_path(path).index(docroot) == 0
  # Honor any configured static mask (regex of paths that must never
  # be served statically) for this client.
  mask = static_mask(client_name)
  return nil if mask and path =~ mask
  path
end
|
486
|
+
|
487
|
+
# Pushes a front end client (web browser) into the queue of
# clients waiting to be serviced if there's no server available
# to handle it right now.
#
# clnt   - the front-end connection object
# data_q - the queue of pending request data for this client
# data   - the raw request bytes received so far (may be mutated in
#          place below to inject an X-FORWARDED-FOR header)
def add_frontend_client(clnt,data_q,data)
  clnt.create_time = @ctime

  # Initialize parameters relevant to redeployable requests, if this client
  # has them enabled.
  clnt.data_pos = clnt.data_len = 0 if clnt.redeployable = @redeployable_map[clnt.name]

  uri = clnt.uri
  name = clnt.name
  drm = @dynamic_request_map[name]
  # Order matters here: serve_static_file is attempted only when the URI
  # is not already cached as dynamic, and when it succeeds (returns true)
  # it has fully handled the response, so the rest of this method is
  # skipped.
  if drm[uri] || !serve_static_file(clnt)
    # It takes two requests to add it to the dynamic verification
    # queue. So, go from nil to false, then from false to
    # insertion into the queue.
    unless drmval = drm[uri]
      if drmval == false
        drm[uri] = drm.add_to_verification_queue(uri)
        log(drm.owner_hash).log(Cinfo,"Adding request #{uri} to dynamic request cache") if level(drm.owner_hash) > 2
      else
        drm[uri] = false
      end
    end

    # A lot of sites won't need to check X-FORWARDED-FOR, so
    # we'll only take the time to munge the headers to add
    # it if the config calls for it.
    if x_forwarded_for(clnt.name) and peername = clnt.get_peername
      data.sub!(/\r\n\r\n/,"\r\nX-FORWARDED-FOR: #{Socket::unpack_sockaddr_in(peername).last}\r\n\r\n")
    end

    data_q.unshift data
    unless match_client_to_server_now(clnt)
      # URIs matching this pattern address a specific backend worker
      # (IOWA session-style ids -- see match_client_to_server_now); such
      # clients wait in a per-worker "demanding" queue ($1 is the worker
      # id captured by the regex) instead of the general queue.
      if clnt.uri =~ /\w+-\w+-\w+\.\w+\.[\w\.]+-(\w+)?$/
        @demanding_clients[$1].unshift clnt
      else
        @client_q[@incoming_map[name]].unshift(clnt)
      end
    end
  end
end
|
531
|
+
|
532
|
+
# Re-enqueues a client that is being rebound (e.g. a redeployable
# request whose backend went away): resets its timing/position
# bookkeeping, then either hands it straight to a server or parks it
# on the appropriate waiting queue.
def rebind_frontend_client(clnt)
  clnt.create_time = @ctime
  clnt.data_pos = clnt.data_len = 0

  return if match_client_to_server_now(clnt)

  #if $& ####
  if clnt.uri =~ /\w+-\w+-\w+\.\w+\.[\w\.]+-(\w+)?$/
    # Worker-specific (session-style) URI: wait on that worker's queue.
    @demanding_clients[$1].unshift clnt
  else
    @client_q[@incoming_map[clnt.name]].unshift(clnt)
  end
end
|
545
|
+
|
546
|
+
# Pushes a backend server into the queue of servers waiting for
# a client to service if there are no clients waiting to be
# serviced.
def add_server srvr
  if f = srvr.filter
    #q[f].unshift(srvr) unless match_server_to_client_now(srvr)
    # NOTE(review): `q` is not defined anywhere visible in this scope;
    # this branch will raise NameError the first time a server with a
    # filter is added. It presumably should index into @server_q
    # (compare match_client_to_server_now, which reads
    # @server_q[@incoming_map[client.name][f]]) -- confirm the intended
    # queue and fix.
    q[f].unshift(srvr)
  else
    #@server_q[srvr.name].unshift(srvr) unless match_server_to_client_now(srvr)
    @server_q[srvr.name].unshift(srvr)
  end
end
|
559
|
+
|
560
|
+
# Deletes the provided server from the server queue.
def remove_server(server)
  @server_q[server.name].delete(server)
end
|
565
|
+
|
566
|
+
# Removes the named client from the client queue.
# TODO: Try replacing this with ...something. Performance
# here has to be bad when the list is long.
def remove_client(client)
  @client_q[client.name].delete(client)
end
|
573
|
+
|
574
|
+
# Tries to match the client (passed as an argument) to a
# server.
#
# Returns true when the client was handed to a backend server,
# false when no suitable server is currently available.
def match_client_to_server_now(client)
  @current_client_name = client.name
  hash = @incoming_map[@current_client_name]

  # BUG FIX: sq must be initialized before the block below. A variable
  # first assigned inside a block is block-local in Ruby, so the
  # original assignment to sq inside #each never escaped the block and
  # the filter-selected queue was silently discarded (sq ||= always
  # won). Declaring it here makes the inner assignment visible.
  sq = nil
  if outgoing_filters = @filters[hash]
    outgoing_filters.each do |f|
      # This is inefficient if there are a lot of filters. Maybe instead
      # of a regex, filters need something faster/more basic, like a
      # trie that just does prefix matching?
      if client.uri =~ f
        sq = @server_q[@incoming_map[client.name][f]]
        break
      end
    end
  end

  # Fall back to the client's default server queue when no filter matched.
  sq ||= @server_q[hash]

  # 0b9b883b-552f2e61-693d1970.a.1.5-7f0000015a97
  # URIs shaped like the example above name a specific worker ($1 is the
  # worker id); such requests may only be serviced by that worker.
  if client.uri =~ /\w+-\w+-\w+\.\w+\.[\w\.]+-(\w+)?$/
    if sidx = sq.index(@reverse_id_map[$1])
      server = sq[sidx]
      sq.delete_at(sidx)
      #server = sq.slice!(sidx,1)
      server.associate = client
      client.associate = server
      client.push
      true
    else
      # This is an IOWA session request, but the desired worker is busy.
      false
    end
  elsif server = sq.pop
    server.associate = client
    client.associate = server
    client.push
    true
  else
    # There are no available workers. If a cluster manager is configured
    # for this site, record a request for more resources.
    @worker_request_semaphores[hash] = [client.name, C_plus] if @cluster_managers.has_key?(hash)
    false
  end
end
|
620
|
+
|
621
|
+
# Tries to match the server (passed as an argument) to a
# client.
#
# Clients demanding this specific worker take priority over the
# general waiting queue. Returns true when a client was associated,
# false when both queues were empty.
def match_server_to_client_now(server)
  client = @demanding_clients[server.id].pop || @client_q[server.name].pop
  return false unless client

  server.associate = client
  client.associate = server
  client.push
  true
end
|
639
|
+
|
640
|
+
# Roughly one tick in ten, sweep the waiting-client queues; then
# reschedule this method on the next reactor tick so the sweep keeps
# running for the life of the process.
def do_and_requeue_recheck_or_expire_clients
  sweep_now = rand > 0.9
  recheck_or_expire_clients if sweep_now
  EventMachine.next_tick { do_and_requeue_recheck_or_expire_clients }
end
|
644
|
+
|
645
|
+
# Walk through the waiting clients if there is no server
# available to process clients and expire any clients that
# have been waiting longer than @server_unavailable_timeout
# seconds. Clients which are expired will receive a 503
# response. If this is happening, either you need more
# backend processes, or your @server_unavailable_timeout is
# too short.
def recheck_or_expire_clients
  if @server_q.any?
    now = Time.now
    # The general queue and the worker-specific ("demanding") queues get
    # identical treatment; the duplicated loop bodies of the original
    # are factored into one helper.
    expire_or_requeue_clients(@client_q, now)
    expire_or_requeue_clients(@demanding_clients, now)
  end
end

# Drains each queue in queue_map: a client that has waited longer than
# @server_unavailable_timeout gets a 503; otherwise it is re-matched to
# a server. When a client cannot be matched it is pushed back and that
# queue's scan stops (no server is free, so later clients can't match
# either).
def expire_or_requeue_clients(queue_map, now)
  queue_map.each_key do |name|
    while c = queue_map[name].pop
      if (now - c.create_time) >= @server_unavailable_timeout
        c.send_503_response
      elsif !match_client_to_server_now(c)
        queue_map[name].push c
        break
      end
    end
  end
end
|
678
|
+
|
679
|
+
# After a worker becomes available, try to service one waiting client
# for the named site; if it still cannot be matched, put it back on
# the queue.
def check_for_queued_requests(client_name)
  waiting = @client_q[client_name].pop
  return unless waiting

  @client_q[client_name].push(waiting) unless match_client_to_server_now(waiting)
end
|
686
|
+
|
687
|
+
# This is called by a periodic timer once a second to update
# the time.
def update_ctime
  @ctime = Time.now
  # Splice the fresh timestamp into the shared Date header string in
  # place rather than rebuilding it. C_date_header_range is presumably
  # the character range that holds the date text within @dateheader
  # (defined elsewhere in this file) -- responses interpolate
  # @dateheader directly, so this one mutation updates them all.
  @dateheader[C_date_header_range] = @ctime.httpdate
end
|
694
|
+
|
695
|
+
# Run through the list of sites that encountered a situation where
# there were no available workers to handle a request, and fire off
# a request for more resources.
# Note that this is only an advisory request -- there is no requirement
# for anything to actually deploy more resources in response to this.
#
# In an ideal world, there's a cluster manager that can receive this
# request, examine the current load situation, and do _something_ to
# deploy more requests if they are available.
def request_worker_resources
  # Iterate over a snapshot of the keys: the original iterated the hash
  # with #each while deleting from it, and mutating a Hash during
  # iteration is unsafe.
  @worker_request_semaphores.keys.each do |name|
    request = @worker_request_semaphores[name]
    cluster_manager = @cluster_managers[name]
    params = cluster_manager[:params] # This needs to be more sophisticated so that details from the request can get inserted dynamically.
    # Drop the semaphore only once the manager accepts the request;
    # otherwise it stays queued for the next sweep.
    @worker_request_semaphores.delete(name) if cluster_manager[:class].call(cluster_manager[:callsite], request, params)
  end
end
|
712
|
+
|
713
|
+
end
|
714
|
+
end
|
715
|
+
end
|
716
|
+
end
|