swiftiply 0.6.1.1 → 1.0.0

Files changed (75)
  1. checksums.yaml +7 -0
  2. data/CONTRIBUTORS +2 -0
  3. data/README.md +62 -0
  4. data/bin/{mongrel_rails → evented_mongrel_rails} +6 -14
  5. data/bin/swiftiplied_mongrel_rails +246 -0
  6. data/bin/swiftiply +136 -116
  7. data/bin/swiftiply_mongrel_rails +2 -2
  8. data/bin/swiftiplyctl +283 -0
  9. data/cleanup.sh +5 -0
  10. data/ext/deque/extconf.rb +162 -0
  11. data/ext/deque/swiftcore/rubymain.cpp +435 -0
  12. data/ext/fastfilereader/extconf.rb +2 -2
  13. data/ext/fastfilereader/mapper.cpp +2 -0
  14. data/ext/map/extconf.rb +161 -0
  15. data/ext/map/rubymain.cpp +500 -0
  16. data/ext/splaytree/extconf.rb +161 -0
  17. data/ext/splaytree/swiftcore/rubymain.cpp +580 -0
  18. data/ext/splaytree/swiftcore/splay_map.h +635 -0
  19. data/ext/splaytree/swiftcore/splay_set.h +575 -0
  20. data/ext/splaytree/swiftcore/splay_tree.h +1127 -0
  21. data/external/httpclient.rb +231 -0
  22. data/external/package.rb +13 -13
  23. data/setup.rb +18 -2
  24. data/src/swiftcore/Swiftiply.rb +417 -773
  25. data/src/swiftcore/Swiftiply/backend_protocol.rb +213 -0
  26. data/src/swiftcore/Swiftiply/cache_base.rb +49 -0
  27. data/src/swiftcore/Swiftiply/cache_base_mixin.rb +52 -0
  28. data/src/swiftcore/Swiftiply/cluster_managers/rest_based_cluster_manager.rb +9 -0
  29. data/src/swiftcore/Swiftiply/cluster_protocol.rb +70 -0
  30. data/src/swiftcore/Swiftiply/config.rb +370 -0
  31. data/src/swiftcore/Swiftiply/config/rest_updater.rb +26 -0
  32. data/src/swiftcore/Swiftiply/constants.rb +101 -0
  33. data/src/swiftcore/Swiftiply/content_cache_entry.rb +44 -0
  34. data/src/swiftcore/Swiftiply/content_response.rb +45 -0
  35. data/src/swiftcore/Swiftiply/control_protocol.rb +49 -0
  36. data/src/swiftcore/Swiftiply/dynamic_request_cache.rb +41 -0
  37. data/src/swiftcore/Swiftiply/etag_cache.rb +64 -0
  38. data/src/swiftcore/Swiftiply/file_cache.rb +46 -0
  39. data/src/swiftcore/Swiftiply/hash_cache_base.rb +22 -0
  40. data/src/swiftcore/Swiftiply/http_recognizer.rb +267 -0
  41. data/src/swiftcore/Swiftiply/loggers/Analogger.rb +21 -0
  42. data/src/swiftcore/Swiftiply/loggers/stderror.rb +13 -0
  43. data/src/swiftcore/Swiftiply/mocklog.rb +10 -0
  44. data/src/swiftcore/Swiftiply/proxy.rb +15 -0
  45. data/src/swiftcore/Swiftiply/proxy_backends/keepalive.rb +286 -0
  46. data/src/swiftcore/Swiftiply/proxy_backends/traditional.rb +286 -0
  47. data/src/swiftcore/Swiftiply/proxy_backends/traditional/redis_directory.rb +87 -0
  48. data/src/swiftcore/Swiftiply/proxy_backends/traditional/static_directory.rb +69 -0
  49. data/src/swiftcore/Swiftiply/proxy_bag.rb +716 -0
  50. data/src/swiftcore/Swiftiply/rest_based_cluster_manager.rb +15 -0
  51. data/src/swiftcore/Swiftiply/splay_cache_base.rb +21 -0
  52. data/src/swiftcore/Swiftiply/support_pagecache.rb +6 -3
  53. data/src/swiftcore/Swiftiply/swiftiply_2_http_proxy.rb +7 -0
  54. data/src/swiftcore/Swiftiply/swiftiply_client.rb +20 -5
  55. data/src/swiftcore/Swiftiply/version.rb +5 -0
  56. data/src/swiftcore/evented_mongrel.rb +26 -8
  57. data/src/swiftcore/hash.rb +43 -0
  58. data/src/swiftcore/method_builder.rb +28 -0
  59. data/src/swiftcore/streamer.rb +46 -0
  60. data/src/swiftcore/swiftiplied_mongrel.rb +91 -23
  61. data/src/swiftcore/types.rb +20 -3
  62. data/swiftiply.gemspec +14 -8
  63. data/test/TC_Deque.rb +152 -0
  64. data/test/TC_ProxyBag.rb +147 -166
  65. data/test/TC_Swiftiply.rb +576 -169
  66. data/test/TC_Swiftiply/mongrel/evented_hello.rb +1 -1
  67. data/test/TC_Swiftiply/mongrel/swiftiplied_hello.rb +1 -1
  68. data/test/TC_Swiftiply/test_serve_static_file_xsendfile/sendfile_client.rb +27 -0
  69. data/test/TC_Swiftiply/test_ssl/bin/validate_ssl_capability.rb +21 -0
  70. data/test/TC_Swiftiply/test_ssl/test.cert +16 -0
  71. data/test/TC_Swiftiply/test_ssl/test.key +15 -0
  72. data/{bin → test/bin}/echo_client +0 -0
  73. metadata +136 -94
  74. data/README +0 -126
  75. data/ext/swiftiply_parse/parse.rl +0 -90
data/src/swiftcore/Swiftiply/backend_protocol.rb
@@ -0,0 +1,213 @@
+ module Swiftcore
+   module Swiftiply
+
+     # The BackendProtocol is the EventMachine::Connection subclass that
+     # handles the communications between Swiftiply and the backend process
+     # it is proxying to.
+
+     class BackendProtocol < EventMachine::Connection
+       attr_accessor :associate, :id
+
+       C0rnrn = "0\r\n\r\n".freeze
+       Crnrn = "\r\n\r\n".freeze
+
+       def initialize *args
+         @name = self.class.bname
+         @permit_xsendfile = self.class.xsendfile
+         @enable_sendfile_404 = self.class.enable_sendfile_404
+         super
+       end
+
+       def name
+         @name
+       end
+
+       # Call setup() and add the backend to the ProxyBag queue.
+
+       def post_init
+         setup
+         @initialized = nil
+         ProxyBag.add_server self
+       end
+
+       # Setup the initial variables for receiving headers and content.
+
+       def setup
+         @headers = ''
+         @headers_completed = @dont_send_data = false
+         #@content_length = nil
+         @content_sent = 0
+         @filter = self.class.filter
+       end
+
+       # Receive data from the backend process. Headers are parsed from
+       # the rest of the content. If a Content-Length header is present,
+       # that is used to determine how much data to expect. Otherwise,
+       # if 'Transfer-encoding: chunked' is present, assume chunked
+       # encoding. Otherwise be paranoid; something isn't the way we like
+       # it to be.
+
+       def receive_data data
+         unless @initialized
+           # preamble = data.slice!(0..24)
+           preamble = data[0..24]
+           data = data[25..-1] || C_empty
+           keylen = preamble[23..24].to_i(16)
+           keylen = 0 if keylen < 0
+           key = keylen > 0 ? data.slice!(0..(keylen - 1)) : C_empty
+           #if preamble[0..10] == Cswiftclient and key == ProxyBag.get_key(@name)
+           if preamble.index(Cswiftclient) == 0 and key == ProxyBag.get_key(@name)
+             @id = preamble[11..22]
+             ProxyBag.add_id(self,@id)
+             @initialized = true
+           else
+             # The worker that connected did not present the proper authentication,
+             # so something is fishy; time to cut bait.
+             close_connection
+             return
+           end
+         end
+
+         unless @headers_completed
+           if data.include?(Crnrn)
+             @headers_completed = true
+             h,data = data.split(/\r\n\r\n/,2)
+             #@headers << h << Crnrn
+             if @headers.length > 0
+               @headers << h
+             else
+               @headers = h
+             end
+
+             if @headers =~ /Content-[Ll]ength: *([^\r]+)/
+               @content_length = $1.to_i
+             elsif @headers =~ /Transfer-encoding:\s*chunked/
+               @content_length = nil
+             else
+               @content_length = 0
+             end
+
+             if @permit_xsendfile && @headers =~ /X-[Ss]endfile: *([^\r]+)/
+               @associate.uri = $1
+               if ProxyBag.serve_static_file(@associate,ProxyBag.get_sendfileroot(@associate.name))
+                 @dont_send_data = true
+               else
+                 if @enable_sendfile_404
+                   msg = "#{@associate.uri} could not be found."
+                   @associate.send_data "HTTP/1.1 404 Not Found\r\nConnection: close\r\nContent-Type: text/html\r\nContent-Length: #{msg.length}\r\n\r\n#{msg}"
+                   @associate.close_connection_after_writing
+                   @dont_send_data = true
+                 else
+                   @associate.send_data @headers + Crnrn
+                 end
+               end
+             else
+               @associate.send_data @headers + Crnrn
+             end
+
+             # If keepalive is turned on, the assumption is that it will stay
+             # on, unless the headers being returned indicate that the connection
+             # should be closed.
+             # So, check for a 'Connection: close' header.
+             if keepalive = @associate.keepalive
+               keepalive = false if @headers =~ /Connection: [Cc]lose/
+               if @associate_http_version == C1_0
+                 keepalive = false unless @headers =~ /Connection: Keep-Alive/i
+               end
+             end
+           else
+             @headers << data
+           end
+         end
+
+         if @headers_completed
+           @associate.send_data data unless @dont_send_data
+           @content_sent += data.length
+           if @content_length and @content_sent >= @content_length or data[-6..-1] == C0rnrn
+             # If @dont_send_data is set, then the connection is going to be closed elsewhere.
+             unless @dont_send_data
+               # Check to see if keepalive is enabled.
+               if keepalive
+                 @associate.reset_state
+                 ProxyBag.remove_client(self) unless @associate
+               else
+                 @associate.close_connection_after_writing
+               end
+             end
+             @associate = @headers_completed = @dont_send_data = nil
+             @headers = ''
+             #@headers_completed = false
+             #@content_length = nil
+             @content_sent = 0
+             #setup
+             ProxyBag.add_server self
+           end
+         end
+       # TODO: Log these errors!
+       rescue Exception => e
+         puts "Kaboom: #{e} -- #{e.backtrace.inspect}"
+         @associate.close_connection_after_writing if @associate
+         @associate = nil
+         setup
+         ProxyBag.add_server self
+       end
+
+       # This is called when the backend disconnects from the proxy.
+       # If the backend is currently associated with a web browser client,
+       # that connection will be closed. Otherwise, the backend will be
+       # removed from the ProxyBag's backend queue.
+
+       def unbind
+         if @associate
+           if !@associate.redeployable or @content_length
+             @associate.close_connection_after_writing
+           else
+             @associate.associate = nil
+             @associate.setup_for_redeployment
+             ProxyBag.rebind_frontend_client(@associate)
+           end
+         else
+           ProxyBag.remove_server(self)
+         end
+         ProxyBag.remove_id(self)
+       end
+
+       def self.bname=(val)
+         @bname = val
+       end
+
+       def self.bname
+         @bname
+       end
+
+       def self.xsendfile=(val)
+         @xsendfile = val
+       end
+
+       def self.xsendfile
+         @xsendfile
+       end
+
+       def self.enable_sendfile_404=(val)
+         @enable_sendfile_404 = val
+       end
+
+       def self.enable_sendfile_404
+         @enable_sendfile_404
+       end
+
+       def self.filter=(val)
+         @filter = val
+       end
+
+       def self.filter
+         @filter
+       end
+
+       def filter
+         @filter
+       end
+     end
+
+   end
+ end
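
The 25-byte preamble that receive_data strips off above is the backend registration handshake: an 11-byte signature, a 12-byte backend id, and a two-digit hex key length, followed by the key itself. A minimal sketch of building such a preamble, assuming the Cswiftclient constant (defined in constants.rb, not shown in this hunk) is the literal string 'swiftclient':

    # Hypothetical helper, for illustration only.
    def handshake_preamble(id, key)
      raise ArgumentError, 'id must be exactly 12 bytes' unless id.length == 12
      # 11-byte signature + 12-byte id + 2 hex digits of key length = 25 bytes,
      # matching the data[0..24] slice taken in receive_data.
      "swiftclient#{id}#{format('%02x', key.length)}#{key}"
    end

    handshake_preamble('0123456789ab', 'sekrit')
    # => "swiftclient0123456789ab06sekrit"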
data/src/swiftcore/Swiftiply/cache_base.rb
@@ -0,0 +1,49 @@
+ module Swiftcore
+   begin
+     # Attempt to load the SplayTreeMap and Deque extensions. If one is not
+     # found, rubygems will be required and it will be checked for again. If
+     # it is still not found, it will be recorded as unavailable, and the
+     # remainder of the requires will be performed. If any of them are not
+     # found, and rubygems has not been required, it will be required and the
+     # code will retry, once.
+     load_state ||= :start
+     rubygems_loaded ||= false
+     load_state = :splaytreemap
+     require 'swiftcore/splaytreemap' unless const_defined?(:HasSplayTree)
+     HasSplayTree = true unless const_defined?(:HasSplayTree)
+
+     load_state = :deque
+     require 'swiftcore/deque' unless const_defined?(:HasDeque)
+     HasDeque = true unless const_defined?(:HasDeque)
+
+     load_state = :remainder
+   rescue LoadError => e
+     if !rubygems_loaded
+       begin
+         require 'rubygems'
+         rubygems_loaded = true
+       rescue LoadError
+         raise e
+       end
+       retry
+     end
+
+     case load_state
+     when :deque
+       HasDeque = false
+       retry
+     when :splaytreemap
+       HasSplayTree = false
+       retry
+     end
+
+     raise e
+   end
+
+
+   if HasSplayTree
+     require 'swiftcore/Swiftiply/splay_cache_base'
+   else
+     require 'swiftcore/Swiftiply/hash_cache_base'
+   end
+ end
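
The HasSplayTree and HasDeque flags set above are what the rest of the proxy can branch on when it picks a data structure. A hypothetical consumer, purely to show the intent (the real call sites are in proxy_bag.rb and config.rb):

    # Use the C-extension Deque when it loaded; otherwise fall back to an Array.
    queue = Swiftcore::HasDeque ? Swiftcore::Deque.new : []
    queue.push 'backend-1'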
data/src/swiftcore/Swiftiply/cache_base_mixin.rb
@@ -0,0 +1,52 @@
+ module Swiftcore
+   module Swiftiply
+     module CacheBaseMixin
+       attr_accessor :vw, :owners, :owner_hash, :name
+
+       def add_to_verification_queue(path)
+         @vq.unshift(path)
+         true
+       end
+
+       def vqlength
+         @vq.length
+       end
+
+       def check_verification_queue
+         start = Time.now
+         count = 0
+         @push_to_vq = {}
+         vql = @vq.length
+         qg = vql - @old_vql
+         while Time.now < start + @tl && !@vq.empty?
+           count += 1
+           path = @vq.pop
+           verify(path) ? @push_to_vq[path] = 1 : delete(path)
+         end
+         @push_to_vq.each_key {|x| add_to_verification_queue(x)}
+         @old_vql = @vq.length
+
+         rt = Time.now - start
+
+         # This algorithm is self-adaptive, based on the amount of work
+         # completed in the time slice, and the amount of remaining work
+         # in the queue.
+         # (verification_window / (verification_queue_length / count)) * (real_time / time_limit)
+
+         # If the queue growth in the last time period exceeded the count of items consumed this time,
+         # use the ratio of the two to reduce the count number. This will result in a shorter period
+         # of time before the next check cycle. This lets the system stay on top of things when there
+         # are bursts.
+         if qg > count
+           count *= count/qg
+         end
+         if vql == 0
+           @vw / 2
+         else
+           wait_time = (@vwtl * count) / (vql * rt)
+           wait_time < rt ? rt * 2.0 : wait_time > @vw ? @vw : wait_time
+         end
+       end
+     end
+   end
+ end
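
check_verification_queue returns the number of seconds to wait before the next verification pass, so the caller can re-arm a timer with it; config.rb below seeds the first pass with EventMachine.add_timer(vw/2). A sketch of such a re-arming loop, assuming a verify_cache helper along the lines of the one ProxyBag provides (its real definition is in proxy_bag.rb, not part of this section):

    # Illustrative re-arming loop only.
    def self.verify_cache(cache)
      wait_time = cache.check_verification_queue
      EventMachine.add_timer(wait_time) { verify_cache(cache) }
    end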
data/src/swiftcore/Swiftiply/cluster_managers/rest_based_cluster_manager.rb
@@ -0,0 +1,9 @@
+ module Swiftcore
+   module Swiftiply
+     module ClusterManagers
+       class RestBasedClusterManager
+
+       end
+     end
+   end
+ end
data/src/swiftcore/Swiftiply/cluster_protocol.rb
@@ -0,0 +1,70 @@
+ # Encoding:ascii-8bit
+
+ require "swiftcore/Swiftiply/http_recognizer"
+
+ module Swiftcore
+   module Swiftiply
+
+     # The ClusterProtocol is the subclass of EventMachine::Connection used
+     # to communicate between Swiftiply and the web browser clients.
+
+     class ClusterProtocol < HttpRecognizer
+
+       proxy_bag_class_is Swiftcore::Swiftiply::ProxyBag
+
+       C503Header = "HTTP/1.1 503 Server Unavailable\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n"
+
+       def self.init_class_variables
+         @count_503 = 0
+         super
+       end
+
+       def self.increment_503_count
+         @count_503 += 1
+       end
+
+       # Hardcoded 503 response that is sent if a connection times out while
+       # waiting for a backend to handle it.
+
+       def send_503_response
+         ip = Socket::unpack_sockaddr_in(get_peername).last rescue Cunknown_host
+         error = "The request (#{@uri} --> #{@name}), received on #{create_time.asctime} from #{ip} timed out before being deployed to a server for processing."
+         send_data "#{C503Header}Server Unavailable\n\n#{error}"
+         ProxyBag.logger.log(Cinfo,"Server Unavailable -- #{error}")
+         close_connection_after_writing
+         increment_503_count
+       end
+
+       def increment_503_count
+         @klass.increment_503_count
+       end
+
+       def push
+         if @associate
+           unless @redeployable
+             # normal data push
+             data = nil
+             @associate.send_data data while data = @data.pop
+           else
+             # redeployable data push; just send the stuff that has
+             # not already been sent.
+             (@data.length - 1 - @data_pos).downto(0) do |p|
+               d = @data[p]
+               @associate.send_data d
+               @data_len += d.length
+             end
+             @data_pos = @data.length
+
+             # If the request size crosses the size limit, then
+             # disallow redeployment of this request.
+             if @data_len > @redeployable
+               @redeployable = false
+               @data.clear
+             end
+           end
+         end
+       end
+
+     end
+   end
+ end
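
The index arithmetic in the redeployable branch of #push is easier to follow with concrete values. A sketch, assuming chunks are unshifted onto @data as they arrive (newest chunk at index 0), which is how the buffer is consumed above:

    @data     = ['chunk3', 'chunk2', 'chunk1']   # chunk1 arrived first
    @data_pos = 1                                # one chunk already sent
    (@data.length - 1 - @data_pos).downto(0) do |p|
      # visits index 1 ('chunk2'), then index 0 ('chunk3'):
      # the oldest unsent chunk goes out first.
    end
    @data_pos = @data.length                     # everything sent so far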
data/src/swiftcore/Swiftiply/config.rb
@@ -0,0 +1,370 @@
+ module Swiftcore
+   module Swiftiply
+     class Config
+       def self.configure_logging(config, data)
+         ProxyBag.remove_log(data[:hash])
+         if config['logger'] and (!ProxyBag.log(data[:hash]) or MockLog === ProxyBag.log(data[:hash]))
+           new_log = ::Swiftcore::Swiftiply::handle_logger_config(config['logger'])
+
+           ProxyBag.add_log(new_log[:logger],data[:hash])
+           ProxyBag.set_level(new_log[:log_level],data[:hash])
+         end
+       end
+
+       def self.configure_file_cache(config, data)
+         # The File Cache defaults to a max size of 100 elements, with a refresh
+         # window of five minutes, and a time slice of a hundredth of a second.
+         sz = 100
+         vw = 300
+         ts = 0.01
+
+         if config.has_key?(Cfile_cache)
+           sz = config[Cfile_cache][Csize] || 100
+           sz = 100 if sz < 0
+           vw = config[Cfile_cache][Cwindow] || 900
+           vw = 900 if vw < 0
+           ts = config[Cfile_cache][Ctimeslice] || 0.01
+           ts = 0.01 if ts < 0
+         end
+
+         ProxyBag.logger.log('debug',"Creating File Cache; size=#{sz}, window=#{vw}, timeslice=#{ts}") if ProxyBag.log_level > 2
+         file_cache = Swiftcore::Swiftiply::FileCache.new(vw,ts,sz)
+         file_cache.owners = data[:owners]
+         file_cache.owner_hash = data[:hash]
+         EventMachine.add_timer(vw/2) {ProxyBag.verify_cache(file_cache)} unless RunningConfig[:initialized]
+         file_cache
+       end
+
+       def self.configure_dynamic_request_cache(config, data)
+         # The Dynamic Request Cache defaults to a max size of 100, with a 15 minute
+         # refresh window, and a time slice of a hundredth of a second.
+         sz = 100
+         vw = 900
+         ts = 0.01
+         if config.has_key?(Cdynamic_request_cache)
+           sz = config[Cdynamic_request_cache][Csize] || 100
+           sz = 100 if sz < 0
+           vw = config[Cdynamic_request_cache][Cwindow] || 900
+           vw = 900 if vw < 0
+           ts = config[Cdynamic_request_cache][Ctimeslice] || 0.01
+           ts = 0.01 if ts < 0
+         end
+         ProxyBag.logger.log('debug',"Creating Dynamic Request Cache; size=#{sz}, window=#{vw}, timeslice=#{ts}") if ProxyBag.log_level > 2
+         dynamic_request_cache = Swiftcore::Swiftiply::DynamicRequestCache.new(config[Cdocroot],vw,ts,sz)
+         dynamic_request_cache.owners = data[:owners]
+         dynamic_request_cache.owner_hash = data[:hash]
+         EventMachine.add_timer(vw/2) {ProxyBag.verify_cache(dynamic_request_cache)} unless Swiftcore::Swiftiply::RunningConfig[:initialized]
+         dynamic_request_cache
+       end
+
+       def self.configure_etag_cache(config, data)
+         # The ETag Cache defaults to a max size of 10000 (it doesn't take a lot
+         # of RAM to hold an etag), with a 5 minute refresh window and a time
+         # slice of a hundredth of a second.
+         sz = 10000
+         vw = 300
+         ts = 0.01
+         if config.has_key?(Cetag_cache)
+           sz = config[Cetag_cache][Csize] || 100
+           sz = 100 if sz < 0
+           vw = config[Cetag_cache][Cwindow] || 900
+           vw = 900 if vw < 0
+           ts = config[Cetag_cache][Ctimeslice] || 0.01
+           ts = 0.01 if ts < 0
+         end
+         ProxyBag.logger.log('debug',"Creating ETag Cache; size=#{sz}, window=#{vw}, timeslice=#{ts}") if ProxyBag.log_level > 2
+         etag_cache = Swiftcore::Swiftiply::EtagCache.new(vw,ts,sz)
+         etag_cache.owners = data[:owners]
+         etag_cache.owner_hash = data[:hash]
+         EventMachine.add_timer(vw/2) {ProxyBag.verify_cache(etag_cache)} unless Swiftcore::Swiftiply::RunningConfig[:initialized]
+         etag_cache
+       end
+
+       def self.setup_caches(new_config,data)
+         # The dynamic request cache may need to know a valid client name.
+         data[:dynamic_request_cache].one_client_name ||= data[:p]
+
+         new_config[Cincoming][data[:p]] = {}
+         ProxyBag.add_incoming_mapping(data[:hash],data[:p])
+         ProxyBag.add_file_cache(data[:file_cache],data[:p])
+         ProxyBag.add_dynamic_request_cache(data[:dynamic_request_cache],data[:p])
+         ProxyBag.add_etag_cache(data[:etag_cache],data[:p])
+       end
+
+       def self.configure_docroot(config, p)
+         if config.has_key?(Cdocroot)
+           ProxyBag.add_docroot(config[Cdocroot],p)
+         else
+           ProxyBag.remove_docroot(p)
+         end
+       end
+
+       def self.configure_sendfileroot(config, p)
+         if config.has_key?(Csendfileroot)
+           ProxyBag.add_sendfileroot(config[Csendfileroot],p)
+           true
+         else
+           ProxyBag.remove_sendfileroot(p)
+           false
+         end
+       end
+
+       def self.configure_xforwardedfor(config, p)
+         if config[Cxforwardedfor]
+           ProxyBag.set_x_forwarded_for(p)
+         else
+           ProxyBag.unset_x_forwarded_for(p)
+         end
+       end
+
+       def self.configure_redeployable(config, p)
+         if config[Credeployable]
+           ProxyBag.add_redeployable(config[Credeployment_sizelimit] || 16384,p)
+         else
+           ProxyBag.remove_redeployable(p)
+         end
+       end
+
+       def self.configure_key(config, p, data)
+         if config.has_key?(Ckey)
+           ProxyBag.set_key(data[:hash],config[Ckey])
+         else
+           ProxyBag.set_key(data[:hash],C_empty)
+         end
+       end
+
+       def self.configure_staticmask(config, p)
+         if config.has_key?(Cstaticmask)
+           ProxyBag.add_static_mask(Regexp.new(config[Cstaticmask]),p)
+         else
+           ProxyBag.remove_static_mask(p)
+         end
+       end
+
+       def self.configure_cache_extensions(config, p)
+         if config.has_key?(Ccache_extensions) or config.has_key?(Ccache_directory)
+           require 'swiftcore/Swiftiply/support_pagecache'
+           ProxyBag.add_suffix_list((config[Ccache_extensions] || ProxyBag.const_get(:DefaultSuffixes)),p)
+           ProxyBag.add_cache_dir((config[Ccache_directory] || ProxyBag.const_get(:DefaultCacheDir)),p)
+         else
+           ProxyBag.remove_suffix_list(p) if ProxyBag.respond_to?(:remove_suffix_list)
+           ProxyBag.remove_cache_dir(p) if ProxyBag.respond_to?(:remove_cache_dir)
+         end
+       end
+
+       def self.configure_cluster_manager(config, p)
+         # Check for a cluster management section and do setup.
+         # manager: URL
+         #
+         # manager:
+         #   callsite: URL
+         #
+         # manager:
+         #   require: FILENAME
+         #   class: CLASSNAME
+         #   callsite: CLASS specific destination
+         #   params: param list to pass to the class
+         #
+         # If a filename is not given, cluster management will default to the
+         # URL triggered system, which requires a URL.
+
+         if config.has_key?(Cmanager)
+           config[Cmanager].each do |manager|
+             cluster_manager_params = {}
+             if Hash === manager
+               cluster_manager_params[:callsite] = manager[Ccallsite]
+               require manager[Crequire] || "swiftcore/Swiftiply/rest_based_cluster_manager"
+               cluster_manager_params[:class] = get_const_from_name(manager[Cclassname] || "RestBasedClusterManager", ::Swiftcore::Swiftiply::ManagerProtocols)
+               cluster_manager_params[:params] = manager[Cparams] || []
+             else
+               cluster_manager_params[:callsite] = manager
+               require "swiftcore/Swiftiply/rest_based_cluster_manager"
+               cluster_manager_params[:class] = ::Swiftcore::Swiftiply::ManagerProtocols::RestBasedClusterManager
+               cluster_manager_params[:params] = []
+             end
+             ProxyBag.add_cluster_manager(cluster_manager_params, hash)
+           end
+         else
+           ProxyBag.remove_cluster_manager(hash)
+         end
+       end
+
+       def self.configure_backends(k,args)
+         config = args[:config]
+         p = args[:p]
+         config_data = args[:config_data]
+         new_config = args[:new_config]
+         klass = args[:self]
+         directory_class = args[:directory_class]
+         directory_args = args[:directory_args]
+
+         ProxyBag.remove_filters(p)
+
+         is_a_server = klass.respond_to?(:is_a_server?) && klass.is_a_server?
+
+         if config[k]
+           #
+           # outgoing: 127.0.0.1:12340
+           # outgoing:
+           #   to: 127.0.0.1:12340
+           #
+           # outgoing:
+           #   match: php$
+           #   to: 127.0.0.1:12342
+           #
+           # outgoing:
+           #   prefix: /blah
+           #   to: 127.0.0.1:12345
+           #
+           #####
+           #
+           # If the outgoing is a simple host:port, then all
+           # requests go straight to a backend connected to
+           # that socket location.
+           #
+           # If the outgoing is a hash, and the hash only has
+           # a 'to' key, then the behavior is the same as if
+           # it were a simple host:port.
+           #
+           # If the outgoing hash has a 'to' and a 'match',
+           # then the incoming request's uri will be compared
+           # to the regular expression contained in the
+           # 'match' parameter.
+           #
+           # If the outgoing hash has a 'to' and a 'prefix',
+           # then the incoming request's uri will be compared
+           # with the prefix using a trie classifier.
+           # THE PREFIX OPTION IS NOT FULLY IMPLEMENTED!!!
+           config[k].each do |o|
+             # Directory classes have a lot of power to customize what's happening. So, make a new instance right away.
+             new_directory_class = generate_new_directory_class(directory_class, o, new_config)
+
+             if is_a_server
+               if klass.respond_to?(:parse_connection_params)
+                 params = klass.parse_connection_params(o, new_directory_class) # Provide the directory class, as it might have an opinion on how to parse this information.
+                 out = params[:out]
+                 host = params[:host]
+                 port = params[:port]
+                 filter = params[:filter]
+               else
+                 if Hash === o
+                   out = [o['to'],o['match'],o['prefix']].compact.join('::')
+                   host, port = o['to'].split(/:/,2)
+                   filter = Regexp.new(o['match'])
+                 else
+                   out = o
+                   host, port = out.split(/:/,2)
+                   filter = nil
+                 end
+               end
+
+               ProxyBag.logger.log(Cinfo," Configuring outgoing server #{out}") if ::Swiftcore::Swiftiply::log_level > 0
+               ProxyBag.default_name = p if config[Cdefault]
+               if Swiftcore::Swiftiply::existing_backends.has_key?(out)
+                 ProxyBag.logger.log(Cinfo,' Already running; skipping') if ::Swiftcore::Swiftiply::log_level > 2
+                 new_config[Coutgoing][out] ||= Swiftcore::Swiftiply::RunningConfig[Coutgoing][out]
+                 next
+               else
+                 # TODO: Add ability to create filters for outgoing destinations, so one can send different path patterns to different outgoing hosts/ports.
+                 Swiftcore::Swiftiply::existing_backends[out] = true
+                 backend_class = setup_backends(args.dup.merge(:directory_class => new_directory_class), filter)
+
+                 begin
+                   new_config[Coutgoing][out] = EventMachine.start_server(host, port.to_i, backend_class)
+                 rescue RuntimeError => e
+                   puts e.inspect
+                   advice = ''
+                   if port.to_i < 1024
+                     advice << 'Make sure you have the correct permissions to use that port, and make sure there is nothing else running on that port.'
+                   else
+                     advice << 'Make sure there is nothing else running on that port.'
+                   end
+                   advice << " The original error was: #{e}\n"
+                   raise EMStartServerError.new("The listener on #{host}:#{port} could not be started.\n#{advice}")
+                 end
+               end
+             else # it's not a server
+               if klass.respond_to?(:parse_connection_params)
+                 params = klass.parse_connection_params(o, new_directory_class) # Provide the directory class, as it might have an opinion on how to parse this information.
+                 out = params[:out] # this should be some sort of identifier, just for logging purposes.
+                 filter = params[:out]
+               else
+                 if Hash === o
+                   filter = Regexp.new(o['match'])
+                 end
+               end
+
+               out ||= p
+               ProxyBag.logger.log(Cinfo," Configuring outgoing protocol for #{out}") if ::Swiftcore::Swiftiply::log_level > 0
+               ProxyBag.default_name = p if config[Cdefault]
+
+               setup_backends(args.dup.merge(:directory_class => new_directory_class), filter)
+             end
+           end # done iterating on config
+         else
+           new_directory_class = generate_new_directory_class(directory_class, config, new_config)
+
+           if klass.respond_to?(:parse_connection_params)
+             params = klass.parse_connection_params({}, new_directory_class) # Provide the directory class, as it might have an opinion on how to parse this information.
+             out = params[:out] # this should be some sort of identifier, just for logging purposes.
+             filter = params[:out]
+           end
+
+           out ||= p
+           ProxyBag.logger.log(Cinfo," Configuring outgoing protocol for #{out}") if ::Swiftcore::Swiftiply::log_level > 0
+           ProxyBag.default_name = p if config[Cdefault]
+
+           setup_backends(args.dup.merge(:directory_class => new_directory_class), filter)
+         end
+       end
+
+       def self.stop_unused_servers(new_config)
+         # Now stop everything that is still running but which isn't needed.
+         if Swiftcore::Swiftiply::RunningConfig.has_key?(Coutgoing)
+           (Swiftcore::Swiftiply::RunningConfig[Coutgoing].keys - new_config[Coutgoing].keys).each do |unneeded_server_key|
+             EventMachine.stop_server(Swiftcore::Swiftiply::RunningConfig[Coutgoing][unneeded_server_key])
+           end
+         end
+       end
+
+       def self.set_server_queue(data, klass, config)
+         klass ||= ::Swiftcore::Deque
+         ProxyBag.set_server_queue(data[:hash], klass, config)
+       end
+
+       def self.generate_new_directory_class(directory_class, config, new_config)
+         new_directory_class = Class.new(directory_class)
+         new_directory_class.config(config, new_config) if new_directory_class.respond_to? :config
+         new_directory_class
+       end
+
+       def self.setup_backends(args, filter)
+         config = args[:config]
+         p = args[:p]
+         config_data = args[:config_data]
+         new_config = args[:new_config]
+         klass = args[:self]
+         directory_class = args[:directory_class]
+         directory_args = args[:directory_args]
+
+         backend_class = Class.new(klass)
+         backend_class.bname = config_data[:hash]
+         ProxyBag.logger.log(Cinfo," Do Caching") if config['caching'] and ::Swiftcore::Swiftiply::log_level > 0
+         backend_class.caching = config['caching'] if backend_class.respond_to? :caching
+         ProxyBag.logger.log(Cinfo," Permit X-Sendfile") if config_data[:permit_xsendfile] and ::Swiftcore::Swiftiply::log_level > 0
+         backend_class.xsendfile = config_data[:permit_xsendfile]
+         ProxyBag.logger.log(Cinfo," Enable 404 on missing Sendfile resource") if config[Cenable_sendfile_404] and ::Swiftcore::Swiftiply::log_level > 0
+         backend_class.enable_sendfile_404 = true if config[Cenable_sendfile_404]
+         backend_class.filter = !filter.nil?
+
+         directory_class.backend_class = backend_class if directory_class.respond_to? :backend_class
+         Config.set_server_queue(config_data, directory_class, directory_args)
+
+         ProxyBag.add_filter(filter,config_data[:hash]) if filter
+         ProxyBag.set_server_queue_as_filter_queue(config_data[:hash],backend_class) if filter
+         backend_class
+       end
+
+     end
+   end
+ end
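
For reference, a hypothetical per-site configuration section as the configure_* methods above would receive it. The key names assume the C* constants resolve to the like-named lowercase strings; those constants live in constants.rb, which is not part of this hunk.

    config = {
      'docroot'      => '/var/www/public',
      'sendfileroot' => '/var/www/sendfile',
      'redeployable' => true,
      'key'          => 'sekrit',
      'file_cache'   => { 'size' => 100, 'window' => 300, 'timeslice' => 0.01 },
      'outgoing'     => [
        '127.0.0.1:12340',                                  # plain host:port
        { 'to' => '127.0.0.1:12342', 'match' => '\.php$' }  # regexp-routed backend
      ]
    }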