mongrel 1.1.4-x86-mswin32-60

Files changed (73)
  1. data.tar.gz.sig +0 -0
  2. data/CHANGELOG +16 -0
  3. data/COPYING +55 -0
  4. data/LICENSE +55 -0
  5. data/Manifest +69 -0
  6. data/README +74 -0
  7. data/TODO +5 -0
  8. data/bin/mongrel_rails +283 -0
  9. data/examples/builder.rb +29 -0
  10. data/examples/camping/README +3 -0
  11. data/examples/camping/blog.rb +294 -0
  12. data/examples/camping/tepee.rb +149 -0
  13. data/examples/httpd.conf +474 -0
  14. data/examples/mime.yaml +3 -0
  15. data/examples/mongrel.conf +9 -0
  16. data/examples/mongrel_simple_ctrl.rb +92 -0
  17. data/examples/mongrel_simple_service.rb +116 -0
  18. data/examples/monitrc +57 -0
  19. data/examples/random_thrash.rb +19 -0
  20. data/examples/simpletest.rb +52 -0
  21. data/examples/webrick_compare.rb +20 -0
  22. data/ext/http11/ext_help.h +14 -0
  23. data/ext/http11/extconf.rb +6 -0
  24. data/ext/http11/http11.c +402 -0
  25. data/ext/http11/http11_parser.c +1221 -0
  26. data/ext/http11/http11_parser.h +49 -0
  27. data/ext/http11/http11_parser.java.rl +170 -0
  28. data/ext/http11/http11_parser.rl +152 -0
  29. data/ext/http11/http11_parser_common.rl +54 -0
  30. data/ext/http11_java/Http11Service.java +13 -0
  31. data/ext/http11_java/org/jruby/mongrel/Http11.java +266 -0
  32. data/ext/http11_java/org/jruby/mongrel/Http11Parser.java +572 -0
  33. data/lib/http11.so +0 -0
  34. data/lib/mongrel.rb +355 -0
  35. data/lib/mongrel/camping.rb +107 -0
  36. data/lib/mongrel/cgi.rb +181 -0
  37. data/lib/mongrel/command.rb +222 -0
  38. data/lib/mongrel/configurator.rb +388 -0
  39. data/lib/mongrel/const.rb +110 -0
  40. data/lib/mongrel/debug.rb +203 -0
  41. data/lib/mongrel/gems.rb +22 -0
  42. data/lib/mongrel/handlers.rb +468 -0
  43. data/lib/mongrel/header_out.rb +28 -0
  44. data/lib/mongrel/http_request.rb +155 -0
  45. data/lib/mongrel/http_response.rb +163 -0
  46. data/lib/mongrel/init.rb +10 -0
  47. data/lib/mongrel/mime_types.yml +616 -0
  48. data/lib/mongrel/rails.rb +185 -0
  49. data/lib/mongrel/stats.rb +89 -0
  50. data/lib/mongrel/tcphack.rb +18 -0
  51. data/lib/mongrel/uri_classifier.rb +76 -0
  52. data/mongrel-public_cert.pem +20 -0
  53. data/mongrel.gemspec +231 -0
  54. data/setup.rb +1585 -0
  55. data/test/mime.yaml +3 -0
  56. data/test/mongrel.conf +1 -0
  57. data/test/test_cgi_wrapper.rb +26 -0
  58. data/test/test_command.rb +86 -0
  59. data/test/test_conditional.rb +107 -0
  60. data/test/test_configurator.rb +87 -0
  61. data/test/test_debug.rb +25 -0
  62. data/test/test_handlers.rb +123 -0
  63. data/test/test_http11.rb +156 -0
  64. data/test/test_redirect_handler.rb +44 -0
  65. data/test/test_request_progress.rb +99 -0
  66. data/test/test_response.rb +127 -0
  67. data/test/test_stats.rb +35 -0
  68. data/test/test_uriclassifier.rb +261 -0
  69. data/test/test_ws.rb +115 -0
  70. data/test/testhelp.rb +66 -0
  71. data/tools/trickletest.rb +45 -0
  72. metadata +152 -0
  73. metadata.gz.sig +0 -0
data/lib/mongrel.rb
@@ -0,0 +1,355 @@
+
+ # Standard libraries
+ require 'socket'
+ require 'tempfile'
+ require 'yaml'
+ require 'time'
+ require 'etc'
+ require 'uri'
+ require 'stringio'
+
+ # Compiled Mongrel extension
+ require 'http11'
+
+ # Gem conditional loader
+ require 'mongrel/gems'
+ Mongrel::Gems.require 'cgi_multipart_eof_fix'
+ Mongrel::Gems.require 'fastthread'
+ require 'thread'
+
+ # Ruby Mongrel
+ require 'mongrel/cgi'
+ require 'mongrel/handlers'
+ require 'mongrel/command'
+ require 'mongrel/tcphack'
+ require 'mongrel/configurator'
+ require 'mongrel/uri_classifier'
+ require 'mongrel/const'
+ require 'mongrel/http_request'
+ require 'mongrel/header_out'
+ require 'mongrel/http_response'
+
+ # Mongrel module containing all of the classes (including C extensions) for running
+ # a Mongrel web server. It contains a minimalist HTTP server with just enough
+ # functionality to service web application requests as fast as possible.
+ module Mongrel
+
+   # Used to stop the HttpServer via Thread.raise.
+   class StopServer < Exception; end
+
+   # Thrown at a thread when it is timed out.
+   class TimeoutError < Exception; end
+
+   # A Hash with one extra parameter for the HTTP body, used internally.
+   class HttpParams < Hash
+     attr_accessor :http_body
+   end
+
+
+   # This is the main driver of Mongrel, while the Mongrel::HttpParser and Mongrel::URIClassifier
+   # make up the majority of how the server functions. It's a very simple class that just
+   # has a thread accepting connections and a simple HttpServer.process_client function
+   # to do the heavy lifting with the IO and Ruby.
+   #
+   # You use it by doing the following:
+   #
+   #   server = HttpServer.new("0.0.0.0", 3000)
+   #   server.register("/stuff", MyNiftyHandler.new)
+   #   server.run.join
+   #
+   # The last line can be just server.run if you don't want to join the thread used.
+   # If you don't though Ruby will mysteriously just exit on you.
+   #
+   # Ruby's thread implementation is "interesting" to say the least. Experiments with
+   # *many* different types of IO processing simply cannot make a dent in it. Future
+   # releases of Mongrel will find other creative ways to make threads faster, but don't
+   # hold your breath until Ruby 1.9 is actually finally useful.
+   class HttpServer
+     attr_reader :acceptor
+     attr_reader :workers
+     attr_reader :classifier
+     attr_reader :host
+     attr_reader :port
+     attr_reader :throttle
+     attr_reader :timeout
+     attr_reader :num_processors
+
+     # Creates a working server on host:port (strange things happen if port isn't a Number).
+     # Use HttpServer::run to start the server and HttpServer.acceptor.join to
+     # join the thread that's processing incoming requests on the socket.
+     #
+     # The num_processors optional argument is the maximum number of concurrent
+     # processors to accept, anything over this is closed immediately to maintain
+     # server processing performance. This may seem mean but it is the most efficient
+     # way to deal with overload. Other schemes involve still parsing the client's request
+     # which defeats the point of an overload handling system.
+     #
+     # The throttle parameter is a sleep timeout (in hundredths of a second) that is placed between
+     # socket.accept calls in order to give the server a cheap throttle time. It defaults to 0,
+     # and when it is 0 the sleep is skipped entirely.
+     def initialize(host, port, num_processors=950, throttle=0, timeout=60)
+
+       tries = 0
+       @socket = TCPServer.new(host, port)
+
+       @classifier = URIClassifier.new
+       @host = host
+       @port = port
+       @workers = ThreadGroup.new
+       @throttle = throttle / 100.0
+       @num_processors = num_processors
+       @timeout = timeout
+     end
+
+     # Does the majority of the IO processing. It has been written in Ruby using
+     # about 7 different IO processing strategies and no matter how it's done
+     # the performance just does not improve. It is currently carefully constructed
+     # to make sure that it gets the best possible performance, but anyone who
+     # thinks they can make it faster is more than welcome to take a crack at it.
+     def process_client(client)
+       begin
+         parser = HttpParser.new
+         params = HttpParams.new
+         request = nil
+         data = client.readpartial(Const::CHUNK_SIZE)
+         nparsed = 0
+
+         # Assumption: nparsed will always be less than data.length, since data gets filled
+         # with more after each parse. If it doesn't get more then there was a problem
+         # with the read operation on the client socket. The effect is to stop processing when the
+         # socket can't fill the buffer for further parsing.
+         while nparsed < data.length
+           nparsed = parser.execute(params, data, nparsed)
+
+           if parser.finished?
+             if not params[Const::REQUEST_PATH]
+               # it might be a dumbass full host request header
+               uri = URI.parse(params[Const::REQUEST_URI])
+               params[Const::REQUEST_PATH] = uri.path
+             end
+
+             raise "No REQUEST PATH" if not params[Const::REQUEST_PATH]
+
+             script_name, path_info, handlers = @classifier.resolve(params[Const::REQUEST_PATH])
+
+             if handlers
+               params[Const::PATH_INFO] = path_info
+               params[Const::SCRIPT_NAME] = script_name
+
+               # From http://www.ietf.org/rfc/rfc3875 :
+               # "Script authors should be aware that the REMOTE_ADDR and REMOTE_HOST
+               # meta-variables (see sections 4.1.8 and 4.1.9) may not identify the
+               # ultimate source of the request. They identify the client for the
+               # immediate request to the server; that client may be a proxy, gateway,
+               # or other intermediary acting on behalf of the actual source client."
+               params[Const::REMOTE_ADDR] = client.peeraddr.last
+
+               # select handlers that want more detailed request notification
+               notifiers = handlers.select { |h| h.request_notify }
+               request = HttpRequest.new(params, client, notifiers)
+
+               # in the case of large file uploads the user could close the socket, so skip those requests
+               break if request.body == nil # nil signals from HttpRequest::initialize that the request was aborted
+
+               # request is good so far, continue processing the response
+               response = HttpResponse.new(client)
+
+               # Process each handler in registered order until we run out or one finalizes the response.
+               handlers.each do |handler|
+                 handler.process(request, response)
+                 break if response.done or client.closed?
+               end
+
+               # And finally, if nobody closed the response off, we finalize it.
+               unless response.done or client.closed?
+                 response.finished
+               end
+             else
+               # Didn't find it, return a stock 404 response.
+               client.write(Const::ERROR_404_RESPONSE)
+             end
+
+             break #done
+           else
+             # Parser is not done, queue up more data to read and continue parsing
+             chunk = client.readpartial(Const::CHUNK_SIZE)
+             break if !chunk or chunk.length == 0 # read failed, stop processing
+
+             data << chunk
+             if data.length >= Const::MAX_HEADER
+               raise HttpParserError.new("HEADER is longer than allowed, aborting client early.")
+             end
+           end
+         end
+       rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+         client.close rescue nil
+       rescue HttpParserError => e
+         STDERR.puts "#{Time.now}: HTTP parse error, malformed request (#{params[Const::HTTP_X_FORWARDED_FOR] || client.peeraddr.last}): #{e.inspect}"
+         STDERR.puts "#{Time.now}: REQUEST DATA: #{data.inspect}\n---\nPARAMS: #{params.inspect}\n---\n"
+       rescue Errno::EMFILE
+         reap_dead_workers('too many files')
+       rescue Object => e
+         STDERR.puts "#{Time.now}: Read error: #{e.inspect}"
+         STDERR.puts e.backtrace.join("\n")
+       ensure
+         begin
+           client.close
+         rescue IOError
+           # Already closed
+         rescue Object => e
+           STDERR.puts "#{Time.now}: Client error: #{e.inspect}"
+           STDERR.puts e.backtrace.join("\n")
+         end
+         request.body.delete if request and request.body.class == Tempfile
+       end
+     end
+
+     # Used internally to kill off any worker threads that have taken too long
+     # to complete processing. Only called if there are too many processors
+     # currently servicing. It returns the count of workers still active
+     # after the reap is done. It only runs if there are workers to reap.
+     def reap_dead_workers(reason='unknown')
+       if @workers.list.length > 0
+         STDERR.puts "#{Time.now}: Reaping #{@workers.list.length} threads for slow workers because of '#{reason}'"
+         error_msg = "Mongrel timed out this thread: #{reason}"
+         mark = Time.now
+         @workers.list.each do |worker|
+           worker[:started_on] = Time.now if not worker[:started_on]
+
+           if mark - worker[:started_on] > @timeout + @throttle
+             STDERR.puts "Thread #{worker.inspect} is too old, killing."
+             worker.raise(TimeoutError.new(error_msg))
+           end
+         end
+       end
+
+       return @workers.list.length
+     end
+
+     # Performs a wait on all the currently running threads and kills any that take
+     # too long. It waits for @timeout seconds, which can be set in .initialize or
+     # via mongrel_rails. The @throttle setting extends this waiting period by
+     # that much.
+     def graceful_shutdown
+       while reap_dead_workers("shutdown") > 0
+         STDERR.puts "Waiting for #{@workers.list.length} requests to finish, could take #{@timeout + @throttle} seconds."
+         sleep @timeout / 10
+       end
+     end
+
+     def configure_socket_options
+       case RUBY_PLATFORM
+       when /linux/
+         # 9 is currently TCP_DEFER_ACCEPT
+         $tcp_defer_accept_opts = [Socket::SOL_TCP, 9, 1]
+         $tcp_cork_opts = [Socket::SOL_TCP, 3, 1]
+       when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
+         # Do nothing, just closing a bug when freebsd <= 5.4
+       when /freebsd/
+         # Use the HTTP accept filter if available.
+         # The struct made by pack() is defined in /usr/include/sys/socket.h as accept_filter_arg
+         unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
+           $tcp_defer_accept_opts = [Socket::SOL_SOCKET, Socket::SO_ACCEPTFILTER, ['httpready', nil].pack('a16a240')]
+         end
+       end
+     end
+
+     # Runs the thing. It returns the thread used so you can "join" it. You can also
+     # access the HttpServer::acceptor attribute to get the thread later.
+     def run
+       BasicSocket.do_not_reverse_lookup=true
+
+       configure_socket_options
+
+       if defined?($tcp_defer_accept_opts) and $tcp_defer_accept_opts
+         @socket.setsockopt(*$tcp_defer_accept_opts) rescue nil
+       end
+
+       @acceptor = Thread.new do
+         begin
+           while true
+             begin
+               client = @socket.accept
+
+               if defined?($tcp_cork_opts) and $tcp_cork_opts
+                 client.setsockopt(*$tcp_cork_opts) rescue nil
+               end
+
+               worker_list = @workers.list
+
+               if worker_list.length >= @num_processors
+                 STDERR.puts "Server overloaded with #{worker_list.length} processors (#@num_processors max). Dropping connection."
+                 client.close rescue nil
+                 reap_dead_workers("max processors")
+               else
+                 thread = Thread.new(client) {|c| process_client(c) }
+                 thread[:started_on] = Time.now
+                 @workers.add(thread)
+
+                 sleep @throttle if @throttle > 0
+               end
+             rescue StopServer
+               break
+             rescue Errno::EMFILE
+               reap_dead_workers("too many open files")
+               sleep 0.5
+             rescue Errno::ECONNABORTED
+               # client closed the socket even before accept
+               client.close rescue nil
+             rescue Object => e
+               STDERR.puts "#{Time.now}: Unhandled listen loop exception #{e.inspect}."
+               STDERR.puts e.backtrace.join("\n")
+             end
+           end
+           graceful_shutdown
+         ensure
+           @socket.close
+           # STDERR.puts "#{Time.now}: Closed socket."
+         end
+       end
+
+       return @acceptor
+     end
+
+     # Simply registers a handler with the internal URIClassifier. When the URI is
+     # found in the prefix of a request then your handler's HttpHandler::process method
+     # is called. See Mongrel::URIClassifier#register for more information.
+     #
+     # If you set in_front=true then the passed in handler will be put in the front of the list
+     # for that particular URI. Otherwise it's placed at the end of the list.
+     def register(uri, handler, in_front=false)
+       begin
+         @classifier.register(uri, [handler])
+       rescue URIClassifier::RegistrationError
+         handlers = @classifier.resolve(uri)[2]
+         method_name = in_front ? 'unshift' : 'push'
+         handlers.send(method_name, handler)
+       end
+       handler.listener = self
+     end
+
+     # Removes any handlers registered at the given URI. See Mongrel::URIClassifier#unregister
+     # for more information. Remember this removes them *all* so the entire
+     # processing chain goes away.
+     def unregister(uri)
+       @classifier.unregister(uri)
+     end
+
+     # Stops the acceptor thread and then causes the worker threads to finish
+     # off the request queue before finally exiting.
+     def stop(synchronous=false)
+       @acceptor.raise(StopServer.new)
+
+       if synchronous
+         sleep(0.5) while @acceptor.alive?
+       end
+     end
+
+   end
+ end
+
+ # Load experimental library, if present. We put it here so it can override anything
+ # in regular Mongrel.
+
+ $LOAD_PATH.unshift 'projects/mongrel_experimental/lib/'
+ Mongrel::Gems.require 'mongrel_experimental', ">=#{Mongrel::Const::MONGREL_VERSION}"
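
For orientation, the documentation comments in lib/mongrel.rb above boil down to roughly the following standalone usage. This is only an illustrative sketch, not part of the gem diff: MyNiftyHandler is a hypothetical handler name, and the response.start block is the standard Mongrel::HttpResponse API used by simple handlers.

    require 'mongrel'

    # Hypothetical handler; any Mongrel::HttpHandler subclass is wired up the same way.
    class MyNiftyHandler < Mongrel::HttpHandler
      def process(request, response)
        response.start(200) do |head, out|
          head["Content-Type"] = "text/plain"
          out.write("hello from mongrel\n")
        end
      end
    end

    # host, port, num_processors, throttle (hundredths of a second), timeout (seconds),
    # matching HttpServer#initialize in the diff above.
    server = Mongrel::HttpServer.new("0.0.0.0", 3000, 100, 0, 60)
    server.register("/stuff", MyNiftyHandler.new)
    server.run.join # run returns the acceptor thread; join it or the main thread exits

    # From another thread or a signal handler:
    # server.stop(true) # raises StopServer in the acceptor and waits for it to die
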
data/lib/mongrel/camping.rb
@@ -0,0 +1,107 @@
+ # Copyright (c) 2005 Zed A. Shaw
+ # You can redistribute it and/or modify it under the same terms as Ruby.
+ #
+ # Additional work donated by contributors. See http://mongrel.rubyforge.org/attributions.html
+ # for more information.
+
+ require 'mongrel'
+
+
+ module Mongrel
+   # Support for the Camping micro framework at http://camping.rubyforge.org
+   # This implements the unusually long Postamble that Camping usually
+   # needs and shrinks it down to just a single line or two.
+   #
+   # Your Postamble would now be:
+   #
+   #   Mongrel::Camping::start("0.0.0.0",3001,"/tepee",Tepee).join
+   #
+   # If you wish to get fancier than this then you can use the
+   # Camping::CampingHandler directly instead and do your own
+   # wiring:
+   #
+   #   h = Mongrel::HttpServer.new(server, port)
+   #   h.register(uri, CampingHandler.new(Tepee))
+   #   h.register("/favicon.ico", Mongrel::Error404Handler.new(""))
+   #
+   # I add the /favicon.ico since camping apps typically don't
+   # have them and it's just annoying anyway.
+   module Camping
+
+     # This is a specialized handler for Camping applications
+     # that has them process the request and then translates
+     # the results into something the Mongrel::HttpResponse
+     # needs.
+     class CampingHandler < Mongrel::HttpHandler
+       attr_reader :files
+       attr_reader :guard
+       @@file_only_methods = ["GET","HEAD"]
+
+       def initialize(klass)
+         @files = Mongrel::DirHandler.new(nil, false)
+         @guard = Mutex.new
+         @klass = klass
+       end
+
+       def process(request, response)
+         if response.socket.closed?
+           return
+         end
+
+         controller = nil
+         @guard.synchronize {
+           controller = @klass.run(request.body, request.params)
+         }
+
+         sendfile, clength = nil
+         response.status = controller.status
+         controller.headers.each do |k, v|
+           if k =~ /^X-SENDFILE$/i
+             sendfile = v
+           elsif k =~ /^CONTENT-LENGTH$/i
+             clength = v.to_i
+           else
+             [*v].each do |vi|
+               response.header[k] = vi
+             end
+           end
+         end
+
+         if sendfile
+           request.params[Mongrel::Const::PATH_INFO] = sendfile
+           @files.process(request, response)
+         elsif controller.body.respond_to? :read
+           response.send_status(clength)
+           response.send_header
+           while chunk = controller.body.read(16384)
+             response.write(chunk)
+           end
+           if controller.body.respond_to? :close
+             controller.body.close
+           end
+         else
+           body = controller.body.to_s
+           response.send_status(body.length)
+           response.send_header
+           response.write(body)
+         end
+       end
+     end
+
+     # This is a convenience method that wires up a CampingHandler
+     # for your application on a given port and uri. It's pretty
+     # much all you need for a camping application to work right.
+     #
+     # It returns the Mongrel::HttpServer which you should either
+     # join or somehow manage. The thread is running when
+     # returned.
+
+     def Camping.start(server, port, uri, klass)
+       h = Mongrel::HttpServer.new(server, port)
+       h.register(uri, CampingHandler.new(klass))
+       h.register("/favicon.ico", Mongrel::Error404Handler.new(""))
+       h.run
+       return h
+     end
+   end
+ end
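
The Camping wiring described in the comments above amounts to a couple of lines in your postamble. A hedged sketch, assuming a Camping app module named Tepee (as in the gem's examples/camping/tepee.rb); since Camping.start returns the HttpServer itself rather than a thread, the acceptor thread is joined explicitly here.

    require 'mongrel'
    require 'mongrel/camping'

    # Assumes the Camping app (here Tepee) has already been required and defined.
    server = Mongrel::Camping.start("0.0.0.0", 3001, "/tepee", Tepee)

    # Camping.start calls run for you and returns the Mongrel::HttpServer,
    # so keep the process alive by joining its acceptor thread.
    server.acceptor.join
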