mongrel 1.2.0.pre2-x86-mswin32

Files changed (72)
  1. data/COPYING +55 -0
  2. data/History.txt +68 -0
  3. data/LICENSE +55 -0
  4. data/Manifest.txt +69 -0
  5. data/README.txt +80 -0
  6. data/Rakefile +8 -0
  7. data/TODO +5 -0
  8. data/bin/mongrel_rails +284 -0
  9. data/examples/builder.rb +29 -0
  10. data/examples/camping/README +3 -0
  11. data/examples/camping/blog.rb +294 -0
  12. data/examples/camping/tepee.rb +149 -0
  13. data/examples/httpd.conf +474 -0
  14. data/examples/mime.yaml +3 -0
  15. data/examples/mongrel.conf +9 -0
  16. data/examples/monitrc +57 -0
  17. data/examples/random_thrash.rb +19 -0
  18. data/examples/simpletest.rb +52 -0
  19. data/examples/webrick_compare.rb +20 -0
  20. data/ext/http11/Http11Service.java +13 -0
  21. data/ext/http11/ext_help.h +15 -0
  22. data/ext/http11/extconf.rb +6 -0
  23. data/ext/http11/http11.c +534 -0
  24. data/ext/http11/http11_parser.c +1243 -0
  25. data/ext/http11/http11_parser.h +49 -0
  26. data/ext/http11/http11_parser.java.rl +159 -0
  27. data/ext/http11/http11_parser.rl +153 -0
  28. data/ext/http11/http11_parser_common.rl +54 -0
  29. data/ext/http11/org/jruby/mongrel/Http11.java +241 -0
  30. data/ext/http11/org/jruby/mongrel/Http11Parser.java +486 -0
  31. data/lib/1.8/http11.so +0 -0
  32. data/lib/1.9/http11.so +0 -0
  33. data/lib/mongrel.rb +366 -0
  34. data/lib/mongrel/camping.rb +107 -0
  35. data/lib/mongrel/cgi.rb +181 -0
  36. data/lib/mongrel/command.rb +220 -0
  37. data/lib/mongrel/configurator.rb +388 -0
  38. data/lib/mongrel/const.rb +110 -0
  39. data/lib/mongrel/debug.rb +203 -0
  40. data/lib/mongrel/gems.rb +22 -0
  41. data/lib/mongrel/handlers.rb +468 -0
  42. data/lib/mongrel/header_out.rb +28 -0
  43. data/lib/mongrel/http_request.rb +155 -0
  44. data/lib/mongrel/http_response.rb +166 -0
  45. data/lib/mongrel/init.rb +10 -0
  46. data/lib/mongrel/mime_types.yml +616 -0
  47. data/lib/mongrel/rails.rb +185 -0
  48. data/lib/mongrel/stats.rb +89 -0
  49. data/lib/mongrel/tcphack.rb +18 -0
  50. data/lib/mongrel/uri_classifier.rb +76 -0
  51. data/setup.rb +1585 -0
  52. data/tasks/gem.rake +28 -0
  53. data/tasks/native.rake +24 -0
  54. data/tasks/ragel.rake +20 -0
  55. data/test/mime.yaml +3 -0
  56. data/test/mongrel.conf +1 -0
  57. data/test/test_cgi_wrapper.rb +26 -0
  58. data/test/test_command.rb +86 -0
  59. data/test/test_conditional.rb +107 -0
  60. data/test/test_configurator.rb +87 -0
  61. data/test/test_debug.rb +25 -0
  62. data/test/test_handlers.rb +135 -0
  63. data/test/test_http11.rb +156 -0
  64. data/test/test_redirect_handler.rb +44 -0
  65. data/test/test_request_progress.rb +99 -0
  66. data/test/test_response.rb +127 -0
  67. data/test/test_stats.rb +35 -0
  68. data/test/test_uriclassifier.rb +261 -0
  69. data/test/test_ws.rb +117 -0
  70. data/test/testhelp.rb +71 -0
  71. data/tools/trickletest.rb +45 -0
  72. metadata +197 -0
Binary files (data/lib/1.8/http11.so and data/lib/1.9/http11.so) are not shown.
data/lib/mongrel.rb
@@ -0,0 +1,366 @@
+
+ # Standard libraries
+ require 'socket'
+ require 'tempfile'
+ require 'yaml'
+ require 'time'
+ require 'etc'
+ require 'uri'
+ require 'stringio'
+
+ # Compiled Mongrel extension
+ # support multiple ruby version (fat binaries under windows)
+ begin
+ require 'http11'
+ rescue LoadError
+ RUBY_VERSION =~ /(\d+.\d+)/
+ require "#{$1}/http11"
+ end
+
+ # Gem conditional loader
+ require 'mongrel/gems'
+ Mongrel::Gems.require 'cgi_multipart_eof_fix'
+ Mongrel::Gems.require 'fastthread'
+ require 'thread'
+
+ # Ruby Mongrel
+ require 'mongrel/cgi'
+ require 'mongrel/handlers'
+ require 'mongrel/command'
+ require 'mongrel/tcphack'
+ require 'mongrel/configurator'
+ require 'mongrel/uri_classifier'
+ require 'mongrel/const'
+ require 'mongrel/http_request'
+ require 'mongrel/header_out'
+ require 'mongrel/http_response'
+
+ # Mongrel module containing all of the classes (include C extensions) for running
+ # a Mongrel web server. It contains a minimalist HTTP server with just enough
+ # functionality to service web application requests fast as possible.
+ module Mongrel
+
+ # Used to stop the HttpServer via Thread.raise.
+ class StopServer < Exception; end
+
+ # Thrown at a thread when it is timed out.
+ class TimeoutError < Exception; end
+
+ # A Hash with one extra parameter for the HTTP body, used internally.
+ class HttpParams < Hash
+ attr_accessor :http_body
+ end
+
+
+ # This is the main driver of Mongrel, while the Mongrel::HttpParser and Mongrel::URIClassifier
+ # make up the majority of how the server functions. It's a very simple class that just
+ # has a thread accepting connections and a simple HttpServer.process_client function
+ # to do the heavy lifting with the IO and Ruby.
+ #
+ # You use it by doing the following:
+ #
+ # server = HttpServer.new("0.0.0.0", 3000)
+ # server.register("/stuff", MyNiftyHandler.new)
+ # server.run.join
+ #
+ # The last line can be just server.run if you don't want to join the thread used.
+ # If you don't though Ruby will mysteriously just exit on you.
+ #
+ # Ruby's thread implementation is "interesting" to say the least. Experiments with
+ # *many* different types of IO processing simply cannot make a dent in it. Future
+ # releases of Mongrel will find other creative ways to make threads faster, but don't
+ # hold your breath until Ruby 1.9 is actually finally useful.
+ class HttpServer
+ attr_reader :acceptor
+ attr_reader :workers
+ attr_reader :classifier
+ attr_reader :host
+ attr_reader :port
+ attr_reader :throttle
+ attr_reader :timeout
+ attr_reader :num_processors
+
+ # Creates a working server on host:port (strange things happen if port isn't a Number).
+ # Use HttpServer::run to start the server and HttpServer.acceptor.join to
+ # join the thread that's processing incoming requests on the socket.
+ #
+ # The num_processors optional argument is the maximum number of concurrent
+ # processors to accept, anything over this is closed immediately to maintain
+ # server processing performance. This may seem mean but it is the most efficient
+ # way to deal with overload. Other schemes involve still parsing the client's request
+ # which defeats the point of an overload handling system.
+ #
+ # The throttle parameter is a sleep timeout (in hundredths of a second) that is placed between
+ # socket.accept calls in order to give the server a cheap throttle time. It defaults to 0 and
+ # actually if it is 0 then the sleep is not done at all.
+ def initialize(host, port, num_processors=950, throttle=0, timeout=60)
+
+ tries = 0
+ @socket = TCPServer.new(host, port)
+
+ @classifier = URIClassifier.new
+ @host = host
+ @port = port
+ @workers = ThreadGroup.new
+ @throttle = throttle / 100.0
+ @num_processors = num_processors
+ @timeout = timeout
+ end
+
+ # Does the majority of the IO processing. It has been written in Ruby using
+ # about 7 different IO processing strategies and no matter how it's done
+ # the performance just does not improve. It is currently carefully constructed
+ # to make sure that it gets the best possible performance, but anyone who
+ # thinks they can make it faster is more than welcome to take a crack at it.
+ def process_client(client)
+ begin
+ parser = HttpParser.new
+ params = HttpParams.new
+ request = nil
+ data = client.readpartial(Const::CHUNK_SIZE)
+ nparsed = 0
+
+ # Assumption: nparsed will always be less since data will get filled with more
+ # after each parsing. If it doesn't get more then there was a problem
+ # with the read operation on the client socket. Effect is to stop processing when the
+ # socket can't fill the buffer for further parsing.
+ while nparsed < data.length
+ nparsed = parser.execute(params, data, nparsed)
+
+ if parser.finished?
+ if not params[Const::REQUEST_PATH]
+ # it might be a dumbass full host request header
+ uri = URI.parse(params[Const::REQUEST_URI])
+ params[Const::REQUEST_PATH] = uri.path
+ end
+
+ raise "No REQUEST PATH" if not params[Const::REQUEST_PATH]
+
+ script_name, path_info, handlers = @classifier.resolve(params[Const::REQUEST_PATH])
+
+ if handlers
+ params[Const::PATH_INFO] = path_info
+ params[Const::SCRIPT_NAME] = script_name
+
+ # From http://www.ietf.org/rfc/rfc3875 :
+ # "Script authors should be aware that the REMOTE_ADDR and REMOTE_HOST
+ # meta-variables (see sections 4.1.8 and 4.1.9) may not identify the
+ # ultimate source of the request. They identify the client for the
+ # immediate request to the server; that client may be a proxy, gateway,
+ # or other intermediary acting on behalf of the actual source client."
+ params[Const::REMOTE_ADDR] = client.peeraddr.last
+
+ # select handlers that want more detailed request notification
+ notifiers = handlers.select { |h| h.request_notify }
+ request = HttpRequest.new(params, client, notifiers)
+
+ # in the case of large file uploads the user could close the socket, so skip those requests
+ break if request.body == nil # nil signals from HttpRequest::initialize that the request was aborted
+
+ # request is good so far, continue processing the response
+ response = HttpResponse.new(client)
+
+ # Process each handler in registered order until we run out or one finalizes the response.
+ handlers.each do |handler|
+ handler.process(request, response)
+ break if response.done or client.closed?
+ end
+
+ # And finally, if nobody closed the response off, we finalize it.
+ unless response.done or client.closed?
+ response.finished
+ end
+ else
+ # Didn't find it, return a stock 404 response.
+ client.write(Const::ERROR_404_RESPONSE)
+ end
+
+ break #done
+ else
+ # Parser is not done, queue up more data to read and continue parsing
+ chunk = client.readpartial(Const::CHUNK_SIZE)
+ break if !chunk or chunk.length == 0 # read failed, stop processing
+
+ data << chunk
+ if data.length >= Const::MAX_HEADER
+ raise HttpParserError.new("HEADER is longer than allowed, aborting client early.")
+ end
+ end
+ end
+ rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+ client.close rescue nil
+ rescue HttpParserError => e
+ STDERR.puts "#{Time.now}: HTTP parse error, malformed request (#{params[Const::HTTP_X_FORWARDED_FOR] || client.peeraddr.last}): #{e.inspect}"
+ STDERR.puts "#{Time.now}: REQUEST DATA: #{data.inspect}\n---\nPARAMS: #{params.inspect}\n---\n"
+ rescue Errno::EMFILE
+ reap_dead_workers('too many files')
+ rescue Object => e
+ STDERR.puts "#{Time.now}: Read error: #{e.inspect}"
+ STDERR.puts e.backtrace.join("\n")
+ ensure
+ begin
+ client.close
+ rescue IOError
+ # Already closed
+ rescue Object => e
+ STDERR.puts "#{Time.now}: Client error: #{e.inspect}"
+ STDERR.puts e.backtrace.join("\n")
+ end
+ request.body.close! if request and request.body.class == Tempfile
+ end
+ end
+
+ # Used internally to kill off any worker threads that have taken too long
+ # to complete processing. Only called if there are too many processors
+ # currently servicing. It returns the count of workers still active
+ # after the reap is done. It only runs if there are workers to reap.
+ def reap_dead_workers(reason='unknown')
+ if @workers.list.length > 0
+ STDERR.puts "#{Time.now}: Reaping #{@workers.list.length} threads for slow workers because of '#{reason}'"
+ error_msg = "Mongrel timed out this thread: #{reason}"
+ mark = Time.now
+ @workers.list.each do |worker|
+ worker[:started_on] = Time.now if not worker[:started_on]
+
+ if mark - worker[:started_on] > @timeout + @throttle
+ STDERR.puts "Thread #{worker.inspect} is too old, killing."
+ worker.raise(TimeoutError.new(error_msg))
+ end
+ end
+ end
+
+ return @workers.list.length
+ end
+
+ # Performs a wait on all the currently running threads and kills any that take
+ # too long. It waits by @timeout seconds, which can be set in .initialize or
+ # via mongrel_rails. The @throttle setting does extend this waiting period by
+ # that much longer.
+ def graceful_shutdown
+ while reap_dead_workers("shutdown") > 0
+ STDERR.puts "Waiting for #{@workers.list.length} requests to finish, could take #{@timeout + @throttle} seconds."
+ sleep @timeout / 10
+ end
+ end
+
+ def configure_socket_options
+ case RUBY_PLATFORM
+ when /linux/
+ # 9 is currently TCP_DEFER_ACCEPT
+ $tcp_defer_accept_opts = [Socket::SOL_TCP, 9, 1]
+ $tcp_cork_opts = [Socket::SOL_TCP, 3, 1]
+ when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
+ # Do nothing, just closing a bug when freebsd <= 5.4
+ when /freebsd/
+ # Use the HTTP accept filter if available.
+ # The struct made by pack() is defined in /usr/include/sys/socket.h as accept_filter_arg
+ unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
+ $tcp_defer_accept_opts = [Socket::SOL_SOCKET, Socket::SO_ACCEPTFILTER, ['httpready', nil].pack('a16a240')]
+ end
+ end
+ end
+
+ # Runs the thing. It returns the thread used so you can "join" it. You can also
+ # access the HttpServer::acceptor attribute to get the thread later.
+ def run
+ BasicSocket.do_not_reverse_lookup=true
+
+ configure_socket_options
+
+ if defined?($tcp_defer_accept_opts) and $tcp_defer_accept_opts
+ @socket.setsockopt(*$tcp_defer_accept_opts) rescue nil
+ end
+
+ @acceptor = Thread.new do
+ begin
+ while true
+ begin
+ client = @socket.accept
+
+ if defined?($tcp_cork_opts) and $tcp_cork_opts
+ client.setsockopt(*$tcp_cork_opts) rescue nil
+ end
+
+ worker_list = @workers.list
+
+ if worker_list.length >= @num_processors
+ STDERR.puts "Server overloaded with #{worker_list.length} processors (#@num_processors max). Dropping connection."
+ client.close rescue nil
+ reap_dead_workers("max processors")
+ else
+ thread = Thread.new(client) {|c| process_client(c) }
+ thread[:started_on] = Time.now
+ @workers.add(thread)
+
+ sleep @throttle if @throttle > 0
+ end
+ rescue StopServer
+ break
+ rescue Errno::EMFILE
+ reap_dead_workers("too many open files")
+ sleep 0.5
+ rescue Errno::ECONNABORTED
+ # client closed the socket even before accept
+ client.close rescue nil
+ rescue Object => e
+ STDERR.puts "#{Time.now}: Unhandled listen loop exception #{e.inspect}."
+ STDERR.puts e.backtrace.join("\n")
+ end
+ end
+ graceful_shutdown
+ ensure
+ @socket.close
+ # STDERR.puts "#{Time.now}: Closed socket."
+ end
+ end
+
+ return @acceptor
+ end
+
+ # Simply registers a handler with the internal URIClassifier. When the URI is
+ # found in the prefix of a request then your handler's HttpHandler::process method
+ # is called. See Mongrel::URIClassifier#register for more information.
+ #
+ # If you set in_front=true then the passed in handler will be put in the front of the list
+ # for that particular URI. Otherwise it's placed at the end of the list.
+ def register(uri, handler, in_front=false)
+ begin
+ @classifier.register(uri, [handler])
+ rescue URIClassifier::RegistrationError => e
+ handlers = @classifier.resolve(uri)[2]
+ if handlers
+ # Already registered
+ method_name = in_front ? 'unshift' : 'push'
+ handlers.send(method_name, handler)
+ else
+ raise
+ end
+ end
+ handler.listener = self
+ end
+
+ # Removes any handlers registered at the given URI. See Mongrel::URIClassifier#unregister
+ # for more information. Remember this removes them *all* so the entire
+ # processing chain goes away.
+ def unregister(uri)
+ @classifier.unregister(uri)
+ end
+
+ # Stops the acceptor thread and then causes the worker threads to finish
+ # off the request queue before finally exiting.
+ def stop(synchronous=false)
+ @acceptor.raise(StopServer.new)
+
+ if synchronous
+ sleep(0.5) while @acceptor.alive?
+ end
+ end
+
+ end
+ end
+
+ # Load experimental library, if present. We put it here so it can override anything
+ # in regular Mongrel.
+
+ $LOAD_PATH.unshift 'projects/mongrel_experimental/lib/'
+ Mongrel::Gems.require 'mongrel_experimental', ">=#{Mongrel::Const::MONGREL_VERSION}"
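
A usage sketch based on the HttpServer documentation comments above. The handler class, mount path, and port are illustrative only and are not part of the gem:

    require 'mongrel'

    # Minimal handler; any Mongrel::HttpHandler subclass with a process method works.
    class HelloHandler < Mongrel::HttpHandler
      def process(request, response)
        response.start(200) do |head, out|
          head["Content-Type"] = "text/plain"
          out.write("hello from mongrel\n")
        end
      end
    end

    server = Mongrel::HttpServer.new("0.0.0.0", 3000)  # host, port
    server.register("/hello", HelloHandler.new)        # prefix-matched by the URIClassifier
    server.run.join                                    # run returns the acceptor thread; join keeps the process alive
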
data/lib/mongrel/camping.rb
@@ -0,0 +1,107 @@
+ # Copyright (c) 2005 Zed A. Shaw
+ # You can redistribute it and/or modify it under the same terms as Ruby.
+ #
+ # Additional work donated by contributors. See http://mongrel.rubyforge.org/attributions.html
+ # for more information.
+
+ require 'mongrel'
+
+
+ module Mongrel
+ # Support for the Camping micro framework at http://camping.rubyforge.org
+ # This implements the unusually long Postamble that Camping usually
+ # needs and shrinks it down to just a single line or two.
+ #
+ # Your Postamble would now be:
+ #
+ # Mongrel::Camping::start("0.0.0.0",3001,"/tepee",Tepee).join
+ #
+ # If you wish to get fancier than this then you can use the
+ # Camping::CampingHandler directly instead and do your own
+ # wiring:
+ #
+ # h = Mongrel::HttpServer.new(server, port)
+ # h.register(uri, CampingHandler.new(Tepee))
+ # h.register("/favicon.ico", Mongrel::Error404Handler.new(""))
+ #
+ # I add the /favicon.ico since camping apps typically don't
+ # have them and it's just annoying anyway.
+ module Camping
+
+ # This is a specialized handler for Camping applications
+ # that has them process the request and then translates
+ # the results into something the Mongrel::HttpResponse
+ # needs.
+ class CampingHandler < Mongrel::HttpHandler
+ attr_reader :files
+ attr_reader :guard
+ @@file_only_methods = ["GET","HEAD"]
+
+ def initialize(klass)
+ @files = Mongrel::DirHandler.new(nil, false)
+ @guard = Mutex.new
+ @klass = klass
+ end
+
+ def process(request, response)
+ if response.socket.closed?
+ return
+ end
+
+ controller = nil
+ @guard.synchronize {
+ controller = @klass.run(request.body, request.params)
+ }
+
+ sendfile, clength = nil
+ response.status = controller.status
+ controller.headers.each do |k, v|
+ if k =~ /^X-SENDFILE$/i
+ sendfile = v
+ elsif k =~ /^CONTENT-LENGTH$/i
+ clength = v.to_i
+ else
+ [*v].each do |vi|
+ response.header[k] = vi
+ end
+ end
+ end
+
+ if sendfile
+ request.params[Mongrel::Const::PATH_INFO] = sendfile
+ @files.process(request, response)
+ elsif controller.body.respond_to? :read
+ response.send_status(clength)
+ response.send_header
+ while chunk = controller.body.read(16384)
+ response.write(chunk)
+ end
+ if controller.body.respond_to? :close
+ controller.body.close
+ end
+ else
+ body = controller.body.to_s
+ response.send_status(body.length)
+ response.send_header
+ response.write(body)
+ end
+ end
+ end
+
+ # This is a convenience method that wires up a CampingHandler
+ # for your application on a given port and uri. It's pretty
+ # much all you need for a camping application to work right.
+ #
+ # It returns the Mongrel::HttpServer which you should either
+ # join or somehow manage. The thread is running when
+ # returned.
+
+ def Camping.start(server, port, uri, klass)
+ h = Mongrel::HttpServer.new(server, port)
+ h.register(uri, CampingHandler.new(klass))
+ h.register("/favicon.ico", Mongrel::Error404Handler.new(""))
+ h.run
+ return h
+ end
+ end
+ end
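
A hedged usage sketch of Camping.start, following the documentation comments above. It assumes the Camping framework is installed separately; the Blog application name, mount path, and port are hypothetical:

    require 'camping'
    require 'mongrel/camping'

    Camping.goes :Blog   # defines the hypothetical Blog Camping application module

    # Mount the app at /blog on port 3001; start registers a CampingHandler plus a
    # /favicon.ico 404 handler and returns the already-running Mongrel::HttpServer.
    server = Mongrel::Camping.start("0.0.0.0", 3001, "/blog", Blog)

    # Block on the acceptor thread so the process stays alive.
    server.acceptor.join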