mongrel_esi 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/COPYING +53 -0
- data/LICENSE +471 -0
- data/README +186 -0
- data/Rakefile +141 -0
- data/bin/mongrel_esi +271 -0
- data/ext/esi/common.rl +41 -0
- data/ext/esi/esi_parser.c +387 -0
- data/ext/esi/extconf.rb +6 -0
- data/ext/esi/machine.rb +499 -0
- data/ext/esi/parser.c +1675 -0
- data/ext/esi/parser.h +113 -0
- data/ext/esi/parser.rb +49 -0
- data/ext/esi/parser.rl +398 -0
- data/ext/esi/ruby_esi.rl +135 -0
- data/ext/esi/run-test.rb +3 -0
- data/ext/esi/test/common.rl +41 -0
- data/ext/esi/test/parser.c +1676 -0
- data/ext/esi/test/parser.h +113 -0
- data/ext/esi/test/parser.rl +398 -0
- data/ext/esi/test/test.c +373 -0
- data/ext/esi/test1.rb +56 -0
- data/ext/esi/test2.rb +45 -0
- data/lib/esi/cache.rb +207 -0
- data/lib/esi/config.rb +154 -0
- data/lib/esi/dispatcher.rb +27 -0
- data/lib/esi/handler.rb +236 -0
- data/lib/esi/invalidator.rb +40 -0
- data/lib/esi/logger.rb +46 -0
- data/lib/esi/router.rb +84 -0
- data/lib/esi/tag/attempt.rb +6 -0
- data/lib/esi/tag/base.rb +85 -0
- data/lib/esi/tag/except.rb +24 -0
- data/lib/esi/tag/include.rb +190 -0
- data/lib/esi/tag/invalidate.rb +54 -0
- data/lib/esi/tag/try.rb +40 -0
- data/lib/multi_dirhandler.rb +70 -0
- data/setup.rb +1585 -0
- data/test/integration/basic_test.rb +39 -0
- data/test/integration/cache_test.rb +37 -0
- data/test/integration/docs/content/500.html +16 -0
- data/test/integration/docs/content/500_with_failover.html +16 -0
- data/test/integration/docs/content/500_with_failover_to_alt.html +8 -0
- data/test/integration/docs/content/ajax_test_page.html +15 -0
- data/test/integration/docs/content/cookie_variable.html +3 -0
- data/test/integration/docs/content/foo.html +15 -0
- data/test/integration/docs/content/include_in_include.html +15 -0
- data/test/integration/docs/content/malformed_transforms.html +16 -0
- data/test/integration/docs/content/malformed_transforms.html-correct +11 -0
- data/test/integration/docs/content/static-failover.html +20 -0
- data/test/integration/docs/content/test2.html +1 -0
- data/test/integration/docs/content/test3.html +17 -0
- data/test/integration/docs/esi_invalidate.html +6 -0
- data/test/integration/docs/esi_mixed_content.html +15 -0
- data/test/integration/docs/esi_test_content.html +27 -0
- data/test/integration/docs/index.html +688 -0
- data/test/integration/docs/test1.html +1 -0
- data/test/integration/docs/test3.html +9 -0
- data/test/integration/docs/test_failover.html +1 -0
- data/test/integration/handler_test.rb +270 -0
- data/test/integration/help.rb +234 -0
- data/test/net/get_test.rb +197 -0
- data/test/net/net_helper.rb +16 -0
- data/test/net/server_test.rb +249 -0
- data/test/unit/base_tag_test.rb +44 -0
- data/test/unit/esi-sample.html +56 -0
- data/test/unit/help.rb +77 -0
- data/test/unit/include_request_test.rb +69 -0
- data/test/unit/include_tag_test.rb +14 -0
- data/test/unit/parser_test.rb +478 -0
- data/test/unit/router_test.rb +34 -0
- data/test/unit/sample.html +21 -0
- data/tools/rakehelp.rb +119 -0
- metadata +182 -0
data/test/net/get_test.rb
@@ -0,0 +1,197 @@
+require 'socket'
+require 'rubygems'
+require 'fastthread'
+require 'thread'
+require 'benchmark'
+require "#{File.dirname(__FILE__)}/net_helper"
+
+if RUBY_VERSION < '1.8.5'
+  STDERR.puts "This test requires at a minum ruby 1.8.5 for asynchronous i/o routines"
+  exit(1)
+end
+
+# evalutate different paterns for retrieving multiple documents at the same time
+
+class Array
+
+  def invert
+    h={}
+    self.each_with_index{|x,i| h[x]=i}
+    h
+  end
+
+end
+
+
+module SocketTests
+  include Socket::Constants
+  extend self
+  READ = 0
+  WRITE = 1
+
+  def simple_get(host)
+    host, port = host.split(':')
+    socket = Socket.new( AF_INET, SOCK_STREAM, 0 )
+    sockaddr = Socket.pack_sockaddr_in( port, host )
+    socket.connect( sockaddr )
+    socket.write( "GET / HTTP/1.0\r\n\r\n" )
+    socket.read
+  end
+
+  def blocking_get(hosts)
+    results = {}
+    hosts.each do|host|
+      results[:host] = simple_get(host)
+      STDERR.print "."
+    end
+    results
+  end
+
+  def multithreaded_get(hosts)
+    results = Queue.new
+    id=0
+    ids = []
+    threads = hosts.collect do|host|
+      thread = Thread.new(host,id) do|host,tid|
+        result = simple_get(host)
+        results << { :buffer => result, :host => host, :id => tid }
+      end
+      ids << id
+      id += 1
+      thread
+    end
+    until ids.empty?
+      result = results.pop
+      STDERR.print "." #result size => #{result[:buffer].size} from host => #{result[:host]}"
+      ids.reject!{|id| id == result[:id]}
+    end
+    threads.each do|t|
+      t.join
+    end
+    results
+  end
+
+  def nonblocking_get(hosts)
+    results = {}
+
+    sockets = hosts.collect do|host|
+      host, port = host.split(':')
+      socket = Socket.new(AF_INET, SOCK_STREAM, 0)
+      sockaddr = Socket.sockaddr_in(port, host)
+      socket.connect_nonblock(sockaddr) rescue Errno::EINPROGRESS
+      results[socket] = { :host => host, :buffer => "" }
+      socket
+    end
+    writes = sockets
+    reads = []
+
+    begin
+      ready = IO.select(reads, writes,[],10)
+      ready[WRITE].each do|socket|
+        socket.write("GET / HTTP/1.0\r\n\r\n")
+        reads << socket
+        # remove the write from the write pool
+        writes = writes.reject{|s| s == socket}
+      end
+
+      ready[READ].each do|socket|
+        begin
+          result = results[socket]
+          result[:buffer] << socket.read_nonblock(2048)
+        rescue EOFError
+          # finished, report on results
+          STDERR.print "."
+          # remove the socket from the read pool
+          reads.reject!{|s| s == socket}
+        end
+      end
+      # break if (reads.empty? and writes.empty?)
+      # ready = IO.select(reads, writes)
+    end until (reads.empty? and writes.empty?)
+
+    results
+  end
+end
+
+TRIALS=5
+DELAY=0.5
+PORT_START=9990
+PORT_END=9999
+
+def network_trial_average(trial, &block)
+  STDERR.print "average: #{trial} "
+  b = Benchmark.measure do
+    TRIALS.times do|i|
+      yield
+      sleep DELAY
+    end
+  end
+  time = b.real - (DELAY*TRIALS)
+  STDERR.puts "( #{time/TRIALS} seconds )"
+  time/TRIALS
+end
+
+def network_trial_variance(trial, average, &block)
+  STDERR.print "variance: #{trial} "
+  sumsqrs = 0
+  TRIALS.times do|i|
+    timer = Time.now
+    yield
+    sleep DELAY
+    duration = (Time.now - timer) - DELAY
+    sumsqrs += ((duration - average) * (duration - average))
+  end
+  STDERR.puts "( #{sumsqrs/TRIALS} seconds )"
+  sumsqrs/TRIALS
+end
+
+multi_trials_average = 0
+sync_trials_average = 0
+async_trials_average = 0
+test_servers = []
+test_hosts = []
+
+((PORT_END+1)-PORT_START).times do|port|
+  port = PORT_START + port
+  config = start_net_server(port)
+  test_servers << config
+  test_hosts << "127.0.0.1:#{port}"
+end
+puts "started #{test_hosts.size} servers..."
+
+pid = fork do
+  test_average = [ lambda {multi_trials_average += network_trial_average("Multi trial") { SocketTests.multithreaded_get(test_hosts) }},
+                   lambda {sync_trials_average += network_trial_average("Sync trial") { SocketTests.blocking_get(test_hosts) }},
+                   lambda {async_trials_average += network_trial_average("ASync trial") { SocketTests.nonblocking_get(test_hosts) }} ]
+  test_variance = [ lambda {network_trial_variance("Multi trial",multi_trials_average) { SocketTests.multithreaded_get(test_hosts) }},
+                    lambda {network_trial_variance("Sync trial",sync_trials_average) { SocketTests.blocking_get(test_hosts) }},
+                    lambda {network_trial_variance("ASync trial",async_trials_average) { SocketTests.nonblocking_get(test_hosts) }} ]
+
+  # first pass run forwards and backwards
+  test_average.each { |trial| trial.call }
+
+  # now with averages compute std
+  test_variance.each {|trial| trial.call }
+
+  # run in reverse
+  multi_trials_average = 0
+  sync_trials_average = 0
+  async_trials_average = 0
+  test_average.reverse.each { |trial| trial.call }
+  test_variance.reverse.each { |trial| trial.call }
+
+  # seconds pass reorder tests by swapping odd/even
+  # even = tests.invert.collect{ |trial,index| trial if (index % 2) == 0 }.compact
+  # odd = tests.invert.collect{ |trial,index| trial if (index % 2) == 1 }.compact
+  # tests = (odd + even).flatten
+  # rerun
+  # tests.each { |trial| trial.call }
+  # tests.reverse.each { |trial| trial.call }
+  puts "Multi-Threads Average => #{multi_trials_average}"
+  puts "Syncrhonous Average => #{sync_trials_average}"
+  puts "ASyncrhonous Average => #{async_trials_average}"
+end
+
+Process::waitpid(pid,0)
+
+test_servers.each {|server| server.shutdown }
data/test/net/net_helper.rb
@@ -0,0 +1,16 @@
+require 'webrick'
+class ::WEBrick::HTTPServer ; def access_log(config, req, res) ; end ; end
+class ::WEBrick::BasicLog ; def log(level, data) ; end ; end
+
+def start_net_server(port)
+  server = WEBrick::HTTPServer.new( :Port => port )
+  server.mount_proc("/") do|req,res|
+    res.body = %Q(<html><body>Test Document</body></html>)
+    sleep 0.001 # use a small delay to simulate network latency...
+    res['Content-Type'] = "text/html"
+  end
+  @thread = Thread.new(server) do|s|
+    s.start
+  end
+  server
+end
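
The 16-line hunk above (data/test/net/net_helper.rb) defines start_net_server, which binds a silenced WEBrick instance on the given port, serves a one-line test document with a 1 ms artificial delay, starts it in a background thread, and returns the server object; callers are expected to shut it down themselves. A hedged usage sketch, with an arbitrary port:

    require 'net/http'
    require File.expand_path('net_helper', File.dirname(__FILE__))  # the helper above

    server = start_net_server(9990)      # WEBrick is now serving in a background thread
    begin
      res = Net::HTTP.start('127.0.0.1', 9990) { |http| http.get('/') }
      puts res.code                      # => "200"
      puts res.body                      # => "<html><body>Test Document</body></html>"
    ensure
      server.shutdown                    # stop the background server, as get_test.rb does
    end
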
data/test/net/server_test.rb
@@ -0,0 +1,249 @@
+require 'socket'
+require 'thread'
+require 'benchmark'
+require "#{File.dirname(__FILE__)}/net_helper"
+require 'test/unit'
+
+# open up the mongrel http server
+# reimplement with nonblocking io
+module Mongrel
+
+  class HttpServer
+    READ = 0
+    WRITE = 1
+    ERROR = 2
+
+    # Does the majority of the IO processing. It has been written in Ruby using
+    # about 7 different IO processing strategies and no matter how it's done
+    # the performance just does not improve. It is currently carefully constructed
+    # to make sure that it gets the best possible performance, but anyone who
+    # thinks they can make it faster is more than welcome to take a crack at it.
+    def process_client(client)
+      begin
+        parser = HttpParser.new
+        params = HttpParams.new
+        request = nil
+        data = client.readpartial(Const::CHUNK_SIZE)
+        nparsed = 0
+
+        # Assumption: nparsed will always be less since data will get filled with more
+        # after each parsing. If it doesn't get more then there was a problem
+        # with the read operation on the client socket. Effect is to stop processing when the
+        # socket can't fill the buffer for further parsing.
+        while nparsed < data.length
+          nparsed = parser.execute(params, data, nparsed)
+
+          if parser.finished?
+            if not params[Const::REQUEST_PATH]
+              # it might be a dumbass full host request header
+              uri = URI.parse(params[Const::REQUEST_URI])
+              params[Const::REQUEST_PATH] = uri.request_uri
+            end
+
+            raise "No REQUEST PATH" if not params[Const::REQUEST_PATH]
+
+            script_name, path_info, handlers = @classifier.resolve(params[Const::REQUEST_PATH])
+
+            if handlers
+              params[Const::PATH_INFO] = path_info
+              params[Const::SCRIPT_NAME] = script_name
+              params[Const::REMOTE_ADDR] = params[Const::HTTP_X_FORWARDED_FOR] || client.peeraddr.last
+
+              # select handlers that want more detailed request notification
+              notifiers = handlers.select { |h| h.request_notify }
+              request = HttpRequest.new(params, client, notifiers)
+
+              # in the case of large file uploads the user could close the socket, so skip those requests
+              break if request.body == nil # nil signals from HttpRequest::initialize that the request was aborted
+
+              # request is good so far, continue processing the response
+              response = HttpResponse.new(client)
+
+              # Process each handler in registered order until we run out or one finalizes the response.
+              handlers.each do |handler|
+                handler.process(request, response)
+                break if response.done or client.closed?
+              end
+
+              # And finally, if nobody closed the response off, we finalize it.
+              unless response.done or client.closed?
+                response.finished
+              end
+            else
+              # Didn't find it, return a stock 404 response.
+              client.write(Const::ERROR_404_RESPONSE)
+            end
+
+            break #done
+          else
+            # Parser is not done, queue up more data to read and continue parsing
+            chunk = client.readpartial(Const::CHUNK_SIZE)
+            break if !chunk or chunk.length == 0 # read failed, stop processing
+
+            data << chunk
+            if data.length >= Const::MAX_HEADER
+              raise HttpParserError.new("HEADER is longer than allowed, aborting client early.")
+            end
+          end
+        end
+      rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+        client.close rescue Object
+      rescue HttpParserError
+        if $mongrel_debug_client
+          STDERR.puts "#{Time.now}: BAD CLIENT (#{params[Const::HTTP_X_FORWARDED_FOR] || client.peeraddr.last}): #$!"
+          STDERR.puts "#{Time.now}: REQUEST DATA: #{data.inspect}\n---\nPARAMS: #{params.inspect}\n---\n"
+        end
+      rescue Errno::EMFILE
+        reap_dead_workers('too many files')
+      rescue Object
+        STDERR.puts "#{Time.now}: ERROR: #$!"
+        STDERR.puts $!.backtrace.join("\n") if $mongrel_debug_client
+      ensure
+        client.close rescue Object
+        request.body.delete if request and request.body.class == Tempfile
+      end
+    end
+
+    def run
+      BasicSocket.do_not_reverse_lookup=true
+
+      configure_socket_options
+
+      if $tcp_defer_accept_opts
+        @socket.setsockopt(*$tcp_defer_accept_opts) rescue nil
+      end
+      @running = true
+
+      @acceptor = Thread.new do
+        readers = [@socket]
+        writers = []
+        errors = []
+        @timeout = @timeout <= 0 ? 1 : @timeout
+        puts @timeout.inspect
+
+        while @running
+
+          begin
+            begin
+              client_socket, client_sockaddr = @socket.accept_nonblock
+            rescue Errno::EAGAIN, Errno::ECONNABORTED, Errno::EPROTO, Errno::EINTR
+              if $tcp_cork_opts
+                client_socket.setsockopt(*$tcp_cork_opts) rescue nil
+              end
+              readers << client_socket if client_socket
+              ready = IO.select(readers, writers, errors, @timeout)
+
+              puts "Socket ready => #{readers.size}, #{client_socket.inspect}\n#{ready.inspect}"
+              if ready
+
+                ready[READ].each do|client|
+                  if read_from_client(client)
+                    ready[READ].reject do|c| c == client end
+                  end
+                end
+
+                ready[WRITE].each do|client|
+                  write_to_client(client)
+                end
+
+                ready[ERROR].each do|client|
+                end
+
+              end
+
+            end
+            puts @running
+
+          rescue StopServer
+            puts "recieved stop"
+            @socket.close rescue Object
+            break
+          rescue Errno::EMFILE
+            reap_dead_workers("too many open files")
+            sleep 0.5
+          rescue Errno::ECONNABORTED
+            # client closed the socket even before accept
+            client.close rescue Object
+          rescue Object => exc
+            STDERR.puts "!!!!!! UNHANDLED EXCEPTION! #{exc.class}:#{exc}. TELL ZED HE'S A MORON."
+            STDERR.puts $!.backtrace.join("\n")# if $mongrel_debug_client
+            exit 1
+          end
+        end
+        graceful_shutdown
+      end
+
+      return @acceptor
+    end
+
+    def read_from_client(socket)
+      begin
+        result = socket.read_nonblock(Const::CHUNK_SIZE)
+      rescue Errno::EAGAIN, EOFError
+        puts "done"
+        return true
+      rescue Errno::ENOTCONN
+      end
+      puts result
+      return false
+    end
+
+    def write_to_client(socket)
+    end
+
+    # Stops the acceptor thread and then causes the worker threads to finish
+    # off the request queue before finally exiting.
+    def stop
+      puts "send stop"
+      @running = false
+      stopper = Thread.new do
+        exc = StopServer.new
+        @acceptor.raise(exc)
+      end
+      stopper.priority = 10
+    end
+
+  end
+
+end
+
+class TestServer < Test::Unit::TestCase
+
+  def setup
+    @server = start_net_server(9999)
+  end
+
+  def teardown
+    puts "call stop"
+    @server.stop
+  end
+
+=begin
+  def test_single_request
+    res = issue_request
+    puts "request complete"
+    assert_not_nil res
+    assert_not_nil res.header
+    assert_not_nil res.body
+    assert_equal Net::HTTPOK, res.header.class
+    assert_match "Test Document", res.body, "Document body missing"
+  end
+  def test_multiple_request
+    puts Benchmark.measure {
+      threads = []
+      2.times do
+        threads << Thread.new do
+          res = issue_request
+          assert_equal Net::HTTPOK, res.header.class
+        end
+      end
+      threads.each do|t| t.join end
+    }
+  end
+=end
+
+  def issue_request
+    Net::HTTP.start("127.0.0.1", 9999) { |h| h.get("/") }
+  end
+
+end