mongrel_esi 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/COPYING +53 -0
- data/LICENSE +471 -0
- data/README +186 -0
- data/Rakefile +141 -0
- data/bin/mongrel_esi +271 -0
- data/ext/esi/common.rl +41 -0
- data/ext/esi/esi_parser.c +387 -0
- data/ext/esi/extconf.rb +6 -0
- data/ext/esi/machine.rb +499 -0
- data/ext/esi/parser.c +1675 -0
- data/ext/esi/parser.h +113 -0
- data/ext/esi/parser.rb +49 -0
- data/ext/esi/parser.rl +398 -0
- data/ext/esi/ruby_esi.rl +135 -0
- data/ext/esi/run-test.rb +3 -0
- data/ext/esi/test/common.rl +41 -0
- data/ext/esi/test/parser.c +1676 -0
- data/ext/esi/test/parser.h +113 -0
- data/ext/esi/test/parser.rl +398 -0
- data/ext/esi/test/test.c +373 -0
- data/ext/esi/test1.rb +56 -0
- data/ext/esi/test2.rb +45 -0
- data/lib/esi/cache.rb +207 -0
- data/lib/esi/config.rb +154 -0
- data/lib/esi/dispatcher.rb +27 -0
- data/lib/esi/handler.rb +236 -0
- data/lib/esi/invalidator.rb +40 -0
- data/lib/esi/logger.rb +46 -0
- data/lib/esi/router.rb +84 -0
- data/lib/esi/tag/attempt.rb +6 -0
- data/lib/esi/tag/base.rb +85 -0
- data/lib/esi/tag/except.rb +24 -0
- data/lib/esi/tag/include.rb +190 -0
- data/lib/esi/tag/invalidate.rb +54 -0
- data/lib/esi/tag/try.rb +40 -0
- data/lib/multi_dirhandler.rb +70 -0
- data/setup.rb +1585 -0
- data/test/integration/basic_test.rb +39 -0
- data/test/integration/cache_test.rb +37 -0
- data/test/integration/docs/content/500.html +16 -0
- data/test/integration/docs/content/500_with_failover.html +16 -0
- data/test/integration/docs/content/500_with_failover_to_alt.html +8 -0
- data/test/integration/docs/content/ajax_test_page.html +15 -0
- data/test/integration/docs/content/cookie_variable.html +3 -0
- data/test/integration/docs/content/foo.html +15 -0
- data/test/integration/docs/content/include_in_include.html +15 -0
- data/test/integration/docs/content/malformed_transforms.html +16 -0
- data/test/integration/docs/content/malformed_transforms.html-correct +11 -0
- data/test/integration/docs/content/static-failover.html +20 -0
- data/test/integration/docs/content/test2.html +1 -0
- data/test/integration/docs/content/test3.html +17 -0
- data/test/integration/docs/esi_invalidate.html +6 -0
- data/test/integration/docs/esi_mixed_content.html +15 -0
- data/test/integration/docs/esi_test_content.html +27 -0
- data/test/integration/docs/index.html +688 -0
- data/test/integration/docs/test1.html +1 -0
- data/test/integration/docs/test3.html +9 -0
- data/test/integration/docs/test_failover.html +1 -0
- data/test/integration/handler_test.rb +270 -0
- data/test/integration/help.rb +234 -0
- data/test/net/get_test.rb +197 -0
- data/test/net/net_helper.rb +16 -0
- data/test/net/server_test.rb +249 -0
- data/test/unit/base_tag_test.rb +44 -0
- data/test/unit/esi-sample.html +56 -0
- data/test/unit/help.rb +77 -0
- data/test/unit/include_request_test.rb +69 -0
- data/test/unit/include_tag_test.rb +14 -0
- data/test/unit/parser_test.rb +478 -0
- data/test/unit/router_test.rb +34 -0
- data/test/unit/sample.html +21 -0
- data/tools/rakehelp.rb +119 -0
- metadata +182 -0
data/lib/esi/cache.rb
ADDED
@@ -0,0 +1,207 @@
require 'digest/sha1'
require 'thread'
require 'ostruct'

require 'esi/logger'

##
# Provide classes with an efficient method for dynamically changing from
# multi-threaded to single-threaded when the class is created.
# Rather than requiring multiple object types, just change the object behavior.

module Synchronizable
  def synchronize_methods( *methods_names )
    methods_names.flatten.each do|name|
      base_name = name
      if name.to_s.match(/!$/)
        suffix = "!"
        base_name = name.to_s.gsub(/!$/,'')
      end
      unlocked_name = "#{base_name}_unlocked#{suffix}".to_sym
      locked_name = "#{base_name}_locked#{suffix}".to_sym

      self.class.send :alias_method, unlocked_name, name

      next unless @semaphore

      self.class.send(:define_method, locked_name) do |*args|
        @semaphore.synchronize do
          send unlocked_name, *args
        end
      end
    end
  end
end

module ESI

  # cache interface
  #
  # all caches store Fragments
  #
  # get - uri,params => Fragment
  #
  # cached? - uri,params => boolean
  #
  # put - uri,params, max_age, body => nil
  #
  # sweep! - tells the cache to expire anything that needs expiring
  #
  # keys - an optional method for iterating over all keys; may not be exposed by all cache backends
  #
  # delete - key => remove the key from the cache

  # Each object in the cache has a ttl
  class Fragment
    attr_accessor :body, :max_age, :uri
    alias read_body body

    def initialize(uri,max_age,body)
      @uri = uri
      max_age.gsub!(/\+.*$/,'') if max_age.class == String
      @max_age = Time.now.to_i + max_age.to_i
      @body = body
    end

    def valid?
      (Time.now.to_i < @max_age)
    end

  end


  # Base Cache object
  class Cache
    include Synchronizable

    def initialize( options = OpenStruct.new({}) )
      options = OpenStruct.new(options) if options.is_a?(Hash)
      if( options.locked )
        @semaphore = Mutex.new
      end
      synchronize_methods :get, :put, :sweep!, :keys, :delete
      # TODO: load the cache_headers config
    end

  protected
    def cache_key( uri, params )
      http_x_requested_with = params['HTTP_X_REQUESTED_WITH'] || params["X-Requested-With"]
      key = Digest::SHA1.hexdigest("#{uri}-#{http_x_requested_with}")
      key
    end

  end

  #
  # A memcache cache store. Uses the memcached ruby client, see => http://seattlerb.rubyforge.org/memcache-client/
  # and http://www.danga.com/memcached/
  #
  # There are a few issues to consider about providing a memcached backend.
  #
  # First, there's no good way to iterate over all the keys within memcached, and doing so is problematic beyond the implementation;
  # more details can be found here => http://lists.danga.com/pipermail/memcached/2007-February/003610.html
  #
  # Okay, so now what? Well, we still have options:
  # - We could try Tugela Cache => http://meta.wikimedia.org/wiki/Tugela_Cache
  # - This looks promising => http://www.marcworrell.com/article-500-en.html
  # - We could also store the keys in the ruby process, or alternatively look in INVALIDATION-WITH-MEMCACHED
  #
  # We can't support advanced selectors with the memcached backend.
  class MemcachedCache < Cache
    require 'rubygems'
    gem 'memcache-client' if respond_to? :gem
    require 'memcache'
    include ESI::Log

    def initialize( options )
      super
      # TODO: OpenStruct.marshal_dump !!!
      options = { :servers => options.servers,
                  :debug => options.debug,
                  :namespace => options.namespace,
                  :readonly => options.readonly }
      puts "Using memcache with options => #{options.inspect}"
      @cache = MemCache.new options
      puts "Using memcache servers at #{options[:servers].inspect}"
      @cache.servers = options[:servers]
    end

    def cached?( uri, params )
      fragment = get(uri,params)
      fragment and fragment.valid?
    rescue Object => e # never raise an exception from this method
      false
    end

    def get( uri, params )
      @cache.get(cache_key(uri,params))
    end

    def put( uri, params, max_age, body )
      fragment = Fragment.new(uri,max_age,body)
      key = cache_key(uri,params)
      @cache.add( key, fragment, fragment.max_age )
    rescue Object => e
      log_error e.message
    end

    # run through the cache and dump anything that has expired
    def sweep!
      # TODO: no real memcached equivalent??
    end

    def keys(&block)
      # TODO: can't implement this method directly with memcached, unless we introduce another key
      # and increase the data, but it is possible...
    end

    def delete( key )
      @cache.delete(key)
    end
  end

  #
  # A thread-safe ruby cache; stores cached fragments in the current ruby process memory.
  #
  # A hash table indexed by cache_key of Fragments.
  # The cache is made thread safe if the external invalidator is active; otherwise the Mutex is a no-op.
  class RubyCache < Cache

    def initialize( options = {} )
      super
      @cache = {}
    end

    def cached?( uri, params )
      key = cache_key(uri,params)
      fragment = @cache[key]
      fragment and fragment.valid?
    end

    def get( uri, params )
      @cache[cache_key(uri,params)]
    end

    def put( uri, params, max_age, body )
      key = cache_key(uri,params)
      @cache[key] = Fragment.new(uri,max_age,body)
      sweep_unlocked!
    end

    # run through the cache and dump anything that has expired
    def sweep!
      @cache.reject! {|k,v| !v.valid? }
    end

    def keys(&block)
      @cache.each do|key,data|
        yield key, data
      end
    end

    def delete( key )
      @cache.delete(key)
    end

  end

end
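The cache classes above share one small interface (get, cached?, put, sweep!, keys, delete) keyed by a SHA1 of the URI plus the X-Requested-With header. The following is a minimal sketch of driving the in-process RubyCache directly; it assumes the gem is installed, and the URI, params, and max_age values are made up for illustration.

  require 'rubygems'
  require 'esi/cache'

  # build an in-process cache; :locked => true wraps the *_locked variants in a Mutex
  cache = ESI::RubyCache.new(:locked => true)

  # hypothetical values of the kind the handler would pass in
  uri    = 'http://localhost:3000/content/foo.html'
  params = { 'HTTP_X_REQUESTED_WITH' => nil }

  cache.put(uri, params, 600, '<p>cached fragment</p>')  # cache for 600 seconds
  puts cache.cached?(uri, params)                        # => true while the fragment is fresh
  puts cache.get(uri, params).read_body                  # => "<p>cached fragment</p>"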
data/lib/esi/config.rb
ADDED
@@ -0,0 +1,154 @@
require 'ostruct'

module ESI
  class Config

    attr_reader :config

    def initialize(options)
      @config = options
    end

    # access configuration values
    def [](key)
      @config[key]
    end

    def enable_esi_processor?( headers )
      # check for surrogate control configuration
      # check for matching content type
      # if both are set, surrogate takes precedence
      use_esi = false
      allowed_content_types = @config[:allowed_content_types]

      if allowed_content_types and headers["Content-Type"] and allowed_content_types.respond_to?(:detect)
        use_esi = allowed_content_types.detect do |type|
          headers["Content-Type"].match( type )
        end
        use_esi = true if use_esi
      end

      if @config[:enable_for_surrogate_only]
        use_esi = (headers["Surrogate-Control"] and /ESI/.match(headers["Surrogate-Control"]))
      end

      use_esi
    end

    class CacheConfig
      attr_reader :options
      def initialize
        @memcached = false
        @options = OpenStruct.new({})
      end

      def memcached
        @memcached = true
        yield @options
      end

      def memcached?
        @memcached
      end

      def locked
        !@memcached
      end

      def ttl=( ttl )
        @options.ttl = ttl
      end

    end

    # returns the cache object as given by the cache: key in config/esi.yml,
    # defaulting to ruby (an in-process cache)
    # the allowed options are ruby and memcached
    def cache
      if block_given?
        # allow this method to be called in config scripts
        cache_options = CacheConfig.new
        yield cache_options
        if cache_options.memcached?
          @config[:cache] = 'memcached'
        end
        @config[:cache_options] = cache_options.options
      else
        cache_type = @config[:cache]
        options = @config[:cache_options]
        # always return the same cache object, per process
        $cache ||= case cache_type
                   when 'ruby'
                     ESI::RubyCache.new(options)
                   when 'memcached'
                     ESI::MemcachedCache.new(options)
                   else
                     raise "Unsupported cache"
                   end
      end
    end

    def esi
      options = OpenStruct.new({})
      yield options
      @config[:allowed_content_types] = options.allowed_content_types if options.allowed_content_types
      @config[:enable_for_surrogate_only] = options.enable_for_surrogate_only if options.enable_for_surrogate_only
      @config[:chunk_size] = options.chunk_size if options.chunk_size
      @config[:max_depth] = options.max_depth if options.max_depth
    end

    def router
      Router.new( @config[:routing] )
    end

    class ConfigRouter
      attr_accessor :servers
      attr_reader :routes

      def initialize
        @routes = []
        @servers = []
      end

      def match( expr )
        yield self
        @routes << { :host => @servers.first.split(':').first,
                     :port => @servers.last.split(':').last,
                     :match_url => expr }
      end

      def default
        yield self
        @routes << { :host => @servers.first.split(':').first,
                     :port => @servers.last.split(':').last,
                     :match_url => 'default' }
      end

    end

    def routes
      config_router = ConfigRouter.new
      yield config_router
      @config[:routing] = config_router.routes
    end

    # return true/false depending on the value of
    # invalidator: on/off
    # within config/esi.yml
    def start_invalidator?
      @config[:invalidator]
    end

    def self.define( listeners )
      listeners.each do|host,server|
        esi_handlers = server.classifier.handler_map.select do|uri,handler|
          handler.first.class == ESI::Dispatcher
        end
        config = esi_handlers.first.last.first.config
        yield config
      end
    end

  end
end
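ESI::Config doubles as a small DSL that bin/mongrel_esi evaluates against its configuration. The sketch below shows how a configuration script might exercise the esi, cache, and routes blocks; the content types, memcached address, namespace, match pattern, and backend servers are illustrative assumptions, not values shipped with the package.

  require 'rubygems'
  require 'esi/config'

  config = ESI::Config.new({})

  # which responses should be run through the ESI processor
  config.esi do |esi|
    esi.allowed_content_types     = [%r{text/html}]
    esi.enable_for_surrogate_only = true
    esi.chunk_size                = 4096
    esi.max_depth                 = 3
  end

  # switch the fragment cache to memcached (hypothetical server address)
  config.cache do |cache|
    cache.memcached do |options|
      options.servers   = ['127.0.0.1:11211']
      options.namespace = 'mongrel-esi'
    end
  end

  # route matching requests to one backend, everything else to a default
  config.routes do |r|
    r.match('/content') { |route| route.servers = ['127.0.0.1:3001'] }
    r.default           { |route| route.servers = ['127.0.0.1:3000'] }
  end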
data/lib/esi/dispatcher.rb
ADDED
@@ -0,0 +1,27 @@
require 'esi/handler'
require 'mongrel'

module ESI

  class Dispatcher < Mongrel::HttpHandler
    attr_reader :config, :router

    Thread.abort_on_exception = false

    def initialize( options )
      super()
      @config = ESI::Config.new( options )
      @router = nil
      if @config.start_invalidator?
        ESI::Invalidator.start( self )
      end
    end

    def process(request, response)
      @router = @config.router
      handler = ESI::Handler.new(self)
      handler.process(request, response)
    end

  end
end
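Because the dispatcher subclasses Mongrel::HttpHandler, it can be registered on a Mongrel server like any other handler. A minimal sketch follows; the options hash (cache type, routing entry, ports) is hypothetical, and bin/mongrel_esi is the supported way to assemble it from the command line and config file.

  require 'rubygems'
  require 'mongrel'
  require 'esi/dispatcher'

  # illustrative options only; see bin/mongrel_esi for the real option handling
  options = {
    :cache         => 'ruby',
    :cache_options => { :locked => true },
    :routing       => [ { :host => '127.0.0.1', :port => '3000', :match_url => 'default' } ],
    :invalidator   => false
  }

  server = Mongrel::HttpServer.new('0.0.0.0', '2000')
  server.register('/', ESI::Dispatcher.new(options))  # every request flows through the ESI dispatcher
  server.run.join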
data/lib/esi/handler.rb
ADDED
@@ -0,0 +1,236 @@
require 'uri'
require 'timeout'
require 'net/http'

require 'esi/logger'
require 'esi/cache'
require 'esi/config'
require 'esi/router'
require 'esi/esi'
require 'esi/tag/base'
require 'esi/tag/include'
require 'esi/tag/invalidate'
require 'esi/tag/attempt'
require 'esi/tag/except'
require 'esi/tag/try'

module ESI

  class Handler
    attr_reader :config
    include ESI::Log

    def initialize(dispatcher)
      @config = dispatcher.config
      @router = dispatcher.router
    end

    def process(request, response)

      start = Time.now
      status = 200
      url = @router.url_for(request.params["REQUEST_URI"])

      params = http_params(request.params)

      proxy_error = nil

      log_debug "#{request.params["REQUEST_METHOD"]} => #{url}"
      chunk_count = 0
      uri = URI.parse(url)

      path_with_query = uri.query ? "#{uri.path}?#{uri.query}" : uri.path

      proxy_request = (request.params["REQUEST_METHOD"] == "POST") ?
        Net::HTTP::Post.new( path_with_query, params ) :
        Net::HTTP::Get.new( path_with_query, params )

      proxy_connection = Net::HTTP.start(uri.host, uri.port)

      # open the connection up so we can start to stream the response
      proxy_connection.request(proxy_request,request.body.read) do|proxy_response|

        status = read_status( proxy_response )

        copy_headers( response.header, proxy_response ) unless status >= 500

        if status >= 500 or (status < 400 and status >= 300) or !@config.enable_esi_processor?( proxy_response )
          response.start(status, true) do|head,out|

            if status >= 500
              # TODO: only report this if configured to expose it
              out << proxy_error
            end

            # proxy the 500 response
            proxy_response.read_body do|fragment|
              out << fragment
            end

          end
        else
          # NOTE: It's very important that surrogate control headers are set to parse only if the
          # page has esi:include tags. Because of the nature of Transfer-Encoding: chunked, we keep
          # everything in memory until we reach an esi tag, then we load the tag into memory, send the next
          # chunk, and so on. This means that the density of tags to markup determines how much of the document
          # is stored in memory. A way we can get around this and attempt to keep a fixed amount of the document
          # in memory at all times is by setting a buffer size of, say, 1024. Then no matter what, we'll always
          # chunk the document by 1024 or some other chunk size.
          begin
            # Use the ESI Parser

            response.header["Transfer-Encoding"] = "chunked"
            # this is the important part: rather than send the whole document back, we send it in chunks.
            # each fragment is roughly in its own chunk; this does mean we require HTTP 1.1, and chunk size is still a limit
            header = Mongrel::Const::STATUS_FORMAT % [status, Mongrel::HTTP_STATUS_CODES[status]]
            header.gsub!(/Connection: close\r\n/,'')
            response.header.out.rewind
            header << response.header.out.read + Mongrel::Const::LINE_END
            header.gsub!(/Status:.*?\r\n/,'')
            response.write( header )

            #print header

            parser = ESI::CParser.new
            chunk_size = @config[:chunk_size] || 4096
            max_depth = @config[:max_depth] || 3
            buffer = "" # when buffer reaches chunk_size, write to the response socket

            # handle start tags
            parser.start_tag_handler do|tag_name, attrs|
              tag = ESI::Tag::Base.create( @router,
                                           request.params,
                                           params,
                                           tag_name.gsub(/esi:/,''),
                                           attrs,
                                           @config.cache )
              # set the tag depth
              tag.depth = parser.depth if tag.respond_to?(:depth=)
              tag.max_depth = max_depth if tag.respond_to?(:max_depth=)

              if parser.esi_tag
                parser.esi_tag.add_child(tag)
              else
                parser.esi_tag = tag
              end
            end

            # handle end tags
            parser.end_tag_handler do|tag_name|
              #ct = Time.now
              if parser.esi_tag.name == tag_name.gsub(/esi:/,'')
                parser.esi_tag.close(parser.output)
                parser.esi_tag = nil
              else
                parser.esi_tag.close_child(parser.output,tag_name)
              end
              #puts "\t[#{tag_name}] Time to close: #{Time.now - ct}"
            end

            # handle data streaming
            parser.output_handler do|chars|
              buffer << chars
              if buffer.size >= chunk_size
                #print buffer
                send_chunk( response, buffer )
                chunk_count += 1
                buffer = ""
              end
            end

            #t = Time.now
            proxy_response.read_body do|data|
              begin
                #pt = Time.now
                parser.process data
                #puts "Time in process: #{Time.now - pt}"
              rescue => e
                puts e.message
                puts e.backtrace.join("\n")
                raise e
              end
            end
            #puts "Response Time: #{Time.now - t}"

            parser.finish
            parser = nil

            if buffer.size > 0
              #print buffer
              send_chunk( response, buffer )
              chunk_count += 1
            end

          rescue => e
            status, error = error_response(e,url)
            response.write( error )
          end
          response.write( "0\r\n\r\n" )
          response.done = true
        end
      end # end request

    rescue => e
      status = error_response(e,url).first
    ensure

      log_request "\nCompleted => #{url}, #{Time.now - start} seconds with status #{status} and #{chunk_count} chunks\n"

    end

  protected

    def send_chunk( response, buffer )
      # send a new chunk
      size = buffer.size
      chunk_header = "#{"%x" % size}" + Mongrel::Const::LINE_END
      #puts chunk_header.inspect
      response.write( chunk_header ) # write the chunk size
      #puts buffer.inspect
      response.write( buffer + Mongrel::Const::LINE_END ) # write the chunk
    end

    def error_response(e,url)
      status = 500
      error = "<h1>Internal Server Error</h1><h4>Failed while requesting => '#{url}'</h4>\n<pre>#{e.message}#{e.backtrace.join("\n")}</pre>"
      log_error e.backtrace.join("\n")
      log_error error
      [status, "<html><body>#{error}</body></html>"]
    end

    def read_status(response)
      Net::HTTPResponse::CODE_TO_OBJ.select { |k,v| v == response.class }.first[0].to_i rescue 500
    end

    def http_params(params)
      updated_params = {}
      params.each do|k,v|
        k = k.split('_').collect { |t| t.capitalize }.join('-')
        if k[0,5] == 'Http-'
          k[0,5] = ''
          updated_params[k] = v
        end
      end
      updated_params
    end

    def copy_headers(head,response)
      response.to_hash.each do |k,v|
        # for Set-Cookie we need to split on ,
        # some edge cases with , since things like expires might be a date with , in them.
        k = k.split(/-/).map{|s| s.capitalize }.join('-')
        if k == "Set-Cookie"
          v.each do|cookie|
            head["Set-Cookie"] = cookie.strip # mongrel is case sensitive about handling duplicates
          end
        else
          head[k] = v unless k == "Content-Length" or k == "Surrogate-Control" or k == "Server"
        end
      end
      head["Server"] = "MongrelESI 0.4"
    end

  end # Handler

end # ESI
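The streaming branch of the handler hand-rolls HTTP/1.1 chunked framing: send_chunk writes the buffer length in hex, a CRLF, the buffer, and another CRLF, and process ends the body with a zero-length chunk ("0\r\n\r\n"). The standalone sketch below (plain Ruby, no Mongrel required) only illustrates that wire format; the fragment strings are made up.

  # same value as Mongrel::Const::LINE_END used by send_chunk
  LINE_END = "\r\n"

  # frame one chunk the way send_chunk does: hex size, CRLF, payload, CRLF
  def frame_chunk(buffer)
    ("%x" % buffer.size) + LINE_END + buffer + LINE_END
  end

  body  = frame_chunk("<html><body>")
  body << frame_chunk("<p>fragment from an esi:include</p>")
  body << "0" + LINE_END + LINE_END   # end-of-body marker written after the parser finishes

  puts body   # hex sizes (c, 23, 0) interleaved with the payloads, separated by CRLF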