ably-em-http-request 1.1.8
- checksums.yaml +7 -0
- data/.gemtest +0 -0
- data/.github/workflows/ci.yml +22 -0
- data/.gitignore +9 -0
- data/.rspec +0 -0
- data/Changelog.md +78 -0
- data/Gemfile +14 -0
- data/LICENSE +21 -0
- data/README.md +66 -0
- data/Rakefile +10 -0
- data/ably-em-http-request.gemspec +33 -0
- data/benchmarks/clients.rb +170 -0
- data/benchmarks/em-excon.rb +87 -0
- data/benchmarks/em-profile.gif +0 -0
- data/benchmarks/em-profile.txt +65 -0
- data/benchmarks/server.rb +48 -0
- data/examples/.gitignore +1 -0
- data/examples/digest_auth/client.rb +25 -0
- data/examples/digest_auth/server.rb +28 -0
- data/examples/fetch.rb +30 -0
- data/examples/fibered-http.rb +51 -0
- data/examples/multi.rb +25 -0
- data/examples/oauth-tweet.rb +35 -0
- data/examples/socks5.rb +23 -0
- data/lib/em/io_streamer.rb +51 -0
- data/lib/em-http/client.rb +343 -0
- data/lib/em-http/core_ext/bytesize.rb +6 -0
- data/lib/em-http/decoders.rb +252 -0
- data/lib/em-http/http_client_options.rb +51 -0
- data/lib/em-http/http_connection.rb +408 -0
- data/lib/em-http/http_connection_options.rb +72 -0
- data/lib/em-http/http_encoding.rb +151 -0
- data/lib/em-http/http_header.rb +85 -0
- data/lib/em-http/http_status_codes.rb +59 -0
- data/lib/em-http/middleware/digest_auth.rb +114 -0
- data/lib/em-http/middleware/json_response.rb +17 -0
- data/lib/em-http/middleware/oauth.rb +42 -0
- data/lib/em-http/middleware/oauth2.rb +30 -0
- data/lib/em-http/multi.rb +59 -0
- data/lib/em-http/request.rb +25 -0
- data/lib/em-http/version.rb +7 -0
- data/lib/em-http-request.rb +1 -0
- data/lib/em-http.rb +20 -0
- data/spec/client_fiber_spec.rb +23 -0
- data/spec/client_spec.rb +1000 -0
- data/spec/digest_auth_spec.rb +48 -0
- data/spec/dns_spec.rb +41 -0
- data/spec/encoding_spec.rb +49 -0
- data/spec/external_spec.rb +146 -0
- data/spec/fixtures/google.ca +16 -0
- data/spec/fixtures/gzip-sample.gz +0 -0
- data/spec/gzip_spec.rb +91 -0
- data/spec/helper.rb +27 -0
- data/spec/http_proxy_spec.rb +268 -0
- data/spec/middleware/oauth2_spec.rb +15 -0
- data/spec/middleware_spec.rb +143 -0
- data/spec/multi_spec.rb +104 -0
- data/spec/pipelining_spec.rb +62 -0
- data/spec/redirect_spec.rb +430 -0
- data/spec/socksify_proxy_spec.rb +56 -0
- data/spec/spec_helper.rb +25 -0
- data/spec/ssl_spec.rb +67 -0
- data/spec/stallion.rb +334 -0
- data/spec/stub_server.rb +45 -0
- metadata +269 -0
data/lib/em-http/decoders.rb
@@ -0,0 +1,252 @@
require 'zlib'
require 'stringio'

##
# Provides a unified callback interface to decompression libraries.
module EventMachine::AblyHttpRequest::HttpDecoders

  class DecoderError < StandardError
  end

  class << self
    def accepted_encodings
      DECODERS.inject([]) { |r, d| r + d.encoding_names }
    end

    def decoder_for_encoding(encoding)
      DECODERS.each { |d|
        return d if d.encoding_names.include? encoding
      }
      nil
    end
  end

  class Base
    def self.encoding_names
      name = to_s.split('::').last.downcase
      [name]
    end

    ##
    # chunk_callback:: [Block] To handle a decompressed chunk
    def initialize(&chunk_callback)
      @chunk_callback = chunk_callback
    end

    def <<(compressed)
      return unless compressed && compressed.size > 0

      decompressed = decompress(compressed)
      receive_decompressed decompressed
    end

    def finalize!
      decompressed = finalize
      receive_decompressed decompressed
    end

    private

    def receive_decompressed(decompressed)
      if decompressed && decompressed.size > 0
        @chunk_callback.call(decompressed)
      end
    end

    protected

    ##
    # Must return a part of decompressed
    def decompress(compressed)
      nil
    end

    ##
    # May return last part
    def finalize
      nil
    end
  end

  class Deflate < Base
    def decompress(compressed)
      begin
        @zstream ||= Zlib::Inflate.new(-Zlib::MAX_WBITS)
        @zstream.inflate(compressed)
      rescue Zlib::Error
        raise DecoderError
      end
    end

    def finalize
      return nil unless @zstream

      begin
        r = @zstream.inflate(nil)
        @zstream.close
        r
      rescue Zlib::Error
        raise DecoderError
      end
    end
  end

  ##
  # Partial implementation of RFC 1952 to extract the deflate stream from a gzip file
  class GZipHeader
    def initialize
      @state = :begin
      @data = ""
      @pos = 0
    end

    def finished?
      @state == :finish
    end

    def read(n, buffer)
      if (@pos + n) <= @data.size
        buffer << @data[@pos..(@pos + n - 1)]
        @pos += n
        return true
      else
        return false
      end
    end

    def readbyte
      if (@pos + 1) <= @data.size
        @pos += 1
        @data.getbyte(@pos - 1)
      end
    end

    def eof?
      @pos >= @data.size
    end

    def extract_stream(compressed)
      @data << compressed

      while !eof? && !finished?
        buffer = ""

        case @state
        when :begin
          break if !read(10, buffer)

          if buffer.getbyte(0) != 0x1f || buffer.getbyte(1) != 0x8b
            raise DecoderError.new("magic header not found")
          end

          if buffer.getbyte(2) != 0x08
            raise DecoderError.new("unknown compression method")
          end

          @flags = buffer.getbyte(3)
          if (@flags & 0xe0).nonzero?
            raise DecoderError.new("unknown header flags set")
          end

          # We don't care about these values, I'm leaving the code for reference
          # @time = buffer[4..7].unpack("V")[0] # little-endian uint32
          # @extra_flags = buffer.getbyte(8)
          # @os = buffer.getbyte(9)

          @state = :extra_length

        when :extra_length
          if (@flags & 0x04).nonzero?
            break if !read(2, buffer)
            @extra_length = buffer.unpack("v")[0] # little-endian uint16
            @state = :extra
          else
            @state = :extra
          end

        when :extra
          if (@flags & 0x04).nonzero?
            break if read(@extra_length, buffer)
            @state = :name
          else
            @state = :name
          end

        when :name
          if (@flags & 0x08).nonzero?
            while !(buffer = readbyte).nil?
              if buffer == 0
                @state = :comment
                break
              end
            end
          else
            @state = :comment
          end

        when :comment
          if (@flags & 0x10).nonzero?
            while !(buffer = readbyte).nil?
              if buffer == 0
                @state = :hcrc
                break
              end
            end
          else
            @state = :hcrc
          end

        when :hcrc
          if (@flags & 0x02).nonzero?
            break if !read(2, buffer)
            @state = :finish
          else
            @state = :finish
          end
        end
      end

      if finished?
        compressed[(@pos - (@data.length - compressed.length))..-1]
      else
        ""
      end
    end
  end

  class GZip < Base
    def self.encoding_names
      %w(gzip compressed)
    end

    def decompress(compressed)
      @header ||= GZipHeader.new
      if !@header.finished?
        compressed = @header.extract_stream(compressed)
      end

      @zstream ||= Zlib::Inflate.new(-Zlib::MAX_WBITS)
      @zstream.inflate(compressed)
    rescue Zlib::Error
      raise DecoderError
    end

    def finalize
      if @zstream
        if !@zstream.finished?
          r = @zstream.finish
        end
        @zstream.close
        r
      else
        nil
      end
    rescue Zlib::Error
      raise DecoderError
    end

  end

  DECODERS = [Deflate, GZip]

end
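The decoders above share a small push interface: construct a decoder with a chunk callback, shovel compressed body data into it with <<, and call finalize! once the response ends. A minimal standalone sketch of that flow (not part of the gem; the raw-deflate sample data and the 4-byte chunking are purely illustrative):

require 'zlib'

# Raw deflate data (negative window bits = no zlib header), which is what the
# Deflate decoder's inflater expects.
deflater = Zlib::Deflate.new(Zlib::DEFAULT_COMPRESSION, -Zlib::MAX_WBITS)
raw = deflater.deflate("hello, decoders", Zlib::FINISH)

decoder_class = EventMachine::AblyHttpRequest::HttpDecoders.decoder_for_encoding('deflate')
decoder = decoder_class.new { |chunk| print chunk } # chunk_callback receives decompressed data

# Feed the body in pieces, the way a streaming HTTP client would.
raw.each_char.each_slice(4) { |piece| decoder << piece.join }
decoder.finalize! # flush anything still buffered in the inflater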
data/lib/em-http/http_client_options.rb
@@ -0,0 +1,51 @@
module AblyHttpRequest
  class HttpClientOptions
    attr_reader :uri, :method, :host, :port
    attr_reader :headers, :file, :body, :query, :path
    attr_reader :keepalive, :pass_cookies, :decoding, :compressed

    attr_accessor :followed, :redirects

    def initialize(uri, options, method)
      @keepalive = options[:keepalive] || false # default to single request per connection
      @redirects = options[:redirects] ||= 0    # default number of redirects to follow
      @followed  = options[:followed] ||= 0     # keep track of number of followed requests

      @method = method.to_s.upcase
      @headers = options[:head] || {}

      @file = options[:file]
      @body = options[:body]

      @pass_cookies = options.fetch(:pass_cookies, true) # pass cookies between redirects
      @decoding = options.fetch(:decoding, true)         # auto-decode compressed response
      @compressed = options.fetch(:compressed, true)     # auto-negotiate compressed response

      set_uri(uri, options[:path], options[:query])
    end

    def follow_redirect?; @followed < @redirects; end
    def ssl?; @uri.scheme == "https" || @uri.port == 443; end
    def no_body?; @method == "HEAD"; end

    def set_uri(uri, path = nil, query = nil)
      uri = uri.kind_of?(Addressable::URI) ? uri : Addressable::URI::parse(uri.to_s)
      uri.path = path if path
      uri.path = '/' if uri.path.empty?

      @uri = uri
      @path = uri.path
      @host = uri.hostname
      @port = uri.port
      @query = query

      # Make sure the port is set, as Addressable::URI doesn't
      # set the port if it isn't there
      if @port.nil?
        @port = @uri.scheme == "https" ? 443 : 80
      end

      uri
    end
  end
end
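A quick sketch of how these options resolve in practice. The values are hypothetical and assume the gem and the addressable gem are loaded (set_uri relies on Addressable::URI, which leaves the port nil unless it appears in the URL):

require 'addressable/uri'

opts = AblyHttpRequest::HttpClientOptions.new(
  'https://example.com/search',
  { query: { q: 'em-http' }, redirects: 3, keepalive: true },
  :get
)

opts.ssl?             # => true  (https scheme)
opts.port             # => 443   (filled in because Addressable left it nil)
opts.path             # => "/search"
opts.follow_redirect? # => true  (0 of 3 allowed redirects followed so far)
opts.no_body?         # => false (only HEAD requests skip the response body)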
data/lib/em-http/http_connection.rb
@@ -0,0 +1,408 @@
require 'em/io_streamer'

module EventMachine
  module AblyHttpRequest

    module HTTPMethods
      def get     options = {}, &blk; setup_request(:get,     options, &blk); end
      def head    options = {}, &blk; setup_request(:head,    options, &blk); end
      def delete  options = {}, &blk; setup_request(:delete,  options, &blk); end
      def put     options = {}, &blk; setup_request(:put,     options, &blk); end
      def post    options = {}, &blk; setup_request(:post,    options, &blk); end
      def patch   options = {}, &blk; setup_request(:patch,   options, &blk); end
      def options options = {}, &blk; setup_request(:options, options, &blk); end
    end

    class HttpStubConnection < Connection
      include Deferrable
      attr_reader :parent

      def parent=(p)
        @parent = p
        @parent.conn = self
      end

      def receive_data(data)
        begin
          @parent.receive_data data
        rescue EventMachine::Connectify::CONNECTError => e
          @parent.close(e.message)
        end
      end

      def connection_completed
        @parent.connection_completed
      end

      def unbind(reason = nil)
        @parent.unbind(reason)
      end

      # TLS verification support, original implementation by Mislav Marohnić
      # https://github.com/lostisland/faraday/blob/63cf47c95b573539f047c729bd9ad67560bc83ff/lib/faraday/adapter/em_http_ssl_patch.rb
      #
      # Updated by Ably, here’s why:
      #
      # We noticed that the existing verification mechanism is failing in the
      # case where the certificate chain presented by the server contains a
      # certificate that’s signed by an expired trust anchor. At the time of
      # writing, this is the case with some Let’s Encrypt certificate chains,
      # which contain a cross-sign by the expired DST Root X3 CA.
      #
      # This isn’t meant to be an issue; the certificate chain presented by the
      # server still contains a certificate that’s a trust anchor in most
      # modern systems. So in the case where this trust anchor exists, OpenSSL
      # would instead construct a certification path that goes straight to that
      # anchor, bypassing the expired certificate.
      #
      # Unfortunately, as described in
      # https://github.com/eventmachine/eventmachine/issues/954#issue-1014842247,
      # EventMachine misuses OpenSSL in a variety of ways. One of them is that
      # it does not configure a list of trust anchors, meaning that OpenSSL is
      # unable to construct the correct certification path in the manner
      # described above.
      #
      # This means that we end up in a degenerate situation where
      # ssl_verify_peer just receives the certificates in the chain provided by
      # the peer. In the scenario described above, one of these certificates is
      # expired and hence the existing verification mechanism, which "verifies"
      # each certificate provided to ssl_verify_peer, fails.
      #
      # So, instead we remove the existing ad-hoc mechanism for verification
      # (which did things I’m not sure it should have done, like putting
      # non-trust-anchors into an OpenSSL::X509::Store) and instead employ
      # OpenSSL (configured to use the system trust store, and hence able to
      # construct the correct certification path) to do all the hard work of
      # constructing the certification path and then verifying the peer
      # certificate. (This is what, in my opinion, EventMachine ideally would
      # be allowing OpenSSL to do in the first place. Instead, as far as I can
      # tell, it pushes all of this responsibility onto its users, and then
      # provides them with an insufficient API for meeting this
      # responsibility.)
      def ssl_verify_peer(cert_string)
        # We use ssl_verify_peer simply as a mechanism for gathering the
        # certificate chain presented by the peer. In ssl_handshake_completed,
        # we’ll make use of this information in order to verify the peer.
        @peer_certificate_chain ||= []
        begin
          cert = OpenSSL::X509::Certificate.new(cert_string)
          @peer_certificate_chain << cert
          true
        rescue OpenSSL::X509::CertificateError
          return false
        end
      end

      def ssl_handshake_completed
        # Warning message updated by Ably — the previous message suggested that
        # when verify_peer is false, the server certificate would be verified
        # but not checked against the hostname. This is not true — when
        # verify_peer is false, the server certificate is not verified at all.
        unless verify_peer?
          warn "[WARNING; ably-em-http-request] TLS server certificate validation is disabled (use 'tls: {verify_peer: true}'), see" +
               " CVE-2020-13482 and https://github.com/igrigorik/em-http-request/issues/339 for details" unless parent.connopts.tls.has_key?(:verify_peer)
          return true
        end

        # It’s not great to have to perform the server certificate verification
        # after the handshake has completed, because it means:
        #
        # - We have to be sure that we don’t send any data over the TLS
        #   connection until we’ve verified the certificate. Created
        #   https://github.com/ably/ably-ruby/issues/400 to understand whether
        #   there’s anything we need to change to be sure of this.
        #
        # - If verification does fail, we have no way of failing the handshake
        #   with a bad_certificate error.
        #
        # Unfortunately there doesn’t seem to be a better alternative within
        # the TLS-related APIs provided to us by EventMachine. (Admittedly I am
        # not familiar with EventMachine.)
        #
        # (Performing the verification post-handshake is not new to the Ably
        # implementation of certificate verification; the previous
        # implementation performed hostname verification after the handshake
        # was complete.)

        # I was quite worried by the description in the aforementioned issue
        # eventmachine/eventmachine#954 of how EventMachine "ignores all errors
        # from the chain construction" and hence I don’t know if there is some
        # weird scenario where, somehow, the calls to ssl_verify_peer terminate
        # with some intermediate certificate instead of with the certificate of
        # the server we’re communicating with. (It's quite possible that this
        # can’t occur and I’m just being paranoid, but I think a bit of
        # paranoia when it comes to security isn't a bad thing.)
        #
        # That's why, instead of the previous code which passed
        # certificate_store.verify the final certificate received by
        # ssl_verify_peer, I explicitly use the result of get_peer_cert, to be
        # sure that the certificate that we’re verifying is the one that the
        # server has demonstrated that they hold the private key for.
        server_certificate = OpenSSL::X509::Certificate.new(get_peer_cert)

        # A sense check to confirm my understanding of what’s in @peer_certificate_chain.
        #
        # (As mentioned above, unless something has gone very wrong, these two
        # certificates should be identical.)
        unless server_certificate.to_der == @peer_certificate_chain.last.to_der
          raise OpenSSL::SSL::SSLError.new(%(Peer certificate sense check failed for "#{host}"))
        end

        # Verify the server’s certificate against the default trust anchors,
        # aided by the intermediate certificates provided by the server.
        unless create_certificate_store.verify(server_certificate, @peer_certificate_chain[0...-1])
          raise OpenSSL::SSL::SSLError.new(%(unable to verify the server certificate for "#{host}"))
        end

        # Verify that the peer’s certificate matches the hostname.
        unless OpenSSL::SSL.verify_certificate_identity(server_certificate, host)
          raise OpenSSL::SSL::SSLError.new(%(host "#{host}" does not match the server certificate))
        else
          true
        end
      end

      def verify_peer?
        parent.connopts.tls[:verify_peer]
      end

      def host
        parent.connopts.host
      end

      def create_certificate_store
        store = OpenSSL::X509::Store.new
        store.set_default_paths
        ca_file = parent.connopts.tls[:cert_chain_file]
        store.add_file(ca_file) if ca_file
        store
      end
    end

    class HttpConnection
      include HTTPMethods
      include Socksify
      include Connectify

      attr_reader :deferred, :conn
      attr_accessor :error, :connopts, :uri

      def initialize
        @deferred = true
        @middleware = []
      end

      def conn=(c)
        @conn = c
        @deferred = false
      end

      def activate_connection(client)
        begin
          EventMachine.bind_connect(@connopts.bind, @connopts.bind_port,
                                    @connopts.host, @connopts.port,
                                    HttpStubConnection) do |conn|
            post_init

            @deferred = false
            @conn = conn

            conn.parent = self
            conn.pending_connect_timeout = @connopts.connect_timeout
            conn.comm_inactivity_timeout = @connopts.inactivity_timeout
          end

          finalize_request(client)
        rescue EventMachine::ConnectionError => e
          #
          # Currently, this can only fire on initial connection setup
          # since #connect is a synchronous method. Hence, rescue the exception,
          # and return a failed deferred which will fail any client request at
          # the next tick. We fail at next tick to keep a consistent API when
          # the newly created HttpClient is failed. This approach has the
          # advantage of removing a state check of @deferred_status after
          # creating a new HttpRequest. The drawback is that users may set up a
          # callback which we know won't be used.
          #
          # Once there is async DNS, we'll iterate over the outstanding
          # client requests and fail them in order.
          #
          # Net outcome: a failed connection will invoke the same ConnectionError
          # message on the connection deferred, and on the client deferred.
          #
          EM.next_tick { client.close(e.message) }
        end
      end

      def setup_request(method, options = {}, c = nil)
        c ||= HttpClient.new(self, ::AblyHttpRequest::HttpClientOptions.new(@uri, options, method))
        @deferred ? activate_connection(c) : finalize_request(c)
        c
      end

      def finalize_request(c)
        @conn.callback { c.connection_completed }

        middleware.each do |m|
          c.callback(&m.method(:response)) if m.respond_to?(:response)
        end

        @clients.push c
      end

      def middleware
        [HttpRequest.middleware, @middleware].flatten
      end

      def post_init
        @clients = []
        @pending = []

        @p = Http::Parser.new
        @p.header_value_type = :mixed
        @p.on_headers_complete = proc do |h|
          if client
            if @p.status_code == 100
              client.send_request_body
              @p.reset!
            else
              client.parse_response_header(h, @p.http_version, @p.status_code)
              :reset if client.req.no_body?
            end
          else
            # if we receive unexpected data without a pending client request,
            # reset the parser to avoid firing any further callbacks and close
            # the connection because we're processing invalid HTTP
            @p.reset!
            unbind
            :stop
          end
        end

        @p.on_body = proc do |b|
          client.on_body_data(b)
        end

        @p.on_message_complete = proc do
          if !client.continue?
            c = @clients.shift
            c.state = :finished
            c.on_request_complete
          end
        end
      end

      def use(klass, *args, &block)
        @middleware << klass.new(*args, &block)
      end

      def peer
        Socket.unpack_sockaddr_in(@peer)[1] rescue nil
      end

      def receive_data(data)
        begin
          @p << data
        rescue HTTP::Parser::Error => e
          c = @clients.shift
          c.nil? ? unbind(e.message) : c.on_error(e.message)
        end
      end

      def connection_completed
        @peer = @conn.get_peername

        if @connopts.socks_proxy?
          socksify(client.req.uri.hostname, client.req.uri.inferred_port, *@connopts.proxy[:authorization]) { start }
        elsif @connopts.connect_proxy?
          connectify(client.req.uri.hostname, client.req.uri.inferred_port, *@connopts.proxy[:authorization]) { start }
        else
          start
        end
      end

      def start
        @conn.start_tls(@connopts.tls) if client && client.req.ssl?
        @conn.succeed
      end

      def redirect(client, new_location)
        old_location = client.req.uri
        new_location = client.req.set_uri(new_location)

        if client.req.keepalive
          # Application requested a keep-alive connection but one of the requests
          # hits a cross-origin redirect. We need to open a new connection and
          # let both connections proceed simultaneously.
          if old_location.origin != new_location.origin
            conn = HttpConnection.new
            client.conn = conn
            conn.connopts = @connopts
            conn.connopts.https = new_location.scheme == "https"
            conn.uri = client.req.uri
            conn.activate_connection(client)

          # If the redirect is a same-origin redirect on a keep-alive request,
          # then immediately dispatch the request over the existing connection.
          else
            @clients.push client
            client.connection_completed
          end
        else
          # If the connection is not keep-alive, unbind will fire and we'll
          # reconnect using the same connection object.
          @pending.push client
        end
      end

      def unbind(reason = nil)
        @clients.map { |c| c.unbind(reason) }

        if r = @pending.shift
          @clients.push r

          r.reset!
          @p.reset!

          begin
            @conn.set_deferred_status :unknown

            if @connopts.proxy
              @conn.reconnect(@connopts.host, @connopts.port)
            else
              @conn.reconnect(r.req.host, r.req.port)
            end

            @conn.pending_connect_timeout = @connopts.connect_timeout
            @conn.comm_inactivity_timeout = @connopts.inactivity_timeout
            @conn.callback { r.connection_completed }
          rescue EventMachine::ConnectionError => e
            @clients.pop.close(e.message)
          end
        else
          @deferred = true
          @conn.close_connection
        end
      end
      alias :close :unbind

      def send_data(data)
        @conn.send_data data
      end

      def stream_file_data(filename, args = {})
        @conn.stream_file_data filename, args
      end

      def stream_data(io, opts = {})
        EventMachine::AblyHttpRequest::IOStreamer.new(self, io, opts)
      end

      private

      def client
        @clients.first
      end
    end
  end
end
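The verification strategy described in the comments on ssl_handshake_completed (build a store from the system trust anchors, verify the server's leaf certificate with the help of the intermediates it presented, then check the hostname) can be reproduced with plain OpenSSL outside EventMachine. A rough standalone sketch, assuming network access to the example host and a populated system trust store:

require 'openssl'
require 'socket'

host = 'example.com'

tcp = TCPSocket.new(host, 443)
tls = OpenSSL::SSL::SSLSocket.new(tcp)
tls.hostname = host # SNI
tls.connect

leaf  = tls.peer_cert       # analogous to get_peer_cert above
chain = tls.peer_cert_chain # analogous to @peer_certificate_chain

store = OpenSSL::X509::Store.new
store.set_default_paths     # system trust anchors, as in create_certificate_store

# Certification-path building and trust verification, aided by the presented
# intermediates, followed by the hostname check.
raise 'certificate verification failed' unless store.verify(leaf, chain)
raise 'hostname mismatch' unless OpenSSL::SSL.verify_certificate_identity(leaf, host)

tls.close
tcp.close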