zkruby 3.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data.tar.gz.sig +0 -0
- data/.gemtest +0 -0
- data/History.txt +18 -0
- data/Manifest.txt +39 -0
- data/README.rdoc +119 -0
- data/Rakefile +39 -0
- data/jute/jute.citrus +105 -0
- data/jute/lib/hoe/jute.rb +56 -0
- data/jute/lib/jute.rb +120 -0
- data/lib/em_zkruby.rb +4 -0
- data/lib/jute/zookeeper.rb +203 -0
- data/lib/zkruby.rb +4 -0
- data/lib/zkruby/bindata.rb +45 -0
- data/lib/zkruby/client.rb +608 -0
- data/lib/zkruby/enum.rb +108 -0
- data/lib/zkruby/eventmachine.rb +186 -0
- data/lib/zkruby/multi.rb +47 -0
- data/lib/zkruby/protocol.rb +182 -0
- data/lib/zkruby/rubyio.rb +310 -0
- data/lib/zkruby/session.rb +445 -0
- data/lib/zkruby/util.rb +141 -0
- data/lib/zkruby/zkruby.rb +27 -0
- data/spec/bindata_spec.rb +51 -0
- data/spec/enum_spec.rb +84 -0
- data/spec/eventmachine_spec.rb +50 -0
- data/spec/multi_spec.rb +93 -0
- data/spec/protocol_spec.rb +72 -0
- data/spec/recipe_helper.rb +68 -0
- data/spec/rubyio_spec.rb +8 -0
- data/spec/sequences_spec.rb +19 -0
- data/spec/server_helper.rb +45 -0
- data/spec/shared/auth.rb +40 -0
- data/spec/shared/basic.rb +180 -0
- data/spec/shared/binding.rb +33 -0
- data/spec/shared/chroot.rb +61 -0
- data/spec/shared/multi.rb +38 -0
- data/spec/shared/util.rb +56 -0
- data/spec/shared/watch.rb +49 -0
- data/spec/spec_helper.rb +12 -0
- data/src/jute/zookeeper.jute +288 -0
- data/yard_ext/enum_handler.rb +16 -0
- metadata +243 -0
- metadata.gz.sig +0 -0
data/lib/zkruby/rubyio.rb
@@ -0,0 +1,310 @@
+require 'socket'
+require 'thread'
+require 'monitor'
+
+# Binding over standard ruby sockets
+#
+# Manages 3 threads per zookeeper session
+#
+# Read thread
+# manages connecting to and reading from the tcp socket. Uses non blocking io to manage timeouts
+# and initiate the required ping requests.
+#
+# Write thread
+# each new connection spawns a new thread. Requests coming from the session in response
+# to multiple threads are written to a blocking queue. While the connection is alive
+# this thread reads from the queue and writes to the socket, all in blocking fashion
+# TODO: Is it really ok to do a non-blocking read during a blocking write?
+#
+# Event thread
+# All response and watch callbacks are put on another blocking queue to be read and executed
+# by this thread.
+#
+# All interaction with the session is synchronized
+#
+# Client synchronous code is implemented with a condition variable that waits on the callback/errback
+module ZooKeeper::RubyIO
+
+    class Connection
+        include ZooKeeper::Protocol
+        include Slf4r::Logger
+        include Socket::Constants
+
+        def initialize(host,port,timeout,session)
+            @session = session
+            @write_queue = Queue.new()
+
+            # JRuby cannot do non-blocking connects, which means there is
+            # no way to properly implement the connection-timeout
+            # See http://jira.codehaus.org/browse/JRUBY-5165
+            # In any case this should be encapsulated in TCPSocket.open(host,port,timeout)
+            if RUBY_PLATFORM == "java"
+                begin
+                    sock = TCPSocket.new(host,port.to_i)
+                rescue Errno::ECONNREFUSED
+                    logger.warn("TCP Connection refused to #{host}:#{port}")
+                    sock = nil
+                end
+            else
+                addr = Socket.getaddrinfo(host, nil)
+                sock = Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0)
+                sock.setsockopt(SOL_SOCKET, SO_LINGER, [0,-1].pack("ii"))
+                sock.setsockopt(SOL_TCP, TCP_NODELAY,[0].pack("i_"))
+                sockaddr = Socket.pack_sockaddr_in(port, addr[0][3])
+                begin
+                    sock.connect_nonblock(sockaddr)
+                rescue Errno::EINPROGRESS
+                    resp = IO.select(nil, [sock], nil, timeout)
+                    begin
+                        sock.connect_nonblock(sockaddr)
+                    rescue Errno::ECONNREFUSED
+                        logger.warn("Connection refused to #{ host }:#{ port }")
+                        sock = nil
+                    rescue Errno::EISCONN
+                    end
+                end
+            end
+            @socket = sock
+            Thread.new(sock) { |sock| write_loop(sock) } if sock
+        end
+
+        # This is called from random client threads, but only within
+        # a @session.synchronized() block
+        def send_data(data)
+            @write_queue.push(data)
+        end
+
+        # Since this runs in its very own thread
+        # we can use boring blocking IO
+        def write_loop(socket)
+            Thread.current[:name] = "ZooKeeper::RubyIO::WriteLoop"
+            begin
+                while socket
+                    data = @write_queue.pop()
+                    if socket.write(data) != data.length()
+                        #TODO - will this really ever happen
+                        logger.warn("Incomplete write!")
+                    end
+                    logger.debug { "Sending: " + data.unpack("H*")[0] }
+                end
+            rescue Exception => ex
+                logger.warn("Exception in write loop",ex)
+                disconnect()
+            end
+
+        end
+
+        def read_loop()
+            socket = @socket
+            ping = 0
+            while socket # effectively forever
+                begin
+                    data = socket.read_nonblock(1024)
+                    logger.debug { "Received (#{data.length})" + data.unpack("H*")[0] }
+                    receive_data(data)
+                    ping = 0
+                rescue IO::WaitReadable
+                    select_result = IO.select([socket],[],[],@session.ping_interval)
+                    unless select_result
+                        ping += 1
+                        # two timeouts in a row mean we need to send a ping
+                        case ping
+                        when 1 ; @session.synchronize { @session.ping() }
+                        when 2
+                            logger.debug{"No response to ping in #{@session.ping_interval}*2"}
+                            break
+                        end
+                    end
+                rescue EOFError
+                    logger.debug { "EOF reading from socket" }
+                    break
+                rescue Exception => ex
+                    logger.warn( "#{ex.class} exception in readloop",ex )
+                    break
+                end
+            end
+            disconnect()
+        end
+
+        def disconnect()
+            socket = @socket
+            @socket = nil
+            socket.close if socket
+        rescue Exception => ex
+            #oh well
+            logger.debug("Exception closing socket",ex)
+        end
+
+        # Protocol requirement
+        def receive_records(packet_io)
+            @session.synchronize { @session.receive_records(packet_io) }
+        end
+
+    end #Class connection
+
+    class Binding
+        include Slf4r::Logger
+        attr_reader :session
+        def self.available?
+            true
+        end
+
+        def self.context(&context_block)
+            yield Thread
+        end
+
+        def initialize()
+            @event_queue = Queue.new()
+        end
+
+        def pop_event_queue()
+            queued = @event_queue.pop()
+            return false unless queued
+            logger.debug { "Invoking #{queued[0]}" }
+            callback,*args = queued
+            callback.call(*args)
+            logger.debug { "Completed #{queued[0]}" }
+            return true
+        rescue Exception => ex
+            logger.warn( "Exception in event thread", ex )
+            return true
+        end
+
+        def start(client,session)
+            @session = session
+            @session.extend(MonitorMixin)
+
+            # start the event thread
+            @event_thread = Thread.new() do
+                Thread.current[:name] = "ZooKeeper::RubyIO::EventLoop"
+
+                # In this thread, the current client is always this client!
+                Thread.current[ZooKeeper::CURRENT] = [client]
+                loop do
+                    break unless pop_event_queue()
+                end
+            end
+
+            # and the read thread
+            Thread.new() do
+                begin
+                    Thread.current[:name] = "ZooKeeper::RubyIO::ReadLoop"
+                    conn = session.synchronize { session.start(); session.conn() } # will invoke connect
+                    loop do
+                        break unless conn
+                        conn.read_loop()
+                        conn = session.synchronize { session.disconnected(); session.conn() }
+                    end
+                    #event of death
+                    logger.debug("Pushing nil (event of death) to event queue")
+                    @event_queue.push(nil)
+                rescue Exception => ex
+                    logger.error( "Exception in session thread", ex )
+                end
+            end
+        end
+
+        # session callback, IO thread
+        def connect(host,port,delay,timeout)
+            sleep(delay)
+            conn = Connection.new(host,port,timeout,session)
+            session.synchronize() { session.prime_connection(conn) }
+        end
+
+
+        def close(&callback)
+            op = AsyncOp.new(self,&callback)
+
+            session.synchronize do
+                session.close() do |error,response|
+                    op.resume(error,response)
+                end
+            end
+
+            op
+
+        end
+
+        def queue_request(*args,&callback)
+
+            op = AsyncOp.new(self,&callback)
+
+            session.synchronize do
+                session.queue_request(*args) do |error,response|
+                    op.resume(error,response)
+                end
+            end
+
+            op
+        end
+
+        def event_thread?
+            Thread.current.equal?(@event_thread)
+        end
+
+        def invoke(*args)
+            @event_queue.push(args)
+        end
+
+    end #Binding
+
+    class AsyncOp < ::ZooKeeper::AsyncOp
+
+        def initialize(binding,&callback)
+            @mutex = Monitor.new
+            @cv = @mutex.new_cond()
+            @callback = callback
+            @rubyio = binding
+        end
+
+        def resume(error,response)
+            mutex.synchronize do
+                @error = error
+                @result = nil
+                begin
+                    @result = @callback.call(response) unless error
+                rescue StandardError => ex
+                    @error = ex
+                end
+
+                if @error && @errback
+                    begin
+                        @result = @errback.call(@error)
+                        @error = nil
+                    rescue StandardError => ex
+                        @error = ex
+                    end
+                end
+
+                cv.signal()
+            end
+        end
+
+        private
+        attr_reader :mutex, :cv, :error, :result
+
+        def set_error_handler(errback)
+            @errback = errback
+        end
+
+        def wait_value()
+            if @rubyio.event_thread?
+                #Waiting in the event thread (eg made a synchronous call inside a callback)
+                #Keep processing events until we are resumed
+                until defined?(@result)
+                    break unless @rubyio.pop_event_queue()
+                end
+            else
+                mutex.synchronize do
+                    cv.wait()
+                end
+            end
+
+            raise error if error
+            result
+        end
+    end
+
+end #ZooKeeper::RubyIO
+# Add our binding
+ZooKeeper.add_binding(ZooKeeper::RubyIO::Binding)
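How the three threads above cooperate is easiest to see from the client side. Below is a minimal usage sketch (editorial, not part of the package); ZooKeeper.connect and zk.get come from the gem's client layer (data/lib/zkruby/client.rb), which is not shown in this hunk, so the exact call shapes are assumptions.

# Hedged usage sketch against the RubyIO binding; assumes the public API
# exposed by data/lib/zkruby/client.rb (ZooKeeper.connect, #get).
require 'zkruby'

ZooKeeper.connect("localhost:2181") do |zk|
    # Synchronous style: the request is queued under the session monitor,
    # the write thread sends it, the read thread receives the reply, and
    # this thread blocks in AsyncOp#wait_value on the condition variable
    # until the event thread runs the callback and calls AsyncOp#resume.
    stat, data = zk.get("/example")

    # Asynchronous style: passing a block returns the AsyncOp immediately;
    # the block runs later on the ZooKeeper::RubyIO::EventLoop thread, so
    # it must not block indefinitely or the events queued behind it stall.
    op = zk.get("/example") { |stat, data| puts data }
end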
data/lib/zkruby/session.rb
@@ -0,0 +1,445 @@
+require 'set'
+module ZooKeeper
+
+    # Represents a session that may span connections
+    class Session
+
+        DEFAULT_TIMEOUT = 4
+        DEFAULT_CONNECT_DELAY = 0.2
+        DEFAULT_PORT = 2181
+
+        include Slf4r::Logger
+
+        attr_reader :ping_interval
+        attr_reader :ping_logger
+        attr_reader :timeout
+        attr_reader :conn
+        attr_accessor :watcher
+
+        def initialize(binding,addresses,options=nil)
+
+            @binding = binding
+
+            @addresses = parse_addresses(addresses)
+            parse_options(options)
+
+            # These are the server states
+            # :disconnected, :connected, :auth_failed, :expired
+            @keeper_state = nil
+
+            # Client state is
+            # :ready, :closing, :closed
+            @client_state = :ready
+
+            @xid=0
+            @pending_queue = []
+
+            # Create the watch list
+            # hash by watch type of hashes by path of set of watchers
+            @watches = [ :children, :data, :exists ].inject({}) do |ws,wtype|
+                ws[wtype] = Hash.new() { |h,k| h[k] = Set.new() }
+                ws
+            end
+
+            @watcher = nil
+
+            @ping_logger = Slf4r::LoggerFacade.new("ZooKeeper::Session::Ping")
+
+        end
+
+        def chroot(path)
+            return @chroot + path
+        end
+
+        def unchroot(path)
+            return path unless path
+            path.slice(@chroot.length..-1)
+        end
+
+        # close won't run your block if the connection is
+        # already closed, so this is how you can check
+        def closed?
+            @client_state == :closed
+        end
+
+        # Connection API - testing whether to send a ping
+        def connected?()
+            @keeper_state == :connected
+        end
+
+        # Connection API - Injects a new connection that is ready to receive records
+        # @param conn that responds to #send_records(record...) and #disconnect()
+        def prime_connection(conn)
+            @conn = conn
+            send_session_connect()
+            send_auth_data()
+            reset_watches()
+        end
+
+
+        # Connection API - called when data is available, reads and processes one packet/event
+        # @param io <IO>
+        def receive_records(io)
+            case @keeper_state
+            when :disconnected
+                complete_connection(io)
+            when :connected
+                process_reply(io)
+            else
+                logger.warn { "Receive packet for closed session #{@keeper_state}" }
+            end
+        end
+
+        # Connection API - called when no data has been received for #ping_interval
+        def ping()
+            if @keeper_state == :connected
+                ping_logger.debug { "Ping send" }
+                hdr = Proto::RequestHeader.new(:xid => -2, :_type => 11)
+                conn.send_records(hdr)
+            end
+        end
+
+        # Connection API - called when the connection has dropped from either end
+        def disconnected()
+            @conn = nil
+            logger.info { "Disconnected id=#{@session_id}, keeper=:#{@keeper_state}, client=:#{@client_state}" }
+
+            # We keep trying to reconnect until the session expiration time is reached
+            @disconnect_time = Time.now if @keeper_state == :connected
+            time_since_first_disconnect = (Time.now - @disconnect_time)
+
+            if @client_state == :closed || time_since_first_disconnect > timeout
+                session_expired()
+            else
+                # if we are connected then everything in the pending queue has been sent so
+                # we must clear
+                # if not, then we'll keep them and hope the next reconnect works
+                if @keeper_state == :connected
+                    clear_pending_queue(:disconnected)
+                    invoke_watch(@watcher,KeeperState::DISCONNECTED,nil,WatchEvent::NONE) if @watcher
+                end
+                @keeper_state = :disconnected
+                reconnect()
+            end
+        end
+
+        # Start the session - called by the ProtocolBinding
+        def start()
+            raise ProtocolError, "Already started!" unless @keeper_state.nil?
+            @keeper_state = :disconnected
+            @disconnect_time = Time.now
+            logger.debug("Starting new zookeeper client session")
+            reconnect()
+        end
+
+        def queue_request(request,op,opcode,response=nil,watch_type=nil,watcher=nil,ptype=Packet,&callback)
+            raise Error.SESSION_EXPIRED, "Session expired due to client state #{@client_state}" unless @client_state == :ready
+            watch_type, watcher = resolve_watcher(watch_type,watcher)
+
+            xid = next_xid
+
+            packet = ptype.new(xid,op,opcode,request,response,watch_type,watcher, callback)
+
+            queue_packet(packet)
+        end
+
+        def close(&callback)
+            case @client_state
+            when :ready
+                # we keep the requested block in a close packet
+                @close_packet = ClosePacket.new(next_xid(),:close,-11,nil,nil,nil,nil,callback)
+                close_packet = @close_packet
+                @client_state = :closing
+
+                # If there are other requests in flight, then we wait for them to finish
+                # before sending the close packet since it immediately causes the socket
+                # to close.
+                queue_close_packet_if_necessary()
+                @close_packet
+            when :closed, :closing
+                raise ProtocolError, "Already closed"
+            else
+                raise ProtocolError, "Unexpected state #{@client_state}"
+            end
+        end
+
+        private
+        attr_reader :watches
+        attr_reader :binding
+
+        def parse_addresses(addresses)
+            case addresses
+            when String
+                parse_addresses(addresses.split(","))
+            when Array
+                result = addresses.collect() { |addr| parse_address(addr) }
+                #Randomise the connection order
+                result.shuffle!
+            else
raise ArgumentError "Not able to parse addresses from #{addresses}"
+            end
+        end
+
+        def parse_address(address)
+            case address
+            when String
+                host,port = address.split(":")
+                port = DEFAULT_PORT unless port
+                [host,port]
+            when Array
+                address[0..1]
+            end
+        end
+
+        def parse_options(options)
+            @timeout = options.fetch(:timeout,DEFAULT_TIMEOUT)
+            @max_connect_delay = options.fetch(:connect_delay,DEFAULT_CONNECT_DELAY)
+            @connect_timeout = options.fetch(:connect_timeout,@timeout * 1.0 / 7.0)
+            @scheme = options.fetch(:scheme,nil)
+            @auth = options.fetch(:auth,nil)
+            @chroot = options.fetch(:chroot,"").chomp("/")
+        end
+
+        def reconnect()
+
+            #Rotate address
+            host,port = @addresses.shift
+            @addresses.push([host,port])
+
+            delay = rand() * @max_connect_delay
+
+            logger.debug { "Connecting id=#{@session_id} to #{host}:#{port} with delay=#{delay}, timeout=#{@connect_timeout}" }
+            binding.connect(host,port,delay,@connect_timeout)
+        end
+
+
+        def session_expired(reason=:expired)
+            clear_pending_queue(reason)
+
+            invoke_response(*@close_packet.error(reason)) if @close_packet
+
+            if @client_state == :closed
+                logger.info { "Session closed id=#{@session_id}, keeper=:#{@keeper_state}, client=:#{@client_state}" }
+            else
+                logger.warn { "Session expired id=#{@session_id}, keeper=:#{@keeper_state}, client=:#{@client_state}" }
+            end
+
+            invoke_watch(@watcher,KeeperState::EXPIRED,nil,WatchEvent::NONE) if @watcher
+            @keeper_state = reason
+            @client_state = :closed
+        end
+
+        def complete_connection(response)
+            result = Proto::ConnectResponse.read(response)
+            if (result.time_out <= 0)
+                #We're dead!
+                session_expired()
+            else
+                @timeout = result.time_out.to_f / 1000.0
+                @keeper_state = :connected
+
+                # Why 2/7 of the timeout? If a binding sees no server response in this period it is required to
+                # generate a ping request
+                # if 2 periods go by without activity it is required to disconnect
+                # so we are already more than half way through the session timeout
+                # and we need to give ourselves time to reconnect to another server
+                @ping_interval = @timeout * 2.0 / 7.0
+                @session_id = result.session_id
+                @session_passwd = result.passwd
logger.info { "Connected session_id=#{@session_id}, timeout=#{@time_out}, ping=#{@ping_interval}" }
+
+                logger.debug { "Sending #{@pending_queue.length} queued packets" }
+                @pending_queue.each { |p| send_packet(p) }
+
+                queue_close_packet_if_necessary()
+                invoke_watch(@watcher,KeeperState::CONNECTED,nil,WatchEvent::NONE) if @watcher
+            end
+        end
+
+        def send_session_connect()
+            req = Proto::ConnectRequest.new( :timeout => timeout )
+            req.last_zxid_seen = @last_zxid_seen if @last_zxid_seen
+            req.session_id = @session_id if @session_id
+            req.passwd = @session_passwd if @session_passwd
+
+            conn.send_records(req)
+        end
+
+        def send_auth_data()
+            if @scheme
+                req = Proto::AuthPacket.new(:scheme => @scheme, :auth => @auth)
+                packet = Packet.new(-4,:auth,100,req,nil,nil,nil,nil)
+                send_packet(packet)
+            end
+        end
+        # Watches are dropped on disconnect, we reset them here
+        # dropping connections can be a good way of cleaning up on the server side
+        # #TODO If watch reset is disabled the watches will be notified of connection loss
+        # otherwise they will be seamlessly re-added
+        # This way a watch is only ever triggered exactly once
+        def reset_watches()
+            unless watches[:children].empty? && watches[:data].empty? && watches[:exists].empty?
+                req = Proto::SetWatches.new()
+                req.relative_zxid = @last_zxid_seen
+                req.data_watches = watches[:data].keys
+                req.child_watches = watches[:children].keys
+                req.exist_watches = watches[:exists].keys
+
+                packet = Packet.new(-8,:set_watches,101,req,nil,nil,nil,nil)
+                send_packet(packet)
+            end
+        end
+
+        def process_reply(packet_io)
+            header = Proto::ReplyHeader.read(packet_io)
+
+            case header.xid.to_i
+            when -2
+                ping_logger.debug { "Ping reply" }
+            when -4
+                logger.debug { "Auth reply" }
+                session_expired(:auth_failed) unless header.err.to_i == 0
+            when -1
+                #Watch notification
+                event = Proto::WatcherEvent.read(packet_io)
+                logger.debug { "Watch notification #{event.inspect} " }
+                process_watch_notification(event.state.to_i,event.path,event._type.to_i)
+            when -8
+                #Reset watch reply
+                logger.debug { "SetWatch reply" }
+                #TODO If error, send :disconnected to all watches
+            else
+                # A normal packet reply. They should come in the order we sent them
+                # so we just match it to the packet at the front of the queue
+                packet = @pending_queue.shift
+                logger.debug { "Packet reply: #{packet.inspect}" }
+
+                if (packet.xid.to_i != header.xid.to_i)
+
+                    logger.error { "Bad XID! expected=#{packet.xid}, received=#{header.xid}" }
+
+                    # Treat this like a dropped connection, and then force the connection
+                    # to be dropped. But wait for the connection to notify us before
+                    # we actually update our keeper_state
+                    invoke_response(*packet.error(:disconnected))
+                    @conn.disconnect()
+                else
+
+                    @last_zxid_seen = header.zxid
+
+                    callback, error, response, watch_type = packet.result(header.err.to_i)
+                    invoke_response(callback, error, response, packet_io)
+
+                    if (watch_type)
+                        @watches[watch_type][packet.path] << packet.watcher
+                        logger.debug { "Registered #{packet.watcher} for type=#{watch_type} at #{packet.path}" }
+                    end
+                    queue_close_packet_if_necessary()
+                end
+            end
+        end
+
+
+        def process_watch_notification(state,path,event)
+
+            watch_event = WatchEvent.fetch(event)
+            watch_types = watch_event.watch_types()
+
+            keeper_state = KeeperState.fetch(state)
+
+            watches = watch_types.inject(Set.new()) do | result, watch_type |
+                more_watches = @watches[watch_type].delete(path)
+                result.merge(more_watches) if more_watches
+                result
+            end
+
+            if watches.empty?
+                logger.warn { "Received notification for unregistered watch #{state} #{path} #{event}" }
+            end
+            watches.each { | watch | invoke_watch(watch,keeper_state,path,watch_event) }
+
+        end
+
+        def invoke_watch(watch,state,path,event)
+            logger.debug { "Watch #{watch} triggered with #{state}, #{path}. #{event}" }
+            if watch.respond_to?(:process_watch)
+                callback = Proc.new() { |state,path,event| watch.process_watch(state,path,event) }
+            elsif watch.respond_to?(:call)
+                callback = watch
+            else
raise ProtocolError("Bad watcher #{watch}")
+            end
+
+            binding.invoke(callback,state,unchroot(path),event)
+        end
+
+        def clear_pending_queue(reason)
+            @pending_queue.each { |p| invoke_response(*p.error(reason)) }
+            @pending_queue.clear
+        end
+
+        def queue_close_packet_if_necessary
+            if @pending_queue.empty? && @keeper_state == :connected && @close_packet
+                logger.debug { "Sending close packet!" }
+                @client_state = :closed
+                queue_packet(@close_packet)
+                @close_packet = nil
+            end
+        end
+
+        def invoke_response(callback,error,response,packet_io = nil)
+            if callback
+
+                result = if error
+                        nil
+                    elsif response.respond_to?(:read) && packet_io
+                        response.read(packet_io)
+                    elsif response
+                        response
+                    else
+                        nil
+                    end
+
+                logger.debug { "Invoking response cb=#{callback} err=#{error} resp=#{result}" }
+                binding.invoke(callback,error,result)
+            end
+        end
+
+        def resolve_watcher(watch_type,watcher)
+            if watcher == true && @watcher
+                #the actual TrueClass refers to the default watcher
+                watcher = @watcher
+            elsif watcher.respond_to?(:call) || watcher.respond_to?(:process_watch)
+                # ok a proc or quacks like a watcher
+            elsif watcher
+                # something, but not something we can handle
+                raise ArgumentError, "Not a watcher #{watcher.inspect}"
+            else
+                watch_type = nil
+            end
+            [watch_type,watcher]
+        end
+
+
+        def queue_packet(packet)
+            @pending_queue.push(packet)
+            logger.debug { "Queued: #{packet.inspect}" }
+
+            if @keeper_state == :connected
+                send_packet(packet)
+            end
+        end
+
+        def next_xid
+            @xid += 1
+        end
+
+        def send_packet(packet)
+            records = [] << Proto::RequestHeader.new(:xid => packet.xid, :_type => packet.opcode)
+            records << packet.request if packet.request
+            conn.send_records(*records)
+        end
+
+
+    end # Session
+end
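The 2/7 arithmetic in Session#complete_connection above is worth making concrete. A worked example (editorial, not part of the package), using the gem's DEFAULT_TIMEOUT of 4 seconds and assuming the server negotiates it unchanged:

# Ping/reconnect budget for a 4 second session timeout.
timeout       = 4.0                  # seconds, i.e. result.time_out / 1000.0
ping_interval = timeout * 2.0 / 7.0  # ~1.14s: Connection#read_loop sends a ping
                                     # after one silent select() interval
drop_after    = 2 * ping_interval    # ~2.29s: read_loop disconnects after a
                                     # second silent interval (ping unanswered)
# drop_after is 4/7 (~57%) of the session timeout - "already more than half
# way through" - leaving ~1.71s for Session#reconnect to reach another server
# before the session expires.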