zk 0.6.5 → 0.7.1
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- data/.dotfiles/rspec-logging +4 -0
- data/.gitignore +2 -0
- data/.yardopts +8 -0
- data/Gemfile +6 -1
- data/README.markdown +86 -0
- data/lib/z_k/client/base.rb +692 -0
- data/lib/z_k/client/conveniences.rb +134 -0
- data/lib/z_k/client/state_mixin.rb +94 -0
- data/lib/z_k/client/unixisms.rb +89 -0
- data/lib/z_k/client.rb +12 -891
- data/lib/z_k/election.rb +3 -0
- data/lib/z_k/event_handler.rb +7 -5
- data/lib/z_k/mongoid.rb +1 -1
- data/lib/z_k/pool.rb +70 -27
- data/lib/z_k/threadpool.rb +7 -2
- data/lib/z_k/version.rb +1 -1
- data/lib/z_k.rb +1 -2
- data/spec/spec_helper.rb +1 -0
- data/spec/support/logging_progress_bar_formatter.rb +14 -0
- data/spec/watch_spec.rb +26 -8
- data/spec/{client_spec.rb → z_k/client_spec.rb} +1 -1
- data/spec/{election_spec.rb → z_k/election_spec.rb} +2 -3
- data/spec/{locker_spec.rb → z_k/locker_spec.rb} +1 -1
- data/spec/{mongoid_spec.rb → z_k/mongoid_spec.rb} +1 -1
- data/spec/{client_pool_spec.rb → z_k/pool_spec.rb} +98 -126
- data/spec/{threadpool_spec.rb → z_k/threadpool_spec.rb} +6 -3
- data/zk.gemspec +1 -0
- metadata +37 -26
data/lib/z_k/election.rb
CHANGED
@@ -347,6 +347,8 @@ module ZK
       @observing = true

       @leader_ack_sub ||= @zk.watcher.register(leader_ack_path) do |event|
+        logger.debug { "leader_ack_callback, event.node_deleted? #{event.node_deleted?}, event.node_created? #{event.node_created?}" }
+
         if event.node_deleted?
           the_king_is_dead
         elsif event.node_created?
@@ -354,6 +356,7 @@ module ZK
         else
           acked = leader_acked?(true)

+
           # If the current state of the system is not what we think it should be
           # a transition has occurred and we should fire our callbacks
           if (acked and !@leader_alive)
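The new debug line runs inside the leader-ack watcher block, logging which transition (delete vs. create) woke it. For context, a minimal sketch of the `watcher.register` pattern this hunk instruments; the connection string and path below are illustrative, and the `exists?(..., :watch => true)` call reflects the gem's one-shot-watch convention:

    require 'zk'

    zk   = ZK.new('localhost:2181')   # illustrative connection string
    path = '/election/leader_ack'     # illustrative znode path

    # register a block to run whenever a watch event fires for `path`
    sub = zk.watcher.register(path) do |event|
      if event.node_deleted?
        puts 'leader went away'       # the election code calls the_king_is_dead here
      elsif event.node_created?
        puts 'a new leader acked'
      end
    end

    # ZooKeeper watches are one-shot: arm one by reading with :watch => true,
    # and re-arm inside the callback after each event
    zk.exists?(path, :watch => true)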
data/lib/z_k/event_handler.rb
CHANGED
@@ -43,7 +43,7 @@ module ZK
     # @see ZooKeeper::WatcherEvent
     # @see ZooKeeper::EventHandlerSubscription
     def register(path, &block)
-
+      # logger.debug { "EventHandler#register path=#{path.inspect}" }
       EventHandlerSubscription.new(self, path, block).tap do |subscription|
         synchronize { @callbacks[path] << subscription }
       end
@@ -94,7 +94,7 @@ module ZK

     # called from the client-registered callback when an event fires
     def process(event) #:nodoc:
-
+      # logger.debug { "EventHandler#process dispatching event: #{event.inspect}" }# unless event.type == -1
       event.zk = @zk

       cb_key =
@@ -106,10 +106,12 @@ module ZK
         raise ZKError, "don't know how to process event: #{event.inspect}"
       end

+      # logger.debug { "EventHandler#process: cb_key: #{cb_key}" }
+
       cb_ary = synchronize do
         if event.node_event?
           if watch_type = ZOOKEEPER_WATCH_TYPE_MAP[event.type]
-
+            # logger.debug { "re-allowing #{watch_type.inspect} watches on path #{event.path.inspect}" }

            # we recieved a watch event for this path, now we allow code to set new watchers
            @outstanding_watches[watch_type].delete(event.path)
@@ -161,7 +163,7 @@ module ZK
           opts[:watcher] = watcher_callback
         else
           # outstanding watch for path and data pair already exists, so ignore
-
+          # logger.debug { "outstanding watch request for path #{path.inspect} and watcher type #{watch_type.inspect}, not re-registering" }
         end
       end
     end
@@ -188,7 +190,7 @@ module ZK
     end

     def safe_call(callbacks, *args)
-      callbacks.
+      while cb = callbacks.shift
       begin
         cb.call(*args) if cb.respond_to?(:call)
       rescue Exception => e
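The one behavioral change in this file is `safe_call`: a non-destructive traversal (the removed line is truncated to `callbacks.` in this rendering) becomes `while cb = callbacks.shift`, which drains the array as it goes. A short sketch of why that matters; this is my reading of the change, with an illustrative usage line, not text from the gem:

    # destructive traversal: each callback is removed before it runs, so it
    # can run at most once, and entries appended while we're iterating are
    # still picked up -- mutating an Array mid-`each` has no such guarantee
    def safe_call(callbacks, *args)
      while cb = callbacks.shift
        begin
          cb.call(*args) if cb.respond_to?(:call)
        rescue Exception => e
          # the real method logs the failure; one bad callback must not
          # prevent the remaining ones from firing
        end
      end
    end

    cbs = [lambda { |ev| puts "got #{ev}" }]
    safe_call(cbs, :node_created)
    cbs.empty?  # => true -- the list is consumed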
data/lib/z_k/mongoid.rb
CHANGED
@@ -5,7 +5,7 @@ module ZK
   #
   # Before use (in one of your Rails initializers, for example) you should
   # assign either a ZK::Client or ZK::Pool subclass to
-  #
+  # ZK::Mongoid::Locking.zk_lock_pool.
   #
   # this class assumes the availability of a 'logger' method in the mixee
   #
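The corrected comment now names the assignment target. A sketch of what that setup might look like, assuming a Rails initializer (as the comment itself suggests) and `ZK::Pool::Simple`; the pool class and connection string are assumptions, since the comment only says "a ZK::Client or ZK::Pool subclass":

    # config/initializers/zk.rb (hypothetical)
    require 'zk'

    # hand the Mongoid locking mixin a pool to draw lock connections from
    ZK::Mongoid::Locking.zk_lock_pool = ZK::Pool::Simple.new('localhost:2181')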
data/lib/z_k/pool.rb
CHANGED
@@ -6,9 +6,15 @@ module ZK
     def initialize
       @state = :init

-      @
-      @
-
+      @mutex = Monitor.new
+      @checkin_cond = @mutex.new_cond
+
+      @connections = [] # all connections we control
+      @pool = [] # currently available connections
+
+      # this is required for 1.8.7 compatibility
+      @on_connected_subs = {}
+      @on_connected_subs.extend(MonitorMixin)
     end

     # has close_all! been called on this ConnectionPool ?
@@ -34,7 +40,7 @@ module ZK
     # close all the connections on the pool
     # @param optional Boolean graceful allow the checked out connections to come back first?
     def close_all!
-      synchronize do
+      @mutex.synchronize do
         return unless open?
         @state = :closing

@@ -47,7 +53,7 @@ module ZK
     # calls close! on all connection objects, whether or not they're back in the pool
     # this is DANGEROUS!
     def force_close! #:nodoc:
-      synchronize do
+      @mutex.synchronize do
         return if (closed? or forced?)
         @state = :forced

@@ -99,7 +105,7 @@ module ZK
     end

     def size #:nodoc:
-      @pool.size
+      @connection.synchronize { @pool.size }
     end

     def pool_state #:nodoc:
@@ -108,7 +114,7 @@ module ZK

     protected
     def synchronize
-      @
+      @mutex.synchronize { yield }
     end

     def assert_open!
@@ -140,10 +146,9 @@ module ZK
       @max_clients = Integer(opts.delete(:max_clients))
       @connection_timeout = opts.delete(:timeout)

-
-      @pool = [] # currently available connections
+      @count_waiters = 0

-      synchronize do
+      @mutex.synchronize do
         populate_pool!(@min_clients)
         @state = :open
       end
@@ -152,41 +157,48 @@ module ZK
     # returns the current number of allocated clients in the pool (not
     # available clients)
     def size
-      @connections.length
+      @mutex.synchronize { @connections.length }
     end

     # clients available for checkout (at time of call)
     def available_size
-      @pool.length
+      @mutex.synchronize { @pool.length }
     end

     def checkin(connection)
-      synchronize do
-
+      @mutex.synchronize do
+        if @pool.include?(connection)
+          logger.debug { "Pool already contains connection: #{connection.object_id}, @connections.include? #{@connections.include?(connection).inspect}" }
+          return
+        end
+
+        @pool << connection

-        @pool.unshift(connection)
         @checkin_cond.signal
       end
     end

     # number of threads waiting for connections
     def count_waiters #:nodoc:
-      @
+      @mutex.synchronize { @count_waiters }
     end

     def checkout(blocking=true)
       raise ArgumentError, "checkout does not take a block, use .with_connection" if block_given?
-      synchronize do
+      @mutex.synchronize do
         while true
           assert_open!

           if @pool.length > 0
             cnx = @pool.shift

-            # if the
-            #
-
-
+            # if the connection isn't connected, then set up an on_connection
+            # handler and try the next one in the pool
+            unless cnx.connected?
+              logger.debug { "connection #{cnx.object_id} is not connected" }
+              handle_checkin_on_connection(cnx)
+              next
+            end

             # otherwise we return the cnx
             return cnx
@@ -199,26 +211,57 @@ module ZK
           else
             return false
           end
-        end
+        end # while
       end
     end

+    # @private
+    def can_grow_pool?
+      @mutex.synchronize { @connections.size < @max_clients }
+    end
+
     protected
     def populate_pool!(num_cnx)
       num_cnx.times { add_connection! }
     end

     def add_connection!
-      synchronize do
+      @mutex.synchronize do
         cnx = create_connection
         @connections << cnx

-
-      end
+        handle_checkin_on_connection(cnx)
+      end # synchronize
     end

-    def
-
+    def handle_checkin_on_connection(cnx)
+      @mutex.synchronize do
+        do_checkin = lambda do
+          checkin(cnx)
+        end
+
+        if cnx.connected?
+          do_checkin.call
+          return
+        else
+          @on_connected_subs.synchronize do
+
+            sub = cnx.on_connected do
+              # this synchronization is to prevent a race between setting up the subscription
+              # and assigning it to the @on_connected_subs hash. It's possible that the callback
+              # would fire before we had a chance to add the sub to the hash.
+              @on_connected_subs.synchronize do
+                if sub = @on_connected_subs.delete(cnx)
+                  sub.unsubscribe
+                  do_checkin.call
+                end
+              end
+            end
+
+            @on_connected_subs[cnx] = sub
+          end
+        end
+      end
     end

     def create_connection
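The thread running through all of these hunks is the move from an implicit `synchronize` to an explicit `Monitor` (`@mutex`) plus a condition variable built with `@mutex.new_cond`, so that `checkout` can sleep until `checkin` signals, and `handle_checkin_on_connection` can defer checkin until a connection reports connected. A stripped-down sketch of the core wait/signal pattern, using only Ruby's standard `monitor` library; the state checks, pool growth, and on_connected bookkeeping of the real pool are omitted:

    require 'monitor'

    class TinyPool
      def initialize(connections)
        @mutex = Monitor.new
        @checkin_cond = @mutex.new_cond  # condition variable tied to @mutex
        @pool = connections.dup          # currently available connections
      end

      # blocks until a connection is available
      def checkout
        @mutex.synchronize do
          @checkin_cond.wait while @pool.empty?  # releases @mutex while sleeping
          @pool.shift
        end
      end

      # returns a connection and wakes one waiting checkout
      def checkin(cnx)
        @mutex.synchronize do
          @pool << cnx
          @checkin_cond.signal
        end
      end
    end

`ConditionVariable#wait` releases the monitor while the thread sleeps and reacquires it before returning, which is what makes the `while @pool.empty?` re-check on wakeup safe.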
data/lib/z_k/threadpool.rb
CHANGED
@@ -72,7 +72,9 @@ module ZK
       @threadqueue.clear
       @size.times { @threadqueue << KILL_TOKEN }

-
+      threads, @threadpool = @threadpool, []
+
+      while th = threads.shift
         begin
           th.join(timeout)
         rescue Exception => e
@@ -80,6 +82,8 @@ module ZK
           logger.error { e.to_std_format }
         end
       end
+
+      @threadqueue = ::Queue.new
     end

     nil
@@ -87,11 +91,12 @@ module ZK

     private
     def spawn_threadpool #:nodoc:
-      until @threadpool.size
+      until @threadpool.size >= @size.to_i
         thread = Thread.new do
           while @running
             begin
               op = @threadqueue.pop
+              # $stderr.puts "thread #{Thread.current.inspect} got #{op.inspect}"
               break if op == KILL_TOKEN
               op.call
             rescue Exception => e
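Three fixes land here: `shutdown` detaches the thread list before joining, rebuilds `@threadqueue` afterwards so the pool can be restarted, and `spawn_threadpool`'s loop condition (truncated to `until @threadpool.size` in this rendering) becomes `until @threadpool.size >= @size.to_i`. The shutdown mechanism itself is the classic poison-pill pattern; a minimal self-contained sketch, with illustrative names:

    require 'thread'

    KILL = :kill_token  # sentinel; the gem uses the obfuscated ZK::KILL_TOKEN

    queue   = Queue.new
    workers = Array.new(3) do
      Thread.new do
        loop do
          op = queue.pop       # blocks until work (or a sentinel) arrives
          break if op == KILL  # poison pill: this worker exits cleanly
          op.call
        end
      end
    end

    queue << lambda { puts 'work item ran' }

    # shutdown: one sentinel per worker guarantees every thread sees one
    workers.size.times { queue << KILL }
    workers.each { |th| th.join(5) }  # bounded join, as in the diff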
data/lib/z_k/version.rb
CHANGED
data/lib/z_k.rb
CHANGED
@@ -1,5 +1,4 @@
 require 'rubygems'
-require 'bundler/setup'

 require 'logger'
 require 'zookeeper'
@@ -26,7 +25,7 @@ require 'z_k/find'
 module ZK
   ZK_ROOT = File.expand_path('../..', __FILE__)

-  KILL_TOKEN = :
+  KILL_TOKEN = :__ZK_kILL_tOkEn__ #:nodoc:


   # The logger used by the ZK library. uses a Logger to +/dev/null+ by default
data/spec/spec_helper.rb
CHANGED
data/spec/support/logging_progress_bar_formatter.rb
ADDED
@@ -0,0 +1,14 @@
+require 'rspec/core/formatters/progress_formatter'
+
+module Motionbox
+  # essentially a monkey-patch to the ProgressBarFormatter, outputs
+  # '== #{example_proxy.description} ==' in the logs before each test. makes it
+  # easier to match up tests with the SQL they produce
+  class LoggingProgressBarFormatter < RSpec::Core::Formatters::ProgressFormatter
+    def example_started(example)
+      ZK.logger.info(yellow("\n=====<([ #{example.full_description} ])>=====\n"))
+      super
+    end
+  end
+end
+
data/spec/watch_spec.rb
CHANGED
@@ -100,18 +100,36 @@ describe ZK do
   end

   describe 'state watcher' do
-
-
-
+    describe 'live-fire test' do
+      before do
+        @event = nil
+        @cnx_str = "localhost:#{ZK_TEST_PORT}"
+
+        @zk = ZK.new(@cnx_str) do |zk|
+          @cnx_reg = zk.on_connected { |event| @event = event }
+        end
+      end

-
-
+      it %[should fire the registered callback] do
+        wait_while { @event.nil? }
+        @event.should_not be_nil
       end
     end

-
-
-
+    describe 'registered listeners' do
+      before do
+        @event = flexmock(:event) do |m|
+          m.should_receive(:type).and_return(-1)
+          m.should_receive(:zk=).with(any())
+          m.should_receive(:node_event?).and_return(false)
+          m.should_receive(:state_event?).and_return(true)
+          m.should_receive(:state).and_return(ZookeeperConstants::ZOO_CONNECTED_STATE)
+        end
+      end
+
+      it %[should only fire the callback once] do
+        pending "not sure if this is the behavior we want"
+      end
     end
   end
 end
data/spec/{election_spec.rb → z_k/election_spec.rb}
CHANGED
@@ -1,4 +1,4 @@
-require
+require 'spec_helper'

 describe ZK::Election do
   before do
@@ -262,10 +262,9 @@ describe ZK::Election do
   describe 'leadership transition' do
     before do
       @obama.vote!
-      @palin.vote!
-
       wait_until { @obama.leader? }

+      @palin.vote!
       @palin.should_not be_leader

       @got_life_event = @got_death_event = false