zeevex_concurrency 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +17 -0
- data/Gemfile +16 -0
- data/LICENSE.txt +22 -0
- data/README.md +29 -0
- data/Rakefile +1 -0
- data/lib/zeevex_concurrency/delay.rb +50 -0
- data/lib/zeevex_concurrency/delayed.rb +233 -0
- data/lib/zeevex_concurrency/event_loop.rb +154 -0
- data/lib/zeevex_concurrency/future.rb +60 -0
- data/lib/zeevex_concurrency/logging.rb +7 -0
- data/lib/zeevex_concurrency/nil_logger.rb +7 -0
- data/lib/zeevex_concurrency/promise.rb +32 -0
- data/lib/zeevex_concurrency/synchronized.rb +46 -0
- data/lib/zeevex_concurrency/thread_pool.rb +346 -0
- data/lib/zeevex_concurrency/version.rb +3 -0
- data/lib/zeevex_concurrency.rb +29 -0
- data/script/repl +10 -0
- data/script/testall +2 -0
- data/spec/delay_spec.rb +172 -0
- data/spec/delayed_spec.rb +104 -0
- data/spec/event_loop_spec.rb +161 -0
- data/spec/future_spec.rb +316 -0
- data/spec/promise_spec.rb +172 -0
- data/spec/spec_helper.rb +8 -0
- data/spec/thread_pool_spec.rb +281 -0
- data/zeevex_concurrency.gemspec +30 -0
- metadata +187 -0
data/lib/zeevex_concurrency/thread_pool.rb
ADDED
@@ -0,0 +1,346 @@
require 'zeevex_concurrency'
require 'zeevex_concurrency/event_loop'
require 'countdownlatch'
require 'thread'
require 'atomic'

module ZeevexConcurrency::ThreadPool
  module Stubs
    def busy?
      free_count == 0
    end

    def worker_count
      -1
    end

    def busy_count
      -1
    end

    def free_count
      (worker_count - busy_count)
    end

    #
    # flush any queued but un-executed tasks
    #
    def flush
      true
    end

    #
    # Returns after all currently enqueued tasks complete - does not guarantee
    # that tasks are not enqueued while waiting
    #
    def join
      latch = CountDownLatch.new(1)
      enqueue do
        latch.countdown!
      end
      latch.wait
      true
    end

    #
    # how many tasks are waiting
    #
    def backlog
      0
    end

    protected

    def _check_args(*args)
      args = args.reject {|f| f.nil? || !f.respond_to?(:call) }
      raise ArgumentError, "Must supply a callable or block" unless args.length == 1
      args[0]
    end
  end

  #
  # Use a single-threaded event loop to process jobs
  #
  class EventLoopAdapter
    include Stubs

    def initialize(loop = nil)
      @loop ||= ZeevexConcurrency::EventLoop.new
      start
    end

    def start
      @loop.start
    end

    def stop
      @loop.stop
    end

    def enqueue(callable = nil, &block)
      @loop.enqueue _check_args(callable, block)
    end

    def flush
      @loop.flush
      true
    end

    def backlog
      @loop.backlog
    end
  end

  #
  # Run job semi-synchronously (on a separate thread, but block on it)
  # We use a separate thread
  #
  class InlineThreadPool
    include Stubs

    def initialize(loop = nil)
      start
    end

    def start
      @started = true
    end

    def stop
      @started = false
    end

    def join
      true
    end

    def enqueue(callable = nil, &block)
      raise "Must be started" unless @started
      callable = _check_args(callable, block)
      thr = Thread.new do
        callable.call
      end
      thr.join
    end
  end

  #
  # Launch a concurrent thread for every new task enqueued
  #
  class ThreadPerJobPool
    include Stubs

    def initialize
      @mutex = Mutex.new
      @group = ThreadGroup.new
      @busy_count = Atomic.new(0)

      start
    end

    def enqueue(runnable = nil, &block)
      raise "Must be started" unless @started
      callable = _check_args(runnable, block)
      thr = Thread.new do
        @busy_count.update {|x| x + 1}
        callable.call
        @busy_count.update {|x| x - 1}
      end
      @group.add(thr)
    end

    def start
      @started = true
    end

    def join
      @group.list.dup.each do |thr|
        thr.join
      end
      true
    end

    def stop
      @mutex.synchronize do
        return unless @started

        @group.list.dup.each do |thr|
          thr.kill
        end

        @started = false
        @busy_count.set 0
      end
    end

    def busy_count
      @busy_count.value
    end

    def busy
      false
    end

    def worker_count
      @busy_count.value
    end
  end

  #
  # Use a fixed pool of N threads to process jobs
  #
  class FixedPool
    include Stubs

    def initialize(count = -1)
      if count == -1
        count = ZeevexConcurrency::ThreadPool.cpu_count * 2
      end
      @count = count
      @queue = Queue.new
      @mutex = Mutex.new
      @group = ThreadGroup.new
      @busy_count = Atomic.new(0)

      start
    end

    def enqueue(runnable = nil, &block)
      @queue << _check_args(runnable, block)
    end

    def start
      @mutex.synchronize do
        return if @started

        @stop_requested = false

        @count.times do
          thr = Thread.new(@queue) do
            while !@stop_requested
              begin
                work = @queue.pop

                # notify that this thread is stopping and wait for the signal to continue
                if work.is_a?(HaltObject)
                  work.halt!
                  continue
                end

                _start_work
                work.call
                _end_work
              rescue Exception
                ZeevexConcurrency.logger.error %{Exception caught in thread pool: #{$!.inspect}: #{$!.backtrace.join("\n")}}
              end
            end
          end
          @group.add(thr)
        end

        @started = true
      end
    end

    def stop
      @mutex.synchronize do
        return unless @started

        @stop_requested = true

        @group.list.each do |thr|
          thr.kill
        end

        @busy_count.set 0
        @started = false
      end
    end

    def busy?
      free_count == 0
    end

    def worker_count
      @count
    end

    def busy_count
      @busy_count.value
    end

    def free_count
      (worker_count - busy_count)
    end

    #
    # how many tasks are waiting
    #
    def backlog
      @queue.size
    end

    # flush queued jobs
    def flush
      @queue.clear
    end

    #
    # this is tricky as there may be one or more workers stuck in VERY long running jobs
    # so what we do is:
    #
    # Insert a job that stops processing
    # When it runs, we can be sure that all previous jobs have popped off the queue
    # However, previous jobs may still be running
    # So we have to ask each thread to pause until they've all paused
    #
    def join
      halter = HaltObject.new(@count)

      # ensure each thread gets a copy
      @count.times { @queue << halter }

      # wait until every thread has entered
      halter.wait
    end

    class HaltObject
      def initialize(count)
        @count = count
        @latch = CountDownLatch.new(count)
      end

      def halt!
        # notify that we're now waiting
        @latch.countdown!
        @latch.wait
      end

      def wait
        @latch.wait
      end
    end

    protected

    def _start_work
      @busy_count.update {|x| x + 1 }
    end

    def _end_work
      @busy_count.update {|x| x - 1 }
    end

  end

  #
  # Return the number of CPUs reported by the system
  #
  def self.cpu_count
    return Java::Java.lang.Runtime.getRuntime.availableProcessors if defined? Java::Java
    return File.read('/proc/cpuinfo').scan(/^processor\s*:/).size if File.exist? '/proc/cpuinfo'
    require 'win32ole'
    WIN32OLE.connect("winmgmts://").ExecQuery("select * from Win32_ComputerSystem").NumberOfProcessors
  rescue LoadError
    Integer `sysctl -n hw.ncpu 2>/dev/null` rescue 1
  end
end
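A minimal usage sketch of the FixedPool defined above (illustrative only, not part of the gem; it assumes the 'atomic' and 'countdownlatch' gems are installed):

# Illustrative sketch: exercising ZeevexConcurrency::ThreadPool::FixedPool as defined above.
require 'zeevex_concurrency/thread_pool'

pool = ZeevexConcurrency::ThreadPool::FixedPool.new(4)  # -1 (the default) means cpu_count * 2
results = Queue.new

10.times do |i|
  pool.enqueue { results << i * i }   # enqueue accepts a block or any #call-able
end

pool.join      # a HaltObject per worker drains the queue and pauses every thread
pool.backlog   # => 0 once the queue is empty
pool.stop      # kills the worker threads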
data/lib/zeevex_concurrency.rb
ADDED
@@ -0,0 +1,29 @@
require "zeevex_concurrency/version"

module ZeevexConcurrency
  module All
    def self.included(base)
      base.class_eval do
        include ZeevexConcurrency::Logging
      end
    end
  end

  def self.logger
    @logger
  end

  def self.logger=(logger)
    @logger = ZeevexConcurrency::Synchronized(logger)
  end
end

require 'zeevex_concurrency/synchronized'

require 'logger'
require 'zeevex_concurrency/nil_logger'

ZeevexConcurrency.logger = ZeevexConcurrency::NilLogger.new

require 'zeevex_concurrency/logging'
require 'zeevex_concurrency/event_loop'
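The library logs to a NilLogger by default; a sketch of swapping in a real logger through the writer above (illustrative, not part of the package):

# Illustrative sketch: logger= wraps the assigned object in ZeevexConcurrency::Synchronized,
# so a shared stdlib Logger can be used from worker threads.
require 'logger'
require 'zeevex_concurrency'

ZeevexConcurrency.logger = Logger.new(STDERR)
ZeevexConcurrency.logger.error "thread pools report rescued exceptions through this logger"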
data/script/repl
ADDED
@@ -0,0 +1,10 @@
#!/usr/bin/env ruby
$: << File.join(File.dirname(__FILE__), "../lib")
require 'pry'
require 'zeevex_concurrency'
require 'zeevex_concurrency/delayed'
require 'zeevex_concurrency/future'
require 'zeevex_concurrency/promise'
require 'zeevex_concurrency/delay'

binding.pry
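The repl script simply opens a Pry session with the library loaded; a few expressions one might try there, based on the Delay behaviour specified below (illustrative, not part of the package):

# Illustrative Pry session input.
d = ZeevexConcurrency.delay { Time.now }
d.ready?   # => true -- a Delay reports ready even before evaluation (see delay_spec.rb)
d.value    # computed on first dereference, then memoized for later calls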
data/script/testall
ADDED
data/spec/delay_spec.rb
ADDED
@@ -0,0 +1,172 @@
require File.join(File.dirname(__FILE__), 'spec_helper')
require 'zeevex_concurrency/delay.rb'

describe ZeevexConcurrency::Delay do
  clazz = ZeevexConcurrency::Delay

  before do
    @counter = 200
  end
  let :proccy do
    Proc.new { @counter += 1}
  end

  around :each do |ex|
    Timeout::timeout(30) do
      ex.run
    end
  end

  context 'argument checking' do
    it 'should not allow neither a callable nor a block' do
      expect { clazz.new }.
        to raise_error(ArgumentError)
    end

    it 'should not allow both a callable AND a block' do
      expect {
        clazz.new(Proc.new { 2 }) do
          1
        end
      }.to raise_error(ArgumentError)
    end

    it 'should accept a proc' do
      expect { clazz.new(Proc.new {}) }.
        not_to raise_error(ArgumentError)
    end

    it 'should accept a block' do
      expect {
        clazz.new do
          1
        end
      }.not_to raise_error(ArgumentError)
    end
  end

  context 'at creation time' do
    subject { clazz.new(proccy) }
    it { should be_ready }
  end

  context 'after first deference' do
    subject { clazz.new(proccy) }
    before do
      subject.value
    end

    it { should be_ready }
    its(:value) { should == 201 }
    it 'should return same value for repeated calls' do
      subject.value
      subject.value.should == 201
    end
  end

  context 'with exception' do
    class FooBar < StandardError; end
    subject do
      clazz.new lambda {
        raise FooBar, "test"
      }
    end

    it { should be_ready }
    it 'should reraise exception' do
      expect { subject.value }.
        to raise_error(FooBar)
    end

    it 'should optionally not reraise' do
      expect { subject.value(false) }.
        not_to raise_error(FooBar)
      subject.value(false).should be_a(FooBar)
    end
  end

  context '#wait' do
    subject { clazz.new(proccy) }

    it 'should return immediately' do
      t_start = Time.now
      res = subject.wait 2
      t_end = Time.now
      (t_end-t_start).round.should == 0
      res.should be_true
    end
  end

  context 'observing' do
    subject { clazz.new(proccy) }
    let :observer do
      mock()
    end

    it 'should notify observer after value deref' do
      observer.should_receive(:update).with(subject, 201, true)
      subject.add_observer observer
      subject.value
    end

    it 'should notify observer after value deref raises exception' do
      edelay = clazz.new(Proc.new { raise "foo" })
      observer.should_receive(:update).with(edelay, kind_of(Exception), false)
      edelay.add_observer observer
      edelay.value rescue nil
    end
  end

  context 'access from multiple threads' do

    let :pause_queue do
      Queue.new
    end

    subject {
      clazz.new do
        pause_queue.pop
        @counter += 1
      end
    }
    let :queue do
      Queue.new
    end

    before do
      subject
      queue
      pause_queue

      threads = []
      5.times do
        threads << Thread.new do
          queue << subject.value
        end
      end
      Thread.pass
      @queue_size_before_set = queue.size
      pause_queue << "foo"
      threads.map &:join
    end

    it 'should block all threads before value derefed' do
      @queue_size_before_set.should == 0
    end

    it 'should allow all threads to receive a value' do
      queue.size.should == 5
    end

    it 'should only evaluate the computation once' do
      @counter.should == 201
    end

    it 'should send the same value to all threads' do
      list = []
      5.times { list << queue.pop }
      list.should == [201,201,201,201,201]
    end
  end
end
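The exception and observer behaviour pinned down by this spec, condensed into a short sketch (illustrative, not part of the package):

# Illustrative sketch based on delay_spec.rb above.
require 'zeevex_concurrency'
require 'zeevex_concurrency/delay'

boom = ZeevexConcurrency::Delay.new(lambda { raise StandardError, "test" })
boom.ready?                # => true even though nothing has run yet
boom.value rescue $!       # first dereference evaluates the proc and re-raises
boom.value(false)          # => the cached exception object instead of a re-raise

watcher = Object.new
def watcher.update(delay, value, success)
  puts [value, success].inspect
end

ok = ZeevexConcurrency::Delay.new(lambda { 200 + 1 })
ok.add_observer watcher
ok.value                   # => 201; observer receives update(ok, 201, true)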
data/spec/delayed_spec.rb
ADDED
@@ -0,0 +1,104 @@
require File.join(File.dirname(__FILE__), 'spec_helper')
require 'zeevex_concurrency/delayed.rb'
require 'zeevex_concurrency/promise.rb'
require 'zeevex_concurrency/future.rb'
require 'zeevex_concurrency/delay.rb'

describe ZeevexConcurrency::Delayed do
  clazz = ZeevexConcurrency

  context 'creation' do
    context '#promise' do
      it 'should create a promise with a block' do
        clazz.promise do
        end.should be_a(ZeevexConcurrency::Promise)
      end

      it 'should create a promise with no arg or block' do
        clazz.promise.should be_a(ZeevexConcurrency::Promise)
      end
    end

    context '#future' do
      it 'should create a future' do
        clazz.future do
        end.should be_a(ZeevexConcurrency::Future)
      end
    end

    context '#delay' do
      it 'should create a delay given a block' do
        clazz.delay do
        end.should be_a(ZeevexConcurrency::Delay)
      end
    end
  end

  context 'typing' do
    let :efuture do
      ZeevexConcurrency.future(Proc.new {})
    end
    let :epromise do
      ZeevexConcurrency.promise(Proc.new {})
    end
    let :edelay do
      ZeevexConcurrency.delay(Proc.new {})
    end
    let :eproc do
      Proc.new {}
    end
    context '#delayed?' do
      it 'should be true for a promise' do
        clazz.delayed?(epromise).should be_true
      end
      it 'should be true for a future' do
        clazz.delayed?(efuture).should be_true
      end
      it 'should be true for a delay' do
        clazz.delayed?(edelay).should be_true
      end
      it 'should not be true for a proc' do
        clazz.delayed?(eproc).should be_false
      end
    end

    context '#future?' do
      it 'should be true for a future' do
        clazz.future?(efuture).should be_true
      end

      it 'should be false for a promise' do
        clazz.future?(epromise).should be_false
      end

      it 'should be false for a delay' do
        clazz.future?(edelay).should be_false
      end
    end

    context '#promise?' do
      it 'should be true for a promise' do
        clazz.promise?(epromise).should be_true
      end
      it 'should be false for a future' do
        clazz.promise?(efuture).should be_false
      end
      it 'should be false for a delay' do
        clazz.promise?(edelay).should be_false
      end
    end

    context '#delay?' do
      it 'should be true for a delay' do
        clazz.delay?(edelay).should be_true
      end
      it 'should be false for a promise' do
        clazz.delay?(epromise).should be_false
      end
      it 'should be false for a future' do
        clazz.delay?(efuture).should be_false
      end
    end
  end
end
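Taken together, the creation helpers and type predicates exercised above reduce to the following sketch (illustrative, not part of the package):

# Illustrative sketch based on delayed_spec.rb above.
require 'zeevex_concurrency'
require 'zeevex_concurrency/delayed'
require 'zeevex_concurrency/promise'
require 'zeevex_concurrency/future'
require 'zeevex_concurrency/delay'

pr = ZeevexConcurrency.promise            # a promise may be created with no block at all
fu = ZeevexConcurrency.future { 1 + 1 }   # futures and delays take a block or a callable
de = ZeevexConcurrency.delay  { 2 + 2 }

ZeevexConcurrency.delayed?(pr)            # => true for promises, futures and delays alike
ZeevexConcurrency.future?(fu)             # => true
ZeevexConcurrency.promise?(fu)            # => false
ZeevexConcurrency.delay?(de)              # => true
ZeevexConcurrency.delayed?(Proc.new {})   # => false for a plain callable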