concurrent-ruby 1.1.5
- checksums.yaml +7 -0
- data/CHANGELOG.md +478 -0
- data/Gemfile +41 -0
- data/LICENSE.md +23 -0
- data/README.md +381 -0
- data/Rakefile +327 -0
- data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +159 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
- data/lib/concurrent-ruby.rb +1 -0
- data/lib/concurrent.rb +134 -0
- data/lib/concurrent/agent.rb +587 -0
- data/lib/concurrent/array.rb +66 -0
- data/lib/concurrent/async.rb +459 -0
- data/lib/concurrent/atom.rb +222 -0
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +66 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +126 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +143 -0
- data/lib/concurrent/atomic/atomic_markable_reference.rb +164 -0
- data/lib/concurrent/atomic/atomic_reference.rb +204 -0
- data/lib/concurrent/atomic/count_down_latch.rb +100 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +128 -0
- data/lib/concurrent/atomic/event.rb +109 -0
- data/lib/concurrent/atomic/java_count_down_latch.rb +42 -0
- data/lib/concurrent/atomic/java_thread_local_var.rb +37 -0
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +44 -0
- data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
- data/lib/concurrent/atomic/read_write_lock.rb +254 -0
- data/lib/concurrent/atomic/reentrant_read_write_lock.rb +379 -0
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +161 -0
- data/lib/concurrent/atomic/semaphore.rb +145 -0
- data/lib/concurrent/atomic/thread_local_var.rb +104 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +56 -0
- data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
- data/lib/concurrent/atomics.rb +10 -0
- data/lib/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
- data/lib/concurrent/collection/copy_on_write_observer_set.rb +111 -0
- data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent/collection/lock_free_stack.rb +158 -0
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
- data/lib/concurrent/collection/map/synchronized_map_backend.rb +82 -0
- data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
- data/lib/concurrent/concern/deprecation.rb +34 -0
- data/lib/concurrent/concern/dereferenceable.rb +73 -0
- data/lib/concurrent/concern/logging.rb +32 -0
- data/lib/concurrent/concern/obligation.rb +220 -0
- data/lib/concurrent/concern/observable.rb +110 -0
- data/lib/concurrent/concurrent_ruby.jar +0 -0
- data/lib/concurrent/configuration.rb +184 -0
- data/lib/concurrent/constants.rb +8 -0
- data/lib/concurrent/dataflow.rb +81 -0
- data/lib/concurrent/delay.rb +199 -0
- data/lib/concurrent/errors.rb +69 -0
- data/lib/concurrent/exchanger.rb +352 -0
- data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
- data/lib/concurrent/executor/cached_thread_pool.rb +62 -0
- data/lib/concurrent/executor/executor_service.rb +185 -0
- data/lib/concurrent/executor/fixed_thread_pool.rb +206 -0
- data/lib/concurrent/executor/immediate_executor.rb +66 -0
- data/lib/concurrent/executor/indirect_immediate_executor.rb +44 -0
- data/lib/concurrent/executor/java_executor_service.rb +91 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +29 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +123 -0
- data/lib/concurrent/executor/ruby_executor_service.rb +78 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +22 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +362 -0
- data/lib/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent/executor/serialized_execution.rb +107 -0
- data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent/executor/simple_executor_service.rb +100 -0
- data/lib/concurrent/executor/single_thread_executor.rb +56 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +87 -0
- data/lib/concurrent/executor/timer_set.rb +173 -0
- data/lib/concurrent/executors.rb +20 -0
- data/lib/concurrent/future.rb +141 -0
- data/lib/concurrent/hash.rb +59 -0
- data/lib/concurrent/immutable_struct.rb +93 -0
- data/lib/concurrent/ivar.rb +207 -0
- data/lib/concurrent/map.rb +337 -0
- data/lib/concurrent/maybe.rb +229 -0
- data/lib/concurrent/mutable_struct.rb +229 -0
- data/lib/concurrent/mvar.rb +242 -0
- data/lib/concurrent/options.rb +42 -0
- data/lib/concurrent/promise.rb +579 -0
- data/lib/concurrent/promises.rb +2167 -0
- data/lib/concurrent/re_include.rb +58 -0
- data/lib/concurrent/scheduled_task.rb +318 -0
- data/lib/concurrent/set.rb +66 -0
- data/lib/concurrent/settable_struct.rb +129 -0
- data/lib/concurrent/synchronization.rb +30 -0
- data/lib/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent/synchronization/abstract_object.rb +24 -0
- data/lib/concurrent/synchronization/abstract_struct.rb +160 -0
- data/lib/concurrent/synchronization/condition.rb +60 -0
- data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent/synchronization/jruby_object.rb +45 -0
- data/lib/concurrent/synchronization/lock.rb +36 -0
- data/lib/concurrent/synchronization/lockable_object.rb +74 -0
- data/lib/concurrent/synchronization/mri_object.rb +44 -0
- data/lib/concurrent/synchronization/mutex_lockable_object.rb +76 -0
- data/lib/concurrent/synchronization/object.rb +183 -0
- data/lib/concurrent/synchronization/rbx_lockable_object.rb +65 -0
- data/lib/concurrent/synchronization/rbx_object.rb +49 -0
- data/lib/concurrent/synchronization/truffleruby_object.rb +47 -0
- data/lib/concurrent/synchronization/volatile.rb +36 -0
- data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent/thread_safe/util.rb +16 -0
- data/lib/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent/thread_safe/util/data_structures.rb +63 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +246 -0
- data/lib/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent/timer_task.rb +334 -0
- data/lib/concurrent/tuple.rb +86 -0
- data/lib/concurrent/tvar.rb +258 -0
- data/lib/concurrent/utility/at_exit.rb +97 -0
- data/lib/concurrent/utility/engine.rb +56 -0
- data/lib/concurrent/utility/monotonic_time.rb +58 -0
- data/lib/concurrent/utility/native_extension_loader.rb +79 -0
- data/lib/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent/utility/processor_counter.rb +158 -0
- data/lib/concurrent/version.rb +3 -0
- metadata +193 -0
data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java
ADDED
@@ -0,0 +1,199 @@
/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

// This is based on 1.16 version

package com.concurrent_ruby.ext.jsr166y;

import java.util.Random;

/**
 * A random number generator isolated to the current thread. Like the
 * global {@link java.util.Random} generator used by the {@link
 * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
 * with an internally generated seed that may not otherwise be
 * modified. When applicable, use of {@code ThreadLocalRandom} rather
 * than shared {@code Random} objects in concurrent programs will
 * typically encounter much less overhead and contention. Use of
 * {@code ThreadLocalRandom} is particularly appropriate when multiple
 * tasks (for example, each a {@link ForkJoinTask}) use random numbers
 * in parallel in thread pools.
 *
 * <p>Usages of this class should typically be of the form:
 * {@code ThreadLocalRandom.current().nextX(...)} (where
 * {@code X} is {@code Int}, {@code Long}, etc).
 * When all usages are of this form, it is never possible to
 * accidentally share a {@code ThreadLocalRandom} across multiple threads.
 *
 * <p>This class also provides additional commonly used bounded random
 * generation methods.
 *
 * @since 1.7
 * @author Doug Lea
 */
public class ThreadLocalRandom extends Random {
    // same constants as Random, but must be redeclared because private
    private static final long multiplier = 0x5DEECE66DL;
    private static final long addend = 0xBL;
    private static final long mask = (1L << 48) - 1;

    /**
     * The random seed. We can't use super.seed.
     */
    private long rnd;

    /**
     * Initialization flag to permit calls to setSeed to succeed only
     * while executing the Random constructor. We can't allow others
     * since it would cause setting seed in one part of a program to
     * unintentionally impact other usages by the thread.
     */
    boolean initialized;

    // Padding to help avoid memory contention among seed updates in
    // different TLRs in the common case that they are located near
    // each other.
    private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    /**
     * The actual ThreadLocal
     */
    private static final ThreadLocal<ThreadLocalRandom> localRandom =
        new ThreadLocal<ThreadLocalRandom>() {
            protected ThreadLocalRandom initialValue() {
                return new ThreadLocalRandom();
            }
        };


    /**
     * Constructor called only by localRandom.initialValue.
     */
    ThreadLocalRandom() {
        super();
        initialized = true;
    }

    /**
     * Returns the current thread's {@code ThreadLocalRandom}.
     *
     * @return the current thread's {@code ThreadLocalRandom}
     */
    public static ThreadLocalRandom current() {
        return localRandom.get();
    }

    /**
     * Throws {@code UnsupportedOperationException}. Setting seeds in
     * this generator is not supported.
     *
     * @throws UnsupportedOperationException always
     */
    public void setSeed(long seed) {
        if (initialized)
            throw new UnsupportedOperationException();
        rnd = (seed ^ multiplier) & mask;
    }

    protected int next(int bits) {
        rnd = (rnd * multiplier + addend) & mask;
        return (int) (rnd >>> (48-bits));
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @throws IllegalArgumentException if least greater than or equal
     *         to bound
     * @return the next value
     */
    public int nextInt(int least, int bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextInt(bound - least) + least;
    }

    /**
     * Returns a pseudorandom, uniformly distributed value
     * between 0 (inclusive) and the specified value (exclusive).
     *
     * @param n the bound on the random number to be returned. Must be
     *        positive.
     * @return the next value
     * @throws IllegalArgumentException if n is not positive
     */
    public long nextLong(long n) {
        if (n <= 0)
            throw new IllegalArgumentException("n must be positive");
        // Divide n by two until small enough for nextInt. On each
        // iteration (at most 31 of them but usually much less),
        // randomly choose both whether to include high bit in result
        // (offset) and whether to continue with the lower vs upper
        // half (which makes a difference only if odd).
        long offset = 0;
        while (n >= Integer.MAX_VALUE) {
            int bits = next(2);
            long half = n >>> 1;
            long nextn = ((bits & 2) == 0) ? half : n - half;
            if ((bits & 1) == 0)
                offset += n - nextn;
            n = nextn;
        }
        return offset + nextInt((int) n);
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @return the next value
     * @throws IllegalArgumentException if least greater than or equal
     *         to bound
     */
    public long nextLong(long least, long bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextLong(bound - least) + least;
    }

    /**
     * Returns a pseudorandom, uniformly distributed {@code double} value
     * between 0 (inclusive) and the specified value (exclusive).
     *
     * @param n the bound on the random number to be returned. Must be
     *        positive.
     * @return the next value
     * @throws IllegalArgumentException if n is not positive
     */
    public double nextDouble(double n) {
        if (n <= 0)
            throw new IllegalArgumentException("n must be positive");
        return nextDouble() * n;
    }

    /**
     * Returns a pseudorandom, uniformly distributed value between the
     * given least value (inclusive) and bound (exclusive).
     *
     * @param least the least value returned
     * @param bound the upper bound (exclusive)
     * @return the next value
     * @throws IllegalArgumentException if least greater than or equal
     *         to bound
     */
    public double nextDouble(double least, double bound) {
        if (least >= bound)
            throw new IllegalArgumentException();
        return nextDouble() * (bound - least) + least;
    }

    private static final long serialVersionUID = -5851777807851030925L;
}
data/lib/concurrent-ruby.rb
ADDED
@@ -0,0 +1 @@
require_relative "./concurrent"
data/lib/concurrent.rb
ADDED
@@ -0,0 +1,134 @@
require 'concurrent/version'
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/configuration'

require 'concurrent/atomics'
require 'concurrent/executors'
require 'concurrent/synchronization'

require 'concurrent/atomic/atomic_markable_reference'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/agent'
require 'concurrent/atom'
require 'concurrent/array'
require 'concurrent/hash'
require 'concurrent/set'
require 'concurrent/map'
require 'concurrent/tuple'
require 'concurrent/async'
require 'concurrent/dataflow'
require 'concurrent/delay'
require 'concurrent/exchanger'
require 'concurrent/future'
require 'concurrent/immutable_struct'
require 'concurrent/ivar'
require 'concurrent/maybe'
require 'concurrent/mutable_struct'
require 'concurrent/mvar'
require 'concurrent/promise'
require 'concurrent/scheduled_task'
require 'concurrent/settable_struct'
require 'concurrent/timer_task'
require 'concurrent/tvar'
require 'concurrent/promises'

require 'concurrent/thread_safe/synchronized_delegator'
require 'concurrent/thread_safe/util'

require 'concurrent/options'

# @!macro internal_implementation_note
#
#   @note **Private Implementation:** This abstraction is a private, internal
#     implementation detail. It should never be used directly.

# @!macro monotonic_clock_warning
#
#   @note Time calculations on all platforms and languages are sensitive to
#     changes to the system clock. To alleviate the potential problems
#     associated with changing the system clock while an application is running,
#     most modern operating systems provide a monotonic clock that operates
#     independently of the system clock. A monotonic clock cannot be used to
#     determine human-friendly clock times. A monotonic clock is used exclusively
#     for calculating time intervals. Not all Ruby platforms provide access to an
#     operating system monotonic clock. On these platforms a pure-Ruby monotonic
#     clock will be used as a fallback. An operating system monotonic clock is both
#     faster and more reliable than the pure-Ruby implementation. The pure-Ruby
#     implementation should be fast and reliable enough for most non-realtime
#     operations. At this time the common Ruby platforms that provide access to an
#     operating system monotonic clock are MRI 2.1 and above and JRuby (all versions).
#
#     @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3)

# @!macro copy_options
#
#   ## Copy Options
#
#   Object references in Ruby are mutable. This can lead to serious
#   problems when the {#value} of an object is a mutable reference. Which
#   is always the case unless the value is a `Fixnum`, `Symbol`, or similar
#   "primitive" data type. Each instance can be configured with a few
#   options that can help protect the program from potentially dangerous
#   operations. Each of these options can be optionally set when the object
#   instance is created:
#
#   * `:dup_on_deref` When true the object will call the `#dup` method on
#     the `value` object every time the `#value` method is called
#     (default: false)
#   * `:freeze_on_deref` When true the object will call the `#freeze`
#     method on the `value` object every time the `#value` method is called
#     (default: false)
#   * `:copy_on_deref` When given a `Proc` object the `Proc` will be run
#     every time the `#value` method is called. The `Proc` will be given
#     the current `value` as its only argument and the result returned by
#     the block will be the return value of the `#value` call. When `nil`
#     this option will be ignored (default: nil)
#
#   When multiple deref options are set the order of operations is strictly defined.
#   The order of deref operations is:
#   * `:copy_on_deref`
#   * `:dup_on_deref`
#   * `:freeze_on_deref`
#
#   Because of this ordering there is no need to `#freeze` an object created by a
#   provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
#   Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
#   as close to the behavior of a "pure" functional language (like Erlang, Clojure,
#   or Haskell) as we are likely to get in Ruby.

# @!macro deref_options
#
#   @option opts [Boolean] :dup_on_deref (false) Call `#dup` before
#     returning the data from {#value}
#   @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before
#     returning the data from {#value}
#   @option opts [Proc] :copy_on_deref (nil) When calling the {#value}
#     method, call the given proc passing the internal value as the sole
#     argument then return the new value returned from the proc.

# @!macro executor_and_deref_options
#
#   @param [Hash] opts the options used to define the behavior at update and deref
#     and to specify the executor on which to perform actions
#   @option opts [Executor] :executor when set use the given `Executor` instance.
#     Three special values are also supported: `:io` returns the global pool for
#     long, blocking (IO) tasks, `:fast` returns the global pool for short, fast
#     operations, and `:immediate` returns the global `ImmediateExecutor` object.
#   @!macro deref_options

# @!macro warn.edge
#   @api Edge
#   @note **Edge Features** are under active development and may change frequently.
#
#     - Deprecations are not added before incompatible changes.
#     - Edge version: _major_ is always 0, _minor_ bump means incompatible change,
#       _patch_ bump means compatible change.
#     - Edge features may also lack tests and documentation.
#     - Features developed in `concurrent-ruby-edge` are expected to move
#       to `concurrent-ruby` when finalised.


# {include:file:README.md}
module Concurrent
end
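The copy and deref options documented above are consumed by the library's dereferenceable objects (for example `Concurrent::Future` and `Concurrent::IVar`). As a minimal sketch of how they combine, assuming a `Future` created with both an executor option and deref options, the following illustrates the documented `:copy_on_deref` → `:dup_on_deref` → `:freeze_on_deref` ordering; the hash value and the `:io` executor choice are only for demonstration:

```
require 'concurrent'

# Illustrative sketch: a Future that runs on the global IO pool and shields
# readers from the mutable Hash it produces.
future = Concurrent::Future.execute(
  executor:        :io,                      # :io, :fast, or :immediate per the macro above
  copy_on_deref:   ->(value) { value.dup },  # 1. runs first, receives the internal value
  dup_on_deref:    true,                     # 2. #dup is called on the copied value
  freeze_on_deref: true                      # 3. the result handed to the caller is frozen
) do
  { answer: 42 } # the internal value is never exposed directly once deref options are set
end

future.value          #=> {:answer=>42}, a frozen copy
future.value.frozen?  #=> true
```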
data/lib/concurrent/agent.rb
ADDED
@@ -0,0 +1,587 @@
require 'concurrent/configuration'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/atomic/thread_local_var'
require 'concurrent/collection/copy_on_write_observer_set'
require 'concurrent/concern/observable'
require 'concurrent/synchronization'

module Concurrent

  # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents)
  # function. An agent is a shared, mutable variable providing independent,
  # uncoordinated, *asynchronous* change of individual values. Best used when
  # the value will undergo frequent, complex updates. Suitable when the result
  # of an update does not need to be known immediately. `Agent` is (mostly)
  # functionally equivalent to Clojure's agent, except where the runtime
  # prevents parity.
  #
  # Agents are reactive, not autonomous - there is no imperative message loop
  # and no blocking receive. The state of an Agent should be itself immutable
  # and the `#value` of an Agent is always immediately available for reading by
  # any thread without any messages, i.e. observation does not require
  # cooperation or coordination.
  #
  # Agent action dispatches are made using the various `#send` methods. These
  # methods always return immediately. At some point later, in another thread,
  # the following will happen:
  #
  # 1. The given `action` will be applied to the state of the Agent and the
  #    `args`, if any were supplied.
  # 2. The return value of `action` will be passed to the validator lambda,
  #    if one has been set on the Agent.
  # 3. If the validator succeeds or if no validator was given, the return value
  #    of the given `action` will become the new `#value` of the Agent. See
  #    `#initialize` for details.
  # 4. If any observers were added to the Agent, they will be notified. See
  #    `#add_observer` for details.
  # 5. If during the `action` execution any other dispatches are made (directly
  #    or indirectly), they will be held until after the `#value` of the Agent
  #    has been changed.
  #
  # If any exceptions are thrown by an action function, no nested dispatches
  # will occur, and the exception will be cached in the Agent itself. When an
  # Agent has errors cached, any subsequent interactions will immediately throw
  # an exception, until the agent's errors are cleared. Agent errors can be
  # examined with `#error` and the agent restarted with `#restart`.
  #
  # The actions of all Agents get interleaved amongst threads in a thread pool.
  # At any point in time, at most one action for each Agent is being executed.
  # Actions dispatched to an agent from another single agent or thread will
  # occur in the order they were sent, potentially interleaved with actions
  # dispatched to the same agent from other sources. The `#send` method should
  # be used for actions that are CPU limited, while the `#send_off` method is
  # appropriate for actions that may block on IO.
  #
  # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions.
  #
  # ## Example
  #
  # ```
  # def next_fibonacci(set = nil)
  #   return [0, 1] if set.nil?
  #   set + [set[-2..-1].reduce{|sum,x| sum + x }]
  # end
  #
  # # create an agent with an initial value
  # agent = Concurrent::Agent.new(next_fibonacci)
  #
  # # send a few update requests
  # 5.times do
  #   agent.send{|set| next_fibonacci(set) }
  # end
  #
  # # wait for them to complete
  # agent.await
  #
  # # get the current value
  # agent.value #=> [0, 1, 1, 2, 3, 5, 8]
  # ```
  #
  # ## Observation
  #
  # Agents support observers through the {Concurrent::Observable} mixin module.
  # Notification of observers occurs every time an action dispatch returns and
  # the new value is successfully validated. Observation will *not* occur if the
  # action raises an exception, if validation fails, or when a {#restart} occurs.
  #
  # When notified the observer will receive three arguments: `time`, `old_value`,
  # and `new_value`. The `time` argument is the time at which the value change
  # occurred. The `old_value` is the value of the Agent when the action began
  # processing. The `new_value` is the value to which the Agent was set when the
  # action completed. Note that `old_value` and `new_value` may be the same.
  # This is not an error. It simply means that the action returned the same
  # value.
  #
  # ## Nested Actions
  #
  # It is possible for an Agent action to post further actions back to itself.
  # The nested actions will be enqueued normally then processed *after* the
  # outer action completes, in the order they were sent, possibly interleaved
  # with action dispatches from other threads. Nested actions never deadlock
  # with one another and a failure in a nested action will never affect the
  # outer action.
  #
  # Nested actions can be called using the Agent reference from the enclosing
  # scope or by passing the reference in as a "send" argument. Nested actions
  # cannot be posted using `self` from within the action block/proc/lambda; `self`
  # in this context will not reference the Agent. The preferred method for
  # dispatching nested actions is to pass the Agent as an argument. This allows
  # Ruby to more effectively manage the closing scope.
  #
  # Prefer this:
  #
  # ```
  # agent = Concurrent::Agent.new(0)
  # agent.send(agent) do |value, this|
  #   this.send {|v| v + 42 }
  #   3.14
  # end
  # agent.value #=> 45.14
  # ```
  #
  # Over this:
  #
  # ```
  # agent = Concurrent::Agent.new(0)
  # agent.send do |value|
  #   agent.send {|v| v + 42 }
  #   3.14
  # end
  # ```
  #
  # @!macro agent_await_warning
  #
  #   **NOTE** Never, *under any circumstances*, call any of the "await" methods
  #   ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action
  #   block/proc/lambda. The call will block the Agent and will always fail.
  #   Calling either {#await} or {#wait} (with a timeout of `nil`) will
  #   hopelessly deadlock the Agent with no possibility of recovery.
  #
  # @!macro thread_safe_variable_comparison
  #
  # @see http://clojure.org/Agents Clojure Agents
  # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
  class Agent < Synchronization::LockableObject
    include Concern::Observable

    ERROR_MODES = [:continue, :fail].freeze
    private_constant :ERROR_MODES

    AWAIT_FLAG = ::Object.new
    private_constant :AWAIT_FLAG

    AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG }
    private_constant :AWAIT_ACTION

    DEFAULT_ERROR_HANDLER = ->(agent, error) { nil }
    private_constant :DEFAULT_ERROR_HANDLER

    DEFAULT_VALIDATOR = ->(value) { true }
    private_constant :DEFAULT_VALIDATOR

    Job = Struct.new(:action, :args, :executor, :caller)
    private_constant :Job

    # Raised during action processing or any other time in an Agent's lifecycle.
    class Error < StandardError
      def initialize(message = nil)
        message ||= 'agent must be restarted before jobs can post'
        super(message)
      end
    end

    # Raised when a new value obtained during action processing or at `#restart`
    # fails validation.
    class ValidationError < Error
      def initialize(message = nil)
        message ||= 'invalid value'
        super(message)
      end
    end

    # The error mode this Agent is operating in. See {#initialize} for details.
    attr_reader :error_mode

    # Create a new `Agent` with the given initial value and options.
    #
    # The `:validator` option must be `nil` or a side-effect free proc/lambda
    # which takes one argument. On any intended value change the validator, if
    # provided, will be called. If the new value is invalid the validator should
    # return `false` or raise an error.
    #
    # The `:error_handler` option must be `nil` or a proc/lambda which takes two
    # arguments. When an action raises an error or validation fails, either by
    # returning false or raising an error, the error handler will be called. The
    # arguments to the error handler will be a reference to the agent itself and
    # the error object which was raised.
    #
    # The `:error_mode` may be either `:continue` (the default if an error
    # handler is given) or `:fail` (the default if error handler nil or not
    # given).
    #
    # If an action being run by the agent throws an error or doesn't pass
    # validation the error handler, if present, will be called. After the
    # handler executes if the error mode is `:continue` the Agent will continue
    # as if neither the action that caused the error nor the error itself ever
    # happened.
    #
    # If the mode is `:fail` the Agent will become {#failed?} and will stop
    # accepting new action dispatches. Any previously queued actions will be
    # held until {#restart} is called. The {#value} method will still work,
    # returning the value of the Agent before the error.
    #
    # @param [Object] initial the initial value
    # @param [Hash] opts the configuration options
    #
    # @option opts [Symbol] :error_mode either `:continue` or `:fail`
    # @option opts [nil, Proc] :error_handler the (optional) error handler
    # @option opts [nil, Proc] :validator the (optional) validation procedure
    def initialize(initial, opts = {})
      super()
      synchronize { ns_initialize(initial, opts) }
    end

    # The current value (state) of the Agent, irrespective of any pending or
    # in-progress actions. The value is always available and is non-blocking.
    #
    # @return [Object] the current value
    def value
      @current.value # TODO (pitr 12-Sep-2015): broken unsafe read?
    end

    alias_method :deref, :value

    # When {#failed?} and {#error_mode} is `:fail`, returns the error object
    # which caused the failure, else `nil`. When {#error_mode} is `:continue`
    # will *always* return `nil`.
    #
    # @return [nil, Error] the error which caused the failure when {#failed?}
    def error
      @error.value
    end

    alias_method :reason, :error

    # @!macro agent_send
    #
    #   Dispatches an action to the Agent and returns immediately. Subsequently,
    #   in a thread from a thread pool, the {#value} will be set to the return
    #   value of the action. Action dispatches are only allowed when the Agent
    #   is not {#failed?}.
    #
    #   The action must be a block/proc/lambda which takes 1 or more arguments.
    #   The first argument is the current {#value} of the Agent. Any arguments
    #   passed to the send method via the `args` parameter will be passed to the
    #   action as the remaining arguments. The action must return the new value
    #   of the Agent.
    #
    #   * {#send} and {#send!} should be used for actions that are CPU limited
    #   * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that
    #     may block on IO
    #   * {#send_via} and {#send_via!} are used when a specific executor is to
    #     be used for the action
    #
    #   @param [Array<Object>] args zero or more arguments to be passed to
    #     the action
    #   @param [Proc] action the action dispatch to be enqueued
    #
    #   @yield [agent, value, *args] process the old value and return the new
    #   @yieldparam [Object] value the current {#value} of the Agent
    #   @yieldparam [Array<Object>] args zero or more arguments to pass to the
    #     action
    #   @yieldreturn [Object] the new value of the Agent
    #
    # @!macro send_return
    #   @return [Boolean] true if the action is successfully enqueued, false if
    #     the Agent is {#failed?}
    def send(*args, &action)
      enqueue_action_job(action, args, Concurrent.global_fast_executor)
    end

    # @!macro agent_send
    #
    # @!macro send_bang_return_and_raise
    #   @return [Boolean] true if the action is successfully enqueued
    #   @raise [Concurrent::Agent::Error] if the Agent is {#failed?}
    def send!(*args, &action)
      raise Error.new unless send(*args, &action)
      true
    end

    # @!macro agent_send
    # @!macro send_return
    def send_off(*args, &action)
      enqueue_action_job(action, args, Concurrent.global_io_executor)
    end

    alias_method :post, :send_off

    # @!macro agent_send
    # @!macro send_bang_return_and_raise
    def send_off!(*args, &action)
      raise Error.new unless send_off(*args, &action)
      true
    end

    # @!macro agent_send
    # @!macro send_return
    # @param [Concurrent::ExecutorService] executor the executor on which the
    #   action is to be dispatched
    def send_via(executor, *args, &action)
      enqueue_action_job(action, args, executor)
    end

    # @!macro agent_send
    # @!macro send_bang_return_and_raise
    # @param [Concurrent::ExecutorService] executor the executor on which the
    #   action is to be dispatched
    def send_via!(executor, *args, &action)
      raise Error.new unless send_via(executor, *args, &action)
      true
    end

    # Dispatches an action to the Agent and returns immediately. Subsequently,
    # in a thread from a thread pool, the {#value} will be set to the return
    # value of the action. Appropriate for actions that may block on IO.
    #
    # @param [Proc] action the action dispatch to be enqueued
    # @return [Concurrent::Agent] self
    # @see #send_off
    def <<(action)
      send_off(&action)
      self
    end

    # Blocks the current thread (indefinitely!) until all actions dispatched
    # thus far, from this thread or nested by the Agent, have occurred. Will
    # block when {#failed?}. Will never return if a failed Agent is restarted
    # via {#restart} with `:clear_actions` true.
    #
    # Returns a reference to `self` to support method chaining:
    #
    # ```
    # current_value = agent.await.value
    # ```
    #
    # @return [Boolean] self
    #
    # @!macro agent_await_warning
    def await
      wait(nil)
      self
    end

    # Blocks the current thread until all actions dispatched thus far, from this
    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
    # has elapsed.
    #
    # @param [Float] timeout the maximum number of seconds to wait
    # @return [Boolean] true if all actions complete before timeout else false
    #
    # @!macro agent_await_warning
    def await_for(timeout)
      wait(timeout.to_f)
    end

    # Blocks the current thread until all actions dispatched thus far, from this
    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
    # has elapsed.
    #
    # @param [Float] timeout the maximum number of seconds to wait
    # @return [Boolean] true if all actions complete before timeout
    #
    # @raise [Concurrent::TimeoutError] when timeout is reached
    #
    # @!macro agent_await_warning
    def await_for!(timeout)
      raise Concurrent::TimeoutError unless wait(timeout.to_f)
      true
    end

    # Blocks the current thread until all actions dispatched thus far, from this
    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
    # has elapsed. Will block indefinitely when timeout is nil or not given.
    #
    # Provided mainly for consistency with other classes in this library. Prefer
    # the various `await` methods instead.
    #
    # @param [Float] timeout the maximum number of seconds to wait
    # @return [Boolean] true if all actions complete before timeout else false
    #
    # @!macro agent_await_warning
    def wait(timeout = nil)
      latch = Concurrent::CountDownLatch.new(1)
      enqueue_await_job(latch)
      latch.wait(timeout)
    end

    # Is the Agent in a failed state?
    #
    # @see #restart
    def failed?
      !@error.value.nil?
    end

    alias_method :stopped?, :failed?

    # When an Agent is {#failed?}, changes the Agent {#value} to `new_value`
    # then un-fails the Agent so that action dispatches are allowed again. If
    # the `:clear_actions` option is given and true, any actions queued on the
    # Agent that were being held while it was failed will be discarded,
    # otherwise those held actions will proceed. The `new_value` must pass the
    # validator if any, or `restart` will raise an exception and the Agent will
    # remain failed with its old {#value} and {#error}. Observers, if any, will
    # not be notified of the new state.
    #
    # @param [Object] new_value the new value for the Agent once restarted
    # @param [Hash] opts the configuration options
    # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed
    #   actions should be discarded on restart, else false (default: false)
    # @return [Boolean] true
    #
    # @raise [Concurrent:AgentError] when not failed
    def restart(new_value, opts = {})
      clear_actions = opts.fetch(:clear_actions, false)
      synchronize do
        raise Error.new('agent is not failed') unless failed?
        raise ValidationError unless ns_validate(new_value)
        @current.value = new_value
        @error.value   = nil
        @queue.clear if clear_actions
        ns_post_next_job unless @queue.empty?
      end
      true
    end

    class << self

      # Blocks the current thread (indefinitely!) until all actions dispatched
      # thus far to all the given Agents, from this thread or nested by the
      # given Agents, have occurred. Will block when any of the agents are
      # failed. Will never return if a failed Agent is restarted with
      # `:clear_actions` true.
      #
      # @param [Array<Concurrent::Agent>] agents the Agents on which to wait
      # @return [Boolean] true
      #
      # @!macro agent_await_warning
      def await(*agents)
        agents.each { |agent| agent.await }
        true
      end

      # Blocks the current thread until all actions dispatched thus far to all
      # the given Agents, from this thread or nested by the given Agents, have
      # occurred, or the timeout (in seconds) has elapsed.
      #
      # @param [Float] timeout the maximum number of seconds to wait
      # @param [Array<Concurrent::Agent>] agents the Agents on which to wait
      # @return [Boolean] true if all actions complete before timeout else false
      #
      # @!macro agent_await_warning
      def await_for(timeout, *agents)
        end_at = Concurrent.monotonic_time + timeout.to_f
        ok = agents.length.times do |i|
          break false if (delay = end_at - Concurrent.monotonic_time) < 0
          break false unless agents[i].await_for(delay)
        end
        !!ok
      end

      # Blocks the current thread until all actions dispatched thus far to all
      # the given Agents, from this thread or nested by the given Agents, have
      # occurred, or the timeout (in seconds) has elapsed.
      #
      # @param [Float] timeout the maximum number of seconds to wait
      # @param [Array<Concurrent::Agent>] agents the Agents on which to wait
      # @return [Boolean] true if all actions complete before timeout
      #
      # @raise [Concurrent::TimeoutError] when timeout is reached
      # @!macro agent_await_warning
      def await_for!(timeout, *agents)
        raise Concurrent::TimeoutError unless await_for(timeout, *agents)
        true
      end
    end

    private

    def ns_initialize(initial, opts)
      @error_mode    = opts[:error_mode]
      @error_handler = opts[:error_handler]

      if @error_mode && !ERROR_MODES.include?(@error_mode)
        raise ArgumentError.new('unrecognized error mode')
      elsif @error_mode.nil?
        @error_mode = @error_handler ? :continue : :fail
      end

      @error_handler ||= DEFAULT_ERROR_HANDLER
      @validator     = opts.fetch(:validator, DEFAULT_VALIDATOR)
      @current       = Concurrent::AtomicReference.new(initial)
      @error         = Concurrent::AtomicReference.new(nil)
      @caller        = Concurrent::ThreadLocalVar.new(nil)
      @queue         = []

      self.observers = Collection::CopyOnNotifyObserverSet.new
    end

    def enqueue_action_job(action, args, executor)
      raise ArgumentError.new('no action given') unless action
      job = Job.new(action, args, executor, @caller.value || Thread.current.object_id)
      synchronize { ns_enqueue_job(job) }
    end

    def enqueue_await_job(latch)
      synchronize do
        if (index = ns_find_last_job_for_thread)
          job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor,
                        Thread.current.object_id)
          ns_enqueue_job(job, index+1)
        else
          latch.count_down
          true
        end
      end
    end

    def ns_enqueue_job(job, index = nil)
      # a non-nil index means this is an await job
      return false if index.nil? && failed?
      index ||= @queue.length
      @queue.insert(index, job)
      # if this is the only job, post to executor
      ns_post_next_job if @queue.length == 1
      true
    end

    def ns_post_next_job
      @queue.first.executor.post { execute_next_job }
    end

    def execute_next_job
      job       = synchronize { @queue.first }
      old_value = @current.value

      @caller.value = job.caller # for nested actions
      new_value     = job.action.call(old_value, *job.args)
      @caller.value = nil

      return if new_value == AWAIT_FLAG

      if ns_validate(new_value)
        @current.value = new_value
        observers.notify_observers(Time.now, old_value, new_value)
      else
        handle_error(ValidationError.new)
      end
    rescue => error
      handle_error(error)
    ensure
      synchronize do
        @queue.shift
        unless failed? || @queue.empty?
          ns_post_next_job
        end
      end
    end

    def ns_validate(value)
      @validator.call(value)
    rescue
      false
    end

    def handle_error(error)
      # stop new jobs from posting
      @error.value = error if @error_mode == :fail
      @error_handler.call(self, error)
    rescue
      # do nothing
    end

    def ns_find_last_job_for_thread
      @queue.rindex { |job| job.caller == Thread.current.object_id }
    end
  end
end
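Tying together the Agent options documented above (validator, error handler, error mode, failure state, and restart) with the observation behavior, here is a small illustrative sketch; the "account" scenario, values, and messages are made up for demonstration and are not part of the gem:

```
require 'concurrent'

# An agent whose value must always be a non-negative Integer.
account = Concurrent::Agent.new(
  100,
  validator:     ->(value) { value.is_a?(Integer) && value >= 0 },
  error_handler: ->(agent, error) { warn "agent failed: #{error.class}" },
  error_mode:    :fail
)

# Observers receive (time, old_value, new_value) after each successful,
# validated update.
account.add_observer do |time, old_value, new_value|
  puts "#{time}: #{old_value} -> #{new_value}"
end

account.send     { |balance| balance - 30 } # CPU-bound action: global fast executor
account.send_off { |balance| balance + 5 }  # IO-bound action: global IO executor
account.await                               # block until both actions have run
account.value                               #=> 75

# A failing update: validation rejects the negative result, the error handler
# runs, and (because error_mode is :fail) the agent stops accepting dispatches.
account.send { |balance| balance - 1_000 }
account.await_for(1)                        # times out once the agent has failed
account.failed?                             #=> true
account.error                               #=> #<Concurrent::Agent::ValidationError: invalid value>

# Restart with a fresh, valid value; any held actions are discarded.
account.restart(0, clear_actions: true)
account.failed?                             #=> false
```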