concurrent-ruby 1.0.5 → 1.1.0.pre1

Files changed (107)
  1. checksums.yaml +5 -5
  2. data/CHANGELOG.md +42 -0
  3. data/Gemfile +39 -0
  4. data/{LICENSE.txt → LICENSE.md} +2 -0
  5. data/README.md +203 -105
  6. data/Rakefile +278 -0
  7. data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
  8. data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
  9. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
  10. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
  11. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
  12. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +159 -0
  13. data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +304 -0
  14. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
  15. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
  16. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
  17. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
  18. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
  19. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
  20. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
  21. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
  22. data/lib/concurrent-ruby.rb +1 -0
  23. data/lib/concurrent.rb +24 -20
  24. data/lib/concurrent/agent.rb +7 -7
  25. data/lib/concurrent/array.rb +59 -32
  26. data/lib/concurrent/async.rb +4 -4
  27. data/lib/concurrent/atom.rb +9 -9
  28. data/lib/concurrent/atomic/atomic_boolean.rb +24 -20
  29. data/lib/concurrent/atomic/atomic_fixnum.rb +27 -23
  30. data/lib/concurrent/atomic/atomic_markable_reference.rb +164 -0
  31. data/lib/concurrent/atomic/atomic_reference.rb +176 -33
  32. data/lib/concurrent/atomic/count_down_latch.rb +6 -6
  33. data/lib/concurrent/atomic/cyclic_barrier.rb +1 -1
  34. data/lib/concurrent/atomic/event.rb +1 -1
  35. data/lib/concurrent/atomic/java_count_down_latch.rb +6 -5
  36. data/lib/concurrent/atomic/mutex_count_down_latch.rb +1 -0
  37. data/lib/concurrent/atomic/read_write_lock.rb +2 -1
  38. data/lib/concurrent/atomic/reentrant_read_write_lock.rb +3 -1
  39. data/lib/concurrent/atomic/semaphore.rb +8 -8
  40. data/lib/concurrent/atomic/thread_local_var.rb +7 -7
  41. data/lib/concurrent/atomic_reference/mutex_atomic.rb +3 -8
  42. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +1 -1
  43. data/lib/concurrent/atomics.rb +0 -43
  44. data/lib/concurrent/collection/lock_free_stack.rb +127 -0
  45. data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +3 -3
  46. data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +1 -2
  47. data/lib/concurrent/collection/non_concurrent_priority_queue.rb +29 -29
  48. data/lib/concurrent/concern/dereferenceable.rb +1 -1
  49. data/lib/concurrent/concern/logging.rb +6 -1
  50. data/lib/concurrent/concern/observable.rb +7 -7
  51. data/lib/concurrent/concurrent_ruby.jar +0 -0
  52. data/lib/concurrent/configuration.rb +1 -6
  53. data/lib/concurrent/constants.rb +1 -1
  54. data/lib/concurrent/dataflow.rb +2 -1
  55. data/lib/concurrent/delay.rb +9 -7
  56. data/lib/concurrent/exchanger.rb +13 -21
  57. data/lib/concurrent/executor/abstract_executor_service.rb +2 -2
  58. data/lib/concurrent/executor/cached_thread_pool.rb +1 -1
  59. data/lib/concurrent/executor/executor_service.rb +15 -15
  60. data/lib/concurrent/executor/fixed_thread_pool.rb +18 -18
  61. data/lib/concurrent/executor/java_thread_pool_executor.rb +10 -7
  62. data/lib/concurrent/executor/single_thread_executor.rb +2 -2
  63. data/lib/concurrent/executor/thread_pool_executor.rb +6 -6
  64. data/lib/concurrent/executor/timer_set.rb +1 -1
  65. data/lib/concurrent/future.rb +4 -1
  66. data/lib/concurrent/hash.rb +53 -30
  67. data/lib/concurrent/ivar.rb +5 -6
  68. data/lib/concurrent/map.rb +20 -25
  69. data/lib/concurrent/maybe.rb +1 -1
  70. data/lib/concurrent/mutable_struct.rb +15 -14
  71. data/lib/concurrent/mvar.rb +2 -2
  72. data/lib/concurrent/promise.rb +53 -21
  73. data/lib/concurrent/promises.rb +1938 -0
  74. data/lib/concurrent/re_include.rb +58 -0
  75. data/lib/concurrent/set.rb +66 -0
  76. data/lib/concurrent/settable_struct.rb +1 -0
  77. data/lib/concurrent/synchronization.rb +4 -5
  78. data/lib/concurrent/synchronization/abstract_lockable_object.rb +5 -5
  79. data/lib/concurrent/synchronization/abstract_struct.rb +6 -4
  80. data/lib/concurrent/synchronization/lockable_object.rb +6 -6
  81. data/lib/concurrent/synchronization/{mri_lockable_object.rb → mutex_lockable_object.rb} +19 -14
  82. data/lib/concurrent/synchronization/object.rb +8 -4
  83. data/lib/concurrent/synchronization/truffleruby_object.rb +46 -0
  84. data/lib/concurrent/synchronization/volatile.rb +11 -9
  85. data/lib/concurrent/thread_safe/util/data_structures.rb +55 -0
  86. data/lib/concurrent/thread_safe/util/striped64.rb +9 -4
  87. data/lib/concurrent/timer_task.rb +5 -2
  88. data/lib/concurrent/tuple.rb +1 -1
  89. data/lib/concurrent/tvar.rb +2 -2
  90. data/lib/concurrent/utility/at_exit.rb +1 -1
  91. data/lib/concurrent/utility/engine.rb +2 -2
  92. data/lib/concurrent/utility/monotonic_time.rb +3 -3
  93. data/lib/concurrent/utility/native_extension_loader.rb +31 -33
  94. data/lib/concurrent/utility/processor_counter.rb +0 -2
  95. data/lib/concurrent/version.rb +2 -2
  96. metadata +35 -21
  97. data/lib/concurrent/atomic_reference/concurrent_update_error.rb +0 -8
  98. data/lib/concurrent/atomic_reference/direct_update.rb +0 -81
  99. data/lib/concurrent/atomic_reference/jruby+truffle.rb +0 -2
  100. data/lib/concurrent/atomic_reference/jruby.rb +0 -16
  101. data/lib/concurrent/atomic_reference/rbx.rb +0 -22
  102. data/lib/concurrent/atomic_reference/ruby.rb +0 -32
  103. data/lib/concurrent/edge.rb +0 -26
  104. data/lib/concurrent/lazy_register.rb +0 -81
  105. data/lib/concurrent/synchronization/truffle_lockable_object.rb +0 -9
  106. data/lib/concurrent/synchronization/truffle_object.rb +0 -31
  107. data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +0 -30
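The list above shows several new public APIs arriving in 1.1.0.pre1: lib/concurrent/promises.rb (the Promises framework, promoted into the core gem as lib/concurrent/edge.rb is removed) and lib/concurrent/set.rb (a thread-safe Set). As an illustration only — not part of this diff — here is a minimal sketch of how these additions are typically used, assuming the public interface documented for the 1.1.0 release:

    require 'concurrent'

    # Thread-safe Set (new in lib/concurrent/set.rb)
    set = Concurrent::Set.new
    10.times.map { |i| Thread.new { set << i } }.each(&:join)
    set.size                      # => 10

    # Promises framework (new in lib/concurrent/promises.rb)
    future = Concurrent::Promises.future { 1 + 1 }.then { |v| v * 10 }
    future.value!                 # => 20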
data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java
@@ -0,0 +1,3800 @@
1
+ /*
2
+ * Written by Doug Lea with assistance from members of JCP JSR-166
3
+ * Expert Group and released to the public domain, as explained at
4
+ * http://creativecommons.org/publicdomain/zero/1.0/
5
+ */
6
+
7
+ // This is based on the 1.79 version.
8
+
9
+ package com.concurrent_ruby.ext.jsr166e.nounsafe;
10
+
11
+ import org.jruby.RubyClass;
12
+ import org.jruby.RubyNumeric;
13
+ import org.jruby.RubyObject;
14
+ import org.jruby.exceptions.RaiseException;
15
+ import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap;
16
+ import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom;
17
+ import org.jruby.runtime.ThreadContext;
18
+ import org.jruby.runtime.builtin.IRubyObject;
19
+
20
+ import java.util.Arrays;
21
+ import java.util.Map;
22
+ import java.util.Set;
23
+ import java.util.Collection;
24
+ import java.util.Hashtable;
25
+ import java.util.HashMap;
26
+ import java.util.Iterator;
27
+ import java.util.Enumeration;
28
+ import java.util.ConcurrentModificationException;
29
+ import java.util.NoSuchElementException;
30
+ import java.util.concurrent.ConcurrentMap;
31
+ import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
32
+ import java.util.concurrent.atomic.AtomicReferenceArray;
33
+ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
34
+
35
+ import java.io.Serializable;
36
+
37
+ /**
38
+ * A hash table supporting full concurrency of retrievals and
39
+ * high expected concurrency for updates. This class obeys the
40
+ * same functional specification as {@link java.util.Hashtable}, and
41
+ * includes versions of methods corresponding to each method of
42
+ * {@code Hashtable}. However, even though all operations are
43
+ * thread-safe, retrieval operations do <em>not</em> entail locking,
44
+ * and there is <em>not</em> any support for locking the entire table
45
+ * in a way that prevents all access. This class is fully
46
+ * interoperable with {@code Hashtable} in programs that rely on its
47
+ * thread safety but not on its synchronization details.
48
+ *
49
+ * <p>Retrieval operations (including {@code get}) generally do not
50
+ * block, so may overlap with update operations (including {@code put}
51
+ * and {@code remove}). Retrievals reflect the results of the most
52
+ * recently <em>completed</em> update operations holding upon their
53
+ * onset. (More formally, an update operation for a given key bears a
54
+ * <em>happens-before</em> relation with any (non-null) retrieval for
55
+ * that key reporting the updated value.) For aggregate operations
56
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
57
+ * reflect insertion or removal of only some entries. Similarly,
58
+ * Iterators and Enumerations return elements reflecting the state of
59
+ * the hash table at some point at or since the creation of the
60
+ * iterator/enumeration. They do <em>not</em> throw {@link
61
+ * ConcurrentModificationException}. However, iterators are designed
62
+ * to be used by only one thread at a time. Bear in mind that the
63
+ * results of aggregate status methods including {@code size}, {@code
64
+ * isEmpty}, and {@code containsValue} are typically useful only when
65
+ * a map is not undergoing concurrent updates in other threads.
66
+ * Otherwise the results of these methods reflect transient states
67
+ * that may be adequate for monitoring or estimation purposes, but not
68
+ * for program control.
69
+ *
70
+ * <p>The table is dynamically expanded when there are too many
71
+ * collisions (i.e., keys that have distinct hash codes but fall into
72
+ * the same slot modulo the table size), with the expected average
73
+ * effect of maintaining roughly two bins per mapping (corresponding
74
+ * to a 0.75 load factor threshold for resizing). There may be much
75
+ * variance around this average as mappings are added and removed, but
76
+ * overall, this maintains a commonly accepted time/space tradeoff for
77
+ * hash tables. However, resizing this or any other kind of hash
78
+ * table may be a relatively slow operation. When possible, it is a
79
+ * good idea to provide a size estimate as an optional {@code
80
+ * initialCapacity} constructor argument. An additional optional
81
+ * {@code loadFactor} constructor argument provides a further means of
82
+ * customizing initial table capacity by specifying the table density
83
+ * to be used in calculating the amount of space to allocate for the
84
+ * given number of elements. Also, for compatibility with previous
85
+ * versions of this class, constructors may optionally specify an
86
+ * expected {@code concurrencyLevel} as an additional hint for
87
+ * internal sizing. Note that using many keys with exactly the same
88
+ * {@code hashCode()} is a sure way to slow down performance of any
89
+ * hash table.
90
+ *
91
+ * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
92
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
93
+ * (using {@link #keySet(Object)} when only keys are of interest, and the
94
+ * mapped values are (perhaps transiently) not used or all take the
95
+ * same mapping value.
96
+ *
97
+ * <p>A ConcurrentHashMapV8 can be used as scalable frequency map (a
98
+ * form of histogram or multiset) by using {@link LongAdder} values
99
+ * and initializing via {@link #computeIfAbsent}. For example, to add
100
+ * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
101
+ * can use {@code freqs.computeIfAbsent(k -> new
102
+ * LongAdder()).increment();}
103
+ *
104
+ * <p>This class and its views and iterators implement all of the
105
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
106
+ * interfaces.
107
+ *
108
+ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
109
+ * does <em>not</em> allow {@code null} to be used as a key or value.
110
+ *
111
+ * <p>ConcurrentHashMapV8s support parallel operations using the {@link
112
+ * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
113
+ * are available in class {@link ForkJoinTasks}). These operations are
114
+ * designed to be safely, and often sensibly, applied even with maps
115
+ * that are being concurrently updated by other threads; for example,
116
+ * when computing a snapshot summary of the values in a shared
117
+ * registry. There are three kinds of operation, each with four
118
+ * forms, accepting functions with Keys, Values, Entries, and (Key,
119
+ * Value) arguments and/or return values. (The first three forms are
120
+ * also available via the {@link #keySet()}, {@link #values()} and
121
+ * {@link #entrySet()} views). Because the elements of a
122
+ * ConcurrentHashMapV8 are not ordered in any particular way, and may be
123
+ * processed in different orders in different parallel executions, the
124
+ * correctness of supplied functions should not depend on any
125
+ * ordering, or on any other objects or values that may transiently
126
+ * change while computation is in progress; and except for forEach
127
+ * actions, should ideally be side-effect-free.
128
+ *
129
+ * <ul>
130
+ * <li> forEach: Perform a given action on each element.
131
+ * A variant form applies a given transformation on each element
132
+ * before performing the action.</li>
133
+ *
134
+ * <li> search: Return the first available non-null result of
135
+ * applying a given function on each element; skipping further
136
+ * search when a result is found.</li>
137
+ *
138
+ * <li> reduce: Accumulate each element. The supplied reduction
139
+ * function cannot rely on ordering (more formally, it should be
140
+ * both associative and commutative). There are five variants:
141
+ *
142
+ * <ul>
143
+ *
144
+ * <li> Plain reductions. (There is not a form of this method for
145
+ * (key, value) function arguments since there is no corresponding
146
+ * return type.)</li>
147
+ *
148
+ * <li> Mapped reductions that accumulate the results of a given
149
+ * function applied to each element.</li>
150
+ *
151
+ * <li> Reductions to scalar doubles, longs, and ints, using a
152
+ * given basis value.</li>
153
+ *
154
+ * </li>
155
+ * </ul>
156
+ * </ul>
157
+ *
158
+ * <p>The concurrency properties of bulk operations follow
159
+ * from those of ConcurrentHashMapV8: Any non-null result returned
160
+ * from {@code get(key)} and related access methods bears a
161
+ * happens-before relation with the associated insertion or
162
+ * update. The result of any bulk operation reflects the
163
+ * composition of these per-element relations (but is not
164
+ * necessarily atomic with respect to the map as a whole unless it
165
+ * is somehow known to be quiescent). Conversely, because keys
166
+ * and values in the map are never null, null serves as a reliable
167
+ * atomic indicator of the current lack of any result. To
168
+ * maintain this property, null serves as an implicit basis for
169
+ * all non-scalar reduction operations. For the double, long, and
170
+ * int versions, the basis should be one that, when combined with
171
+ * any other value, returns that other value (more formally, it
172
+ * should be the identity element for the reduction). Most common
173
+ * reductions have these properties; for example, computing a sum
174
+ * with basis 0 or a minimum with basis MAX_VALUE.
175
+ *
176
+ * <p>Search and transformation functions provided as arguments
177
+ * should similarly return null to indicate the lack of any result
178
+ * (in which case it is not used). In the case of mapped
179
+ * reductions, this also enables transformations to serve as
180
+ * filters, returning null (or, in the case of primitive
181
+ * specializations, the identity basis) if the element should not
182
+ * be combined. You can create compound transformations and
183
+ * filterings by composing them yourself under this "null means
184
+ * there is nothing there now" rule before using them in search or
185
+ * reduce operations.
186
+ *
187
+ * <p>Methods accepting and/or returning Entry arguments maintain
188
+ * key-value associations. They may be useful for example when
189
+ * finding the key for the greatest value. Note that "plain" Entry
190
+ * arguments can be supplied using {@code new
191
+ * AbstractMap.SimpleEntry(k,v)}.
192
+ *
193
+ * <p>Bulk operations may complete abruptly, throwing an
194
+ * exception encountered in the application of a supplied
195
+ * function. Bear in mind when handling such exceptions that other
196
+ * concurrently executing functions could also have thrown
197
+ * exceptions, or would have done so if the first exception had
198
+ * not occurred.
199
+ *
200
+ * <p>Parallel speedups for bulk operations compared to sequential
201
+ * processing are common but not guaranteed. Operations involving
202
+ * brief functions on small maps may execute more slowly than
203
+ * sequential loops if the underlying work to parallelize the
204
+ * computation is more expensive than the computation itself.
205
+ * Similarly, parallelization may not lead to much actual parallelism
206
+ * if all processors are busy performing unrelated tasks.
207
+ *
208
+ * <p>All arguments to all task methods must be non-null.
209
+ *
210
+ * <p><em>jsr166e note: During transition, this class
211
+ * uses nested functional interfaces with different names but the
212
+ * same forms as those expected for JDK8.</em>
213
+ *
214
+ * <p>This class is a member of the
215
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
216
+ * Java Collections Framework</a>.
217
+ *
218
+ * @since 1.5
219
+ * @author Doug Lea
220
+ * @param <K> the type of keys maintained by this map
221
+ * @param <V> the type of mapped values
222
+ */
223
+ public class ConcurrentHashMapV8<K, V>
224
+ implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
225
+ private static final long serialVersionUID = 7249069246763182397L;
226
+
227
+ /**
228
+ * A partitionable iterator. A Spliterator can be traversed
229
+ * directly, but can also be partitioned (before traversal) by
230
+ * creating another Spliterator that covers a non-overlapping
231
+ * portion of the elements, and so may be amenable to parallel
232
+ * execution.
233
+ *
234
+ * <p>This interface exports a subset of expected JDK8
235
+ * functionality.
236
+ *
237
+ * <p>Sample usage: Here is one (of the several) ways to compute
238
+ * the sum of the values held in a map using the ForkJoin
239
+ * framework. As illustrated here, Spliterators are well suited to
240
+ * designs in which a task repeatedly splits off half its work
241
+ * into forked subtasks until small enough to process directly,
242
+ * and then joins these subtasks. Variants of this style can also
243
+ * be used in completion-based designs.
244
+ *
245
+ * <pre>
246
+ * {@code ConcurrentHashMapV8<String, Long> m = ...
247
+ * // split as if have 8 * parallelism, for load balance
248
+ * int n = m.size();
249
+ * int p = aForkJoinPool.getParallelism() * 8;
250
+ * int split = (n < p)? n : p;
251
+ * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
252
+ * // ...
253
+ * static class SumValues extends RecursiveTask<Long> {
254
+ * final Spliterator<Long> s;
255
+ * final int split; // split while > 1
256
+ * final SumValues nextJoin; // records forked subtasks to join
257
+ * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
258
+ * this.s = s; this.depth = depth; this.nextJoin = nextJoin;
259
+ * }
260
+ * public Long compute() {
261
+ * long sum = 0;
262
+ * SumValues subtasks = null; // fork subtasks
263
+ * for (int s = split >>> 1; s > 0; s >>>= 1)
264
+ * (subtasks = new SumValues(s.split(), s, subtasks)).fork();
265
+ * while (s.hasNext()) // directly process remaining elements
266
+ * sum += s.next();
267
+ * for (SumValues t = subtasks; t != null; t = t.nextJoin)
268
+ * sum += t.join(); // collect subtask results
269
+ * return sum;
270
+ * }
271
+ * }
272
+ * }</pre>
273
+ */
274
+ public static interface Spliterator<T> extends Iterator<T> {
275
+ /**
276
+ * Returns a Spliterator covering approximately half of the
277
+ * elements, guaranteed not to overlap with those subsequently
278
+ * returned by this Spliterator. After invoking this method,
279
+ * the current Spliterator will <em>not</em> produce any of
280
+ * the elements of the returned Spliterator, but the two
281
+ * Spliterators together will produce all of the elements that
282
+ * would have been produced by this Spliterator had this
283
+ * method not been called. The exact number of elements
284
+ * produced by the returned Spliterator is not guaranteed, and
285
+ * may be zero (i.e., with {@code hasNext()} reporting {@code
286
+ * false}) if this Spliterator cannot be further split.
287
+ *
288
+ * @return a Spliterator covering approximately half of the
289
+ * elements
290
+ * @throws IllegalStateException if this Spliterator has
291
+ * already commenced traversing elements
292
+ */
293
+ Spliterator<T> split();
294
+ }
295
+
296
+
297
+ /*
298
+ * Overview:
299
+ *
300
+ * The primary design goal of this hash table is to maintain
301
+ * concurrent readability (typically method get(), but also
302
+ * iterators and related methods) while minimizing update
303
+ * contention. Secondary goals are to keep space consumption about
304
+ * the same or better than java.util.HashMap, and to support high
305
+ * initial insertion rates on an empty table by many threads.
306
+ *
307
+ * Each key-value mapping is held in a Node. Because Node fields
308
+ * can contain special values, they are defined using plain Object
309
+ * types. Similarly in turn, all internal methods that use them
310
+ * work off Object types. And similarly, so do the internal
311
+ * methods of auxiliary iterator and view classes. All public
312
+ * generic typed methods relay in/out of these internal methods,
313
+ * supplying null-checks and casts as needed. This also allows
314
+ * many of the public methods to be factored into a smaller number
315
+ * of internal methods (although sadly not so for the five
316
+ * variants of put-related operations). The validation-based
317
+ * approach explained below leads to a lot of code sprawl because
318
+ * retry-control precludes factoring into smaller methods.
319
+ *
320
+ * The table is lazily initialized to a power-of-two size upon the
321
+ * first insertion. Each bin in the table normally contains a
322
+ * list of Nodes (most often, the list has only zero or one Node).
323
+ * Table accesses require volatile/atomic reads, writes, and
324
+ * CASes. Because there is no other way to arrange this without
325
+ * adding further indirections, we use intrinsics
326
+ * (sun.misc.Unsafe) operations. The lists of nodes within bins
327
+ * are always accurately traversable under volatile reads, so long
328
+ * as lookups check hash code and non-nullness of value before
329
+ * checking key equality.
330
+ *
331
+ * We use the top two bits of Node hash fields for control
332
+ * purposes -- they are available anyway because of addressing
333
+ * constraints. As explained further below, these top bits are
334
+ * used as follows:
335
+ * 00 - Normal
336
+ * 01 - Locked
337
+ * 11 - Locked and may have a thread waiting for lock
338
+ * 10 - Node is a forwarding node
339
+ *
340
+ * The lower 30 bits of each Node's hash field contain a
341
+ * transformation of the key's hash code, except for forwarding
342
+ * nodes, for which the lower bits are zero (and so always have
343
+ * hash field == MOVED).
344
+ *
345
+ * Insertion (via put or its variants) of the first node in an
346
+ * empty bin is performed by just CASing it to the bin. This is
347
+ * by far the most common case for put operations under most
348
+ * key/hash distributions. Other update operations (insert,
349
+ * delete, and replace) require locks. We do not want to waste
350
+ * the space required to associate a distinct lock object with
351
+ * each bin, so instead use the first node of a bin list itself as
352
+ * a lock. Blocking support for these locks relies on the builtin
353
+ * "synchronized" monitors. However, we also need a tryLock
354
+ * construction, so we overlay these by using bits of the Node
355
+ * hash field for lock control (see above), and so normally use
356
+ * builtin monitors only for blocking and signalling using
357
+ * wait/notifyAll constructions. See Node.tryAwaitLock.
358
+ *
359
+ * Using the first node of a list as a lock does not by itself
360
+ * suffice though: When a node is locked, any update must first
361
+ * validate that it is still the first node after locking it, and
362
+ * retry if not. Because new nodes are always appended to lists,
363
+ * once a node is first in a bin, it remains first until deleted
364
+ * or the bin becomes invalidated (upon resizing). However,
365
+ * operations that only conditionally update may inspect nodes
366
+ * until the point of update. This is a converse of sorts to the
367
+ * lazy locking technique described by Herlihy & Shavit.
368
+ *
369
+ * The main disadvantage of per-bin locks is that other update
370
+ * operations on other nodes in a bin list protected by the same
371
+ * lock can stall, for example when user equals() or mapping
372
+ * functions take a long time. However, statistically, under
373
+ * random hash codes, this is not a common problem. Ideally, the
374
+ * frequency of nodes in bins follows a Poisson distribution
375
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
376
+ * parameter of about 0.5 on average, given the resizing threshold
377
+ * of 0.75, although with a large variance because of resizing
378
+ * granularity. Ignoring variance, the expected occurrences of
379
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
380
+ * first values are:
381
+ *
382
+ * 0: 0.60653066
383
+ * 1: 0.30326533
384
+ * 2: 0.07581633
385
+ * 3: 0.01263606
386
+ * 4: 0.00157952
387
+ * 5: 0.00015795
388
+ * 6: 0.00001316
389
+ * 7: 0.00000094
390
+ * 8: 0.00000006
391
+ * more: less than 1 in ten million
392
+ *
393
+ * Lock contention probability for two threads accessing distinct
394
+ * elements is roughly 1 / (8 * #elements) under random hashes.
395
+ *
396
+ * Actual hash code distributions encountered in practice
397
+ * sometimes deviate significantly from uniform randomness. This
398
+ * includes the case when N > (1<<30), so some keys MUST collide.
399
+ * Similarly for dumb or hostile usages in which multiple keys are
400
+ * designed to have identical hash codes. Also, although we guard
401
+ * against the worst effects of this (see method spread), sets of
402
+ * hashes may differ only in bits that do not impact their bin
403
+ * index for a given power-of-two mask. So we use a secondary
404
+ * strategy that applies when the number of nodes in a bin exceeds
405
+ * a threshold, and at least one of the keys implements
406
+ * Comparable. These TreeBins use a balanced tree to hold nodes
407
+ * (a specialized form of red-black trees), bounding search time
408
+ * to O(log N). Each search step in a TreeBin is around twice as
409
+ * slow as in a regular list, but given that N cannot exceed
410
+ * (1<<64) (before running out of addresses) this bounds search
411
+ * steps, lock hold times, etc, to reasonable constants (roughly
412
+ * 100 nodes inspected per operation worst case) so long as keys
413
+ * are Comparable (which is very common -- String, Long, etc).
414
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
415
+ * traversal pointers as regular nodes, so can be traversed in
416
+ * iterators in the same way.
417
+ *
418
+ * The table is resized when occupancy exceeds a percentage
419
+ * threshold (nominally, 0.75, but see below). Only a single
420
+ * thread performs the resize (using field "sizeCtl", to arrange
421
+ * exclusion), but the table otherwise remains usable for reads
422
+ * and updates. Resizing proceeds by transferring bins, one by
423
+ * one, from the table to the next table. Because we are using
424
+ * power-of-two expansion, the elements from each bin must either
425
+ * stay at same index, or move with a power of two offset. We
426
+ * eliminate unnecessary node creation by catching cases where old
427
+ * nodes can be reused because their next fields won't change. On
428
+ * average, only about one-sixth of them need cloning when a table
429
+ * doubles. The nodes they replace will be garbage collectable as
430
+ * soon as they are no longer referenced by any reader thread that
431
+ * may be in the midst of concurrently traversing table. Upon
432
+ * transfer, the old table bin contains only a special forwarding
433
+ * node (with hash field "MOVED") that contains the next table as
434
+ * its key. On encountering a forwarding node, access and update
435
+ * operations restart, using the new table.
436
+ *
437
+ * Each bin transfer requires its bin lock. However, unlike other
438
+ * cases, a transfer can skip a bin if it fails to acquire its
439
+ * lock, and revisit it later (unless it is a TreeBin). Method
440
+ * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
441
+ * have been skipped because of failure to acquire a lock, and
442
+ * blocks only if none are available (i.e., only very rarely).
443
+ * The transfer operation must also ensure that all accessible
444
+ * bins in both the old and new table are usable by any traversal.
445
+ * When there are no lock acquisition failures, this is arranged
446
+ * simply by proceeding from the last bin (table.length - 1) up
447
+ * towards the first. Upon seeing a forwarding node, traversals
448
+ * (see class Iter) arrange to move to the new table
449
+ * without revisiting nodes. However, when any node is skipped
450
+ * during a transfer, all earlier table bins may have become
451
+ * visible, so are initialized with a reverse-forwarding node back
452
+ * to the old table until the new ones are established. (This
453
+ * sometimes requires transiently locking a forwarding node, which
454
+ * is possible under the above encoding.) These more expensive
455
+ * mechanics trigger only when necessary.
456
+ *
457
+ * The traversal scheme also applies to partial traversals of
458
+ * ranges of bins (via an alternate Traverser constructor)
459
+ * to support partitioned aggregate operations. Also, read-only
460
+ * operations give up if ever forwarded to a null table, which
461
+ * provides support for shutdown-style clearing, which is also not
462
+ * currently implemented.
463
+ *
464
+ * Lazy table initialization minimizes footprint until first use,
465
+ * and also avoids resizings when the first operation is from a
466
+ * putAll, constructor with map argument, or deserialization.
467
+ * These cases attempt to override the initial capacity settings,
468
+ * but harmlessly fail to take effect in cases of races.
469
+ *
470
+ * The element count is maintained using a LongAdder, which avoids
471
+ * contention on updates but can encounter cache thrashing if read
472
+ * too frequently during concurrent access. To avoid reading so
473
+ * often, resizing is attempted either when a bin lock is
474
+ * contended, or upon adding to a bin already holding two or more
475
+ * nodes (checked before adding in the xIfAbsent methods, after
476
+ * adding in others). Under uniform hash distributions, the
477
+ * probability of this occurring at threshold is around 13%,
478
+ * meaning that only about 1 in 8 puts check threshold (and after
479
+ * resizing, many fewer do so). But this approximation has high
480
+ * variance for small table sizes, so we check on any collision
481
+ * for sizes <= 64. The bulk putAll operation further reduces
482
+ * contention by only committing count updates upon these size
483
+ * checks.
484
+ *
485
+ * Maintaining API and serialization compatibility with previous
486
+ * versions of this class introduces several oddities. Mainly: We
487
+ * leave untouched but unused constructor arguments referring to
488
+ * concurrencyLevel. We accept a loadFactor constructor argument,
489
+ * but apply it only to initial table capacity (which is the only
490
+ * time that we can guarantee to honor it.) We also declare an
491
+ * unused "Segment" class that is instantiated in minimal form
492
+ * only when serializing.
493
+ */
494
+
495
+ /* ---------------- Constants -------------- */
496
+
497
+ /**
498
+ * The largest possible table capacity. This value must be
499
+ * exactly 1<<30 to stay within Java array allocation and indexing
500
+ * bounds for power of two table sizes, and is further required
501
+ * because the top two bits of 32bit hash fields are used for
502
+ * control purposes.
503
+ */
504
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
505
+
506
+ /**
507
+ * The default initial table capacity. Must be a power of 2
508
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
509
+ */
510
+ private static final int DEFAULT_CAPACITY = 16;
511
+
512
+ /**
513
+ * The largest possible (non-power of two) array size.
514
+ * Needed by toArray and related methods.
515
+ */
516
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
517
+
518
+ /**
519
+ * The default concurrency level for this table. Unused but
520
+ * defined for compatibility with previous versions of this class.
521
+ */
522
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
523
+
524
+ /**
525
+ * The load factor for this table. Overrides of this value in
526
+ * constructors affect only the initial table capacity. The
527
+ * actual floating point value isn't normally used -- it is
528
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
529
+ * the associated resizing threshold.
530
+ */
531
+ private static final float LOAD_FACTOR = 0.75f;
532
+
533
+ /**
534
+ * The buffer size for skipped bins during transfers. The
535
+ * value is arbitrary but should be large enough to avoid
536
+ * most locking stalls during resizes.
537
+ */
538
+ private static final int TRANSFER_BUFFER_SIZE = 32;
539
+
540
+ /**
541
+ * The bin count threshold for using a tree rather than list for a
542
+ * bin. The value reflects the approximate break-even point for
543
+ * using tree-based operations.
544
+ * Note that Doug's version defaults to 8, but when dealing with
545
+ * Ruby objects it is actually beneficial to avoid TreeNodes
546
+ * as long as possible as it usually means going into Ruby land.
547
+ */
548
+ private static final int TREE_THRESHOLD = 16;
549
+
550
+ /*
551
+ * Encodings for special uses of Node hash fields. See above for
552
+ * explanation.
553
+ */
554
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
555
+ static final int LOCKED = 0x40000000; // set/tested only as a bit
556
+ static final int WAITING = 0xc0000000; // both bits set/tested together
557
+ static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
558
+
559
+ /* ---------------- Fields -------------- */
560
+
561
+ /**
562
+ * The array of bins. Lazily initialized upon first insertion.
563
+ * Size is always a power of two. Accessed directly by iterators.
564
+ */
565
+ transient volatile AtomicReferenceArray<Node> table;
566
+
567
+ /**
568
+ * The counter maintaining number of elements.
569
+ */
570
+ private transient LongAdder counter;
571
+
572
+ /**
573
+ * Table initialization and resizing control. When negative, the
574
+ * table is being initialized or resized. Otherwise, when table is
575
+ * null, holds the initial table size to use upon creation, or 0
576
+ * for default. After initialization, holds the next element count
577
+ * value upon which to resize the table.
578
+ */
579
+ private transient volatile int sizeCtl;
580
+
581
+ // views
582
+ private transient KeySetView<K,V> keySet;
583
+ private transient ValuesView<K,V> values;
584
+ private transient EntrySetView<K,V> entrySet;
585
+
586
+ /** For serialization compatibility. Null unless serialized; see below */
587
+ private Segment<K,V>[] segments;
588
+
589
+ static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl");
590
+
591
+ /* ---------------- Table element access -------------- */
592
+
593
+ /*
594
+ * Volatile access methods are used for table elements as well as
595
+ * elements of in-progress next table while resizing. Uses are
596
+ * null checked by callers, and implicitly bounds-checked, relying
597
+ * on the invariants that tab arrays have non-zero size, and all
598
+ * indices are masked with (tab.length - 1) which is never
599
+ * negative and always less than length. Note that, to be correct
600
+ * wrt arbitrary concurrency errors by users, bounds checks must
601
+ * operate on local variables, which accounts for some odd-looking
602
+ * inline assignments below.
603
+ */
604
+
605
+ static final Node tabAt(AtomicReferenceArray<Node> tab, int i) { // used by Iter
606
+ return tab.get(i);
607
+ }
608
+
609
+ private static final boolean casTabAt(AtomicReferenceArray<Node> tab, int i, Node c, Node v) {
610
+ return tab.compareAndSet(i, c, v);
611
+ }
612
+
613
+ private static final void setTabAt(AtomicReferenceArray<Node> tab, int i, Node v) {
614
+ tab.set(i, v);
615
+ }
616
+
617
+ /* ---------------- Nodes -------------- */
618
+
619
+ /**
620
+ * Key-value entry. Note that this is never exported out as a
621
+ * user-visible Map.Entry (see MapEntry below). Nodes with a hash
622
+ * field of MOVED are special, and do not contain user keys or
623
+ * values. Otherwise, keys are never null, and null val fields
624
+ * indicate that a node is in the process of being deleted or
625
+ * created. For purposes of read-only access, a key may be read
626
+ * before a val, but can only be used after checking val to be
627
+ * non-null.
628
+ */
629
+ static class Node {
630
+ volatile int hash;
631
+ final Object key;
632
+ volatile Object val;
633
+ volatile Node next;
634
+
635
+ static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash");
636
+
637
+ Node(int hash, Object key, Object val, Node next) {
638
+ this.hash = hash;
639
+ this.key = key;
640
+ this.val = val;
641
+ this.next = next;
642
+ }
643
+
644
+ /** CompareAndSet the hash field */
645
+ final boolean casHash(int cmp, int val) {
646
+ return HASH_UPDATER.compareAndSet(this, cmp, val);
647
+ }
648
+
649
+ /** The number of spins before blocking for a lock */
650
+ static final int MAX_SPINS =
651
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
652
+
653
+ /**
654
+ * Spins a while if LOCKED bit set and this node is the first
655
+ * of its bin, and then sets WAITING bits on hash field and
656
+ * blocks (once) if they are still set. It is OK for this
657
+ * method to return even if lock is not available upon exit,
658
+ * which enables these simple single-wait mechanics.
659
+ *
660
+ * The corresponding signalling operation is performed within
661
+ * callers: Upon detecting that WAITING has been set when
662
+ * unlocking lock (via a failed CAS from non-waiting LOCKED
663
+ * state), unlockers acquire the sync lock and perform a
664
+ * notifyAll.
665
+ *
666
+ * The initial sanity check on tab and bounds is not currently
667
+ * necessary in the only usages of this method, but enables
668
+ * use in other future contexts.
669
+ */
670
+ final void tryAwaitLock(AtomicReferenceArray<Node> tab, int i) {
671
+ if (tab != null && i >= 0 && i < tab.length()) { // sanity check
672
+ int r = ThreadLocalRandom.current().nextInt(); // randomize spins
673
+ int spins = MAX_SPINS, h;
674
+ while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
675
+ if (spins >= 0) {
676
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
677
+ if (r >= 0 && --spins == 0)
678
+ Thread.yield(); // yield before block
679
+ }
680
+ else if (casHash(h, h | WAITING)) {
681
+ synchronized (this) {
682
+ if (tabAt(tab, i) == this &&
683
+ (hash & WAITING) == WAITING) {
684
+ try {
685
+ wait();
686
+ } catch (InterruptedException ie) {
687
+ Thread.currentThread().interrupt();
688
+ }
689
+ }
690
+ else
691
+ notifyAll(); // possibly won race vs signaller
692
+ }
693
+ break;
694
+ }
695
+ }
696
+ }
697
+ }
698
+ }
699
+
700
+ /* ---------------- TreeBins -------------- */
701
+
702
+ /**
703
+ * Nodes for use in TreeBins
704
+ */
705
+ static final class TreeNode extends Node {
706
+ TreeNode parent; // red-black tree links
707
+ TreeNode left;
708
+ TreeNode right;
709
+ TreeNode prev; // needed to unlink next upon deletion
710
+ boolean red;
711
+
712
+ TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
713
+ super(hash, key, val, next);
714
+ this.parent = parent;
715
+ }
716
+ }
717
+
718
+ /**
719
+ * A specialized form of red-black tree for use in bins
720
+ * whose size exceeds a threshold.
721
+ *
722
+ * TreeBins use a special form of comparison for search and
723
+ * related operations (which is the main reason we cannot use
724
+ * existing collections such as TreeMaps). TreeBins contain
725
+ * Comparable elements, but may contain others, as well as
726
+ * elements that are Comparable but not necessarily Comparable<T>
727
+ * for the same T, so we cannot invoke compareTo among them. To
728
+ * handle this, the tree is ordered primarily by hash value, then
729
+ * by getClass().getName() order, and then by Comparator order
730
+ * among elements of the same class. On lookup at a node, if
731
+ * elements are not comparable or compare as 0, both left and
732
+ * right children may need to be searched in the case of tied hash
733
+ * values. (This corresponds to the full list search that would be
734
+ * necessary if all elements were non-Comparable and had tied
735
+ * hashes.) The red-black balancing code is updated from
736
+ * pre-jdk-collections
737
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
738
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
739
+ * Algorithms" (CLR).
740
+ *
741
+ * TreeBins also maintain a separate locking discipline than
742
+ * regular bins. Because they are forwarded via special MOVED
743
+ * nodes at bin heads (which can never change once established),
744
+ * we cannot use those nodes as locks. Instead, TreeBin
745
+ * extends AbstractQueuedSynchronizer to support a simple form of
746
+ * read-write lock. For update operations and table validation,
747
+ * the exclusive form of lock behaves in the same way as bin-head
748
+ * locks. However, lookups use shared read-lock mechanics to allow
749
+ * multiple readers in the absence of writers. Additionally,
750
+ * these lookups do not ever block: While the lock is not
751
+ * available, they proceed along the slow traversal path (via
752
+ * next-pointers) until the lock becomes available or the list is
753
+ * exhausted, whichever comes first. (These cases are not fast,
754
+ * but maximize aggregate expected throughput.) The AQS mechanics
755
+ * for doing this are straightforward. The lock state is held as
756
+ * AQS getState(). Read counts are negative; the write count (1)
757
+ * is positive. There are no signalling preferences among readers
758
+ * and writers. Since we don't need to export full Lock API, we
759
+ * just override the minimal AQS methods and use them directly.
760
+ */
761
+ static final class TreeBin extends AbstractQueuedSynchronizer {
762
+ private static final long serialVersionUID = 2249069246763182397L;
763
+ transient TreeNode root; // root of tree
764
+ transient TreeNode first; // head of next-pointer list
765
+
766
+ /* AQS overrides */
767
+ public final boolean isHeldExclusively() { return getState() > 0; }
768
+ public final boolean tryAcquire(int ignore) {
769
+ if (compareAndSetState(0, 1)) {
770
+ setExclusiveOwnerThread(Thread.currentThread());
771
+ return true;
772
+ }
773
+ return false;
774
+ }
775
+ public final boolean tryRelease(int ignore) {
776
+ setExclusiveOwnerThread(null);
777
+ setState(0);
778
+ return true;
779
+ }
780
+ public final int tryAcquireShared(int ignore) {
781
+ for (int c;;) {
782
+ if ((c = getState()) > 0)
783
+ return -1;
784
+ if (compareAndSetState(c, c -1))
785
+ return 1;
786
+ }
787
+ }
788
+ public final boolean tryReleaseShared(int ignore) {
789
+ int c;
790
+ do {} while (!compareAndSetState(c = getState(), c + 1));
791
+ return c == -1;
792
+ }
793
+
794
+ /** From CLR */
795
+ private void rotateLeft(TreeNode p) {
796
+ if (p != null) {
797
+ TreeNode r = p.right, pp, rl;
798
+ if ((rl = p.right = r.left) != null)
799
+ rl.parent = p;
800
+ if ((pp = r.parent = p.parent) == null)
801
+ root = r;
802
+ else if (pp.left == p)
803
+ pp.left = r;
804
+ else
805
+ pp.right = r;
806
+ r.left = p;
807
+ p.parent = r;
808
+ }
809
+ }
810
+
811
+ /** From CLR */
812
+ private void rotateRight(TreeNode p) {
813
+ if (p != null) {
814
+ TreeNode l = p.left, pp, lr;
815
+ if ((lr = p.left = l.right) != null)
816
+ lr.parent = p;
817
+ if ((pp = l.parent = p.parent) == null)
818
+ root = l;
819
+ else if (pp.right == p)
820
+ pp.right = l;
821
+ else
822
+ pp.left = l;
823
+ l.right = p;
824
+ p.parent = l;
825
+ }
826
+ }
827
+
828
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
829
+ (int h, Object k, TreeNode p) {
830
+ return getTreeNode(h, (RubyObject)k, p);
831
+ }
832
+
833
+ /**
834
+ * Returns the TreeNode (or null if not found) for the given key
835
+ * starting at given root.
836
+ */
837
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
838
+ (int h, RubyObject k, TreeNode p) {
839
+ RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>");
840
+ while (p != null) {
841
+ int dir, ph; RubyObject pk; RubyClass pc;
842
+ if ((ph = p.hash) == h) {
843
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
844
+ return p;
845
+ if (c != (pc = (RubyClass)pk.getMetaClass()) ||
846
+ kNotComparable ||
847
+ (dir = rubyCompare(k, pk)) == 0) {
848
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
849
+ if (dir == 0) { // if still stuck, need to check both sides
850
+ TreeNode r = null, pl, pr;
851
+ // try to recurse on the right
852
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
853
+ return r;
854
+ // try to continue iterating on the left side
855
+ else if ((pl = p.left) != null && h <= pl.hash)
856
+ dir = -1;
857
+ else // no matching node found
858
+ return null;
859
+ }
860
+ }
861
+ }
862
+ else
863
+ dir = (h < ph) ? -1 : 1;
864
+ p = (dir > 0) ? p.right : p.left;
865
+ }
866
+ return null;
867
+ }
868
+
869
+ int rubyCompare(RubyObject l, RubyObject r) {
870
+ ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext();
871
+ IRubyObject result;
872
+ try {
873
+ result = l.callMethod(context, "<=>", r);
874
+ } catch (RaiseException e) {
875
+ // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys
876
+ if (context.runtime.getNoMethodError().isInstance(e.getException())) {
877
+ return 0;
878
+ }
879
+ throw e;
880
+ }
881
+
882
+ return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger());
883
+ }
884
+
885
+ /**
886
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
887
+ * read-lock to call getTreeNode, but during failure to get
888
+ * lock, searches along next links.
889
+ */
890
+ final Object getValue(int h, Object k) {
891
+ Node r = null;
892
+ int c = getState(); // Must read lock state first
893
+ for (Node e = first; e != null; e = e.next) {
894
+ if (c <= 0 && compareAndSetState(c, c - 1)) {
895
+ try {
896
+ r = getTreeNode(h, k, root);
897
+ } finally {
898
+ releaseShared(0);
899
+ }
900
+ break;
901
+ }
902
+ else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
903
+ r = e;
904
+ break;
905
+ }
906
+ else
907
+ c = getState();
908
+ }
909
+ return r == null ? null : r.val;
910
+ }
911
+
912
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
913
+ (int h, Object k, Object v) {
914
+ return putTreeNode(h, (RubyObject)k, v);
915
+ }
916
+
917
+ /**
918
+ * Finds or adds a node.
919
+ * @return null if added
920
+ */
921
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
922
+ (int h, RubyObject k, Object v) {
923
+ RubyClass c = k.getMetaClass();
924
+ boolean kNotComparable = !k.respondsTo("<=>");
925
+ TreeNode pp = root, p = null;
926
+ int dir = 0;
927
+ while (pp != null) { // find existing node or leaf to insert at
928
+ int ph; RubyObject pk; RubyClass pc;
929
+ p = pp;
930
+ if ((ph = p.hash) == h) {
931
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
932
+ return p;
933
+ if (c != (pc = pk.getMetaClass()) ||
934
+ kNotComparable ||
935
+ (dir = rubyCompare(k, pk)) == 0) {
936
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
937
+ if (dir == 0) { // if still stuck, need to check both sides
938
+ TreeNode r = null, pr;
939
+ // try to recurse on the right
940
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
941
+ return r;
942
+ else // continue descending down the left subtree
943
+ dir = -1;
944
+ }
945
+ }
946
+ }
947
+ else
948
+ dir = (h < ph) ? -1 : 1;
949
+ pp = (dir > 0) ? p.right : p.left;
950
+ }
951
+
952
+ TreeNode f = first;
953
+ TreeNode x = first = new TreeNode(h, (Object)k, v, f, p);
954
+ if (p == null)
955
+ root = x;
956
+ else { // attach and rebalance; adapted from CLR
957
+ TreeNode xp, xpp;
958
+ if (f != null)
959
+ f.prev = x;
960
+ if (dir <= 0)
961
+ p.left = x;
962
+ else
963
+ p.right = x;
964
+ x.red = true;
965
+ while (x != null && (xp = x.parent) != null && xp.red &&
966
+ (xpp = xp.parent) != null) {
967
+ TreeNode xppl = xpp.left;
968
+ if (xp == xppl) {
969
+ TreeNode y = xpp.right;
970
+ if (y != null && y.red) {
971
+ y.red = false;
972
+ xp.red = false;
973
+ xpp.red = true;
974
+ x = xpp;
975
+ }
976
+ else {
977
+ if (x == xp.right) {
978
+ rotateLeft(x = xp);
979
+ xpp = (xp = x.parent) == null ? null : xp.parent;
980
+ }
981
+ if (xp != null) {
982
+ xp.red = false;
983
+ if (xpp != null) {
984
+ xpp.red = true;
985
+ rotateRight(xpp);
986
+ }
987
+ }
988
+ }
989
+ }
990
+ else {
991
+ TreeNode y = xppl;
992
+ if (y != null && y.red) {
993
+ y.red = false;
994
+ xp.red = false;
995
+ xpp.red = true;
996
+ x = xpp;
997
+ }
998
+ else {
999
+ if (x == xp.left) {
1000
+ rotateRight(x = xp);
1001
+ xpp = (xp = x.parent) == null ? null : xp.parent;
1002
+ }
1003
+ if (xp != null) {
1004
+ xp.red = false;
1005
+ if (xpp != null) {
1006
+ xpp.red = true;
1007
+ rotateLeft(xpp);
1008
+ }
1009
+ }
1010
+ }
1011
+ }
1012
+ }
1013
+ TreeNode r = root;
1014
+ if (r != null && r.red)
1015
+ r.red = false;
1016
+ }
1017
+ return null;
1018
+ }
1019
+
1020
+ /**
1021
+ * Removes the given node, that must be present before this
1022
+ * call. This is messier than typical red-black deletion code
1023
+ * because we cannot swap the contents of an interior node
1024
+ * with a leaf successor that is pinned by "next" pointers
1025
+ * that are accessible independently of lock. So instead we
1026
+ * swap the tree linkages.
1027
+ */
1028
+ final void deleteTreeNode(TreeNode p) {
1029
+ TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1030
+ TreeNode pred = p.prev;
1031
+ if (pred == null)
1032
+ first = next;
1033
+ else
1034
+ pred.next = next;
1035
+ if (next != null)
1036
+ next.prev = pred;
1037
+ TreeNode replacement;
1038
+ TreeNode pl = p.left;
1039
+ TreeNode pr = p.right;
1040
+ if (pl != null && pr != null) {
1041
+ TreeNode s = pr, sl;
1042
+ while ((sl = s.left) != null) // find successor
1043
+ s = sl;
1044
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1045
+ TreeNode sr = s.right;
1046
+ TreeNode pp = p.parent;
1047
+ if (s == pr) { // p was s's direct parent
1048
+ p.parent = s;
1049
+ s.right = p;
1050
+ }
1051
+ else {
1052
+ TreeNode sp = s.parent;
1053
+ if ((p.parent = sp) != null) {
1054
+ if (s == sp.left)
1055
+ sp.left = p;
1056
+ else
1057
+ sp.right = p;
1058
+ }
1059
+ if ((s.right = pr) != null)
1060
+ pr.parent = s;
1061
+ }
1062
+ p.left = null;
1063
+ if ((p.right = sr) != null)
1064
+ sr.parent = p;
1065
+ if ((s.left = pl) != null)
1066
+ pl.parent = s;
1067
+ if ((s.parent = pp) == null)
1068
+ root = s;
1069
+ else if (p == pp.left)
1070
+ pp.left = s;
1071
+ else
1072
+ pp.right = s;
1073
+ replacement = sr;
1074
+ }
1075
+ else
1076
+ replacement = (pl != null) ? pl : pr;
1077
+ TreeNode pp = p.parent;
1078
+ if (replacement == null) {
1079
+ if (pp == null) {
1080
+ root = null;
1081
+ return;
1082
+ }
1083
+ replacement = p;
1084
+ }
1085
+ else {
1086
+ replacement.parent = pp;
1087
+ if (pp == null)
1088
+ root = replacement;
1089
+ else if (p == pp.left)
1090
+ pp.left = replacement;
1091
+ else
1092
+ pp.right = replacement;
1093
+ p.left = p.right = p.parent = null;
1094
+ }
1095
+ if (!p.red) { // rebalance, from CLR
1096
+ TreeNode x = replacement;
1097
+ while (x != null) {
1098
+ TreeNode xp, xpl;
1099
+ if (x.red || (xp = x.parent) == null) {
1100
+ x.red = false;
1101
+ break;
1102
+ }
1103
+ if (x == (xpl = xp.left)) {
1104
+ TreeNode sib = xp.right;
1105
+ if (sib != null && sib.red) {
1106
+ sib.red = false;
1107
+ xp.red = true;
1108
+ rotateLeft(xp);
1109
+ sib = (xp = x.parent) == null ? null : xp.right;
1110
+ }
1111
+ if (sib == null)
1112
+ x = xp;
1113
+ else {
1114
+ TreeNode sl = sib.left, sr = sib.right;
1115
+ if ((sr == null || !sr.red) &&
1116
+ (sl == null || !sl.red)) {
1117
+ sib.red = true;
1118
+ x = xp;
1119
+ }
1120
+ else {
1121
+ if (sr == null || !sr.red) {
1122
+ if (sl != null)
1123
+ sl.red = false;
1124
+ sib.red = true;
1125
+ rotateRight(sib);
1126
+ sib = (xp = x.parent) == null ? null : xp.right;
1127
+ }
1128
+ if (sib != null) {
1129
+ sib.red = (xp == null) ? false : xp.red;
1130
+ if ((sr = sib.right) != null)
1131
+ sr.red = false;
1132
+ }
1133
+ if (xp != null) {
1134
+ xp.red = false;
1135
+ rotateLeft(xp);
1136
+ }
1137
+ x = root;
1138
+ }
1139
+ }
1140
+ }
1141
+ else { // symmetric
1142
+ TreeNode sib = xpl;
1143
+ if (sib != null && sib.red) {
1144
+ sib.red = false;
1145
+ xp.red = true;
1146
+ rotateRight(xp);
1147
+ sib = (xp = x.parent) == null ? null : xp.left;
1148
+ }
1149
+ if (sib == null)
1150
+ x = xp;
1151
+ else {
1152
+ TreeNode sl = sib.left, sr = sib.right;
1153
+ if ((sl == null || !sl.red) &&
1154
+ (sr == null || !sr.red)) {
1155
+ sib.red = true;
1156
+ x = xp;
1157
+ }
1158
+ else {
1159
+ if (sl == null || !sl.red) {
1160
+ if (sr != null)
1161
+ sr.red = false;
1162
+ sib.red = true;
1163
+ rotateLeft(sib);
1164
+ sib = (xp = x.parent) == null ? null : xp.left;
1165
+ }
1166
+ if (sib != null) {
1167
+ sib.red = (xp == null) ? false : xp.red;
1168
+ if ((sl = sib.left) != null)
1169
+ sl.red = false;
1170
+ }
1171
+ if (xp != null) {
1172
+ xp.red = false;
1173
+ rotateRight(xp);
1174
+ }
1175
+ x = root;
1176
+ }
1177
+ }
1178
+ }
1179
+ }
1180
+ }
1181
+ if (p == replacement && (pp = p.parent) != null) {
1182
+ if (p == pp.left) // detach pointers
1183
+ pp.left = null;
1184
+ else if (p == pp.right)
1185
+ pp.right = null;
1186
+ p.parent = null;
1187
+ }
1188
+ }
1189
+ }
1190
+
1191
+ /* ---------------- Collision reduction methods -------------- */
1192
+
1193
+ /**
1194
+ * Spreads higher bits to lower, and also forces top 2 bits to 0.
1195
+ * Because the table uses power-of-two masking, sets of hashes
1196
+ * that vary only in bits above the current mask will always
1197
+ * collide. (Among known examples are sets of Float keys holding
1198
+ * consecutive whole numbers in small tables.) To counter this,
1199
+ * we apply a transform that spreads the impact of higher bits
1200
+ * downward. There is a tradeoff between speed, utility, and
1201
+ * quality of bit-spreading. Because many common sets of hashes
1202
+ * are already reasonably distributed across bits (so don't benefit
1203
+ * from spreading), and because we use trees to handle large sets
1204
+ * of collisions in bins, we don't need excessively high quality.
1205
+ */
1206
+ private static final int spread(int h) {
1207
+ h ^= (h >>> 18) ^ (h >>> 12);
1208
+ return (h ^ (h >>> 10)) & HASH_BITS;
1209
+ }
1210
+
1211
+ /**
1212
+ * Replaces a list bin with a tree bin. Call only when locked.
1213
+ * Fails to replace if the given key is non-comparable or table
1214
+ * is, or needs, resizing.
1215
+ */
1216
+ private final void replaceWithTreeBin(AtomicReferenceArray<Node> tab, int index, Object key) {
1217
+ if ((key instanceof Comparable) &&
1218
+ (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1219
+ TreeBin t = new TreeBin();
1220
+ for (Node e = tabAt(tab, index); e != null; e = e.next)
1221
+ t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1222
+ setTabAt(tab, index, new Node(MOVED, t, null, null));
1223
+ }
1224
+ }
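+ // Note (illustrative, not upstream code): the insertion methods below call
+ // this only while holding the bin lock, and only once a chain has grown to
+ // TREE_THRESHOLD nodes, e.g.:
+ //   if (count >= TREE_THRESHOLD)
+ //       replaceWithTreeBin(tab, i, k);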
1225
+
1226
+ /* ---------------- Internal access and update methods -------------- */
1227
+
1228
+ /** Implementation for get and containsKey */
1229
+ private final Object internalGet(Object k) {
1230
+ int h = spread(k.hashCode());
1231
+ retry: for (AtomicReferenceArray<Node> tab = table; tab != null;) {
1232
+ Node e, p; Object ek, ev; int eh; // locals to read fields once
1233
+ for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) {
1234
+ if ((eh = e.hash) == MOVED) {
1235
+ if ((ek = e.key) instanceof TreeBin) // search TreeBin
1236
+ return ((TreeBin)ek).getValue(h, k);
1237
+ else { // restart with new table
1238
+ tab = (AtomicReferenceArray<Node>)ek;
1239
+ continue retry;
1240
+ }
1241
+ }
1242
+ else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1243
+ ((ek = e.key) == k || k.equals(ek)))
1244
+ return ev;
1245
+ }
1246
+ break;
1247
+ }
1248
+ return null;
1249
+ }
1250
+
1251
+ /**
1252
+ * Implementation for the four public remove/replace methods:
1253
+ * Replaces node value with v, conditional upon match of cv if
1254
+ * non-null. If resulting value is null, delete.
1255
+ */
1256
+ private final Object internalReplace(Object k, Object v, Object cv) {
1257
+ int h = spread(k.hashCode());
1258
+ Object oldVal = null;
1259
+ for (AtomicReferenceArray<Node> tab = table;;) {
1260
+ Node f; int i, fh; Object fk;
1261
+ if (tab == null ||
1262
+ (f = tabAt(tab, i = (tab.length() - 1) & h)) == null)
1263
+ break;
1264
+ else if ((fh = f.hash) == MOVED) {
1265
+ if ((fk = f.key) instanceof TreeBin) {
1266
+ TreeBin t = (TreeBin)fk;
1267
+ boolean validated = false;
1268
+ boolean deleted = false;
1269
+ t.acquire(0);
1270
+ try {
1271
+ if (tabAt(tab, i) == f) {
1272
+ validated = true;
1273
+ TreeNode p = t.getTreeNode(h, k, t.root);
1274
+ if (p != null) {
1275
+ Object pv = p.val;
1276
+ if (cv == null || cv == pv || cv.equals(pv)) {
1277
+ oldVal = pv;
1278
+ if ((p.val = v) == null) {
1279
+ deleted = true;
1280
+ t.deleteTreeNode(p);
1281
+ }
1282
+ }
1283
+ }
1284
+ }
1285
+ } finally {
1286
+ t.release(0);
1287
+ }
1288
+ if (validated) {
1289
+ if (deleted)
1290
+ counter.add(-1L);
1291
+ break;
1292
+ }
1293
+ }
1294
+ else
1295
+ tab = (AtomicReferenceArray<Node>)fk;
1296
+ }
1297
+ else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1298
+ break; // rules out possible existence
1299
+ else if ((fh & LOCKED) != 0) {
1300
+ checkForResize(); // try resizing if can't get lock
1301
+ f.tryAwaitLock(tab, i);
1302
+ }
1303
+ else if (f.casHash(fh, fh | LOCKED)) {
1304
+ boolean validated = false;
1305
+ boolean deleted = false;
1306
+ try {
1307
+ if (tabAt(tab, i) == f) {
1308
+ validated = true;
1309
+ for (Node e = f, pred = null;;) {
1310
+ Object ek, ev;
1311
+ if ((e.hash & HASH_BITS) == h &&
1312
+ ((ev = e.val) != null) &&
1313
+ ((ek = e.key) == k || k.equals(ek))) {
1314
+ if (cv == null || cv == ev || cv.equals(ev)) {
1315
+ oldVal = ev;
1316
+ if ((e.val = v) == null) {
1317
+ deleted = true;
1318
+ Node en = e.next;
1319
+ if (pred != null)
1320
+ pred.next = en;
1321
+ else
1322
+ setTabAt(tab, i, en);
1323
+ }
1324
+ }
1325
+ break;
1326
+ }
1327
+ pred = e;
1328
+ if ((e = e.next) == null)
1329
+ break;
1330
+ }
1331
+ }
1332
+ } finally {
1333
+ if (!f.casHash(fh | LOCKED, fh)) {
1334
+ f.hash = fh;
1335
+ synchronized (f) { f.notifyAll(); };
1336
+ }
1337
+ }
1338
+ if (validated) {
1339
+ if (deleted)
1340
+ counter.add(-1L);
1341
+ break;
1342
+ }
1343
+ }
1344
+ }
1345
+ return oldVal;
1346
+ }
1347
+
1348
+ /*
1349
+ * Internal versions of the six insertion methods, each a
1350
+ * little more complicated than the last. All have
1351
+ * the same basic structure as the first (internalPut):
1352
+ * 1. If table uninitialized, create
1353
+ * 2. If bin empty, try to CAS new node
1354
+ * 3. If bin stale, use new table
1355
+ * 4. If bin converted to TreeBin, validate and relay to TreeBin methods
1356
+ * 5. Lock and validate; if valid, scan and add or update
1357
+ *
1358
+ * The others interweave other checks and/or alternative actions:
1359
+ * * Plain put checks for and performs resize after insertion.
1360
+ * * putIfAbsent prescans for mapping without lock (and fails to add
1361
+ * if present), which also makes pre-emptive resize checks worthwhile.
1362
+ * computeIfAbsent extends the form used in putIfAbsent with additional
+ * mechanics to deal with calls, potential exceptions, and null
+ * returns from the function call.
1365
+ * * compute uses the same function-call mechanics, but without
1366
+ * the prescans
1367
+ * * merge acts as putIfAbsent in the absent case, but invokes the
1368
+ * update function if present
1369
+ * * putAll attempts to pre-allocate enough table space
1370
+ * and more lazily performs count updates and checks.
1371
+ *
1372
+ * Someday when details settle down a bit more, it might be worth
1373
+ * some factoring to reduce sprawl.
1374
+ */
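+ /*
+  * Sketch (illustrative, not upstream code) of the per-bin locking idiom the
+  * methods below share: the LOCKED bit of the first node's hash is set via
+  * CAS to lock the bin and cleared in a finally block; if a waiter managed to
+  * set the WAITING bit in the meantime, the plain write plus notifyAll wakes
+  * threads parked in tryAwaitLock().
+  *
+  *   if (f.casHash(fh, fh | LOCKED)) {
+  *       try {
+  *           // ... validate tabAt(tab, i) == f, then scan/mutate the bin ...
+  *       } finally {
+  *           if (!f.casHash(fh | LOCKED, fh)) {
+  *               f.hash = fh;
+  *               synchronized (f) { f.notifyAll(); }
+  *           }
+  *       }
+  *   }
+  */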
1375
+
1376
+ /** Implementation for put */
1377
+ private final Object internalPut(Object k, Object v) {
1378
+ int h = spread(k.hashCode());
1379
+ int count = 0;
1380
+ for (AtomicReferenceArray<Node> tab = table;;) {
1381
+ int i; Node f; int fh; Object fk;
1382
+ if (tab == null)
1383
+ tab = initTable();
1384
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1385
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1386
+ break; // no lock when adding to empty bin
1387
+ }
1388
+ else if ((fh = f.hash) == MOVED) {
1389
+ if ((fk = f.key) instanceof TreeBin) {
1390
+ TreeBin t = (TreeBin)fk;
1391
+ Object oldVal = null;
1392
+ t.acquire(0);
1393
+ try {
1394
+ if (tabAt(tab, i) == f) {
1395
+ count = 2;
1396
+ TreeNode p = t.putTreeNode(h, k, v);
1397
+ if (p != null) {
1398
+ oldVal = p.val;
1399
+ p.val = v;
1400
+ }
1401
+ }
1402
+ } finally {
1403
+ t.release(0);
1404
+ }
1405
+ if (count != 0) {
1406
+ if (oldVal != null)
1407
+ return oldVal;
1408
+ break;
1409
+ }
1410
+ }
1411
+ else
1412
+ tab = (AtomicReferenceArray<Node>)fk;
1413
+ }
1414
+ else if ((fh & LOCKED) != 0) {
1415
+ checkForResize();
1416
+ f.tryAwaitLock(tab, i);
1417
+ }
1418
+ else if (f.casHash(fh, fh | LOCKED)) {
1419
+ Object oldVal = null;
1420
+ try { // needed in case equals() throws
1421
+ if (tabAt(tab, i) == f) {
1422
+ count = 1;
1423
+ for (Node e = f;; ++count) {
1424
+ Object ek, ev;
1425
+ if ((e.hash & HASH_BITS) == h &&
1426
+ (ev = e.val) != null &&
1427
+ ((ek = e.key) == k || k.equals(ek))) {
1428
+ oldVal = ev;
1429
+ e.val = v;
1430
+ break;
1431
+ }
1432
+ Node last = e;
1433
+ if ((e = e.next) == null) {
1434
+ last.next = new Node(h, k, v, null);
1435
+ if (count >= TREE_THRESHOLD)
1436
+ replaceWithTreeBin(tab, i, k);
1437
+ break;
1438
+ }
1439
+ }
1440
+ }
1441
+ } finally { // unlock and signal if needed
1442
+ if (!f.casHash(fh | LOCKED, fh)) {
1443
+ f.hash = fh;
1444
+ synchronized (f) { f.notifyAll(); };
1445
+ }
1446
+ }
1447
+ if (count != 0) {
1448
+ if (oldVal != null)
1449
+ return oldVal;
1450
+ if (tab.length() <= 64)
1451
+ count = 2;
1452
+ break;
1453
+ }
1454
+ }
1455
+ }
1456
+ counter.add(1L);
1457
+ if (count > 1)
1458
+ checkForResize();
1459
+ return null;
1460
+ }
1461
+
1462
+ /** Implementation for putIfAbsent */
1463
+ private final Object internalPutIfAbsent(Object k, Object v) {
1464
+ int h = spread(k.hashCode());
1465
+ int count = 0;
1466
+ for (AtomicReferenceArray<Node> tab = table;;) {
1467
+ int i; Node f; int fh; Object fk, fv;
1468
+ if (tab == null)
1469
+ tab = initTable();
1470
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1471
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1472
+ break;
1473
+ }
1474
+ else if ((fh = f.hash) == MOVED) {
1475
+ if ((fk = f.key) instanceof TreeBin) {
1476
+ TreeBin t = (TreeBin)fk;
1477
+ Object oldVal = null;
1478
+ t.acquire(0);
1479
+ try {
1480
+ if (tabAt(tab, i) == f) {
1481
+ count = 2;
1482
+ TreeNode p = t.putTreeNode(h, k, v);
1483
+ if (p != null)
1484
+ oldVal = p.val;
1485
+ }
1486
+ } finally {
1487
+ t.release(0);
1488
+ }
1489
+ if (count != 0) {
1490
+ if (oldVal != null)
1491
+ return oldVal;
1492
+ break;
1493
+ }
1494
+ }
1495
+ else
1496
+ tab = (AtomicReferenceArray<Node>)fk;
1497
+ }
1498
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1499
+ ((fk = f.key) == k || k.equals(fk)))
1500
+ return fv;
1501
+ else {
1502
+ Node g = f.next;
1503
+ if (g != null) { // at least 2 nodes -- search and maybe resize
1504
+ for (Node e = g;;) {
1505
+ Object ek, ev;
1506
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1507
+ ((ek = e.key) == k || k.equals(ek)))
1508
+ return ev;
1509
+ if ((e = e.next) == null) {
1510
+ checkForResize();
1511
+ break;
1512
+ }
1513
+ }
1514
+ }
1515
+ if (((fh = f.hash) & LOCKED) != 0) {
1516
+ checkForResize();
1517
+ f.tryAwaitLock(tab, i);
1518
+ }
1519
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1520
+ Object oldVal = null;
1521
+ try {
1522
+ if (tabAt(tab, i) == f) {
1523
+ count = 1;
1524
+ for (Node e = f;; ++count) {
1525
+ Object ek, ev;
1526
+ if ((e.hash & HASH_BITS) == h &&
1527
+ (ev = e.val) != null &&
1528
+ ((ek = e.key) == k || k.equals(ek))) {
1529
+ oldVal = ev;
1530
+ break;
1531
+ }
1532
+ Node last = e;
1533
+ if ((e = e.next) == null) {
1534
+ last.next = new Node(h, k, v, null);
1535
+ if (count >= TREE_THRESHOLD)
1536
+ replaceWithTreeBin(tab, i, k);
1537
+ break;
1538
+ }
1539
+ }
1540
+ }
1541
+ } finally {
1542
+ if (!f.casHash(fh | LOCKED, fh)) {
1543
+ f.hash = fh;
1544
+ synchronized (f) { f.notifyAll(); };
1545
+ }
1546
+ }
1547
+ if (count != 0) {
1548
+ if (oldVal != null)
1549
+ return oldVal;
1550
+ if (tab.length() <= 64)
1551
+ count = 2;
1552
+ break;
1553
+ }
1554
+ }
1555
+ }
1556
+ }
1557
+ counter.add(1L);
1558
+ if (count > 1)
1559
+ checkForResize();
1560
+ return null;
1561
+ }
1562
+
1563
+ /** Implementation for computeIfAbsent */
1564
+ private final Object internalComputeIfAbsent(K k,
1565
+ Fun<? super K, ?> mf) {
1566
+ int h = spread(k.hashCode());
1567
+ Object val = null;
1568
+ int count = 0;
1569
+ for (AtomicReferenceArray<Node> tab = table;;) {
1570
+ Node f; int i, fh; Object fk, fv;
1571
+ if (tab == null)
1572
+ tab = initTable();
1573
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1574
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1575
+ if (casTabAt(tab, i, null, node)) {
1576
+ count = 1;
1577
+ try {
1578
+ if ((val = mf.apply(k)) != null)
1579
+ node.val = val;
1580
+ } finally {
1581
+ if (val == null)
1582
+ setTabAt(tab, i, null);
1583
+ if (!node.casHash(fh, h)) {
1584
+ node.hash = h;
1585
+ synchronized (node) { node.notifyAll(); };
1586
+ }
1587
+ }
1588
+ }
1589
+ if (count != 0)
1590
+ break;
1591
+ }
1592
+ else if ((fh = f.hash) == MOVED) {
1593
+ if ((fk = f.key) instanceof TreeBin) {
1594
+ TreeBin t = (TreeBin)fk;
1595
+ boolean added = false;
1596
+ t.acquire(0);
1597
+ try {
1598
+ if (tabAt(tab, i) == f) {
1599
+ count = 1;
1600
+ TreeNode p = t.getTreeNode(h, k, t.root);
1601
+ if (p != null)
1602
+ val = p.val;
1603
+ else if ((val = mf.apply(k)) != null) {
1604
+ added = true;
1605
+ count = 2;
1606
+ t.putTreeNode(h, k, val);
1607
+ }
1608
+ }
1609
+ } finally {
1610
+ t.release(0);
1611
+ }
1612
+ if (count != 0) {
1613
+ if (!added)
1614
+ return val;
1615
+ break;
1616
+ }
1617
+ }
1618
+ else
1619
+ tab = (AtomicReferenceArray<Node>)fk;
1620
+ }
1621
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1622
+ ((fk = f.key) == k || k.equals(fk)))
1623
+ return fv;
1624
+ else {
1625
+ Node g = f.next;
1626
+ if (g != null) {
1627
+ for (Node e = g;;) {
1628
+ Object ek, ev;
1629
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1630
+ ((ek = e.key) == k || k.equals(ek)))
1631
+ return ev;
1632
+ if ((e = e.next) == null) {
1633
+ checkForResize();
1634
+ break;
1635
+ }
1636
+ }
1637
+ }
1638
+ if (((fh = f.hash) & LOCKED) != 0) {
1639
+ checkForResize();
1640
+ f.tryAwaitLock(tab, i);
1641
+ }
1642
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1643
+ boolean added = false;
1644
+ try {
1645
+ if (tabAt(tab, i) == f) {
1646
+ count = 1;
1647
+ for (Node e = f;; ++count) {
1648
+ Object ek, ev;
1649
+ if ((e.hash & HASH_BITS) == h &&
1650
+ (ev = e.val) != null &&
1651
+ ((ek = e.key) == k || k.equals(ek))) {
1652
+ val = ev;
1653
+ break;
1654
+ }
1655
+ Node last = e;
1656
+ if ((e = e.next) == null) {
1657
+ if ((val = mf.apply(k)) != null) {
1658
+ added = true;
1659
+ last.next = new Node(h, k, val, null);
1660
+ if (count >= TREE_THRESHOLD)
1661
+ replaceWithTreeBin(tab, i, k);
1662
+ }
1663
+ break;
1664
+ }
1665
+ }
1666
+ }
1667
+ } finally {
1668
+ if (!f.casHash(fh | LOCKED, fh)) {
1669
+ f.hash = fh;
1670
+ synchronized (f) { f.notifyAll(); };
1671
+ }
1672
+ }
1673
+ if (count != 0) {
1674
+ if (!added)
1675
+ return val;
1676
+ if (tab.length() <= 64)
1677
+ count = 2;
1678
+ break;
1679
+ }
1680
+ }
1681
+ }
1682
+ }
1683
+ if (val != null) {
1684
+ counter.add(1L);
1685
+ if (count > 1)
1686
+ checkForResize();
1687
+ }
1688
+ return val;
1689
+ }
1690
+
1691
+ /** Implementation for compute */
1692
+ @SuppressWarnings("unchecked") private final Object internalCompute
1693
+ (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1694
+ int h = spread(k.hashCode());
1695
+ Object val = null;
1696
+ int delta = 0;
1697
+ int count = 0;
1698
+ for (AtomicReferenceArray<Node> tab = table;;) {
1699
+ Node f; int i, fh; Object fk;
1700
+ if (tab == null)
1701
+ tab = initTable();
1702
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1703
+ if (onlyIfPresent)
1704
+ break;
1705
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1706
+ if (casTabAt(tab, i, null, node)) {
1707
+ try {
1708
+ count = 1;
1709
+ if ((val = mf.apply(k, null)) != null) {
1710
+ node.val = val;
1711
+ delta = 1;
1712
+ }
1713
+ } finally {
1714
+ if (delta == 0)
1715
+ setTabAt(tab, i, null);
1716
+ if (!node.casHash(fh, h)) {
1717
+ node.hash = h;
1718
+ synchronized (node) { node.notifyAll(); };
1719
+ }
1720
+ }
1721
+ }
1722
+ if (count != 0)
1723
+ break;
1724
+ }
1725
+ else if ((fh = f.hash) == MOVED) {
1726
+ if ((fk = f.key) instanceof TreeBin) {
1727
+ TreeBin t = (TreeBin)fk;
1728
+ t.acquire(0);
1729
+ try {
1730
+ if (tabAt(tab, i) == f) {
1731
+ count = 1;
1732
+ TreeNode p = t.getTreeNode(h, k, t.root);
1733
+ Object pv;
1734
+ if (p == null) {
1735
+ if (onlyIfPresent)
1736
+ break;
1737
+ pv = null;
1738
+ } else
1739
+ pv = p.val;
1740
+ if ((val = mf.apply(k, (V)pv)) != null) {
1741
+ if (p != null)
1742
+ p.val = val;
1743
+ else {
1744
+ count = 2;
1745
+ delta = 1;
1746
+ t.putTreeNode(h, k, val);
1747
+ }
1748
+ }
1749
+ else if (p != null) {
1750
+ delta = -1;
1751
+ t.deleteTreeNode(p);
1752
+ }
1753
+ }
1754
+ } finally {
1755
+ t.release(0);
1756
+ }
1757
+ if (count != 0)
1758
+ break;
1759
+ }
1760
+ else
1761
+ tab = (AtomicReferenceArray<Node>)fk;
1762
+ }
1763
+ else if ((fh & LOCKED) != 0) {
1764
+ checkForResize();
1765
+ f.tryAwaitLock(tab, i);
1766
+ }
1767
+ else if (f.casHash(fh, fh | LOCKED)) {
1768
+ try {
1769
+ if (tabAt(tab, i) == f) {
1770
+ count = 1;
1771
+ for (Node e = f, pred = null;; ++count) {
1772
+ Object ek, ev;
1773
+ if ((e.hash & HASH_BITS) == h &&
1774
+ (ev = e.val) != null &&
1775
+ ((ek = e.key) == k || k.equals(ek))) {
1776
+ val = mf.apply(k, (V)ev);
1777
+ if (val != null)
1778
+ e.val = val;
1779
+ else {
1780
+ delta = -1;
1781
+ Node en = e.next;
1782
+ if (pred != null)
1783
+ pred.next = en;
1784
+ else
1785
+ setTabAt(tab, i, en);
1786
+ }
1787
+ break;
1788
+ }
1789
+ pred = e;
1790
+ if ((e = e.next) == null) {
1791
+ if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1792
+ pred.next = new Node(h, k, val, null);
1793
+ delta = 1;
1794
+ if (count >= TREE_THRESHOLD)
1795
+ replaceWithTreeBin(tab, i, k);
1796
+ }
1797
+ break;
1798
+ }
1799
+ }
1800
+ }
1801
+ } finally {
1802
+ if (!f.casHash(fh | LOCKED, fh)) {
1803
+ f.hash = fh;
1804
+ synchronized (f) { f.notifyAll(); };
1805
+ }
1806
+ }
1807
+ if (count != 0) {
1808
+ if (tab.length() <= 64)
1809
+ count = 2;
1810
+ break;
1811
+ }
1812
+ }
1813
+ }
1814
+ if (delta != 0) {
1815
+ counter.add((long)delta);
1816
+ if (count > 1)
1817
+ checkForResize();
1818
+ }
1819
+ return val;
1820
+ }
1821
+
1822
+ /** Implementation for merge */
1823
+ @SuppressWarnings("unchecked") private final Object internalMerge
1824
+ (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1825
+ int h = spread(k.hashCode());
1826
+ Object val = null;
1827
+ int delta = 0;
1828
+ int count = 0;
1829
+ for (AtomicReferenceArray<Node> tab = table;;) {
1830
+ int i; Node f; int fh; Object fk, fv;
1831
+ if (tab == null)
1832
+ tab = initTable();
1833
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1834
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1835
+ delta = 1;
1836
+ val = v;
1837
+ break;
1838
+ }
1839
+ }
1840
+ else if ((fh = f.hash) == MOVED) {
1841
+ if ((fk = f.key) instanceof TreeBin) {
1842
+ TreeBin t = (TreeBin)fk;
1843
+ t.acquire(0);
1844
+ try {
1845
+ if (tabAt(tab, i) == f) {
1846
+ count = 1;
1847
+ TreeNode p = t.getTreeNode(h, k, t.root);
1848
+ val = (p == null) ? v : mf.apply((V)p.val, v);
1849
+ if (val != null) {
1850
+ if (p != null)
1851
+ p.val = val;
1852
+ else {
1853
+ count = 2;
1854
+ delta = 1;
1855
+ t.putTreeNode(h, k, val);
1856
+ }
1857
+ }
1858
+ else if (p != null) {
1859
+ delta = -1;
1860
+ t.deleteTreeNode(p);
1861
+ }
1862
+ }
1863
+ } finally {
1864
+ t.release(0);
1865
+ }
1866
+ if (count != 0)
1867
+ break;
1868
+ }
1869
+ else
1870
+ tab = (AtomicReferenceArray<Node>)fk;
1871
+ }
1872
+ else if ((fh & LOCKED) != 0) {
1873
+ checkForResize();
1874
+ f.tryAwaitLock(tab, i);
1875
+ }
1876
+ else if (f.casHash(fh, fh | LOCKED)) {
1877
+ try {
1878
+ if (tabAt(tab, i) == f) {
1879
+ count = 1;
1880
+ for (Node e = f, pred = null;; ++count) {
1881
+ Object ek, ev;
1882
+ if ((e.hash & HASH_BITS) == h &&
1883
+ (ev = e.val) != null &&
1884
+ ((ek = e.key) == k || k.equals(ek))) {
1885
+ val = mf.apply((V)ev, v);
1886
+ if (val != null)
1887
+ e.val = val;
1888
+ else {
1889
+ delta = -1;
1890
+ Node en = e.next;
1891
+ if (pred != null)
1892
+ pred.next = en;
1893
+ else
1894
+ setTabAt(tab, i, en);
1895
+ }
1896
+ break;
1897
+ }
1898
+ pred = e;
1899
+ if ((e = e.next) == null) {
1900
+ val = v;
1901
+ pred.next = new Node(h, k, val, null);
1902
+ delta = 1;
1903
+ if (count >= TREE_THRESHOLD)
1904
+ replaceWithTreeBin(tab, i, k);
1905
+ break;
1906
+ }
1907
+ }
1908
+ }
1909
+ } finally {
1910
+ if (!f.casHash(fh | LOCKED, fh)) {
1911
+ f.hash = fh;
1912
+ synchronized (f) { f.notifyAll(); };
1913
+ }
1914
+ }
1915
+ if (count != 0) {
1916
+ if (tab.length() <= 64)
1917
+ count = 2;
1918
+ break;
1919
+ }
1920
+ }
1921
+ }
1922
+ if (delta != 0) {
1923
+ counter.add((long)delta);
1924
+ if (count > 1)
1925
+ checkForResize();
1926
+ }
1927
+ return val;
1928
+ }
1929
+
1930
+ /** Implementation for putAll */
1931
+ private final void internalPutAll(Map<?, ?> m) {
1932
+ tryPresize(m.size());
1933
+ long delta = 0L; // number of uncommitted additions
1934
+ boolean npe = false; // to throw exception on exit for nulls
1935
+ try { // to clean up counts on other exceptions
1936
+ for (Map.Entry<?, ?> entry : m.entrySet()) {
1937
+ Object k, v;
1938
+ if (entry == null || (k = entry.getKey()) == null ||
1939
+ (v = entry.getValue()) == null) {
1940
+ npe = true;
1941
+ break;
1942
+ }
1943
+ int h = spread(k.hashCode());
1944
+ for (AtomicReferenceArray<Node> tab = table;;) {
1945
+ int i; Node f; int fh; Object fk;
1946
+ if (tab == null)
1947
+ tab = initTable();
1948
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){
1949
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1950
+ ++delta;
1951
+ break;
1952
+ }
1953
+ }
1954
+ else if ((fh = f.hash) == MOVED) {
1955
+ if ((fk = f.key) instanceof TreeBin) {
1956
+ TreeBin t = (TreeBin)fk;
1957
+ boolean validated = false;
1958
+ t.acquire(0);
1959
+ try {
1960
+ if (tabAt(tab, i) == f) {
1961
+ validated = true;
1962
+ TreeNode p = t.getTreeNode(h, k, t.root);
1963
+ if (p != null)
1964
+ p.val = v;
1965
+ else {
1966
+ t.putTreeNode(h, k, v);
1967
+ ++delta;
1968
+ }
1969
+ }
1970
+ } finally {
1971
+ t.release(0);
1972
+ }
1973
+ if (validated)
1974
+ break;
1975
+ }
1976
+ else
1977
+ tab = (AtomicReferenceArray<Node>)fk;
1978
+ }
1979
+ else if ((fh & LOCKED) != 0) {
1980
+ counter.add(delta);
1981
+ delta = 0L;
1982
+ checkForResize();
1983
+ f.tryAwaitLock(tab, i);
1984
+ }
1985
+ else if (f.casHash(fh, fh | LOCKED)) {
1986
+ int count = 0;
1987
+ try {
1988
+ if (tabAt(tab, i) == f) {
1989
+ count = 1;
1990
+ for (Node e = f;; ++count) {
1991
+ Object ek, ev;
1992
+ if ((e.hash & HASH_BITS) == h &&
1993
+ (ev = e.val) != null &&
1994
+ ((ek = e.key) == k || k.equals(ek))) {
1995
+ e.val = v;
1996
+ break;
1997
+ }
1998
+ Node last = e;
1999
+ if ((e = e.next) == null) {
2000
+ ++delta;
2001
+ last.next = new Node(h, k, v, null);
2002
+ if (count >= TREE_THRESHOLD)
2003
+ replaceWithTreeBin(tab, i, k);
2004
+ break;
2005
+ }
2006
+ }
2007
+ }
2008
+ } finally {
2009
+ if (!f.casHash(fh | LOCKED, fh)) {
2010
+ f.hash = fh;
2011
+ synchronized (f) { f.notifyAll(); };
2012
+ }
2013
+ }
2014
+ if (count != 0) {
2015
+ if (count > 1) {
2016
+ counter.add(delta);
2017
+ delta = 0L;
2018
+ checkForResize();
2019
+ }
2020
+ break;
2021
+ }
2022
+ }
2023
+ }
2024
+ }
2025
+ } finally {
2026
+ if (delta != 0)
2027
+ counter.add(delta);
2028
+ }
2029
+ if (npe)
2030
+ throw new NullPointerException();
2031
+ }
2032
+
2033
+ /* ---------------- Table Initialization and Resizing -------------- */
2034
+
2035
+ /**
2036
+ * Returns a power of two table size for the given desired capacity.
2037
+ * See Hacker's Delight, sec 3.2
2038
+ */
2039
+ private static final int tableSizeFor(int c) {
2040
+ int n = c - 1;
2041
+ n |= n >>> 1;
2042
+ n |= n >>> 2;
2043
+ n |= n >>> 4;
2044
+ n |= n >>> 8;
2045
+ n |= n >>> 16;
2046
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2047
+ }
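+ // Illustrative values (hand-checked, not upstream code): the or-shift
+ // cascade smears the highest set bit of (c - 1) downward, so n + 1 is the
+ // next power of two at or above c:
+ //   tableSizeFor(10) -> 16,  tableSizeFor(16) -> 16,  tableSizeFor(17) -> 32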
2048
+
2049
+ /**
2050
+ * Initializes table, using the size recorded in sizeCtl.
2051
+ */
2052
+ private final AtomicReferenceArray<Node> initTable() {
2053
+ AtomicReferenceArray<Node> tab; int sc;
2054
+ while ((tab = table) == null) {
2055
+ if ((sc = sizeCtl) < 0)
2056
+ Thread.yield(); // lost initialization race; just spin
2057
+ else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2058
+ try {
2059
+ if ((tab = table) == null) {
2060
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2061
+ tab = table = new AtomicReferenceArray<Node>(n);
2062
+ sc = n - (n >>> 2);
2063
+ }
2064
+ } finally {
2065
+ sizeCtl = sc;
2066
+ }
2067
+ break;
2068
+ }
2069
+ }
2070
+ return tab;
2071
+ }
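+ // Note (illustrative): the post-initialization value sc = n - (n >>> 2) is
+ // 0.75 * n, i.e. the usual load-factor threshold; for the default capacity
+ // of 16 the first resize check triggers at 12 mappings.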
2072
+
2073
+ /**
2074
+ * If table is too small and not already resizing, creates next
2075
+ * table and transfers bins. Rechecks occupancy after a transfer
2076
+ * to see if another resize is already needed because resizings
2077
+ * are lagging additions.
2078
+ */
2079
+ private final void checkForResize() {
2080
+ AtomicReferenceArray<Node> tab; int n, sc;
2081
+ while ((tab = table) != null &&
2082
+ (n = tab.length()) < MAXIMUM_CAPACITY &&
2083
+ (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2084
+ SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2085
+ try {
2086
+ if (tab == table) {
2087
+ table = rebuild(tab);
2088
+ sc = (n << 1) - (n >>> 1);
2089
+ }
2090
+ } finally {
2091
+ sizeCtl = sc;
2092
+ }
2093
+ }
2094
+ }
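+ // Note (illustrative): after doubling, sc = (n << 1) - (n >>> 1) = 1.5 * n,
+ // which is again 0.75 of the new capacity 2 * n (n being the old,
+ // power-of-two length).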
2095
+
2096
+ /**
2097
+ * Tries to presize table to accommodate the given number of elements.
2098
+ *
2099
+ * @param size number of elements (doesn't need to be perfectly accurate)
2100
+ */
2101
+ private final void tryPresize(int size) {
2102
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2103
+ tableSizeFor(size + (size >>> 1) + 1);
2104
+ int sc;
2105
+ while ((sc = sizeCtl) >= 0) {
2106
+ AtomicReferenceArray<Node> tab = table; int n;
2107
+ if (tab == null || (n = tab.length()) == 0) {
2108
+ n = (sc > c) ? sc : c;
2109
+ if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2110
+ try {
2111
+ if (table == tab) {
2112
+ table = new AtomicReferenceArray<Node>(n);
2113
+ sc = n - (n >>> 2);
2114
+ }
2115
+ } finally {
2116
+ sizeCtl = sc;
2117
+ }
2118
+ }
2119
+ }
2120
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
2121
+ break;
2122
+ else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2123
+ try {
2124
+ if (table == tab) {
2125
+ table = rebuild(tab);
2126
+ sc = (n << 1) - (n >>> 1);
2127
+ }
2128
+ } finally {
2129
+ sizeCtl = sc;
2130
+ }
2131
+ }
2132
+ }
2133
+ }
2134
+
2135
+ /*
2136
+ * Moves and/or copies the nodes in each bin to new table. See
2137
+ * above for explanation.
2138
+ *
2139
+ * @return the new table
2140
+ */
2141
+ private static final AtomicReferenceArray<Node> rebuild(AtomicReferenceArray<Node> tab) {
2142
+ int n = tab.length();
2143
+ AtomicReferenceArray<Node> nextTab = new AtomicReferenceArray<Node>(n << 1);
2144
+ Node fwd = new Node(MOVED, nextTab, null, null);
2145
+ int[] buffer = null; // holds bins to revisit; null until needed
2146
+ Node rev = null; // reverse forwarder; null until needed
2147
+ int nbuffered = 0; // the number of bins in buffer list
2148
+ int bufferIndex = 0; // buffer index of current buffered bin
2149
+ int bin = n - 1; // current non-buffered bin or -1 if none
2150
+
2151
+ for (int i = bin;;) { // start upwards sweep
2152
+ int fh; Node f;
2153
+ if ((f = tabAt(tab, i)) == null) {
2154
+ if (bin >= 0) { // Unbuffered; no lock needed (or available)
2155
+ if (!casTabAt(tab, i, f, fwd))
2156
+ continue;
2157
+ }
2158
+ else { // transiently use a locked forwarding node
2159
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2160
+ if (!casTabAt(tab, i, f, g))
2161
+ continue;
2162
+ setTabAt(nextTab, i, null);
2163
+ setTabAt(nextTab, i + n, null);
2164
+ setTabAt(tab, i, fwd);
2165
+ if (!g.casHash(MOVED|LOCKED, MOVED)) {
2166
+ g.hash = MOVED;
2167
+ synchronized (g) { g.notifyAll(); }
2168
+ }
2169
+ }
2170
+ }
2171
+ else if ((fh = f.hash) == MOVED) {
2172
+ Object fk = f.key;
2173
+ if (fk instanceof TreeBin) {
2174
+ TreeBin t = (TreeBin)fk;
2175
+ boolean validated = false;
2176
+ t.acquire(0);
2177
+ try {
2178
+ if (tabAt(tab, i) == f) {
2179
+ validated = true;
2180
+ splitTreeBin(nextTab, i, t);
2181
+ setTabAt(tab, i, fwd);
2182
+ }
2183
+ } finally {
2184
+ t.release(0);
2185
+ }
2186
+ if (!validated)
2187
+ continue;
2188
+ }
2189
+ }
2190
+ else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2191
+ boolean validated = false;
2192
+ try { // split to lo and hi lists; copying as needed
2193
+ if (tabAt(tab, i) == f) {
2194
+ validated = true;
2195
+ splitBin(nextTab, i, f);
2196
+ setTabAt(tab, i, fwd);
2197
+ }
2198
+ } finally {
2199
+ if (!f.casHash(fh | LOCKED, fh)) {
2200
+ f.hash = fh;
2201
+ synchronized (f) { f.notifyAll(); };
2202
+ }
2203
+ }
2204
+ if (!validated)
2205
+ continue;
2206
+ }
2207
+ else {
2208
+ if (buffer == null) // initialize buffer for revisits
2209
+ buffer = new int[TRANSFER_BUFFER_SIZE];
2210
+ if (bin < 0 && bufferIndex > 0) {
2211
+ int j = buffer[--bufferIndex];
2212
+ buffer[bufferIndex] = i;
2213
+ i = j; // swap with another bin
2214
+ continue;
2215
+ }
2216
+ if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2217
+ f.tryAwaitLock(tab, i);
2218
+ continue; // no other options -- block
2219
+ }
2220
+ if (rev == null) // initialize reverse-forwarder
2221
+ rev = new Node(MOVED, tab, null, null);
2222
+ if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2223
+ continue; // recheck before adding to list
2224
+ buffer[nbuffered++] = i;
2225
+ setTabAt(nextTab, i, rev); // install place-holders
2226
+ setTabAt(nextTab, i + n, rev);
2227
+ }
2228
+
2229
+ if (bin > 0)
2230
+ i = --bin;
2231
+ else if (buffer != null && nbuffered > 0) {
2232
+ bin = -1;
2233
+ i = buffer[bufferIndex = --nbuffered];
2234
+ }
2235
+ else
2236
+ return nextTab;
2237
+ }
2238
+ }
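+ // Sketch (illustrative, not upstream code): once a bin has been transferred,
+ // it is replaced by a forwarding node (hash == MOVED, key == nextTab), and
+ // readers such as internalGet above simply retarget their search:
+ //   if (e.hash == MOVED && !(e.key instanceof TreeBin))
+ //       tab = (AtomicReferenceArray<Node>) e.key;   // follow to the new table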
2239
+
2240
+ /**
2241
+ * Splits a normal bin with list headed by e into lo and hi parts;
2242
+ * installs in given table.
2243
+ */
2244
+ private static void splitBin(AtomicReferenceArray<Node> nextTab, int i, Node e) {
2245
+ int bit = nextTab.length() >>> 1; // bit to split on
2246
+ int runBit = e.hash & bit;
2247
+ Node lastRun = e, lo = null, hi = null;
2248
+ for (Node p = e.next; p != null; p = p.next) {
2249
+ int b = p.hash & bit;
2250
+ if (b != runBit) {
2251
+ runBit = b;
2252
+ lastRun = p;
2253
+ }
2254
+ }
2255
+ if (runBit == 0)
2256
+ lo = lastRun;
2257
+ else
2258
+ hi = lastRun;
2259
+ for (Node p = e; p != lastRun; p = p.next) {
2260
+ int ph = p.hash & HASH_BITS;
2261
+ Object pk = p.key, pv = p.val;
2262
+ if ((ph & bit) == 0)
2263
+ lo = new Node(ph, pk, pv, lo);
2264
+ else
2265
+ hi = new Node(ph, pk, pv, hi);
2266
+ }
2267
+ setTabAt(nextTab, i, lo);
2268
+ setTabAt(nextTab, i + bit, hi);
2269
+ }
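+ // Illustrative example (hand-checked, not upstream code): with an old length
+ // of 8 and a new length of 16, bit == 8. A node whose spread hash is 13
+ // (0b1101) goes to the "hi" list and lands at index 5 + 8 = 13 in the new
+ // table, while hash 5 (0b0101) stays at index 5. The lastRun scan reuses the
+ // longest trailing run of nodes that all fall on the same side, so those
+ // nodes are relinked rather than copied.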
2270
+
2271
+ /**
2272
+ * Splits a tree bin into lo and hi parts; installs in given table.
2273
+ */
2274
+ private static void splitTreeBin(AtomicReferenceArray<Node> nextTab, int i, TreeBin t) {
2275
+ int bit = nextTab.length() >>> 1;
2276
+ TreeBin lt = new TreeBin();
2277
+ TreeBin ht = new TreeBin();
2278
+ int lc = 0, hc = 0;
2279
+ for (Node e = t.first; e != null; e = e.next) {
2280
+ int h = e.hash & HASH_BITS;
2281
+ Object k = e.key, v = e.val;
2282
+ if ((h & bit) == 0) {
2283
+ ++lc;
2284
+ lt.putTreeNode(h, k, v);
2285
+ }
2286
+ else {
2287
+ ++hc;
2288
+ ht.putTreeNode(h, k, v);
2289
+ }
2290
+ }
2291
+ Node ln, hn; // throw away trees if too small
2292
+ if (lc <= (TREE_THRESHOLD >>> 1)) {
2293
+ ln = null;
2294
+ for (Node p = lt.first; p != null; p = p.next)
2295
+ ln = new Node(p.hash, p.key, p.val, ln);
2296
+ }
2297
+ else
2298
+ ln = new Node(MOVED, lt, null, null);
2299
+ setTabAt(nextTab, i, ln);
2300
+ if (hc <= (TREE_THRESHOLD >>> 1)) {
2301
+ hn = null;
2302
+ for (Node p = ht.first; p != null; p = p.next)
2303
+ hn = new Node(p.hash, p.key, p.val, hn);
2304
+ }
2305
+ else
2306
+ hn = new Node(MOVED, ht, null, null);
2307
+ setTabAt(nextTab, i + bit, hn);
2308
+ }
2309
+
2310
+ /**
2311
+ * Implementation for clear. Steps through each bin, removing all
2312
+ * nodes.
2313
+ */
2314
+ private final void internalClear() {
2315
+ long delta = 0L; // negative number of deletions
2316
+ int i = 0;
2317
+ AtomicReferenceArray<Node> tab = table;
2318
+ while (tab != null && i < tab.length()) {
2319
+ int fh; Object fk;
2320
+ Node f = tabAt(tab, i);
2321
+ if (f == null)
2322
+ ++i;
2323
+ else if ((fh = f.hash) == MOVED) {
2324
+ if ((fk = f.key) instanceof TreeBin) {
2325
+ TreeBin t = (TreeBin)fk;
2326
+ t.acquire(0);
2327
+ try {
2328
+ if (tabAt(tab, i) == f) {
2329
+ for (Node p = t.first; p != null; p = p.next) {
2330
+ if (p.val != null) { // (currently always true)
2331
+ p.val = null;
2332
+ --delta;
2333
+ }
2334
+ }
2335
+ t.first = null;
2336
+ t.root = null;
2337
+ ++i;
2338
+ }
2339
+ } finally {
2340
+ t.release(0);
2341
+ }
2342
+ }
2343
+ else
2344
+ tab = (AtomicReferenceArray<Node>)fk;
2345
+ }
2346
+ else if ((fh & LOCKED) != 0) {
2347
+ counter.add(delta); // opportunistically update count
2348
+ delta = 0L;
2349
+ f.tryAwaitLock(tab, i);
2350
+ }
2351
+ else if (f.casHash(fh, fh | LOCKED)) {
2352
+ try {
2353
+ if (tabAt(tab, i) == f) {
2354
+ for (Node e = f; e != null; e = e.next) {
2355
+ if (e.val != null) { // (currently always true)
2356
+ e.val = null;
2357
+ --delta;
2358
+ }
2359
+ }
2360
+ setTabAt(tab, i, null);
2361
+ ++i;
2362
+ }
2363
+ } finally {
2364
+ if (!f.casHash(fh | LOCKED, fh)) {
2365
+ f.hash = fh;
2366
+ synchronized (f) { f.notifyAll(); };
2367
+ }
2368
+ }
2369
+ }
2370
+ }
2371
+ if (delta != 0)
2372
+ counter.add(delta);
2373
+ }
2374
+
2375
+ /* ---------------- Table Traversal -------------- */
2376
+
2377
+ /**
2378
+ * Encapsulates traversal for methods such as containsValue; also
2379
+ * serves as a base class for other iterators and bulk tasks.
2380
+ *
2381
+ * At each step, the iterator snapshots the key ("nextKey") and
2382
+ * value ("nextVal") of a valid node (i.e., one that, at point of
2383
+ * snapshot, has a non-null user value). Because val fields can
2384
+ * change (including to null, indicating deletion), field nextVal
2385
+ * might not be accurate at point of use, but still maintains the
2386
+ * weak consistency property of holding a value that was once
2387
+ * valid. To support iterator.remove, the nextKey field is not
2388
+ * updated (nulled out) when the iterator cannot advance.
2389
+ *
2390
+ * Internal traversals directly access these fields, as in:
2391
+ * {@code while (it.advance() != null) { process(it.nextKey); }}
2392
+ *
2393
+ * Exported iterators must track whether the iterator has advanced
2394
+ * (in hasNext vs next) (by setting/checking/nulling field
2395
+ * nextVal), and then extract key, value, or key-value pairs as
2396
+ * return values of next().
2397
+ *
2398
+ * The iterator visits once each still-valid node that was
2399
+ * reachable upon iterator construction. It might miss some that
2400
+ * were added to a bin after the bin was visited, which is OK wrt
2401
+ * consistency guarantees. Maintaining this property in the face
2402
+ * of possible ongoing resizes requires a fair amount of
2403
+ * bookkeeping state that is difficult to optimize away amidst
2404
+ * volatile accesses. Even so, traversal maintains reasonable
2405
+ * throughput.
2406
+ *
2407
+ * Normally, iteration proceeds bin-by-bin traversing lists.
2408
+ * However, if the table has been resized, then all future steps
2409
+ * must traverse both the bin at the current index as well as at
2410
+ * (index + baseSize); and so on for further resizings. To
2411
+ * paranoically cope with potential sharing by users of iterators
2412
+ * across threads, iteration terminates if a bounds check fails
2413
+ * for a table read.
2414
+ *
2415
+ * This class extends ForkJoinTask to streamline parallel
2416
+ * iteration in bulk operations (see BulkTask). This adds only an
2417
+ * int of space overhead, which is close enough to negligible in
2418
+ * cases where it is not needed to not worry about it. Because
2419
+ * ForkJoinTask is Serializable, but iterators need not be, we
2420
+ * need to add warning suppressions.
2421
+ */
2422
+ @SuppressWarnings("serial") static class Traverser<K,V,R> {
2423
+ final ConcurrentHashMapV8<K, V> map;
2424
+ Node next; // the next entry to use
2425
+ K nextKey; // cached key field of next
2426
+ V nextVal; // cached val field of next
2427
+ AtomicReferenceArray<Node> tab; // current table; updated if resized
2428
+ int index; // index of bin to use next
2429
+ int baseIndex; // current index of initial table
2430
+ int baseLimit; // index bound for initial table
2431
+ int baseSize; // initial table size
2432
+
2433
+ /** Creates iterator for all entries in the table. */
2434
+ Traverser(ConcurrentHashMapV8<K, V> map) {
2435
+ this.map = map;
2436
+ }
2437
+
2438
+ /** Creates iterator for split() methods */
2439
+ Traverser(Traverser<K,V,?> it) {
2440
+ ConcurrentHashMapV8<K, V> m; AtomicReferenceArray<Node> t;
2441
+ if ((m = this.map = it.map) == null)
2442
+ t = null;
2443
+ else if ((t = it.tab) == null && // force parent tab initialization
2444
+ (t = it.tab = m.table) != null)
2445
+ it.baseLimit = it.baseSize = t.length();
2446
+ this.tab = t;
2447
+ this.baseSize = it.baseSize;
2448
+ it.baseLimit = this.index = this.baseIndex =
2449
+ ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2450
+ }
2451
+
2452
+ /**
2453
+ * Advances next; returns nextVal or null if terminated.
2454
+ * See above for explanation.
2455
+ */
2456
+ final V advance() {
2457
+ Node e = next;
2458
+ V ev = null;
2459
+ outer: do {
2460
+ if (e != null) // advance past used/skipped node
2461
+ e = e.next;
2462
+ while (e == null) { // get to next non-null bin
2463
+ ConcurrentHashMapV8<K, V> m;
2464
+ AtomicReferenceArray<Node> t; int b, i, n; Object ek; // checks must use locals
2465
+ if ((t = tab) != null)
2466
+ n = t.length();
2467
+ else if ((m = map) != null && (t = tab = m.table) != null)
2468
+ n = baseLimit = baseSize = t.length();
2469
+ else
2470
+ break outer;
2471
+ if ((b = baseIndex) >= baseLimit ||
2472
+ (i = index) < 0 || i >= n)
2473
+ break outer;
2474
+ if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2475
+ if ((ek = e.key) instanceof TreeBin)
2476
+ e = ((TreeBin)ek).first;
2477
+ else {
2478
+ tab = (AtomicReferenceArray<Node>)ek;
2479
+ continue; // restarts due to null val
2480
+ }
2481
+ } // visit upper slots if present
2482
+ index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2483
+ }
2484
+ nextKey = (K) e.key;
2485
+ } while ((ev = (V) e.val) == null); // skip deleted or special nodes
2486
+ next = e;
2487
+ return nextVal = ev;
2488
+ }
2489
+
2490
+ public final void remove() {
2491
+ Object k = nextKey;
2492
+ if (k == null && (advance() == null || (k = nextKey) == null))
2493
+ throw new IllegalStateException();
2494
+ map.internalReplace(k, null, null);
2495
+ }
2496
+
2497
+ public final boolean hasNext() {
2498
+ return nextVal != null || advance() != null;
2499
+ }
2500
+
2501
+ public final boolean hasMoreElements() { return hasNext(); }
2502
+ public final void setRawResult(Object x) { }
2503
+ public R getRawResult() { return null; }
2504
+ public boolean exec() { return true; }
2505
+ }
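+ // Illustrative internal usage (not upstream code) of the traversal protocol
+ // described above; containsValue() below follows the same pattern:
+ //   Traverser<K,V,Object> it = new Traverser<K,V,Object>(map);
+ //   Object v;
+ //   while ((v = it.advance()) != null)
+ //       handle(it.nextKey, v);   // handle() is a hypothetical callback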
2506
+
2507
+ /* ---------------- Public operations -------------- */
2508
+
2509
+ /**
2510
+ * Creates a new, empty map with the default initial table size (16).
2511
+ */
2512
+ public ConcurrentHashMapV8() {
2513
+ this.counter = new LongAdder();
2514
+ }
2515
+
2516
+ /**
2517
+ * Creates a new, empty map with an initial table size
2518
+ * accommodating the specified number of elements without the need
2519
+ * to dynamically resize.
2520
+ *
2521
+ * @param initialCapacity The implementation performs internal
2522
+ * sizing to accommodate this many elements.
2523
+ * @throws IllegalArgumentException if the initial capacity of
2524
+ * elements is negative
2525
+ */
2526
+ public ConcurrentHashMapV8(int initialCapacity) {
2527
+ if (initialCapacity < 0)
2528
+ throw new IllegalArgumentException();
2529
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2530
+ MAXIMUM_CAPACITY :
2531
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2532
+ this.counter = new LongAdder();
2533
+ this.sizeCtl = cap;
2534
+ }
2535
+
2536
+ /**
2537
+ * Creates a new map with the same mappings as the given map.
2538
+ *
2539
+ * @param m the map
2540
+ */
2541
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2542
+ this.counter = new LongAdder();
2543
+ this.sizeCtl = DEFAULT_CAPACITY;
2544
+ internalPutAll(m);
2545
+ }
2546
+
2547
+ /**
2548
+ * Creates a new, empty map with an initial table size based on
2549
+ * the given number of elements ({@code initialCapacity}) and
2550
+ * initial table density ({@code loadFactor}).
2551
+ *
2552
+ * @param initialCapacity the initial capacity. The implementation
2553
+ * performs internal sizing to accommodate this many elements,
2554
+ * given the specified load factor.
2555
+ * @param loadFactor the load factor (table density) for
2556
+ * establishing the initial table size
2557
+ * @throws IllegalArgumentException if the initial capacity of
2558
+ * elements is negative or the load factor is nonpositive
2559
+ *
2560
+ * @since 1.6
2561
+ */
2562
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2563
+ this(initialCapacity, loadFactor, 1);
2564
+ }
2565
+
2566
+ /**
2567
+ * Creates a new, empty map with an initial table size based on
2568
+ * the given number of elements ({@code initialCapacity}), table
2569
+ * density ({@code loadFactor}), and number of concurrently
2570
+ * updating threads ({@code concurrencyLevel}).
2571
+ *
2572
+ * @param initialCapacity the initial capacity. The implementation
2573
+ * performs internal sizing to accommodate this many elements,
2574
+ * given the specified load factor.
2575
+ * @param loadFactor the load factor (table density) for
2576
+ * establishing the initial table size
2577
+ * @param concurrencyLevel the estimated number of concurrently
2578
+ * updating threads. The implementation may use this value as
2579
+ * a sizing hint.
2580
+ * @throws IllegalArgumentException if the initial capacity is
2581
+ * negative or the load factor or concurrencyLevel are
2582
+ * nonpositive
2583
+ */
2584
+ public ConcurrentHashMapV8(int initialCapacity,
2585
+ float loadFactor, int concurrencyLevel) {
2586
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2587
+ throw new IllegalArgumentException();
2588
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
2589
+ initialCapacity = concurrencyLevel; // as estimated threads
2590
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2591
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2592
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
2593
+ this.counter = new LongAdder();
2594
+ this.sizeCtl = cap;
2595
+ }
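+ // Illustrative sizing example (hand-checked, not upstream code): with
+ // initialCapacity = 16, loadFactor = 0.75f and concurrencyLevel = 1,
+ // size = (long)(1.0 + 16 / 0.75f) = 22 and tableSizeFor(22) = 32, so the map
+ // starts with sizeCtl = 32 and allocates a 32-slot table on first use.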
2596
+
2597
+ /**
2598
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2599
+ * from the given type to {@code Boolean.TRUE}.
2600
+ *
2601
+ * @return the new set
2602
+ */
2603
+ public static <K> KeySetView<K,Boolean> newKeySet() {
2604
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(),
2605
+ Boolean.TRUE);
2606
+ }
2607
+
2608
+ /**
2609
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2610
+ * from the given type to {@code Boolean.TRUE}.
2611
+ *
2612
+ * @param initialCapacity The implementation performs internal
2613
+ * sizing to accommodate this many elements.
2614
+ * @throws IllegalArgumentException if the initial capacity of
2615
+ * elements is negative
2616
+ * @return the new set
2617
+ */
2618
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2619
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity),
2620
+ Boolean.TRUE);
2621
+ }
2622
+
2623
+ /**
2624
+ * {@inheritDoc}
2625
+ */
2626
+ public boolean isEmpty() {
2627
+ return counter.sum() <= 0L; // ignore transient negative values
2628
+ }
2629
+
2630
+ /**
2631
+ * {@inheritDoc}
2632
+ */
2633
+ public int size() {
2634
+ long n = counter.sum();
2635
+ return ((n < 0L) ? 0 :
2636
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2637
+ (int)n);
2638
+ }
2639
+
2640
+ /**
2641
+ * Returns the number of mappings. This method should be used
2642
+ * instead of {@link #size} because a ConcurrentHashMapV8 may
2643
+ * contain more mappings than can be represented as an int. The
2644
+ * value returned is a snapshot; the actual count may differ if
2645
+ * there are ongoing concurrent insertions or removals.
2646
+ *
2647
+ * @return the number of mappings
2648
+ */
2649
+ public long mappingCount() {
2650
+ long n = counter.sum();
2651
+ return (n < 0L) ? 0L : n; // ignore transient negative values
2652
+ }
2653
+
2654
+ /**
2655
+ * Returns the value to which the specified key is mapped,
2656
+ * or {@code null} if this map contains no mapping for the key.
2657
+ *
2658
+ * <p>More formally, if this map contains a mapping from a key
2659
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
2660
+ * then this method returns {@code v}; otherwise it returns
2661
+ * {@code null}. (There can be at most one such mapping.)
2662
+ *
2663
+ * @throws NullPointerException if the specified key is null
2664
+ */
2665
+ @SuppressWarnings("unchecked") public V get(Object key) {
2666
+ if (key == null)
2667
+ throw new NullPointerException();
2668
+ return (V)internalGet(key);
2669
+ }
2670
+
2671
+ /**
2672
+ * Returns the value to which the specified key is mapped,
2673
+ * or the given defaultValue if this map contains no mapping for the key.
2674
+ *
2675
+ * @param key the key
2676
+ * @param defaultValue the value to return if this map contains
2677
+ * no mapping for the given key
2678
+ * @return the mapping for the key, if present; else the defaultValue
2679
+ * @throws NullPointerException if the specified key is null
2680
+ */
2681
+ @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2682
+ if (key == null)
2683
+ throw new NullPointerException();
2684
+ V v = (V) internalGet(key);
2685
+ return v == null ? defaultValue : v;
2686
+ }
2687
+
2688
+ /**
2689
+ * Tests if the specified object is a key in this table.
2690
+ *
2691
+ * @param key possible key
2692
+ * @return {@code true} if and only if the specified object
2693
+ * is a key in this table, as determined by the
2694
+ * {@code equals} method; {@code false} otherwise
2695
+ * @throws NullPointerException if the specified key is null
2696
+ */
2697
+ public boolean containsKey(Object key) {
2698
+ if (key == null)
2699
+ throw new NullPointerException();
2700
+ return internalGet(key) != null;
2701
+ }
2702
+
2703
+ /**
2704
+ * Returns {@code true} if this map maps one or more keys to the
2705
+ * specified value. Note: This method may require a full traversal
2706
+ * of the map, and is much slower than method {@code containsKey}.
2707
+ *
2708
+ * @param value value whose presence in this map is to be tested
2709
+ * @return {@code true} if this map maps one or more keys to the
2710
+ * specified value
2711
+ * @throws NullPointerException if the specified value is null
2712
+ */
2713
+ public boolean containsValue(Object value) {
2714
+ if (value == null)
2715
+ throw new NullPointerException();
2716
+ Object v;
2717
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2718
+ while ((v = it.advance()) != null) {
2719
+ if (v == value || value.equals(v))
2720
+ return true;
2721
+ }
2722
+ return false;
2723
+ }
2724
+
2725
+ public K findKey(Object value) {
2726
+ if (value == null)
2727
+ throw new NullPointerException();
2728
+ Object v;
2729
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2730
+ while ((v = it.advance()) != null) {
2731
+ if (v == value || value.equals(v))
2732
+ return it.nextKey;
2733
+ }
2734
+ return null;
2735
+ }
2736
+
2737
+ /**
2738
+ * Legacy method testing if some key maps into the specified value
2739
+ * in this table. This method is identical in functionality to
2740
+ * {@link #containsValue}, and exists solely to ensure
2741
+ * full compatibility with class {@link java.util.Hashtable},
2742
+ * which supported this method prior to introduction of the
2743
+ * Java Collections framework.
2744
+ *
2745
+ * @param value a value to search for
2746
+ * @return {@code true} if and only if some key maps to the
2747
+ * {@code value} argument in this table as
2748
+ * determined by the {@code equals} method;
2749
+ * {@code false} otherwise
2750
+ * @throws NullPointerException if the specified value is null
2751
+ */
2752
+ public boolean contains(Object value) {
2753
+ return containsValue(value);
2754
+ }
2755
+
2756
+ /**
2757
+ * Maps the specified key to the specified value in this table.
2758
+ * Neither the key nor the value can be null.
2759
+ *
2760
+ * <p>The value can be retrieved by calling the {@code get} method
2761
+ * with a key that is equal to the original key.
2762
+ *
2763
+ * @param key key with which the specified value is to be associated
2764
+ * @param value value to be associated with the specified key
2765
+ * @return the previous value associated with {@code key}, or
2766
+ * {@code null} if there was no mapping for {@code key}
2767
+ * @throws NullPointerException if the specified key or value is null
2768
+ */
2769
+ @SuppressWarnings("unchecked") public V put(K key, V value) {
2770
+ if (key == null || value == null)
2771
+ throw new NullPointerException();
2772
+ return (V)internalPut(key, value);
2773
+ }
2774
+
2775
+ /**
2776
+ * {@inheritDoc}
2777
+ *
2778
+ * @return the previous value associated with the specified key,
2779
+ * or {@code null} if there was no mapping for the key
2780
+ * @throws NullPointerException if the specified key or value is null
2781
+ */
2782
+ @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2783
+ if (key == null || value == null)
2784
+ throw new NullPointerException();
2785
+ return (V)internalPutIfAbsent(key, value);
2786
+ }
2787
+
2788
+ /**
2789
+ * Copies all of the mappings from the specified map to this one.
2790
+ * These mappings replace any mappings that this map had for any of the
2791
+ * keys currently in the specified map.
2792
+ *
2793
+ * @param m mappings to be stored in this map
2794
+ */
2795
+ public void putAll(Map<? extends K, ? extends V> m) {
2796
+ internalPutAll(m);
2797
+ }
2798
+
2799
+ /**
2800
+ * If the specified key is not already associated with a value,
2801
+ * computes its value using the given mappingFunction and enters
2802
+ * it into the map unless null. This is equivalent to
2803
+ * <pre> {@code
2804
+ * if (map.containsKey(key))
2805
+ * return map.get(key);
2806
+ * value = mappingFunction.apply(key);
2807
+ * if (value != null)
2808
+ * map.put(key, value);
2809
+ * return value;}</pre>
2810
+ *
2811
+ * except that the action is performed atomically. If the
2812
+ * function returns {@code null} no mapping is recorded. If the
2813
+ * function itself throws an (unchecked) exception, the exception
2814
+ * is rethrown to its caller, and no mapping is recorded. Some
2815
+ * attempted update operations on this map by other threads may be
2816
+ * blocked while computation is in progress, so the computation
2817
+ * should be short and simple, and must not attempt to update any
2818
+ * other mappings of this Map. The most appropriate usage is to
2819
+ * construct a new object serving as an initial mapped value, or
2820
+ * memoized result, as in:
2821
+ *
2822
+ * <pre> {@code
2823
+ * map.computeIfAbsent(key, new Fun<K, V>() {
2824
+ * public V apply(K k) { return new Value(f(k)); }});}</pre>
2825
+ *
2826
+ * @param key key with which the specified value is to be associated
2827
+ * @param mappingFunction the function to compute a value
2828
+ * @return the current (existing or computed) value associated with
2829
+ * the specified key, or null if the computed value is null
2830
+ * @throws NullPointerException if the specified key or mappingFunction
2831
+ * is null
2832
+ * @throws IllegalStateException if the computation detectably
2833
+ * attempts a recursive update to this map that would
2834
+ * otherwise never complete
2835
+ * @throws RuntimeException or Error if the mappingFunction does so,
2836
+ * in which case the mapping is left unestablished
2837
+ */
2838
+ @SuppressWarnings("unchecked") public V computeIfAbsent
2839
+ (K key, Fun<? super K, ? extends V> mappingFunction) {
2840
+ if (key == null || mappingFunction == null)
2841
+ throw new NullPointerException();
2842
+ return (V)internalComputeIfAbsent(key, mappingFunction);
2843
+ }
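+ // Illustrative usage (not upstream code): memoize a derived value once per
+ // key; concurrent callers for the same key observe a single computation.
+ //   ConcurrentHashMapV8<String, Integer> cache =
+ //       new ConcurrentHashMapV8<String, Integer>();
+ //   Integer len = cache.computeIfAbsent("concurrent-ruby", new Fun<String, Integer>() {
+ //       public Integer apply(String k) { return Integer.valueOf(k.length()); }});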
2844
+
2845
+ /**
2846
+ * If the given key is present, computes a new mapping value given a key and
2847
+ * its current mapped value. This is equivalent to
2848
+ * <pre> {@code
2849
+ * if (map.containsKey(key)) {
2850
+ * value = remappingFunction.apply(key, map.get(key));
2851
+ * if (value != null)
2852
+ * map.put(key, value);
2853
+ * else
2854
+ * map.remove(key);
2855
+ * }
2856
+ * }</pre>
2857
+ *
2858
+ * except that the action is performed atomically. If the
2859
+ * function returns {@code null}, the mapping is removed. If the
2860
+ * function itself throws an (unchecked) exception, the exception
2861
+ * is rethrown to its caller, and the current mapping is left
2862
+ * unchanged. Some attempted update operations on this map by
2863
+ * other threads may be blocked while computation is in progress,
2864
+ * so the computation should be short and simple, and must not
2865
+ * attempt to update any other mappings of this Map.
2867
+ *
2868
+ * @param key key with which the specified value is to be associated
2869
+ * @param remappingFunction the function to compute a value
2870
+ * @return the new value associated with the specified key, or null if none
2871
+ * @throws NullPointerException if the specified key or remappingFunction
2872
+ * is null
2873
+ * @throws IllegalStateException if the computation detectably
2874
+ * attempts a recursive update to this map that would
2875
+ * otherwise never complete
2876
+ * @throws RuntimeException or Error if the remappingFunction does so,
2877
+ * in which case the mapping is unchanged
2878
+ */
2879
+ @SuppressWarnings("unchecked") public V computeIfPresent
2880
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2881
+ if (key == null || remappingFunction == null)
2882
+ throw new NullPointerException();
2883
+ return (V)internalCompute(key, true, remappingFunction);
2884
+ }
2885
+
2886
+ /**
2887
+ * Computes a new mapping value given a key and
2888
+ * its current mapped value (or {@code null} if there is no current
2889
+ * mapping). This is equivalent to
2890
+ * <pre> {@code
2891
+ * value = remappingFunction.apply(key, map.get(key));
2892
+ * if (value != null)
2893
+ * map.put(key, value);
2894
+ * else
2895
+ * map.remove(key);
2896
+ * }</pre>
2897
+ *
2898
+ * except that the action is performed atomically. If the
2899
+ * function returns {@code null}, the mapping is removed. If the
2900
+ * function itself throws an (unchecked) exception, the exception
2901
+ * is rethrown to its caller, and the current mapping is left
2902
+ * unchanged. Some attempted update operations on this map by
2903
+ * other threads may be blocked while computation is in progress,
2904
+ * so the computation should be short and simple, and must not
2905
+ * attempt to update any other mappings of this Map. For example,
2906
+ * to either create or append new messages to a value mapping:
2907
+ *
2908
+ * <pre> {@code
2909
+ * Map<Key, String> map = ...;
2910
+ * final String msg = ...;
2911
+ * map.compute(key, new BiFun<Key, String, String>() {
2912
+ * public String apply(Key k, String v) {
2913
+ * return (v == null) ? msg : v + msg; }});}</pre>
2914
+ *
2915
+ * @param key key with which the specified value is to be associated
2916
+ * @param remappingFunction the function to compute a value
2917
+ * @return the new value associated with the specified key, or null if none
2918
+ * @throws NullPointerException if the specified key or remappingFunction
2919
+ * is null
2920
+ * @throws IllegalStateException if the computation detectably
2921
+ * attempts a recursive update to this map that would
2922
+ * otherwise never complete
2923
+ * @throws RuntimeException or Error if the remappingFunction does so,
2924
+ * in which case the mapping is unchanged
2925
+ */
2926
+ @SuppressWarnings("unchecked") public V compute
2927
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2928
+ if (key == null || remappingFunction == null)
2929
+ throw new NullPointerException();
2930
+ return (V)internalCompute(key, false, remappingFunction);
2931
+ }
2932
+
2933
+ /**
2934
+ * If the specified key is not already associated
2935
+ * with a value, associate it with the given value.
2936
+ * Otherwise, replace the value with the results of
2937
+ * the given remapping function. This is equivalent to:
2938
+ * <pre> {@code
2939
+ * if (!map.containsKey(key))
2940
+ * map.put(key, value);
+ * else {
+ * newValue = remappingFunction.apply(map.get(key), value);
+ * if (newValue != null)
+ * map.put(key, newValue);
2945
+ * else
2946
+ * map.remove(key);
2947
+ * }
2948
+ * }</pre>
2949
+ * except that the action is performed atomically. If the
2950
+ * function returns {@code null}, the mapping is removed. If the
2951
+ * function itself throws an (unchecked) exception, the exception
2952
+ * is rethrown to its caller, and the current mapping is left
2953
+ * unchanged. Some attempted update operations on this map by
2954
+ * other threads may be blocked while computation is in progress,
2955
+ * so the computation should be short and simple, and must not
2956
+ * attempt to update any other mappings of this Map.
2957
+ */
2958
+ @SuppressWarnings("unchecked") public V merge
2959
+ (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2960
+ if (key == null || value == null || remappingFunction == null)
2961
+ throw new NullPointerException();
2962
+ return (V)internalMerge(key, value, remappingFunction);
2963
+ }
2964
+
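/* Usage sketch (illustrative; not part of this diff): merge() inserts the given
 * value when the key is absent and otherwise combines it with the existing value,
 * here to maintain a per-key counter (the map and variable names are hypothetical):
 *
 *   ConcurrentHashMapV8<String, Integer> counts = new ConcurrentHashMapV8<String, Integer>();
 *   counts.merge(word, 1, new BiFun<Integer, Integer, Integer>() {
 *       public Integer apply(Integer old, Integer inc) { return old + inc; }
 *   });
 */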
2965
+ /**
2966
+ * Removes the key (and its corresponding value) from this map.
2967
+ * This method does nothing if the key is not in the map.
2968
+ *
2969
+ * @param key the key that needs to be removed
2970
+ * @return the previous value associated with {@code key}, or
2971
+ * {@code null} if there was no mapping for {@code key}
2972
+ * @throws NullPointerException if the specified key is null
2973
+ */
2974
+ @SuppressWarnings("unchecked") public V remove(Object key) {
2975
+ if (key == null)
2976
+ throw new NullPointerException();
2977
+ return (V)internalReplace(key, null, null);
2978
+ }
2979
+
2980
+ /**
2981
+ * {@inheritDoc}
2982
+ *
2983
+ * @throws NullPointerException if the specified key is null
2984
+ */
2985
+ public boolean remove(Object key, Object value) {
2986
+ if (key == null)
2987
+ throw new NullPointerException();
2988
+ if (value == null)
2989
+ return false;
2990
+ return internalReplace(key, null, value) != null;
2991
+ }
2992
+
2993
+ /**
2994
+ * {@inheritDoc}
2995
+ *
2996
+ * @throws NullPointerException if any of the arguments are null
2997
+ */
2998
+ public boolean replace(K key, V oldValue, V newValue) {
2999
+ if (key == null || oldValue == null || newValue == null)
3000
+ throw new NullPointerException();
3001
+ return internalReplace(key, newValue, oldValue) != null;
3002
+ }
3003
+
3004
+ /**
3005
+ * {@inheritDoc}
3006
+ *
3007
+ * @return the previous value associated with the specified key,
3008
+ * or {@code null} if there was no mapping for the key
3009
+ * @throws NullPointerException if the specified key or value is null
3010
+ */
3011
+ @SuppressWarnings("unchecked") public V replace(K key, V value) {
3012
+ if (key == null || value == null)
3013
+ throw new NullPointerException();
3014
+ return (V)internalReplace(key, value, null);
3015
+ }
3016
+
3017
+ /**
3018
+ * Removes all of the mappings from this map.
3019
+ */
3020
+ public void clear() {
3021
+ internalClear();
3022
+ }
3023
+
3024
+ /**
3025
+ * Returns a {@link Set} view of the keys contained in this map.
3026
+ * The set is backed by the map, so changes to the map are
3027
+ * reflected in the set, and vice-versa.
3028
+ *
3029
+ * @return the set view
3030
+ */
3031
+ public KeySetView<K,V> keySet() {
3032
+ KeySetView<K,V> ks = keySet;
3033
+ return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3034
+ }
3035
+
3036
+ /**
3037
+ * Returns a {@link Set} view of the keys in this map, using the
3038
+ * given common mapped value for any additions (i.e., {@link
3039
+ * Collection#add} and {@link Collection#addAll}). This is of
3040
+ * course only appropriate if it is acceptable to use the same
3041
+ * value for all additions from this view.
3042
+ *
3043
+ * @param mappedValue the mapped value to use for any
3044
+ * additions.
3045
+ * @return the set view
3046
+ * @throws NullPointerException if the mappedValue is null
3047
+ */
3048
+ public KeySetView<K,V> keySet(V mappedValue) {
3049
+ if (mappedValue == null)
3050
+ throw new NullPointerException();
3051
+ return new KeySetView<K,V>(this, mappedValue);
3052
+ }
3053
+
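/* Usage sketch (illustrative; not part of this diff): keySet(mappedValue) lets the
 * map double as a concurrent Set, with add() storing the common mapped value
 * (the variable names are hypothetical):
 *
 *   ConcurrentHashMapV8<String, Boolean> backing = new ConcurrentHashMapV8<String, Boolean>();
 *   Set<String> seen = backing.keySet(Boolean.TRUE);
 *   seen.add("first");                        // puts ("first", Boolean.TRUE) if absent
 *   boolean duplicate = !seen.add("first");   // add() returns false for an existing key
 */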
3054
+ /**
3055
+ * Returns a {@link Collection} view of the values contained in this map.
3056
+ * The collection is backed by the map, so changes to the map are
3057
+ * reflected in the collection, and vice-versa.
3058
+ */
3059
+ public ValuesView<K,V> values() {
3060
+ ValuesView<K,V> vs = values;
3061
+ return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
3062
+ }
3063
+
3064
+ /**
3065
+ * Returns a {@link Set} view of the mappings contained in this map.
3066
+ * The set is backed by the map, so changes to the map are
3067
+ * reflected in the set, and vice-versa. The set supports element
3068
+ * removal, which removes the corresponding mapping from the map,
3069
+ * via the {@code Iterator.remove}, {@code Set.remove},
3070
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
3071
+ * operations. It does not support the {@code add} or
3072
+ * {@code addAll} operations.
3073
+ *
3074
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3075
+ * that will never throw {@link ConcurrentModificationException},
3076
+ * and guarantees to traverse elements as they existed upon
3077
+ * construction of the iterator, and may (but is not guaranteed to)
3078
+ * reflect any modifications subsequent to construction.
3079
+ */
3080
+ public Set<Map.Entry<K,V>> entrySet() {
3081
+ EntrySetView<K,V> es = entrySet;
3082
+ return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
3083
+ }
3084
+
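/* Usage sketch (illustrative; not part of this diff): because the entry-set
 * iterator is weakly consistent, the map may be modified during traversal without
 * a ConcurrentModificationException (the map name is hypothetical):
 *
 *   for (Map.Entry<String, String> e : map.entrySet()) {
 *       if (e.getValue().isEmpty())
 *           map.remove(e.getKey(), e.getValue());   // safe while iterating
 *   }
 */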
3085
+ /**
3086
+ * Returns an enumeration of the keys in this table.
3087
+ *
3088
+ * @return an enumeration of the keys in this table
3089
+ * @see #keySet()
3090
+ */
3091
+ public Enumeration<K> keys() {
3092
+ return new KeyIterator<K,V>(this);
3093
+ }
3094
+
3095
+ /**
3096
+ * Returns an enumeration of the values in this table.
3097
+ *
3098
+ * @return an enumeration of the values in this table
3099
+ * @see #values()
3100
+ */
3101
+ public Enumeration<V> elements() {
3102
+ return new ValueIterator<K,V>(this);
3103
+ }
3104
+
3105
+ /**
3106
+ * Returns a partitionable iterator of the keys in this map.
3107
+ *
3108
+ * @return a partitionable iterator of the keys in this map
3109
+ */
3110
+ public Spliterator<K> keySpliterator() {
3111
+ return new KeyIterator<K,V>(this);
3112
+ }
3113
+
3114
+ /**
3115
+ * Returns a partitionable iterator of the values in this map.
3116
+ *
3117
+ * @return a partitionable iterator of the values in this map
3118
+ */
3119
+ public Spliterator<V> valueSpliterator() {
3120
+ return new ValueIterator<K,V>(this);
3121
+ }
3122
+
3123
+ /**
3124
+ * Returns a partitionable iterator of the entries in this map.
3125
+ *
3126
+ * @return a partitionable iterator of the entries in this map
3127
+ */
3128
+ public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3129
+ return new EntryIterator<K,V>(this);
3130
+ }
3131
+
3132
+ /**
3133
+ * Returns the hash code value for this {@link Map}, i.e.,
3134
+ * the sum of, for each key-value pair in the map,
3135
+ * {@code key.hashCode() ^ value.hashCode()}.
3136
+ *
3137
+ * @return the hash code value for this map
3138
+ */
3139
+ public int hashCode() {
3140
+ int h = 0;
3141
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3142
+ Object v;
3143
+ while ((v = it.advance()) != null) {
3144
+ h += it.nextKey.hashCode() ^ v.hashCode();
3145
+ }
3146
+ return h;
3147
+ }
3148
+
3149
+ /**
3150
+ * Returns a string representation of this map. The string
3151
+ * representation consists of a list of key-value mappings (in no
3152
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
3153
+ * mappings are separated by the characters {@code ", "} (comma
3154
+ * and space). Each key-value mapping is rendered as the key
3155
+ * followed by an equals sign ("{@code =}") followed by the
3156
+ * associated value.
3157
+ *
3158
+ * @return a string representation of this map
3159
+ */
3160
+ public String toString() {
3161
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3162
+ StringBuilder sb = new StringBuilder();
3163
+ sb.append('{');
3164
+ Object v;
3165
+ if ((v = it.advance()) != null) {
3166
+ for (;;) {
3167
+ Object k = it.nextKey;
3168
+ sb.append(k == this ? "(this Map)" : k);
3169
+ sb.append('=');
3170
+ sb.append(v == this ? "(this Map)" : v);
3171
+ if ((v = it.advance()) == null)
3172
+ break;
3173
+ sb.append(',').append(' ');
3174
+ }
3175
+ }
3176
+ return sb.append('}').toString();
3177
+ }
3178
+
3179
+ /**
3180
+ * Compares the specified object with this map for equality.
3181
+ * Returns {@code true} if the given object is a map with the same
3182
+ * mappings as this map. This operation may return misleading
3183
+ * results if either map is concurrently modified during execution
3184
+ * of this method.
3185
+ *
3186
+ * @param o object to be compared for equality with this map
3187
+ * @return {@code true} if the specified object is equal to this map
3188
+ */
3189
+ public boolean equals(Object o) {
3190
+ if (o != this) {
3191
+ if (!(o instanceof Map))
3192
+ return false;
3193
+ Map<?,?> m = (Map<?,?>) o;
3194
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3195
+ Object val;
3196
+ while ((val = it.advance()) != null) {
3197
+ Object v = m.get(it.nextKey);
3198
+ if (v == null || (v != val && !v.equals(val)))
3199
+ return false;
3200
+ }
3201
+ for (Map.Entry<?,?> e : m.entrySet()) {
3202
+ Object mk, mv, v;
3203
+ if ((mk = e.getKey()) == null ||
3204
+ (mv = e.getValue()) == null ||
3205
+ (v = internalGet(mk)) == null ||
3206
+ (mv != v && !mv.equals(v)))
3207
+ return false;
3208
+ }
3209
+ }
3210
+ return true;
3211
+ }
3212
+
3213
+ /* ---------------- Iterators -------------- */
3214
+
3215
+ @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3216
+ implements Spliterator<K>, Enumeration<K> {
3217
+ KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3218
+ KeyIterator(Traverser<K,V,Object> it) {
3219
+ super(it);
3220
+ }
3221
+ public KeyIterator<K,V> split() {
3222
+ if (nextKey != null)
3223
+ throw new IllegalStateException();
3224
+ return new KeyIterator<K,V>(this);
3225
+ }
3226
+ @SuppressWarnings("unchecked") public final K next() {
3227
+ if (nextVal == null && advance() == null)
3228
+ throw new NoSuchElementException();
3229
+ Object k = nextKey;
3230
+ nextVal = null;
3231
+ return (K) k;
3232
+ }
3233
+
3234
+ public final K nextElement() { return next(); }
3235
+ }
3236
+
3237
+ @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3238
+ implements Spliterator<V>, Enumeration<V> {
3239
+ ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3240
+ ValueIterator(Traverser<K,V,Object> it) {
3241
+ super(it);
3242
+ }
3243
+ public ValueIterator<K,V> split() {
3244
+ if (nextKey != null)
3245
+ throw new IllegalStateException();
3246
+ return new ValueIterator<K,V>(this);
3247
+ }
3248
+
3249
+ @SuppressWarnings("unchecked") public final V next() {
3250
+ Object v;
3251
+ if ((v = nextVal) == null && (v = advance()) == null)
3252
+ throw new NoSuchElementException();
3253
+ nextVal = null;
3254
+ return (V) v;
3255
+ }
3256
+
3257
+ public final V nextElement() { return next(); }
3258
+ }
3259
+
3260
+ @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3261
+ implements Spliterator<Map.Entry<K,V>> {
3262
+ EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3263
+ EntryIterator(Traverser<K,V,Object> it) {
3264
+ super(it);
3265
+ }
3266
+ public EntryIterator<K,V> split() {
3267
+ if (nextKey != null)
3268
+ throw new IllegalStateException();
3269
+ return new EntryIterator<K,V>(this);
3270
+ }
3271
+
3272
+ @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3273
+ Object v;
3274
+ if ((v = nextVal) == null && (v = advance()) == null)
3275
+ throw new NoSuchElementException();
3276
+ Object k = nextKey;
3277
+ nextVal = null;
3278
+ return new MapEntry<K,V>((K)k, (V)v, map);
3279
+ }
3280
+ }
3281
+
3282
+ /**
3283
+ * Exported Entry for iterators
3284
+ */
3285
+ static final class MapEntry<K,V> implements Map.Entry<K, V> {
3286
+ final K key; // non-null
3287
+ V val; // non-null
3288
+ final ConcurrentHashMapV8<K, V> map;
3289
+ MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3290
+ this.key = key;
3291
+ this.val = val;
3292
+ this.map = map;
3293
+ }
3294
+ public final K getKey() { return key; }
3295
+ public final V getValue() { return val; }
3296
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3297
+ public final String toString(){ return key + "=" + val; }
3298
+
3299
+ public final boolean equals(Object o) {
3300
+ Object k, v; Map.Entry<?,?> e;
3301
+ return ((o instanceof Map.Entry) &&
3302
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3303
+ (v = e.getValue()) != null &&
3304
+ (k == key || k.equals(key)) &&
3305
+ (v == val || v.equals(val)));
3306
+ }
3307
+
3308
+ /**
3309
+ * Sets our entry's value and writes through to the map. The
3310
+ * value to return is somewhat arbitrary here. Since we do not
3311
+ * necessarily track asynchronous changes, the most recent
3312
+ * "previous" value could be different from what we return (or
3313
+ * could even have been removed in which case the put will
3314
+ * re-establish). We do not and cannot guarantee more.
3315
+ */
3316
+ public final V setValue(V value) {
3317
+ if (value == null) throw new NullPointerException();
3318
+ V v = val;
3319
+ val = value;
3320
+ map.put(key, value);
3321
+ return v;
3322
+ }
3323
+ }
3324
+
3325
+ /* ---------------- Serialization Support -------------- */
3326
+
3327
+ /**
3328
+ * Stripped-down version of helper class used in previous version,
3329
+ * declared for the sake of serialization compatibility
3330
+ */
3331
+ static class Segment<K,V> implements Serializable {
3332
+ private static final long serialVersionUID = 2249069246763182397L;
3333
+ final float loadFactor;
3334
+ Segment(float lf) { this.loadFactor = lf; }
3335
+ }
3336
+
3337
+ /**
3338
+ * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3339
+ * stream (i.e., serializes it).
3340
+ * @param s the stream
3341
+ * @serialData
3342
+ * the key (Object) and value (Object)
3343
+ * for each key-value mapping, followed by a null pair.
3344
+ * The key-value mappings are emitted in no particular order.
3345
+ */
3346
+ @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3347
+ throws java.io.IOException {
3348
+ if (segments == null) { // for serialization compatibility
3349
+ segments = (Segment<K,V>[])
3350
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3351
+ for (int i = 0; i < segments.length; ++i)
3352
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
3353
+ }
3354
+ s.defaultWriteObject();
3355
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3356
+ Object v;
3357
+ while ((v = it.advance()) != null) {
3358
+ s.writeObject(it.nextKey);
3359
+ s.writeObject(v);
3360
+ }
3361
+ s.writeObject(null);
3362
+ s.writeObject(null);
3363
+ segments = null; // throw away
3364
+ }
3365
+
3366
+ /**
3367
+ * Reconstitutes the instance from a stream (that is, deserializes it).
3368
+ * @param s the stream
3369
+ */
3370
+ @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3371
+ throws java.io.IOException, ClassNotFoundException {
3372
+ s.defaultReadObject();
3373
+ this.segments = null; // unneeded
3374
+ // initialize transient final field
3375
+ this.counter = new LongAdder();
3376
+
3377
+ // Create all nodes, then place in table once size is known
3378
+ long size = 0L;
3379
+ Node p = null;
3380
+ for (;;) {
3381
+ K k = (K) s.readObject();
3382
+ V v = (V) s.readObject();
3383
+ if (k != null && v != null) {
3384
+ int h = spread(k.hashCode());
3385
+ p = new Node(h, k, v, p);
3386
+ ++size;
3387
+ }
3388
+ else
3389
+ break;
3390
+ }
3391
+ if (p != null) {
3392
+ boolean init = false;
3393
+ int n;
3394
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3395
+ n = MAXIMUM_CAPACITY;
3396
+ else {
3397
+ int sz = (int)size;
3398
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
3399
+ }
3400
+ int sc = sizeCtl;
3401
+ boolean collide = false;
3402
+ if (n > sc &&
3403
+ SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
3404
+ try {
3405
+ if (table == null) {
3406
+ init = true;
3407
+ AtomicReferenceArray<Node> tab = new AtomicReferenceArray<Node>(n);
3408
+ int mask = n - 1;
3409
+ while (p != null) {
3410
+ int j = p.hash & mask;
3411
+ Node next = p.next;
3412
+ Node q = p.next = tabAt(tab, j);
3413
+ setTabAt(tab, j, p);
3414
+ if (!collide && q != null && q.hash == p.hash)
3415
+ collide = true;
3416
+ p = next;
3417
+ }
3418
+ table = tab;
3419
+ counter.add(size);
3420
+ sc = n - (n >>> 2);
3421
+ }
3422
+ } finally {
3423
+ sizeCtl = sc;
3424
+ }
3425
+ if (collide) { // rescan and convert to TreeBins
3426
+ AtomicReferenceArray<Node> tab = table;
3427
+ for (int i = 0; i < tab.length(); ++i) {
3428
+ int c = 0;
3429
+ for (Node e = tabAt(tab, i); e != null; e = e.next) {
3430
+ if (++c > TREE_THRESHOLD &&
3431
+ (e.key instanceof Comparable)) {
3432
+ replaceWithTreeBin(tab, i, e.key);
3433
+ break;
3434
+ }
3435
+ }
3436
+ }
3437
+ }
3438
+ }
3439
+ if (!init) { // Can only happen if unsafely published.
3440
+ while (p != null) {
3441
+ internalPut(p.key, p.val);
3442
+ p = p.next;
3443
+ }
3444
+ }
3445
+ }
3446
+ }
3447
+
3448
+
3449
+ // -------------------------------------------------------
3450
+
3451
+ // SAMs (single-abstract-method interfaces)
3452
+ /** Interface describing a void action of one argument */
3453
+ public interface Action<A> { void apply(A a); }
3454
+ /** Interface describing a void action of two arguments */
3455
+ public interface BiAction<A,B> { void apply(A a, B b); }
3456
+ /** Interface describing a function of no arguments, producing a value */
3457
+ public interface Generator<T> { T apply(); }
3458
+ /** Interface describing a function mapping its argument to a double */
3459
+ public interface ObjectToDouble<A> { double apply(A a); }
3460
+ /** Interface describing a function mapping its argument to a long */
3461
+ public interface ObjectToLong<A> { long apply(A a); }
3462
+ /** Interface describing a function mapping its argument to an int */
3463
+ public interface ObjectToInt<A> {int apply(A a); }
3464
+ /** Interface describing a function mapping two arguments to a double */
3465
+ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3466
+ /** Interface describing a function mapping two arguments to a long */
3467
+ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3468
+ /** Interface describing a function mapping two arguments to an int */
3469
+ public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3470
+ /** Interface describing a function mapping a double to a double */
3471
+ public interface DoubleToDouble { double apply(double a); }
3472
+ /** Interface describing a function mapping a long to a long */
3473
+ public interface LongToLong { long apply(long a); }
3474
+ /** Interface describing a function mapping an int to an int */
3475
+ public interface IntToInt { int apply(int a); }
3476
+ /** Interface describing a function mapping two doubles to a double */
3477
+ public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3478
+ /** Interface describing a function mapping two longs to a long */
3479
+ public interface LongByLongToLong { long apply(long a, long b); }
3480
+ /** Interface describing a function mapping two ints to an int */
3481
+ public interface IntByIntToInt { int apply(int a, int b); }
3482
+
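/* Usage sketch (illustrative; not part of this diff): these single-abstract-method
 * interfaces are supplied as anonymous classes on pre-lambda runtimes (the variable
 * names below are hypothetical):
 *
 *   ObjectToLong<String> byLength = new ObjectToLong<String>() {
 *       public long apply(String s) { return s.length(); }
 *   };
 *   LongByLongToLong sum = new LongByLongToLong() {
 *       public long apply(long a, long b) { return a + b; }
 *   };
 */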
3483
+
3484
+ /* ---------------- Views -------------- */
3485
+
3486
+ /**
3487
+ * Base class for views.
3488
+ */
3489
+ static abstract class CHMView<K, V> {
3490
+ final ConcurrentHashMapV8<K, V> map;
3491
+ CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3492
+
3493
+ /**
3494
+ * Returns the map backing this view.
3495
+ *
3496
+ * @return the map backing this view
3497
+ */
3498
+ public ConcurrentHashMapV8<K,V> getMap() { return map; }
3499
+
3500
+ public final int size() { return map.size(); }
3501
+ public final boolean isEmpty() { return map.isEmpty(); }
3502
+ public final void clear() { map.clear(); }
3503
+
3504
+ // implementations below rely on concrete classes supplying these
3505
+ abstract public Iterator<?> iterator();
3506
+ abstract public boolean contains(Object o);
3507
+ abstract public boolean remove(Object o);
3508
+
3509
+ private static final String oomeMsg = "Required array size too large";
3510
+
3511
+ public final Object[] toArray() {
3512
+ long sz = map.mappingCount();
3513
+ if (sz > (long)(MAX_ARRAY_SIZE))
3514
+ throw new OutOfMemoryError(oomeMsg);
3515
+ int n = (int)sz;
3516
+ Object[] r = new Object[n];
3517
+ int i = 0;
3518
+ Iterator<?> it = iterator();
3519
+ while (it.hasNext()) {
3520
+ if (i == n) {
3521
+ if (n >= MAX_ARRAY_SIZE)
3522
+ throw new OutOfMemoryError(oomeMsg);
3523
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3524
+ n = MAX_ARRAY_SIZE;
3525
+ else
3526
+ n += (n >>> 1) + 1;
3527
+ r = Arrays.copyOf(r, n);
3528
+ }
3529
+ r[i++] = it.next();
3530
+ }
3531
+ return (i == n) ? r : Arrays.copyOf(r, i);
3532
+ }
3533
+
3534
+ @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3535
+ long sz = map.mappingCount();
3536
+ if (sz > (long)(MAX_ARRAY_SIZE))
3537
+ throw new OutOfMemoryError(oomeMsg);
3538
+ int m = (int)sz;
3539
+ T[] r = (a.length >= m) ? a :
3540
+ (T[])java.lang.reflect.Array
3541
+ .newInstance(a.getClass().getComponentType(), m);
3542
+ int n = r.length;
3543
+ int i = 0;
3544
+ Iterator<?> it = iterator();
3545
+ while (it.hasNext()) {
3546
+ if (i == n) {
3547
+ if (n >= MAX_ARRAY_SIZE)
3548
+ throw new OutOfMemoryError(oomeMsg);
3549
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3550
+ n = MAX_ARRAY_SIZE;
3551
+ else
3552
+ n += (n >>> 1) + 1;
3553
+ r = Arrays.copyOf(r, n);
3554
+ }
3555
+ r[i++] = (T)it.next();
3556
+ }
3557
+ if (a == r && i < n) {
3558
+ r[i] = null; // null-terminate
3559
+ return r;
3560
+ }
3561
+ return (i == n) ? r : Arrays.copyOf(r, i);
3562
+ }
3563
+
3564
+ public final int hashCode() {
3565
+ int h = 0;
3566
+ for (Iterator<?> it = iterator(); it.hasNext();)
3567
+ h += it.next().hashCode();
3568
+ return h;
3569
+ }
3570
+
3571
+ public final String toString() {
3572
+ StringBuilder sb = new StringBuilder();
3573
+ sb.append('[');
3574
+ Iterator<?> it = iterator();
3575
+ if (it.hasNext()) {
3576
+ for (;;) {
3577
+ Object e = it.next();
3578
+ sb.append(e == this ? "(this Collection)" : e);
3579
+ if (!it.hasNext())
3580
+ break;
3581
+ sb.append(',').append(' ');
3582
+ }
3583
+ }
3584
+ return sb.append(']').toString();
3585
+ }
3586
+
3587
+ public final boolean containsAll(Collection<?> c) {
3588
+ if (c != this) {
3589
+ for (Iterator<?> it = c.iterator(); it.hasNext();) {
3590
+ Object e = it.next();
3591
+ if (e == null || !contains(e))
3592
+ return false;
3593
+ }
3594
+ }
3595
+ return true;
3596
+ }
3597
+
3598
+ public final boolean removeAll(Collection<?> c) {
3599
+ boolean modified = false;
3600
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3601
+ if (c.contains(it.next())) {
3602
+ it.remove();
3603
+ modified = true;
3604
+ }
3605
+ }
3606
+ return modified;
3607
+ }
3608
+
3609
+ public final boolean retainAll(Collection<?> c) {
3610
+ boolean modified = false;
3611
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3612
+ if (!c.contains(it.next())) {
3613
+ it.remove();
3614
+ modified = true;
3615
+ }
3616
+ }
3617
+ return modified;
3618
+ }
3619
+
3620
+ }
3621
+
3622
+ /**
3623
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
3624
+ * which additions may optionally be enabled by mapping to a
3625
+ * common value. This class cannot be directly instantiated. See
3626
+ * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
3627
+ * {@link #newKeySet(int)}.
3628
+ */
3629
+ public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
3630
+ private static final long serialVersionUID = 7249069246763182397L;
3631
+ private final V value;
3632
+ KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public
3633
+ super(map);
3634
+ this.value = value;
3635
+ }
3636
+
3637
+ /**
3638
+ * Returns the default mapped value for additions,
3639
+ * or {@code null} if additions are not supported.
3640
+ *
3641
+ * @return the default mapped value for additions, or {@code null}
3642
+ * if not supported.
3643
+ */
3644
+ public V getMappedValue() { return value; }
3645
+
3646
+ // implement Set API
3647
+
3648
+ public boolean contains(Object o) { return map.containsKey(o); }
3649
+ public boolean remove(Object o) { return map.remove(o) != null; }
3650
+
3651
+ /**
3652
+ * Returns a "weakly consistent" iterator that will never
3653
+ * throw {@link ConcurrentModificationException}, and
3654
+ * guarantees to traverse elements as they existed upon
3655
+ * construction of the iterator, and may (but is not
3656
+ * guaranteed to) reflect any modifications subsequent to
3657
+ * construction.
3658
+ *
3659
+ * @return an iterator over the keys of this map
3660
+ */
3661
+ public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
3662
+ public boolean add(K e) {
3663
+ V v;
3664
+ if ((v = value) == null)
3665
+ throw new UnsupportedOperationException();
3666
+ if (e == null)
3667
+ throw new NullPointerException();
3668
+ return map.internalPutIfAbsent(e, v) == null;
3669
+ }
3670
+ public boolean addAll(Collection<? extends K> c) {
3671
+ boolean added = false;
3672
+ V v;
3673
+ if ((v = value) == null)
3674
+ throw new UnsupportedOperationException();
3675
+ for (K e : c) {
3676
+ if (e == null)
3677
+ throw new NullPointerException();
3678
+ if (map.internalPutIfAbsent(e, v) == null)
3679
+ added = true;
3680
+ }
3681
+ return added;
3682
+ }
3683
+ public boolean equals(Object o) {
3684
+ Set<?> c;
3685
+ return ((o instanceof Set) &&
3686
+ ((c = (Set<?>)o) == this ||
3687
+ (containsAll(c) && c.containsAll(this))));
3688
+ }
3689
+ }
3690
+
3691
+ /**
3692
+ * A view of a ConcurrentHashMapV8 as a {@link Collection} of
3693
+ * values, in which additions are disabled. This class cannot be
3694
+ * directly instantiated. See {@link #values},
3695
+ *
3696
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3697
+ * that will never throw {@link ConcurrentModificationException},
3698
+ * and guarantees to traverse elements as they existed upon
3699
+ * construction of the iterator, and may (but is not guaranteed to)
3700
+ * reflect any modifications subsequent to construction.
3701
+ */
3702
+ public static final class ValuesView<K,V> extends CHMView<K,V>
3703
+ implements Collection<V> {
3704
+ ValuesView(ConcurrentHashMapV8<K, V> map) { super(map); }
3705
+ public final boolean contains(Object o) { return map.containsValue(o); }
3706
+ public final boolean remove(Object o) {
3707
+ if (o != null) {
3708
+ Iterator<V> it = new ValueIterator<K,V>(map);
3709
+ while (it.hasNext()) {
3710
+ if (o.equals(it.next())) {
3711
+ it.remove();
3712
+ return true;
3713
+ }
3714
+ }
3715
+ }
3716
+ return false;
3717
+ }
3718
+
3719
+ /**
3720
+ * Returns a "weakly consistent" iterator that will never
3721
+ * throw {@link ConcurrentModificationException}, and
3722
+ * guarantees to traverse elements as they existed upon
3723
+ * construction of the iterator, and may (but is not
3724
+ * guaranteed to) reflect any modifications subsequent to
3725
+ * construction.
3726
+ *
3727
+ * @return an iterator over the values of this map
3728
+ */
3729
+ public final Iterator<V> iterator() {
3730
+ return new ValueIterator<K,V>(map);
3731
+ }
3732
+ public final boolean add(V e) {
3733
+ throw new UnsupportedOperationException();
3734
+ }
3735
+ public final boolean addAll(Collection<? extends V> c) {
3736
+ throw new UnsupportedOperationException();
3737
+ }
3738
+ }
3739
+
3740
+ /**
3741
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
3742
+ * entries. This class cannot be directly instantiated. See
3743
+ * {@link #entrySet}.
3744
+ */
3745
+ public static final class EntrySetView<K,V> extends CHMView<K,V>
3746
+ implements Set<Map.Entry<K,V>> {
3747
+ EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); }
3748
+ public final boolean contains(Object o) {
3749
+ Object k, v, r; Map.Entry<?,?> e;
3750
+ return ((o instanceof Map.Entry) &&
3751
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3752
+ (r = map.get(k)) != null &&
3753
+ (v = e.getValue()) != null &&
3754
+ (v == r || v.equals(r)));
3755
+ }
3756
+ public final boolean remove(Object o) {
3757
+ Object k, v; Map.Entry<?,?> e;
3758
+ return ((o instanceof Map.Entry) &&
3759
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3760
+ (v = e.getValue()) != null &&
3761
+ map.remove(k, v));
3762
+ }
3763
+
3764
+ /**
3765
+ * Returns a "weakly consistent" iterator that will never
3766
+ * throw {@link ConcurrentModificationException}, and
3767
+ * guarantees to traverse elements as they existed upon
3768
+ * construction of the iterator, and may (but is not
3769
+ * guaranteed to) reflect any modifications subsequent to
3770
+ * construction.
3771
+ *
3772
+ * @return an iterator over the entries of this map
3773
+ */
3774
+ public final Iterator<Map.Entry<K,V>> iterator() {
3775
+ return new EntryIterator<K,V>(map);
3776
+ }
3777
+
3778
+ public final boolean add(Entry<K,V> e) {
3779
+ K key = e.getKey();
3780
+ V value = e.getValue();
3781
+ if (key == null || value == null)
3782
+ throw new NullPointerException();
3783
+ return map.internalPut(key, value) == null;
3784
+ }
3785
+ public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3786
+ boolean added = false;
3787
+ for (Entry<K,V> e : c) {
3788
+ if (add(e))
3789
+ added = true;
3790
+ }
3791
+ return added;
3792
+ }
3793
+ public boolean equals(Object o) {
3794
+ Set<?> c;
3795
+ return ((o instanceof Set) &&
3796
+ ((c = (Set<?>)o) == this ||
3797
+ (containsAll(c) && c.containsAll(this))));
3798
+ }
3799
+ }
3800
+ }