thread_safe 0.1.1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. checksums.yaml +7 -0
  2. data/.gitignore +21 -0
  3. data/Gemfile +4 -0
  4. data/LICENSE +144 -0
  5. data/README.md +34 -0
  6. data/Rakefile +36 -0
  7. data/examples/bench_cache.rb +35 -0
  8. data/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java +200 -0
  9. data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java +3842 -0
  10. data/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java +204 -0
  11. data/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java +342 -0
  12. data/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java +199 -0
  13. data/ext/thread_safe/JrubyCacheBackendService.java +15 -0
  14. data/lib/thread_safe.rb +65 -0
  15. data/lib/thread_safe/atomic_reference_cache_backend.rb +922 -0
  16. data/lib/thread_safe/cache.rb +137 -0
  17. data/lib/thread_safe/mri_cache_backend.rb +62 -0
  18. data/lib/thread_safe/non_concurrent_cache_backend.rb +133 -0
  19. data/lib/thread_safe/synchronized_cache_backend.rb +76 -0
  20. data/lib/thread_safe/synchronized_delegator.rb +35 -0
  21. data/lib/thread_safe/util.rb +16 -0
  22. data/lib/thread_safe/util/adder.rb +59 -0
  23. data/lib/thread_safe/util/atomic_reference.rb +12 -0
  24. data/lib/thread_safe/util/cheap_lockable.rb +105 -0
  25. data/lib/thread_safe/util/power_of_two_tuple.rb +26 -0
  26. data/lib/thread_safe/util/striped64.rb +226 -0
  27. data/lib/thread_safe/util/volatile.rb +62 -0
  28. data/lib/thread_safe/util/volatile_tuple.rb +46 -0
  29. data/lib/thread_safe/util/xor_shift_random.rb +39 -0
  30. data/lib/thread_safe/version.rb +3 -0
  31. data/test/test_array.rb +20 -0
  32. data/test/test_cache.rb +792 -0
  33. data/test/test_cache_loops.rb +453 -0
  34. data/test/test_hash.rb +20 -0
  35. data/test/test_helper.rb +73 -0
  36. data/test/test_synchronized_delegator.rb +42 -0
  37. data/thread_safe.gemspec +21 -0
  38. metadata +100 -0
data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java
@@ -0,0 +1,3842 @@
1
+ /*
2
+ * Written by Doug Lea with assistance from members of JCP JSR-166
3
+ * Expert Group and released to the public domain, as explained at
4
+ * http://creativecommons.org/publicdomain/zero/1.0/
5
+ */
6
+
7
+ // This is based on the 1.79 version.
8
+
9
+ package org.jruby.ext.thread_safe.jsr166e;
10
+
11
+ import org.jruby.RubyClass;
12
+ import org.jruby.RubyNumeric;
13
+ import org.jruby.RubyObject;
14
+ import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom;
15
+ import org.jruby.runtime.ThreadContext;
16
+ import org.jruby.runtime.builtin.IRubyObject;
17
+
18
+ import java.util.Arrays;
19
+ import java.util.Map;
20
+ import java.util.Set;
21
+ import java.util.Collection;
22
+ import java.util.Hashtable;
23
+ import java.util.HashMap;
24
+ import java.util.Iterator;
25
+ import java.util.Enumeration;
26
+ import java.util.ConcurrentModificationException;
27
+ import java.util.NoSuchElementException;
28
+ import java.util.concurrent.ConcurrentMap;
29
+ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30
+
31
+ import java.io.Serializable;
32
+
33
+ /**
34
+ * A hash table supporting full concurrency of retrievals and
35
+ * high expected concurrency for updates. This class obeys the
36
+ * same functional specification as {@link java.util.Hashtable}, and
37
+ * includes versions of methods corresponding to each method of
38
+ * {@code Hashtable}. However, even though all operations are
39
+ * thread-safe, retrieval operations do <em>not</em> entail locking,
40
+ * and there is <em>not</em> any support for locking the entire table
41
+ * in a way that prevents all access. This class is fully
42
+ * interoperable with {@code Hashtable} in programs that rely on its
43
+ * thread safety but not on its synchronization details.
44
+ *
45
+ * <p>Retrieval operations (including {@code get}) generally do not
46
+ * block, so may overlap with update operations (including {@code put}
47
+ * and {@code remove}). Retrievals reflect the results of the most
48
+ * recently <em>completed</em> update operations holding upon their
49
+ * onset. (More formally, an update operation for a given key bears a
50
+ * <em>happens-before</em> relation with any (non-null) retrieval for
51
+ * that key reporting the updated value.) For aggregate operations
52
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
53
+ * reflect insertion or removal of only some entries. Similarly,
54
+ * Iterators and Enumerations return elements reflecting the state of
55
+ * the hash table at some point at or since the creation of the
56
+ * iterator/enumeration. They do <em>not</em> throw {@link
57
+ * ConcurrentModificationException}. However, iterators are designed
58
+ * to be used by only one thread at a time. Bear in mind that the
59
+ * results of aggregate status methods including {@code size}, {@code
60
+ * isEmpty}, and {@code containsValue} are typically useful only when
61
+ * a map is not undergoing concurrent updates in other threads.
62
+ * Otherwise the results of these methods reflect transient states
63
+ * that may be adequate for monitoring or estimation purposes, but not
64
+ * for program control.
65
+ *
66
+ * <p>The table is dynamically expanded when there are too many
67
+ * collisions (i.e., keys that have distinct hash codes but fall into
68
+ * the same slot modulo the table size), with the expected average
69
+ * effect of maintaining roughly two bins per mapping (corresponding
70
+ * to a 0.75 load factor threshold for resizing). There may be much
71
+ * variance around this average as mappings are added and removed, but
72
+ * overall, this maintains a commonly accepted time/space tradeoff for
73
+ * hash tables. However, resizing this or any other kind of hash
74
+ * table may be a relatively slow operation. When possible, it is a
75
+ * good idea to provide a size estimate as an optional {@code
76
+ * initialCapacity} constructor argument. An additional optional
77
+ * {@code loadFactor} constructor argument provides a further means of
78
+ * customizing initial table capacity by specifying the table density
79
+ * to be used in calculating the amount of space to allocate for the
80
+ * given number of elements. Also, for compatibility with previous
81
+ * versions of this class, constructors may optionally specify an
82
+ * expected {@code concurrencyLevel} as an additional hint for
83
+ * internal sizing. Note that using many keys with exactly the same
84
+ * {@code hashCode()} is a sure way to slow down performance of any
85
+ * hash table.
86
+ *
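+ * <p>A minimal sketch of supplying such a size estimate (the key and
+ * value types, the 10,000-mapping estimate, and the default load factor
+ * and concurrency level shown here are illustrative assumptions, not
+ * requirements):
+ *
+ * <pre>
+ * {@code
+ * // Pre-size for roughly 10,000 mappings to reduce later resizing work.
+ * ConcurrentHashMapV8<String, Long> map =
+ *     new ConcurrentHashMapV8<String, Long>(10000, 0.75f, 16);
+ * map.put("answer", 42L);
+ * }</pre>
+ *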
87
+ * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89
+ * (using {@link #keySet(Object)}) when only keys are of interest, and the
90
+ * mapped values are (perhaps transiently) not used or all take the
91
+ * same mapping value.
92
+ *
93
+ * <p>A ConcurrentHashMapV8 can be used as scalable frequency map (a
94
+ * form of histogram or multiset) by using {@link LongAdder} values
95
+ * and initializing via {@link #computeIfAbsent}. For example, to add
96
+ * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
97
+ * can use {@code freqs.computeIfAbsent(k, k -> new
98
+ * LongAdder()).increment();}
99
+ *
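+ * <p>A minimal sketch of the same frequency-map idea using only the
+ * standard {@link ConcurrentMap} methods, so that it does not depend on
+ * the exact name of the nested functional interface mentioned in the
+ * jsr166e note below (the {@code word} variable is assumed):
+ *
+ * <pre>
+ * {@code
+ * ConcurrentHashMapV8<String, LongAdder> freqs =
+ *     new ConcurrentHashMapV8<String, LongAdder>();
+ * String word = "example";
+ * LongAdder a = freqs.get(word);
+ * if (a == null) {                        // first sighting of this key?
+ *     LongAdder created = new LongAdder();
+ *     a = freqs.putIfAbsent(word, created);
+ *     if (a == null)                      // we won the race to install it
+ *         a = created;
+ * }
+ * a.increment();                          // count one occurrence
+ * }</pre>
+ *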
100
+ * <p>This class and its views and iterators implement all of the
101
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
102
+ * interfaces.
103
+ *
104
+ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
105
+ * does <em>not</em> allow {@code null} to be used as a key or value.
106
+ *
107
+ * <p>ConcurrentHashMapV8s support parallel operations using the {@link
108
+ * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
109
+ * are available in class {@link ForkJoinTasks}). These operations are
110
+ * designed to be safely, and often sensibly, applied even with maps
111
+ * that are being concurrently updated by other threads; for example,
112
+ * when computing a snapshot summary of the values in a shared
113
+ * registry. There are three kinds of operation, each with four
114
+ * forms, accepting functions with Keys, Values, Entries, and (Key,
115
+ * Value) arguments and/or return values. (The first three forms are
116
+ * also available via the {@link #keySet()}, {@link #values()} and
117
+ * {@link #entrySet()} views). Because the elements of a
118
+ * ConcurrentHashMapV8 are not ordered in any particular way, and may be
119
+ * processed in different orders in different parallel executions, the
120
+ * correctness of supplied functions should not depend on any
121
+ * ordering, or on any other objects or values that may transiently
122
+ * change while computation is in progress; and except for forEach
123
+ * actions, should ideally be side-effect-free.
124
+ *
125
+ * <ul>
126
+ * <li> forEach: Perform a given action on each element.
127
+ * A variant form applies a given transformation on each element
128
+ * before performing the action.</li>
129
+ *
130
+ * <li> search: Return the first available non-null result of
131
+ * applying a given function on each element; skipping further
132
+ * search when a result is found.</li>
133
+ *
134
+ * <li> reduce: Accumulate each element. The supplied reduction
135
+ * function cannot rely on ordering (more formally, it should be
136
+ * both associative and commutative). There are five variants:
137
+ *
138
+ * <ul>
139
+ *
140
+ * <li> Plain reductions. (There is not a form of this method for
141
+ * (key, value) function arguments since there is no corresponding
142
+ * return type.)</li>
143
+ *
144
+ * <li> Mapped reductions that accumulate the results of a given
145
+ * function applied to each element.</li>
146
+ *
147
+ * <li> Reductions to scalar doubles, longs, and ints, using a
148
+ * given basis value.</li>
149
+ *
150
+ * </ul>
151
+ * </li>
152
+ * </ul>
153
+ *
154
+ * <p>The concurrency properties of bulk operations follow
155
+ * from those of ConcurrentHashMapV8: Any non-null result returned
156
+ * from {@code get(key)} and related access methods bears a
157
+ * happens-before relation with the associated insertion or
158
+ * update. The result of any bulk operation reflects the
159
+ * composition of these per-element relations (but is not
160
+ * necessarily atomic with respect to the map as a whole unless it
161
+ * is somehow known to be quiescent). Conversely, because keys
162
+ * and values in the map are never null, null serves as a reliable
163
+ * atomic indicator of the current lack of any result. To
164
+ * maintain this property, null serves as an implicit basis for
165
+ * all non-scalar reduction operations. For the double, long, and
166
+ * int versions, the basis should be one that, when combined with
167
+ * any other value, returns that other value (more formally, it
168
+ * should be the identity element for the reduction). Most common
169
+ * reductions have these properties; for example, computing a sum
170
+ * with basis 0 or a minimum with basis MAX_VALUE.
171
+ *
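+ * <p>A sequential sketch of the identity-basis rule (assuming a
+ * {@code ConcurrentHashMapV8<String, Long> map}; the parallel bulk
+ * methods apply the same per-element step, merely concurrently):
+ *
+ * <pre>
+ * {@code
+ * long sum = 0L;                 // identity basis for a sum
+ * long min = Long.MAX_VALUE;     // identity basis for a minimum
+ * for (Long v : map.values()) {
+ *     sum += v;
+ *     min = Math.min(min, v);
+ * }
+ * // An empty map leaves both results at their basis values.
+ * }</pre>
+ *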
172
+ * <p>Search and transformation functions provided as arguments
173
+ * should similarly return null to indicate the lack of any result
174
+ * (in which case it is not used). In the case of mapped
175
+ * reductions, this also enables transformations to serve as
176
+ * filters, returning null (or, in the case of primitive
177
+ * specializations, the identity basis) if the element should not
178
+ * be combined. You can create compound transformations and
179
+ * filterings by composing them yourself under this "null means
180
+ * there is nothing there now" rule before using them in search or
181
+ * reduce operations.
182
+ *
183
+ * <p>Methods accepting and/or returning Entry arguments maintain
184
+ * key-value associations. They may be useful for example when
185
+ * finding the key for the greatest value. Note that "plain" Entry
186
+ * arguments can be supplied using {@code new
187
+ * AbstractMap.SimpleEntry(k,v)}.
188
+ *
189
+ * <p>Bulk operations may complete abruptly, throwing an
190
+ * exception encountered in the application of a supplied
191
+ * function. Bear in mind when handling such exceptions that other
192
+ * concurrently executing functions could also have thrown
193
+ * exceptions, or would have done so if the first exception had
194
+ * not occurred.
195
+ *
196
+ * <p>Parallel speedups for bulk operations compared to sequential
197
+ * processing are common but not guaranteed. Operations involving
198
+ * brief functions on small maps may execute more slowly than
199
+ * sequential loops if the underlying work to parallelize the
200
+ * computation is more expensive than the computation itself.
201
+ * Similarly, parallelization may not lead to much actual parallelism
202
+ * if all processors are busy performing unrelated tasks.
203
+ *
204
+ * <p>All arguments to all task methods must be non-null.
205
+ *
206
+ * <p><em>jsr166e note: During transition, this class
207
+ * uses nested functional interfaces with different names but the
208
+ * same forms as those expected for JDK8.</em>
209
+ *
210
+ * <p>This class is a member of the
211
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
212
+ * Java Collections Framework</a>.
213
+ *
214
+ * @since 1.5
215
+ * @author Doug Lea
216
+ * @param <K> the type of keys maintained by this map
217
+ * @param <V> the type of mapped values
218
+ */
219
+ public class ConcurrentHashMapV8<K, V>
220
+ implements ConcurrentMap<K, V>, Serializable {
221
+ private static final long serialVersionUID = 7249069246763182397L;
222
+
223
+ /**
224
+ * A partitionable iterator. A Spliterator can be traversed
225
+ * directly, but can also be partitioned (before traversal) by
226
+ * creating another Spliterator that covers a non-overlapping
227
+ * portion of the elements, and so may be amenable to parallel
228
+ * execution.
229
+ *
230
+ * <p>This interface exports a subset of expected JDK8
231
+ * functionality.
232
+ *
233
+ * <p>Sample usage: Here is one (of the several) ways to compute
234
+ * the sum of the values held in a map using the ForkJoin
235
+ * framework. As illustrated here, Spliterators are well suited to
236
+ * designs in which a task repeatedly splits off half its work
237
+ * into forked subtasks until small enough to process directly,
238
+ * and then joins these subtasks. Variants of this style can also
239
+ * be used in completion-based designs.
240
+ *
241
+ * <pre>
242
+ * {@code ConcurrentHashMapV8<String, Long> m = ...
243
+ * // split as if have 8 * parallelism, for load balance
244
+ * int n = m.size();
245
+ * int p = aForkJoinPool.getParallelism() * 8;
246
+ * int split = (n < p)? n : p;
247
+ * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
248
+ * // ...
249
+ * static class SumValues extends RecursiveTask<Long> {
250
+ * final Spliterator<Long> s;
251
+ * final int split; // split while > 1
252
+ * final SumValues nextJoin; // records forked subtasks to join
253
+ * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
254
+ * this.s = s; this.split = split; this.nextJoin = nextJoin;
255
+ * }
256
+ * public Long compute() {
257
+ * long sum = 0;
258
+ * SumValues subtasks = null; // fork subtasks
259
+ * for (int h = split >>> 1; h > 0; h >>>= 1)
260
+ * (subtasks = new SumValues(s.split(), h, subtasks)).fork();
261
+ * while (s.hasNext()) // directly process remaining elements
262
+ * sum += s.next();
263
+ * for (SumValues t = subtasks; t != null; t = t.nextJoin)
264
+ * sum += t.join(); // collect subtask results
265
+ * return sum;
266
+ * }
267
+ * }
268
+ * }</pre>
269
+ */
270
+ public static interface Spliterator<T> extends Iterator<T> {
271
+ /**
272
+ * Returns a Spliterator covering approximately half of the
273
+ * elements, guaranteed not to overlap with those subsequently
274
+ * returned by this Spliterator. After invoking this method,
275
+ * the current Spliterator will <em>not</em> produce any of
276
+ * the elements of the returned Spliterator, but the two
277
+ * Spliterators together will produce all of the elements that
278
+ * would have been produced by this Spliterator had this
279
+ * method not been called. The exact number of elements
280
+ * produced by the returned Spliterator is not guaranteed, and
281
+ * may be zero (i.e., with {@code hasNext()} reporting {@code
282
+ * false}) if this Spliterator cannot be further split.
283
+ *
284
+ * @return a Spliterator covering approximately half of the
285
+ * elements
286
+ * @throws IllegalStateException if this Spliterator has
287
+ * already commenced traversing elements
288
+ */
289
+ Spliterator<T> split();
290
+ }
291
+
292
+
293
+ /*
294
+ * Overview:
295
+ *
296
+ * The primary design goal of this hash table is to maintain
297
+ * concurrent readability (typically method get(), but also
298
+ * iterators and related methods) while minimizing update
299
+ * contention. Secondary goals are to keep space consumption about
300
+ * the same or better than java.util.HashMap, and to support high
301
+ * initial insertion rates on an empty table by many threads.
302
+ *
303
+ * Each key-value mapping is held in a Node. Because Node fields
304
+ * can contain special values, they are defined using plain Object
305
+ * types. Similarly in turn, all internal methods that use them
306
+ * work off Object types. And similarly, so do the internal
307
+ * methods of auxiliary iterator and view classes. All public
308
+ * generic typed methods relay in/out of these internal methods,
309
+ * supplying null-checks and casts as needed. This also allows
310
+ * many of the public methods to be factored into a smaller number
311
+ * of internal methods (although sadly not so for the five
312
+ * variants of put-related operations). The validation-based
313
+ * approach explained below leads to a lot of code sprawl because
314
+ * retry-control precludes factoring into smaller methods.
315
+ *
316
+ * The table is lazily initialized to a power-of-two size upon the
317
+ * first insertion. Each bin in the table normally contains a
318
+ * list of Nodes (most often, the list has only zero or one Node).
319
+ * Table accesses require volatile/atomic reads, writes, and
320
+ * CASes. Because there is no other way to arrange this without
321
+ * adding further indirections, we use intrinsics
322
+ * (sun.misc.Unsafe) operations. The lists of nodes within bins
323
+ * are always accurately traversable under volatile reads, so long
324
+ * as lookups check hash code and non-nullness of value before
325
+ * checking key equality.
326
+ *
327
+ * We use the top two bits of Node hash fields for control
328
+ * purposes -- they are available anyway because of addressing
329
+ * constraints. As explained further below, these top bits are
330
+ * used as follows:
331
+ * 00 - Normal
332
+ * 01 - Locked
333
+ * 11 - Locked and may have a thread waiting for lock
334
+ * 10 - Node is a forwarding node
335
+ *
336
+ * The lower 30 bits of each Node's hash field contain a
337
+ * transformation of the key's hash code, except for forwarding
338
+ * nodes, for which the lower bits are zero (and so always have
339
+ * hash field == MOVED).
340
+ *
341
+ * Insertion (via put or its variants) of the first node in an
342
+ * empty bin is performed by just CASing it to the bin. This is
343
+ * by far the most common case for put operations under most
344
+ * key/hash distributions. Other update operations (insert,
345
+ * delete, and replace) require locks. We do not want to waste
346
+ * the space required to associate a distinct lock object with
347
+ * each bin, so instead use the first node of a bin list itself as
348
+ * a lock. Blocking support for these locks relies on the builtin
349
+ * "synchronized" monitors. However, we also need a tryLock
350
+ * construction, so we overlay these by using bits of the Node
351
+ * hash field for lock control (see above), and so normally use
352
+ * builtin monitors only for blocking and signalling using
353
+ * wait/notifyAll constructions. See Node.tryAwaitLock.
354
+ *
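+ * As a small self-contained illustration of this encoding (it simply
+ * mirrors the MOVED/LOCKED/WAITING/HASH_BITS constants and the
+ * casHash/tryAwaitLock code defined below, and would live inside this
+ * class; it is not part of the implementation itself):
+ *
+ *   static boolean isForwarding(Node e) { return e.hash == MOVED; }
+ *   static boolean isLocked(Node e)     { return (e.hash & LOCKED) != 0; }
+ *   static int keyHashBits(Node e)      { return e.hash & HASH_BITS; }
+ *   static boolean tryLockBin(Node e) { // callers rule out MOVED first
+ *       int h = e.hash;                 // volatile read of control bits
+ *       return (h & LOCKED) == 0 && e.casHash(h, h | LOCKED);
+ *   }
+ *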
355
+ * Using the first node of a list as a lock does not by itself
356
+ * suffice though: When a node is locked, any update must first
357
+ * validate that it is still the first node after locking it, and
358
+ * retry if not. Because new nodes are always appended to lists,
359
+ * once a node is first in a bin, it remains first until deleted
360
+ * or the bin becomes invalidated (upon resizing). However,
361
+ * operations that only conditionally update may inspect nodes
362
+ * until the point of update. This is a converse of sorts to the
363
+ * lazy locking technique described by Herlihy & Shavit.
364
+ *
365
+ * The main disadvantage of per-bin locks is that other update
366
+ * operations on other nodes in a bin list protected by the same
367
+ * lock can stall, for example when user equals() or mapping
368
+ * functions take a long time. However, statistically, under
369
+ * random hash codes, this is not a common problem. Ideally, the
370
+ * frequency of nodes in bins follows a Poisson distribution
371
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
372
+ * parameter of about 0.5 on average, given the resizing threshold
373
+ * of 0.75, although with a large variance because of resizing
374
+ * granularity. Ignoring variance, the expected occurrences of
375
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
376
+ * first values are:
377
+ *
378
+ * 0: 0.60653066
379
+ * 1: 0.30326533
380
+ * 2: 0.07581633
381
+ * 3: 0.01263606
382
+ * 4: 0.00157952
383
+ * 5: 0.00015795
384
+ * 6: 0.00001316
385
+ * 7: 0.00000094
386
+ * 8: 0.00000006
387
+ * more: less than 1 in ten million
388
+ *
389
+ * Lock contention probability for two threads accessing distinct
390
+ * elements is roughly 1 / (8 * #elements) under random hashes.
391
+ *
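+ * The table above can be reproduced with a short standalone check
+ * (illustration only, not part of the implementation):
+ *
+ *   public static void main(String[] args) {
+ *       double lambda = 0.5, p = Math.exp(-lambda); // empty-bin probability, k = 0
+ *       for (int k = 0; k <= 8; ++k) {
+ *           System.out.printf("%d: %.8f%n", k, p);
+ *           p = p * lambda / (k + 1);               // next Poisson term
+ *       }
+ *   }
+ *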
392
+ * Actual hash code distributions encountered in practice
393
+ * sometimes deviate significantly from uniform randomness. This
394
+ * includes the case when N > (1<<30), so some keys MUST collide.
395
+ * Similarly for dumb or hostile usages in which multiple keys are
396
+ * designed to have identical hash codes. Also, although we guard
397
+ * against the worst effects of this (see method spread), sets of
398
+ * hashes may differ only in bits that do not impact their bin
399
+ * index for a given power-of-two mask. So we use a secondary
400
+ * strategy that applies when the number of nodes in a bin exceeds
401
+ * a threshold, and at least one of the keys implements
402
+ * Comparable. These TreeBins use a balanced tree to hold nodes
403
+ * (a specialized form of red-black trees), bounding search time
404
+ * to O(log N). Each search step in a TreeBin is around twice as
405
+ * slow as in a regular list, but given that N cannot exceed
406
+ * (1<<64) (before running out of addresses) this bounds search
407
+ * steps, lock hold times, etc, to reasonable constants (roughly
408
+ * 100 nodes inspected per operation worst case) so long as keys
409
+ * are Comparable (which is very common -- String, Long, etc).
410
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
411
+ * traversal pointers as regular nodes, so can be traversed in
412
+ * iterators in the same way.
413
+ *
414
+ * The table is resized when occupancy exceeds a percentage
415
+ * threshold (nominally, 0.75, but see below). Only a single
416
+ * thread performs the resize (using field "sizeCtl", to arrange
417
+ * exclusion), but the table otherwise remains usable for reads
418
+ * and updates. Resizing proceeds by transferring bins, one by
419
+ * one, from the table to the next table. Because we are using
420
+ * power-of-two expansion, the elements from each bin must either
421
+ * stay at same index, or move with a power of two offset. We
422
+ * eliminate unnecessary node creation by catching cases where old
423
+ * nodes can be reused because their next fields won't change. On
424
+ * average, only about one-sixth of them need cloning when a table
425
+ * doubles. The nodes they replace will be garbage collectable as
426
+ * soon as they are no longer referenced by any reader thread that
427
+ * may be in the midst of concurrently traversing table. Upon
428
+ * transfer, the old table bin contains only a special forwarding
429
+ * node (with hash field "MOVED") that contains the next table as
430
+ * its key. On encountering a forwarding node, access and update
431
+ * operations restart, using the new table.
432
+ *
433
+ * Each bin transfer requires its bin lock. However, unlike other
434
+ * cases, a transfer can skip a bin if it fails to acquire its
435
+ * lock, and revisit it later (unless it is a TreeBin). Method
436
+ * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
437
+ * have been skipped because of failure to acquire a lock, and
438
+ * blocks only if none are available (i.e., only very rarely).
439
+ * The transfer operation must also ensure that all accessible
440
+ * bins in both the old and new table are usable by any traversal.
441
+ * When there are no lock acquisition failures, this is arranged
442
+ * simply by proceeding from the last bin (table.length - 1) up
443
+ * towards the first. Upon seeing a forwarding node, traversals
444
+ * (see class Iter) arrange to move to the new table
445
+ * without revisiting nodes. However, when any node is skipped
446
+ * during a transfer, all earlier table bins may have become
447
+ * visible, so are initialized with a reverse-forwarding node back
448
+ * to the old table until the new ones are established. (This
449
+ * sometimes requires transiently locking a forwarding node, which
450
+ * is possible under the above encoding.) These more expensive
451
+ * mechanics trigger only when necessary.
452
+ *
453
+ * The traversal scheme also applies to partial traversals of
454
+ * ranges of bins (via an alternate Traverser constructor)
455
+ * to support partitioned aggregate operations. Also, read-only
456
+ * operations give up if ever forwarded to a null table, which
457
+ * provides support for shutdown-style clearing, which is also not
458
+ * currently implemented.
459
+ *
460
+ * Lazy table initialization minimizes footprint until first use,
461
+ * and also avoids resizings when the first operation is from a
462
+ * putAll, constructor with map argument, or deserialization.
463
+ * These cases attempt to override the initial capacity settings,
464
+ * but harmlessly fail to take effect in cases of races.
465
+ *
466
+ * The element count is maintained using a LongAdder, which avoids
467
+ * contention on updates but can encounter cache thrashing if read
468
+ * too frequently during concurrent access. To avoid reading so
469
+ * often, resizing is attempted either when a bin lock is
470
+ * contended, or upon adding to a bin already holding two or more
471
+ * nodes (checked before adding in the xIfAbsent methods, after
472
+ * adding in others). Under uniform hash distributions, the
473
+ * probability of this occurring at threshold is around 13%,
474
+ * meaning that only about 1 in 8 puts check threshold (and after
475
+ * resizing, many fewer do so). But this approximation has high
476
+ * variance for small table sizes, so we check on any collision
477
+ * for sizes <= 64. The bulk putAll operation further reduces
478
+ * contention by only committing count updates upon these size
479
+ * checks.
480
+ *
481
+ * Maintaining API and serialization compatibility with previous
482
+ * versions of this class introduces several oddities. Mainly: We
483
+ * leave untouched but unused constructor arguments referring to
484
+ * concurrencyLevel. We accept a loadFactor constructor argument,
485
+ * but apply it only to initial table capacity (which is the only
486
+ * time that we can guarantee to honor it.) We also declare an
487
+ * unused "Segment" class that is instantiated in minimal form
488
+ * only when serializing.
489
+ */
490
+
491
+ /* ---------------- Constants -------------- */
492
+
493
+ /**
494
+ * The largest possible table capacity. This value must be
495
+ * exactly 1<<30 to stay within Java array allocation and indexing
496
+ * bounds for power of two table sizes, and is further required
497
+ * because the top two bits of 32bit hash fields are used for
498
+ * control purposes.
499
+ */
500
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
501
+
502
+ /**
503
+ * The default initial table capacity. Must be a power of 2
504
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
505
+ */
506
+ private static final int DEFAULT_CAPACITY = 16;
507
+
508
+ /**
509
+ * The largest possible (non-power of two) array size.
510
+ * Needed by toArray and related methods.
511
+ */
512
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
513
+
514
+ /**
515
+ * The default concurrency level for this table. Unused but
516
+ * defined for compatibility with previous versions of this class.
517
+ */
518
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
519
+
520
+ /**
521
+ * The load factor for this table. Overrides of this value in
522
+ * constructors affect only the initial table capacity. The
523
+ * actual floating point value isn't normally used -- it is
524
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
525
+ * the associated resizing threshold.
526
+ */
527
+ private static final float LOAD_FACTOR = 0.75f;
528
+
529
+ /**
530
+ * The buffer size for skipped bins during transfers. The
531
+ * value is arbitrary but should be large enough to avoid
532
+ * most locking stalls during resizes.
533
+ */
534
+ private static final int TRANSFER_BUFFER_SIZE = 32;
535
+
536
+ /**
537
+ * The bin count threshold for using a tree rather than list for a
538
+ * bin. The value reflects the approximate break-even point for
539
+ * using tree-based operations.
540
+ */
541
+ private static final int TREE_THRESHOLD = 8;
542
+
543
+ /*
544
+ * Encodings for special uses of Node hash fields. See above for
545
+ * explanation.
546
+ */
547
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
548
+ static final int LOCKED = 0x40000000; // set/tested only as a bit
549
+ static final int WAITING = 0xc0000000; // both bits set/tested together
550
+ static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
551
+
552
+ /* ---------------- Fields -------------- */
553
+
554
+ /**
555
+ * The array of bins. Lazily initialized upon first insertion.
556
+ * Size is always a power of two. Accessed directly by iterators.
557
+ */
558
+ transient volatile Node[] table;
559
+
560
+ /**
561
+ * The counter maintaining number of elements.
562
+ */
563
+ private transient final LongAdder counter;
564
+
565
+ /**
566
+ * Table initialization and resizing control. When negative, the
567
+ * table is being initialized or resized. Otherwise, when table is
568
+ * null, holds the initial table size to use upon creation, or 0
569
+ * for default. After initialization, holds the next element count
570
+ * value upon which to resize the table.
571
+ */
572
+ private transient volatile int sizeCtl;
573
+
574
+ // views
575
+ private transient KeySetView<K,V> keySet;
576
+ private transient ValuesView<K,V> values;
577
+ private transient EntrySetView<K,V> entrySet;
578
+
579
+ /** For serialization compatibility. Null unless serialized; see below */
580
+ private Segment<K,V>[] segments;
581
+
582
+ /* ---------------- Table element access -------------- */
583
+
584
+ /*
585
+ * Volatile access methods are used for table elements as well as
586
+ * elements of in-progress next table while resizing. Uses are
587
+ * null checked by callers, and implicitly bounds-checked, relying
588
+ * on the invariants that tab arrays have non-zero size, and all
589
+ * indices are masked with (tab.length - 1) which is never
590
+ * negative and always less than length. Note that, to be correct
591
+ * wrt arbitrary concurrency errors by users, bounds checks must
592
+ * operate on local variables, which accounts for some odd-looking
593
+ * inline assignments below.
594
+ */
595
+
596
+ static final Node tabAt(Node[] tab, int i) { // used by Iter
597
+ return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
598
+ }
599
+
600
+ private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
601
+ return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
602
+ }
603
+
604
+ private static final void setTabAt(Node[] tab, int i, Node v) {
605
+ UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
606
+ }
607
+
608
+ /* ---------------- Nodes -------------- */
609
+
610
+ /**
611
+ * Key-value entry. Note that this is never exported out as a
612
+ * user-visible Map.Entry (see MapEntry below). Nodes with a hash
613
+ * field of MOVED are special, and do not contain user keys or
614
+ * values. Otherwise, keys are never null, and null val fields
615
+ * indicate that a node is in the process of being deleted or
616
+ * created. For purposes of read-only access, a key may be read
617
+ * before a val, but can only be used after checking val to be
618
+ * non-null.
619
+ */
620
+ static class Node {
621
+ volatile int hash;
622
+ final Object key;
623
+ volatile Object val;
624
+ volatile Node next;
625
+
626
+ Node(int hash, Object key, Object val, Node next) {
627
+ this.hash = hash;
628
+ this.key = key;
629
+ this.val = val;
630
+ this.next = next;
631
+ }
632
+
633
+ /** CompareAndSet the hash field */
634
+ final boolean casHash(int cmp, int val) {
635
+ return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
636
+ }
637
+
638
+ /** The number of spins before blocking for a lock */
639
+ static final int MAX_SPINS =
640
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
641
+
642
+ /**
643
+ * Spins a while if LOCKED bit set and this node is the first
644
+ * of its bin, and then sets WAITING bits on hash field and
645
+ * blocks (once) if they are still set. It is OK for this
646
+ * method to return even if lock is not available upon exit,
647
+ * which enables these simple single-wait mechanics.
648
+ *
649
+ * The corresponding signalling operation is performed within
650
+ * callers: Upon detecting that WAITING has been set when
651
+ * unlocking lock (via a failed CAS from non-waiting LOCKED
652
+ * state), unlockers acquire the sync lock and perform a
653
+ * notifyAll.
654
+ *
655
+ * The initial sanity check on tab and bounds is not currently
656
+ * necessary in the only usages of this method, but enables
657
+ * use in other future contexts.
658
+ */
659
+ final void tryAwaitLock(Node[] tab, int i) {
660
+ if (tab != null && i >= 0 && i < tab.length) { // sanity check
661
+ int r = ThreadLocalRandom.current().nextInt(); // randomize spins
662
+ int spins = MAX_SPINS, h;
663
+ while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
664
+ if (spins >= 0) {
665
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
666
+ if (r >= 0 && --spins == 0)
667
+ Thread.yield(); // yield before block
668
+ }
669
+ else if (casHash(h, h | WAITING)) {
670
+ synchronized (this) {
671
+ if (tabAt(tab, i) == this &&
672
+ (hash & WAITING) == WAITING) {
673
+ try {
674
+ wait();
675
+ } catch (InterruptedException ie) {
676
+ Thread.currentThread().interrupt();
677
+ }
678
+ }
679
+ else
680
+ notifyAll(); // possibly won race vs signaller
681
+ }
682
+ break;
683
+ }
684
+ }
685
+ }
686
+ }
687
+
688
+ // Unsafe mechanics for casHash
689
+ private static final sun.misc.Unsafe UNSAFE;
690
+ private static final long hashOffset;
691
+
692
+ static {
693
+ try {
694
+ UNSAFE = getUnsafe();
695
+ Class<?> k = Node.class;
696
+ hashOffset = UNSAFE.objectFieldOffset
697
+ (k.getDeclaredField("hash"));
698
+ } catch (Exception e) {
699
+ throw new Error(e);
700
+ }
701
+ }
702
+ }
703
+
704
+ /* ---------------- TreeBins -------------- */
705
+
706
+ /**
707
+ * Nodes for use in TreeBins
708
+ */
709
+ static final class TreeNode extends Node {
710
+ TreeNode parent; // red-black tree links
711
+ TreeNode left;
712
+ TreeNode right;
713
+ TreeNode prev; // needed to unlink next upon deletion
714
+ boolean red;
715
+
716
+ TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
717
+ super(hash, key, val, next);
718
+ this.parent = parent;
719
+ }
720
+ }
721
+
722
+ /**
723
+ * A specialized form of red-black tree for use in bins
724
+ * whose size exceeds a threshold.
725
+ *
726
+ * TreeBins use a special form of comparison for search and
727
+ * related operations (which is the main reason we cannot use
728
+ * existing collections such as TreeMaps). TreeBins contain
729
+ * Comparable elements, but may contain others, as well as
730
+ * elements that are Comparable but not necessarily Comparable<T>
731
+ * for the same T, so we cannot invoke compareTo among them. To
732
+ * handle this, the tree is ordered primarily by hash value, then
733
+ * by getClass().getName() order, and then by Comparator order
734
+ * among elements of the same class. On lookup at a node, if
735
+ * elements are not comparable or compare as 0, both left and
736
+ * right children may need to be searched in the case of tied hash
737
+ * values. (This corresponds to the full list search that would be
738
+ * necessary if all elements were non-Comparable and had tied
739
+ * hashes.) The red-black balancing code is updated from
740
+ * pre-jdk-collections
741
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
742
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
743
+ * Algorithms" (CLR).
744
+ *
745
+ * TreeBins also maintain a separate locking discipline than
746
+ * regular bins. Because they are forwarded via special MOVED
747
+ * nodes at bin heads (which can never change once established),
748
+ * we cannot use those nodes as locks. Instead, TreeBin
749
+ * extends AbstractQueuedSynchronizer to support a simple form of
750
+ * read-write lock. For update operations and table validation,
751
+ * the exclusive form of lock behaves in the same way as bin-head
752
+ * locks. However, lookups use shared read-lock mechanics to allow
753
+ * multiple readers in the absence of writers. Additionally,
754
+ * these lookups do not ever block: While the lock is not
755
+ * available, they proceed along the slow traversal path (via
756
+ * next-pointers) until the lock becomes available or the list is
757
+ * exhausted, whichever comes first. (These cases are not fast,
758
+ * but maximize aggregate expected throughput.) The AQS mechanics
759
+ * for doing this are straightforward. The lock state is held as
760
+ * AQS getState(). Read counts are negative; the write count (1)
761
+ * is positive. There are no signalling preferences among readers
762
+ * and writers. Since we don't need to export full Lock API, we
763
+ * just override the minimal AQS methods and use them directly.
764
+ */
765
+ static final class TreeBin extends AbstractQueuedSynchronizer {
766
+ private static final long serialVersionUID = 2249069246763182397L;
767
+ transient TreeNode root; // root of tree
768
+ transient TreeNode first; // head of next-pointer list
769
+
770
+ /* AQS overrides */
771
+ public final boolean isHeldExclusively() { return getState() > 0; }
772
+ public final boolean tryAcquire(int ignore) {
773
+ if (compareAndSetState(0, 1)) {
774
+ setExclusiveOwnerThread(Thread.currentThread());
775
+ return true;
776
+ }
777
+ return false;
778
+ }
779
+ public final boolean tryRelease(int ignore) {
780
+ setExclusiveOwnerThread(null);
781
+ setState(0);
782
+ return true;
783
+ }
784
+ public final int tryAcquireShared(int ignore) {
785
+ for (int c;;) {
786
+ if ((c = getState()) > 0)
787
+ return -1;
788
+ if (compareAndSetState(c, c - 1))
789
+ return 1;
790
+ }
791
+ }
792
+ public final boolean tryReleaseShared(int ignore) {
793
+ int c;
794
+ do {} while (!compareAndSetState(c = getState(), c + 1));
795
+ return c == -1;
796
+ }
797
+
798
+ /** From CLR */
799
+ private void rotateLeft(TreeNode p) {
800
+ if (p != null) {
801
+ TreeNode r = p.right, pp, rl;
802
+ if ((rl = p.right = r.left) != null)
803
+ rl.parent = p;
804
+ if ((pp = r.parent = p.parent) == null)
805
+ root = r;
806
+ else if (pp.left == p)
807
+ pp.left = r;
808
+ else
809
+ pp.right = r;
810
+ r.left = p;
811
+ p.parent = r;
812
+ }
813
+ }
814
+
815
+ /** From CLR */
816
+ private void rotateRight(TreeNode p) {
817
+ if (p != null) {
818
+ TreeNode l = p.left, pp, lr;
819
+ if ((lr = p.left = l.right) != null)
820
+ lr.parent = p;
821
+ if ((pp = l.parent = p.parent) == null)
822
+ root = l;
823
+ else if (pp.right == p)
824
+ pp.right = l;
825
+ else
826
+ pp.left = l;
827
+ l.right = p;
828
+ p.parent = l;
829
+ }
830
+ }
831
+
832
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
833
+ (int h, Object k, TreeNode p) {
834
+ return getTreeNode(h, (RubyObject)k, p);
835
+ }
836
+
837
+ /**
838
+ * Returns the TreeNode (or null if not found) for the given key
839
+ * starting at given root.
840
+ */
841
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
842
+ (int h, RubyObject k, TreeNode p) {
843
+ RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>");
844
+ while (p != null) {
845
+ int dir, ph; RubyObject pk; RubyClass pc;
846
+ if ((ph = p.hash) == h) {
847
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
848
+ return p;
849
+ if (c != (pc = (RubyClass)pk.getMetaClass()) ||
850
+ kNotComparable ||
851
+ (dir = rubyCompare(k, pk)) == 0) {
852
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
853
+ if (dir == 0) { // if still stuck, need to check both sides
854
+ TreeNode r = null, pl, pr;
855
+ // try to recurse on the right
856
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
857
+ return r;
858
+ // try to continue iterating on the left side
859
+ else if ((pl = p.left) != null && h <= pl.hash)
860
+ dir = -1;
861
+ else // no matching node found
862
+ return null;
863
+ }
864
+ }
865
+ }
866
+ else
867
+ dir = (h < ph) ? -1 : 1;
868
+ p = (dir > 0) ? p.right : p.left;
869
+ }
870
+ return null;
871
+ }
872
+
873
+ int rubyCompare(RubyObject l, RubyObject r) {
874
+ ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext();
875
+ IRubyObject result = l.callMethod(context, "<=>", r);
876
+ int res = result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger());
877
+ return res;
878
+ }
879
+
880
+ /**
881
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
882
+ * read-lock to call getTreeNode, but during failure to get
883
+ * lock, searches along next links.
884
+ */
885
+ final Object getValue(int h, Object k) {
886
+ Node r = null;
887
+ int c = getState(); // Must read lock state first
888
+ for (Node e = first; e != null; e = e.next) {
889
+ if (c <= 0 && compareAndSetState(c, c - 1)) {
890
+ try {
891
+ r = getTreeNode(h, k, root);
892
+ } finally {
893
+ releaseShared(0);
894
+ }
895
+ break;
896
+ }
897
+ else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
898
+ r = e;
899
+ break;
900
+ }
901
+ else
902
+ c = getState();
903
+ }
904
+ return r == null ? null : r.val;
905
+ }
906
+
907
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
908
+ (int h, Object k, Object v) {
909
+ return putTreeNode(h, (RubyObject)k, v);
910
+ }
911
+
912
+ /**
913
+ * Finds or adds a node.
914
+ * @return null if added
915
+ */
916
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
917
+ (int h, RubyObject k, Object v) {
918
+ RubyClass c = k.getMetaClass();
919
+ boolean kNotComparable = !k.respondsTo("<=>");
920
+ TreeNode pp = root, p = null;
921
+ int dir = 0;
922
+ while (pp != null) { // find existing node or leaf to insert at
923
+ int ph; RubyObject pk; RubyClass pc;
924
+ p = pp;
925
+ if ((ph = p.hash) == h) {
926
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
927
+ return p;
928
+ if (c != (pc = pk.getMetaClass()) ||
929
+ kNotComparable ||
930
+ (dir = rubyCompare(k, pk)) == 0) {
931
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
932
+ if (dir == 0) { // if still stuck, need to check both sides
933
+ TreeNode r = null, pr;
934
+ // try to recurse on the right
935
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
936
+ return r;
937
+ else // continue descending down the left subtree
938
+ dir = -1;
939
+ }
940
+ }
941
+ }
942
+ else
943
+ dir = (h < ph) ? -1 : 1;
944
+ pp = (dir > 0) ? p.right : p.left;
945
+ }
946
+
947
+ TreeNode f = first;
948
+ TreeNode x = first = new TreeNode(h, (Object)k, v, f, p);
949
+ if (p == null)
950
+ root = x;
951
+ else { // attach and rebalance; adapted from CLR
952
+ TreeNode xp, xpp;
953
+ if (f != null)
954
+ f.prev = x;
955
+ if (dir <= 0)
956
+ p.left = x;
957
+ else
958
+ p.right = x;
959
+ x.red = true;
960
+ while (x != null && (xp = x.parent) != null && xp.red &&
961
+ (xpp = xp.parent) != null) {
962
+ TreeNode xppl = xpp.left;
963
+ if (xp == xppl) {
964
+ TreeNode y = xpp.right;
965
+ if (y != null && y.red) {
966
+ y.red = false;
967
+ xp.red = false;
968
+ xpp.red = true;
969
+ x = xpp;
970
+ }
971
+ else {
972
+ if (x == xp.right) {
973
+ rotateLeft(x = xp);
974
+ xpp = (xp = x.parent) == null ? null : xp.parent;
975
+ }
976
+ if (xp != null) {
977
+ xp.red = false;
978
+ if (xpp != null) {
979
+ xpp.red = true;
980
+ rotateRight(xpp);
981
+ }
982
+ }
983
+ }
984
+ }
985
+ else {
986
+ TreeNode y = xppl;
987
+ if (y != null && y.red) {
988
+ y.red = false;
989
+ xp.red = false;
990
+ xpp.red = true;
991
+ x = xpp;
992
+ }
993
+ else {
994
+ if (x == xp.left) {
995
+ rotateRight(x = xp);
996
+ xpp = (xp = x.parent) == null ? null : xp.parent;
997
+ }
998
+ if (xp != null) {
999
+ xp.red = false;
1000
+ if (xpp != null) {
1001
+ xpp.red = true;
1002
+ rotateLeft(xpp);
1003
+ }
1004
+ }
1005
+ }
1006
+ }
1007
+ }
1008
+ TreeNode r = root;
1009
+ if (r != null && r.red)
1010
+ r.red = false;
1011
+ }
1012
+ return null;
1013
+ }
1014
+
1015
+ /**
1016
+ * Removes the given node, that must be present before this
1017
+ * call. This is messier than typical red-black deletion code
1018
+ * because we cannot swap the contents of an interior node
1019
+ * with a leaf successor that is pinned by "next" pointers
1020
+ * that are accessible independently of lock. So instead we
1021
+ * swap the tree linkages.
1022
+ */
1023
+ final void deleteTreeNode(TreeNode p) {
1024
+ TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1025
+ TreeNode pred = p.prev;
1026
+ if (pred == null)
1027
+ first = next;
1028
+ else
1029
+ pred.next = next;
1030
+ if (next != null)
1031
+ next.prev = pred;
1032
+ TreeNode replacement;
1033
+ TreeNode pl = p.left;
1034
+ TreeNode pr = p.right;
1035
+ if (pl != null && pr != null) {
1036
+ TreeNode s = pr, sl;
1037
+ while ((sl = s.left) != null) // find successor
1038
+ s = sl;
1039
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1040
+ TreeNode sr = s.right;
1041
+ TreeNode pp = p.parent;
1042
+ if (s == pr) { // p was s's direct parent
1043
+ p.parent = s;
1044
+ s.right = p;
1045
+ }
1046
+ else {
1047
+ TreeNode sp = s.parent;
1048
+ if ((p.parent = sp) != null) {
1049
+ if (s == sp.left)
1050
+ sp.left = p;
1051
+ else
1052
+ sp.right = p;
1053
+ }
1054
+ if ((s.right = pr) != null)
1055
+ pr.parent = s;
1056
+ }
1057
+ p.left = null;
1058
+ if ((p.right = sr) != null)
1059
+ sr.parent = p;
1060
+ if ((s.left = pl) != null)
1061
+ pl.parent = s;
1062
+ if ((s.parent = pp) == null)
1063
+ root = s;
1064
+ else if (p == pp.left)
1065
+ pp.left = s;
1066
+ else
1067
+ pp.right = s;
1068
+ replacement = sr;
1069
+ }
1070
+ else
1071
+ replacement = (pl != null) ? pl : pr;
1072
+ TreeNode pp = p.parent;
1073
+ if (replacement == null) {
1074
+ if (pp == null) {
1075
+ root = null;
1076
+ return;
1077
+ }
1078
+ replacement = p;
1079
+ }
1080
+ else {
1081
+ replacement.parent = pp;
1082
+ if (pp == null)
1083
+ root = replacement;
1084
+ else if (p == pp.left)
1085
+ pp.left = replacement;
1086
+ else
1087
+ pp.right = replacement;
1088
+ p.left = p.right = p.parent = null;
1089
+ }
1090
+ if (!p.red) { // rebalance, from CLR
1091
+ TreeNode x = replacement;
1092
+ while (x != null) {
1093
+ TreeNode xp, xpl;
1094
+ if (x.red || (xp = x.parent) == null) {
1095
+ x.red = false;
1096
+ break;
1097
+ }
1098
+ if (x == (xpl = xp.left)) {
1099
+ TreeNode sib = xp.right;
1100
+ if (sib != null && sib.red) {
1101
+ sib.red = false;
1102
+ xp.red = true;
1103
+ rotateLeft(xp);
1104
+ sib = (xp = x.parent) == null ? null : xp.right;
1105
+ }
1106
+ if (sib == null)
1107
+ x = xp;
1108
+ else {
1109
+ TreeNode sl = sib.left, sr = sib.right;
1110
+ if ((sr == null || !sr.red) &&
1111
+ (sl == null || !sl.red)) {
1112
+ sib.red = true;
1113
+ x = xp;
1114
+ }
1115
+ else {
1116
+ if (sr == null || !sr.red) {
1117
+ if (sl != null)
1118
+ sl.red = false;
1119
+ sib.red = true;
1120
+ rotateRight(sib);
1121
+ sib = (xp = x.parent) == null ? null : xp.right;
1122
+ }
1123
+ if (sib != null) {
1124
+ sib.red = (xp == null) ? false : xp.red;
1125
+ if ((sr = sib.right) != null)
1126
+ sr.red = false;
1127
+ }
1128
+ if (xp != null) {
1129
+ xp.red = false;
1130
+ rotateLeft(xp);
1131
+ }
1132
+ x = root;
1133
+ }
1134
+ }
1135
+ }
1136
+ else { // symmetric
1137
+ TreeNode sib = xpl;
1138
+ if (sib != null && sib.red) {
1139
+ sib.red = false;
1140
+ xp.red = true;
1141
+ rotateRight(xp);
1142
+ sib = (xp = x.parent) == null ? null : xp.left;
1143
+ }
1144
+ if (sib == null)
1145
+ x = xp;
1146
+ else {
1147
+ TreeNode sl = sib.left, sr = sib.right;
1148
+ if ((sl == null || !sl.red) &&
1149
+ (sr == null || !sr.red)) {
1150
+ sib.red = true;
1151
+ x = xp;
1152
+ }
1153
+ else {
1154
+ if (sl == null || !sl.red) {
1155
+ if (sr != null)
1156
+ sr.red = false;
1157
+ sib.red = true;
1158
+ rotateLeft(sib);
1159
+ sib = (xp = x.parent) == null ? null : xp.left;
1160
+ }
1161
+ if (sib != null) {
1162
+ sib.red = (xp == null) ? false : xp.red;
1163
+ if ((sl = sib.left) != null)
1164
+ sl.red = false;
1165
+ }
1166
+ if (xp != null) {
1167
+ xp.red = false;
1168
+ rotateRight(xp);
1169
+ }
1170
+ x = root;
1171
+ }
1172
+ }
1173
+ }
1174
+ }
1175
+ }
1176
+ if (p == replacement && (pp = p.parent) != null) {
1177
+ if (p == pp.left) // detach pointers
1178
+ pp.left = null;
1179
+ else if (p == pp.right)
1180
+ pp.right = null;
1181
+ p.parent = null;
1182
+ }
1183
+ }
1184
+ }
1185
+
1186
+ /* ---------------- Collision reduction methods -------------- */
1187
+
1188
+ /**
1189
+ * Spreads higher bits to lower, and also forces top 2 bits to 0.
1190
+ * Because the table uses power-of-two masking, sets of hashes
1191
+ * that vary only in bits above the current mask will always
1192
+ * collide. (Among known examples are sets of Float keys holding
1193
+ * consecutive whole numbers in small tables.) To counter this,
1194
+ * we apply a transform that spreads the impact of higher bits
1195
+ * downward. There is a tradeoff between speed, utility, and
1196
+ * quality of bit-spreading. Because many common sets of hashes
1197
+ * are already reasonably distributed across bits (so don't benefit
1198
+ * from spreading), and because we use trees to handle large sets
1199
+ * of collisions in bins, we don't need excessively high quality.
1200
+ */
1201
+ private static final int spread(int h) {
1202
+ h ^= (h >>> 18) ^ (h >>> 12);
1203
+ return (h ^ (h >>> 10)) & HASH_BITS;
1204
+ }
1205
+
1206
+ /**
1207
+ * Replaces a list bin with a tree bin. Call only when locked.
1208
+ * Fails to replace if the given key is non-comparable or table
1209
+ * is, or needs, resizing.
1210
+ */
1211
+ private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1212
+ if ((key instanceof Comparable) &&
1213
+ (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1214
+ TreeBin t = new TreeBin();
1215
+ for (Node e = tabAt(tab, index); e != null; e = e.next)
1216
+ t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1217
+ setTabAt(tab, index, new Node(MOVED, t, null, null));
1218
+ }
1219
+ }
1220
+
1221
+ /* ---------------- Internal access and update methods -------------- */
1222
+
1223
+ /** Implementation for get and containsKey */
1224
+ private final Object internalGet(Object k) {
1225
+ int h = spread(k.hashCode());
1226
+ retry: for (Node[] tab = table; tab != null;) {
1227
+ Node e, p; Object ek, ev; int eh; // locals to read fields once
1228
+ for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1229
+ if ((eh = e.hash) == MOVED) {
1230
+ if ((ek = e.key) instanceof TreeBin) // search TreeBin
1231
+ return ((TreeBin)ek).getValue(h, k);
1232
+ else { // restart with new table
1233
+ tab = (Node[])ek;
1234
+ continue retry;
1235
+ }
1236
+ }
1237
+ else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1238
+ ((ek = e.key) == k || k.equals(ek)))
1239
+ return ev;
1240
+ }
1241
+ break;
1242
+ }
1243
+ return null;
1244
+ }
1245
+
1246
+ /**
1247
+ * Implementation for the four public remove/replace methods:
1248
+ * Replaces node value with v, conditional upon match of cv if
1249
+ * non-null. If resulting value is null, delete.
1250
+ */
1251
+ private final Object internalReplace(Object k, Object v, Object cv) {
1252
+ int h = spread(k.hashCode());
1253
+ Object oldVal = null;
1254
+ for (Node[] tab = table;;) {
1255
+ Node f; int i, fh; Object fk;
1256
+ if (tab == null ||
1257
+ (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1258
+ break;
1259
+ else if ((fh = f.hash) == MOVED) {
1260
+ if ((fk = f.key) instanceof TreeBin) {
1261
+ TreeBin t = (TreeBin)fk;
1262
+ boolean validated = false;
1263
+ boolean deleted = false;
1264
+ t.acquire(0);
1265
+ try {
1266
+ if (tabAt(tab, i) == f) {
1267
+ validated = true;
1268
+ TreeNode p = t.getTreeNode(h, k, t.root);
1269
+ if (p != null) {
1270
+ Object pv = p.val;
1271
+ if (cv == null || cv == pv || cv.equals(pv)) {
1272
+ oldVal = pv;
1273
+ if ((p.val = v) == null) {
1274
+ deleted = true;
1275
+ t.deleteTreeNode(p);
1276
+ }
1277
+ }
1278
+ }
1279
+ }
1280
+ } finally {
1281
+ t.release(0);
1282
+ }
1283
+ if (validated) {
1284
+ if (deleted)
1285
+ counter.add(-1L);
1286
+ break;
1287
+ }
1288
+ }
1289
+ else
1290
+ tab = (Node[])fk;
1291
+ }
1292
+ else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1293
+ break; // rules out possible existence
1294
+ else if ((fh & LOCKED) != 0) {
1295
+ checkForResize(); // try resizing if can't get lock
1296
+ f.tryAwaitLock(tab, i);
1297
+ }
1298
+ else if (f.casHash(fh, fh | LOCKED)) {
1299
+ boolean validated = false;
1300
+ boolean deleted = false;
1301
+ try {
1302
+ if (tabAt(tab, i) == f) {
1303
+ validated = true;
1304
+ for (Node e = f, pred = null;;) {
1305
+ Object ek, ev;
1306
+ if ((e.hash & HASH_BITS) == h &&
1307
+ ((ev = e.val) != null) &&
1308
+ ((ek = e.key) == k || k.equals(ek))) {
1309
+ if (cv == null || cv == ev || cv.equals(ev)) {
1310
+ oldVal = ev;
1311
+ if ((e.val = v) == null) {
1312
+ deleted = true;
1313
+ Node en = e.next;
1314
+ if (pred != null)
1315
+ pred.next = en;
1316
+ else
1317
+ setTabAt(tab, i, en);
1318
+ }
1319
+ }
1320
+ break;
1321
+ }
1322
+ pred = e;
1323
+ if ((e = e.next) == null)
1324
+ break;
1325
+ }
1326
+ }
1327
+ } finally {
1328
+ if (!f.casHash(fh | LOCKED, fh)) {
1329
+ f.hash = fh;
1330
+ synchronized (f) { f.notifyAll(); };
1331
+ }
1332
+ }
1333
+ if (validated) {
1334
+ if (deleted)
1335
+ counter.add(-1L);
1336
+ break;
1337
+ }
1338
+ }
1339
+ }
1340
+ return oldVal;
1341
+ }
1342
+
1343
+ /*
1344
+ * Internal versions of the six insertion methods, each a
1345
+ * little more complicated than the last. All have
1346
+ * the same basic structure as the first (internalPut):
1347
+ * 1. If table uninitialized, create
1348
+ * 2. If bin empty, try to CAS new node
1349
+ * 3. If bin stale, use new table
1350
+ * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1351
+ * 5. Lock and validate; if valid, scan and add or update
1352
+ *
1353
+ * The others interweave other checks and/or alternative actions:
1354
+ * * Plain put checks for and performs resize after insertion.
1355
+ * * putIfAbsent prescans for mapping without lock (and fails to add
1356
+ * if present), which also makes pre-emptive resize checks worthwhile.
1357
+ * * computeIfAbsent extends form used in putIfAbsent with additional
1358
+ * mechanics to deal with, calls, potential exceptions and null
1359
+ * returns from function call.
1360
+ * * compute uses the same function-call mechanics, but without
1361
+ * the prescans
1362
+ * * merge acts as putIfAbsent in the absent case, but invokes the
1363
+ * update function if present
1364
+ * * putAll attempts to pre-allocate enough table space
1365
+ * and more lazily performs count updates and checks.
1366
+ *
1367
+ * Someday when details settle down a bit more, it might be worth
1368
+ * some factoring to reduce sprawl.
1369
+ */
1370
+
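Illustrative sketch (entirely ours, not the gem's implementation): to make the step structure above easier to follow before reading internalPut and its variants, here is a deliberately tiny stand-alone map that keeps only steps 2 and 5, that is, CAS a new node into an empty bin, otherwise lock the bin head, validate, and scan or append. The real code below replaces the synchronized block with a LOCKED bit CASed into the node hash, and adds lazy table creation, tree bins, resizing and counter maintenance; mix() is a simplified stand-in for the map's spread().

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Toy illustration only.
    class TinyCasMap<K, V> {
        static final class Node<K, V> {
            final K key; volatile V val; volatile Node<K, V> next;
            Node(K key, V val, Node<K, V> next) { this.key = key; this.val = val; this.next = next; }
        }

        private final AtomicReferenceArray<Node<K, V>> table =
            new AtomicReferenceArray<Node<K, V>>(16);            // fixed size: no resizing here

        public V put(K k, V v) {
            int i = (table.length() - 1) & mix(k.hashCode());
            for (;;) {
                Node<K, V> f = table.get(i);
                if (f == null) {                                 // step 2: empty bin, try to CAS new node
                    if (table.compareAndSet(i, null, new Node<K, V>(k, v, null)))
                        return null;
                } else {
                    synchronized (f) {                           // step 5: lock head, validate, scan/append
                        if (table.get(i) == f) {
                            for (Node<K, V> e = f;; e = e.next) {
                                if (e.key.equals(k)) { V old = e.val; e.val = v; return old; }
                                if (e.next == null) { e.next = new Node<K, V>(k, v, null); return null; }
                            }
                        }
                    }   // cannot happen in this toy; kept to mirror the validate-then-retry shape
                }
            }
        }

        private static int mix(int h) {
            return (h ^ (h >>> 16)) & 0x7fffffff;                // keep hash non-negative
        }
    }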
1371
+ /** Implementation for put */
1372
+ private final Object internalPut(Object k, Object v) {
1373
+ int h = spread(k.hashCode());
1374
+ int count = 0;
1375
+ for (Node[] tab = table;;) {
1376
+ int i; Node f; int fh; Object fk;
1377
+ if (tab == null)
1378
+ tab = initTable();
1379
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1380
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1381
+ break; // no lock when adding to empty bin
1382
+ }
1383
+ else if ((fh = f.hash) == MOVED) {
1384
+ if ((fk = f.key) instanceof TreeBin) {
1385
+ TreeBin t = (TreeBin)fk;
1386
+ Object oldVal = null;
1387
+ t.acquire(0);
1388
+ try {
1389
+ if (tabAt(tab, i) == f) {
1390
+ count = 2;
1391
+ TreeNode p = t.putTreeNode(h, k, v);
1392
+ if (p != null) {
1393
+ oldVal = p.val;
1394
+ p.val = v;
1395
+ }
1396
+ }
1397
+ } finally {
1398
+ t.release(0);
1399
+ }
1400
+ if (count != 0) {
1401
+ if (oldVal != null)
1402
+ return oldVal;
1403
+ break;
1404
+ }
1405
+ }
1406
+ else
1407
+ tab = (Node[])fk;
1408
+ }
1409
+ else if ((fh & LOCKED) != 0) {
1410
+ checkForResize();
1411
+ f.tryAwaitLock(tab, i);
1412
+ }
1413
+ else if (f.casHash(fh, fh | LOCKED)) {
1414
+ Object oldVal = null;
1415
+ try { // needed in case equals() throws
1416
+ if (tabAt(tab, i) == f) {
1417
+ count = 1;
1418
+ for (Node e = f;; ++count) {
1419
+ Object ek, ev;
1420
+ if ((e.hash & HASH_BITS) == h &&
1421
+ (ev = e.val) != null &&
1422
+ ((ek = e.key) == k || k.equals(ek))) {
1423
+ oldVal = ev;
1424
+ e.val = v;
1425
+ break;
1426
+ }
1427
+ Node last = e;
1428
+ if ((e = e.next) == null) {
1429
+ last.next = new Node(h, k, v, null);
1430
+ if (count >= TREE_THRESHOLD)
1431
+ replaceWithTreeBin(tab, i, k);
1432
+ break;
1433
+ }
1434
+ }
1435
+ }
1436
+ } finally { // unlock and signal if needed
1437
+ if (!f.casHash(fh | LOCKED, fh)) {
1438
+ f.hash = fh;
1439
+ synchronized (f) { f.notifyAll(); };
1440
+ }
1441
+ }
1442
+ if (count != 0) {
1443
+ if (oldVal != null)
1444
+ return oldVal;
1445
+ if (tab.length <= 64)
1446
+ count = 2;
1447
+ break;
1448
+ }
1449
+ }
1450
+ }
1451
+ counter.add(1L);
1452
+ if (count > 1)
1453
+ checkForResize();
1454
+ return null;
1455
+ }
1456
+
1457
+ /** Implementation for putIfAbsent */
1458
+ private final Object internalPutIfAbsent(Object k, Object v) {
1459
+ int h = spread(k.hashCode());
1460
+ int count = 0;
1461
+ for (Node[] tab = table;;) {
1462
+ int i; Node f; int fh; Object fk, fv;
1463
+ if (tab == null)
1464
+ tab = initTable();
1465
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1466
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1467
+ break;
1468
+ }
1469
+ else if ((fh = f.hash) == MOVED) {
1470
+ if ((fk = f.key) instanceof TreeBin) {
1471
+ TreeBin t = (TreeBin)fk;
1472
+ Object oldVal = null;
1473
+ t.acquire(0);
1474
+ try {
1475
+ if (tabAt(tab, i) == f) {
1476
+ count = 2;
1477
+ TreeNode p = t.putTreeNode(h, k, v);
1478
+ if (p != null)
1479
+ oldVal = p.val;
1480
+ }
1481
+ } finally {
1482
+ t.release(0);
1483
+ }
1484
+ if (count != 0) {
1485
+ if (oldVal != null)
1486
+ return oldVal;
1487
+ break;
1488
+ }
1489
+ }
1490
+ else
1491
+ tab = (Node[])fk;
1492
+ }
1493
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1494
+ ((fk = f.key) == k || k.equals(fk)))
1495
+ return fv;
1496
+ else {
1497
+ Node g = f.next;
1498
+ if (g != null) { // at least 2 nodes -- search and maybe resize
1499
+ for (Node e = g;;) {
1500
+ Object ek, ev;
1501
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1502
+ ((ek = e.key) == k || k.equals(ek)))
1503
+ return ev;
1504
+ if ((e = e.next) == null) {
1505
+ checkForResize();
1506
+ break;
1507
+ }
1508
+ }
1509
+ }
1510
+ if (((fh = f.hash) & LOCKED) != 0) {
1511
+ checkForResize();
1512
+ f.tryAwaitLock(tab, i);
1513
+ }
1514
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1515
+ Object oldVal = null;
1516
+ try {
1517
+ if (tabAt(tab, i) == f) {
1518
+ count = 1;
1519
+ for (Node e = f;; ++count) {
1520
+ Object ek, ev;
1521
+ if ((e.hash & HASH_BITS) == h &&
1522
+ (ev = e.val) != null &&
1523
+ ((ek = e.key) == k || k.equals(ek))) {
1524
+ oldVal = ev;
1525
+ break;
1526
+ }
1527
+ Node last = e;
1528
+ if ((e = e.next) == null) {
1529
+ last.next = new Node(h, k, v, null);
1530
+ if (count >= TREE_THRESHOLD)
1531
+ replaceWithTreeBin(tab, i, k);
1532
+ break;
1533
+ }
1534
+ }
1535
+ }
1536
+ } finally {
1537
+ if (!f.casHash(fh | LOCKED, fh)) {
1538
+ f.hash = fh;
1539
+ synchronized (f) { f.notifyAll(); };
1540
+ }
1541
+ }
1542
+ if (count != 0) {
1543
+ if (oldVal != null)
1544
+ return oldVal;
1545
+ if (tab.length <= 64)
1546
+ count = 2;
1547
+ break;
1548
+ }
1549
+ }
1550
+ }
1551
+ }
1552
+ counter.add(1L);
1553
+ if (count > 1)
1554
+ checkForResize();
1555
+ return null;
1556
+ }
1557
+
1558
+ /** Implementation for computeIfAbsent */
1559
+ private final Object internalComputeIfAbsent(K k,
1560
+ Fun<? super K, ?> mf) {
1561
+ int h = spread(k.hashCode());
1562
+ Object val = null;
1563
+ int count = 0;
1564
+ for (Node[] tab = table;;) {
1565
+ Node f; int i, fh; Object fk, fv;
1566
+ if (tab == null)
1567
+ tab = initTable();
1568
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1569
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1570
+ if (casTabAt(tab, i, null, node)) {
1571
+ count = 1;
1572
+ try {
1573
+ if ((val = mf.apply(k)) != null)
1574
+ node.val = val;
1575
+ } finally {
1576
+ if (val == null)
1577
+ setTabAt(tab, i, null);
1578
+ if (!node.casHash(fh, h)) {
1579
+ node.hash = h;
1580
+ synchronized (node) { node.notifyAll(); };
1581
+ }
1582
+ }
1583
+ }
1584
+ if (count != 0)
1585
+ break;
1586
+ }
1587
+ else if ((fh = f.hash) == MOVED) {
1588
+ if ((fk = f.key) instanceof TreeBin) {
1589
+ TreeBin t = (TreeBin)fk;
1590
+ boolean added = false;
1591
+ t.acquire(0);
1592
+ try {
1593
+ if (tabAt(tab, i) == f) {
1594
+ count = 1;
1595
+ TreeNode p = t.getTreeNode(h, k, t.root);
1596
+ if (p != null)
1597
+ val = p.val;
1598
+ else if ((val = mf.apply(k)) != null) {
1599
+ added = true;
1600
+ count = 2;
1601
+ t.putTreeNode(h, k, val);
1602
+ }
1603
+ }
1604
+ } finally {
1605
+ t.release(0);
1606
+ }
1607
+ if (count != 0) {
1608
+ if (!added)
1609
+ return val;
1610
+ break;
1611
+ }
1612
+ }
1613
+ else
1614
+ tab = (Node[])fk;
1615
+ }
1616
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1617
+ ((fk = f.key) == k || k.equals(fk)))
1618
+ return fv;
1619
+ else {
1620
+ Node g = f.next;
1621
+ if (g != null) {
1622
+ for (Node e = g;;) {
1623
+ Object ek, ev;
1624
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1625
+ ((ek = e.key) == k || k.equals(ek)))
1626
+ return ev;
1627
+ if ((e = e.next) == null) {
1628
+ checkForResize();
1629
+ break;
1630
+ }
1631
+ }
1632
+ }
1633
+ if (((fh = f.hash) & LOCKED) != 0) {
1634
+ checkForResize();
1635
+ f.tryAwaitLock(tab, i);
1636
+ }
1637
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1638
+ boolean added = false;
1639
+ try {
1640
+ if (tabAt(tab, i) == f) {
1641
+ count = 1;
1642
+ for (Node e = f;; ++count) {
1643
+ Object ek, ev;
1644
+ if ((e.hash & HASH_BITS) == h &&
1645
+ (ev = e.val) != null &&
1646
+ ((ek = e.key) == k || k.equals(ek))) {
1647
+ val = ev;
1648
+ break;
1649
+ }
1650
+ Node last = e;
1651
+ if ((e = e.next) == null) {
1652
+ if ((val = mf.apply(k)) != null) {
1653
+ added = true;
1654
+ last.next = new Node(h, k, val, null);
1655
+ if (count >= TREE_THRESHOLD)
1656
+ replaceWithTreeBin(tab, i, k);
1657
+ }
1658
+ break;
1659
+ }
1660
+ }
1661
+ }
1662
+ } finally {
1663
+ if (!f.casHash(fh | LOCKED, fh)) {
1664
+ f.hash = fh;
1665
+ synchronized (f) { f.notifyAll(); };
1666
+ }
1667
+ }
1668
+ if (count != 0) {
1669
+ if (!added)
1670
+ return val;
1671
+ if (tab.length <= 64)
1672
+ count = 2;
1673
+ break;
1674
+ }
1675
+ }
1676
+ }
1677
+ }
1678
+ if (val != null) {
1679
+ counter.add(1L);
1680
+ if (count > 1)
1681
+ checkForResize();
1682
+ }
1683
+ return val;
1684
+ }
1685
+
1686
+ /** Implementation for compute */
1687
+ @SuppressWarnings("unchecked") private final Object internalCompute
1688
+ (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1689
+ int h = spread(k.hashCode());
1690
+ Object val = null;
1691
+ int delta = 0;
1692
+ int count = 0;
1693
+ for (Node[] tab = table;;) {
1694
+ Node f; int i, fh; Object fk;
1695
+ if (tab == null)
1696
+ tab = initTable();
1697
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1698
+ if (onlyIfPresent)
1699
+ break;
1700
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1701
+ if (casTabAt(tab, i, null, node)) {
1702
+ try {
1703
+ count = 1;
1704
+ if ((val = mf.apply(k, null)) != null) {
1705
+ node.val = val;
1706
+ delta = 1;
1707
+ }
1708
+ } finally {
1709
+ if (delta == 0)
1710
+ setTabAt(tab, i, null);
1711
+ if (!node.casHash(fh, h)) {
1712
+ node.hash = h;
1713
+ synchronized (node) { node.notifyAll(); };
1714
+ }
1715
+ }
1716
+ }
1717
+ if (count != 0)
1718
+ break;
1719
+ }
1720
+ else if ((fh = f.hash) == MOVED) {
1721
+ if ((fk = f.key) instanceof TreeBin) {
1722
+ TreeBin t = (TreeBin)fk;
1723
+ t.acquire(0);
1724
+ try {
1725
+ if (tabAt(tab, i) == f) {
1726
+ count = 1;
1727
+ TreeNode p = t.getTreeNode(h, k, t.root);
1728
+ Object pv;
1729
+ if (p == null) {
1730
+ if (onlyIfPresent)
1731
+ break;
1732
+ pv = null;
1733
+ } else
1734
+ pv = p.val;
1735
+ if ((val = mf.apply(k, (V)pv)) != null) {
1736
+ if (p != null)
1737
+ p.val = val;
1738
+ else {
1739
+ count = 2;
1740
+ delta = 1;
1741
+ t.putTreeNode(h, k, val);
1742
+ }
1743
+ }
1744
+ else if (p != null) {
1745
+ delta = -1;
1746
+ t.deleteTreeNode(p);
1747
+ }
1748
+ }
1749
+ } finally {
1750
+ t.release(0);
1751
+ }
1752
+ if (count != 0)
1753
+ break;
1754
+ }
1755
+ else
1756
+ tab = (Node[])fk;
1757
+ }
1758
+ else if ((fh & LOCKED) != 0) {
1759
+ checkForResize();
1760
+ f.tryAwaitLock(tab, i);
1761
+ }
1762
+ else if (f.casHash(fh, fh | LOCKED)) {
1763
+ try {
1764
+ if (tabAt(tab, i) == f) {
1765
+ count = 1;
1766
+ for (Node e = f, pred = null;; ++count) {
1767
+ Object ek, ev;
1768
+ if ((e.hash & HASH_BITS) == h &&
1769
+ (ev = e.val) != null &&
1770
+ ((ek = e.key) == k || k.equals(ek))) {
1771
+ val = mf.apply(k, (V)ev);
1772
+ if (val != null)
1773
+ e.val = val;
1774
+ else {
1775
+ delta = -1;
1776
+ Node en = e.next;
1777
+ if (pred != null)
1778
+ pred.next = en;
1779
+ else
1780
+ setTabAt(tab, i, en);
1781
+ }
1782
+ break;
1783
+ }
1784
+ pred = e;
1785
+ if ((e = e.next) == null) {
1786
+ if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1787
+ pred.next = new Node(h, k, val, null);
1788
+ delta = 1;
1789
+ if (count >= TREE_THRESHOLD)
1790
+ replaceWithTreeBin(tab, i, k);
1791
+ }
1792
+ break;
1793
+ }
1794
+ }
1795
+ }
1796
+ } finally {
1797
+ if (!f.casHash(fh | LOCKED, fh)) {
1798
+ f.hash = fh;
1799
+ synchronized (f) { f.notifyAll(); };
1800
+ }
1801
+ }
1802
+ if (count != 0) {
1803
+ if (tab.length <= 64)
1804
+ count = 2;
1805
+ break;
1806
+ }
1807
+ }
1808
+ }
1809
+ if (delta != 0) {
1810
+ counter.add((long)delta);
1811
+ if (count > 1)
1812
+ checkForResize();
1813
+ }
1814
+ return val;
1815
+ }
1816
+
1817
+ /** Implementation for merge */
1818
+ @SuppressWarnings("unchecked") private final Object internalMerge
1819
+ (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1820
+ int h = spread(k.hashCode());
1821
+ Object val = null;
1822
+ int delta = 0;
1823
+ int count = 0;
1824
+ for (Node[] tab = table;;) {
1825
+ int i; Node f; int fh; Object fk, fv;
1826
+ if (tab == null)
1827
+ tab = initTable();
1828
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1829
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1830
+ delta = 1;
1831
+ val = v;
1832
+ break;
1833
+ }
1834
+ }
1835
+ else if ((fh = f.hash) == MOVED) {
1836
+ if ((fk = f.key) instanceof TreeBin) {
1837
+ TreeBin t = (TreeBin)fk;
1838
+ t.acquire(0);
1839
+ try {
1840
+ if (tabAt(tab, i) == f) {
1841
+ count = 1;
1842
+ TreeNode p = t.getTreeNode(h, k, t.root);
1843
+ val = (p == null) ? v : mf.apply((V)p.val, v);
1844
+ if (val != null) {
1845
+ if (p != null)
1846
+ p.val = val;
1847
+ else {
1848
+ count = 2;
1849
+ delta = 1;
1850
+ t.putTreeNode(h, k, val);
1851
+ }
1852
+ }
1853
+ else if (p != null) {
1854
+ delta = -1;
1855
+ t.deleteTreeNode(p);
1856
+ }
1857
+ }
1858
+ } finally {
1859
+ t.release(0);
1860
+ }
1861
+ if (count != 0)
1862
+ break;
1863
+ }
1864
+ else
1865
+ tab = (Node[])fk;
1866
+ }
1867
+ else if ((fh & LOCKED) != 0) {
1868
+ checkForResize();
1869
+ f.tryAwaitLock(tab, i);
1870
+ }
1871
+ else if (f.casHash(fh, fh | LOCKED)) {
1872
+ try {
1873
+ if (tabAt(tab, i) == f) {
1874
+ count = 1;
1875
+ for (Node e = f, pred = null;; ++count) {
1876
+ Object ek, ev;
1877
+ if ((e.hash & HASH_BITS) == h &&
1878
+ (ev = e.val) != null &&
1879
+ ((ek = e.key) == k || k.equals(ek))) {
1880
+ val = mf.apply((V)ev, v);
1881
+ if (val != null)
1882
+ e.val = val;
1883
+ else {
1884
+ delta = -1;
1885
+ Node en = e.next;
1886
+ if (pred != null)
1887
+ pred.next = en;
1888
+ else
1889
+ setTabAt(tab, i, en);
1890
+ }
1891
+ break;
1892
+ }
1893
+ pred = e;
1894
+ if ((e = e.next) == null) {
1895
+ val = v;
1896
+ pred.next = new Node(h, k, val, null);
1897
+ delta = 1;
1898
+ if (count >= TREE_THRESHOLD)
1899
+ replaceWithTreeBin(tab, i, k);
1900
+ break;
1901
+ }
1902
+ }
1903
+ }
1904
+ } finally {
1905
+ if (!f.casHash(fh | LOCKED, fh)) {
1906
+ f.hash = fh;
1907
+ synchronized (f) { f.notifyAll(); };
1908
+ }
1909
+ }
1910
+ if (count != 0) {
1911
+ if (tab.length <= 64)
1912
+ count = 2;
1913
+ break;
1914
+ }
1915
+ }
1916
+ }
1917
+ if (delta != 0) {
1918
+ counter.add((long)delta);
1919
+ if (count > 1)
1920
+ checkForResize();
1921
+ }
1922
+ return val;
1923
+ }
1924
+
1925
+ /** Implementation for putAll */
1926
+ private final void internalPutAll(Map<?, ?> m) {
1927
+ tryPresize(m.size());
1928
+ long delta = 0L; // number of uncommitted additions
1929
+ boolean npe = false; // to throw exception on exit for nulls
1930
+ try { // to clean up counts on other exceptions
1931
+ for (Map.Entry<?, ?> entry : m.entrySet()) {
1932
+ Object k, v;
1933
+ if (entry == null || (k = entry.getKey()) == null ||
1934
+ (v = entry.getValue()) == null) {
1935
+ npe = true;
1936
+ break;
1937
+ }
1938
+ int h = spread(k.hashCode());
1939
+ for (Node[] tab = table;;) {
1940
+ int i; Node f; int fh; Object fk;
1941
+ if (tab == null)
1942
+ tab = initTable();
1943
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1944
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1945
+ ++delta;
1946
+ break;
1947
+ }
1948
+ }
1949
+ else if ((fh = f.hash) == MOVED) {
1950
+ if ((fk = f.key) instanceof TreeBin) {
1951
+ TreeBin t = (TreeBin)fk;
1952
+ boolean validated = false;
1953
+ t.acquire(0);
1954
+ try {
1955
+ if (tabAt(tab, i) == f) {
1956
+ validated = true;
1957
+ TreeNode p = t.getTreeNode(h, k, t.root);
1958
+ if (p != null)
1959
+ p.val = v;
1960
+ else {
1961
+ t.putTreeNode(h, k, v);
1962
+ ++delta;
1963
+ }
1964
+ }
1965
+ } finally {
1966
+ t.release(0);
1967
+ }
1968
+ if (validated)
1969
+ break;
1970
+ }
1971
+ else
1972
+ tab = (Node[])fk;
1973
+ }
1974
+ else if ((fh & LOCKED) != 0) {
1975
+ counter.add(delta);
1976
+ delta = 0L;
1977
+ checkForResize();
1978
+ f.tryAwaitLock(tab, i);
1979
+ }
1980
+ else if (f.casHash(fh, fh | LOCKED)) {
1981
+ int count = 0;
1982
+ try {
1983
+ if (tabAt(tab, i) == f) {
1984
+ count = 1;
1985
+ for (Node e = f;; ++count) {
1986
+ Object ek, ev;
1987
+ if ((e.hash & HASH_BITS) == h &&
1988
+ (ev = e.val) != null &&
1989
+ ((ek = e.key) == k || k.equals(ek))) {
1990
+ e.val = v;
1991
+ break;
1992
+ }
1993
+ Node last = e;
1994
+ if ((e = e.next) == null) {
1995
+ ++delta;
1996
+ last.next = new Node(h, k, v, null);
1997
+ if (count >= TREE_THRESHOLD)
1998
+ replaceWithTreeBin(tab, i, k);
1999
+ break;
2000
+ }
2001
+ }
2002
+ }
2003
+ } finally {
2004
+ if (!f.casHash(fh | LOCKED, fh)) {
2005
+ f.hash = fh;
2006
+ synchronized (f) { f.notifyAll(); };
2007
+ }
2008
+ }
2009
+ if (count != 0) {
2010
+ if (count > 1) {
2011
+ counter.add(delta);
2012
+ delta = 0L;
2013
+ checkForResize();
2014
+ }
2015
+ break;
2016
+ }
2017
+ }
2018
+ }
2019
+ }
2020
+ } finally {
2021
+ if (delta != 0)
2022
+ counter.add(delta);
2023
+ }
2024
+ if (npe)
2025
+ throw new NullPointerException();
2026
+ }
2027
+
2028
+ /* ---------------- Table Initialization and Resizing -------------- */
2029
+
2030
+ /**
2031
+ * Returns a power of two table size for the given desired capacity.
2032
+ * See Hacker's Delight, sec 3.2
2033
+ */
2034
+ private static final int tableSizeFor(int c) {
2035
+ int n = c - 1;
2036
+ n |= n >>> 1;
2037
+ n |= n >>> 2;
2038
+ n |= n >>> 4;
2039
+ n |= n >>> 8;
2040
+ n |= n >>> 16;
2041
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2042
+ }
2043
+
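A worked example of the bit-smearing above (numbers are ours, easy to re-check by hand): for c = 100, n starts at 99 and the successive ORs yield 115, 127, 127, 127, 127, so the method returns 128, the next power of two at or above the request. The harness below copies the same logic, assuming the map's MAXIMUM_CAPACITY of 1 << 30.

    // Stand-alone check of the rounding logic; harness is ours.
    public class TableSizeForDemo {
        static final int MAXIMUM_CAPACITY = 1 << 30;

        static int tableSizeFor(int c) {
            int n = c - 1;
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
            return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
        }

        public static void main(String[] args) {
            System.out.println(tableSizeFor(100));  // 128
            System.out.println(tableSizeFor(16));   // 16: exact powers of two are preserved
            System.out.println(tableSizeFor(17));   // 32
        }
    }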
2044
+ /**
2045
+ * Initializes table, using the size recorded in sizeCtl.
2046
+ */
2047
+ private final Node[] initTable() {
2048
+ Node[] tab; int sc;
2049
+ while ((tab = table) == null) {
2050
+ if ((sc = sizeCtl) < 0)
2051
+ Thread.yield(); // lost initialization race; just spin
2052
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2053
+ try {
2054
+ if ((tab = table) == null) {
2055
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2056
+ tab = table = new Node[n];
2057
+ sc = n - (n >>> 2);
2058
+ }
2059
+ } finally {
2060
+ sizeCtl = sc;
2061
+ }
2062
+ break;
2063
+ }
2064
+ }
2065
+ return tab;
2066
+ }
2067
+
2068
+ /**
2069
+ * If table is too small and not already resizing, creates next
2070
+ * table and transfers bins. Rechecks occupancy after a transfer
2071
+ * to see if another resize is already needed because resizings
2072
+ * are lagging additions.
2073
+ */
2074
+ private final void checkForResize() {
2075
+ Node[] tab; int n, sc;
2076
+ while ((tab = table) != null &&
2077
+ (n = tab.length) < MAXIMUM_CAPACITY &&
2078
+ (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2079
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2080
+ try {
2081
+ if (tab == table) {
2082
+ table = rebuild(tab);
2083
+ sc = (n << 1) - (n >>> 1);
2084
+ }
2085
+ } finally {
2086
+ sizeCtl = sc;
2087
+ }
2088
+ }
2089
+ }
2090
+
2091
+ /**
2092
+ * Tries to presize table to accommodate the given number of elements.
2093
+ *
2094
+ * @param size number of elements (doesn't need to be perfectly accurate)
2095
+ */
2096
+ private final void tryPresize(int size) {
2097
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2098
+ tableSizeFor(size + (size >>> 1) + 1);
2099
+ int sc;
2100
+ while ((sc = sizeCtl) >= 0) {
2101
+ Node[] tab = table; int n;
2102
+ if (tab == null || (n = tab.length) == 0) {
2103
+ n = (sc > c) ? sc : c;
2104
+ if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2105
+ try {
2106
+ if (table == tab) {
2107
+ table = new Node[n];
2108
+ sc = n - (n >>> 2);
2109
+ }
2110
+ } finally {
2111
+ sizeCtl = sc;
2112
+ }
2113
+ }
2114
+ }
2115
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
2116
+ break;
2117
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2118
+ try {
2119
+ if (table == tab) {
2120
+ table = rebuild(tab);
2121
+ sc = (n << 1) - (n >>> 1);
2122
+ }
2123
+ } finally {
2124
+ sizeCtl = sc;
2125
+ }
2126
+ }
2127
+ }
2128
+ }
2129
+
2130
+ /*
2131
+ * Moves and/or copies the nodes in each bin to new table. See
2132
+ * above for explanation.
2133
+ *
2134
+ * @return the new table
2135
+ */
2136
+ private static final Node[] rebuild(Node[] tab) {
2137
+ int n = tab.length;
2138
+ Node[] nextTab = new Node[n << 1];
2139
+ Node fwd = new Node(MOVED, nextTab, null, null);
2140
+ int[] buffer = null; // holds bins to revisit; null until needed
2141
+ Node rev = null; // reverse forwarder; null until needed
2142
+ int nbuffered = 0; // the number of bins in buffer list
2143
+ int bufferIndex = 0; // buffer index of current buffered bin
2144
+ int bin = n - 1; // current non-buffered bin or -1 if none
2145
+
2146
+ for (int i = bin;;) { // start upwards sweep
2147
+ int fh; Node f;
2148
+ if ((f = tabAt(tab, i)) == null) {
2149
+ if (bin >= 0) { // Unbuffered; no lock needed (or available)
2150
+ if (!casTabAt(tab, i, f, fwd))
2151
+ continue;
2152
+ }
2153
+ else { // transiently use a locked forwarding node
2154
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2155
+ if (!casTabAt(tab, i, f, g))
2156
+ continue;
2157
+ setTabAt(nextTab, i, null);
2158
+ setTabAt(nextTab, i + n, null);
2159
+ setTabAt(tab, i, fwd);
2160
+ if (!g.casHash(MOVED|LOCKED, MOVED)) {
2161
+ g.hash = MOVED;
2162
+ synchronized (g) { g.notifyAll(); }
2163
+ }
2164
+ }
2165
+ }
2166
+ else if ((fh = f.hash) == MOVED) {
2167
+ Object fk = f.key;
2168
+ if (fk instanceof TreeBin) {
2169
+ TreeBin t = (TreeBin)fk;
2170
+ boolean validated = false;
2171
+ t.acquire(0);
2172
+ try {
2173
+ if (tabAt(tab, i) == f) {
2174
+ validated = true;
2175
+ splitTreeBin(nextTab, i, t);
2176
+ setTabAt(tab, i, fwd);
2177
+ }
2178
+ } finally {
2179
+ t.release(0);
2180
+ }
2181
+ if (!validated)
2182
+ continue;
2183
+ }
2184
+ }
2185
+ else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2186
+ boolean validated = false;
2187
+ try { // split to lo and hi lists; copying as needed
2188
+ if (tabAt(tab, i) == f) {
2189
+ validated = true;
2190
+ splitBin(nextTab, i, f);
2191
+ setTabAt(tab, i, fwd);
2192
+ }
2193
+ } finally {
2194
+ if (!f.casHash(fh | LOCKED, fh)) {
2195
+ f.hash = fh;
2196
+ synchronized (f) { f.notifyAll(); };
2197
+ }
2198
+ }
2199
+ if (!validated)
2200
+ continue;
2201
+ }
2202
+ else {
2203
+ if (buffer == null) // initialize buffer for revisits
2204
+ buffer = new int[TRANSFER_BUFFER_SIZE];
2205
+ if (bin < 0 && bufferIndex > 0) {
2206
+ int j = buffer[--bufferIndex];
2207
+ buffer[bufferIndex] = i;
2208
+ i = j; // swap with another bin
2209
+ continue;
2210
+ }
2211
+ if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2212
+ f.tryAwaitLock(tab, i);
2213
+ continue; // no other options -- block
2214
+ }
2215
+ if (rev == null) // initialize reverse-forwarder
2216
+ rev = new Node(MOVED, tab, null, null);
2217
+ if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2218
+ continue; // recheck before adding to list
2219
+ buffer[nbuffered++] = i;
2220
+ setTabAt(nextTab, i, rev); // install place-holders
2221
+ setTabAt(nextTab, i + n, rev);
2222
+ }
2223
+
2224
+ if (bin > 0)
2225
+ i = --bin;
2226
+ else if (buffer != null && nbuffered > 0) {
2227
+ bin = -1;
2228
+ i = buffer[bufferIndex = --nbuffered];
2229
+ }
2230
+ else
2231
+ return nextTab;
2232
+ }
2233
+ }
2234
+
2235
+ /**
2236
+ * Splits a normal bin with list headed by e into lo and hi parts;
2237
+ * installs in given table.
2238
+ */
2239
+ private static void splitBin(Node[] nextTab, int i, Node e) {
2240
+ int bit = nextTab.length >>> 1; // bit to split on
2241
+ int runBit = e.hash & bit;
2242
+ Node lastRun = e, lo = null, hi = null;
2243
+ for (Node p = e.next; p != null; p = p.next) {
2244
+ int b = p.hash & bit;
2245
+ if (b != runBit) {
2246
+ runBit = b;
2247
+ lastRun = p;
2248
+ }
2249
+ }
2250
+ if (runBit == 0)
2251
+ lo = lastRun;
2252
+ else
2253
+ hi = lastRun;
2254
+ for (Node p = e; p != lastRun; p = p.next) {
2255
+ int ph = p.hash & HASH_BITS;
2256
+ Object pk = p.key, pv = p.val;
2257
+ if ((ph & bit) == 0)
2258
+ lo = new Node(ph, pk, pv, lo);
2259
+ else
2260
+ hi = new Node(ph, pk, pv, hi);
2261
+ }
2262
+ setTabAt(nextTab, i, lo);
2263
+ setTabAt(nextTab, i + bit, hi);
2264
+ }
2265
+
2266
+ /**
2267
+ * Splits a tree bin into lo and hi parts; installs in given table.
2268
+ */
2269
+ private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2270
+ int bit = nextTab.length >>> 1;
2271
+ TreeBin lt = new TreeBin();
2272
+ TreeBin ht = new TreeBin();
2273
+ int lc = 0, hc = 0;
2274
+ for (Node e = t.first; e != null; e = e.next) {
2275
+ int h = e.hash & HASH_BITS;
2276
+ Object k = e.key, v = e.val;
2277
+ if ((h & bit) == 0) {
2278
+ ++lc;
2279
+ lt.putTreeNode(h, k, v);
2280
+ }
2281
+ else {
2282
+ ++hc;
2283
+ ht.putTreeNode(h, k, v);
2284
+ }
2285
+ }
2286
+ Node ln, hn; // throw away trees if too small
2287
+ if (lc <= (TREE_THRESHOLD >>> 1)) {
2288
+ ln = null;
2289
+ for (Node p = lt.first; p != null; p = p.next)
2290
+ ln = new Node(p.hash, p.key, p.val, ln);
2291
+ }
2292
+ else
2293
+ ln = new Node(MOVED, lt, null, null);
2294
+ setTabAt(nextTab, i, ln);
2295
+ if (hc <= (TREE_THRESHOLD >>> 1)) {
2296
+ hn = null;
2297
+ for (Node p = ht.first; p != null; p = p.next)
2298
+ hn = new Node(p.hash, p.key, p.val, hn);
2299
+ }
2300
+ else
2301
+ hn = new Node(MOVED, ht, null, null);
2302
+ setTabAt(nextTab, i + bit, hn);
2303
+ }
2304
+
2305
+ /**
2306
+ * Implementation for clear. Steps through each bin, removing all
2307
+ * nodes.
2308
+ */
2309
+ private final void internalClear() {
2310
+ long delta = 0L; // negative number of deletions
2311
+ int i = 0;
2312
+ Node[] tab = table;
2313
+ while (tab != null && i < tab.length) {
2314
+ int fh; Object fk;
2315
+ Node f = tabAt(tab, i);
2316
+ if (f == null)
2317
+ ++i;
2318
+ else if ((fh = f.hash) == MOVED) {
2319
+ if ((fk = f.key) instanceof TreeBin) {
2320
+ TreeBin t = (TreeBin)fk;
2321
+ t.acquire(0);
2322
+ try {
2323
+ if (tabAt(tab, i) == f) {
2324
+ for (Node p = t.first; p != null; p = p.next) {
2325
+ if (p.val != null) { // (currently always true)
2326
+ p.val = null;
2327
+ --delta;
2328
+ }
2329
+ }
2330
+ t.first = null;
2331
+ t.root = null;
2332
+ ++i;
2333
+ }
2334
+ } finally {
2335
+ t.release(0);
2336
+ }
2337
+ }
2338
+ else
2339
+ tab = (Node[])fk;
2340
+ }
2341
+ else if ((fh & LOCKED) != 0) {
2342
+ counter.add(delta); // opportunistically update count
2343
+ delta = 0L;
2344
+ f.tryAwaitLock(tab, i);
2345
+ }
2346
+ else if (f.casHash(fh, fh | LOCKED)) {
2347
+ try {
2348
+ if (tabAt(tab, i) == f) {
2349
+ for (Node e = f; e != null; e = e.next) {
2350
+ if (e.val != null) { // (currently always true)
2351
+ e.val = null;
2352
+ --delta;
2353
+ }
2354
+ }
2355
+ setTabAt(tab, i, null);
2356
+ ++i;
2357
+ }
2358
+ } finally {
2359
+ if (!f.casHash(fh | LOCKED, fh)) {
2360
+ f.hash = fh;
2361
+ synchronized (f) { f.notifyAll(); };
2362
+ }
2363
+ }
2364
+ }
2365
+ }
2366
+ if (delta != 0)
2367
+ counter.add(delta);
2368
+ }
2369
+
2370
+ /* ---------------- Table Traversal -------------- */
2371
+
2372
+ /**
2373
+ * Encapsulates traversal for methods such as containsValue; also
2374
+ * serves as a base class for other iterators and bulk tasks.
2375
+ *
2376
+ * At each step, the iterator snapshots the key ("nextKey") and
2377
+ * value ("nextVal") of a valid node (i.e., one that, at point of
2378
+ * snapshot, has a non-null user value). Because val fields can
2379
+ * change (including to null, indicating deletion), field nextVal
2380
+ * might not be accurate at point of use, but still maintains the
2381
+ * weak consistency property of holding a value that was once
2382
+ * valid. To support iterator.remove, the nextKey field is not
2383
+ * updated (nulled out) when the iterator cannot advance.
2384
+ *
2385
+ * Internal traversals directly access these fields, as in:
2386
+ * {@code while (it.advance() != null) { process(it.nextKey); }}
2387
+ *
2388
+ * Exported iterators must track whether the iterator has advanced
2389
+ * (in hasNext vs next) (by setting/checking/nulling field
2390
+ * nextVal), and then extract key, value, or key-value pairs as
2391
+ * return values of next().
2392
+ *
2393
+ * The iterator visits once each still-valid node that was
2394
+ * reachable upon iterator construction. It might miss some that
2395
+ * were added to a bin after the bin was visited, which is OK wrt
2396
+ * consistency guarantees. Maintaining this property in the face
2397
+ * of possible ongoing resizes requires a fair amount of
2398
+ * bookkeeping state that is difficult to optimize away amidst
2399
+ * volatile accesses. Even so, traversal maintains reasonable
2400
+ * throughput.
2401
+ *
2402
+ * Normally, iteration proceeds bin-by-bin traversing lists.
2403
+ * However, if the table has been resized, then all future steps
2404
+ * must traverse both the bin at the current index as well as at
2405
+ * (index + baseSize); and so on for further resizings. To
2406
+ * paranoically cope with potential sharing by users of iterators
2407
+ * across threads, iteration terminates if a bounds check fails
2408
+ * for a table read.
2409
+ *
2410
+ * In the upstream jsr166e version, this class extends ForkJoinTask to
2411
+ * streamline parallel iteration in bulk operations (see BulkTask), at a
2412
+ * cost of only an int of space overhead. This adaptation drops that
2413
+ * superclass but keeps the corresponding method stubs below. Because
2414
+ * ForkJoinTask is Serializable, but iterators need not be, the
2415
+ * serial warning suppressions are retained.
2416
+ */
2417
+ @SuppressWarnings("serial") static class Traverser<K,V,R> {
2418
+ final ConcurrentHashMapV8<K, V> map;
2419
+ Node next; // the next entry to use
2420
+ Object nextKey; // cached key field of next
2421
+ Object nextVal; // cached val field of next
2422
+ Node[] tab; // current table; updated if resized
2423
+ int index; // index of bin to use next
2424
+ int baseIndex; // current index of initial table
2425
+ int baseLimit; // index bound for initial table
2426
+ int baseSize; // initial table size
2427
+
2428
+ /** Creates iterator for all entries in the table. */
2429
+ Traverser(ConcurrentHashMapV8<K, V> map) {
2430
+ this.map = map;
2431
+ }
2432
+
2433
+ /** Creates iterator for split() methods */
2434
+ Traverser(Traverser<K,V,?> it) {
2435
+ ConcurrentHashMapV8<K, V> m; Node[] t;
2436
+ if ((m = this.map = it.map) == null)
2437
+ t = null;
2438
+ else if ((t = it.tab) == null && // force parent tab initialization
2439
+ (t = it.tab = m.table) != null)
2440
+ it.baseLimit = it.baseSize = t.length;
2441
+ this.tab = t;
2442
+ this.baseSize = it.baseSize;
2443
+ it.baseLimit = this.index = this.baseIndex =
2444
+ ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2445
+ }
2446
+
2447
+ /**
2448
+ * Advances next; returns nextVal or null if terminated.
2449
+ * See above for explanation.
2450
+ */
2451
+ final Object advance() {
2452
+ Node e = next;
2453
+ Object ev = null;
2454
+ outer: do {
2455
+ if (e != null) // advance past used/skipped node
2456
+ e = e.next;
2457
+ while (e == null) { // get to next non-null bin
2458
+ ConcurrentHashMapV8<K, V> m;
2459
+ Node[] t; int b, i, n; Object ek; // checks must use locals
2460
+ if ((t = tab) != null)
2461
+ n = t.length;
2462
+ else if ((m = map) != null && (t = tab = m.table) != null)
2463
+ n = baseLimit = baseSize = t.length;
2464
+ else
2465
+ break outer;
2466
+ if ((b = baseIndex) >= baseLimit ||
2467
+ (i = index) < 0 || i >= n)
2468
+ break outer;
2469
+ if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2470
+ if ((ek = e.key) instanceof TreeBin)
2471
+ e = ((TreeBin)ek).first;
2472
+ else {
2473
+ tab = (Node[])ek;
2474
+ continue; // restarts due to null val
2475
+ }
2476
+ } // visit upper slots if present
2477
+ index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2478
+ }
2479
+ nextKey = e.key;
2480
+ } while ((ev = e.val) == null); // skip deleted or special nodes
2481
+ next = e;
2482
+ return nextVal = ev;
2483
+ }
2484
+
2485
+ public final void remove() {
2486
+ Object k = nextKey;
2487
+ if (k == null && (advance() == null || (k = nextKey) == null))
2488
+ throw new IllegalStateException();
2489
+ map.internalReplace(k, null, null);
2490
+ }
2491
+
2492
+ public final boolean hasNext() {
2493
+ return nextVal != null || advance() != null;
2494
+ }
2495
+
2496
+ public final boolean hasMoreElements() { return hasNext(); }
2497
+ public final void setRawResult(Object x) { }
2498
+ public R getRawResult() { return null; }
2499
+ public boolean exec() { return true; }
2500
+ }
2501
+
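Illustrative sketch (ours) of the internal scan idiom described in the Traverser comment above; containsValue, hashCode, toString and equals below all follow this exact shape. The helper name is invented, it is written as it would appear inside ConcurrentHashMapV8 (Traverser is package-internal), and it assumes a non-null argument, as containsValue does.

    boolean anyValueMatches(Object target) {
        Traverser<K, V, Object> it = new Traverser<K, V, Object>(this);
        Object v;
        while ((v = it.advance()) != null) {   // v is the snapshotted nextVal of a live node
            if (v == target || target.equals(v))
                return true;                   // it.nextKey holds the corresponding key
        }
        return false;
    }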
2502
+ /* ---------------- Public operations -------------- */
2503
+
2504
+ /**
2505
+ * Creates a new, empty map with the default initial table size (16).
2506
+ */
2507
+ public ConcurrentHashMapV8() {
2508
+ this.counter = new LongAdder();
2509
+ }
2510
+
2511
+ /**
2512
+ * Creates a new, empty map with an initial table size
2513
+ * accommodating the specified number of elements without the need
2514
+ * to dynamically resize.
2515
+ *
2516
+ * @param initialCapacity The implementation performs internal
2517
+ * sizing to accommodate this many elements.
2518
+ * @throws IllegalArgumentException if the initial capacity of
2519
+ * elements is negative
2520
+ */
2521
+ public ConcurrentHashMapV8(int initialCapacity) {
2522
+ if (initialCapacity < 0)
2523
+ throw new IllegalArgumentException();
2524
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2525
+ MAXIMUM_CAPACITY :
2526
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2527
+ this.counter = new LongAdder();
2528
+ this.sizeCtl = cap;
2529
+ }
2530
+
2531
+ /**
2532
+ * Creates a new map with the same mappings as the given map.
2533
+ *
2534
+ * @param m the map
2535
+ */
2536
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2537
+ this.counter = new LongAdder();
2538
+ this.sizeCtl = DEFAULT_CAPACITY;
2539
+ internalPutAll(m);
2540
+ }
2541
+
2542
+ /**
2543
+ * Creates a new, empty map with an initial table size based on
2544
+ * the given number of elements ({@code initialCapacity}) and
2545
+ * initial table density ({@code loadFactor}).
2546
+ *
2547
+ * @param initialCapacity the initial capacity. The implementation
2548
+ * performs internal sizing to accommodate this many elements,
2549
+ * given the specified load factor.
2550
+ * @param loadFactor the load factor (table density) for
2551
+ * establishing the initial table size
2552
+ * @throws IllegalArgumentException if the initial capacity of
2553
+ * elements is negative or the load factor is nonpositive
2554
+ *
2555
+ * @since 1.6
2556
+ */
2557
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2558
+ this(initialCapacity, loadFactor, 1);
2559
+ }
2560
+
2561
+ /**
2562
+ * Creates a new, empty map with an initial table size based on
2563
+ * the given number of elements ({@code initialCapacity}), table
2564
+ * density ({@code loadFactor}), and number of concurrently
2565
+ * updating threads ({@code concurrencyLevel}).
2566
+ *
2567
+ * @param initialCapacity the initial capacity. The implementation
2568
+ * performs internal sizing to accommodate this many elements,
2569
+ * given the specified load factor.
2570
+ * @param loadFactor the load factor (table density) for
2571
+ * establishing the initial table size
2572
+ * @param concurrencyLevel the estimated number of concurrently
2573
+ * updating threads. The implementation may use this value as
2574
+ * a sizing hint.
2575
+ * @throws IllegalArgumentException if the initial capacity is
2576
+ * negative or the load factor or concurrencyLevel are
2577
+ * nonpositive
2578
+ */
2579
+ public ConcurrentHashMapV8(int initialCapacity,
2580
+ float loadFactor, int concurrencyLevel) {
2581
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2582
+ throw new IllegalArgumentException();
2583
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
2584
+ initialCapacity = concurrencyLevel; // as estimated threads
2585
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2586
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2587
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
2588
+ this.counter = new LongAdder();
2589
+ this.sizeCtl = cap;
2590
+ }
2591
+
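To make the sizing arithmetic just above concrete (numbers are ours): with initialCapacity = 16 and loadFactor = 0.75f, size becomes (long)(1.0 + 16 / 0.75f) = 22, and tableSizeFor(22) rounds that up to an initial 32-slot table.

    // Tiny check of the constructor's sizing math; harness is ours.
    public class SizingDemo {
        public static void main(String[] args) {
            int initialCapacity = 16;
            float loadFactor = 0.75f;
            long size = (long)(1.0 + (long)initialCapacity / loadFactor);
            System.out.println(size);   // 22 -> tableSizeFor(22) == 32
        }
    }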
2592
+ /**
2593
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2594
+ * from the given type to {@code Boolean.TRUE}.
2595
+ *
2596
+ * @return the new set
2597
+ */
2598
+ public static <K> KeySetView<K,Boolean> newKeySet() {
2599
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(),
2600
+ Boolean.TRUE);
2601
+ }
2602
+
2603
+ /**
2604
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2605
+ * from the given type to {@code Boolean.TRUE}.
2606
+ *
2607
+ * @param initialCapacity The implementation performs internal
2608
+ * sizing to accommodate this many elements.
2609
+ * @throws IllegalArgumentException if the initial capacity of
2610
+ * elements is negative
2611
+ * @return the new set
2612
+ */
2613
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2614
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity),
2615
+ Boolean.TRUE);
2616
+ }
2617
+
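A minimal usage sketch (ours) for the key-set factories above: elements added through the view are stored as mappings to Boolean.TRUE, which is what makes the view usable as a concurrent set. This assumes the Set operations the KeySetView javadoc describes; class and variable names are invented.

    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8;
    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8.KeySetView;

    class KeySetDemo {
        public static void main(String[] args) {
            KeySetView<String, Boolean> seen = ConcurrentHashMapV8.newKeySet();
            seen.add("first");                           // stored as ("first" -> Boolean.TRUE)
            System.out.println(seen.contains("first"));  // true
            System.out.println(seen.contains("other"));  // false
        }
    }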
2618
+ /**
2619
+ * {@inheritDoc}
2620
+ */
2621
+ public boolean isEmpty() {
2622
+ return counter.sum() <= 0L; // ignore transient negative values
2623
+ }
2624
+
2625
+ /**
2626
+ * {@inheritDoc}
2627
+ */
2628
+ public int size() {
2629
+ long n = counter.sum();
2630
+ return ((n < 0L) ? 0 :
2631
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2632
+ (int)n);
2633
+ }
2634
+
2635
+ /**
2636
+ * Returns the number of mappings. This method should be used
2637
+ * instead of {@link #size} because a ConcurrentHashMapV8 may
2638
+ * contain more mappings than can be represented as an int. The
2639
+ * value returned is a snapshot; the actual count may differ if
2640
+ * there are ongoing concurrent insertions or removals.
2641
+ *
2642
+ * @return the number of mappings
2643
+ */
2644
+ public long mappingCount() {
2645
+ long n = counter.sum();
2646
+ return (n < 0L) ? 0L : n; // ignore transient negative values
2647
+ }
2648
+
2649
+ /**
2650
+ * Returns the value to which the specified key is mapped,
2651
+ * or {@code null} if this map contains no mapping for the key.
2652
+ *
2653
+ * <p>More formally, if this map contains a mapping from a key
2654
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
2655
+ * then this method returns {@code v}; otherwise it returns
2656
+ * {@code null}. (There can be at most one such mapping.)
2657
+ *
2658
+ * @throws NullPointerException if the specified key is null
2659
+ */
2660
+ @SuppressWarnings("unchecked") public V get(Object key) {
2661
+ if (key == null)
2662
+ throw new NullPointerException();
2663
+ return (V)internalGet(key);
2664
+ }
2665
+
2666
+ /**
2667
+ * Returns the value to which the specified key is mapped,
2668
+ * or the given defaultValue if this map contains no mapping for the key.
2669
+ *
2670
+ * @param key the key
2671
+ * @param defaultValue the value to return if this map contains
2672
+ * no mapping for the given key
2673
+ * @return the mapping for the key, if present; else the defaultValue
2674
+ * @throws NullPointerException if the specified key is null
2675
+ */
2676
+ @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2677
+ if (key == null)
2678
+ throw new NullPointerException();
2679
+ V v = (V) internalGet(key);
2680
+ return v == null ? defaultValue : v;
2681
+ }
2682
+
2683
+ /**
2684
+ * Tests if the specified object is a key in this table.
2685
+ *
2686
+ * @param key possible key
2687
+ * @return {@code true} if and only if the specified object
2688
+ * is a key in this table, as determined by the
2689
+ * {@code equals} method; {@code false} otherwise
2690
+ * @throws NullPointerException if the specified key is null
2691
+ */
2692
+ public boolean containsKey(Object key) {
2693
+ if (key == null)
2694
+ throw new NullPointerException();
2695
+ return internalGet(key) != null;
2696
+ }
2697
+
2698
+ /**
2699
+ * Returns {@code true} if this map maps one or more keys to the
2700
+ * specified value. Note: This method may require a full traversal
2701
+ * of the map, and is much slower than method {@code containsKey}.
2702
+ *
2703
+ * @param value value whose presence in this map is to be tested
2704
+ * @return {@code true} if this map maps one or more keys to the
2705
+ * specified value
2706
+ * @throws NullPointerException if the specified value is null
2707
+ */
2708
+ public boolean containsValue(Object value) {
2709
+ if (value == null)
2710
+ throw new NullPointerException();
2711
+ Object v;
2712
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2713
+ while ((v = it.advance()) != null) {
2714
+ if (v == value || value.equals(v))
2715
+ return true;
2716
+ }
2717
+ return false;
2718
+ }
2719
+
2720
+ /**
2721
+ * Legacy method testing if some key maps into the specified value
2722
+ * in this table. This method is identical in functionality to
2723
+ * {@link #containsValue}, and exists solely to ensure
2724
+ * full compatibility with class {@link java.util.Hashtable},
2725
+ * which supported this method prior to introduction of the
2726
+ * Java Collections framework.
2727
+ *
2728
+ * @param value a value to search for
2729
+ * @return {@code true} if and only if some key maps to the
2730
+ * {@code value} argument in this table as
2731
+ * determined by the {@code equals} method;
2732
+ * {@code false} otherwise
2733
+ * @throws NullPointerException if the specified value is null
2734
+ */
2735
+ public boolean contains(Object value) {
2736
+ return containsValue(value);
2737
+ }
2738
+
2739
+ /**
2740
+ * Maps the specified key to the specified value in this table.
2741
+ * Neither the key nor the value can be null.
2742
+ *
2743
+ * <p>The value can be retrieved by calling the {@code get} method
2744
+ * with a key that is equal to the original key.
2745
+ *
2746
+ * @param key key with which the specified value is to be associated
2747
+ * @param value value to be associated with the specified key
2748
+ * @return the previous value associated with {@code key}, or
2749
+ * {@code null} if there was no mapping for {@code key}
2750
+ * @throws NullPointerException if the specified key or value is null
2751
+ */
2752
+ @SuppressWarnings("unchecked") public V put(K key, V value) {
2753
+ if (key == null || value == null)
2754
+ throw new NullPointerException();
2755
+ return (V)internalPut(key, value);
2756
+ }
2757
+
2758
+ /**
2759
+ * {@inheritDoc}
2760
+ *
2761
+ * @return the previous value associated with the specified key,
2762
+ * or {@code null} if there was no mapping for the key
2763
+ * @throws NullPointerException if the specified key or value is null
2764
+ */
2765
+ @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2766
+ if (key == null || value == null)
2767
+ throw new NullPointerException();
2768
+ return (V)internalPutIfAbsent(key, value);
2769
+ }
2770
+
2771
+ /**
2772
+ * Copies all of the mappings from the specified map to this one.
2773
+ * These mappings replace any mappings that this map had for any of the
2774
+ * keys currently in the specified map.
2775
+ *
2776
+ * @param m mappings to be stored in this map
2777
+ */
2778
+ public void putAll(Map<? extends K, ? extends V> m) {
2779
+ internalPutAll(m);
2780
+ }
2781
+
2782
+ /**
2783
+ * If the specified key is not already associated with a value,
2784
+ * computes its value using the given mappingFunction and enters
2785
+ * it into the map unless null. This is equivalent to
2786
+ * <pre> {@code
2787
+ * if (map.containsKey(key))
2788
+ * return map.get(key);
2789
+ * value = mappingFunction.apply(key);
2790
+ * if (value != null)
2791
+ * map.put(key, value);
2792
+ * return value;}</pre>
2793
+ *
2794
+ * except that the action is performed atomically. If the
2795
+ * function returns {@code null} no mapping is recorded. If the
2796
+ * function itself throws an (unchecked) exception, the exception
2797
+ * is rethrown to its caller, and no mapping is recorded. Some
2798
+ * attempted update operations on this map by other threads may be
2799
+ * blocked while computation is in progress, so the computation
2800
+ * should be short and simple, and must not attempt to update any
2801
+ * other mappings of this Map. The most appropriate usage is to
2802
+ * construct a new object serving as an initial mapped value, or
2803
+ * memoized result, as in:
2804
+ *
2805
+ * <pre> {@code
2806
+ * map.computeIfAbsent(key, new Fun<K, V>() {
2807
+ * public V apply(K k) { return new Value(f(k)); }});}</pre>
2808
+ *
2809
+ * @param key key with which the specified value is to be associated
2810
+ * @param mappingFunction the function to compute a value
2811
+ * @return the current (existing or computed) value associated with
2812
+ * the specified key, or null if the computed value is null
2813
+ * @throws NullPointerException if the specified key or mappingFunction
2814
+ * is null
2815
+ * @throws IllegalStateException if the computation detectably
2816
+ * attempts a recursive update to this map that would
2817
+ * otherwise never complete
2818
+ * @throws RuntimeException or Error if the mappingFunction does so,
2819
+ * in which case the mapping is left unestablished
2820
+ */
2821
+ @SuppressWarnings("unchecked") public V computeIfAbsent
2822
+ (K key, Fun<? super K, ? extends V> mappingFunction) {
2823
+ if (key == null || mappingFunction == null)
2824
+ throw new NullPointerException();
2825
+ return (V)internalComputeIfAbsent(key, mappingFunction);
2826
+ }
2827
+
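A usage sketch (ours) of computeIfAbsent as a memoizing cache; the class, field and the stand-in computation are invented, while Fun is the map's own one-argument function interface used in the javadoc above. The function runs at most once per absent key, and other updates to that key may block while it runs.

    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8;
    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8.Fun;

    class MemoDemo {
        static final ConcurrentHashMapV8<String, Integer> cache =
            new ConcurrentHashMapV8<String, Integer>();

        static int lengthOf(String key) {
            return cache.computeIfAbsent(key, new Fun<String, Integer>() {
                public Integer apply(String k) {
                    return k.length();          // stand-in for an expensive computation
                }
            });
        }

        public static void main(String[] args) {
            System.out.println(lengthOf("thread_safe"));  // 11, computed once
            System.out.println(lengthOf("thread_safe"));  // 11, served from the map
        }
    }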
2828
+ /**
2829
+ * If the given key is present, computes a new mapping value given a key and
2830
+ * its current mapped value. This is equivalent to
2831
+ * <pre> {@code
2832
+ * if (map.containsKey(key)) {
2833
+ * value = remappingFunction.apply(key, map.get(key));
2834
+ * if (value != null)
2835
+ * map.put(key, value);
2836
+ * else
2837
+ * map.remove(key);
2838
+ * }
2839
+ * }</pre>
2840
+ *
2841
+ * except that the action is performed atomically. If the
2842
+ * function returns {@code null}, the mapping is removed. If the
2843
+ * function itself throws an (unchecked) exception, the exception
2844
+ * is rethrown to its caller, and the current mapping is left
2845
+ * unchanged. Some attempted update operations on this map by
2846
+ * other threads may be blocked while computation is in progress,
2847
+ * so the computation should be short and simple, and must not
2848
+ * attempt to update any other mappings of this Map.
2850
+ *
2851
+ * @param key key with which the specified value is to be associated
2852
+ * @param remappingFunction the function to compute a value
2853
+ * @return the new value associated with the specified key, or null if none
2854
+ * @throws NullPointerException if the specified key or remappingFunction
2855
+ * is null
2856
+ * @throws IllegalStateException if the computation detectably
2857
+ * attempts a recursive update to this map that would
2858
+ * otherwise never complete
2859
+ * @throws RuntimeException or Error if the remappingFunction does so,
2860
+ * in which case the mapping is unchanged
2861
+ */
2862
+ @SuppressWarnings("unchecked") public V computeIfPresent
2863
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2864
+ if (key == null || remappingFunction == null)
2865
+ throw new NullPointerException();
2866
+ return (V)internalCompute(key, true, remappingFunction);
2867
+ }
2868
+
2869
+ /**
2870
+ * Computes a new mapping value given a key and
2871
+ * its current mapped value (or {@code null} if there is no current
2872
+ * mapping). This is equivalent to
2873
+ * <pre> {@code
2874
+ * value = remappingFunction.apply(key, map.get(key));
2875
+ * if (value != null)
2876
+ * map.put(key, value);
2877
+ * else
2878
+ * map.remove(key);
2879
+ * }</pre>
2880
+ *
2881
+ * except that the action is performed atomically. If the
2882
+ * function returns {@code null}, the mapping is removed. If the
2883
+ * function itself throws an (unchecked) exception, the exception
2884
+ * is rethrown to its caller, and the current mapping is left
2885
+ * unchanged. Some attempted update operations on this map by
2886
+ * other threads may be blocked while computation is in progress,
2887
+ * so the computation should be short and simple, and must not
2888
+ * attempt to update any other mappings of this Map. For example,
2889
+ * to either create or append new messages to a value mapping:
2890
+ *
2891
+ * <pre> {@code
2892
+ * Map<Key, String> map = ...;
2893
+ * final String msg = ...;
2894
+ * map.compute(key, new BiFun<Key, String, String>() {
2895
+ * public String apply(Key k, String v) {
2896
+ * return (v == null) ? msg : v + msg; }});}</pre>
2897
+ *
2898
+ * @param key key with which the specified value is to be associated
2899
+ * @param remappingFunction the function to compute a value
2900
+ * @return the new value associated with the specified key, or null if none
2901
+ * @throws NullPointerException if the specified key or remappingFunction
2902
+ * is null
2903
+ * @throws IllegalStateException if the computation detectably
2904
+ * attempts a recursive update to this map that would
2905
+ * otherwise never complete
2906
+ * @throws RuntimeException or Error if the remappingFunction does so,
2907
+ * in which case the mapping is unchanged
2908
+ */
2909
+ @SuppressWarnings("unchecked") public V compute
2910
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2911
+ if (key == null || remappingFunction == null)
2912
+ throw new NullPointerException();
2913
+ return (V)internalCompute(key, false, remappingFunction);
2914
+ }
2915
+
2916
+ /**
2917
+ * If the specified key is not already associated
2918
+ * with a value, associate it with the given value.
2919
+ * Otherwise, replace the value with the results of
2920
+ * the given remapping function. This is equivalent to:
2921
+ * <pre> {@code
2922
+ * if (!map.containsKey(key))
2923
+ * map.put(key, value);
2924
+ * else {
2925
+ * newValue = remappingFunction.apply(map.get(key), value);
2926
+ * if (newValue != null)
2927
+ * map.put(key, newValue);
2928
+ * else
2929
+ * map.remove(key);
2930
+ * }
2931
+ * }</pre>
2932
+ * except that the action is performed atomically. If the
2933
+ * function returns {@code null}, the mapping is removed. If the
2934
+ * function itself throws an (unchecked) exception, the exception
2935
+ * is rethrown to its caller, and the current mapping is left
2936
+ * unchanged. Some attempted update operations on this map by
2937
+ * other threads may be blocked while computation is in progress,
2938
+ * so the computation should be short and simple, and must not
2939
+ * attempt to update any other mappings of this Map.
2940
+ */
2941
+ @SuppressWarnings("unchecked") public V merge
2942
+ (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2943
+ if (key == null || value == null || remappingFunction == null)
2944
+ throw new NullPointerException();
2945
+ return (V)internalMerge(key, value, remappingFunction);
2946
+ }
2947
+
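A usage sketch (ours) of merge for concurrent counting: an absent key is installed with the given value, a present key has its old value combined with the new one by the remapping function, exactly as described above. BiFun is the map's own two-argument function interface; class and variable names are invented.

    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8;
    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8.BiFun;

    class WordCountDemo {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Integer> counts = new ConcurrentHashMapV8<String, Integer>();
            BiFun<Integer, Integer, Integer> sum = new BiFun<Integer, Integer, Integer>() {
                public Integer apply(Integer oldValue, Integer newValue) { return oldValue + newValue; }
            };
            for (String w : new String[] { "a", "b", "a" })
                counts.merge(w, 1, sum);            // absent -> 1, present -> old + 1
            System.out.println(counts.get("a"));    // 2
            System.out.println(counts.get("b"));    // 1
        }
    }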
2948
+ /**
2949
+ * Removes the key (and its corresponding value) from this map.
2950
+ * This method does nothing if the key is not in the map.
2951
+ *
2952
+ * @param key the key that needs to be removed
2953
+ * @return the previous value associated with {@code key}, or
2954
+ * {@code null} if there was no mapping for {@code key}
2955
+ * @throws NullPointerException if the specified key is null
2956
+ */
2957
+ @SuppressWarnings("unchecked") public V remove(Object key) {
2958
+ if (key == null)
2959
+ throw new NullPointerException();
2960
+ return (V)internalReplace(key, null, null);
2961
+ }
2962
+
2963
+ /**
2964
+ * {@inheritDoc}
2965
+ *
2966
+ * @throws NullPointerException if the specified key is null
2967
+ */
2968
+ public boolean remove(Object key, Object value) {
2969
+ if (key == null)
2970
+ throw new NullPointerException();
2971
+ if (value == null)
2972
+ return false;
2973
+ return internalReplace(key, null, value) != null;
2974
+ }
2975
+
2976
+ /**
2977
+ * {@inheritDoc}
2978
+ *
2979
+ * @throws NullPointerException if any of the arguments are null
2980
+ */
2981
+ public boolean replace(K key, V oldValue, V newValue) {
2982
+ if (key == null || oldValue == null || newValue == null)
2983
+ throw new NullPointerException();
2984
+ return internalReplace(key, newValue, oldValue) != null;
2985
+ }
2986
+
2987
+ /**
2988
+ * {@inheritDoc}
2989
+ *
2990
+ * @return the previous value associated with the specified key,
2991
+ * or {@code null} if there was no mapping for the key
2992
+ * @throws NullPointerException if the specified key or value is null
2993
+ */
2994
+ @SuppressWarnings("unchecked") public V replace(K key, V value) {
2995
+ if (key == null || value == null)
2996
+ throw new NullPointerException();
2997
+ return (V)internalReplace(key, value, null);
2998
+ }
2999
+
3000
+ /**
3001
+ * Removes all of the mappings from this map.
3002
+ */
3003
+ public void clear() {
3004
+ internalClear();
3005
+ }
3006
+
3007
+ /**
3008
+ * Returns a {@link Set} view of the keys contained in this map.
3009
+ * The set is backed by the map, so changes to the map are
3010
+ * reflected in the set, and vice-versa.
3011
+ *
3012
+ * @return the set view
3013
+ */
3014
+ public KeySetView<K,V> keySet() {
3015
+ KeySetView<K,V> ks = keySet;
3016
+ return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3017
+ }
3018
+
3019
+ /**
3020
+ * Returns a {@link Set} view of the keys in this map, using the
3021
+ * given common mapped value for any additions (i.e., {@link
3022
+ * Collection#add} and {@link Collection#addAll}). This is of
3023
+ * course only appropriate if it is acceptable to use the same
3024
+ * value for all additions from this view.
3025
+ *
3026
+ * @param mappedValue the mapped value to use for any
3027
+ * additions.
3028
+ * @return the set view
3029
+ * @throws NullPointerException if the mappedValue is null
3030
+ */
3031
+ public KeySetView<K,V> keySet(V mappedValue) {
3032
+ if (mappedValue == null)
3033
+ throw new NullPointerException();
3034
+ return new KeySetView<K,V>(this, mappedValue);
3035
+ }
3036
+
3037
+ /**
3038
+ * Returns a {@link Collection} view of the values contained in this map.
3039
+ * The collection is backed by the map, so changes to the map are
3040
+ * reflected in the collection, and vice-versa.
3041
+ */
3042
+ public ValuesView<K,V> values() {
3043
+ ValuesView<K,V> vs = values;
3044
+ return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
3045
+ }
3046
+
3047
+ /**
3048
+ * Returns a {@link Set} view of the mappings contained in this map.
3049
+ * The set is backed by the map, so changes to the map are
3050
+ * reflected in the set, and vice-versa. The set supports element
3051
+ * removal, which removes the corresponding mapping from the map,
3052
+ * via the {@code Iterator.remove}, {@code Set.remove},
3053
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
3054
+ * operations. It does not support the {@code add} or
3055
+ * {@code addAll} operations.
3056
+ *
3057
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3058
+ * that will never throw {@link ConcurrentModificationException},
3059
+ * and guarantees to traverse elements as they existed upon
3060
+ * construction of the iterator, and may (but is not guaranteed to)
3061
+ * reflect any modifications subsequent to construction.
3062
+ */
3063
+ public Set<Map.Entry<K,V>> entrySet() {
3064
+ EntrySetView<K,V> es = entrySet;
3065
+ return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
3066
+ }
3067
+
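A small sketch (ours) of the weakly consistent iteration described above: the iterator tolerates concurrent writes without ConcurrentModificationException, and Iterator.remove removes the corresponding mapping from the backing map. Keys and the demo class name are invented.

    import java.util.Iterator;
    import java.util.Map;
    import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8;

    class EntrySetDemo {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Integer> map = new ConcurrentHashMapV8<String, Integer>();
            map.put("keep", 1);
            map.put("drop", 0);
            Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Integer> e = it.next();
                map.put("added-mid-iteration", 42);   // tolerated: no ConcurrentModificationException
                if (e.getValue().intValue() == 0)
                    it.remove();                      // removes "drop" from the backing map
            }
            System.out.println(map.containsKey("drop"));  // false
        }
    }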
3068
+ /**
3069
+ * Returns an enumeration of the keys in this table.
3070
+ *
3071
+ * @return an enumeration of the keys in this table
3072
+ * @see #keySet()
3073
+ */
3074
+ public Enumeration<K> keys() {
3075
+ return new KeyIterator<K,V>(this);
3076
+ }
3077
+
3078
+ /**
3079
+ * Returns an enumeration of the values in this table.
3080
+ *
3081
+ * @return an enumeration of the values in this table
3082
+ * @see #values()
3083
+ */
3084
+ public Enumeration<V> elements() {
3085
+ return new ValueIterator<K,V>(this);
3086
+ }
3087
+
3088
+ /**
3089
+ * Returns a partitionable iterator of the keys in this map.
3090
+ *
3091
+ * @return a partitionable iterator of the keys in this map
3092
+ */
3093
+ public Spliterator<K> keySpliterator() {
3094
+ return new KeyIterator<K,V>(this);
3095
+ }
3096
+
3097
+ /**
3098
+ * Returns a partitionable iterator of the values in this map.
3099
+ *
3100
+ * @return a partitionable iterator of the values in this map
3101
+ */
3102
+ public Spliterator<V> valueSpliterator() {
3103
+ return new ValueIterator<K,V>(this);
3104
+ }
3105
+
3106
+ /**
3107
+ * Returns a partitionable iterator of the entries in this map.
3108
+ *
3109
+ * @return a partitionable iterator of the entries in this map
3110
+ */
3111
+ public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3112
+ return new EntryIterator<K,V>(this);
3113
+ }
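+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source):
+ * split() hands a disjoint portion of the keys to another worker, so the two
+ * halves can be traversed independently (e.g. from separate threads). The
+ * casts assume only that the concrete iterators also implement
+ * java.util.Iterator, as KeyIterator below does.
+ */
+ private static int exampleSplitKeyCount(ConcurrentHashMapV8<String, String> map) {
+ Spliterator<String> left = map.keySpliterator();
+ Spliterator<String> right = left.split(); // covers the remaining portion
+ int count = 0;
+ for (Iterator<String> it : Arrays.asList((Iterator<String>) left, (Iterator<String>) right)) {
+ while (it.hasNext()) {
+ it.next();
+ ++count;
+ }
+ }
+ return count; // approximates map.size() under concurrent updates
+ }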
3114
+
3115
+ /**
3116
+ * Returns the hash code value for this {@link Map}, i.e.,
3117
+ * the sum of, for each key-value pair in the map,
3118
+ * {@code key.hashCode() ^ value.hashCode()}.
3119
+ *
3120
+ * @return the hash code value for this map
3121
+ */
3122
+ public int hashCode() {
3123
+ int h = 0;
3124
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3125
+ Object v;
3126
+ while ((v = it.advance()) != null) {
3127
+ h += it.nextKey.hashCode() ^ v.hashCode();
3128
+ }
3129
+ return h;
3130
+ }
3131
+
3132
+ /**
3133
+ * Returns a string representation of this map. The string
3134
+ * representation consists of a list of key-value mappings (in no
3135
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
3136
+ * mappings are separated by the characters {@code ", "} (comma
3137
+ * and space). Each key-value mapping is rendered as the key
3138
+ * followed by an equals sign ("{@code =}") followed by the
3139
+ * associated value.
3140
+ *
3141
+ * @return a string representation of this map
3142
+ */
3143
+ public String toString() {
3144
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3145
+ StringBuilder sb = new StringBuilder();
3146
+ sb.append('{');
3147
+ Object v;
3148
+ if ((v = it.advance()) != null) {
3149
+ for (;;) {
3150
+ Object k = it.nextKey;
3151
+ sb.append(k == this ? "(this Map)" : k);
3152
+ sb.append('=');
3153
+ sb.append(v == this ? "(this Map)" : v);
3154
+ if ((v = it.advance()) == null)
3155
+ break;
3156
+ sb.append(',').append(' ');
3157
+ }
3158
+ }
3159
+ return sb.append('}').toString();
3160
+ }
3161
+
3162
+ /**
3163
+ * Compares the specified object with this map for equality.
3164
+ * Returns {@code true} if the given object is a map with the same
3165
+ * mappings as this map. This operation may return misleading
3166
+ * results if either map is concurrently modified during execution
3167
+ * of this method.
3168
+ *
3169
+ * @param o object to be compared for equality with this map
3170
+ * @return {@code true} if the specified object is equal to this map
3171
+ */
3172
+ public boolean equals(Object o) {
3173
+ if (o != this) {
3174
+ if (!(o instanceof Map))
3175
+ return false;
3176
+ Map<?,?> m = (Map<?,?>) o;
3177
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3178
+ Object val;
3179
+ while ((val = it.advance()) != null) {
3180
+ Object v = m.get(it.nextKey);
3181
+ if (v == null || (v != val && !v.equals(val)))
3182
+ return false;
3183
+ }
3184
+ for (Map.Entry<?,?> e : m.entrySet()) {
3185
+ Object mk, mv, v;
3186
+ if ((mk = e.getKey()) == null ||
3187
+ (mv = e.getValue()) == null ||
3188
+ (v = internalGet(mk)) == null ||
3189
+ (mv != v && !mv.equals(v)))
3190
+ return false;
3191
+ }
3192
+ }
3193
+ return true;
3194
+ }
3195
+
3196
+ /* ---------------- Iterators -------------- */
3197
+
3198
+ @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3199
+ implements Spliterator<K>, Enumeration<K> {
3200
+ KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3201
+ KeyIterator(Traverser<K,V,Object> it) {
3202
+ super(it);
3203
+ }
3204
+ public KeyIterator<K,V> split() {
3205
+ if (nextKey != null)
3206
+ throw new IllegalStateException();
3207
+ return new KeyIterator<K,V>(this);
3208
+ }
3209
+ @SuppressWarnings("unchecked") public final K next() {
3210
+ if (nextVal == null && advance() == null)
3211
+ throw new NoSuchElementException();
3212
+ Object k = nextKey;
3213
+ nextVal = null;
3214
+ return (K) k;
3215
+ }
3216
+
3217
+ public final K nextElement() { return next(); }
3218
+ }
3219
+
3220
+ @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3221
+ implements Spliterator<V>, Enumeration<V> {
3222
+ ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3223
+ ValueIterator(Traverser<K,V,Object> it) {
3224
+ super(it);
3225
+ }
3226
+ public ValueIterator<K,V> split() {
3227
+ if (nextKey != null)
3228
+ throw new IllegalStateException();
3229
+ return new ValueIterator<K,V>(this);
3230
+ }
3231
+
3232
+ @SuppressWarnings("unchecked") public final V next() {
3233
+ Object v;
3234
+ if ((v = nextVal) == null && (v = advance()) == null)
3235
+ throw new NoSuchElementException();
3236
+ nextVal = null;
3237
+ return (V) v;
3238
+ }
3239
+
3240
+ public final V nextElement() { return next(); }
3241
+ }
3242
+
3243
+ @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3244
+ implements Spliterator<Map.Entry<K,V>> {
3245
+ EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3246
+ EntryIterator(Traverser<K,V,Object> it) {
3247
+ super(it);
3248
+ }
3249
+ public EntryIterator<K,V> split() {
3250
+ if (nextKey != null)
3251
+ throw new IllegalStateException();
3252
+ return new EntryIterator<K,V>(this);
3253
+ }
3254
+
3255
+ @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3256
+ Object v;
3257
+ if ((v = nextVal) == null && (v = advance()) == null)
3258
+ throw new NoSuchElementException();
3259
+ Object k = nextKey;
3260
+ nextVal = null;
3261
+ return new MapEntry<K,V>((K)k, (V)v, map);
3262
+ }
3263
+ }
3264
+
3265
+ /**
3266
+ * Exported Entry for iterators
3267
+ */
3268
+ static final class MapEntry<K,V> implements Map.Entry<K, V> {
3269
+ final K key; // non-null
3270
+ V val; // non-null
3271
+ final ConcurrentHashMapV8<K, V> map;
3272
+ MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3273
+ this.key = key;
3274
+ this.val = val;
3275
+ this.map = map;
3276
+ }
3277
+ public final K getKey() { return key; }
3278
+ public final V getValue() { return val; }
3279
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3280
+ public final String toString(){ return key + "=" + val; }
3281
+
3282
+ public final boolean equals(Object o) {
3283
+ Object k, v; Map.Entry<?,?> e;
3284
+ return ((o instanceof Map.Entry) &&
3285
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3286
+ (v = e.getValue()) != null &&
3287
+ (k == key || k.equals(key)) &&
3288
+ (v == val || v.equals(val)));
3289
+ }
3290
+
3291
+ /**
3292
+ * Sets our entry's value and writes through to the map. The
3293
+ * value to return is somewhat arbitrary here. Since we do not
3294
+ * necessarily track asynchronous changes, the most recent
3295
+ * "previous" value could be different from what we return (or
3296
+ * could even have been removed in which case the put will
3297
+ * re-establish). We do not and cannot guarantee more.
3298
+ */
3299
+ public final V setValue(V value) {
3300
+ if (value == null) throw new NullPointerException();
3301
+ V v = val;
3302
+ val = value;
3303
+ map.put(key, value);
3304
+ return v;
3305
+ }
3306
+ }
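+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source):
+ * entries handed out by the iterators write through to the map, though the
+ * value returned by setValue may be stale under concurrent updates, as
+ * documented above. Keys and values below are examples only.
+ */
+ private static void exampleEntryWriteThrough(ConcurrentHashMapV8<String, Integer> map) {
+ for (Map.Entry<String, Integer> e : map.entrySet()) {
+ e.setValue(e.getValue() + 1); // updates the mapping in the map itself
+ }
+ }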
3307
+
3308
+ /* ---------------- Serialization Support -------------- */
3309
+
3310
+ /**
3311
+ * Stripped-down version of helper class used in previous version,
3312
+ * declared for the sake of serialization compatibility
3313
+ */
3314
+ static class Segment<K,V> implements Serializable {
3315
+ private static final long serialVersionUID = 2249069246763182397L;
3316
+ final float loadFactor;
3317
+ Segment(float lf) { this.loadFactor = lf; }
3318
+ }
3319
+
3320
+ /**
3321
+ * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3322
+ * stream (i.e., serializes it).
3323
+ * @param s the stream
3324
+ * @serialData
3325
+ * the key (Object) and value (Object)
3326
+ * for each key-value mapping, followed by a null pair.
3327
+ * The key-value mappings are emitted in no particular order.
3328
+ */
3329
+ @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3330
+ throws java.io.IOException {
3331
+ if (segments == null) { // for serialization compatibility
3332
+ segments = (Segment<K,V>[])
3333
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3334
+ for (int i = 0; i < segments.length; ++i)
3335
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
3336
+ }
3337
+ s.defaultWriteObject();
3338
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3339
+ Object v;
3340
+ while ((v = it.advance()) != null) {
3341
+ s.writeObject(it.nextKey);
3342
+ s.writeObject(v);
3343
+ }
3344
+ s.writeObject(null);
3345
+ s.writeObject(null);
3346
+ segments = null; // throw away
3347
+ }
3348
+
3349
+ /**
3350
+ * Reconstitutes the instance from a stream (that is, deserializes it).
3351
+ * @param s the stream
3352
+ */
3353
+ @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3354
+ throws java.io.IOException, ClassNotFoundException {
3355
+ s.defaultReadObject();
3356
+ this.segments = null; // unneeded
3357
+ // initialize transient final field
3358
+ UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3359
+
3360
+ // Create all nodes, then place in table once size is known
3361
+ long size = 0L;
3362
+ Node p = null;
3363
+ for (;;) {
3364
+ K k = (K) s.readObject();
3365
+ V v = (V) s.readObject();
3366
+ if (k != null && v != null) {
3367
+ int h = spread(k.hashCode());
3368
+ p = new Node(h, k, v, p);
3369
+ ++size;
3370
+ }
3371
+ else
3372
+ break;
3373
+ }
3374
+ if (p != null) {
3375
+ boolean init = false;
3376
+ int n;
3377
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3378
+ n = MAXIMUM_CAPACITY;
3379
+ else {
3380
+ int sz = (int)size;
3381
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
3382
+ }
3383
+ int sc = sizeCtl;
3384
+ boolean collide = false;
3385
+ if (n > sc &&
3386
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3387
+ try {
3388
+ if (table == null) {
3389
+ init = true;
3390
+ Node[] tab = new Node[n];
3391
+ int mask = n - 1;
3392
+ while (p != null) {
3393
+ int j = p.hash & mask;
3394
+ Node next = p.next;
3395
+ Node q = p.next = tabAt(tab, j);
3396
+ setTabAt(tab, j, p);
3397
+ if (!collide && q != null && q.hash == p.hash)
3398
+ collide = true;
3399
+ p = next;
3400
+ }
3401
+ table = tab;
3402
+ counter.add(size);
3403
+ sc = n - (n >>> 2);
3404
+ }
3405
+ } finally {
3406
+ sizeCtl = sc;
3407
+ }
3408
+ if (collide) { // rescan and convert to TreeBins
3409
+ Node[] tab = table;
3410
+ for (int i = 0; i < tab.length; ++i) {
3411
+ int c = 0;
3412
+ for (Node e = tabAt(tab, i); e != null; e = e.next) {
3413
+ if (++c > TREE_THRESHOLD &&
3414
+ (e.key instanceof Comparable)) {
3415
+ replaceWithTreeBin(tab, i, e.key);
3416
+ break;
3417
+ }
3418
+ }
3419
+ }
3420
+ }
3421
+ }
3422
+ if (!init) { // Can only happen if unsafely published.
3423
+ while (p != null) {
3424
+ internalPut(p.key, p.val);
3425
+ p = p.next;
3426
+ }
3427
+ }
3428
+ }
3429
+ }
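+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source): the
+ * map serializes as a stream of key/value pairs terminated by a null pair,
+ * so an ordinary object-stream round trip reproduces the mappings present at
+ * write time.
+ */
+ private static ConcurrentHashMapV8<String, Integer> exampleSerializationRoundTrip(
+ ConcurrentHashMapV8<String, Integer> map)
+ throws java.io.IOException, ClassNotFoundException {
+ java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
+ java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes);
+ out.writeObject(map);
+ out.close();
+ java.io.ObjectInputStream in = new java.io.ObjectInputStream(
+ new java.io.ByteArrayInputStream(bytes.toByteArray()));
+ @SuppressWarnings("unchecked")
+ ConcurrentHashMapV8<String, Integer> copy = (ConcurrentHashMapV8<String, Integer>) in.readObject();
+ in.close();
+ return copy; // equal to the original's mappings at write time
+ }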
3430
+
3431
+
3432
+ // -------------------------------------------------------
3433
+
3434
+ // SAMs: single-abstract-method interfaces, used in place of java.util.function types
3435
+ /** Interface describing a void action of one argument */
3436
+ public interface Action<A> { void apply(A a); }
3437
+ /** Interface describing a void action of two arguments */
3438
+ public interface BiAction<A,B> { void apply(A a, B b); }
3439
+ /** Interface describing a function of one argument */
3440
+ public interface Fun<A,T> { T apply(A a); }
3441
+ /** Interface describing a function of two arguments */
3442
+ public interface BiFun<A,B,T> { T apply(A a, B b); }
3443
+ /** Interface describing a function of no arguments */
3444
+ public interface Generator<T> { T apply(); }
3445
+ /** Interface describing a function mapping its argument to a double */
3446
+ public interface ObjectToDouble<A> { double apply(A a); }
3447
+ /** Interface describing a function mapping its argument to a long */
3448
+ public interface ObjectToLong<A> { long apply(A a); }
3449
+ /** Interface describing a function mapping its argument to an int */
3450
+ public interface ObjectToInt<A> { int apply(A a); }
3451
+ /** Interface describing a function mapping two arguments to a double */
3452
+ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3453
+ /** Interface describing a function mapping two arguments to a long */
3454
+ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3455
+ /** Interface describing a function mapping two arguments to an int */
3456
+ public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
3457
+ /** Interface describing a function mapping a double to a double */
3458
+ public interface DoubleToDouble { double apply(double a); }
3459
+ /** Interface describing a function mapping a long to a long */
3460
+ public interface LongToLong { long apply(long a); }
3461
+ /** Interface describing a function mapping an int to an int */
3462
+ public interface IntToInt { int apply(int a); }
3463
+ /** Interface describing a function mapping two doubles to a double */
3464
+ public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3465
+ /** Interface describing a function mapping two longs to a long */
3466
+ public interface LongByLongToLong { long apply(long a, long b); }
3467
+ /** Interface describing a function mapping two ints to an int */
3468
+ public interface IntByIntToInt { int apply(int a, int b); }
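+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source): these
+ * single-abstract-method shapes stand in for java.util.function types on
+ * pre-Java-8 JVMs. For example, a Fun can serve as the mapping function of
+ * this class's computeIfAbsent (assumed here to accept a Fun, as elsewhere in
+ * this revision of the backport).
+ */
+ private static Integer exampleComputeIfAbsent(ConcurrentHashMapV8<String, Integer> map, String key) {
+ return map.computeIfAbsent(key, new Fun<String, Integer>() {
+ public Integer apply(String k) { return k.length(); } // computed at most once per absent key
+ });
+ }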
3469
+
3470
+
3471
+ /* ---------------- Views -------------- */
3472
+
3473
+ /**
3474
+ * Base class for views.
3475
+ */
3476
+ static abstract class CHMView<K, V> {
3477
+ final ConcurrentHashMapV8<K, V> map;
3478
+ CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3479
+
3480
+ /**
3481
+ * Returns the map backing this view.
3482
+ *
3483
+ * @return the map backing this view
3484
+ */
3485
+ public ConcurrentHashMapV8<K,V> getMap() { return map; }
3486
+
3487
+ public final int size() { return map.size(); }
3488
+ public final boolean isEmpty() { return map.isEmpty(); }
3489
+ public final void clear() { map.clear(); }
3490
+
3491
+ // implementations below rely on concrete classes supplying these
3492
+ abstract public Iterator<?> iterator();
3493
+ abstract public boolean contains(Object o);
3494
+ abstract public boolean remove(Object o);
3495
+
3496
+ private static final String oomeMsg = "Required array size too large";
3497
+
3498
+ public final Object[] toArray() {
3499
+ long sz = map.mappingCount();
3500
+ if (sz > (long)(MAX_ARRAY_SIZE))
3501
+ throw new OutOfMemoryError(oomeMsg);
3502
+ int n = (int)sz;
3503
+ Object[] r = new Object[n];
3504
+ int i = 0;
3505
+ Iterator<?> it = iterator();
3506
+ while (it.hasNext()) {
3507
+ if (i == n) {
3508
+ if (n >= MAX_ARRAY_SIZE)
3509
+ throw new OutOfMemoryError(oomeMsg);
3510
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3511
+ n = MAX_ARRAY_SIZE;
3512
+ else
3513
+ n += (n >>> 1) + 1;
3514
+ r = Arrays.copyOf(r, n);
3515
+ }
3516
+ r[i++] = it.next();
3517
+ }
3518
+ return (i == n) ? r : Arrays.copyOf(r, i);
3519
+ }
3520
+
3521
+ @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3522
+ long sz = map.mappingCount();
3523
+ if (sz > (long)(MAX_ARRAY_SIZE))
3524
+ throw new OutOfMemoryError(oomeMsg);
3525
+ int m = (int)sz;
3526
+ T[] r = (a.length >= m) ? a :
3527
+ (T[])java.lang.reflect.Array
3528
+ .newInstance(a.getClass().getComponentType(), m);
3529
+ int n = r.length;
3530
+ int i = 0;
3531
+ Iterator<?> it = iterator();
3532
+ while (it.hasNext()) {
3533
+ if (i == n) {
3534
+ if (n >= MAX_ARRAY_SIZE)
3535
+ throw new OutOfMemoryError(oomeMsg);
3536
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3537
+ n = MAX_ARRAY_SIZE;
3538
+ else
3539
+ n += (n >>> 1) + 1;
3540
+ r = Arrays.copyOf(r, n);
3541
+ }
3542
+ r[i++] = (T)it.next();
3543
+ }
3544
+ if (a == r && i < n) {
3545
+ r[i] = null; // null-terminate
3546
+ return r;
3547
+ }
3548
+ return (i == n) ? r : Arrays.copyOf(r, i);
3549
+ }
3550
+
3551
+ public final int hashCode() {
3552
+ int h = 0;
3553
+ for (Iterator<?> it = iterator(); it.hasNext();)
3554
+ h += it.next().hashCode();
3555
+ return h;
3556
+ }
3557
+
3558
+ public final String toString() {
3559
+ StringBuilder sb = new StringBuilder();
3560
+ sb.append('[');
3561
+ Iterator<?> it = iterator();
3562
+ if (it.hasNext()) {
3563
+ for (;;) {
3564
+ Object e = it.next();
3565
+ sb.append(e == this ? "(this Collection)" : e);
3566
+ if (!it.hasNext())
3567
+ break;
3568
+ sb.append(',').append(' ');
3569
+ }
3570
+ }
3571
+ return sb.append(']').toString();
3572
+ }
3573
+
3574
+ public final boolean containsAll(Collection<?> c) {
3575
+ if (c != this) {
3576
+ for (Iterator<?> it = c.iterator(); it.hasNext();) {
3577
+ Object e = it.next();
3578
+ if (e == null || !contains(e))
3579
+ return false;
3580
+ }
3581
+ }
3582
+ return true;
3583
+ }
3584
+
3585
+ public final boolean removeAll(Collection<?> c) {
3586
+ boolean modified = false;
3587
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3588
+ if (c.contains(it.next())) {
3589
+ it.remove();
3590
+ modified = true;
3591
+ }
3592
+ }
3593
+ return modified;
3594
+ }
3595
+
3596
+ public final boolean retainAll(Collection<?> c) {
3597
+ boolean modified = false;
3598
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3599
+ if (!c.contains(it.next())) {
3600
+ it.remove();
3601
+ modified = true;
3602
+ }
3603
+ }
3604
+ return modified;
3605
+ }
3606
+
3607
+ }
3608
+
3609
+ /**
3610
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
3611
+ * which additions may optionally be enabled by mapping to a
3612
+ * common value. This class cannot be directly instantiated. See
3613
+ * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
3614
+ * {@link #newKeySet(int)}.
3615
+ */
3616
+ public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
3617
+ private static final long serialVersionUID = 7249069246763182397L;
3618
+ private final V value;
3619
+ KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public
3620
+ super(map);
3621
+ this.value = value;
3622
+ }
3623
+
3624
+ /**
3625
+ * Returns the default mapped value for additions,
3626
+ * or {@code null} if additions are not supported.
3627
+ *
3628
+ * @return the default mapped value for additions, or {@code null}
3629
+ * if not supported.
3630
+ */
3631
+ public V getMappedValue() { return value; }
3632
+
3633
+ // implement Set API
3634
+
3635
+ public boolean contains(Object o) { return map.containsKey(o); }
3636
+ public boolean remove(Object o) { return map.remove(o) != null; }
3637
+
3638
+ /**
3639
+ * Returns a "weakly consistent" iterator that will never
3640
+ * throw {@link ConcurrentModificationException}, and
3641
+ * guarantees to traverse elements as they existed upon
3642
+ * construction of the iterator, and may (but is not
3643
+ * guaranteed to) reflect any modifications subsequent to
3644
+ * construction.
3645
+ *
3646
+ * @return an iterator over the keys of this map
3647
+ */
3648
+ public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
3649
+ public boolean add(K e) {
3650
+ V v;
3651
+ if ((v = value) == null)
3652
+ throw new UnsupportedOperationException();
3653
+ if (e == null)
3654
+ throw new NullPointerException();
3655
+ return map.internalPutIfAbsent(e, v) == null;
3656
+ }
3657
+ public boolean addAll(Collection<? extends K> c) {
3658
+ boolean added = false;
3659
+ V v;
3660
+ if ((v = value) == null)
3661
+ throw new UnsupportedOperationException();
3662
+ for (K e : c) {
3663
+ if (e == null)
3664
+ throw new NullPointerException();
3665
+ if (map.internalPutIfAbsent(e, v) == null)
3666
+ added = true;
3667
+ }
3668
+ return added;
3669
+ }
3670
+ public boolean equals(Object o) {
3671
+ Set<?> c;
3672
+ return ((o instanceof Set) &&
3673
+ ((c = (Set<?>)o) == this ||
3674
+ (containsAll(c) && c.containsAll(this))));
3675
+ }
3676
+ }
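+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source): the
+ * newKeySet() factory referenced in the javadoc above yields a concurrent
+ * Set backed by a fresh map; assigning to Set<String> keeps the sketch
+ * independent of the exact view type it returns.
+ */
+ private static Set<String> exampleConcurrentSet() {
+ Set<String> set = ConcurrentHashMapV8.<String>newKeySet();
+ set.add("a");
+ set.add("b");
+ set.remove("a");
+ return set; // safe for concurrent add/remove/contains
+ }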
3677
+
3678
+ /**
3679
+ * A view of a ConcurrentHashMapV8 as a {@link Collection} of
3680
+ * values, in which additions are disabled. This class cannot be
3681
+ * directly instantiated. See {@link #values}.
3682
+ *
3683
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3684
+ * that will never throw {@link ConcurrentModificationException},
3685
+ * and guarantees to traverse elements as they existed upon
3686
+ * construction of the iterator, and may (but is not guaranteed to)
3687
+ * reflect any modifications subsequent to construction.
3688
+ */
3689
+ public static final class ValuesView<K,V> extends CHMView<K,V>
3690
+ implements Collection<V> {
3691
+ ValuesView(ConcurrentHashMapV8<K, V> map) { super(map); }
3692
+ public final boolean contains(Object o) { return map.containsValue(o); }
3693
+ public final boolean remove(Object o) {
3694
+ if (o != null) {
3695
+ Iterator<V> it = new ValueIterator<K,V>(map);
3696
+ while (it.hasNext()) {
3697
+ if (o.equals(it.next())) {
3698
+ it.remove();
3699
+ return true;
3700
+ }
3701
+ }
3702
+ }
3703
+ return false;
3704
+ }
3705
+
3706
+ /**
3707
+ * Returns a "weakly consistent" iterator that will never
3708
+ * throw {@link ConcurrentModificationException}, and
3709
+ * guarantees to traverse elements as they existed upon
3710
+ * construction of the iterator, and may (but is not
3711
+ * guaranteed to) reflect any modifications subsequent to
3712
+ * construction.
3713
+ *
3714
+ * @return an iterator over the values of this map
3715
+ */
3716
+ public final Iterator<V> iterator() {
3717
+ return new ValueIterator<K,V>(map);
3718
+ }
3719
+ public final boolean add(V e) {
3720
+ throw new UnsupportedOperationException();
3721
+ }
3722
+ public final boolean addAll(Collection<? extends V> c) {
3723
+ throw new UnsupportedOperationException();
3724
+ }
3725
+ }
3726
+
3727
+ /**
3728
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
3729
+ * entries. This class cannot be directly instantiated. See
3730
+ * {@link #entrySet}.
3731
+ */
3732
+ public static final class EntrySetView<K,V> extends CHMView<K,V>
3733
+ implements Set<Map.Entry<K,V>> {
3734
+ EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); }
3735
+ public final boolean contains(Object o) {
3736
+ Object k, v, r; Map.Entry<?,?> e;
3737
+ return ((o instanceof Map.Entry) &&
3738
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3739
+ (r = map.get(k)) != null &&
3740
+ (v = e.getValue()) != null &&
3741
+ (v == r || v.equals(r)));
3742
+ }
3743
+ public final boolean remove(Object o) {
3744
+ Object k, v; Map.Entry<?,?> e;
3745
+ return ((o instanceof Map.Entry) &&
3746
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3747
+ (v = e.getValue()) != null &&
3748
+ map.remove(k, v));
3749
+ }
3750
+
3751
+ /**
3752
+ * Returns a "weakly consistent" iterator that will never
3753
+ * throw {@link ConcurrentModificationException}, and
3754
+ * guarantees to traverse elements as they existed upon
3755
+ * construction of the iterator, and may (but is not
3756
+ * guaranteed to) reflect any modifications subsequent to
3757
+ * construction.
3758
+ *
3759
+ * @return an iterator over the entries of this map
3760
+ */
3761
+ public final Iterator<Map.Entry<K,V>> iterator() {
3762
+ return new EntryIterator<K,V>(map);
3763
+ }
3764
+
3765
+ public final boolean add(Entry<K,V> e) {
3766
+ K key = e.getKey();
3767
+ V value = e.getValue();
3768
+ if (key == null || value == null)
3769
+ throw new NullPointerException();
3770
+ return map.internalPut(key, value) == null;
3771
+ }
3772
+ public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3773
+ boolean added = false;
3774
+ for (Entry<K,V> e : c) {
3775
+ if (add(e))
3776
+ added = true;
3777
+ }
3778
+ return added;
3779
+ }
3780
+ public boolean equals(Object o) {
3781
+ Set<?> c;
3782
+ return ((o instanceof Set) &&
3783
+ ((c = (Set<?>)o) == this ||
3784
+ (containsAll(c) && c.containsAll(this))));
3785
+ }
3786
+ }
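+
+ /*
+ * Illustrative usage sketch (not part of the original jsr166e source): the
+ * entry-set view's add writes the given mapping through to the map (see
+ * EntrySetView.add above); AbstractMap.SimpleEntry is used only to build a
+ * detached entry for the example.
+ */
+ private static void exampleEntrySetAdd(ConcurrentHashMapV8<String, Integer> map) {
+ Set<Map.Entry<String, Integer>> entries = map.entrySet();
+ entries.add(new java.util.AbstractMap.SimpleEntry<String, Integer>("answer", 42));
+ assert map.get("answer") == 42;
+ }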
3787
+
3788
+ // Unsafe mechanics
3789
+ private static final sun.misc.Unsafe UNSAFE;
3790
+ private static final long counterOffset;
3791
+ private static final long sizeCtlOffset;
3792
+ private static final long ABASE;
3793
+ private static final int ASHIFT;
3794
+
3795
+ static {
3796
+ int ss;
3797
+ try {
3798
+ UNSAFE = getUnsafe();
3799
+ Class<?> k = ConcurrentHashMapV8.class;
3800
+ counterOffset = UNSAFE.objectFieldOffset
3801
+ (k.getDeclaredField("counter"));
3802
+ sizeCtlOffset = UNSAFE.objectFieldOffset
3803
+ (k.getDeclaredField("sizeCtl"));
3804
+ Class<?> sc = Node[].class;
3805
+ ABASE = UNSAFE.arrayBaseOffset(sc);
3806
+ ss = UNSAFE.arrayIndexScale(sc);
3807
+ } catch (Exception e) {
3808
+ throw new Error(e);
3809
+ }
3810
+ if ((ss & (ss-1)) != 0)
3811
+ throw new Error("data type scale not a power of two");
3812
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
3813
+ }
3814
+
3815
+ /**
3816
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
3817
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
3818
+ * into a jdk.
3819
+ *
3820
+ * @return a sun.misc.Unsafe
3821
+ */
3822
+ private static sun.misc.Unsafe getUnsafe() {
3823
+ try {
3824
+ return sun.misc.Unsafe.getUnsafe();
3825
+ } catch (SecurityException se) {
3826
+ try {
3827
+ return java.security.AccessController.doPrivileged
3828
+ (new java.security
3829
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
3830
+ public sun.misc.Unsafe run() throws Exception {
3831
+ java.lang.reflect.Field f = sun.misc
3832
+ .Unsafe.class.getDeclaredField("theUnsafe");
3833
+ f.setAccessible(true);
3834
+ return (sun.misc.Unsafe) f.get(null);
3835
+ }});
3836
+ } catch (java.security.PrivilegedActionException e) {
3837
+ throw new RuntimeException("Could not initialize intrinsics",
3838
+ e.getCause());
3839
+ }
3840
+ }
3841
+ }
3842
+ }