thread_safe 0.1.3-java → 0.2.0-java

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +24 -0
  3. data/README.md +19 -5
  4. data/Rakefile +13 -6
  5. data/examples/bench_cache.rb +1 -1
  6. data/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java +54 -15
  7. data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java +28 -0
  8. data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java +19 -10
  9. data/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java +1 -2
  10. data/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java +1 -1
  11. data/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java +3788 -0
  12. data/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java +204 -0
  13. data/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java +291 -0
  14. data/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java +1 -1
  15. data/ext/thread_safe/JrubyCacheBackendService.java +2 -2
  16. data/lib/thread_safe.rb +1 -1
  17. data/lib/thread_safe/atomic_reference_cache_backend.rb +1 -1
  18. data/lib/thread_safe/cache.rb +6 -3
  19. data/lib/thread_safe/mri_cache_backend.rb +2 -2
  20. data/lib/thread_safe/non_concurrent_cache_backend.rb +1 -1
  21. data/lib/thread_safe/synchronized_cache_backend.rb +1 -1
  22. data/lib/thread_safe/synchronized_delegator.rb +36 -19
  23. data/lib/thread_safe/util.rb +1 -1
  24. data/lib/thread_safe/util/adder.rb +1 -1
  25. data/lib/thread_safe/util/atomic_reference.rb +1 -1
  26. data/lib/thread_safe/util/cheap_lockable.rb +1 -1
  27. data/lib/thread_safe/util/power_of_two_tuple.rb +1 -1
  28. data/lib/thread_safe/util/striped64.rb +1 -1
  29. data/lib/thread_safe/util/volatile.rb +1 -1
  30. data/lib/thread_safe/util/volatile_tuple.rb +1 -1
  31. data/lib/thread_safe/util/xor_shift_random.rb +1 -1
  32. data/lib/thread_safe/version.rb +1 -1
  33. data/test/src/thread_safe/SecurityManager.java +21 -0
  34. data/test/test_array.rb +1 -1
  35. data/test/test_cache.rb +27 -10
  36. data/test/test_cache_loops.rb +377 -376
  37. data/test/test_hash.rb +1 -2
  38. data/test/test_helper.rb +33 -3
  39. data/test/test_synchronized_delegator.rb +67 -17
  40. data/thread_safe.gemspec +6 -3
  41. metadata +38 -11
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 3479f71a9882aaee73a2f38a3b6c5eac7d8cf618
- data.tar.gz: 4295c99e7af99e38c834735b144e931c1a3a3f90
+ metadata.gz: 6ec4092b3c27528c0451830f13a0f1331b3d2073
+ data.tar.gz: 5e8f84e18a076856ce0d666f24c27060b6173124
  SHA512:
- metadata.gz: 5f0fc0f55f7bdafbae4017d8e64b46688b708ad045ac62ba175d960769395ee234984c43483ff27be55bc71be1be05e48e252f4f539ef77bf3d8d60b63c07c0c
- data.tar.gz: 36b25bab3a9eabf08f4857bf8acb3cb0fa96ebf22c2411ef061fc2729699b287300d54ffc65ce8a3f01db8d7460f1234e5c3b1afd0afd9859d0071fc7cde8674
+ metadata.gz: 8d3761a208f3e25afadcf16182b03a296a119c4bf74b5b9db304237c9486c69f4f0df8bd385b3ee30355daf96405a4a6986e099c3e1c5394c7e236428a0a7520
+ data.tar.gz: aa4cc676aaa4c66290a454aecec9c57c23fac7be246bb1a42210792ba4f30dd81a6c4e2b91d3f547d923f0dd42d470714cd9f7ae73564c5123069028e3013891
data/.travis.yml ADDED
@@ -0,0 +1,24 @@
+ language: ruby
+ rvm:
+   - jruby-18mode
+   - jruby-19mode
+   - rbx-2
+   - 1.8.7
+   - 1.9.3
+   - 2.0.0
+   - 2.1.0
+ jdk: # for JRuby only
+   - openjdk7
+   - oraclejdk8
+ matrix:
+   exclude:
+     - rvm: rbx-2
+       jdk: oraclejdk8
+     - rvm: 1.8.7
+       jdk: oraclejdk8
+     - rvm: 1.9.3
+       jdk: oraclejdk8
+     - rvm: 2.0.0
+       jdk: oraclejdk8
+     - rvm: 2.1.0
+       jdk: oraclejdk8
data/README.md CHANGED
@@ -1,5 +1,7 @@
  # Threadsafe
 
+ [![Build Status](https://travis-ci.org/headius/thread_safe.png)](https://travis-ci.org/headius/thread_safe)
+
  A collection of thread-safe versions of common core Ruby classes.
 
  ## Installation
@@ -18,17 +20,29 @@ Or install it yourself as:
 
  ## Usage
 
- ```
+ ```ruby
  require 'thread_safe'
 
  sa = ThreadSafe::Array.new # supports standard Array.new forms
  sh = ThreadSafe::Hash.new # supports standard Hash.new forms
  ```
 
+ `ThreadSafe::Cache` also exists, as a hash-like object, and should have
+ much better performance characteristics under concurrency than
+ `ThreadSafe::Hash`. However, `ThreadSafe::Cache` is not strictly semantically
+ equivalent to a Ruby `Hash` -- for instance, it is not necessarily ordered by
+ insertion time as `Hash` is. For most uses it should do fine, though, and we
+ recommend you consider `ThreadSafe::Cache` instead of `ThreadSafe::Hash` for your
+ concurrency-safe hash needs.
+
+
  ## Contributing
 
  1. Fork it
- 2. Create your feature branch (`git checkout -b my-new-feature`)
- 3. Commit your changes (`git commit -am 'Added some feature'`)
- 4. Push to the branch (`git push origin my-new-feature`)
- 5. Create new Pull Request
+ 2. Clone it (`git clone git@github.com:you/thread_safe.git`)
+ 3. Create your feature branch (`git checkout -b my-new-feature`)
+ 4. Build the jar (`rake jar`) NOTE: Requires JRuby
+ 5. Install dependencies (`bundle install`)
+ 6. Commit your changes (`git commit -am 'Added some feature'`)
+ 7. Push to the branch (`git push origin my-new-feature`)
+ 8. Create new Pull Request
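
The README text added above describes `ThreadSafe::Cache` only in prose. As a rough illustration of the distinction it draws, here is a minimal Ruby sketch; it assumes `ThreadSafe::Cache` exposes `[]`, `[]=` and `compute_if_absent`, as the backend methods elsewhere in this diff suggest, and it is not taken verbatim from the gem's documentation.

```ruby
require 'thread_safe'

cache = ThreadSafe::Cache.new

# Unlike a plain Hash, iteration order is not guaranteed to match insertion order.
cache[:a] = 1
cache[:b] = 2

# compute_if_absent lets many threads race to populate a missing key while
# only one computed value ends up being stored.
threads = 4.times.map do
  Thread.new { cache.compute_if_absent(:config) { { :loaded => true } } }
end
threads.each(&:join)
cache[:config] # => the single hash stored by whichever thread won the race
```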
data/Rakefile CHANGED
@@ -1,17 +1,18 @@
- #!/usr/bin/env rake
  require "bundler/gem_tasks"
- require 'rake/testtask'
+ require "rake/testtask"
 
  task :default => :test
 
  if defined?(JRUBY_VERSION)
-   require 'ant'
+   require "ant"
 
    directory "pkg/classes"
+   directory 'pkg/tests'
 
    desc "Clean up build artifacts"
    task :clean do
      rm_rf "pkg/classes"
+     rm_rf "pkg/tests"
      rm_rf "lib/thread_safe/jruby_cache_backend.jar"
    end
 
@@ -27,7 +28,15 @@ if defined?(JRUBY_VERSION)
      ant.jar :basedir => "pkg/classes", :destfile => "lib/thread_safe/jruby_cache_backend.jar", :includes => "**/*.class"
    end
 
-   task :package => :jar
+   desc "Build test jar"
+   task 'test-jar' => 'pkg/tests' do |t|
+     ant.javac :srcdir => 'test/src', :destdir => t.prerequisites.first,
+       :source => "1.5", :target => "1.5", :debug => true
+
+     ant.jar :basedir => 'pkg/tests', :destfile => 'test/package.jar', :includes => '**/*.class'
+   end
+
+   task :package => [ :jar, 'test-jar' ]
  else
    # No need to package anything for non-jruby rubies
    task :package
@@ -37,5 +46,3 @@ Rake::TestTask.new :test => :package do |t|
    t.libs << "lib"
    t.test_files = FileList["test/**/*.rb"]
  end
-
-
data/examples/bench_cache.rb CHANGED
@@ -32,4 +32,4 @@ Benchmark.bmbm do |results|
    results.report('Cache#each_pair') do
      (TESTS / ENTRIES).times { cache.each_pair {|k,v| v} }
    end
- end
+ end
data/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java CHANGED
@@ -3,7 +3,9 @@ package org.jruby.ext.thread_safe;
  import org.jruby.*;
  import org.jruby.anno.JRubyClass;
  import org.jruby.anno.JRubyMethod;
+ import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap;
  import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8;
+ import org.jruby.ext.thread_safe.jsr166e.nounsafe.*;
  import org.jruby.runtime.Block;
  import org.jruby.runtime.ObjectAllocator;
  import org.jruby.runtime.ThreadContext;
@@ -17,7 +19,7 @@ import static org.jruby.runtime.Visibility.PRIVATE;
 
  /**
   * Native Java implementation to avoid the JI overhead.
-  *
+  *
   * @author thedarkone
   */
  public class JRubyCacheBackendLibrary implements Library {
@@ -26,7 +28,7 @@ public class JRubyCacheBackendLibrary implements Library {
          jrubyRefClass.setAllocator(BACKEND_ALLOCATOR);
          jrubyRefClass.defineAnnotatedMethods(JRubyCacheBackend.class);
      }
-
+
      private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() {
          public IRubyObject allocate(Ruby runtime, RubyClass klazz) {
              return new JRubyCacheBackend(runtime, klazz);
@@ -39,7 +41,44 @@ public class JRubyCacheBackendLibrary implements Library {
      static final int DEFAULT_INITIAL_CAPACITY = 16;
      static final float DEFAULT_LOAD_FACTOR = 0.75f;
 
-     private ConcurrentHashMapV8<IRubyObject, IRubyObject> map;
+     public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM();
+
+     private ConcurrentHashMap<IRubyObject, IRubyObject> map;
+
+     private static ConcurrentHashMap<IRubyObject, IRubyObject> newCHM(int initialCapacity, float loadFactor) {
+         if (CAN_USE_UNSAFE_CHM) {
+             return new ConcurrentHashMapV8<IRubyObject, IRubyObject>(initialCapacity, loadFactor);
+         } else {
+             return new org.jruby.ext.thread_safe.jsr166e.nounsafe.ConcurrentHashMapV8<IRubyObject, IRubyObject>(initialCapacity, loadFactor);
+         }
+     }
+
+     private static ConcurrentHashMap<IRubyObject, IRubyObject> newCHM() {
+         return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
+     }
+
+     private static boolean canUseUnsafeCHM() {
+         try {
+             new org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8(); // force class load and initialization
+             return true;
+         } catch (Throwable t) { // ensuring we really do catch everything
+             // Doug's Unsafe setup errors always have this "Could not ini.." message
+             if (t.getMessage().contains("Could not initialize intrinsics") || isCausedBySecurityException(t)) {
+                 return false;
+             }
+             throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t));
+         }
+     }
+
+     private static boolean isCausedBySecurityException(Throwable t) {
+         while (t != null) {
+             if (t instanceof SecurityException) {
+                 return true;
+             }
+             t = t.getCause();
+         }
+         return false;
+     }
 
      public JRubyCacheBackend(Ruby runtime, RubyClass klass) {
          super(runtime, klass);
@@ -47,7 +86,7 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod
      public IRubyObject initialize(ThreadContext context) {
-         map = new ConcurrentHashMapV8<IRubyObject, IRubyObject>();
+         map = newCHM();
          return context.getRuntime().getNil();
      }
 
@@ -57,16 +96,16 @@ public class JRubyCacheBackendLibrary implements Library {
          return context.getRuntime().getNil();
      }
 
-     private ConcurrentHashMapV8<IRubyObject, IRubyObject> toCHM(ThreadContext context, IRubyObject options) {
+     private ConcurrentHashMap<IRubyObject, IRubyObject> toCHM(ThreadContext context, IRubyObject options) {
          Ruby runtime = context.getRuntime();
          if (!options.isNil() && options.respondsTo("[]")) {
              IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity"));
              IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor"));
              int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY;
              float loadFactor = !rLoadFactor.isNil() ? (float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR;
-             return new ConcurrentHashMapV8<IRubyObject, IRubyObject>(initialCapacity, loadFactor);
+             return newCHM(initialCapacity, loadFactor);
          } else {
-             return new ConcurrentHashMapV8<IRubyObject, IRubyObject>();
+             return newCHM();
          }
      }
 
@@ -90,7 +129,7 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod
      public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) {
-         return map.computeIfAbsent(key, new ConcurrentHashMapV8.Fun<IRubyObject, IRubyObject>() {
+         return map.computeIfAbsent(key, new ConcurrentHashMap.Fun<IRubyObject, IRubyObject>() {
              @Override
              public IRubyObject apply(IRubyObject key) {
                  return block.yieldSpecific(context);
@@ -100,10 +139,10 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod
      public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) {
-         IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMapV8.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
+         IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
              @Override
              public IRubyObject apply(IRubyObject key, IRubyObject oldValue) {
-                 IRubyObject result = block.yieldSpecific(context, oldValue);
+                 IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue);
                  return result.isNil() ? null : result;
              }
          });
@@ -112,10 +151,10 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod
      public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) {
-         IRubyObject result = map.compute(key, new ConcurrentHashMapV8.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
+         IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
              @Override
              public IRubyObject apply(IRubyObject key, IRubyObject oldValue) {
-                 IRubyObject result = block.yieldSpecific(context, oldValue);
+                 IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue);
                  return result.isNil() ? null : result;
              }
          });
@@ -124,10 +163,10 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod
      public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) {
-         IRubyObject result = map.merge(key, value, new ConcurrentHashMapV8.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
+         IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() {
              @Override
              public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) {
-                 IRubyObject result = block.yieldSpecific(context, oldValue);
+                 IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue);
                  return result.isNil() ? null : result;
              }
          });
@@ -193,7 +232,7 @@ public class JRubyCacheBackendLibrary implements Library {
 
      @JRubyMethod(visibility = PRIVATE)
      public JRubyCacheBackend initialize_copy(ThreadContext context, IRubyObject other) {
-         this.map = new ConcurrentHashMapV8<IRubyObject, IRubyObject>();
+         map = newCHM();
          return this;
      }
  }
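
One user-visible effect of the `oldValue == null ? context.getRuntime().getNil() : oldValue` changes above is that blocks passed to the compute-style methods now see a Ruby `nil` rather than a raw Java null when there is no prior value for the key. A hedged Ruby sketch of how that reads from the caller's side, assuming `ThreadSafe::Cache` forwards `compute` to this backend as the method names suggest:

```ruby
require 'thread_safe'

cache = ThreadSafe::Cache.new

# On the first call there is no existing entry, so the block receives nil.
cache.compute(:hits) { |old| (old || 0) + 1 }  # old is nil -> stores 1
cache.compute(:hits) { |old| (old || 0) + 1 }  # old is 1   -> stores 2

# Returning nil from the block maps back to Java null, which removes the entry
# (see `result.isNil() ? null : result` in the hunks above).
cache.compute(:hits) { |_old| nil }
cache[:hits]  # => nil (entry removed)
```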
data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java ADDED
@@ -0,0 +1,28 @@
+ package org.jruby.ext.thread_safe.jsr166e;
+
+ import java.util.Map;
+ import java.util.Set;
+
+ public interface ConcurrentHashMap<K, V> {
+     /** Interface describing a function of one argument */
+     public interface Fun<A,T> { T apply(A a); }
+     /** Interface describing a function of two arguments */
+     public interface BiFun<A,B,T> { T apply(A a, B b); }
+
+     public V get(K key);
+     public V put(K key, V value);
+     public V putIfAbsent(K key, V value);
+     public V computeIfAbsent(K key, Fun<? super K, ? extends V> mf);
+     public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> mf);
+     public V compute(K key, BiFun<? super K, ? super V, ? extends V> mf);
+     public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> mf);
+     public boolean replace(K key, V oldVal, V newVal);
+     public V replace(K key, V value);
+     public boolean containsKey(K key);
+     public boolean remove(Object key, Object value);
+     public V remove(K key);
+     public void clear();
+     public Set<Map.Entry<K,V>> entrySet();
+     public int size();
+     public V getValueOrDefault(Object key, V defaultValue);
+ }
data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java CHANGED
@@ -11,6 +11,7 @@ package org.jruby.ext.thread_safe.jsr166e;
  import org.jruby.RubyClass;
  import org.jruby.RubyNumeric;
  import org.jruby.RubyObject;
+ import org.jruby.exceptions.RaiseException;
  import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom;
  import org.jruby.runtime.ThreadContext;
  import org.jruby.runtime.builtin.IRubyObject;
@@ -217,7 +218,7 @@ import java.io.Serializable;
   * @param <V> the type of mapped values
   */
  public class ConcurrentHashMapV8<K, V>
-     implements ConcurrentMap<K, V>, Serializable {
+     implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
      private static final long serialVersionUID = 7249069246763182397L;
 
      /**
@@ -537,8 +538,11 @@ public class ConcurrentHashMapV8<K, V>
       * The bin count threshold for using a tree rather than list for a
       * bin. The value reflects the approximate break-even point for
       * using tree-based operations.
+      * Note that Doug's version defaults to 8, but when dealing with
+      * Ruby objects it is actually beneficial to avoid TreeNodes
+      * as long as possible as it usually means going into Ruby land.
       */
-     private static final int TREE_THRESHOLD = 8;
+     private static final int TREE_THRESHOLD = 16;
 
      /*
       * Encodings for special uses of Node hash fields. See above for
@@ -872,9 +876,18 @@ public class ConcurrentHashMapV8<K, V>
 
      int rubyCompare(RubyObject l, RubyObject r) {
          ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext();
-         IRubyObject result = l.callMethod(context, "<=>", r);
-         int res = result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger());
-         return res;
+         IRubyObject result;
+         try {
+             result = l.callMethod(context, "<=>", r);
+         } catch (RaiseException e) {
+             // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys
+             if (context.runtime.getNoMethodError().isInstance(e.getException())) {
+                 return 0;
+             }
+             throw e;
+         }
+
+         return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger());
      }
 
      /**
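
The `rubyCompare` guard above exists because a key can claim to respond to `<=>` yet raise `NoMethodError` once the comparison recurses into elements that are not actually comparable (the inline comment cites an Array of non-comparable keys). A contrived Ruby sketch of such a key follows; the class and its behaviour are purely illustrative and not taken from the gem, and the tree-bin code path it relates to is only reached once enough colliding keys share a bin:

```ruby
# Illustrative only: a key that "lies" about being comparable.
class AwkwardKey
  def <=>(other)
    # Simulates an Array whose elements cannot actually be compared with <=>.
    raise NoMethodError, "undefined method `<=>'"
  end
end

# With the RaiseException handling above, the JRuby backend treats such keys
# as ordering equal (compare result 0) instead of letting the NoMethodError
# escape from a cache read or write.
```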
@@ -3437,10 +3450,6 @@ public class ConcurrentHashMapV8<K, V>
      /** Interface describing a void action of two arguments */
      public interface BiAction<A,B> { void apply(A a, B b); }
      /** Interface describing a function of one argument */
-     public interface Fun<A,T> { T apply(A a); }
-     /** Interface describing a function of two arguments */
-     public interface BiFun<A,B,T> { T apply(A a, B b); }
-     /** Interface describing a function of no arguments */
      public interface Generator<T> { T apply(); }
      /** Interface describing a function mapping its argument to a double */
      public interface ObjectToDouble<A> { double apply(A a); }
@@ -3839,4 +3848,4 @@ public class ConcurrentHashMapV8<K, V>
  }
  }
  }
- }
+ }
data/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java CHANGED
@@ -11,7 +11,6 @@ import java.util.concurrent.atomic.AtomicLong;
  import java.io.IOException;
  import java.io.Serializable;
  import java.io.ObjectInputStream;
- import java.io.ObjectOutputStream;
 
  /**
   * One or more variables that together maintain an initially zero
@@ -201,4 +200,4 @@ public class LongAdder extends Striped64 implements Serializable {
      base = s.readLong();
  }
 
- }
+ }
data/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java CHANGED
@@ -339,4 +339,4 @@ abstract class Striped64 extends Number {
  }
  }
 
- }
+ }
data/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java ADDED
@@ -0,0 +1,3788 @@
1
+ /*
2
+ * Written by Doug Lea with assistance from members of JCP JSR-166
3
+ * Expert Group and released to the public domain, as explained at
4
+ * http://creativecommons.org/publicdomain/zero/1.0/
5
+ */
6
+
7
+ // This is based on the 1.79 version.
8
+
9
+ package org.jruby.ext.thread_safe.jsr166e.nounsafe;
10
+
11
+ import org.jruby.RubyClass;
12
+ import org.jruby.RubyNumeric;
13
+ import org.jruby.RubyObject;
14
+ import org.jruby.exceptions.RaiseException;
15
+ import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap;
16
+ import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom;
17
+ import org.jruby.runtime.ThreadContext;
18
+ import org.jruby.runtime.builtin.IRubyObject;
19
+
20
+ import java.util.Arrays;
21
+ import java.util.Map;
22
+ import java.util.Set;
23
+ import java.util.Collection;
24
+ import java.util.Hashtable;
25
+ import java.util.HashMap;
26
+ import java.util.Iterator;
27
+ import java.util.Enumeration;
28
+ import java.util.ConcurrentModificationException;
29
+ import java.util.NoSuchElementException;
30
+ import java.util.concurrent.ConcurrentMap;
31
+ import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
32
+ import java.util.concurrent.atomic.AtomicReferenceArray;
33
+ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
34
+
35
+ import java.io.Serializable;
36
+
37
+ /**
38
+ * A hash table supporting full concurrency of retrievals and
39
+ * high expected concurrency for updates. This class obeys the
40
+ * same functional specification as {@link java.util.Hashtable}, and
41
+ * includes versions of methods corresponding to each method of
42
+ * {@code Hashtable}. However, even though all operations are
43
+ * thread-safe, retrieval operations do <em>not</em> entail locking,
44
+ * and there is <em>not</em> any support for locking the entire table
45
+ * in a way that prevents all access. This class is fully
46
+ * interoperable with {@code Hashtable} in programs that rely on its
47
+ * thread safety but not on its synchronization details.
48
+ *
49
+ * <p>Retrieval operations (including {@code get}) generally do not
50
+ * block, so may overlap with update operations (including {@code put}
51
+ * and {@code remove}). Retrievals reflect the results of the most
52
+ * recently <em>completed</em> update operations holding upon their
53
+ * onset. (More formally, an update operation for a given key bears a
54
+ * <em>happens-before</em> relation with any (non-null) retrieval for
55
+ * that key reporting the updated value.) For aggregate operations
56
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
57
+ * reflect insertion or removal of only some entries. Similarly,
58
+ * Iterators and Enumerations return elements reflecting the state of
59
+ * the hash table at some point at or since the creation of the
60
+ * iterator/enumeration. They do <em>not</em> throw {@link
61
+ * ConcurrentModificationException}. However, iterators are designed
62
+ * to be used by only one thread at a time. Bear in mind that the
63
+ * results of aggregate status methods including {@code size}, {@code
64
+ * isEmpty}, and {@code containsValue} are typically useful only when
65
+ * a map is not undergoing concurrent updates in other threads.
66
+ * Otherwise the results of these methods reflect transient states
67
+ * that may be adequate for monitoring or estimation purposes, but not
68
+ * for program control.
69
+ *
70
+ * <p>The table is dynamically expanded when there are too many
71
+ * collisions (i.e., keys that have distinct hash codes but fall into
72
+ * the same slot modulo the table size), with the expected average
73
+ * effect of maintaining roughly two bins per mapping (corresponding
74
+ * to a 0.75 load factor threshold for resizing). There may be much
75
+ * variance around this average as mappings are added and removed, but
76
+ * overall, this maintains a commonly accepted time/space tradeoff for
77
+ * hash tables. However, resizing this or any other kind of hash
78
+ * table may be a relatively slow operation. When possible, it is a
79
+ * good idea to provide a size estimate as an optional {@code
80
+ * initialCapacity} constructor argument. An additional optional
81
+ * {@code loadFactor} constructor argument provides a further means of
82
+ * customizing initial table capacity by specifying the table density
83
+ * to be used in calculating the amount of space to allocate for the
84
+ * given number of elements. Also, for compatibility with previous
85
+ * versions of this class, constructors may optionally specify an
86
+ * expected {@code concurrencyLevel} as an additional hint for
87
+ * internal sizing. Note that using many keys with exactly the same
88
+ * {@code hashCode()} is a sure way to slow down performance of any
89
+ * hash table.
90
+ *
91
+ * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
92
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
93
+ * (using {@link #keySet(Object)} when only keys are of interest, and the
94
+ * mapped values are (perhaps transiently) not used or all take the
95
+ * same mapping value.
96
+ *
97
+ * <p>A ConcurrentHashMapV8 can be used as scalable frequency map (a
98
+ * form of histogram or multiset) by using {@link LongAdder} values
99
+ * and initializing via {@link #computeIfAbsent}. For example, to add
100
+ * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
101
+ * can use {@code freqs.computeIfAbsent(k -> new
102
+ * LongAdder()).increment();}
103
+ *
104
+ * <p>This class and its views and iterators implement all of the
105
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
106
+ * interfaces.
107
+ *
108
+ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
109
+ * does <em>not</em> allow {@code null} to be used as a key or value.
110
+ *
111
+ * <p>ConcurrentHashMapV8s support parallel operations using the {@link
112
+ * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
113
+ * are available in class {@link ForkJoinTasks}). These operations are
114
+ * designed to be safely, and often sensibly, applied even with maps
115
+ * that are being concurrently updated by other threads; for example,
116
+ * when computing a snapshot summary of the values in a shared
117
+ * registry. There are three kinds of operation, each with four
118
+ * forms, accepting functions with Keys, Values, Entries, and (Key,
119
+ * Value) arguments and/or return values. (The first three forms are
120
+ * also available via the {@link #keySet()}, {@link #values()} and
121
+ * {@link #entrySet()} views). Because the elements of a
122
+ * ConcurrentHashMapV8 are not ordered in any particular way, and may be
123
+ * processed in different orders in different parallel executions, the
124
+ * correctness of supplied functions should not depend on any
125
+ * ordering, or on any other objects or values that may transiently
126
+ * change while computation is in progress; and except for forEach
127
+ * actions, should ideally be side-effect-free.
128
+ *
129
+ * <ul>
130
+ * <li> forEach: Perform a given action on each element.
131
+ * A variant form applies a given transformation on each element
132
+ * before performing the action.</li>
133
+ *
134
+ * <li> search: Return the first available non-null result of
135
+ * applying a given function on each element; skipping further
136
+ * search when a result is found.</li>
137
+ *
138
+ * <li> reduce: Accumulate each element. The supplied reduction
139
+ * function cannot rely on ordering (more formally, it should be
140
+ * both associative and commutative). There are five variants:
141
+ *
142
+ * <ul>
143
+ *
144
+ * <li> Plain reductions. (There is not a form of this method for
145
+ * (key, value) function arguments since there is no corresponding
146
+ * return type.)</li>
147
+ *
148
+ * <li> Mapped reductions that accumulate the results of a given
149
+ * function applied to each element.</li>
150
+ *
151
+ * <li> Reductions to scalar doubles, longs, and ints, using a
152
+ * given basis value.</li>
153
+ *
154
+ * </li>
155
+ * </ul>
156
+ * </ul>
157
+ *
158
+ * <p>The concurrency properties of bulk operations follow
159
+ * from those of ConcurrentHashMapV8: Any non-null result returned
160
+ * from {@code get(key)} and related access methods bears a
161
+ * happens-before relation with the associated insertion or
162
+ * update. The result of any bulk operation reflects the
163
+ * composition of these per-element relations (but is not
164
+ * necessarily atomic with respect to the map as a whole unless it
165
+ * is somehow known to be quiescent). Conversely, because keys
166
+ * and values in the map are never null, null serves as a reliable
167
+ * atomic indicator of the current lack of any result. To
168
+ * maintain this property, null serves as an implicit basis for
169
+ * all non-scalar reduction operations. For the double, long, and
170
+ * int versions, the basis should be one that, when combined with
171
+ * any other value, returns that other value (more formally, it
172
+ * should be the identity element for the reduction). Most common
173
+ * reductions have these properties; for example, computing a sum
174
+ * with basis 0 or a minimum with basis MAX_VALUE.
175
+ *
176
+ * <p>Search and transformation functions provided as arguments
177
+ * should similarly return null to indicate the lack of any result
178
+ * (in which case it is not used). In the case of mapped
179
+ * reductions, this also enables transformations to serve as
180
+ * filters, returning null (or, in the case of primitive
181
+ * specializations, the identity basis) if the element should not
182
+ * be combined. You can create compound transformations and
183
+ * filterings by composing them yourself under this "null means
184
+ * there is nothing there now" rule before using them in search or
185
+ * reduce operations.
186
+ *
187
+ * <p>Methods accepting and/or returning Entry arguments maintain
188
+ * key-value associations. They may be useful for example when
189
+ * finding the key for the greatest value. Note that "plain" Entry
190
+ * arguments can be supplied using {@code new
191
+ * AbstractMap.SimpleEntry(k,v)}.
192
+ *
193
+ * <p>Bulk operations may complete abruptly, throwing an
194
+ * exception encountered in the application of a supplied
195
+ * function. Bear in mind when handling such exceptions that other
196
+ * concurrently executing functions could also have thrown
197
+ * exceptions, or would have done so if the first exception had
198
+ * not occurred.
199
+ *
200
+ * <p>Parallel speedups for bulk operations compared to sequential
201
+ * processing are common but not guaranteed. Operations involving
202
+ * brief functions on small maps may execute more slowly than
203
+ * sequential loops if the underlying work to parallelize the
204
+ * computation is more expensive than the computation itself.
205
+ * Similarly, parallelization may not lead to much actual parallelism
206
+ * if all processors are busy performing unrelated tasks.
207
+ *
208
+ * <p>All arguments to all task methods must be non-null.
209
+ *
210
+ * <p><em>jsr166e note: During transition, this class
211
+ * uses nested functional interfaces with different names but the
212
+ * same forms as those expected for JDK8.</em>
213
+ *
214
+ * <p>This class is a member of the
215
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
216
+ * Java Collections Framework</a>.
217
+ *
218
+ * @since 1.5
219
+ * @author Doug Lea
220
+ * @param <K> the type of keys maintained by this map
221
+ * @param <V> the type of mapped values
222
+ */
223
+ public class ConcurrentHashMapV8<K, V>
224
+ implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
225
+ private static final long serialVersionUID = 7249069246763182397L;
226
+
227
+ /**
228
+ * A partitionable iterator. A Spliterator can be traversed
229
+ * directly, but can also be partitioned (before traversal) by
230
+ * creating another Spliterator that covers a non-overlapping
231
+ * portion of the elements, and so may be amenable to parallel
232
+ * execution.
233
+ *
234
+ * <p>This interface exports a subset of expected JDK8
235
+ * functionality.
236
+ *
237
+ * <p>Sample usage: Here is one (of the several) ways to compute
238
+ * the sum of the values held in a map using the ForkJoin
239
+ * framework. As illustrated here, Spliterators are well suited to
240
+ * designs in which a task repeatedly splits off half its work
241
+ * into forked subtasks until small enough to process directly,
242
+ * and then joins these subtasks. Variants of this style can also
243
+ * be used in completion-based designs.
244
+ *
245
+ * <pre>
246
+ * {@code ConcurrentHashMapV8<String, Long> m = ...
247
+ * // split as if have 8 * parallelism, for load balance
248
+ * int n = m.size();
249
+ * int p = aForkJoinPool.getParallelism() * 8;
250
+ * int split = (n < p)? n : p;
251
+ * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
252
+ * // ...
253
+ * static class SumValues extends RecursiveTask<Long> {
254
+ * final Spliterator<Long> s;
255
+ * final int split; // split while > 1
256
+ * final SumValues nextJoin; // records forked subtasks to join
257
+ * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
258
+ * this.s = s; this.depth = depth; this.nextJoin = nextJoin;
259
+ * }
260
+ * public Long compute() {
261
+ * long sum = 0;
262
+ * SumValues subtasks = null; // fork subtasks
263
+ * for (int s = split >>> 1; s > 0; s >>>= 1)
264
+ * (subtasks = new SumValues(s.split(), s, subtasks)).fork();
265
+ * while (s.hasNext()) // directly process remaining elements
266
+ * sum += s.next();
267
+ * for (SumValues t = subtasks; t != null; t = t.nextJoin)
268
+ * sum += t.join(); // collect subtask results
269
+ * return sum;
270
+ * }
271
+ * }
272
+ * }</pre>
273
+ */
274
+ public static interface Spliterator<T> extends Iterator<T> {
275
+ /**
276
+ * Returns a Spliterator covering approximately half of the
277
+ * elements, guaranteed not to overlap with those subsequently
278
+ * returned by this Spliterator. After invoking this method,
279
+ * the current Spliterator will <em>not</em> produce any of
280
+ * the elements of the returned Spliterator, but the two
281
+ * Spliterators together will produce all of the elements that
282
+ * would have been produced by this Spliterator had this
283
+ * method not been called. The exact number of elements
284
+ * produced by the returned Spliterator is not guaranteed, and
285
+ * may be zero (i.e., with {@code hasNext()} reporting {@code
286
+ * false}) if this Spliterator cannot be further split.
287
+ *
288
+ * @return a Spliterator covering approximately half of the
289
+ * elements
290
+ * @throws IllegalStateException if this Spliterator has
291
+ * already commenced traversing elements
292
+ */
293
+ Spliterator<T> split();
294
+ }
295
+
296
+
297
+ /*
298
+ * Overview:
299
+ *
300
+ * The primary design goal of this hash table is to maintain
301
+ * concurrent readability (typically method get(), but also
302
+ * iterators and related methods) while minimizing update
303
+ * contention. Secondary goals are to keep space consumption about
304
+ * the same or better than java.util.HashMap, and to support high
305
+ * initial insertion rates on an empty table by many threads.
306
+ *
307
+ * Each key-value mapping is held in a Node. Because Node fields
308
+ * can contain special values, they are defined using plain Object
309
+ * types. Similarly in turn, all internal methods that use them
310
+ * work off Object types. And similarly, so do the internal
311
+ * methods of auxiliary iterator and view classes. All public
312
+ * generic typed methods relay in/out of these internal methods,
313
+ * supplying null-checks and casts as needed. This also allows
314
+ * many of the public methods to be factored into a smaller number
315
+ * of internal methods (although sadly not so for the five
316
+ * variants of put-related operations). The validation-based
317
+ * approach explained below leads to a lot of code sprawl because
318
+ * retry-control precludes factoring into smaller methods.
319
+ *
320
+ * The table is lazily initialized to a power-of-two size upon the
321
+ * first insertion. Each bin in the table normally contains a
322
+ * list of Nodes (most often, the list has only zero or one Node).
323
+ * Table accesses require volatile/atomic reads, writes, and
324
+ * CASes. Because there is no other way to arrange this without
325
+ * adding further indirections, we use intrinsics
326
+ * (sun.misc.Unsafe) operations. The lists of nodes within bins
327
+ * are always accurately traversable under volatile reads, so long
328
+ * as lookups check hash code and non-nullness of value before
329
+ * checking key equality.
330
+ *
331
+ * We use the top two bits of Node hash fields for control
332
+ * purposes -- they are available anyway because of addressing
333
+ * constraints. As explained further below, these top bits are
334
+ * used as follows:
335
+ * 00 - Normal
336
+ * 01 - Locked
337
+ * 11 - Locked and may have a thread waiting for lock
338
+ * 10 - Node is a forwarding node
339
+ *
340
+ * The lower 30 bits of each Node's hash field contain a
341
+ * transformation of the key's hash code, except for forwarding
342
+ * nodes, for which the lower bits are zero (and so always have
343
+ * hash field == MOVED).
344
+ *
345
+ * Insertion (via put or its variants) of the first node in an
346
+ * empty bin is performed by just CASing it to the bin. This is
347
+ * by far the most common case for put operations under most
348
+ * key/hash distributions. Other update operations (insert,
349
+ * delete, and replace) require locks. We do not want to waste
350
+ * the space required to associate a distinct lock object with
351
+ * each bin, so instead use the first node of a bin list itself as
352
+ * a lock. Blocking support for these locks relies on the builtin
353
+ * "synchronized" monitors. However, we also need a tryLock
354
+ * construction, so we overlay these by using bits of the Node
355
+ * hash field for lock control (see above), and so normally use
356
+ * builtin monitors only for blocking and signalling using
357
+ * wait/notifyAll constructions. See Node.tryAwaitLock.
358
+ *
359
+ * Using the first node of a list as a lock does not by itself
360
+ * suffice though: When a node is locked, any update must first
361
+ * validate that it is still the first node after locking it, and
362
+ * retry if not. Because new nodes are always appended to lists,
363
+ * once a node is first in a bin, it remains first until deleted
364
+ * or the bin becomes invalidated (upon resizing). However,
365
+ * operations that only conditionally update may inspect nodes
366
+ * until the point of update. This is a converse of sorts to the
367
+ * lazy locking technique described by Herlihy & Shavit.
368
+ *
369
+ * The main disadvantage of per-bin locks is that other update
370
+ * operations on other nodes in a bin list protected by the same
371
+ * lock can stall, for example when user equals() or mapping
372
+ * functions take a long time. However, statistically, under
373
+ * random hash codes, this is not a common problem. Ideally, the
374
+ * frequency of nodes in bins follows a Poisson distribution
375
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
376
+ * parameter of about 0.5 on average, given the resizing threshold
377
+ * of 0.75, although with a large variance because of resizing
378
+ * granularity. Ignoring variance, the expected occurrences of
379
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
380
+ * first values are:
381
+ *
382
+ * 0: 0.60653066
383
+ * 1: 0.30326533
384
+ * 2: 0.07581633
385
+ * 3: 0.01263606
386
+ * 4: 0.00157952
387
+ * 5: 0.00015795
388
+ * 6: 0.00001316
389
+ * 7: 0.00000094
390
+ * 8: 0.00000006
391
+ * more: less than 1 in ten million
392
+ *
393
+ * Lock contention probability for two threads accessing distinct
394
+ * elements is roughly 1 / (8 * #elements) under random hashes.
395
+ *
396
+ * Actual hash code distributions encountered in practice
397
+ * sometimes deviate significantly from uniform randomness. This
398
+ * includes the case when N > (1<<30), so some keys MUST collide.
399
+ * Similarly for dumb or hostile usages in which multiple keys are
400
+ * designed to have identical hash codes. Also, although we guard
401
+ * against the worst effects of this (see method spread), sets of
402
+ * hashes may differ only in bits that do not impact their bin
403
+ * index for a given power-of-two mask. So we use a secondary
404
+ * strategy that applies when the number of nodes in a bin exceeds
405
+ * a threshold, and at least one of the keys implements
406
+ * Comparable. These TreeBins use a balanced tree to hold nodes
407
+ * (a specialized form of red-black trees), bounding search time
408
+ * to O(log N). Each search step in a TreeBin is around twice as
409
+ * slow as in a regular list, but given that N cannot exceed
410
+ * (1<<64) (before running out of addresses) this bounds search
411
+ * steps, lock hold times, etc, to reasonable constants (roughly
412
+ * 100 nodes inspected per operation worst case) so long as keys
413
+ * are Comparable (which is very common -- String, Long, etc).
414
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
415
+ * traversal pointers as regular nodes, so can be traversed in
416
+ * iterators in the same way.
417
+ *
418
+ * The table is resized when occupancy exceeds a percentage
419
+ * threshold (nominally, 0.75, but see below). Only a single
420
+ * thread performs the resize (using field "sizeCtl", to arrange
421
+ * exclusion), but the table otherwise remains usable for reads
422
+ * and updates. Resizing proceeds by transferring bins, one by
423
+ * one, from the table to the next table. Because we are using
424
+ * power-of-two expansion, the elements from each bin must either
425
+ * stay at same index, or move with a power of two offset. We
426
+ * eliminate unnecessary node creation by catching cases where old
427
+ * nodes can be reused because their next fields won't change. On
428
+ * average, only about one-sixth of them need cloning when a table
429
+ * doubles. The nodes they replace will be garbage collectable as
430
+ * soon as they are no longer referenced by any reader thread that
431
+ * may be in the midst of concurrently traversing table. Upon
432
+ * transfer, the old table bin contains only a special forwarding
433
+ * node (with hash field "MOVED") that contains the next table as
434
+ * its key. On encountering a forwarding node, access and update
435
+ * operations restart, using the new table.
436
+ *
437
+ * Each bin transfer requires its bin lock. However, unlike other
438
+ * cases, a transfer can skip a bin if it fails to acquire its
439
+ * lock, and revisit it later (unless it is a TreeBin). Method
440
+ * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
441
+ * have been skipped because of failure to acquire a lock, and
442
+ * blocks only if none are available (i.e., only very rarely).
443
+ * The transfer operation must also ensure that all accessible
444
+ * bins in both the old and new table are usable by any traversal.
445
+ * When there are no lock acquisition failures, this is arranged
446
+ * simply by proceeding from the last bin (table.length - 1) up
447
+ * towards the first. Upon seeing a forwarding node, traversals
448
+ * (see class Iter) arrange to move to the new table
449
+ * without revisiting nodes. However, when any node is skipped
450
+ * during a transfer, all earlier table bins may have become
451
+ * visible, so are initialized with a reverse-forwarding node back
452
+ * to the old table until the new ones are established. (This
453
+ * sometimes requires transiently locking a forwarding node, which
454
+ * is possible under the above encoding.) These more expensive
455
+ * mechanics trigger only when necessary.
456
+ *
457
+ * The traversal scheme also applies to partial traversals of
458
+ * ranges of bins (via an alternate Traverser constructor)
459
+ * to support partitioned aggregate operations. Also, read-only
460
+ * operations give up if ever forwarded to a null table, which
461
+ * provides support for shutdown-style clearing, which is also not
462
+ * currently implemented.
463
+ *
464
+ * Lazy table initialization minimizes footprint until first use,
465
+ * and also avoids resizings when the first operation is from a
466
+ * putAll, constructor with map argument, or deserialization.
467
+ * These cases attempt to override the initial capacity settings,
468
+ * but harmlessly fail to take effect in cases of races.
469
+ *
470
+ * The element count is maintained using a LongAdder, which avoids
471
+ * contention on updates but can encounter cache thrashing if read
472
+ * too frequently during concurrent access. To avoid reading so
473
+ * often, resizing is attempted either when a bin lock is
474
+ * contended, or upon adding to a bin already holding two or more
475
+ * nodes (checked before adding in the xIfAbsent methods, after
476
+ * adding in others). Under uniform hash distributions, the
477
+ * probability of this occurring at threshold is around 13%,
478
+ * meaning that only about 1 in 8 puts check threshold (and after
479
+ * resizing, many fewer do so). But this approximation has high
480
+ * variance for small table sizes, so we check on any collision
481
+ * for sizes <= 64. The bulk putAll operation further reduces
482
+ * contention by only committing count updates upon these size
483
+ * checks.
484
+ *
485
+ * Maintaining API and serialization compatibility with previous
486
+ * versions of this class introduces several oddities. Mainly: We
487
+ * leave untouched but unused constructor arguments refering to
488
+ * concurrencyLevel. We accept a loadFactor constructor argument,
489
+ * but apply it only to initial table capacity (which is the only
490
+ * time that we can guarantee to honor it.) We also declare an
491
+ * unused "Segment" class that is instantiated in minimal form
492
+ * only when serializing.
493
+ */
494
+
495
+ /* ---------------- Constants -------------- */
496
+
497
+ /**
498
+ * The largest possible table capacity. This value must be
499
+ * exactly 1<<30 to stay within Java array allocation and indexing
500
+ * bounds for power of two table sizes, and is further required
501
+ * because the top two bits of 32bit hash fields are used for
502
+ * control purposes.
503
+ */
504
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
505
+
506
+ /**
507
+ * The default initial table capacity. Must be a power of 2
508
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
509
+ */
510
+ private static final int DEFAULT_CAPACITY = 16;
511
+
512
+ /**
513
+ * The largest possible (non-power of two) array size.
514
+ * Needed by toArray and related methods.
515
+ */
516
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
517
+
518
+ /**
519
+ * The default concurrency level for this table. Unused but
520
+ * defined for compatibility with previous versions of this class.
521
+ */
522
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
523
+
524
+ /**
525
+ * The load factor for this table. Overrides of this value in
526
+ * constructors affect only the initial table capacity. The
527
+ * actual floating point value isn't normally used -- it is
528
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
529
+ * the associated resizing threshold.
530
+ */
531
+ private static final float LOAD_FACTOR = 0.75f;
532
+
533
+ /**
534
+ * The buffer size for skipped bins during transfers. The
535
+ * value is arbitrary but should be large enough to avoid
536
+ * most locking stalls during resizes.
537
+ */
538
+ private static final int TRANSFER_BUFFER_SIZE = 32;
539
+
540
+ /**
541
+ * The bin count threshold for using a tree rather than list for a
542
+ * bin. The value reflects the approximate break-even point for
543
+ * using tree-based operations.
544
+ * Note that Doug's version defaults to 8, but when dealing with
545
+ * Ruby objects it is actually beneficial to avoid TreeNodes
546
+ * as long as possible as it usually means going into Ruby land.
547
+ */
548
+ private static final int TREE_THRESHOLD = 16;
549
+
550
+ /*
551
+ * Encodings for special uses of Node hash fields. See above for
552
+ * explanation.
553
+ */
554
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
555
+ static final int LOCKED = 0x40000000; // set/tested only as a bit
556
+ static final int WAITING = 0xc0000000; // both bits set/tested together
557
+ static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
558
+
559
+ /* ---------------- Fields -------------- */
560
+
561
+ /**
562
+ * The array of bins. Lazily initialized upon first insertion.
563
+ * Size is always a power of two. Accessed directly by iterators.
564
+ */
565
+ transient volatile AtomicReferenceArray<Node> table;
566
+
567
+ /**
568
+ * The counter maintaining number of elements.
569
+ */
570
+ private transient LongAdder counter;
571
+
572
+ /**
573
+ * Table initialization and resizing control. When negative, the
574
+ * table is being initialized or resized. Otherwise, when table is
575
+ * null, holds the initial table size to use upon creation, or 0
576
+ * for default. After initialization, holds the next element count
577
+ * value upon which to resize the table.
578
+ */
579
+ private transient volatile int sizeCtl;
580
+
581
+ // views
582
+ private transient KeySetView<K,V> keySet;
583
+ private transient ValuesView<K,V> values;
584
+ private transient EntrySetView<K,V> entrySet;
585
+
586
+ /** For serialization compatibility. Null unless serialized; see below */
587
+ private Segment<K,V>[] segments;
588
+
589
+ static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl");
590
+
591
+ /* ---------------- Table element access -------------- */
592
+
593
+ /*
594
+ * Volatile access methods are used for table elements as well as
595
+ * elements of in-progress next table while resizing. Uses are
596
+ * null checked by callers, and implicitly bounds-checked, relying
597
+ * on the invariants that tab arrays have non-zero size, and all
598
+ * indices are masked with (tab.length - 1) which is never
599
+ * negative and always less than length. Note that, to be correct
600
+ * wrt arbitrary concurrency errors by users, bounds checks must
601
+ * operate on local variables, which accounts for some odd-looking
602
+ * inline assignments below.
603
+ */
604
+
605
+ static final Node tabAt(AtomicReferenceArray<Node> tab, int i) { // used by Iter
606
+ return tab.get(i);
607
+ }
608
+
609
+ private static final boolean casTabAt(AtomicReferenceArray<Node> tab, int i, Node c, Node v) {
610
+ return tab.compareAndSet(i, c, v);
611
+ }
612
+
613
+ private static final void setTabAt(AtomicReferenceArray<Node> tab, int i, Node v) {
614
+ tab.set(i, v);
615
+ }
616
+
617
+ /* ---------------- Nodes -------------- */
618
+
619
+ /**
620
+ * Key-value entry. Note that this is never exported out as a
621
+ * user-visible Map.Entry (see MapEntry below). Nodes with a hash
622
+ * field of MOVED are special, and do not contain user keys or
623
+ * values. Otherwise, keys are never null, and null val fields
624
+ * indicate that a node is in the process of being deleted or
625
+ * created. For purposes of read-only access, a key may be read
626
+ * before a val, but can only be used after checking val to be
627
+ * non-null.
628
+ */
629
+ static class Node {
630
+ volatile int hash;
631
+ final Object key;
632
+ volatile Object val;
633
+ volatile Node next;
634
+
635
+ static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash");
636
+
637
+ Node(int hash, Object key, Object val, Node next) {
638
+ this.hash = hash;
639
+ this.key = key;
640
+ this.val = val;
641
+ this.next = next;
642
+ }
643
+
644
+ /** CompareAndSet the hash field */
645
+ final boolean casHash(int cmp, int val) {
646
+ return HASH_UPDATER.compareAndSet(this, cmp, val);
647
+ }
648
+
649
+ /** The number of spins before blocking for a lock */
650
+ static final int MAX_SPINS =
651
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
652
+
653
+ /**
654
+ * Spins a while if LOCKED bit set and this node is the first
655
+ * of its bin, and then sets WAITING bits on hash field and
656
+ * blocks (once) if they are still set. It is OK for this
657
+ * method to return even if lock is not available upon exit,
658
+ * which enables these simple single-wait mechanics.
659
+ *
660
+ * The corresponding signalling operation is performed within
661
+ * callers: Upon detecting that WAITING has been set when
662
+ * unlocking lock (via a failed CAS from non-waiting LOCKED
663
+ * state), unlockers acquire the sync lock and perform a
664
+ * notifyAll.
665
+ *
666
+ * The initial sanity check on tab and bounds is not currently
667
+ * necessary in the only usages of this method, but enables
668
+ * use in other future contexts.
669
+ */
670
+ final void tryAwaitLock(AtomicReferenceArray<Node> tab, int i) {
671
+ if (tab != null && i >= 0 && i < tab.length()) { // sanity check
672
+ int r = ThreadLocalRandom.current().nextInt(); // randomize spins
673
+ int spins = MAX_SPINS, h;
674
+ while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
675
+ if (spins >= 0) {
676
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
677
+ if (r >= 0 && --spins == 0)
678
+ Thread.yield(); // yield before block
679
+ }
680
+ else if (casHash(h, h | WAITING)) {
681
+ synchronized (this) {
682
+ if (tabAt(tab, i) == this &&
683
+ (hash & WAITING) == WAITING) {
684
+ try {
685
+ wait();
686
+ } catch (InterruptedException ie) {
687
+ Thread.currentThread().interrupt();
688
+ }
689
+ }
690
+ else
691
+ notifyAll(); // possibly won race vs signaller
692
+ }
693
+ break;
694
+ }
695
+ }
696
+ }
697
+ }
698
+ }
699
+
700
+ /* ---------------- TreeBins -------------- */
701
+
702
+ /**
703
+ * Nodes for use in TreeBins
704
+ */
705
+ static final class TreeNode extends Node {
706
+ TreeNode parent; // red-black tree links
707
+ TreeNode left;
708
+ TreeNode right;
709
+ TreeNode prev; // needed to unlink next upon deletion
710
+ boolean red;
711
+
712
+ TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
713
+ super(hash, key, val, next);
714
+ this.parent = parent;
715
+ }
716
+ }
717
+
718
+ /**
719
+ * A specialized form of red-black tree for use in bins
720
+ * whose size exceeds a threshold.
721
+ *
722
+ * TreeBins use a special form of comparison for search and
723
+ * related operations (which is the main reason we cannot use
724
+ * existing collections such as TreeMaps). TreeBins contain
725
+ * Comparable elements, but may contain others, as well as
726
+ * elements that are Comparable but not necessarily Comparable<T>
727
+ * for the same T, so we cannot invoke compareTo among them. To
728
+ * handle this, the tree is ordered primarily by hash value, then
729
+ * by getClass().getName() order, and then by Comparator order
730
+ * among elements of the same class. On lookup at a node, if
731
+ * elements are not comparable or compare as 0, both left and
732
+ * right children may need to be searched in the case of tied hash
733
+ * values. (This corresponds to the full list search that would be
734
+ * necessary if all elements were non-Comparable and had tied
735
+ * hashes.) The red-black balancing code is updated from
736
+ * pre-jdk-collections
737
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
738
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
739
+ * Algorithms" (CLR).
740
+ *
741
+ * TreeBins also maintain a separate locking discipline than
742
+ * regular bins. Because they are forwarded via special MOVED
743
+ * nodes at bin heads (which can never change once established),
744
+ * we cannot use those nodes as locks. Instead, TreeBin
745
+ * extends AbstractQueuedSynchronizer to support a simple form of
746
+ * read-write lock. For update operations and table validation,
747
+ * the exclusive form of lock behaves in the same way as bin-head
748
+ * locks. However, lookups use shared read-lock mechanics to allow
749
+ * multiple readers in the absence of writers. Additionally,
750
+ * these lookups do not ever block: While the lock is not
751
+ * available, they proceed along the slow traversal path (via
752
+ * next-pointers) until the lock becomes available or the list is
753
+ * exhausted, whichever comes first. (These cases are not fast,
754
+ * but maximize aggregate expected throughput.) The AQS mechanics
755
+ * for doing this are straightforward. The lock state is held as
756
+ * AQS getState(). Read counts are negative; the write count (1)
757
+ * is positive. There are no signalling preferences among readers
758
+ * and writers. Since we don't need to export full Lock API, we
759
+ * just override the minimal AQS methods and use them directly.
760
+ */
761
+ static final class TreeBin extends AbstractQueuedSynchronizer {
762
+ private static final long serialVersionUID = 2249069246763182397L;
763
+ transient TreeNode root; // root of tree
764
+ transient TreeNode first; // head of next-pointer list
765
+
766
+ /* AQS overrides */
767
+ public final boolean isHeldExclusively() { return getState() > 0; }
768
+ public final boolean tryAcquire(int ignore) {
769
+ if (compareAndSetState(0, 1)) {
770
+ setExclusiveOwnerThread(Thread.currentThread());
771
+ return true;
772
+ }
773
+ return false;
774
+ }
775
+ public final boolean tryRelease(int ignore) {
776
+ setExclusiveOwnerThread(null);
777
+ setState(0);
778
+ return true;
779
+ }
780
+ public final int tryAcquireShared(int ignore) {
781
+ for (int c;;) {
782
+ if ((c = getState()) > 0)
783
+ return -1;
784
+ if (compareAndSetState(c, c - 1))
785
+ return 1;
786
+ }
787
+ }
788
+ public final boolean tryReleaseShared(int ignore) {
789
+ int c;
790
+ do {} while (!compareAndSetState(c = getState(), c + 1));
791
+ return c == -1;
792
+ }
793
+
794
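The four overrides above are the whole locking story for TreeBin: a single writer holds state 1, readers are counted negatively, and the last reader leaving (state -1 back to 0) reports true so a blocked writer can be woken. A self-contained sketch of the same encoding that can be compiled outside this file (the class name and the main() exercise are illustrative only):

    import java.util.concurrent.locks.AbstractQueuedSynchronizer;

    // Minimal read-write lock with the same state encoding as TreeBin:
    // state == 1 -> held exclusively by a writer
    // state  < 0 -> |state| shared readers
    // state == 0 -> free
    public class TreeBinStyleLock extends AbstractQueuedSynchronizer {
        @Override public boolean isHeldExclusively() { return getState() > 0; }

        @Override public boolean tryAcquire(int ignore) {
            if (compareAndSetState(0, 1)) {
                setExclusiveOwnerThread(Thread.currentThread());
                return true;
            }
            return false;
        }

        @Override public boolean tryRelease(int ignore) {
            setExclusiveOwnerThread(null);
            setState(0);
            return true;
        }

        @Override public int tryAcquireShared(int ignore) {
            for (int c;;) {
                if ((c = getState()) > 0)
                    return -1;                     // writer present: fail, caller may fall back
                if (compareAndSetState(c, c - 1))  // one more reader
                    return 1;
            }
        }

        @Override public boolean tryReleaseShared(int ignore) {
            int c;
            do {} while (!compareAndSetState(c = getState(), c + 1));
            return c == -1;                        // last reader out wakes a waiting writer
        }

        public static void main(String[] args) {
            TreeBinStyleLock lock = new TreeBinStyleLock();
            lock.acquireShared(0);                 // two readers may hold the lock together
            lock.acquireShared(0);
            System.out.println("state with two readers: " + lock.getState()); // -2
            lock.releaseShared(0);
            lock.releaseShared(0);
            lock.acquire(0);                       // now a single writer
            System.out.println("state with one writer: " + lock.getState());  // 1
            lock.release(0);
        }
    }

Lookups in getValue below additionally avoid blocking: if the CAS into the negative (reader) range fails, they simply keep walking the next pointers instead of parking.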
+ /** From CLR */
795
+ private void rotateLeft(TreeNode p) {
796
+ if (p != null) {
797
+ TreeNode r = p.right, pp, rl;
798
+ if ((rl = p.right = r.left) != null)
799
+ rl.parent = p;
800
+ if ((pp = r.parent = p.parent) == null)
801
+ root = r;
802
+ else if (pp.left == p)
803
+ pp.left = r;
804
+ else
805
+ pp.right = r;
806
+ r.left = p;
807
+ p.parent = r;
808
+ }
809
+ }
810
+
811
+ /** From CLR */
812
+ private void rotateRight(TreeNode p) {
813
+ if (p != null) {
814
+ TreeNode l = p.left, pp, lr;
815
+ if ((lr = p.left = l.right) != null)
816
+ lr.parent = p;
817
+ if ((pp = l.parent = p.parent) == null)
818
+ root = l;
819
+ else if (pp.right == p)
820
+ pp.right = l;
821
+ else
822
+ pp.left = l;
823
+ l.right = p;
824
+ p.parent = l;
825
+ }
826
+ }
827
+
828
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
829
+ (int h, Object k, TreeNode p) {
830
+ return getTreeNode(h, (RubyObject)k, p);
831
+ }
832
+
833
+ /**
834
+ * Returns the TreeNode (or null if not found) for the given key
835
+ * starting at given root.
836
+ */
837
+ @SuppressWarnings("unchecked") final TreeNode getTreeNode
838
+ (int h, RubyObject k, TreeNode p) {
839
+ RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>");
840
+ while (p != null) {
841
+ int dir, ph; RubyObject pk; RubyClass pc;
842
+ if ((ph = p.hash) == h) {
843
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
844
+ return p;
845
+ if (c != (pc = (RubyClass)pk.getMetaClass()) ||
846
+ kNotComparable ||
847
+ (dir = rubyCompare(k, pk)) == 0) {
848
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
849
+ if (dir == 0) { // if still stuck, need to check both sides
850
+ TreeNode r = null, pl, pr;
851
+ // try to recurse on the right
852
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
853
+ return r;
854
+ // try to continue iterating on the left side
855
+ else if ((pl = p.left) != null && h <= pl.hash)
856
+ dir = -1;
857
+ else // no matching node found
858
+ return null;
859
+ }
860
+ }
861
+ }
862
+ else
863
+ dir = (h < ph) ? -1 : 1;
864
+ p = (dir > 0) ? p.right : p.left;
865
+ }
866
+ return null;
867
+ }
868
+
869
+ int rubyCompare(RubyObject l, RubyObject r) {
870
+ ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext();
871
+ IRubyObject result;
872
+ try {
873
+ result = l.callMethod(context, "<=>", r);
874
+ } catch (RaiseException e) {
875
+ // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys
876
+ if (context.runtime.getNoMethodError().isInstance(e.getException())) {
877
+ return 0;
878
+ }
879
+ throw e;
880
+ }
881
+
882
+ return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger());
883
+ }
884
+
885
+ /**
886
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
887
+ * read-lock to call getTreeNode, but during failure to get
888
+ * lock, searches along next links.
889
+ */
890
+ final Object getValue(int h, Object k) {
891
+ Node r = null;
892
+ int c = getState(); // Must read lock state first
893
+ for (Node e = first; e != null; e = e.next) {
894
+ if (c <= 0 && compareAndSetState(c, c - 1)) {
895
+ try {
896
+ r = getTreeNode(h, k, root);
897
+ } finally {
898
+ releaseShared(0);
899
+ }
900
+ break;
901
+ }
902
+ else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
903
+ r = e;
904
+ break;
905
+ }
906
+ else
907
+ c = getState();
908
+ }
909
+ return r == null ? null : r.val;
910
+ }
911
+
912
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
913
+ (int h, Object k, Object v) {
914
+ return putTreeNode(h, (RubyObject)k, v);
915
+ }
916
+
917
+ /**
918
+ * Finds or adds a node.
919
+ * @return null if added
920
+ */
921
+ @SuppressWarnings("unchecked") final TreeNode putTreeNode
922
+ (int h, RubyObject k, Object v) {
923
+ RubyClass c = k.getMetaClass();
924
+ boolean kNotComparable = !k.respondsTo("<=>");
925
+ TreeNode pp = root, p = null;
926
+ int dir = 0;
927
+ while (pp != null) { // find existing node or leaf to insert at
928
+ int ph; RubyObject pk; RubyClass pc;
929
+ p = pp;
930
+ if ((ph = p.hash) == h) {
931
+ if ((pk = (RubyObject)p.key) == k || k.equals(pk))
932
+ return p;
933
+ if (c != (pc = pk.getMetaClass()) ||
934
+ kNotComparable ||
935
+ (dir = rubyCompare(k, pk)) == 0) {
936
+ dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
937
+ if (dir == 0) { // if still stuck, need to check both sides
938
+ TreeNode r = null, pr;
939
+ // try to recurse on the right
940
+ if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null)
941
+ return r;
942
+ else // continue descending down the left subtree
943
+ dir = -1;
944
+ }
945
+ }
946
+ }
947
+ else
948
+ dir = (h < ph) ? -1 : 1;
949
+ pp = (dir > 0) ? p.right : p.left;
950
+ }
951
+
952
+ TreeNode f = first;
953
+ TreeNode x = first = new TreeNode(h, (Object)k, v, f, p);
954
+ if (p == null)
955
+ root = x;
956
+ else { // attach and rebalance; adapted from CLR
957
+ TreeNode xp, xpp;
958
+ if (f != null)
959
+ f.prev = x;
960
+ if (dir <= 0)
961
+ p.left = x;
962
+ else
963
+ p.right = x;
964
+ x.red = true;
965
+ while (x != null && (xp = x.parent) != null && xp.red &&
966
+ (xpp = xp.parent) != null) {
967
+ TreeNode xppl = xpp.left;
968
+ if (xp == xppl) {
969
+ TreeNode y = xpp.right;
970
+ if (y != null && y.red) {
971
+ y.red = false;
972
+ xp.red = false;
973
+ xpp.red = true;
974
+ x = xpp;
975
+ }
976
+ else {
977
+ if (x == xp.right) {
978
+ rotateLeft(x = xp);
979
+ xpp = (xp = x.parent) == null ? null : xp.parent;
980
+ }
981
+ if (xp != null) {
982
+ xp.red = false;
983
+ if (xpp != null) {
984
+ xpp.red = true;
985
+ rotateRight(xpp);
986
+ }
987
+ }
988
+ }
989
+ }
990
+ else {
991
+ TreeNode y = xppl;
992
+ if (y != null && y.red) {
993
+ y.red = false;
994
+ xp.red = false;
995
+ xpp.red = true;
996
+ x = xpp;
997
+ }
998
+ else {
999
+ if (x == xp.left) {
1000
+ rotateRight(x = xp);
1001
+ xpp = (xp = x.parent) == null ? null : xp.parent;
1002
+ }
1003
+ if (xp != null) {
1004
+ xp.red = false;
1005
+ if (xpp != null) {
1006
+ xpp.red = true;
1007
+ rotateLeft(xpp);
1008
+ }
1009
+ }
1010
+ }
1011
+ }
1012
+ }
1013
+ TreeNode r = root;
1014
+ if (r != null && r.red)
1015
+ r.red = false;
1016
+ }
1017
+ return null;
1018
+ }
1019
+
1020
+ /**
1021
+ * Removes the given node, which must be present before this
1022
+ * call. This is messier than typical red-black deletion code
1023
+ * because we cannot swap the contents of an interior node
1024
+ * with a leaf successor that is pinned by "next" pointers
1025
+ * that are accessible independently of lock. So instead we
1026
+ * swap the tree linkages.
1027
+ */
1028
+ final void deleteTreeNode(TreeNode p) {
1029
+ TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1030
+ TreeNode pred = p.prev;
1031
+ if (pred == null)
1032
+ first = next;
1033
+ else
1034
+ pred.next = next;
1035
+ if (next != null)
1036
+ next.prev = pred;
1037
+ TreeNode replacement;
1038
+ TreeNode pl = p.left;
1039
+ TreeNode pr = p.right;
1040
+ if (pl != null && pr != null) {
1041
+ TreeNode s = pr, sl;
1042
+ while ((sl = s.left) != null) // find successor
1043
+ s = sl;
1044
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1045
+ TreeNode sr = s.right;
1046
+ TreeNode pp = p.parent;
1047
+ if (s == pr) { // p was s's direct parent
1048
+ p.parent = s;
1049
+ s.right = p;
1050
+ }
1051
+ else {
1052
+ TreeNode sp = s.parent;
1053
+ if ((p.parent = sp) != null) {
1054
+ if (s == sp.left)
1055
+ sp.left = p;
1056
+ else
1057
+ sp.right = p;
1058
+ }
1059
+ if ((s.right = pr) != null)
1060
+ pr.parent = s;
1061
+ }
1062
+ p.left = null;
1063
+ if ((p.right = sr) != null)
1064
+ sr.parent = p;
1065
+ if ((s.left = pl) != null)
1066
+ pl.parent = s;
1067
+ if ((s.parent = pp) == null)
1068
+ root = s;
1069
+ else if (p == pp.left)
1070
+ pp.left = s;
1071
+ else
1072
+ pp.right = s;
1073
+ replacement = sr;
1074
+ }
1075
+ else
1076
+ replacement = (pl != null) ? pl : pr;
1077
+ TreeNode pp = p.parent;
1078
+ if (replacement == null) {
1079
+ if (pp == null) {
1080
+ root = null;
1081
+ return;
1082
+ }
1083
+ replacement = p;
1084
+ }
1085
+ else {
1086
+ replacement.parent = pp;
1087
+ if (pp == null)
1088
+ root = replacement;
1089
+ else if (p == pp.left)
1090
+ pp.left = replacement;
1091
+ else
1092
+ pp.right = replacement;
1093
+ p.left = p.right = p.parent = null;
1094
+ }
1095
+ if (!p.red) { // rebalance, from CLR
1096
+ TreeNode x = replacement;
1097
+ while (x != null) {
1098
+ TreeNode xp, xpl;
1099
+ if (x.red || (xp = x.parent) == null) {
1100
+ x.red = false;
1101
+ break;
1102
+ }
1103
+ if (x == (xpl = xp.left)) {
1104
+ TreeNode sib = xp.right;
1105
+ if (sib != null && sib.red) {
1106
+ sib.red = false;
1107
+ xp.red = true;
1108
+ rotateLeft(xp);
1109
+ sib = (xp = x.parent) == null ? null : xp.right;
1110
+ }
1111
+ if (sib == null)
1112
+ x = xp;
1113
+ else {
1114
+ TreeNode sl = sib.left, sr = sib.right;
1115
+ if ((sr == null || !sr.red) &&
1116
+ (sl == null || !sl.red)) {
1117
+ sib.red = true;
1118
+ x = xp;
1119
+ }
1120
+ else {
1121
+ if (sr == null || !sr.red) {
1122
+ if (sl != null)
1123
+ sl.red = false;
1124
+ sib.red = true;
1125
+ rotateRight(sib);
1126
+ sib = (xp = x.parent) == null ? null : xp.right;
1127
+ }
1128
+ if (sib != null) {
1129
+ sib.red = (xp == null) ? false : xp.red;
1130
+ if ((sr = sib.right) != null)
1131
+ sr.red = false;
1132
+ }
1133
+ if (xp != null) {
1134
+ xp.red = false;
1135
+ rotateLeft(xp);
1136
+ }
1137
+ x = root;
1138
+ }
1139
+ }
1140
+ }
1141
+ else { // symmetric
1142
+ TreeNode sib = xpl;
1143
+ if (sib != null && sib.red) {
1144
+ sib.red = false;
1145
+ xp.red = true;
1146
+ rotateRight(xp);
1147
+ sib = (xp = x.parent) == null ? null : xp.left;
1148
+ }
1149
+ if (sib == null)
1150
+ x = xp;
1151
+ else {
1152
+ TreeNode sl = sib.left, sr = sib.right;
1153
+ if ((sl == null || !sl.red) &&
1154
+ (sr == null || !sr.red)) {
1155
+ sib.red = true;
1156
+ x = xp;
1157
+ }
1158
+ else {
1159
+ if (sl == null || !sl.red) {
1160
+ if (sr != null)
1161
+ sr.red = false;
1162
+ sib.red = true;
1163
+ rotateLeft(sib);
1164
+ sib = (xp = x.parent) == null ? null : xp.left;
1165
+ }
1166
+ if (sib != null) {
1167
+ sib.red = (xp == null) ? false : xp.red;
1168
+ if ((sl = sib.left) != null)
1169
+ sl.red = false;
1170
+ }
1171
+ if (xp != null) {
1172
+ xp.red = false;
1173
+ rotateRight(xp);
1174
+ }
1175
+ x = root;
1176
+ }
1177
+ }
1178
+ }
1179
+ }
1180
+ }
1181
+ if (p == replacement && (pp = p.parent) != null) {
1182
+ if (p == pp.left) // detach pointers
1183
+ pp.left = null;
1184
+ else if (p == pp.right)
1185
+ pp.right = null;
1186
+ p.parent = null;
1187
+ }
1188
+ }
1189
+ }
1190
+
1191
+ /* ---------------- Collision reduction methods -------------- */
1192
+
1193
+ /**
1194
+ * Spreads higher bits to lower, and also forces top 2 bits to 0.
1195
+ * Because the table uses power-of-two masking, sets of hashes
1196
+ * that vary only in bits above the current mask will always
1197
+ * collide. (Among known examples are sets of Float keys holding
1198
+ * consecutive whole numbers in small tables.) To counter this,
1199
+ * we apply a transform that spreads the impact of higher bits
1200
+ * downward. There is a tradeoff between speed, utility, and
1201
+ * quality of bit-spreading. Because many common sets of hashes
1202
+ * are already reasonably distributed across bits (so don't benefit
1203
+ * from spreading), and because we use trees to handle large sets
1204
+ * of collisions in bins, we don't need excessively high quality.
1205
+ */
1206
+ private static final int spread(int h) {
1207
+ h ^= (h >>> 18) ^ (h >>> 12);
1208
+ return (h ^ (h >>> 10)) & HASH_BITS;
1209
+ }
1210
+
1211
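To make the comment concrete: the Float example it mentions really does collapse to a single bin when only the low bits are used, because floatToIntBits of small whole numbers has all-zero low bits. A standalone sketch (HASH_BITS is assumed to be 0x3fffffff here, matching the "top 2 bits forced to 0" described above):

    // Illustrative, standalone copy of the spread transform; HASH_BITS assumed 0x3fffffff.
    public class SpreadDemo {
        static final int HASH_BITS = 0x3fffffff;

        static int spread(int h) {
            h ^= (h >>> 18) ^ (h >>> 12);
            return (h ^ (h >>> 10)) & HASH_BITS;
        }

        public static void main(String[] args) {
            int mask = 16 - 1; // small power-of-two table
            for (float f : new float[] {1.0f, 2.0f, 3.0f}) {
                int h = Float.valueOf(f).hashCode(); // low bits are all zero for these keys
                System.out.printf("%.1f  raw bin=%d  spread bin=%d%n",
                                  f, h & mask, spread(h) & mask);
            }
        }
    }

Without the transform all three keys mask to bin 0 in a 16-slot table; the printed spread indices show them landing in distinct bins.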
+ /**
1212
+ * Replaces a list bin with a tree bin. Call only when locked.
1213
+ * Fails to replace if the given key is non-comparable or table
1214
+ * is, or needs, resizing.
1215
+ */
1216
+ private final void replaceWithTreeBin(AtomicReferenceArray<Node> tab, int index, Object key) {
1217
+ if ((key instanceof Comparable) &&
1218
+ (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1219
+ TreeBin t = new TreeBin();
1220
+ for (Node e = tabAt(tab, index); e != null; e = e.next)
1221
+ t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1222
+ setTabAt(tab, index, new Node(MOVED, t, null, null));
1223
+ }
1224
+ }
1225
+
1226
+ /* ---------------- Internal access and update methods -------------- */
1227
+
1228
+ /** Implementation for get and containsKey */
1229
+ private final Object internalGet(Object k) {
1230
+ int h = spread(k.hashCode());
1231
+ retry: for (AtomicReferenceArray<Node> tab = table; tab != null;) {
1232
+ Node e, p; Object ek, ev; int eh; // locals to read fields once
1233
+ for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) {
1234
+ if ((eh = e.hash) == MOVED) {
1235
+ if ((ek = e.key) instanceof TreeBin) // search TreeBin
1236
+ return ((TreeBin)ek).getValue(h, k);
1237
+ else { // restart with new table
1238
+ tab = (AtomicReferenceArray<Node>)ek;
1239
+ continue retry;
1240
+ }
1241
+ }
1242
+ else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1243
+ ((ek = e.key) == k || k.equals(ek)))
1244
+ return ev;
1245
+ }
1246
+ break;
1247
+ }
1248
+ return null;
1249
+ }
1250
+
1251
+ /**
1252
+ * Implementation for the four public remove/replace methods:
1253
+ * Replaces node value with v, conditional upon match of cv if
1254
+ * non-null. If resulting value is null, delete.
1255
+ */
1256
+ private final Object internalReplace(Object k, Object v, Object cv) {
1257
+ int h = spread(k.hashCode());
1258
+ Object oldVal = null;
1259
+ for (AtomicReferenceArray<Node> tab = table;;) {
1260
+ Node f; int i, fh; Object fk;
1261
+ if (tab == null ||
1262
+ (f = tabAt(tab, i = (tab.length() - 1) & h)) == null)
1263
+ break;
1264
+ else if ((fh = f.hash) == MOVED) {
1265
+ if ((fk = f.key) instanceof TreeBin) {
1266
+ TreeBin t = (TreeBin)fk;
1267
+ boolean validated = false;
1268
+ boolean deleted = false;
1269
+ t.acquire(0);
1270
+ try {
1271
+ if (tabAt(tab, i) == f) {
1272
+ validated = true;
1273
+ TreeNode p = t.getTreeNode(h, k, t.root);
1274
+ if (p != null) {
1275
+ Object pv = p.val;
1276
+ if (cv == null || cv == pv || cv.equals(pv)) {
1277
+ oldVal = pv;
1278
+ if ((p.val = v) == null) {
1279
+ deleted = true;
1280
+ t.deleteTreeNode(p);
1281
+ }
1282
+ }
1283
+ }
1284
+ }
1285
+ } finally {
1286
+ t.release(0);
1287
+ }
1288
+ if (validated) {
1289
+ if (deleted)
1290
+ counter.add(-1L);
1291
+ break;
1292
+ }
1293
+ }
1294
+ else
1295
+ tab = (AtomicReferenceArray<Node>)fk;
1296
+ }
1297
+ else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1298
+ break; // rules out possible existence
1299
+ else if ((fh & LOCKED) != 0) {
1300
+ checkForResize(); // try resizing if can't get lock
1301
+ f.tryAwaitLock(tab, i);
1302
+ }
1303
+ else if (f.casHash(fh, fh | LOCKED)) {
1304
+ boolean validated = false;
1305
+ boolean deleted = false;
1306
+ try {
1307
+ if (tabAt(tab, i) == f) {
1308
+ validated = true;
1309
+ for (Node e = f, pred = null;;) {
1310
+ Object ek, ev;
1311
+ if ((e.hash & HASH_BITS) == h &&
1312
+ ((ev = e.val) != null) &&
1313
+ ((ek = e.key) == k || k.equals(ek))) {
1314
+ if (cv == null || cv == ev || cv.equals(ev)) {
1315
+ oldVal = ev;
1316
+ if ((e.val = v) == null) {
1317
+ deleted = true;
1318
+ Node en = e.next;
1319
+ if (pred != null)
1320
+ pred.next = en;
1321
+ else
1322
+ setTabAt(tab, i, en);
1323
+ }
1324
+ }
1325
+ break;
1326
+ }
1327
+ pred = e;
1328
+ if ((e = e.next) == null)
1329
+ break;
1330
+ }
1331
+ }
1332
+ } finally {
1333
+ if (!f.casHash(fh | LOCKED, fh)) {
1334
+ f.hash = fh;
1335
+ synchronized (f) { f.notifyAll(); };
1336
+ }
1337
+ }
1338
+ if (validated) {
1339
+ if (deleted)
1340
+ counter.add(-1L);
1341
+ break;
1342
+ }
1343
+ }
1344
+ }
1345
+ return oldVal;
1346
+ }
1347
+
1348
+ /*
1349
+ * Internal versions of the six insertion methods, each a
1350
+ * little more complicated than the last. All have
1351
+ * the same basic structure as the first (internalPut):
1352
+ * 1. If table uninitialized, create
1353
+ * 2. If bin empty, try to CAS new node
1354
+ * 3. If bin stale, use new table
1355
+ * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1356
+ * 5. Lock and validate; if valid, scan and add or update
1357
+ *
1358
+ * The others interweave other checks and/or alternative actions:
1359
+ * * Plain put checks for and performs resize after insertion.
1360
+ * * putIfAbsent prescans for mapping without lock (and fails to add
1361
+ * if present), which also makes pre-emptive resize checks worthwhile.
1362
+ * * computeIfAbsent extends form used in putIfAbsent with additional
1363
+ * mechanics to deal with calls, potential exceptions and null
1364
+ * returns from function call.
1365
+ * * compute uses the same function-call mechanics, but without
1366
+ * the prescans
1367
+ * * merge acts as putIfAbsent in the absent case, but invokes the
1368
+ * update function if present
1369
+ * * putAll attempts to pre-allocate enough table space
1370
+ * and more lazily performs count updates and checks.
1371
+ *
1372
+ * Someday when details settle down a bit more, it might be worth
1373
+ * some factoring to reduce sprawl.
1374
+ */
1375
+
1376
+ /** Implementation for put */
1377
+ private final Object internalPut(Object k, Object v) {
1378
+ int h = spread(k.hashCode());
1379
+ int count = 0;
1380
+ for (AtomicReferenceArray<Node> tab = table;;) {
1381
+ int i; Node f; int fh; Object fk;
1382
+ if (tab == null)
1383
+ tab = initTable();
1384
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1385
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1386
+ break; // no lock when adding to empty bin
1387
+ }
1388
+ else if ((fh = f.hash) == MOVED) {
1389
+ if ((fk = f.key) instanceof TreeBin) {
1390
+ TreeBin t = (TreeBin)fk;
1391
+ Object oldVal = null;
1392
+ t.acquire(0);
1393
+ try {
1394
+ if (tabAt(tab, i) == f) {
1395
+ count = 2;
1396
+ TreeNode p = t.putTreeNode(h, k, v);
1397
+ if (p != null) {
1398
+ oldVal = p.val;
1399
+ p.val = v;
1400
+ }
1401
+ }
1402
+ } finally {
1403
+ t.release(0);
1404
+ }
1405
+ if (count != 0) {
1406
+ if (oldVal != null)
1407
+ return oldVal;
1408
+ break;
1409
+ }
1410
+ }
1411
+ else
1412
+ tab = (AtomicReferenceArray<Node>)fk;
1413
+ }
1414
+ else if ((fh & LOCKED) != 0) {
1415
+ checkForResize();
1416
+ f.tryAwaitLock(tab, i);
1417
+ }
1418
+ else if (f.casHash(fh, fh | LOCKED)) {
1419
+ Object oldVal = null;
1420
+ try { // needed in case equals() throws
1421
+ if (tabAt(tab, i) == f) {
1422
+ count = 1;
1423
+ for (Node e = f;; ++count) {
1424
+ Object ek, ev;
1425
+ if ((e.hash & HASH_BITS) == h &&
1426
+ (ev = e.val) != null &&
1427
+ ((ek = e.key) == k || k.equals(ek))) {
1428
+ oldVal = ev;
1429
+ e.val = v;
1430
+ break;
1431
+ }
1432
+ Node last = e;
1433
+ if ((e = e.next) == null) {
1434
+ last.next = new Node(h, k, v, null);
1435
+ if (count >= TREE_THRESHOLD)
1436
+ replaceWithTreeBin(tab, i, k);
1437
+ break;
1438
+ }
1439
+ }
1440
+ }
1441
+ } finally { // unlock and signal if needed
1442
+ if (!f.casHash(fh | LOCKED, fh)) {
1443
+ f.hash = fh;
1444
+ synchronized (f) { f.notifyAll(); };
1445
+ }
1446
+ }
1447
+ if (count != 0) {
1448
+ if (oldVal != null)
1449
+ return oldVal;
1450
+ if (tab.length() <= 64)
1451
+ count = 2;
1452
+ break;
1453
+ }
1454
+ }
1455
+ }
1456
+ counter.add(1L);
1457
+ if (count > 1)
1458
+ checkForResize();
1459
+ return null;
1460
+ }
1461
+
1462
+ /** Implementation for putIfAbsent */
1463
+ private final Object internalPutIfAbsent(Object k, Object v) {
1464
+ int h = spread(k.hashCode());
1465
+ int count = 0;
1466
+ for (AtomicReferenceArray<Node> tab = table;;) {
1467
+ int i; Node f; int fh; Object fk, fv;
1468
+ if (tab == null)
1469
+ tab = initTable();
1470
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1471
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1472
+ break;
1473
+ }
1474
+ else if ((fh = f.hash) == MOVED) {
1475
+ if ((fk = f.key) instanceof TreeBin) {
1476
+ TreeBin t = (TreeBin)fk;
1477
+ Object oldVal = null;
1478
+ t.acquire(0);
1479
+ try {
1480
+ if (tabAt(tab, i) == f) {
1481
+ count = 2;
1482
+ TreeNode p = t.putTreeNode(h, k, v);
1483
+ if (p != null)
1484
+ oldVal = p.val;
1485
+ }
1486
+ } finally {
1487
+ t.release(0);
1488
+ }
1489
+ if (count != 0) {
1490
+ if (oldVal != null)
1491
+ return oldVal;
1492
+ break;
1493
+ }
1494
+ }
1495
+ else
1496
+ tab = (AtomicReferenceArray<Node>)fk;
1497
+ }
1498
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1499
+ ((fk = f.key) == k || k.equals(fk)))
1500
+ return fv;
1501
+ else {
1502
+ Node g = f.next;
1503
+ if (g != null) { // at least 2 nodes -- search and maybe resize
1504
+ for (Node e = g;;) {
1505
+ Object ek, ev;
1506
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1507
+ ((ek = e.key) == k || k.equals(ek)))
1508
+ return ev;
1509
+ if ((e = e.next) == null) {
1510
+ checkForResize();
1511
+ break;
1512
+ }
1513
+ }
1514
+ }
1515
+ if (((fh = f.hash) & LOCKED) != 0) {
1516
+ checkForResize();
1517
+ f.tryAwaitLock(tab, i);
1518
+ }
1519
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1520
+ Object oldVal = null;
1521
+ try {
1522
+ if (tabAt(tab, i) == f) {
1523
+ count = 1;
1524
+ for (Node e = f;; ++count) {
1525
+ Object ek, ev;
1526
+ if ((e.hash & HASH_BITS) == h &&
1527
+ (ev = e.val) != null &&
1528
+ ((ek = e.key) == k || k.equals(ek))) {
1529
+ oldVal = ev;
1530
+ break;
1531
+ }
1532
+ Node last = e;
1533
+ if ((e = e.next) == null) {
1534
+ last.next = new Node(h, k, v, null);
1535
+ if (count >= TREE_THRESHOLD)
1536
+ replaceWithTreeBin(tab, i, k);
1537
+ break;
1538
+ }
1539
+ }
1540
+ }
1541
+ } finally {
1542
+ if (!f.casHash(fh | LOCKED, fh)) {
1543
+ f.hash = fh;
1544
+ synchronized (f) { f.notifyAll(); };
1545
+ }
1546
+ }
1547
+ if (count != 0) {
1548
+ if (oldVal != null)
1549
+ return oldVal;
1550
+ if (tab.length() <= 64)
1551
+ count = 2;
1552
+ break;
1553
+ }
1554
+ }
1555
+ }
1556
+ }
1557
+ counter.add(1L);
1558
+ if (count > 1)
1559
+ checkForResize();
1560
+ return null;
1561
+ }
1562
+
1563
+ /** Implementation for computeIfAbsent */
1564
+ private final Object internalComputeIfAbsent(K k,
1565
+ Fun<? super K, ?> mf) {
1566
+ int h = spread(k.hashCode());
1567
+ Object val = null;
1568
+ int count = 0;
1569
+ for (AtomicReferenceArray<Node> tab = table;;) {
1570
+ Node f; int i, fh; Object fk, fv;
1571
+ if (tab == null)
1572
+ tab = initTable();
1573
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1574
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1575
+ if (casTabAt(tab, i, null, node)) {
1576
+ count = 1;
1577
+ try {
1578
+ if ((val = mf.apply(k)) != null)
1579
+ node.val = val;
1580
+ } finally {
1581
+ if (val == null)
1582
+ setTabAt(tab, i, null);
1583
+ if (!node.casHash(fh, h)) {
1584
+ node.hash = h;
1585
+ synchronized (node) { node.notifyAll(); };
1586
+ }
1587
+ }
1588
+ }
1589
+ if (count != 0)
1590
+ break;
1591
+ }
1592
+ else if ((fh = f.hash) == MOVED) {
1593
+ if ((fk = f.key) instanceof TreeBin) {
1594
+ TreeBin t = (TreeBin)fk;
1595
+ boolean added = false;
1596
+ t.acquire(0);
1597
+ try {
1598
+ if (tabAt(tab, i) == f) {
1599
+ count = 1;
1600
+ TreeNode p = t.getTreeNode(h, k, t.root);
1601
+ if (p != null)
1602
+ val = p.val;
1603
+ else if ((val = mf.apply(k)) != null) {
1604
+ added = true;
1605
+ count = 2;
1606
+ t.putTreeNode(h, k, val);
1607
+ }
1608
+ }
1609
+ } finally {
1610
+ t.release(0);
1611
+ }
1612
+ if (count != 0) {
1613
+ if (!added)
1614
+ return val;
1615
+ break;
1616
+ }
1617
+ }
1618
+ else
1619
+ tab = (AtomicReferenceArray<Node>)fk;
1620
+ }
1621
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1622
+ ((fk = f.key) == k || k.equals(fk)))
1623
+ return fv;
1624
+ else {
1625
+ Node g = f.next;
1626
+ if (g != null) {
1627
+ for (Node e = g;;) {
1628
+ Object ek, ev;
1629
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1630
+ ((ek = e.key) == k || k.equals(ek)))
1631
+ return ev;
1632
+ if ((e = e.next) == null) {
1633
+ checkForResize();
1634
+ break;
1635
+ }
1636
+ }
1637
+ }
1638
+ if (((fh = f.hash) & LOCKED) != 0) {
1639
+ checkForResize();
1640
+ f.tryAwaitLock(tab, i);
1641
+ }
1642
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1643
+ boolean added = false;
1644
+ try {
1645
+ if (tabAt(tab, i) == f) {
1646
+ count = 1;
1647
+ for (Node e = f;; ++count) {
1648
+ Object ek, ev;
1649
+ if ((e.hash & HASH_BITS) == h &&
1650
+ (ev = e.val) != null &&
1651
+ ((ek = e.key) == k || k.equals(ek))) {
1652
+ val = ev;
1653
+ break;
1654
+ }
1655
+ Node last = e;
1656
+ if ((e = e.next) == null) {
1657
+ if ((val = mf.apply(k)) != null) {
1658
+ added = true;
1659
+ last.next = new Node(h, k, val, null);
1660
+ if (count >= TREE_THRESHOLD)
1661
+ replaceWithTreeBin(tab, i, k);
1662
+ }
1663
+ break;
1664
+ }
1665
+ }
1666
+ }
1667
+ } finally {
1668
+ if (!f.casHash(fh | LOCKED, fh)) {
1669
+ f.hash = fh;
1670
+ synchronized (f) { f.notifyAll(); };
1671
+ }
1672
+ }
1673
+ if (count != 0) {
1674
+ if (!added)
1675
+ return val;
1676
+ if (tab.length() <= 64)
1677
+ count = 2;
1678
+ break;
1679
+ }
1680
+ }
1681
+ }
1682
+ }
1683
+ if (val != null) {
1684
+ counter.add(1L);
1685
+ if (count > 1)
1686
+ checkForResize();
1687
+ }
1688
+ return val;
1689
+ }
1690
+
1691
+ /** Implementation for compute */
1692
+ @SuppressWarnings("unchecked") private final Object internalCompute
1693
+ (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1694
+ int h = spread(k.hashCode());
1695
+ Object val = null;
1696
+ int delta = 0;
1697
+ int count = 0;
1698
+ for (AtomicReferenceArray<Node> tab = table;;) {
1699
+ Node f; int i, fh; Object fk;
1700
+ if (tab == null)
1701
+ tab = initTable();
1702
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1703
+ if (onlyIfPresent)
1704
+ break;
1705
+ Node node = new Node(fh = h | LOCKED, k, null, null);
1706
+ if (casTabAt(tab, i, null, node)) {
1707
+ try {
1708
+ count = 1;
1709
+ if ((val = mf.apply(k, null)) != null) {
1710
+ node.val = val;
1711
+ delta = 1;
1712
+ }
1713
+ } finally {
1714
+ if (delta == 0)
1715
+ setTabAt(tab, i, null);
1716
+ if (!node.casHash(fh, h)) {
1717
+ node.hash = h;
1718
+ synchronized (node) { node.notifyAll(); };
1719
+ }
1720
+ }
1721
+ }
1722
+ if (count != 0)
1723
+ break;
1724
+ }
1725
+ else if ((fh = f.hash) == MOVED) {
1726
+ if ((fk = f.key) instanceof TreeBin) {
1727
+ TreeBin t = (TreeBin)fk;
1728
+ t.acquire(0);
1729
+ try {
1730
+ if (tabAt(tab, i) == f) {
1731
+ count = 1;
1732
+ TreeNode p = t.getTreeNode(h, k, t.root);
1733
+ Object pv;
1734
+ if (p == null) {
1735
+ if (onlyIfPresent)
1736
+ break;
1737
+ pv = null;
1738
+ } else
1739
+ pv = p.val;
1740
+ if ((val = mf.apply(k, (V)pv)) != null) {
1741
+ if (p != null)
1742
+ p.val = val;
1743
+ else {
1744
+ count = 2;
1745
+ delta = 1;
1746
+ t.putTreeNode(h, k, val);
1747
+ }
1748
+ }
1749
+ else if (p != null) {
1750
+ delta = -1;
1751
+ t.deleteTreeNode(p);
1752
+ }
1753
+ }
1754
+ } finally {
1755
+ t.release(0);
1756
+ }
1757
+ if (count != 0)
1758
+ break;
1759
+ }
1760
+ else
1761
+ tab = (AtomicReferenceArray<Node>)fk;
1762
+ }
1763
+ else if ((fh & LOCKED) != 0) {
1764
+ checkForResize();
1765
+ f.tryAwaitLock(tab, i);
1766
+ }
1767
+ else if (f.casHash(fh, fh | LOCKED)) {
1768
+ try {
1769
+ if (tabAt(tab, i) == f) {
1770
+ count = 1;
1771
+ for (Node e = f, pred = null;; ++count) {
1772
+ Object ek, ev;
1773
+ if ((e.hash & HASH_BITS) == h &&
1774
+ (ev = e.val) != null &&
1775
+ ((ek = e.key) == k || k.equals(ek))) {
1776
+ val = mf.apply(k, (V)ev);
1777
+ if (val != null)
1778
+ e.val = val;
1779
+ else {
1780
+ delta = -1;
1781
+ Node en = e.next;
1782
+ if (pred != null)
1783
+ pred.next = en;
1784
+ else
1785
+ setTabAt(tab, i, en);
1786
+ }
1787
+ break;
1788
+ }
1789
+ pred = e;
1790
+ if ((e = e.next) == null) {
1791
+ if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1792
+ pred.next = new Node(h, k, val, null);
1793
+ delta = 1;
1794
+ if (count >= TREE_THRESHOLD)
1795
+ replaceWithTreeBin(tab, i, k);
1796
+ }
1797
+ break;
1798
+ }
1799
+ }
1800
+ }
1801
+ } finally {
1802
+ if (!f.casHash(fh | LOCKED, fh)) {
1803
+ f.hash = fh;
1804
+ synchronized (f) { f.notifyAll(); };
1805
+ }
1806
+ }
1807
+ if (count != 0) {
1808
+ if (tab.length() <= 64)
1809
+ count = 2;
1810
+ break;
1811
+ }
1812
+ }
1813
+ }
1814
+ if (delta != 0) {
1815
+ counter.add((long)delta);
1816
+ if (count > 1)
1817
+ checkForResize();
1818
+ }
1819
+ return val;
1820
+ }
1821
+
1822
+ /** Implementation for merge */
1823
+ @SuppressWarnings("unchecked") private final Object internalMerge
1824
+ (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1825
+ int h = spread(k.hashCode());
1826
+ Object val = null;
1827
+ int delta = 0;
1828
+ int count = 0;
1829
+ for (AtomicReferenceArray<Node> tab = table;;) {
1830
+ int i; Node f; int fh; Object fk, fv;
1831
+ if (tab == null)
1832
+ tab = initTable();
1833
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1834
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1835
+ delta = 1;
1836
+ val = v;
1837
+ break;
1838
+ }
1839
+ }
1840
+ else if ((fh = f.hash) == MOVED) {
1841
+ if ((fk = f.key) instanceof TreeBin) {
1842
+ TreeBin t = (TreeBin)fk;
1843
+ t.acquire(0);
1844
+ try {
1845
+ if (tabAt(tab, i) == f) {
1846
+ count = 1;
1847
+ TreeNode p = t.getTreeNode(h, k, t.root);
1848
+ val = (p == null) ? v : mf.apply((V)p.val, v);
1849
+ if (val != null) {
1850
+ if (p != null)
1851
+ p.val = val;
1852
+ else {
1853
+ count = 2;
1854
+ delta = 1;
1855
+ t.putTreeNode(h, k, val);
1856
+ }
1857
+ }
1858
+ else if (p != null) {
1859
+ delta = -1;
1860
+ t.deleteTreeNode(p);
1861
+ }
1862
+ }
1863
+ } finally {
1864
+ t.release(0);
1865
+ }
1866
+ if (count != 0)
1867
+ break;
1868
+ }
1869
+ else
1870
+ tab = (AtomicReferenceArray<Node>)fk;
1871
+ }
1872
+ else if ((fh & LOCKED) != 0) {
1873
+ checkForResize();
1874
+ f.tryAwaitLock(tab, i);
1875
+ }
1876
+ else if (f.casHash(fh, fh | LOCKED)) {
1877
+ try {
1878
+ if (tabAt(tab, i) == f) {
1879
+ count = 1;
1880
+ for (Node e = f, pred = null;; ++count) {
1881
+ Object ek, ev;
1882
+ if ((e.hash & HASH_BITS) == h &&
1883
+ (ev = e.val) != null &&
1884
+ ((ek = e.key) == k || k.equals(ek))) {
1885
+ val = mf.apply((V)ev, v);
1886
+ if (val != null)
1887
+ e.val = val;
1888
+ else {
1889
+ delta = -1;
1890
+ Node en = e.next;
1891
+ if (pred != null)
1892
+ pred.next = en;
1893
+ else
1894
+ setTabAt(tab, i, en);
1895
+ }
1896
+ break;
1897
+ }
1898
+ pred = e;
1899
+ if ((e = e.next) == null) {
1900
+ val = v;
1901
+ pred.next = new Node(h, k, val, null);
1902
+ delta = 1;
1903
+ if (count >= TREE_THRESHOLD)
1904
+ replaceWithTreeBin(tab, i, k);
1905
+ break;
1906
+ }
1907
+ }
1908
+ }
1909
+ } finally {
1910
+ if (!f.casHash(fh | LOCKED, fh)) {
1911
+ f.hash = fh;
1912
+ synchronized (f) { f.notifyAll(); };
1913
+ }
1914
+ }
1915
+ if (count != 0) {
1916
+ if (tab.length() <= 64)
1917
+ count = 2;
1918
+ break;
1919
+ }
1920
+ }
1921
+ }
1922
+ if (delta != 0) {
1923
+ counter.add((long)delta);
1924
+ if (count > 1)
1925
+ checkForResize();
1926
+ }
1927
+ return val;
1928
+ }
1929
+
1930
+ /** Implementation for putAll */
1931
+ private final void internalPutAll(Map<?, ?> m) {
1932
+ tryPresize(m.size());
1933
+ long delta = 0L; // number of uncommitted additions
1934
+ boolean npe = false; // to throw exception on exit for nulls
1935
+ try { // to clean up counts on other exceptions
1936
+ for (Map.Entry<?, ?> entry : m.entrySet()) {
1937
+ Object k, v;
1938
+ if (entry == null || (k = entry.getKey()) == null ||
1939
+ (v = entry.getValue()) == null) {
1940
+ npe = true;
1941
+ break;
1942
+ }
1943
+ int h = spread(k.hashCode());
1944
+ for (AtomicReferenceArray<Node> tab = table;;) {
1945
+ int i; Node f; int fh; Object fk;
1946
+ if (tab == null)
1947
+ tab = initTable();
1948
+ else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
1949
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1950
+ ++delta;
1951
+ break;
1952
+ }
1953
+ }
1954
+ else if ((fh = f.hash) == MOVED) {
1955
+ if ((fk = f.key) instanceof TreeBin) {
1956
+ TreeBin t = (TreeBin)fk;
1957
+ boolean validated = false;
1958
+ t.acquire(0);
1959
+ try {
1960
+ if (tabAt(tab, i) == f) {
1961
+ validated = true;
1962
+ TreeNode p = t.getTreeNode(h, k, t.root);
1963
+ if (p != null)
1964
+ p.val = v;
1965
+ else {
1966
+ t.putTreeNode(h, k, v);
1967
+ ++delta;
1968
+ }
1969
+ }
1970
+ } finally {
1971
+ t.release(0);
1972
+ }
1973
+ if (validated)
1974
+ break;
1975
+ }
1976
+ else
1977
+ tab = (AtomicReferenceArray<Node>)fk;
1978
+ }
1979
+ else if ((fh & LOCKED) != 0) {
1980
+ counter.add(delta);
1981
+ delta = 0L;
1982
+ checkForResize();
1983
+ f.tryAwaitLock(tab, i);
1984
+ }
1985
+ else if (f.casHash(fh, fh | LOCKED)) {
1986
+ int count = 0;
1987
+ try {
1988
+ if (tabAt(tab, i) == f) {
1989
+ count = 1;
1990
+ for (Node e = f;; ++count) {
1991
+ Object ek, ev;
1992
+ if ((e.hash & HASH_BITS) == h &&
1993
+ (ev = e.val) != null &&
1994
+ ((ek = e.key) == k || k.equals(ek))) {
1995
+ e.val = v;
1996
+ break;
1997
+ }
1998
+ Node last = e;
1999
+ if ((e = e.next) == null) {
2000
+ ++delta;
2001
+ last.next = new Node(h, k, v, null);
2002
+ if (count >= TREE_THRESHOLD)
2003
+ replaceWithTreeBin(tab, i, k);
2004
+ break;
2005
+ }
2006
+ }
2007
+ }
2008
+ } finally {
2009
+ if (!f.casHash(fh | LOCKED, fh)) {
2010
+ f.hash = fh;
2011
+ synchronized (f) { f.notifyAll(); };
2012
+ }
2013
+ }
2014
+ if (count != 0) {
2015
+ if (count > 1) {
2016
+ counter.add(delta);
2017
+ delta = 0L;
2018
+ checkForResize();
2019
+ }
2020
+ break;
2021
+ }
2022
+ }
2023
+ }
2024
+ }
2025
+ } finally {
2026
+ if (delta != 0)
2027
+ counter.add(delta);
2028
+ }
2029
+ if (npe)
2030
+ throw new NullPointerException();
2031
+ }
2032
+
2033
+ /* ---------------- Table Initialization and Resizing -------------- */
2034
+
2035
+ /**
2036
+ * Returns a power of two table size for the given desired capacity.
2037
+ * See Hacker's Delight, sec 3.2
2038
+ */
2039
+ private static final int tableSizeFor(int c) {
2040
+ int n = c - 1;
2041
+ n |= n >>> 1;
2042
+ n |= n >>> 2;
2043
+ n |= n >>> 4;
2044
+ n |= n >>> 8;
2045
+ n |= n >>> 16;
2046
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2047
+ }
2048
+
2049
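tableSizeFor is the standard "round up to a power of two" bit smear: OR-ing n with its own right shifts fills every bit below the highest set bit, so n + 1 is the next power of two at or above c. A quick worked check (standalone copy; MAXIMUM_CAPACITY is assumed to be 1 << 30, as is usual for this family of maps):

    // Standalone copy of tableSizeFor; MAXIMUM_CAPACITY assumed to be 1 << 30.
    public class TableSizeForDemo {
        static final int MAXIMUM_CAPACITY = 1 << 30;

        static int tableSizeFor(int c) {
            int n = c - 1;
            n |= n >>> 1;
            n |= n >>> 2;
            n |= n >>> 4;
            n |= n >>> 8;
            n |= n >>> 16;
            return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
        }

        public static void main(String[] args) {
            System.out.println(tableSizeFor(5));   // 8
            System.out.println(tableSizeFor(16));  // 16 (already a power of two)
            System.out.println(tableSizeFor(17));  // 32
        }
    }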
+ /**
2050
+ * Initializes table, using the size recorded in sizeCtl.
2051
+ */
2052
+ private final AtomicReferenceArray<Node> initTable() {
2053
+ AtomicReferenceArray<Node> tab; int sc;
2054
+ while ((tab = table) == null) {
2055
+ if ((sc = sizeCtl) < 0)
2056
+ Thread.yield(); // lost initialization race; just spin
2057
+ else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2058
+ try {
2059
+ if ((tab = table) == null) {
2060
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2061
+ tab = table = new AtomicReferenceArray<Node>(n);
2062
+ sc = n - (n >>> 2);
2063
+ }
2064
+ } finally {
2065
+ sizeCtl = sc;
2066
+ }
2067
+ break;
2068
+ }
2069
+ }
2070
+ return tab;
2071
+ }
2072
+
2073
+ /**
2074
+ * If table is too small and not already resizing, creates next
2075
+ * table and transfers bins. Rechecks occupancy after a transfer
2076
+ * to see if another resize is already needed because resizings
2077
+ * are lagging additions.
2078
+ */
2079
+ private final void checkForResize() {
2080
+ AtomicReferenceArray<Node> tab; int n, sc;
2081
+ while ((tab = table) != null &&
2082
+ (n = tab.length()) < MAXIMUM_CAPACITY &&
2083
+ (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2084
+ SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2085
+ try {
2086
+ if (tab == table) {
2087
+ table = rebuild(tab);
2088
+ sc = (n << 1) - (n >>> 1);
2089
+ }
2090
+ } finally {
2091
+ sizeCtl = sc;
2092
+ }
2093
+ }
2094
+ }
2095
+
2096
+ /**
2097
+ * Tries to presize table to accommodate the given number of elements.
2098
+ *
2099
+ * @param size number of elements (doesn't need to be perfectly accurate)
2100
+ */
2101
+ private final void tryPresize(int size) {
2102
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2103
+ tableSizeFor(size + (size >>> 1) + 1);
2104
+ int sc;
2105
+ while ((sc = sizeCtl) >= 0) {
2106
+ AtomicReferenceArray<Node> tab = table; int n;
2107
+ if (tab == null || (n = tab.length()) == 0) {
2108
+ n = (sc > c) ? sc : c;
2109
+ if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2110
+ try {
2111
+ if (table == tab) {
2112
+ table = new AtomicReferenceArray<Node>(n);
2113
+ sc = n - (n >>> 2);
2114
+ }
2115
+ } finally {
2116
+ sizeCtl = sc;
2117
+ }
2118
+ }
2119
+ }
2120
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
2121
+ break;
2122
+ else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
2123
+ try {
2124
+ if (table == tab) {
2125
+ table = rebuild(tab);
2126
+ sc = (n << 1) - (n >>> 1);
2127
+ }
2128
+ } finally {
2129
+ sizeCtl = sc;
2130
+ }
2131
+ }
2132
+ }
2133
+ }
2134
+
2135
+ /*
2136
+ * Moves and/or copies the nodes in each bin to new table. See
2137
+ * above for explanation.
2138
+ *
2139
+ * @return the new table
2140
+ */
2141
+ private static final AtomicReferenceArray<Node> rebuild(AtomicReferenceArray<Node> tab) {
2142
+ int n = tab.length();
2143
+ AtomicReferenceArray<Node> nextTab = new AtomicReferenceArray<Node>(n << 1);
2144
+ Node fwd = new Node(MOVED, nextTab, null, null);
2145
+ int[] buffer = null; // holds bins to revisit; null until needed
2146
+ Node rev = null; // reverse forwarder; null until needed
2147
+ int nbuffered = 0; // the number of bins in buffer list
2148
+ int bufferIndex = 0; // buffer index of current buffered bin
2149
+ int bin = n - 1; // current non-buffered bin or -1 if none
2150
+
2151
+ for (int i = bin;;) { // start upwards sweep
2152
+ int fh; Node f;
2153
+ if ((f = tabAt(tab, i)) == null) {
2154
+ if (bin >= 0) { // Unbuffered; no lock needed (or available)
2155
+ if (!casTabAt(tab, i, f, fwd))
2156
+ continue;
2157
+ }
2158
+ else { // transiently use a locked forwarding node
2159
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2160
+ if (!casTabAt(tab, i, f, g))
2161
+ continue;
2162
+ setTabAt(nextTab, i, null);
2163
+ setTabAt(nextTab, i + n, null);
2164
+ setTabAt(tab, i, fwd);
2165
+ if (!g.casHash(MOVED|LOCKED, MOVED)) {
2166
+ g.hash = MOVED;
2167
+ synchronized (g) { g.notifyAll(); }
2168
+ }
2169
+ }
2170
+ }
2171
+ else if ((fh = f.hash) == MOVED) {
2172
+ Object fk = f.key;
2173
+ if (fk instanceof TreeBin) {
2174
+ TreeBin t = (TreeBin)fk;
2175
+ boolean validated = false;
2176
+ t.acquire(0);
2177
+ try {
2178
+ if (tabAt(tab, i) == f) {
2179
+ validated = true;
2180
+ splitTreeBin(nextTab, i, t);
2181
+ setTabAt(tab, i, fwd);
2182
+ }
2183
+ } finally {
2184
+ t.release(0);
2185
+ }
2186
+ if (!validated)
2187
+ continue;
2188
+ }
2189
+ }
2190
+ else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2191
+ boolean validated = false;
2192
+ try { // split to lo and hi lists; copying as needed
2193
+ if (tabAt(tab, i) == f) {
2194
+ validated = true;
2195
+ splitBin(nextTab, i, f);
2196
+ setTabAt(tab, i, fwd);
2197
+ }
2198
+ } finally {
2199
+ if (!f.casHash(fh | LOCKED, fh)) {
2200
+ f.hash = fh;
2201
+ synchronized (f) { f.notifyAll(); };
2202
+ }
2203
+ }
2204
+ if (!validated)
2205
+ continue;
2206
+ }
2207
+ else {
2208
+ if (buffer == null) // initialize buffer for revisits
2209
+ buffer = new int[TRANSFER_BUFFER_SIZE];
2210
+ if (bin < 0 && bufferIndex > 0) {
2211
+ int j = buffer[--bufferIndex];
2212
+ buffer[bufferIndex] = i;
2213
+ i = j; // swap with another bin
2214
+ continue;
2215
+ }
2216
+ if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2217
+ f.tryAwaitLock(tab, i);
2218
+ continue; // no other options -- block
2219
+ }
2220
+ if (rev == null) // initialize reverse-forwarder
2221
+ rev = new Node(MOVED, tab, null, null);
2222
+ if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2223
+ continue; // recheck before adding to list
2224
+ buffer[nbuffered++] = i;
2225
+ setTabAt(nextTab, i, rev); // install place-holders
2226
+ setTabAt(nextTab, i + n, rev);
2227
+ }
2228
+
2229
+ if (bin > 0)
2230
+ i = --bin;
2231
+ else if (buffer != null && nbuffered > 0) {
2232
+ bin = -1;
2233
+ i = buffer[bufferIndex = --nbuffered];
2234
+ }
2235
+ else
2236
+ return nextTab;
2237
+ }
2238
+ }
2239
+
2240
+ /**
2241
+ * Splits a normal bin with list headed by e into lo and hi parts;
2242
+ * installs in given table.
2243
+ */
2244
+ private static void splitBin(AtomicReferenceArray<Node> nextTab, int i, Node e) {
2245
+ int bit = nextTab.length() >>> 1; // bit to split on
2246
+ int runBit = e.hash & bit;
2247
+ Node lastRun = e, lo = null, hi = null;
2248
+ for (Node p = e.next; p != null; p = p.next) {
2249
+ int b = p.hash & bit;
2250
+ if (b != runBit) {
2251
+ runBit = b;
2252
+ lastRun = p;
2253
+ }
2254
+ }
2255
+ if (runBit == 0)
2256
+ lo = lastRun;
2257
+ else
2258
+ hi = lastRun;
2259
+ for (Node p = e; p != lastRun; p = p.next) {
2260
+ int ph = p.hash & HASH_BITS;
2261
+ Object pk = p.key, pv = p.val;
2262
+ if ((ph & bit) == 0)
2263
+ lo = new Node(ph, pk, pv, lo);
2264
+ else
2265
+ hi = new Node(ph, pk, pv, hi);
2266
+ }
2267
+ setTabAt(nextTab, i, lo);
2268
+ setTabAt(nextTab, i + bit, hi);
2269
+ }
2270
+
2271
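splitBin works because doubling a power-of-two table adds exactly one bit to the index mask: an entry either keeps its old slot or moves up by the old capacity, decided by the single bit (hash & bit). A toy check of that invariant with made-up hash values:

    // When a table of size n doubles, each entry either stays at (h & (n-1)) or
    // moves to (h & (n-1)) + n, selected by the bit (h & n) -- the "bit to split on" above.
    public class SplitIndexDemo {
        public static void main(String[] args) {
            int n = 16;                              // old capacity
            int[] hashes = {0x21, 0x31, 0x5, 0x15};  // arbitrary sample hashes
            for (int h : hashes) {
                int oldIndex = h & (n - 1);
                int newIndex = h & (2 * n - 1);
                String where = ((h & n) == 0) ? "stays (lo list)" : "moves +" + n + " (hi list)";
                System.out.printf("hash=0x%x old=%d new=%d -> %s%n", h, oldIndex, newIndex, where);
            }
        }
    }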
+ /**
2272
+ * Splits a tree bin into lo and hi parts; installs in given table.
2273
+ */
2274
+ private static void splitTreeBin(AtomicReferenceArray<Node> nextTab, int i, TreeBin t) {
2275
+ int bit = nextTab.length() >>> 1;
2276
+ TreeBin lt = new TreeBin();
2277
+ TreeBin ht = new TreeBin();
2278
+ int lc = 0, hc = 0;
2279
+ for (Node e = t.first; e != null; e = e.next) {
2280
+ int h = e.hash & HASH_BITS;
2281
+ Object k = e.key, v = e.val;
2282
+ if ((h & bit) == 0) {
2283
+ ++lc;
2284
+ lt.putTreeNode(h, k, v);
2285
+ }
2286
+ else {
2287
+ ++hc;
2288
+ ht.putTreeNode(h, k, v);
2289
+ }
2290
+ }
2291
+ Node ln, hn; // throw away trees if too small
2292
+ if (lc <= (TREE_THRESHOLD >>> 1)) {
2293
+ ln = null;
2294
+ for (Node p = lt.first; p != null; p = p.next)
2295
+ ln = new Node(p.hash, p.key, p.val, ln);
2296
+ }
2297
+ else
2298
+ ln = new Node(MOVED, lt, null, null);
2299
+ setTabAt(nextTab, i, ln);
2300
+ if (hc <= (TREE_THRESHOLD >>> 1)) {
2301
+ hn = null;
2302
+ for (Node p = ht.first; p != null; p = p.next)
2303
+ hn = new Node(p.hash, p.key, p.val, hn);
2304
+ }
2305
+ else
2306
+ hn = new Node(MOVED, ht, null, null);
2307
+ setTabAt(nextTab, i + bit, hn);
2308
+ }
2309
+
2310
+ /**
2311
+ * Implementation for clear. Steps through each bin, removing all
2312
+ * nodes.
2313
+ */
2314
+ private final void internalClear() {
2315
+ long delta = 0L; // negative number of deletions
2316
+ int i = 0;
2317
+ AtomicReferenceArray<Node> tab = table;
2318
+ while (tab != null && i < tab.length()) {
2319
+ int fh; Object fk;
2320
+ Node f = tabAt(tab, i);
2321
+ if (f == null)
2322
+ ++i;
2323
+ else if ((fh = f.hash) == MOVED) {
2324
+ if ((fk = f.key) instanceof TreeBin) {
2325
+ TreeBin t = (TreeBin)fk;
2326
+ t.acquire(0);
2327
+ try {
2328
+ if (tabAt(tab, i) == f) {
2329
+ for (Node p = t.first; p != null; p = p.next) {
2330
+ if (p.val != null) { // (currently always true)
2331
+ p.val = null;
2332
+ --delta;
2333
+ }
2334
+ }
2335
+ t.first = null;
2336
+ t.root = null;
2337
+ ++i;
2338
+ }
2339
+ } finally {
2340
+ t.release(0);
2341
+ }
2342
+ }
2343
+ else
2344
+ tab = (AtomicReferenceArray<Node>)fk;
2345
+ }
2346
+ else if ((fh & LOCKED) != 0) {
2347
+ counter.add(delta); // opportunistically update count
2348
+ delta = 0L;
2349
+ f.tryAwaitLock(tab, i);
2350
+ }
2351
+ else if (f.casHash(fh, fh | LOCKED)) {
2352
+ try {
2353
+ if (tabAt(tab, i) == f) {
2354
+ for (Node e = f; e != null; e = e.next) {
2355
+ if (e.val != null) { // (currently always true)
2356
+ e.val = null;
2357
+ --delta;
2358
+ }
2359
+ }
2360
+ setTabAt(tab, i, null);
2361
+ ++i;
2362
+ }
2363
+ } finally {
2364
+ if (!f.casHash(fh | LOCKED, fh)) {
2365
+ f.hash = fh;
2366
+ synchronized (f) { f.notifyAll(); };
2367
+ }
2368
+ }
2369
+ }
2370
+ }
2371
+ if (delta != 0)
2372
+ counter.add(delta);
2373
+ }
2374
+
2375
+ /* ----------------Table Traversal -------------- */
2376
+
2377
+ /**
2378
+ * Encapsulates traversal for methods such as containsValue; also
2379
+ * serves as a base class for other iterators and bulk tasks.
2380
+ *
2381
+ * At each step, the iterator snapshots the key ("nextKey") and
2382
+ * value ("nextVal") of a valid node (i.e., one that, at point of
2383
+ * snapshot, has a non-null user value). Because val fields can
2384
+ * change (including to null, indicating deletion), field nextVal
2385
+ * might not be accurate at point of use, but still maintains the
2386
+ * weak consistency property of holding a value that was once
2387
+ * valid. To support iterator.remove, the nextKey field is not
2388
+ * updated (nulled out) when the iterator cannot advance.
2389
+ *
2390
+ * Internal traversals directly access these fields, as in:
2391
+ * {@code while (it.advance() != null) { process(it.nextKey); }}
2392
+ *
2393
+ * Exported iterators must track whether the iterator has advanced
2394
+ * (in hasNext vs next) (by setting/checking/nulling field
2395
+ * nextVal), and then extract key, value, or key-value pairs as
2396
+ * return values of next().
2397
+ *
2398
+ * The iterator visits once each still-valid node that was
2399
+ * reachable upon iterator construction. It might miss some that
2400
+ * were added to a bin after the bin was visited, which is OK wrt
2401
+ * consistency guarantees. Maintaining this property in the face
2402
+ * of possible ongoing resizes requires a fair amount of
2403
+ * bookkeeping state that is difficult to optimize away amidst
2404
+ * volatile accesses. Even so, traversal maintains reasonable
2405
+ * throughput.
2406
+ *
2407
+ * Normally, iteration proceeds bin-by-bin traversing lists.
2408
+ * However, if the table has been resized, then all future steps
2409
+ * must traverse both the bin at the current index as well as at
2410
+ * (index + baseSize); and so on for further resizings. To
2411
+ * paranoically cope with potential sharing by users of iterators
2412
+ * across threads, iteration terminates if a bounds check fails
2413
+ * for a table read.
2414
+ *
2415
+ * In the original jsr166e sources this class extends ForkJoinTask to streamline parallel
2416
+ * iteration in bulk operations (see BulkTask). This adds only an
2417
+ * int of space overhead, which is close enough to negligible
2418
+ * not to worry about even in cases where it is not needed. Because
2419
+ * ForkJoinTask is Serializable, but iterators need not be, we
2420
+ * need to add warning suppressions.
2421
+ */
2422
+ @SuppressWarnings("serial") static class Traverser<K,V,R> {
2423
+ final ConcurrentHashMapV8<K, V> map;
2424
+ Node next; // the next entry to use
2425
+ Object nextKey; // cached key field of next
2426
+ Object nextVal; // cached val field of next
2427
+ AtomicReferenceArray<Node> tab; // current table; updated if resized
2428
+ int index; // index of bin to use next
2429
+ int baseIndex; // current index of initial table
2430
+ int baseLimit; // index bound for initial table
2431
+ int baseSize; // initial table size
2432
+
2433
+ /** Creates iterator for all entries in the table. */
2434
+ Traverser(ConcurrentHashMapV8<K, V> map) {
2435
+ this.map = map;
2436
+ }
2437
+
2438
+ /** Creates iterator for split() methods */
2439
+ Traverser(Traverser<K,V,?> it) {
2440
+ ConcurrentHashMapV8<K, V> m; AtomicReferenceArray<Node> t;
2441
+ if ((m = this.map = it.map) == null)
2442
+ t = null;
2443
+ else if ((t = it.tab) == null && // force parent tab initialization
2444
+ (t = it.tab = m.table) != null)
2445
+ it.baseLimit = it.baseSize = t.length();
2446
+ this.tab = t;
2447
+ this.baseSize = it.baseSize;
2448
+ it.baseLimit = this.index = this.baseIndex =
2449
+ ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2450
+ }
2451
+
2452
+ /**
2453
+ * Advances next; returns nextVal or null if terminated.
2454
+ * See above for explanation.
2455
+ */
2456
+ final Object advance() {
2457
+ Node e = next;
2458
+ Object ev = null;
2459
+ outer: do {
2460
+ if (e != null) // advance past used/skipped node
2461
+ e = e.next;
2462
+ while (e == null) { // get to next non-null bin
2463
+ ConcurrentHashMapV8<K, V> m;
2464
+ AtomicReferenceArray<Node> t; int b, i, n; Object ek; // checks must use locals
2465
+ if ((t = tab) != null)
2466
+ n = t.length();
2467
+ else if ((m = map) != null && (t = tab = m.table) != null)
2468
+ n = baseLimit = baseSize = t.length();
2469
+ else
2470
+ break outer;
2471
+ if ((b = baseIndex) >= baseLimit ||
2472
+ (i = index) < 0 || i >= n)
2473
+ break outer;
2474
+ if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2475
+ if ((ek = e.key) instanceof TreeBin)
2476
+ e = ((TreeBin)ek).first;
2477
+ else {
2478
+ tab = (AtomicReferenceArray<Node>)ek;
2479
+ continue; // restarts due to null val
2480
+ }
2481
+ } // visit upper slots if present
2482
+ index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2483
+ }
2484
+ nextKey = e.key;
2485
+ } while ((ev = e.val) == null); // skip deleted or special nodes
2486
+ next = e;
2487
+ return nextVal = ev;
2488
+ }
2489
+
2490
+ public final void remove() {
2491
+ Object k = nextKey;
2492
+ if (k == null && (advance() == null || (k = nextKey) == null))
2493
+ throw new IllegalStateException();
2494
+ map.internalReplace(k, null, null);
2495
+ }
2496
+
2497
+ public final boolean hasNext() {
2498
+ return nextVal != null || advance() != null;
2499
+ }
2500
+
2501
+ public final boolean hasMoreElements() { return hasNext(); }
2502
+ public final void setRawResult(Object x) { }
2503
+ public R getRawResult() { return null; }
2504
+ public boolean exec() { return true; }
2505
+ }
2506
+
2507
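The contract described above is the familiar weakly consistent iteration of java.util.concurrent maps, so it can be illustrated with the JDK's ConcurrentHashMap (an analogy only, not code from this gem): iterating while mutating never throws ConcurrentModificationException, every node reachable at construction is visited at most once, and concurrent updates may or may not be observed.

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Analogous weakly consistent traversal using the JDK map: mutating the map
    // while iterating is allowed and never throws ConcurrentModificationException.
    public class WeaklyConsistentIterationDemo {
        public static void main(String[] args) {
            Map<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 2);

            Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Integer> e = it.next();
                map.put("added-during-iteration", 99);   // fine: may or may not be seen
                map.remove("b");                         // fine: no fail-fast behavior
                System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }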
+ /* ---------------- Public operations -------------- */
2508
+
2509
+ /**
2510
+ * Creates a new, empty map with the default initial table size (16).
2511
+ */
2512
+ public ConcurrentHashMapV8() {
2513
+ this.counter = new LongAdder();
2514
+ }
2515
+
2516
+ /**
2517
+ * Creates a new, empty map with an initial table size
2518
+ * accommodating the specified number of elements without the need
2519
+ * to dynamically resize.
2520
+ *
2521
+ * @param initialCapacity The implementation performs internal
2522
+ * sizing to accommodate this many elements.
2523
+ * @throws IllegalArgumentException if the initial capacity of
2524
+ * elements is negative
2525
+ */
2526
+ public ConcurrentHashMapV8(int initialCapacity) {
2527
+ if (initialCapacity < 0)
2528
+ throw new IllegalArgumentException();
2529
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2530
+ MAXIMUM_CAPACITY :
2531
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2532
+ this.counter = new LongAdder();
2533
+ this.sizeCtl = cap;
2534
+ }
2535
+
2536
+ /**
2537
+ * Creates a new map with the same mappings as the given map.
2538
+ *
2539
+ * @param m the map
2540
+ */
2541
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2542
+ this.counter = new LongAdder();
2543
+ this.sizeCtl = DEFAULT_CAPACITY;
2544
+ internalPutAll(m);
2545
+ }
2546
+
2547
+ /**
2548
+ * Creates a new, empty map with an initial table size based on
2549
+ * the given number of elements ({@code initialCapacity}) and
2550
+ * initial table density ({@code loadFactor}).
2551
+ *
2552
+ * @param initialCapacity the initial capacity. The implementation
2553
+ * performs internal sizing to accommodate this many elements,
2554
+ * given the specified load factor.
2555
+ * @param loadFactor the load factor (table density) for
2556
+ * establishing the initial table size
2557
+ * @throws IllegalArgumentException if the initial capacity of
2558
+ * elements is negative or the load factor is nonpositive
2559
+ *
2560
+ * @since 1.6
2561
+ */
2562
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2563
+ this(initialCapacity, loadFactor, 1);
2564
+ }
2565
+
2566
+ /**
2567
+ * Creates a new, empty map with an initial table size based on
2568
+ * the given number of elements ({@code initialCapacity}), table
2569
+ * density ({@code loadFactor}), and number of concurrently
2570
+ * updating threads ({@code concurrencyLevel}).
2571
+ *
2572
+ * @param initialCapacity the initial capacity. The implementation
2573
+ * performs internal sizing to accommodate this many elements,
2574
+ * given the specified load factor.
2575
+ * @param loadFactor the load factor (table density) for
2576
+ * establishing the initial table size
2577
+ * @param concurrencyLevel the estimated number of concurrently
2578
+ * updating threads. The implementation may use this value as
2579
+ * a sizing hint.
2580
+ * @throws IllegalArgumentException if the initial capacity is
2581
+ * negative or the load factor or concurrencyLevel are
2582
+ * nonpositive
2583
+ */
2584
+ public ConcurrentHashMapV8(int initialCapacity,
2585
+ float loadFactor, int concurrencyLevel) {
2586
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2587
+ throw new IllegalArgumentException();
2588
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
2589
+ initialCapacity = concurrencyLevel; // as estimated threads
2590
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2591
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2592
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
2593
+ this.counter = new LongAdder();
2594
+ this.sizeCtl = cap;
2595
+ }
2596
+
2597
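For a concrete feel of the sizing arithmetic in the constructor above: with the commonly used arguments (16, 0.75f, 4), the computed size is 22, which tableSizeFor rounds up to a 32-bin initial table. A small worked example (the argument values are chosen purely for illustration):

    // Sizing arithmetic from the (initialCapacity, loadFactor, concurrencyLevel)
    // constructor, e.g. new ConcurrentHashMapV8(16, 0.75f, 4):
    public class ConstructorSizingDemo {
        public static void main(String[] args) {
            int initialCapacity = 16;
            float loadFactor = 0.75f;
            int concurrencyLevel = 4;
            if (initialCapacity < concurrencyLevel)   // use at least one bin per estimated thread
                initialCapacity = concurrencyLevel;
            long size = (long) (1.0 + (long) initialCapacity / loadFactor);
            System.out.println(size);                 // 22
            // tableSizeFor(22) == 32, so the first allocated table holds 32 bins
        }
    }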
+ /**
2598
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2599
+ * from the given type to {@code Boolean.TRUE}.
2600
+ *
2601
+ * @return the new set
2602
+ */
2603
+ public static <K> KeySetView<K,Boolean> newKeySet() {
2604
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(),
2605
+ Boolean.TRUE);
2606
+ }
2607
+
2608
+ /**
2609
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2610
+ * from the given type to {@code Boolean.TRUE}.
2611
+ *
2612
+ * @param initialCapacity The implementation performs internal
2613
+ * sizing to accommodate this many elements.
2614
+ * @throws IllegalArgumentException if the initial capacity of
2615
+ * elements is negative
2616
+ * @return the new set
2617
+ */
2618
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2619
+ return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity),
2620
+ Boolean.TRUE);
2621
+ }
2622
+
2623
+ /**
2624
+ * {@inheritDoc}
2625
+ */
2626
+ public boolean isEmpty() {
2627
+ return counter.sum() <= 0L; // ignore transient negative values
2628
+ }
2629
+
2630
+ /**
2631
+ * {@inheritDoc}
2632
+ */
2633
+ public int size() {
2634
+ long n = counter.sum();
2635
+ return ((n < 0L) ? 0 :
2636
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2637
+ (int)n);
2638
+ }
2639
+
2640
+ /**
2641
+ * Returns the number of mappings. This method should be used
2642
+ * instead of {@link #size} because a ConcurrentHashMapV8 may
2643
+ * contain more mappings than can be represented as an int. The
2644
+ * value returned is a snapshot; the actual count may differ if
2645
+ * there are ongoing concurrent insertions or removals.
2646
+ *
2647
+ * @return the number of mappings
2648
+ */
2649
+ public long mappingCount() {
2650
+ long n = counter.sum();
2651
+ return (n < 0L) ? 0L : n; // ignore transient negative values
2652
+ }
2653
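An illustrative comparison (not part of this diff): mappingCount() reports the full long count, while size() clamps it to the int range.

  ConcurrentHashMapV8<Long, Long> huge = new ConcurrentHashMapV8<Long, Long>();
  long exact = huge.mappingCount();   // snapshot; may lag concurrent inserts/removals
  int clamped = huge.size();          // same count, but never above Integer.MAX_VALUE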
+
2654
+ /**
2655
+ * Returns the value to which the specified key is mapped,
2656
+ * or {@code null} if this map contains no mapping for the key.
2657
+ *
2658
+ * <p>More formally, if this map contains a mapping from a key
2659
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
2660
+ * then this method returns {@code v}; otherwise it returns
2661
+ * {@code null}. (There can be at most one such mapping.)
2662
+ *
2663
+ * @throws NullPointerException if the specified key is null
2664
+ */
2665
+ @SuppressWarnings("unchecked") public V get(Object key) {
2666
+ if (key == null)
2667
+ throw new NullPointerException();
2668
+ return (V)internalGet(key);
2669
+ }
2670
+
2671
+ /**
2672
+ * Returns the value to which the specified key is mapped,
2673
+ * or the given defaultValue if this map contains no mapping for the key.
2674
+ *
2675
+ * @param key the key
2676
+ * @param defaultValue the value to return if this map contains
2677
+ * no mapping for the given key
2678
+ * @return the mapping for the key, if present; else the defaultValue
2679
+ * @throws NullPointerException if the specified key is null
2680
+ */
2681
+ @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2682
+ if (key == null)
2683
+ throw new NullPointerException();
2684
+ V v = (V) internalGet(key);
2685
+ return v == null ? defaultValue : v;
2686
+ }
2687
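A small usage sketch (illustrative only): getValueOrDefault replaces the usual containsKey/get pair for reads that need a fallback.

  ConcurrentHashMapV8<String, Long> counts = new ConcurrentHashMapV8<String, Long>();
  long hits = counts.getValueOrDefault("page", 0L);   // 0L when "page" has no mapping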
+
2688
+ /**
2689
+ * Tests if the specified object is a key in this table.
2690
+ *
2691
+ * @param key possible key
2692
+ * @return {@code true} if and only if the specified object
2693
+ * is a key in this table, as determined by the
2694
+ * {@code equals} method; {@code false} otherwise
2695
+ * @throws NullPointerException if the specified key is null
2696
+ */
2697
+ public boolean containsKey(Object key) {
2698
+ if (key == null)
2699
+ throw new NullPointerException();
2700
+ return internalGet(key) != null;
2701
+ }
2702
+
2703
+ /**
2704
+ * Returns {@code true} if this map maps one or more keys to the
2705
+ * specified value. Note: This method may require a full traversal
2706
+ * of the map, and is much slower than method {@code containsKey}.
2707
+ *
2708
+ * @param value value whose presence in this map is to be tested
2709
+ * @return {@code true} if this map maps one or more keys to the
2710
+ * specified value
2711
+ * @throws NullPointerException if the specified value is null
2712
+ */
2713
+ public boolean containsValue(Object value) {
2714
+ if (value == null)
2715
+ throw new NullPointerException();
2716
+ Object v;
2717
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2718
+ while ((v = it.advance()) != null) {
2719
+ if (v == value || value.equals(v))
2720
+ return true;
2721
+ }
2722
+ return false;
2723
+ }
2724
+
2725
+ /**
2726
+ * Legacy method testing if some key maps into the specified value
2727
+ * in this table. This method is identical in functionality to
2728
+ * {@link #containsValue}, and exists solely to ensure
2729
+ * full compatibility with class {@link java.util.Hashtable},
2730
+ * which supported this method prior to introduction of the
2731
+ * Java Collections framework.
2732
+ *
2733
+ * @param value a value to search for
2734
+ * @return {@code true} if and only if some key maps to the
2735
+ * {@code value} argument in this table as
2736
+ * determined by the {@code equals} method;
2737
+ * {@code false} otherwise
2738
+ * @throws NullPointerException if the specified value is null
2739
+ */
2740
+ public boolean contains(Object value) {
2741
+ return containsValue(value);
2742
+ }
2743
+
2744
+ /**
2745
+ * Maps the specified key to the specified value in this table.
2746
+ * Neither the key nor the value can be null.
2747
+ *
2748
+ * <p>The value can be retrieved by calling the {@code get} method
2749
+ * with a key that is equal to the original key.
2750
+ *
2751
+ * @param key key with which the specified value is to be associated
2752
+ * @param value value to be associated with the specified key
2753
+ * @return the previous value associated with {@code key}, or
2754
+ * {@code null} if there was no mapping for {@code key}
2755
+ * @throws NullPointerException if the specified key or value is null
2756
+ */
2757
+ @SuppressWarnings("unchecked") public V put(K key, V value) {
2758
+ if (key == null || value == null)
2759
+ throw new NullPointerException();
2760
+ return (V)internalPut(key, value);
2761
+ }
2762
+
2763
+ /**
2764
+ * {@inheritDoc}
2765
+ *
2766
+ * @return the previous value associated with the specified key,
2767
+ * or {@code null} if there was no mapping for the key
2768
+ * @throws NullPointerException if the specified key or value is null
2769
+ */
2770
+ @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2771
+ if (key == null || value == null)
2772
+ throw new NullPointerException();
2773
+ return (V)internalPutIfAbsent(key, value);
2774
+ }
2775
+
2776
+ /**
2777
+ * Copies all of the mappings from the specified map to this one.
2778
+ * These mappings replace any mappings that this map had for any of the
2779
+ * keys currently in the specified map.
2780
+ *
2781
+ * @param m mappings to be stored in this map
2782
+ */
2783
+ public void putAll(Map<? extends K, ? extends V> m) {
2784
+ internalPutAll(m);
2785
+ }
2786
+
2787
+ /**
2788
+ * If the specified key is not already associated with a value,
2789
+ * computes its value using the given mappingFunction and enters
2790
+ * it into the map unless null. This is equivalent to
2791
+ * <pre> {@code
2792
+ * if (map.containsKey(key))
2793
+ * return map.get(key);
2794
+ * value = mappingFunction.apply(key);
2795
+ * if (value != null)
2796
+ * map.put(key, value);
2797
+ * return value;}</pre>
2798
+ *
2799
+ * except that the action is performed atomically. If the
2800
+ * function returns {@code null} no mapping is recorded. If the
2801
+ * function itself throws an (unchecked) exception, the exception
2802
+ * is rethrown to its caller, and no mapping is recorded. Some
2803
+ * attempted update operations on this map by other threads may be
2804
+ * blocked while computation is in progress, so the computation
2805
+ * should be short and simple, and must not attempt to update any
2806
+ * other mappings of this Map. The most appropriate usage is to
2807
+ * construct a new object serving as an initial mapped value, or
2808
+ * memoized result, as in:
2809
+ *
2810
+ * <pre> {@code
2811
+ * map.computeIfAbsent(key, new Fun<K, V>() {
2812
+ * public V apply(K k) { return new Value(f(k)); }});}</pre>
2813
+ *
2814
+ * @param key key with which the specified value is to be associated
2815
+ * @param mappingFunction the function to compute a value
2816
+ * @return the current (existing or computed) value associated with
2817
+ * the specified key, or null if the computed value is null
2818
+ * @throws NullPointerException if the specified key or mappingFunction
2819
+ * is null
2820
+ * @throws IllegalStateException if the computation detectably
2821
+ * attempts a recursive update to this map that would
2822
+ * otherwise never complete
2823
+ * @throws RuntimeException or Error if the mappingFunction does so,
2824
+ * in which case the mapping is left unestablished
2825
+ */
2826
+ @SuppressWarnings("unchecked") public V computeIfAbsent
2827
+ (K key, Fun<? super K, ? extends V> mappingFunction) {
2828
+ if (key == null || mappingFunction == null)
2829
+ throw new NullPointerException();
2830
+ return (V)internalComputeIfAbsent(key, mappingFunction);
2831
+ }
2832
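A hypothetical memoization sketch (not part of the gem's source), assuming java.util.regex.Pattern is imported and that the Fun callback interface referenced in the signature above (declared elsewhere in this file) has a single apply method:

  ConcurrentHashMapV8<String, Pattern> patterns = new ConcurrentHashMapV8<String, Pattern>();
  Pattern p = patterns.computeIfAbsent("\\d+", new Fun<String, Pattern>() {
      public Pattern apply(String regex) {
          return Pattern.compile(regex);   // computed at most once per key
      }
  });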
+
2833
+ /**
2834
+ * If the given key is present, computes a new mapping value given a key and
2835
+ * its current mapped value. This is equivalent to
2836
+ * <pre> {@code
2837
+ * if (map.containsKey(key)) {
2838
+ * value = remappingFunction.apply(key, map.get(key));
2839
+ * if (value != null)
2840
+ * map.put(key, value);
2841
+ * else
2842
+ * map.remove(key);
2843
+ * }
2844
+ * }</pre>
2845
+ *
2846
+ * except that the action is performed atomically. If the
2847
+ * function returns {@code null}, the mapping is removed. If the
2848
+ * function itself throws an (unchecked) exception, the exception
2849
+ * is rethrown to its caller, and the current mapping is left
2850
+ * unchanged. Some attempted update operations on this map by
2851
+ * other threads may be blocked while computation is in progress,
2852
+ * so the computation should be short and simple, and must not
2853
+ * attempt to update any other mappings of this Map.
2854
+ *
2856
+ * @param key key with which the specified value is to be associated
2857
+ * @param remappingFunction the function to compute a value
2858
+ * @return the new value associated with the specified key, or null if none
2859
+ * @throws NullPointerException if the specified key or remappingFunction
2860
+ * is null
2861
+ * @throws IllegalStateException if the computation detectably
2862
+ * attempts a recursive update to this map that would
2863
+ * otherwise never complete
2864
+ * @throws RuntimeException or Error if the remappingFunction does so,
2865
+ * in which case the mapping is unchanged
2866
+ */
2867
+ @SuppressWarnings("unchecked") public V computeIfPresent
2868
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2869
+ if (key == null || remappingFunction == null)
2870
+ throw new NullPointerException();
2871
+ return (V)internalCompute(key, true, remappingFunction);
2872
+ }
2873
+
2874
+ /**
2875
+ * Computes a new mapping value given a key and
2876
+ * its current mapped value (or {@code null} if there is no current
2877
+ * mapping). This is equivalent to
2878
+ * <pre> {@code
2879
+ * value = remappingFunction.apply(key, map.get(key));
2880
+ * if (value != null)
2881
+ * map.put(key, value);
2882
+ * else
2883
+ * map.remove(key);
2884
+ * }</pre>
2885
+ *
2886
+ * except that the action is performed atomically. If the
2887
+ * function returns {@code null}, the mapping is removed. If the
2888
+ * function itself throws an (unchecked) exception, the exception
2889
+ * is rethrown to its caller, and the current mapping is left
2890
+ * unchanged. Some attempted update operations on this map by
2891
+ * other threads may be blocked while computation is in progress,
2892
+ * so the computation should be short and simple, and must not
2893
+ * attempt to update any other mappings of this Map. For example,
2894
+ * to either create or append new messages to a value mapping:
2895
+ *
2896
+ * <pre> {@code
2897
+ * Map<Key, String> map = ...;
2898
+ * final String msg = ...;
2899
+ * map.compute(key, new BiFun<Key, String, String>() {
2900
+ * public String apply(Key k, String v) {
2901
+ * return (v == null) ? msg : v + msg; }});}</pre>
2902
+ *
2903
+ * @param key key with which the specified value is to be associated
2904
+ * @param remappingFunction the function to compute a value
2905
+ * @return the new value associated with the specified key, or null if none
2906
+ * @throws NullPointerException if the specified key or remappingFunction
2907
+ * is null
2908
+ * @throws IllegalStateException if the computation detectably
2909
+ * attempts a recursive update to this map that would
2910
+ * otherwise never complete
2911
+ * @throws RuntimeException or Error if the remappingFunction does so,
2912
+ * in which case the mapping is unchanged
2913
+ */
2914
+ @SuppressWarnings("unchecked") public V compute
2915
+ (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2916
+ if (key == null || remappingFunction == null)
2917
+ throw new NullPointerException();
2918
+ return (V)internalCompute(key, false, remappingFunction);
2919
+ }
2920
+
2921
+ /**
2922
+ * If the specified key is not already associated
2923
+ * with a value, associate it with the given value.
2924
+ * Otherwise, replace the value with the results of
2925
+ * the given remapping function. This is equivalent to:
2926
+ * <pre> {@code
2927
+ * if (!map.containsKey(key))
2928
+ * map.put(key, value);
2929
+ * else {
2930
+ * newValue = remappingFunction.apply(map.get(key), value);
2931
+ * if (newValue != null)
2932
+ * map.put(key, newValue);
2933
+ * else
2934
+ * map.remove(key);
2935
+ * }
2936
+ * }</pre>
2937
+ * except that the action is performed atomically. If the
2938
+ * function returns {@code null}, the mapping is removed. If the
2939
+ * function itself throws an (unchecked) exception, the exception
2940
+ * is rethrown to its caller, and the current mapping is left
2941
+ * unchanged. Some attempted update operations on this map by
2942
+ * other threads may be blocked while computation is in progress,
2943
+ * so the computation should be short and simple, and must not
2944
+ * attempt to update any other mappings of this Map.
2945
+ */
2946
+ @SuppressWarnings("unchecked") public V merge
2947
+ (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2948
+ if (key == null || value == null || remappingFunction == null)
2949
+ throw new NullPointerException();
2950
+ return (V)internalMerge(key, value, remappingFunction);
2951
+ }
2952
+
2953
+ /**
2954
+ * Removes the key (and its corresponding value) from this map.
2955
+ * This method does nothing if the key is not in the map.
2956
+ *
2957
+ * @param key the key that needs to be removed
2958
+ * @return the previous value associated with {@code key}, or
2959
+ * {@code null} if there was no mapping for {@code key}
2960
+ * @throws NullPointerException if the specified key is null
2961
+ */
2962
+ @SuppressWarnings("unchecked") public V remove(Object key) {
2963
+ if (key == null)
2964
+ throw new NullPointerException();
2965
+ return (V)internalReplace(key, null, null);
2966
+ }
2967
+
2968
+ /**
2969
+ * {@inheritDoc}
2970
+ *
2971
+ * @throws NullPointerException if the specified key is null
2972
+ */
2973
+ public boolean remove(Object key, Object value) {
2974
+ if (key == null)
2975
+ throw new NullPointerException();
2976
+ if (value == null)
2977
+ return false;
2978
+ return internalReplace(key, null, value) != null;
2979
+ }
2980
+
2981
+ /**
2982
+ * {@inheritDoc}
2983
+ *
2984
+ * @throws NullPointerException if any of the arguments are null
2985
+ */
2986
+ public boolean replace(K key, V oldValue, V newValue) {
2987
+ if (key == null || oldValue == null || newValue == null)
2988
+ throw new NullPointerException();
2989
+ return internalReplace(key, newValue, oldValue) != null;
2990
+ }
2991
+
2992
+ /**
2993
+ * {@inheritDoc}
2994
+ *
2995
+ * @return the previous value associated with the specified key,
2996
+ * or {@code null} if there was no mapping for the key
2997
+ * @throws NullPointerException if the specified key or value is null
2998
+ */
2999
+ @SuppressWarnings("unchecked") public V replace(K key, V value) {
3000
+ if (key == null || value == null)
3001
+ throw new NullPointerException();
3002
+ return (V)internalReplace(key, value, null);
3003
+ }
3004
+
3005
+ /**
3006
+ * Removes all of the mappings from this map.
3007
+ */
3008
+ public void clear() {
3009
+ internalClear();
3010
+ }
3011
+
3012
+ /**
3013
+ * Returns a {@link Set} view of the keys contained in this map.
3014
+ * The set is backed by the map, so changes to the map are
3015
+ * reflected in the set, and vice-versa.
3016
+ *
3017
+ * @return the set view
3018
+ */
3019
+ public KeySetView<K,V> keySet() {
3020
+ KeySetView<K,V> ks = keySet;
3021
+ return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3022
+ }
3023
+
3024
+ /**
3025
+ * Returns a {@link Set} view of the keys in this map, using the
3026
+ * given common mapped value for any additions (i.e., {@link
3027
+ * Collection#add} and {@link Collection#addAll}). This is of
3028
+ * course only appropriate if it is acceptable to use the same
3029
+ * value for all additions from this view.
3030
+ *
3031
+ * @param mappedValue the mapped value to use for any
3032
+ * additions.
3033
+ * @return the set view
3034
+ * @throws NullPointerException if the mappedValue is null
3035
+ */
3036
+ public KeySetView<K,V> keySet(V mappedValue) {
3037
+ if (mappedValue == null)
3038
+ throw new NullPointerException();
3039
+ return new KeySetView<K,V>(this, mappedValue);
3040
+ }
3041
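An illustrative sketch (not part of the gem's source), assuming java.util.Set is imported: keySet(mappedValue) yields a Set view whose add() writes the fixed value back into the map.

  ConcurrentHashMapV8<String, Boolean> flags = new ConcurrentHashMapV8<String, Boolean>();
  Set<String> enabled = flags.keySet(Boolean.TRUE);
  enabled.add("feature-x");                      // behaves like flags.putIfAbsent("feature-x", Boolean.TRUE)
  boolean on = flags.containsKey("feature-x");   // true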
+
3042
+ /**
3043
+ * Returns a {@link Collection} view of the values contained in this map.
3044
+ * The collection is backed by the map, so changes to the map are
3045
+ * reflected in the collection, and vice-versa.
3046
+ */
3047
+ public ValuesView<K,V> values() {
3048
+ ValuesView<K,V> vs = values;
3049
+ return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
3050
+ }
3051
+
3052
+ /**
3053
+ * Returns a {@link Set} view of the mappings contained in this map.
3054
+ * The set is backed by the map, so changes to the map are
3055
+ * reflected in the set, and vice-versa. The set supports element
3056
+ * removal, which removes the corresponding mapping from the map,
3057
+ * via the {@code Iterator.remove}, {@code Set.remove},
3058
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
3059
+ * operations. It does not support the {@code add} or
3060
+ * {@code addAll} operations.
3061
+ *
3062
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3063
+ * that will never throw {@link ConcurrentModificationException},
3064
+ * and guarantees to traverse elements as they existed upon
3065
+ * construction of the iterator, and may (but is not guaranteed to)
3066
+ * reflect any modifications subsequent to construction.
3067
+ */
3068
+ public Set<Map.Entry<K,V>> entrySet() {
3069
+ EntrySetView<K,V> es = entrySet;
3070
+ return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
3071
+ }
3072
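A small illustration (not part of this diff): the weakly consistent entrySet iterator tolerates concurrent updates, so conditional removal during traversal is safe.

  ConcurrentHashMapV8<String, Integer> scores = new ConcurrentHashMapV8<String, Integer>();
  for (Map.Entry<String, Integer> e : scores.entrySet()) {
      if (e.getValue().intValue() == 0)
          scores.remove(e.getKey(), e.getValue());   // never throws ConcurrentModificationException
  }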
+
3073
+ /**
3074
+ * Returns an enumeration of the keys in this table.
3075
+ *
3076
+ * @return an enumeration of the keys in this table
3077
+ * @see #keySet()
3078
+ */
3079
+ public Enumeration<K> keys() {
3080
+ return new KeyIterator<K,V>(this);
3081
+ }
3082
+
3083
+ /**
3084
+ * Returns an enumeration of the values in this table.
3085
+ *
3086
+ * @return an enumeration of the values in this table
3087
+ * @see #values()
3088
+ */
3089
+ public Enumeration<V> elements() {
3090
+ return new ValueIterator<K,V>(this);
3091
+ }
3092
+
3093
+ /**
3094
+ * Returns a partitionable iterator of the keys in this map.
3095
+ *
3096
+ * @return a partitionable iterator of the keys in this map
3097
+ */
3098
+ public Spliterator<K> keySpliterator() {
3099
+ return new KeyIterator<K,V>(this);
3100
+ }
3101
+
3102
+ /**
3103
+ * Returns a partitionable iterator of the values in this map.
3104
+ *
3105
+ * @return a partitionable iterator of the values in this map
3106
+ */
3107
+ public Spliterator<V> valueSpliterator() {
3108
+ return new ValueIterator<K,V>(this);
3109
+ }
3110
+
3111
+ /**
3112
+ * Returns a partitionable iterator of the entries in this map.
3113
+ *
3114
+ * @return a partitionable iterator of the entries in this map
3115
+ */
3116
+ public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3117
+ return new EntryIterator<K,V>(this);
3118
+ }
3119
+
3120
+ /**
3121
+ * Returns the hash code value for this {@link Map}, i.e.,
3122
+ * the sum of, for each key-value pair in the map,
3123
+ * {@code key.hashCode() ^ value.hashCode()}.
3124
+ *
3125
+ * @return the hash code value for this map
3126
+ */
3127
+ public int hashCode() {
3128
+ int h = 0;
3129
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3130
+ Object v;
3131
+ while ((v = it.advance()) != null) {
3132
+ h += it.nextKey.hashCode() ^ v.hashCode();
3133
+ }
3134
+ return h;
3135
+ }
3136
+
3137
+ /**
3138
+ * Returns a string representation of this map. The string
3139
+ * representation consists of a list of key-value mappings (in no
3140
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
3141
+ * mappings are separated by the characters {@code ", "} (comma
3142
+ * and space). Each key-value mapping is rendered as the key
3143
+ * followed by an equals sign ("{@code =}") followed by the
3144
+ * associated value.
3145
+ *
3146
+ * @return a string representation of this map
3147
+ */
3148
+ public String toString() {
3149
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3150
+ StringBuilder sb = new StringBuilder();
3151
+ sb.append('{');
3152
+ Object v;
3153
+ if ((v = it.advance()) != null) {
3154
+ for (;;) {
3155
+ Object k = it.nextKey;
3156
+ sb.append(k == this ? "(this Map)" : k);
3157
+ sb.append('=');
3158
+ sb.append(v == this ? "(this Map)" : v);
3159
+ if ((v = it.advance()) == null)
3160
+ break;
3161
+ sb.append(',').append(' ');
3162
+ }
3163
+ }
3164
+ return sb.append('}').toString();
3165
+ }
3166
+
3167
+ /**
3168
+ * Compares the specified object with this map for equality.
3169
+ * Returns {@code true} if the given object is a map with the same
3170
+ * mappings as this map. This operation may return misleading
3171
+ * results if either map is concurrently modified during execution
3172
+ * of this method.
3173
+ *
3174
+ * @param o object to be compared for equality with this map
3175
+ * @return {@code true} if the specified object is equal to this map
3176
+ */
3177
+ public boolean equals(Object o) {
3178
+ if (o != this) {
3179
+ if (!(o instanceof Map))
3180
+ return false;
3181
+ Map<?,?> m = (Map<?,?>) o;
3182
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3183
+ Object val;
3184
+ while ((val = it.advance()) != null) {
3185
+ Object v = m.get(it.nextKey);
3186
+ if (v == null || (v != val && !v.equals(val)))
3187
+ return false;
3188
+ }
3189
+ for (Map.Entry<?,?> e : m.entrySet()) {
3190
+ Object mk, mv, v;
3191
+ if ((mk = e.getKey()) == null ||
3192
+ (mv = e.getValue()) == null ||
3193
+ (v = internalGet(mk)) == null ||
3194
+ (mv != v && !mv.equals(v)))
3195
+ return false;
3196
+ }
3197
+ }
3198
+ return true;
3199
+ }
3200
+
3201
+ /* ----------------Iterators -------------- */
3202
+
3203
+ @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3204
+ implements Spliterator<K>, Enumeration<K> {
3205
+ KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3206
+ KeyIterator(Traverser<K,V,Object> it) {
3207
+ super(it);
3208
+ }
3209
+ public KeyIterator<K,V> split() {
3210
+ if (nextKey != null)
3211
+ throw new IllegalStateException();
3212
+ return new KeyIterator<K,V>(this);
3213
+ }
3214
+ @SuppressWarnings("unchecked") public final K next() {
3215
+ if (nextVal == null && advance() == null)
3216
+ throw new NoSuchElementException();
3217
+ Object k = nextKey;
3218
+ nextVal = null;
3219
+ return (K) k;
3220
+ }
3221
+
3222
+ public final K nextElement() { return next(); }
3223
+ }
3224
+
3225
+ @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3226
+ implements Spliterator<V>, Enumeration<V> {
3227
+ ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3228
+ ValueIterator(Traverser<K,V,Object> it) {
3229
+ super(it);
3230
+ }
3231
+ public ValueIterator<K,V> split() {
3232
+ if (nextKey != null)
3233
+ throw new IllegalStateException();
3234
+ return new ValueIterator<K,V>(this);
3235
+ }
3236
+
3237
+ @SuppressWarnings("unchecked") public final V next() {
3238
+ Object v;
3239
+ if ((v = nextVal) == null && (v = advance()) == null)
3240
+ throw new NoSuchElementException();
3241
+ nextVal = null;
3242
+ return (V) v;
3243
+ }
3244
+
3245
+ public final V nextElement() { return next(); }
3246
+ }
3247
+
3248
+ @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3249
+ implements Spliterator<Map.Entry<K,V>> {
3250
+ EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3251
+ EntryIterator(Traverser<K,V,Object> it) {
3252
+ super(it);
3253
+ }
3254
+ public EntryIterator<K,V> split() {
3255
+ if (nextKey != null)
3256
+ throw new IllegalStateException();
3257
+ return new EntryIterator<K,V>(this);
3258
+ }
3259
+
3260
+ @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3261
+ Object v;
3262
+ if ((v = nextVal) == null && (v = advance()) == null)
3263
+ throw new NoSuchElementException();
3264
+ Object k = nextKey;
3265
+ nextVal = null;
3266
+ return new MapEntry<K,V>((K)k, (V)v, map);
3267
+ }
3268
+ }
3269
+
3270
+ /**
3271
+ * Exported Entry for iterators
3272
+ */
3273
+ static final class MapEntry<K,V> implements Map.Entry<K, V> {
3274
+ final K key; // non-null
3275
+ V val; // non-null
3276
+ final ConcurrentHashMapV8<K, V> map;
3277
+ MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3278
+ this.key = key;
3279
+ this.val = val;
3280
+ this.map = map;
3281
+ }
3282
+ public final K getKey() { return key; }
3283
+ public final V getValue() { return val; }
3284
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3285
+ public final String toString(){ return key + "=" + val; }
3286
+
3287
+ public final boolean equals(Object o) {
3288
+ Object k, v; Map.Entry<?,?> e;
3289
+ return ((o instanceof Map.Entry) &&
3290
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3291
+ (v = e.getValue()) != null &&
3292
+ (k == key || k.equals(key)) &&
3293
+ (v == val || v.equals(val)));
3294
+ }
3295
+
3296
+ /**
3297
+ * Sets our entry's value and writes through to the map. The
3298
+ * value to return is somewhat arbitrary here. Since we do not
3299
+ * necessarily track asynchronous changes, the most recent
3300
+ * "previous" value could be different from what we return (or
3301
+ * could even have been removed in which case the put will
3302
+ * re-establish). We do not and cannot guarantee more.
3303
+ */
3304
+ public final V setValue(V value) {
3305
+ if (value == null) throw new NullPointerException();
3306
+ V v = val;
3307
+ val = value;
3308
+ map.put(key, value);
3309
+ return v;
3310
+ }
3311
+ }
3312
+
3313
+ /* ---------------- Serialization Support -------------- */
3314
+
3315
+ /**
3316
+ * Stripped-down version of helper class used in previous version,
3317
+ * declared for the sake of serialization compatibility
3318
+ */
3319
+ static class Segment<K,V> implements Serializable {
3320
+ private static final long serialVersionUID = 2249069246763182397L;
3321
+ final float loadFactor;
3322
+ Segment(float lf) { this.loadFactor = lf; }
3323
+ }
3324
+
3325
+ /**
3326
+ * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3327
+ * stream (i.e., serializes it).
3328
+ * @param s the stream
3329
+ * @serialData
3330
+ * the key (Object) and value (Object)
3331
+ * for each key-value mapping, followed by a null pair.
3332
+ * The key-value mappings are emitted in no particular order.
3333
+ */
3334
+ @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3335
+ throws java.io.IOException {
3336
+ if (segments == null) { // for serialization compatibility
3337
+ segments = (Segment<K,V>[])
3338
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3339
+ for (int i = 0; i < segments.length; ++i)
3340
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
3341
+ }
3342
+ s.defaultWriteObject();
3343
+ Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3344
+ Object v;
3345
+ while ((v = it.advance()) != null) {
3346
+ s.writeObject(it.nextKey);
3347
+ s.writeObject(v);
3348
+ }
3349
+ s.writeObject(null);
3350
+ s.writeObject(null);
3351
+ segments = null; // throw away
3352
+ }
3353
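An illustrative round trip (not part of the gem's source), assuming the standard java.io stream classes, a serializable map variable in scope, and exception handling omitted:

  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  ObjectOutputStream out = new ObjectOutputStream(bytes);
  out.writeObject(map);                            // writes key/value pairs, then a null pair
  out.flush();
  ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
  @SuppressWarnings("unchecked")
  ConcurrentHashMapV8<String, Integer> copy = (ConcurrentHashMapV8<String, Integer>) in.readObject();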
+
3354
+ /**
3355
+ * Reconstitutes the instance from a stream (that is, deserializes it).
3356
+ * @param s the stream
3357
+ */
3358
+ @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3359
+ throws java.io.IOException, ClassNotFoundException {
3360
+ s.defaultReadObject();
3361
+ this.segments = null; // unneeded
3362
+ // initialize transient final field
3363
+ this.counter = new LongAdder();
3364
+
3365
+ // Create all nodes, then place in table once size is known
3366
+ long size = 0L;
3367
+ Node p = null;
3368
+ for (;;) {
3369
+ K k = (K) s.readObject();
3370
+ V v = (V) s.readObject();
3371
+ if (k != null && v != null) {
3372
+ int h = spread(k.hashCode());
3373
+ p = new Node(h, k, v, p);
3374
+ ++size;
3375
+ }
3376
+ else
3377
+ break;
3378
+ }
3379
+ if (p != null) {
3380
+ boolean init = false;
3381
+ int n;
3382
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3383
+ n = MAXIMUM_CAPACITY;
3384
+ else {
3385
+ int sz = (int)size;
3386
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
3387
+ }
3388
+ int sc = sizeCtl;
3389
+ boolean collide = false;
3390
+ if (n > sc &&
3391
+ SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) {
3392
+ try {
3393
+ if (table == null) {
3394
+ init = true;
3395
+ AtomicReferenceArray<Node> tab = new AtomicReferenceArray<Node>(n);
3396
+ int mask = n - 1;
3397
+ while (p != null) {
3398
+ int j = p.hash & mask;
3399
+ Node next = p.next;
3400
+ Node q = p.next = tabAt(tab, j);
3401
+ setTabAt(tab, j, p);
3402
+ if (!collide && q != null && q.hash == p.hash)
3403
+ collide = true;
3404
+ p = next;
3405
+ }
3406
+ table = tab;
3407
+ counter.add(size);
3408
+ sc = n - (n >>> 2);
3409
+ }
3410
+ } finally {
3411
+ sizeCtl = sc;
3412
+ }
3413
+ if (collide) { // rescan and convert to TreeBins
3414
+ AtomicReferenceArray<Node> tab = table;
3415
+ for (int i = 0; i < tab.length(); ++i) {
3416
+ int c = 0;
3417
+ for (Node e = tabAt(tab, i); e != null; e = e.next) {
3418
+ if (++c > TREE_THRESHOLD &&
3419
+ (e.key instanceof Comparable)) {
3420
+ replaceWithTreeBin(tab, i, e.key);
3421
+ break;
3422
+ }
3423
+ }
3424
+ }
3425
+ }
3426
+ }
3427
+ if (!init) { // Can only happen if unsafely published.
3428
+ while (p != null) {
3429
+ internalPut(p.key, p.val);
3430
+ p = p.next;
3431
+ }
3432
+ }
3433
+ }
3434
+ }
3435
+
3436
+
3437
+ // -------------------------------------------------------
3438
+
3439
+ // SAMs: single-abstract-method callback interfaces (for pre-lambda callers)
3440
+ /** Interface describing a void action of one argument */
3441
+ public interface Action<A> { void apply(A a); }
3442
+ /** Interface describing a void action of two arguments */
3443
+ public interface BiAction<A,B> { void apply(A a, B b); }
3444
+ /** Interface describing a function of no arguments */
3445
+ public interface Generator<T> { T apply(); }
3446
+ /** Interface describing a function mapping its argument to a double */
3447
+ public interface ObjectToDouble<A> { double apply(A a); }
3448
+ /** Interface describing a function mapping its argument to a long */
3449
+ public interface ObjectToLong<A> { long apply(A a); }
3450
+ /** Interface describing a function mapping its argument to an int */
3451
+ public interface ObjectToInt<A> {int apply(A a); }
3452
+ /** Interface describing a function mapping two arguments to a double */
3453
+ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3454
+ /** Interface describing a function mapping two arguments to a long */
3455
+ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3456
+ /** Interface describing a function mapping two arguments to an int */
3457
+ public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3458
+ /** Interface describing a function mapping a double to a double */
3459
+ public interface DoubleToDouble { double apply(double a); }
3460
+ /** Interface describing a function mapping a long to a long */
3461
+ public interface LongToLong { long apply(long a); }
3462
+ /** Interface describing a function mapping an int to an int */
3463
+ public interface IntToInt { int apply(int a); }
3464
+ /** Interface describing a function mapping two doubles to a double */
3465
+ public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3466
+ /** Interface describing a function mapping two longs to a long */
3467
+ public interface LongByLongToLong { long apply(long a, long b); }
3468
+ /** Interface describing a function mapping two ints to an int */
3469
+ public interface IntByIntToInt { int apply(int a, int b); }
3470
+
3471
+
3472
+ /* ----------------Views -------------- */
3473
+
3474
+ /**
3475
+ * Base class for views.
3476
+ */
3477
+ static abstract class CHMView<K, V> {
3478
+ final ConcurrentHashMapV8<K, V> map;
3479
+ CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3480
+
3481
+ /**
3482
+ * Returns the map backing this view.
3483
+ *
3484
+ * @return the map backing this view
3485
+ */
3486
+ public ConcurrentHashMapV8<K,V> getMap() { return map; }
3487
+
3488
+ public final int size() { return map.size(); }
3489
+ public final boolean isEmpty() { return map.isEmpty(); }
3490
+ public final void clear() { map.clear(); }
3491
+
3492
+ // implementations below rely on concrete classes supplying these
3493
+ abstract public Iterator<?> iterator();
3494
+ abstract public boolean contains(Object o);
3495
+ abstract public boolean remove(Object o);
3496
+
3497
+ private static final String oomeMsg = "Required array size too large";
3498
+
3499
+ public final Object[] toArray() {
3500
+ long sz = map.mappingCount();
3501
+ if (sz > (long)(MAX_ARRAY_SIZE))
3502
+ throw new OutOfMemoryError(oomeMsg);
3503
+ int n = (int)sz;
3504
+ Object[] r = new Object[n];
3505
+ int i = 0;
3506
+ Iterator<?> it = iterator();
3507
+ while (it.hasNext()) {
3508
+ if (i == n) {
3509
+ if (n >= MAX_ARRAY_SIZE)
3510
+ throw new OutOfMemoryError(oomeMsg);
3511
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3512
+ n = MAX_ARRAY_SIZE;
3513
+ else
3514
+ n += (n >>> 1) + 1;
3515
+ r = Arrays.copyOf(r, n);
3516
+ }
3517
+ r[i++] = it.next();
3518
+ }
3519
+ return (i == n) ? r : Arrays.copyOf(r, i);
3520
+ }
3521
+
3522
+ @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3523
+ long sz = map.mappingCount();
3524
+ if (sz > (long)(MAX_ARRAY_SIZE))
3525
+ throw new OutOfMemoryError(oomeMsg);
3526
+ int m = (int)sz;
3527
+ T[] r = (a.length >= m) ? a :
3528
+ (T[])java.lang.reflect.Array
3529
+ .newInstance(a.getClass().getComponentType(), m);
3530
+ int n = r.length;
3531
+ int i = 0;
3532
+ Iterator<?> it = iterator();
3533
+ while (it.hasNext()) {
3534
+ if (i == n) {
3535
+ if (n >= MAX_ARRAY_SIZE)
3536
+ throw new OutOfMemoryError(oomeMsg);
3537
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3538
+ n = MAX_ARRAY_SIZE;
3539
+ else
3540
+ n += (n >>> 1) + 1;
3541
+ r = Arrays.copyOf(r, n);
3542
+ }
3543
+ r[i++] = (T)it.next();
3544
+ }
3545
+ if (a == r && i < n) {
3546
+ r[i] = null; // null-terminate
3547
+ return r;
3548
+ }
3549
+ return (i == n) ? r : Arrays.copyOf(r, i);
3550
+ }
3551
+
3552
+ public final int hashCode() {
3553
+ int h = 0;
3554
+ for (Iterator<?> it = iterator(); it.hasNext();)
3555
+ h += it.next().hashCode();
3556
+ return h;
3557
+ }
3558
+
3559
+ public final String toString() {
3560
+ StringBuilder sb = new StringBuilder();
3561
+ sb.append('[');
3562
+ Iterator<?> it = iterator();
3563
+ if (it.hasNext()) {
3564
+ for (;;) {
3565
+ Object e = it.next();
3566
+ sb.append(e == this ? "(this Collection)" : e);
3567
+ if (!it.hasNext())
3568
+ break;
3569
+ sb.append(',').append(' ');
3570
+ }
3571
+ }
3572
+ return sb.append(']').toString();
3573
+ }
3574
+
3575
+ public final boolean containsAll(Collection<?> c) {
3576
+ if (c != this) {
3577
+ for (Iterator<?> it = c.iterator(); it.hasNext();) {
3578
+ Object e = it.next();
3579
+ if (e == null || !contains(e))
3580
+ return false;
3581
+ }
3582
+ }
3583
+ return true;
3584
+ }
3585
+
3586
+ public final boolean removeAll(Collection<?> c) {
3587
+ boolean modified = false;
3588
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3589
+ if (c.contains(it.next())) {
3590
+ it.remove();
3591
+ modified = true;
3592
+ }
3593
+ }
3594
+ return modified;
3595
+ }
3596
+
3597
+ public final boolean retainAll(Collection<?> c) {
3598
+ boolean modified = false;
3599
+ for (Iterator<?> it = iterator(); it.hasNext();) {
3600
+ if (!c.contains(it.next())) {
3601
+ it.remove();
3602
+ modified = true;
3603
+ }
3604
+ }
3605
+ return modified;
3606
+ }
3607
+
3608
+ }
3609
+
3610
+ /**
3611
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
3612
+ * which additions may optionally be enabled by mapping to a
3613
+ * common value. This class cannot be directly instantiated. See
3614
+ * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
3615
+ * {@link #newKeySet(int)}.
3616
+ */
3617
+ public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
3618
+ private static final long serialVersionUID = 7249069246763182397L;
3619
+ private final V value;
3620
+ KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public
3621
+ super(map);
3622
+ this.value = value;
3623
+ }
3624
+
3625
+ /**
3626
+ * Returns the default mapped value for additions,
3627
+ * or {@code null} if additions are not supported.
3628
+ *
3629
+ * @return the default mapped value for additions, or {@code null}
3630
+ * if not supported.
3631
+ */
3632
+ public V getMappedValue() { return value; }
3633
+
3634
+ // implement Set API
3635
+
3636
+ public boolean contains(Object o) { return map.containsKey(o); }
3637
+ public boolean remove(Object o) { return map.remove(o) != null; }
3638
+
3639
+ /**
3640
+ * Returns a "weakly consistent" iterator that will never
3641
+ * throw {@link ConcurrentModificationException}, and
3642
+ * guarantees to traverse elements as they existed upon
3643
+ * construction of the iterator, and may (but is not
3644
+ * guaranteed to) reflect any modifications subsequent to
3645
+ * construction.
3646
+ *
3647
+ * @return an iterator over the keys of this map
3648
+ */
3649
+ public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
3650
+ public boolean add(K e) {
3651
+ V v;
3652
+ if ((v = value) == null)
3653
+ throw new UnsupportedOperationException();
3654
+ if (e == null)
3655
+ throw new NullPointerException();
3656
+ return map.internalPutIfAbsent(e, v) == null;
3657
+ }
3658
+ public boolean addAll(Collection<? extends K> c) {
3659
+ boolean added = false;
3660
+ V v;
3661
+ if ((v = value) == null)
3662
+ throw new UnsupportedOperationException();
3663
+ for (K e : c) {
3664
+ if (e == null)
3665
+ throw new NullPointerException();
3666
+ if (map.internalPutIfAbsent(e, v) == null)
3667
+ added = true;
3668
+ }
3669
+ return added;
3670
+ }
3671
+ public boolean equals(Object o) {
3672
+ Set<?> c;
3673
+ return ((o instanceof Set) &&
3674
+ ((c = (Set<?>)o) == this ||
3675
+ (containsAll(c) && c.containsAll(this))));
3676
+ }
3677
+ }
3678
+
3679
+ /**
3680
+ * A view of a ConcurrentHashMapV8 as a {@link Collection} of
3681
+ * values, in which additions are disabled. This class cannot be
3682
+ * directly instantiated. See {@link #values}.
3683
+ *
3684
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
3685
+ * that will never throw {@link ConcurrentModificationException},
3686
+ * and guarantees to traverse elements as they existed upon
3687
+ * construction of the iterator, and may (but is not guaranteed to)
3688
+ * reflect any modifications subsequent to construction.
3689
+ */
3690
+ public static final class ValuesView<K,V> extends CHMView<K,V>
3691
+ implements Collection<V> {
3692
+ ValuesView(ConcurrentHashMapV8<K, V> map) { super(map); }
3693
+ public final boolean contains(Object o) { return map.containsValue(o); }
3694
+ public final boolean remove(Object o) {
3695
+ if (o != null) {
3696
+ Iterator<V> it = new ValueIterator<K,V>(map);
3697
+ while (it.hasNext()) {
3698
+ if (o.equals(it.next())) {
3699
+ it.remove();
3700
+ return true;
3701
+ }
3702
+ }
3703
+ }
3704
+ return false;
3705
+ }
3706
+
3707
+ /**
3708
+ * Returns a "weakly consistent" iterator that will never
3709
+ * throw {@link ConcurrentModificationException}, and
3710
+ * guarantees to traverse elements as they existed upon
3711
+ * construction of the iterator, and may (but is not
3712
+ * guaranteed to) reflect any modifications subsequent to
3713
+ * construction.
3714
+ *
3715
+ * @return an iterator over the values of this map
3716
+ */
3717
+ public final Iterator<V> iterator() {
3718
+ return new ValueIterator<K,V>(map);
3719
+ }
3720
+ public final boolean add(V e) {
3721
+ throw new UnsupportedOperationException();
3722
+ }
3723
+ public final boolean addAll(Collection<? extends V> c) {
3724
+ throw new UnsupportedOperationException();
3725
+ }
3726
+ }
3727
+
3728
+ /**
3729
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
3730
+ * entries. This class cannot be directly instantiated. See
3731
+ * {@link #entrySet}.
3732
+ */
3733
+ public static final class EntrySetView<K,V> extends CHMView<K,V>
3734
+ implements Set<Map.Entry<K,V>> {
3735
+ EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); }
3736
+ public final boolean contains(Object o) {
3737
+ Object k, v, r; Map.Entry<?,?> e;
3738
+ return ((o instanceof Map.Entry) &&
3739
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3740
+ (r = map.get(k)) != null &&
3741
+ (v = e.getValue()) != null &&
3742
+ (v == r || v.equals(r)));
3743
+ }
3744
+ public final boolean remove(Object o) {
3745
+ Object k, v; Map.Entry<?,?> e;
3746
+ return ((o instanceof Map.Entry) &&
3747
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3748
+ (v = e.getValue()) != null &&
3749
+ map.remove(k, v));
3750
+ }
3751
+
3752
+ /**
3753
+ * Returns a "weakly consistent" iterator that will never
3754
+ * throw {@link ConcurrentModificationException}, and
3755
+ * guarantees to traverse elements as they existed upon
3756
+ * construction of the iterator, and may (but is not
3757
+ * guaranteed to) reflect any modifications subsequent to
3758
+ * construction.
3759
+ *
3760
+ * @return an iterator over the entries of this map
3761
+ */
3762
+ public final Iterator<Map.Entry<K,V>> iterator() {
3763
+ return new EntryIterator<K,V>(map);
3764
+ }
3765
+
3766
+ public final boolean add(Entry<K,V> e) {
3767
+ K key = e.getKey();
3768
+ V value = e.getValue();
3769
+ if (key == null || value == null)
3770
+ throw new NullPointerException();
3771
+ return map.internalPut(key, value) == null;
3772
+ }
3773
+ public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3774
+ boolean added = false;
3775
+ for (Entry<K,V> e : c) {
3776
+ if (add(e))
3777
+ added = true;
3778
+ }
3779
+ return added;
3780
+ }
3781
+ public boolean equals(Object o) {
3782
+ Set<?> c;
3783
+ return ((o instanceof Set) &&
3784
+ ((c = (Set<?>)o) == this ||
3785
+ (containsAll(c) && c.containsAll(this))));
3786
+ }
3787
+ }
3788
+ }