thread_safe 0.3.4 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: ae6423a7b9dcc55d72b4902edbe19f7250be3e7a
- data.tar.gz: 6ebfedec76eb35f880df879162da2cdcd5690882
+ metadata.gz: c96499b75d23ede7f61f92d312c2bf7ae74faca6
+ data.tar.gz: ea25ab899f080309bc6e377abf862659cf7182fd
  SHA512:
- metadata.gz: 54b9546b99a8ca9414f17d1e4f210c5b4938028df6dcf8f43ffdcbb51e25b5cd2dea3e7dbc36e8bb8aabc0ec48845105f9d2b1ff42b2334ad8332f1d4b656ec2
- data.tar.gz: 1b681c10cd317386b4f49668433889f7e8962eac41a7b8e2be208ac07d5c3758b2ef1c0a278095aa8a4bbb901ed7d071ed9887892fdc463cd864915aa2e3ae26
+ metadata.gz: 6013a1a1ef823ce1e2a41f846d72ca6be5783e038f7a236e29b424dbc03d03249b9d63cee18b78598bc1442ee7fda2ba239ea5bc97816fa043283d12e97a01f4
+ data.tar.gz: 9b55f254f97e693dc1b18ff9cb5b3a2fdc1f7e16525fb68af1616308cf5dc2bc1623a4af71905493f04f633a7861e117c8987ffa85da2034865f6d91de476604
@@ -1,24 +1,43 @@
  language: ruby
  rvm:
- - jruby-18mode
- - jruby-19mode
- - rbx-2
- - 1.8.7
- - 1.9.3
+ - 2.2.0
+ - 2.1.5
+ - 2.1.4
  - 2.0.0
- - 2.1.0
+ - 1.9.3
+ - ruby-head
+ - jruby-1.7.18
+ - jruby-head
+ - rbx-2
  jdk: # for JRuby only
  - openjdk7
  - oraclejdk8
  matrix:
  exclude:
- - rvm: rbx-2
+ - rvm: 2.2.0
+   jdk: openjdk7
    jdk: oraclejdk8
- - rvm: 1.8.7
+ - rvm: 2.1.5
+   jdk: openjdk7
    jdk: oraclejdk8
- - rvm: 1.9.3
+ - rvm: 2.1.4
+   jdk: openjdk7
    jdk: oraclejdk8
  - rvm: 2.0.0
+   jdk: openjdk7
    jdk: oraclejdk8
- - rvm: 2.1.0
-   jdk: oraclejdk8
+ - rvm: 1.9.3
+   jdk: openjdk7
+   jdk: oraclejdk8
+ - rvm: ruby-head
+   jdk: openjdk7
+   jdk: oraclejdk8
+ - rvm: rbx-2
+   jdk: openjdk7
+   jdk: oraclejdk8
+ allow_failures:
+ - rvm: ruby-head
+ - rvm: jruby-head
+ - rvm: 1.9.3
+
+ script: "rake TESTOPTS='--seed=1'"
@@ -0,0 +1,13 @@
+ --protected
+ --no-private
+ --embed-mixins
+ --output-dir ./yardoc
+ --markup markdown
+ --title=Concurrent Ruby
+ --template default
+ --template-path ./yard-template
+
+ ./lib/**/*.rb
+ -
+ README.md
+ LICENSE
data/Gemfile CHANGED
@@ -1,4 +1,17 @@
  source 'https://rubygems.org'

- # Specify your gem's dependencies in thread_safe.gemspec
  gemspec
+
+ group :development, :test do
+   gem 'minitest', '~> 5.5.1'
+   gem 'minitest-reporters', '~> 1.0.11'
+   gem 'simplecov', '~> 0.9.2', :require => false
+   gem 'coveralls', '~> 0.7.11', :require => false
+ end
+
+ group :documentation do
+   gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false
+   gem 'yard', '~> 0.8.7.6', :require => false
+   gem 'inch', '~> 0.5.10', :platforms => :mri, :require => false
+   gem 'redcarpet', '~> 3.2.2', platforms: :mri # understands github markdown
+ end
data/README.md CHANGED
@@ -1,6 +1,6 @@
  # Threadsafe

- [![Build Status](https://travis-ci.org/headius/thread_safe.png)](https://travis-ci.org/headius/thread_safe)
+ [![Gem Version](https://badge.fury.io/rb/thread_safe.svg)](http://badge.fury.io/rb/thread_safe) [![Build Status](https://travis-ci.org/ruby-concurrency/thread_safe.svg?branch=master)](https://travis-ci.org/ruby-concurrency/thread_safe) [![Coverage Status](https://img.shields.io/coveralls/ruby-concurrency/thread_safe/master.svg)](https://coveralls.io/r/ruby-concurrency/thread_safe) [![Code Climate](https://codeclimate.com/github/ruby-concurrency/thread_safe.svg)](https://codeclimate.com/github/ruby-concurrency/thread_safe) [![Dependency Status](https://gemnasium.com/ruby-concurrency/thread_safe.svg)](https://gemnasium.com/ruby-concurrency/thread_safe) [![License](https://img.shields.io/badge/license-apache-green.svg)](http://opensource.org/licenses/MIT) [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby)

  A collection of thread-safe versions of common core Ruby classes.

data/Rakefile CHANGED
@@ -1,6 +1,19 @@
  require "bundler/gem_tasks"
  require "rake/testtask"

+ ## safely load all the rake tasks in the `tasks` directory
+ def safe_load(file)
+   begin
+     load file
+   rescue LoadError => ex
+     puts "Error loading rake tasks from '#{file}' but will continue..."
+     puts ex.message
+   end
+ end
+ Dir.glob('tasks/**/*.rake').each do |rakefile|
+   safe_load rakefile
+ end
+
  task :default => :test

  if defined?(JRUBY_VERSION)
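
For context, the `safe_load` helper and `Dir.glob` loop above pull in any `*.rake` file found under a `tasks/` directory. A minimal sketch of such a task file is shown below; the `tasks/yard.rake` name and its body are illustrative assumptions, not files shipped in this release.

```ruby
# tasks/yard.rake (hypothetical) -- any *.rake file placed under tasks/ is
# loaded by the Dir.glob + safe_load loop added in the Rakefile hunk above.
desc 'Generate YARD documentation (yard reads ./.yardopts automatically)'
task :yard do
  sh 'bundle exec yard doc' # shells out to the yard gem from the :documentation group
end
```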
@@ -63,7 +63,7 @@ public class JRubyCacheBackendLibrary implements Library {
  return true;
  } catch (Throwable t) { // ensuring we really do catch everything
  // Doug's Unsafe setup errors always have this "Could not ini.." message
- if (t.getMessage().contains("Could not initialize intrinsics") || isCausedBySecurityException(t)) {
+ if (isCausedBySecurityException(t)) {
  return false;
  }
  throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t));
@@ -72,7 +72,7 @@ public class JRubyCacheBackendLibrary implements Library {

  private static boolean isCausedBySecurityException(Throwable t) {
  while (t != null) {
- if (t instanceof SecurityException) {
+ if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) {
  return true;
  }
  t = t.getCause();
@@ -1,169 +1,158 @@
  module ThreadSafe
- # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 available in public domain.
- # Original source code available here: http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
+ # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59
+ # available in public domain.
  #
- # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins
- # whose size exceeds a threshold).
+ # Original source code available here:
+ # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
  #
- # A hash table supporting full concurrency of retrievals and
- # high expected concurrency for updates. However, even though all
- # operations are thread-safe, retrieval operations do _not_ entail locking,
- # and there is _not_ any support for locking the entire table
- # in a way that prevents all access.
+ # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose
+ # size exceeds a threshold).
  #
- # Retrieval operations generally do not block, so may overlap with
- # update operations. Retrievals reflect the results of the most
- # recently _completed_ update operations holding upon their
- # onset. (More formally, an update operation for a given key bears a
- # _happens-before_ relation with any (non +nil+) retrieval for
- # that key reporting the updated value.) For aggregate operations
- # such as +clear()+, concurrent retrievals may reflect insertion or removal
- # of only some entries. Similarly, the +each_pair+ iterator yields elements
- # reflecting the state of the hash table at some point at or since
- # the start of the +each_pair+. Bear in mind that the results of
- # aggregate status methods including +size()+ and +empty?+} are typically
- # useful only when a map is not undergoing concurrent updates in other
- # threads. Otherwise the results of these methods reflect transient
- # states that may be adequate for monitoring or estimation purposes, but not
- # for program control.
+ # A hash table supporting full concurrency of retrievals and high expected
+ # concurrency for updates. However, even though all operations are
+ # thread-safe, retrieval operations do _not_ entail locking, and there is
+ # _not_ any support for locking the entire table in a way that prevents all
+ # access.
  #
- # The table is dynamically expanded when there are too many
- # collisions (i.e., keys that have distinct hash codes but fall into
- # the same slot modulo the table size), with the expected average
- # effect of maintaining roughly two bins per mapping (corresponding
- # to a 0.75 load factor threshold for resizing). There may be much
- # variance around this average as mappings are added and removed, but
- # overall, this maintains a commonly accepted time/space tradeoff for
- # hash tables. However, resizing this or any other kind of hash
- # table may be a relatively slow operation. When possible, it is a
- # good idea to provide a size estimate as an optional :initial_capacity
+ # Retrieval operations generally do not block, so may overlap with update
+ # operations. Retrievals reflect the results of the most recently _completed_
+ # update operations holding upon their onset. (More formally, an update
+ # operation for a given key bears a _happens-before_ relation with any (non
+ # +nil+) retrieval for that key reporting the updated value.) For aggregate
+ # operations such as +clear()+, concurrent retrievals may reflect insertion or
+ # removal of only some entries. Similarly, the +each_pair+ iterator yields
+ # elements reflecting the state of the hash table at some point at or since
+ # the start of the +each_pair+. Bear in mind that the results of aggregate
+ # status methods including +size()+ and +empty?+} are typically useful only
+ # when a map is not undergoing concurrent updates in other threads. Otherwise
+ # the results of these methods reflect transient states that may be adequate
+ # for monitoring or estimation purposes, but not for program control.
+ #
+ # The table is dynamically expanded when there are too many collisions (i.e.,
+ # keys that have distinct hash codes but fall into the same slot modulo the
+ # table size), with the expected average effect of maintaining roughly two
+ # bins per mapping (corresponding to a 0.75 load factor threshold for
+ # resizing). There may be much variance around this average as mappings are
+ # added and removed, but overall, this maintains a commonly accepted
+ # time/space tradeoff for hash tables. However, resizing this or any other
+ # kind of hash table may be a relatively slow operation. When possible, it is
+ # a good idea to provide a size estimate as an optional :initial_capacity
  # initializer argument. An additional optional :load_factor constructor
- # argument provides a further means of customizing initial table capacity
- # by specifying the table density to be used in calculating the amount of
- # space to allocate for the given number of elements. Note that using
- # many keys with exactly the same +hash+ is a sure way to slow down
- # performance of any hash table.
+ # argument provides a further means of customizing initial table capacity by
+ # specifying the table density to be used in calculating the amount of space
+ # to allocate for the given number of elements. Note that using many keys with
+ # exactly the same +hash+ is a sure way to slow down performance of any hash
+ # table.
  #
- # == Design overview
+ # ## Design overview
  #
- # The primary design goal of this hash table is to maintain
- # concurrent readability (typically method +[]+, but also
- # iteration and related methods) while minimizing update
- # contention. Secondary goals are to keep space consumption about
- # the same or better than plain +Hash+, and to support high
+ # The primary design goal of this hash table is to maintain concurrent
+ # readability (typically method +[]+, but also iteration and related methods)
+ # while minimizing update contention. Secondary goals are to keep space
+ # consumption about the same or better than plain +Hash+, and to support high
  # initial insertion rates on an empty table by many threads.
  #
- # Each key-value mapping is held in a +Node+. The validation-based
- # approach explained below leads to a lot of code sprawl because
- # retry-control precludes factoring into smaller methods.
+ # Each key-value mapping is held in a +Node+. The validation-based approach
+ # explained below leads to a lot of code sprawl because retry-control
+ # precludes factoring into smaller methods.
+ #
+ # The table is lazily initialized to a power-of-two size upon the first
+ # insertion. Each bin in the table normally contains a list of +Node+s (most
+ # often, the list has only zero or one +Node+). Table accesses require
+ # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
+ # always accurately traversable under volatile reads, so long as lookups check
+ # hash code and non-nullness of value before checking key equality.
  #
- # The table is lazily initialized to a power-of-two size upon the
- # first insertion. Each bin in the table normally contains a
- # list of +Node+s (most often, the list has only zero or one +Node+).
- # Table accesses require volatile/atomic reads, writes, and
- # CASes. The lists of nodes within bins are always accurately traversable
- # under volatile reads, so long as lookups check hash code
- # and non-nullness of value before checking key equality.
+ # We use the top two bits of +Node+ hash fields for control purposes -- they
+ # are available anyway because of addressing constraints. As explained further
+ # below, these top bits are used as follows:
  #
- # We use the top two bits of +Node+ hash fields for control
- # purposes -- they are available anyway because of addressing
- # constraints. As explained further below, these top bits are
- # used as follows:
- # 00 - Normal
- # 01 - Locked
- # 11 - Locked and may have a thread waiting for lock
- # 10 - +Node+ is a forwarding node
+ # - 00 - Normal
+ # - 01 - Locked
+ # - 11 - Locked and may have a thread waiting for lock
+ # - 10 - +Node+ is a forwarding node
  #
- # The lower 28 bits of each +Node+'s hash field contain a
- # the key's hash code, except for forwarding nodes, for which
- # the lower bits are zero (and so always have hash field == +MOVED+).
+ # The lower 28 bits of each +Node+'s hash field contain a the key's hash code,
+ # except for forwarding nodes, for which the lower bits are zero (and so
+ # always have hash field == +MOVED+).
  #
- # Insertion (via +[]=+ or its variants) of the first node in an
- # empty bin is performed by just CASing it to the bin. This is
- # by far the most common case for put operations under most
- # key/hash distributions. Other update operations (insert,
- # delete, and replace) require locks. We do not want to waste
- # the space required to associate a distinct lock object with
- # each bin, so instead use the first node of a bin list itself as
- # a lock. Blocking support for these locks relies +Util::CheapLockable.
- # However, we also need a +try_lock+ construction, so we overlay
- # these by using bits of the +Node+ hash field for lock control (see above),
- # and so normally use builtin monitors only for blocking and signalling using
+ # Insertion (via +[]=+ or its variants) of the first node in an empty bin is
+ # performed by just CASing it to the bin. This is by far the most common case
+ # for put operations under most key/hash distributions. Other update
+ # operations (insert, delete, and replace) require locks. We do not want to
+ # waste the space required to associate a distinct lock object with each bin,
+ # so instead use the first node of a bin list itself as a lock. Blocking
+ # support for these locks relies +Util::CheapLockable. However, we also need a
+ # +try_lock+ construction, so we overlay these by using bits of the +Node+
+ # hash field for lock control (see above), and so normally use builtin
+ # monitors only for blocking and signalling using
  # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
  #
- # Using the first node of a list as a lock does not by itself
- # suffice though: When a node is locked, any update must first
- # validate that it is still the first node after locking it, and
- # retry if not. Because new nodes are always appended to lists,
- # once a node is first in a bin, it remains first until deleted
- # or the bin becomes invalidated (upon resizing). However,
- # operations that only conditionally update may inspect nodes
- # until the point of update. This is a converse of sorts to the
- # lazy locking technique described by Herlihy & Shavit.
+ # Using the first node of a list as a lock does not by itself suffice though:
+ # When a node is locked, any update must first validate that it is still the
+ # first node after locking it, and retry if not. Because new nodes are always
+ # appended to lists, once a node is first in a bin, it remains first until
+ # deleted or the bin becomes invalidated (upon resizing). However, operations
+ # that only conditionally update may inspect nodes until the point of update.
+ # This is a converse of sorts to the lazy locking technique described by
+ # Herlihy & Shavit.
  #
- # The main disadvantage of per-bin locks is that other update
- # operations on other nodes in a bin list protected by the same
- # lock can stall, for example when user +eql?+ or mapping
- # functions take a long time. However, statistically, under
- # random hash codes, this is not a common problem. Ideally, the
- # frequency of nodes in bins follows a Poisson distribution
- # (http://en.wikipedia.org/wiki/Poisson_distribution) with a
- # parameter of about 0.5 on average, given the resizing threshold
- # of 0.75, although with a large variance because of resizing
- # granularity. Ignoring variance, the expected occurrences of
- # list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
- # first values are:
+ # The main disadvantage of per-bin locks is that other update operations on
+ # other nodes in a bin list protected by the same lock can stall, for example
+ # when user +eql?+ or mapping functions take a long time. However,
+ # statistically, under random hash codes, this is not a common problem.
+ # Ideally, the frequency of nodes in bins follows a Poisson distribution
+ # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
+ # about 0.5 on average, given the resizing threshold of 0.75, although with a
+ # large variance because of resizing granularity. Ignoring variance, the
+ # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
+ # factorial(k)). The first values are:
  #
- # 0: 0.60653066
- # 1: 0.30326533
- # 2: 0.07581633
- # 3: 0.01263606
- # 4: 0.00157952
- # 5: 0.00015795
- # 6: 0.00001316
- # 7: 0.00000094
- # 8: 0.00000006
- # more: less than 1 in ten million
+ # - 0: 0.60653066
+ # - 1: 0.30326533
+ # - 2: 0.07581633
+ # - 3: 0.01263606
+ # - 4: 0.00157952
+ # - 5: 0.00015795
+ # - 6: 0.00001316
+ # - 7: 0.00000094
+ # - 8: 0.00000006
+ # - more: less than 1 in ten million
  #
- # Lock contention probability for two threads accessing distinct
- # elements is roughly 1 / (8 * #elements) under random hashes.
+ # Lock contention probability for two threads accessing distinct elements is
+ # roughly 1 / (8 * #elements) under random hashes.
  #
- # The table is resized when occupancy exceeds a percentage
- # threshold (nominally, 0.75, but see below). Only a single
- # thread performs the resize (using field +size_control+, to arrange
- # exclusion), but the table otherwise remains usable for reads
- # and updates. Resizing proceeds by transferring bins, one by
- # one, from the table to the next table. Because we are using
- # power-of-two expansion, the elements from each bin must either
- # stay at same index, or move with a power of two offset. We
- # eliminate unnecessary node creation by catching cases where old
- # nodes can be reused because their next fields won't change. On
- # average, only about one-sixth of them need cloning when a table
- # doubles. The nodes they replace will be garbage collectable as
- # soon as they are no longer referenced by any reader thread that
- # may be in the midst of concurrently traversing table. Upon
- # transfer, the old table bin contains only a special forwarding
- # node (with hash field +MOVED+) that contains the next table as
- # its key. On encountering a forwarding node, access and update
- # operations restart, using the new table.
+ # The table is resized when occupancy exceeds a percentage threshold
+ # (nominally, 0.75, but see below). Only a single thread performs the resize
+ # (using field +size_control+, to arrange exclusion), but the table otherwise
+ # remains usable for reads and updates. Resizing proceeds by transferring
+ # bins, one by one, from the table to the next table. Because we are using
+ # power-of-two expansion, the elements from each bin must either stay at same
+ # index, or move with a power of two offset. We eliminate unnecessary node
+ # creation by catching cases where old nodes can be reused because their next
+ # fields won't change. On average, only about one-sixth of them need cloning
+ # when a table doubles. The nodes they replace will be garbage collectable as
+ # soon as they are no longer referenced by any reader thread that may be in
+ # the midst of concurrently traversing table. Upon transfer, the old table bin
+ # contains only a special forwarding node (with hash field +MOVED+) that
+ # contains the next table as its key. On encountering a forwarding node,
+ # access and update operations restart, using the new table.
  #
- # Each bin transfer requires its bin lock. However, unlike other
- # cases, a transfer can skip a bin if it fails to acquire its
- # lock, and revisit it later. Method +rebuild+ maintains a buffer of
- # TRANSFER_BUFFER_SIZE bins that have been skipped because of failure
- # to acquire a lock, and blocks only if none are available
- # (i.e., only very rarely). The transfer operation must also ensure
- # that all accessible bins in both the old and new table are usable by
- # any traversal. When there are no lock acquisition failures, this is
- # arranged simply by proceeding from the last bin (+table.size - 1+) up
- # towards the first. Upon seeing a forwarding node, traversals arrange
- # to move to the new table without revisiting nodes. However, when any
- # node is skipped during a transfer, all earlier table bins may have
- # become visible, so are initialized with a reverse-forwarding node back
- # to the old table until the new ones are established. (This
- # sometimes requires transiently locking a forwarding node, which
- # is possible under the above encoding.) These more expensive
- # mechanics trigger only when necessary.
+ # Each bin transfer requires its bin lock. However, unlike other cases, a
+ # transfer can skip a bin if it fails to acquire its lock, and revisit it
+ # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that
+ # have been skipped because of failure to acquire a lock, and blocks only if
+ # none are available (i.e., only very rarely). The transfer operation must
+ # also ensure that all accessible bins in both the old and new table are
+ # usable by any traversal. When there are no lock acquisition failures, this
+ # is arranged simply by proceeding from the last bin (+table.size - 1+) up
+ # towards the first. Upon seeing a forwarding node, traversals arrange to move
+ # to the new table without revisiting nodes. However, when any node is skipped
+ # during a transfer, all earlier table bins may have become visible, so are
+ # initialized with a reverse-forwarding node back to the old table until the
+ # new ones are established. (This sometimes requires transiently locking a
+ # forwarding node, which is possible under the above encoding.) These more
+ # expensive mechanics trigger only when necessary.
  #
  # The traversal scheme also applies to partial traversals of
  # ranges of bins (via an alternate Traverser constructor)
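
As a usage aside (not part of the diff): the `:initial_capacity` and `:load_factor` options described in the comment block above are constructor options. A minimal sketch, assuming the gem's public `ThreadSafe::Cache` front-end forwards these options to its backend:

```ruby
require 'thread_safe'

# Size estimate and table density, as described in the documentation above.
cache = ThreadSafe::Cache.new(initial_capacity: 1_000, load_factor: 0.75)

# Concurrent writers: per the design notes, inserting the first node of an
# empty bin is a single CAS, and retrievals never take a lock.
threads = 10.times.map do |i|
  Thread.new { 100.times { |j| cache["key-#{i}-#{j}"] = j } }
end
threads.each(&:join)

# Atomic check-then-insert for a single key.
cache.compute_if_absent(:answer) { 42 }

puts cache.size # => 1001
```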
@@ -229,10 +218,10 @@ module ThreadSafe
  end
  end

- # Key-value entry. Nodes with a hash field of +MOVED+ are special,
- # and do not contain user keys or values. Otherwise, keys are never +nil+,
- # and +NULL+ +value+ fields indicate that a node is in the process
- # of being deleted or created. For purposes of read-only access, a key may be read
+ # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do
+ # not contain user keys or values. Otherwise, keys are never +nil+, and
+ # +NULL+ +value+ fields indicate that a node is in the process of being
+ # deleted or created. For purposes of read-only access, a key may be read
  # before a value, but can only be used after checking value to be +!= NULL+.
  class Node
  extend Util::Volatile
@@ -259,17 +248,15 @@ module ThreadSafe
  self.next = next_node
  end

- # Spins a while if +LOCKED+ bit set and this node is the first
- # of its bin, and then sets +WAITING+ bits on hash field and
- # blocks (once) if they are still set. It is OK for this
- # method to return even if lock is not available upon exit,
- # which enables these simple single-wait mechanics.
+ # Spins a while if +LOCKED+ bit set and this node is the first of its bin,
+ # and then sets +WAITING+ bits on hash field and blocks (once) if they are
+ # still set. It is OK for this method to return even if lock is not
+ # available upon exit, which enables these simple single-wait mechanics.
  #
- # The corresponding signalling operation is performed within
- # callers: Upon detecting that +WAITING+ has been set when
- # unlocking lock (via a failed CAS from non-waiting +LOCKED+
- # state), unlockers acquire the +cheap_synchronize+ lock and
- # perform a +cheap_broadcast+.
+ # The corresponding signalling operation is performed within callers: Upon
+ # detecting that +WAITING+ has been set when unlocking lock (via a failed
+ # CAS from non-waiting +LOCKED+ state), unlockers acquire the
+ # +cheap_synchronize+ lock and perform a +cheap_broadcast+.
  def try_await_lock(table, i)
  if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
  spins = SPIN_LOCK_ATTEMPTS
@@ -360,12 +347,12 @@ module ThreadSafe
  extend Util::Volatile
  attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two.

- # Table initialization and resizing control. When negative, the
- # table is being initialized or resized. Otherwise, when table is
- # null, holds the initial table size to use upon creation, or 0
- # for default. After initialization, holds the next element count
- # value upon which to resize the table.
- :size_control
+ # Table initialization and resizing control. When negative, the
+ # table is being initialized or resized. Otherwise, when table is
+ # null, holds the initial table size to use upon creation, or 0
+ # for default. After initialization, holds the next element count
+ # value upon which to resize the table.
+ :size_control

  def initialize(options = nil)
  super()
@@ -786,10 +773,9 @@ module ThreadSafe
  current_table
  end

- # If table is too small and not already resizing, creates next
- # table and transfers bins. Rechecks occupancy after a transfer
- # to see if another resize is already needed because resizings
- # are lagging additions.
+ # If table is too small and not already resizing, creates next table and
+ # transfers bins. Rechecks occupancy after a transfer to see if another
+ # resize is already needed because resizings are lagging additions.
  def check_for_resize
  while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum
  try_in_resize_lock(current_table, size_ctrl) do