atomic_cache 0.3.0.rc1 → 0.4.0.rc1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: becad2b31944cbd2f93353a4b5e015f8f39c6f508f605f3adcd672fd7e8482b9
4
- data.tar.gz: a76b093ad299c779adabe12053731f6582eca33b169a4b05c3a2c619f8747855
3
+ metadata.gz: 0f8d7906f63f08bf7e8d7b9f9f88d05a209d65c37375c464f60448ffd9db427e
4
+ data.tar.gz: ac2b7c09f3299ccbb4dcf588cbe163134c8e81b94d79c08fe908afb6200911a7
5
5
  SHA512:
6
- metadata.gz: 71ea9a439a06adde2a837132820555a564aa2168344a9b9b455fcf04e3293aa3ce147ca9a534ed97266dda92a1db09aa0cc33af22262c6997f8dca334ac0ec66
7
- data.tar.gz: 3e4c87333014f120f9ef8055492886734525cbc85ef5245fe87c32a3e908ce0ab059f5ea297a6bc3c2c05517c294ac6ee7891e0caa3d3d4f44db83ea3583fc64
6
+ metadata.gz: fa4f82e7cd8b729461b5b122a9f9cb92c2c36898c295a63bdd18a510133920ca5694d3e44b9cceceb7d5e2a078b2781f75481b32bda1e21804f720a00287eacb
7
+ data.tar.gz: 18ca04880e1f4c57308d3442fa4bbb50318121ba9d663ca2858819838ddcd27c118ad79b4767c60708872778deca668b7315f7cad32fdcb951cec2a18dc168ac
data/docs/USAGE.md CHANGED
@@ -38,17 +38,6 @@ The ideal `generate_ttl_ms` time is just slightly longer than the average genera
38
38
 
39
39
  If metrics are enabled, the `<namespace>.generate.run` can be used to determine the min/max/average generate time for a particular cache and the `generate_ttl_ms` tuned using that.
40
40
 
41
- #### `quick_retry_ms`
42
- _`false` to disable. Defaults to false._
43
-
44
- In the case where another process is computing the new cache value, before falling back to the last known value, if `quick_retry_ms` has a value the atomic client will check the new cache once after the given duration (in milliseconds).
45
-
46
- The danger with `quick_retry_ms` is that when enabled it applies a delay to all fall-through requests at the cost of only benefitting some customers. As the average generate block duration increases, the effectiveness of `quick_retry_ms` decreases because there is less of a likelihood that a customer will get a fresh value. Consider the graph below. For example, a cache with an average generate duration of 200ms, configured with a `quick_retry_ms` of 50ms (red) will only likely get a fresh value for 25% of customers.
47
-
48
- `quick_retry_ms` is most effective for caches that are quick to generate but whose values are slow to change. `quick_retry_ms` is least effective for caches that are slow to update but quick to change.
49
-
50
- ![quick_retry_ms graph](https://github.com/Ibotta/atomic_cache/raw/ca473f28e179da8c24f638eeeeb48750bc8cbe64/docs/img/quick_retry_graph.png)
51
-
52
41
  #### `max_retries` & `backoff_duration_ms`
53
42
  _`max_retries` defaults to 5._
54
43
  _`backoff_duration_ms` defaults to 50ms._
@@ -6,7 +6,6 @@ require 'active_support/core_ext/hash'
6
6
  module AtomicCache
7
7
  class AtomicCacheClient
8
8
 
9
- DEFAULT_quick_retry_ms = false
10
9
  DEFAULT_MAX_RETRIES = 5
11
10
  DEFAULT_GENERATE_TIME_MS = 30000 # 30 seconds
12
11
  BACKOFF_DURATION_MS = 50
@@ -32,7 +31,6 @@ module AtomicCache
32
31
  #
33
32
  # @param keyspace [AtomicCache::Keyspace] the keyspace to fetch
34
33
  # @option options [Numeric] :generate_ttl_ms (30000) Max generate duration in ms
35
- # @option options [Numeric] :quick_retry_ms (false) Short duration to check back before using last known value
36
34
  # @option options [Numeric] :max_retries (5) Max times to retry in waiting case
37
35
  # @option options [Numeric] :backoff_duration_ms (50) Duration in ms to wait between retries
38
36
  # @yield Generates a new value when cache is expired
@@ -57,9 +55,8 @@ module AtomicCache
57
55
  return new_value unless new_value.nil?
58
56
  end
59
57
 
60
- # quick check back to see if the other process has finished
61
- # or fall back to the last known value
62
- value = quick_retry(key, options, tags) || last_known_value(keyspace, options, tags)
58
+ # attempt to fall back to the last known value
59
+ value = last_known_value(keyspace, options, tags)
63
60
  return value if value.present?
64
61
 
65
62
  # wait for the other process if a last known value isn't there
@@ -109,22 +106,6 @@ module AtomicCache
109
106
  nil
110
107
  end
111
108
 
112
- def quick_retry(key, options, tags)
113
- duration = option(:quick_retry_ms, options, DEFAULT_quick_retry_ms)
114
- if duration.present? and key.present?
115
- sleep(duration.to_f / 1000)
116
- value = @storage.read(key, options)
117
-
118
- if !value.nil?
119
- metrics(:increment, 'empty-cache-retry.present', tags: tags)
120
- return value
121
- end
122
- metrics(:increment, 'empty-cache-retry.not-present', tags: tags)
123
- end
124
-
125
- nil
126
- end
127
-
128
109
  def last_known_value(keyspace, options, tags)
129
110
  lkk = @timestamp_manager.last_known_key(keyspace)
130
111
 
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module AtomicCache
4
- VERSION = "0.3.0.rc1"
4
+ VERSION = "0.4.0.rc1"
5
5
  end
@@ -138,17 +138,6 @@ describe 'AtomicCacheClient' do
138
138
  timestamp_manager.lock(keyspace, 100)
139
139
  end
140
140
 
141
- it 'waits for a short duration to see if the other thread generated the value' do
142
- timestamp_manager.promote(keyspace, last_known_key: 'lkk', timestamp: 1420090000)
143
- key_storage.set('lkk', 'old:value')
144
- new_value = 'value from another thread'
145
- allow(cache_storage).to receive(:read)
146
- .with(timestamp_manager.current_key(keyspace), anything)
147
- .and_return(nil, new_value)
148
-
149
- expect(subject.fetch(keyspace, quick_retry_ms: 5) { 'value' }).to eq(new_value)
150
- end
151
-
152
141
  context 'when the last known value is present' do
153
142
  it 'returns the last known value' do
154
143
  timestamp_manager.promote(keyspace, last_known_key: 'lkk', timestamp: 1420090000)
@@ -191,17 +180,6 @@ describe 'AtomicCacheClient' do
191
180
  end
192
181
 
193
182
  context 'and when a block is NOT given' do
194
- it 'waits for a short duration to see if the other thread generated the value' do
195
- timestamp_manager.promote(keyspace, last_known_key: 'asdf', timestamp: 1420090000)
196
- new_value = 'value from another thread'
197
- allow(cache_storage).to receive(:read)
198
- .with(timestamp_manager.current_key(keyspace), anything)
199
- .and_return(nil, new_value)
200
-
201
- result = subject.fetch(keyspace, quick_retry_ms: 50)
202
- expect(result).to eq(new_value)
203
- end
204
-
205
183
  it 'returns nil if nothing is present' do
206
184
  expect(subject.fetch(keyspace)).to eq(nil)
207
185
  end
@@ -0,0 +1,137 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'spec_helper'
4
+
5
+ describe 'Integration -' do
6
+ let(:key_storage) { AtomicCache::Storage::SharedMemory.new }
7
+ let(:cache_storage) { AtomicCache::Storage::SharedMemory.new }
8
+ let(:keyspace) { AtomicCache::Keyspace.new(namespace: 'int.waiting') }
9
+ let(:timestamp_manager) { AtomicCache::LastModTimeKeyManager.new(keyspace: keyspace, storage: key_storage) }
10
+
11
+ before(:each) do
12
+ key_storage.reset
13
+ cache_storage.reset
14
+ end
15
+
16
+ describe 'fallback:' do
17
+ let(:generating_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
18
+ let(:fallback_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
19
+
20
+ it 'falls back to the old value when a lock is present' do
21
+ old_time = Time.local(2021, 1, 1, 15, 30, 0)
22
+ new_time = Time.local(2021, 1, 1, 16, 30, 0)
23
+
24
+ # prime cache with an old value
25
+
26
+ Timecop.freeze(old_time) do
27
+ generating_client.fetch(keyspace) { "old value" }
28
+ end
29
+ timestamp_manager.last_modified_time = new_time
30
+
31
+ # start generating process for new time
32
+ generating_thread = ClientThread.new(generating_client, keyspace)
33
+ generating_thread.start
34
+ sleep 0.05
35
+
36
+ value = fallback_client.fetch(keyspace)
37
+ generating_thread.terminate
38
+
39
+ expect(value).to eq("old value")
40
+ end
41
+ end
42
+
43
+ describe 'waiting:' do
44
+ let(:generating_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
45
+ let(:waiting_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
46
+
47
+ it 'waits for a key when no last known value is available' do
48
+ generating_thread = ClientThread.new(generating_client, keyspace)
49
+ generating_thread.start
50
+ waiting_thread = ClientThread.new(waiting_client, keyspace)
51
+ waiting_thread.start
52
+
53
+ generating_thread.generate
54
+ sleep 0.05
55
+ waiting_thread.fetch
56
+ sleep 0.05
57
+ generating_thread.complete
58
+ sleep 0.05
59
+
60
+ generating_thread.terminate
61
+ waiting_thread.terminate
62
+
63
+ expect(generating_thread.result).to eq([1, 2, 3])
64
+ expect(waiting_thread.result).to eq([1, 2, 3])
65
+ end
66
+ end
67
+ end
68
+
69
+
70
+ # Avert your eyes:
71
+ # this class allows atomic client interaction to happen asynchronously so that
72
+ # the waiting behavior of the client can be tested simultaneously with controlling how
73
+ # long the 'generate' behavior takes
74
+ #
75
+ # It works by accepting an incoming 'message' which it places onto one of two queues
76
+ class ClientThread
77
+ attr_reader :result
78
+
79
+ # idea: maybe make the return value set when the thread is initialized
80
+ def initialize(client, keyspace)
81
+ @keyspace = keyspace
82
+ @client = client
83
+ @msg_queue = Queue.new
84
+ @generate_queue = Queue.new
85
+ @result = nil
86
+ end
87
+
88
+ def start
89
+ @thread = Thread.new(&method(:run))
90
+ end
91
+
92
+ def fetch
93
+ @msg_queue << :fetch
94
+ end
95
+
96
+ def generate
97
+ @msg_queue << :generate
98
+ end
99
+
100
+ def complete
101
+ @generate_queue << :complete
102
+ end
103
+
104
+ def terminate
105
+ @msg_queue << :terminate
106
+ end
107
+
108
+ private
109
+
110
+ def run
111
+ loop do
112
+ msg = @msg_queue.pop
113
+ sleep 0.001; next unless msg
114
+
115
+ case msg
116
+ when :terminate
117
+ Thread.stop
118
+ when :generate
119
+ do_generate
120
+ when :fetch
121
+ @result = @client.fetch(@keyspace)
122
+ end
123
+ end
124
+ end
125
+
126
+ def do_generate
127
+ @client.fetch(@keyspace) do
128
+ loop do
129
+ msg = @generate_queue.pop
130
+ sleep 0.001; next unless msg
131
+ break if msg == :complete
132
+ end
133
+ @result = [1, 2, 3] # generated value
134
+ @result
135
+ end
136
+ end
137
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: atomic_cache
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.3.0.rc1
4
+ version: 0.4.0.rc1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Ibotta Developers
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2021-07-02 00:00:00.000000000 Z
12
+ date: 2021-07-07 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: bundler
@@ -185,7 +185,6 @@ files:
185
185
  - docs/MODEL_SETUP.md
186
186
  - docs/PROJECT_SETUP.md
187
187
  - docs/USAGE.md
188
- - docs/img/quick_retry_graph.png
189
188
  - lib/atomic_cache.rb
190
189
  - lib/atomic_cache/atomic_cache_client.rb
191
190
  - lib/atomic_cache/concerns/global_lmt_cache_concern.rb
@@ -201,7 +200,7 @@ files:
201
200
  - spec/atomic_cache/atomic_cache_client_spec.rb
202
201
  - spec/atomic_cache/concerns/global_lmt_cache_concern_spec.rb
203
202
  - spec/atomic_cache/default_config_spec.rb
204
- - spec/atomic_cache/integration/waiting_spec.rb
203
+ - spec/atomic_cache/integration/integration_spec.rb
205
204
  - spec/atomic_cache/key/keyspace_spec.rb
206
205
  - spec/atomic_cache/key/last_mod_time_key_manager_spec.rb
207
206
  - spec/atomic_cache/storage/dalli_spec.rb
Binary file
@@ -1,102 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- require 'spec_helper'
4
-
5
- describe 'Integration' do
6
- let(:key_storage) { AtomicCache::Storage::SharedMemory.new }
7
- let(:cache_storage) { AtomicCache::Storage::SharedMemory.new }
8
- let(:keyspace) { AtomicCache::Keyspace.new(namespace: 'int.waiting') }
9
- let(:timestamp_manager) { AtomicCache::LastModTimeKeyManager.new(keyspace: keyspace, storage: key_storage) }
10
-
11
- let(:generating_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
12
- let(:waiting_client) { AtomicCache::AtomicCacheClient.new(storage: cache_storage, timestamp_manager: timestamp_manager) }
13
-
14
- it 'correctly waits for a key when no last know value is available' do
15
- generating_thread = ClientThread.new(generating_client, keyspace)
16
- generating_thread.start
17
- waiting_thread = ClientThread.new(waiting_client, keyspace)
18
- waiting_thread.start
19
-
20
- generating_thread.generate
21
- sleep 0.05
22
- waiting_thread.fetch
23
- sleep 0.05
24
- generating_thread.complete
25
- sleep 0.05
26
-
27
- generating_thread.terminate
28
- waiting_thread.terminate
29
-
30
- expect(generating_thread.result).to eq([1, 2, 3])
31
- expect(waiting_thread.result).to eq([1, 2, 3])
32
- end
33
- end
34
-
35
-
36
- # Avert your eyes:
37
- # this class allows atomic client interaction to happen asynchronously so that
38
- # the waiting behavior of the client can be tested simultaneous to controlling how
39
- # long the 'generate' behavior takes
40
- #
41
- # It works by accepting an incoming 'message' which it places onto one of two queues
42
- class ClientThread
43
- attr_reader :result
44
-
45
- def initialize(client, keyspace)
46
- @keyspace = keyspace
47
- @client = client
48
- @msg_queue = Queue.new
49
- @generate_queue = Queue.new
50
- @result = nil
51
- end
52
-
53
- def start
54
- @thread = Thread.new(&method(:run))
55
- end
56
-
57
- def fetch
58
- @msg_queue << :fetch
59
- end
60
-
61
- def generate
62
- @msg_queue << :generate
63
- end
64
-
65
- def complete
66
- @generate_queue << :complete
67
- end
68
-
69
- def terminate
70
- @msg_queue << :terminate
71
- end
72
-
73
- private
74
-
75
- def run
76
- loop do
77
- msg = @msg_queue.pop
78
- sleep 0.001; next unless msg
79
-
80
- case msg
81
- when :terminate
82
- Thread.stop
83
- when :generate
84
- do_generate
85
- when :fetch
86
- @result = @client.fetch(@keyspace)
87
- end
88
- end
89
- end
90
-
91
- def do_generate
92
- @client.fetch(@keyspace) do
93
- loop do
94
- msg = @generate_queue.pop
95
- sleep 0.001; next unless msg
96
- break if msg == :complete
97
- end
98
- @result = [1, 2, 3] # generated value
99
- @result
100
- end
101
- end
102
- end