solid_cache 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. checksums.yaml +4 -4
  2. data/MIT-LICENSE +1 -1
  3. data/README.md +10 -6
  4. data/Rakefile +2 -0
  5. data/app/jobs/solid_cache/expiry_job.rb +2 -0
  6. data/app/models/solid_cache/entry/expiration.rb +49 -0
  7. data/app/models/solid_cache/entry.rb +88 -44
  8. data/app/models/solid_cache/record.rb +7 -7
  9. data/db/migrate/20240108155507_add_key_hash_and_byte_size_to_solid_cache_entries.rb +8 -0
  10. data/db/migrate/20240110111600_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.rb +11 -0
  11. data/db/migrate/20240110111702_remove_key_index_from_solid_cache_entries.rb +7 -0
  12. data/lib/active_support/cache/solid_cache_store.rb +2 -0
  13. data/lib/generators/solid_cache/install/install_generator.rb +2 -0
  14. data/lib/solid_cache/cluster/connections.rb +2 -0
  15. data/lib/solid_cache/cluster/execution.rb +3 -1
  16. data/lib/solid_cache/cluster/expiry.rb +16 -28
  17. data/lib/solid_cache/cluster/stats.rb +3 -1
  18. data/lib/solid_cache/cluster.rb +1 -0
  19. data/lib/solid_cache/connections/sharded.rb +2 -0
  20. data/lib/solid_cache/connections/single.rb +2 -0
  21. data/lib/solid_cache/connections/unmanaged.rb +2 -0
  22. data/lib/solid_cache/connections.rb +2 -0
  23. data/lib/solid_cache/engine.rb +8 -0
  24. data/lib/solid_cache/maglev_hash.rb +2 -0
  25. data/lib/solid_cache/store/api.rb +4 -16
  26. data/lib/solid_cache/store/clusters.rb +6 -8
  27. data/lib/solid_cache/store/entries.rb +8 -6
  28. data/lib/solid_cache/store/failsafe.rb +2 -0
  29. data/lib/solid_cache/store.rb +2 -0
  30. data/lib/solid_cache/version.rb +3 -1
  31. data/lib/solid_cache.rb +5 -2
  32. data/lib/tasks/solid_cache_tasks.rake +2 -0
  33. metadata +39 -5
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 98f4ca743bf44353244b4e1bb870df48b426fcb8b082941ad0f07e150f05cffb
- data.tar.gz: 85a3660e7c8b63b8f838039228b887f6a4ecde5d17401707f243804efb7f98d6
+ metadata.gz: 542cf01d73ff23edf9308fa57ef549ddfc823837bcfd01097b207d4f2db8ff41
+ data.tar.gz: b15fcfd3d56129511d7b19ca3758afc37927a4c4171a3dd12dcdf82fb2ced1de
  SHA512:
- metadata.gz: 2251bc348573da1c8492f299cd7fadbd87450ad22252ae388b25c842b71bf6cf6525d40243587d3c2951c8af1a71fb0ac42af6eedde767465ab08a7cfcfbe9b6
- data.tar.gz: 1e69812734160e29b7ae9430303243f23f09f24a75440da61e7dd5d0efd0ac6e4934e0d7a6566791c60f3cf1d88e8d45364f9904bd60b742f16aa8241e0c1fbb
+ metadata.gz: cb4ebf1d02ee1fce3c99176864b52225d4570e145a6226345ce9c47fb562987352fbd85ae3d90050cac41986a4d3190c27928c0713c57bd33cf1bb576e321902
+ data.tar.gz: c5684061ed4f60a0b9708912aa412362adb954bf44326a0151ff6e47d625afa9f6b8ed0d329dae5b2ae5129dea6dc1e02cc2850e27a06a892fac649d6495c9c3
data/MIT-LICENSE CHANGED
@@ -1,4 +1,4 @@
- Copyright (c) 2023 37signals
+ Copyright (c) 37signals, LLC

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
data/README.md CHANGED
@@ -1,5 +1,7 @@
  # Solid Cache

+ **Upgrading from v0.3.0 or earlier? Please see [upgrading to version 0.4.0](upgrading_to_version_0.4.x.md)**
+
  Solid Cache is a database-backed Active Support cache store implementation.

  Using SQL databases backed by SSDs we can have caches that are much larger and cheaper than traditional memory only Redis or Memcached backed caches.
@@ -80,12 +82,13 @@ Solid Cache supports these options in addition to the standard `ActiveSupport::C
  - `error_handler` - a Proc to call to handle any `ActiveRecord::ActiveRecordError`s that are raises (default: log errors as warnings)
  - `expiry_batch_size` - the batch size to use when deleting old records (default: `100`)
  - `expiry_method` - what expiry method to use `thread` or `job` (default: `thread`)
- - `max_age` - the maximum age of entries in the cache (default: `2.weeks.to_i`)
+ - `expiry_queue` - which queue to add expiry jobs to (default: `default`)
+ - `max_age` - the maximum age of entries in the cache (default: `2.weeks.to_i`). Can be set to `nil`, but this is not recommended unless using `max_entries` to limit the size of the cache.
  - `max_entries` - the maximum number of entries allowed in the cache (default: `nil`, meaning no limit)
  - `cluster` - a Hash of options for the cache database cluster, e.g `{ shards: [:database1, :database2, :database3] }`
  - `clusters` - and Array of Hashes for multiple cache clusters (ignored if `:cluster` is set)
  - `active_record_instrumentation` - whether to instrument the cache's queries (default: `true`)
- - `clear_with` - clear the cache with `:truncate` or `:delete` (default `truncate`, except for when Rails.env.test? then `delete`)
+ - `clear_with` - clear the cache with `:truncate` or `:delete` (default `truncate`, except for when `Rails.env.test?` then `delete`)
  - `max_key_bytesize` - the maximum size of a normalized key in bytes (default `1024`)

  For more information on cache clusters see [Sharding the cache](#sharding-the-cache)
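
Editor's note: to make the option list above concrete, here is a minimal sketch of how a few of these settings might be passed to the store from an application config. The values shown are illustrative assumptions, not part of this diff.

```ruby
# config/environments/production.rb (illustrative values only)
Rails.application.configure do
  config.cache_store = :solid_cache_store,
    expiry_batch_size: 500,        # delete old records in batches of 500
    expiry_method: :job,           # enqueue expiry work instead of running it in a thread
    expiry_queue: :low_priority,   # new in 0.4: queue used for the expiry jobs
    max_age: 1.week.to_i,          # entries older than a week become expiry candidates
    max_entries: 5_000_000         # cap the cache size by entry count
end
```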
@@ -155,7 +158,7 @@ To shard:
  3. Pass the shards for the cache to use via the cluster option

  For example:
- ```ruby
+ ```yml
  # config/database.yml
  production:
  cache_shard1:
@@ -167,8 +170,9 @@ production:
  cache_shard3:
  database: cache3_production
  host: cache3-db
+ ```

-
+ ```ruby
  # config/environment/production.rb
  Rails.application.configure do
  config.solid_cache.connects_to = {
@@ -203,7 +207,7 @@ Rails.application.configure do
  }

  primary_cluster = { shards: [ :cache_primary_shard1, :cache_primary_shard2 ] }
- secondary_cluster = { shards: [ :cache_primary_shard1, :cache_primary_shard2 ] }
+ secondary_cluster = { shards: [ :cache_secondary_shard1, :cache_secondary_shard2 ] }
  config.cache_store = [ :solid_cache_store, clusters: [ primary_cluster, secondary_cluster ] ]
  end
  ```
@@ -284,7 +288,7 @@ To run a test for a specific version run:
  bundle exec appraisal rails-7-1 bin/rails test
  ```

- After updating the dependencies in then `Gemfile` please run:
+ After updating the dependencies in the `Gemfile` please run:

  ```shell
  $ bundle
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require "bundler/setup"

  APP_RAKEFILE = File.expand_path("test/dummy/Rakefile", __dir__)
data/app/jobs/solid_cache/expiry_job.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class ExpiryJob < ActiveJob::Base
  def perform(count, shard: nil, max_age:, max_entries:)
data/app/models/solid_cache/entry/expiration.rb ADDED
@@ -0,0 +1,49 @@
+ # frozen_string_literal: true
+
+ module SolidCache
+ class Entry
+ module Expiration
+ extend ActiveSupport::Concern
+
+ class_methods do
+ def id_range
+ uncached do
+ pick(Arel.sql("max(id) - min(id) + 1")) || 0
+ end
+ end
+
+ def expire(count, max_age:, max_entries:)
+ if (ids = expiry_candidate_ids(count, max_age: max_age, max_entries: max_entries)).any?
+ delete(ids)
+ end
+ end
+
+ private
+ def expiry_candidate_ids(count, max_age:, max_entries:)
+ cache_full = max_entries && max_entries < id_range
+ return [] unless cache_full || max_age
+
+ # In the case of multiple concurrent expiry operations, it is desirable to
+ # reduce the overlap of entries being addressed by each. For that reason,
+ # retrieve more ids than are being expired, and use random
+ # sampling to reduce that number to the actual intended count.
+ retrieve_count = count * 3
+
+ uncached do
+ candidates = order(:id).limit(retrieve_count)
+
+ candidate_ids = if cache_full
+ candidates.pluck(:id)
+ else
+ min_created_at = max_age.seconds.ago
+ candidates.pluck(:id, :created_at)
+ .filter_map { |id, created_at| id if created_at < min_created_at }
+ end
+
+ candidate_ids.sample(count)
+ end
+ end
+ end
+ end
+ end
+ end
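
Editor's note: the comment in the module above explains why expiry candidates are over-fetched and then randomly sampled; concurrent expiry runs that each took the oldest `count` ids would all pick the same rows, whereas sampling from a 3x pool keeps their selections mostly disjoint. A plain-Ruby sketch of that idea, with hypothetical ids and no database involved:

```ruby
count = 100
pool  = (1..count * 3).to_a            # stand-in for the 300 oldest entry ids

naive_a   = pool.first(count)          # two workers each take the oldest 100 ids ...
naive_b   = pool.first(count)
sampled_a = pool.sample(count)         # ... versus each sampling 100 from the 3x pool
sampled_b = pool.sample(count)

puts (naive_a & naive_b).size          # => 100 (complete overlap)
puts (sampled_a & sampled_b).size      # => roughly 33 on average
```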
data/app/models/solid_cache/entry.rb CHANGED
@@ -1,8 +1,17 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Entry < Record
- # This is all quite awkward but it achieves a couple of performance aims
- # 1. We skip the query cache
- # 2. We avoid the overhead of building queries and active record objects
+ include Expiration
+
+ ID_BYTE_SIZE = 8
+ CREATED_AT_BYTE_SIZE = 8
+ KEY_HASH_BYTE_SIZE = 8
+ VALUE_BYTE_SIZE = 4
+ FIXED_SIZE_COLUMNS_BYTE_SIZE = ID_BYTE_SIZE + CREATED_AT_BYTE_SIZE + KEY_HASH_BYTE_SIZE + VALUE_BYTE_SIZE
+
+ self.ignored_columns += [ :key_hash, :byte_size] if SolidCache.key_hash_stage == :ignored
+
  class << self
  def write(key, value)
  upsert_all_no_query_cache([ { key: key, value: value } ])
@@ -13,23 +22,23 @@ module SolidCache
  end

  def read(key)
- select_all_no_query_cache(get_sql, to_binary(key)).first
+ result = select_all_no_query_cache(get_sql, lookup_value(key)).first
+ result[1] if result&.first == key
  end

  def read_multi(keys)
- serialized_keys = keys.map { |key| to_binary(key) }
- select_all_no_query_cache(get_all_sql(serialized_keys), serialized_keys).to_h
+ key_hashes = keys.map { |key| lookup_value(key) }
+ results = select_all_no_query_cache(get_all_sql(key_hashes), key_hashes).to_h
+ results.except!(results.keys - keys)
  end

  def delete_by_key(key)
- delete_no_query_cache(:key, to_binary(key))
+ delete_no_query_cache(lookup_column, lookup_value(key))
  end

- def delete_matched(matcher, batch_size:)
- like_matcher = arel_table[:key].matches(matcher, nil, true)
- where(like_matcher).select(:id).find_in_batches(batch_size: batch_size) do |entries|
- delete_no_query_cache(:id, entries.map(&:id))
- end
+ def delete_multi(keys)
+ serialized_keys = keys.map { |key| lookup_value(key) }
+ delete_no_query_cache(lookup_column, serialized_keys)
  end

  def clear_truncate
@@ -43,7 +52,8 @@ module SolidCache
  def increment(key, amount)
  transaction do
  uncached do
- amount += lock.where(key: key).pick(:value).to_i
+ result = lock.where(lookup_column => lookup_value(key)).pick(:key, :value)
+ amount += result[1].to_i if result&.first == key
  write(key, amount)
  amount
  end
@@ -54,48 +64,86 @@ module SolidCache
  increment(key, -amount)
  end

- def id_range
- uncached do
- pick(Arel.sql("max(id) - min(id) + 1")) || 0
- end
- end
-
- def expire(count, max_age:, max_entries:)
- if (ids = expiry_candidate_ids(count, max_age: max_age, max_entries: max_entries)).any?
- delete(ids)
- end
- end
-
  private
- def upsert_all_no_query_cache(attributes)
- insert_all = ActiveRecord::InsertAll.new(self, attributes, unique_by: upsert_unique_by, on_duplicate: :update, update_only: [ :value ])
+ def upsert_all_no_query_cache(payloads)
+ insert_all = ActiveRecord::InsertAll.new(
+ self,
+ add_key_hash_and_byte_size(payloads),
+ unique_by: upsert_unique_by,
+ on_duplicate: :update,
+ update_only: upsert_update_only
+ )
  sql = connection.build_insert_sql(ActiveRecord::InsertAll::Builder.new(insert_all))

  message = +"#{self} "
- message << "Bulk " if attributes.many?
+ message << "Bulk " if payloads.many?
  message << "Upsert"
  # exec_query_method does not clear the query cache, exec_insert_all does
  connection.send exec_query_method, sql, message
  end

+ def add_key_hash_and_byte_size(payloads)
+ payloads.map do |payload|
+ payload.dup.tap do |payload|
+ if key_hash?
+ payload[:key_hash] = key_hash_for(payload[:key])
+ payload[:byte_size] = byte_size_for(payload)
+ end
+ end
+ end
+ end
+
+ def key_hash?
+ @key_hash ||= [ :indexed, :unindexed ].include?(SolidCache.key_hash_stage) &&
+ connection.column_exists?(table_name, :key_hash)
+ end
+
+ def key_hash_indexed?
+ key_hash? && SolidCache.key_hash_stage == :indexed
+ end
+
+ def lookup_column
+ key_hash_indexed? ? :key_hash : :key
+ end
+
+ def lookup_value(key)
+ key_hash_indexed? ? key_hash_for(key) : to_binary(key)
+ end
+
+ def lookup_placeholder
+ key_hash_indexed? ? 1 : "placeholder"
+ end
+
  def exec_query_method
  connection.respond_to?(:internal_exec_query) ? :internal_exec_query : :exec_query
  end

  def upsert_unique_by
- connection.supports_insert_conflict_target? ? :key : nil
+ connection.supports_insert_conflict_target? ? lookup_column : nil
+ end
+
+ def upsert_update_only
+ if key_hash_indexed?
+ [ :key, :value, :byte_size ]
+ elsif key_hash?
+ [ :value, :key_hash, :byte_size ]
+ else
+ [ :value ]
+ end
  end

  def get_sql
- @get_sql ||= build_sql(where(key: "placeholder").select(:value))
+ @get_sql ||= {}
+ @get_sql[lookup_column] ||= build_sql(where(lookup_column => lookup_placeholder).select(:key, :value))
  end

- def get_all_sql(keys)
+ def get_all_sql(key_hashes)
  if connection.prepared_statements?
  @get_all_sql_binds ||= {}
- @get_all_sql_binds[keys.count] ||= build_sql(where(key: keys).select(:key, :value))
+ @get_all_sql_binds[[key_hashes.count, lookup_column]] ||= build_sql(where(lookup_column => key_hashes).select(:key, :value))
  else
- @get_all_sql_no_binds ||= build_sql(where(key: [ "placeholder1", "placeholder2" ]).select(:key, :value)).gsub("?, ?", "?")
+ @get_all_sql_no_binds ||= {}
+ @get_all_sql_no_binds[lookup_column] ||= build_sql(where(lookup_column => [ lookup_placeholder, lookup_placeholder ]).select(:key, :value)).gsub("?, ?", "?")
  end
  end

@@ -113,7 +161,7 @@ module SolidCache
  if connection.prepared_statements?
  result = connection.select_all(sanitize_sql(query), "#{name} Load", Array(values), preparable: true)
  else
- result = connection.select_all(sanitize_sql([ query, values ]), "#{name} Load", nil, preparable: false)
+ result = connection.select_all(sanitize_sql([ query, values ]), "#{name} Load", Array(values), preparable: false)
  end

  result.cast_values(SolidCache::Entry.attribute_types)
@@ -138,17 +186,13 @@ module SolidCache
  ActiveModel::Type::Binary.new.serialize(key)
  end

- def expiry_candidate_ids(count, max_age:, max_entries:)
- cache_full = max_entries && max_entries < id_range
- min_created_at = max_age.seconds.ago
+ def key_hash_for(key)
+ # Need to unpack this as a signed integer - Postgresql and SQLite don't support unsigned integers
+ Digest::SHA256.digest(key.to_s).unpack("q>").first
+ end

- uncached do
- order(:id)
- .limit(count * 3)
- .pluck(:id, :created_at)
- .filter_map { |id, created_at| id if cache_full || created_at < min_created_at }
- .sample(count)
- end
+ def byte_size_for(payload)
+ payload[:key].to_s.bytesize + payload[:value].to_s.bytesize + FIXED_SIZE_COLUMNS_BYTE_SIZE
  end
  end
  end
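
Editor's note: the `key_hash_for` and `byte_size_for` helpers above are small enough to check in isolation. The key hash is the first 8 bytes of a SHA-256 digest unpacked as a signed big-endian 64-bit integer (signed because, per the code comment, PostgreSQL and SQLite have no unsigned 8-byte type), and the byte size is the key and value sizes plus the 28 bytes of fixed-width columns. A standalone sketch with illustrative key and value, not part of the gem:

```ruby
require "digest"

# Mirrors the ID/CREATED_AT/KEY_HASH/VALUE byte-size constants above: 8 + 8 + 8 + 4
FIXED_SIZE_COLUMNS_BYTE_SIZE = 28

def key_hash_for(key)
  # "q>" = signed 64-bit big-endian; the result may be negative, which is fine for a BIGINT column
  Digest::SHA256.digest(key.to_s).unpack1("q>")
end

def byte_size_for(key, value)
  key.to_s.bytesize + value.to_s.bytesize + FIXED_SIZE_COLUMNS_BYTE_SIZE
end

puts key_hash_for("views/products/1")                   # => a deterministic signed 64-bit integer
puts byte_size_for("views/products/1", "cached html")   # => 16 + 11 + 28 = 55
```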
data/app/models/solid_cache/record.rb CHANGED
@@ -1,21 +1,21 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Record < ActiveRecord::Base
  NULL_INSTRUMENTER = ActiveSupport::Notifications::Instrumenter.new(ActiveSupport::Notifications::Fanout.new)

  self.abstract_class = true

- connects_to **SolidCache.connects_to if SolidCache.connects_to
+ connects_to(**SolidCache.connects_to) if SolidCache.connects_to

  class << self
- def disable_instrumentation
- connection.with_instrumenter(NULL_INSTRUMENTER) do
- yield
- end
+ def disable_instrumentation(&block)
+ connection.with_instrumenter(NULL_INSTRUMENTER, &block)
  end

  def with_shard(shard, &block)
- if shard && shard != Record.current_shard
- Record.connected_to(shard: shard, &block)
+ if shard && SolidCache.connects_to
+ connected_to(shard: shard, role: default_role, prevent_writes: false, &block)
  else
  block.call
  end
data/db/migrate/20240108155507_add_key_hash_and_byte_size_to_solid_cache_entries.rb ADDED
@@ -0,0 +1,8 @@
+ class AddKeyHashAndByteSizeToSolidCacheEntries < ActiveRecord::Migration[7.1]
+ def change
+ change_table :solid_cache_entries do |t|
+ t.column :key_hash, :integer, null: true, limit: 8
+ t.column :byte_size, :integer, null: true, limit: 4
+ end
+ end
+ end
data/db/migrate/20240110111600_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.rb ADDED
@@ -0,0 +1,11 @@
+ class AddKeyHashAndByteSizeIndexesAndNullConstraintsToSolidCacheEntries < ActiveRecord::Migration[7.1]
+ def change
+ change_table :solid_cache_entries, bulk: true do |t|
+ t.change_null :key_hash, false
+ t.change_null :byte_size, false
+ t.index :key_hash, unique: true
+ t.index [:key_hash, :byte_size]
+ t.index :byte_size
+ end
+ end
+ end
data/db/migrate/20240110111702_remove_key_index_from_solid_cache_entries.rb ADDED
@@ -0,0 +1,7 @@
+ class RemoveKeyIndexFromSolidCacheEntries < ActiveRecord::Migration[7.1]
+ def change
+ change_table :solid_cache_entries do |t|
+ t.remove_index :key
+ end
+ end
+ end
data/lib/active_support/cache/solid_cache_store.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module ActiveSupport
  module Cache
  SolidCacheStore = SolidCache::Store
data/lib/generators/solid_cache/install/install_generator.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  class SolidCache::InstallGenerator < Rails::Generators::Base
  class_option :skip_migrations, type: :boolean, default: nil,
  desc: "Skip migrations"
data/lib/solid_cache/cluster/connections.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Cluster
  module Connections
data/lib/solid_cache/cluster/execution.rb CHANGED
@@ -1,9 +1,11 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Cluster
  module Execution
  def initialize(options = {})
  super(options)
- @background = Concurrent::SingleThreadExecutor.new(max_queue: 100, fallback_policy: :discard)
+ @background = Concurrent::FixedThreadPool.new(1, max_queue: 100, fallback_policy: :discard)
  @active_record_instrumentation = options.fetch(:active_record_instrumentation, true)
  end

data/lib/solid_cache/cluster/expiry.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require "concurrent/atomic/atomic_fixnum"

  module SolidCache
@@ -7,13 +9,14 @@ module SolidCache
  # This ensures there is downward pressure on the cache size while there is valid data to delete
  EXPIRY_MULTIPLIER = 1.25

- attr_reader :expiry_batch_size, :expiry_method, :expire_every, :max_age, :max_entries
+ attr_reader :expiry_batch_size, :expiry_method, :expiry_queue, :expires_per_write, :max_age, :max_entries

  def initialize(options = {})
  super(options)
  @expiry_batch_size = options.fetch(:expiry_batch_size, 100)
  @expiry_method = options.fetch(:expiry_method, :thread)
- @expire_every = [ (expiry_batch_size / EXPIRY_MULTIPLIER).floor, 1 ].max
+ @expiry_queue = options.fetch(:expiry_queue, :default)
+ @expires_per_write = (1 / expiry_batch_size.to_f) * EXPIRY_MULTIPLIER
  @max_age = options.fetch(:max_age, 2.weeks.to_i)
  @max_entries = options.fetch(:max_entries, nil)

@@ -21,41 +24,26 @@ module SolidCache
  end

  def track_writes(count)
- expire_later if expiry_counter.count(count)
+ expiry_batches(count).times { expire_later }
  end

  private
+ def expiry_batches(count)
+ batches = (count * expires_per_write).floor
+ overflow_batch_chance = count * expires_per_write - batches
+ batches += 1 if rand < overflow_batch_chance
+ batches
+ end
+
  def expire_later
  if expiry_method == :job
- ExpiryJob.perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
+ ExpiryJob
+ .set(queue: expiry_queue)
+ .perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
  else
  async { Entry.expire(expiry_batch_size, max_age: max_age, max_entries: max_entries) }
  end
  end
-
- def expiry_counter
- @expiry_counters ||= connection_names.to_h { |connection_name| [ connection_name, Counter.new(expire_every) ] }
- @expiry_counters[Entry.current_shard]
- end
-
- class Counter
- attr_reader :expire_every, :counter
-
- def initialize(expire_every)
- @expire_every = expire_every
- @counter = Concurrent::AtomicFixnum.new(rand(expire_every).to_i)
- end
-
- def count(count)
- value = counter.increment(count)
- new_multiple_of_expire_every?(value - count, value)
- end
-
- private
- def new_multiple_of_expire_every?(first_value, second_value)
- first_value / expire_every != second_value / expire_every
- end
- end
  end
  end
  end
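
Editor's note: the `expiry_batches` method above replaces the old per-shard counter with probabilistic rounding; each write contributes `expires_per_write` expected expiry batches (1.25 / batch size by default), the whole part is always enqueued, and the fractional remainder becomes the chance of one extra batch. A small standalone sketch of the same arithmetic with the default settings, not the gem's own code:

```ruby
EXPIRY_MULTIPLIER = 1.25
expiry_batch_size = 100
expires_per_write = (1 / expiry_batch_size.to_f) * EXPIRY_MULTIPLIER  # 0.0125

def expiry_batches(count, expires_per_write)
  batches = (count * expires_per_write).floor
  overflow_batch_chance = count * expires_per_write - batches
  batches += 1 if rand < overflow_batch_chance
  batches
end

# Writing 40 entries gives 40 * 0.0125 = 0.5 expected batches:
# no batch half the time, one batch the other half.
samples = Array.new(10_000) { expiry_batches(40, expires_per_write) }
puts samples.sum / 10_000.0   # ~0.5, i.e. about one 100-row batch per 80 writes
```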
data/lib/solid_cache/cluster/stats.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Cluster
  module Stats
@@ -6,7 +8,7 @@ module SolidCache
  end

  def stats
- stats = {
+ {
  connections: connections.count,
  connection_stats: connections_stats
  }
data/lib/solid_cache/cluster.rb CHANGED
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true

  module SolidCache
  class Cluster
data/lib/solid_cache/connections/sharded.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  module Connections
  class Sharded
data/lib/solid_cache/connections/single.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  module Connections
  class Single
data/lib/solid_cache/connections/unmanaged.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  module Connections
  class Unmanaged
data/lib/solid_cache/connections.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  module Connections
  def self.from_config(options)
data/lib/solid_cache/engine.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require "active_support"

  module SolidCache
@@ -11,6 +13,12 @@ module SolidCache

  SolidCache.executor = config.solid_cache.executor
  SolidCache.connects_to = config.solid_cache.connects_to
+ if config.solid_cache.key_hash_stage
+ unless [:ignored, :unindexed, :indexed].include?(config.solid_cache.key_hash_stage)
+ raise "ArgumentError, :key_hash_stage must be :ignored, :unindexed or :indexed"
+ end
+ SolidCache.key_hash_stage = config.solid_cache.key_hash_stage
+ end
  end

  config.after_initialize do
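
Editor's note: the engine hook above is where the new `key_hash_stage` setting is validated; it controls how `Entry` treats the new `key_hash`/`byte_size` columns while the 0.4 migrations are rolled out, accepting `:ignored`, `:unindexed`, or `:indexed` (the default, per `lib/solid_cache.rb` below). A hedged sketch of setting it during a staged upgrade; the staging sequence is an assumption drawn from the upgrade notes referenced in the README, not from this diff:

```ruby
# config/environments/production.rb
Rails.application.configure do
  # Until the new columns exist everywhere, keep them out of Active Record:
  config.solid_cache.key_hash_stage = :ignored
  # After the add-column migration: write key_hash/byte_size but keep looking up by key.
  # config.solid_cache.key_hash_stage = :unindexed
  # After the index migration: look entries up by key_hash (the 0.4 default).
  # config.solid_cache.key_hash_stage = :indexed
end
```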
data/lib/solid_cache/maglev_hash.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # See https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf

  module SolidCache
data/lib/solid_cache/store/api.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Store
  module Api
@@ -12,20 +14,6 @@ module SolidCache
  @max_key_bytesize = options.fetch(:max_key_bytesize, DEFAULT_MAX_KEY_BYTESIZE)
  end

- def delete_matched(matcher, options = {})
- instrument :delete_matched, matcher do
- raise ArgumentError, "Only strings are supported: #{matcher.inspect}" unless String === matcher
- raise ArgumentError, "Strings cannot start with wildcards" if SQL_WILDCARD_CHARS.include?(matcher[0])
-
- options ||= {}
- batch_size = options.fetch(:batch_size, 1000)
-
- matcher = namespace_key(matcher, options)
-
- entry_delete_matched(matcher, batch_size)
- end
- end
-
  def increment(name, amount = 1, options = nil)
  options = merged_options(options)
  key = normalize_key(name, options)
@@ -74,7 +62,7 @@ module SolidCache
  end

  def read_multi_entries(names, **options)
- keys_and_names = names.to_h { |name| [ normalize_key(name, options), name ] }
+ keys_and_names = names.index_by { |name| normalize_key(name, options) }
  serialized_entries = read_serialized_entries(keys_and_names.keys)

  keys_and_names.each_with_object({}) do |(key, name), results|
@@ -117,7 +105,7 @@ module SolidCache
  end

  def delete_multi_entries(entries, **options)
- entries.count { |key| delete_entry(key, **options) }
+ entry_delete_multi(entries).compact.sum
  end

  def serialize_entry(entry, raw: false, **options)
data/lib/solid_cache/store/clusters.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Store
  module Clusters
@@ -20,11 +22,9 @@ module SolidCache
  end

  private
- def reading_key(key, failsafe:, failsafe_returning: nil)
+ def reading_key(key, failsafe:, failsafe_returning: nil, &block)
  failsafe(failsafe, returning: failsafe_returning) do
- primary_cluster.with_connection_for(key) do
- yield
- end
+ primary_cluster.with_connection_for(key, &block)
  end
  end

@@ -65,13 +65,11 @@ module SolidCache
  end
  end

- def writing_all(failsafe:, failsafe_returning: nil)
+ def writing_all(failsafe:, failsafe_returning: nil, &block)
  first_cluster_sync_rest_async do |cluster, async|
  cluster.connection_names.each do |connection|
  failsafe(failsafe, returning: failsafe_returning) do
- cluster.with_connection(connection, async: async) do
- yield
- end
+ cluster.with_connection(connection, async: async, &block)
  end
  end
  end
data/lib/solid_cache/store/entries.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Store
  module Entries
@@ -15,12 +17,6 @@ module SolidCache
  end

  private
- def entry_delete_matched(matcher, batch_size)
- writing_all(failsafe: :delete_matched) do
- Entry.delete_matched(matcher, batch_size: batch_size)
- end
- end
-
  def entry_clear
  writing_all(failsafe: :clear) do
  if clear_with == :truncate
@@ -76,6 +72,12 @@ module SolidCache
  Entry.delete_by_key(key)
  end
  end
+
+ def entry_delete_multi(entries)
+ writing_keys(entries, failsafe: :delete_multi_entries, failsafe_returning: false) do
+ Entry.delete_multi(entries)
+ end
+ end
  end
  end
  end
data/lib/solid_cache/store/failsafe.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Store
  module Failsafe
data/lib/solid_cache/store.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
  class Store < ActiveSupport::Cache::Store
  include Api, Clusters, Entries, Failsafe
data/lib/solid_cache/version.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  module SolidCache
- VERSION = "0.2.0"
+ VERSION = "0.4.0"
  end
data/lib/solid_cache.rb CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  require "zeitwerk"
  require "solid_cache/engine"

@@ -8,6 +10,7 @@ loader.setup

  module SolidCache
  mattr_accessor :executor, :connects_to
+ mattr_accessor :key_hash_stage, default: :indexed

  def self.all_shard_keys
  all_shards_config&.keys || []
@@ -17,12 +20,12 @@ module SolidCache
  connects_to && connects_to[:shards]
  end

- def self.each_shard
+ def self.each_shard(&block)
  return to_enum(:each_shard) unless block_given?

  if (shards = all_shards_config&.keys)
  shards.each do |shard|
- Record.connected_to(shard: shard) { yield }
+ Record.with_shard(shard, &block)
  end
  else
  yield
data/lib/tasks/solid_cache_tasks.rake CHANGED
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  desc "Copy over the migration, and set cache"
  namespace :solid_cache do
  task :install do
metadata CHANGED
@@ -1,17 +1,45 @@
  --- !ruby/object:Gem::Specification
  name: solid_cache
  version: !ruby/object:Gem::Version
- version: 0.2.0
+ version: 0.4.0
  platform: ruby
  authors:
  - Donal McBreen
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-11-16 00:00:00.000000000 Z
+ date: 2024-01-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
- name: rails
+ name: activerecord
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '7'
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '7'
+ - !ruby/object:Gem::Dependency
+ name: activejob
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '7'
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '7'
+ - !ruby/object:Gem::Dependency
+ name: railties
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
@@ -64,8 +92,12 @@ files:
  - Rakefile
  - app/jobs/solid_cache/expiry_job.rb
  - app/models/solid_cache/entry.rb
+ - app/models/solid_cache/entry/expiration.rb
  - app/models/solid_cache/record.rb
  - db/migrate/20230724121448_create_solid_cache_entries.rb
+ - db/migrate/20240108155507_add_key_hash_and_byte_size_to_solid_cache_entries.rb
+ - db/migrate/20240110111600_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.rb
+ - db/migrate/20240110111702_remove_key_index_from_solid_cache_entries.rb
  - lib/active_support/cache/solid_cache_store.rb
  - lib/generators/solid_cache/install/USAGE
  - lib/generators/solid_cache/install/install_generator.rb
@@ -94,7 +126,9 @@ licenses:
  metadata:
  homepage_uri: http://github.com/rails/solid_cache
  source_code_uri: http://github.com/rails/solid_cache
- post_install_message:
+ post_install_message: |
+ Solid Cache v0.4 contains new database migrations.
+ See https://github.com/rails/solid_cache/blob/main/upgrading_to_version_0.4.x.md for upgrade instructions.
  rdoc_options: []
  require_paths:
  - lib
@@ -109,7 +143,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.21
+ rubygems_version: 3.5.4
  signing_key:
  specification_version: 4
  summary: A database backed ActiveSupport::Cache::Store