solid_cache 0.3.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: d92ba4d7c045821d0c54facd1329613e497a46a2b21daffe3a14931bf9621f86
4
- data.tar.gz: 46c4b4b81c48a5a5598cc2460e8c1159e69497f130116e9f855a58f8f3e52a36
3
+ metadata.gz: d61c50ee1ef52b34cbc3d656a65b3f7274ddd21b77963d419787ecb978e01ede
4
+ data.tar.gz: 612114bb0eade6e73c2d06f2cf342e1245d41ea547bda43b090a5aa6e0d552a1
5
5
  SHA512:
6
- metadata.gz: 4f3505047c30f24d6f8928d439172f826becc79054a05f32b0c670b3cbb227a10b52889b1e64e2430fe2d279e98c6615bb92634c475fdb6016cd7d3c9c170c07
7
- data.tar.gz: 4cb0c61cddd64a4d8093b73b06524ae5585cf5d0c70233bd31fd688b837d38c500df28479bbbb7ffa99b24e26dc3230789dad0d108f9d7fc3df4e02b0eeb0d55
6
+ metadata.gz: 82a744e8cec593d0cdba4acd0d7d55727d0754507664700c25a2750d2bac011f7db965f751791286e7f918802a12f38fe5e1f251cf5daf5285503d4fae664d15
7
+ data.tar.gz: 4fe8bf0a189e137a6ec53697d2bfb4fc755c29a2f7d3ce1ca207694a5d9ede2d17871b714416b46fac52d405c6d94dad45951c8aae97b36e8be8d66df5eed720
data/MIT-LICENSE CHANGED
@@ -1,4 +1,4 @@
1
- Copyright (c) 2023 37signals
1
+ Copyright (c) 37signals, LLC
2
2
 
3
3
  Permission is hereby granted, free of charge, to any person obtaining
4
4
  a copy of this software and associated documentation files (the
data/README.md CHANGED
@@ -1,5 +1,7 @@
1
1
  # Solid Cache
2
2
 
3
+ **Upgrading from v0.3.0 or earlier? Please see [upgrading to version 0.4.0](upgrading_to_version_0.4.x.md)**
4
+
3
5
  Solid Cache is a database-backed Active Support cache store implementation.
4
6
 
5
7
  Using SQL databases backed by SSDs we can have caches that are much larger and cheaper than traditional memory only Redis or Memcached backed caches.
@@ -80,6 +82,7 @@ Solid Cache supports these options in addition to the standard `ActiveSupport::C
80
82
  - `error_handler` - a Proc to call to handle any `ActiveRecord::ActiveRecordError`s that are raised (default: log errors as warnings)
81
83
  - `expiry_batch_size` - the batch size to use when deleting old records (default: `100`)
82
84
  - `expiry_method` - what expiry method to use `thread` or `job` (default: `thread`)
85
+ - `expiry_queue` - which queue to add expiry jobs to (default: `default`)
83
86
  - `max_age` - the maximum age of entries in the cache (default: `2.weeks.to_i`). Can be set to `nil`, but this is not recommended unless using `max_entries` to limit the size of the cache.
84
87
  - `max_entries` - the maximum number of entries allowed in the cache (default: `nil`, meaning no limit)
85
88
  - `cluster` - a Hash of options for the cache database cluster, e.g `{ shards: [:database1, :database2, :database3] }`
@@ -285,7 +288,7 @@ To run a test for a specific version run:
285
288
  bundle exec appraisal rails-7-1 bin/rails test
286
289
  ```
287
290
 
288
- After updating the dependencies in then `Gemfile` please run:
291
+ After updating the dependencies in the `Gemfile` please run:
289
292
 
290
293
  ```shell
291
294
  $ bundle
@@ -0,0 +1,49 @@
1
+ # frozen_string_literal: true
2
+
3
+ module SolidCache
4
+ class Entry
5
+ module Expiration
6
+ extend ActiveSupport::Concern
7
+
8
+ class_methods do
9
+ def id_range
10
+ uncached do
11
+ pick(Arel.sql("max(id) - min(id) + 1")) || 0
12
+ end
13
+ end
14
+
15
+ def expire(count, max_age:, max_entries:)
16
+ if (ids = expiry_candidate_ids(count, max_age: max_age, max_entries: max_entries)).any?
17
+ delete(ids)
18
+ end
19
+ end
20
+
21
+ private
22
+ def expiry_candidate_ids(count, max_age:, max_entries:)
23
+ cache_full = max_entries && max_entries < id_range
24
+ return [] unless cache_full || max_age
25
+
26
+ # In the case of multiple concurrent expiry operations, it is desirable to
27
+ # reduce the overlap of entries being addressed by each. For that reason,
28
+ # retrieve more ids than are being expired, and use random
29
+ # sampling to reduce that number to the actual intended count.
30
+ retrieve_count = count * 3
31
+
32
+ uncached do
33
+ candidates = order(:id).limit(retrieve_count)
34
+
35
+ candidate_ids = if cache_full
36
+ candidates.pluck(:id)
37
+ else
38
+ min_created_at = max_age.seconds.ago
39
+ candidates.pluck(:id, :created_at)
40
+ .filter_map { |id, created_at| id if created_at < min_created_at }
41
+ end
42
+
43
+ candidate_ids.sample(count)
44
+ end
45
+ end
46
+ end
47
+ end
48
+ end
49
+ end
@@ -2,9 +2,16 @@
2
2
 
3
3
  module SolidCache
4
4
  class Entry < Record
5
- # This is all quite awkward but it achieves a couple of performance aims
6
- # 1. We skip the query cache
7
- # 2. We avoid the overhead of building queries and active record objects
5
+ include Expiration
6
+
7
+ ID_BYTE_SIZE = 8
8
+ CREATED_AT_BYTE_SIZE = 8
9
+ KEY_HASH_BYTE_SIZE = 8
10
+ VALUE_BYTE_SIZE = 4
11
+ FIXED_SIZE_COLUMNS_BYTE_SIZE = ID_BYTE_SIZE + CREATED_AT_BYTE_SIZE + KEY_HASH_BYTE_SIZE + VALUE_BYTE_SIZE
12
+
13
+ self.ignored_columns += [ :key_hash, :byte_size] if SolidCache.key_hash_stage == :ignored
14
+
8
15
  class << self
9
16
  def write(key, value)
10
17
  upsert_all_no_query_cache([ { key: key, value: value } ])
@@ -15,23 +22,23 @@ module SolidCache
15
22
  end
16
23
 
17
24
  def read(key)
18
- select_all_no_query_cache(get_sql, to_binary(key)).first
25
+ result = select_all_no_query_cache(get_sql, lookup_value(key)).first
26
+ result[1] if result&.first == key
19
27
  end
20
28
 
21
29
  def read_multi(keys)
22
- serialized_keys = keys.map { |key| to_binary(key) }
23
- select_all_no_query_cache(get_all_sql(serialized_keys), serialized_keys).to_h
30
+ key_hashes = keys.map { |key| lookup_value(key) }
31
+ results = select_all_no_query_cache(get_all_sql(key_hashes), key_hashes).to_h
32
+ results.except!(results.keys - keys)
24
33
  end
25
34
 
26
35
  def delete_by_key(key)
27
- delete_no_query_cache(:key, to_binary(key))
36
+ delete_no_query_cache(lookup_column, lookup_value(key))
28
37
  end
29
38
 
30
- def delete_matched(matcher, batch_size:)
31
- like_matcher = arel_table[:key].matches(matcher, nil, true)
32
- where(like_matcher).select(:id).find_in_batches(batch_size: batch_size) do |entries|
33
- delete_no_query_cache(:id, entries.map(&:id))
34
- end
39
+ def delete_multi(keys)
40
+ serialized_keys = keys.map { |key| lookup_value(key) }
41
+ delete_no_query_cache(lookup_column, serialized_keys)
35
42
  end
36
43
 
37
44
  def clear_truncate
@@ -45,7 +52,8 @@ module SolidCache
45
52
  def increment(key, amount)
46
53
  transaction do
47
54
  uncached do
48
- amount += lock.where(key: key).pick(:value).to_i
55
+ result = lock.where(lookup_column => lookup_value(key)).pick(:key, :value)
56
+ amount += result[1].to_i if result&.first == key
49
57
  write(key, amount)
50
58
  amount
51
59
  end
@@ -56,48 +64,86 @@ module SolidCache
56
64
  increment(key, -amount)
57
65
  end
58
66
 
59
- def id_range
60
- uncached do
61
- pick(Arel.sql("max(id) - min(id) + 1")) || 0
62
- end
63
- end
64
-
65
- def expire(count, max_age:, max_entries:)
66
- if (ids = expiry_candidate_ids(count, max_age: max_age, max_entries: max_entries)).any?
67
- delete(ids)
68
- end
69
- end
70
-
71
67
  private
72
- def upsert_all_no_query_cache(attributes)
73
- insert_all = ActiveRecord::InsertAll.new(self, attributes, unique_by: upsert_unique_by, on_duplicate: :update, update_only: [ :value ])
68
+ def upsert_all_no_query_cache(payloads)
69
+ insert_all = ActiveRecord::InsertAll.new(
70
+ self,
71
+ add_key_hash_and_byte_size(payloads),
72
+ unique_by: upsert_unique_by,
73
+ on_duplicate: :update,
74
+ update_only: upsert_update_only
75
+ )
74
76
  sql = connection.build_insert_sql(ActiveRecord::InsertAll::Builder.new(insert_all))
75
77
 
76
78
  message = +"#{self} "
77
- message << "Bulk " if attributes.many?
79
+ message << "Bulk " if payloads.many?
78
80
  message << "Upsert"
79
81
  # exec_query_method does not clear the query cache, exec_insert_all does
80
82
  connection.send exec_query_method, sql, message
81
83
  end
82
84
 
85
+ def add_key_hash_and_byte_size(payloads)
86
+ payloads.map do |payload|
87
+ payload.dup.tap do |payload|
88
+ if key_hash?
89
+ payload[:key_hash] = key_hash_for(payload[:key])
90
+ payload[:byte_size] = byte_size_for(payload)
91
+ end
92
+ end
93
+ end
94
+ end
95
+
96
+ def key_hash?
97
+ @key_hash ||= [ :indexed, :unindexed ].include?(SolidCache.key_hash_stage) &&
98
+ connection.column_exists?(table_name, :key_hash)
99
+ end
100
+
101
+ def key_hash_indexed?
102
+ key_hash? && SolidCache.key_hash_stage == :indexed
103
+ end
104
+
105
+ def lookup_column
106
+ key_hash_indexed? ? :key_hash : :key
107
+ end
108
+
109
+ def lookup_value(key)
110
+ key_hash_indexed? ? key_hash_for(key) : to_binary(key)
111
+ end
112
+
113
+ def lookup_placeholder
114
+ key_hash_indexed? ? 1 : "placeholder"
115
+ end
116
+
83
117
  def exec_query_method
84
118
  connection.respond_to?(:internal_exec_query) ? :internal_exec_query : :exec_query
85
119
  end
86
120
 
87
121
  def upsert_unique_by
88
- connection.supports_insert_conflict_target? ? :key : nil
122
+ connection.supports_insert_conflict_target? ? lookup_column : nil
123
+ end
124
+
125
+ def upsert_update_only
126
+ if key_hash_indexed?
127
+ [ :key, :value, :byte_size ]
128
+ elsif key_hash?
129
+ [ :value, :key_hash, :byte_size ]
130
+ else
131
+ [ :value ]
132
+ end
89
133
  end
90
134
 
91
135
  def get_sql
92
- @get_sql ||= build_sql(where(key: "placeholder").select(:value))
136
+ @get_sql ||= {}
137
+ @get_sql[lookup_column] ||= build_sql(where(lookup_column => lookup_placeholder).select(:key, :value))
93
138
  end
94
139
 
95
- def get_all_sql(keys)
140
+ def get_all_sql(key_hashes)
96
141
  if connection.prepared_statements?
97
142
  @get_all_sql_binds ||= {}
98
- @get_all_sql_binds[keys.count] ||= build_sql(where(key: keys).select(:key, :value))
143
+ @get_all_sql_binds[[key_hashes.count, lookup_column]] ||= build_sql(where(lookup_column => key_hashes).select(:key, :value))
99
144
  else
100
- @get_all_sql_no_binds ||= build_sql(where(key: [ "placeholder1", "placeholder2" ]).select(:key, :value)).gsub("?, ?", "?")
145
+ @get_all_sql_no_binds ||= {}
146
+ @get_all_sql_no_binds[lookup_column] ||= build_sql(where(lookup_column => [ lookup_placeholder, lookup_placeholder ]).select(:key, :value)).gsub("?, ?", "?")
101
147
  end
102
148
  end
103
149
 
@@ -140,29 +186,13 @@ module SolidCache
140
186
  ActiveModel::Type::Binary.new.serialize(key)
141
187
  end
142
188
 
143
- def expiry_candidate_ids(count, max_age:, max_entries:)
144
- cache_full = max_entries && max_entries < id_range
145
- return [] unless cache_full || max_age
146
-
147
- # In the case of multiple concurrent expiry operations, it is desirable to
148
- # reduce the overlap of entries being addressed by each. For that reason,
149
- # retrieve more ids than are being expired, and use random
150
- # sampling to reduce that number to the actual intended count.
151
- retrieve_count = count * 3
152
-
153
- uncached do
154
- candidates = order(:id).limit(retrieve_count)
155
-
156
- candidate_ids = if cache_full
157
- candidates.pluck(:id)
158
- else
159
- min_created_at = max_age.seconds.ago
160
- candidates.pluck(:id, :created_at)
161
- .filter_map { |id, created_at| id if created_at < min_created_at }
162
- end
189
+ def key_hash_for(key)
190
+ # Need to unpack this as a signed integer - PostgreSQL and SQLite don't support unsigned integers
191
+ Digest::SHA256.digest(key.to_s).unpack("q>").first
192
+ end
163
193
 
164
- candidate_ids.sample(count)
165
- end
194
+ def byte_size_for(payload)
195
+ payload[:key].to_s.bytesize + payload[:value].to_s.bytesize + FIXED_SIZE_COLUMNS_BYTE_SIZE
166
196
  end
167
197
  end
168
198
  end
@@ -1,5 +1,3 @@
1
- # frozen_string_literal: true
2
-
3
1
  class CreateSolidCacheEntries < ActiveRecord::Migration[7.0]
4
2
  def change
5
3
  create_table :solid_cache_entries do |t|
@@ -0,0 +1,8 @@
1
+ class AddKeyHashAndByteSizeToSolidCacheEntries < ActiveRecord::Migration[7.1]
2
+ def change
3
+ change_table :solid_cache_entries do |t|
4
+ t.column :key_hash, :integer, null: true, limit: 8
5
+ t.column :byte_size, :integer, null: true, limit: 4
6
+ end
7
+ end
8
+ end
@@ -0,0 +1,11 @@
1
+ class AddKeyHashAndByteSizeIndexesAndNullConstraintsToSolidCacheEntries < ActiveRecord::Migration[7.1]
2
+ def change
3
+ change_table :solid_cache_entries, bulk: true do |t|
4
+ t.change_null :key_hash, false
5
+ t.change_null :byte_size, false
6
+ t.index :key_hash, unique: true
7
+ t.index [:key_hash, :byte_size]
8
+ t.index :byte_size
9
+ end
10
+ end
11
+ end
@@ -0,0 +1,7 @@
1
+ class RemoveKeyIndexFromSolidCacheEntries < ActiveRecord::Migration[7.1]
2
+ def change
3
+ change_table :solid_cache_entries do |t|
4
+ t.remove_index :key
5
+ end
6
+ end
7
+ end
@@ -5,7 +5,7 @@ module SolidCache
5
5
  module Execution
6
6
  def initialize(options = {})
7
7
  super(options)
8
- @background = Concurrent::SingleThreadExecutor.new(max_queue: 100, fallback_policy: :discard)
8
+ @background = Concurrent::FixedThreadPool.new(1, max_queue: 100, fallback_policy: :discard)
9
9
  @active_record_instrumentation = options.fetch(:active_record_instrumentation, true)
10
10
  end
11
11
 
@@ -9,13 +9,14 @@ module SolidCache
9
9
  # This ensures there is downward pressure on the cache size while there is valid data to delete
10
10
  EXPIRY_MULTIPLIER = 1.25
11
11
 
12
- attr_reader :expiry_batch_size, :expiry_method, :expire_every, :max_age, :max_entries
12
+ attr_reader :expiry_batch_size, :expiry_method, :expiry_queue, :expires_per_write, :max_age, :max_entries
13
13
 
14
14
  def initialize(options = {})
15
15
  super(options)
16
16
  @expiry_batch_size = options.fetch(:expiry_batch_size, 100)
17
17
  @expiry_method = options.fetch(:expiry_method, :thread)
18
- @expire_every = [ (expiry_batch_size / EXPIRY_MULTIPLIER).floor, 1 ].max
18
+ @expiry_queue = options.fetch(:expiry_queue, :default)
19
+ @expires_per_write = (1 / expiry_batch_size.to_f) * EXPIRY_MULTIPLIER
19
20
  @max_age = options.fetch(:max_age, 2.weeks.to_i)
20
21
  @max_entries = options.fetch(:max_entries, nil)
21
22
 
@@ -23,41 +24,26 @@ module SolidCache
23
24
  end
24
25
 
25
26
  def track_writes(count)
26
- expire_later if expiry_counter.count(count)
27
+ expiry_batches(count).times { expire_later }
27
28
  end
28
29
 
29
30
  private
31
+ def expiry_batches(count)
32
+ batches = (count * expires_per_write).floor
33
+ overflow_batch_chance = count * expires_per_write - batches
34
+ batches += 1 if rand < overflow_batch_chance
35
+ batches
36
+ end
37
+
30
38
  def expire_later
31
39
  if expiry_method == :job
32
- ExpiryJob.perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
40
+ ExpiryJob
41
+ .set(queue: expiry_queue)
42
+ .perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
33
43
  else
34
44
  async { Entry.expire(expiry_batch_size, max_age: max_age, max_entries: max_entries) }
35
45
  end
36
46
  end
37
-
38
- def expiry_counter
39
- @expiry_counters ||= connection_names.index_with { |connection_name| Counter.new(expire_every) }
40
- @expiry_counters[Entry.current_shard]
41
- end
42
-
43
- class Counter
44
- attr_reader :expire_every, :counter
45
-
46
- def initialize(expire_every)
47
- @expire_every = expire_every
48
- @counter = Concurrent::AtomicFixnum.new(rand(expire_every).to_i)
49
- end
50
-
51
- def count(count)
52
- value = counter.increment(count)
53
- new_multiple_of_expire_every?(value - count, value)
54
- end
55
-
56
- private
57
- def new_multiple_of_expire_every?(first_value, second_value)
58
- first_value / expire_every != second_value / expire_every
59
- end
60
- end
61
47
  end
62
48
  end
63
49
  end
@@ -13,6 +13,12 @@ module SolidCache
13
13
 
14
14
  SolidCache.executor = config.solid_cache.executor
15
15
  SolidCache.connects_to = config.solid_cache.connects_to
16
+ if config.solid_cache.key_hash_stage
17
+ unless [:ignored, :unindexed, :indexed].include?(config.solid_cache.key_hash_stage)
18
+ raise ArgumentError, ":key_hash_stage must be :ignored, :unindexed or :indexed"
19
+ end
20
+ SolidCache.key_hash_stage = config.solid_cache.key_hash_stage
21
+ end
16
22
  end
17
23
 
18
24
  config.after_initialize do
@@ -14,20 +14,6 @@ module SolidCache
14
14
  @max_key_bytesize = options.fetch(:max_key_bytesize, DEFAULT_MAX_KEY_BYTESIZE)
15
15
  end
16
16
 
17
- def delete_matched(matcher, options = {})
18
- instrument :delete_matched, matcher do
19
- raise ArgumentError, "Only strings are supported: #{matcher.inspect}" unless String === matcher
20
- raise ArgumentError, "Strings cannot start with wildcards" if SQL_WILDCARD_CHARS.include?(matcher[0])
21
-
22
- options ||= {}
23
- batch_size = options.fetch(:batch_size, 1000)
24
-
25
- matcher = namespace_key(matcher, options)
26
-
27
- entry_delete_matched(matcher, batch_size)
28
- end
29
- end
30
-
31
17
  def increment(name, amount = 1, options = nil)
32
18
  options = merged_options(options)
33
19
  key = normalize_key(name, options)
@@ -119,7 +105,7 @@ module SolidCache
119
105
  end
120
106
 
121
107
  def delete_multi_entries(entries, **options)
122
- entries.count { |key| delete_entry(key, **options) }
108
+ entry_delete_multi(entries).compact.sum
123
109
  end
124
110
 
125
111
  def serialize_entry(entry, raw: false, **options)
@@ -17,12 +17,6 @@ module SolidCache
17
17
  end
18
18
 
19
19
  private
20
- def entry_delete_matched(matcher, batch_size)
21
- writing_all(failsafe: :delete_matched) do
22
- Entry.delete_matched(matcher, batch_size: batch_size)
23
- end
24
- end
25
-
26
20
  def entry_clear
27
21
  writing_all(failsafe: :clear) do
28
22
  if clear_with == :truncate
@@ -78,6 +72,12 @@ module SolidCache
78
72
  Entry.delete_by_key(key)
79
73
  end
80
74
  end
75
+
76
+ def entry_delete_multi(entries)
77
+ writing_keys(entries, failsafe: :delete_multi_entries, failsafe_returning: false) do
78
+ Entry.delete_multi(entries)
79
+ end
80
+ end
81
81
  end
82
82
  end
83
83
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module SolidCache
2
- VERSION = "0.3.0"
4
+ VERSION = "0.4.1"
3
5
  end
data/lib/solid_cache.rb CHANGED
@@ -10,6 +10,7 @@ loader.setup
10
10
 
11
11
  module SolidCache
12
12
  mattr_accessor :executor, :connects_to
13
+ mattr_accessor :key_hash_stage, default: :indexed
13
14
 
14
15
  def self.all_shard_keys
15
16
  all_shards_config&.keys || []
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: solid_cache
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.3.0
4
+ version: 0.4.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Donal McBreen
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2024-01-08 00:00:00.000000000 Z
11
+ date: 2024-01-25 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: activerecord
@@ -92,8 +92,12 @@ files:
92
92
  - Rakefile
93
93
  - app/jobs/solid_cache/expiry_job.rb
94
94
  - app/models/solid_cache/entry.rb
95
+ - app/models/solid_cache/entry/expiration.rb
95
96
  - app/models/solid_cache/record.rb
96
97
  - db/migrate/20230724121448_create_solid_cache_entries.rb
98
+ - db/migrate/20240108155507_add_key_hash_and_byte_size_to_solid_cache_entries.rb
99
+ - db/migrate/20240110111600_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.rb
100
+ - db/migrate/20240110111702_remove_key_index_from_solid_cache_entries.rb
97
101
  - lib/active_support/cache/solid_cache_store.rb
98
102
  - lib/generators/solid_cache/install/USAGE
99
103
  - lib/generators/solid_cache/install/install_generator.rb
@@ -122,7 +126,9 @@ licenses:
122
126
  metadata:
123
127
  homepage_uri: http://github.com/rails/solid_cache
124
128
  source_code_uri: http://github.com/rails/solid_cache
125
- post_install_message:
129
+ post_install_message: |
130
+ Solid Cache v0.4 contains new database migrations.
131
+ See https://github.com/rails/solid_cache/blob/main/upgrading_to_version_0.4.x.md for upgrade instructions.
126
132
  rdoc_options: []
127
133
  require_paths:
128
134
  - lib
@@ -137,7 +143,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
137
143
  - !ruby/object:Gem::Version
138
144
  version: '0'
139
145
  requirements: []
140
- rubygems_version: 3.5.1
146
+ rubygems_version: 3.5.4
141
147
  signing_key:
142
148
  specification_version: 4
143
149
  summary: A database backed ActiveSupport::Cache::Store