solid_cache 0.1.0
- checksums.yaml +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +268 -0
- data/Rakefile +8 -0
- data/app/jobs/solid_cache/expiry_job.rb +9 -0
- data/app/models/solid_cache/entry.rb +157 -0
- data/app/models/solid_cache/record.rb +27 -0
- data/db/migrate/20230724121448_create_solid_cache_entries.rb +11 -0
- data/lib/active_support/cache/solid_cache_store.rb +5 -0
- data/lib/generators/solid_cache/install/USAGE +9 -0
- data/lib/generators/solid_cache/install/install_generator.rb +18 -0
- data/lib/solid_cache/cluster/connections.rb +51 -0
- data/lib/solid_cache/cluster/execution.rb +52 -0
- data/lib/solid_cache/cluster/expiry.rb +61 -0
- data/lib/solid_cache/cluster/stats.rb +32 -0
- data/lib/solid_cache/cluster.rb +14 -0
- data/lib/solid_cache/connections/sharded.rb +40 -0
- data/lib/solid_cache/connections/single.rb +37 -0
- data/lib/solid_cache/connections/unmanaged.rb +31 -0
- data/lib/solid_cache/connections.rb +31 -0
- data/lib/solid_cache/engine.rb +20 -0
- data/lib/solid_cache/maglev_hash.rb +77 -0
- data/lib/solid_cache/store/api.rb +153 -0
- data/lib/solid_cache/store/clusters.rb +85 -0
- data/lib/solid_cache/store/entries.rb +81 -0
- data/lib/solid_cache/store/failsafe.rb +28 -0
- data/lib/solid_cache/store.rb +18 -0
- data/lib/solid_cache/version.rb +3 -0
- data/lib/solid_cache.rb +33 -0
- data/lib/tasks/solid_cache_tasks.rake +6 -0
- metadata +116 -0
data/lib/solid_cache/cluster/expiry.rb
@@ -0,0 +1,61 @@
+require "concurrent/atomic/atomic_fixnum"
+
+module SolidCache
+  class Cluster
+    module Expiry
+      # For every write that we do, we attempt to delete EXPIRY_MULTIPLIER times as many records.
+      # This ensures there is downward pressure on the cache size while there is valid data to delete
+      EXPIRY_MULTIPLIER = 1.25
+
+      attr_reader :expiry_batch_size, :expiry_method, :expire_every, :max_age, :max_entries
+
+      def initialize(options = {})
+        super(options)
+        @expiry_batch_size = options.fetch(:expiry_batch_size, 100)
+        @expiry_method = options.fetch(:expiry_method, :thread)
+        @expire_every = [ (expiry_batch_size / EXPIRY_MULTIPLIER).floor, 1 ].max
+        @max_age = options.fetch(:max_age, 2.weeks.to_i)
+        @max_entries = options.fetch(:max_entries, nil)
+
+        raise ArgumentError, "Expiry method must be one of `:thread` or `:job`" unless [ :thread, :job ].include?(expiry_method)
+      end
+
+      def track_writes(count)
+        expire_later if expiry_counter.count(count)
+      end
+
+      private
+        def expire_later
+          if expiry_method == :job
+            ExpiryJob.perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
+          else
+            async { Entry.expire(expiry_batch_size, max_age: max_age, max_entries: max_entries) }
+          end
+        end
+
+        def expiry_counter
+          @expiry_counters ||= connection_names.to_h { |connection_name| [ connection_name, Counter.new(expire_every) ] }
+          @expiry_counters[Entry.current_shard]
+        end
+
+        class Counter
+          attr_reader :expire_every, :counter
+
+          def initialize(expire_every)
+            @expire_every = expire_every
+            @counter = Concurrent::AtomicFixnum.new(rand(expire_every).to_i)
+          end
+
+          def count(count)
+            value = counter.increment(count)
+            new_multiple_of_expire_every?(value - count, value)
+          end
+
+          private
+            def new_multiple_of_expire_every?(first_value, second_value)
+              first_value / expire_every != second_value / expire_every
+            end
+        end
+    end
+  end
+end
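To make the trigger arithmetic above concrete, here is a minimal sketch against the Counter class (not part of the gem's own code; it assumes the default expiry_batch_size of 100, which makes expire_every = (100 / 1.25).floor = 80):

    # The counter starts at a random value below 80 so that expiry work
    # is spread across processes rather than firing in lockstep.
    counter = SolidCache::Cluster::Expiry::Counter.new(80)

    # count returns true whenever the running total crosses a new multiple
    # of expire_every -- that is when track_writes schedules one expiry
    # batch of 100 deletes, i.e. ~1.25 attempted deletes per write.
    10.times.count { counter.count(10) }  # => 1 or 2 triggers over 100 writes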
data/lib/solid_cache/cluster/stats.rb
@@ -0,0 +1,32 @@
+module SolidCache
+  class Cluster
+    module Stats
+      def initialize(options = {})
+        super()
+      end
+
+      def stats
+        stats = {
+          connections: connections.count,
+          connection_stats: connections_stats
+        }
+      end
+
+      private
+        def connections_stats
+          with_each_connection.to_h { |connection| [ Entry.current_shard, connection_stats ] }
+        end
+
+        def connection_stats
+          oldest_created_at = Entry.order(:id).pick(:created_at)
+
+          {
+            max_age: max_age,
+            oldest_age: oldest_created_at ? Time.now - oldest_created_at : nil,
+            max_entries: max_entries,
+            entries: Entry.id_range
+          }
+        end
+    end
+  end
+end
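For reference, a sketch of the hash shape stats produces, with one connection_stats entry per shard (shard names and values below are illustrative; entries reflects Entry.id_range rather than an exact row count):

    cluster.stats
    # => {
    #      connections: 2,
    #      connection_stats: {
    #        cache_shard1: { max_age: 1209600, oldest_age: 86400.0, max_entries: nil, entries: 150000 },
    #        cache_shard2: { max_age: 1209600, oldest_age: 72000.0, max_entries: nil, entries: 148211 }
    #      }
    #    }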
data/lib/solid_cache/connections/sharded.rb
@@ -0,0 +1,40 @@
+module SolidCache
+  module Connections
+    class Sharded
+      attr_reader :names, :nodes, :consistent_hash
+
+      def initialize(names, nodes)
+        @names = names
+        @nodes = nodes
+        @consistent_hash = MaglevHash.new(@nodes.keys)
+      end
+
+      def with_each(&block)
+        return enum_for(:with_each) unless block_given?
+
+        names.each { |name| with(name, &block) }
+      end
+
+      def with(name, &block)
+        Record.with_shard(name, &block)
+      end
+
+      def with_connection_for(key, &block)
+        with(shard_for(key), &block)
+      end
+
+      def assign(keys)
+        keys.group_by { |key| shard_for(key.is_a?(Hash) ? key[:key] : key) }
+      end
+
+      def count
+        names.count
+      end
+
+      private
+        def shard_for(key)
+          nodes[consistent_hash.node(key)]
+        end
+    end
+  end
+end
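A short sketch of how the consistent hash drives key routing (shard names are illustrative, and the exact grouping depends on the Maglev assignment):

    names = [ :cache_shard1, :cache_shard2 ]
    sharded = SolidCache::Connections::Sharded.new(names, names.to_h { |n| [ n, n ] })

    # assign groups keys by the shard the Maglev hash picks for each one,
    # so multi-key reads and writes touch every shard at most once:
    sharded.assign([ "users/1", "users/2", "users/3" ])
    # => e.g. { cache_shard1: [ "users/1", "users/3" ], cache_shard2: [ "users/2" ] }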
data/lib/solid_cache/connections/single.rb
@@ -0,0 +1,37 @@
+module SolidCache
+  module Connections
+    class Single
+      attr_reader :name
+
+      def initialize(name)
+        @name = name
+      end
+
+      def with_each(&block)
+        return enum_for(:with_each) unless block_given?
+
+        with(name, &block)
+      end
+
+      def with(name, &block)
+        Record.with_shard(name, &block)
+      end
+
+      def with_connection_for(key, &block)
+        with(name, &block)
+      end
+
+      def assign(keys)
+        { name => keys }
+      end
+
+      def count
+        1
+      end
+
+      def names
+        [ name ]
+      end
+    end
+  end
+end
data/lib/solid_cache/connections/unmanaged.rb
@@ -0,0 +1,31 @@
+module SolidCache
+  module Connections
+    class Unmanaged
+      def with_each
+        return enum_for(:with_each) unless block_given?
+
+        yield
+      end
+
+      def with(name)
+        yield
+      end
+
+      def with_connection_for(key)
+        yield
+      end
+
+      def assign(keys)
+        { default: keys }
+      end
+
+      def count
+        1
+      end
+
+      def names
+        [ :default ]
+      end
+    end
+  end
+end
data/lib/solid_cache/connections.rb
@@ -0,0 +1,31 @@
+module SolidCache
+  module Connections
+    def self.from_config(options)
+      if options.present? || SolidCache.all_shards_config.present?
+        case options
+        when NilClass
+          names = SolidCache.all_shard_keys
+          nodes = names.to_h { |name| [ name, name ] }
+        when Array
+          names = options
+          nodes = names.to_h { |name| [ name, name ] }
+        when Hash
+          names = options.keys
+          nodes = options.invert
+        end
+
+        if (unknown_shards = names - SolidCache.all_shard_keys).any?
+          raise ArgumentError, "Unknown #{"shard".pluralize(unknown_shards)}: #{unknown_shards.join(", ")}"
+        end
+
+        if names.size == 1
+          Single.new(names.first)
+        else
+          Sharded.new(names, nodes)
+        end
+      else
+        Unmanaged.new
+      end
+    end
+  end
+end
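from_config therefore accepts three shapes, illustrated below (shard names are examples and must exist in the app's shard configuration):

    # nil: every configured shard from SolidCache.all_shard_keys
    SolidCache::Connections.from_config(nil)

    # Array: an explicit list of shard names
    SolidCache::Connections.from_config([ :cache_shard1, :cache_shard2 ])  # => Sharded

    # Hash: shard name => consistent-hash node name, so a shard can be
    # renamed without remapping its keys
    SolidCache::Connections.from_config(cache_shard1: :node1, cache_shard2: :node2)

    # One name yields Single; with no shard config at all you get Unmanaged
    SolidCache::Connections.from_config([ :cache_shard1 ])  # => Single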
data/lib/solid_cache/engine.rb
@@ -0,0 +1,20 @@
+require "active_support"
+
+module SolidCache
+  class Engine < ::Rails::Engine
+    isolate_namespace SolidCache
+
+    config.solid_cache = ActiveSupport::OrderedOptions.new
+
+    initializer "solid_cache", before: :run_prepare_callbacks do |app|
+      config.solid_cache.executor ||= app.executor
+
+      SolidCache.executor = config.solid_cache.executor
+      SolidCache.connects_to = config.solid_cache.connects_to
+    end
+
+    config.after_initialize do
+      Rails.cache.setup! if Rails.cache.is_a?(Store)
+    end
+  end
+end
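Since the initializer reads from config.solid_cache, an application would configure the store's database connections there. A sketch (the connects_to value follows Active Record's connects_to API; the :cache database name is an assumption, not mandated by the gem):

    # config/application.rb
    module MyApp
      class Application < Rails::Application
        config.solid_cache.connects_to = { database: { writing: :cache } }
      end
    end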
data/lib/solid_cache/maglev_hash.rb
@@ -0,0 +1,77 @@
+# See https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf
+
+module SolidCache
+  class MaglevHash
+    attr_reader :nodes
+
+    # Must be prime
+    TABLE_SIZE = 2053
+
+    def initialize(nodes)
+      raise ArgumentError, "No nodes specified" if nodes.count == 0
+      raise ArgumentError, "Maximum node count is #{TABLE_SIZE}" if nodes.count > TABLE_SIZE
+
+      @nodes = nodes.uniq.sort
+      @lookup = build_lookup
+    end
+
+    def node(key)
+      nodes[lookup[quick_hash(key) % TABLE_SIZE]]
+    end
+
+    private
+      attr_reader :lookup, :node_count
+
+      def build_lookup
+        lookup = Array.new(TABLE_SIZE, nil)
+
+        node_preferences = nodes.map { |node| build_preferences(node) }
+        node_count = nodes.count
+
+        TABLE_SIZE.times do |i|
+          node_index = i % node_count
+          preferences = node_preferences[node_index]
+          slot = preferences.preferred_free_slot(lookup)
+          lookup[slot] = node_index
+        end
+
+        lookup
+      end
+
+      def build_preferences(node)
+        offset = md5(node, :offset) % TABLE_SIZE
+        skip = md5(node, :skip) % (TABLE_SIZE - 1) + 1
+
+        Preferences.new(offset, skip)
+      end
+
+      def md5(*args)
+        ::Digest::MD5.digest(args.join).unpack1("L>")
+      end
+
+      def quick_hash(key)
+        Zlib.crc32(key.to_s)
+      end
+
+      class Preferences
+        def initialize(offset, skip)
+          @preferred_slots = TABLE_SIZE.times.map { |i| (offset + i * skip) % TABLE_SIZE }
+          @rank = 0
+        end
+
+        def preferred_free_slot(lookup)
+          loop do
+            slot = next_slot
+            return slot if lookup[slot].nil?
+          end
+        end
+
+        private
+          attr_reader :rank, :preferred_slots
+
+          def next_slot
+            preferred_slots[rank].tap { @rank += 1 }
+          end
+      end
+  end
+end
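The property that matters for sharding is that removing a node reassigns only that node's keys. A quick illustrative check (node names are arbitrary):

    before = SolidCache::MaglevHash.new([ :node1, :node2, :node3 ])
    after  = SolidCache::MaglevHash.new([ :node1, :node2 ])

    keys  = (1..1000).map { |i| "key/#{i}" }
    moved = keys.count { |key| before.node(key) != after.node(key) }
    # Mostly just the keys that hashed to :node3 move (roughly a third),
    # rather than nearly all keys as with naive `hash % node_count` sharding.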
data/lib/solid_cache/store/api.rb
@@ -0,0 +1,153 @@
+module SolidCache
+  class Store
+    module Api
+      DEFAULT_MAX_KEY_BYTESIZE = 1024
+      SQL_WILDCARD_CHARS = [ "_", "%" ]
+
+      attr_reader :max_key_bytesize
+
+      def initialize(options = {})
+        super(options)
+
+        @max_key_bytesize = options.fetch(:max_key_bytesize, DEFAULT_MAX_KEY_BYTESIZE)
+      end
+
+      def delete_matched(matcher, options = {})
+        instrument :delete_matched, matcher do
+          raise ArgumentError, "Only strings are supported: #{matcher.inspect}" unless String === matcher
+          raise ArgumentError, "Strings cannot start with wildcards" if SQL_WILDCARD_CHARS.include?(matcher[0])
+
+          options ||= {}
+          batch_size = options.fetch(:batch_size, 1000)
+
+          matcher = namespace_key(matcher, options)
+
+          entry_delete_matched(matcher, batch_size)
+        end
+      end
+
+      def increment(name, amount = 1, options = nil)
+        options = merged_options(options)
+        key = normalize_key(name, options)
+
+        entry_increment(key, amount)
+      end
+
+      def decrement(name, amount = 1, options = nil)
+        options = merged_options(options)
+        key = normalize_key(name, options)
+
+        entry_decrement(key, amount)
+      end
+
+      def cleanup(options = nil)
+        raise NotImplementedError.new("#{self.class.name} does not support cleanup")
+      end
+
+      def clear(options = nil)
+        entry_clear
+      end
+
+      private
+        def read_entry(key, **options)
+          deserialize_entry(read_serialized_entry(key, **options), **options)
+        end
+
+        def read_serialized_entry(key, raw: false, **options)
+          entry_read(key)
+        end
+
+        def write_entry(key, entry, raw: false, **options)
+          payload = serialize_entry(entry, raw: raw, **options)
+          # No-op for us, but this writes it to the local cache
+          write_serialized_entry(key, payload, raw: raw, **options)
+
+          entry_write(key, payload)
+        end
+
+        def write_serialized_entry(key, payload, raw: false, unless_exist: false, expires_in: nil, race_condition_ttl: nil, **options)
+          true
+        end
+
+        def read_serialized_entries(keys)
+          entry_read_multi(keys).reduce(&:merge!)
+        end
+
+        def read_multi_entries(names, **options)
+          keys_and_names = names.to_h { |name| [ normalize_key(name, options), name ] }
+          serialized_entries = read_serialized_entries(keys_and_names.keys)
+
+          keys_and_names.each_with_object({}) do |(key, name), results|
+            serialized_entry = serialized_entries[key]
+            entry = deserialize_entry(serialized_entry, **options)
+
+            next unless entry
+
+            version = normalize_version(name, options)
+
+            if entry.expired?
+              delete_entry(key, **options)
+            elsif !entry.mismatched?(version)
+              results[name] = entry.value
+            end
+          end
+        end
+
+        def write_multi_entries(entries, expires_in: nil, **options)
+          if entries.any?
+            serialized_entries = serialize_entries(entries, **options)
+            # to add them to the local cache
+            serialized_entries.each do |entries|
+              write_serialized_entry(entries[:key], entries[:value])
+            end
+
+            entry_write_multi(serialized_entries).all?
+          end
+        end
+
+        def delete_entry(key, **options)
+          entry_delete(key)
+        end
+
+        def delete_multi_entries(entries, **options)
+          entries.count { |key| delete_entry(key, **options) }
+        end
+
+        def serialize_entry(entry, raw: false, **options)
+          if raw
+            entry.value.to_s
+          else
+            super(entry, raw: raw, **options)
+          end
+        end
+
+        def serialize_entries(entries, **options)
+          entries.map do |key, entry|
+            { key: key, value: serialize_entry(entry, **options) }
+          end
+        end
+
+        def deserialize_entry(payload, raw: false, **)
+          if payload && raw
+            ActiveSupport::Cache::Entry.new(payload)
+          else
+            super(payload)
+          end
+        end
+
+        def normalize_key(key, options)
+          truncate_key super&.b
+        end
+
+        def truncate_key(key)
+          if key && key.bytesize > max_key_bytesize
+            suffix = ":hash:#{ActiveSupport::Digest.hexdigest(key)}"
+            truncate_at = max_key_bytesize - suffix.bytesize
+            "#{key.byteslice(0, truncate_at)}#{suffix}".b
+          else
+            key
+          end
+        end
+    end
+  end
+end
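One detail worth calling out: normalize_key binary-encodes the key and, when it exceeds max_key_bytesize (1024 bytes by default), truncates it and splices in a digest so distinct long keys stay distinct. A sketch of the effect (calling the private method directly, purely for illustration):

    cache = ActiveSupport::Cache::SolidCacheStore.new(max_key_bytesize: 64)
    long_name = "a" * 200

    key = cache.send(:normalize_key, long_name, {})
    key.bytesize  # => 64, ending in ":hash:<hex digest of the full key>"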
data/lib/solid_cache/store/clusters.rb
@@ -0,0 +1,85 @@
+module SolidCache
+  class Store
+    module Clusters
+      attr_reader :primary_cluster, :clusters
+
+      def initialize(options = {})
+        super(options)
+
+        clusters_options = options.fetch(:clusters) { [ options.fetch(:cluster, {}) ] }
+
+        @clusters = clusters_options.map.with_index do |cluster_options, index|
+          Cluster.new(options.merge(cluster_options).merge(async_writes: index != 0))
+        end
+
+        @primary_cluster = clusters.first
+      end
+
+      def setup!
+        clusters.each(&:setup!)
+      end
+
+      private
+        def reading_key(key, failsafe:, failsafe_returning: nil)
+          failsafe(failsafe, returning: failsafe_returning) do
+            primary_cluster.with_connection_for(key) do
+              yield
+            end
+          end
+        end
+
+        def reading_keys(keys, failsafe:, failsafe_returning: nil)
+          connection_keys = primary_cluster.group_by_connection(keys)
+
+          connection_keys.map do |connection, keys|
+            failsafe(failsafe, returning: failsafe_returning) do
+              primary_cluster.with_connection(connection) do
+                yield keys
+              end
+            end
+          end
+        end
+
+
+        def writing_key(key, failsafe:, failsafe_returning: nil)
+          first_cluster_sync_rest_async do |cluster, async|
+            failsafe(failsafe, returning: failsafe_returning) do
+              cluster.with_connection_for(key, async: async) do
+                yield cluster
+              end
+            end
+          end
+        end
+
+        def writing_keys(entries, failsafe:, failsafe_returning: nil)
+          first_cluster_sync_rest_async do |cluster, async|
+            connection_entries = cluster.group_by_connection(entries)
+
+            connection_entries.map do |connection, entries|
+              failsafe(failsafe, returning: failsafe_returning) do
+                cluster.with_connection(connection, async: async) do
+                  yield cluster, entries
+                end
+              end
+            end
+          end
+        end
+
+        def writing_all(failsafe:, failsafe_returning: nil)
+          first_cluster_sync_rest_async do |cluster, async|
+            cluster.connection_names.each do |connection|
+              failsafe(failsafe, returning: failsafe_returning) do
+                cluster.with_connection(connection, async: async) do
+                  yield
+                end
+              end
+            end
+          end
+        end
+
+        def first_cluster_sync_rest_async
+          clusters.map.with_index { |cluster, index| yield cluster, index != 0 }.first
+        end
+    end
+  end
+end
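Because async_writes is true for every cluster after the first, the first cluster is written synchronously while the rest are kept warm in the background. A sketch of a two-cluster configuration (the per-cluster option shapes are assumptions based on the :cluster/:clusters keys read above; shard names are illustrative):

    ActiveSupport::Cache::SolidCacheStore.new(
      clusters: [
        { shards: [ :cache_primary_shard1, :cache_primary_shard2 ] },  # written synchronously
        { shards: [ :cache_secondary_shard1 ] }                        # written async
      ]
    )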
data/lib/solid_cache/store/entries.rb
@@ -0,0 +1,81 @@
+module SolidCache
+  class Store
+    module Entries
+      attr_reader :clear_with
+
+      def initialize(options = {})
+        super(options)
+
+        # Truncating in test mode breaks transactional tests in MySQL (not in Postgres though)
+        @clear_with = options.fetch(:clear_with) { Rails.env.test? ? :delete : :truncate }&.to_sym
+
+        unless [ :truncate, :delete ].include?(clear_with)
+          raise ArgumentError, "`clear_with` must be either `:truncate` or `:delete`"
+        end
+      end
+
+      private
+        def entry_delete_matched(matcher, batch_size)
+          writing_all(failsafe: :delete_matched) do
+            Entry.delete_matched(matcher, batch_size: batch_size)
+          end
+        end
+
+        def entry_clear
+          writing_all(failsafe: :clear) do
+            if clear_with == :truncate
+              Entry.clear_truncate
+            else
+              Entry.clear_delete
+            end
+          end
+        end
+
+        def entry_increment(key, amount)
+          writing_key(key, failsafe: :increment) do
+            Entry.increment(key, amount)
+          end
+        end
+
+        def entry_decrement(key, amount)
+          writing_key(key, failsafe: :decrement) do
+            Entry.decrement(key, amount)
+          end
+        end
+
+        def entry_read(key)
+          reading_key(key, failsafe: :read_entry) do
+            Entry.read(key)
+          end
+        end
+
+        def entry_read_multi(keys)
+          reading_keys(keys, failsafe: :read_multi_mget, failsafe_returning: {}) do |keys|
+            Entry.read_multi(keys)
+          end
+        end
+
+        def entry_write(key, payload)
+          writing_key(key, failsafe: :write_entry, failsafe_returning: false) do |cluster|
+            Entry.write(key, payload)
+            cluster.track_writes(1)
+            true
+          end
+        end
+
+        def entry_write_multi(entries)
+          writing_keys(entries, failsafe: :write_multi_entries, failsafe_returning: false) do |cluster, entries|
+            Entry.write_multi(entries)
+            cluster.track_writes(entries.count)
+            true
+          end
+        end
+
+        def entry_delete(key)
+          writing_key(key, failsafe: :delete_entry, failsafe_returning: false) do
+            Entry.delete_by_key(key)
+          end
+        end
+    end
+  end
+end
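Given the MySQL caveat in the comment above, a test suite that wants truncation semantics anyway can opt in explicitly (a sketch):

    # Overrides the test-environment default of :delete
    ActiveSupport::Cache::SolidCacheStore.new(clear_with: :truncate)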
data/lib/solid_cache/store/failsafe.rb
@@ -0,0 +1,28 @@
+module SolidCache
+  class Store
+    module Failsafe
+      DEFAULT_ERROR_HANDLER = ->(method:, returning:, exception:) do
+        if Store.logger
+          Store.logger.error { "SolidCacheStore: #{method} failed, returned #{returning.inspect}: #{exception.class}: #{exception.message}" }
+        end
+      end
+
+      def initialize(options = {})
+        super(options)
+
+        @error_handler = options.fetch(:error_handler, DEFAULT_ERROR_HANDLER)
+      end
+
+      private
+        attr_reader :error_handler
+
+        def failsafe(method, returning: nil)
+          yield
+        rescue ActiveRecord::ActiveRecordError => error
+          ActiveSupport.error_reporter&.report(error, handled: true, severity: :warning)
+          error_handler&.call(method: method, exception: error, returning: returning)
+          returning
+        end
+    end
+  end
+end
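Every read and write in Entries runs inside failsafe, so a database outage degrades to cache misses (or false returns) instead of raising. A custom handler can observe those failures; a sketch, where MyErrorTracker stands in for whatever reporting service the application uses:

    ActiveSupport::Cache::SolidCacheStore.new(
      error_handler: ->(method:, returning:, exception:) do
        MyErrorTracker.notify(exception, cache_method: method, returned: returning)
      end
    )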