mudis 0.7.2 → 0.8.0

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
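
The substantive change in 0.8.0 is the addition of optional snapshot persistence to data/lib/mudis.rb: new class methods save_snapshot!, load_snapshot! and install_persistence_hook!, backed by private helpers (snapshot_dump, snapshot_restore, serializer_for_snapshot, safe_write_snapshot, read_snapshot), plus four new configuration keys read in apply_config! (persistence_enabled, persistence_path, persistence_format, persistence_safe_write). The sketch below shows how these might be wired together; it assumes MudisConfig exposes writers for the new keys and sensible defaults for the existing ones (the config class itself is not part of this diff), and the path value is illustrative.

    require "mudis"

    Mudis.configure do |c|
      c.persistence_enabled    = true
      c.persistence_path       = "tmp/mudis_snapshot.bin"  # assumed location; not defined in this diff
      c.persistence_format     = :marshal                  # falls back to :marshal when unset; :json also supported
      c.persistence_safe_write = true                      # write a temp file, then rename it into place
    end

    Mudis.load_snapshot!            # restore entries from a previous run, if a snapshot file exists
    Mudis.install_persistence_hook! # register an at_exit hook that calls save_snapshot!
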
data/lib/mudis.rb CHANGED
@@ -1,521 +1,631 @@
1
- # frozen_string_literal: true
2
-
3
- require "json"
4
- require "thread" # rubocop:disable Lint/RedundantRequireStatement
5
- require "zlib"
6
-
7
- require_relative "mudis_config"
8
-
9
- # Mudis is a thread-safe, in-memory, sharded, LRU cache with optional compression and expiry.
10
- # It is designed for high concurrency and performance within a Ruby application.
11
- class Mudis # rubocop:disable Metrics/ClassLength
12
- # --- Global Configuration and State ---
13
-
14
- @serializer = JSON # Default serializer (can be changed to Marshal or Oj)
15
- @compress = false # Whether to compress values with Zlib
16
- @metrics = { hits: 0, misses: 0, evictions: 0, rejected: 0 } # Metrics tracking read/write behaviour
17
- @metrics_mutex = Mutex.new # Mutex for synchronizing access to metrics
18
- @max_value_bytes = nil # Optional size cap per value
19
- @stop_expiry = false # Signal for stopping expiry thread
20
- @max_ttl = nil # Optional maximum TTL for cache entries
21
- @default_ttl = nil # Default TTL for cache entries if not specified
22
-
23
- class << self
24
- attr_accessor :serializer, :compress, :hard_memory_limit, :max_ttl, :default_ttl
25
- attr_reader :max_bytes, :max_value_bytes
26
-
27
- # Configures Mudis with a block, allowing customization of settings
28
- def configure
29
- yield(config)
30
- apply_config!
31
- end
32
-
33
- # Returns the current configuration object
34
- def config
35
- @config ||= MudisConfig.new
36
- end
37
-
38
- # Applies the current configuration to Mudis
39
- def apply_config! # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
40
- validate_config!
41
-
42
- self.serializer = config.serializer
43
- self.compress = config.compress
44
- self.max_value_bytes = config.max_value_bytes
45
- self.hard_memory_limit = config.hard_memory_limit
46
- self.max_bytes = config.max_bytes
47
- self.max_ttl = config.max_ttl
48
- self.default_ttl = config.default_ttl
49
-
50
- if config.buckets # rubocop:disable Style/GuardClause
51
- @buckets = config.buckets
52
- reset!
53
- end
54
- end
55
-
56
- # Validates the current configuration, raising errors for invalid settings
57
- def validate_config! # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/PerceivedComplexity
58
- if config.max_value_bytes && config.max_value_bytes > config.max_bytes
59
- raise ArgumentError,
60
- "max_value_bytes cannot exceed max_bytes"
61
- end
62
-
63
- raise ArgumentError, "max_value_bytes must be > 0" if config.max_value_bytes && config.max_value_bytes <= 0
64
-
65
- raise ArgumentError, "buckets must be > 0" if config.buckets && config.buckets <= 0
66
- raise ArgumentError, "max_ttl must be > 0" if config.max_ttl && config.max_ttl <= 0
67
- raise ArgumentError, "default_ttl must be > 0" if config.default_ttl && config.default_ttl <= 0
68
- end
69
-
70
- # Returns a snapshot of metrics (thread-safe)
71
- def metrics # rubocop:disable Metrics/MethodLength
72
- @metrics_mutex.synchronize do
73
- {
74
- hits: @metrics[:hits],
75
- misses: @metrics[:misses],
76
- evictions: @metrics[:evictions],
77
- rejected: @metrics[:rejected],
78
- total_memory: current_memory_bytes,
79
- least_touched: least_touched(10),
80
- buckets: buckets.times.map do |idx|
81
- {
82
- index: idx,
83
- keys: @stores[idx].size,
84
- memory_bytes: @current_bytes[idx],
85
- lru_size: @lru_nodes[idx].size
86
- }
87
- end
88
- }
89
- end
90
- end
91
-
92
- # Resets metric counters (thread-safe)
93
- def reset_metrics!
94
- @metrics_mutex.synchronize do
95
- @metrics = { hits: 0, misses: 0, evictions: 0, rejected: 0 }
96
- end
97
- end
98
-
99
- # Fully resets all internal state (except config)
100
- def reset!
101
- stop_expiry_thread
102
-
103
- @buckets = nil
104
- b = buckets
105
-
106
- @stores = Array.new(b) { {} }
107
- @mutexes = Array.new(b) { Mutex.new }
108
- @lru_heads = Array.new(b) { nil }
109
- @lru_tails = Array.new(b) { nil }
110
- @lru_nodes = Array.new(b) { {} }
111
- @current_bytes = Array.new(b, 0)
112
-
113
- reset_metrics!
114
- end
115
-
116
- # Sets the maximum size for a single value in bytes
117
- def max_bytes=(value)
118
- raise ArgumentError, "max_bytes must be > 0" if value.to_i <= 0
119
-
120
- @max_bytes = value
121
- @threshold_bytes = (@max_bytes * 0.9).to_i
122
- end
123
-
124
- # Sets the maximum size for a single value in bytes, raising an error if invalid
125
- def max_value_bytes=(value)
126
- raise ArgumentError, "max_value_bytes must be > 0" if value && value.to_i <= 0
127
-
128
- @max_value_bytes = value
129
- end
130
- end
131
-
132
- # Node structure for the LRU doubly-linked list
133
- class LRUNode
134
- attr_accessor :key, :prev, :next
135
-
136
- def initialize(key)
137
- @key = key
138
- @prev = nil
139
- @next = nil
140
- end
141
- end
142
-
143
- # Number of cache buckets (shards). Default: 32
144
- def self.buckets
145
- return @buckets if @buckets
146
-
147
- val = config.buckets || ENV["MUDIS_BUCKETS"]&.to_i || 32
148
- raise ArgumentError, "bucket count must be > 0" if val <= 0
149
-
150
- @buckets = val
151
- end
152
-
153
- # --- Internal Structures ---
154
-
155
- @stores = Array.new(buckets) { {} } # Array of hash buckets for storage
156
- @mutexes = Array.new(buckets) { Mutex.new } # Per-bucket mutexes
157
- @lru_heads = Array.new(buckets) { nil } # Head node for each LRU list
158
- @lru_tails = Array.new(buckets) { nil } # Tail node for each LRU list
159
- @lru_nodes = Array.new(buckets) { {} } # Map of key => LRU node
160
- @current_bytes = Array.new(buckets, 0) # Memory usage per bucket
161
- @max_bytes = 1_073_741_824 # 1 GB global max cache size
162
- @threshold_bytes = (@max_bytes * 0.9).to_i # Eviction threshold at 90%
163
- @expiry_thread = nil # Background thread for expiry cleanup
164
- @hard_memory_limit = false # Whether to enforce hard memory cap
165
-
166
- class << self
167
- # Starts a thread that periodically removes expired entries
168
- def start_expiry_thread(interval: 60)
169
- return if @expiry_thread&.alive?
170
-
171
- @stop_expiry = false
172
- @expiry_thread = Thread.new do
173
- loop do
174
- break if @stop_expiry
175
-
176
- sleep interval
177
- cleanup_expired!
178
- end
179
- end
180
- end
181
-
182
- # Signals and joins the expiry thread
183
- def stop_expiry_thread
184
- @stop_expiry = true
185
- @expiry_thread&.join
186
- @expiry_thread = nil
187
- end
188
-
189
- # Computes which bucket a key belongs to
190
- def bucket_index(key)
191
- key.hash % buckets
192
- end
193
-
194
- # Checks if a key exists and is not expired
195
- def exists?(key, namespace: nil)
196
- key = namespaced_key(key, namespace)
197
- !!read(key)
198
- end
199
-
200
- # Reads and returns the value for a key, updating LRU and metrics
201
- def read(key, namespace: nil) # rubocop:disable Metrics/MethodLength,Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/PerceivedComplexity
202
- key = namespaced_key(key, namespace)
203
- raw_entry = nil
204
- idx = bucket_index(key)
205
- mutex = @mutexes[idx]
206
- store = @stores[idx]
207
-
208
- mutex.synchronize do
209
- raw_entry = @stores[idx][key]
210
- if raw_entry && raw_entry[:expires_at] && Time.now > raw_entry[:expires_at]
211
- evict_key(idx, key)
212
- raw_entry = nil
213
- end
214
-
215
- store[key][:touches] = (store[key][:touches] || 0) + 1 if store[key]
216
-
217
- metric(:hits) if raw_entry
218
- metric(:misses) unless raw_entry
219
- end
220
-
221
- return nil unless raw_entry
222
-
223
- value = decompress_and_deserialize(raw_entry[:value])
224
- promote_lru(idx, key)
225
- value
226
- end
227
-
228
- # Writes a value to the cache with optional expiry and LRU tracking
229
- def write(key, value, expires_in: nil, namespace: nil) # rubocop:disable Metrics/MethodLength,Metrics/CyclomaticComplexity,Metrics/AbcSize,Metrics/PerceivedComplexity
230
- key = namespaced_key(key, namespace)
231
- raw = serializer.dump(value)
232
- raw = Zlib::Deflate.deflate(raw) if compress
233
- size = key.bytesize + raw.bytesize
234
- return if max_value_bytes && raw.bytesize > max_value_bytes
235
-
236
- if hard_memory_limit && current_memory_bytes + size > max_memory_bytes
237
- metric(:rejected)
238
- return
239
- end
240
-
241
- # Ensure expires_in respects max_ttl and default_ttl
242
- expires_in = effective_ttl(expires_in)
243
-
244
- idx = bucket_index(key)
245
- mutex = @mutexes[idx]
246
- store = @stores[idx]
247
-
248
- mutex.synchronize do
249
- evict_key(idx, key) if store[key]
250
-
251
- while @current_bytes[idx] + size > (@threshold_bytes / buckets) && @lru_tails[idx]
252
- evict_key(idx, @lru_tails[idx].key)
253
- metric(:evictions)
254
- end
255
-
256
- store[key] = {
257
- value: raw,
258
- expires_at: expires_in ? Time.now + expires_in : nil,
259
- created_at: Time.now,
260
- touches: 0
261
- }
262
-
263
- insert_lru(idx, key)
264
- @current_bytes[idx] += size
265
- end
266
- end
267
-
268
- # Atomically updates the value for a key using a block
269
- def update(key, namespace: nil) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
270
- key = namespaced_key(key, namespace)
271
- idx = bucket_index(key)
272
- mutex = @mutexes[idx]
273
- store = @stores[idx]
274
-
275
- raw_entry = nil
276
- mutex.synchronize do
277
- raw_entry = store[key]
278
- return nil unless raw_entry
279
- end
280
-
281
- value = decompress_and_deserialize(raw_entry[:value])
282
- new_value = yield(value)
283
- new_raw = serializer.dump(new_value)
284
- new_raw = Zlib::Deflate.deflate(new_raw) if compress
285
-
286
- mutex.synchronize do
287
- old_size = key.bytesize + raw_entry[:value].bytesize
288
- new_size = key.bytesize + new_raw.bytesize
289
- store[key][:value] = new_raw
290
- @current_bytes[idx] += (new_size - old_size)
291
- promote_lru(idx, key)
292
- end
293
- end
294
-
295
- # Deletes a key from the cache
296
- def delete(key, namespace: nil)
297
- key = namespaced_key(key, namespace)
298
- idx = bucket_index(key)
299
- mutex = @mutexes[idx]
300
-
301
- mutex.synchronize do
302
- evict_key(idx, key)
303
- end
304
- end
305
-
306
- # Fetches a value for a key, writing it if not present or expired
307
- # The block is executed to generate the value if it doesn't exist
308
- # Optionally accepts an expiration time
309
- # If force is true, it always fetches and writes the value
310
- def fetch(key, expires_in: nil, force: false, namespace: nil)
311
- key = namespaced_key(key, namespace)
312
- unless force
313
- cached = read(key)
314
- return cached if cached
315
- end
316
-
317
- value = yield
318
- write(key, value, expires_in: expires_in)
319
- value
320
- end
321
-
322
- # Clears a specific key from the cache, a semantic synonym for delete
323
- # This method is provided for clarity in usage
324
- # It behaves the same as delete
325
- def clear(key, namespace: nil)
326
- delete(key, namespace: namespace)
327
- end
328
-
329
- # Replaces the value for a key if it exists, otherwise does nothing
330
- # This is useful for updating values without needing to check existence first
331
- # It will write the new value and update the expiration if provided
332
- # If the key does not exist, it will not create a new entry
333
- def replace(key, value, expires_in: nil, namespace: nil)
334
- return unless exists?(key, namespace: namespace)
335
-
336
- write(key, value, expires_in: expires_in, namespace: namespace)
337
- end
338
-
339
- # Inspects a key and returns all meta data for it
340
- def inspect(key, namespace: nil) # rubocop:disable Metrics/MethodLength
341
- key = namespaced_key(key, namespace)
342
- idx = bucket_index(key)
343
- store = @stores[idx]
344
- mutex = @mutexes[idx]
345
-
346
- mutex.synchronize do
347
- entry = store[key]
348
- return nil unless entry
349
-
350
- {
351
- key: key,
352
- bucket: idx,
353
- expires_at: entry[:expires_at],
354
- created_at: entry[:created_at],
355
- size_bytes: key.bytesize + entry[:value].bytesize,
356
- compressed: compress
357
- }
358
- end
359
- end
360
-
361
- # Removes expired keys across all buckets
362
- def cleanup_expired!
363
- now = Time.now
364
- buckets.times do |idx|
365
- mutex = @mutexes[idx]
366
- store = @stores[idx]
367
- mutex.synchronize do
368
- store.keys.each do |key| # rubocop:disable Style/HashEachMethods
369
- evict_key(idx, key) if store[key][:expires_at] && now > store[key][:expires_at]
370
- end
371
- end
372
- end
373
- end
374
-
375
- # Returns an array of all cache keys
376
- def all_keys
377
- keys = []
378
- buckets.times do |idx|
379
- mutex = @mutexes[idx]
380
- store = @stores[idx]
381
- mutex.synchronize { keys.concat(store.keys) }
382
- end
383
- keys
384
- end
385
-
386
- # Returns all keys in a specific namespace
387
- def keys(namespace:)
388
- raise ArgumentError, "namespace is required" unless namespace
389
-
390
- prefix = "#{namespace}:"
391
- all_keys.select { |key| key.start_with?(prefix) }.map { |key| key.delete_prefix(prefix) }
392
- end
393
-
394
- # Clears all keys in a specific namespace
395
- def clear_namespace(namespace:)
396
- raise ArgumentError, "namespace is required" unless namespace
397
-
398
- prefix = "#{namespace}:"
399
- buckets.times do |idx|
400
- mutex = @mutexes[idx]
401
- store = @stores[idx]
402
-
403
- mutex.synchronize do
404
- keys_to_delete = store.keys.select { |key| key.start_with?(prefix) }
405
- keys_to_delete.each { |key| evict_key(idx, key) }
406
- end
407
- end
408
- end
409
-
410
- # Returns the least-touched keys across all buckets
411
- def least_touched(n = 10) # rubocop:disable Metrics/MethodLength,Naming/MethodParameterName
412
- keys_with_touches = []
413
-
414
- buckets.times do |idx|
415
- mutex = @mutexes[idx]
416
- store = @stores[idx]
417
-
418
- mutex.synchronize do
419
- store.each do |key, entry|
420
- keys_with_touches << [key, entry[:touches] || 0]
421
- end
422
- end
423
- end
424
-
425
- keys_with_touches.sort_by { |_, count| count }.first(n)
426
- end
427
-
428
- # Returns total memory used across all buckets
429
- def current_memory_bytes
430
- @current_bytes.sum
431
- end
432
-
433
- # Returns configured maximum memory allowed
434
- def max_memory_bytes
435
- @max_bytes
436
- end
437
-
438
- # Executes a block with a specific namespace, restoring the old namespace afterwards
439
- def with_namespace(namespace)
440
- old_ns = Thread.current[:mudis_namespace]
441
- Thread.current[:mudis_namespace] = namespace
442
- yield
443
- ensure
444
- Thread.current[:mudis_namespace] = old_ns
445
- end
446
-
447
- private
448
-
449
- # Decompresses and deserializes a raw value
450
- def decompress_and_deserialize(raw)
451
- val = compress ? Zlib::Inflate.inflate(raw) : raw
452
- serializer.load(val)
453
- end
454
-
455
- # Thread-safe metric increment
456
- def metric(name)
457
- @metrics_mutex.synchronize { @metrics[name] += 1 }
458
- end
459
-
460
- # Removes a key from storage and LRU
461
- def evict_key(idx, key)
462
- store = @stores[idx]
463
- entry = store.delete(key)
464
- return unless entry
465
-
466
- @current_bytes[idx] -= (key.bytesize + entry[:value].bytesize)
467
-
468
- node = @lru_nodes[idx].delete(key)
469
- remove_node(idx, node) if node
470
- end
471
-
472
- # Inserts a key at the head of the LRU list
473
- def insert_lru(idx, key)
474
- node = LRUNode.new(key)
475
- node.next = @lru_heads[idx]
476
- @lru_heads[idx].prev = node if @lru_heads[idx]
477
- @lru_heads[idx] = node
478
- @lru_tails[idx] ||= node
479
- @lru_nodes[idx][key] = node
480
- end
481
-
482
- # Promotes a key to the front of the LRU list
483
- def promote_lru(idx, key)
484
- node = @lru_nodes[idx][key]
485
- return unless node && @lru_heads[idx] != node
486
-
487
- remove_node(idx, node)
488
- insert_lru(idx, key)
489
- end
490
-
491
- # Removes a node from the LRU list
492
- def remove_node(idx, node)
493
- if node.prev
494
- node.prev.next = node.next
495
- else
496
- @lru_heads[idx] = node.next
497
- end
498
-
499
- if node.next
500
- node.next.prev = node.prev
501
- else
502
- @lru_tails[idx] = node.prev
503
- end
504
- end
505
-
506
- # Namespaces a key with an optional namespace
507
- def namespaced_key(key, namespace = nil)
508
- ns = namespace || Thread.current[:mudis_namespace]
509
- ns ? "#{ns}:#{key}" : key
510
- end
511
-
512
- # Calculates the effective TTL for an entry, respecting max_ttl if set
513
- def effective_ttl(expires_in)
514
- ttl = expires_in || @default_ttl
515
- return nil unless ttl
516
- return ttl unless @max_ttl
517
-
518
- [ttl, @max_ttl].min
519
- end
520
- end
521
- end
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require "thread" # rubocop:disable Lint/RedundantRequireStatement
5
+ require "zlib"
6
+
7
+ require_relative "mudis_config"
8
+
9
+ # Mudis is a thread-safe, in-memory, sharded, LRU cache with optional compression and expiry.
10
+ # It is designed for high concurrency and performance within a Ruby application.
11
+ class Mudis # rubocop:disable Metrics/ClassLength
12
+ # --- Global Configuration and State ---
13
+
14
+ @serializer = JSON # Default serializer (can be changed to Marshal or Oj)
15
+ @compress = false # Whether to compress values with Zlib
16
+ @metrics = { hits: 0, misses: 0, evictions: 0, rejected: 0 } # Metrics tracking read/write behaviour
17
+ @metrics_mutex = Mutex.new # Mutex for synchronizing access to metrics
18
+ @max_value_bytes = nil # Optional size cap per value
19
+ @stop_expiry = false # Signal for stopping expiry thread
20
+ @max_ttl = nil # Optional maximum TTL for cache entries
21
+ @default_ttl = nil # Default TTL for cache entries if not specified
22
+
23
+ class << self
24
+ attr_accessor :serializer, :compress, :hard_memory_limit, :max_ttl, :default_ttl
25
+ attr_reader :max_bytes, :max_value_bytes
26
+
27
+ # Configures Mudis with a block, allowing customization of settings
28
+ def configure
29
+ yield(config)
30
+ apply_config!
31
+ end
32
+
33
+ # Returns the current configuration object
34
+ def config
35
+ @config ||= MudisConfig.new
36
+ end
37
+
38
+ # Applies the current configuration to Mudis
39
+ def apply_config! # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
40
+ validate_config!
41
+
42
+ self.serializer = config.serializer
43
+ self.compress = config.compress
44
+ self.max_value_bytes = config.max_value_bytes
45
+ self.hard_memory_limit = config.hard_memory_limit
46
+ self.max_bytes = config.max_bytes
47
+ self.max_ttl = config.max_ttl
48
+ self.default_ttl = config.default_ttl
49
+
50
+ @persistence_enabled = config.persistence_enabled
51
+ @persistence_path = config.persistence_path
52
+ @persistence_format = config.persistence_format
53
+ @persistence_safe_write = config.persistence_safe_write
54
+
55
+ if config.buckets # rubocop:disable Style/GuardClause
56
+ @buckets = config.buckets
57
+ reset!
58
+ end
59
+ end
60
+
61
+ # Validates the current configuration, raising errors for invalid settings
62
+ def validate_config! # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/PerceivedComplexity
63
+ if config.max_value_bytes && config.max_value_bytes > config.max_bytes
64
+ raise ArgumentError,
65
+ "max_value_bytes cannot exceed max_bytes"
66
+ end
67
+
68
+ raise ArgumentError, "max_value_bytes must be > 0" if config.max_value_bytes && config.max_value_bytes <= 0
69
+
70
+ raise ArgumentError, "buckets must be > 0" if config.buckets && config.buckets <= 0
71
+ raise ArgumentError, "max_ttl must be > 0" if config.max_ttl && config.max_ttl <= 0
72
+ raise ArgumentError, "default_ttl must be > 0" if config.default_ttl && config.default_ttl <= 0
73
+ end
74
+
75
+ # Returns a snapshot of metrics (thread-safe)
76
+ def metrics # rubocop:disable Metrics/MethodLength
77
+ @metrics_mutex.synchronize do
78
+ {
79
+ hits: @metrics[:hits],
80
+ misses: @metrics[:misses],
81
+ evictions: @metrics[:evictions],
82
+ rejected: @metrics[:rejected],
83
+ total_memory: current_memory_bytes,
84
+ least_touched: least_touched(10),
85
+ buckets: buckets.times.map do |idx|
86
+ {
87
+ index: idx,
88
+ keys: @stores[idx].size,
89
+ memory_bytes: @current_bytes[idx],
90
+ lru_size: @lru_nodes[idx].size
91
+ }
92
+ end
93
+ }
94
+ end
95
+ end
96
+
97
+ # Resets metric counters (thread-safe)
98
+ def reset_metrics!
99
+ @metrics_mutex.synchronize do
100
+ @metrics = { hits: 0, misses: 0, evictions: 0, rejected: 0 }
101
+ end
102
+ end
103
+
104
+ # Fully resets all internal state (except config)
105
+ def reset!
106
+ stop_expiry_thread
107
+
108
+ @buckets = nil
109
+ b = buckets
110
+
111
+ @stores = Array.new(b) { {} }
112
+ @mutexes = Array.new(b) { Mutex.new }
113
+ @lru_heads = Array.new(b) { nil }
114
+ @lru_tails = Array.new(b) { nil }
115
+ @lru_nodes = Array.new(b) { {} }
116
+ @current_bytes = Array.new(b, 0)
117
+
118
+ reset_metrics!
119
+ end
120
+
121
+ # Sets the maximum size for a single value in bytes
122
+ def max_bytes=(value)
123
+ raise ArgumentError, "max_bytes must be > 0" if value.to_i <= 0
124
+
125
+ @max_bytes = value
126
+ @threshold_bytes = (@max_bytes * 0.9).to_i
127
+ end
128
+
129
+ # Sets the maximum size for a single value in bytes, raising an error if invalid
130
+ def max_value_bytes=(value)
131
+ raise ArgumentError, "max_value_bytes must be > 0" if value && value.to_i <= 0
132
+
133
+ @max_value_bytes = value
134
+ end
135
+ end
136
+
137
+ # Node structure for the LRU doubly-linked list
138
+ class LRUNode
139
+ attr_accessor :key, :prev, :next
140
+
141
+ def initialize(key)
142
+ @key = key
143
+ @prev = nil
144
+ @next = nil
145
+ end
146
+ end
147
+
148
+ # Number of cache buckets (shards). Default: 32
149
+ def self.buckets
150
+ return @buckets if @buckets
151
+
152
+ val = config.buckets || ENV["MUDIS_BUCKETS"]&.to_i || 32
153
+ raise ArgumentError, "bucket count must be > 0" if val <= 0
154
+
155
+ @buckets = val
156
+ end
157
+
158
+ # --- Internal Structures ---
159
+
160
+ @stores = Array.new(buckets) { {} } # Array of hash buckets for storage
161
+ @mutexes = Array.new(buckets) { Mutex.new } # Per-bucket mutexes
162
+ @lru_heads = Array.new(buckets) { nil } # Head node for each LRU list
163
+ @lru_tails = Array.new(buckets) { nil } # Tail node for each LRU list
164
+ @lru_nodes = Array.new(buckets) { {} } # Map of key => LRU node
165
+ @current_bytes = Array.new(buckets, 0) # Memory usage per bucket
166
+ @max_bytes = 1_073_741_824 # 1 GB global max cache size
167
+ @threshold_bytes = (@max_bytes * 0.9).to_i # Eviction threshold at 90%
168
+ @expiry_thread = nil # Background thread for expiry cleanup
169
+ @hard_memory_limit = false # Whether to enforce hard memory cap
170
+
171
+ class << self
172
+ # Starts a thread that periodically removes expired entries
173
+ def start_expiry_thread(interval: 60)
174
+ return if @expiry_thread&.alive?
175
+
176
+ @stop_expiry = false
177
+ @expiry_thread = Thread.new do
178
+ loop do
179
+ break if @stop_expiry
180
+
181
+ sleep interval
182
+ cleanup_expired!
183
+ end
184
+ end
185
+ end
186
+
187
+ # Signals and joins the expiry thread
188
+ def stop_expiry_thread
189
+ @stop_expiry = true
190
+ @expiry_thread&.join
191
+ @expiry_thread = nil
192
+ end
193
+
194
+ # Computes which bucket a key belongs to
195
+ def bucket_index(key)
196
+ key.hash % buckets
197
+ end
198
+
199
+ # Checks if a key exists and is not expired
200
+ def exists?(key, namespace: nil)
201
+ key = namespaced_key(key, namespace)
202
+ !!read(key)
203
+ end
204
+
205
+ # Reads and returns the value for a key, updating LRU and metrics
206
+ def read(key, namespace: nil) # rubocop:disable Metrics/MethodLength,Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/PerceivedComplexity
207
+ key = namespaced_key(key, namespace)
208
+ raw_entry = nil
209
+ idx = bucket_index(key)
210
+ mutex = @mutexes[idx]
211
+ store = @stores[idx]
212
+
213
+ mutex.synchronize do
214
+ raw_entry = @stores[idx][key]
215
+ if raw_entry && raw_entry[:expires_at] && Time.now > raw_entry[:expires_at]
216
+ evict_key(idx, key)
217
+ raw_entry = nil
218
+ end
219
+
220
+ store[key][:touches] = (store[key][:touches] || 0) + 1 if store[key]
221
+
222
+ metric(:hits) if raw_entry
223
+ metric(:misses) unless raw_entry
224
+ end
225
+
226
+ return nil unless raw_entry
227
+
228
+ value = decompress_and_deserialize(raw_entry[:value])
229
+ promote_lru(idx, key)
230
+ value
231
+ end
232
+
233
+ # Writes a value to the cache with optional expiry and LRU tracking
234
+ def write(key, value, expires_in: nil, namespace: nil) # rubocop:disable Metrics/MethodLength,Metrics/CyclomaticComplexity,Metrics/AbcSize,Metrics/PerceivedComplexity
235
+ key = namespaced_key(key, namespace)
236
+ raw = serializer.dump(value)
237
+ raw = Zlib::Deflate.deflate(raw) if compress
238
+ size = key.bytesize + raw.bytesize
239
+ return if max_value_bytes && raw.bytesize > max_value_bytes
240
+
241
+ if hard_memory_limit && current_memory_bytes + size > max_memory_bytes
242
+ metric(:rejected)
243
+ return
244
+ end
245
+
246
+ # Ensure expires_in respects max_ttl and default_ttl
247
+ expires_in = effective_ttl(expires_in)
248
+
249
+ idx = bucket_index(key)
250
+ mutex = @mutexes[idx]
251
+ store = @stores[idx]
252
+
253
+ mutex.synchronize do
254
+ evict_key(idx, key) if store[key]
255
+
256
+ while @current_bytes[idx] + size > (@threshold_bytes / buckets) && @lru_tails[idx]
257
+ evict_key(idx, @lru_tails[idx].key)
258
+ metric(:evictions)
259
+ end
260
+
261
+ store[key] = {
262
+ value: raw,
263
+ expires_at: expires_in ? Time.now + expires_in : nil,
264
+ created_at: Time.now,
265
+ touches: 0
266
+ }
267
+
268
+ insert_lru(idx, key)
269
+ @current_bytes[idx] += size
270
+ end
271
+ end
272
+
273
+ # Atomically updates the value for a key using a block
274
+ def update(key, namespace: nil) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
275
+ key = namespaced_key(key, namespace)
276
+ idx = bucket_index(key)
277
+ mutex = @mutexes[idx]
278
+ store = @stores[idx]
279
+
280
+ raw_entry = nil
281
+ mutex.synchronize do
282
+ raw_entry = store[key]
283
+ return nil unless raw_entry
284
+ end
285
+
286
+ value = decompress_and_deserialize(raw_entry[:value])
287
+ new_value = yield(value)
288
+ new_raw = serializer.dump(new_value)
289
+ new_raw = Zlib::Deflate.deflate(new_raw) if compress
290
+
291
+ mutex.synchronize do
292
+ old_size = key.bytesize + raw_entry[:value].bytesize
293
+ new_size = key.bytesize + new_raw.bytesize
294
+ store[key][:value] = new_raw
295
+ @current_bytes[idx] += (new_size - old_size)
296
+ promote_lru(idx, key)
297
+ end
298
+ end
299
+
300
+ # Deletes a key from the cache
301
+ def delete(key, namespace: nil)
302
+ key = namespaced_key(key, namespace)
303
+ idx = bucket_index(key)
304
+ mutex = @mutexes[idx]
305
+
306
+ mutex.synchronize do
307
+ evict_key(idx, key)
308
+ end
309
+ end
310
+
311
+ # Fetches a value for a key, writing it if not present or expired
312
+ # The block is executed to generate the value if it doesn't exist
313
+ # Optionally accepts an expiration time
314
+ # If force is true, it always fetches and writes the value
315
+ def fetch(key, expires_in: nil, force: false, namespace: nil)
316
+ key = namespaced_key(key, namespace)
317
+ unless force
318
+ cached = read(key)
319
+ return cached if cached
320
+ end
321
+
322
+ value = yield
323
+ write(key, value, expires_in: expires_in)
324
+ value
325
+ end
326
+
327
+ # Clears a specific key from the cache, a semantic synonym for delete
328
+ # This method is provided for clarity in usage
329
+ # It behaves the same as delete
330
+ def clear(key, namespace: nil)
331
+ delete(key, namespace: namespace)
332
+ end
333
+
334
+ # Replaces the value for a key if it exists, otherwise does nothing
335
+ # This is useful for updating values without needing to check existence first
336
+ # It will write the new value and update the expiration if provided
337
+ # If the key does not exist, it will not create a new entry
338
+ def replace(key, value, expires_in: nil, namespace: nil)
339
+ return unless exists?(key, namespace: namespace)
340
+
341
+ write(key, value, expires_in: expires_in, namespace: namespace)
342
+ end
343
+
344
+ # Inspects a key and returns all meta data for it
345
+ def inspect(key, namespace: nil) # rubocop:disable Metrics/MethodLength
346
+ key = namespaced_key(key, namespace)
347
+ idx = bucket_index(key)
348
+ store = @stores[idx]
349
+ mutex = @mutexes[idx]
350
+
351
+ mutex.synchronize do
352
+ entry = store[key]
353
+ return nil unless entry
354
+
355
+ {
356
+ key: key,
357
+ bucket: idx,
358
+ expires_at: entry[:expires_at],
359
+ created_at: entry[:created_at],
360
+ size_bytes: key.bytesize + entry[:value].bytesize,
361
+ compressed: compress
362
+ }
363
+ end
364
+ end
365
+
366
+ # Removes expired keys across all buckets
367
+ def cleanup_expired!
368
+ now = Time.now
369
+ buckets.times do |idx|
370
+ mutex = @mutexes[idx]
371
+ store = @stores[idx]
372
+ mutex.synchronize do
373
+ store.keys.each do |key| # rubocop:disable Style/HashEachMethods
374
+ evict_key(idx, key) if store[key][:expires_at] && now > store[key][:expires_at]
375
+ end
376
+ end
377
+ end
378
+ end
379
+
380
+ # Returns an array of all cache keys
381
+ def all_keys
382
+ keys = []
383
+ buckets.times do |idx|
384
+ mutex = @mutexes[idx]
385
+ store = @stores[idx]
386
+ mutex.synchronize { keys.concat(store.keys) }
387
+ end
388
+ keys
389
+ end
390
+
391
+ # Returns all keys in a specific namespace
392
+ def keys(namespace:)
393
+ raise ArgumentError, "namespace is required" unless namespace
394
+
395
+ prefix = "#{namespace}:"
396
+ all_keys.select { |key| key.start_with?(prefix) }.map { |key| key.delete_prefix(prefix) }
397
+ end
398
+
399
+ # Clears all keys in a specific namespace
400
+ def clear_namespace(namespace:)
401
+ raise ArgumentError, "namespace is required" unless namespace
402
+
403
+ prefix = "#{namespace}:"
404
+ buckets.times do |idx|
405
+ mutex = @mutexes[idx]
406
+ store = @stores[idx]
407
+
408
+ mutex.synchronize do
409
+ keys_to_delete = store.keys.select { |key| key.start_with?(prefix) }
410
+ keys_to_delete.each { |key| evict_key(idx, key) }
411
+ end
412
+ end
413
+ end
414
+
415
+ # Returns the least-touched keys across all buckets
416
+ def least_touched(n = 10) # rubocop:disable Metrics/MethodLength,Naming/MethodParameterName
417
+ keys_with_touches = []
418
+
419
+ buckets.times do |idx|
420
+ mutex = @mutexes[idx]
421
+ store = @stores[idx]
422
+
423
+ mutex.synchronize do
424
+ store.each do |key, entry|
425
+ keys_with_touches << [key, entry[:touches] || 0]
426
+ end
427
+ end
428
+ end
429
+
430
+ keys_with_touches.sort_by { |_, count| count }.first(n)
431
+ end
432
+
433
+ # Returns total memory used across all buckets
434
+ def current_memory_bytes
435
+ @current_bytes.sum
436
+ end
437
+
438
+ # Returns configured maximum memory allowed
439
+ def max_memory_bytes
440
+ @max_bytes
441
+ end
442
+
443
+ # Executes a block with a specific namespace, restoring the old namespace afterwards
444
+ def with_namespace(namespace)
445
+ old_ns = Thread.current[:mudis_namespace]
446
+ Thread.current[:mudis_namespace] = namespace
447
+ yield
448
+ ensure
449
+ Thread.current[:mudis_namespace] = old_ns
450
+ end
451
+
452
+ private
453
+
454
+ # Decompresses and deserializes a raw value
455
+ def decompress_and_deserialize(raw)
456
+ val = compress ? Zlib::Inflate.inflate(raw) : raw
457
+ serializer.load(val)
458
+ end
459
+
460
+ # Thread-safe metric increment
461
+ def metric(name)
462
+ @metrics_mutex.synchronize { @metrics[name] += 1 }
463
+ end
464
+
465
+ # Removes a key from storage and LRU
466
+ def evict_key(idx, key)
467
+ store = @stores[idx]
468
+ entry = store.delete(key)
469
+ return unless entry
470
+
471
+ @current_bytes[idx] -= (key.bytesize + entry[:value].bytesize)
472
+
473
+ node = @lru_nodes[idx].delete(key)
474
+ remove_node(idx, node) if node
475
+ end
476
+
477
+ # Inserts a key at the head of the LRU list
478
+ def insert_lru(idx, key)
479
+ node = LRUNode.new(key)
480
+ node.next = @lru_heads[idx]
481
+ @lru_heads[idx].prev = node if @lru_heads[idx]
482
+ @lru_heads[idx] = node
483
+ @lru_tails[idx] ||= node
484
+ @lru_nodes[idx][key] = node
485
+ end
486
+
487
+ # Promotes a key to the front of the LRU list
488
+ def promote_lru(idx, key)
489
+ node = @lru_nodes[idx][key]
490
+ return unless node && @lru_heads[idx] != node
491
+
492
+ remove_node(idx, node)
493
+ insert_lru(idx, key)
494
+ end
495
+
496
+ # Removes a node from the LRU list
497
+ def remove_node(idx, node)
498
+ if node.prev
499
+ node.prev.next = node.next
500
+ else
501
+ @lru_heads[idx] = node.next
502
+ end
503
+
504
+ if node.next
505
+ node.next.prev = node.prev
506
+ else
507
+ @lru_tails[idx] = node.prev
508
+ end
509
+ end
510
+
511
+ # Namespaces a key with an optional namespace
512
+ def namespaced_key(key, namespace = nil)
513
+ ns = namespace || Thread.current[:mudis_namespace]
514
+ ns ? "#{ns}:#{key}" : key
515
+ end
516
+
517
+ # Calculates the effective TTL for an entry, respecting max_ttl if set
518
+ def effective_ttl(expires_in)
519
+ ttl = expires_in || @default_ttl
520
+ return nil unless ttl
521
+ return ttl unless @max_ttl
522
+
523
+ [ttl, @max_ttl].min
524
+ end
525
+ end
526
+
527
+ class << self
528
+
529
+ # Saves the current cache state to disk for persistence
530
+ def save_snapshot!
531
+ return unless @persistence_enabled
532
+ data = snapshot_dump
533
+ safe_write_snapshot(data)
534
+ rescue => e
535
+ warn "[Mudis] Failed to save snapshot: #{e.class}: #{e.message}"
536
+ end
537
+
538
+ # Loads the cache state from disk for persistence
539
+ def load_snapshot!
540
+ return unless @persistence_enabled
541
+ return unless File.exist?(@persistence_path)
542
+ data = read_snapshot
543
+ snapshot_restore(data)
544
+ rescue => e
545
+ warn "[Mudis] Failed to load snapshot: #{e.class}: #{e.message}"
546
+ end
547
+
548
+ # Installs an at_exit hook to save the snapshot on process exit
549
+ def install_persistence_hook!
550
+ return unless @persistence_enabled
551
+ return if defined?(@persistence_hook_installed) && @persistence_hook_installed
552
+ at_exit { save_snapshot! }
553
+ @persistence_hook_installed = true
554
+ end
555
+ end
556
+
557
+ class << self
558
+ private
559
+ # Collect a JSON/Marshal-safe array of { key, value, expires_in }
560
+ def snapshot_dump
561
+ entries = []
562
+ now = Time.now
563
+ @buckets.times do |idx|
564
+ mutex = @mutexes[idx]
565
+ store = @stores[idx]
566
+ mutex.synchronize do
567
+ store.each do |key, raw|
568
+ exp_at = raw[:expires_at]
569
+ next if exp_at && now > exp_at
570
+ value = decompress_and_deserialize(raw[:value])
571
+ expires_in = exp_at ? (exp_at - now).to_i : nil
572
+ entries << { key: key, value: value, expires_in: expires_in }
573
+ end
574
+ end
575
+ end
576
+ entries
577
+ end
578
+
579
+ # Restore via existing write-path so LRU/limits/compression/TTL are honored
580
+ def snapshot_restore(entries)
581
+ return unless entries && !entries.empty?
582
+ entries.each do |e|
583
+ begin
584
+ write(e[:key], e[:value], expires_in: e[:expires_in])
585
+ rescue => ex
586
+ warn "[Mudis] Failed to restore key #{e[:key].inspect}: #{ex.message}"
587
+ end
588
+ end
589
+ end
590
+
591
+ # Serializer for snapshot persistence
592
+ # Defaults to Marshal if not JSON
593
+ def serializer_for_snapshot
594
+ (@persistence_format || :marshal).to_sym == :json ? JSON : :marshal
595
+ end
596
+
597
+ # Safely writes snapshot data to disk
598
+ # Uses safe write if configured
599
+ def safe_write_snapshot(data)
600
+ path = @persistence_path
601
+ dir = File.dirname(path)
602
+ Dir.mkdir(dir) unless Dir.exist?(dir)
603
+
604
+ payload =
605
+ if (@persistence_format || :marshal).to_sym == :json
606
+ serializer_for_snapshot.dump(data)
607
+ else
608
+ Marshal.dump(data)
609
+ end
610
+
611
+ if @persistence_safe_write
612
+ tmp = "#{path}.tmp-#{$$}-#{Thread.current.object_id}"
613
+ File.open(tmp, "wb") { |f| f.write(payload) }
614
+ File.rename(tmp, path)
615
+ else
616
+ File.open(path, "wb") { |f| f.write(payload) }
617
+ end
618
+ end
619
+
620
+ # Reads snapshot data from disk
621
+ # Uses safe read if configured
622
+ def read_snapshot
623
+ if (@persistence_format || :marshal).to_sym == :json
624
+ serializer_for_snapshot.load(File.binread(@persistence_path))
625
+ else
626
+ Marshal.load(File.binread(@persistence_path))
627
+ end
628
+ end
629
+ end
630
+
631
+ end
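
For reference, a minimal round-trip sketch under the assumption that persistence has been enabled as configured above: save_snapshot! serializes the live, non-expired entries together with their remaining TTLs to the configured path, and load_snapshot! replays them through the normal write path, so LRU tracking, compression, size caps and TTL limits are re-applied on restore. The key and value below are illustrative.

    Mudis.write("user:1", { "name" => "Ada" }, expires_in: 300)
    Mudis.save_snapshot!   # persist current entries; failures are rescued and reported via warn

    Mudis.reset!           # drop all in-memory state (config is kept)
    Mudis.load_snapshot!   # each saved entry is re-inserted via Mudis.write
    Mudis.read("user:1")   # => { "name" => "Ada" } with the default JSON serializer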