factorix 0.6.0 → 0.7.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +26 -0
- data/exe/factorix +17 -0
- data/lib/factorix/api/mod_download_api.rb +10 -5
- data/lib/factorix/api/mod_portal_api.rb +6 -49
- data/lib/factorix/cache/base.rb +116 -0
- data/lib/factorix/cache/entry.rb +25 -0
- data/lib/factorix/cache/file_system.rb +137 -57
- data/lib/factorix/cache/redis.rb +287 -0
- data/lib/factorix/cache/s3.rb +388 -0
- data/lib/factorix/cli/commands/cache/evict.rb +17 -22
- data/lib/factorix/cli/commands/cache/stat.rb +57 -58
- data/lib/factorix/cli/commands/download_support.rb +1 -6
- data/lib/factorix/cli/commands/mod/download.rb +2 -3
- data/lib/factorix/cli/commands/mod/edit.rb +1 -4
- data/lib/factorix/cli/commands/mod/image/add.rb +1 -4
- data/lib/factorix/cli/commands/mod/image/edit.rb +1 -4
- data/lib/factorix/cli/commands/mod/image/list.rb +1 -4
- data/lib/factorix/cli/commands/mod/install.rb +2 -3
- data/lib/factorix/cli/commands/mod/search.rb +2 -3
- data/lib/factorix/cli/commands/mod/show.rb +2 -3
- data/lib/factorix/cli/commands/mod/sync.rb +2 -3
- data/lib/factorix/cli/commands/mod/update.rb +6 -39
- data/lib/factorix/cli/commands/mod/upload.rb +1 -4
- data/lib/factorix/cli/commands/portal_support.rb +27 -0
- data/lib/factorix/container.rb +27 -13
- data/lib/factorix/errors.rb +3 -0
- data/lib/factorix/http/cache_decorator.rb +5 -5
- data/lib/factorix/info_json.rb +5 -5
- data/lib/factorix/portal.rb +3 -2
- data/lib/factorix/transfer/downloader.rb +19 -11
- data/lib/factorix/version.rb +1 -1
- data/lib/factorix.rb +45 -53
- data/sig/factorix/api/mod_download_api.rbs +1 -2
- data/sig/factorix/cache/base.rbs +28 -0
- data/sig/factorix/cache/entry.rbs +14 -0
- data/sig/factorix/cache/file_system.rbs +7 -6
- data/sig/factorix/cache/redis.rbs +36 -0
- data/sig/factorix/cache/s3.rbs +38 -0
- data/sig/factorix/errors.rbs +3 -0
- data/sig/factorix/portal.rbs +1 -1
- metadata +25 -2
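
Judging by the file list and hunk sizes, the headline change is pluggable cache storage: a new `Cache::Base` abstraction with an `Entry` value object, a reworked file-system backend, and new Redis and S3 backends (both reproduced in full below). A minimal configuration sketch, assembled from the `@example` blocks in the two new backend files; the bucket name and region are the documentation's sample values:

```ruby
Factorix.configure do |config|
  # Redis backend for API responses (values from the Redis backend's @example)
  config.cache.api.backend = :redis
  config.cache.api.redis.url = "redis://localhost:6379/0"
  config.cache.api.redis.lock_timeout = 30

  # S3 backend for mod downloads (values from the S3 backend's @example)
  config.cache.download.backend = :s3
  config.cache.download.s3.bucket = "factorix-develop"
  config.cache.download.s3.region = "ap-northeast-1"
  config.cache.download.s3.lock_timeout = 30
end
```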
data/lib/factorix/cache/redis.rb (new file, @@ -0,0 +1,287 @@):

```ruby
# frozen_string_literal: true

begin
  require "redis"
rescue LoadError
  raise Factorix::Error, "redis gem is required for Redis cache backend. Add it to your Gemfile."
end

require "securerandom"

module Factorix
  module Cache
    # Redis-based cache storage implementation.
    #
    # Stores cache entries in Redis with automatic namespace prefixing.
    # Metadata (size, created_at) stored in separate hash keys.
    # Supports distributed locking with Lua script for atomic release.
    #
    # @example Configuration
    #   Factorix.configure do |config|
    #     config.cache.api.backend = :redis
    #     config.cache.api.redis.url = "redis://localhost:6379/0"
    #     config.cache.api.redis.lock_timeout = 30
    #   end
    class Redis < Base
      # @!parse
      #   # @return [Dry::Logger::Dispatcher]
      #   attr_reader :logger
      include Import[:logger]

      # Default timeout for distributed lock acquisition in seconds.
      DEFAULT_LOCK_TIMEOUT = 30
      public_constant :DEFAULT_LOCK_TIMEOUT

      # TTL for distributed locks in seconds.
      LOCK_TTL = 30
      private_constant :LOCK_TTL

      # Lua script for atomic lock release (only release if we own it).
      RELEASE_LOCK_SCRIPT = <<~LUA
        if redis.call("get", KEYS[1]) == ARGV[1] then
          return redis.call("del", KEYS[1])
        else
          return 0
        end
      LUA
      private_constant :RELEASE_LOCK_SCRIPT

      # Initialize a new Redis cache storage.
      #
      # @param url [String, nil] Redis URL (defaults to REDIS_URL env)
      # @param cache_type [String, Symbol] Cache type for namespace (e.g., :api, :download)
      # @param lock_timeout [Integer] Timeout for lock acquisition in seconds
      # @param ttl [Integer, nil] time-to-live in seconds (nil for unlimited)
      def initialize(cache_type:, url: nil, lock_timeout: DEFAULT_LOCK_TIMEOUT, **)
        super(**)
        @url = url || ENV.fetch("REDIS_URL", nil)
        @redis = ::Redis.new(url: @url)
        @namespace = "factorix-cache:#{cache_type}"
        @lock_timeout = lock_timeout
        logger.info("Initializing Redis cache", namespace: @namespace, ttl: @ttl, lock_timeout: @lock_timeout)
      end

      # Check if a cache entry exists.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if the cache entry exists
      def exist?(key) = @redis.exists?(data_key(key))

      # Read a cached entry.
      #
      # @param key [String] logical cache key
      # @return [String, nil] cached content or nil if not found
      def read(key)
        @redis.get(data_key(key))
      end

      # Write cached content to a file.
      #
      # @param key [String] logical cache key
      # @param output [Pathname] path to write the cached content
      # @return [Boolean] true if written successfully, false if not found
      def write_to(key, output)
        data = @redis.get(data_key(key))
        return false if data.nil?

        output.binwrite(data)
        logger.debug("Cache hit", key:)
        true
      end

      # Store data in the cache.
      #
      # @param key [String] logical cache key
      # @param src [Pathname] path to the source file
      # @return [Boolean] true if stored successfully
      def store(key, src)
        data = src.binread
        data_k = data_key(key)
        meta_k = meta_key(key)

        @redis.multi do |tx|
          tx.set(data_k, data)
          tx.hset(meta_k, "size", data.bytesize, "created_at", Time.now.to_i)

          if @ttl
            tx.expire(data_k, @ttl)
            tx.expire(meta_k, @ttl)
          end
        end

        logger.debug("Stored in cache", key:, size_bytes: data.bytesize)
        true
      end

      # Delete a cache entry.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if deleted, false if not found
      def delete(key)
        deleted = @redis.del(data_key(key), meta_key(key))
        logger.debug("Deleted from cache", key:) if deleted.positive?
        deleted.positive?
      end

      # Clear all cache entries in this namespace.
      #
      # @return [void]
      def clear
        logger.info("Clearing Redis cache namespace", namespace: @namespace)
        count = 0
        cursor = "0"
        pattern = "#{@namespace}:*"

        loop do
          cursor, keys = @redis.scan(cursor, match: pattern, count: 100)
          unless keys.empty?
            @redis.del(*keys)
            count += keys.size
          end
          break if cursor == "0"
        end

        logger.info("Cache cleared", keys_removed: count)
      end

      # Get the age of a cache entry in seconds.
      #
      # @param key [String] logical cache key
      # @return [Integer, nil] age in seconds, or nil if entry doesn't exist
      def age(key)
        value = @redis.hget(meta_key(key), "created_at")
        return nil if value.nil?

        created_at = Integer(value, 10)
        return nil if created_at.zero?

        Time.now.to_i - created_at
      end

      # Check if a cache entry has expired.
      # With Redis native EXPIRE, non-existent keys are considered expired.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if expired (or doesn't exist), false otherwise
      def expired?(key) = !exist?(key)

      # Get the size of a cached entry in bytes.
      #
      # @param key [String] logical cache key
      # @return [Integer, nil] size in bytes, or nil if entry doesn't exist
      def size(key)
        return nil unless exist?(key)

        value = @redis.hget(meta_key(key), "size")
        value.nil? ? nil : Integer(value, 10)
      end

      # Execute a block with a distributed lock.
      # Uses Redis SET NX EX for lock acquisition and Lua script for atomic release.
      #
      # @param key [String] logical cache key
      # @yield block to execute with lock held
      # @raise [LockTimeoutError] if lock cannot be acquired within timeout
      def with_lock(key)
        lkey = lock_key(key)
        lock_value = SecureRandom.uuid
        deadline = Time.now + @lock_timeout

        until @redis.set(lkey, lock_value, nx: true, ex: LOCK_TTL)
          raise LockTimeoutError, "Failed to acquire lock for key: #{key}" if Time.now > deadline

          sleep 0.1
        end

        logger.debug("Acquired lock", key:)
        begin
          yield
        ensure
          @redis.eval(RELEASE_LOCK_SCRIPT, keys: [lkey], argv: [lock_value])
          logger.debug("Released lock", key:)
        end
      end

      # Enumerate cache entries.
      #
      # @yield [key, entry] logical key and Entry object
      # @yieldparam key [String] logical cache key
      # @yieldparam entry [Entry] cache entry metadata
      # @return [Enumerator] if no block given
      def each
        return enum_for(__method__) unless block_given?

        cursor = "0"
        pattern = "#{@namespace}:*"

        loop do
          cursor, keys = @redis.scan(cursor, match: pattern, count: 100)

          keys.each do |data_k|
            next if data_k.include?(":meta:") || data_k.include?(":lock:")

            logical_key = logical_key_from_data_key(data_k)
            meta = @redis.hgetall(meta_key(logical_key))

            entry = Entry.new(
              size: meta["size"] ? Integer(meta["size"], 10) : 0,
              age: meta["created_at"] ? Time.now.to_i - Integer(meta["created_at"], 10) : 0,
              expired: false # Redis handles expiry natively
            )

            yield logical_key, entry
          end

          break if cursor == "0"
        end
      end

      # Return backend-specific information.
      #
      # @return [Hash] backend configuration
      def backend_info
        {
          type: "redis",
          url: mask_url(@url),
          namespace: @namespace,
          lock_timeout: @lock_timeout
        }
      end

      # Generate data key for the given logical key.
      #
      # @param logical_key [String] logical key
      # @return [String] namespaced data key
      private def data_key(logical_key) = "#{@namespace}:#{logical_key}"

      # Generate metadata key for the given logical key.
      #
      # @param logical_key [String] logical key
      # @return [String] namespaced metadata key
      private def meta_key(logical_key) = "#{@namespace}:meta:#{logical_key}"

      # Generate lock key for the given logical key.
      #
      # @param logical_key [String] logical key
      # @return [String] namespaced lock key
      private def lock_key(logical_key) = "#{@namespace}:lock:#{logical_key}"

      # Extract logical key from data key.
      #
      # @param data_k [String] namespaced data key
      # @return [String] logical key
      private def logical_key_from_data_key(data_k) = data_k.delete_prefix("#{@namespace}:")

      DEFAULT_URL = "redis://localhost:6379/0"
      private_constant :DEFAULT_URL

      # Mask credentials in Redis URL for safe display.
      #
      # @param url [String, nil] Redis URL
      # @return [String] URL with credentials masked (defaults to redis://localhost:6379/0)
      private def mask_url(url)
        URI.parse(url || DEFAULT_URL).tap {|uri| uri.userinfo = "***:***" if uri.userinfo }.to_s
      end
    end
  end
end
```
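A hypothetical usage sketch for the backend above, not part of the diff: it assumes the Factorix container can resolve the `:logger` dependency injected by `Import[:logger]`, and that `Base#initialize` accepts the `ttl:` keyword as its docs state.

```ruby
require "factorix"
require "pathname"

cache = Factorix::Cache::Redis.new(
  cache_type: :api,
  url: "redis://localhost:6379/0", # nil falls back to ENV["REDIS_URL"]
  lock_timeout: 10,
  ttl: 600 # store() sets EXPIRE 600 on both the data and meta keys
)

src = Pathname("mods.json") # illustrative file
cache.with_lock("mod-list") do
  # One process at a time runs this block; the lock key auto-expires
  # after LOCK_TTL (30 s) if its holder dies without releasing it.
  cache.store("mod-list", src) unless cache.exist?("mod-list")
end

cache.read("mod-list")   # => cached bytes, or nil
cache.size("mod-list")   # => Integer from the meta hash's "size" field
cache.delete("mod-list") # removes both the data key and the meta key
```

The Lua release script is what makes the `ensure` block safe: DEL fires only while the stored UUID still matches, so a holder whose lock already expired and was re-acquired elsewhere cannot delete the new owner's lock.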
data/lib/factorix/cache/s3.rb (new file, @@ -0,0 +1,388 @@):

```ruby
# frozen_string_literal: true

begin
  require "aws-sdk-s3"
rescue LoadError
  raise Factorix::Error, "aws-sdk-s3 gem is required for S3 cache backend. Add it to your Gemfile."
end

require "securerandom"

module Factorix
  module Cache
    # S3-based cache storage implementation.
    #
    # Stores cache entries in AWS S3 with automatic prefix generation.
    # TTL is managed via custom metadata on objects.
    # Supports distributed locking using conditional PUT operations.
    #
    # @example Configuration
    #   Factorix.configure do |config|
    #     config.cache.download.backend = :s3
    #     config.cache.download.s3.bucket = "factorix-develop"
    #     config.cache.download.s3.region = "ap-northeast-1"
    #     config.cache.download.s3.lock_timeout = 30
    #   end
    class S3 < Base
      # @!parse
      #   # @return [Dry::Logger::Dispatcher]
      #   attr_reader :logger
      include Import[:logger]

      # Default timeout for distributed lock acquisition in seconds.
      DEFAULT_LOCK_TIMEOUT = 30
      public_constant :DEFAULT_LOCK_TIMEOUT

      # TTL for distributed locks in seconds.
      LOCK_TTL = 30
      private_constant :LOCK_TTL

      # Metadata key for storing expiration timestamp.
      EXPIRES_AT_KEY = "expires-at"
      private_constant :EXPIRES_AT_KEY

      # Metadata key for storing logical key.
      LOGICAL_KEY_KEY = "logical-key"
      private_constant :LOGICAL_KEY_KEY

      # Initialize a new S3 cache storage.
      #
      # @param bucket [String] S3 bucket name (required)
      # @param region [String, nil] AWS region (defaults to AWS_REGION env or SDK default)
      # @param cache_type [String, Symbol] Cache type for prefix (e.g., :api, :download)
      # @param lock_timeout [Integer] Timeout for lock acquisition in seconds
      # @param ttl [Integer, nil] time-to-live in seconds (nil for unlimited)
      def initialize(bucket:, cache_type:, region: nil, lock_timeout: DEFAULT_LOCK_TIMEOUT, **)
        super(**)
        @client = Aws::S3::Client.new(**{region:}.compact)
        @bucket = bucket
        @prefix = "cache/#{cache_type}/"
        @lock_timeout = lock_timeout
        logger.info("Initializing S3 cache", bucket: @bucket, prefix: @prefix, ttl: @ttl, lock_timeout: @lock_timeout)
      end

      # Check if a cache entry exists and is not expired.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if the cache entry exists and is valid
      def exist?(key)
        head_object(key)
        !expired?(key)
      rescue Aws::S3::Errors::NotFound
        false
      end

      # Read a cached entry.
      #
      # @param key [String] logical cache key
      # @return [String, nil] cached content or nil if not found/expired
      def read(key)
        return nil if expired?(key)

        resp = @client.get_object(bucket: @bucket, key: storage_key(key))
        resp.body.read
      rescue Aws::S3::Errors::NoSuchKey
        nil
      end

      # Write cached content to a file.
      #
      # @param key [String] logical cache key
      # @param output [Pathname] path to write the cached content
      # @return [Boolean] true if written successfully, false if not found/expired
      def write_to(key, output)
        return false if expired?(key)

        @client.get_object(bucket: @bucket, key: storage_key(key), response_target: output.to_s)
        logger.debug("Cache hit", key:)
        true
      rescue Aws::S3::Errors::NoSuchKey
        false
      end

      # Store data in the cache.
      #
      # @param key [String] logical cache key
      # @param src [Pathname] path to the source file
      # @return [Boolean] true if stored successfully
      def store(key, src)
        metadata = {LOGICAL_KEY_KEY => key}
        metadata[EXPIRES_AT_KEY] = (Time.now.to_i + @ttl).to_s if @ttl

        @client.put_object(
          bucket: @bucket,
          key: storage_key(key),
          body: src.binread,
          metadata:
        )

        logger.debug("Stored in cache", key:, size_bytes: src.size)
        true
      end

      # Delete a cache entry.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if deleted, false if not found
      def delete(key)
        return false unless exist_without_expiry_check?(key)

        @client.delete_object(bucket: @bucket, key: storage_key(key))
        logger.debug("Deleted from cache", key:)
        true
      end

      # Clear all cache entries in this prefix.
      #
      # @return [void]
      def clear
        logger.info("Clearing S3 cache prefix", bucket: @bucket, prefix: @prefix)
        count = 0

        list_all_objects do |objects|
          keys_to_delete = objects.filter_map {|obj| {key: obj.key} unless obj.key.end_with?(".lock") }
          next if keys_to_delete.empty?

          @client.delete_objects(bucket: @bucket, delete: {objects: keys_to_delete})
          count += keys_to_delete.size
        end

        logger.info("Cache cleared", objects_removed: count)
      end

      # Get the age of a cache entry in seconds.
      #
      # @param key [String] logical cache key
      # @return [Float, nil] age in seconds, or nil if entry doesn't exist
      def age(key)
        resp = head_object(key)
        Time.now - resp.last_modified
      rescue Aws::S3::Errors::NotFound
        nil
      end

      # Check if a cache entry has expired based on TTL.
      #
      # @param key [String] logical cache key
      # @return [Boolean] true if expired, false otherwise
      def expired?(key)
        return false if @ttl.nil?

        resp = head_object(key)
        value = resp.metadata[EXPIRES_AT_KEY]
        return false if value.nil?

        Time.now.to_i > Integer(value, 10)
      rescue Aws::S3::Errors::NotFound
        true
      end

      # Get the size of a cached entry in bytes.
      #
      # @param key [String] logical cache key
      # @return [Integer, nil] size in bytes, or nil if entry doesn't exist/expired
      def size(key)
        return nil if expired?(key)

        resp = head_object(key)
        resp.content_length
      rescue Aws::S3::Errors::NotFound
        nil
      end

      # Execute a block with a distributed lock.
      # Uses conditional PUT for lock acquisition.
      #
      # @param key [String] logical cache key
      # @yield block to execute with lock held
      # @raise [LockTimeoutError] if lock cannot be acquired within timeout
      def with_lock(key)
        lkey = lock_key(key)
        lock_value = SecureRandom.uuid
        deadline = Time.now + @lock_timeout

        loop do
          if try_acquire_lock(lkey, lock_value)
            logger.debug("Acquired lock", key:)
            break
          end

          cleanup_stale_lock(lkey)
          raise LockTimeoutError, "Failed to acquire lock for key: #{key}" if Time.now > deadline

          sleep 0.1
        end

        begin
          yield
        ensure
          @client.delete_object(bucket: @bucket, key: lkey)
          logger.debug("Released lock", key:)
        end
      end

      # Enumerate cache entries.
      #
      # @yield [key, entry] logical key and Entry object
      # @yieldparam key [String] logical cache key
      # @yieldparam entry [Entry] cache entry metadata
      # @return [Enumerator] if no block given
      def each
        return enum_for(__method__) unless block_given?

        list_all_objects do |objects|
          objects.each do |obj|
            next if obj.key.end_with?(".lock")

            logical_key, entry = build_entry_with_metadata(obj)
            next if logical_key.nil? # Skip entries without logical key metadata

            yield logical_key, entry
          end
        end
      end

      # Return backend-specific information.
      #
      # @return [Hash] backend configuration
      def backend_info
        {
          type: "s3",
          bucket: @bucket,
          prefix: @prefix,
          lock_timeout: @lock_timeout
        }
      end

      # Generate a hashed internal key for the given logical key.
      # Uses SHA1 to create a unique, deterministic key.
      # Use Digest(:SHA1) instead of Digest::SHA1 for thread-safety (Ruby 2.2+)
      #
      # @param logical_key [String] logical key to hash
      # @return [String] SHA1 hash of the logical key
      private def storage_key_for(logical_key) = Digest(:SHA1).hexdigest(logical_key)

      # Generate storage key for the given logical key.
      #
      # @param logical_key [String] logical key
      # @return [String] prefixed hashed storage key
      private def storage_key(logical_key) = "#{@prefix}#{storage_key_for(logical_key)}"

      # Generate lock key for the given logical key.
      #
      # @param logical_key [String] logical key
      # @return [String] lock key
      private def lock_key(logical_key) = "#{@prefix}#{storage_key_for(logical_key)}.lock"

      # Get object metadata.
      #
      # @param key [String] logical key
      # @return [Aws::S3::Types::HeadObjectOutput] object metadata
      private def head_object(key)
        @client.head_object(bucket: @bucket, key: storage_key(key))
      end

      # Check if object exists without expiry check.
      #
      # @param key [String] logical key
      # @return [Boolean] true if exists
      private def exist_without_expiry_check?(key)
        head_object(key)
        true
      rescue Aws::S3::Errors::NotFound
        false
      end

      # Try to acquire a distributed lock.
      #
      # @param lkey [String] lock key
      # @param lock_value [String] unique lock value
      # @return [Boolean] true if lock acquired
      private def try_acquire_lock(lkey, lock_value)
        lock_body = "#{lock_value}:#{Time.now.to_i + LOCK_TTL}"
        @client.put_object(
          bucket: @bucket,
          key: lkey,
          body: lock_body,
          if_none_match: "*"
        )
        true
      rescue Aws::S3::Errors::PreconditionFailed
        false
      end

      # Clean up stale lock if expired.
      #
      # @param lkey [String] lock key
      private def cleanup_stale_lock(lkey)
        resp = @client.get_object(bucket: @bucket, key: lkey)
        lock_data = resp.body.read
        _lock_value, expires_at = lock_data.split(":")

        if expires_at && Time.now.to_i > Integer(expires_at, 10)
          @client.delete_object(bucket: @bucket, key: lkey)
          logger.debug("Cleaned up stale lock", key: lkey)
        end
      rescue Aws::S3::Errors::NoSuchKey
        # Lock doesn't exist, nothing to clean up
      end

      # List all objects in the prefix with pagination.
      #
      # @yield [Array<Aws::S3::Types::Object>] batch of objects
      private def list_all_objects
        continuation_token = nil

        loop do
          resp = @client.list_objects_v2(
            bucket: @bucket,
            prefix: @prefix,
            continuation_token:
          )

          yield resp.contents if resp.contents.any?

          break unless resp.is_truncated

          continuation_token = resp.next_continuation_token
        end
      end

      # Build an Entry from an S3 object, fetching metadata to get logical key.
      #
      # @param obj [Aws::S3::Types::Object] S3 object
      # @return [Array(String, Entry), Array(nil, nil)] logical key and entry, or nils if metadata missing
      private def build_entry_with_metadata(obj)
        resp = @client.head_object(bucket: @bucket, key: obj.key)
        logical_key = resp.metadata[LOGICAL_KEY_KEY]
        return [nil, nil] if logical_key.nil?

        age = Time.now - obj.last_modified
        expired = check_expired_from_head_response(resp)

        entry = Entry.new(
          size: obj.size,
          age:,
          expired:
        )

        [logical_key, entry]
      rescue Aws::S3::Errors::NotFound
        [nil, nil]
      end

      # Check if object is expired from head_object response.
      #
      # @param resp [Aws::S3::Types::HeadObjectOutput] head_object response
      # @return [Boolean] true if expired
      private def check_expired_from_head_response(resp)
        return false if @ttl.nil?

        value = resp.metadata[EXPIRES_AT_KEY]
        return false if value.nil?

        Time.now.to_i > Integer(value, 10)
      end
    end
  end
end
```
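
A matching sketch for the S3 backend, with the same caveats (`:logger` and `ttl:` assumptions; the bucket name is the documentation's sample value, and credentials come from the AWS SDK's default provider chain):

```ruby
require "factorix"
require "pathname"

cache = Factorix::Cache::S3.new(
  bucket: "factorix-develop",
  cache_type: :download,
  region: "ap-northeast-1",
  ttl: 86_400 # store() stamps expires-at = now + ttl into object metadata
)

zip = Pathname("some-mod_1.0.0.zip") # illustrative file
key = zip.basename.to_s

cache.with_lock(key) do
  cache.store(key, zip) unless cache.exist?(key)
end

# Objects live under cache/download/<SHA1(logical key)>; the logical key is
# recoverable only from the logical-key metadata, which each() reads back
# with one head_object call per entry.
cache.write_to(key, Pathname("restored.zip")) # => true on a fresh hit
```

Lock acquisition rides on S3 conditional writes: `put_object` with `if_none_match: "*"` raises `PreconditionFailed` when the lock object already exists, and `cleanup_stale_lock` compensates for holders that die without deleting their lock, since S3 has no native per-object expiry.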