lazy_init 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +20 -0
- data/.rspec +4 -0
- data/CHANGELOG.md +0 -0
- data/GEMFILE +5 -0
- data/LICENSE +21 -0
- data/RAKEFILE +43 -0
- data/README.md +765 -0
- data/benchmarks/benchmark.rb +796 -0
- data/benchmarks/benchmark_performance.rb +250 -0
- data/benchmarks/benchmark_threads.rb +433 -0
- data/benchmarks/bottleneck_searcher.rb +381 -0
- data/benchmarks/thread_safety_verification.rb +376 -0
- data/lazy_init.gemspec +40 -0
- data/lib/lazy_init/class_methods.rb +549 -0
- data/lib/lazy_init/configuration.rb +57 -0
- data/lib/lazy_init/dependency_resolver.rb +226 -0
- data/lib/lazy_init/errors.rb +23 -0
- data/lib/lazy_init/instance_methods.rb +291 -0
- data/lib/lazy_init/lazy_value.rb +167 -0
- data/lib/lazy_init/version.rb +5 -0
- data/lib/lazy_init.rb +47 -0
- metadata +140 -0
# frozen_string_literal: true

module LazyInit
  # Handles dependency resolution and circular dependency detection for lazy attributes.
  #
  # This resolver maintains a dependency graph and provides thread-safe resolution
  # with caching to avoid redundant dependency checking. It prevents circular
  # dependencies and optimizes performance through intelligent caching strategies.
  #
  # @example Basic usage
  #   resolver = DependencyResolver.new(MyClass)
  #   resolver.add_dependency(:database, [:config])
  #   resolver.resolve_dependencies(:database, instance)
  #
  # @since 0.1.0
  class DependencyResolver
    # Initialize a new dependency resolver for the given class.
    #
    # Sets up internal data structures for dependency tracking, resolution
    # caching, and thread safety mechanisms. Two separate mutexes are used:
    # @mutex guards the graph/order structures, @cache_mutex guards the
    # per-instance resolution cache.
    #
    # @param target_class [Class] the class that owns the lazy attributes
    def initialize(target_class)
      @target_class = target_class
      @dependency_graph = {}
      @resolution_orders = {}
      @mutex = Mutex.new

      # per-instance caching to avoid redundant dependency resolution
      @instance_resolution_cache = {}
      @cache_mutex = Mutex.new
    end

    # Add a dependency relationship for an attribute.
    #
    # Records that the given attribute depends on other attributes and
    # pre-computes the resolution order for optimal performance.
    # Array() normalizes a single Symbol into a one-element array.
    #
    # @param attribute [Symbol] the attribute that has dependencies
    # @param dependencies [Array<Symbol>, Symbol] the attributes it depends on
    # @return [void]
    def add_dependency(attribute, dependencies)
      @mutex.synchronize do
        @dependency_graph[attribute] = Array(dependencies)
        @resolution_orders[attribute] = compute_resolution_order(attribute)
        invalidate_dependent_orders(attribute)
      end
    end

    # Get the pre-computed resolution order for an attribute.
    #
    # NOTE(review): reads @resolution_orders without holding @mutex; safe only
    # if attributes are registered before concurrent resolution begins — confirm.
    #
    # @param attribute [Symbol] the attribute to get resolution order for
    # @return [Array<Symbol>, nil] ordered list of dependencies to resolve
    def resolution_order_for(attribute)
      @resolution_orders[attribute]
    end

    # Resolve all dependencies for an attribute on a specific instance.
    #
    # Uses intelligent caching to avoid redundant resolution and provides
    # thread-safe dependency resolution with circular dependency detection.
    # The resolution is cached per-instance to optimize repeated access.
    #
    # NOTE(review): the cache key is built from instance.object_id; Ruby may
    # reuse object ids after GC, which could mark a fresh instance as already
    # resolved — verify whether instances are long-lived in practice.
    #
    # @param attribute [Symbol] the attribute whose dependencies to resolve
    # @param instance [Object] the instance to resolve dependencies on
    # @return [void]
    # @raise [DependencyError] if circular dependencies are detected
    def resolve_dependencies(attribute, instance)
      resolution_order = @resolution_orders[attribute]
      return unless resolution_order

      instance_key = instance.object_id
      cache_key = "#{instance_key}_#{attribute}"

      # fast path: if already resolved, skip everything
      return if dependency_resolved_cached?(cache_key)

      # prevent recursive mutex locking in nested dependency chains
      # (the thread-local flag is shared across ALL resolver instances on this thread)
      current_thread_resolving = Thread.current[:lazy_init_cache_resolving] ||= false

      if current_thread_resolving
        # we're already inside a resolution chain, skip caching to avoid deadlocks
        resolve_dependencies_direct(attribute, instance, resolution_order)
        return
      end

      # thread-safe cache update for top-level calls only
      @cache_mutex.synchronize do
        # double-check pattern after acquiring lock
        return if dependency_resolved_cached?(cache_key)

        # mark this thread as currently resolving to prevent recursion
        Thread.current[:lazy_init_cache_resolving] = true

        begin
          resolve_dependencies_direct(attribute, instance, resolution_order)
          mark_dependency_resolved(cache_key)
        ensure
          # always clean up thread state
          Thread.current[:lazy_init_cache_resolving] = false
        end
      end
    end

    private

    # Perform direct dependency resolution without caching overhead.
    #
    # This is the core resolution logic that handles circular dependency
    # detection and ensures dependencies are resolved in correct order.
    # A thread-local stack records the attributes currently being resolved;
    # re-encountering one means a cycle.
    #
    # @param attribute [Symbol] the attribute being resolved
    # @param instance [Object] the target instance
    # @param resolution_order [Array<Symbol>] pre-computed dependency order
    # @return [void]
    # @raise [DependencyError] if circular dependencies detected
    def resolve_dependencies_direct(attribute, instance, resolution_order)
      # track resolution stack to detect circular dependencies
      resolution_stack = Thread.current[:lazy_init_resolution_stack] ||= []

      if resolution_stack.include?(attribute)
        raise LazyInit::DependencyError,
              "Circular dependency detected: #{resolution_stack.join(' -> ')} -> #{attribute}"
      end

      resolution_stack.push(attribute)

      begin
        # optimization: only resolve dependencies that aren't already computed
        unresolved_deps = resolution_order.reject do |dep|
          instance_computed?(instance, dep)
        end

        # trigger computation for unresolved dependencies by invoking their readers
        unresolved_deps.each do |dep|
          instance.send(dep)
        end
      ensure
        # always clean up resolution stack; drop the thread-local entirely once empty
        resolution_stack.pop
        Thread.current[:lazy_init_resolution_stack] = nil if resolution_stack.empty?
      end
    end

    # Check if dependency resolution is already cached for this instance.
    #
    # @param cache_key [String] unique key for instance+attribute combination
    # @return [Boolean] true if dependencies already resolved
    def dependency_resolved_cached?(cache_key)
      @instance_resolution_cache[cache_key] == true
    end

    # Mark dependencies as resolved in the cache.
    #
    # Also handles cache cleanup to prevent memory leaks when cache grows too large.
    # Only called while @cache_mutex is held (from resolve_dependencies).
    #
    # @param cache_key [String] unique key for instance+attribute combination
    # @return [void]
    def mark_dependency_resolved(cache_key)
      @instance_resolution_cache[cache_key] = true

      # prevent memory leaks by cleaning up oversized cache (hard-coded 1000-entry cap)
      return unless @instance_resolution_cache.size > 1000

      cleanup_resolution_cache
    end

    # Clean up old cache entries to prevent unbounded memory growth.
    #
    # Removes the oldest 25% of cache entries when cache size exceeds limits.
    # "Oldest" here means insertion order (Ruby hashes preserve it), so this is
    # FIFO eviction rather than true LRU.
    #
    # @return [void]
    def cleanup_resolution_cache
      entries_to_remove = @instance_resolution_cache.size / 4
      keys_to_remove = @instance_resolution_cache.keys.first(entries_to_remove)
      keys_to_remove.each { |key| @instance_resolution_cache.delete(key) }
    end

    # Check if an attribute is already computed on the given instance.
    #
    # This checks for LazyValue-based attributes by looking for the lazy value
    # wrapper (ivar "@<attribute>_lazy_value") and checking its computed state.
    # Returns nil (falsy) when no wrapper exists yet.
    #
    # @param instance [Object] the instance to check
    # @param attribute [Symbol] the attribute to check
    # @return [Boolean] true if the attribute has been computed
    def instance_computed?(instance, attribute)
      lazy_value = instance.instance_variable_get("@#{attribute}_lazy_value")
      lazy_value&.computed?
    end

    # Compute the resolution order for a given attribute.
    #
    # Currently uses a simple approach that just returns the direct dependencies
    # (no transitive expansion or topological sort). Future versions could
    # implement more sophisticated dependency ordering.
    #
    # @param start_attribute [Symbol] the attribute to compute order for
    # @return [Array<Symbol>] ordered list of dependencies
    def compute_resolution_order(start_attribute)
      dependencies = @dependency_graph[start_attribute]
      return [] unless dependencies && dependencies.any?

      dependencies.dup
    end

    # Invalidate cached resolution orders when dependencies change.
    #
    # When an attribute's dependencies change, any attributes whose order
    # mentions it get their resolution orders recalculated. Updates are staged
    # in a separate hash to avoid mutating @resolution_orders mid-iteration.
    # Only called while @mutex is held (from add_dependency).
    #
    # @param changed_attribute [Symbol] the attribute whose dependencies changed
    # @return [void]
    def invalidate_dependent_orders(changed_attribute)
      orders_to_update = {}

      @resolution_orders.each do |attribute, order|
        orders_to_update[attribute] = compute_resolution_order(attribute) if order.include?(changed_attribute)
      end

      orders_to_update.each do |attribute, new_order|
        @resolution_orders[attribute] = new_order
      end
    end
  end
end
# frozen_string_literal: true

module LazyInit
  # Root of the LazyInit exception hierarchy.
  #
  # Rescue this class to catch any error raised by the library.
  #
  # @since 0.1.0
  class Error < StandardError
  end

  # Raised when an invalid attribute name is provided to lazy_attr_reader.
  #
  # @since 0.1.0
  class InvalidAttributeNameError < Error
  end

  # Raised when lazy initialization exceeds the configured timeout.
  #
  # @since 0.1.0
  class TimeoutError < Error
  end

  # Raised when circular dependencies are detected in attribute resolution.
  #
  # @since 0.1.0
  class DependencyError < Error
  end
end
# frozen_string_literal: true

module LazyInit
  # Provides instance-level utility methods for lazy initialization patterns.
  #
  # This module is automatically included when a class includes LazyInit (as opposed to extending it).
  # It provides method-local memoization capabilities that are useful for expensive computations
  # that need to be cached per method call location rather than per attribute.
  #
  # The lazy_once method is particularly powerful as it provides automatic caching based on
  # the caller location, making it easy to add memoization to any method without explicit
  # cache key management.
  #
  # @example Basic lazy value creation
  #   class DataProcessor
  #     include LazyInit
  #
  #     def process_data
  #       expensive_parser = lazy { ExpensiveParser.new }
  #       expensive_parser.value.parse(data)
  #     end
  #   end
  #
  # @example Method-local memoization
  #   class ApiClient
  #     include LazyInit
  #
  #     def fetch_user_data(user_id)
  #       lazy_once(ttl: 5.minutes) do
  #         expensive_api_call(user_id)
  #       end
  #     end
  #   end
  #
  # @since 0.1.0
  module InstanceMethods
    # Create a standalone lazy value container.
    #
    # This is a simple factory method that creates a LazyValue instance.
    # Useful when you need lazy initialization behavior but don't want to
    # define a formal lazy attribute on the class.
    #
    # @param block [Proc] the computation to execute lazily
    # @return [LazyValue] a new lazy value container
    # @raise [ArgumentError] if no block is provided
    #
    # @example Standalone lazy computation
    #   def expensive_calculation
    #     result = lazy { perform_heavy_computation }
    #     result.value
    #   end
    def lazy(&block)
      LazyValue.new(&block)
    end

    # Method-local memoization with automatic cache key generation.
    #
    # Caches computation results based on the caller location (file and line number),
    # providing automatic memoization without explicit key management. Each unique
    # call site gets its own cache entry with optional TTL and LRU eviction.
    #
    # This is particularly useful for expensive computations in methods that are
    # called frequently but where the result can be cached for a period of time.
    #
    # NOTE(review): a lambda/proc passed from a shared call site maps to one
    # cache key per source location, not per argument — callers with varying
    # inputs (like user_id above) share one entry per site; confirm intended.
    #
    # @param max_entries [Integer, nil] maximum cache entries before LRU eviction
    # @param ttl [Numeric, nil] time-to-live in seconds for cache entries
    # @param block [Proc] the computation to cache
    # @return [Object] the computed or cached value
    # @raise [ArgumentError] if no block is provided
    #
    # @example Simple method memoization
    #   def expensive_data_processing
    #     lazy_once do
    #       perform_heavy_computation
    #     end
    #   end
    #
    # @example With TTL and size limits
    #   def fetch_external_data
    #     lazy_once(ttl: 30.seconds, max_entries: 100) do
    #       external_api.fetch_data
    #     end
    #   end
    def lazy_once(max_entries: nil, ttl: nil, &block)
      raise ArgumentError, 'Block is required' unless block

      # apply global configuration defaults
      max_entries ||= LazyInit.configuration.max_lazy_once_entries
      ttl ||= LazyInit.configuration.lazy_once_ttl

      # generate cache key from caller location for automatic memoization
      call_location = caller_locations(1, 1).first
      location_key = "#{call_location.path}:#{call_location.lineno}"

      # ensure thread-safe cache initialization
      # NOTE(review): `||=` on the mutex itself is racy on first call under
      # truly parallel runtimes; benign under MRI's GVL — confirm target rubies.
      @lazy_once_mutex ||= Mutex.new

      # fast path: check cache outside mutex for performance (benign read race;
      # every mutation below re-checks under the mutex)
      if @lazy_once_cache&.key?(location_key)
        cached_entry = @lazy_once_cache[location_key]

        # handle TTL expiration if configured
        if ttl && Time.now - cached_entry[:created_at] > ttl
          @lazy_once_mutex.synchronize do
            # double-check TTL after acquiring lock
            if @lazy_once_cache&.key?(location_key)
              cached_entry = @lazy_once_cache[location_key]
              if Time.now - cached_entry[:created_at] > ttl
                # expired: delete here, recompute in the slow path below
                @lazy_once_cache.delete(location_key)
              else
                # entry is still valid, update access tracking and return
                cached_entry[:access_count] += 1
                cached_entry[:last_accessed] = Time.now if ttl
                return cached_entry[:value]
              end
            end
          end
        else
          # cache hit: update access tracking in thread-safe manner
          @lazy_once_mutex.synchronize do
            if @lazy_once_cache&.key?(location_key)
              cached_entry = @lazy_once_cache[location_key]
              cached_entry[:access_count] += 1
              cached_entry[:last_accessed] = Time.now if ttl
              return cached_entry[:value]
            end
          end
        end
      end

      # slow path: compute value and cache result
      @lazy_once_mutex.synchronize do
        # double-check pattern: another thread might have computed while we waited
        if @lazy_once_cache&.key?(location_key)
          cached_entry = @lazy_once_cache[location_key]

          # verify TTL hasn't expired while we waited for the lock
          if ttl && Time.now - cached_entry[:created_at] > ttl
            @lazy_once_cache.delete(location_key)
          else
            cached_entry[:access_count] += 1
            cached_entry[:last_accessed] = Time.now if ttl
            return cached_entry[:value]
          end
        end

        # initialize cache storage if this is the first lazy_once call
        @lazy_once_cache ||= {}

        # perform LRU cleanup if cache is getting too large
        # NOTE(review): the helper returns early unless size > max_entries, so
        # at size == max_entries this call is a no-op and the insert below
        # pushes the cache one past the limit — confirm intended.
        cleanup_lazy_once_cache_simple!(max_entries) if @lazy_once_cache.size >= max_entries

        # compute the value and store in cache with minimal metadata
        begin
          computed_value = block.call

          # create cache entry with minimal required metadata for performance
          cache_entry = {
            value: computed_value,
            access_count: 1
          }

          # add optional metadata only when features are actually used
          cache_entry[:created_at] = Time.now if ttl
          cache_entry[:last_accessed] = Time.now if ttl

          @lazy_once_cache[location_key] = cache_entry
          computed_value
        rescue StandardError => e
          # don't cache exceptions to keep implementation simple
          # NOTE(review): `e` is bound but unused; a bare `rescue StandardError`
          # would satisfy lint without changing behavior.
          raise
        end
      end
    end

    # Clear all cached lazy_once values for this instance.
    #
    # This method is thread-safe and can be used to reset all method-local
    # memoization caches, useful for testing or when you need to ensure
    # fresh computation on subsequent calls.
    #
    # @return [void]
    def clear_lazy_once_values!
      @lazy_once_mutex ||= Mutex.new
      @lazy_once_mutex.synchronize do
        @lazy_once_cache&.clear
      end
    end

    # Get detailed information about all cached lazy_once values.
    #
    # Returns a hash mapping call locations to their cache metadata,
    # useful for debugging and understanding cache behavior.
    # created_at/last_accessed are nil for entries cached without a TTL.
    #
    # @return [Hash<String, Hash>] mapping of call locations to cache information
    #
    # @example Inspecting cache state
    #   processor = DataProcessor.new
    #   processor.some_cached_method
    #   info = processor.lazy_once_info
    #   puts info # => { "/path/to/file.rb:42" => { computed: true, access_count: 1, ... } }
    def lazy_once_info
      @lazy_once_mutex ||= Mutex.new
      @lazy_once_mutex.synchronize do
        return {} unless @lazy_once_cache

        result = {}
        @lazy_once_cache.each do |location_key, entry|
          result[location_key] = {
            computed: true, # always true in this implementation since we don't cache exceptions
            exception: false, # we don't cache exceptions for simplicity
            created_at: entry[:created_at],
            access_count: entry[:access_count],
            last_accessed: entry[:last_accessed]
          }
        end
        result
      end
    end

    # Get statistical summary of lazy_once cache usage.
    #
    # Provides aggregated information about cache performance including
    # total entries, access patterns, and timing information.
    #
    # @return [Hash] statistical summary of cache usage
    #
    # @example Monitoring cache performance
    #   stats = processor.lazy_once_statistics
    #   puts "Cache hit ratio: #{stats[:total_accesses] / stats[:total_entries].to_f}"
    #   puts "Average accesses per entry: #{stats[:average_accesses]}"
    def lazy_once_statistics
      @lazy_once_mutex ||= Mutex.new
      @lazy_once_mutex.synchronize do
        # return empty stats if no cache exists yet
        unless @lazy_once_cache
          return {
            total_entries: 0,
            computed_entries: 0,
            oldest_entry: nil,
            newest_entry: nil,
            total_accesses: 0,
            average_accesses: 0
          }
        end

        total_entries = @lazy_once_cache.size
        total_accesses = @lazy_once_cache.values.sum { |entry| entry[:access_count] }

        # extract creation timestamps for age analysis (Ruby 2.6 compatible);
        # compact drops nil timestamps from non-TTL entries
        created_times = @lazy_once_cache.values.map { |entry| entry[:created_at] }.compact

        {
          total_entries: total_entries,
          computed_entries: total_entries, # all cached entries are successfully computed
          oldest_entry: created_times.min,
          newest_entry: created_times.max,
          total_accesses: total_accesses,
          average_accesses: total_entries > 0 ? total_accesses / total_entries.to_f : 0
        }
      end
    end

    private

    # Perform simple LRU-style cache cleanup to prevent unbounded memory growth.
    #
    # Removes the least recently used entries when cache size exceeds limits.
    # Uses a simple strategy: trim back to ~75% of max_entries to avoid
    # frequent cleanup overhead. Callers hold @lazy_once_mutex.
    #
    # @param max_entries [Integer] the maximum number of entries to maintain
    # @return [void]
    def cleanup_lazy_once_cache_simple!(max_entries)
      return unless @lazy_once_cache.size > max_entries

      # remove enough entries to bring the cache down to 75% of the limit
      entries_to_remove = @lazy_once_cache.size - (max_entries * 0.75).to_i

      # use LRU eviction if we have access time tracking, otherwise just remove oldest entries
      # NOTE(review): only the FIRST entry is probed for :last_accessed; if TTL
      # and non-TTL call sites are mixed, entries lacking the key sort as
      # Time.at(0) and are evicted first — confirm acceptable.
      if @lazy_once_cache.values.first[:last_accessed] # has TTL metadata with access tracking
        # sort by last access time and remove least recently used
        sorted_entries = @lazy_once_cache.sort_by { |_, entry| entry[:last_accessed] || Time.at(0) }
        sorted_entries.first(entries_to_remove).each { |key, _| @lazy_once_cache.delete(key) }
      else
        # no access time tracking available, just remove arbitrary (insertion-order) entries for speed
        keys_to_remove = @lazy_once_cache.keys.first(entries_to_remove)
        keys_to_remove.each { |key| @lazy_once_cache.delete(key) }
      end
    end
  end
end