lazy_init 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +20 -0
- data/.rspec +4 -0
- data/CHANGELOG.md +0 -0
- data/GEMFILE +5 -0
- data/LICENSE +21 -0
- data/RAKEFILE +43 -0
- data/README.md +765 -0
- data/benchmarks/benchmark.rb +796 -0
- data/benchmarks/benchmark_performance.rb +250 -0
- data/benchmarks/benchmark_threads.rb +433 -0
- data/benchmarks/bottleneck_searcher.rb +381 -0
- data/benchmarks/thread_safety_verification.rb +376 -0
- data/lazy_init.gemspec +40 -0
- data/lib/lazy_init/class_methods.rb +549 -0
- data/lib/lazy_init/configuration.rb +57 -0
- data/lib/lazy_init/dependency_resolver.rb +226 -0
- data/lib/lazy_init/errors.rb +23 -0
- data/lib/lazy_init/instance_methods.rb +291 -0
- data/lib/lazy_init/lazy_value.rb +167 -0
- data/lib/lazy_init/version.rb +5 -0
- data/lib/lazy_init.rb +47 -0
- metadata +140 -0
@@ -0,0 +1,250 @@
|
|
1
|
+
require 'benchmark'
|
2
|
+
require 'lazy_init'
|
3
|
+
|
4
|
+
|
5
|
+
# LazyInit vs Manual Performance Benchmark
|
6
|
+
# Testing with realistic production-like expensive operations:
|
7
|
+
|
8
|
+
# --- Configuration Parsing ---
|
9
|
+
# Description: JSON/YAML parsing with complex nested structures
|
10
|
+
|
11
|
+
# Warming up (performing actual expensive computation)...
|
12
|
+
# Initial computation time:
|
13
|
+
# Manual: 14.25ms
|
14
|
+
# LazyInit: 13.28ms
|
15
|
+
# Difference: -0.96ms
|
16
|
+
|
17
|
+
# Benchmarking cached access (100,000 iterations):
|
18
|
+
# user system total real
|
19
|
+
# Manual 0.012272 0.000034 0.012306 ( 0.012490)
|
20
|
+
# LazyInit 0.041375 0.000231 0.041606 ( 0.041994)
|
21
|
+
|
22
|
+
# ============================================================
|
23
|
+
|
24
|
+
# --- Cryptographic Operations ---
|
25
|
+
# Description: Secure token generation with multiple hash rounds
|
26
|
+
|
27
|
+
# Warming up (performing actual expensive computation)...
|
28
|
+
# Initial computation time:
|
29
|
+
# Manual: 4.65ms
|
30
|
+
# LazyInit: 2.37ms
|
31
|
+
# Difference: -2.28ms
|
32
|
+
|
33
|
+
# Benchmarking cached access (100,000 iterations):
|
34
|
+
# user system total real
|
35
|
+
# Manual 0.012351 0.000057 0.012408 ( 0.012445)
|
36
|
+
# LazyInit 0.042226 0.000220 0.042446 ( 0.042752)
|
37
|
+
|
38
|
+
# ============================================================
|
39
|
+
|
40
|
+
# --- Data Processing ---
|
41
|
+
# Description: ETL-style data aggregation and grouping
|
42
|
+
|
43
|
+
# Warming up (performing actual expensive computation)...
|
44
|
+
# Initial computation time:
|
45
|
+
# Manual: 2.61ms
|
46
|
+
# LazyInit: 2.84ms
|
47
|
+
# Difference: 0.22ms
|
48
|
+
|
49
|
+
# Benchmarking cached access (100,000 iterations):
|
50
|
+
# user system total real
|
51
|
+
# Manual 0.012233 0.000048 0.012281 ( 0.012375)
|
52
|
+
# LazyInit 0.044622 0.000262 0.044884 ( 0.045389)
|
53
|
+
|
54
|
+
# ============================================================
|
55
|
+
|
56
|
+
# Thread Safety Test
|
57
|
+
# Testing concurrent access to verify no race conditions...
|
58
|
+
# Computing in thread: 60
|
59
|
+
# Thread safety results:
|
60
|
+
# Unique object IDs: 1 (should be 1)
|
61
|
+
# All threads got same object: ✅ PASS
|
62
|
+
# Total threads: 10
|
63
|
+
# Test Environment: Ruby 3.0.2, x86_64
|
64
|
+
# Platform: x86_64-darwin19
|
65
|
+
#✅ Thread safety confirmed: All threads received the same computed object
|
66
|
+
|
67
|
+
# Shared pool of deliberately expensive computations used by the benchmark
# classes below. Each method performs real CPU work (string munging, hashing,
# aggregation) so that lazy-initialization costs are measured against a
# realistic baseline rather than a trivial stub.
module ExpensiveOperations
  # Builds a large nested configuration hash and burns CPU by repeatedly
  # scanning its serialized form. Returns the configuration hash.
  def parse_configuration
    settings = {
      database: {
        host: 'localhost',
        port: 5432,
        pool_size: 20,
        timeout: 30,
        retries: 3
      },
      redis: {
        url: 'redis://localhost:6379',
        pool_size: 10,
        timeout: 5
      },
      api_keys: (1..50).to_h { |i| ["key_#{i}", "secret_#{rand(100000)}"] },
      feature_flags: (1..100).to_h { |i| ["feature_#{i}", [true, false].sample] }
    }

    # Simulated parse cost: repeatedly regex-scan the serialized config.
    # The gsub results are intentionally discarded; only the work matters.
    dump = settings.to_s
    100.times { dump.gsub(/\d+/, &:to_i) }

    settings
  end

  # Derives a hex token by chaining 1000 rounds of SHA-256 over a
  # time-and-random seed. Returns the final 64-char hex digest string.
  def generate_secure_token
    require 'digest'

    seed = "secure_token_#{Time.now.to_f}_#{rand(1000000)}"

    # Fold the round index into each digest so every round differs.
    (0...1000).reduce(seed) do |digest, round|
      Digest::SHA256.hexdigest("#{digest}_#{round}")
    end
  end

  # ETL-style workload: synthesizes 1000 random records, groups them by
  # category, and returns per-category aggregates (count/total/avg/latest).
  def process_dataset
    records = (1..1000).map do |record_id|
      {
        id: record_id,
        timestamp: Time.now - rand(86400 * 30),
        value: rand(1000.0).round(2),
        category: ['A', 'B', 'C', 'D'][rand(4)],
        metadata: { source: "system_#{rand(10)}", priority: rand(5) }
      }
    end

    records.group_by { |rec| rec[:category] }.transform_values do |bucket|
      bucket_total = bucket.sum { |rec| rec[:value] }
      {
        count: bucket.size,
        total_value: bucket_total,
        avg_value: bucket_total / bucket.size.to_f,
        latest: bucket.max_by { |rec| rec[:timestamp] }
      }
    end
  end
end
|
129
|
+
|
130
|
+
# Hand-rolled lazy-initialization baseline: each reader memoizes the result
# of its expensive helper in an instance variable on first access.
class ManualClass
  include ExpensiveOperations

  # Parsed configuration hash, computed once on first call.
  def configuration
    @configuration = parse_configuration if @configuration.nil?
    @configuration
  end

  # Secure token string, computed once on first call.
  def secure_token
    @secure_token = generate_secure_token if @secure_token.nil?
    @secure_token
  end

  # Aggregated dataset, computed once on first call.
  def processed_data
    @processed_data = process_dataset if @processed_data.nil?
    @processed_data
  end
end
|
145
|
+
|
146
|
+
# Same three readers as ManualClass, but memoized via LazyInit's
# lazy_attr_reader DSL instead of hand-written ||= style caching.
# Each block runs once on first access; the result is cached thereafter.
class LazyClass
  extend LazyInit
  include ExpensiveOperations

  # Parsed configuration hash (see ExpensiveOperations#parse_configuration).
  lazy_attr_reader :configuration do
    parse_configuration
  end

  # Secure token string (see ExpensiveOperations#generate_secure_token).
  lazy_attr_reader :secure_token do
    generate_secure_token
  end

  # Aggregated dataset (see ExpensiveOperations#process_dataset).
  lazy_attr_reader :processed_data do
    process_dataset
  end
end
|
162
|
+
|
163
|
+
puts "LazyInit vs Manual Performance Benchmark"
puts "Testing with realistic production-like expensive operations:\n\n"

# Each scenario names one memoized reader shared by both classes;
# the same method symbol is sent to ManualClass and LazyClass instances.
scenarios = [
  {
    name: "Configuration Parsing",
    method: :configuration,
    description: "JSON/YAML parsing with complex nested structures"
  },
  {
    name: "Cryptographic Operations",
    method: :secure_token,
    description: "Secure token generation with multiple hash rounds"
  },
  {
    name: "Data Processing",
    method: :processed_data,
    description: "ETL-style data aggregation and grouping"
  }
]

scenarios.each do |scenario|
  puts "--- #{scenario[:name]} ---"
  puts "Description: #{scenario[:description]}\n\n"

  # Fresh instances per scenario so each first access pays full cost.
  manual = ManualClass.new
  lazy = LazyClass.new

  # Warm-up doubles as the cold-path measurement: the first send performs
  # the expensive computation and populates the cache.
  puts "Warming up (performing actual expensive computation)..."
  manual_start = Time.now
  manual.send(scenario[:method])
  manual_time = Time.now - manual_start

  lazy_start = Time.now
  lazy.send(scenario[:method])
  lazy_time = Time.now - lazy_start

  puts "Initial computation time:"
  puts " Manual: #{(manual_time * 1000).round(2)}ms"
  puts " LazyInit: #{(lazy_time * 1000).round(2)}ms"
  puts " Difference: #{((lazy_time - manual_time) * 1000).round(2)}ms\n\n"

  # Hot path: both caches are populated, so this measures pure read
  # overhead of ||= vs the LazyInit-generated reader.
  puts "Benchmarking cached access (100,000 iterations):"
  Benchmark.bm(10) do |x|
    x.report("Manual") { 100_000.times { manual.send(scenario[:method]) } }
    x.report("LazyInit") { 100_000.times { lazy.send(scenario[:method]) } }
  end

  puts "\n" + "="*60 + "\n\n"
end
|
213
|
+
|
214
|
+
puts "Thread Safety Test"
puts "Testing concurrent access to verify no race conditions...\n"

# Service whose lazy attribute logs the computing thread's id; if the
# initializer block ran more than once under contention, multiple
# "Computing in thread" lines would appear below.
class ThreadTestClass
  extend LazyInit
  include ExpensiveOperations

  lazy_attr_reader :thread_safe_data do
    puts "Computing in thread: #{Thread.current.object_id}"
    process_dataset
  end
end
|
226
|
+
|
227
|
+
service = ThreadTestClass.new

# Fan out 10 readers against one shared service instance and record the
# object_id each thread observed; all must be identical if the lazy
# initializer ran exactly once.
#
# FIX: collect ids through a Thread::Queue instead of pushing into a plain
# Array from multiple threads — Array#<< is not specified to be atomic, and
# a lost append here would corrupt the very race-condition check this
# script exists to perform.
id_queue = Queue.new
threads = 10.times.map do
  Thread.new do
    data = service.thread_safe_data
    id_queue << data.object_id
  end
end

threads.each(&:join)

# Drain after all threads have finished; order is irrelevant for uniq.
results = []
results << id_queue.pop until id_queue.empty?

puts "Thread safety results:"
puts " Unique object IDs: #{results.uniq.size} (should be 1)"
puts " All threads got same object: #{results.uniq.size == 1 ? '✅ PASS' : '❌ FAIL'}"
puts " Total threads: #{results.size}"
puts "Test Environment: Ruby #{RUBY_VERSION}, #{RbConfig::CONFIG['target_cpu']}"
puts "Platform: #{RUBY_PLATFORM}"

if results.uniq.size == 1
  puts "\n✅ Thread safety confirmed: All threads received the same computed object"
else
  puts "\n❌ Thread safety failed: Race condition detected!"
end
|
@@ -0,0 +1,433 @@
|
|
1
|
+
require 'lazy_init'
|
2
|
+
require 'benchmark'
|
3
|
+
|
4
|
+
# === THREAD SAFETY TESTS ===
|
5
|
+
# Testing LazyInit in production-like concurrent scenarios
|
6
|
+
|
7
|
+
# TEST 1: Traffic Spike Simulation
|
8
|
+
# Simulating 200 simultaneous user requests hitting cached resources
|
9
|
+
# ------------------------------------------------------------
|
10
|
+
# Starting traffic spike test...
|
11
|
+
# Loading DB config in thread: 60
|
12
|
+
# Creating connection pool in thread: 60
|
13
|
+
# Fetching permissions in thread: 60
|
14
|
+
|
15
|
+
# Traffic Spike Results:
|
16
|
+
# Duration: 472.91ms
|
17
|
+
# Threads: 200
|
18
|
+
# Successful requests: 200
|
19
|
+
# Errors: 0
|
20
|
+
|
21
|
+
# Race Condition Analysis:
|
22
|
+
# DB Config - Unique objects: 1 (should be 1)
|
23
|
+
# Connection Pool - Unique objects: 1 (should be 1)
|
24
|
+
# Permissions - Unique objects: 1 (should be 1)
|
25
|
+
# Result: ✅ PASS - No race conditions detected
|
26
|
+
|
27
|
+
# ============================================================
|
28
|
+
# TEST 2: Sustained Load Simulation
|
29
|
+
# Simulating background workers with sustained concurrent access
|
30
|
+
# ------------------------------------------------------------
|
31
|
+
# Starting sustained load test (30 workers x 50 operations each)...
|
32
|
+
# Initializing API client in thread: 140
|
33
|
+
# Fetching permissions in thread: 140
|
34
|
+
|
35
|
+
# Sustained Load Results:
|
36
|
+
# Duration: 0.25s
|
37
|
+
# Total operations: 1500
|
38
|
+
# Errors: 0
|
39
|
+
# Operations/second: 6039
|
40
|
+
|
41
|
+
# Consistency Analysis:
|
42
|
+
# API Client - Unique objects: 1 (should be 1)
|
43
|
+
# Shared Cache - Unique objects: 1 (should be 1)
|
44
|
+
# Result: ✅ PASS - Consistency maintained
|
45
|
+
|
46
|
+
# ============================================================
|
47
|
+
# TEST 3: Dependency Chain Stress Test
|
48
|
+
# Testing complex dependency resolution under concurrent pressure
|
49
|
+
# ------------------------------------------------------------
|
50
|
+
# Starting dependency chain test (100 concurrent accesses to complex dependency)...
|
51
|
+
# Computing base_config in thread: 220
|
52
|
+
# Loading DB config in thread: 220
|
53
|
+
# Computing connection_manager in thread: 220
|
54
|
+
# Creating connection pool in thread: 220
|
55
|
+
# Computing auth_service in thread: 220
|
56
|
+
# Fetching permissions in thread: 220
|
57
|
+
# Computing api_gateway in thread: 220
|
58
|
+
# Initializing API client in thread: 220
|
59
|
+
|
60
|
+
# Dependency Chain Results:
|
61
|
+
# Duration: 550.29ms
|
62
|
+
# All dependencies computed: true
|
63
|
+
# Unique gateway objects: 1 (should be 1)
|
64
|
+
# Result: ✅ PASS - Dependency resolution works correctly
|
65
|
+
|
66
|
+
# ============================================================
|
67
|
+
# TEST 4: Reset and Recovery Under Load
|
68
|
+
# Testing reset operations during concurrent access (production maintenance scenario)
|
69
|
+
# ------------------------------------------------------------
|
70
|
+
# Starting reset test (readers + periodic resets)...
|
71
|
+
# Loading DB config in thread: 260
|
72
|
+
# Reset 1 performed
|
73
|
+
# Reset 2 performed
|
74
|
+
# Loading DB config in thread: 300
|
75
|
+
# Reset 3 performed
|
76
|
+
# Loading DB config in thread: 340
|
77
|
+
# Reset 4 performed
|
78
|
+
# Loading DB config in thread: 380
|
79
|
+
# Reset 5 performed
|
80
|
+
# Loading DB config in thread: 420
|
81
|
+
# Reset 6 performed
|
82
|
+
# Loading DB config in thread: 460
|
83
|
+
# Reset 7 performed
|
84
|
+
# Loading DB config in thread: 300
|
85
|
+
# Reset 8 performed
|
86
|
+
# Loading DB config in thread: 300
|
87
|
+
|
88
|
+
# Reset and Recovery Results:
|
89
|
+
# Total access attempts: 1000
|
90
|
+
# Successful accesses: 1000
|
91
|
+
# Resets performed: 8
|
92
|
+
# Success rate: 100.0%
|
93
|
+
# Unique config objects: 8 (should be > 1 due to resets)
|
94
|
+
# Result: ✅ PASS - Reset and recovery works correctly
|
95
|
+
|
96
|
+
# ============================================================
|
97
|
+
# === FINAL THREAD SAFETY SUMMARY ===
|
98
|
+
# Test Environment: Ruby 3.0.2, x86_64
|
99
|
+
# Platform: x86_64-darwin19
|
100
|
+
|
101
|
+
# Results:
|
102
|
+
# ✅ Traffic Spike (200 concurrent): PASS
|
103
|
+
# ✅ Sustained Load (1500 operations): PASS
|
104
|
+
# ✅ Dependency Chain (100 concurrent): PASS
|
105
|
+
# ✅ Reset Recovery (periodic resets): PASS
|
106
|
+
|
107
|
+
# Overall Result: ✅ ALL TESTS PASSED
|
108
|
+
|
109
|
+
# Simulated production resources used by the thread-safety test services.
# Every method sleeps to model real I/O latency, logs the computing thread's
# id (so duplicate initializations are visible in the output), and returns a
# freshly built object whose object_id distinguishes separate computations.
module ProductionOperations
  # Fake database configuration load (~100ms of simulated I/O).
  def load_database_config
    puts " Loading DB config in thread: #{Thread.current.object_id}"
    sleep(0.1)
    {
      host: 'prod-db.company.com',
      port: 5432,
      pool_size: 20,
      connections: Array.new(20) { |i| "conn_#{i}_#{rand(10000)}" }
    }
  end

  # Fake connection-pool construction (~200ms of simulated I/O).
  def create_connection_pool
    puts "Creating connection pool in thread: #{Thread.current.object_id}"
    sleep(0.2)
    pool_id = rand(100000)
    {
      id: pool_id,
      connections: Array.new(50) { |i| "active_conn_#{pool_id}_#{i}" },
      created_at: Time.now
    }
  end

  # Fake permission fetch for 1000 users (~150ms of simulated I/O).
  def fetch_user_permissions
    puts "Fetching permissions in thread: #{Thread.current.object_id}"
    sleep(0.15)
    (1..1000).to_h { |n| ["user_#{n}", ["read", "write", "admin"].sample] }
  end

  # Fake API client handshake (~80ms of simulated I/O).
  def initialize_api_client
    puts "Initializing API client in thread: #{Thread.current.object_id}"
    sleep(0.08)
    {
      client_id: "api_#{rand(50000)}",
      token: "bearer_#{rand(1000000)}",
      endpoints: %w[users orders payments analytics],
      initialized_at: Time.now
    }
  end
end
|
151
|
+
|
152
|
+
puts " === THREAD SAFETY TESTS ==="
puts "Testing LazyInit in production-like concurrent scenarios\n\n"

puts " TEST 1: Traffic Spike Simulation"
puts "Simulating 200 simultaneous user requests hitting cached resources"
puts "-" * 60

# Service under test for TEST 1: three independent lazy attributes, each
# wrapping one ProductionOperations helper. Every attribute should be
# computed exactly once regardless of how many threads hit it.
class TrafficSpikeService
  extend LazyInit
  include ProductionOperations

  lazy_attr_reader :db_config do
    load_database_config
  end

  lazy_attr_reader :connection_pool do
    create_connection_pool
  end

  lazy_attr_reader :user_permissions do
    fetch_user_permissions
  end
end
|
175
|
+
|
176
|
+
spike_service = TrafficSpikeService.new
spike_results = []
spike_errors = []
# FIX: 200 threads append into shared arrays; Array#<< is not specified
# to be atomic, so serialize appends — a lost result would skew the
# race-condition analysis this test performs.
spike_lock = Mutex.new

puts "Starting traffic spike test..."
start_time = Time.now

# 200 concurrent "requests", each touching all three lazy attributes and
# recording the object_ids it observed.
spike_threads = 200.times.map do |i|
  Thread.new do
    begin
      config = spike_service.db_config
      pool = spike_service.connection_pool
      permissions = spike_service.user_permissions

      entry = {
        thread: i,
        config_id: config.object_id,
        pool_id: pool.object_id,
        permissions_id: permissions.object_id,
        timestamp: Time.now
      }
      spike_lock.synchronize { spike_results << entry }
    rescue => e
      spike_lock.synchronize { spike_errors << { thread: i, error: e.message } }
    end
  end
end

spike_threads.each(&:join)
spike_duration = Time.now - start_time

puts "\n Traffic Spike Results:"
puts " Duration: #{(spike_duration * 1000).round(2)}ms"
puts " Threads: #{spike_threads.size}"
puts " Successful requests: #{spike_results.size}"
puts " Errors: #{spike_errors.size}"

# One unique object_id per attribute means its initializer ran once.
config_ids = spike_results.map { |r| r[:config_id] }.uniq
pool_ids = spike_results.map { |r| r[:pool_id] }.uniq
permission_ids = spike_results.map { |r| r[:permissions_id] }.uniq

puts "\n Race Condition Analysis:"
puts " DB Config - Unique objects: #{config_ids.size} (should be 1)"
puts " Connection Pool - Unique objects: #{pool_ids.size} (should be 1)"
puts " Permissions - Unique objects: #{permission_ids.size} (should be 1)"

# spike_passed feeds the final summary at the bottom of this script.
spike_passed = config_ids.size == 1 && pool_ids.size == 1 && permission_ids.size == 1
puts " Result: #{spike_passed ? '✅ PASS' : '❌ FAIL'} - No race conditions detected"

puts "\n" + "="*60 + "\n"
|
225
|
+
|
226
|
+
puts "TEST 2: Sustained Load Simulation"
puts "Simulating background workers with sustained concurrent access"
puts "-" * 60

# Service under test for TEST 2: two lazy attributes hammered repeatedly
# by many worker threads; both should resolve to a single cached object.
class WorkerService
  extend LazyInit
  include ProductionOperations

  lazy_attr_reader :api_client do
    initialize_api_client
  end

  lazy_attr_reader :shared_cache do
    fetch_user_permissions
  end
end
|
242
|
+
|
243
|
+
worker_service = WorkerService.new
worker_results = []
worker_errors = []
# FIX: shared result/error collections are mutated from 30 threads;
# guard them with a mutex instead of relying on unsynchronized
# Array#concat / Array#<<.
worker_lock = Mutex.new

puts "Starting sustained load test (30 workers x 50 operations each)..."

# FIX (timing bug): Thread.new starts each worker running immediately, so
# the clock must be read BEFORE spawning. The original sampled Time.now
# after all 30 threads were already working, undercounting the duration
# and inflating the reported operations/second.
worker_start = Time.now

worker_threads = 30.times.map do |worker_id|
  Thread.new do
    # Accumulate locally and merge once at the end to minimize contention.
    thread_results = []
    50.times do |operation|
      begin
        api = worker_service.api_client
        cache = worker_service.shared_cache

        thread_results << {
          worker: worker_id,
          operation: operation,
          api_id: api.object_id,
          cache_id: cache.object_id
        }

        # Occasional yield so the scheduler interleaves workers.
        sleep(0.001) if operation % 10 == 0
      rescue => e
        worker_lock.synchronize do
          worker_errors << { worker: worker_id, operation: operation, error: e.message }
        end
      end
    end

    worker_lock.synchronize { worker_results.concat(thread_results) }
  end
end

worker_threads.each(&:join)
worker_duration = Time.now - worker_start

puts "\n Sustained Load Results:"
puts " Duration: #{worker_duration.round(2)}s"
puts " Total operations: #{worker_results.size}"
puts " Errors: #{worker_errors.size}"
puts " Operations/second: #{(worker_results.size / worker_duration).round(0)}"

# Every operation should have observed the same two cached objects.
api_ids = worker_results.map { |r| r[:api_id] }.uniq
cache_ids = worker_results.map { |r| r[:cache_id] }.uniq

puts "\n Consistency Analysis:"
puts " API Client - Unique objects: #{api_ids.size} (should be 1)"
puts " Shared Cache - Unique objects: #{cache_ids.size} (should be 1)"

# worker_passed feeds the final summary at the bottom of this script.
worker_passed = api_ids.size == 1 && cache_ids.size == 1
puts " Result: #{worker_passed ? '✅ PASS' : '❌ FAIL'} - Consistency maintained"

puts "\n" + "="*60 + "\n"
|
295
|
+
|
296
|
+
puts " TEST 3: Dependency Chain Stress Test"
puts "Testing complex dependency resolution under concurrent pressure"
puts "-" * 60

# Service under test for TEST 3: a diamond-shaped dependency graph —
# api_gateway depends on connection_manager and auth_service, which both
# depend on base_config. Accessing api_gateway must resolve the whole
# chain, each node exactly once.
class DependencyChainService
  extend LazyInit
  include ProductionOperations

  # Root of the chain.
  lazy_attr_reader :base_config do
    puts "Computing base_config in thread: #{Thread.current.object_id}"
    load_database_config
  end

  lazy_attr_reader :connection_manager, depends_on: [:base_config] do
    puts "Computing connection_manager in thread: #{Thread.current.object_id}"
    create_connection_pool
  end

  lazy_attr_reader :auth_service, depends_on: [:base_config] do
    puts "Computing auth_service in thread: #{Thread.current.object_id}"
    fetch_user_permissions
  end

  # Top of the diamond: forces resolution of the full chain.
  lazy_attr_reader :api_gateway, depends_on: [:connection_manager, :auth_service] do
    puts "Computing api_gateway in thread: #{Thread.current.object_id}"
    initialize_api_client
  end
end
|
324
|
+
|
325
|
+
dependency_service = DependencyChainService.new
dependency_results = []
# FIX: results are appended from 100 threads; serialize the appends.
dependency_lock = Mutex.new

puts "Starting dependency chain test (100 concurrent accesses to complex dependency)..."

# FIX (timing bug): read the clock BEFORE spawning — the original set
# dependency_start after all 100 threads were already resolving the chain,
# so the reported duration missed the bulk of the work.
dependency_start = Time.now

dependency_threads = 100.times.map do |i|
  Thread.new do
    # Touching the top of the diamond forces the whole chain to resolve.
    gateway = dependency_service.api_gateway

    entry = {
      thread: i,
      gateway_id: gateway.object_id,
      base_config_computed: dependency_service.base_config_computed?,
      connection_manager_computed: dependency_service.connection_manager_computed?,
      auth_service_computed: dependency_service.auth_service_computed?,
      api_gateway_computed: dependency_service.api_gateway_computed?
    }
    dependency_lock.synchronize { dependency_results << entry }
  end
end

dependency_threads.each(&:join)
dependency_duration = Time.now - dependency_start

puts "\n Dependency Chain Results:"
puts " Duration: #{(dependency_duration * 1000).round(2)}ms"
puts " All dependencies computed: #{dependency_results.all? { |r| r[:api_gateway_computed] }}"

# A single unique gateway object means the chain resolved exactly once.
gateway_ids = dependency_results.map { |r| r[:gateway_id] }.uniq
puts " Unique gateway objects: #{gateway_ids.size} (should be 1)"

# dependency_passed feeds the final summary at the bottom of this script.
dependency_passed = gateway_ids.size == 1
puts " Result: #{dependency_passed ? '✅ PASS' : '❌ FAIL'} - Dependency resolution works correctly"

puts "\n" + "="*60 + "\n"
|
360
|
+
|
361
|
+
puts "TEST 4: Reset and Recovery Under Load"
puts "Testing reset operations during concurrent access (production maintenance scenario)"
puts "-" * 60

# Service under test for TEST 4: a single lazy attribute that will be
# periodically reset (via the LazyInit-generated reset_service_config!)
# while reader threads keep accessing it.
class ResetTestService
  extend LazyInit
  include ProductionOperations

  lazy_attr_reader :service_config do
    load_database_config
  end
end
|
373
|
+
|
374
|
+
reset_service = ResetTestService.new
reset_results = []
reset_stats = { resets: 0, access_attempts: 0, successes: 0 }
# FIX: Hash counter increments (h[:k] += 1) are read-modify-write and are
# NOT atomic across the 21 threads below; without a lock the reported
# attempt/success counts can silently undercount. Guard all shared state.
stats_lock = Mutex.new

puts "Starting reset test (readers + periodic resets)..."

# 20 reader threads, 50 accesses each, racing against the reset thread.
reader_threads = 20.times.map do |i|
  Thread.new do
    50.times do |attempt|
      begin
        stats_lock.synchronize { reset_stats[:access_attempts] += 1 }
        config = reset_service.service_config
        stats_lock.synchronize do
          reset_results << { thread: i, attempt: attempt, config_id: config.object_id }
          reset_stats[:successes] += 1
        end
        sleep(0.01)
      rescue StandardError
        # Deliberately swallowed: a failed access simply doesn't count as a
        # success, and the success-rate metric below surfaces the gap.
      end
    end
  end
end

# Maintenance thread: resets the cached config 8 times at 100ms intervals,
# forcing readers to recompute (hence > 1 unique object_id expected).
reset_thread = Thread.new do
  8.times do
    sleep(0.1)
    reset_service.reset_service_config!
    stats_lock.synchronize { reset_stats[:resets] += 1 }
    puts " Reset #{reset_stats[:resets]} performed"
  end
end

[*reader_threads, reset_thread].each(&:join)

puts "\n Reset and Recovery Results:"
puts " Total access attempts: #{reset_stats[:access_attempts]}"
puts " Successful accesses: #{reset_stats[:successes]}"
puts " Resets performed: #{reset_stats[:resets]}"
puts " Success rate: #{((reset_stats[:successes].to_f / reset_stats[:access_attempts]) * 100).round(1)}%"

unique_configs = reset_results.map { |r| r[:config_id] }.uniq
puts " Unique config objects: #{unique_configs.size} (should be > 1 due to resets)"

reset_passed = unique_configs.size > 1 && reset_stats[:successes] > 0
puts " Result: #{reset_passed ? '✅ PASS' : '❌ FAIL'} - Reset and recovery works correctly"

puts "\n" + "="*60 + "\n"

# Final roll-up over the pass/fail flags set by the four tests above.
all_tests_passed = spike_passed && worker_passed && dependency_passed && reset_passed

puts " === FINAL THREAD SAFETY SUMMARY ==="
puts "Test Environment: Ruby #{RUBY_VERSION}, #{RbConfig::CONFIG['target_cpu']}"
puts "Platform: #{RUBY_PLATFORM}"
puts ""
puts "Results:"
puts " ✅ Traffic Spike (200 concurrent): #{spike_passed ? 'PASS' : 'FAIL'}"
puts " ✅ Sustained Load (1500 operations): #{worker_passed ? 'PASS' : 'FAIL'}"
puts " ✅ Dependency Chain (100 concurrent): #{dependency_passed ? 'PASS' : 'FAIL'}"
puts " ✅ Reset Recovery (periodic resets): #{reset_passed ? 'PASS' : 'FAIL'}"
puts ""
puts "Overall Result: #{all_tests_passed ? '✅ ALL TESTS PASSED' : '❌ SOME TESTS FAILED'}"
puts ""
|