rack-mini-profiler 1.0.1 → 2.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +115 -20
- data/README.md +126 -45
- data/lib/enable_rails_patches.rb +5 -0
- data/lib/html/dot.1.1.2.min.js +2 -0
- data/lib/html/includes.css +136 -35
- data/lib/html/includes.js +1400 -1009
- data/lib/html/includes.scss +546 -441
- data/lib/html/includes.tmpl +231 -148
- data/lib/html/pretty-print.js +810 -0
- data/lib/html/profile_handler.js +1 -1
- data/lib/html/rack-mini-profiler.css +3 -0
- data/lib/html/rack-mini-profiler.js +2 -0
- data/lib/html/share.html +0 -1
- data/lib/html/speedscope/LICENSE +21 -0
- data/lib/html/speedscope/README.md +3 -0
- data/lib/html/speedscope/demangle-cpp.1768f4cc.js +4 -0
- data/lib/html/speedscope/favicon-16x16.f74b3187.png +0 -0
- data/lib/html/speedscope/favicon-32x32.bc503437.png +0 -0
- data/lib/html/speedscope/file-format-schema.json +324 -0
- data/lib/html/speedscope/fonts/source-code-pro-regular.css +8 -0
- data/lib/html/speedscope/fonts/source-code-pro-v13-regular.woff +0 -0
- data/lib/html/speedscope/fonts/source-code-pro-v13-regular.woff2 +0 -0
- data/lib/html/speedscope/import.cf0fa83f.js +115 -0
- data/lib/html/speedscope/index.html +2 -0
- data/lib/html/speedscope/release.txt +3 -0
- data/lib/html/speedscope/reset.8c46b7a1.css +2 -0
- data/lib/html/speedscope/source-map.438fa06b.js +24 -0
- data/lib/html/speedscope/speedscope.44364064.js +200 -0
- data/lib/html/vendor.js +848 -0
- data/lib/mini_profiler/asset_version.rb +3 -2
- data/lib/mini_profiler/client_settings.rb +13 -5
- data/lib/mini_profiler/config.rb +43 -5
- data/lib/mini_profiler/gc_profiler.rb +1 -1
- data/lib/mini_profiler/profiler.rb +310 -42
- data/lib/mini_profiler/profiling_methods.rb +13 -8
- data/lib/mini_profiler/snapshots_transporter.rb +109 -0
- data/lib/mini_profiler/storage/abstract_store.rb +79 -1
- data/lib/mini_profiler/storage/file_store.rb +3 -3
- data/lib/mini_profiler/storage/memcache_store.rb +2 -0
- data/lib/mini_profiler/storage/memory_store.rb +54 -5
- data/lib/mini_profiler/storage/redis_store.rb +136 -2
- data/lib/mini_profiler/timer_struct/custom.rb +1 -0
- data/lib/mini_profiler/timer_struct/page.rb +60 -4
- data/lib/mini_profiler/timer_struct/request.rb +53 -11
- data/lib/mini_profiler/timer_struct/sql.rb +4 -2
- data/lib/mini_profiler/version.rb +1 -1
- data/lib/mini_profiler_rails/railtie.rb +88 -7
- data/lib/mini_profiler_rails/railtie_methods.rb +61 -0
- data/lib/patches/db/activerecord.rb +1 -12
- data/lib/patches/db/mongo.rb +1 -1
- data/lib/patches/db/moped.rb +1 -1
- data/lib/patches/db/mysql2.rb +4 -27
- data/lib/patches/db/mysql2/alias_method.rb +30 -0
- data/lib/patches/db/mysql2/prepend.rb +34 -0
- data/lib/patches/db/plucky.rb +4 -4
- data/lib/patches/net_patches.rb +18 -8
- data/lib/patches/sql_patches.rb +13 -5
- data/lib/prepend_mysql2_patch.rb +5 -0
- data/lib/prepend_net_http_patch.rb +5 -0
- data/lib/rack-mini-profiler.rb +1 -1
- data/rack-mini-profiler.gemspec +15 -6
- metadata +150 -31
- data/lib/html/jquery.1.7.1.js +0 -4
- data/lib/html/jquery.tmpl.js +0 -486
- data/lib/html/list.css +0 -9
- data/lib/html/list.js +0 -38
- data/lib/html/list.tmpl +0 -34
data/lib/mini_profiler/profiling_methods.rb

@@ -7,7 +7,14 @@ module Rack
       def record_sql(query, elapsed_ms, params = nil)
         return unless current && current.current_timer
         c = current
-        c.current_timer.add_sql(query, elapsed_ms, c.page_struct, params, c.skip_backtrace, c.full_backtrace)
+        c.current_timer.add_sql(
+          redact_sql_queries? ? nil : query,
+          elapsed_ms,
+          c.page_struct,
+          redact_sql_queries? ? nil : params,
+          c.skip_backtrace,
+          c.full_backtrace
+        )
       end
 
       def start_step(name)

@@ -108,15 +115,18 @@ module Rack
             end
           end
         end
+        if klass.respond_to?(:ruby2_keywords, true)
+          klass.send(:ruby2_keywords, with_profiling)
+        end
         klass.send :alias_method, method, with_profiling
       end
 
       def profile_singleton_method(klass, method, type = :profile, &blk)
-        profile_method(singleton_class(klass), method, type, &blk)
+        profile_method(klass.singleton_class, method, type, &blk)
       end
 
       def unprofile_singleton_method(klass, method)
-        unprofile_method(singleton_class(klass), method)
+        unprofile_method(klass.singleton_class, method)
       end
 
       # Add a custom timing. These are displayed similar to SQL/query time in

@@ -144,14 +154,9 @@ module Rack
 
       private
 
-      def singleton_class(klass)
-        class << klass; self; end
-      end
-
       def clean_method_name(method)
         method.to_s.gsub(/[\?\!]/, "")
       end
-
     end
   end
 end
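The `profile_method` family above is the gem's ad-hoc instrumentation API; 2.x additionally marks the generated wrapper with `ruby2_keywords` so keyword arguments survive the `*args` delegation on Ruby 2.7+. A minimal usage sketch, assuming the documented `Rack::MiniProfiler.profile_method` entry points; the `Report` class and its methods are illustrative, not part of the gem:

```ruby
# Illustrative class; only the profile_* calls below come from the gem's API.
class Report
  def generate
    sleep 0.1
  end

  def self.purge_all
    sleep 0.05
  end
end

Rack::MiniProfiler.profile_method(Report, :generate)              # instance method
Rack::MiniProfiler.profile_singleton_method(Report, :purge_all)   # class method
# ...and to remove the instrumentation later:
Rack::MiniProfiler.unprofile_singleton_method(Report, :purge_all)
```

Timings recorded this way only show up while a profiling session is active for the current request.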
data/lib/mini_profiler/snapshots_transporter.rb (new file)

@@ -0,0 +1,109 @@
+# frozen_string_literal: true
+
+class ::Rack::MiniProfiler::SnapshotsTransporter
+  @@transported_snapshots_count = 0
+  @@successful_http_requests_count = 0
+  @@failed_http_requests_count = 0
+
+  class << self
+    def transported_snapshots_count
+      @@transported_snapshots_count
+    end
+    def successful_http_requests_count
+      @@successful_http_requests_count
+    end
+    def failed_http_requests_count
+      @@failed_http_requests_count
+    end
+
+    def transport(snapshot)
+      @transporter ||= self.new(Rack::MiniProfiler.config)
+      @transporter.ship(snapshot)
+    end
+  end
+
+  attr_reader :buffer
+  attr_accessor :max_buffer_size, :gzip_requests
+
+  def initialize(config)
+    @uri = URI(config.snapshots_transport_destination_url)
+    @auth_key = config.snapshots_transport_auth_key
+    @gzip_requests = config.snapshots_transport_gzip_requests
+    @thread = nil
+    @thread_mutex = Mutex.new
+    @buffer = []
+    @buffer_mutex = Mutex.new
+    @max_buffer_size = 100
+    @consecutive_failures_count = 0
+    @testing = false
+  end
+
+  def ship(snapshot)
+    @buffer_mutex.synchronize do
+      @buffer << snapshot
+      @buffer.shift if @buffer.size > @max_buffer_size
+    end
+    @thread_mutex.synchronize { start_thread }
+  end
+
+  def flush_buffer
+    buffer_content = @buffer_mutex.synchronize do
+      @buffer.dup if @buffer.size > 0
+    end
+    if buffer_content
+      headers = {
+        'Content-Type' => 'application/json',
+        'Mini-Profiler-Transport-Auth' => @auth_key
+      }
+      json = { snapshots: buffer_content }.to_json
+      body = if @gzip_requests
+        require 'zlib'
+        io = StringIO.new
+        gzip_writer = Zlib::GzipWriter.new(io)
+        gzip_writer.write(json)
+        gzip_writer.close
+        headers['Content-Encoding'] = 'gzip'
+        io.string
+      else
+        json
+      end
+      request = Net::HTTP::Post.new(@uri, headers)
+      request.body = body
+      http = Net::HTTP.new(@uri.hostname, @uri.port)
+      http.use_ssl = @uri.scheme == 'https'
+      res = http.request(request)
+      if res.code.to_i == 200
+        @@successful_http_requests_count += 1
+        @@transported_snapshots_count += buffer_content.size
+        @buffer_mutex.synchronize do
+          @buffer -= buffer_content
+        end
+        @consecutive_failures_count = 0
+      else
+        @@failed_http_requests_count += 1
+        @consecutive_failures_count += 1
+      end
+    end
+  end
+
+  def requests_interval
+    [30 + backoff_delay, 60 * 60].min
+  end
+
+  private
+
+  def backoff_delay
+    return 0 if @consecutive_failures_count == 0
+    2**@consecutive_failures_count
+  end
+
+  def start_thread
+    return if @thread&.alive? || @testing
+    @thread = Thread.new do
+      while true
+        sleep requests_interval
+        flush_buffer
+      end
+    end
+  end
+end
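The transporter reads its destination URL, auth key, and gzip flag straight from the gem's config (see `initialize` above). A wiring sketch, assuming a Rails initializer; the URL and environment variable name are placeholders:

```ruby
# config/initializers/mini_profiler.rb (placeholder values)
Rack::MiniProfiler.config.snapshots_transport_destination_url = "https://profiling.example.com/snapshots"
Rack::MiniProfiler.config.snapshots_transport_auth_key        = ENV["MP_SNAPSHOTS_AUTH_KEY"]
Rack::MiniProfiler.config.snapshots_transport_gzip_requests   = true
```

The receiving endpoint gets a JSON body of the form `{ "snapshots": [...] }` with the auth key in the `Mini-Profiler-Transport-Auth` header (and `Content-Encoding: gzip` when compression is on); failed posts stay in the buffer and are retried with exponential backoff capped at one hour.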
data/lib/mini_profiler/storage/abstract_store.rb

@@ -36,11 +36,89 @@ module Rack
         ""
       end
 
-      # a list of tokens that are permitted to access profiler in
+      # a list of tokens that are permitted to access profiler in explicit mode
       def allowed_tokens
         raise NotImplementedError.new("allowed_tokens is not implemented")
       end
 
+      def should_take_snapshot?(period)
+        raise NotImplementedError.new("should_take_snapshot? is not implemented")
+      end
+
+      def push_snapshot(page_struct, config)
+        raise NotImplementedError.new("push_snapshot is not implemented")
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        raise NotImplementedError.new("fetch_snapshots is not implemented")
+      end
+
+      def snapshot_groups_overview
+        groups = {}
+        fetch_snapshots do |batch|
+          batch.each do |snapshot|
+            group_name = default_snapshot_grouping(snapshot)
+            hash = groups[group_name] ||= {}
+            hash[:snapshots_count] ||= 0
+            hash[:snapshots_count] += 1
+            if !hash[:worst_score] || hash[:worst_score] < snapshot.duration_ms
+              groups[group_name][:worst_score] = snapshot.duration_ms
+            end
+            if !hash[:best_score] || hash[:best_score] > snapshot.duration_ms
+              groups[group_name][:best_score] = snapshot.duration_ms
+            end
+          end
+        end
+        groups = groups.to_a
+        groups.sort_by! { |name, hash| hash[:worst_score] }
+        groups.reverse!
+        groups.map! { |name, hash| hash.merge(name: name) }
+        groups
+      end
+
+      def find_snapshots_group(group_name)
+        data = []
+        fetch_snapshots do |batch|
+          batch.each do |snapshot|
+            snapshot_group_name = default_snapshot_grouping(snapshot)
+            if group_name == snapshot_group_name
+              data << {
+                id: snapshot[:id],
+                duration: snapshot.duration_ms,
+                sql_count: snapshot[:sql_count],
+                timestamp: snapshot[:started_at],
+                custom_fields: snapshot[:custom_fields]
+              }
+            end
+          end
+        end
+        data.sort_by! { |s| s[:duration] }
+        data.reverse!
+        data
+      end
+
+      def load_snapshot(id)
+        raise NotImplementedError.new("load_snapshot is not implemented")
+      end
+
+      private
+
+      def default_snapshot_grouping(snapshot)
+        group_name = rails_route_from_path(snapshot[:request_path], snapshot[:request_method])
+        group_name ||= snapshot[:request_path]
+        "#{snapshot[:request_method]} #{group_name}"
+      end
+
+      def rails_route_from_path(path, method)
+        if defined?(Rails) && defined?(ActionController::RoutingError)
+          hash = Rails.application.routes.recognize_path(path, method: method)
+          if hash && hash[:controller] && hash[:action]
+            "#{hash[:controller]}##{hash[:action]}"
+          end
+        end
+      rescue ActionController::RoutingError
+        nil
+      end
     end
   end
 end
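`AbstractStore` now defines the snapshot contract: a concrete store implements `should_take_snapshot?`, `push_snapshot`, `fetch_snapshots`, and `load_snapshot`, and gets `snapshot_groups_overview` / `find_snapshots_group` for free on top of `fetch_snapshots`. A sketch of the smallest conceivable subclass, for illustration only (a real store also has to implement the pre-existing save/load/unviewed methods):

```ruby
# Illustration of the new snapshot hooks; not a production store.
class TrivialSnapshotStore < Rack::MiniProfiler::AbstractStore
  def initialize(*)
    @snapshots = []
    @requests_seen = 0
  end

  def should_take_snapshot?(period)
    (@requests_seen += 1) % period == 0
  end

  def push_snapshot(page_struct, config)
    @snapshots << page_struct
    # keep only the slowest `snapshots_limit` pages
    @snapshots = @snapshots.sort_by(&:duration_ms).last(config.snapshots_limit)
  end

  def fetch_snapshots(batch_size: 200, &blk)
    @snapshots.each_slice(batch_size, &blk)
  end

  def load_snapshot(id)
    @snapshots.find { |snapshot| snapshot[:id] == id }
  end
end
```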
data/lib/mini_profiler/storage/file_store.rb

@@ -17,9 +17,9 @@ module Rack
       def [](key)
         begin
           data = ::File.open(path(key), "rb") { |f| f.read }
-          return Marshal.load data
+          Marshal.load data
         rescue
-          return nil
+          nil
         end
       end
 

@@ -31,7 +31,7 @@ module Rack
       end
 
       private
-      if
+      if Gem.win_platform?
         def path(key)
           @path.dup << "/" << @prefix << "_" << key.gsub(/:/, '_')
         end
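The `FileStore` changes are cosmetic: the explicit `return`s are dropped and `Gem.win_platform?` replaces the old platform check guarding the Windows path variant. For reference, a sketch of selecting this store through the `storage` / `storage_options` settings; the directory is a placeholder:

```ruby
# Placeholder path; FileStore persists profiles as Marshal dumps on disk.
Rack::MiniProfiler.config.storage         = Rack::MiniProfiler::FileStore
Rack::MiniProfiler.config.storage_options = { path: "tmp/miniprofiler" }
```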
data/lib/mini_profiler/storage/memcache_store.rb

@@ -10,8 +10,10 @@ module Rack
       def initialize(args = nil)
         require 'dalli' unless defined? Dalli
         args ||= {}
+
         @prefix = args[:prefix] || "MPMemcacheStore"
         @prefix += "-#{Rack::MiniProfiler::VERSION}"
+
         @client = args[:client] || Dalli::Client.new
         @expires_in_seconds = args[:expires_in] || EXPIRES_IN_SECONDS
       end
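`MemcacheStore#initialize` still accepts `:client`, `:prefix`, and `:expires_in`; the hunk only adds blank lines. A configuration sketch assuming those options are passed via `storage_options`; the memcached address is a placeholder:

```ruby
require "dalli"

Rack::MiniProfiler.config.storage         = Rack::MiniProfiler::MemcacheStore
Rack::MiniProfiler.config.storage_options = {
  client:     Dalli::Client.new("memcached.internal:11211"), # placeholder host
  expires_in: 60 * 60                                        # seconds
}
```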
data/lib/mini_profiler/storage/memory_store.rb

@@ -52,17 +52,21 @@ module Rack
         @expires_in_seconds = args.fetch(:expires_in) { EXPIRES_IN_SECONDS }
 
         @token1, @token2, @cycle_at = nil
+        @snapshots_cycle = 0
+        @snapshots = []
 
         initialize_locks
         initialize_cleanup_thread(args)
       end
 
       def initialize_locks
-        @token_lock         = Mutex.new
-        @timer_struct_lock  = Mutex.new
-        @user_view_lock     = Mutex.new
-        @timer_struct_cache = {}
-        @user_view_cache    = {}
+        @token_lock = Mutex.new
+        @timer_struct_lock = Mutex.new
+        @user_view_lock = Mutex.new
+        @snapshots_cycle_lock = Mutex.new
+        @snapshots_lock = Mutex.new
+        @timer_struct_cache = {}
+        @user_view_cache = {}
       end
 
       #FIXME: use weak ref, trouble it may be broken in 1.9 so need to use the 'ref' gem

@@ -135,6 +139,51 @@ module Rack
 
         end
       end
+
+      def should_take_snapshot?(period)
+        @snapshots_cycle_lock.synchronize do
+          @snapshots_cycle += 1
+          if @snapshots_cycle % period == 0
+            @snapshots_cycle = 0
+            true
+          else
+            false
+          end
+        end
+      end
+
+      def push_snapshot(page_struct, config)
+        @snapshots_lock.synchronize do
+          @snapshots << page_struct
+          @snapshots.sort_by! { |s| s.duration_ms }
+          @snapshots.reverse!
+          if @snapshots.size > config.snapshots_limit
+            @snapshots.slice!(-1)
+          end
+        end
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        @snapshots_lock.synchronize do
+          @snapshots.each_slice(batch_size) do |batch|
+            blk.call(batch)
+          end
+        end
+      end
+
+      def load_snapshot(id)
+        @snapshots_lock.synchronize do
+          @snapshots.find { |s| s[:id] == id }
+        end
+      end
+
+      private
+
+      # used in tests only
+      def wipe_snapshots_data
+        @snapshots_cycle = 0
+        @snapshots = []
+      end
     end
   end
 end
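`MemoryStore#should_take_snapshot?` is a mutex-guarded modulo counter, so with period N exactly every Nth call returns true, and `push_snapshot` keeps only the `config.snapshots_limit` slowest pages. A small illustration, assuming the store's default argument-less constructor:

```ruby
store = Rack::MiniProfiler::MemoryStore.new

results = (1..8).map { store.should_take_snapshot?(4) }
# => [false, false, false, true, false, false, false, true]
```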
data/lib/mini_profiler/storage/redis_store.rb

@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require 'digest'
+
 module Rack
   class MiniProfiler
     class RedisStore < AbstractStore

@@ -33,7 +35,7 @@ module Rack
 
       def set_unviewed(user, id)
         key = user_key(user)
-        if redis.exists(prefixed_id(id))
+        if redis.call([:exists, prefixed_id(id)]) == 1
           expire_at = Process.clock_gettime(Process::CLOCK_MONOTONIC).to_i + redis.ttl(prefixed_id(id))
           redis.zadd(key, expire_at, id)
         end

@@ -44,7 +46,7 @@ module Rack
         key = user_key(user)
         redis.del(key)
         ids.each do |id|
-          if redis.exists(prefixed_id(id))
+          if redis.call([:exists, prefixed_id(id)]) == 1
            expire_at = Process.clock_gettime(Process::CLOCK_MONOTONIC).to_i + redis.ttl(prefixed_id(id))
            redis.zadd(key, expire_at, id)
          end
@@ -108,6 +110,106 @@ unviewed_ids: #{get_unviewed_ids(user)}
         [key1, key2].compact
       end
 
+      COUNTER_LUA = <<~LUA
+        if redis.call("INCR", KEYS[1]) % ARGV[1] == 0 then
+          redis.call("DEL", KEYS[1])
+          return 1
+        else
+          return 0
+        end
+      LUA
+
+      COUNTER_LUA_SHA = Digest::SHA1.hexdigest(COUNTER_LUA)
+
+      def should_take_snapshot?(period)
+        1 == cached_redis_eval(
+          COUNTER_LUA,
+          COUNTER_LUA_SHA,
+          reraise: false,
+          keys: [snapshot_counter_key()],
+          argv: [period]
+        )
+      end
+
+      def push_snapshot(page_struct, config)
+        zset_key = snapshot_zset_key()
+        hash_key = snapshot_hash_key()
+
+        id = page_struct[:id]
+        score = page_struct.duration_ms
+        limit = config.snapshots_limit
+        bytes = Marshal.dump(page_struct)
+
+        lua = <<~LUA
+          local zset_key = KEYS[1]
+          local hash_key = KEYS[2]
+          local id = ARGV[1]
+          local score = tonumber(ARGV[2])
+          local bytes = ARGV[3]
+          local limit = tonumber(ARGV[4])
+          redis.call("ZADD", zset_key, score, id)
+          redis.call("HSET", hash_key, id, bytes)
+          if redis.call("ZCARD", zset_key) > limit then
+            local lowest_snapshot_id = redis.call("ZRANGE", zset_key, 0, 0)[1]
+            redis.call("ZREM", zset_key, lowest_snapshot_id)
+            redis.call("HDEL", hash_key, lowest_snapshot_id)
+          end
+        LUA
+        redis.eval(
+          lua,
+          keys: [zset_key, hash_key],
+          argv: [id, score, bytes, limit]
+        )
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        zset_key = snapshot_zset_key()
+        hash_key = snapshot_hash_key()
+        iteration = 0
+        corrupt_snapshots = []
+        while true
+          ids = redis.zrange(
+            zset_key,
+            batch_size * iteration,
+            batch_size * iteration + batch_size - 1
+          )
+          break if ids.size == 0
+          batch = redis.mapped_hmget(hash_key, *ids).to_a
+          batch.map! do |id, bytes|
+            begin
+              Marshal.load(bytes)
+            rescue
+              corrupt_snapshots << id
+              nil
+            end
+          end
+          batch.compact!
+          blk.call(batch) if batch.size != 0
+          break if ids.size < batch_size
+          iteration += 1
+        end
+        if corrupt_snapshots.size > 0
+          redis.pipelined do
+            redis.zrem(zset_key, corrupt_snapshots)
+            redis.hdel(hash_key, corrupt_snapshots)
+          end
+        end
+      end
+
+      def load_snapshot(id)
+        hash_key = snapshot_hash_key()
+        bytes = redis.hget(hash_key, id)
+        begin
+          Marshal.load(bytes)
+        rescue
+          redis.pipelined do
+            redis.zrem(snapshot_zset_key(), id)
+            redis.hdel(hash_key, id)
+          end
+          nil
+        end
+      end
+
       private
 
       def user_key(user)
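`RedisStore` pushes sampling and retention into Redis itself: an `INCR`-based Lua counter decides when to take a snapshot, and `push_snapshot` trims a sorted set so only the `config.snapshots_limit` highest-duration snapshots survive. A console sketch of enabling sampling and paging through what has been stored; `snapshot_every_n_requests` and `storage_instance` are 2.x config accessors assumed here, not shown in this hunk:

```ruby
# Assumed 2.x config knobs; the numbers are arbitrary.
Rack::MiniProfiler.config.snapshot_every_n_requests = 100
Rack::MiniProfiler.config.snapshots_limit           = 500

# storage_instance is assumed to be populated once the middleware has built its store.
store = Rack::MiniProfiler.config.storage_instance
store.fetch_snapshots(batch_size: 50) do |batch|
  batch.each { |snapshot| puts "#{snapshot[:id]}: #{snapshot.duration_ms.round(1)}ms" }
end
```

The remaining `RedisStore` hunk below adds the snapshot key helpers and the `EVALSHA` fallback the counter relies on.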
@@ -125,6 +227,38 @@ unviewed_ids: #{get_unviewed_ids(user)}
         end
       end
 
+      def snapshot_counter_key
+        @snapshot_counter_key ||= "#{@prefix}-mini-profiler-snapshots-counter"
+      end
+
+      def snapshot_zset_key
+        @snapshot_zset_key ||= "#{@prefix}-mini-profiler-snapshots-zset"
+      end
+
+      def snapshot_hash_key
+        @snapshot_hash_key ||= "#{@prefix}-mini-profiler-snapshots-hash"
+      end
+
+      def cached_redis_eval(script, script_sha, reraise: true, argv: [], keys: [])
+        begin
+          redis.evalsha(script_sha, argv: argv, keys: keys)
+        rescue ::Redis::CommandError => e
+          if e.message.start_with?('NOSCRIPT')
+            redis.eval(script, argv: argv, keys: keys)
+          else
+            raise e if reraise
+          end
+        end
+      end
+
+      # only used in tests
+      def wipe_snapshots_data
+        redis.pipelined do
+          redis.del(snapshot_counter_key())
+          redis.del(snapshot_zset_key())
+          redis.del(snapshot_hash_key())
+        end
+      end
     end
   end
 end
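`cached_redis_eval` is the usual EVALSHA-then-EVAL-on-NOSCRIPT pattern: the SHA1 of the script is tried first, and the full script is only sent when the server's script cache does not have it yet. The same pattern standalone with redis-rb, as a hedged sketch; the script and key are illustrative:

```ruby
require "digest"
require "redis"

SCRIPT     = 'return redis.call("INCR", KEYS[1])'
SCRIPT_SHA = Digest::SHA1.hexdigest(SCRIPT)

def counter_bump(redis, key)
  # Fast path: the server already has the script cached under its SHA1.
  redis.evalsha(SCRIPT_SHA, keys: [key])
rescue Redis::CommandError => e
  raise unless e.message.start_with?("NOSCRIPT")
  # First call against this server: ship the full script once.
  redis.eval(SCRIPT, keys: [key])
end

counter_bump(Redis.new, "example-counter")
```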