rack-mini-profiler 2.0.1 → 2.2.0

This diff shows the changes between these two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -7,7 +7,14 @@ module Rack
       def record_sql(query, elapsed_ms, params = nil)
         return unless current && current.current_timer
         c = current
-        c.current_timer.add_sql(query, elapsed_ms, c.page_struct, params, c.skip_backtrace, c.full_backtrace)
+        c.current_timer.add_sql(
+          redact_sql_queries? ? nil : query,
+          elapsed_ms,
+          c.page_struct,
+          redact_sql_queries? ? nil : params,
+          c.skip_backtrace,
+          c.full_backtrace
+        )
       end

       def start_step(name)
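When `redact_sql_queries?` returns true (the predicate itself is defined elsewhere in the profiler and driven by configuration), the call above still records the elapsed time but passes `nil` in place of both the query text and the bind params. A standalone sketch of that branch, using a hypothetical helper name:

```ruby
# Hypothetical helper (not the gem's API) mirroring the branch added above:
# timing information is always kept, while the SQL text and params are
# dropped when redaction is active.
def redacted_sql_args(query, params, redact)
  [redact ? nil : query, redact ? nil : params]
end

p redacted_sql_args("SELECT * FROM users WHERE id = 1", [["id", 1]], true)
# => [nil, nil]
p redacted_sql_args("SELECT * FROM users WHERE id = 1", [["id", 1]], false)
# => ["SELECT * FROM users WHERE id = 1", [["id", 1]]]
```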
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+class ::Rack::MiniProfiler::SnapshotsTransporter
+  @@transported_snapshots_count = 0
+  @@successful_http_requests_count = 0
+  @@failed_http_requests_count = 0
+
+  class << self
+    def transported_snapshots_count
+      @@transported_snapshots_count
+    end
+    def successful_http_requests_count
+      @@successful_http_requests_count
+    end
+    def failed_http_requests_count
+      @@failed_http_requests_count
+    end
+
+    def transport(snapshot)
+      @transporter ||= self.new(Rack::MiniProfiler.config)
+      @transporter.ship(snapshot)
+    end
+  end
+
+  attr_reader :buffer
+  attr_accessor :max_buffer_size
+
+  def initialize(config)
+    @uri = URI(config.snapshots_transport_destination_url)
+    @auth_key = config.snapshots_transport_auth_key
+    @thread = nil
+    @thread_mutex = Mutex.new
+    @buffer = []
+    @buffer_mutex = Mutex.new
+    @max_buffer_size = 100
+    @testing = false
+  end
+
+  def ship(snapshot)
+    @buffer_mutex.synchronize do
+      @buffer << snapshot
+      @buffer.shift if @buffer.size > @max_buffer_size
+    end
+    @thread_mutex.synchronize { start_thread }
+  end
+
+  def flush_buffer
+    buffer_content = @buffer_mutex.synchronize do
+      @buffer.dup if @buffer.size > 0
+    end
+    if buffer_content
+      request = Net::HTTP::Post.new(
+        @uri,
+        'Content-Type' => 'application/json',
+        'Mini-Profiler-Transport-Auth' => @auth_key
+      )
+      request.body = { snapshots: buffer_content }.to_json
+      http = Net::HTTP.new(@uri.hostname, @uri.port)
+      http.use_ssl = @uri.scheme == 'https'
+      res = http.request(request)
+      if res.code.to_i == 200
+        @@successful_http_requests_count += 1
+        @@transported_snapshots_count += buffer_content.size
+        @buffer_mutex.synchronize do
+          @buffer -= buffer_content
+        end
+      else
+        @@failed_http_requests_count += 1
+      end
+    end
+  end
+
+  private
+
+  def start_thread
+    return if @thread&.alive? || @testing
+    @thread = Thread.new do
+      while true
+        sleep 10
+        flush_buffer
+      end
+    end
+  end
+end
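The transporter buffers up to 100 snapshots and a background thread POSTs the buffer every 10 seconds as `{ snapshots: [...] }` JSON with a `Mini-Profiler-Transport-Auth` header. Wiring it up only needs the two config values read in `initialize`; a plausible initializer, with placeholder URL and key:

```ruby
# config/initializers/mini_profiler.rb -- illustrative only; the endpoint and
# environment variable name are made-up placeholders.
Rack::MiniProfiler.config.snapshots_transport_destination_url =
  "https://collector.example.com/mini-profiler-snapshots"
Rack::MiniProfiler.config.snapshots_transport_auth_key =
  ENV["MINI_PROFILER_TRANSPORT_AUTH_KEY"]
```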
@@ -41,6 +41,84 @@ module Rack
         raise NotImplementedError.new("allowed_tokens is not implemented")
       end

+      def should_take_snapshot?(period)
+        raise NotImplementedError.new("should_take_snapshot? is not implemented")
+      end
+
+      def push_snapshot(page_struct, config)
+        raise NotImplementedError.new("push_snapshot is not implemented")
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        raise NotImplementedError.new("fetch_snapshots is not implemented")
+      end
+
+      def snapshot_groups_overview
+        groups = {}
+        fetch_snapshots do |batch|
+          batch.each do |snapshot|
+            group_name = default_snapshot_grouping(snapshot)
+            hash = groups[group_name] ||= {}
+            hash[:snapshots_count] ||= 0
+            hash[:snapshots_count] += 1
+            if !hash[:worst_score] || hash[:worst_score] < snapshot.duration_ms
+              groups[group_name][:worst_score] = snapshot.duration_ms
+            end
+            if !hash[:best_score] || hash[:best_score] > snapshot.duration_ms
+              groups[group_name][:best_score] = snapshot.duration_ms
+            end
+          end
+        end
+        groups = groups.to_a
+        groups.sort_by! { |name, hash| hash[:worst_score] }
+        groups.reverse!
+        groups.map! { |name, hash| hash.merge(name: name) }
+        groups
+      end
+
+      def find_snapshots_group(group_name)
+        data = []
+        fetch_snapshots do |batch|
+          batch.each do |snapshot|
+            snapshot_group_name = default_snapshot_grouping(snapshot)
+            if group_name == snapshot_group_name
+              data << {
+                id: snapshot[:id],
+                duration: snapshot.duration_ms,
+                sql_count: snapshot[:sql_count],
+                timestamp: snapshot[:started_at],
+                custom_fields: snapshot[:custom_fields]
+              }
+            end
+          end
+        end
+        data.sort_by! { |s| s[:duration] }
+        data.reverse!
+        data
+      end
+
+      def load_snapshot(id)
+        raise NotImplementedError.new("load_snapshot is not implemented")
+      end
+
+      private
+
+      def default_snapshot_grouping(snapshot)
+        group_name = rails_route_from_path(snapshot[:request_path], snapshot[:request_method])
+        group_name ||= snapshot[:request_path]
+        "#{snapshot[:request_method]} #{group_name}"
+      end
+
+      def rails_route_from_path(path, method)
+        if defined?(Rails) && defined?(ActionController::RoutingError)
+          hash = Rails.application.routes.recognize_path(path, method: method)
+          if hash && hash[:controller] && hash[:action]
+            "#{hash[:controller]}##{hash[:action]}"
+          end
+        end
+      rescue ActionController::RoutingError
+        nil
+      end
     end
   end
 end
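Concrete stores now have four snapshot hooks to implement; the grouping and listing logic above is shared and only relies on `fetch_snapshots` yielding batches of page structs. A minimal sketch of a custom subclass (the class name and storage are hypothetical, only the hook names and signatures come from the code above, and the store's other abstract methods are omitted):

```ruby
# Hypothetical store keeping snapshots in a plain in-process array.
class ArraySnapshotStore < Rack::MiniProfiler::AbstractStore
  def initialize(_args = nil)
    @requests_seen = 0
    @snapshots = []
  end

  # Sample one request out of every `period` requests.
  def should_take_snapshot?(period)
    (@requests_seen += 1) % period == 0
  end

  # Keep only the slowest `config.snapshots_limit` page structs.
  def push_snapshot(page_struct, config)
    @snapshots << page_struct
    @snapshots = @snapshots.sort_by { |s| -s.duration_ms }.first(config.snapshots_limit)
  end

  # Yield snapshots in batches, as snapshot_groups_overview expects.
  def fetch_snapshots(batch_size: 200, &blk)
    @snapshots.each_slice(batch_size, &blk)
  end

  def load_snapshot(id)
    @snapshots.find { |s| s[:id] == id }
  end
end
```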
@@ -52,17 +52,21 @@ module Rack
         @expires_in_seconds = args.fetch(:expires_in) { EXPIRES_IN_SECONDS }

         @token1, @token2, @cycle_at = nil
+        @snapshots_cycle = 0
+        @snapshots = []

         initialize_locks
         initialize_cleanup_thread(args)
       end

       def initialize_locks
-        @token_lock = Mutex.new
-        @timer_struct_lock = Mutex.new
-        @user_view_lock = Mutex.new
-        @timer_struct_cache = {}
-        @user_view_cache = {}
+        @token_lock = Mutex.new
+        @timer_struct_lock = Mutex.new
+        @user_view_lock = Mutex.new
+        @snapshots_cycle_lock = Mutex.new
+        @snapshots_lock = Mutex.new
+        @timer_struct_cache = {}
+        @user_view_cache = {}
       end

       #FIXME: use weak ref, trouble it may be broken in 1.9 so need to use the 'ref' gem
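The snapshot counter, the snapshot list, and their two mutexes are plain in-process state, so each Ruby process using MemoryStore keeps its own snapshot list. The store itself is still selected and constructed the same way; for example (illustrative only):

```ruby
# MemoryStore is the default storage backend; selecting it explicitly looks
# like this. The snapshot state above is initialized empty and only grows
# once snapshots are pushed.
Rack::MiniProfiler.config.storage = Rack::MiniProfiler::MemoryStore
```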
@@ -135,6 +139,51 @@ module Rack

        end
      end
+
+      def should_take_snapshot?(period)
+        @snapshots_cycle_lock.synchronize do
+          @snapshots_cycle += 1
+          if @snapshots_cycle % period == 0
+            @snapshots_cycle = 0
+            true
+          else
+            false
+          end
+        end
+      end
+
+      def push_snapshot(page_struct, config)
+        @snapshots_lock.synchronize do
+          @snapshots << page_struct
+          @snapshots.sort_by! { |s| s.duration_ms }
+          @snapshots.reverse!
+          if @snapshots.size > config.snapshots_limit
+            @snapshots.slice!(-1)
+          end
+        end
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        @snapshots_lock.synchronize do
+          @snapshots.each_slice(batch_size) do |batch|
+            blk.call(batch)
+          end
+        end
+      end
+
+      def load_snapshot(id)
+        @snapshots_lock.synchronize do
+          @snapshots.find { |s| s[:id] == id }
+        end
+      end
+
+      private
+
+      # used in tests only
+      def wipe_snapshots_data
+        @snapshots_cycle = 0
+        @snapshots = []
+      end
    end
  end
end
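`should_take_snapshot?` is simple modulo sampling guarded by `@snapshots_cycle_lock`: every `period`-th call returns true and resets the counter. A standalone illustration of the counting behaviour (no gem code involved):

```ruby
# With period = 4, exactly every fourth call reports that a snapshot should
# be taken, and the counter starts over after each hit.
period = 4
cycle = 0
results = (1..8).map do
  cycle += 1
  if cycle % period == 0
    cycle = 0
    true
  else
    false
  end
end
p results  # => [false, false, false, true, false, false, false, true]
```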
@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+require 'digest'
+
 module Rack
   class MiniProfiler
     class RedisStore < AbstractStore
@@ -33,7 +35,7 @@ module Rack

       def set_unviewed(user, id)
         key = user_key(user)
-        if redis.exists(prefixed_id(id))
+        if redis.call([:exists, prefixed_id(id)]) == 1
           expire_at = Process.clock_gettime(Process::CLOCK_MONOTONIC).to_i + redis.ttl(prefixed_id(id))
           redis.zadd(key, expire_at, id)
         end
@@ -44,7 +46,7 @@ module Rack
         key = user_key(user)
         redis.del(key)
         ids.each do |id|
-          if redis.exists(prefixed_id(id))
+          if redis.call([:exists, prefixed_id(id)]) == 1
             expire_at = Process.clock_gettime(Process::CLOCK_MONOTONIC).to_i + redis.ttl(prefixed_id(id))
             redis.zadd(key, expire_at, id)
           end
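Both of these hunks swap the boolean-style `redis.exists` call for the raw command plus an integer comparison. In redis-rb, EXISTS replies with the number of keys that exist, and redis-rb 4.2 deprecated the old boolean behaviour of `Redis#exists`, so comparing the raw reply against 1 avoids the deprecation warning while still working on older clients. A quick check, assuming the redis gem and a local server:

```ruby
require 'redis'

redis = Redis.new
redis.set("mp-example-key", "1")

# Raw command form used in the diff: the reply is an integer count, not a boolean.
puts redis.call([:exists, "mp-example-key"]) == 1  # => true
puts redis.call([:exists, "missing-key"]) == 1     # => false
```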
@@ -108,6 +110,106 @@ unviewed_ids: #{get_unviewed_ids(user)}
         [key1, key2].compact
       end

+      COUNTER_LUA = <<~LUA
+        if redis.call("INCR", KEYS[1]) % ARGV[1] == 0 then
+          redis.call("DEL", KEYS[1])
+          return 1
+        else
+          return 0
+        end
+      LUA
+
+      COUNTER_LUA_SHA = Digest::SHA1.hexdigest(COUNTER_LUA)
+
+      def should_take_snapshot?(period)
+        1 == cached_redis_eval(
+          COUNTER_LUA,
+          COUNTER_LUA_SHA,
+          reraise: false,
+          keys: [snapshot_counter_key()],
+          argv: [period]
+        )
+      end
+
+      def push_snapshot(page_struct, config)
+        zset_key = snapshot_zset_key()
+        hash_key = snapshot_hash_key()
+
+        id = page_struct[:id]
+        score = page_struct.duration_ms
+        limit = config.snapshots_limit
+        bytes = Marshal.dump(page_struct)
+
+        lua = <<~LUA
+          local zset_key = KEYS[1]
+          local hash_key = KEYS[2]
+          local id = ARGV[1]
+          local score = tonumber(ARGV[2])
+          local bytes = ARGV[3]
+          local limit = tonumber(ARGV[4])
+          redis.call("ZADD", zset_key, score, id)
+          redis.call("HSET", hash_key, id, bytes)
+          if redis.call("ZCARD", zset_key) > limit then
+            local lowest_snapshot_id = redis.call("ZRANGE", zset_key, 0, 0)[1]
+            redis.call("ZREM", zset_key, lowest_snapshot_id)
+            redis.call("HDEL", hash_key, lowest_snapshot_id)
+          end
+        LUA
+        redis.eval(
+          lua,
+          keys: [zset_key, hash_key],
+          argv: [id, score, bytes, limit]
+        )
+      end
+
+      def fetch_snapshots(batch_size: 200, &blk)
+        zset_key = snapshot_zset_key()
+        hash_key = snapshot_hash_key()
+        iteration = 0
+        corrupt_snapshots = []
+        while true
+          ids = redis.zrange(
+            zset_key,
+            batch_size * iteration,
+            batch_size * iteration + batch_size - 1
+          )
+          break if ids.size == 0
+          batch = redis.mapped_hmget(hash_key, *ids).to_a
+          batch.map! do |id, bytes|
+            begin
+              Marshal.load(bytes)
+            rescue
+              corrupt_snapshots << id
+              nil
+            end
+          end
+          batch.compact!
+          blk.call(batch) if batch.size != 0
+          break if ids.size < batch_size
+          iteration += 1
+        end
+        if corrupt_snapshots.size > 0
+          redis.pipelined do
+            redis.zrem(zset_key, corrupt_snapshots)
+            redis.hdel(hash_key, corrupt_snapshots)
+          end
+        end
+      end
+
+      def load_snapshot(id)
+        hash_key = snapshot_hash_key()
+        bytes = redis.hget(hash_key, id)
+        begin
+          Marshal.load(bytes)
+        rescue
+          redis.pipelined do
+            redis.zrem(snapshot_zset_key(), id)
+            redis.hdel(hash_key, id)
+          end
+          nil
+        end
+      end
+
       private

       def user_key(user)
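In Redis, snapshots live in two structures: a sorted set of ids scored by `duration_ms` (used for ranking and eviction) and a hash of marshaled page structs keyed by id (the key helper methods are defined in the next hunk). That layout means the slowest snapshot can be read with one ZRANGE plus one HGET. An illustrative read, assuming redis-rb and the gem's default "MPRedisStore" key prefix:

```ruby
require 'redis'

redis    = Redis.new
zset_key = "MPRedisStore-mini-profiler-snapshots-zset"  # assumes the default prefix
hash_key = "MPRedisStore-mini-profiler-snapshots-hash"

# Highest score (longest duration) is the last element of the ascending zset.
slowest_id = redis.zrange(zset_key, -1, -1).first
snapshot   = Marshal.load(redis.hget(hash_key, slowest_id)) if slowest_id
```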
@@ -125,6 +227,38 @@ unviewed_ids: #{get_unviewed_ids(user)}
        end
      end

+      def snapshot_counter_key
+        @snapshot_counter_key ||= "#{@prefix}-mini-profiler-snapshots-counter"
+      end
+
+      def snapshot_zset_key
+        @snapshot_zset_key ||= "#{@prefix}-mini-profiler-snapshots-zset"
+      end
+
+      def snapshot_hash_key
+        @snapshot_hash_key ||= "#{@prefix}-mini-profiler-snapshots-hash"
+      end
+
+      def cached_redis_eval(script, script_sha, reraise: true, argv: [], keys: [])
+        begin
+          redis.evalsha(script_sha, argv: argv, keys: keys)
+        rescue ::Redis::CommandError => e
+          if e.message.start_with?('NOSCRIPT')
+            redis.eval(script, argv: argv, keys: keys)
+          else
+            raise e if reraise
+          end
+        end
+      end
+
+      # only used in tests
+      def wipe_snapshots_data
+        redis.pipelined do
+          redis.del(snapshot_counter_key())
+          redis.del(snapshot_zset_key())
+          redis.del(snapshot_hash_key())
+        end
+      end
    end
  end
end
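`cached_redis_eval` is the standard EVALSHA-first pattern: try the script by its SHA1 (cheap, no script body over the wire) and fall back to EVAL, which also loads the script into Redis' cache, only when the server reports NOSCRIPT. A generic standalone sketch of the same pattern, assuming redis-rb:

```ruby
require 'digest'
require 'redis'

SCRIPT = 'return redis.call("INCR", KEYS[1])'
SHA    = Digest::SHA1.hexdigest(SCRIPT)

# Try the cached script first; only send the full script on NOSCRIPT.
def incr_via_script(redis, key)
  redis.evalsha(SHA, keys: [key])
rescue Redis::CommandError => e
  raise unless e.message.start_with?('NOSCRIPT')
  redis.eval(SCRIPT, keys: [key])
end

puts incr_via_script(Redis.new, "mp-example-counter")
```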
@@ -10,6 +10,53 @@ module Rack
       # :has_many TimerStruct::Sql children
       # :has_many TimerStruct::Custom children
       class Page < TimerStruct::Base
+        class << self
+          def from_hash(hash)
+            hash = symbolize_hash(hash)
+            if hash.key?(:custom_timing_names)
+              hash[:custom_timing_names] = []
+            end
+            hash.delete(:started_formatted)
+            if hash.key?(:duration_milliseconds)
+              hash[:duration_milliseconds] = 0
+            end
+            page = self.allocate
+            page.instance_variable_set(:@attributes, hash)
+            page
+          end
+
+          private
+
+          def symbolize_hash(hash)
+            new_hash = {}
+            hash.each do |k, v|
+              sym_k = String === k ? k.to_sym : k
+              if Hash === v
+                new_hash[sym_k] = symbolize_hash(v)
+              elsif Array === v
+                new_hash[sym_k] = symbolize_array(v)
+              else
+                new_hash[sym_k] = v
+              end
+            end
+            new_hash
+          end
+
+          def symbolize_array(array)
+            array.map do |item|
+              if Array === item
+                symbolize_array(item)
+              elsif Hash === item
+                symbolize_hash(item)
+              else
+                item
+              end
+            end
+          end
+        end
+
+        attr_reader :attributes
+
         def initialize(env)
           timer_id = MiniProfiler.generate_id
           page_name = env['PATH_INFO']
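`from_hash` rebuilds a page struct from previously serialized data: it bypasses `initialize` with `allocate`, re-symbolizes string keys (as produced by JSON round trips), and clears the fields that only make sense on a live profile (`started_formatted`, `duration_milliseconds`, `custom_timing_names`). A hypothetical round trip, assuming the gem is loaded and using the full constant path from the gem's layout:

```ruby
require 'json'

# Invented attribute values; a real snapshot hash has many more keys.
json = { "id" => "abc123", "name" => "GET /orders", "started_formatted" => "/Date(0)/" }.to_json
page = Rack::MiniProfiler::TimerStruct::Page.from_hash(JSON.parse(json))

page.attributes[:id]                      # => "abc123" (keys are symbols again)
page.attributes.key?(:started_formatted)  # => false (stripped by from_hash)
```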
@@ -39,8 +86,11 @@ module Rack
             executed_scalars: 0,
             executed_non_queries: 0,
             custom_timing_names: [],
-            custom_timing_stats: {}
+            custom_timing_stats: {},
+            custom_fields: {}
           )
+          self[:request_method] = env['REQUEST_METHOD']
+          self[:request_path] = env['PATH_INFO']
           name = "#{env['REQUEST_METHOD']} http://#{env['SERVER_NAME']}:#{env['SERVER_PORT']}#{env['SCRIPT_NAME']}#{env['PATH_INFO']}"
           self[:root] = TimerStruct::Request.createRoot(name, self)
         end
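The two new fields are exactly what `default_snapshot_grouping` in AbstractStore reads when it builds group names. A standalone sketch of the fallback case (no Rails route recognition), mirroring the logic shown earlier:

```ruby
# Mirrors AbstractStore#default_snapshot_grouping when Rails isn't available:
# the group key is simply the HTTP method plus the raw request path.
def group_name(snapshot)
  "#{snapshot[:request_method]} #{snapshot[:request_path]}"
end

p group_name(request_method: "GET", request_path: "/orders/42")
# => "GET /orders/42"
```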
@@ -71,7 +121,7 @@ module Rack

         def extra_json
           {
-            started: '/Date(%d)/' % @attributes[:started_at],
+            started_formatted: '/Date(%d)/' % @attributes[:started_at],
             duration_milliseconds: @attributes[:root][:duration_milliseconds],
             custom_timing_names: @attributes[:custom_timing_stats].keys.sort
           }
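The rename keeps the client-facing JSON key (`started_formatted`) distinct from the raw `started_at` attribute, and it matches the key that `Page.from_hash` strips when a snapshot is rebuilt from serialized data. Roughly, the extra JSON now has this shape (values invented for illustration):

```ruby
{
  started_formatted: '/Date(1589900000000)/',
  duration_milliseconds: 153.4,
  custom_timing_names: ["redis", "sql"]
}
```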