scout_apm 1.4.6 → 1.5.0.pre

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.markdown +9 -0
  3. data/lib/scout_apm/agent/reporting.rb +8 -6
  4. data/lib/scout_apm/agent.rb +10 -6
  5. data/lib/scout_apm/background_job_integrations/sidekiq.rb +23 -11
  6. data/lib/scout_apm/call_set.rb +61 -0
  7. data/lib/scout_apm/config.rb +2 -1
  8. data/lib/scout_apm/environment.rb +12 -7
  9. data/lib/scout_apm/histogram.rb +124 -0
  10. data/lib/scout_apm/instruments/.DS_Store +0 -0
  11. data/lib/scout_apm/instruments/action_controller_rails_2.rb +1 -0
  12. data/lib/scout_apm/instruments/action_controller_rails_3_rails4.rb +1 -0
  13. data/lib/scout_apm/instruments/delayed_job.rb +1 -0
  14. data/lib/scout_apm/instruments/process/process_memory.rb +1 -1
  15. data/lib/scout_apm/instruments/sinatra.rb +1 -1
  16. data/lib/scout_apm/job_record.rb +76 -0
  17. data/lib/scout_apm/layaway.rb +4 -1
  18. data/lib/scout_apm/layaway_file.rb +4 -4
  19. data/lib/scout_apm/layer.rb +14 -4
  20. data/lib/scout_apm/layer_converters/converter_base.rb +30 -0
  21. data/lib/scout_apm/layer_converters/depth_first_walker.rb +36 -0
  22. data/lib/scout_apm/layer_converters/error_converter.rb +20 -0
  23. data/lib/scout_apm/layer_converters/job_converter.rb +84 -0
  24. data/lib/scout_apm/layer_converters/metric_converter.rb +45 -0
  25. data/lib/scout_apm/layer_converters/request_queue_time_converter.rb +60 -0
  26. data/lib/scout_apm/layer_converters/slow_job_converter.rb +88 -0
  27. data/lib/scout_apm/layer_converters/slow_request_converter.rb +111 -0
  28. data/lib/scout_apm/metric_meta.rb +9 -0
  29. data/lib/scout_apm/metric_set.rb +44 -0
  30. data/lib/scout_apm/reporter.rb +12 -5
  31. data/lib/scout_apm/serializers/jobs_serializer_to_json.rb +28 -0
  32. data/lib/scout_apm/serializers/metrics_to_json_serializer.rb +54 -0
  33. data/lib/scout_apm/serializers/payload_serializer.rb +5 -3
  34. data/lib/scout_apm/serializers/payload_serializer_to_json.rb +9 -4
  35. data/lib/scout_apm/serializers/slow_jobs_serializer_to_json.rb +29 -0
  36. data/lib/scout_apm/slow_item_set.rb +80 -0
  37. data/lib/scout_apm/slow_job_policy.rb +29 -0
  38. data/lib/scout_apm/slow_job_record.rb +33 -0
  39. data/lib/scout_apm/slow_transaction.rb +0 -22
  40. data/lib/scout_apm/stackprof_tree_collapser.rb +7 -8
  41. data/lib/scout_apm/store.rb +55 -35
  42. data/lib/scout_apm/tracked_request.rb +67 -10
  43. data/lib/scout_apm/utils/active_record_metric_name.rb +13 -0
  44. data/lib/scout_apm/utils/backtrace_parser.rb +31 -0
  45. data/lib/scout_apm/utils/fake_stack_prof.rb +1 -1
  46. data/lib/scout_apm/utils/sql_sanitizer.rb +6 -0
  47. data/lib/scout_apm/version.rb +1 -1
  48. data/lib/scout_apm.rb +25 -5
  49. data/test/unit/histogram_test.rb +93 -0
  50. data/test/unit/serializers/payload_serializer_test.rb +5 -5
  51. data/test/unit/{slow_transaction_set_test.rb → slow_item_set_test.rb} +8 -8
  52. data/test/unit/slow_job_policy_test.rb +55 -0
  53. metadata +30 -9
  54. data/lib/scout_apm/layer_converter.rb +0 -222
  55. data/lib/scout_apm/request_queue_time.rb +0 -57
  56. data/lib/scout_apm/slow_transaction_set.rb +0 -67
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: d09339b4d51454f56825e72d7ae8aa53a8aca5a4
-  data.tar.gz: d50de440976b8f060e5365838941022807c31f0d
+  metadata.gz: 9392c2916f2f51cd4d8f6deb672069f15f3d8e68
+  data.tar.gz: dd764e893e93e818a22c487a0c986701153160eb
 SHA512:
-  metadata.gz: 3884aee4911d383138d928c2afd4aa4c799232c53bcb6e6d91e067963d6992cc0bb3d97851da4badd0e32cda277a12ab1f4f871830c8011c8eaa25e327272202
-  data.tar.gz: 7b9711b230f8804a334339760955f705b879f3dfbaf5abdc485dd25e5ab720096ec87da11e162cca023e13956b472d1d0eaf661d333bc26c2e85475e6dd39964
+  metadata.gz: 8421818c942298451ee690625cb35670088740f2862ecea8d2fceb3b4cc1cb7f15439497e3b2fede3bb52cab4756e0e48f64d30c8fdb71bbafe7b33b577ed8a6
+  data.tar.gz: 3486c303a0509ea19042ffba56c9045329a8e91ea25181a7b0fc4817b79468853ef7d1b5608813923e488f373cf494546d0c2b842cf7b318b6693118173e0f94
data/CHANGELOG.markdown CHANGED
@@ -1,7 +1,16 @@
+# 1.5.0
+
+* Background Job instrumentation for Sidekiq and Sidekiq-backed ActiveJob
+* Collecting backtraces on n+1 calls
+
 # 1.4.6
 
 * Defend against a nil
 
+# 1.5.0
+
+* Background Job instrumentation for Sidekiq and Sidekiq-backed ActiveJob
+
 # 1.4.5
 
 * Instrument Elasticsearch
data/lib/scout_apm/agent/reporting.rb CHANGED
@@ -38,6 +38,9 @@ module ScoutApm
     def deliver_period(reporting_period)
       metrics = reporting_period.metrics_payload
       slow_transactions = reporting_period.slow_transactions_payload
+      jobs = reporting_period.jobs
+      slow_jobs = reporting_period.slow_jobs_payload
+
       metadata = {
         :app_root => ScoutApm::Environment.instance.root.to_s,
         :unique_id => ScoutApm::Utils::UniqueId.simple,
@@ -49,13 +52,12 @@ module ScoutApm
 
       log_deliver(metrics, slow_transactions, metadata)
 
-      payload = ScoutApm::Serializers::PayloadSerializer.serialize(metadata, metrics, slow_transactions)
-      response = reporter.report(payload, headers)
-      unless response && response.is_a?(Net::HTTPSuccess)
-        logger.warn "Error on checkin to #{reporter.uri.to_s}: #{response.inspect}"
-      end
+      payload = ScoutApm::Serializers::PayloadSerializer.serialize(metadata, metrics, slow_transactions, jobs, slow_jobs)
+      logger.debug("Payload: #{payload}")
+
+      reporter.report(payload, headers)
     rescue => e
-      logger.warn "Error on checkin to #{reporter.uri.to_s}"
+      logger.warn "Error on checkin"
       logger.info e.message
       logger.debug e.backtrace
     end
data/lib/scout_apm/agent.rb CHANGED
@@ -18,6 +18,7 @@ module ScoutApm
     attr_accessor :options # options passed to the agent when +#start+ is called.
     attr_accessor :metric_lookup # Hash used to lookup metric ids based on their name and scope
     attr_reader :slow_request_policy
+    attr_reader :slow_job_policy
 
     # All access to the agent is thru this class method to ensure multiple Agent instances are not initialized per-Ruby process.
     def self.instance(options = {})
@@ -31,6 +32,7 @@ module ScoutApm
       @started = false
       @options ||= options
       @config = ScoutApm::Config.new(options[:config_path])
+      @slow_job_policy = ScoutApm::SlowJobPolicy.new
 
       @store = ScoutApm::Store.new
       @layaway = ScoutApm::Layaway.new
@@ -78,7 +80,7 @@ module ScoutApm
 
       if defined?(::ScoutRails)
         logger.warn "ScoutAPM is incompatible with the old Scout Rails plugin. Please remove scout_rails from your Gemfile"
-        return false
+        return false unless force?
       end
 
       true
@@ -108,15 +110,17 @@ module ScoutApm
 
       app_server_load_hook
 
+      if environment.background_job_integration
+        environment.background_job_integration.install
+        logger.info "Installed Background Job Integration [#{environment.background_job_name}]"
+      end
+
       # start_background_worker? is true on non-forking servers, and directly
       # starts the background worker. On forking servers, a server-specific
       # hook is inserted to start the background worker after forking.
       if start_background_worker?
         start_background_worker
         logger.info "Scout Agent [#{ScoutApm::VERSION}] Initialized"
-      elsif environment.background_job_integration
-        environment.background_job_integration.install
-        logger.info "Scout Agent [#{ScoutApm::VERSION}] loaded in [#{environment.background_job_name}] master process. Monitoring will start after background job framework forks its workers."
       else
         environment.app_server_integration.install
         logger.info "Scout Agent [#{ScoutApm::VERSION}] loaded in [#{environment.app_server}] master process. Monitoring will start after server forks its workers."
@@ -244,7 +248,7 @@ module ScoutApm
         install_instrument(ScoutApm::Instruments::ActionControllerRails3Rails4)
         install_instrument(ScoutApm::Instruments::MiddlewareSummary)
         install_instrument(ScoutApm::Instruments::RailsRouter)
-      when :sinatra then install_instrument(ScoutApm::Instruments::Sinatra)
+      # when :sinatra then install_instrument(ScoutApm::Instruments::Sinatra)
       end
     end
 
@@ -272,7 +276,7 @@ module ScoutApm
 
       # Allow users to skip individual instruments via the config file
       instrument_short_name = instrument_klass.name.split("::").last
-      if (config.value("disabled_instruments") || []).include?(instrument_short_name)
+      if config.value("disabled_instruments").include?(instrument_short_name)
        logger.info "Skipping Disabled Instrument: #{instrument_short_name} - To re-enable, change `disabled_instruments` key in scout_apm.yml"
        return
      end
data/lib/scout_apm/background_job_integrations/sidekiq.rb CHANGED
@@ -12,7 +12,7 @@ module ScoutApm
    end
 
    def forking?
-      true
+      false
    end
 
    def install
@@ -20,30 +20,41 @@ module ScoutApm
      SidekiqMiddleware.class_eval do
        include ScoutApm::Tracer
      end
+
      ::Sidekiq.configure_server do |config|
        config.server_middleware do |chain|
          chain.add SidekiqMiddleware
        end
      end
+
      require 'sidekiq/processor' # sidekiq v4 has not loaded this file by this point
+
      ::Sidekiq::Processor.class_eval do
-        old = instance_method(:initialize)
-        define_method(:initialize) do |boss|
-          ScoutApm::Agent.instance.start_background_worker
-          old.bind(self).call(boss)
+        def initialize_with_scout(boss)
+          ::ScoutApm::Agent.instance.start_background_worker unless ::ScoutApm::Agent.instance.background_worker_running?
+          initialize_without_scout(boss)
        end
+
+        alias_method :initialize_without_scout, :initialize
+        alias_method :initialize, :initialize_with_scout
      end
    end
  end
 
  class SidekiqMiddleware
    def call(worker, msg, queue)
-      msg_args = msg["args"].first
-      job_class = msg_args["job_class"]
-      latency = (Time.now.to_f - (msg['enqueued_at'] || msg['created_at'])) * 1000
+      job_class = msg["class"] # TODO: Validate this across different versions of Sidekiq
+      if job_class == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" && msg.has_key?("wrapped")
+        job_class = msg["wrapped"]
+      end
+
+      latency = (Time.now.to_f - (msg['enqueued_at'] || msg['created_at']))
 
-      ScoutApm::Agent.instance.store.track_one!("Queue", queue, 0, {:extra_metrics => {:latency => latency}})
      req = ScoutApm::RequestManager.lookup
+      req.job!
+      req.annotate_request(:queue_latency => latency)
+
+      req.start_layer( ScoutApm::Layer.new("Queue", queue) )
      req.start_layer( ScoutApm::Layer.new("Job", job_class) )
 
      begin
@@ -51,9 +62,10 @@ module ScoutApm
      rescue
        req.error!
        raise
-      ensure
-        req.stop_layer
      end
+    ensure
+      req.stop_layer # Job
+      req.stop_layer # Queue
    end
  end
 end
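The rewritten install hook drops the earlier instance_method/define_method patch in favor of the classic alias_method chaining pattern, so the agent no longer holds an UnboundMethod and the original initializer stays reachable as initialize_without_scout; the added background_worker_running? guard keeps a process from starting a second background worker when Sidekiq builds several Processor objects. A minimal sketch of the pattern outside Sidekiq, using hypothetical class and method names (not part of scout_apm or Sidekiq):

# Hypothetical stand-in class; the real target is Sidekiq::Processor.
class FakeProcessor
  def initialize(boss)
    @boss = boss
  end
end

FakeProcessor.class_eval do
  # New method that runs the hook, then delegates to the original initializer.
  def initialize_with_hook(boss)
    puts "agent hook runs before the original initialize"
    initialize_without_hook(boss)
  end

  # Chain: keep the original under a new name, then point the public name at the wrapper.
  alias_method :initialize_without_hook, :initialize
  alias_method :initialize, :initialize_with_hook
end

FakeProcessor.new(:some_boss) # prints the hook message, then runs the original initialize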
data/lib/scout_apm/call_set.rb ADDED
@@ -0,0 +1,61 @@
+module ScoutApm
+  class CallSet
+
+    N_PLUS_ONE_MAGIC_NUMBER = 5 # Fetch backtraces on this number of calls to a layer. The caller data is only collected on this call to limit overhead.
+    N_PLUS_ONE_TIME_THRESHOLD = 150/1000.0 # Minimum time in seconds before we start performing any work. This is to prevent doing a lot of work on already fast calls.
+
+    attr_reader :call_count
+
+    def initialize
+      @items = [] # An array of Layer descriptions that are associated w/a single Layer name (ex: User/find). Note this may contain nil items.
+      @grouped_items = Hash.new { |h, k| h[k] = [] } # items groups by their normalized name since multiple layers could have the same layer name.
+      @call_count = 0
+      @captured = false # cached for performance
+      @start_time = Time.now
+      @past_start_time = false # cached for performance
+    end
+
+    def update!(item = nil)
+      if @captured # No need to do any work if we've already captured a backtrace.
+        return
+      end
+      @call_count += 1
+      @items << item
+      if @grouped_items.any? # lazy grouping as normalizing items can be expensive.
+        @grouped_items[unique_name_for(item)] << item
+      end
+    end
+
+    # Limit our workload if time across this set of calls is small.
+    def past_time_threshold?
+      return true if @past_time_threshold # no need to check again once past
+      @past_time_threshold = (Time.now-@start_time) >= N_PLUS_ONE_TIME_THRESHOLD
+    end
+
+    # We're selective on capturing a backtrace for two reasons:
+    # * Grouping ActiveRecord calls requires us to sanitize the SQL. This isn't cheap.
+    # * Capturing backtraces isn't cheap.
+    def capture_backtrace?
+      if !@captured && @call_count >= N_PLUS_ONE_MAGIC_NUMBER && past_time_threshold? && at_magic_number?
+        @captured = true
+      end
+    end
+
+    def at_magic_number?
+      grouped_items[unique_name_for(@items.last)].size >= N_PLUS_ONE_MAGIC_NUMBER
+    end
+
+    def grouped_items
+      if @grouped_items.any?
+        @grouped_items
+      else
+        @grouped_items.merge!(@items.group_by { |item| unique_name_for(item) })
+      end
+    end
+
+    # Determine this items' "hash key"
+    def unique_name_for(item)
+      item.to_s
+    end
+  end
+end
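CallSet is the throttle behind the new n+1 backtrace collection: nothing is captured until the same normalized call has been seen N_PLUS_ONE_MAGIC_NUMBER (5) times and at least N_PLUS_ONE_TIME_THRESHOLD (150 ms) has elapsed since the first call, and after one capture the set goes quiet. A rough driver showing when capture_backtrace? flips to true, assuming scout_apm is loaded; the real wiring lives in the tracked-request/layer code, and the item string here is made up:

call_set = ScoutApm::CallSet.new

10.times do
  call_set.update!("SQL#User/find")   # same normalized name each call -> looks like n+1
  sleep 0.02                          # let the 150 ms time threshold pass

  if call_set.capture_backtrace?
    # fires exactly once, when both the call-count and the time thresholds are crossed
    puts "n+1 suspected after #{call_set.call_count} calls - capture a backtrace here"
  end
end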
data/lib/scout_apm/config.rb CHANGED
@@ -27,7 +27,8 @@ module ScoutApm
       'stackprof_interval' => 20000, # microseconds, 1000 = 1 millisecond, so 20k == 20 milliseconds
       'uri_reporting' => 'full_path',
       'report_format' => 'json',
-      'disabled_instruments' => []
+      'disabled_instruments' => [],
+      'enable_background_jobs' => true,
     }.freeze
 
     def initialize(config_path = nil)
data/lib/scout_apm/environment.rb CHANGED
@@ -25,7 +25,7 @@ module ScoutApm
 
     BACKGROUND_JOB_INTEGRATIONS = [
       ScoutApm::BackgroundJobIntegrations::Sidekiq.new,
-      ScoutApm::BackgroundJobIntegrations::DelayedJob.new
+      # ScoutApm::BackgroundJobIntegrations::DelayedJob.new
     ]
 
     FRAMEWORK_INTEGRATIONS = [
@@ -130,9 +130,11 @@ module ScoutApm
     end
 
     def background_job_integration
-      @background_job_integration ||= BACKGROUND_JOB_INTEGRATIONS.detect {|integration| integration.present?}
-      #### Temporary Disable
-      nil
+      if Agent.instance.config.value("enable_background_jobs", !Agent.instance.config.config_file_exists?)
+        @background_job_integration ||= BACKGROUND_JOB_INTEGRATIONS.detect {|integration| integration.present?}
+      else
+        nil
+      end
     end
 
     def background_job_name
@@ -158,11 +160,15 @@ module ScoutApm
     end
 
     def ruby_19?
-      defined?(RUBY_ENGINE) && RUBY_ENGINE == "ruby" && RUBY_VERSION.match(/^1\.9/)
+      @ruby_19 ||= defined?(RUBY_ENGINE) && RUBY_ENGINE == "ruby" && RUBY_VERSION.match(/^1\.9/)
     end
 
     def ruby_187?
-      defined?(RUBY_VERSION) && RUBY_VERSION.match(/^1\.8\.7/)
+      @ruby_187 ||= defined?(RUBY_VERSION) && RUBY_VERSION.match(/^1\.8\.7/)
+    end
+
+    def ruby_2?
+      @ruby_2 ||= defined?(RUBY_VERSION) && RUBY_VERSION.match(/^2/)
     end
 
     ### framework checks
@@ -170,6 +176,5 @@ module ScoutApm
     def sinatra?
       framework_integration.name == :sinatra
     end
-
   end # class Environemnt
 end
data/lib/scout_apm/histogram.rb ADDED
@@ -0,0 +1,124 @@
+module ScoutApm
+  HistogramBin = Struct.new(:value, :count)
+
+  class NumericHistogram
+    attr_reader :max_bins
+    attr_reader :bins
+    attr_accessor :total
+
+    def initialize(max_bins)
+      @max_bins = max_bins
+      @bins = []
+      @total = 0
+    end
+
+    def add(new_value)
+      @total += 1
+      create_new_bin(new_value.to_f)
+      trim
+    end
+
+    def quantile(q)
+      return 0 if total == 0
+
+      if q > 1
+        q = q / 100.0
+      end
+
+      count = q.to_f * total.to_f
+
+      bins.each_with_index do |bin, index|
+        count -= bin.count
+
+        if count <= 0
+          return bin.value
+        end
+      end
+
+      # If we fell through, we were asking for the last (max) value
+      return bins[-1].value
+    end
+
+    def mean
+      if total == 0
+        return 0
+      end
+
+      sum = bins.inject(0) { |s, bin| s + (bin.value * bin.count) }
+      return sum.to_f / total.to_f
+    end
+
+    def combine!(other)
+      @bins = (other.bins + @bins).sort_by {|b| b.value }
+      @total += other.total
+      trim
+      self
+    end
+
+    def as_json
+      bins.map{|b| [b.value, b.count]}
+    end
+
+    private
+
+    # If we exactly match an existing bin, add to it, otherwise create a new bin holding a count for the new value.
+    def create_new_bin(new_value)
+      bins.each_with_index do |bin, index|
+        # If it matches exactly, increment the bin's count
+        if bin.value == new_value
+          bin.count += 1
+          return
+        end
+
+        # We've gone one bin too far, so insert before the current bin.
+        if bin.value > new_value
+          # Insert at this index
+          new_bin = HistogramBin.new(new_value, 1)
+          bins.insert(index, new_bin)
+          return
+        end
+      end
+
+      # If we get to here, the bin needs to be added to the end.
+      bins << HistogramBin.new(new_value, 1)
+    end
+
+    def trim
+      while bins.length > max_bins
+        trim_one
+      end
+    end
+
+    def trim_one
+      minDelta = Float::MAX
+      minDeltaIndex = 0
+
+      # Which two bins should we merge?
+      bins.each_with_index do |_, index|
+        next if index == 0
+
+        delta = bins[index].value - bins[index - 1].value
+        if delta < minDelta
+          minDelta = delta
+          minDeltaIndex = index
+        end
+      end
+
+      # Create the merged bin with summed count, and weighted value
+      mergedCount = bins[minDeltaIndex - 1].count + bins[minDeltaIndex].count
+      mergedValue = (
+        bins[minDeltaIndex - 1].value * bins[minDeltaIndex - 1].count +
+        bins[minDeltaIndex].value * bins[minDeltaIndex].count
+      ) / mergedCount
+
+      mergedBin = HistogramBin.new(mergedValue, mergedCount)
+
+      # Remove the two bins we just merged together, then add the merged one
+      bins.slice!(minDeltaIndex - 1, 2)
+      bins.insert(minDeltaIndex - 1, mergedBin)
+    rescue => e
+      ScoutApm::Agent.instance.logger.info("Error in NumericHistogram#trim_one. #{e.message}, #{e.backtrace}, #{self.inspect}")
+      raise
+    end
+  end
+end
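NumericHistogram is a streaming histogram: it keeps at most max_bins bins and, when a new value would push it over that limit, merges the two closest bins into one count-weighted bin, so job runtimes can be summarized without retaining every sample. A usage sketch based only on the methods defined above (the sample values are made up):

histogram = ScoutApm::NumericHistogram.new(50)
[12, 15, 11, 240, 13, 14, 500, 12].each { |ms| histogram.add(ms) }

histogram.total          # => 8 samples recorded
histogram.mean           # => arithmetic mean of the samples
histogram.quantile(50)   # => approximate median (percentile form)
histogram.quantile(0.95) # => same API, fraction form

# Histograms from separate reporting periods can be merged:
other = ScoutApm::NumericHistogram.new(50)
other.add(18)
histogram.combine!(other).total # => 9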
data/lib/scout_apm/instruments/.DS_Store CHANGED
Binary file
data/lib/scout_apm/instruments/action_controller_rails_2.rb CHANGED
@@ -63,6 +63,7 @@ module ScoutApm
       req.context.add_user(:ip => request.remote_ip)
       req.set_headers(request.headers)
       req.start_layer( ScoutApm::Layer.new("Controller", "#{controller_path}/#{action_name}") )
+      req.web!
 
       begin
         perform_action_without_scout_instruments(*args, &block)
data/lib/scout_apm/instruments/action_controller_rails_3_rails4.rb CHANGED
@@ -63,6 +63,7 @@ module ScoutApm
       req.annotate_request(:uri => path)
       req.context.add_user(:ip => request.remote_ip)
       req.set_headers(request.headers)
+      req.web!
 
       req.start_layer( ScoutApm::Layer.new("Controller", "#{controller_path}/#{action_name}") )
       begin
data/lib/scout_apm/instruments/delayed_job.rb CHANGED
@@ -33,6 +33,7 @@ module ScoutApm
 
       ScoutApm::Agent.instance.store.track_one!("Queue", queue, 0, {:extra_metrics => {:latency => latency}})
       req = ScoutApm::RequestManager.lookup
+      req.job!
       req.start_layer( ScoutApm::Layer.new("Job", scout_method_name) )
 
       begin
data/lib/scout_apm/instruments/process/process_memory.rb CHANGED
@@ -26,7 +26,7 @@ module ScoutApm
       get_mem_from_procfile
     when /darwin9/ # 10.5
       get_mem_from_shell("ps -o rsz")
-    when /darwin1[0123]/ # 10.6 - 10.10
+    when /darwin1[01234]/ # 10.6 - 10.11
       get_mem_from_shell("ps -o rss")
     else
       0 # What default? was nil.
data/lib/scout_apm/instruments/sinatra.rb CHANGED
@@ -34,7 +34,7 @@ module ScoutApm
       req = ScoutApm::RequestManager.lookup
       req.annotate_request(:uri => @request.path_info)
       req.context.add_user(:ip => @request.ip)
-      req.set_headers(request.headers)
+      # req.set_headers(env) # TODO: Parse headers with name HTTP_*
 
       req.start_layer( ScoutApm::Layer.new("Controller", scout_controller_action) )
       begin
data/lib/scout_apm/job_record.rb ADDED
@@ -0,0 +1,76 @@
+# Records details about all runs of a given job.
+#
+# Contains:
+#   Queue Name
+#   Job Name
+#   Job Runtime - histogram
+#   Metrics collected during the run (Database, HTTP, View, etc)
+module ScoutApm
+  class JobRecord
+    attr_reader :queue_name
+    attr_reader :job_name
+    attr_reader :total_time
+    attr_reader :exclusive_time
+    attr_reader :errors
+    attr_reader :metric_set
+
+    def initialize(queue_name, job_name, total_time, exclusive_time, errors, metrics)
+      @queue_name = queue_name
+      @job_name = job_name
+
+      @total_time = NumericHistogram.new(50)
+      @total_time.add(total_time)
+
+      @exclusive_time = NumericHistogram.new(50)
+      @exclusive_time.add(exclusive_time)
+
+      @errors = errors.to_i
+
+      @metric_set = MetricSet.new
+      @metric_set.absorb_all(metrics)
+    end
+
+    # Modifies self and returns self, after merging in `other`.
+    def combine!(other)
+      same_job = queue_name == other.queue_name && job_name == other.job_name
+      raise "Mismatched Merge of Background Job" unless same_job
+
+      @errors += other.errors
+      @metric_set = metric_set.combine!(other.metric_set)
+      @total_time.combine!(other.total_time)
+      @exclusive_time.combine!(other.exclusive_time)
+
+      self
+    end
+
+    def run_count
+      total_time.total
+    end
+
+    def metrics
+      metric_set.metrics
+    end
+
+
+    ######################
+    # Hash Key interface
+    ######################
+
+    def ==(o)
+      self.eql?(o)
+    end
+
+    def hash
+      h = queue_name.downcase.hash
+      h ^= job_name.downcase.hash
+      h
+    end
+
+    def eql?(o)
+      self.class == o.class &&
+        queue_name.downcase == o.queue_name.downcase &&
+        job_name.downcase == o.job_name.downcase
+    end
+  end
+end
+
data/lib/scout_apm/layaway.rb CHANGED
@@ -18,7 +18,10 @@ module ScoutApm
       new_req = new_val.request_count
       ScoutApm::Agent.instance.logger.debug("Merging Two reporting periods (#{old_val.timestamp.to_s}, #{new_val.timestamp.to_s}): old req #{old_req}, new req #{new_req}")
 
-      old_val.merge_metrics!(new_val.metrics_payload).merge_slow_transactions!(new_val.slow_transactions)
+      old_val.
+        merge_metrics!(new_val.metrics_payload).
+        merge_slow_transactions!(new_val.slow_transactions).
+        merge_jobs!(new_val.jobs)
     }
 
     ScoutApm::Agent.instance.logger.debug("AddReportingPeriod: AfterMerge Timestamps: #{existing_data.keys.map(&:to_s).inspect}")
data/lib/scout_apm/layaway_file.rb CHANGED
@@ -16,9 +16,9 @@ module ScoutApm
         return nil
       end
       Marshal.load(dump)
-    rescue ArgumentError, TypeError => e
-      ScoutApm::Agent.instance.logger.debug("Error loading data from layaway file: #{e.inspect}")
-      ScoutApm::Agent.instance.logger.debug(e.backtrace.inspect)
+    rescue NameError, ArgumentError, TypeError => e
+      ScoutApm::Agent.instance.logger.info("Unable to load data from Layaway file, resetting.")
+      ScoutApm::Agent.instance.logger.debug("#{e.message}, #{e.backtrace.join("\n\t")}")
       nil
     end
 
@@ -37,7 +37,7 @@ module ScoutApm
       end
     end
   rescue Errno::ENOENT, Exception => e
-    ScoutApm::Agent.instance.logger.error("Unable to access the layaway file [#{e.message}]. " +
+    ScoutApm::Agent.instance.logger.error("Unable to access the layaway file [#{e.class} - #{e.message}]. " +
       "The user running the app must have read & write access. " +
      "Change the path by setting the `data_file` key in scout_apm.yml"
     )
data/lib/scout_apm/layer.rb CHANGED
@@ -30,6 +30,7 @@ module ScoutApm
     # backtrace of where it occurred.
     attr_reader :backtrace
 
+    BACKTRACE_CALLER_LIMIT = 30 # maximum number of lines to send thru for backtrace analysis
 
     def initialize(type, name, start_time = Time.now)
       @type = type
@@ -66,10 +67,19 @@ module ScoutApm
       "#{type}/#{name}"
     end
 
-    def store_backtrace(bt)
-      return unless bt.is_a? Array
-      return unless bt.length > 0
-      @backtrace = bt
+    def capture_backtrace!
+      ScoutApm::Agent.instance.logger.debug { "Capturing Backtrace for Layer [#{type}/#{name}]" }
+      @backtrace = caller_array
+    end
+
+    # In Ruby 2.0+, we can pass the range directly to the caller to reduce the memory footprint.
+    def caller_array
+      # omits the first several callers which are in the ScoutAPM stack.
+      if ScoutApm::Environment.instance.ruby_2?
+        caller(3...BACKTRACE_CALLER_LIMIT)
+      else
+        caller[3...BACKTRACE_CALLER_LIMIT]
+      end
    end
 
    ######################################
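capture_backtrace! replaces the old store_backtrace(bt) setter: the layer now grabs its own caller frames, and on Ruby 2.0+ it hands a Range straight to Kernel#caller so only the needed slice of the stack is built, instead of materializing the whole backtrace and slicing it. A small illustrative sketch of the two strategies, with made-up method names and a simplified version check (the gem itself uses Environment#ruby_2?):

LIMIT = 30 # mirrors BACKTRACE_CALLER_LIMIT

def capture_frames
  if RUBY_VERSION >= "2.0"
    caller(3...LIMIT)   # Ruby 2.0+: builds only the requested slice of the stack
  else
    caller[3...LIMIT]   # older rubies: build the full backtrace, then slice it
  end
end

def a; b; end
def b; c; end
def c; capture_frames; end

puts a  # prints only the frames above the skipped ones (roughly `a' and `<main>' here)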
data/lib/scout_apm/layer_converters/converter_base.rb ADDED
@@ -0,0 +1,30 @@
+module ScoutApm
+  module LayerConverters
+    class ConverterBase
+      attr_reader :walker
+      attr_reader :request
+      attr_reader :root_layer
+
+      def initialize(request)
+        @request = request
+        @root_layer = request.root_layer
+        @walker = DepthFirstWalker.new(root_layer)
+      end
+
+      # Scope is determined by the first Controller we hit. Most of the time
+      # there will only be 1 anyway. But if you have a controller that calls
+      # another controller method, we may pick that up:
+      #   def update
+      #     show
+      #     render :update
+      #   end
+      def scope_layer
+        @scope_layer ||= walker.walk do |layer|
+          if layer.type == "Controller"
+            break layer
+          end
+        end
+      end
+    end
+  end
+end
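scope_layer leans on Ruby's block semantics: a `break layer` inside the block passed to walker.walk makes the whole walk call return that layer, so the first Controller layer encountered becomes the scope. A stand-alone sketch of that behavior using stand-in classes (FakeWalker and FakeLayer are not scout_apm classes; the real DepthFirstWalker is added elsewhere in this release and yields layers depth-first):

FakeLayer = Struct.new(:type, :name)

class FakeWalker
  def initialize(layers)
    @layers = layers
  end

  # Yields each layer in order; a `break` in the caller's block becomes walk's return value.
  def walk
    @layers.each { |layer| yield layer }
    nil
  end
end

layers = [
  FakeLayer.new("Middleware", "Rack"),
  FakeLayer.new("Controller", "users/index"),
  FakeLayer.new("ActiveRecord", "User/find"),
]

scope = FakeWalker.new(layers).walk do |layer|
  break layer if layer.type == "Controller"
end

puts scope.name  # => users/index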