scout_apm 2.2.0.pre3 → 2.3.0.pre
- checksums.yaml +4 -4
- data/.gitignore +1 -1
- data/CHANGELOG.markdown +147 -2
- data/Guardfile +43 -0
- data/Rakefile +2 -2
- data/ext/allocations/allocations.c +6 -0
- data/ext/allocations/extconf.rb +1 -0
- data/ext/rusage/README.md +26 -0
- data/ext/rusage/extconf.rb +5 -0
- data/ext/rusage/rusage.c +52 -0
- data/lib/scout_apm.rb +28 -15
- data/lib/scout_apm/agent.rb +89 -37
- data/lib/scout_apm/agent/logging.rb +6 -1
- data/lib/scout_apm/agent/reporting.rb +9 -6
- data/lib/scout_apm/app_server_load.rb +21 -10
- data/lib/scout_apm/attribute_arranger.rb +6 -3
- data/lib/scout_apm/background_job_integrations/delayed_job.rb +71 -1
- data/lib/scout_apm/background_job_integrations/resque.rb +85 -0
- data/lib/scout_apm/background_job_integrations/sidekiq.rb +22 -20
- data/lib/scout_apm/background_recorder.rb +43 -0
- data/lib/scout_apm/background_worker.rb +19 -15
- data/lib/scout_apm/config.rb +138 -28
- data/lib/scout_apm/db_query_metric_set.rb +80 -0
- data/lib/scout_apm/db_query_metric_stats.rb +102 -0
- data/lib/scout_apm/debug.rb +37 -0
- data/lib/scout_apm/environment.rb +22 -15
- data/lib/scout_apm/git_revision.rb +51 -0
- data/lib/scout_apm/histogram.rb +11 -2
- data/lib/scout_apm/instant/assets/xmlhttp_instrumentation.html +2 -2
- data/lib/scout_apm/instant/middleware.rb +196 -54
- data/lib/scout_apm/instruments/action_controller_rails_3_rails4.rb +89 -68
- data/lib/scout_apm/instruments/action_view.rb +49 -0
- data/lib/scout_apm/instruments/active_record.rb +127 -3
- data/lib/scout_apm/instruments/grape.rb +4 -3
- data/lib/scout_apm/instruments/middleware_detailed.rb +4 -6
- data/lib/scout_apm/instruments/mongoid.rb +24 -3
- data/lib/scout_apm/instruments/net_http.rb +7 -2
- data/lib/scout_apm/instruments/percentile_sampler.rb +36 -19
- data/lib/scout_apm/instruments/process/process_cpu.rb +3 -2
- data/lib/scout_apm/instruments/process/process_memory.rb +3 -3
- data/lib/scout_apm/instruments/resque.rb +40 -0
- data/lib/scout_apm/layaway.rb +67 -28
- data/lib/scout_apm/layer.rb +19 -59
- data/lib/scout_apm/layer_children_set.rb +77 -0
- data/lib/scout_apm/layer_converters/allocation_metric_converter.rb +5 -6
- data/lib/scout_apm/layer_converters/converter_base.rb +201 -14
- data/lib/scout_apm/layer_converters/database_converter.rb +55 -0
- data/lib/scout_apm/layer_converters/depth_first_walker.rb +22 -10
- data/lib/scout_apm/layer_converters/error_converter.rb +5 -7
- data/lib/scout_apm/layer_converters/find_layer_by_type.rb +34 -0
- data/lib/scout_apm/layer_converters/histograms.rb +14 -0
- data/lib/scout_apm/layer_converters/job_converter.rb +36 -50
- data/lib/scout_apm/layer_converters/metric_converter.rb +17 -19
- data/lib/scout_apm/layer_converters/request_queue_time_converter.rb +10 -12
- data/lib/scout_apm/layer_converters/slow_job_converter.rb +41 -115
- data/lib/scout_apm/layer_converters/slow_request_converter.rb +33 -117
- data/lib/scout_apm/limited_layer.rb +126 -0
- data/lib/scout_apm/metric_meta.rb +0 -5
- data/lib/scout_apm/metric_set.rb +9 -1
- data/lib/scout_apm/metric_stats.rb +7 -8
- data/lib/scout_apm/rack.rb +26 -0
- data/lib/scout_apm/remote/message.rb +23 -0
- data/lib/scout_apm/remote/recorder.rb +57 -0
- data/lib/scout_apm/remote/router.rb +49 -0
- data/lib/scout_apm/remote/server.rb +58 -0
- data/lib/scout_apm/reporter.rb +51 -15
- data/lib/scout_apm/request_histograms.rb +4 -0
- data/lib/scout_apm/request_manager.rb +2 -1
- data/lib/scout_apm/scored_item_set.rb +7 -0
- data/lib/scout_apm/serializers/db_query_serializer_to_json.rb +15 -0
- data/lib/scout_apm/serializers/histograms_serializer_to_json.rb +21 -0
- data/lib/scout_apm/serializers/payload_serializer.rb +10 -3
- data/lib/scout_apm/serializers/payload_serializer_to_json.rb +6 -6
- data/lib/scout_apm/serializers/slow_jobs_serializer_to_json.rb +2 -1
- data/lib/scout_apm/server_integrations/puma.rb +5 -2
- data/lib/scout_apm/slow_job_policy.rb +1 -10
- data/lib/scout_apm/slow_job_record.rb +6 -1
- data/lib/scout_apm/slow_request_policy.rb +1 -10
- data/lib/scout_apm/slow_transaction.rb +20 -2
- data/lib/scout_apm/store.rb +66 -12
- data/lib/scout_apm/synchronous_recorder.rb +26 -0
- data/lib/scout_apm/tracked_request.rb +136 -71
- data/lib/scout_apm/utils/active_record_metric_name.rb +8 -4
- data/lib/scout_apm/utils/backtrace_parser.rb +3 -3
- data/lib/scout_apm/utils/gzip_helper.rb +24 -0
- data/lib/scout_apm/utils/numbers.rb +14 -0
- data/lib/scout_apm/utils/scm.rb +14 -0
- data/lib/scout_apm/version.rb +1 -1
- data/scout_apm.gemspec +5 -4
- data/test/test_helper.rb +18 -0
- data/test/unit/config_test.rb +59 -8
- data/test/unit/db_query_metric_set_test.rb +56 -0
- data/test/unit/db_query_metric_stats_test.rb +113 -0
- data/test/unit/git_revision_test.rb +15 -0
- data/test/unit/histogram_test.rb +14 -0
- data/test/unit/instruments/net_http_test.rb +21 -0
- data/test/unit/instruments/percentile_sampler_test.rb +137 -0
- data/test/unit/layaway_test.rb +20 -0
- data/test/unit/layer_children_set_test.rb +88 -0
- data/test/unit/layer_converters/depth_first_walker_test.rb +66 -0
- data/test/unit/layer_converters/metric_converter_test.rb +22 -0
- data/test/unit/layer_converters/stubs.rb +33 -0
- data/test/unit/limited_layer_test.rb +53 -0
- data/test/unit/remote/test_message.rb +13 -0
- data/test/unit/remote/test_router.rb +33 -0
- data/test/unit/remote/test_server.rb +15 -0
- data/test/unit/serializers/payload_serializer_test.rb +3 -12
- data/test/unit/store_test.rb +66 -0
- data/test/unit/test_tracked_request.rb +87 -0
- data/test/unit/utils/active_record_metric_name_test.rb +8 -0
- data/test/unit/utils/backtrace_parser_test.rb +5 -0
- data/test/unit/utils/numbers_test.rb +15 -0
- data/test/unit/utils/scm.rb +17 -0
- metadata +125 -30
- data/ext/stacks/extconf.rb +0 -37
- data/ext/stacks/scout_atomics.h +0 -86
- data/ext/stacks/stacks.c +0 -811
- data/lib/scout_apm/capacity.rb +0 -57
- data/lib/scout_apm/deploy_integrations/capistrano_2.cap +0 -12
- data/lib/scout_apm/deploy_integrations/capistrano_2.rb +0 -83
- data/lib/scout_apm/deploy_integrations/capistrano_3.cap +0 -12
- data/lib/scout_apm/deploy_integrations/capistrano_3.rb +0 -88
- data/lib/scout_apm/instruments/delayed_job.rb +0 -57
- data/lib/scout_apm/serializers/deploy_serializer.rb +0 -16
- data/lib/scout_apm/trace_compactor.rb +0 -312
- data/lib/scout_apm/utils/fake_stacks.rb +0 -87
- data/tester.rb +0 -53
data/ext/stacks/extconf.rb
DELETED
@@ -1,37 +0,0 @@
-begin
-  require 'mkmf'
-  can_compile = true
-rescue Exception
-  # This will appear only in verbose mode.
-  $stderr.puts "Could not require 'mkmf'. Not fatal, the Stacks extension is optional."
-end
-
-can_compile &&= have_func('rb_postponed_job_register_one')
-can_compile &&= have_func('rb_profile_frames')
-can_compile &&= have_func('rb_profile_frame_absolute_path')
-can_compile &&= have_func('rb_profile_frame_label')
-can_compile &&= have_func('rb_profile_frame_classpath')
-
-# Explicitly link against librt
-if have_macro('__linux__')
-  can_compile &&= have_library('rt') # for timer_create, timer_settime
-end
-
-# Pick the atomics implementation
-has_atomics_header = have_header("stdatomic.h")
-if has_atomics_header
-  $defs.push "-DSCOUT_USE_NEW_ATOMICS"
-else
-  $defs.push "-DSCOUT_USE_OLD_ATOMICS"
-end
-
-if can_compile
-  create_makefile('stacks')
-else
-  # Create a dummy Makefile, to satisfy Gem::Installer#install
-  mfile = open("Makefile", "wb")
-  mfile.puts '.PHONY: install'
-  mfile.puts 'install:'
-  mfile.puts "\t" + '@echo "Stack extension not installed, skipping."'
-  mfile.close
-end
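The extconf.rb removed above treated the Stacks extension as strictly optional: if mkmf or any of the probed debug-API functions was unavailable, it wrote a dummy Makefile so the gem install still succeeded without the compiled sampler. A minimal Ruby sketch of the matching load-time guard follows; the require target and variable name are illustrative assumptions, since the gem's actual loading code is not part of this diff.

# Hypothetical load guard for an optional native extension (illustrative only;
# the gem's real require logic lives elsewhere and is not shown in this diff).
begin
  require 'stacks'            # compiled by create_makefile('stacks') when the probes pass
  stacks_loaded = true
rescue LoadError
  stacks_loaded = false       # dummy Makefile was used; run without ScoutProf sampling
end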
data/ext/stacks/scout_atomics.h
DELETED
@@ -1,86 +0,0 @@
-/////////////////////////////////////////////////////////////////////////////////
-// ATOMIC DEFS
-//
-// GCC added C11 atomics in 4.9, which is after ubuntu 14.04's version. Provide
-// typedefs around what we really use to allow compatibility
-//
-/////////////////////////////////////////////////////////////////////////////////
-
-
-/////////////////////////////////////////////////////////////////////////////////
-// Build system MUST set either SCOUT_USE_OLD_ATOMICS or SCOUT_USE_NEW_ATOMICS,
-// but not both
-/////////////////////////////////////////////////////////////////////////////////
-
-
-#ifdef SCOUT_USE_OLD_ATOMICS
-
-typedef bool atomic_bool_t;
-typedef uint16_t atomic_uint16_t;
-typedef uint32_t atomic_uint32_t;
-
-// Function which abuses compare&swap to set the value to what you want.
-void scout_macro_fn_atomic_store_bool(bool* p_ai, bool val)
-{
-  bool ai_was;
-  ai_was = *p_ai;
-
-  do {
-    ai_was = __sync_val_compare_and_swap (p_ai, ai_was, val);
-  } while (ai_was != *p_ai);
-}
-
-// Function which abuses compare&swap to set the value to what you want.
-void scout_macro_fn_atomic_store_int16(atomic_uint16_t* p_ai, atomic_uint16_t val)
-{
-  atomic_uint16_t ai_was;
-  ai_was = *p_ai;
-
-  do {
-    ai_was = __sync_val_compare_and_swap (p_ai, ai_was, val);
-  } while (ai_was != *p_ai);
-}
-
-// Function which abuses compare&swap to set the value to what you want.
-void scout_macro_fn_atomic_store_int32(atomic_uint32_t* p_ai, atomic_uint32_t val)
-{
-  atomic_uint32_t ai_was;
-  ai_was = *p_ai;
-
-  do {
-    ai_was = __sync_val_compare_and_swap (p_ai, ai_was, val);
-  } while (ai_was != *p_ai);
-}
-
-
-#define ATOMIC_STORE_BOOL(var, value) scout_macro_fn_atomic_store_bool(var, value)
-#define ATOMIC_STORE_INT16(var, value) scout_macro_fn_atomic_store_int16(var, value)
-#define ATOMIC_STORE_INT32(var, value) scout_macro_fn_atomic_store_int32(var, value)
-#define ATOMIC_LOAD(var) __sync_fetch_and_add((var),0)
-#define ATOMIC_ADD(var, value) __sync_fetch_and_add((var), value)
-#define ATOMIC_INIT(value) value
-
-#endif
-
-
-/////////////////////////////////////////////////////////////////////////////////
-
-
-#ifdef SCOUT_USE_NEW_ATOMICS
-
-// We have c11 atomics
-#include <stdatomic.h>
-#define ATOMIC_STORE_BOOL(var, value) atomic_store(var, value)
-#define ATOMIC_STORE_INT16(var, value) atomic_store(var, value)
-#define ATOMIC_STORE_INT32(var, value) atomic_store(var, value)
-#define ATOMIC_LOAD(var) atomic_load(var)
-#define ATOMIC_ADD(var, value) atomic_fetch_add(var, value)
-#define ATOMIC_INIT(value) ATOMIC_VAR_INIT(value)
-
-typedef atomic_bool atomic_bool_t;
-typedef atomic_uint_fast16_t atomic_uint16_t;
-typedef atomic_uint_fast32_t atomic_uint32_t;
-
-#endif
-
-
data/ext/stacks/stacks.c
DELETED
@@ -1,811 +0,0 @@
-/*
- * General idioms:
- *   - rb_* functions are attached to Ruby-accessible method calls (See Init_stacks)
- * General approach:
- *   - Because of how rb_profile_frames works, it must be called from within
- *     each thread running, rather than from a 3rd party thread.
- *   - We setup a global timer tick. The handler simply sends a thread signal
- *     to each registered thread, which causes each thread to capture its own
- *     trace
- */
-
-/*
- * Ruby lib
- */
-#include <ruby/ruby.h>
-#include <ruby/debug.h>
-#include <ruby/st.h>
-#include <ruby/io.h>
-#include <ruby/intern.h>
-
-/*
- * Std lib
- */
-#include <errno.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdbool.h>
-
-/*
- * System
- */
-#ifdef __linux__
-#include <sys/syscall.h>
-#endif
-#include <sys/time.h>
-
-#include "scout_atomics.h"
-
-
-int scout_profiling_installed = 0;
-int scout_profiling_running = 0;
-
-ID sym_ScoutApm;
-ID sym_Stacks;
-ID sym_collect;
-ID sym_scrub_bang;
-VALUE ScoutApm;
-VALUE Stacks;
-
-VALUE mScoutApm;
-VALUE mInstruments;
-VALUE cStacks;
-
-VALUE interval;
-
-#define BUF_SIZE 512
-#define MAX_TRACES 2000
-
-#ifdef __linux__
-#define NANO_SECOND_MULTIPLIER 1000000 // 1 millisecond = 1,000,000 Nanoseconds
-const long INTERVAL = 1 * NANO_SECOND_MULTIPLIER; // milliseconds * NANO_SECOND_MULTIPLIER
-// For support of thread id in timer_create
-#define sigev_notify_thread_id _sigev_un._tid
-
-#else // __linux__
-
-const long INTERVAL = 1000; // 1ms
-
-#endif
-
-#ifdef T_IMEMO
-#define VALID_RUBY_FRAME T_IMEMO
-#else
-#define VALID_RUBY_FRAME T_DATA
-#endif
-
-
-
-#ifdef RUBY_INTERNAL_EVENT_NEWOBJ
-
-// Forward Declarations
-static void init_thread_vars();
-static void scout_profile_broadcast_signal_handler(int sig);
-void scout_record_sample();
-static void scout_start_thread_timer();
-static void scout_stop_thread_timer();
-
-////////////////////////////////////////////////////////////////////////////////////////
-// Per-Thread variables
-////////////////////////////////////////////////////////////////////////////////////////
-
-struct c_trace {
-  int num_tracelines;
-  int lines_buf[BUF_SIZE];
-  VALUE frames_buf[BUF_SIZE];
-};
-
-static __thread struct c_trace *_traces;
-
-static __thread atomic_bool_t _thread_registered = ATOMIC_INIT(false);
-static __thread atomic_bool_t _ok_to_sample = ATOMIC_INIT(false);
-static __thread atomic_bool_t _in_signal_handler = ATOMIC_INIT(false);
-
-static __thread atomic_uint16_t _start_frame_index = ATOMIC_INIT(0);
-static __thread atomic_uint16_t _start_trace_index = ATOMIC_INIT(0);
-static __thread atomic_uint16_t _cur_traces_num = ATOMIC_INIT(0);
-
-static __thread atomic_uint32_t _skipped_in_gc = ATOMIC_INIT(0);
-static __thread atomic_uint32_t _skipped_in_signal_handler = ATOMIC_INIT(0);
-static __thread atomic_uint32_t _skipped_in_job_registered = ATOMIC_INIT(0);
-static __thread atomic_uint32_t _skipped_in_not_running = ATOMIC_INIT(0);
-
-static __thread VALUE _gc_hook;
-static __thread VALUE _ruby_thread;
-
-static __thread atomic_bool_t _job_registered = ATOMIC_INIT(false);
-
-#ifdef __linux__
-static __thread timer_t _timerid;
-static __thread struct sigevent _sev;
-#endif
-
-////////////////////////////////////////////////////////////////////////////////////////
-// Global variables
-////////////////////////////////////////////////////////////////////////////////////////
-
-static int
-scout_add_profiled_thread(pthread_t th)
-{
-  if (ATOMIC_LOAD(&_thread_registered) == true) return 1;
-
-  init_thread_vars();
-  ATOMIC_STORE_BOOL(&_thread_registered, true);
-
-  return 1;
-}
-
-/*
- * rb_scout_add_profiled_thread: adds the currently running thread to the head of the linked list
- *
- * Initializes thread locals:
- *   - ok_to_sample to false
- *   - start_frame_index and start_trace_index to 0
- *   - cur_traces_num to 0
- */
-static VALUE
-rb_scout_add_profiled_thread(VALUE self)
-{
-  scout_add_profiled_thread(pthread_self());
-  return Qtrue;
-}
-
-/*
- * remove_profiled_thread: removes a thread from the linked list.
- * if the linked list is empty, this is a noop
- */
-static int
-remove_profiled_thread(pthread_t th)
-{
-  if (ATOMIC_LOAD(&_thread_registered) == false) return 1;
-
-  ATOMIC_STORE_BOOL(&_ok_to_sample, false);
-
-  // Unregister the _gc_hook from Ruby ObjectSpace, then free it as well as the _traces struct it wrapped.
-  rb_gc_unregister_address(&_gc_hook);
-  xfree(&_gc_hook);
-  xfree(&_traces);
-
-#ifdef __linux__
-  timer_delete(_timerid);
-#endif
-
-  ATOMIC_STORE_BOOL(&_thread_registered, false);
-  return 0;
-}
-
-/* rb_scout_remove_profiled_thread: removes a thread from the linked list
- *
- */
-static VALUE rb_scout_remove_profiled_thread(VALUE self)
-{
-  remove_profiled_thread(pthread_self());
-  return Qtrue;
-}
-
-
-/* rb_scout_start_profiling: Installs the global timer
- */
-static VALUE
-rb_scout_start_profiling(VALUE self)
-{
-  if (scout_profiling_running) {
-    return Qtrue;
-  }
-
-  scout_profiling_running = 1;
-
-  return Qtrue;
-}
-
-/* rb_scout_uninstall_profiling: called when ruby is shutting down.
- * NOTE: If ever this method should be called where Ruby should continue to run, we need to free our
- * memory allocated in each profiled thread.
- */
-static VALUE
-rb_scout_uninstall_profiling(VALUE self)
-{
-  return Qnil;
-}
-
-static VALUE
-rb_scout_install_profiling(VALUE self)
-{
-#ifndef __linux__
-  struct itimerval timer;
-#endif
-  struct sigaction new_vtaction, old_vtaction;
-
-  // We can only install once. If uninstall is called, we will NOT be able to call install again.
-  // Instead, stop/start should be used to temporarily disable all ScoutProf sampling.
-  if (scout_profiling_installed) {
-    return Qfalse;
-  }
-
-  // Also set up an interrupt handler for when we broadcast an alarm
-  new_vtaction.sa_handler = scout_profile_broadcast_signal_handler;
-  new_vtaction.sa_flags = SA_RESTART;
-  sigemptyset(&new_vtaction.sa_mask);
-  sigaction(SIGALRM, &new_vtaction, &old_vtaction);
-
-#ifndef __linux__
-  timer.it_interval.tv_sec = 0;
-  timer.it_interval.tv_usec = INTERVAL; //FIX2INT(interval);
-  timer.it_value = timer.it_interval;
-  setitimer(ITIMER_REAL, &timer, 0);
-#endif
-
-  rb_define_const(cStacks, "INSTALLED", Qtrue);
-  scout_profiling_installed = 1;
-
-  return Qtrue;
-}
-
-////////////////////////////////////////////////////////////////////////////////////////
-// Per-Thread Handler
-////////////////////////////////////////////////////////////////////////////////////////
-
-static void
-scoutprof_gc_mark(void *data)
-{
-  uint_fast16_t i;
-  int n;
-  for (i = 0; i < ATOMIC_LOAD(&_cur_traces_num); i++) {
-    for (n = 0; n < _traces[i].num_tracelines; n++) {
-      rb_gc_mark(_traces[i].frames_buf[n]);
-    }
-  }
-}
-
-static void
-scout_parent_atfork_prepare()
-{
-  // TODO: Should we track how much time the fork took?
-  if (ATOMIC_LOAD(&_ok_to_sample) == true) {
-    scout_stop_thread_timer();
-  }
-}
-
-static void
-scout_parent_atfork_finish()
-{
-  if (ATOMIC_LOAD(&_ok_to_sample) == true) {
-    scout_start_thread_timer();
-  }
-}
-
-static void
-init_thread_vars()
-{
-  int res;
-
-  ATOMIC_STORE_BOOL(&_ok_to_sample, false);
-  ATOMIC_STORE_BOOL(&_in_signal_handler, false);
-  ATOMIC_STORE_INT16(&_start_frame_index, 0);
-  ATOMIC_STORE_INT16(&_start_trace_index, 0);
-  ATOMIC_STORE_INT16(&_cur_traces_num, 0);
-
-  _ruby_thread = rb_thread_current(); // Used as a check to avoid any Fiber switching silliness
-
-  _traces = ALLOC_N(struct c_trace, MAX_TRACES); // TODO Check return
-
-  _gc_hook = Data_Wrap_Struct(rb_cObject, &scoutprof_gc_mark, NULL, &_traces);
-  rb_gc_register_address(&_gc_hook);
-
-  res = pthread_atfork(scout_parent_atfork_prepare, scout_parent_atfork_finish, NULL);
-  if (res != 0) {
-    fprintf(stderr, "APM-DEBUG: Pthread_atfork failed: %d\n", res);
-  }
-
-#ifdef __linux__
-  // Create timer to target this thread
-  _sev.sigev_notify = SIGEV_THREAD_ID;
-  _sev.sigev_signo = SIGALRM;
-  _sev.sigev_notify_thread_id = syscall(SYS_gettid);
-  _sev.sigev_value.sival_ptr = &_timerid;
-  if (timer_create(CLOCK_MONOTONIC, &_sev, &_timerid) == -1) {
-    fprintf(stderr, "APM-DEBUG: Time create failed: %d\n", errno);
-  }
-#endif
-
-  return;
-}
-
-/*
- * Signal handler for each thread. Invoked from a signal when a job is run within Ruby's postponed_job queue
- */
-static void
-scout_profile_broadcast_signal_handler(int sig)
-{
-  int register_result;
-
-  if (ATOMIC_LOAD(&_ok_to_sample) == false) return;
-
-  if (ATOMIC_LOAD(&_in_signal_handler) == true) {
-    ATOMIC_ADD(&_skipped_in_signal_handler, 1);
-    return;
-  }
-
-
-  ATOMIC_STORE_BOOL(&_in_signal_handler, true);
-
-  if (rb_during_gc()) {
-    ATOMIC_ADD(&_skipped_in_gc, 1);
-  } else if (rb_thread_current() != _ruby_thread) {
-    ATOMIC_ADD(&_skipped_in_not_running, 1);
-  } else {
-    if (ATOMIC_LOAD(&_job_registered) == false){
-      register_result = rb_postponed_job_register(0, scout_record_sample, 0);
-      if ((register_result == 1) || (register_result == 2)) {
-        ATOMIC_STORE_BOOL(&_job_registered, true);
-      } else {
-        ATOMIC_ADD(&_skipped_in_job_registered, 1);
-      }
-    } // !_job_registered
-  }
-
-  ATOMIC_STORE_BOOL(&_in_signal_handler, false);
-}
-
-/*
- * scout_record_sample: Defered function run from the per-thread handler
- *
- * Note: that this is called from *EVERY PROFILED THREAD FOR EACH CLOCK TICK
- * INTERVAL*, so the performance of this method is crucial.
- *
- */
-void
-scout_record_sample()
-{
-  int num_frames;
-  uint_fast16_t cur_traces_num, start_frame_index;
-
-  if (ATOMIC_LOAD(&_ok_to_sample) == false) return;
-  if (rb_during_gc()) {
-    ATOMIC_ADD(&_skipped_in_gc, 1);
-    return;
-  }
-  if (rb_thread_current() != _ruby_thread) {
-    ATOMIC_ADD(&_skipped_in_not_running, 1);
-    return;
-  }
-
-  cur_traces_num = ATOMIC_LOAD(&_cur_traces_num);
-  start_frame_index = ATOMIC_LOAD(&_start_frame_index);
-
-  if (cur_traces_num < MAX_TRACES) {
-    num_frames = rb_profile_frames(0, sizeof(_traces[cur_traces_num].frames_buf) / sizeof(VALUE), _traces[cur_traces_num].frames_buf, _traces[cur_traces_num].lines_buf);
-    if (num_frames - start_frame_index > 2) {
-      _traces[cur_traces_num].num_tracelines = num_frames - start_frame_index - 2; // The extra -2 is because there's a bug when reading the very first (bottom) 2 iseq objects for some reason
-      ATOMIC_ADD(&_cur_traces_num, 1);
-    } // TODO: add an else with a counter so we can track if we skipped profiling here
-  }
-  ATOMIC_STORE_BOOL(&_job_registered, false);
-}
-
-/* rb_scout_profile_frames: retreive the traces for the layer that is exiting
- *
- * Note: Calls to this must have already stopped sampling
- */
-static VALUE rb_scout_profile_frames(VALUE self)
-{
-  int n;
-  uint_fast16_t i, cur_traces_num, start_trace_index;
-  VALUE traces, trace, trace_line;
-
-  if (ATOMIC_LOAD(&_thread_registered) == false) {
-    fprintf(stderr, "APM-DEBUG: Error: trying to get profiled frames on a non-profiled thread!\n");
-    ATOMIC_STORE_INT16(&_cur_traces_num, 0);
-    return rb_ary_new();
-  }
-
-  cur_traces_num = ATOMIC_LOAD(&_cur_traces_num);
-  start_trace_index = ATOMIC_LOAD(&_start_trace_index);
-
-  if (cur_traces_num - start_trace_index > 0) {
-    traces = rb_ary_new2(cur_traces_num - start_trace_index);
-    for(i = start_trace_index; i < cur_traces_num; i++) {
-      if (_traces[i].num_tracelines > 0) {
-        trace = rb_ary_new2(_traces[i].num_tracelines);
-        for(n = 0; n < _traces[i].num_tracelines; n++) {
-          if (TYPE(_traces[i].frames_buf[n]) == VALID_RUBY_FRAME) { // We should always get valid frames from rb_profile_frames, but that doesn't always seem to be the case
-            trace_line = rb_ary_new2(2);
-            rb_ary_store(trace_line, 0, _traces[i].frames_buf[n]);
-            rb_ary_store(trace_line, 1, INT2FIX(_traces[i].lines_buf[n]));
-            rb_ary_push(trace, trace_line);
-          } else {
-            fprintf(stderr, "APM-DEBUG: Non-data frame is: 0x%04x\n", TYPE(_traces[i].frames_buf[n]));
-          }
-        }
-        rb_ary_push(traces, trace);
-      }
-    }
-  } else {
-    traces = rb_ary_new();
-  }
-  ATOMIC_STORE_INT16(&_cur_traces_num, start_trace_index);
-  return traces;
-}
-
-
-
-/*****************************************************/
-/* Control code */
-/*****************************************************/
-
-static void
-scout_start_thread_timer()
-{
-#ifdef __linux__
-  struct itimerspec its;
-  sigset_t mask;
-#endif
-
-#ifdef __linux__
-  if (ATOMIC_LOAD(&_thread_registered) == false) return;
-
-  sigemptyset(&mask);
-  sigaddset(&mask, SIGALRM);
-  if (sigprocmask(SIG_SETMASK, &mask, NULL) == -1) {
-    fprintf(stderr, "APM-DEBUG: Block mask failed: %d\n", errno);
-  }
-
-  its.it_value.tv_sec = 0;
-  its.it_value.tv_nsec = INTERVAL;
-  its.it_interval.tv_sec = its.it_value.tv_sec;
-  its.it_interval.tv_nsec = its.it_value.tv_nsec;
-
-  if (timer_settime(_timerid, 0, &its, NULL) == -1) {
-    fprintf(stderr, "APM-DEBUG: Timer set failed in start sampling: %d\n", errno);
-  }
-
-  if (sigprocmask(SIG_UNBLOCK, &mask, NULL) == -1) {
-    fprintf(stderr, "APM-DEBUG: UNBlock mask failed: %d\n", errno);
-  }
-#endif
-}
-
-static void
-scout_stop_thread_timer()
-{
-#ifdef __linux__
-  struct itimerspec its;
-#endif
-
-  if (ATOMIC_LOAD(&_thread_registered) == false) return;
-
-#ifdef __linux__
-  memset((void*)&its, 0, sizeof(its));
-  if (timer_settime(_timerid, 0, &its, NULL) == -1 ) {
-    fprintf(stderr, "APM-DEBUG: Timer set failed: %d\n", errno);
-  }
-#endif
-}
-
-/* Per thread start sampling */
-static VALUE
-rb_scout_start_sampling(VALUE self)
-{
-  scout_add_profiled_thread(pthread_self());
-  ATOMIC_STORE_BOOL(&_ok_to_sample, true);
-#ifdef __linux__
-  scout_start_thread_timer();
-#endif
-  return Qtrue;
-}
-
-/* Per thread stop sampling */
-static VALUE
-rb_scout_stop_sampling(VALUE self, VALUE reset)
-{
-  if(ATOMIC_LOAD(&_ok_to_sample) == true ) {
-#ifdef __linux__
-    scout_stop_thread_timer();
-#endif
-  }
-
-  ATOMIC_STORE_BOOL(&_ok_to_sample, false);
-
-  // TODO: I think this can be (reset == Qtrue)
-  if (TYPE(reset) == T_TRUE) {
-    ATOMIC_STORE_BOOL(&_job_registered, 0);
-    ATOMIC_STORE_BOOL(&_in_signal_handler, 0);
-    ATOMIC_STORE_INT16(&_start_trace_index, 0);
-    ATOMIC_STORE_INT16(&_start_frame_index, 0);
-    ATOMIC_STORE_INT16(&_cur_traces_num, 0);
-    ATOMIC_STORE_INT32(&_skipped_in_gc, 0);
-    ATOMIC_STORE_INT32(&_skipped_in_signal_handler, 0);
-    ATOMIC_STORE_INT32(&_skipped_in_job_registered, 0);
-    ATOMIC_STORE_INT32(&_skipped_in_not_running, 0);
-  }
-  return Qtrue;
-}
-
-// rb_scout_update_indexes: Called when each layer starts or something
-static VALUE
-rb_scout_update_indexes(VALUE self, VALUE frame_index, VALUE trace_index)
-{
-  ATOMIC_STORE_INT16(&_start_trace_index, NUM2INT(trace_index));
-  ATOMIC_STORE_INT16(&_start_frame_index, NUM2INT(frame_index));
-  return Qtrue;
-}
-
-// rb_scout_current_trace_index: Get the current top of the trace stack
-static VALUE
-rb_scout_current_trace_index(VALUE self)
-{
-  return INT2NUM(ATOMIC_LOAD(&_cur_traces_num));
-}
-
-// rb_scout_current_trace_index: Get the current top of the trace stack
-static VALUE
-rb_scout_current_frame_index(VALUE self)
-{
-  int num_frames;
-  VALUE frames_buf[BUF_SIZE];
-  int lines_buf[BUF_SIZE];
-  num_frames = rb_profile_frames(0, sizeof(frames_buf) / sizeof(VALUE), frames_buf, lines_buf);
-  if (num_frames > 1) {
-    return INT2NUM(num_frames - 1);
-  } else {
-    return INT2NUM(0);
-  }
-}
-
-
-
-static VALUE
-rb_scout_skipped_in_gc(VALUE self)
-{
-  return INT2NUM(ATOMIC_LOAD(&_skipped_in_gc));
-}
-
-static VALUE
-rb_scout_skipped_in_handler(VALUE self)
-{
-  return INT2NUM(ATOMIC_LOAD(&_skipped_in_signal_handler));
-}
-
-static VALUE
-rb_scout_skipped_in_job_registered(VALUE self)
-{
-  return INT2NUM(ATOMIC_LOAD(&_skipped_in_job_registered));
-}
-
-static VALUE
-rb_scout_skipped_in_not_running(VALUE self)
-{
-  return INT2NUM(ATOMIC_LOAD(&_skipped_in_not_running));
-}
-
-////////////////////////////////////////////////////////////////
-// Fetch details from a frame
-////////////////////////////////////////////////////////////////
-
-static VALUE
-rb_scout_frame_klass(VALUE self, VALUE frame)
-{
-  return rb_profile_frame_classpath(frame);
-}
-
-static VALUE
-rb_scout_frame_method(VALUE self, VALUE frame)
-{
-  return rb_profile_frame_label(frame);
-}
-
-static VALUE
-rb_scout_frame_file(VALUE self, VALUE frame)
-{
-  return rb_profile_frame_absolute_path(frame);
-}
-
-static VALUE
-rb_scout_frame_lineno(VALUE self, VALUE frame)
-{
-  return rb_profile_frame_first_lineno(frame);
-}
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-
-// Gem Init. Set up constants, attach methods
-void Init_stacks()
-{
-  mScoutApm = rb_define_module("ScoutApm");
-  mInstruments = rb_define_module_under(mScoutApm, "Instruments");
-  cStacks = rb_define_class_under(mInstruments, "Stacks", rb_cObject);
-
-  rb_warning("Initializing ScoutProf Native Extension");
-
-  // Installs/uninstalls the signal handler.
-  rb_define_singleton_method(cStacks, "install", rb_scout_install_profiling, 0);
-  rb_define_singleton_method(cStacks, "uninstall", rb_scout_uninstall_profiling, 0);
-
-  rb_define_singleton_method(cStacks, "start", rb_scout_start_profiling, 0);
-
-  rb_define_singleton_method(cStacks, "add_profiled_thread", rb_scout_add_profiled_thread, 0);
-  rb_define_singleton_method(cStacks, "remove_profiled_thread", rb_scout_remove_profiled_thread, 0);
-
-  rb_define_singleton_method(cStacks, "profile_frames", rb_scout_profile_frames, 0);
-  rb_define_singleton_method(cStacks, "start_sampling", rb_scout_start_sampling, 0);
-  rb_define_singleton_method(cStacks, "stop_sampling", rb_scout_stop_sampling, 1);
-  rb_define_singleton_method(cStacks, "update_indexes", rb_scout_update_indexes, 2);
-  rb_define_singleton_method(cStacks, "current_trace_index", rb_scout_current_trace_index, 0);
-  rb_define_singleton_method(cStacks, "current_frame_index", rb_scout_current_frame_index, 0);
-
-  rb_define_singleton_method(cStacks, "frame_klass", rb_scout_frame_klass, 1);
-  rb_define_singleton_method(cStacks, "frame_method", rb_scout_frame_method, 1);
-  rb_define_singleton_method(cStacks, "frame_file", rb_scout_frame_file, 1);
-  rb_define_singleton_method(cStacks, "frame_lineno", rb_scout_frame_lineno, 1);
-
-  rb_define_singleton_method(cStacks, "skipped_in_gc", rb_scout_skipped_in_gc, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_handler", rb_scout_skipped_in_handler, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_job_registered", rb_scout_skipped_in_job_registered, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_not_running", rb_scout_skipped_in_not_running, 0);
-
-  rb_define_const(cStacks, "ENABLED", Qtrue);
-  rb_warning("Finished Initializing ScoutProf Native Extension");
-}
-
-#else
-
-static VALUE rb_scout_install_profiling(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_uninstall_profiling(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_start_profiling(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_stop_profiling(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_add_profiled_thread(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_remove_profiled_thread(VALUE module)
-{
-  return Qnil;
-}
-
-static VALUE rb_scout_profile_frames(VALUE self)
-{
-  return rb_ary_new();
-}
-
-static VALUE
-rb_scout_start_sampling(VALUE self)
-{
-  return Qtrue;
-}
-
-static VALUE
-rb_scout_stop_sampling(VALUE self)
-{
-  return Qtrue;
-}
-
-static VALUE
-rb_scout_update_indexes(VALUE self, VALUE frame_index, VALUE trace_index)
-{
-  return Qtrue;
-}
-
-// rb_scout_current_trace_index: Get the current top of the trace stack
-static VALUE
-rb_scout_current_trace_index(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-// rb_scout_current_trace_index: Get the current top of the trace stack
-static VALUE
-rb_scout_current_frame_index(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-static VALUE
-rb_scout_skipped_in_gc(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-static VALUE
-rb_scout_skipped_in_handler(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-static VALUE
-rb_scout_skipped_in_job_registered(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-static VALUE
-rb_scout_skipped_in_not_running(VALUE self)
-{
-  return INT2NUM(0);
-}
-
-static VALUE
-rb_scout_frame_klass(VALUE self, VALUE frame)
-{
-  return Qnil;
-}
-
-static VALUE
-rb_scout_frame_method(VALUE self, VALUE frame)
-{
-  return Qnil;
-}
-
-static VALUE
-rb_scout_frame_file(VALUE self, VALUE frame)
-{
-  return Qnil;
-}
-
-static VALUE
-rb_scout_frame_lineno(VALUE self, VALUE frame)
-{
-  return Qnil;
-}
-
-void Init_stacks()
-{
-  mScoutApm = rb_define_module("ScoutApm");
-  mInstruments = rb_define_module_under(mScoutApm, "Instruments");
-  cStacks = rb_define_class_under(mInstruments, "Stacks", rb_cObject);
-
-  // Installs/uninstalls the signal handler.
-  rb_define_singleton_method(cStacks, "install", rb_scout_install_profiling, 0);
-  rb_define_singleton_method(cStacks, "uninstall", rb_scout_uninstall_profiling, 0);
-
-  // Starts/removes the timer tick, leaving the sighandler.
-  rb_define_singleton_method(cStacks, "start", rb_scout_start_profiling, 0);
-  rb_define_singleton_method(cStacks, "stop", rb_scout_stop_profiling, 0);
-
-  rb_define_singleton_method(cStacks, "add_profiled_thread", rb_scout_add_profiled_thread, 0);
-  rb_define_singleton_method(cStacks, "remove_profiled_thread", rb_scout_remove_profiled_thread, 0);
-
-  rb_define_singleton_method(cStacks, "profile_frames", rb_scout_profile_frames, 0);
-  rb_define_singleton_method(cStacks, "start_sampling", rb_scout_start_sampling, 0);
-  rb_define_singleton_method(cStacks, "stop_sampling", rb_scout_stop_sampling, 1);
-  rb_define_singleton_method(cStacks, "update_indexes", rb_scout_update_indexes, 2);
-  rb_define_singleton_method(cStacks, "current_trace_index", rb_scout_current_trace_index, 0);
-  rb_define_singleton_method(cStacks, "current_frame_index", rb_scout_current_frame_index, 0);
-
-  rb_define_singleton_method(cStacks, "frame_klass", rb_scout_frame_klass, 1);
-  rb_define_singleton_method(cStacks, "frame_method", rb_scout_frame_method, 1);
-  rb_define_singleton_method(cStacks, "frame_file", rb_scout_frame_file, 1);
-  rb_define_singleton_method(cStacks, "frame_lineno", rb_scout_frame_lineno, 1);
-
-  rb_define_singleton_method(cStacks, "skipped_in_gc", rb_scout_skipped_in_gc, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_handler", rb_scout_skipped_in_handler, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_job_registered", rb_scout_skipped_in_job_registered, 0);
-  rb_define_singleton_method(cStacks, "skipped_in_not_running", rb_scout_skipped_in_not_running, 0);
-
-  rb_define_const(cStacks, "ENABLED", Qfalse);
-  rb_define_const(cStacks, "INSTALLED", Qfalse);
-}
-
-#endif //#ifdef RUBY_INTERNAL_EVENT_NEWOBJ
-
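The Init_stacks function above is the only Ruby-facing surface of the removed ScoutProf sampler. The sketch below shows, in Ruby, roughly how those singleton methods could be driven; it is inferred solely from the rb_define_singleton_method calls and constants above and is not the gem's actual instrumentation code.

# Hypothetical driver for the removed ScoutApm::Instruments::Stacks API,
# inferred from Init_stacks above; not the gem's real call sites.
stacks = ScoutApm::Instruments::Stacks

if stacks::ENABLED
  stacks.install               # install the SIGALRM handler (once per process)
  stacks.start                 # start the global timer tick
  stacks.add_profiled_thread   # register the current thread for sampling
  stacks.start_sampling

  # ... run the code being profiled ...

  stacks.stop_sampling(true)   # true resets the per-thread counters and indexes
  stacks.profile_frames.each do |trace|
    trace.each do |frame, line|
      puts "#{stacks.frame_klass(frame)}##{stacks.frame_method(frame)} " \
           "(#{stacks.frame_file(frame)}:#{line})"
    end
  end
end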