stack_trace 0.2.1 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.tool-versions +1 -0
- data/CODE_OF_CONDUCT.md +84 -0
- data/Gemfile +6 -1
- data/Gemfile.lock +19 -23
- data/LICENSE.txt +1 -1
- data/README.md +15 -191
- data/Rakefile +8 -1
- data/ext/stack_trace/configuration.c +23 -0
- data/ext/stack_trace/configuration.h +6 -0
- data/ext/stack_trace/current_trace.c +43 -0
- data/ext/stack_trace/current_trace.h +7 -0
- data/ext/stack_trace/debug.c +43 -0
- data/ext/stack_trace/debug.h +37 -0
- data/ext/stack_trace/event_producer.c +65 -0
- data/ext/stack_trace/event_producer.h +3 -0
- data/ext/stack_trace/event_store.c +109 -0
- data/ext/stack_trace/event_store.h +5 -0
- data/ext/stack_trace/extconf.rb +7 -0
- data/ext/stack_trace/sidecar.c +77 -0
- data/ext/stack_trace/sidecar.h +1 -0
- data/ext/stack_trace/span.c +106 -0
- data/ext/stack_trace/span.h +9 -0
- data/ext/stack_trace/stack_trace.c +54 -0
- data/ext/stack_trace/trace.c +132 -0
- data/ext/stack_trace/trace.h +8 -0
- data/ext/stack_trace/types/event.h +31 -0
- data/ext/stack_trace/types/span.h +22 -0
- data/ext/stack_trace/types/trace.h +15 -0
- data/ext/stack_trace/utils.c +8 -0
- data/ext/stack_trace/utils.h +1 -0
- data/lib/stack_trace/argument_extractor.rb +22 -0
- data/lib/stack_trace/configuration.rb +2 -57
- data/lib/stack_trace/patch/class.rb +7 -0
- data/lib/stack_trace/patch/false_class.rb +7 -0
- data/lib/stack_trace/patch/nil_class.rb +7 -0
- data/lib/stack_trace/patch/numeric.rb +7 -0
- data/lib/stack_trace/patch/object.rb +7 -0
- data/lib/stack_trace/patch/symbol.rb +7 -0
- data/lib/stack_trace/patch/true_class.rb +7 -0
- data/lib/stack_trace/version.rb +3 -1
- data/lib/stack_trace.rb +30 -34
- data/stack_trace.gemspec +22 -20
- metadata +45 -77
- data/.gitignore +0 -11
- data/.travis.yml +0 -7
- data/bin/console +0 -14
- data/bin/setup +0 -8
- data/lib/stack_trace/integration/rspec.rb +0 -79
- data/lib/stack_trace/module_extensions.rb +0 -13
- data/lib/stack_trace/setup.rb +0 -62
- data/lib/stack_trace/span.rb +0 -94
- data/lib/stack_trace/trace.rb +0 -83
@@ -0,0 +1,109 @@
|
|
1
|
+
/*
|
2
|
+
* Implements a ring buffer which blocks the producer if
|
3
|
+
* there is no space left and blocks the consumer if
|
4
|
+
* there is no event available in the queue.
|
5
|
+
*/
|
6
|
+
|
7
|
+
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <time.h>

#include <ruby/digest.h>
|
11
|
+
|
12
|
+
#include "types/event.h"
|
13
|
+
#include "debug.h"
|
14
|
+
|
15
|
+
#define SIZE 1000
|
16
|
+
#define TEN_MILLISECONDS 10000000
|
17
|
+
|
18
|
+
pthread_cond_t has_space = PTHREAD_COND_INITIALIZER;
|
19
|
+
pthread_cond_t has_event = PTHREAD_COND_INITIALIZER;
|
20
|
+
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
|
21
|
+
|
22
|
+
static Event **store;
|
23
|
+
int producer_cursor = 0, consumer_cursor = 0, free_space = SIZE;
|
24
|
+
|
25
|
+
void Init_event_store() {
|
26
|
+
store = malloc(sizeof(Event *) * SIZE);
|
27
|
+
|
28
|
+
int i;
|
29
|
+
|
30
|
+
for(i = 0; i < SIZE; i++) {
|
31
|
+
store[i] = malloc(sizeof(Event));
|
32
|
+
}
|
33
|
+
}
|
34
|
+
|
35
|
+
static void wait_free_space() {
|
36
|
+
if(free_space == 0) pthread_cond_wait(&has_space, &lock);
|
37
|
+
}
|
38
|
+
|
39
|
+
static int wait_event() {
|
40
|
+
if(free_space == SIZE) {
|
41
|
+
rb_thread_check_ints(); // Otherwise the GC stucks!
|
42
|
+
|
43
|
+
struct timespec ts;
|
44
|
+
clock_gettime(CLOCK_REALTIME, &ts);
|
45
|
+
ts.tv_nsec += TEN_MILLISECONDS;
|
46
|
+
|
47
|
+
return pthread_cond_timedwait(&has_event, &lock, &ts); // returns 0 if the thread gets signal from another one
|
48
|
+
}
|
49
|
+
|
50
|
+
return 0;
|
51
|
+
}
|
52
|
+
|
53
|
+
static Event *claim_event() {
|
54
|
+
producer_cursor = producer_cursor % SIZE;
|
55
|
+
|
56
|
+
wait_free_space();
|
57
|
+
|
58
|
+
return store[producer_cursor++];
|
59
|
+
}
|
60
|
+
|
61
|
+
static Event *pull_event() {
|
62
|
+
consumer_cursor = consumer_cursor % SIZE;
|
63
|
+
|
64
|
+
if(wait_event() != 0) return NULL; // either timeout or an error
|
65
|
+
|
66
|
+
return store[consumer_cursor++];
|
67
|
+
}
|
68
|
+
|
69
|
+
static void event_produced() {
|
70
|
+
DEBUG_TEXT("Event produced. Free space: %d", free_space - 1);
|
71
|
+
|
72
|
+
free_space--;
|
73
|
+
|
74
|
+
pthread_cond_signal(&has_event);
|
75
|
+
}
|
76
|
+
|
77
|
+
static void event_consumed() {
|
78
|
+
free_space++;
|
79
|
+
|
80
|
+
pthread_cond_signal(&has_space);
|
81
|
+
}
|
82
|
+
|
83
|
+
// Takes a callback function which populates the event information.
|
84
|
+
void produce_event(Event event) {
|
85
|
+
pthread_mutex_lock(&lock);
|
86
|
+
|
87
|
+
Event *slot = claim_event();
|
88
|
+
|
89
|
+
memcpy(slot, &event, sizeof(Event));
|
90
|
+
|
91
|
+
event_produced();
|
92
|
+
|
93
|
+
pthread_mutex_unlock(&lock);
|
94
|
+
}
|
95
|
+
|
96
|
+
// Takes a callback function which consumes the event.
|
97
|
+
void consume_event(void(*processor_func)(Event *event)) {
|
98
|
+
pthread_mutex_lock(&lock);
|
99
|
+
|
100
|
+
Event *event = pull_event();
|
101
|
+
|
102
|
+
if(event != NULL) {
|
103
|
+
processor_func(event);
|
104
|
+
|
105
|
+
event_consumed();
|
106
|
+
}
|
107
|
+
|
108
|
+
pthread_mutex_unlock(&lock);
|
109
|
+
}
|
@@ -0,0 +1,77 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <stdbool.h>
|
3
|
+
#include <ruby/debug.h>
|
4
|
+
|
5
|
+
#include "event_store.h"
|
6
|
+
#include "trace.h"
|
7
|
+
|
8
|
+
static bool running = false;
|
9
|
+
static VALUE ractor;
|
10
|
+
|
11
|
+
// Ractor body: drains the event queue until `running` is cleared by rb_stop.
static VALUE listen_events(VALUE data, VALUE m, int _argc, const VALUE *_argv, VALUE _) {
  running = true;

  while(running) consume_event(&process_event);

  return Qtrue;
}
|
20
|
+
|
21
|
+
// at_exit hook: asks StackTrace::Sidecar.stop to shut the consumer down.
static VALUE exit_sidecar(VALUE data, VALUE m, int _argc, const VALUE *_argv, VALUE _) {
  VALUE stack_trace = rb_const_get(rb_cObject, rb_intern("StackTrace"));
  VALUE sidecar = rb_const_get(stack_trace, rb_intern("Sidecar"));

  rb_funcall(sidecar, rb_intern("stop"), 0);

  return Qtrue;
}
|
29
|
+
|
30
|
+
// StackTrace::Sidecar.run — boots the consumer Ractor once.
// Returns Qnil when already running, Qtrue otherwise.
static VALUE rb_run(VALUE self) {
  if(running) return Qnil;

  // Make sure the sidecar is shut down when the process exits.
  VALUE kernel = rb_const_get(rb_cObject, rb_intern("Kernel"));
  rb_block_call(kernel, rb_intern("at_exit"), 0, NULL, &exit_sidecar, (VALUE)NULL);

  VALUE stack_trace = rb_const_get(rb_cObject, rb_intern("StackTrace"));
  VALUE config = rb_funcall(stack_trace, rb_intern("configuration"), 0);
  VALUE filter = rb_funcall(config, rb_intern("check_proc"), 0);
  set_check_proc(filter); // hack to share an object between main thread and ractor

  VALUE ractor_class = rb_const_get(rb_cObject, rb_intern("Ractor"));
  ractor = rb_block_call(ractor_class, rb_intern("new"), 0, NULL, &listen_events, (VALUE)NULL);

  rb_gc_register_address(&ractor); // So the GC does not try to free this object.

  return Qtrue;
}
|
48
|
+
|
49
|
+
// StackTrace::Sidecar.stop — clears the run flag and unblocks the consumer.
// Returns Qnil when not running, Qtrue otherwise.
static VALUE rb_stop(VALUE self) {
  if(!running) return Qnil;

  running = false;

  // The sidecar might be waiting for an event to arrive
  // so here we are sending a no-op event to break the loop.
  // Same could be done by using `pthread_cond_timedwait` but
  // then I have to do more changes.
  // `{0}` (not the C23-only `{}`) keeps this valid under C99/C11 compilers.
  Event event = {0};
  event.event = NOOP_EVENT;

  produce_event(event);

  return Qtrue;
}
|
65
|
+
|
66
|
+
// StackTrace::Sidecar.running? — exposes the consumer's run flag to Ruby.
static VALUE rb_is_running(VALUE self) {
  if(running) return Qtrue;

  return Qfalse;
}
|
69
|
+
|
70
|
+
void Init_sidecar() {
|
71
|
+
VALUE main_module = rb_const_get(rb_cObject, rb_intern("StackTrace"));
|
72
|
+
VALUE sidecar_class = rb_define_class_under(main_module, "Sidecar", rb_cObject);
|
73
|
+
|
74
|
+
rb_define_singleton_method(sidecar_class, "run", rb_run, 0);
|
75
|
+
rb_define_singleton_method(sidecar_class, "stop", rb_stop, 0);
|
76
|
+
rb_define_singleton_method(sidecar_class, "running?", rb_is_running, 0);
|
77
|
+
}
|
@@ -0,0 +1 @@
|
|
1
|
+
void Init_sidecar();
|
@@ -0,0 +1,106 @@
|
|
1
|
+
#include "span.h"
|
2
|
+
#include "utils.h"
|
3
|
+
|
4
|
+
#define CHILDREN_BUF_INC_SIZE 10
|
5
|
+
|
6
|
+
// Heap-allocates a Span populated from a call event. `finished_at` and
// `return_value` are filled in later by close_span, `exception` by
// attach_exception, and `caller`/`children` by add_child.
// NOTE(review): `children` stays uninitialized until the first add_child —
// readers must check children_count first; verify all callers do.
Span *create_span(Event *event) {
  Span *span = malloc(sizeof(*span));

  span->started_at = event->at;
  span->klass = event->klass;
  span->self_klass = event->self_klass;
  span->receiver = event->receiver;
  span->method = event->method;
  span->arguments = event->arguments;
  span->singleton = event->for_singleton ? Qtrue : Qfalse;

  span->return_value = Qundef; // "never set" marker
  span->exception = Qundef;    // "never set" marker
  span->children_count = 0;

  return span;
}
|
22
|
+
|
23
|
+
// Grows `parent->children` in CHILDREN_BUF_INC_SIZE steps so it can hold at
// least one more child. The first call (children_count == 0) performs the
// initial allocation; the original code malloc'd and then immediately
// realloc'd the same buffer on the first append, and used the
// `p = realloc(p, …)` pattern that leaks the buffer when realloc fails.
static void ensure_children_capacity(Span *parent) {
  if(parent->children_count % CHILDREN_BUF_INC_SIZE != 0) return; // room left in current chunk

  // realloc(NULL, n) behaves like malloc(n), covering the first allocation.
  Span **old = parent->children_count == 0 ? NULL : parent->children;
  size_t new_size = ((size_t)parent->children_count + CHILDREN_BUF_INC_SIZE) * sizeof(Span *);

  // On OOM this stores NULL so the following write faults immediately,
  // matching the original unchecked-allocation failure mode — but the old
  // buffer is no longer the argument we clobber, so nothing is leaked
  // before the fault.
  parent->children = realloc(old, new_size);
}

// Appends `child` to `parent` and records the back-pointer that close_span
// uses to climb the call tree. Returns `child`.
Span *add_child(Span *parent, Span *child) {
  ensure_children_capacity(parent);

  parent->children[parent->children_count] = child;
  parent->children_count++;
  child->caller = parent;

  return child;
}
|
43
|
+
|
44
|
+
// Stamps the end time and return value on `span`, then hands back its
// caller so tracing continues one level up the call tree.
Span *close_span(Span *span, Event *event) {
  span->finished_at = event->at;
  span->return_value = event->return_value;

  return span->caller;
}
|
50
|
+
|
51
|
+
|
52
|
+
// Deallocate the memory occupied by span
|
53
|
+
// and its children.
|
54
|
+
// Recursively releases `span`, every descendant, and the children arrays.
void free_span(Span *span) {
  for(int i = 0; i < span->children_count; i++)
    free_span(span->children[i]);

  // The children buffer only exists once a child has been appended.
  if(span->children_count > 0) free(span->children);

  free(span);
}
|
66
|
+
|
67
|
+
// Elapsed time of the span, in whatever unit Event.at timestamps use.
int duration_of(Span *span) {
  long int elapsed = span->finished_at - span->started_at;

  return (int)elapsed;
}
|
70
|
+
|
71
|
+
// Serializes one span — and, through to_ruby_array, its whole subtree —
// into the Hash shape exposed to Ruby by StackTrace.current.
VALUE span_to_ruby_hash(Span *span) {
  VALUE hash = rb_hash_new();

  rb_hash_aset(hash, rb_str_new2("receiver"), span->receiver);
  rb_hash_aset(hash, rb_str_new2("defined_class"), span->klass);
  rb_hash_aset(hash, rb_str_new2("self_class"), span->self_klass);
  rb_hash_aset(hash, rb_str_new2("method_name"), span->method);
  rb_hash_aset(hash, rb_str_new2("singleton"), span->singleton);
  rb_hash_aset(hash, rb_str_new2("duration"), INT2FIX(duration_of(span)));
  rb_hash_aset(hash, rb_str_new2("spans"), to_ruby_array(span->children_count, span->children));

  // Qundef marks "never set": only populated fields get a key.
  if(span->exception != Qundef)
    rb_hash_aset(hash, rb_str_new2("exception"), span->exception);

  if(span->return_value != Qundef)
    rb_hash_aset(hash, rb_str_new2("return_value"), rb_funcall(span->return_value, rb_intern("st_name"), 0));

  if(span->arguments != Qundef) {
    // The arguments object was GC-pinned when captured; release the pin now
    // that the result hash keeps it reachable.
    rb_gc_unregister_address(&span->arguments);

    rb_hash_aset(hash, rb_str_new2("arguments"), span->arguments);
  }

  return hash;
}

// Maps `count` spans to a Ruby Array of hashes (recursing into children).
VALUE to_ruby_array(int count, Span **spans) {
  VALUE children = rb_ary_new();

  for(int i = 0; i < count; i++)
    rb_ary_push(children, span_to_ruby_hash(spans[i]));

  return children;
}
|
@@ -0,0 +1,9 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <types/event.h>
|
3
|
+
#include <types/span.h>
|
4
|
+
|
5
|
+
Span *create_span(Event *event);
|
6
|
+
Span *add_child(Span *parent, Span *child);
|
7
|
+
Span *close_span(Span *span, Event *event);
|
8
|
+
void free_span(Span *span);
|
9
|
+
VALUE to_ruby_array(int count, Span **span);
|
@@ -0,0 +1,54 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <ruby/debug.h>
|
3
|
+
|
4
|
+
#include "sidecar.h"
|
5
|
+
#include "event_producer.h"
|
6
|
+
#include "event_store.h"
|
7
|
+
#include "current_trace.h"
|
8
|
+
#include "trace.h"
|
9
|
+
#include "configuration.h"
|
10
|
+
|
11
|
+
static rb_event_flag_t traced_events() {
|
12
|
+
VALUE main_module = rb_const_get(rb_cObject, rb_intern("StackTrace"));
|
13
|
+
|
14
|
+
rb_event_flag_t events = 0;
|
15
|
+
|
16
|
+
VALUE configuration = rb_funcall(main_module, rb_intern("configuration"), 0);
|
17
|
+
VALUE trace_ruby = rb_funcall(configuration, rb_intern("trace_ruby"), 0);
|
18
|
+
VALUE trace_c = rb_funcall(configuration, rb_intern("trace_c"), 0);
|
19
|
+
|
20
|
+
if(RTEST(trace_ruby)) events |= RUBY_EVENT_CALL | RUBY_EVENT_RETURN;
|
21
|
+
if(RTEST(trace_c)) events |= RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN;
|
22
|
+
if(events != 0) events |= RUBY_EVENT_RAISE;
|
23
|
+
|
24
|
+
return events;
|
25
|
+
}
|
26
|
+
|
27
|
+
// StackTrace.trace_point — lazily creates and memoizes (in @trace_point)
// the TracePoint whose callback is create_event.
VALUE rb_trace_point(VALUE self) {
  VALUE trace_point = rb_iv_get(self, "@trace_point");

  if(!NIL_P(trace_point)) return trace_point;

  trace_point = rb_tracepoint_new(Qnil, traced_events(), create_event, NULL);
  rb_iv_set(self, "@trace_point", trace_point);

  return trace_point;
}
|
37
|
+
|
38
|
+
void Init_stack_trace() {
|
39
|
+
rb_ext_ractor_safe(true);
|
40
|
+
|
41
|
+
VALUE main_module = rb_const_get(rb_cObject, rb_intern("StackTrace"));
|
42
|
+
VALUE configuration_class = rb_const_get(main_module, rb_intern("Configuration"));
|
43
|
+
|
44
|
+
rb_define_singleton_method(main_module, "trace_point", rb_trace_point, 0);
|
45
|
+
rb_define_singleton_method(main_module, "start_trace", rb_create_trace, 0);
|
46
|
+
rb_define_singleton_method(main_module, "complete_trace", rb_send_eot, 0);
|
47
|
+
rb_define_singleton_method(main_module, "current", rb_get_current_trace, 0);
|
48
|
+
|
49
|
+
rb_define_method(configuration_class, "inspect_return_values=", rb_set_inspect_return_values, 1);
|
50
|
+
rb_define_method(configuration_class, "inspect_arguments=", rb_set_inspect_arguments, 1);
|
51
|
+
|
52
|
+
Init_event_store();
|
53
|
+
Init_sidecar();
|
54
|
+
}
|
@@ -0,0 +1,132 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <pthread.h>
|
3
|
+
#include <ruby/debug.h>
|
4
|
+
#include <ruby/thread.h>
|
5
|
+
#include <stdbool.h>
|
6
|
+
|
7
|
+
#include "types/trace.h"
|
8
|
+
#include "span.h"
|
9
|
+
#include "debug.h"
|
10
|
+
#include "current_trace.h"
|
11
|
+
|
12
|
+
static VALUE check_proc;
|
13
|
+
|
14
|
+
pthread_cond_t trace_finished = PTHREAD_COND_INITIALIZER;
|
15
|
+
pthread_mutex_t trace_access_mutex = PTHREAD_MUTEX_INITIALIZER;
|
16
|
+
|
17
|
+
// Releases a trace together with its entire span tree.
void free_trace(Trace *trace) {
  free_span(trace->top_span);

  free(trace);
}
|
21
|
+
|
22
|
+
// Handles END_OF_OBSOLOTE_TRACE_EVENT: the trace has been superseded and
// nothing references it anymore, so its memory can be reclaimed.
void process_obsolote_event(Event *event) {
  free_trace(event->trace);
}
|
26
|
+
|
27
|
+
// Stashes the user-supplied filter proc consulted by is_tracked_event.
// NOTE(review): this static VALUE is not GC-registered — presumably kept
// alive by the Configuration object; confirm it cannot be collected while
// the sidecar still calls it.
void set_check_proc(VALUE proc) {
  check_proc = proc;
}
|
30
|
+
|
31
|
+
// Asks the configured check proc whether (self_klass, method) should be
// traced; with no proc configured every event is tracked.
bool is_tracked_event(Event *event) {
  if(!RTEST(check_proc)) return true; // Check proc is not configured, all the events will be tracked.

  VALUE verdict = rb_funcall(check_proc, rb_intern("call"), 2, event->self_klass, event->method);

  return RTEST(verdict);
}
|
38
|
+
|
39
|
+
// Call event: pushes a fresh span under the current one and descends into it.
void create_new_span(Event *event) {
  if(!is_tracked_event(event)) return;

  Span *span = create_span(event);

  add_child(event->trace->current_span, span);
  event->trace->current_span = span;
}
|
48
|
+
|
49
|
+
// Return event: finalizes the current span and climbs back to its caller.
void close_current_span(Event *event) {
  if(!is_tracked_event(event)) return;

  event->trace->current_span = close_span(event->trace->current_span, event);
}
|
56
|
+
|
57
|
+
// Raise event: records the raised exception on the span where it happened.
void attach_exception(Event *event) {
  event->trace->current_span->exception = event->raised_exception;
}
|
62
|
+
|
63
|
+
// END_OF_TRACE: marks the trace finished under the mutex and wakes every
// thread blocked in ensure_trace_is_finished.
void close_current_trace(Event *event) {
  pthread_mutex_lock(&trace_access_mutex);

  event->trace->finished = true;
  pthread_cond_broadcast(&trace_finished);

  pthread_mutex_unlock(&trace_access_mutex);
}
|
69
|
+
|
70
|
+
// Dispatches one queued event to its handler.
// NOOP_EVENT (sent by rb_stop only to wake the consumer loop) and any
// unrecognised code deliberately fall through to `default` and are ignored;
// the original switch had no default, leaving that intent implicit.
void process_event(Event *event) {
  DEBUG("Event received: ", event);

  switch (event->event) {
    case RUBY_EVENT_CALL:
    case RUBY_EVENT_C_CALL:
    case RUBY_EVENT_B_CALL:
      create_new_span(event);
      break;
    case RUBY_EVENT_RETURN:
    case RUBY_EVENT_C_RETURN:
    case RUBY_EVENT_B_RETURN:
      close_current_span(event);
      break;
    case RUBY_EVENT_RAISE:
      attach_exception(event);
      break;
    case END_OF_TRACE:
      close_current_trace(event);
      break;
    case END_OF_OBSOLOTE_TRACE_EVENT:
      process_obsolote_event(event);
      break;
    default:
      break; // NOOP_EVENT and anything unknown: intentionally ignored
  }
}
|
90
|
+
|
91
|
+
// Ruby threads are preemptive(from the kernel and Ruby programmer POV) but forced to be
|
92
|
+
// cooperative by the VM, therefore, if we don't yield back, other threads won't have chance to run.
|
93
|
+
// For this reason, we need to call this function with `rb_thread_call_without_gvl` to release the GVL
|
94
|
+
// while waiting for the trace to be ready without blocking other Ruby programmer-level threads.
|
95
|
+
void ensure_trace_is_finished() {
|
96
|
+
pthread_mutex_lock(&trace_access_mutex);
|
97
|
+
|
98
|
+
while(!get_current_trace()->finished) { // This is the easiest way to wait for the related trace, not the most efficient one though!
|
99
|
+
DEBUG_TEXT("Waiting for the trace to be ready...");
|
100
|
+
|
101
|
+
pthread_cond_wait(&trace_finished, &trace_access_mutex);
|
102
|
+
}
|
103
|
+
|
104
|
+
pthread_mutex_unlock(&trace_access_mutex);
|
105
|
+
}
|
106
|
+
|
107
|
+
Trace *get_current_trace_without_gvl() {
|
108
|
+
rb_thread_call_without_gvl((void *)ensure_trace_is_finished, NULL, NULL, NULL);
|
109
|
+
|
110
|
+
return get_current_trace();
|
111
|
+
}
|
112
|
+
|
113
|
+
// Top-level serialization: {"spans" => [...]} built from the trace's
// top span's children.
VALUE to_ruby_hash(Trace *trace) {
  VALUE hash = rb_hash_new();
  Span *top = trace->top_span;

  rb_hash_aset(hash, rb_str_new2("spans"), to_ruby_array(top->children_count, top->children));

  return hash;
}
|
120
|
+
|
121
|
+
// StackTrace.current — waits for the current trace to finish and returns
// it as a Ruby Hash. Raises if the TracePoint is still enabled, because
// the trace would still be mutating underneath us.
// NOTE(review): when tracing never started, @trace_point is nil and the
// `enabled?` call raises NoMethodError — confirm callers guard against this.
VALUE rb_get_current_trace(VALUE _self) {
  VALUE stack_trace = rb_const_get(rb_cObject, rb_intern("StackTrace"));
  VALUE trace_point = rb_iv_get(stack_trace, "@trace_point");

  if(RTEST(rb_funcall(trace_point, rb_intern("enabled?"), 0)))
    rb_raise(rb_eRuntimeError, "Trace is active!");

  Trace *trace = get_current_trace_without_gvl();

  return to_ruby_hash(trace);
}
|
@@ -0,0 +1,31 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <ruby/debug.h>
|
3
|
+
#include <types/trace.h>
|
4
|
+
#include <stdbool.h>
|
5
|
+
#include <sys/time.h>
|
6
|
+
|
7
|
+
#ifndef EVENT_H
|
8
|
+
#define EVENT_H
|
9
|
+
|
10
|
+
#define END_OF_TRACE 0xfffffff0
|
11
|
+
#define END_OF_OBSOLOTE_TRACE_EVENT 0xffffffff
|
12
|
+
#define NOOP_EVENT 0xfffffff1
|
13
|
+
|
14
|
+
typedef struct EventS Event;
|
15
|
+
|
16
|
+
struct EventS {
|
17
|
+
Trace *trace;
|
18
|
+
VALUE tp_val;
|
19
|
+
rb_event_flag_t event;
|
20
|
+
rb_trace_arg_t *trace_arg;
|
21
|
+
VALUE klass;
|
22
|
+
VALUE self_klass;
|
23
|
+
VALUE receiver;
|
24
|
+
VALUE method;
|
25
|
+
VALUE raised_exception;
|
26
|
+
VALUE return_value;
|
27
|
+
VALUE arguments;
|
28
|
+
bool for_singleton;
|
29
|
+
long int at;
|
30
|
+
};
|
31
|
+
#endif
|
@@ -0,0 +1,22 @@
|
|
1
|
+
#ifndef SPAN_H
|
2
|
+
#define SPAN_H
|
3
|
+
|
4
|
+
typedef struct SpanS Span;
|
5
|
+
|
6
|
+
struct SpanS {
|
7
|
+
long int started_at;
|
8
|
+
long int finished_at;
|
9
|
+
|
10
|
+
VALUE klass;
|
11
|
+
VALUE self_klass;
|
12
|
+
VALUE receiver;
|
13
|
+
VALUE method;
|
14
|
+
VALUE singleton;
|
15
|
+
VALUE exception;
|
16
|
+
VALUE return_value;
|
17
|
+
VALUE arguments;
|
18
|
+
Span *caller;
|
19
|
+
int children_count;
|
20
|
+
Span **children;
|
21
|
+
};
|
22
|
+
#endif
|
@@ -0,0 +1 @@
|
|
1
|
+
long int get_monotonic_m_secs(void);
|
@@ -0,0 +1,22 @@
|
|
1
|
+
# frozen_string_literal: true

module StackTrace
  # Reads a method's argument values out of a TracePoint binding and maps
  # each parameter name to its displayable value (via +st_name+).
  class ArgumentExtractor
    class << self
      # Returns a Hash of parameter name => extracted value; a value is nil
      # when it cannot be evaluated or inspected.
      def extract(trace_point)
        trace_point.parameters
                   .map(&:last)
                   .each_with_object({}) do |parameter, memo|
          memo[parameter] = extract_argument(trace_point, parameter)
        end
      end

      private

      # Evaluates the parameter in the frame's binding.
      # ScriptError covers the SyntaxError that `eval` can raise for
      # unnamed/unreadable parameters; StandardError covers NameError and
      # st_name failures. The original rescued Exception, which also
      # swallowed Interrupt/SystemExit/NoMemoryError — those now propagate.
      def extract_argument(trace_point, parameter)
        trace_point.binding.eval(parameter.to_s).st_name
      rescue StandardError, ScriptError
        nil
      end
    end
  end
end
|
@@ -1,62 +1,7 @@
|
|
1
|
-
#
|
2
|
-
|
3
|
-
require "objspace"
|
1
|
+
# frozen_string_literal: true
|
4
2
|
|
5
3
|
module StackTrace
|
6
4
|
class Configuration
|
7
|
-
|
8
|
-
enabled: false,
|
9
|
-
modules: {},
|
10
|
-
}
|
11
|
-
|
12
|
-
attr_writer *CONFIG_ATTRIBUTES.keys
|
13
|
-
|
14
|
-
CONFIG_ATTRIBUTES.each do |attr_name, default_value|
|
15
|
-
define_method(attr_name) do
|
16
|
-
instance_variable_get("@#{attr_name}") || default_value
|
17
|
-
end
|
18
|
-
end
|
19
|
-
|
20
|
-
def for(klass)
|
21
|
-
config_holder = config_holder_for(klass)
|
22
|
-
modules.find { |module_name_conf, _| config_for_class?(module_name_conf, config_holder) }
|
23
|
-
end
|
24
|
-
|
25
|
-
private
|
26
|
-
|
27
|
-
# Configuration for StackTrace is done by specifying the class/module itself
|
28
|
-
# so if the klass we receive here is a singleton_class, we should get the
|
29
|
-
# class/module of that singleton_class first.
|
30
|
-
def config_holder_for(klass)
|
31
|
-
klass.singleton_class? ? ObjectSpace.each_object(klass).first : klass
|
32
|
-
end
|
33
|
-
|
34
|
-
def config_for_class?(config, klass)
|
35
|
-
case config
|
36
|
-
when Regexp
|
37
|
-
klass.respond_to?(:name) && klass.name =~ config
|
38
|
-
when Hash
|
39
|
-
match_hash_config(config, klass)
|
40
|
-
else
|
41
|
-
[config].flatten.include?(klass)
|
42
|
-
end
|
43
|
-
end
|
44
|
-
|
45
|
-
def match_hash_config(config, klass)
|
46
|
-
inherits_config?(klass, config) || path_config?(klass, config)
|
47
|
-
end
|
48
|
-
|
49
|
-
def inherits_config?(klass, inherits: nil, **)
|
50
|
-
inherits &&
|
51
|
-
klass.ancestors.include?(inherits) &&
|
52
|
-
klass != inherits
|
53
|
-
end
|
54
|
-
|
55
|
-
def path_config?(klass, path: nil, **)
|
56
|
-
path &&
|
57
|
-
klass.respond_to?(:stack_trace_source_location) &&
|
58
|
-
klass.stack_trace_source_location &&
|
59
|
-
klass.stack_trace_source_location.match(path)
|
60
|
-
end
|
5
|
+
attr_accessor :trace_ruby, :trace_c, :check_proc
|
61
6
|
end
|
62
7
|
end
|