ruby-prof 1.2.0 → 1.3.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -18,7 +18,7 @@ typedef enum
18
18
  MEASURE_MEMORY
19
19
  } prof_measure_mode_t;
20
20
 
21
- typedef struct
21
+ typedef struct prof_measurer_t
22
22
  {
23
23
  get_measurement measure;
24
24
  prof_measure_mode_t mode;
@@ -138,7 +138,7 @@ prof_method_t* prof_get_method(VALUE self)
138
138
  {
139
139
  /* Can't use Data_Get_Struct because that triggers the event hook
140
140
  ending up in endless recursion. */
141
- prof_method_t* result = DATA_PTR(self);
141
+ prof_method_t* result = RTYPEDDATA_DATA(self);
142
142
 
143
143
  if (!result)
144
144
  rb_raise(rb_eRuntimeError, "This RubyProf::MethodInfo instance has already been freed, likely because its profile has been freed.");
@@ -146,9 +146,11 @@ prof_method_t* prof_get_method(VALUE self)
146
146
  return result;
147
147
  }
148
148
 
149
- prof_method_t* prof_method_create(VALUE klass, VALUE msym, VALUE source_file, int source_line)
149
+ prof_method_t* prof_method_create(VALUE profile, VALUE klass, VALUE msym, VALUE source_file, int source_line)
150
150
  {
151
151
  prof_method_t* result = ALLOC(prof_method_t);
152
+ result->profile = profile;
153
+
152
154
  result->key = method_key(klass, msym);
153
155
  result->klass_flags = 0;
154
156
 
@@ -181,8 +183,11 @@ prof_method_t* prof_method_create(VALUE klass, VALUE msym, VALUE source_file, in
181
183
  out our Ruby object reference.*/
182
184
  static void prof_method_ruby_gc_free(void* data)
183
185
  {
184
- prof_method_t* method = (prof_method_t*)data;
185
- method->object = Qnil;
186
+ if (data)
187
+ {
188
+ prof_method_t* method = (prof_method_t*)data;
189
+ method->object = Qnil;
190
+ }
186
191
  }
187
192
 
188
193
  static void prof_method_free(prof_method_t* method)
@@ -191,9 +196,7 @@ static void prof_method_free(prof_method_t* method)
191
196
  yes clean it up so as to avoid a segmentation fault. */
192
197
  if (method->object != Qnil)
193
198
  {
194
- RDATA(method->object)->dmark = NULL;
195
- RDATA(method->object)->dfree = NULL;
196
- RDATA(method->object)->data = NULL;
199
+ RTYPEDDATA(method->object)->data = NULL;
197
200
  method->object = Qnil;
198
201
  }
199
202
 
@@ -210,8 +213,13 @@ size_t prof_method_size(const void* data)
210
213
 
211
214
  void prof_method_mark(void* data)
212
215
  {
216
+ if (!data) return;
217
+
213
218
  prof_method_t* method = (prof_method_t*)data;
214
219
 
220
+ if (method->profile != Qnil)
221
+ rb_gc_mark(method->profile);
222
+
215
223
  if (method->object != Qnil)
216
224
  rb_gc_mark(method->object);
217
225
 
@@ -229,16 +237,29 @@ void prof_method_mark(void* data)
229
237
 
230
238
  static VALUE prof_method_allocate(VALUE klass)
231
239
  {
232
- prof_method_t* method_data = prof_method_create(Qnil, Qnil, Qnil, 0);
240
+ prof_method_t* method_data = prof_method_create(Qnil, Qnil, Qnil, Qnil, 0);
233
241
  method_data->object = prof_method_wrap(method_data);
234
242
  return method_data->object;
235
243
  }
236
244
 
245
+ static const rb_data_type_t method_info_type =
246
+ {
247
+ .wrap_struct_name = "MethodInfo",
248
+ .function =
249
+ {
250
+ .dmark = prof_method_mark,
251
+ .dfree = prof_method_ruby_gc_free,
252
+ .dsize = prof_method_size,
253
+ },
254
+ .data = NULL,
255
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY
256
+ };
257
+
237
258
  VALUE prof_method_wrap(prof_method_t* method)
238
259
  {
239
260
  if (method->object == Qnil)
240
261
  {
241
- method->object = Data_Wrap_Struct(cRpMethodInfo, prof_method_mark, prof_method_ruby_gc_free, method);
262
+ method->object = TypedData_Wrap_Struct(cRpMethodInfo, &method_info_type, method);
242
263
  }
243
264
  return method->object;
244
265
  }
@@ -392,7 +413,7 @@ static VALUE prof_method_call_trees(VALUE self)
392
413
  /* :nodoc: */
393
414
  static VALUE prof_method_dump(VALUE self)
394
415
  {
395
- prof_method_t* method_data = DATA_PTR(self);
416
+ prof_method_t* method_data = prof_get_method(self);
396
417
  VALUE result = rb_hash_new();
397
418
 
398
419
  rb_hash_aset(result, ID2SYM(rb_intern("klass_name")), prof_method_klass_name(self));
@@ -414,7 +435,7 @@ static VALUE prof_method_dump(VALUE self)
414
435
  /* :nodoc: */
415
436
  static VALUE prof_method_load(VALUE self, VALUE data)
416
437
  {
417
- prof_method_t* method_data = RDATA(self)->data;
438
+ prof_method_t* method_data = prof_get_method(self);
418
439
  method_data->object = self;
419
440
 
420
441
  method_data->klass_name = rb_hash_aref(data, ID2SYM(rb_intern("klass_name")));
@@ -447,7 +468,7 @@ static VALUE prof_method_load(VALUE self, VALUE data)
447
468
  void rp_init_method_info()
448
469
  {
449
470
  /* MethodInfo */
450
- cRpMethodInfo = rb_define_class_under(mProf, "MethodInfo", rb_cData);
471
+ cRpMethodInfo = rb_define_class_under(mProf, "MethodInfo", rb_cObject);
451
472
  rb_undef_method(CLASS_OF(cRpMethodInfo), "new");
452
473
  rb_define_alloc_func(cRpMethodInfo, prof_method_allocate);
453
474
 
@@ -9,38 +9,37 @@
9
9
 
10
10
  extern VALUE cRpMethodInfo;
11
11
 
12
- /* Source relation bit offsets. */
12
+ // Source relation bit offsets.
13
13
  enum {
14
- kModuleIncludee = 0x1, /* Included in module */
15
- kClassSingleton = 0x2, /* Singleton of a class */
16
- kModuleSingleton = 0x4, /* Singleton of a module */
17
- kObjectSingleton = 0x8, /* Singleton of an object */
18
- kOtherSingleton = 0x10 /* Singleton of unkown object */
14
+ kModuleIncludee = 0x1, // Included in module
15
+ kClassSingleton = 0x2, // Singleton of a class
16
+ kModuleSingleton = 0x4, // Singleton of a module
17
+ kObjectSingleton = 0x8, // Singleton of an object
18
+ kOtherSingleton = 0x10 // Singleton of unknown object
19
19
  };
20
20
 
21
- /* Profiling information for each method. */
22
- /* Excluded methods have no call_trees, source_klass, or source_file. */
23
- typedef struct
21
+ // Profiling information for each method.
22
+ // Excluded methods have no call_trees, source_klass, or source_file.
23
+ typedef struct prof_method_t
24
24
  {
25
- st_data_t key; /* Table key */
25
+ VALUE profile; // Profile this method is associated with - needed for mark phase
26
+ struct prof_call_trees_t* call_trees; // Call infos that call this method
27
+ st_table* allocations_table; // Tracks object allocations
26
28
 
27
- int visits; /* Current visits on the stack */
29
+ st_data_t key; // Table key
30
+ unsigned int klass_flags; // Information about the type of class
31
+ VALUE klass; // Resolved klass
32
+ VALUE klass_name; // Resolved klass name for this method
33
+ VALUE method_name; // Resolved method name for this method
28
34
 
29
- struct prof_call_trees_t* call_trees; /* Call infos that call this method */
30
- st_table* allocations_table; /* Tracks object allocations */
31
-
32
- unsigned int klass_flags; /* Information about the type of class */
33
- VALUE klass; /* Resolved klass */
34
- VALUE klass_name; /* Resolved klass name for this method */
35
- VALUE method_name; /* Resolved method name for this method */
36
-
37
- VALUE object; /* Cached ruby object */
35
+ VALUE object; // Cached ruby object
38
36
 
39
37
  bool recursive;
40
- VALUE source_file; /* Source file */
41
- int source_line; /* Line number */
38
+ int visits; // Current visits on the stack
39
+ VALUE source_file; // Source file
40
+ int source_line; // Line number
42
41
 
43
- prof_measurement_t* measurement;
42
+ prof_measurement_t* measurement; // Stores measurement data for this method
44
43
  } prof_method_t;
45
44
 
46
45
  void rp_init_method_info(void);
@@ -51,7 +50,7 @@ st_table* method_table_create(void);
51
50
  prof_method_t* method_table_lookup(st_table* table, st_data_t key);
52
51
  size_t method_table_insert(st_table* table, st_data_t key, prof_method_t* val);
53
52
  void method_table_free(st_table* table);
54
- prof_method_t* prof_method_create(VALUE klass, VALUE msym, VALUE source_file, int source_line);
53
+ prof_method_t* prof_method_create(VALUE profile, VALUE klass, VALUE msym, VALUE source_file, int source_line);
55
54
  prof_method_t* prof_get_method(VALUE self);
56
55
 
57
56
  VALUE prof_method_wrap(prof_method_t* result);
@@ -109,15 +109,17 @@ static int excludes_method(st_data_t key, prof_profile_t* profile)
109
109
  method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
110
110
  }
111
111
 
112
- static prof_method_t* create_method(prof_profile_t* profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
112
+ static prof_method_t* create_method(VALUE profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
113
113
  {
114
- prof_method_t* result = prof_method_create(klass, msym, source_file, source_line);
115
- method_table_insert(profile->last_thread_data->method_table, result->key, result);
114
+ prof_method_t* result = prof_method_create(profile, klass, msym, source_file, source_line);
115
+
116
+ prof_profile_t* profile_t = prof_get_profile(profile);
117
+ method_table_insert(profile_t->last_thread_data->method_table, result->key, result);
116
118
 
117
119
  return result;
118
120
  }
119
121
 
120
- static prof_method_t* check_parent_method(prof_profile_t* profile, thread_data_t* thread_data)
122
+ static prof_method_t* check_parent_method(VALUE profile, thread_data_t* thread_data)
121
123
  {
122
124
  VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
123
125
  st_data_t key = method_key(cProfile, msym);
@@ -132,7 +134,7 @@ static prof_method_t* check_parent_method(prof_profile_t* profile, thread_data_t
132
134
  return result;
133
135
  }
134
136
 
135
- prof_method_t* check_method(prof_profile_t* profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
137
+ prof_method_t* check_method(VALUE profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
136
138
  {
137
139
  VALUE klass = rb_tracearg_defined_class(trace_arg);
138
140
 
@@ -150,7 +152,8 @@ prof_method_t* check_method(prof_profile_t* profile, rb_trace_arg_t* trace_arg,
150
152
 
151
153
  st_data_t key = method_key(klass, msym);
152
154
 
153
- if (excludes_method(key, profile))
155
+ prof_profile_t* profile_t = prof_get_profile(profile);
156
+ if (excludes_method(key, profile_t))
154
157
  return NULL;
155
158
 
156
159
  prof_method_t* result = method_table_lookup(thread_data->method_table, key);
@@ -208,15 +211,17 @@ static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, doubl
208
211
 
209
212
  static void prof_event_hook(VALUE trace_point, void* data)
210
213
  {
211
- prof_profile_t* profile = (prof_profile_t*)data;
214
+ VALUE profile = (VALUE)data;
215
+ prof_profile_t* profile_t = prof_get_profile(profile);
216
+
212
217
  rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
213
- double measurement = prof_measure(profile->measurer, trace_arg);
218
+ double measurement = prof_measure(profile_t->measurer, trace_arg);
214
219
  rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
215
220
  VALUE self = rb_tracearg_self(trace_arg);
216
221
 
217
222
  if (trace_file != NULL)
218
223
  {
219
- prof_trace(profile, trace_arg, measurement);
224
+ prof_trace(profile_t, trace_arg, measurement);
220
225
  }
221
226
 
222
227
  /* Special case - skip any methods from the mProf
@@ -224,7 +229,7 @@ static void prof_event_hook(VALUE trace_point, void* data)
224
229
  if (self == mProf)
225
230
  return;
226
231
 
227
- thread_data_t* thread_data = check_fiber(profile, measurement);
232
+ thread_data_t* thread_data = check_fiber(profile_t, measurement);
228
233
 
229
234
  if (!thread_data->trace)
230
235
  return;
@@ -252,7 +257,7 @@ static void prof_event_hook(VALUE trace_point, void* data)
252
257
  }
253
258
  else
254
259
  {
255
- frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
260
+ frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
256
261
  }
257
262
 
258
263
  thread_data->call_tree = call_tree;
@@ -306,7 +311,7 @@ static void prof_event_hook(VALUE trace_point, void* data)
306
311
  thread_data->call_tree = call_tree;
307
312
 
308
313
  // Push a new frame onto the stack for a new c-call or ruby call (into a method)
309
- prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
314
+ prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
310
315
  next_frame->source_file = method->source_file;
311
316
  next_frame->source_line = method->source_line;
312
317
  break;
@@ -347,12 +352,12 @@ void prof_install_hook(VALUE self)
347
352
  RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
348
353
  RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
349
354
  RUBY_EVENT_LINE,
350
- prof_event_hook, profile);
355
+ prof_event_hook, (void*)self);
351
356
  rb_ary_push(profile->tracepoints, event_tracepoint);
352
357
 
353
358
  if (profile->measurer->track_allocations)
354
359
  {
355
- VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, profile);
360
+ VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, (void*)self);
356
361
  rb_ary_push(profile->tracepoints, allocation_tracepoint);
357
362
  }
358
363
 
@@ -377,7 +382,7 @@ prof_profile_t* prof_get_profile(VALUE self)
377
382
  {
378
383
  /* Can't use Data_Get_Struct because that triggers the event hook
379
384
  ending up in endless recursion. */
380
- return DATA_PTR(self);
385
+ return RTYPEDDATA_DATA(self);
381
386
  }
382
387
 
383
388
  static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
@@ -399,19 +404,19 @@ static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
399
404
  return ST_CONTINUE;
400
405
  }
401
406
 
402
- static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
407
+ static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
403
408
  {
404
409
  prof_method_t* method = (prof_method_t*)value;
405
410
  prof_method_mark(method);
406
411
  return ST_CONTINUE;
407
412
  }
408
413
 
409
- static void prof_mark(prof_profile_t* profile)
414
+ static void prof_profile_mark(void* data)
410
415
  {
416
+ prof_profile_t* profile = (prof_profile_t*)data;
411
417
  rb_gc_mark(profile->tracepoints);
412
418
  rb_gc_mark(profile->running);
413
419
  rb_gc_mark(profile->paused);
414
- rb_gc_mark(profile->tracepoints);
415
420
 
416
421
  // If GC stress is true (useful for debugging), when threads_table_create is called in the
417
422
  // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
@@ -419,14 +424,14 @@ static void prof_mark(prof_profile_t* profile)
419
424
  rb_st_foreach(profile->threads_tbl, mark_threads, 0);
420
425
 
421
426
  if (profile->exclude_methods_tbl)
422
- rb_st_foreach(profile->exclude_methods_tbl, mark_methods, 0);
427
+ rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
423
428
  }
424
429
 
425
- /* Freeing the profile creates a cascade of freeing.
426
- It fress the thread table, which frees its methods,
427
- which frees its call infos. */
428
- static void prof_free(prof_profile_t* profile)
430
+ /* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
431
  each thread and its associated call tree and methods. */
432
+ static void prof_profile_ruby_gc_free(void* data)
429
433
  {
434
+ prof_profile_t* profile = (prof_profile_t*)data;
430
435
  profile->last_thread_data = NULL;
431
436
 
432
437
  threads_table_free(profile->threads_tbl);
@@ -454,11 +459,29 @@ static void prof_free(prof_profile_t* profile)
454
459
  xfree(profile);
455
460
  }
456
461
 
462
+ size_t prof_profile_size(const void* data)
463
+ {
464
+ return sizeof(prof_profile_t);
465
+ }
466
+
467
+ static const rb_data_type_t profile_type =
468
+ {
469
+ .wrap_struct_name = "Profile",
470
+ .function =
471
+ {
472
+ .dmark = prof_profile_mark,
473
+ .dfree = prof_profile_ruby_gc_free,
474
+ .dsize = prof_profile_size,
475
+ },
476
+ .data = NULL,
477
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY
478
+ };
479
+
457
480
  static VALUE prof_allocate(VALUE klass)
458
481
  {
459
482
  VALUE result;
460
483
  prof_profile_t* profile;
461
- result = Data_Make_Struct(klass, prof_profile_t, prof_mark, prof_free, profile);
484
+ result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
462
485
  profile->threads_tbl = threads_table_create();
463
486
  profile->exclude_threads_tbl = NULL;
464
487
  profile->include_threads_tbl = NULL;
@@ -846,7 +869,7 @@ static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
846
869
 
847
870
  if (!method)
848
871
  {
849
- method = prof_method_create(klass, msym, Qnil, 0);
872
+ method = prof_method_create(self, klass, msym, Qnil, 0);
850
873
  method_table_insert(profile->exclude_methods_tbl, method->key, method);
851
874
  }
852
875
 
@@ -870,7 +893,7 @@ VALUE prof_profile_load(VALUE self, VALUE data)
870
893
  for (int i = 0; i < rb_array_len(threads); i++)
871
894
  {
872
895
  VALUE thread = rb_ary_entry(threads, i);
873
- thread_data_t* thread_data = DATA_PTR(thread);
896
+ thread_data_t* thread_data = prof_get_thread(thread);
874
897
  rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
875
898
  }
876
899
 
@@ -10,7 +10,7 @@
10
10
 
11
11
  extern VALUE cProfile;
12
12
 
13
- typedef struct
13
+ typedef struct prof_profile_t
14
14
  {
15
15
  VALUE running;
16
16
  VALUE paused;
@@ -10,7 +10,7 @@
10
10
  /* Temporary object that maintains profiling information
11
11
  for active methods. They are created and destroyed
12
12
  as the program moves up and down its stack. */
13
- typedef struct
13
+ typedef struct prof_frame_t
14
14
  {
15
15
  /* Caching prof_method_t values significantly
16
16
  increases performance. */
@@ -34,7 +34,7 @@ void prof_frame_pause(prof_frame_t*, double current_measurement);
34
34
  void prof_frame_unpause(prof_frame_t*, double current_measurement);
35
35
 
36
36
  /* Current stack of active methods.*/
37
- typedef struct
37
+ typedef struct prof_stack_t
38
38
  {
39
39
  prof_frame_t* start;
40
40
  prof_frame_t* end;
@@ -46,11 +46,14 @@ static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
46
46
 
47
47
  size_t prof_thread_size(const void* data)
48
48
  {
49
- return sizeof(prof_call_tree_t);
49
+ return sizeof(thread_data_t);
50
50
  }
51
51
 
52
52
  void prof_thread_mark(void* data)
53
53
  {
54
+ if (!data)
55
+ return;
56
+
54
57
  thread_data_t* thread = (thread_data_t*)data;
55
58
 
56
59
  if (thread->object != Qnil)
@@ -75,8 +78,11 @@ void prof_thread_mark(void* data)
75
78
 
76
79
  void prof_thread_ruby_gc_free(void* data)
77
80
  {
78
- thread_data_t* thread_data = (thread_data_t*)data;
79
- thread_data->object = Qnil;
81
+ if (data)
82
+ {
83
+ thread_data_t* thread_data = (thread_data_t*)data;
84
+ thread_data->object = Qnil;
85
+ }
80
86
  }
81
87
 
82
88
  static void prof_thread_free(thread_data_t* thread_data)
@@ -85,9 +91,7 @@ static void prof_thread_free(thread_data_t* thread_data)
85
91
  yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
86
92
  if (thread_data->object != Qnil)
87
93
  {
88
- RDATA(thread_data->object)->dmark = NULL;
89
- RDATA(thread_data->object)->dfree = NULL;
90
- RDATA(thread_data->object)->data = NULL;
94
+ RTYPEDDATA(thread_data->object)->data = NULL;
91
95
  thread_data->object = Qnil;
92
96
  }
93
97
 
@@ -101,11 +105,24 @@ static void prof_thread_free(thread_data_t* thread_data)
101
105
  xfree(thread_data);
102
106
  }
103
107
 
108
+ static const rb_data_type_t thread_type =
109
+ {
110
+ .wrap_struct_name = "ThreadInfo",
111
+ .function =
112
+ {
113
+ .dmark = prof_thread_mark,
114
+ .dfree = prof_thread_ruby_gc_free,
115
+ .dsize = prof_thread_size,
116
+ },
117
+ .data = NULL,
118
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY
119
+ };
120
+
104
121
  VALUE prof_thread_wrap(thread_data_t* thread)
105
122
  {
106
123
  if (thread->object == Qnil)
107
124
  {
108
- thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, prof_thread_ruby_gc_free, thread);
125
+ thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
109
126
  }
110
127
  return thread->object;
111
128
  }
@@ -117,11 +134,11 @@ static VALUE prof_thread_allocate(VALUE klass)
117
134
  return thread_data->object;
118
135
  }
119
136
 
120
- static thread_data_t* prof_get_thread(VALUE self)
137
+ thread_data_t* prof_get_thread(VALUE self)
121
138
  {
122
139
  /* Can't use Data_Get_Struct because that triggers the event hook
123
140
  ending up in endless recursion. */
124
- thread_data_t* result = DATA_PTR(self);
141
+ thread_data_t* result = RTYPEDDATA_DATA(self);
125
142
  if (!result)
126
143
  rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
127
144
 
@@ -299,7 +316,7 @@ static VALUE prof_thread_methods(VALUE self)
299
316
  /* :nodoc: */
300
317
  static VALUE prof_thread_dump(VALUE self)
301
318
  {
302
- thread_data_t* thread_data = DATA_PTR(self);
319
+ thread_data_t* thread_data = RTYPEDDATA_DATA(self);
303
320
 
304
321
  VALUE result = rb_hash_new();
305
322
  rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
@@ -312,7 +329,7 @@ static VALUE prof_thread_dump(VALUE self)
312
329
  /* :nodoc: */
313
330
  static VALUE prof_thread_load(VALUE self, VALUE data)
314
331
  {
315
- thread_data_t* thread_data = DATA_PTR(self);
332
+ thread_data_t* thread_data = RTYPEDDATA_DATA(self);
316
333
 
317
334
  VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
318
335
  thread_data->call_tree = prof_get_call_tree(call_tree);
@@ -323,7 +340,7 @@ static VALUE prof_thread_load(VALUE self, VALUE data)
323
340
  for (int i = 0; i < rb_array_len(methods); i++)
324
341
  {
325
342
  VALUE method = rb_ary_entry(methods, i);
326
- prof_method_t* method_data = DATA_PTR(method);
343
+ prof_method_t* method_data = RTYPEDDATA_DATA(method);
327
344
  method_table_insert(thread_data->method_table, method_data->key, method_data);
328
345
  }
329
346
 
@@ -332,7 +349,7 @@ static VALUE prof_thread_load(VALUE self, VALUE data)
332
349
 
333
350
  void rp_init_thread(void)
334
351
  {
335
- cRpThread = rb_define_class_under(mProf, "Thread", rb_cData);
352
+ cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
336
353
  rb_undef_method(CLASS_OF(cRpThread), "new");
337
354
  rb_define_alloc_func(cRpThread, prof_thread_allocate);
338
355