railsbench 0.9.2 → 0.9.8

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (51)
  1. data/CHANGELOG +1808 -451
  2. data/GCPATCH +73 -0
  3. data/INSTALL +5 -0
  4. data/Manifest.txt +23 -13
  5. data/PROBLEMS +0 -0
  6. data/README +23 -7
  7. data/Rakefile +1 -2
  8. data/bin/railsbench +7 -1
  9. data/config/benchmarking.rb +0 -0
  10. data/config/benchmarks.rb +3 -2
  11. data/config/benchmarks.yml +0 -0
  12. data/images/empty.png +0 -0
  13. data/images/minus.png +0 -0
  14. data/images/plus.png +0 -0
  15. data/install.rb +1 -1
  16. data/latest_changes.txt +18 -0
  17. data/lib/benchmark.rb +0 -0
  18. data/lib/railsbench/benchmark.rb +576 -0
  19. data/lib/railsbench/benchmark_specs.rb +63 -63
  20. data/lib/railsbench/gc_info.rb +38 -3
  21. data/lib/railsbench/perf_info.rb +1 -1
  22. data/lib/railsbench/perf_utils.rb +202 -179
  23. data/lib/railsbench/railsbenchmark.rb +213 -55
  24. data/lib/railsbench/version.rb +9 -9
  25. data/lib/railsbench/write_headers_only.rb +15 -15
  26. data/postinstall.rb +0 -0
  27. data/ruby185gc.patch +56 -29
  28. data/ruby186gc.patch +564 -0
  29. data/ruby19gc.patch +2425 -0
  30. data/script/convert_raw_data_files +49 -49
  31. data/script/generate_benchmarks +14 -4
  32. data/script/perf_bench +12 -8
  33. data/script/perf_comp +1 -1
  34. data/script/perf_comp_gc +9 -1
  35. data/script/perf_diff +2 -2
  36. data/script/perf_diff_gc +2 -2
  37. data/script/perf_html +1 -1
  38. data/script/perf_plot +192 -75
  39. data/script/perf_plot_gc +213 -74
  40. data/script/perf_prof +29 -10
  41. data/script/perf_run +2 -2
  42. data/script/perf_run_gc +2 -2
  43. data/script/perf_table +2 -2
  44. data/script/perf_tex +1 -1
  45. data/script/perf_times +6 -6
  46. data/script/perf_times_gc +14 -2
  47. data/script/run_urls +16 -10
  48. data/setup.rb +0 -0
  49. data/test/railsbench_test.rb +0 -0
  50. data/test/test_helper.rb +2 -0
  51. metadata +77 -55
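
The headline additions in this release are the new GC patches (data/ruby186gc.patch, data/ruby19gc.patch, plus the reworked data/ruby185gc.patch). As the patch below documents, a Ruby built with one of these patches gains a small GC statistics API (GC.enable_stats, GC.disable_stats, GC.clear_stats, GC.collections, GC.time, GC.dump, GC.log) and reads tuning variables such as RUBY_GC_STATS, RUBY_GC_DATA_FILE and RUBY_HEAP_MIN_SLOTS at startup. The following sketch is not part of the gem; it only illustrates how that API might be exercised on an interpreter built with the patch applied (on a stock Ruby these methods do not exist).

    # Minimal sketch, assuming Ruby was built with the railsbench GC patch.
    GC.enable_stats            # start counting collections and GC time
    GC.clear_stats             # reset GC.collections and GC.time to 0

    1_000.times { "x" * 1_024 }        # some throwaway allocation work

    GC.log("after allocation loop")    # writes the string to the GC log file
    GC.dump                            # dumps heap slot sizes to the GC log file

    puts "collections: #{GC.collections}"
    puts "gc time (us): #{GC.time}"    # microseconds spent in GC while stats were on

    GC.disable_stats

A benchmark run can also be pointed at a log file from the environment, e.g. RUBY_GC_DATA_FILE=/tmp/gc.log RUBY_GC_STATS=1, which the patched interpreter picks up in set_gc_parameters at startup.
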
data/ruby19gc.patch ADDED
@@ -0,0 +1,2425 @@
+ railsbench gc patch
+
+ From: Stefan Kaes <skaes@gmx.net>
+
+
+ ---
+
+ gc.c | 1768 ++++++++++++++++++++++++++++++++++++++++--------------------------
+ 1 files changed, 1067 insertions(+), 701 deletions(-)
+
+
+ diff --git a/gc.c b/gc.c
+ index d691942..19f6a4d 100644
+ --- a/gc.c
+ +++ b/gc.c
+ @@ -25,8 +25,16 @@
17
+ #include <setjmp.h>
18
+ #include <sys/types.h>
19
+
20
+ +#ifdef _WIN32
21
+ +#include <string.h>
22
+ +#else
23
+ +#include <strings.h>
24
+ +#endif
25
+ +
26
+ #ifdef HAVE_SYS_TIME_H
27
+ #include <sys/time.h>
28
+ +#elif defined(_WIN32)
29
+ +#include <time.h>
30
+ #endif
31
+
32
+ #ifdef HAVE_SYS_RESOURCE_H
33
+ @@ -98,26 +106,26 @@ int ruby_gc_debug_indent = 0;
34
+
35
+ typedef struct RVALUE {
36
+ union {
37
+ - struct {
38
+ - VALUE flags; /* always 0 for freed obj */
39
+ - struct RVALUE *next;
40
+ - } free;
41
+ - struct RBasic basic;
42
+ - struct RObject object;
43
+ - struct RClass klass;
44
+ - struct RFloat flonum;
45
+ - struct RString string;
46
+ - struct RArray array;
47
+ - struct RRegexp regexp;
48
+ - struct RHash hash;
49
+ - struct RData data;
50
+ - struct RStruct rstruct;
51
+ - struct RBignum bignum;
52
+ - struct RFile file;
53
+ - struct RNode node;
54
+ - struct RMatch match;
55
+ - struct RRational rational;
56
+ - struct RComplex complex;
57
+ + struct {
58
+ + VALUE flags; /* always 0 for freed obj */
59
+ + struct RVALUE *next;
60
+ + } free;
61
+ + struct RBasic basic;
62
+ + struct RObject object;
63
+ + struct RClass klass;
64
+ + struct RFloat flonum;
65
+ + struct RString string;
66
+ + struct RArray array;
67
+ + struct RRegexp regexp;
68
+ + struct RHash hash;
69
+ + struct RData data;
70
+ + struct RStruct rstruct;
71
+ + struct RBignum bignum;
72
+ + struct RFile file;
73
+ + struct RNode node;
74
+ + struct RMatch match;
75
+ + struct RRational rational;
76
+ + struct RComplex complex;
77
+ } as;
78
+ #ifdef GC_DEBUG
79
+ char *file;
80
+ @@ -138,6 +146,29 @@ struct heaps_slot {
81
+ #define HEAP_MIN_SLOTS 10000
82
+ #define FREE_MIN 4096
83
+
84
+ +static int gc_heaps_increment = 10;
85
+ +static int gc_heap_slots = 10000;
86
+ +static int heap_min_slots = 10000;
87
+ +
88
+ +static int heap_free_min = 4096;
89
+ +static int heap_slots_increment = 10000;
90
+ +static double heap_slots_growth_factor = 1.8;
91
+ +
92
+ +static long initial_malloc_limit = GC_MALLOC_LIMIT;
93
+ +static int verbose_gc_stats = Qfalse;
94
+ +static FILE* gc_data_file = NULL;
95
+ +
96
+ +
97
+ +#if HAVE_LONG_LONG
98
+ +#define GC_TIME_TYPE LONG_LONG
99
+ +#else
100
+ +#define GC_TIME_TYPE long
101
+ +#endif
102
+ +
103
+ +static int gc_statistics = 0;
104
+ +static GC_TIME_TYPE gc_time = 0;
105
+ +static int gc_collections = 0;
106
+ +
107
+ struct gc_list {
108
+ VALUE *varptr;
109
+ struct gc_list *next;
110
+ @@ -147,34 +178,34 @@ struct gc_list {
111
+
112
+ typedef struct rb_objspace {
113
+ struct {
114
+ - size_t limit;
115
+ - size_t increase;
116
+ + size_t limit;
117
+ + size_t increase;
118
+ #if CALC_EXACT_MALLOC_SIZE
119
+ - size_t allocated_size;
120
+ - size_t allocations;
121
+ + size_t allocated_size;
122
+ + size_t allocations;
123
+ #endif
124
+ } malloc_params;
125
+ struct {
126
+ - size_t increment;
127
+ - struct heaps_slot *ptr;
128
+ - size_t length;
129
+ - size_t used;
130
+ - RVALUE *freelist;
131
+ - RVALUE *range[2];
132
+ - RVALUE *freed;
133
+ + size_t increment;
134
+ + struct heaps_slot *ptr;
135
+ + size_t length;
136
+ + size_t used;
137
+ + RVALUE *freelist;
138
+ + RVALUE *range[2];
139
+ + RVALUE *freed;
140
+ } heap;
141
+ struct {
142
+ - int dont_gc;
143
+ - int during_gc;
144
+ + int dont_gc;
145
+ + int during_gc;
146
+ } flags;
147
+ struct {
148
+ - st_table *table;
149
+ - RVALUE *deferred;
150
+ + st_table *table;
151
+ + RVALUE *deferred;
152
+ } final;
153
+ struct {
154
+ - VALUE buffer[MARK_STACK_MAX];
155
+ - VALUE *ptr;
156
+ - int overflow;
157
+ + VALUE buffer[MARK_STACK_MAX];
158
+ + VALUE *ptr;
159
+ + int overflow;
160
+ } markstack;
161
+ struct gc_list *global_list;
162
+ unsigned int count;
163
+ @@ -189,28 +220,28 @@ int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
164
+ static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
165
+ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
166
+ #endif
167
+ -#define malloc_limit objspace->malloc_params.limit
168
+ -#define malloc_increase objspace->malloc_params.increase
169
+ -#define heap_slots objspace->heap.slots
170
+ -#define heaps objspace->heap.ptr
171
+ -#define heaps_length objspace->heap.length
172
+ -#define heaps_used objspace->heap.used
173
+ -#define freelist objspace->heap.freelist
174
+ -#define lomem objspace->heap.range[0]
175
+ -#define himem objspace->heap.range[1]
176
+ -#define heaps_inc objspace->heap.increment
177
+ -#define heaps_freed objspace->heap.freed
178
+ -#define dont_gc objspace->flags.dont_gc
179
+ -#define during_gc objspace->flags.during_gc
180
+ -#define finalizer_table objspace->final.table
181
+ -#define deferred_final_list objspace->final.deferred
182
+ -#define mark_stack objspace->markstack.buffer
183
+ -#define mark_stack_ptr objspace->markstack.ptr
184
+ -#define mark_stack_overflow objspace->markstack.overflow
185
+ -#define global_List objspace->global_list
186
+ -#define ruby_gc_stress objspace->gc_stress
187
+ -
188
+ -#define need_call_final (finalizer_table && finalizer_table->num_entries)
189
+ +#define malloc_limit objspace->malloc_params.limit
190
+ +#define malloc_increase objspace->malloc_params.increase
191
+ +#define heap_slots objspace->heap.slots
192
+ +#define heaps objspace->heap.ptr
193
+ +#define heaps_length objspace->heap.length
194
+ +#define heaps_used objspace->heap.used
195
+ +#define freelist objspace->heap.freelist
196
+ +#define lomem objspace->heap.range[0]
197
+ +#define himem objspace->heap.range[1]
198
+ +#define heaps_inc objspace->heap.increment
199
+ +#define heaps_freed objspace->heap.freed
200
+ +#define dont_gc objspace->flags.dont_gc
201
+ +#define during_gc objspace->flags.during_gc
202
+ +#define finalizer_table objspace->final.table
203
+ +#define deferred_final_list objspace->final.deferred
204
+ +#define mark_stack objspace->markstack.buffer
205
+ +#define mark_stack_ptr objspace->markstack.ptr
206
+ +#define mark_stack_overflow objspace->markstack.overflow
207
+ +#define global_List objspace->global_list
208
+ +#define ruby_gc_stress objspace->gc_stress
209
+ +
210
+ +#define need_call_final (finalizer_table && finalizer_table->num_entries)
211
+
212
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
213
+ rb_objspace_t *
214
+ @@ -261,14 +292,14 @@ rb_memerror(void)
215
+ {
216
+ rb_thread_t *th = GET_THREAD();
217
+ if (!nomem_error ||
218
+ - (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
219
+ - fprintf(stderr, "[FATAL] failed to allocate memory\n");
220
+ - exit(EXIT_FAILURE);
221
+ + (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
222
+ + fprintf(stderr, "[FATAL] failed to allocate memory\n");
223
+ + exit(EXIT_FAILURE);
224
+ }
225
+ if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
226
+ - rb_thread_raised_clear(th);
227
+ - GET_THREAD()->errinfo = nomem_error;
228
+ - JUMP_TAG(TAG_RAISE);
229
+ + rb_thread_raised_clear(th);
230
+ + GET_THREAD()->errinfo = nomem_error;
231
+ + JUMP_TAG(TAG_RAISE);
232
+ }
233
+ rb_thread_raised_set(th, RAISED_NOMEMORY);
234
+ rb_exc_raise(nomem_error);
235
+ @@ -315,7 +346,7 @@ vm_xmalloc(rb_objspace_t *objspace, size_t size)
236
+ void *mem;
237
+
238
+ if (size < 0) {
239
+ - rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
240
+ + rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
241
+ }
242
+ if (size == 0) size = 1;
243
+
244
+ @@ -324,17 +355,17 @@ vm_xmalloc(rb_objspace_t *objspace, size_t size)
245
+ #endif
246
+
247
+ if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
248
+ - (malloc_increase+size) > malloc_limit) {
249
+ - garbage_collect(objspace);
250
+ + (malloc_increase+size) > malloc_limit) {
251
+ + garbage_collect(objspace);
252
+ }
253
+ RUBY_CRITICAL(mem = malloc(size));
254
+ if (!mem) {
255
+ - if (garbage_collect(objspace)) {
256
+ - RUBY_CRITICAL(mem = malloc(size));
257
+ - }
258
+ - if (!mem) {
259
+ - rb_memerror();
260
+ - }
261
+ + if (garbage_collect(objspace)) {
262
+ + RUBY_CRITICAL(mem = malloc(size));
263
+ + }
264
+ + if (!mem) {
265
+ + rb_memerror();
266
+ + }
267
+ }
268
+ malloc_increase += size;
269
+
270
+ @@ -354,7 +385,7 @@ vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
271
+ void *mem;
272
+
273
+ if (size < 0) {
274
+ - rb_raise(rb_eArgError, "negative re-allocation size");
275
+ + rb_raise(rb_eArgError, "negative re-allocation size");
276
+ }
277
+ if (!ptr) return ruby_xmalloc(size);
278
+ if (size == 0) size = 1;
279
+ @@ -368,11 +399,11 @@ vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
280
+
281
+ RUBY_CRITICAL(mem = realloc(ptr, size));
282
+ if (!mem) {
283
+ - if (garbage_collect(objspace)) {
284
+ - RUBY_CRITICAL(mem = realloc(ptr, size));
285
+ - }
286
+ - if (!mem) {
287
+ - rb_memerror();
288
+ + if (garbage_collect(objspace)) {
289
+ + RUBY_CRITICAL(mem = realloc(ptr, size));
290
+ + }
291
+ + if (!mem) {
292
+ + rb_memerror();
293
+ }
294
+ }
295
+ malloc_increase += size;
296
+ @@ -411,7 +442,7 @@ ruby_xmalloc2(size_t n, size_t size)
297
+ {
298
+ size_t len = size * n;
299
+ if (n != 0 && size != len / n) {
300
+ - rb_raise(rb_eArgError, "malloc: possible integer overflow");
301
+ + rb_raise(rb_eArgError, "malloc: possible integer overflow");
302
+ }
303
+ return vm_xmalloc(&rb_objspace, len);
304
+ }
305
+ @@ -436,7 +467,7 @@ ruby_xrealloc2(void *ptr, size_t n, size_t size)
306
+ {
307
+ size_t len = size * n;
308
+ if (n != 0 && size != len / n) {
309
+ - rb_raise(rb_eArgError, "realloc: possible integer overflow");
310
+ + rb_raise(rb_eArgError, "realloc: possible integer overflow");
311
+ }
312
+ return ruby_xrealloc(ptr, len);
313
+ }
314
+ @@ -479,7 +510,7 @@ rb_gc_enable(void)
315
+ * Disables garbage collection, returning <code>true</code> if garbage
316
+ * collection was already disabled.
317
+ *
318
+ - * GC.disable #=> false
319
+ + * GC.disable #=> false or true
320
+ * GC.disable #=> true
321
+ *
322
+ */
323
+ @@ -494,6 +525,104 @@ rb_gc_disable(void)
324
+ return old;
325
+ }
326
+
327
+ +/*
328
+ + * call-seq:
329
+ + * GC.enable_stats => true or false
330
+ + *
331
+ + * Enables garbage collection statistics, returning <code>true</code> if garbage
332
+ + * collection statistics was already enabled.
333
+ + *
334
+ + * GC.enable_stats #=> false or true
335
+ + * GC.enable_stats #=> true
336
+ + *
337
+ + */
338
+ +
339
+ +VALUE
340
+ +rb_gc_enable_stats()
341
+ +{
342
+ + int old = gc_statistics;
343
+ + gc_statistics = Qtrue;
344
+ + return old;
345
+ +}
346
+ +
347
+ +/*
348
+ + * call-seq:
349
+ + * GC.disable_stats => true or false
350
+ + *
351
+ + * Disables garbage collection statistics, returning <code>true</code> if garbage
352
+ + * collection statistics was already disabled.
353
+ + *
354
+ + * GC.disable_stats #=> false or true
355
+ + * GC.disable_stats #=> true
356
+ + *
357
+ + */
358
+ +
359
+ +VALUE
360
+ +rb_gc_disable_stats()
361
+ +{
362
+ + int old = gc_statistics;
363
+ + gc_statistics = Qfalse;
364
+ + return old;
365
+ +}
366
+ +
367
+ +/*
368
+ + * call-seq:
369
+ + * GC.clear_stats => nil
370
+ + *
371
+ + * Clears garbage collection statistics, returning nil. This resets the number
372
+ + * of collections (GC.collections) and the time used (GC.time) to 0.
373
+ + *
374
+ + * GC.clear_stats #=> nil
375
+ + *
376
+ + */
377
+ +
378
+ +VALUE
379
+ +rb_gc_clear_stats()
380
+ +{
381
+ + gc_collections = 0;
382
+ + gc_time = 0;
383
+ + return Qnil;
384
+ +}
385
+ +
386
+ +/*
387
+ + * call-seq:
388
+ + * GC.collections => Integer
389
+ + *
390
+ + * Returns the number of garbage collections performed while GC statistics collection
391
+ + * was enabled.
392
+ + *
393
+ + * GC.collections #=> 35
394
+ + *
395
+ + */
396
+ +
397
+ +VALUE
398
+ +rb_gc_collections()
399
+ +{
400
+ + return INT2NUM(gc_collections);
401
+ +}
402
+ +
403
+ +/*
404
+ + * call-seq:
405
+ + * GC.time => Integer
406
+ + *
407
+ + * Returns the time spent during garbage collection while GC statistics collection
408
+ + * was enabled (in micro seconds).
409
+ + *
410
+ + * GC.time #=> 20000
411
+ + *
412
+ + */
413
+ +
414
+ +VALUE
415
+ +rb_gc_time()
416
+ +{
417
+ +#if HAVE_LONG_LONG
418
+ + return LL2NUM(gc_time);
419
+ +#else
420
+ + return LONG2NUM(gc_time);
421
+ +#endif
422
+ +}
423
+ +
424
+ +
425
+ VALUE rb_mGC;
426
+
427
+ void
428
+ @@ -522,19 +651,19 @@ rb_gc_unregister_address(VALUE *addr)
429
+ struct gc_list *tmp = global_List;
430
+
431
+ if (tmp->varptr == addr) {
432
+ - global_List = tmp->next;
433
+ - xfree(tmp);
434
+ - return;
435
+ + global_List = tmp->next;
436
+ + xfree(tmp);
437
+ + return;
438
+ }
439
+ while (tmp->next) {
440
+ - if (tmp->next->varptr == addr) {
441
+ - struct gc_list *t = tmp->next;
442
+ + if (tmp->next->varptr == addr) {
443
+ + struct gc_list *t = tmp->next;
444
+
445
+ - tmp->next = tmp->next->next;
446
+ - xfree(t);
447
+ - break;
448
+ - }
449
+ - tmp = tmp->next;
450
+ + tmp->next = tmp->next->next;
451
+ + xfree(t);
452
+ + break;
453
+ + }
454
+ + tmp = tmp->next;
455
+ }
456
+ }
457
+
458
+ @@ -547,17 +676,17 @@ allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
459
+
460
+ size = next_heaps_length*sizeof(struct heaps_slot);
461
+ RUBY_CRITICAL(
462
+ - if (heaps_used > 0) {
463
+ - p = (struct heaps_slot *)realloc(heaps, size);
464
+ - if (p) heaps = p;
465
+ - }
466
+ - else {
467
+ - p = heaps = (struct heaps_slot *)malloc(size);
468
+ - }
469
+ - );
470
+ + if (heaps_used > 0) {
471
+ + p = (struct heaps_slot *)realloc(heaps, size);
472
+ + if (p) heaps = p;
473
+ + }
474
+ + else {
475
+ + p = heaps = (struct heaps_slot *)malloc(size);
476
+ + }
477
+ + );
478
+ if (p == 0) {
479
+ - during_gc = 0;
480
+ - rb_memerror();
481
+ + during_gc = 0;
482
+ + rb_memerror();
483
+ }
484
+ heaps_length = next_heaps_length;
485
+ }
486
+ @@ -568,40 +697,40 @@ assign_heap_slot(rb_objspace_t *objspace)
487
+ RVALUE *p, *pend, *membase;
488
+ size_t hi, lo, mid;
489
+ int objs;
490
+ -
491
+ +
492
+ objs = HEAP_OBJ_LIMIT;
493
+ RUBY_CRITICAL(p = (RVALUE*)malloc(HEAP_SIZE));
494
+ if (p == 0) {
495
+ - during_gc = 0;
496
+ - rb_memerror();
497
+ + during_gc = 0;
498
+ + rb_memerror();
499
+ }
500
+
501
+ membase = p;
502
+ if ((VALUE)p % sizeof(RVALUE) != 0) {
503
+ - p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
504
+ - if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < ((char*)p - (char*)membase)) {
505
+ - objs--;
506
+ - }
507
+ + p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
508
+ + if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < ((char*)p - (char*)membase)) {
509
+ + objs--;
510
+ + }
511
+ }
512
+
513
+ lo = 0;
514
+ hi = heaps_used;
515
+ while (lo < hi) {
516
+ - register RVALUE *mid_membase;
517
+ - mid = (lo + hi) / 2;
518
+ - mid_membase = heaps[mid].membase;
519
+ - if (mid_membase < membase) {
520
+ - lo = mid + 1;
521
+ - }
522
+ - else if (mid_membase > membase) {
523
+ - hi = mid;
524
+ - }
525
+ - else {
526
+ - rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, membase, (VALUE)mid);
527
+ - }
528
+ + register RVALUE *mid_membase;
529
+ + mid = (lo + hi) / 2;
530
+ + mid_membase = heaps[mid].membase;
531
+ + if (mid_membase < membase) {
532
+ + lo = mid + 1;
533
+ + }
534
+ + else if (mid_membase > membase) {
535
+ + hi = mid;
536
+ + }
537
+ + else {
538
+ + rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, membase, (VALUE)mid);
539
+ + }
540
+ }
541
+ if (hi < heaps_used) {
542
+ - MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
543
+ + MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
544
+ }
545
+ heaps[hi].membase = membase;
546
+ heaps[hi].slot = p;
547
+ @@ -612,26 +741,169 @@ assign_heap_slot(rb_objspace_t *objspace)
548
+ heaps_used++;
549
+
550
+ while (p < pend) {
551
+ - p->as.free.flags = 0;
552
+ - p->as.free.next = freelist;
553
+ - freelist = p;
554
+ - p++;
555
+ + p->as.free.flags = 0;
556
+ + p->as.free.next = freelist;
557
+ + freelist = p;
558
+ + p++;
559
+ }
560
+ }
561
+
562
+ +static void set_gc_parameters(rb_objspace_t *objspace)
563
+ +{
564
+ + char *gc_stats_ptr, *min_slots_ptr, *free_min_ptr, *heap_slots_incr_ptr,
565
+ + *heap_incr_ptr, *malloc_limit_ptr, *gc_heap_file_ptr, *heap_slots_growth_factor_ptr;
566
+ +
567
+ + gc_data_file = stderr;
568
+ +
569
+ + gc_stats_ptr = getenv("RUBY_GC_STATS");
570
+ + if (gc_stats_ptr != NULL) {
571
+ + int gc_stats_i = atoi(gc_stats_ptr);
572
+ + if (gc_stats_i > 0) {
573
+ + verbose_gc_stats = Qtrue;
574
+ + }
575
+ + }
576
+ +
577
+ + gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE");
578
+ + if (gc_heap_file_ptr != NULL) {
579
+ + FILE* data_file = fopen(gc_heap_file_ptr, "w");
580
+ + if (data_file != NULL) {
581
+ + gc_data_file = data_file;
582
+ + }
583
+ + else {
584
+ + fprintf(stderr,
585
+ + "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr);
586
+ + }
587
+ + }
588
+ +
589
+ + min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
590
+ + if (min_slots_ptr != NULL) {
591
+ + int min_slots_i = atoi(min_slots_ptr);
592
+ + if (verbose_gc_stats) {
593
+ + fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", min_slots_ptr);
594
+ + }
595
+ + if (min_slots_i > 0) {
596
+ + gc_heap_slots = min_slots_i;
597
+ + heap_min_slots = min_slots_i;
598
+ + }
599
+ + }
600
+ +
601
+ + free_min_ptr = getenv("RUBY_HEAP_FREE_MIN");
602
+ + if (free_min_ptr != NULL) {
603
+ + int free_min_i = atoi(free_min_ptr);
604
+ + if (verbose_gc_stats) {
605
+ + fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", free_min_ptr);
606
+ + }
607
+ + if (free_min_i > 0) {
608
+ + heap_free_min = free_min_i;
609
+ + }
610
+ + }
611
+ +
612
+ + heap_incr_ptr = getenv("RUBY_HEAP_INCREMENT");
613
+ + if (heap_incr_ptr != NULL) {
614
+ + int heap_incr_i = atoi(heap_incr_ptr);
615
+ + if (verbose_gc_stats) {
616
+ + fprintf(gc_data_file, "RUBY_HEAP_INCREMENT=%s\n", heap_incr_ptr);
617
+ + }
618
+ + if (heap_incr_i > 0) {
619
+ + gc_heaps_increment = heap_incr_i;
620
+ + }
621
+ + }
622
+ +
623
+ + heap_slots_incr_ptr = getenv("RUBY_HEAP_SLOTS_INCREMENT");
624
+ + if (heap_slots_incr_ptr != NULL) {
625
+ + int heap_slots_incr_i = atoi(heap_slots_incr_ptr);
626
+ + if (verbose_gc_stats) {
627
+ + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr);
628
+ + }
629
+ + if (heap_slots_incr_i > 0) {
630
+ + heap_slots_increment = heap_slots_incr_i;
631
+ + }
632
+ + }
633
+ +
634
+ + heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR");
635
+ + if (heap_slots_growth_factor_ptr != NULL) {
636
+ + double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr);
637
+ + if (verbose_gc_stats) {
638
+ + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr);
639
+ + }
640
+ + if (heap_slots_growth_factor_d > 0) {
641
+ + heap_slots_growth_factor = heap_slots_growth_factor_d;
642
+ + }
643
+ + }
644
+ +
645
+ + malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
646
+ + if (malloc_limit_ptr != NULL) {
647
+ + int malloc_limit_i = atol(malloc_limit_ptr);
648
+ + if (verbose_gc_stats) {
649
+ + fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", malloc_limit_ptr);
650
+ + }
651
+ + if (malloc_limit_i > 0) {
652
+ + initial_malloc_limit = malloc_limit_i;
653
+ + }
654
+ + }
655
+ +}
656
+ +
657
+ +/*
658
+ + * call-seq:
659
+ + * GC.dump => nil
660
+ + *
661
+ + * dumps information about the current GC data structures to the GC log file
662
+ + *
663
+ + * GC.dump #=> nil
664
+ + *
665
+ + */
666
+ +
667
+ +VALUE
668
+ +rb_gc_dump()
669
+ +{
670
+ + int i;
671
+ + rb_objspace_t *objspace = &rb_objspace;
672
+ +
673
+ + for (i = 0; i < heaps_used; i++) {
674
+ + int heap_size = heaps[i].limit;
675
+ + fprintf(gc_data_file, "HEAP[%2d]: size=%7d\n", i, heap_size);
676
+ + }
677
+ +
678
+ + return Qnil;
679
+ +}
680
+ +
681
+ +/*
682
+ + * call-seq:
683
+ + * GC.log String => String
684
+ + *
685
+ + * Logs string to the GC data file and returns it.
686
+ + *
687
+ + * GC.log "manual GC call" #=> "manual GC call"
688
+ + *
689
+ + */
690
+ +
691
+ +VALUE
692
+ +rb_gc_log(VALUE self, VALUE original_str)
693
+ +{
694
+ + if (original_str == Qnil) {
695
+ + fprintf(gc_data_file, "\n");
696
+ + }
697
+ + else {
698
+ + VALUE str = StringValue(original_str);
699
+ + char *p = RSTRING_PTR(str);
700
+ + fprintf(gc_data_file, "%s\n", p);
701
+ + }
702
+ + return original_str;
703
+ +}
704
+ +
705
+ static void
706
+ init_heap(rb_objspace_t *objspace)
707
+ {
708
+ size_t add, i;
709
+
710
+ - add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;
711
+ + add = heap_min_slots / HEAP_OBJ_LIMIT;
712
+
713
+ if ((heaps_used + add) > heaps_length) {
714
+ - allocate_heaps(objspace, heaps_used + add);
715
+ + allocate_heaps(objspace, heaps_used + add);
716
+ }
717
+
718
+ for (i = 0; i < add; i++) {
719
+ - assign_heap_slot(objspace);
720
+ + assign_heap_slot(objspace);
721
+ }
722
+ heaps_inc = 0;
723
+ }
724
+ @@ -640,11 +912,11 @@ init_heap(rb_objspace_t *objspace)
725
+ static void
726
+ set_heaps_increment(rb_objspace_t *objspace)
727
+ {
728
+ - size_t next_heaps_length = heaps_used * 1.8;
729
+ + size_t next_heaps_length = heaps_used * heap_slots_growth_factor;
730
+ heaps_inc = next_heaps_length - heaps_used;
731
+
732
+ if (next_heaps_length > heaps_length) {
733
+ - allocate_heaps(objspace, next_heaps_length);
734
+ + allocate_heaps(objspace, next_heaps_length);
735
+ }
736
+ }
737
+
738
+ @@ -652,9 +924,9 @@ static int
739
+ heaps_increment(rb_objspace_t *objspace)
740
+ {
741
+ if (heaps_inc > 0) {
742
+ - assign_heap_slot(objspace);
743
+ - heaps_inc--;
744
+ - return Qtrue;
745
+ + assign_heap_slot(objspace);
746
+ + heaps_inc--;
747
+ + return Qtrue;
748
+ }
749
+ return Qfalse;
750
+ }
751
+ @@ -665,12 +937,12 @@ static VALUE
752
+ rb_newobj_from_heap(rb_objspace_t *objspace)
753
+ {
754
+ VALUE obj;
755
+ -
756
+ +
757
+ if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
758
+ - if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
759
+ - during_gc = 0;
760
+ - rb_memerror();
761
+ - }
762
+ + if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
763
+ + during_gc = 0;
764
+ + rb_memerror();
765
+ + }
766
+ }
767
+
768
+ obj = (VALUE)freelist;
769
+ @@ -695,10 +967,10 @@ rb_fill_value_cache(rb_thread_t *th)
770
+
771
+ /* LOCK */
772
+ for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
773
+ - VALUE v = rb_newobj_from_heap(objspace);
774
+ + VALUE v = rb_newobj_from_heap(objspace);
775
+
776
+ - th->value_cache[i] = v;
777
+ - RBASIC(v)->flags = FL_MARK;
778
+ + th->value_cache[i] = v;
779
+ + RBASIC(v)->flags = FL_MARK;
780
+ }
781
+ th->value_cache_ptr = &th->value_cache[0];
782
+ rv = rb_newobj_from_heap(objspace);
783
+ @@ -713,7 +985,7 @@ rb_during_gc(void)
784
+ rb_objspace_t *objspace = &rb_objspace;
785
+ return during_gc;
786
+ }
787
+ -
788
+ +
789
+ VALUE
790
+ rb_newobj(void)
791
+ {
792
+ @@ -730,23 +1002,23 @@ rb_newobj(void)
793
+ #endif
794
+
795
+ if (during_gc) {
796
+ - dont_gc = 1;
797
+ - during_gc = 0;
798
+ - rb_bug("object allocation during garbage collection phase");
799
+ + dont_gc = 1;
800
+ + during_gc = 0;
801
+ + rb_bug("object allocation during garbage collection phase");
802
+ }
803
+
804
+ #if USE_VALUE_CACHE
805
+ if (v) {
806
+ - RBASIC(v)->flags = 0;
807
+ - th->value_cache_ptr++;
808
+ + RBASIC(v)->flags = 0;
809
+ + th->value_cache_ptr++;
810
+ }
811
+ else {
812
+ - v = rb_fill_value_cache(th);
813
+ + v = rb_fill_value_cache(th);
814
+ }
815
+
816
+ #if defined(GC_DEBUG)
817
+ printf("cache index: %d, v: %p, th: %p\n",
818
+ - th->value_cache_ptr - th->value_cache, v, th);
819
+ + th->value_cache_ptr - th->value_cache, v, th);
820
+ #endif
821
+ return v;
822
+ #else
823
+ @@ -860,14 +1132,14 @@ gc_mark_all(rb_objspace_t *objspace)
824
+
825
+ init_mark_stack(objspace);
826
+ for (i = 0; i < heaps_used; i++) {
827
+ - p = heaps[i].slot; pend = p + heaps[i].limit;
828
+ - while (p < pend) {
829
+ - if ((p->as.basic.flags & FL_MARK) &&
830
+ - (p->as.basic.flags != FL_MARK)) {
831
+ - gc_mark_children(objspace, (VALUE)p, 0);
832
+ - }
833
+ - p++;
834
+ - }
835
+ + p = heaps[i].slot; pend = p + heaps[i].limit;
836
+ + while (p < pend) {
837
+ + if ((p->as.basic.flags & FL_MARK) &&
838
+ + (p->as.basic.flags != FL_MARK)) {
839
+ + gc_mark_children(objspace, (VALUE)p, 0);
840
+ + }
841
+ + p++;
842
+ + }
843
+ }
844
+ }
845
+
846
+ @@ -882,8 +1154,8 @@ gc_mark_rest(rb_objspace_t *objspace)
847
+
848
+ init_mark_stack(objspace);
849
+ while (p != tmp_arry) {
850
+ - p--;
851
+ - gc_mark_children(objspace, *p, 0);
852
+ + p--;
853
+ + gc_mark_children(objspace, *p, 0);
854
+ }
855
+ }
856
+
857
+ @@ -901,16 +1173,16 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
858
+ lo = 0;
859
+ hi = heaps_used;
860
+ while (lo < hi) {
861
+ - mid = (lo + hi) / 2;
862
+ - heap = &heaps[mid];
863
+ - if (heap->slot <= p) {
864
+ - if (p < heap->slot + heap->limit)
865
+ - return Qtrue;
866
+ - lo = mid + 1;
867
+ - }
868
+ - else {
869
+ - hi = mid;
870
+ - }
871
+ + mid = (lo + hi) / 2;
872
+ + heap = &heaps[mid];
873
+ + if (heap->slot <= p) {
874
+ + if (p < heap->slot + heap->limit)
875
+ + return Qtrue;
876
+ + lo = mid + 1;
877
+ + }
878
+ + else {
879
+ + hi = mid;
880
+ + }
881
+ }
882
+ return Qfalse;
883
+ }
884
+ @@ -922,10 +1194,10 @@ mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n
885
+ while (n--) {
886
+ v = *x;
887
+ VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
888
+ - if (is_pointer_to_heap(objspace, (void *)v)) {
889
+ - gc_mark(objspace, v, 0);
890
+ - }
891
+ - x++;
892
+ + if (is_pointer_to_heap(objspace, (void *)v)) {
893
+ + gc_mark(objspace, v, 0);
894
+ + }
895
+ + x++;
896
+ }
897
+ }
898
+
899
+ @@ -1029,7 +1301,7 @@ void
900
+ rb_gc_mark_maybe(VALUE obj)
901
+ {
902
+ if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
903
+ - gc_mark(&rb_objspace, obj, 0);
904
+ + gc_mark(&rb_objspace, obj, 0);
905
+ }
906
+ }
907
+
908
+ @@ -1047,16 +1319,16 @@ gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
909
+ obj->as.basic.flags |= FL_MARK;
910
+
911
+ if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) {
912
+ - if (!mark_stack_overflow) {
913
+ - if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
914
+ - *mark_stack_ptr = ptr;
915
+ - mark_stack_ptr++;
916
+ - }
917
+ - else {
918
+ - mark_stack_overflow = 1;
919
+ - }
920
+ - }
921
+ - return;
922
+ + if (!mark_stack_overflow) {
923
+ + if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
924
+ + *mark_stack_ptr = ptr;
925
+ + mark_stack_ptr++;
926
+ + }
927
+ + else {
928
+ + mark_stack_overflow = 1;
929
+ + }
930
+ + }
931
+ + return;
932
+ }
933
+ gc_mark_children(objspace, ptr, lev+1);
934
+ }
935
+ @@ -1072,7 +1344,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
936
+ {
937
+ register RVALUE *obj = RANY(ptr);
938
+
939
+ - goto marking; /* skip */
940
+ + goto marking; /* skip */
941
+
942
+ again:
943
+ obj = RANY(ptr);
944
+ @@ -1083,149 +1355,149 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
945
+
946
+ marking:
947
+ if (FL_TEST(obj, FL_EXIVAR)) {
948
+ - rb_mark_generic_ivar(ptr);
949
+ + rb_mark_generic_ivar(ptr);
950
+ }
951
+
952
+ switch (BUILTIN_TYPE(obj)) {
953
+ case T_NIL:
954
+ case T_FIXNUM:
955
+ - rb_bug("rb_gc_mark() called for broken object");
956
+ - break;
957
+ + rb_bug("rb_gc_mark() called for broken object");
958
+ + break;
959
+
960
+ case T_NODE:
961
+ - switch (nd_type(obj)) {
962
+ - case NODE_IF: /* 1,2,3 */
963
+ - case NODE_FOR:
964
+ - case NODE_ITER:
965
+ - case NODE_WHEN:
966
+ - case NODE_MASGN:
967
+ - case NODE_RESCUE:
968
+ - case NODE_RESBODY:
969
+ - case NODE_CLASS:
970
+ - case NODE_BLOCK_PASS:
971
+ - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
972
+ - /* fall through */
973
+ - case NODE_BLOCK: /* 1,3 */
974
+ - case NODE_OPTBLOCK:
975
+ - case NODE_ARRAY:
976
+ - case NODE_DSTR:
977
+ - case NODE_DXSTR:
978
+ - case NODE_DREGX:
979
+ - case NODE_DREGX_ONCE:
980
+ - case NODE_ENSURE:
981
+ - case NODE_CALL:
982
+ - case NODE_DEFS:
983
+ - case NODE_OP_ASGN1:
984
+ - case NODE_ARGS:
985
+ - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
986
+ - /* fall through */
987
+ - case NODE_SUPER: /* 3 */
988
+ - case NODE_FCALL:
989
+ - case NODE_DEFN:
990
+ - case NODE_ARGS_AUX:
991
+ - ptr = (VALUE)obj->as.node.u3.node;
992
+ - goto again;
993
+ -
994
+ - case NODE_METHOD: /* 1,2 */
995
+ - case NODE_WHILE:
996
+ - case NODE_UNTIL:
997
+ - case NODE_AND:
998
+ - case NODE_OR:
999
+ - case NODE_CASE:
1000
+ - case NODE_SCLASS:
1001
+ - case NODE_DOT2:
1002
+ - case NODE_DOT3:
1003
+ - case NODE_FLIP2:
1004
+ - case NODE_FLIP3:
1005
+ - case NODE_MATCH2:
1006
+ - case NODE_MATCH3:
1007
+ - case NODE_OP_ASGN_OR:
1008
+ - case NODE_OP_ASGN_AND:
1009
+ - case NODE_MODULE:
1010
+ - case NODE_ALIAS:
1011
+ - case NODE_VALIAS:
1012
+ - case NODE_ARGSCAT:
1013
+ - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
1014
+ - /* fall through */
1015
+ - case NODE_FBODY: /* 2 */
1016
+ - case NODE_GASGN:
1017
+ - case NODE_LASGN:
1018
+ - case NODE_DASGN:
1019
+ - case NODE_DASGN_CURR:
1020
+ - case NODE_IASGN:
1021
+ - case NODE_IASGN2:
1022
+ - case NODE_CVASGN:
1023
+ - case NODE_COLON3:
1024
+ - case NODE_OPT_N:
1025
+ - case NODE_EVSTR:
1026
+ - case NODE_UNDEF:
1027
+ - case NODE_POSTEXE:
1028
+ - ptr = (VALUE)obj->as.node.u2.node;
1029
+ - goto again;
1030
+ -
1031
+ - case NODE_HASH: /* 1 */
1032
+ - case NODE_LIT:
1033
+ - case NODE_STR:
1034
+ - case NODE_XSTR:
1035
+ - case NODE_DEFINED:
1036
+ - case NODE_MATCH:
1037
+ - case NODE_RETURN:
1038
+ - case NODE_BREAK:
1039
+ - case NODE_NEXT:
1040
+ - case NODE_YIELD:
1041
+ - case NODE_COLON2:
1042
+ - case NODE_SPLAT:
1043
+ - case NODE_TO_ARY:
1044
+ - ptr = (VALUE)obj->as.node.u1.node;
1045
+ - goto again;
1046
+ -
1047
+ - case NODE_SCOPE: /* 2,3 */
1048
+ - case NODE_CDECL:
1049
+ - case NODE_OPT_ARG:
1050
+ - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
1051
+ - ptr = (VALUE)obj->as.node.u2.node;
1052
+ - goto again;
1053
+ -
1054
+ - case NODE_ZARRAY: /* - */
1055
+ - case NODE_ZSUPER:
1056
+ - case NODE_CFUNC:
1057
+ - case NODE_VCALL:
1058
+ - case NODE_GVAR:
1059
+ - case NODE_LVAR:
1060
+ - case NODE_DVAR:
1061
+ - case NODE_IVAR:
1062
+ - case NODE_CVAR:
1063
+ - case NODE_NTH_REF:
1064
+ - case NODE_BACK_REF:
1065
+ - case NODE_REDO:
1066
+ - case NODE_RETRY:
1067
+ - case NODE_SELF:
1068
+ - case NODE_NIL:
1069
+ - case NODE_TRUE:
1070
+ - case NODE_FALSE:
1071
+ - case NODE_ERRINFO:
1072
+ - case NODE_ATTRSET:
1073
+ - case NODE_BLOCK_ARG:
1074
+ - break;
1075
+ - case NODE_ALLOCA:
1076
+ - mark_locations_array(objspace,
1077
+ - (VALUE*)obj->as.node.u1.value,
1078
+ - obj->as.node.u3.cnt);
1079
+ - ptr = (VALUE)obj->as.node.u2.node;
1080
+ - goto again;
1081
+ -
1082
+ - default: /* unlisted NODE */
1083
+ - if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
1084
+ - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
1085
+ - }
1086
+ - if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
1087
+ - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
1088
+ - }
1089
+ - if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
1090
+ - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
1091
+ - }
1092
+ - }
1093
+ - return; /* no need to mark class. */
1094
+ + switch (nd_type(obj)) {
1095
+ + case NODE_IF: /* 1,2,3 */
1096
+ + case NODE_FOR:
1097
+ + case NODE_ITER:
1098
+ + case NODE_WHEN:
1099
+ + case NODE_MASGN:
1100
+ + case NODE_RESCUE:
1101
+ + case NODE_RESBODY:
1102
+ + case NODE_CLASS:
1103
+ + case NODE_BLOCK_PASS:
1104
+ + gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
1105
+ + /* fall through */
1106
+ + case NODE_BLOCK: /* 1,3 */
1107
+ + case NODE_OPTBLOCK:
1108
+ + case NODE_ARRAY:
1109
+ + case NODE_DSTR:
1110
+ + case NODE_DXSTR:
1111
+ + case NODE_DREGX:
1112
+ + case NODE_DREGX_ONCE:
1113
+ + case NODE_ENSURE:
1114
+ + case NODE_CALL:
1115
+ + case NODE_DEFS:
1116
+ + case NODE_OP_ASGN1:
1117
+ + case NODE_ARGS:
1118
+ + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
1119
+ + /* fall through */
1120
+ + case NODE_SUPER: /* 3 */
1121
+ + case NODE_FCALL:
1122
+ + case NODE_DEFN:
1123
+ + case NODE_ARGS_AUX:
1124
+ + ptr = (VALUE)obj->as.node.u3.node;
1125
+ + goto again;
1126
+ +
1127
+ + case NODE_METHOD: /* 1,2 */
1128
+ + case NODE_WHILE:
1129
+ + case NODE_UNTIL:
1130
+ + case NODE_AND:
1131
+ + case NODE_OR:
1132
+ + case NODE_CASE:
1133
+ + case NODE_SCLASS:
1134
+ + case NODE_DOT2:
1135
+ + case NODE_DOT3:
1136
+ + case NODE_FLIP2:
1137
+ + case NODE_FLIP3:
1138
+ + case NODE_MATCH2:
1139
+ + case NODE_MATCH3:
1140
+ + case NODE_OP_ASGN_OR:
1141
+ + case NODE_OP_ASGN_AND:
1142
+ + case NODE_MODULE:
1143
+ + case NODE_ALIAS:
1144
+ + case NODE_VALIAS:
1145
+ + case NODE_ARGSCAT:
1146
+ + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
1147
+ + /* fall through */
1148
+ + case NODE_FBODY: /* 2 */
1149
+ + case NODE_GASGN:
1150
+ + case NODE_LASGN:
1151
+ + case NODE_DASGN:
1152
+ + case NODE_DASGN_CURR:
1153
+ + case NODE_IASGN:
1154
+ + case NODE_IASGN2:
1155
+ + case NODE_CVASGN:
1156
+ + case NODE_COLON3:
1157
+ + case NODE_OPT_N:
1158
+ + case NODE_EVSTR:
1159
+ + case NODE_UNDEF:
1160
+ + case NODE_POSTEXE:
1161
+ + ptr = (VALUE)obj->as.node.u2.node;
1162
+ + goto again;
1163
+ +
1164
+ + case NODE_HASH: /* 1 */
1165
+ + case NODE_LIT:
1166
+ + case NODE_STR:
1167
+ + case NODE_XSTR:
1168
+ + case NODE_DEFINED:
1169
+ + case NODE_MATCH:
1170
+ + case NODE_RETURN:
1171
+ + case NODE_BREAK:
1172
+ + case NODE_NEXT:
1173
+ + case NODE_YIELD:
1174
+ + case NODE_COLON2:
1175
+ + case NODE_SPLAT:
1176
+ + case NODE_TO_ARY:
1177
+ + ptr = (VALUE)obj->as.node.u1.node;
1178
+ + goto again;
1179
+ +
1180
+ + case NODE_SCOPE: /* 2,3 */
1181
+ + case NODE_CDECL:
1182
+ + case NODE_OPT_ARG:
1183
+ + gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
1184
+ + ptr = (VALUE)obj->as.node.u2.node;
1185
+ + goto again;
1186
+ +
1187
+ + case NODE_ZARRAY: /* - */
1188
+ + case NODE_ZSUPER:
1189
+ + case NODE_CFUNC:
1190
+ + case NODE_VCALL:
1191
+ + case NODE_GVAR:
1192
+ + case NODE_LVAR:
1193
+ + case NODE_DVAR:
1194
+ + case NODE_IVAR:
1195
+ + case NODE_CVAR:
1196
+ + case NODE_NTH_REF:
1197
+ + case NODE_BACK_REF:
1198
+ + case NODE_REDO:
1199
+ + case NODE_RETRY:
1200
+ + case NODE_SELF:
1201
+ + case NODE_NIL:
1202
+ + case NODE_TRUE:
1203
+ + case NODE_FALSE:
1204
+ + case NODE_ERRINFO:
1205
+ + case NODE_ATTRSET:
1206
+ + case NODE_BLOCK_ARG:
1207
+ + break;
1208
+ + case NODE_ALLOCA:
1209
+ + mark_locations_array(objspace,
1210
+ + (VALUE*)obj->as.node.u1.value,
1211
+ + obj->as.node.u3.cnt);
1212
+ + ptr = (VALUE)obj->as.node.u2.node;
1213
+ + goto again;
1214
+ +
1215
+ + default: /* unlisted NODE */
1216
+ + if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
1217
+ + gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
1218
+ + }
1219
+ + if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
1220
+ + gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
1221
+ + }
1222
+ + if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
1223
+ + gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
1224
+ + }
1225
+ + }
1226
+ + return; /* no need to mark class. */
1227
+ }
1228
+
1229
+ gc_mark(objspace, obj->as.basic.klass, lev);
1230
+ @@ -1233,51 +1505,51 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
1231
+ case T_ICLASS:
1232
+ case T_CLASS:
1233
+ case T_MODULE:
1234
+ - mark_tbl(objspace, RCLASS_M_TBL(obj), lev);
1235
+ - mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
1236
+ - ptr = RCLASS_SUPER(obj);
1237
+ - goto again;
1238
+ + mark_tbl(objspace, RCLASS_M_TBL(obj), lev);
1239
+ + mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
1240
+ + ptr = RCLASS_SUPER(obj);
1241
+ + goto again;
1242
+
1243
+ case T_ARRAY:
1244
+ - if (FL_TEST(obj, ELTS_SHARED)) {
1245
+ - ptr = obj->as.array.aux.shared;
1246
+ - goto again;
1247
+ - }
1248
+ - else {
1249
+ - long i, len = RARRAY_LEN(obj);
1250
+ - VALUE *ptr = RARRAY_PTR(obj);
1251
+ - for (i=0; i < len; i++) {
1252
+ - gc_mark(objspace, *ptr++, lev);
1253
+ - }
1254
+ - }
1255
+ - break;
1256
+ + if (FL_TEST(obj, ELTS_SHARED)) {
1257
+ + ptr = obj->as.array.aux.shared;
1258
+ + goto again;
1259
+ + }
1260
+ + else {
1261
+ + long i, len = RARRAY_LEN(obj);
1262
+ + VALUE *ptr = RARRAY_PTR(obj);
1263
+ + for (i=0; i < len; i++) {
1264
+ + gc_mark(objspace, *ptr++, lev);
1265
+ + }
1266
+ + }
1267
+ + break;
1268
+
1269
+ case T_HASH:
1270
+ - mark_hash(objspace, obj->as.hash.ntbl, lev);
1271
+ - ptr = obj->as.hash.ifnone;
1272
+ - goto again;
1273
+ + mark_hash(objspace, obj->as.hash.ntbl, lev);
1274
+ + ptr = obj->as.hash.ifnone;
1275
+ + goto again;
1276
+
1277
+ case T_STRING:
1278
+ #define STR_ASSOC FL_USER3 /* copied from string.c */
1279
+ - if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
1280
+ - ptr = obj->as.string.as.heap.aux.shared;
1281
+ - goto again;
1282
+ - }
1283
+ - break;
1284
+ + if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
1285
+ + ptr = obj->as.string.as.heap.aux.shared;
1286
+ + goto again;
1287
+ + }
1288
+ + break;
1289
+
1290
+ case T_DATA:
1291
+ - if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
1292
+ - break;
1293
+ + if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
1294
+ + break;
1295
+
1296
+ case T_OBJECT:
1297
+ {
1298
+ long i, len = ROBJECT_NUMIV(obj);
1299
+ - VALUE *ptr = ROBJECT_IVPTR(obj);
1300
+ + VALUE *ptr = ROBJECT_IVPTR(obj);
1301
+ for (i = 0; i < len; i++) {
1302
+ - gc_mark(objspace, *ptr++, lev);
1303
+ + gc_mark(objspace, *ptr++, lev);
1304
+ }
1305
+ }
1306
+ - break;
1307
+ + break;
1308
+
1309
+ case T_FILE:
1310
+ if (obj->as.file.fptr)
1311
+ @@ -1290,41 +1562,41 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
1312
+
1313
+ case T_FLOAT:
1314
+ case T_BIGNUM:
1315
+ - break;
1316
+ + break;
1317
+
1318
+ case T_MATCH:
1319
+ - gc_mark(objspace, obj->as.match.regexp, lev);
1320
+ - if (obj->as.match.str) {
1321
+ - ptr = obj->as.match.str;
1322
+ - goto again;
1323
+ - }
1324
+ - break;
1325
+ + gc_mark(objspace, obj->as.match.regexp, lev);
1326
+ + if (obj->as.match.str) {
1327
+ + ptr = obj->as.match.str;
1328
+ + goto again;
1329
+ + }
1330
+ + break;
1331
+
1332
+ case T_RATIONAL:
1333
+ - gc_mark(objspace, obj->as.rational.num, lev);
1334
+ - gc_mark(objspace, obj->as.rational.den, lev);
1335
+ - break;
1336
+ + gc_mark(objspace, obj->as.rational.num, lev);
1337
+ + gc_mark(objspace, obj->as.rational.den, lev);
1338
+ + break;
1339
+
1340
+ case T_COMPLEX:
1341
+ - gc_mark(objspace, obj->as.complex.real, lev);
1342
+ - gc_mark(objspace, obj->as.complex.image, lev);
1343
+ - break;
1344
+ + gc_mark(objspace, obj->as.complex.real, lev);
1345
+ + gc_mark(objspace, obj->as.complex.image, lev);
1346
+ + break;
1347
+
1348
+ case T_STRUCT:
1349
+ - {
1350
+ - long len = RSTRUCT_LEN(obj);
1351
+ - VALUE *ptr = RSTRUCT_PTR(obj);
1352
+ + {
1353
+ + long len = RSTRUCT_LEN(obj);
1354
+ + VALUE *ptr = RSTRUCT_PTR(obj);
1355
+
1356
+ - while (len--) {
1357
+ - gc_mark(objspace, *ptr++, lev);
1358
+ - }
1359
+ - }
1360
+ - break;
1361
+ + while (len--) {
1362
+ + gc_mark(objspace, *ptr++, lev);
1363
+ + }
1364
+ + }
1365
+ + break;
1366
+
1367
+ default:
1368
+ - rb_bug("rb_gc_mark(): unknown data type 0x%lx(%p) %s",
1369
+ - BUILTIN_TYPE(obj), obj,
1370
+ - is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
1371
+ + rb_bug("rb_gc_mark(): unknown data type 0x%lx(%p) %s",
1372
+ + BUILTIN_TYPE(obj), obj,
1373
+ + is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
1374
+ }
1375
+ }
1376
+
1377
+ @@ -1339,20 +1611,52 @@ add_freelist(rb_objspace_t *objspace, RVALUE *p)
1378
+ freelist = p;
1379
+ }
1380
+
1381
+ +static char* obj_type(int tp)
1382
+ +{
1383
+ + switch (tp) {
1384
+ + case T_NIL : return "NIL";
1385
+ + case T_OBJECT : return "OBJECT";
1386
+ + case T_CLASS : return "CLASS";
1387
+ + case T_ICLASS : return "ICLASS";
1388
+ + case T_MODULE : return "MODULE";
1389
+ + case T_FLOAT : return "FLOAT";
1390
+ + case T_STRING : return "STRING";
1391
+ + case T_REGEXP : return "REGEXP";
1392
+ + case T_ARRAY : return "ARRAY";
1393
+ + case T_FIXNUM : return "FIXNUM";
1394
+ + case T_HASH : return "HASH";
1395
+ + case T_STRUCT : return "STRUCT";
1396
+ + case T_BIGNUM : return "BIGNUM";
1397
+ + case T_FILE : return "FILE";
1398
+ +
1399
+ + case T_TRUE : return "TRUE";
1400
+ + case T_FALSE : return "FALSE";
1401
+ + case T_DATA : return "DATA";
1402
+ + case T_MATCH : return "MATCH";
1403
+ + case T_SYMBOL : return "SYMBOL";
1404
+ + case T_RATIONAL: return "RATIONAL";
1405
+ + case T_COMPLEX : return "COMPLEX";
1406
+ +
1407
+ + case T_UNDEF : return "UNDEF";
1408
+ + case T_NODE : return "NODE";
1409
+ + default: return "____";
1410
+ + }
1411
+ +}
1412
+ +
1413
+ static void
1414
+ finalize_list(rb_objspace_t *objspace, RVALUE *p)
1415
+ {
1416
+ while (p) {
1417
+ - RVALUE *tmp = p->as.free.next;
1418
+ - run_final(objspace, (VALUE)p);
1419
+ - if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
1420
+ - add_freelist(objspace, p);
1421
+ - }
1422
+ - else {
1423
+ - struct heaps_slot *slot = (struct heaps_slot *)RDATA(p)->dmark;
1424
+ - slot->limit--;
1425
+ - }
1426
+ - p = tmp;
1427
+ + RVALUE *tmp = p->as.free.next;
1428
+ + run_final(objspace, (VALUE)p);
1429
+ + if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
1430
+ + add_freelist(objspace, p);
1431
+ + }
1432
+ + else {
1433
+ + struct heaps_slot *slot = (struct heaps_slot *)RDATA(p)->dmark;
1434
+ + slot->limit--;
1435
+ + }
1436
+ + p = tmp;
1437
+ }
1438
+ }
1439
+
1440
+ @@ -1363,30 +1667,30 @@ free_unused_heaps(rb_objspace_t *objspace)
1441
+ RVALUE *last = 0;
1442
+
1443
+ for (i = j = 1; j < heaps_used; i++) {
1444
+ - if (heaps[i].limit == 0) {
1445
+ - if (!last) {
1446
+ - last = heaps[i].membase;
1447
+ - }
1448
+ - else {
1449
+ - free(heaps[i].membase);
1450
+ - }
1451
+ - heaps_used--;
1452
+ - }
1453
+ - else {
1454
+ - if (i != j) {
1455
+ - heaps[j] = heaps[i];
1456
+ - }
1457
+ - j++;
1458
+ - }
1459
+ + if (heaps[i].limit == 0) {
1460
+ + if (!last) {
1461
+ + last = heaps[i].membase;
1462
+ + }
1463
+ + else {
1464
+ + free(heaps[i].membase);
1465
+ + }
1466
+ + heaps_used--;
1467
+ + }
1468
+ + else {
1469
+ + if (i != j) {
1470
+ + heaps[j] = heaps[i];
1471
+ + }
1472
+ + j++;
1473
+ + }
1474
+ }
1475
+ if (last) {
1476
+ - if (last < heaps_freed) {
1477
+ - free(heaps_freed);
1478
+ - heaps_freed = last;
1479
+ - }
1480
+ - else {
1481
+ - free(last);
1482
+ - }
1483
+ + if (last < heaps_freed) {
1484
+ + free(heaps_freed);
1485
+ + heaps_freed = last;
1486
+ + }
1487
+ + else {
1488
+ + free(last);
1489
+ + }
1490
+ }
1491
+ }
1492
+
1493
+ @@ -1398,87 +1702,120 @@ gc_sweep(rb_objspace_t *objspace)
1494
+ size_t i;
1495
+ size_t live = 0, free_min = 0, do_heap_free = 0;
1496
+
1497
+ + unsigned long really_freed = 0;
1498
+ + int free_counts[256];
1499
+ + int live_counts[256];
1500
+ + int do_gc_stats = gc_statistics & verbose_gc_stats;
1501
+ +
1502
+ do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
1503
+ free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
1504
+
1505
+ - if (free_min < FREE_MIN) {
1506
+ - do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
1507
+ - free_min = FREE_MIN;
1508
+ + if (free_min < heap_free_min) {
1509
+ + do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
1510
+ + free_min = heap_free_min;
1511
+ + }
1512
+ +
1513
+ + if (do_gc_stats) {
1514
+ + for (i = 0 ; i< 256; i++) { free_counts[i] = live_counts[i] = 0; }
1515
+ }
1516
+
1517
+ freelist = 0;
1518
+ final_list = deferred_final_list;
1519
+ deferred_final_list = 0;
1520
+ for (i = 0; i < heaps_used; i++) {
1521
+ - int n = 0;
1522
+ - RVALUE *free = freelist;
1523
+ - RVALUE *final = final_list;
1524
+ - int deferred;
1525
+ -
1526
+ - p = heaps[i].slot; pend = p + heaps[i].limit;
1527
+ - while (p < pend) {
1528
+ - if (!(p->as.basic.flags & FL_MARK)) {
1529
+ - if (p->as.basic.flags &&
1530
+ - ((deferred = obj_free(objspace, (VALUE)p)) ||
1531
+ - ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
1532
+ - if (!deferred) {
1533
+ - p->as.free.flags = T_DEFERRED;
1534
+ - RDATA(p)->dfree = 0;
1535
+ - }
1536
+ - p->as.free.flags |= FL_MARK;
1537
+ - p->as.free.next = final_list;
1538
+ - final_list = p;
1539
+ - }
1540
+ - else {
1541
+ - add_freelist(objspace, p);
1542
+ - }
1543
+ - n++;
1544
+ - }
1545
+ - else if (BUILTIN_TYPE(p) == T_DEFERRED) {
1546
+ - /* objects to be finalized */
1547
+ - /* do nothing remain marked */
1548
+ - }
1549
+ - else {
1550
+ - RBASIC(p)->flags &= ~FL_MARK;
1551
+ - live++;
1552
+ - }
1553
+ - p++;
1554
+ - }
1555
+ - if (n == heaps[i].limit && freed > do_heap_free) {
1556
+ - RVALUE *pp;
1557
+ - int f_count = 0;
1558
+ -
1559
+ - for (pp = final_list; pp != final; pp = pp->as.free.next) {
1560
+ - f_count++;
1561
+ - RDATA(pp)->dmark = (void *)&heaps[i];
1562
+ - pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
1563
+ - }
1564
+ - heaps[i].limit = f_count;
1565
+ -
1566
+ - freelist = free; /* cancel this page from freelist */
1567
+ - }
1568
+ - else {
1569
+ - freed += n;
1570
+ - }
1571
+ + int n = 0;
1572
+ + RVALUE *free = freelist;
1573
+ + RVALUE *final = final_list;
1574
+ + int deferred;
1575
+ +
1576
+ + p = heaps[i].slot; pend = p + heaps[i].limit;
1577
+ + while (p < pend) {
1578
+ + if (!(p->as.basic.flags & FL_MARK)) {
1579
+ + if (p->as.basic.flags &&
1580
+ + ((deferred = obj_free(objspace, (VALUE)p)) ||
1581
+ + ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
1582
+ + if (!deferred) {
1583
+ + p->as.free.flags = T_DEFERRED;
1584
+ + RDATA(p)->dfree = 0;
1585
+ + }
1586
+ + p->as.free.flags |= FL_MARK;
1587
+ + p->as.free.next = final_list;
1588
+ + final_list = p;
1589
+ + }
1590
+ + else {
1591
+ + if (do_gc_stats) {
1592
+ + really_freed++;
1593
+ + int object_type = p->as.basic.flags & T_MASK;
1594
+ + if (object_type) {
1595
+ + free_counts[object_type]++;
1596
+ + }
1597
+ + }
1598
+ + add_freelist(objspace, p);
1599
+ + }
1600
+ + n++;
1601
+ + }
1602
+ + else if (BUILTIN_TYPE(p) == T_DEFERRED) {
1603
+ + /* objects to be finalized */
1604
+ + /* do nothing remain marked */
1605
+ + }
1606
+ + else {
1607
+ + RBASIC(p)->flags &= ~FL_MARK;
1608
+ + live++;
1609
+ + if (do_gc_stats) {
1610
+ + live_counts[RANY((VALUE)p)->as.basic.flags & T_MASK]++;
1611
+ + }
1612
+ + }
1613
+ + p++;
1614
+ + }
1615
+ + if (n == heaps[i].limit && freed > do_heap_free) {
1616
+ + RVALUE *pp;
1617
+ + int f_count = 0;
1618
+ +
1619
+ + for (pp = final_list; pp != final; pp = pp->as.free.next) {
1620
+ + f_count++;
1621
+ + RDATA(pp)->dmark = (void *)&heaps[i];
1622
+ + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
1623
+ + }
1624
+ + heaps[i].limit = f_count;
1625
+ +
1626
+ + freelist = free; /* cancel this page from freelist */
1627
+ + }
1628
+ + else {
1629
+ + freed += n;
1630
+ + }
1631
+ }
1632
+ if (malloc_increase > malloc_limit) {
1633
+ - malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
1634
+ - if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
1635
+ + malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
1636
+ + if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
1637
+ }
1638
+ malloc_increase = 0;
1639
+ if (freed < free_min) {
1640
+ - set_heaps_increment(objspace);
1641
+ - heaps_increment(objspace);
1642
+ + set_heaps_increment(objspace);
1643
+ + heaps_increment(objspace);
1644
+ }
1645
+ during_gc = 0;
1646
+
1647
+ + if (do_gc_stats) {
1648
+ + fprintf(gc_data_file, "objects processed: %.7d\n", (int)(live+freed));
1649
+ + fprintf(gc_data_file, "live objects : %.7d\n", (int)(live));
1650
+ + fprintf(gc_data_file, "freelist objects : %.7d\n", (int)(freed - really_freed));
1651
+ + fprintf(gc_data_file, "freed objects : %.7d\n", (int)(really_freed));
1652
+ + for(i=0; i<256; i++) {
1653
+ + if (free_counts[i]>0 || live_counts[i]>0) {
1654
+ + fprintf(gc_data_file,
1655
+ + "kept %.7d / freed %.7d objects of type %s\n",
1656
+ + live_counts[i], free_counts[i], obj_type(i));
1657
+ + }
1658
+ + }
1659
+ + }
1660
+ +
1661
+ /* clear finalization list */
1662
+ if (final_list) {
1663
+ - deferred_final_list = final_list;
1664
+ - RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD());
1665
+ + deferred_final_list = final_list;
1666
+ + RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD());
1667
+ }
1668
+ else{
1669
+ - free_unused_heaps(objspace);
1670
+ + free_unused_heaps(objspace);
1671
+ }
1672
+ }
1673
+
1674
+ @@ -1503,117 +1840,117 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
1675
+ case T_FIXNUM:
1676
+ case T_TRUE:
1677
+ case T_FALSE:
1678
+ - rb_bug("obj_free() called for broken object");
1679
+ - break;
1680
+ + rb_bug("obj_free() called for broken object");
1681
+ + break;
1682
+ }
1683
+
1684
+ if (FL_TEST(obj, FL_EXIVAR)) {
1685
+ - rb_free_generic_ivar((VALUE)obj);
1686
+ - FL_UNSET(obj, FL_EXIVAR);
1687
+ + rb_free_generic_ivar((VALUE)obj);
1688
+ + FL_UNSET(obj, FL_EXIVAR);
1689
+ }
1690
+
1691
+ switch (BUILTIN_TYPE(obj)) {
1692
+ case T_OBJECT:
1693
+ - if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
1694
+ + if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
1695
+ RANY(obj)->as.object.as.heap.ivptr) {
1696
+ - xfree(RANY(obj)->as.object.as.heap.ivptr);
1697
+ - }
1698
+ - break;
1699
+ + xfree(RANY(obj)->as.object.as.heap.ivptr);
1700
+ + }
1701
+ + break;
1702
+ case T_MODULE:
1703
+ case T_CLASS:
1704
+ - rb_clear_cache_by_class((VALUE)obj);
1705
+ - st_free_table(RCLASS_M_TBL(obj));
1706
+ - if (RCLASS_IV_TBL(obj)) {
1707
+ - st_free_table(RCLASS_IV_TBL(obj));
1708
+ - }
1709
+ - if (RCLASS_IV_INDEX_TBL(obj)) {
1710
+ - st_free_table(RCLASS_IV_INDEX_TBL(obj));
1711
+ - }
1712
+ + rb_clear_cache_by_class((VALUE)obj);
1713
+ + st_free_table(RCLASS_M_TBL(obj));
1714
+ + if (RCLASS_IV_TBL(obj)) {
1715
+ + st_free_table(RCLASS_IV_TBL(obj));
1716
+ + }
1717
+ + if (RCLASS_IV_INDEX_TBL(obj)) {
1718
+ + st_free_table(RCLASS_IV_INDEX_TBL(obj));
1719
+ + }
1720
+ xfree(RANY(obj)->as.klass.ptr);
1721
+ - break;
1722
+ + break;
1723
+ case T_STRING:
1724
+ - rb_str_free(obj);
1725
+ - break;
1726
+ + rb_str_free(obj);
1727
+ + break;
1728
+ case T_ARRAY:
1729
+ - rb_ary_free(obj);
1730
+ - break;
1731
+ + rb_ary_free(obj);
1732
+ + break;
1733
+ case T_HASH:
1734
+ - if (RANY(obj)->as.hash.ntbl) {
1735
+ - st_free_table(RANY(obj)->as.hash.ntbl);
1736
+ - }
1737
+ - break;
1738
+ + if (RANY(obj)->as.hash.ntbl) {
1739
+ + st_free_table(RANY(obj)->as.hash.ntbl);
1740
+ + }
1741
+ + break;
+ case T_REGEXP:
+ - if (RANY(obj)->as.regexp.ptr) {
+ - onig_free(RANY(obj)->as.regexp.ptr);
+ - }
+ - break;
+ + if (RANY(obj)->as.regexp.ptr) {
+ + onig_free(RANY(obj)->as.regexp.ptr);
+ + }
+ + break;
+ case T_DATA:
+ - if (DATA_PTR(obj)) {
+ - if ((long)RANY(obj)->as.data.dfree == -1) {
+ - xfree(DATA_PTR(obj));
+ - }
+ - else if (RANY(obj)->as.data.dfree) {
+ - make_deferred(RANY(obj));
+ - return 1;
+ - }
+ - }
+ - break;
+ + if (DATA_PTR(obj)) {
+ + if ((long)RANY(obj)->as.data.dfree == -1) {
+ + xfree(DATA_PTR(obj));
+ + }
+ + else if (RANY(obj)->as.data.dfree) {
+ + make_deferred(RANY(obj));
+ + return 1;
+ + }
+ + }
+ + break;
+ case T_MATCH:
+ - if (RANY(obj)->as.match.rmatch) {
+ + if (RANY(obj)->as.match.rmatch) {
+ struct rmatch *rm = RANY(obj)->as.match.rmatch;
+ - onig_region_free(&rm->regs, 0);
+ + onig_region_free(&rm->regs, 0);
+ if (rm->char_offset)
+ - xfree(rm->char_offset);
+ - xfree(rm);
+ - }
+ - break;
+ + xfree(rm->char_offset);
+ + xfree(rm);
+ + }
+ + break;
+ case T_FILE:
+ - if (RANY(obj)->as.file.fptr) {
+ - rb_io_t *fptr = RANY(obj)->as.file.fptr;
+ - make_deferred(RANY(obj));
+ - RDATA(obj)->dfree = (void (*)(void*))rb_io_fptr_finalize;
+ - RDATA(obj)->data = fptr;
+ - return 1;
+ - }
+ - break;
+ + if (RANY(obj)->as.file.fptr) {
+ + rb_io_t *fptr = RANY(obj)->as.file.fptr;
+ + make_deferred(RANY(obj));
+ + RDATA(obj)->dfree = (void (*)(void*))rb_io_fptr_finalize;
+ + RDATA(obj)->data = fptr;
+ + return 1;
+ + }
+ + break;
+ case T_RATIONAL:
+ case T_COMPLEX:
+ - break;
+ + break;
+ case T_ICLASS:
+ - /* iClass shares table with the module */
+ - break;
+ + /* iClass shares table with the module */
+ + break;
+
+ case T_FLOAT:
+ - break;
+ + break;
+
+ case T_BIGNUM:
+ - if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
+ - xfree(RBIGNUM_DIGITS(obj));
+ - }
+ - break;
+ + if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
+ + xfree(RBIGNUM_DIGITS(obj));
+ + }
+ + break;
+ case T_NODE:
+ - switch (nd_type(obj)) {
+ - case NODE_SCOPE:
+ - if (RANY(obj)->as.node.u1.tbl) {
+ - xfree(RANY(obj)->as.node.u1.tbl);
+ - }
+ - break;
+ - case NODE_ALLOCA:
+ - xfree(RANY(obj)->as.node.u1.node);
+ - break;
+ - }
+ - break; /* no need to free iv_tbl */
+ + switch (nd_type(obj)) {
+ + case NODE_SCOPE:
+ + if (RANY(obj)->as.node.u1.tbl) {
+ + xfree(RANY(obj)->as.node.u1.tbl);
+ + }
+ + break;
+ + case NODE_ALLOCA:
+ + xfree(RANY(obj)->as.node.u1.node);
+ + break;
+ + }
+ + break; /* no need to free iv_tbl */
+
+ case T_STRUCT:
+ - if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
+ - RANY(obj)->as.rstruct.as.heap.ptr) {
+ - xfree(RANY(obj)->as.rstruct.as.heap.ptr);
+ - }
+ - break;
+ + if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
+ + RANY(obj)->as.rstruct.as.heap.ptr) {
+ + xfree(RANY(obj)->as.rstruct.as.heap.ptr);
+ + }
+ + break;
+
+ default:
+ - rb_bug("gc_sweep(): unknown data type 0x%lx(%p)",
+ - BUILTIN_TYPE(obj), (void*)obj);
+ + rb_bug("gc_sweep(): unknown data type 0x%lx(%p)",
+ + BUILTIN_TYPE(obj), (void*)obj);
+ }
+
+ return 0;
+ @@ -1627,27 +1964,27 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
+ typedef unsigned long rb_jmp_buf[8];
+ __asm__ (".even\n\
+ _rb_setjmp:\n\
+ - move.l 4(sp),a0\n\
+ - movem.l d3-d7/a3-a5,(a0)\n\
+ - moveq.l #0,d0\n\
+ - rts");
+ + move.l 4(sp),a0\n\
+ + movem.l d3-d7/a3-a5,(a0)\n\
+ + moveq.l #0,d0\n\
+ + rts");
+ #else
+ #if defined(DJGPP)
+ typedef unsigned long rb_jmp_buf[6];
+ __asm__ (".align 4\n\
+ _rb_setjmp:\n\
+ - pushl %ebp\n\
+ - movl %esp,%ebp\n\
+ - movl 8(%ebp),%ebp\n\
+ - movl %eax,(%ebp)\n\
+ - movl %ebx,4(%ebp)\n\
+ - movl %ecx,8(%ebp)\n\
+ - movl %edx,12(%ebp)\n\
+ - movl %esi,16(%ebp)\n\
+ - movl %edi,20(%ebp)\n\
+ - popl %ebp\n\
+ - xorl %eax,%eax\n\
+ - ret");
+ + pushl %ebp\n\
+ + movl %esp,%ebp\n\
+ + movl 8(%ebp),%ebp\n\
+ + movl %eax,(%ebp)\n\
+ + movl %ebx,4(%ebp)\n\
+ + movl %ecx,8(%ebp)\n\
+ + movl %edx,12(%ebp)\n\
+ + movl %esi,16(%ebp)\n\
+ + movl %edi,20(%ebp)\n\
+ + popl %ebp\n\
+ + xorl %eax,%eax\n\
+ + ret");
+ #endif
+ #endif
+ int rb_setjmp (rb_jmp_buf);
+ @@ -1686,8 +2023,8 @@ mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
+ /* This assumes that all registers are saved into the jmp_buf (and stack) */
+ rb_setjmp(save_regs_gc_mark);
+ mark_locations_array(objspace,
+ - (VALUE*)save_regs_gc_mark,
+ - sizeof(save_regs_gc_mark) / sizeof(VALUE));
+ + (VALUE*)save_regs_gc_mark,
+ + sizeof(save_regs_gc_mark) / sizeof(VALUE));
+
+ rb_gc_mark_locations(stack_start, stack_end);
+ #ifdef __ia64
+ @@ -1695,7 +2032,7 @@ mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
+ #endif
+ #if defined(__human68k__) || defined(__mc68000__)
+ mark_locations_array((VALUE*)((char*)STACK_END + 2),
+ - (STACK_START - STACK_END));
+ + (STACK_START - STACK_END));
+ #endif
+ }
+
+ @@ -1705,34 +2042,43 @@ static int
+ garbage_collect(rb_objspace_t *objspace)
+ {
+ struct gc_list *list;
+ + struct timeval gctv1, gctv2;
+ rb_thread_t *th = GET_THREAD();
+
+ if (GC_NOTIFY) printf("start garbage_collect()\n");
+
+ if (!heaps) {
+ - return Qfalse;
+ + return Qfalse;
+ }
+
+ if (dont_gc || during_gc) {
+ - if (!freelist) {
+ + if (!freelist) {
+ if (!heaps_increment(objspace)) {
+ set_heaps_increment(objspace);
+ heaps_increment(objspace);
+ }
+ - }
+ - return Qtrue;
+ + }
+ + return Qtrue;
+ }
+ during_gc++;
+ objspace->count++;
+
+ SET_STACK_END;
+
+ + if (gc_statistics) {
+ + gc_collections++;
+ + gettimeofday(&gctv1, NULL);
+ + if (verbose_gc_stats) {
+ + fprintf(gc_data_file, "Garbage collection started\n");
+ + }
+ + }
+ +
+ init_mark_stack(objspace);
+
+ th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
+
+ if (finalizer_table) {
+ - mark_tbl(objspace, finalizer_table, 0);
+ + mark_tbl(objspace, finalizer_table, 0);
+ }
+
+ mark_current_machine_context(objspace, th);
+ @@ -1743,7 +2089,7 @@ garbage_collect(rb_objspace_t *objspace)
+
+ /* mark protected global variables */
+ for (list = global_List; list; list = list->next) {
+ - rb_gc_mark_maybe(*list->varptr);
+ + rb_gc_mark_maybe(*list->varptr);
+ }
+ rb_mark_end_proc();
+ rb_gc_mark_global_tbl();
+ @@ -1758,16 +2104,27 @@ garbage_collect(rb_objspace_t *objspace)
+
+ /* gc_mark objects whose marking are not completed*/
+ while (!MARK_STACK_EMPTY) {
+ - if (mark_stack_overflow) {
+ - gc_mark_all(objspace);
+ - }
+ - else {
+ - gc_mark_rest(objspace);
+ - }
+ + if (mark_stack_overflow) {
+ + gc_mark_all(objspace);
+ + }
+ + else {
+ + gc_mark_rest(objspace);
+ + }
+ }
+
+ gc_sweep(objspace);
+
+ + if (gc_statistics) {
+ + GC_TIME_TYPE musecs_used;
+ + gettimeofday(&gctv2, NULL);
+ + musecs_used = ((GC_TIME_TYPE)(gctv2.tv_sec - gctv1.tv_sec) * 1000000) + (gctv2.tv_usec - gctv1.tv_usec);
+ + gc_time += musecs_used;
+ +
+ + if (verbose_gc_stats) {
+ + fprintf(gc_data_file, "GC time: %lld msec\n", musecs_used / 1000);
+ + }
+ + }
+ +
+ if (GC_NOTIFY) printf("end garbage_collect()\n");
+ return Qtrue;
+ }
+ @@ -1788,10 +2145,10 @@ rb_gc_mark_machine_stack(rb_thread_t *th)
+ rb_gc_mark_locations(th->machine_stack_start, th->machine_stack_end);
+ #else
+ if (th->machine_stack_start < th->machine_stack_end) {
+ - rb_gc_mark_locations(th->machine_stack_start, th->machine_stack_end);
+ + rb_gc_mark_locations(th->machine_stack_start, th->machine_stack_end);
+ }
+ else {
+ - rb_gc_mark_locations(th->machine_stack_end, th->machine_stack_start);
+ + rb_gc_mark_locations(th->machine_stack_end, th->machine_stack_start);
+ }
+ #endif
+ #ifdef __ia64
+ @@ -1859,6 +2216,7 @@ Init_stack(VALUE *addr)
+ void
+ Init_heap(void)
+ {
+ + set_gc_parameters(&rb_objspace);
+ init_heap(&rb_objspace);
+ }
+
+ @@ -1881,27 +2239,27 @@ os_obj_of(rb_objspace_t *objspace, VALUE of)
+ break;
+ membase = heaps[i].membase;
+
+ - p = heaps[i].slot; pend = p + heaps[i].limit;
+ - for (;p < pend; p++) {
+ - if (p->as.basic.flags) {
+ - switch (BUILTIN_TYPE(p)) {
+ - case T_NONE:
+ - case T_ICLASS:
+ - case T_NODE:
+ - case T_DEFERRED:
+ - continue;
+ - case T_CLASS:
+ - if (FL_TEST(p, FL_SINGLETON)) continue;
+ - default:
+ - if (!p->as.basic.klass) continue;
+ + p = heaps[i].slot; pend = p + heaps[i].limit;
+ + for (;p < pend; p++) {
+ + if (p->as.basic.flags) {
+ + switch (BUILTIN_TYPE(p)) {
+ + case T_NONE:
+ + case T_ICLASS:
+ + case T_NODE:
+ + case T_DEFERRED:
+ + continue;
+ + case T_CLASS:
+ + if (FL_TEST(p, FL_SINGLETON)) continue;
+ + default:
+ + if (!p->as.basic.klass) continue;
+ v = (VALUE)p;
+ - if (!of || rb_obj_is_kind_of(v, of)) {
+ - rb_yield(v);
+ - n++;
+ - }
+ - }
+ - }
+ + if (!of || rb_obj_is_kind_of(v, of)) {
+ + rb_yield(v);
+ + n++;
+ + }
+ + }
+ + }
+ + }
+ }
+
+ return SIZET2NUM(n);
+ @@ -1947,10 +2305,10 @@ os_each_obj(int argc, VALUE *argv, VALUE os)
+
+ rb_secure(4);
+ if (argc == 0) {
+ - of = 0;
+ + of = 0;
+ }
+ else {
+ - rb_scan_args(argc, argv, "01", &of);
+ + rb_scan_args(argc, argv, "01", &of);
+ }
+ RETURN_ENUMERATOR(os, 1, &of);
+ return os_obj_of(&rb_objspace, of);
+ @@ -1969,7 +2327,7 @@ undefine_final(VALUE os, VALUE obj)
+ {
+ rb_objspace_t *objspace = &rb_objspace;
+ if (finalizer_table) {
+ - st_delete(finalizer_table, (st_data_t*)&obj, 0);
+ + st_delete(finalizer_table, (st_data_t*)&obj, 0);
+ }
+ return obj;
+ }
+ @@ -1991,24 +2349,24 @@ define_final(int argc, VALUE *argv, VALUE os)
+
+ rb_scan_args(argc, argv, "11", &obj, &block);
+ if (argc == 1) {
+ - block = rb_block_proc();
+ + block = rb_block_proc();
+ }
+ else if (!rb_respond_to(block, rb_intern("call"))) {
+ - rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
+ - rb_obj_classname(block));
+ + rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
+ + rb_obj_classname(block));
+ }
+ FL_SET(obj, FL_FINALIZE);
+
+ block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
+
+ if (!finalizer_table) {
+ - finalizer_table = st_init_numtable();
+ + finalizer_table = st_init_numtable();
+ }
+ if (st_lookup(finalizer_table, obj, &table)) {
+ - rb_ary_push(table, block);
+ + rb_ary_push(table, block);
+ }
+ else {
+ - st_add_direct(finalizer_table, obj, rb_ary_new3(1, block));
+ + st_add_direct(finalizer_table, obj, rb_ary_new3(1, block));
+ }
+ return block;
+ }
+ @@ -2022,7 +2380,7 @@ rb_gc_copy_finalizer(VALUE dest, VALUE obj)
+ if (!finalizer_table) return;
+ if (!FL_TEST(obj, FL_FINALIZE)) return;
+ if (st_lookup(finalizer_table, obj, &table)) {
+ - st_insert(finalizer_table, dest, table);
+ + st_insert(finalizer_table, dest, table);
+ }
+ FL_SET(dest, FL_FINALIZE);
+ }
+ @@ -2042,26 +2400,26 @@ run_final(rb_objspace_t *objspace, VALUE obj)
+ int status;
+ VALUE args[3], table, objid;
+
+ - objid = rb_obj_id(obj); /* make obj into id */
+ + objid = rb_obj_id(obj); /* make obj into id */
+ RBASIC(obj)->klass = 0;
+
+ if (RDATA(obj)->dfree) {
+ - (*RDATA(obj)->dfree)(DATA_PTR(obj));
+ + (*RDATA(obj)->dfree)(DATA_PTR(obj));
+ }
+
+ if (finalizer_table &&
+ - st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
+ - args[1] = 0;
+ - args[2] = (VALUE)rb_safe_level();
+ - if (!args[1] && RARRAY_LEN(table) > 0) {
+ - args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
+ - }
+ - for (i=0; i<RARRAY_LEN(table); i++) {
+ - VALUE final = RARRAY_PTR(table)[i];
+ - args[0] = RARRAY_PTR(final)[1];
+ - args[2] = FIX2INT(RARRAY_PTR(final)[0]);
+ - rb_protect(run_single_final, (VALUE)args, &status);
+ - }
+ + st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
+ + args[1] = 0;
+ + args[2] = (VALUE)rb_safe_level();
+ + if (!args[1] && RARRAY_LEN(table) > 0) {
+ + args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
+ + }
+ + for (i=0; i<RARRAY_LEN(table); i++) {
+ + VALUE final = RARRAY_PTR(table)[i];
+ + args[0] = RARRAY_PTR(final)[1];
+ + args[2] = FIX2INT(RARRAY_PTR(final)[0]);
+ + rb_protect(run_single_final, (VALUE)args, &status);
+ + }
+ }
+ }
+
+ @@ -2072,7 +2430,7 @@ gc_finalize_deferred(rb_objspace_t *objspace)
+ deferred_final_list = 0;
+
+ if (p) {
+ - finalize_list(objspace, p);
+ + finalize_list(objspace, p);
+ }
+ free_unused_heaps(objspace);
+ }
+ @@ -2088,16 +2446,16 @@ chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
+ {
+ RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
+ if (p->as.basic.flags & FL_FINALIZE) {
+ - if (BUILTIN_TYPE(p) != T_DEFERRED) {
+ - p->as.free.flags = FL_MARK | T_DEFERRED; /* remain marked */
+ - RDATA(p)->dfree = 0;
+ - }
+ - p->as.free.next = *final_list;
+ - *final_list = p;
+ - return ST_CONTINUE;
+ + if (BUILTIN_TYPE(p) != T_DEFERRED) {
+ + p->as.free.flags = FL_MARK | T_DEFERRED; /* remain marked */
+ + RDATA(p)->dfree = 0;
+ + }
+ + p->as.free.next = *final_list;
+ + *final_list = p;
+ + return ST_CONTINUE;
+ }
+ else {
+ - return ST_DELETE;
+ + return ST_DELETE;
+ }
+ }
+
+ @@ -2110,48 +2468,48 @@ rb_gc_call_finalizer_at_exit(void)
+
+ /* run finalizers */
+ if (finalizer_table) {
+ - p = deferred_final_list;
+ - deferred_final_list = 0;
+ - finalize_list(objspace, p);
+ - while (finalizer_table->num_entries > 0) {
+ - RVALUE *final_list = 0;
+ - st_foreach(finalizer_table, chain_finalized_object,
+ - (st_data_t)&final_list);
+ - if (!(p = final_list)) break;
+ - do {
+ - final_list = p->as.free.next;
+ - run_final(objspace, (VALUE)p);
+ - } while ((p = final_list) != 0);
+ - }
+ - st_free_table(finalizer_table);
+ - finalizer_table = 0;
+ + p = deferred_final_list;
+ + deferred_final_list = 0;
+ + finalize_list(objspace, p);
+ + while (finalizer_table->num_entries > 0) {
+ + RVALUE *final_list = 0;
+ + st_foreach(finalizer_table, chain_finalized_object,
+ + (st_data_t)&final_list);
+ + if (!(p = final_list)) break;
+ + do {
+ + final_list = p->as.free.next;
+ + run_final(objspace, (VALUE)p);
+ + } while ((p = final_list) != 0);
+ + }
+ + st_free_table(finalizer_table);
+ + finalizer_table = 0;
+ }
+ /* finalizers are part of garbage collection */
+ during_gc++;
+ /* run data object's finalizers */
+ for (i = 0; i < heaps_used; i++) {
+ - p = heaps[i].slot; pend = p + heaps[i].limit;
+ - while (p < pend) {
+ - if (BUILTIN_TYPE(p) == T_DATA &&
+ - DATA_PTR(p) && RANY(p)->as.data.dfree &&
+ - RANY(p)->as.basic.klass != rb_cThread) {
+ - p->as.free.flags = 0;
+ - if ((long)RANY(p)->as.data.dfree == -1) {
+ - xfree(DATA_PTR(p));
+ - }
+ - else if (RANY(p)->as.data.dfree) {
+ - (*RANY(p)->as.data.dfree)(DATA_PTR(p));
+ - }
+ + p = heaps[i].slot; pend = p + heaps[i].limit;
+ + while (p < pend) {
+ + if (BUILTIN_TYPE(p) == T_DATA &&
+ + DATA_PTR(p) && RANY(p)->as.data.dfree &&
+ + RANY(p)->as.basic.klass != rb_cThread) {
+ + p->as.free.flags = 0;
+ + if ((long)RANY(p)->as.data.dfree == -1) {
+ + xfree(DATA_PTR(p));
+ + }
+ + else if (RANY(p)->as.data.dfree) {
+ + (*RANY(p)->as.data.dfree)(DATA_PTR(p));
+ + }
+ VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
+ - }
+ - else if (BUILTIN_TYPE(p) == T_FILE) {
+ - if (rb_io_fptr_finalize(RANY(p)->as.file.fptr)) {
+ - p->as.free.flags = 0;
+ + }
+ + else if (BUILTIN_TYPE(p) == T_FILE) {
+ + if (rb_io_fptr_finalize(RANY(p)->as.file.fptr)) {
+ + p->as.free.flags = 0;
+ VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
+ - }
+ - }
+ - p++;
+ - }
+ + }
+ + }
+ + p++;
+ + }
+ }
+ during_gc = 0;
+ }
+ @@ -2197,21 +2555,21 @@ id2ref(VALUE obj, VALUE objid)
+ if (ptr == Qfalse) return Qfalse;
+ if (ptr == Qnil) return Qnil;
+ if (FIXNUM_P(ptr)) return (VALUE)ptr;
+ - ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
+ + ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
+
+ if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
+ ID symid = ptr / sizeof(RVALUE);
+ if (rb_id2name(symid) == 0)
+ - rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
+ - return ID2SYM(symid);
+ + rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
+ + return ID2SYM(symid);
+ }
+
+ if (!is_pointer_to_heap(objspace, (void *)ptr) ||
+ - BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
+ - rb_raise(rb_eRangeError, "%p is not id value", p0);
+ + BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
+ + rb_raise(rb_eRangeError, "%p is not id value", p0);
+ }
+ if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
+ - rb_raise(rb_eRangeError, "%p is recycled object", p0);
+ + rb_raise(rb_eRangeError, "%p is recycled object", p0);
+ }
+ return (VALUE)ptr;
+ }
+ @@ -2358,31 +2716,31 @@ count_objects(int argc, VALUE *argv, VALUE os)
+ VALUE type;
+ switch (i) {
+ #define COUNT_TYPE(t) case t: type = ID2SYM(rb_intern(#t)); break;
+ - COUNT_TYPE(T_NONE);
+ - COUNT_TYPE(T_OBJECT);
+ - COUNT_TYPE(T_CLASS);
+ - COUNT_TYPE(T_MODULE);
+ - COUNT_TYPE(T_FLOAT);
+ - COUNT_TYPE(T_STRING);
+ - COUNT_TYPE(T_REGEXP);
+ - COUNT_TYPE(T_ARRAY);
+ - COUNT_TYPE(T_HASH);
+ - COUNT_TYPE(T_STRUCT);
+ - COUNT_TYPE(T_BIGNUM);
+ - COUNT_TYPE(T_FILE);
+ - COUNT_TYPE(T_DATA);
+ - COUNT_TYPE(T_MATCH);
+ - COUNT_TYPE(T_COMPLEX);
+ - COUNT_TYPE(T_RATIONAL);
+ - COUNT_TYPE(T_NIL);
+ - COUNT_TYPE(T_TRUE);
+ - COUNT_TYPE(T_FALSE);
+ - COUNT_TYPE(T_SYMBOL);
+ - COUNT_TYPE(T_FIXNUM);
+ - COUNT_TYPE(T_UNDEF);
+ - COUNT_TYPE(T_NODE);
+ - COUNT_TYPE(T_ICLASS);
+ - COUNT_TYPE(T_DEFERRED);
+ + COUNT_TYPE(T_NONE);
+ + COUNT_TYPE(T_OBJECT);
+ + COUNT_TYPE(T_CLASS);
+ + COUNT_TYPE(T_MODULE);
+ + COUNT_TYPE(T_FLOAT);
+ + COUNT_TYPE(T_STRING);
+ + COUNT_TYPE(T_REGEXP);
+ + COUNT_TYPE(T_ARRAY);
+ + COUNT_TYPE(T_HASH);
+ + COUNT_TYPE(T_STRUCT);
+ + COUNT_TYPE(T_BIGNUM);
+ + COUNT_TYPE(T_FILE);
+ + COUNT_TYPE(T_DATA);
+ + COUNT_TYPE(T_MATCH);
+ + COUNT_TYPE(T_COMPLEX);
+ + COUNT_TYPE(T_RATIONAL);
+ + COUNT_TYPE(T_NIL);
+ + COUNT_TYPE(T_TRUE);
+ + COUNT_TYPE(T_FALSE);
+ + COUNT_TYPE(T_SYMBOL);
+ + COUNT_TYPE(T_FIXNUM);
+ + COUNT_TYPE(T_UNDEF);
+ + COUNT_TYPE(T_NODE);
+ + COUNT_TYPE(T_ICLASS);
+ + COUNT_TYPE(T_DEFERRED);
+ #undef COUNT_TYPE
+ default: type = INT2NUM(i); break;
+ }
+ @@ -2461,6 +2819,14 @@ Init_GC(void)
+ rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
+ rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
+
+ + rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0);
+ + rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0);
+ + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0);
+ + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0);
+ + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0);
+ + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0);
+ + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
+ +
+ rb_mObSpace = rb_define_module("ObjectSpace");
+ rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
+ rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
+ @@ -2471,7 +2837,7 @@ Init_GC(void)
+ rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
+
+ nomem_error = rb_exc_new3(rb_eNoMemError,
+ - rb_obj_freeze(rb_str_new2("failed to allocate memory")));
+ + rb_obj_freeze(rb_str_new2("failed to allocate memory")));
+ OBJ_TAINT(nomem_error);
+ OBJ_FREEZE(nomem_error);
+
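For orientation: the hunks above instrument garbage_collect() with gettimeofday-based timing and register new GC singleton methods (enable_stats, disable_stats, clear_stats, collections, time, dump, log). A minimal Ruby sketch of how a patched interpreter is typically driven from benchmark code follows; the method names are taken from the Init_GC hunk above, while the unit of GC.time and the exact output written by GC.dump and GC.log are defined elsewhere in the patch and railsbench's scripts, so treat the comments as assumptions rather than a specification.

    GC.clear_stats                         # reset collection counter and accumulated time
    GC.enable_stats                        # start recording GC statistics

    GC.log "benchmark start"               # write a marker line to the GC data file
    1_000.times { Array.new(1_000) { "x" * 64 } }
    GC.start

    puts "collections: #{GC.collections}"  # number of GC runs while stats were enabled
    puts "gc time:     #{GC.time}"         # accumulated GC time (microseconds in this patch)
    GC.dump                                # dump heap details to the GC data file
    GC.disable_stats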