fastthread 0.6.4.1 → 1.0

Files changed (3)
  1. data/Rakefile +1 -1
  2. data/ext/fastthread/fastthread.c +809 -758
  3. metadata +3 -3
data/Rakefile CHANGED
@@ -4,7 +4,7 @@ require 'rake/testtask'
  require 'rake/gempackagetask'
  require 'tools/rakehelp'

- GEM_VERSION="0.6.4.1"
+ GEM_VERSION="1.0"

  setup_extension('fastthread', 'fastthread')

data/ext/fastthread/fastthread.c CHANGED
@@ -4,6 +4,8 @@
  *
  * Copyright 2006-2007 MenTaLguY <mental@rydia.net>
  *
+ * RDoc taken from original.
+ *
  * This file is made available under the same terms as Ruby.
  */

@@ -11,1125 +13,1174 @@
  #include <intern.h>
  #include <rubysig.h>

- static VALUE avoid_mem_pools;
-
- #ifndef USE_MEM_POOLS
- #define USE_MEM_POOLS !RTEST(avoid_mem_pools)
- #endif
-
  static VALUE rb_cMutex;
  static VALUE rb_cConditionVariable;
- /* post-1.8.5 Ruby exposes rb_eThreadError; earlier versions do not */
- static VALUE private_eThreadError;
  static VALUE rb_cQueue;
  static VALUE rb_cSizedQueue;
+ /* earlier versions of ruby do not export rb_eThreadError */
+ static VALUE private_eThreadError;

- static VALUE
- return_value(value)
- VALUE value;
- {
- return value;
- }
+ static VALUE set_critical(VALUE value);
+
+ /*
+ * call-seq:
+ * Thread.exclusive { block } => obj
+ *
+ * Wraps a block in Thread.critical, restoring the original value
+ * upon exit from the critical section, and returns the value of the
+ * block.
+ */

  typedef struct _Entry {
- VALUE value;
- struct _Entry *next;
+ VALUE value;
+ struct _Entry *next;
  } Entry;

  typedef struct _List {
- Entry *entries;
- Entry *last_entry;
- Entry *entry_pool;
- unsigned long size;
+ Entry *entries;
+ Entry *last_entry;
+ Entry *entry_pool;
+ unsigned long size;
  } List;

- static void init_list _((List *));
-
  static void
- init_list(list)
- List *list;
+ init_list(List *list)
  {
- list->entries = NULL;
- list->last_entry = NULL;
- list->entry_pool = NULL;
- list->size = 0;
+ list->entries = NULL;
+ list->last_entry = NULL;
+ list->entry_pool = NULL;
+ list->size = 0;
  }

- static void mark_list _((List *));
-
  static void
- mark_list(list)
- List *list;
+ mark_list(List *list)
  {
- Entry *entry;
- for ( entry = list->entries ; entry ; entry = entry->next ) {
- rb_gc_mark(entry->value);
- }
+ Entry *entry;
+ for (entry = list->entries; entry; entry = entry->next) {
+ rb_gc_mark(entry->value);
+ }
  }
 
- static void free_entries _((Entry *));
-
  static void
- free_entries(first)
- Entry *first;
+ free_entries(Entry *first)
  {
- Entry *next;
- while (first) {
- next = first->next;
- free(first);
- first = next;
- }
+ Entry *next;
+ while (first) {
+ next = first->next;
+ xfree(first);
+ first = next;
+ }
  }

- static void finalize_list _((List *));
-
  static void
- finalize_list(list)
- List *list;
+ finalize_list(List *list)
  {
- free_entries(list->entries);
- free_entries(list->entry_pool);
+ free_entries(list->entries);
+ free_entries(list->entry_pool);
  }

- static void push_list _((List *, VALUE));
-
  static void
- push_list(list, value)
- List *list;
- VALUE value;
+ push_list(List *list, VALUE value)
  {
- Entry *entry;
+ Entry *entry;

- if (list->entry_pool) {
- entry = list->entry_pool;
- list->entry_pool = entry->next;
- } else {
- entry = (Entry *)malloc(sizeof(Entry));
- }
+ if (list->entry_pool) {
+ entry = list->entry_pool;
+ list->entry_pool = entry->next;
+ } else {
+ entry = ALLOC(Entry);
+ }

- entry->value = value;
- entry->next = NULL;
+ entry->value = value;
+ entry->next = NULL;

- if (list->last_entry) {
- list->last_entry->next = entry;
- } else {
- list->entries = entry;
- }
- list->last_entry = entry;
+ if (list->last_entry) {
+ list->last_entry->next = entry;
+ } else {
+ list->entries = entry;
+ }
+ list->last_entry = entry;

- ++list->size;
+ ++list->size;
  }

- static void push_multiple_list _((List *, VALUE *, unsigned));
-
  static void
- push_multiple_list(list, values, count)
- List *list;
- VALUE *values;
- unsigned count;
+ push_multiple_list(List *list, VALUE *values, unsigned count)
  {
- unsigned i;
- for ( i = 0 ; i < count ; i++ ) {
- push_list(list, values[i]);
- }
+ unsigned i;
+ for (i = 0; i < count; i++) {
+ push_list(list, values[i]);
+ }
  }

- static void recycle_entries _((List *, Entry *, Entry *));
-
  static void
- recycle_entries(list, first_entry, last_entry)
- List *list;
- Entry *first_entry;
- Entry *last_entry;
+ recycle_entries(List *list, Entry *first_entry, Entry *last_entry)
  {
- if (USE_MEM_POOLS) {
+ #ifdef USE_MEM_POOLS
  last_entry->next = list->entry_pool;
  list->entry_pool = first_entry;
- } else {
+ #else
  last_entry->next = NULL;
  free_entries(first_entry);
- }
+ #endif
  }

- static VALUE shift_list _((List *));
-
  static VALUE
- shift_list(list)
- List *list;
+ shift_list(List *list)
  {
- Entry *entry;
- VALUE value;
+ Entry *entry;
+ VALUE value;

- entry = list->entries;
- if (!entry) return Qundef;
+ entry = list->entries;
+ if (!entry) return Qundef;

- list->entries = entry->next;
- if ( entry == list->last_entry ) {
- list->last_entry = NULL;
- }
+ list->entries = entry->next;
+ if (entry == list->last_entry) {
+ list->last_entry = NULL;
+ }

- --list->size;
+ --list->size;

- value = entry->value;
- recycle_entries(list, entry, entry);
+ value = entry->value;
+ recycle_entries(list, entry, entry);

- return value;
+ return value;
  }

- static void remove_one _((List *, VALUE));
-
  static void
- remove_one(list, value)
- List *list;
- VALUE value;
- {
- Entry **ref;
- Entry *entry;
- for (ref = &list->entries, entry = list->entries;
- entry != NULL;
- ref = &entry->next, entry = entry->next)
- {
- if (entry->value == value) {
- *ref = entry->next;
- recycle_entries(list, entry, entry);
- break;
+ remove_one(List *list, VALUE value)
+ {
+ Entry **ref;
+ Entry *prev;
+ Entry *entry;
+
+ for (ref = &list->entries, prev = NULL, entry = list->entries;
+ entry != NULL;
+ ref = &entry->next, prev = entry, entry = entry->next) {
+ if (entry->value == value) {
+ *ref = entry->next;
+ list->size--;
+ if (!entry->next) {
+ list->last_entry = prev;
+ }
+ recycle_entries(list, entry, entry);
+ break;
+ }
  }
- }
  }

- static void clear_list _((List *));
-
  static void
- clear_list(list)
- List *list;
+ clear_list(List *list)
  {
- if (list->last_entry) {
- recycle_entries(list, list->entries, list->last_entry);
- list->entries = NULL;
- list->last_entry = NULL;
- list->size = 0;
- }
+ if (list->last_entry) {
+ recycle_entries(list, list->entries, list->last_entry);
+ list->entries = NULL;
+ list->last_entry = NULL;
+ list->size = 0;
+ }
  }

- static VALUE array_from_list _((List const *));
215
-
216
183
  static VALUE
217
- array_from_list(list)
218
- List const *list;
184
+ array_from_list(List const *list)
219
185
  {
220
- VALUE ary;
221
- Entry *entry;
222
- ary = rb_ary_new();
223
- for ( entry = list->entries ; entry ; entry = entry->next ) {
224
- rb_ary_push(ary, entry->value);
225
- }
226
- return ary;
186
+ VALUE ary;
187
+ Entry *entry;
188
+ ary = rb_ary_new();
189
+ for (entry = list->entries; entry; entry = entry->next) {
190
+ rb_ary_push(ary, entry->value);
191
+ }
192
+ return ary;
227
193
  }
228
194
 
229
- static VALUE wake_thread _((VALUE));
230
-
231
195
  static VALUE
232
- wake_thread(thread)
233
- VALUE thread;
196
+ wake_thread(VALUE thread)
234
197
  {
235
- return rb_rescue2(rb_thread_wakeup, thread,
236
- return_value, Qnil, private_eThreadError, 0);
198
+ return rb_rescue2(rb_thread_wakeup, thread,
199
+ NULL, Qundef, private_eThreadError, 0);
237
200
  }
238
201
 
239
- static VALUE run_thread _((VALUE));
240
-
241
202
  static VALUE
242
- run_thread(thread)
243
- VALUE thread;
203
+ run_thread(VALUE thread)
244
204
  {
245
- return rb_rescue2(rb_thread_run, thread,
246
- return_value, Qnil, private_eThreadError, 0);
205
+ return rb_rescue2(rb_thread_run, thread,
206
+ NULL, Qundef, private_eThreadError, 0);
247
207
  }
248
208
 
249
- static VALUE wake_one _((List *));
250
-
251
209
  static VALUE
252
- wake_one(list)
253
- List *list;
210
+ wake_one(List *list)
254
211
  {
255
- VALUE waking;
212
+ VALUE waking;
256
213
 
257
- waking = Qnil;
258
- while ( list->entries && !RTEST(waking) ) {
259
- waking = wake_thread(shift_list(list));
260
- }
214
+ waking = Qnil;
215
+ while (list->entries && !RTEST(waking)) {
216
+ waking = wake_thread(shift_list(list));
217
+ }
261
218
 
262
- return waking;
219
+ return waking;
263
220
  }
264
221
 
265
- static VALUE wake_all _((List *));
266
-
267
222
  static VALUE
268
- wake_all(list)
269
- List *list;
223
+ wake_all(List *list)
270
224
  {
271
- while (list->entries) {
272
- wake_one(list);
273
- }
274
- return Qnil;
225
+ while (list->entries) {
226
+ wake_one(list);
227
+ }
228
+ return Qnil;
275
229
  }
276
230
 
277
- static VALUE wait_list_inner _((List *));
278
-
279
231
  static VALUE
280
- wait_list_inner(list)
281
- List *list;
232
+ wait_list_inner(List *list)
282
233
  {
283
- push_list(list, rb_thread_current());
284
- rb_thread_stop();
285
- return Qnil;
234
+ push_list(list, rb_thread_current());
235
+ rb_thread_stop();
236
+ return Qnil;
286
237
  }
287
238
 
288
- static VALUE wait_list_cleanup _((List *));
289
-
290
239
  static VALUE
291
- wait_list_cleanup(list)
292
- List *list;
240
+ wait_list_cleanup(List *list)
293
241
  {
294
- /* cleanup in case of spurious wakeups */
295
- rb_thread_critical = 1;
296
- remove_one(list, rb_thread_current());
297
- rb_thread_critical = 0;
298
- return Qnil;
242
+ /* cleanup in case of spurious wakeups */
243
+ remove_one(list, rb_thread_current());
244
+ return Qnil;
299
245
  }
300
246
 
301
- static void wait_list _((List *));
302
-
303
247
  static void
304
- wait_list(list)
305
- List *list;
248
+ wait_list(List *list)
306
249
  {
307
- rb_ensure(wait_list_inner, (VALUE)list, wait_list_cleanup, (VALUE)list);
250
+ rb_ensure(wait_list_inner, (VALUE)list, wait_list_cleanup, (VALUE)list);
308
251
  }
309
252
 
310
- static void assert_no_survivors _((List *, const char *, void *));
311
-
312
253
  static void
313
- assert_no_survivors(waiting, label, addr)
314
- List *waiting;
315
- const char *label;
316
- void *addr;
317
- {
318
- Entry *entry;
319
- for ( entry = waiting->entries ; entry ; entry = entry->next ) {
320
- if (RTEST(wake_thread(entry->value))) {
321
- rb_bug("%s %p freed with live thread(s) waiting", label, addr);
254
+ assert_no_survivors(List *waiting, const char *label, void *addr)
255
+ {
256
+ Entry *entry;
257
+ for (entry = waiting->entries; entry; entry = entry->next) {
258
+ if (RTEST(wake_thread(entry->value))) {
259
+ rb_bug("%s %p freed with live thread(s) waiting", label, addr);
260
+ }
322
261
  }
323
- }
324
262
  }
325
263
 
264
+ /*
265
+ * Document-class: Mutex
266
+ *
267
+ * Mutex implements a simple semaphore that can be used to coordinate access to
268
+ * shared data from multiple concurrent threads.
269
+ *
270
+ * Example:
271
+ *
272
+ * require 'thread'
273
+ * semaphore = Mutex.new
274
+ *
275
+ * a = Thread.new {
276
+ * semaphore.synchronize {
277
+ * # access shared resource
278
+ * }
279
+ * }
280
+ *
281
+ * b = Thread.new {
282
+ * semaphore.synchronize {
283
+ * # access shared resource
284
+ * }
285
+ * }
286
+ *
287
+ */
288
+
326
289
  typedef struct _Mutex {
327
- VALUE owner;
328
- List waiting;
290
+ VALUE owner;
291
+ List waiting;
329
292
  } Mutex;
330
293
 
331
- static void mark_mutex _((Mutex *));
332
-
333
294
  static void
334
- mark_mutex(mutex)
335
- Mutex *mutex;
295
+ mark_mutex(Mutex *mutex)
336
296
  {
337
- rb_gc_mark(mutex->owner);
338
- mark_list(&mutex->waiting);
297
+ rb_gc_mark(mutex->owner);
298
+ mark_list(&mutex->waiting);
339
299
  }
340
300
 
341
- static void finalize_mutex _((Mutex *));
342
-
343
301
  static void
344
- finalize_mutex(mutex)
345
- Mutex *mutex;
302
+ finalize_mutex(Mutex *mutex)
346
303
  {
347
- finalize_list(&mutex->waiting);
304
+ finalize_list(&mutex->waiting);
348
305
  }
349
306
 
350
- static void free_mutex _((Mutex *));
351
-
352
307
  static void
353
- free_mutex(mutex)
354
- Mutex *mutex;
308
+ free_mutex(Mutex *mutex)
355
309
  {
356
- assert_no_survivors(&mutex->waiting, "mutex", mutex);
357
- finalize_mutex(mutex);
358
- free(mutex);
310
+ assert_no_survivors(&mutex->waiting, "mutex", mutex);
311
+ finalize_mutex(mutex);
312
+ xfree(mutex);
359
313
  }
360
314
 
361
- static void init_mutex _((Mutex *));
362
-
363
315
  static void
364
- init_mutex(mutex)
365
- Mutex *mutex;
316
+ init_mutex(Mutex *mutex)
366
317
  {
367
- mutex->owner = Qnil;
368
- init_list(&mutex->waiting);
318
+ mutex->owner = Qnil;
319
+ init_list(&mutex->waiting);
369
320
  }
370
321
 
371
- static VALUE rb_mutex_alloc _((VALUE));
322
+ /*
323
+ * Document-method: new
324
+ * call-seq: Mutex.new
325
+ *
326
+ * Creates a new Mutex
327
+ *
328
+ */
372
329
 
373
330
  static VALUE
374
- rb_mutex_alloc(klass)
375
- VALUE klass;
331
+ rb_mutex_alloc(VALUE klass)
376
332
  {
377
- Mutex *mutex;
378
- mutex = (Mutex *)malloc(sizeof(Mutex));
379
- init_mutex(mutex);
380
- return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
333
+ Mutex *mutex;
334
+ mutex = ALLOC(Mutex);
335
+ init_mutex(mutex);
336
+ return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
381
337
  }
382
338
 
383
- static VALUE rb_mutex_locked_p _((VALUE));
339
+ /*
340
+ * Document-method: locked?
341
+ * call-seq: locked?
342
+ *
343
+ * Returns +true+ if this lock is currently held by some thread.
344
+ *
345
+ */
384
346
 
385
347
  static VALUE
386
- rb_mutex_locked_p(self)
387
- VALUE self;
348
+ rb_mutex_locked_p(VALUE self)
388
349
  {
389
- Mutex *mutex;
390
- Data_Get_Struct(self, Mutex, mutex);
391
- return ( RTEST(mutex->owner) ? Qtrue : Qfalse );
350
+ Mutex *mutex;
351
+ Data_Get_Struct(self, Mutex, mutex);
352
+ return RTEST(mutex->owner) ? Qtrue : Qfalse;
392
353
  }
393
354
 
394
- static VALUE rb_mutex_try_lock _((VALUE));
355
+ /*
356
+ * Document-method: try_lock
357
+ * call-seq: try_lock
358
+ *
359
+ * Attempts to obtain the lock and returns immediately. Returns +true+ if the
360
+ * lock was granted.
361
+ *
362
+ */
395
363
 
396
364
  static VALUE
397
- rb_mutex_try_lock(self)
398
- VALUE self;
365
+ rb_mutex_try_lock(VALUE self)
399
366
  {
400
- Mutex *mutex;
401
- VALUE result;
367
+ Mutex *mutex;
402
368
 
403
- Data_Get_Struct(self, Mutex, mutex);
369
+ Data_Get_Struct(self, Mutex, mutex);
404
370
 
405
- result = Qfalse;
371
+ if (RTEST(mutex->owner))
372
+ return Qfalse;
406
373
 
407
- rb_thread_critical = 1;
408
- if (!RTEST(mutex->owner)) {
409
374
  mutex->owner = rb_thread_current();
410
- result = Qtrue;
411
- }
412
- rb_thread_critical = 0;
413
-
414
- return result;
375
+ return Qtrue;
415
376
  }
416
377
 
417
- static void lock_mutex _((Mutex *));
378
+ /*
379
+ * Document-method: lock
380
+ * call-seq: lock
381
+ *
382
+ * Attempts to grab the lock and waits if it isn't available.
383
+ *
384
+ */
418
385
 
419
- static void
420
- lock_mutex(mutex)
421
- Mutex *mutex;
386
+ static VALUE
387
+ lock_mutex(Mutex *mutex)
422
388
  {
423
- VALUE current;
424
- current = rb_thread_current();
425
-
426
- rb_thread_critical = 1;
389
+ VALUE current;
390
+ current = rb_thread_current();
427
391
 
428
- while (RTEST(mutex->owner)) {
429
- wait_list(&mutex->waiting);
430
392
  rb_thread_critical = 1;
431
- }
432
- mutex->owner = current;
433
393
 
434
- rb_thread_critical = 0;
435
- }
394
+ while (RTEST(mutex->owner)) {
395
+ wait_list(&mutex->waiting);
396
+ rb_thread_critical = 1;
397
+ }
398
+ mutex->owner = current;
436
399
 
437
- static VALUE rb_mutex_lock _((VALUE));
400
+ rb_thread_critical = 0;
401
+ return Qnil;
402
+ }
438
403
 
439
404
  static VALUE
440
- rb_mutex_lock(self)
441
- VALUE self;
405
+ rb_mutex_lock(VALUE self)
442
406
  {
443
- Mutex *mutex;
444
- Data_Get_Struct(self, Mutex, mutex);
445
- lock_mutex(mutex);
446
- return self;
407
+ Mutex *mutex;
408
+ Data_Get_Struct(self, Mutex, mutex);
409
+ lock_mutex(mutex);
410
+ return self;
447
411
  }
448
412
 
449
- static VALUE unlock_mutex_inner _((Mutex *));
413
+ /*
414
+ * Document-method: unlock
415
+ *
416
+ * Releases the lock. Returns +nil+ if ref wasn't locked.
417
+ *
418
+ */
450
419
 
451
420
  static VALUE
452
- unlock_mutex_inner(mutex)
453
- Mutex *mutex;
421
+ unlock_mutex_inner(Mutex *mutex)
454
422
  {
455
- VALUE waking;
423
+ VALUE waking;
456
424
 
457
- if (!RTEST(mutex->owner)) {
458
- return Qundef;
459
- }
460
- mutex->owner = Qnil;
461
- waking = wake_one(&mutex->waiting);
425
+ if (!RTEST(mutex->owner)) {
426
+ return Qundef;
427
+ }
462
428
 
463
- return waking;
464
- }
429
+ mutex->owner = Qnil;
430
+ waking = wake_one(&mutex->waiting);
465
431
 
466
- static VALUE set_critical _((VALUE));
432
+ return waking;
433
+ }
467
434
 
468
435
  static VALUE
469
- set_critical(value)
470
- VALUE value;
436
+ set_critical(VALUE value)
471
437
  {
472
- rb_thread_critical = (int)value;
473
- return Qnil;
438
+ rb_thread_critical = (int)value;
439
+ return Qundef;
474
440
  }
475
441
 
476
- static VALUE unlock_mutex _((Mutex *));
477
-
478
442
  static VALUE
479
- unlock_mutex(mutex)
480
- Mutex *mutex;
443
+ unlock_mutex(Mutex *mutex)
481
444
  {
482
- VALUE waking;
445
+ VALUE waking;
483
446
 
484
- rb_thread_critical = 1;
485
- waking = rb_ensure(unlock_mutex_inner, (VALUE)mutex, set_critical, 0);
447
+ rb_thread_critical = 1;
448
+ waking = rb_ensure(unlock_mutex_inner, (VALUE)mutex, set_critical, 0);
486
449
 
487
- if ( waking == Qundef ) {
488
- return Qfalse;
489
- }
450
+ if (waking == Qundef) {
451
+ return Qfalse;
452
+ }
490
453
 
491
- if (RTEST(waking)) {
492
- run_thread(waking);
493
- }
454
+ if (RTEST(waking)) {
455
+ run_thread(waking);
456
+ }
494
457
 
495
- return Qtrue;
458
+ return Qtrue;
496
459
  }
497
460
 
498
- static VALUE rb_mutex_unlock _((VALUE));
499
-
500
461
  static VALUE
501
- rb_mutex_unlock(self)
502
- VALUE self;
462
+ rb_mutex_unlock(VALUE self)
503
463
  {
504
- Mutex *mutex;
505
- Data_Get_Struct(self, Mutex, mutex);
464
+ Mutex *mutex;
465
+ Data_Get_Struct(self, Mutex, mutex);
506
466
 
507
- if (RTEST(unlock_mutex(mutex))) {
508
- return self;
509
- } else {
510
- return Qnil;
511
- }
467
+ if (RTEST(unlock_mutex(mutex))) {
468
+ return self;
469
+ } else {
470
+ return Qnil;
471
+ }
512
472
  }
513
473
 
514
- static VALUE rb_mutex_exclusive_unlock_inner _((Mutex *));
474
+ /*
475
+ * Document-method: exclusive_unlock
476
+ * call-seq: exclusive_unlock { ... }
477
+ *
478
+ * If the mutex is locked, unlocks the mutex, wakes one waiting thread, and
479
+ * yields in a critical section.
480
+ *
481
+ */
515
482
 
516
483
  static VALUE
517
- rb_mutex_exclusive_unlock_inner(mutex)
518
- Mutex *mutex;
484
+ rb_mutex_exclusive_unlock_inner(Mutex *mutex)
519
485
  {
520
- VALUE waking;
521
- waking = unlock_mutex_inner(mutex);
522
- rb_yield(Qundef);
523
- return waking;
486
+ VALUE waking;
487
+ waking = unlock_mutex_inner(mutex);
488
+ rb_yield(Qundef);
489
+ return waking;
524
490
  }
525
491
 
526
- static VALUE rb_mutex_exclusive_unlock _((VALUE));
527
-
528
492
  static VALUE
529
- rb_mutex_exclusive_unlock(self)
530
- VALUE self;
493
+ rb_mutex_exclusive_unlock(VALUE self)
531
494
  {
532
- Mutex *mutex;
533
- VALUE waking;
534
- Data_Get_Struct(self, Mutex, mutex);
495
+ Mutex *mutex;
496
+ VALUE waking;
497
+ Data_Get_Struct(self, Mutex, mutex);
535
498
 
536
- rb_thread_critical = 1;
537
- waking = rb_ensure(rb_mutex_exclusive_unlock_inner, (VALUE)mutex, set_critical, 0);
499
+ rb_thread_critical = 1;
500
+ waking = rb_ensure(rb_mutex_exclusive_unlock_inner, (VALUE)mutex, set_critical, 0);
538
501
 
539
- if ( waking == Qundef ) {
540
- return Qnil;
541
- }
502
+ if (waking == Qundef) {
503
+ return Qnil;
504
+ }
542
505
 
543
- if (RTEST(waking)) {
544
- run_thread(waking);
545
- }
506
+ if (RTEST(waking)) {
507
+ run_thread(waking);
508
+ }
546
509
 
547
- return self;
510
+ return self;
548
511
  }
549
512
 
550
- static VALUE rb_mutex_synchronize _((VALUE));
513
+ /*
514
+ * Document-method: synchronize
515
+ * call-seq: synchronize { ... }
516
+ *
517
+ * Obtains a lock, runs the block, and releases the lock when the block
518
+ * completes. See the example under Mutex.
519
+ *
520
+ */
551
521
 
552
522
  static VALUE
553
- rb_mutex_synchronize(self)
554
- VALUE self;
523
+ rb_mutex_synchronize(VALUE self)
555
524
  {
556
- rb_mutex_lock(self);
557
- return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
525
+ rb_mutex_lock(self);
526
+ return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
558
527
  }
559
528
 
529
+ /*
530
+ * Document-class: ConditionVariable
531
+ *
532
+ * ConditionVariable objects augment class Mutex. Using condition variables,
533
+ * it is possible to suspend while in the middle of a critical section until a
534
+ * resource becomes available.
535
+ *
536
+ * Example:
537
+ *
538
+ * require 'thread'
539
+ *
540
+ * mutex = Mutex.new
541
+ * resource = ConditionVariable.new
542
+ *
543
+ * a = Thread.new {
544
+ * mutex.synchronize {
545
+ * # Thread 'a' now needs the resource
546
+ * resource.wait(mutex)
547
+ * # 'a' can now have the resource
548
+ * }
549
+ * }
550
+ *
551
+ * b = Thread.new {
552
+ * mutex.synchronize {
553
+ * # Thread 'b' has finished using the resource
554
+ * resource.signal
555
+ * }
556
+ * }
557
+ *
558
+ */
559
+
560
560
  typedef struct _ConditionVariable {
561
- List waiting;
561
+ List waiting;
562
562
  } ConditionVariable;
563
563
 
564
- static void mark_condvar _((ConditionVariable *));
565
-
566
564
  static void
567
- mark_condvar(condvar)
568
- ConditionVariable *condvar;
565
+ mark_condvar(ConditionVariable *condvar)
569
566
  {
570
- mark_list(&condvar->waiting);
567
+ mark_list(&condvar->waiting);
571
568
  }
572
569
 
573
- static void finalize_condvar _((ConditionVariable *));
574
-
575
570
  static void
576
- finalize_condvar(condvar)
577
- ConditionVariable *condvar;
571
+ finalize_condvar(ConditionVariable *condvar)
578
572
  {
579
- finalize_list(&condvar->waiting);
573
+ finalize_list(&condvar->waiting);
580
574
  }
581
575
 
582
- static void free_condvar _((ConditionVariable *));
583
-
584
576
  static void
585
- free_condvar(condvar)
586
- ConditionVariable *condvar;
577
+ free_condvar(ConditionVariable *condvar)
587
578
  {
588
- assert_no_survivors(&condvar->waiting, "condition variable", condvar);
589
- finalize_condvar(condvar);
590
- free(condvar);
579
+ assert_no_survivors(&condvar->waiting, "condition variable", condvar);
580
+ finalize_condvar(condvar);
581
+ xfree(condvar);
591
582
  }
592
583
 
593
- static void init_condvar _((ConditionVariable *));
594
-
595
584
  static void
596
- init_condvar(condvar)
597
- ConditionVariable *condvar;
585
+ init_condvar(ConditionVariable *condvar)
598
586
  {
599
- init_list(&condvar->waiting);
587
+ init_list(&condvar->waiting);
600
588
  }
601
589
 
602
- static VALUE rb_condvar_alloc _((VALUE));
590
+ /*
591
+ * Document-method: new
592
+ * call-seq: ConditionVariable.new
593
+ *
594
+ * Creates a new ConditionVariable
595
+ *
596
+ */
603
597
 
604
598
  static VALUE
605
- rb_condvar_alloc(klass)
606
- VALUE klass;
599
+ rb_condvar_alloc(VALUE klass)
607
600
  {
608
- ConditionVariable *condvar;
601
+ ConditionVariable *condvar;
609
602
 
610
- condvar = (ConditionVariable *)malloc(sizeof(ConditionVariable));
611
- init_condvar(condvar);
603
+ condvar = ALLOC(ConditionVariable);
604
+ init_condvar(condvar);
612
605
 
613
- return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
606
+ return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
614
607
  }
615
608
 
616
- static void wait_condvar _((ConditionVariable *, Mutex *));
609
+ /*
610
+ * Document-method: wait
611
+ * call-seq: wait
612
+ *
613
+ * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
614
+ *
615
+ */
617
616
 
618
617
  static void
619
- wait_condvar(condvar, mutex)
620
- ConditionVariable *condvar;
621
- Mutex *mutex;
618
+ wait_condvar(ConditionVariable *condvar, Mutex *mutex)
622
619
  {
623
- rb_thread_critical = 1;
624
- if (!RTEST(mutex->owner)) {
625
- rb_thread_critical = Qfalse;
626
- return;
627
- }
628
- if ( mutex->owner != rb_thread_current() ) {
629
- rb_thread_critical = Qfalse;
630
- rb_raise(private_eThreadError, "Not owner");
631
- }
632
- mutex->owner = Qnil;
633
- wait_list(&condvar->waiting);
634
-
635
- lock_mutex(mutex);
620
+ rb_thread_critical = 1;
621
+ if (rb_thread_current() != mutex->owner) {
622
+ rb_thread_critical = 0;
623
+ rb_raise(private_eThreadError, "not owner of the synchronization mutex");
624
+ }
625
+ unlock_mutex_inner(mutex);
626
+ rb_ensure(wait_list, (VALUE)&condvar->waiting, lock_mutex, (VALUE)mutex);
636
627
  }
637
628
 
638
- static VALUE legacy_exclusive_unlock _((VALUE));
639
-
640
629
  static VALUE
641
- legacy_exclusive_unlock(mutex)
642
- VALUE mutex;
630
+ legacy_exclusive_unlock(VALUE mutex)
643
631
  {
644
- return rb_funcall(mutex, rb_intern("exclusive_unlock"), 0);
632
+ return rb_funcall(mutex, rb_intern("exclusive_unlock"), 0);
645
633
  }
646
634
 
647
635
  typedef struct {
648
- ConditionVariable *condvar;
649
- VALUE mutex;
636
+ ConditionVariable *condvar;
637
+ VALUE mutex;
650
638
  } legacy_wait_args;
651
639
 
652
- static VALUE legacy_wait _((VALUE, legacy_wait_args *));
653
-
654
640
  static VALUE
655
- legacy_wait(unused, args)
656
- VALUE unused;
657
- legacy_wait_args *args;
641
+ legacy_wait(VALUE unused, legacy_wait_args *args)
658
642
  {
659
- wait_list(&args->condvar->waiting);
660
- rb_funcall(args->mutex, rb_intern("lock"), 0);
661
- return Qnil;
643
+ wait_list(&args->condvar->waiting);
644
+ rb_funcall(args->mutex, rb_intern("lock"), 0);
645
+ return Qnil;
662
646
  }
663
647
 
664
- static VALUE rb_condvar_wait _((VALUE, VALUE));
665
-
666
648
  static VALUE
667
- rb_condvar_wait(self, mutex_v)
668
- VALUE self;
669
- VALUE mutex_v;
670
- {
671
- ConditionVariable *condvar;
672
- Data_Get_Struct(self, ConditionVariable, condvar);
673
-
674
- if ( CLASS_OF(mutex_v) != rb_cMutex ) {
675
- /* interoperate with legacy mutex */
676
- legacy_wait_args args;
677
- args.condvar = condvar;
678
- args.mutex = mutex_v;
679
- rb_iterate(legacy_exclusive_unlock, mutex_v, legacy_wait, (VALUE)&args);
680
- } else {
681
- Mutex *mutex;
682
- Data_Get_Struct(mutex_v, Mutex, mutex);
683
- wait_condvar(condvar, mutex);
684
- }
649
+ rb_condvar_wait(VALUE self, VALUE mutex_v)
650
+ {
651
+ ConditionVariable *condvar;
652
+ Data_Get_Struct(self, ConditionVariable, condvar);
653
+
654
+ if (CLASS_OF(mutex_v) != rb_cMutex) {
655
+ /* interoperate with legacy mutex */
656
+ legacy_wait_args args;
657
+ args.condvar = condvar;
658
+ args.mutex = mutex_v;
659
+ rb_iterate(legacy_exclusive_unlock, mutex_v, legacy_wait, (VALUE)&args);
660
+ } else {
661
+ Mutex *mutex;
662
+ Data_Get_Struct(mutex_v, Mutex, mutex);
663
+ wait_condvar(condvar, mutex);
664
+ }
685
665
 
686
- return self;
666
+ return self;
687
667
  }
688
668
 
689
- static VALUE rb_condvar_broadcast _((VALUE));
669
+ /*
670
+ * Document-method: broadcast
671
+ * call-seq: broadcast
672
+ *
673
+ * Wakes up all threads waiting for this condition.
674
+ *
675
+ */
690
676
 
691
677
  static VALUE
692
- rb_condvar_broadcast(self)
693
- VALUE self;
678
+ rb_condvar_broadcast(VALUE self)
694
679
  {
695
- ConditionVariable *condvar;
680
+ ConditionVariable *condvar;
696
681
 
697
- Data_Get_Struct(self, ConditionVariable, condvar);
682
+ Data_Get_Struct(self, ConditionVariable, condvar);
698
683
 
699
- rb_thread_critical = 1;
700
- rb_ensure(wake_all, (VALUE)&condvar->waiting, set_critical, 0);
701
- rb_thread_schedule();
684
+ rb_thread_critical = 1;
685
+ rb_ensure(wake_all, (VALUE)&condvar->waiting, set_critical, 0);
686
+ rb_thread_schedule();
702
687
 
703
- return self;
688
+ return self;
704
689
  }
705
690
 
706
- static void signal_condvar _((ConditionVariable *condvar));
691
+ /*
692
+ * Document-method: signal
693
+ * call-seq: signal
694
+ *
695
+ * Wakes up the first thread in line waiting for this condition.
696
+ *
697
+ */
707
698
 
708
699
  static void
709
- signal_condvar(condvar)
710
- ConditionVariable *condvar;
700
+ signal_condvar(ConditionVariable *condvar)
711
701
  {
712
- VALUE waking;
713
- rb_thread_critical = 1;
714
- waking = rb_ensure(wake_one, (VALUE)&condvar->waiting, set_critical, 0);
715
- if (RTEST(waking)) {
716
- run_thread(waking);
717
- }
702
+ VALUE waking;
703
+ rb_thread_critical = 1;
704
+ waking = rb_ensure(wake_one, (VALUE)&condvar->waiting, set_critical, 0);
705
+ if (RTEST(waking)) {
706
+ run_thread(waking);
707
+ }
718
708
  }
719
709
 
720
- static VALUE rb_condvar_signal _((VALUE));
721
-
722
710
  static VALUE
723
- rb_condvar_signal(self)
724
- VALUE self;
711
+ rb_condvar_signal(VALUE self)
725
712
  {
726
- ConditionVariable *condvar;
727
- Data_Get_Struct(self, ConditionVariable, condvar);
728
- signal_condvar(condvar);
729
- return self;
713
+ ConditionVariable *condvar;
714
+ Data_Get_Struct(self, ConditionVariable, condvar);
715
+ signal_condvar(condvar);
716
+ return self;
730
717
  }
731
718
 
719
+ /*
720
+ * Document-class: Queue
721
+ *
722
+ * This class provides a way to synchronize communication between threads.
723
+ *
724
+ * Example:
725
+ *
726
+ * require 'thread'
727
+ *
728
+ * queue = Queue.new
729
+ *
730
+ * producer = Thread.new do
731
+ * 5.times do |i|
732
+ * sleep rand(i) # simulate expense
733
+ * queue << i
734
+ * puts "#{i} produced"
735
+ * end
736
+ * end
737
+ *
738
+ * consumer = Thread.new do
739
+ * 5.times do |i|
740
+ * value = queue.pop
741
+ * sleep rand(i/2) # simulate expense
742
+ * puts "consumed #{value}"
743
+ * end
744
+ * end
745
+ *
746
+ * consumer.join
747
+ *
748
+ */
749
+
732
750
  typedef struct _Queue {
733
- Mutex mutex;
734
- ConditionVariable value_available;
735
- ConditionVariable space_available;
736
- List values;
737
- unsigned long capacity;
751
+ Mutex mutex;
752
+ ConditionVariable value_available;
753
+ ConditionVariable space_available;
754
+ List values;
755
+ unsigned long capacity;
738
756
  } Queue;
739
757
 
740
- static void mark_queue _((Queue *));
741
-
742
758
  static void
743
- mark_queue(queue)
744
- Queue *queue;
759
+ mark_queue(Queue *queue)
745
760
  {
746
- mark_mutex(&queue->mutex);
747
- mark_condvar(&queue->value_available);
748
- mark_condvar(&queue->space_available);
749
- mark_list(&queue->values);
761
+ mark_mutex(&queue->mutex);
762
+ mark_condvar(&queue->value_available);
763
+ mark_condvar(&queue->space_available);
764
+ mark_list(&queue->values);
750
765
  }
751
766
 
752
- static void finalize_queue _((Queue *));
753
-
754
767
  static void
755
- finalize_queue(queue)
756
- Queue *queue;
768
+ finalize_queue(Queue *queue)
757
769
  {
758
- finalize_mutex(&queue->mutex);
759
- finalize_condvar(&queue->value_available);
760
- finalize_condvar(&queue->space_available);
761
- finalize_list(&queue->values);
770
+ finalize_mutex(&queue->mutex);
771
+ finalize_condvar(&queue->value_available);
772
+ finalize_condvar(&queue->space_available);
773
+ finalize_list(&queue->values);
762
774
  }
763
775
 
764
- static void free_queue _((Queue *));
765
-
766
776
  static void
767
- free_queue(queue)
768
- Queue *queue;
777
+ free_queue(Queue *queue)
769
778
  {
770
- assert_no_survivors(&queue->mutex.waiting, "queue", queue);
771
- assert_no_survivors(&queue->space_available.waiting, "queue", queue);
772
- assert_no_survivors(&queue->value_available.waiting, "queue", queue);
773
- finalize_queue(queue);
774
- free(queue);
779
+ assert_no_survivors(&queue->mutex.waiting, "queue", queue);
780
+ assert_no_survivors(&queue->space_available.waiting, "queue", queue);
781
+ assert_no_survivors(&queue->value_available.waiting, "queue", queue);
782
+ finalize_queue(queue);
783
+ xfree(queue);
775
784
  }
776
785
 
777
- static void init_queue _((Queue *));
778
-
779
786
  static void
780
- init_queue(queue)
781
- Queue *queue;
787
+ init_queue(Queue *queue)
782
788
  {
783
- init_mutex(&queue->mutex);
784
- init_condvar(&queue->value_available);
785
- init_condvar(&queue->space_available);
786
- init_list(&queue->values);
787
- queue->capacity = 0;
789
+ init_mutex(&queue->mutex);
790
+ init_condvar(&queue->value_available);
791
+ init_condvar(&queue->space_available);
792
+ init_list(&queue->values);
793
+ queue->capacity = 0;
788
794
  }
789
795
 
790
- static VALUE rb_queue_alloc _((VALUE));
796
+ /*
797
+ * Document-method: new
798
+ * call-seq: new
799
+ *
800
+ * Creates a new queue.
801
+ *
802
+ */
791
803
 
792
804
  static VALUE
793
- rb_queue_alloc(klass)
794
- VALUE klass;
805
+ rb_queue_alloc(VALUE klass)
795
806
  {
796
- Queue *queue;
797
- queue = (Queue *)malloc(sizeof(Queue));
798
- init_queue(queue);
799
- return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
807
+ Queue *queue;
808
+ queue = ALLOC(Queue);
809
+ init_queue(queue);
810
+ return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
800
811
  }
801
812
 
802
- static VALUE rb_queue_marshal_load _((VALUE, VALUE));
803
-
804
813
  static VALUE
805
- rb_queue_marshal_load(self, data)
806
- VALUE self;
807
- VALUE data;
814
+ rb_queue_marshal_load(VALUE self, VALUE data)
808
815
  {
809
- Queue *queue;
810
- VALUE array;
811
- Data_Get_Struct(self, Queue, queue);
816
+ Queue *queue;
817
+ VALUE array;
818
+ Data_Get_Struct(self, Queue, queue);
812
819
 
813
- array = rb_marshal_load(data);
814
- if ( TYPE(array) != T_ARRAY ) {
815
- rb_raise(rb_eRuntimeError, "expected Array of queue data");
816
- }
817
- if ( RARRAY(array)->len < 1 ) {
818
- rb_raise(rb_eRuntimeError, "missing capacity value");
819
- }
820
- queue->capacity = NUM2ULONG(rb_ary_shift(array));
821
- push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);
820
+ array = rb_marshal_load(data);
821
+ if (TYPE(array) != T_ARRAY) {
822
+ rb_raise(rb_eRuntimeError, "expected Array of queue data");
823
+ }
824
+ if (RARRAY(array)->len < 1) {
825
+ rb_raise(rb_eRuntimeError, "missing capacity value");
826
+ }
827
+ queue->capacity = NUM2ULONG(rb_ary_shift(array));
828
+ push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);
822
829
 
823
- return self;
830
+ return self;
824
831
  }
825
832
 
826
- static VALUE rb_queue_marshal_dump _((VALUE));
827
-
828
833
  static VALUE
829
- rb_queue_marshal_dump(self)
830
- VALUE self;
834
+ rb_queue_marshal_dump(VALUE self)
831
835
  {
832
- Queue *queue;
833
- VALUE array;
834
- Data_Get_Struct(self, Queue, queue);
836
+ Queue *queue;
837
+ VALUE array;
838
+ Data_Get_Struct(self, Queue, queue);
835
839
 
836
- array = array_from_list(&queue->values);
837
- rb_ary_unshift(array, ULONG2NUM(queue->capacity));
838
- return rb_marshal_dump(array, Qnil);
840
+ array = array_from_list(&queue->values);
841
+ rb_ary_unshift(array, ULONG2NUM(queue->capacity));
842
+ return rb_marshal_dump(array, Qnil);
839
843
  }
840
844
 
841
- static VALUE rb_queue_clear _((VALUE));
845
+ /*
846
+ * Document-method: clear
847
+ * call-seq: clear
848
+ *
849
+ * Removes all objects from the queue.
850
+ *
851
+ */
842
852
 
843
853
  static VALUE
844
- rb_queue_clear(self)
845
- VALUE self;
854
+ rb_queue_clear(VALUE self)
846
855
  {
847
- Queue *queue;
848
- Data_Get_Struct(self, Queue, queue);
856
+ Queue *queue;
857
+ Data_Get_Struct(self, Queue, queue);
849
858
 
850
- lock_mutex(&queue->mutex);
851
- clear_list(&queue->values);
852
- signal_condvar(&queue->space_available);
853
- unlock_mutex(&queue->mutex);
859
+ lock_mutex(&queue->mutex);
860
+ clear_list(&queue->values);
861
+ signal_condvar(&queue->space_available);
862
+ unlock_mutex(&queue->mutex);
854
863
 
855
- return self;
864
+ return self;
856
865
  }
857
866
 
858
- static VALUE rb_queue_empty_p _((VALUE));
867
+ /*
868
+ * Document-method: empty?
869
+ * call-seq: empty?
870
+ *
871
+ * Returns +true+ if the queue is empty.
872
+ *
873
+ */
859
874
 
860
875
  static VALUE
861
- rb_queue_empty_p(self)
862
- VALUE self;
876
+ rb_queue_empty_p(VALUE self)
863
877
  {
864
- Queue *queue;
865
- VALUE result;
866
- Data_Get_Struct(self, Queue, queue);
878
+ Queue *queue;
879
+ VALUE result;
880
+ Data_Get_Struct(self, Queue, queue);
867
881
 
868
- lock_mutex(&queue->mutex);
869
- result = ( ( queue->values.size == 0 ) ? Qtrue : Qfalse );
870
- unlock_mutex(&queue->mutex);
882
+ lock_mutex(&queue->mutex);
883
+ result = queue->values.size == 0 ? Qtrue : Qfalse;
884
+ unlock_mutex(&queue->mutex);
871
885
 
872
- return result;
886
+ return result;
873
887
  }
874
888
 
875
- static VALUE rb_queue_length _((VALUE));
889
+ /*
890
+ * Document-method: length
891
+ * call-seq: length
892
+ *
893
+ * Returns the length of the queue.
894
+ *
895
+ */
876
896
 
877
897
  static VALUE
878
- rb_queue_length(self)
879
- VALUE self;
898
+ rb_queue_length(VALUE self)
880
899
  {
881
- Queue *queue;
882
- VALUE result;
883
- Data_Get_Struct(self, Queue, queue);
900
+ Queue *queue;
901
+ VALUE result;
902
+ Data_Get_Struct(self, Queue, queue);
884
903
 
885
- lock_mutex(&queue->mutex);
886
- result = ULONG2NUM(queue->values.size);
887
- unlock_mutex(&queue->mutex);
904
+ lock_mutex(&queue->mutex);
905
+ result = ULONG2NUM(queue->values.size);
906
+ unlock_mutex(&queue->mutex);
888
907
 
889
- return result;
908
+ return result;
890
909
  }
891
910
 
892
- static VALUE rb_queue_num_waiting _((VALUE));
911
+ /*
912
+ * Document-method: num_waiting
913
+ * call-seq: num_waiting
914
+ *
915
+ * Returns the number of threads waiting on the queue.
916
+ *
917
+ */
893
918
 
894
919
  static VALUE
895
- rb_queue_num_waiting(self)
896
- VALUE self;
920
+ rb_queue_num_waiting(VALUE self)
897
921
  {
898
- Queue *queue;
899
- VALUE result;
900
- Data_Get_Struct(self, Queue, queue);
922
+ Queue *queue;
923
+ VALUE result;
924
+ Data_Get_Struct(self, Queue, queue);
901
925
 
902
- lock_mutex(&queue->mutex);
903
- result = ULONG2NUM(queue->value_available.waiting.size +
904
- queue->space_available.waiting.size);
905
- unlock_mutex(&queue->mutex);
926
+ lock_mutex(&queue->mutex);
927
+ result = ULONG2NUM(queue->value_available.waiting.size +
928
+ queue->space_available.waiting.size);
929
+ unlock_mutex(&queue->mutex);
906
930
 
907
- return result;
931
+ return result;
908
932
  }
909
933
 
910
- static VALUE rb_queue_pop _((int, VALUE *, VALUE));
934
+ /*
935
+ * Document-method: pop
936
+ * call_seq: pop(non_block=false)
937
+ *
938
+ * Retrieves data from the queue. If the queue is empty, the calling thread is
939
+ * suspended until data is pushed onto the queue. If +non_block+ is true, the
940
+ * thread isn't suspended, and an exception is raised.
941
+ *
942
+ */
911
943
 
912
944
  static VALUE
913
- rb_queue_pop(argc, argv, self)
914
- int argc;
915
- VALUE *argv;
916
- VALUE self;
917
- {
918
- Queue *queue;
919
- int should_block;
920
- VALUE result;
921
- Data_Get_Struct(self, Queue, queue);
922
-
923
- if ( argc == 0 ) {
924
- should_block = 1;
925
- } else if ( argc == 1 ) {
926
- should_block = !RTEST(argv[0]);
927
- } else {
928
- rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
929
- }
930
-
931
- lock_mutex(&queue->mutex);
932
- if ( !queue->values.entries && !should_block ) {
933
- unlock_mutex(&queue->mutex);
934
- rb_raise(private_eThreadError, "queue empty");
935
- }
945
+ rb_queue_pop(int argc, VALUE *argv, VALUE self)
946
+ {
947
+ Queue *queue;
948
+ int should_block;
949
+ VALUE result;
950
+ Data_Get_Struct(self, Queue, queue);
951
+
952
+ if (argc == 0) {
953
+ should_block = 1;
954
+ } else if (argc == 1) {
955
+ should_block = !RTEST(argv[0]);
956
+ } else {
957
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
958
+ }
936
959
 
937
- while (!queue->values.entries) {
938
- wait_condvar(&queue->value_available, &queue->mutex);
939
- }
960
+ lock_mutex(&queue->mutex);
961
+ if (!queue->values.entries && !should_block) {
962
+ unlock_mutex(&queue->mutex);
963
+ rb_raise(private_eThreadError, "queue empty");
964
+ }
940
965
 
941
- result = shift_list(&queue->values);
942
- if ( queue->capacity && queue->values.size < queue->capacity ) {
943
- signal_condvar(&queue->space_available);
944
- }
945
- unlock_mutex(&queue->mutex);
966
+ while (!queue->values.entries) {
967
+ wait_condvar(&queue->value_available, &queue->mutex);
968
+ }
946
969
 
947
- return result;
970
+ result = shift_list(&queue->values);
971
+ if (queue->capacity && queue->values.size < queue->capacity) {
972
+ signal_condvar(&queue->space_available);
973
+ }
974
+ unlock_mutex(&queue->mutex);
975
+
976
+ return result;
948
977
  }
949
978
 
950
- static VALUE rb_queue_push _((VALUE, VALUE));
979
+ /*
980
+ * Document-method: push
981
+ * call-seq: push(obj)
982
+ *
983
+ * Pushes +obj+ to the queue.
984
+ *
985
+ */
951
986
 
952
987
  static VALUE
953
- rb_queue_push(self, value)
954
- VALUE self;
955
- VALUE value;
988
+ rb_queue_push(VALUE self, VALUE value)
956
989
  {
957
- Queue *queue;
958
- Data_Get_Struct(self, Queue, queue);
990
+ Queue *queue;
991
+ Data_Get_Struct(self, Queue, queue);
959
992
 
960
- lock_mutex(&queue->mutex);
961
- while ( queue->capacity && queue->values.size >= queue->capacity ) {
962
- wait_condvar(&queue->space_available, &queue->mutex);
963
- }
964
- push_list(&queue->values, value);
965
- signal_condvar(&queue->value_available);
966
- unlock_mutex(&queue->mutex);
993
+ lock_mutex(&queue->mutex);
994
+ while (queue->capacity && queue->values.size >= queue->capacity) {
995
+ wait_condvar(&queue->space_available, &queue->mutex);
996
+ }
997
+ push_list(&queue->values, value);
998
+ signal_condvar(&queue->value_available);
999
+ unlock_mutex(&queue->mutex);
967
1000
 
968
- return self;
1001
+ return self;
969
1002
  }
970
1003
 
971
- static VALUE rb_sized_queue_max _((VALUE));
1004
+ /*
1005
+ * Document-class: SizedQueue
1006
+ *
1007
+ * This class represents queues of specified size capacity. The push operation
1008
+ * may be blocked if the capacity is full.
1009
+ *
1010
+ * See Queue for an example of how a SizedQueue works.
1011
+ *
1012
+ */
1013
+
1014
+ /*
1015
+ * Document-method: new
1016
+ * call-seq: new
1017
+ *
1018
+ * Creates a fixed-length queue with a maximum size of +max+.
1019
+ *
1020
+ */
1021
+
1022
+ /*
1023
+ * Document-method: max
1024
+ * call-seq: max
1025
+ *
1026
+ * Returns the maximum size of the queue.
1027
+ *
1028
+ */
972
1029
 
973
1030
  static VALUE
974
- rb_sized_queue_max(self)
975
- VALUE self;
1031
+ rb_sized_queue_max(VALUE self)
976
1032
  {
977
- Queue *queue;
978
- VALUE result;
979
- Data_Get_Struct(self, Queue, queue);
1033
+ Queue *queue;
1034
+ VALUE result;
1035
+ Data_Get_Struct(self, Queue, queue);
980
1036
 
981
- lock_mutex(&queue->mutex);
982
- result = ULONG2NUM(queue->capacity);
983
- unlock_mutex(&queue->mutex);
1037
+ lock_mutex(&queue->mutex);
1038
+ result = ULONG2NUM(queue->capacity);
1039
+ unlock_mutex(&queue->mutex);
984
1040
 
985
- return result;
1041
+ return result;
986
1042
  }
987
1043
 
988
- static VALUE rb_sized_queue_max_set _((VALUE, VALUE));
1044
+ /*
1045
+ * Document-method: max=
1046
+ * call-seq: max=(size)
1047
+ *
1048
+ * Sets the maximum size of the queue.
1049
+ *
1050
+ */
989
1051
 
990
1052
  static VALUE
991
- rb_sized_queue_max_set(self, value)
992
- VALUE self;
993
- VALUE value;
994
- {
995
- Queue *queue;
996
- unsigned long new_capacity;
997
- unsigned long difference;
998
- Data_Get_Struct(self, Queue, queue);
999
-
1000
- new_capacity = NUM2ULONG(value);
1001
-
1002
- if ( new_capacity < 1 ) {
1003
- rb_raise(rb_eArgError, "value must be positive");
1004
- }
1005
-
1006
- lock_mutex(&queue->mutex);
1007
- if ( queue->capacity && new_capacity > queue->capacity ) {
1008
- difference = new_capacity - queue->capacity;
1009
- } else {
1010
- difference = 0;
1011
- }
1012
- queue->capacity = new_capacity;
1013
- for ( ; difference > 0 ; --difference ) {
1014
- signal_condvar(&queue->space_available);
1015
- }
1016
- unlock_mutex(&queue->mutex);
1053
+ rb_sized_queue_max_set(VALUE self, VALUE value)
1054
+ {
1055
+ Queue *queue;
1056
+ unsigned long new_capacity;
1057
+ unsigned long difference;
1058
+ Data_Get_Struct(self, Queue, queue);
1059
+
1060
+ new_capacity = NUM2ULONG(value);
1017
1061
 
1018
- return self;
1062
+ if (new_capacity < 1) {
1063
+ rb_raise(rb_eArgError, "value must be positive");
1064
+ }
1065
+
1066
+ lock_mutex(&queue->mutex);
1067
+ if (queue->capacity && new_capacity > queue->capacity) {
1068
+ difference = new_capacity - queue->capacity;
1069
+ } else {
1070
+ difference = 0;
1071
+ }
1072
+ queue->capacity = new_capacity;
1073
+ for (; difference > 0; --difference) {
1074
+ signal_condvar(&queue->space_available);
1075
+ }
1076
+ unlock_mutex(&queue->mutex);
1077
+
1078
+ return self;
1019
1079
  }
1020
1080
 
1021
- /* Existing code expects to be able to serialize Mutexes... */
1081
+ /*
1082
+ * Document-method: push
1083
+ * call-seq: push(obj)
1084
+ *
1085
+ * Pushes +obj+ to the queue. If there is no space left in the queue, waits
1086
+ * until space becomes available.
1087
+ *
1088
+ */
1022
1089
 
1023
- static VALUE dummy_load _((VALUE, VALUE));
1090
+ /*
1091
+ * Document-method: pop
1092
+ * call-seq: pop(non_block=false)
1093
+ *
1094
+ * Retrieves data from the queue and runs a waiting thread, if any.
1095
+ *
1096
+ */
1097
+
1098
+ /* for marshalling mutexes and condvars */
1024
1099
 
1025
1100
  static VALUE
1026
- dummy_load(self, string)
1027
- VALUE self;
1028
- VALUE string;
1101
+ dummy_load(VALUE self, VALUE string)
1029
1102
  {
1030
- return Qnil;
1103
+ return Qnil;
1104
+ }
1105
+
1106
+ static VALUE
1107
+ dummy_dump(VALUE self)
1108
+ {
1109
+ return rb_str_new2("");
1031
1110
  }
1032
1111
 
1033
- static VALUE dummy_dump _((VALUE));
1034
1112
 
1035
1113
  static VALUE
1036
- dummy_dump(self)
1037
- VALUE self;
1038
- {
1039
- return rb_str_new2("");
1040
- }
1041
-
1042
- static VALUE setup_classes _((VALUE));
1043
-
1044
- static VALUE setup_classes(unused)
1045
- VALUE unused;
1046
- {
1047
- rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Mutex")));
1048
- rb_cMutex = rb_define_class("Mutex", rb_cObject);
1049
- rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
1050
- rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
1051
- rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
1052
- rb_define_method(rb_cMutex, "initialize", return_value, 0);
1053
- rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
1054
- rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
1055
- rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
1056
- rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
1057
- rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
1058
- rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);
1059
-
1060
- rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("ConditionVariable")));
1061
- rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
1062
- rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
1063
- rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
1064
- rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
1065
- rb_define_method(rb_cConditionVariable, "initialize", return_value, 0);
1066
- rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
1067
- rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
1068
- rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
1069
-
1070
- rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Queue")));
1071
- rb_cQueue = rb_define_class("Queue", rb_cObject);
1072
- rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
1073
- rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
1074
- rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
1075
- rb_define_method(rb_cQueue, "initialize", return_value, 0);
1076
- rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
1077
- rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
1078
- rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
1079
- rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
1080
- rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
1081
- rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
1082
- rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
1083
- rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
1084
- rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
1085
- rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));
1086
-
1087
- rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("SizedQueue")));
1088
- rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
1089
- rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
1090
- rb_define_method(rb_cSizedQueue, "clear", rb_queue_clear, 0);
1091
- rb_define_method(rb_cSizedQueue, "empty?", rb_queue_empty_p, 0);
1092
- rb_define_method(rb_cSizedQueue, "length", rb_queue_length, 0);
1093
- rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
1094
- rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
1095
- rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
1096
- rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
1097
- rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
1098
- rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
1099
- rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
1100
- rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));
1101
-
1102
- return Qnil;
1114
+ setup_classes(VALUE unused)
1115
+ {
1116
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Mutex")));
1117
+ rb_cMutex = rb_define_class("Mutex", rb_cObject);
1118
+ rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
1119
+ rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
1120
+ rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
1121
+ rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
1122
+ rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
1123
+ rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
1124
+ rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
1125
+ rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
1126
+ rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);
1127
+
1128
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("ConditionVariable")));
1129
+ rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
1130
+ rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
1131
+ rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
1132
+ rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
1133
+ rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
1134
+ rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
1135
+ rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
1136
+
1137
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Queue")));
1138
+ rb_cQueue = rb_define_class("Queue", rb_cObject);
1139
+ rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
1140
+ rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
1141
+ rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
1142
+ rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
1143
+ rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
1144
+ rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
1145
+ rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
1146
+ rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
1147
+ rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
1148
+ rb_alias(rb_cQueue, rb_intern("enq"), rb_intern("push"));
1149
+ rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
1150
+ rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
1151
+ rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
1152
+ rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));
1153
+
1154
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("SizedQueue")));
1155
+ rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
1156
+ rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
1157
+ rb_define_method(rb_cSizedQueue, "clear", rb_queue_clear, 0);
1158
+ rb_define_method(rb_cSizedQueue, "empty?", rb_queue_empty_p, 0);
1159
+ rb_define_method(rb_cSizedQueue, "length", rb_queue_length, 0);
1160
+ rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
1161
+ rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
1162
+ rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
1163
+ rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
1164
+ rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
1165
+ rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
1166
+ rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
1167
+ rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));
1168
+
1169
+ return Qnil;
1103
1170
  }

  void
  Init_fastthread()
  {
- VALUE global_variables;
- VALUE fastthread_avoid_mem_pools;
- int saved_critical;
- int i;
-
- avoid_mem_pools = Qnil;
- fastthread_avoid_mem_pools = rb_str_new2("$fastthread_avoid_mem_pools");
- global_variables = rb_f_global_variables();
- for ( i = 0 ; i < RARRAY(global_variables)->len ; i++ ) {
- if (RTEST(rb_equal(RARRAY(global_variables)->ptr[i], fastthread_avoid_mem_pools))) {
- avoid_mem_pools = rb_gv_get("$fastthread_avoid_mem_pools");
- break;
- }
- }
-
- rb_global_variable(&avoid_mem_pools);
- rb_define_variable("$fastthread_avoid_mem_pools", &avoid_mem_pools);
+ int saved_critical;

- rb_require("thread");
+ rb_require("thread");

- private_eThreadError = rb_const_get(rb_cObject, rb_intern("ThreadError"));
+ private_eThreadError = rb_const_get(rb_cObject, rb_intern("ThreadError"));

- /* ensure that classes get replaced atomically */
- saved_critical = rb_thread_critical;
- rb_thread_critical = 1;
- rb_ensure(setup_classes, Qnil, set_critical, (VALUE)saved_critical);
+ /* ensure that classes get replaced atomically */
+ saved_critical = rb_thread_critical;
+ rb_thread_critical = 1;
+ rb_ensure(setup_classes, Qnil, set_critical, (VALUE)saved_critical);
  }
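For reference, a minimal usage sketch of the classes this extension redefines, in the spirit of the RDoc examples embedded above. It assumes the gem is loaded with require 'fastthread'; the snippet is illustrative and not part of the diff:

    require 'fastthread'   # replaces Mutex, ConditionVariable, Queue and SizedQueue with the C versions

    queue = SizedQueue.new(2)          # capacity 2: push blocks once two items are waiting

    producer = Thread.new do
      5.times { |i| queue << i }       # Queue#<< is aliased to push in setup_classes
    end

    consumer = Thread.new do
      5.times { puts queue.pop }       # pop suspends the caller while the queue is empty
    end

    [producer, consumer].each { |t| t.join }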