fastthread 1.0.1-i386-mswin32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data.tar.gz.sig +3 -0
- data/CHANGELOG +3 -0
- data/Manifest +9 -0
- data/ext/fastthread/extconf.rb +3 -0
- data/ext/fastthread/fastthread.c +1186 -0
- data/fastthread.gemspec +67 -0
- data/lib/fastthread.so +0 -0
- data/setup.rb +1585 -0
- data/test/test_all.rb +6 -0
- data/test/test_condvar.rb +34 -0
- data/test/test_mutex.rb +74 -0
- data/test/test_queue.rb +79 -0
- metadata +101 -0
- metadata.gz.sig +0 -0
data.tar.gz.sig
ADDED
data/CHANGELOG
ADDED
data/Manifest
ADDED
@@ -0,0 +1,1186 @@
|
|
1
|
+
/*
|
2
|
+
* Optimized Ruby Mutex implementation, loosely based on thread.rb by
|
3
|
+
* Yukihiro Matsumoto <matz@ruby-lang.org>
|
4
|
+
*
|
5
|
+
* Copyright 2006-2007 MenTaLguY <mental@rydia.net>
|
6
|
+
*
|
7
|
+
* RDoc taken from original.
|
8
|
+
*
|
9
|
+
* This file is made available under the same terms as Ruby.
|
10
|
+
*/
|
11
|
+
|
12
|
+
#include <ruby.h>
|
13
|
+
#include <intern.h>
|
14
|
+
#include <rubysig.h>
|
15
|
+
|
16
|
+
static VALUE rb_cMutex;
|
17
|
+
static VALUE rb_cConditionVariable;
|
18
|
+
static VALUE rb_cQueue;
|
19
|
+
static VALUE rb_cSizedQueue;
|
20
|
+
/* earlier versions of ruby do not export rb_eThreadError */
|
21
|
+
static VALUE private_eThreadError;
|
22
|
+
|
23
|
+
static VALUE set_critical(VALUE value);
|
24
|
+
|
25
|
+
/*
|
26
|
+
* call-seq:
|
27
|
+
* Thread.exclusive { block } => obj
|
28
|
+
*
|
29
|
+
* Wraps a block in Thread.critical, restoring the original value
|
30
|
+
* upon exit from the critical section, and returns the value of the
|
31
|
+
* block.
|
32
|
+
*/
|
33
|
+
|
34
|
+
/* Singly-linked node holding one Ruby VALUE. */
typedef struct _Entry {
    VALUE value;
    struct _Entry *next;
} Entry;

/* FIFO list of VALUEs with O(1) push/shift and an optional pool of
 * recycled nodes (see recycle_entries) to cut malloc traffic. */
typedef struct _List {
    Entry *entries;      /* head of the queue (oldest element) */
    Entry *last_entry;   /* tail of the queue (newest element) */
    Entry *entry_pool;   /* free nodes available for reuse */
    unsigned long size;  /* number of live entries */
} List;
|
45
|
+
|
46
|
+
/* Resets a list to the empty state.  Frees nothing; use finalize_list
 * to release memory. */
static void
init_list(List *list)
{
    list->entries = NULL;
    list->last_entry = NULL;
    list->entry_pool = NULL;
    list->size = 0;
}

/* GC-marks every VALUE held by live entries (pooled nodes hold no
 * live references and are skipped). */
static void
mark_list(List *list)
{
    Entry *entry;
    for (entry = list->entries; entry; entry = entry->next) {
        rb_gc_mark(entry->value);
    }
}

/* Frees a NULL-terminated chain of entries. */
static void
free_entries(Entry *first)
{
    Entry *next;
    while (first) {
        next = first->next;
        xfree(first);
        first = next;
    }
}

/* Releases all memory owned by the list: live entries and the pool. */
static void
finalize_list(List *list)
{
    free_entries(list->entries);
    free_entries(list->entry_pool);
}

/* Appends value at the tail in O(1), reusing a pooled node when one
 * is available. */
static void
push_list(List *list, VALUE value)
{
    Entry *entry;

    if (list->entry_pool) {
        entry = list->entry_pool;
        list->entry_pool = entry->next;
    } else {
        entry = ALLOC(Entry);
    }

    entry->value = value;
    entry->next = NULL;

    if (list->last_entry) {
        list->last_entry->next = entry;
    } else {
        /* list was empty: entry becomes the head too */
        list->entries = entry;
    }
    list->last_entry = entry;

    ++list->size;
}

/* Appends count values in order. */
static void
push_multiple_list(List *list, VALUE *values, unsigned count)
{
    unsigned i;
    for (i = 0; i < count; i++) {
        push_list(list, values[i]);
    }
}

/* Returns the detached chain [first_entry..last_entry] to the pool,
 * or frees it outright when pooling is compiled out. */
static void
recycle_entries(List *list, Entry *first_entry, Entry *last_entry)
{
#ifdef USE_MEM_POOLS
    last_entry->next = list->entry_pool;
    list->entry_pool = first_entry;
#else
    last_entry->next = NULL;
    free_entries(first_entry);
#endif
}

/* Removes and returns the head value, or Qundef when the list is
 * empty (Qundef is the out-of-band "nothing" marker in this file). */
static VALUE
shift_list(List *list)
{
    Entry *entry;
    VALUE value;

    entry = list->entries;
    if (!entry) return Qundef;

    list->entries = entry->next;
    if (entry == list->last_entry) {
        list->last_entry = NULL;
    }

    --list->size;

    value = entry->value;
    recycle_entries(list, entry, entry);

    return value;
}

/* Removes the first entry whose value is identical (==) to value, if
 * any.  Used to drop a thread from a wait list after a spurious
 * wakeup. */
static void
remove_one(List *list, VALUE value)
{
    Entry **ref;     /* pointer to the link pointing at entry */
    Entry *prev;
    Entry *entry;

    for (ref = &list->entries, prev = NULL, entry = list->entries;
         entry != NULL;
         ref = &entry->next, prev = entry, entry = entry->next) {
        if (entry->value == value) {
            *ref = entry->next;
            list->size--;
            if (!entry->next) {
                /* removed the tail: previous node (or NULL) is the new tail */
                list->last_entry = prev;
            }
            recycle_entries(list, entry, entry);
            break;
        }
    }
}

/* Empties the list, recycling the whole chain in one operation. */
static void
clear_list(List *list)
{
    if (list->last_entry) {
        recycle_entries(list, list->entries, list->last_entry);
        list->entries = NULL;
        list->last_entry = NULL;
        list->size = 0;
    }
}

/* Builds a new Ruby Array containing the list's values in order. */
static VALUE
array_from_list(List const *list)
{
    VALUE ary;
    Entry *entry;
    ary = rb_ary_new();
    for (entry = list->entries; entry; entry = entry->next) {
        rb_ary_push(ary, entry->value);
    }
    return ary;
}
|
194
|
+
|
195
|
+
/* Wakes thread, swallowing the ThreadError raised when it is dead. */
static VALUE
wake_thread(VALUE thread)
{
    return rb_rescue2(rb_thread_wakeup, thread,
                      NULL, Qundef, private_eThreadError, 0);
}

/* Wakes thread and invokes the scheduler so it runs next; ThreadError
 * from a dead thread is swallowed. */
static VALUE
run_thread(VALUE thread)
{
    return rb_rescue2(rb_thread_run, thread,
                      NULL, Qundef, private_eThreadError, 0);
}

/* Pops waiters until one is successfully woken (dead threads are
 * skipped).  Returns the woken thread, or Qnil if none remained. */
static VALUE
wake_one(List *list)
{
    VALUE waking;

    waking = Qnil;
    while (list->entries && !RTEST(waking)) {
        waking = wake_thread(shift_list(list));
    }

    return waking;
}

/* Wakes every thread currently on the list. */
static VALUE
wake_all(List *list)
{
    while (list->entries) {
        wake_one(list);
    }
    return Qnil;
}

/* Enqueues the current thread on the wait list and sleeps.
 * Caller must hold rb_thread_critical; rb_thread_stop releases it. */
static VALUE
wait_list_inner(List *list)
{
    push_list(list, rb_thread_current());
    rb_thread_stop();
    return Qnil;
}

/* Ensure-half of wait_list: removes the current thread from the wait
 * list no matter how the sleep ended. */
static VALUE
wait_list_cleanup(List *list)
{
    /* cleanup in case of spurious wakeups */
    remove_one(list, rb_thread_current());
    return Qnil;
}

/* Blocks the current thread on list until woken; always removes it
 * from the list afterwards, even on exception or spurious wakeup. */
static void
wait_list(List *list)
{
    rb_ensure(wait_list_inner, (VALUE)list, wait_list_cleanup, (VALUE)list);
}

/* Sanity check used by the free callbacks: a synchronization object
 * must never be GC-collected while live threads still wait on it --
 * that is an interpreter bug, hence rb_bug. */
static void
assert_no_survivors(List *waiting, const char *label, void *addr)
{
    Entry *entry;
    for (entry = waiting->entries; entry; entry = entry->next) {
        if (RTEST(wake_thread(entry->value))) {
            rb_bug("%s %p freed with live thread(s) waiting", label, addr);
        }
    }
}
|
263
|
+
|
264
|
+
/*
|
265
|
+
* Document-class: Mutex
|
266
|
+
*
|
267
|
+
* Mutex implements a simple semaphore that can be used to coordinate access to
|
268
|
+
* shared data from multiple concurrent threads.
|
269
|
+
*
|
270
|
+
* Example:
|
271
|
+
*
|
272
|
+
* require 'thread'
|
273
|
+
* semaphore = Mutex.new
|
274
|
+
*
|
275
|
+
* a = Thread.new {
|
276
|
+
* semaphore.synchronize {
|
277
|
+
* # access shared resource
|
278
|
+
* }
|
279
|
+
* }
|
280
|
+
*
|
281
|
+
* b = Thread.new {
|
282
|
+
* semaphore.synchronize {
|
283
|
+
* # access shared resource
|
284
|
+
* }
|
285
|
+
* }
|
286
|
+
*
|
287
|
+
*/
|
288
|
+
|
289
|
+
/* A mutex: the owning thread plus a FIFO of threads blocked trying
 * to acquire it. */
typedef struct _Mutex {
    VALUE owner;   /* thread currently holding the lock, or Qnil */
    List waiting;  /* blocked acquirers, oldest first */
} Mutex;

/* GC mark: keep the owner and all waiters alive. */
static void
mark_mutex(Mutex *mutex)
{
    rb_gc_mark(mutex->owner);
    mark_list(&mutex->waiting);
}

/* Frees memory owned by the mutex (the wait list's nodes). */
static void
finalize_mutex(Mutex *mutex)
{
    finalize_list(&mutex->waiting);
}

/* Data_Wrap_Struct free callback.  Collecting a mutex with live
 * waiters would strand those threads, so it is treated as a bug. */
static void
free_mutex(Mutex *mutex)
{
    assert_no_survivors(&mutex->waiting, "mutex", mutex);
    finalize_mutex(mutex);
    xfree(mutex);
}

/* Initializes a freshly allocated (or embedded) Mutex to unlocked. */
static void
init_mutex(Mutex *mutex)
{
    mutex->owner = Qnil;
    init_list(&mutex->waiting);
}
|
321
|
+
|
322
|
+
/*
|
323
|
+
* Document-method: new
|
324
|
+
* call-seq: Mutex.new
|
325
|
+
*
|
326
|
+
* Creates a new Mutex
|
327
|
+
*
|
328
|
+
*/
|
329
|
+
|
330
|
+
static VALUE
|
331
|
+
rb_mutex_alloc(VALUE klass)
|
332
|
+
{
|
333
|
+
Mutex *mutex;
|
334
|
+
mutex = ALLOC(Mutex);
|
335
|
+
init_mutex(mutex);
|
336
|
+
return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
|
337
|
+
}
|
338
|
+
|
339
|
+
/*
|
340
|
+
* Document-method: locked?
|
341
|
+
* call-seq: locked?
|
342
|
+
*
|
343
|
+
* Returns +true+ if this lock is currently held by some thread.
|
344
|
+
*
|
345
|
+
*/
|
346
|
+
|
347
|
+
static VALUE
|
348
|
+
rb_mutex_locked_p(VALUE self)
|
349
|
+
{
|
350
|
+
Mutex *mutex;
|
351
|
+
Data_Get_Struct(self, Mutex, mutex);
|
352
|
+
return RTEST(mutex->owner) ? Qtrue : Qfalse;
|
353
|
+
}
|
354
|
+
|
355
|
+
/*
|
356
|
+
* Document-method: try_lock
|
357
|
+
* call-seq: try_lock
|
358
|
+
*
|
359
|
+
* Attempts to obtain the lock and returns immediately. Returns +true+ if the
|
360
|
+
* lock was granted.
|
361
|
+
*
|
362
|
+
*/
|
363
|
+
|
364
|
+
static VALUE
|
365
|
+
rb_mutex_try_lock(VALUE self)
|
366
|
+
{
|
367
|
+
Mutex *mutex;
|
368
|
+
|
369
|
+
Data_Get_Struct(self, Mutex, mutex);
|
370
|
+
|
371
|
+
if (RTEST(mutex->owner))
|
372
|
+
return Qfalse;
|
373
|
+
|
374
|
+
mutex->owner = rb_thread_current();
|
375
|
+
return Qtrue;
|
376
|
+
}
|
377
|
+
|
378
|
+
/*
|
379
|
+
* Document-method: lock
|
380
|
+
* call-seq: lock
|
381
|
+
*
|
382
|
+
* Attempts to grab the lock and waits if it isn't available.
|
383
|
+
*
|
384
|
+
*/
|
385
|
+
|
386
|
+
/* Acquires the mutex for the current thread, blocking until it is
 * free.  Green-thread locking: rb_thread_critical disables the
 * scheduler, making the test-and-set below atomic. */
static VALUE
lock_mutex(Mutex *mutex)
{
    VALUE current;
    current = rb_thread_current();

    rb_thread_critical = 1;

    while (RTEST(mutex->owner)) {
        wait_list(&mutex->waiting);
        /* sleeping released the critical section; re-arm it before
         * re-testing the owner */
        rb_thread_critical = 1;
    }
    mutex->owner = current;

    rb_thread_critical = 0;
    return Qnil;
}

/* Mutex#lock: blocks until acquired, then returns self. */
static VALUE
rb_mutex_lock(VALUE self)
{
    Mutex *mutex;
    Data_Get_Struct(self, Mutex, mutex);
    lock_mutex(mutex);
    return self;
}
|
412
|
+
|
413
|
+
/*
|
414
|
+
* Document-method: unlock
|
415
|
+
*
|
416
|
+
* Releases the lock. Returns +nil+ if ref wasn't locked.
|
417
|
+
*
|
418
|
+
*/
|
419
|
+
|
420
|
+
/* Core of unlock; caller must have rb_thread_critical set.
 * Returns Qundef if the mutex was not locked, otherwise the thread
 * woken to take over (Qnil if nobody was waiting). */
static VALUE
unlock_mutex_inner(Mutex *mutex)
{
    VALUE waking;

    if (!RTEST(mutex->owner)) {
        return Qundef;
    }

    mutex->owner = Qnil;
    waking = wake_one(&mutex->waiting);

    return waking;
}

/* rb_ensure callback: restores rb_thread_critical to value.
 * Returns Qundef so its result can never be mistaken for a thread. */
static VALUE
set_critical(VALUE value)
{
    rb_thread_critical = (int)value;
    return Qundef;
}

/* Unlocks the mutex and schedules one waiter, if any.
 * Returns Qtrue on success, Qfalse if the mutex was not locked. */
static VALUE
unlock_mutex(Mutex *mutex)
{
    VALUE waking;

    rb_thread_critical = 1;
    /* ensure the critical section ends even if wake_one raises */
    waking = rb_ensure(unlock_mutex_inner, (VALUE)mutex, set_critical, 0);

    if (waking == Qundef) {
        /* wasn't locked */
        return Qfalse;
    }

    if (RTEST(waking)) {
        run_thread(waking);
    }

    return Qtrue;
}

/* Mutex#unlock: returns self, or nil if the mutex was not locked. */
static VALUE
rb_mutex_unlock(VALUE self)
{
    Mutex *mutex;
    Data_Get_Struct(self, Mutex, mutex);

    if (RTEST(unlock_mutex(mutex))) {
        return self;
    } else {
        return Qnil;
    }
}
|
473
|
+
|
474
|
+
/*
|
475
|
+
* Document-method: exclusive_unlock
|
476
|
+
* call-seq: exclusive_unlock { ... }
|
477
|
+
*
|
478
|
+
* If the mutex is locked, unlocks the mutex, wakes one waiting thread, and
|
479
|
+
* yields in a critical section.
|
480
|
+
*
|
481
|
+
*/
|
482
|
+
|
483
|
+
/* Runs inside the critical section: unlocks, then yields so the
 * caller's block executes before any woken thread gets scheduled. */
static VALUE
rb_mutex_exclusive_unlock_inner(Mutex *mutex)
{
    VALUE waking;
    waking = unlock_mutex_inner(mutex);
    rb_yield(Qundef);
    return waking;
}

/* Mutex#exclusive_unlock { ... }: unlock, yield in a critical
 * section, then run one waiter.  Returns self, or nil when the
 * mutex was not locked. */
static VALUE
rb_mutex_exclusive_unlock(VALUE self)
{
    Mutex *mutex;
    VALUE waking;
    Data_Get_Struct(self, Mutex, mutex);

    rb_thread_critical = 1;
    waking = rb_ensure(rb_mutex_exclusive_unlock_inner, (VALUE)mutex, set_critical, 0);

    if (waking == Qundef) {
        /* mutex was not locked */
        return Qnil;
    }

    if (RTEST(waking)) {
        run_thread(waking);
    }

    return self;
}

/*
 * Document-method: synchronize
 * call-seq: synchronize { ... }
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes.  See the example under Mutex.
 *
 */

static VALUE
rb_mutex_synchronize(VALUE self)
{
    rb_mutex_lock(self);
    /* the ensure guarantees unlock even if the block raises */
    return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
}
|
528
|
+
|
529
|
+
/*
|
530
|
+
* Document-class: ConditionVariable
|
531
|
+
*
|
532
|
+
* ConditionVariable objects augment class Mutex. Using condition variables,
|
533
|
+
* it is possible to suspend while in the middle of a critical section until a
|
534
|
+
* resource becomes available.
|
535
|
+
*
|
536
|
+
* Example:
|
537
|
+
*
|
538
|
+
* require 'thread'
|
539
|
+
*
|
540
|
+
* mutex = Mutex.new
|
541
|
+
* resource = ConditionVariable.new
|
542
|
+
*
|
543
|
+
* a = Thread.new {
|
544
|
+
* mutex.synchronize {
|
545
|
+
* # Thread 'a' now needs the resource
|
546
|
+
* resource.wait(mutex)
|
547
|
+
* # 'a' can now have the resource
|
548
|
+
* }
|
549
|
+
* }
|
550
|
+
*
|
551
|
+
* b = Thread.new {
|
552
|
+
* mutex.synchronize {
|
553
|
+
* # Thread 'b' has finished using the resource
|
554
|
+
* resource.signal
|
555
|
+
* }
|
556
|
+
* }
|
557
|
+
*
|
558
|
+
*/
|
559
|
+
|
560
|
+
/* A condition variable is just a FIFO of waiting threads. */
typedef struct _ConditionVariable {
    List waiting;
} ConditionVariable;

/* GC mark: keep all waiting threads alive. */
static void
mark_condvar(ConditionVariable *condvar)
{
    mark_list(&condvar->waiting);
}

/* Frees memory owned by the condvar (wait-list nodes). */
static void
finalize_condvar(ConditionVariable *condvar)
{
    finalize_list(&condvar->waiting);
}

/* Free callback; like free_mutex, collection with live waiters is
 * treated as an interpreter bug. */
static void
free_condvar(ConditionVariable *condvar)
{
    assert_no_survivors(&condvar->waiting, "condition variable", condvar);
    finalize_condvar(condvar);
    xfree(condvar);
}

static void
init_condvar(ConditionVariable *condvar)
{
    init_list(&condvar->waiting);
}

/*
 * Document-method: new
 * call-seq: ConditionVariable.new
 *
 * Creates a new ConditionVariable
 *
 */

static VALUE
rb_condvar_alloc(VALUE klass)
{
    ConditionVariable *condvar;

    condvar = ALLOC(ConditionVariable);
    init_condvar(condvar);

    return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
}
|
608
|
+
|
609
|
+
/*
|
610
|
+
* Document-method: wait
|
611
|
+
* call-seq: wait
|
612
|
+
*
|
613
|
+
* Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
|
614
|
+
*
|
615
|
+
*/
|
616
|
+
|
617
|
+
/* Releases mutex (which the current thread must own), sleeps on the
 * condvar, then reacquires the mutex even if the wait is interrupted.
 * Entered and left outside the critical section. */
static void
wait_condvar(ConditionVariable *condvar, Mutex *mutex)
{
    rb_thread_critical = 1;
    if (rb_thread_current() != mutex->owner) {
        /* drop the critical section before raising */
        rb_thread_critical = 0;
        rb_raise(private_eThreadError, "not owner of the synchronization mutex");
    }
    unlock_mutex_inner(mutex);
    /* lock_mutex is the ensure: the mutex is reacquired no matter
     * how the wait ends */
    rb_ensure(wait_list, (VALUE)&condvar->waiting, lock_mutex, (VALUE)mutex);
}

/* --- interop with non-fastthread mutex-like objects --- */

static VALUE
legacy_exclusive_unlock(VALUE mutex)
{
    return rb_funcall(mutex, rb_intern("exclusive_unlock"), 0);
}

typedef struct {
    ConditionVariable *condvar;
    VALUE mutex;  /* arbitrary object responding to #lock */
} legacy_wait_args;

/* Block passed to exclusive_unlock: wait on the condvar, then relock
 * via the object's own #lock method. */
static VALUE
legacy_wait(VALUE unused, legacy_wait_args *args)
{
    wait_list(&args->condvar->waiting);
    rb_funcall(args->mutex, rb_intern("lock"), 0);
    return Qnil;
}

/* ConditionVariable#wait(mutex): releases mutex, waits for a signal
 * or broadcast, reacquires mutex, and returns self. */
static VALUE
rb_condvar_wait(VALUE self, VALUE mutex_v)
{
    ConditionVariable *condvar;
    Data_Get_Struct(self, ConditionVariable, condvar);

    if (CLASS_OF(mutex_v) != rb_cMutex) {
        /* interoperate with legacy mutex */
        legacy_wait_args args;
        args.condvar = condvar;
        args.mutex = mutex_v;
        rb_iterate(legacy_exclusive_unlock, mutex_v, legacy_wait, (VALUE)&args);
    } else {
        Mutex *mutex;
        Data_Get_Struct(mutex_v, Mutex, mutex);
        wait_condvar(condvar, mutex);
    }

    return self;
}
|
668
|
+
|
669
|
+
/*
 * Document-method: broadcast
 * call-seq: broadcast
 *
 * Wakes up all threads waiting for this condition.
 *
 */

static VALUE
rb_condvar_broadcast(VALUE self)
{
    ConditionVariable *condvar;

    Data_Get_Struct(self, ConditionVariable, condvar);

    rb_thread_critical = 1;
    /* set_critical(0) runs even if wake_all raises */
    rb_ensure(wake_all, (VALUE)&condvar->waiting, set_critical, 0);
    /* give the woken threads a chance to run */
    rb_thread_schedule();

    return self;
}

/*
 * Document-method: signal
 * call-seq: signal
 *
 * Wakes up the first thread in line waiting for this condition.
 *
 */

/* Wakes one waiter (skipping dead threads) and runs it immediately. */
static void
signal_condvar(ConditionVariable *condvar)
{
    VALUE waking;
    rb_thread_critical = 1;
    waking = rb_ensure(wake_one, (VALUE)&condvar->waiting, set_critical, 0);
    if (RTEST(waking)) {
        run_thread(waking);
    }
}

static VALUE
rb_condvar_signal(VALUE self)
{
    ConditionVariable *condvar;
    Data_Get_Struct(self, ConditionVariable, condvar);
    signal_condvar(condvar);
    return self;
}
|
718
|
+
|
719
|
+
/*
|
720
|
+
* Document-class: Queue
|
721
|
+
*
|
722
|
+
* This class provides a way to synchronize communication between threads.
|
723
|
+
*
|
724
|
+
* Example:
|
725
|
+
*
|
726
|
+
* require 'thread'
|
727
|
+
*
|
728
|
+
* queue = Queue.new
|
729
|
+
*
|
730
|
+
* producer = Thread.new do
|
731
|
+
* 5.times do |i|
|
732
|
+
* sleep rand(i) # simulate expense
|
733
|
+
* queue << i
|
734
|
+
* puts "#{i} produced"
|
735
|
+
* end
|
736
|
+
* end
|
737
|
+
*
|
738
|
+
* consumer = Thread.new do
|
739
|
+
* 5.times do |i|
|
740
|
+
* value = queue.pop
|
741
|
+
* sleep rand(i/2) # simulate expense
|
742
|
+
* puts "consumed #{value}"
|
743
|
+
* end
|
744
|
+
* end
|
745
|
+
*
|
746
|
+
* consumer.join
|
747
|
+
*
|
748
|
+
*/
|
749
|
+
|
750
|
+
/* A (possibly bounded) FIFO queue built from the primitives above. */
typedef struct _Queue {
    Mutex mutex;                        /* guards all fields below */
    ConditionVariable value_available;  /* signalled by push */
    ConditionVariable space_available;  /* signalled by pop (bounded queues) */
    List values;                        /* the queued objects */
    unsigned long capacity;             /* 0 = unbounded (plain Queue) */
} Queue;

static void
mark_queue(Queue *queue)
{
    mark_mutex(&queue->mutex);
    mark_condvar(&queue->value_available);
    mark_condvar(&queue->space_available);
    mark_list(&queue->values);
}

static void
finalize_queue(Queue *queue)
{
    finalize_mutex(&queue->mutex);
    finalize_condvar(&queue->value_available);
    finalize_condvar(&queue->space_available);
    finalize_list(&queue->values);
}

/* Free callback: a queue must not be collected while threads are
 * blocked on any of its three internal wait lists. */
static void
free_queue(Queue *queue)
{
    assert_no_survivors(&queue->mutex.waiting, "queue", queue);
    assert_no_survivors(&queue->space_available.waiting, "queue", queue);
    assert_no_survivors(&queue->value_available.waiting, "queue", queue);
    finalize_queue(queue);
    xfree(queue);
}

static void
init_queue(Queue *queue)
{
    init_mutex(&queue->mutex);
    init_condvar(&queue->value_available);
    init_condvar(&queue->space_available);
    init_list(&queue->values);
    queue->capacity = 0;
}
|
795
|
+
|
796
|
+
/*
|
797
|
+
* Document-method: new
|
798
|
+
* call-seq: new
|
799
|
+
*
|
800
|
+
* Creates a new queue.
|
801
|
+
*
|
802
|
+
*/
|
803
|
+
|
804
|
+
static VALUE
rb_queue_alloc(VALUE klass)
{
    Queue *queue;
    queue = ALLOC(Queue);
    init_queue(queue);
    return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
}

/* Marshal support.  The dump format is [capacity, value, value, ...]. */
static VALUE
rb_queue_marshal_load(VALUE self, VALUE data)
{
    Queue *queue;
    VALUE array;
    Data_Get_Struct(self, Queue, queue);

    array = rb_marshal_load(data);
    if (TYPE(array) != T_ARRAY) {
        rb_raise(rb_eRuntimeError, "expected Array of queue data");
    }
    if (RARRAY(array)->len < 1) {
        rb_raise(rb_eRuntimeError, "missing capacity value");
    }
    /* first element is the capacity; the rest are the queued values */
    queue->capacity = NUM2ULONG(rb_ary_shift(array));
    push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);

    return self;
}

static VALUE
rb_queue_marshal_dump(VALUE self)
{
    Queue *queue;
    VALUE array;
    Data_Get_Struct(self, Queue, queue);

    /* NOTE(review): the snapshot is taken without holding the queue's
     * mutex -- presumably acceptable for marshalling; verify if dumps
     * can race with concurrent push/pop. */
    array = array_from_list(&queue->values);
    rb_ary_unshift(array, ULONG2NUM(queue->capacity));
    return rb_marshal_dump(array, Qnil);
}
|
844
|
+
|
845
|
+
/*
|
846
|
+
* Document-method: clear
|
847
|
+
* call-seq: clear
|
848
|
+
*
|
849
|
+
* Removes all objects from the queue.
|
850
|
+
*
|
851
|
+
*/
|
852
|
+
|
853
|
+
static VALUE
|
854
|
+
rb_queue_clear(VALUE self)
|
855
|
+
{
|
856
|
+
Queue *queue;
|
857
|
+
Data_Get_Struct(self, Queue, queue);
|
858
|
+
|
859
|
+
lock_mutex(&queue->mutex);
|
860
|
+
clear_list(&queue->values);
|
861
|
+
signal_condvar(&queue->space_available);
|
862
|
+
unlock_mutex(&queue->mutex);
|
863
|
+
|
864
|
+
return self;
|
865
|
+
}
|
866
|
+
|
867
|
+
/*
|
868
|
+
* Document-method: empty?
|
869
|
+
* call-seq: empty?
|
870
|
+
*
|
871
|
+
* Returns +true+ if the queue is empty.
|
872
|
+
*
|
873
|
+
*/
|
874
|
+
|
875
|
+
static VALUE
|
876
|
+
rb_queue_empty_p(VALUE self)
|
877
|
+
{
|
878
|
+
Queue *queue;
|
879
|
+
VALUE result;
|
880
|
+
Data_Get_Struct(self, Queue, queue);
|
881
|
+
|
882
|
+
lock_mutex(&queue->mutex);
|
883
|
+
result = queue->values.size == 0 ? Qtrue : Qfalse;
|
884
|
+
unlock_mutex(&queue->mutex);
|
885
|
+
|
886
|
+
return result;
|
887
|
+
}
|
888
|
+
|
889
|
+
/*
|
890
|
+
* Document-method: length
|
891
|
+
* call-seq: length
|
892
|
+
*
|
893
|
+
* Returns the length of the queue.
|
894
|
+
*
|
895
|
+
*/
|
896
|
+
|
897
|
+
static VALUE
|
898
|
+
rb_queue_length(VALUE self)
|
899
|
+
{
|
900
|
+
Queue *queue;
|
901
|
+
VALUE result;
|
902
|
+
Data_Get_Struct(self, Queue, queue);
|
903
|
+
|
904
|
+
lock_mutex(&queue->mutex);
|
905
|
+
result = ULONG2NUM(queue->values.size);
|
906
|
+
unlock_mutex(&queue->mutex);
|
907
|
+
|
908
|
+
return result;
|
909
|
+
}
|
910
|
+
|
911
|
+
/*
|
912
|
+
* Document-method: num_waiting
|
913
|
+
* call-seq: num_waiting
|
914
|
+
*
|
915
|
+
* Returns the number of threads waiting on the queue.
|
916
|
+
*
|
917
|
+
*/
|
918
|
+
|
919
|
+
static VALUE
|
920
|
+
rb_queue_num_waiting(VALUE self)
|
921
|
+
{
|
922
|
+
Queue *queue;
|
923
|
+
VALUE result;
|
924
|
+
Data_Get_Struct(self, Queue, queue);
|
925
|
+
|
926
|
+
lock_mutex(&queue->mutex);
|
927
|
+
result = ULONG2NUM(queue->value_available.waiting.size +
|
928
|
+
queue->space_available.waiting.size);
|
929
|
+
unlock_mutex(&queue->mutex);
|
930
|
+
|
931
|
+
return result;
|
932
|
+
}
|
933
|
+
|
934
|
+
/*
 * Document-method: pop
 * call-seq: pop(non_block=false)
 *
 * Retrieves data from the queue. If the queue is empty, the calling thread is
 * suspended until data is pushed onto the queue. If +non_block+ is true, the
 * thread isn't suspended, and an exception is raised.
 *
 */

static VALUE
rb_queue_pop(int argc, VALUE *argv, VALUE self)
{
    Queue *queue;
    int should_block;
    VALUE result;
    Data_Get_Struct(self, Queue, queue);

    /* optional non_block flag: truthy means raise instead of waiting */
    if (argc == 0) {
        should_block = 1;
    } else if (argc == 1) {
        should_block = !RTEST(argv[0]);
    } else {
        rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
    }

    lock_mutex(&queue->mutex);
    if (!queue->values.entries && !should_block) {
        unlock_mutex(&queue->mutex);
        rb_raise(private_eThreadError, "queue empty");
    }

    /* loop: a wait can end with the value already taken by another
     * consumer, so re-check before shifting */
    while (!queue->values.entries) {
        wait_condvar(&queue->value_available, &queue->mutex);
    }

    result = shift_list(&queue->values);
    if (queue->capacity && queue->values.size < queue->capacity) {
        /* a slot opened up on a bounded queue: wake one producer */
        signal_condvar(&queue->space_available);
    }
    unlock_mutex(&queue->mutex);

    return result;
}
|
978
|
+
|
979
|
+
/*
 * Document-method: push
 * call-seq: push(obj)
 *
 * Pushes +obj+ to the queue.
 *
 */

static VALUE
rb_queue_push(VALUE self, VALUE value)
{
    Queue *queue;
    Data_Get_Struct(self, Queue, queue);

    lock_mutex(&queue->mutex);
    /* capacity == 0 means unbounded: never wait for space */
    while (queue->capacity && queue->values.size >= queue->capacity) {
        wait_condvar(&queue->space_available, &queue->mutex);
    }
    push_list(&queue->values, value);
    /* wake one consumer blocked in pop, if any */
    signal_condvar(&queue->value_available);
    unlock_mutex(&queue->mutex);

    return self;
}
|
1003
|
+
|
1004
|
+
/*
|
1005
|
+
* Document-class: SizedQueue
|
1006
|
+
*
|
1007
|
+
* This class represents queues of specified size capacity. The push operation
|
1008
|
+
* may be blocked if the capacity is full.
|
1009
|
+
*
|
1010
|
+
* See Queue for an example of how a SizedQueue works.
|
1011
|
+
*
|
1012
|
+
*/
|
1013
|
+
|
1014
|
+
/*
|
1015
|
+
* Document-method: new
|
1016
|
+
* call-seq: new
|
1017
|
+
*
|
1018
|
+
* Creates a fixed-length queue with a maximum size of +max+.
|
1019
|
+
*
|
1020
|
+
*/
|
1021
|
+
|
1022
|
+
/*
|
1023
|
+
* Document-method: max
|
1024
|
+
* call-seq: max
|
1025
|
+
*
|
1026
|
+
* Returns the maximum size of the queue.
|
1027
|
+
*
|
1028
|
+
*/
|
1029
|
+
|
1030
|
+
static VALUE
|
1031
|
+
rb_sized_queue_max(VALUE self)
|
1032
|
+
{
|
1033
|
+
Queue *queue;
|
1034
|
+
VALUE result;
|
1035
|
+
Data_Get_Struct(self, Queue, queue);
|
1036
|
+
|
1037
|
+
lock_mutex(&queue->mutex);
|
1038
|
+
result = ULONG2NUM(queue->capacity);
|
1039
|
+
unlock_mutex(&queue->mutex);
|
1040
|
+
|
1041
|
+
return result;
|
1042
|
+
}
|
1043
|
+
|
1044
|
+
/*
 * Document-method: max=
 * call-seq: max=(size)
 *
 * Sets the maximum size of the queue.
 *
 */

static VALUE
rb_sized_queue_max_set(VALUE self, VALUE value)
{
    Queue *queue;
    unsigned long new_capacity;
    unsigned long difference;
    Data_Get_Struct(self, Queue, queue);

    new_capacity = NUM2ULONG(value);

    if (new_capacity < 1) {
        rb_raise(rb_eArgError, "value must be positive");
    }

    lock_mutex(&queue->mutex);
    /* growing the capacity frees (new - old) slots; wake one blocked
     * producer per freed slot */
    if (queue->capacity && new_capacity > queue->capacity) {
        difference = new_capacity - queue->capacity;
    } else {
        difference = 0;
    }
    queue->capacity = new_capacity;
    for (; difference > 0; --difference) {
        signal_condvar(&queue->space_available);
    }
    unlock_mutex(&queue->mutex);

    return self;
}
|
1080
|
+
|
1081
|
+
/*
|
1082
|
+
* Document-method: push
|
1083
|
+
* call-seq: push(obj)
|
1084
|
+
*
|
1085
|
+
* Pushes +obj+ to the queue. If there is no space left in the queue, waits
|
1086
|
+
* until space becomes available.
|
1087
|
+
*
|
1088
|
+
*/
|
1089
|
+
|
1090
|
+
/*
|
1091
|
+
* Document-method: pop
|
1092
|
+
* call-seq: pop(non_block=false)
|
1093
|
+
*
|
1094
|
+
* Retrieves data from the queue and runs a waiting thread, if any.
|
1095
|
+
*
|
1096
|
+
*/
|
1097
|
+
|
1098
|
+
/* for marshalling mutexes and condvars */
|
1099
|
+
|
1100
|
+
static VALUE
|
1101
|
+
dummy_load(VALUE self, VALUE string)
|
1102
|
+
{
|
1103
|
+
return Qnil;
|
1104
|
+
}
|
1105
|
+
|
1106
|
+
static VALUE
|
1107
|
+
dummy_dump(VALUE self)
|
1108
|
+
{
|
1109
|
+
return rb_str_new2("");
|
1110
|
+
}
|
1111
|
+
|
1112
|
+
|
1113
|
+
/* Replaces the stdlib's pure-Ruby Mutex/ConditionVariable/Queue/
 * SizedQueue (loaded beforehand via rb_require("thread")) with the C
 * implementations.  Runs inside a critical section so no thread can
 * observe the constants half-replaced. */
static VALUE
setup_classes(VALUE unused)
{
    rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Mutex")));
    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
    rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
    rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);

    rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("ConditionVariable")));
    rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
    rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
    rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
    rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);

    rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Queue")));
    rb_cQueue = rb_define_class("Queue", rb_cObject);
    rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
    rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
    rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
    rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_alias(rb_cQueue, rb_intern("enq"), rb_intern("push"));
    rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
    rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
    rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
    rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));

    rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("SizedQueue")));
    rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
    /* SizedQueue.new(max) reuses max=: it just sets the capacity */
    rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cSizedQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
    rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
    rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
    rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
    rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
    rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));

    return Qnil;
}

/* Extension entry point. */
void
Init_fastthread()
{
    int saved_critical;

    rb_require("thread");

    /* earlier rubies don't export rb_eThreadError; look it up by name */
    private_eThreadError = rb_const_get(rb_cObject, rb_intern("ThreadError"));

    /* ensure that classes get replaced atomically */
    saved_critical = rb_thread_critical;
    rb_thread_critical = 1;
    rb_ensure(setup_classes, Qnil, set_critical, (VALUE)saved_critical);
}
|
1186
|
+
|