blackwinter-fastthread 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,10 @@
1
+ v1.0.5 Backwards compatibility tweak for 1.8.5
2
+
3
+ v1.0.4 Fix for JRuby, and re-enable fastthread for 1.8.6p111
4
+
5
+ v1.0.3 Make native build a no-op on Rubies that have no real need for fastthread
6
+
7
+ v1.0.2 Merged stale wait entries fixes from ruby_1_8, fixed SizedQueue#enq
8
+
9
+ v1.0.1 Signed gem.
10
+
@@ -0,0 +1,9 @@
1
+ test/test_queue.rb
2
+ test/test_mutex.rb
3
+ test/test_condvar.rb
4
+ test/test_all.rb
5
+ setup.rb
6
+ Manifest
7
+ ext/fastthread/fastthread.c
8
+ ext/fastthread/extconf.rb
9
+ CHANGELOG
@@ -0,0 +1,26 @@
1
# Rakefile -- gem packaging for fastthread, driven by Echoe.

require 'rubygems'
gem 'echoe', '>=2.7.11'
require 'echoe'

Echoe.new("fastthread") do |spec|
  spec.project    = "mongrel"
  spec.author     = "MenTaLguY <mental@rydia.net>"
  spec.email      = "mental@rydia.net"
  spec.summary    = "Optimized replacement for thread.rb primitives"
  spec.extensions = "ext/fastthread/extconf.rb"
  spec.clean_pattern = ['build/*', '**/*.o', '**/*.so', '**/*.a', 'lib/*-*', '**/*.log', "ext/fastthread/*.{bundle,so,obj,pdb,lib,def,exp}", "ext/fastthread/Makefile", "pkg", "lib/*.bundle", "*.gem", ".config"]

  spec.need_tar_gz = false
  spec.need_tgz = true
  spec.require_signed = false

  # On Windows, ship a binary gem carrying the precompiled extension
  # instead of asking users to build it themselves.
  spec.eval = proc do
    if Platform.windows?
      self.platform = Gem::Platform::CURRENT
      self.files += ['lib/fastthread.so']
      task :package => [:clean, :compile]
    end
  end

end
@@ -0,0 +1,28 @@
1
# extconf.rb for fastthread.
#
# fastthread only helps MRI 1.8 releases older than 1.8.6-p112 (and is
# useless on JRuby / alternative engines).  Everywhere else we write a
# stub Makefile whose install target merely drops an empty fastthread.rb
# in place, so requiring 'fastthread' stays harmless.

major, minor, teeny = RUBY_VERSION.split('.').map { |c| c.to_i }

need_fastthread = !defined?(RUBY_ENGINE) &&
                  RUBY_PLATFORM != 'java' &&
                  [major, minor] == [1, 8] &&
                  (teeny < 6 || (teeny == 6 && RUBY_PATCHLEVEL.to_i < 112))

if need_fastthread
  require 'mkmf'
  create_makefile('fastthread')
else
  require 'rbconfig'
  File.open('Makefile', 'w') do |makefile|
    # Re-export the interpreter's build configuration so the stub
    # Makefile can resolve $(sitearchdir) and $(target_prefix) just like
    # one generated by mkmf.
    Config::CONFIG.each do |key, value|
      makefile.puts "#{key} = #{value}"
    end
    makefile.puts
    makefile << <<EOS
RUBYARCHDIR = $(sitearchdir)$(target_prefix)

default:

install:
	mkdir -p $(RUBYARCHDIR)
	touch $(RUBYARCHDIR)/fastthread.rb

EOS
  end
end
@@ -0,0 +1,1206 @@
1
+ /*
2
+ * Optimized Ruby Mutex implementation, loosely based on thread.rb by
3
+ * Yukihiro Matsumoto <matz@ruby-lang.org>
4
+ *
5
+ * Copyright 2006-2007 MenTaLguY <mental@rydia.net>
6
+ *
7
+ * RDoc taken from original.
8
+ *
9
+ * This file is made available under the same terms as Ruby.
10
+ */
11
+
12
+ #include <ruby.h>
13
+ #include <intern.h>
14
+ #include <rubysig.h>
15
+ #include <version.h>
16
+
17
+ static VALUE rb_cMutex;
18
+ static VALUE rb_cConditionVariable;
19
+ static VALUE rb_cQueue;
20
+ static VALUE rb_cSizedQueue;
21
+ /* earlier versions of ruby do not export rb_eThreadError */
22
+ static VALUE private_eThreadError;
23
+
24
+ static VALUE set_critical(VALUE value);
25
+
26
+ static VALUE
27
+ thread_exclusive(VALUE (*func)(ANYARGS), VALUE arg)
28
+ {
29
+ VALUE critical = rb_thread_critical;
30
+
31
+ rb_thread_critical = 1;
32
+ return rb_ensure(func, arg, set_critical, (VALUE)critical);
33
+ }
34
+
35
+ typedef struct _Entry {
36
+ VALUE value;
37
+ struct _Entry *next;
38
+ } Entry;
39
+
40
+ typedef struct _List {
41
+ Entry *entries;
42
+ Entry *last_entry;
43
+ Entry *entry_pool;
44
+ unsigned long size;
45
+ } List;
46
+
47
+ static void
48
+ init_list(List *list)
49
+ {
50
+ list->entries = NULL;
51
+ list->last_entry = NULL;
52
+ list->entry_pool = NULL;
53
+ list->size = 0;
54
+ }
55
+
56
+ static void
57
+ mark_list(List *list)
58
+ {
59
+ Entry *entry;
60
+ for (entry = list->entries; entry; entry = entry->next) {
61
+ rb_gc_mark(entry->value);
62
+ }
63
+ }
64
+
65
+ static void
66
+ free_entries(Entry *first)
67
+ {
68
+ Entry *next;
69
+ while (first) {
70
+ next = first->next;
71
+ xfree(first);
72
+ first = next;
73
+ }
74
+ }
75
+
76
+ static void
77
+ finalize_list(List *list)
78
+ {
79
+ free_entries(list->entries);
80
+ free_entries(list->entry_pool);
81
+ }
82
+
83
+ static void
84
+ push_list(List *list, VALUE value)
85
+ {
86
+ Entry *entry;
87
+
88
+ if (list->entry_pool) {
89
+ entry = list->entry_pool;
90
+ list->entry_pool = entry->next;
91
+ } else {
92
+ entry = ALLOC(Entry);
93
+ }
94
+
95
+ entry->value = value;
96
+ entry->next = NULL;
97
+
98
+ if (list->last_entry) {
99
+ list->last_entry->next = entry;
100
+ } else {
101
+ list->entries = entry;
102
+ }
103
+ list->last_entry = entry;
104
+
105
+ ++list->size;
106
+ }
107
+
108
+ static void
109
+ push_multiple_list(List *list, VALUE *values, unsigned count)
110
+ {
111
+ unsigned i;
112
+ for (i = 0; i < count; i++) {
113
+ push_list(list, values[i]);
114
+ }
115
+ }
116
+
117
+ static void
118
+ recycle_entries(List *list, Entry *first_entry, Entry *last_entry)
119
+ {
120
+ #ifdef USE_MEM_POOLS
121
+ last_entry->next = list->entry_pool;
122
+ list->entry_pool = first_entry;
123
+ #else
124
+ last_entry->next = NULL;
125
+ free_entries(first_entry);
126
+ #endif
127
+ }
128
+
129
+ static VALUE
130
+ shift_list(List *list)
131
+ {
132
+ Entry *entry;
133
+ VALUE value;
134
+
135
+ entry = list->entries;
136
+ if (!entry) return Qnil;
137
+
138
+ list->entries = entry->next;
139
+ if (entry == list->last_entry) {
140
+ list->last_entry = NULL;
141
+ }
142
+
143
+ --list->size;
144
+
145
+ value = entry->value;
146
+ recycle_entries(list, entry, entry);
147
+
148
+ return value;
149
+ }
150
+
151
+ static void
152
+ remove_one(List *list, VALUE value)
153
+ {
154
+ Entry **ref;
155
+ Entry *prev;
156
+ Entry *entry;
157
+
158
+ for (ref = &list->entries, prev = NULL, entry = list->entries;
159
+ entry != NULL;
160
+ ref = &entry->next, prev = entry, entry = entry->next) {
161
+ if (entry->value == value) {
162
+ *ref = entry->next;
163
+ list->size--;
164
+ if (!entry->next) {
165
+ list->last_entry = prev;
166
+ }
167
+ recycle_entries(list, entry, entry);
168
+ break;
169
+ }
170
+ }
171
+ }
172
+
173
+ static void
174
+ clear_list(List *list)
175
+ {
176
+ if (list->last_entry) {
177
+ recycle_entries(list, list->entries, list->last_entry);
178
+ list->entries = NULL;
179
+ list->last_entry = NULL;
180
+ list->size = 0;
181
+ }
182
+ }
183
+
184
+ static VALUE
185
+ array_from_list(List const *list)
186
+ {
187
+ VALUE ary;
188
+ Entry *entry;
189
+ ary = rb_ary_new();
190
+ for (entry = list->entries; entry; entry = entry->next) {
191
+ rb_ary_push(ary, entry->value);
192
+ }
193
+ return ary;
194
+ }
195
+
196
+ static VALUE return_value(VALUE value) {
197
+ return value;
198
+ }
199
+
200
+ static VALUE
201
+ wake_thread(VALUE thread)
202
+ {
203
+ #if RUBY_VERSION_MINOR == 8 && RUBY_VERSION_TEENY >= 6 && RUBY_PATCHLEVEL > 31
204
+ return rb_thread_wakeup_alive(thread);
205
+ #else
206
+ return rb_rescue2(rb_thread_wakeup, thread, return_value, Qnil, private_eThreadError, (VALUE)0);
207
+ #endif
208
+ }
209
+
210
+ static VALUE
211
+ run_thread(VALUE thread)
212
+ {
213
+ thread = wake_thread(thread);
214
+ if (RTEST(thread) && !rb_thread_critical)
215
+ rb_thread_schedule();
216
+ return thread;
217
+ }
218
+
219
+ static VALUE
220
+ wake_one(List *list)
221
+ {
222
+ VALUE waking;
223
+
224
+ waking = Qnil;
225
+ while (list->entries && !RTEST(waking)) {
226
+ waking = wake_thread(shift_list(list));
227
+ }
228
+
229
+ return waking;
230
+ }
231
+
232
+ static VALUE
233
+ wake_all(List *list)
234
+ {
235
+ while (list->entries) {
236
+ wake_one(list);
237
+ }
238
+ return Qnil;
239
+ }
240
+
241
+ static VALUE
242
+ wait_list_inner(List *list)
243
+ {
244
+ push_list(list, rb_thread_current());
245
+ rb_thread_stop();
246
+ return Qnil;
247
+ }
248
+
249
+ static VALUE
250
+ wait_list_cleanup(List *list)
251
+ {
252
+ /* cleanup in case of spurious wakeups */
253
+ remove_one(list, rb_thread_current());
254
+ return Qnil;
255
+ }
256
+
257
+ static VALUE
258
+ wait_list(List *list)
259
+ {
260
+ return rb_ensure(wait_list_inner, (VALUE)list, wait_list_cleanup, (VALUE)list);
261
+ }
262
+
263
+ static void
264
+ kill_waiting_threads(List *waiting)
265
+ {
266
+ Entry *entry;
267
+
268
+ for (entry = waiting->entries; entry; entry = entry->next) {
269
+ rb_thread_kill(entry->value);
270
+ }
271
+ }
272
+
273
+ /*
274
+ * Document-class: Mutex
275
+ *
276
+ * Mutex implements a simple semaphore that can be used to coordinate access to
277
+ * shared data from multiple concurrent threads.
278
+ *
279
+ * Example:
280
+ *
281
+ * require 'thread'
282
+ * semaphore = Mutex.new
283
+ *
284
+ * a = Thread.new {
285
+ * semaphore.synchronize {
286
+ * # access shared resource
287
+ * }
288
+ * }
289
+ *
290
+ * b = Thread.new {
291
+ * semaphore.synchronize {
292
+ * # access shared resource
293
+ * }
294
+ * }
295
+ *
296
+ */
297
+
298
+ typedef struct _Mutex {
299
+ VALUE owner;
300
+ List waiting;
301
+ } Mutex;
302
+
303
+ #if RUBY_VERSION_MAJOR == 1 && RUBY_VERSION_MINOR == 8 && RUBY_VERSION_TEENY < 6
304
+ #define MUTEX_LOCKED_P(mutex) (RTEST((mutex)->owner))
305
+ #else
306
+ #define MUTEX_LOCKED_P(mutex) (RTEST((mutex)->owner) && rb_thread_alive_p((mutex)->owner))
307
+ #endif
308
+
309
+ static void
310
+ mark_mutex(Mutex *mutex)
311
+ {
312
+ rb_gc_mark(mutex->owner);
313
+ mark_list(&mutex->waiting);
314
+ }
315
+
316
+ static void
317
+ finalize_mutex(Mutex *mutex)
318
+ {
319
+ finalize_list(&mutex->waiting);
320
+ }
321
+
322
+ static void
323
+ free_mutex(Mutex *mutex)
324
+ {
325
+ kill_waiting_threads(&mutex->waiting);
326
+ finalize_mutex(mutex);
327
+ xfree(mutex);
328
+ }
329
+
330
+ static void
331
+ init_mutex(Mutex *mutex)
332
+ {
333
+ mutex->owner = Qnil;
334
+ init_list(&mutex->waiting);
335
+ }
336
+
337
+ /*
338
+ * Document-method: new
339
+ * call-seq: Mutex.new
340
+ *
341
+ * Creates a new Mutex
342
+ *
343
+ */
344
+
345
+ static VALUE
346
+ rb_mutex_alloc(VALUE klass)
347
+ {
348
+ Mutex *mutex;
349
+ mutex = ALLOC(Mutex);
350
+ init_mutex(mutex);
351
+ return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
352
+ }
353
+
354
+ /*
355
+ * Document-method: locked?
356
+ * call-seq: locked?
357
+ *
358
+ * Returns +true+ if this lock is currently held by some thread.
359
+ *
360
+ */
361
+
362
+ static VALUE
363
+ rb_mutex_locked_p(VALUE self)
364
+ {
365
+ Mutex *mutex;
366
+ Data_Get_Struct(self, Mutex, mutex);
367
+ return MUTEX_LOCKED_P(mutex) ? Qtrue : Qfalse;
368
+ }
369
+
370
+ /*
371
+ * Document-method: try_lock
372
+ * call-seq: try_lock
373
+ *
374
+ * Attempts to obtain the lock and returns immediately. Returns +true+ if the
375
+ * lock was granted.
376
+ *
377
+ */
378
+
379
+ static VALUE
380
+ rb_mutex_try_lock(VALUE self)
381
+ {
382
+ Mutex *mutex;
383
+
384
+ Data_Get_Struct(self, Mutex, mutex);
385
+
386
+ if (MUTEX_LOCKED_P(mutex))
387
+ return Qfalse;
388
+
389
+ mutex->owner = rb_thread_current();
390
+ return Qtrue;
391
+ }
392
+
393
+ /*
394
+ * Document-method: lock
395
+ * call-seq: lock
396
+ *
397
+ * Attempts to grab the lock and waits if it isn't available.
398
+ *
399
+ */
400
+
401
+ static VALUE
402
+ lock_mutex(Mutex *mutex)
403
+ {
404
+ VALUE current;
405
+ current = rb_thread_current();
406
+
407
+ rb_thread_critical = 1;
408
+
409
+ if (!MUTEX_LOCKED_P(mutex)) {
410
+ mutex->owner = current;
411
+ }
412
+ else {
413
+ do {
414
+ wait_list(&mutex->waiting);
415
+ rb_thread_critical = 1;
416
+ if (!MUTEX_LOCKED_P(mutex)) {
417
+ mutex->owner = current;
418
+ break;
419
+ }
420
+ } while (mutex->owner != current);
421
+ }
422
+
423
+ rb_thread_critical = 0;
424
+ return Qnil;
425
+ }
426
+
427
+ static VALUE
428
+ rb_mutex_lock(VALUE self)
429
+ {
430
+ Mutex *mutex;
431
+ Data_Get_Struct(self, Mutex, mutex);
432
+ lock_mutex(mutex);
433
+ return self;
434
+ }
435
+
436
+ /*
437
+ * Document-method: unlock
438
+ *
439
+ * Releases the lock. Returns +nil+ if ref wasn't locked.
440
+ *
441
+ */
442
+
443
+ static VALUE
444
+ unlock_mutex_inner(Mutex *mutex)
445
+ {
446
+ VALUE waking;
447
+
448
+ if (mutex->owner != rb_thread_current()) {
449
+ rb_raise(private_eThreadError, "not owner");
450
+ }
451
+
452
+ mutex->owner = Qnil;
453
+ waking = wake_one(&mutex->waiting);
454
+ mutex->owner = waking;
455
+
456
+ return waking;
457
+ }
458
+
459
+ static VALUE
460
+ set_critical(VALUE value)
461
+ {
462
+ rb_thread_critical = (int)value;
463
+ return Qundef;
464
+ }
465
+
466
+ static VALUE
467
+ unlock_mutex(Mutex *mutex)
468
+ {
469
+ VALUE waking = thread_exclusive(unlock_mutex_inner, (VALUE)mutex);
470
+
471
+ if (!RTEST(waking)) {
472
+ return Qfalse;
473
+ }
474
+
475
+ run_thread(waking);
476
+
477
+ return Qtrue;
478
+ }
479
+
480
+ static VALUE
481
+ rb_mutex_unlock(VALUE self)
482
+ {
483
+ Mutex *mutex;
484
+ Data_Get_Struct(self, Mutex, mutex);
485
+
486
+ if (RTEST(unlock_mutex(mutex))) {
487
+ return self;
488
+ } else {
489
+ return Qnil;
490
+ }
491
+ }
492
+
493
+ /*
494
+ * Document-method: exclusive_unlock
495
+ * call-seq: exclusive_unlock { ... }
496
+ *
497
+ * If the mutex is locked, unlocks the mutex, wakes one waiting thread, and
498
+ * yields in a critical section.
499
+ *
500
+ */
501
+
502
+ static VALUE
503
+ rb_mutex_exclusive_unlock_inner(Mutex *mutex)
504
+ {
505
+ VALUE waking;
506
+ waking = unlock_mutex_inner(mutex);
507
+ rb_yield(Qundef);
508
+ return waking;
509
+ }
510
+
511
+ static VALUE
512
+ rb_mutex_exclusive_unlock(VALUE self)
513
+ {
514
+ Mutex *mutex;
515
+ VALUE waking;
516
+ Data_Get_Struct(self, Mutex, mutex);
517
+
518
+ waking = thread_exclusive(rb_mutex_exclusive_unlock_inner, (VALUE)mutex);
519
+
520
+ if (!RTEST(waking)) {
521
+ return Qnil;
522
+ }
523
+
524
+ run_thread(waking);
525
+
526
+ return self;
527
+ }
528
+
529
+ /*
530
+ * Document-method: synchronize
531
+ * call-seq: synchronize { ... }
532
+ *
533
+ * Obtains a lock, runs the block, and releases the lock when the block
534
+ * completes. See the example under Mutex.
535
+ *
536
+ */
537
+
538
+ static VALUE
539
+ rb_mutex_synchronize(VALUE self)
540
+ {
541
+ rb_mutex_lock(self);
542
+ return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
543
+ }
544
+
545
+ /*
546
+ * Document-class: ConditionVariable
547
+ *
548
+ * ConditionVariable objects augment class Mutex. Using condition variables,
549
+ * it is possible to suspend while in the middle of a critical section until a
550
+ * resource becomes available.
551
+ *
552
+ * Example:
553
+ *
554
+ * require 'thread'
555
+ *
556
+ * mutex = Mutex.new
557
+ * resource = ConditionVariable.new
558
+ *
559
+ * a = Thread.new {
560
+ * mutex.synchronize {
561
+ * # Thread 'a' now needs the resource
562
+ * resource.wait(mutex)
563
+ * # 'a' can now have the resource
564
+ * }
565
+ * }
566
+ *
567
+ * b = Thread.new {
568
+ * mutex.synchronize {
569
+ * # Thread 'b' has finished using the resource
570
+ * resource.signal
571
+ * }
572
+ * }
573
+ *
574
+ */
575
+
576
+ typedef struct _ConditionVariable {
577
+ List waiting;
578
+ } ConditionVariable;
579
+
580
+ static void
581
+ mark_condvar(ConditionVariable *condvar)
582
+ {
583
+ mark_list(&condvar->waiting);
584
+ }
585
+
586
+ static void
587
+ finalize_condvar(ConditionVariable *condvar)
588
+ {
589
+ finalize_list(&condvar->waiting);
590
+ }
591
+
592
+ static void
593
+ free_condvar(ConditionVariable *condvar)
594
+ {
595
+ kill_waiting_threads(&condvar->waiting);
596
+ finalize_condvar(condvar);
597
+ xfree(condvar);
598
+ }
599
+
600
+ static void
601
+ init_condvar(ConditionVariable *condvar)
602
+ {
603
+ init_list(&condvar->waiting);
604
+ }
605
+
606
+ /*
607
+ * Document-method: new
608
+ * call-seq: ConditionVariable.new
609
+ *
610
+ * Creates a new ConditionVariable
611
+ *
612
+ */
613
+
614
+ static VALUE
615
+ rb_condvar_alloc(VALUE klass)
616
+ {
617
+ ConditionVariable *condvar;
618
+
619
+ condvar = ALLOC(ConditionVariable);
620
+ init_condvar(condvar);
621
+
622
+ return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
623
+ }
624
+
625
+ /*
626
+ * Document-method: wait
627
+ * call-seq: wait
628
+ *
629
+ * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
630
+ *
631
+ */
632
+
633
+ static void
634
+ wait_condvar(ConditionVariable *condvar, Mutex *mutex)
635
+ {
636
+ VALUE waking;
637
+
638
+ rb_thread_critical = 1;
639
+ if (rb_thread_current() != mutex->owner) {
640
+ rb_thread_critical = 0;
641
+ rb_raise(private_eThreadError, "not owner of the synchronization mutex");
642
+ }
643
+ waking = unlock_mutex_inner(mutex);
644
+ if (RTEST(waking)) {
645
+ wake_thread(waking);
646
+ }
647
+ rb_ensure(wait_list, (VALUE)&condvar->waiting, lock_mutex, (VALUE)mutex);
648
+ }
649
+
650
+ static VALUE
651
+ legacy_exclusive_unlock(VALUE mutex)
652
+ {
653
+ return rb_funcall(mutex, rb_intern("exclusive_unlock"), 0);
654
+ }
655
+
656
+ typedef struct {
657
+ ConditionVariable *condvar;
658
+ VALUE mutex;
659
+ } legacy_wait_args;
660
+
661
+ static VALUE
662
+ legacy_wait(VALUE unused, legacy_wait_args *args)
663
+ {
664
+ wait_list(&args->condvar->waiting);
665
+ rb_funcall(args->mutex, rb_intern("lock"), 0);
666
+ return Qnil;
667
+ }
668
+
669
+ static VALUE
670
+ rb_condvar_wait(VALUE self, VALUE mutex_v)
671
+ {
672
+ ConditionVariable *condvar;
673
+ Data_Get_Struct(self, ConditionVariable, condvar);
674
+
675
+ if (CLASS_OF(mutex_v) != rb_cMutex) {
676
+ /* interoperate with legacy mutex */
677
+ legacy_wait_args args;
678
+ args.condvar = condvar;
679
+ args.mutex = mutex_v;
680
+ rb_iterate(legacy_exclusive_unlock, mutex_v, legacy_wait, (VALUE)&args);
681
+ } else {
682
+ Mutex *mutex;
683
+ Data_Get_Struct(mutex_v, Mutex, mutex);
684
+ wait_condvar(condvar, mutex);
685
+ }
686
+
687
+ return self;
688
+ }
689
+
690
+ /*
691
+ * Document-method: broadcast
692
+ * call-seq: broadcast
693
+ *
694
+ * Wakes up all threads waiting for this condition.
695
+ *
696
+ */
697
+
698
+ static VALUE
699
+ rb_condvar_broadcast(VALUE self)
700
+ {
701
+ ConditionVariable *condvar;
702
+
703
+ Data_Get_Struct(self, ConditionVariable, condvar);
704
+
705
+ thread_exclusive(wake_all, (VALUE)&condvar->waiting);
706
+ rb_thread_schedule();
707
+
708
+ return self;
709
+ }
710
+
711
+ /*
712
+ * Document-method: signal
713
+ * call-seq: signal
714
+ *
715
+ * Wakes up the first thread in line waiting for this condition.
716
+ *
717
+ */
718
+
719
+ static void
720
+ signal_condvar(ConditionVariable *condvar)
721
+ {
722
+ VALUE waking = thread_exclusive(wake_one, (VALUE)&condvar->waiting);
723
+
724
+ if (RTEST(waking)) {
725
+ run_thread(waking);
726
+ }
727
+ }
728
+
729
+ static VALUE
730
+ rb_condvar_signal(VALUE self)
731
+ {
732
+ ConditionVariable *condvar;
733
+ Data_Get_Struct(self, ConditionVariable, condvar);
734
+ signal_condvar(condvar);
735
+ return self;
736
+ }
737
+
738
+ /*
739
+ * Document-class: Queue
740
+ *
741
+ * This class provides a way to synchronize communication between threads.
742
+ *
743
+ * Example:
744
+ *
745
+ * require 'thread'
746
+ *
747
+ * queue = Queue.new
748
+ *
749
+ * producer = Thread.new do
750
+ * 5.times do |i|
751
+ * sleep rand(i) # simulate expense
752
+ * queue << i
753
+ * puts "#{i} produced"
754
+ * end
755
+ * end
756
+ *
757
+ * consumer = Thread.new do
758
+ * 5.times do |i|
759
+ * value = queue.pop
760
+ * sleep rand(i/2) # simulate expense
761
+ * puts "consumed #{value}"
762
+ * end
763
+ * end
764
+ *
765
+ * consumer.join
766
+ *
767
+ */
768
+
769
+ typedef struct _Queue {
770
+ Mutex mutex;
771
+ ConditionVariable value_available;
772
+ ConditionVariable space_available;
773
+ List values;
774
+ unsigned long capacity;
775
+ } Queue;
776
+
777
+ static void
778
+ mark_queue(Queue *queue)
779
+ {
780
+ mark_mutex(&queue->mutex);
781
+ mark_condvar(&queue->value_available);
782
+ mark_condvar(&queue->space_available);
783
+ mark_list(&queue->values);
784
+ }
785
+
786
+ static void
787
+ finalize_queue(Queue *queue)
788
+ {
789
+ finalize_mutex(&queue->mutex);
790
+ finalize_condvar(&queue->value_available);
791
+ finalize_condvar(&queue->space_available);
792
+ finalize_list(&queue->values);
793
+ }
794
+
795
+ static void
796
+ free_queue(Queue *queue)
797
+ {
798
+ kill_waiting_threads(&queue->mutex.waiting);
799
+ kill_waiting_threads(&queue->space_available.waiting);
800
+ kill_waiting_threads(&queue->value_available.waiting);
801
+ finalize_queue(queue);
802
+ xfree(queue);
803
+ }
804
+
805
+ static void
806
+ init_queue(Queue *queue)
807
+ {
808
+ init_mutex(&queue->mutex);
809
+ init_condvar(&queue->value_available);
810
+ init_condvar(&queue->space_available);
811
+ init_list(&queue->values);
812
+ queue->capacity = 0;
813
+ }
814
+
815
+ /*
816
+ * Document-method: new
817
+ * call-seq: new
818
+ *
819
+ * Creates a new queue.
820
+ *
821
+ */
822
+
823
+ static VALUE
824
+ rb_queue_alloc(VALUE klass)
825
+ {
826
+ Queue *queue;
827
+ queue = ALLOC(Queue);
828
+ init_queue(queue);
829
+ return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
830
+ }
831
+
832
+ static VALUE
833
+ rb_queue_marshal_load(VALUE self, VALUE data)
834
+ {
835
+ Queue *queue;
836
+ VALUE array;
837
+ Data_Get_Struct(self, Queue, queue);
838
+
839
+ array = rb_marshal_load(data);
840
+ if (TYPE(array) != T_ARRAY) {
841
+ rb_raise(rb_eTypeError, "expected Array of queue data");
842
+ }
843
+ if (RARRAY(array)->len < 1) {
844
+ rb_raise(rb_eArgError, "missing capacity value");
845
+ }
846
+ queue->capacity = NUM2ULONG(rb_ary_shift(array));
847
+ push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);
848
+
849
+ return self;
850
+ }
851
+
852
+ static VALUE
853
+ rb_queue_marshal_dump(VALUE self)
854
+ {
855
+ Queue *queue;
856
+ VALUE array;
857
+ Data_Get_Struct(self, Queue, queue);
858
+
859
+ array = array_from_list(&queue->values);
860
+ rb_ary_unshift(array, ULONG2NUM(queue->capacity));
861
+ return rb_marshal_dump(array, Qnil);
862
+ }
863
+
864
+ /*
865
+ * Document-method: clear
866
+ * call-seq: clear
867
+ *
868
+ * Removes all objects from the queue.
869
+ *
870
+ */
871
+
872
+ static VALUE
873
+ rb_queue_clear(VALUE self)
874
+ {
875
+ Queue *queue;
876
+ Data_Get_Struct(self, Queue, queue);
877
+
878
+ lock_mutex(&queue->mutex);
879
+ clear_list(&queue->values);
880
+ signal_condvar(&queue->space_available);
881
+ unlock_mutex(&queue->mutex);
882
+
883
+ return self;
884
+ }
885
+
886
+ /*
887
+ * Document-method: empty?
888
+ * call-seq: empty?
889
+ *
890
+ * Returns +true+ if the queue is empty.
891
+ *
892
+ */
893
+
894
+ static VALUE
895
+ rb_queue_empty_p(VALUE self)
896
+ {
897
+ Queue *queue;
898
+ VALUE result;
899
+ Data_Get_Struct(self, Queue, queue);
900
+
901
+ lock_mutex(&queue->mutex);
902
+ result = queue->values.size == 0 ? Qtrue : Qfalse;
903
+ unlock_mutex(&queue->mutex);
904
+
905
+ return result;
906
+ }
907
+
908
+ /*
909
+ * Document-method: length
910
+ * call-seq: length
911
+ *
912
+ * Returns the length of the queue.
913
+ *
914
+ */
915
+
916
+ static VALUE
917
+ rb_queue_length(VALUE self)
918
+ {
919
+ Queue *queue;
920
+ VALUE result;
921
+ Data_Get_Struct(self, Queue, queue);
922
+
923
+ lock_mutex(&queue->mutex);
924
+ result = ULONG2NUM(queue->values.size);
925
+ unlock_mutex(&queue->mutex);
926
+
927
+ return result;
928
+ }
929
+
930
+ /*
931
+ * Document-method: num_waiting
932
+ * call-seq: num_waiting
933
+ *
934
+ * Returns the number of threads waiting on the queue.
935
+ *
936
+ */
937
+
938
+ static VALUE
939
+ rb_queue_num_waiting(VALUE self)
940
+ {
941
+ Queue *queue;
942
+ VALUE result;
943
+ Data_Get_Struct(self, Queue, queue);
944
+
945
+ lock_mutex(&queue->mutex);
946
+ result = ULONG2NUM(queue->value_available.waiting.size +
947
+ queue->space_available.waiting.size);
948
+ unlock_mutex(&queue->mutex);
949
+
950
+ return result;
951
+ }
952
+
953
+ /*
954
+ * Document-method: pop
955
+ * call_seq: pop(non_block=false)
956
+ *
957
+ * Retrieves data from the queue. If the queue is empty, the calling thread is
958
+ * suspended until data is pushed onto the queue. If +non_block+ is true, the
959
+ * thread isn't suspended, and an exception is raised.
960
+ *
961
+ */
962
+
963
+ static VALUE
964
+ rb_queue_pop(int argc, VALUE *argv, VALUE self)
965
+ {
966
+ Queue *queue;
967
+ int should_block;
968
+ VALUE result;
969
+ Data_Get_Struct(self, Queue, queue);
970
+
971
+ if (argc == 0) {
972
+ should_block = 1;
973
+ } else if (argc == 1) {
974
+ should_block = !RTEST(argv[0]);
975
+ } else {
976
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
977
+ }
978
+
979
+ lock_mutex(&queue->mutex);
980
+ if (!queue->values.entries && !should_block) {
981
+ unlock_mutex(&queue->mutex);
982
+ rb_raise(private_eThreadError, "queue empty");
983
+ }
984
+
985
+ while (!queue->values.entries) {
986
+ wait_condvar(&queue->value_available, &queue->mutex);
987
+ }
988
+
989
+ result = shift_list(&queue->values);
990
+ if (queue->capacity && queue->values.size < queue->capacity) {
991
+ signal_condvar(&queue->space_available);
992
+ }
993
+ unlock_mutex(&queue->mutex);
994
+
995
+ return result;
996
+ }
997
+
998
+ /*
999
+ * Document-method: push
1000
+ * call-seq: push(obj)
1001
+ *
1002
+ * Pushes +obj+ to the queue.
1003
+ *
1004
+ */
1005
+
1006
+ static VALUE
1007
+ rb_queue_push(VALUE self, VALUE value)
1008
+ {
1009
+ Queue *queue;
1010
+ Data_Get_Struct(self, Queue, queue);
1011
+
1012
+ lock_mutex(&queue->mutex);
1013
+ while (queue->capacity && queue->values.size >= queue->capacity) {
1014
+ wait_condvar(&queue->space_available, &queue->mutex);
1015
+ }
1016
+ push_list(&queue->values, value);
1017
+ signal_condvar(&queue->value_available);
1018
+ unlock_mutex(&queue->mutex);
1019
+
1020
+ return self;
1021
+ }
1022
+
1023
+ /*
1024
+ * Document-class: SizedQueue
1025
+ *
1026
+ * This class represents queues of specified size capacity. The push operation
1027
+ * may be blocked if the capacity is full.
1028
+ *
1029
+ * See Queue for an example of how a SizedQueue works.
1030
+ *
1031
+ */
1032
+
1033
+ /*
1034
+ * Document-method: new
1035
+ * call-seq: new
1036
+ *
1037
+ * Creates a fixed-length queue with a maximum size of +max+.
1038
+ *
1039
+ */
1040
+
1041
+ /*
1042
+ * Document-method: max
1043
+ * call-seq: max
1044
+ *
1045
+ * Returns the maximum size of the queue.
1046
+ *
1047
+ */
1048
+
1049
+ static VALUE
1050
+ rb_sized_queue_max(VALUE self)
1051
+ {
1052
+ Queue *queue;
1053
+ VALUE result;
1054
+ Data_Get_Struct(self, Queue, queue);
1055
+
1056
+ lock_mutex(&queue->mutex);
1057
+ result = ULONG2NUM(queue->capacity);
1058
+ unlock_mutex(&queue->mutex);
1059
+
1060
+ return result;
1061
+ }
1062
+
1063
+ /*
1064
+ * Document-method: max=
1065
+ * call-seq: max=(size)
1066
+ *
1067
+ * Sets the maximum size of the queue.
1068
+ *
1069
+ */
1070
+
1071
+ static VALUE
1072
+ rb_sized_queue_max_set(VALUE self, VALUE value)
1073
+ {
1074
+ Queue *queue;
1075
+ unsigned long new_capacity;
1076
+ unsigned long difference;
1077
+ Data_Get_Struct(self, Queue, queue);
1078
+
1079
+ new_capacity = NUM2ULONG(value);
1080
+
1081
+ if (new_capacity < 1) {
1082
+ rb_raise(rb_eArgError, "value must be positive");
1083
+ }
1084
+
1085
+ lock_mutex(&queue->mutex);
1086
+ if (queue->capacity && new_capacity > queue->capacity) {
1087
+ difference = new_capacity - queue->capacity;
1088
+ } else {
1089
+ difference = 0;
1090
+ }
1091
+ queue->capacity = new_capacity;
1092
+ for (; difference > 0; --difference) {
1093
+ signal_condvar(&queue->space_available);
1094
+ }
1095
+ unlock_mutex(&queue->mutex);
1096
+
1097
+ return self;
1098
+ }
1099
+
1100
+ /*
1101
+ * Document-method: push
1102
+ * call-seq: push(obj)
1103
+ *
1104
+ * Pushes +obj+ to the queue. If there is no space left in the queue, waits
1105
+ * until space becomes available.
1106
+ *
1107
+ */
1108
+
1109
+ /*
1110
+ * Document-method: pop
1111
+ * call-seq: pop(non_block=false)
1112
+ *
1113
+ * Retrieves data from the queue and runs a waiting thread, if any.
1114
+ *
1115
+ */
1116
+
1117
+ /* for marshalling mutexes and condvars */
1118
+
1119
+ static VALUE
1120
+ dummy_load(VALUE self, VALUE string)
1121
+ {
1122
+ return Qnil;
1123
+ }
1124
+
1125
+ static VALUE
1126
+ dummy_dump(VALUE self)
1127
+ {
1128
+ return rb_str_new2("");
1129
+ }
1130
+
1131
+
1132
+ static VALUE
1133
+ setup_classes(VALUE unused)
1134
+ {
1135
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Mutex")));
1136
+ rb_cMutex = rb_define_class("Mutex", rb_cObject);
1137
+ rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
1138
+ rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
1139
+ rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
1140
+ rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
1141
+ rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
1142
+ rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
1143
+ rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
1144
+ rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
1145
+ rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);
1146
+
1147
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("ConditionVariable")));
1148
+ rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
1149
+ rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
1150
+ rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
1151
+ rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
1152
+ rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
1153
+ rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
1154
+ rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
1155
+
1156
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("Queue")));
1157
+ rb_cQueue = rb_define_class("Queue", rb_cObject);
1158
+ rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
1159
+ rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
1160
+ rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
1161
+ rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
1162
+ rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
1163
+ rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
1164
+ rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
1165
+ rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
1166
+ rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
1167
+ rb_alias(rb_cQueue, rb_intern("enq"), rb_intern("push"));
1168
+ rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
1169
+ rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
1170
+ rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
1171
+ rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));
1172
+
1173
+ rb_mod_remove_const(rb_cObject, ID2SYM(rb_intern("SizedQueue")));
1174
+ rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
1175
+ rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
1176
+ rb_define_method(rb_cSizedQueue, "clear", rb_queue_clear, 0);
1177
+ rb_define_method(rb_cSizedQueue, "empty?", rb_queue_empty_p, 0);
1178
+ rb_define_method(rb_cSizedQueue, "length", rb_queue_length, 0);
1179
+ rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
1180
+ rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
1181
+ rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
1182
+ rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
1183
+ rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
1184
+ rb_alias(rb_cSizedQueue, rb_intern("enq"), rb_intern("push"));
1185
+ rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
1186
+ rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
1187
+ rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));
1188
+
1189
+ return Qnil;
1190
+ }
1191
+
1192
+ void
1193
+ Init_fastthread()
1194
+ {
1195
+ int saved_critical;
1196
+
1197
+ rb_require("thread");
1198
+
1199
+ private_eThreadError = rb_const_get(rb_cObject, rb_intern("ThreadError"));
1200
+
1201
+ /* ensure that classes get replaced atomically */
1202
+ saved_critical = rb_thread_critical;
1203
+ rb_thread_critical = 1;
1204
+ rb_ensure(setup_classes, Qnil, set_critical, (VALUE)saved_critical);
1205
+ }
1206
+