fastthread 0.5.3.5
- data/Rakefile +58 -0
- data/ext/fastthread/extconf.rb +3 -0
- data/ext/fastthread/fastthread.c +881 -0
- data/setup.rb +1585 -0
- data/test/test_all.rb +6 -0
- data/test/test_condvar.rb +31 -0
- data/test/test_mutex.rb +71 -0
- data/test/test_queue.rb +49 -0
- data/tools/rakehelp.rb +117 -0
- metadata +53 -0
data/Rakefile
ADDED
@@ -0,0 +1,58 @@
require 'rake'
require 'rake/clean'
require 'rake/testtask'
require 'rake/gempackagetask'
require 'tools/rakehelp'

GEM_VERSION="0.5.3.5"

setup_extension('fastthread', 'fastthread')

desc "Compiles native extensions"
task :compile => [:fastthread]

task :default => [:compile, :test]

Rake::TestTask.new do |task|
  task.libs << 'test'
  task.test_files = Dir.glob( 'test/test*.rb' )
  task.verbose = true
end

gemspec = Gem::Specification.new do |gemspec|
  gemspec.name = "fastthread"
  gemspec.version = GEM_VERSION
  gemspec.author = "MenTaLguY <mental@rydia.net>"
  gemspec.summary = "Optimized replacement for thread.rb primitives"
  gemspec.test_file = 'test/test_all.rb'
  gemspec.files = %w( Rakefile setup.rb ) +
                  Dir.glob( 'test/*.rb' ) +
                  Dir.glob( 'ext/**/*.{c,rb}' ) +
                  Dir.glob( 'tools/*.rb' )

  gemspec.require_path = 'lib'

  if RUBY_PLATFORM.match("win32")
    gemspec.platform = Gem::Platform::WIN32
    gemspec.files += ['lib/fastthread.so']
  else
    gemspec.platform = Gem::Platform::RUBY
    gemspec.extensions = Dir.glob( 'ext/**/extconf.rb' )
  end
end

task :package => [:clean, :compile, :test]
Rake::GemPackageTask.new( gemspec ) do |task|
  task.gem_spec = gemspec
  task.need_tar = true
end

setup_clean ["ext/fastthread/*.{bundle,so,obj,pdb,lib,def,exp}", "ext/fastthread/Makefile", "pkg", "lib/*.bundle", "*.gem", ".config"]

task :install => [:default] do
  sh %{ sudo gem install pkg/fastthread-#{GEM_VERSION}.gem }
end

task :uninstall do
  sh %{ sudo gem uninstall fastthread }
end
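The Rakefile leans on two helpers, setup_extension and setup_clean, that come from tools/rakehelp.rb (packaged with the gem but not shown in this excerpt). For orientation only, a helper like setup_extension typically wires up the extconf.rb/make build chain; the sketch below is hypothetical and simplified, not the gem's actual implementation:

require 'rbconfig'

# Hypothetical sketch of a setup_extension helper -- the real one lives
# in tools/rakehelp.rb and may differ in names and details.
def setup_extension(dir, extension)
  ext    = "ext/#{dir}"
  ext_so = "#{ext}/#{extension}.#{Config::CONFIG['DLEXT']}"

  # Regenerate the Makefile whenever extconf.rb changes.
  file "#{ext}/Makefile" => "#{ext}/extconf.rb" do
    Dir.chdir(ext) { ruby 'extconf.rb' }
  end

  # Rebuild the shared object when any C source changes.
  file ext_so => ["#{ext}/Makefile"] + Dir.glob("#{ext}/*.c") do
    Dir.chdir(ext) { sh 'make' }
  end

  desc "Builds the #{extension} extension"
  task extension.to_sym => ext_so
end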
data/ext/fastthread/fastthread.c
ADDED
@@ -0,0 +1,881 @@
/*
 * Optimized Ruby Mutex implementation, loosely based on thread.rb by
 * Yukihiro Matsumoto <matz@ruby-lang.org>
 *
 * Copyright 2006 MenTaLguY <mental@rydia.net>
 *
 * This file is made available under the same terms as Ruby.
 */

#include <ruby.h>
#include <intern.h>
#include <rubysig.h>

static VALUE avoid_mem_pools;

#ifndef USE_MEM_POOLS
#define USE_MEM_POOLS !RTEST(avoid_mem_pools)
#endif

static ID mutex_ivar;

static VALUE rb_cMutex;
static VALUE rb_cConditionVariable;
/* post-1.8.5 Ruby exposes rb_eThreadError; earlier versions do not */
static VALUE private_eThreadError;
static VALUE rb_cQueue;
static VALUE rb_cSizedQueue;

static VALUE
return_value(value)
  VALUE value;
{
  return value;
}

typedef struct _Entry {
  VALUE value;
  struct _Entry *next;
} Entry;

typedef struct _List {
  Entry *entries;
  Entry *last_entry;
  Entry *entry_pool;
  unsigned long size;
} List;

static void
init_list(list)
  List *list;
{
  list->entries = NULL;
  list->last_entry = NULL;
  list->entry_pool = NULL;
  list->size = 0;
}

static void
mark_list(list)
  List *list;
{
  Entry *entry;
  for ( entry = list->entries ; entry ; entry = entry->next ) {
    rb_gc_mark(entry->value);
  }
}

static void
free_entries(first)
  Entry *first;
{
  Entry *next;
  while (first) {
    next = first->next;
    free(first);
    first = next;
  }
}

static void
finalize_list(list)
  List *list;
{
  free_entries(list->entries);
  free_entries(list->entry_pool);
}

static void
push_list(list, value)
  List *list;
  VALUE value;
{
  Entry *entry;

  if (list->entry_pool) {
    entry = list->entry_pool;
    list->entry_pool = entry->next;
  } else {
    entry = (Entry *)malloc(sizeof(Entry));
  }

  entry->value = value;
  entry->next = NULL;

  if (list->last_entry) {
    list->last_entry->next = entry;
  } else {
    list->entries = entry;
  }
  list->last_entry = entry;

  ++list->size;
}

static void
push_multiple_list(list, values, count)
  List *list;
  VALUE *values;
  unsigned count;
{
  unsigned i;
  for ( i = 0 ; i < count ; i++ ) {
    push_list(list, values[i]);
  }
}

static VALUE
shift_list(list)
  List *list;
{
  Entry *entry;
  VALUE value;

  entry = list->entries;
  if (!entry) return Qundef;

  list->entries = entry->next;
  if ( entry == list->last_entry ) {
    list->last_entry = NULL;
  }

  --list->size;

  value = entry->value;
  if (USE_MEM_POOLS) {
    entry->next = list->entry_pool;
    list->entry_pool = entry;
  } else {
    free(entry);
  }

  return value;
}

static void
clear_list(list)
  List *list;
{
  if (list->last_entry) {
    if (USE_MEM_POOLS) {
      list->last_entry->next = list->entry_pool;
      list->entry_pool = list->entries;
    } else {
      free_entries(list->entries);
    }
    list->entries = NULL;
    list->last_entry = NULL;
    list->size = 0;
  }
}

static VALUE
array_from_list(list)
  List const *list;
{
  VALUE ary;
  Entry *entry;
  ary = rb_ary_new();
  for ( entry = list->entries ; entry ; entry = entry->next ) {
    rb_ary_push(ary, entry->value);
  }
  return ary;
}

static VALUE
wake_one(list)
  List *list;
{
  VALUE waking;

  waking = Qnil;
  while ( list->entries && !RTEST(waking) ) {
    waking = rb_rescue2(rb_thread_wakeup, shift_list(list),
                        return_value, Qnil, private_eThreadError, 0);
  }

  return waking;
}

static VALUE
wake_all(list)
  List *list;
{
  while (list->entries) {
    wake_one(list);
  }
  return Qnil;
}
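/*
 * The List type above does double duty: it holds waiting Thread objects
 * for Mutex and ConditionVariable, and the stored values for Queue.
 * Popped entries are recycled through entry_pool rather than freed,
 * unless $fastthread_avoid_mem_pools is set (see USE_MEM_POOLS above).
 */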
typedef struct _Mutex {
  VALUE owner;
  List waiting;
} Mutex;

static void
mark_mutex(mutex)
  Mutex *mutex;
{
  rb_gc_mark(mutex->owner);
  mark_list(&mutex->waiting);
}

static void
finalize_mutex(mutex)
  Mutex *mutex;
{
  finalize_list(&mutex->waiting);
}

static void
free_mutex(mutex)
  Mutex *mutex;
{
  if (mutex->waiting.entries) {
    rb_bug("mutex %p freed with thread(s) waiting", mutex);
  }
  finalize_mutex(mutex);
  free(mutex);
}

static void
init_mutex(mutex)
  Mutex *mutex;
{
  mutex->owner = Qnil;
  init_list(&mutex->waiting);
}

static VALUE rb_mutex_alloc _((VALUE));

static VALUE
rb_mutex_alloc(klass)
  VALUE klass;
{
  Mutex *mutex;
  mutex = (Mutex *)malloc(sizeof(Mutex));
  init_mutex(mutex);
  return Data_Wrap_Struct(klass, mark_mutex, free_mutex, mutex);
}

static VALUE
rb_mutex_locked_p(self)
  VALUE self;
{
  Mutex *mutex;
  Data_Get_Struct(self, Mutex, mutex);
  return ( RTEST(mutex->owner) ? Qtrue : Qfalse );
}

static VALUE
rb_mutex_try_lock(self)
  VALUE self;
{
  Mutex *mutex;
  VALUE result;

  Data_Get_Struct(self, Mutex, mutex);

  result = Qfalse;

  rb_thread_critical = 1;
  if (!RTEST(mutex->owner)) {
    mutex->owner = rb_thread_current();
    result = Qtrue;
  }
  rb_thread_critical = 0;

  return result;
}

static void
lock_mutex(mutex)
  Mutex *mutex;
{
  VALUE current;
  current = rb_thread_current();

  rb_thread_critical = 1;

  while (RTEST(mutex->owner)) {
    push_list(&mutex->waiting, current);
    rb_thread_stop();

    rb_thread_critical = 1;
  }
  mutex->owner = current;

  rb_thread_critical = 0;
}

static VALUE
rb_mutex_lock(self)
  VALUE self;
{
  Mutex *mutex;
  Data_Get_Struct(self, Mutex, mutex);
  lock_mutex(mutex);
  return self;
}

static VALUE
unlock_mutex_inner(mutex)
  Mutex *mutex;
{
  VALUE waking;

  if (!RTEST(mutex->owner)) {
    return Qundef;
  }
  mutex->owner = Qnil;
  waking = wake_one(&mutex->waiting);

  return waking;
}

static VALUE
set_critical(value)
  VALUE value;
{
  rb_thread_critical = (int)value;
  return Qnil;
}

static VALUE
unlock_mutex(mutex)
  Mutex *mutex;
{
  VALUE waking;

  rb_thread_critical = 1;
  waking = rb_ensure(unlock_mutex_inner, (VALUE)mutex, set_critical, 0);

  if ( waking == Qundef ) {
    return Qfalse;
  }

  if (RTEST(waking)) {
    rb_rescue2(rb_thread_run, waking, return_value, Qnil, private_eThreadError, 0);
  }

  return Qtrue;
}

static VALUE
rb_mutex_unlock(self)
  VALUE self;
{
  Mutex *mutex;
  Data_Get_Struct(self, Mutex, mutex);

  if (RTEST(unlock_mutex(mutex))) {
    return self;
  } else {
    return Qnil;
  }
}

static VALUE
rb_mutex_exclusive_unlock_inner(mutex)
  Mutex *mutex;
{
  VALUE waking;
  waking = unlock_mutex_inner(mutex);
  rb_yield(Qundef);
  return waking;
}

static VALUE
rb_mutex_exclusive_unlock(self)
  VALUE self;
{
  Mutex *mutex;
  VALUE waking;
  Data_Get_Struct(self, Mutex, mutex);

  rb_thread_critical = 1;
  waking = rb_ensure(rb_mutex_exclusive_unlock_inner, (VALUE)mutex, set_critical, 0);

  if ( waking == Qundef ) {
    return Qnil;
  }

  if (RTEST(waking)) {
    rb_rescue2(rb_thread_run, waking, return_value, Qnil, private_eThreadError, 0);
  }

  return self;
}

static VALUE
rb_mutex_synchronize(self)
  VALUE self;
{
  rb_mutex_lock(self);
  return rb_ensure(rb_yield, Qundef, rb_mutex_unlock, self);
}
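/*
 * Ruby-level view of the Mutex implemented above, as a minimal sketch
 * (shared_count stands in for any shared state):
 *
 *   m = Mutex.new
 *   m.synchronize { shared_count += 1 }
 *
 * rb_mutex_synchronize locks, then runs the block under rb_ensure so
 * the unlock happens even if the block raises.
 */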
typedef struct _ConditionVariable {
  List waiting;
} ConditionVariable;

static void
mark_condvar(condvar)
  ConditionVariable *condvar;
{
  mark_list(&condvar->waiting);
}

static void
finalize_condvar(condvar)
  ConditionVariable *condvar;
{
  finalize_list(&condvar->waiting);
}

static void
free_condvar(condvar)
  ConditionVariable *condvar;
{
  finalize_condvar(condvar);
  free(condvar);
}

static void
init_condvar(condvar)
  ConditionVariable *condvar;
{
  init_list(&condvar->waiting);
}

static VALUE rb_condvar_alloc _((VALUE));

static VALUE
rb_condvar_alloc(klass)
  VALUE klass;
{
  ConditionVariable *condvar;

  condvar = (ConditionVariable *)malloc(sizeof(ConditionVariable));
  init_condvar(condvar);

  return Data_Wrap_Struct(klass, mark_condvar, free_condvar, condvar);
}

static void
wait_condvar(condvar, mutex)
  ConditionVariable *condvar;
  Mutex *mutex;
{
  rb_thread_critical = 1;
  if (!RTEST(mutex->owner)) {
    rb_thread_critical = Qfalse;
    return;
  }
  if ( mutex->owner != rb_thread_current() ) {
    rb_thread_critical = Qfalse;
    rb_raise(private_eThreadError, "Not owner");
  }
  mutex->owner = Qnil;
  push_list(&condvar->waiting, rb_thread_current());
  rb_thread_stop();

  lock_mutex(mutex);
}

static VALUE
rb_condvar_wait(VALUE self, VALUE mutex_v)
{
  ConditionVariable *condvar;
  Mutex *mutex;

  if ( CLASS_OF(mutex_v) != rb_cMutex ) {
    rb_raise(rb_eTypeError, "Not a Mutex");
  }
  Data_Get_Struct(self, ConditionVariable, condvar);
  Data_Get_Struct(mutex_v, Mutex, mutex);

  wait_condvar(condvar, mutex);

  return self;
}

static VALUE
rb_condvar_broadcast(self)
  VALUE self;
{
  ConditionVariable *condvar;

  Data_Get_Struct(self, ConditionVariable, condvar);

  rb_thread_critical = 1;
  rb_ensure(wake_all, (VALUE)&condvar->waiting, set_critical, 0);
  rb_thread_schedule();

  return self;
}

static void
signal_condvar(condvar)
  ConditionVariable *condvar;
{
  VALUE waking;
  rb_thread_critical = 1;
  waking = rb_ensure(wake_one, (VALUE)&condvar->waiting, set_critical, 0);
  if (RTEST(waking)) {
    rb_rescue2(rb_thread_run, waking, return_value, Qnil, private_eThreadError, 0);
  }
}

static VALUE
rb_condvar_signal(self)
  VALUE self;
{
  ConditionVariable *condvar;
  Data_Get_Struct(self, ConditionVariable, condvar);
  signal_condvar(condvar);
  return self;
}
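/*
 * A ConditionVariable must be paired with a locked Mutex: wait_condvar
 * releases the mutex, sleeps, and reacquires it on wakeup.  A minimal
 * sketch of the usual pairing:
 *
 *   m, cv = Mutex.new, ConditionVariable.new
 *   m.synchronize { cv.wait(m) until ready }     # waiter
 *   m.synchronize { ready = true; cv.signal }    # notifier
 */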
typedef struct _Queue {
  Mutex mutex;
  ConditionVariable value_available;
  ConditionVariable space_available;
  List values;
  unsigned long capacity;
} Queue;

static void
mark_queue(queue)
  Queue *queue;
{
  mark_mutex(&queue->mutex);
  mark_condvar(&queue->value_available);
  mark_condvar(&queue->space_available);
  mark_list(&queue->values);
}

static void
finalize_queue(queue)
  Queue *queue;
{
  finalize_mutex(&queue->mutex);
  finalize_condvar(&queue->value_available);
  finalize_condvar(&queue->space_available);
  finalize_list(&queue->values);
}

static void
free_queue(queue)
  Queue *queue;
{
  finalize_queue(queue);
  free(queue);
}

static void
init_queue(queue)
  Queue *queue;
{
  init_mutex(&queue->mutex);
  init_condvar(&queue->value_available);
  init_condvar(&queue->space_available);
  init_list(&queue->values);
  queue->capacity = 0;
}

static VALUE rb_queue_alloc _((VALUE));

static VALUE
rb_queue_alloc(klass)
  VALUE klass;
{
  Queue *queue;
  queue = (Queue *)malloc(sizeof(Queue));
  init_queue(queue);
  return Data_Wrap_Struct(klass, mark_queue, free_queue, queue);
}

static VALUE
rb_queue_marshal_load(self, data)
  VALUE self;
  VALUE data;
{
  Queue *queue;
  VALUE array;
  Data_Get_Struct(self, Queue, queue);

  array = rb_marshal_load(data);
  if ( TYPE(array) != T_ARRAY ) {
    rb_raise(rb_eRuntimeError, "expected Array of queue data");
  }
  if ( RARRAY(array)->len < 1 ) {
    rb_raise(rb_eRuntimeError, "missing capacity value");
  }
  queue->capacity = NUM2ULONG(rb_ary_shift(array));
  push_multiple_list(&queue->values, RARRAY(array)->ptr, (unsigned)RARRAY(array)->len);

  return self;
}

static VALUE
rb_queue_marshal_dump(self)
  VALUE self;
{
  Queue *queue;
  VALUE array;
  Data_Get_Struct(self, Queue, queue);

  array = array_from_list(&queue->values);
  rb_ary_unshift(array, ULONG2NUM(queue->capacity));
  return rb_marshal_dump(array, Qnil);
}

static VALUE
rb_queue_clear(self)
  VALUE self;
{
  Queue *queue;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  clear_list(&queue->values);
  signal_condvar(&queue->space_available);
  unlock_mutex(&queue->mutex);

  return self;
}

static VALUE
rb_queue_empty_p(self)
  VALUE self;
{
  Queue *queue;
  VALUE result;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  result = ( ( queue->values.size == 0 ) ? Qtrue : Qfalse );
  unlock_mutex(&queue->mutex);

  return result;
}

static VALUE
rb_queue_length(self)
  VALUE self;
{
  Queue *queue;
  VALUE result;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  result = ULONG2NUM(queue->values.size);
  unlock_mutex(&queue->mutex);

  return result;
}

static VALUE
rb_queue_num_waiting(self)
  VALUE self;
{
  Queue *queue;
  VALUE result;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  result = ULONG2NUM(queue->value_available.waiting.size +
                     queue->space_available.waiting.size);
  unlock_mutex(&queue->mutex);

  return result;
}

static VALUE
rb_queue_pop(argc, argv, self)
  int argc;
  VALUE *argv;
  VALUE self;
{
  Queue *queue;
  int should_block;
  VALUE result;
  Data_Get_Struct(self, Queue, queue);

  if ( argc == 0 ) {
    should_block = 1;
  } else if ( argc == 1 ) {
    should_block = !RTEST(argv[0]);
  } else {
    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
  }

  lock_mutex(&queue->mutex);
  if ( !queue->values.entries && !should_block ) {
    unlock_mutex(&queue->mutex);
    rb_raise(private_eThreadError, "queue empty");
  }

  while (!queue->values.entries) {
    wait_condvar(&queue->value_available, &queue->mutex);
  }

  result = shift_list(&queue->values);
  if ( queue->capacity && queue->values.size < queue->capacity ) {
    signal_condvar(&queue->space_available);
  }
  unlock_mutex(&queue->mutex);

  return result;
}

static VALUE
rb_queue_push(self, value)
  VALUE self;
  VALUE value;
{
  Queue *queue;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  while ( queue->capacity && queue->values.size >= queue->capacity ) {
    wait_condvar(&queue->space_available, &queue->mutex);
  }
  push_list(&queue->values, value);
  signal_condvar(&queue->value_available);
  unlock_mutex(&queue->mutex);

  return self;
}
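/*
 * rb_queue_pop implements both blocking and non-blocking pop: with no
 * argument it waits on value_available; with a true argument an empty
 * queue raises ThreadError instead.  Minimal sketch:
 *
 *   q = Queue.new
 *   Thread.new { q.push(42) }
 *   q.pop          # blocks until a value arrives
 *   q.pop(true)    # raises ThreadError when empty
 */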
static VALUE
rb_sized_queue_max(self)
  VALUE self;
{
  Queue *queue;
  VALUE result;
  Data_Get_Struct(self, Queue, queue);

  lock_mutex(&queue->mutex);
  result = ULONG2NUM(queue->capacity);
  unlock_mutex(&queue->mutex);

  return result;
}

static VALUE
rb_sized_queue_max_set(self, value)
  VALUE self;
  VALUE value;
{
  Queue *queue;
  unsigned long new_capacity;
  unsigned long difference;
  Data_Get_Struct(self, Queue, queue);

  new_capacity = NUM2ULONG(value);

  if ( new_capacity < 1 ) {
    rb_raise(rb_eArgError, "value must be positive");
  }

  lock_mutex(&queue->mutex);
  if ( queue->capacity && new_capacity > queue->capacity ) {
    difference = new_capacity - queue->capacity;
  } else {
    difference = 0;
  }
  queue->capacity = new_capacity;
  for ( ; difference > 0 ; --difference ) {
    signal_condvar(&queue->space_available);
  }
  unlock_mutex(&queue->mutex);

  return self;
}
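/*
 * SizedQueue reuses the Queue machinery with a nonzero capacity:
 * rb_queue_push waits on space_available when the queue is full, and
 * raising the capacity via max= signals that condvar once per newly
 * added slot so blocked producers wake up.
 */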
/* Existing code expects to be able to serialize Mutexes... */

static VALUE
dummy_load(self, string)
  VALUE self;
  VALUE string;
{
  return Qnil;
}

static VALUE
dummy_dump(self)
  VALUE self;
{
  return rb_str_new2("");
}

void
Init_fastthread()
{
  avoid_mem_pools = rb_gv_get("$fastthread_avoid_mem_pools");
  rb_global_variable(&avoid_mem_pools);
  rb_define_variable("$fastthread_avoid_mem_pools", &avoid_mem_pools);

  mutex_ivar = rb_intern("__mutex__");

  if (!RTEST(rb_require("thread"))) {
    rb_raise(rb_eRuntimeError, "fastthread must be required before thread");
  }

  private_eThreadError = rb_const_get(rb_cObject, rb_intern("ThreadError"));

  rb_cMutex = rb_define_class("Mutex", rb_cObject);
  rb_define_alloc_func(rb_cMutex, rb_mutex_alloc);
  rb_define_method(rb_cMutex, "marshal_load", dummy_load, 1);
  rb_define_method(rb_cMutex, "marshal_dump", dummy_dump, 0);
  rb_define_method(rb_cMutex, "initialize", return_value, 0);
  rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
  rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
  rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
  rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
  rb_define_method(rb_cMutex, "exclusive_unlock", rb_mutex_exclusive_unlock, 0);
  rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize, 0);

  rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject);
  rb_define_alloc_func(rb_cConditionVariable, rb_condvar_alloc);
  rb_define_method(rb_cConditionVariable, "marshal_load", dummy_load, 1);
  rb_define_method(rb_cConditionVariable, "marshal_dump", dummy_dump, 0);
  rb_define_method(rb_cConditionVariable, "initialize", return_value, 0);
  rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, 1);
  rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
  rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);

  rb_cQueue = rb_define_class("Queue", rb_cObject);
  rb_define_alloc_func(rb_cQueue, rb_queue_alloc);
  rb_define_method(rb_cQueue, "marshal_load", rb_queue_marshal_load, 1);
  rb_define_method(rb_cQueue, "marshal_dump", rb_queue_marshal_dump, 0);
  rb_define_method(rb_cQueue, "initialize", return_value, 0);
  rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
  rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
  rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
  rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
  rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
  rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
  rb_alias(rb_cQueue, rb_intern("<<"), rb_intern("push"));
  rb_alias(rb_cQueue, rb_intern("deq"), rb_intern("pop"));
  rb_alias(rb_cQueue, rb_intern("shift"), rb_intern("pop"));
  rb_alias(rb_cQueue, rb_intern("size"), rb_intern("length"));

  rb_cSizedQueue = rb_define_class("SizedQueue", rb_cQueue);
  rb_define_method(rb_cSizedQueue, "initialize", rb_sized_queue_max_set, 1);
  rb_define_method(rb_cSizedQueue, "clear", rb_queue_clear, 0);
  rb_define_method(rb_cSizedQueue, "empty?", rb_queue_empty_p, 0);
  rb_define_method(rb_cSizedQueue, "length", rb_queue_length, 0);
  rb_define_method(rb_cSizedQueue, "num_waiting", rb_queue_num_waiting, 0);
  rb_define_method(rb_cSizedQueue, "pop", rb_queue_pop, -1);
  rb_define_method(rb_cSizedQueue, "push", rb_queue_push, 1);
  rb_define_method(rb_cSizedQueue, "max", rb_sized_queue_max, 0);
  rb_define_method(rb_cSizedQueue, "max=", rb_sized_queue_max_set, 1);
  rb_alias(rb_cSizedQueue, rb_intern("<<"), rb_intern("push"));
  rb_alias(rb_cSizedQueue, rb_intern("deq"), rb_intern("pop"));
  rb_alias(rb_cSizedQueue, rb_intern("shift"), rb_intern("pop"));
}
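Since Init_fastthread raises if the stdlib thread.rb was loaded first, the gem must be required before anything pulls in thread. A minimal usage sketch, assuming the gem is installed:

require 'rubygems'
require 'fastthread'   # must precede 'thread'; see Init_fastthread above
require 'thread'

queue = SizedQueue.new(2)

producer = Thread.new do
  5.times { |i| queue.push(i) }   # blocks while the queue is full
end

consumer = Thread.new do
  5.times { puts queue.pop }      # blocks while the queue is empty
end

producer.join
consumer.join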