ffi 1.0.2-x86-mingw32 → 1.0.3-x86-mingw32


@@ -1,3 +1,6 @@
+ == 1.0.3 / 2010-12-28
+ * Re-add support for ruby 1.8.x
+
  == 1.0.0 / 2010-11-30
  * Major improvements
  * Better handling of non-ruby thread callbacks
data/Rakefile CHANGED
@@ -75,7 +75,7 @@ PROJ.name = 'ffi'
  PROJ.authors = 'Wayne Meissner'
  PROJ.email = 'wmeissner@gmail.com'
  PROJ.url = 'http://wiki.github.com/ffi/ffi'
- PROJ.version = '1.0.2'
+ PROJ.version = '1.0.3'
  PROJ.rubyforge.name = 'ffi'
  PROJ.readme_file = 'README.rdoc'
 
@@ -90,11 +90,11 @@ PROJ.ann.email[:server] = 'smtp.gmail.com'
  PROJ.gem.need_tar = false
  PROJ.gem.files = %w(History.txt LICENSE README.rdoc Rakefile) + Dir.glob("{ext,gen,lib,spec,tasks}/**/*")
  PROJ.gem.platform = Gem::Platform::RUBY
- PROJ.gem.required_ruby_version = ">= 1.9.2"
+ #PROJ.gem.required_ruby_version = ">= 1.9.2"
 
  # Override Mr. Bones autogenerated extensions and force ours in
  PROJ.gem.extras['extensions'] = %w(ext/ffi_c/extconf.rb gen/Rakefile)
- PROJ.gem.extras['required_ruby_version'] = ">= 1.9.2"
+ #PROJ.gem.extras['required_ruby_version'] = ">= 1.9.2"
 
  # RDoc
  PROJ.rdoc.exclude << '^ext\/'
@@ -116,23 +116,23 @@ TEST_DEPS = [ LIBTEST ]
  if RUBY_PLATFORM == "java"
  desc "Run all specs"
  task :specs => TEST_DEPS do
- sh %{#{Gem.ruby} -S spec #{Dir["spec/ffi/*_spec.rb"].join(" ")} -fs --color}
+ sh %{#{Gem.ruby} -S rspec #{Dir["spec/ffi/*_spec.rb"].join(" ")} -fs --color}
  end
  desc "Run rubinius specs"
  task :rbxspecs => TEST_DEPS do
- sh %{#{Gem.ruby} -S spec #{Dir["spec/ffi/rbx/*_spec.rb"].join(" ")} -fs --color}
+ sh %{#{Gem.ruby} -S rspec #{Dir["spec/ffi/rbx/*_spec.rb"].join(" ")} -fs --color}
  end
  else
  TEST_DEPS.unshift :compile
  desc "Run all specs"
  task :specs => TEST_DEPS do
  ENV["MRI_FFI"] = "1"
- sh %{#{Gem.ruby} -Ilib -I#{BUILD_EXT_DIR} -S spec #{Dir["spec/ffi/*_spec.rb"].join(" ")} -fs --color}
+ sh %{#{Gem.ruby} -Ilib -I#{BUILD_EXT_DIR} -S rspec #{Dir["spec/ffi/*_spec.rb"].join(" ")} -fs --color}
  end
  desc "Run rubinius specs"
  task :rbxspecs => TEST_DEPS do
  ENV["MRI_FFI"] = "1"
- sh %{#{Gem.ruby} -Ilib -I#{BUILD_EXT_DIR} -S spec #{Dir["spec/ffi/rbx/*_spec.rb"].join(" ")} -fs --color}
+ sh %{#{Gem.ruby} -Ilib -I#{BUILD_EXT_DIR} -S rspec #{Dir["spec/ffi/rbx/*_spec.rb"].join(" ")} -fs --color}
  end
  end
 
@@ -133,10 +133,10 @@ memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length) \
  long count = NUM2LONG(length); \
  long off = NUM2LONG(offset); \
  AbstractMemory* memory = MEMORY(self); \
+ VALUE retVal = rb_ary_new2(count); \
  long i; \
  checkRead(memory); \
  checkBounds(memory, off, count * sizeof(type)); \
- VALUE retVal = rb_ary_new2(count); \
  for (i = 0; i < count; ++i) { \
  type tmp; \
  memcpy(&tmp, memory->address + off + (i * sizeof(type)), sizeof(tmp)); \
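
A note on the hunk above: hoisting the `VALUE retVal` declaration above the `checkRead()`/`checkBounds()` calls plausibly keeps the macro body valid C89, which matters for the older toolchains that build Ruby 1.8 extensions, since mixing declarations and statements is a C99 feature. A minimal illustration, not taken from the gem:

```c
/* Strict C89 (e.g. gcc -std=c89 -pedantic) requires declarations at the
 * top of a block; C99 and later allow them anywhere. */
void example(void)
{
    int count = 10;     /* declaration first: fine in C89 */
    count += 1;         /* statement */
    /* int later = 2;      would be rejected by strict C89 compilers */
}
```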
@@ -165,7 +165,6 @@ SWAPU16(uint16_t x)
  return bswap16(x);
  }
 
- #define SWAP16(x) (x)
  #if __GNUC__ < 4
  #define bswap32(x) \
  (((x << 24) & 0xff000000) | \
@@ -208,10 +207,10 @@ SWAPU64(uint64_t x)
  }
 
  #else
- # define SWAPU32(x) __builtin_bswap32(x)
- # define SWAPS32(x) __builtin_bswap32(x)
- # define SWAPS64(x) __builtin_bswap64(x)
- # define SWAPU64(x) __builtin_bswap64(x)
+ # define SWAPS32(x) ((int32_t) __builtin_bswap32(x))
+ # define SWAPU32(x) ((uint32_t) __builtin_bswap32(x))
+ # define SWAPS64(x) ((int64_t) __builtin_bswap64(x))
+ # define SWAPU64(x) ((uint64_t) __builtin_bswap64(x))
  #endif
 
  #if LONG_MAX > INT_MAX
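
The casts added above matter because `__builtin_bswap32()` returns an unsigned 32-bit value: without the `(int32_t)` cast, a byte-swapped negative number zero-extends instead of sign-extending when it widens into a larger type. A small demonstration (requires GCC or Clang; not part of the gem):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw = 0x000000ffu;                           /* swaps to 0xff000000 */
    int64_t zero_ext = __builtin_bswap32(raw);            /* 4278190080 */
    int64_t sign_ext = (int32_t) __builtin_bswap32(raw);  /* -16777216 */
    printf("%lld %lld\n", (long long) zero_ext, (long long) sign_ext);
    return 0;
}
```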
@@ -43,6 +43,7 @@
  #include "LastError.h"
  #include "Call.h"
  #include "MappedType.h"
+ #include "Thread.h"
 
  #ifdef USE_RAW
  # ifndef __i386__
@@ -239,13 +240,13 @@ rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes,
  }
 
 
- #if defined(HAVE_NATIVETHREAD) && defined(HAVE_RB_THREAD_BLOCKING_REGION)
-
  typedef struct BlockingCall_ {
  void* function;
  FunctionType* info;
  void **ffiValues;
- FFIStorage* retval;
+ void* retval;
+ void* stkretval;
+ void* params;
  } BlockingCall;
 
  static VALUE
@@ -257,7 +258,28 @@ call_blocking_function(void* data)
 
  return Qnil;
  }
- #endif
+
+ static VALUE
+ do_blocking_call(void *data)
+ {
+ rbffi_thread_blocking_region(call_blocking_function, data, (void *) -1, NULL);
+
+ return Qnil;
+ }
+
+ static VALUE
+ cleanup_blocking_call(void *data)
+ {
+ BlockingCall* bc = (BlockingCall *) data;
+
+ memcpy(bc->stkretval, bc->retval, MAX(bc->info->ffi_cif.rtype->size, FFI_SIZEOF_ARG));
+ xfree(bc->params);
+ xfree(bc->ffiValues);
+ xfree(bc->retval);
+ xfree(bc);
+
+ return Qnil;
+ }
 
  VALUE
  rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo)
@@ -265,38 +287,61 @@ rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo)
  void* retval;
  void** ffiValues;
  FFIStorage* params;
+ VALUE rbReturnValue;
 
- ffiValues = ALLOCA_N(void *, fnInfo->parameterCount);
- params = ALLOCA_N(FFIStorage, fnInfo->parameterCount);
- retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG));
-
- rbffi_SetupCallParams(argc, argv,
- fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues,
- fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums);
+ #if !defined(HAVE_RUBY_THREAD_HAS_GVL_P)
+ rbffi_thread_t oldThread;
+ #endif
 
- #if defined(HAVE_NATIVETHREAD) && defined(HAVE_RB_THREAD_BLOCKING_REGION)
+ retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG));
+
  if (unlikely(fnInfo->blocking)) {
- BlockingCall bc;
+ BlockingCall* bc;
+
+ // due to the way thread switching works on older ruby variants, we
+ // cannot allocate anything passed to the blocking function on the stack
+ ffiValues = ALLOC_N(void *, fnInfo->parameterCount);
+ params = ALLOC_N(FFIStorage, fnInfo->parameterCount);
+ bc = ALLOC_N(BlockingCall, 1);
+ bc->info = fnInfo;
+ bc->function = function;
+ bc->ffiValues = ffiValues;
+ bc->params = params;
+ bc->retval = xmalloc(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG));
+ bc->stkretval = retval;
+
+ rbffi_SetupCallParams(argc, argv,
+ fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues,
+ fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums);
+
+ rb_ensure(do_blocking_call, (VALUE) bc, cleanup_blocking_call, (VALUE) bc);
+
+ } else {
 
- bc.info = fnInfo;
- bc.function = function;
- bc.ffiValues = ffiValues;
- bc.retval = retval;
+ ffiValues = ALLOCA_N(void *, fnInfo->parameterCount);
+ params = ALLOCA_N(FFIStorage, fnInfo->parameterCount);
 
- rb_thread_blocking_region(call_blocking_function, &bc, (void *) -1, NULL);
- } else {
+ rbffi_SetupCallParams(argc, argv,
+ fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues,
+ fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums);
+
+ #if !defined(HAVE_RUBY_THREAD_HAS_GVL_P)
+ oldThread = rbffi_active_thread;
+ rbffi_active_thread = rbffi_thread_self();
+ #endif
+ retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG));
  ffi_call(&fnInfo->ffi_cif, FFI_FN(function), retval, ffiValues);
- }
- #else
- ffi_call(&fnInfo->ffi_cif, FFI_FN(function), retval, ffiValues);
+
+ #if !defined(HAVE_RUBY_THREAD_HAS_GVL_P)
+ rbffi_active_thread = oldThread;
  #endif
+ }
 
  if (unlikely(!fnInfo->ignoreErrno)) {
  rbffi_save_errno();
  }
 
- return rbffi_NativeValue_ToRuby(fnInfo->returnType, fnInfo->rbReturnType, retval,
- fnInfo->rbEnums);
+ return rbffi_NativeValue_ToRuby(fnInfo->returnType, fnInfo->rbReturnType, retval);
  }
 
  static inline void*
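
The reworked blocking path above heap-allocates the call state and runs it under `rb_ensure()`, so the buffers are freed even if the blocking region is interrupted by a Ruby exception. A stripped-down sketch of that pattern, with illustrative names (`call_state`, `run_guarded`) that are not from the gem:

```c
#include <ruby.h>

struct call_state { void *buf; };

/* Body: do the work that might raise. */
static VALUE body(VALUE data)
{
    struct call_state *st = (struct call_state *) data;
    /* ... use st->buf for the blocking call ... */
    return Qnil;
}

/* Ensure: runs whether or not body() raised, so nothing leaks. */
static VALUE cleanup(VALUE data)
{
    struct call_state *st = (struct call_state *) data;
    xfree(st->buf);
    xfree(st);
    return Qnil;
}

static VALUE run_guarded(void)
{
    struct call_state *st = ALLOC(struct call_state);
    st->buf = xmalloc(64);
    return rb_ensure(body, (VALUE) st, cleanup, (VALUE) st);
}
```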
@@ -1,28 +1,20 @@
  /*
- * Copyright (c) 2009, Wayne Meissner
+ * Copyright (c) 2009, 2010 Wayne Meissner
  * All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+ * This file is part of ruby-ffi.
  *
- * * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright notice
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * * The name of the author or authors may not be used to endorse or promote
- * products derived from this software without specific prior written permission.
+ * This code is free software: you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License version 3 only, as
+ * published by the Free Software Foundation.
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ * version 3 for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * version 3 along with this work. If not, see <http://www.gnu.org/licenses/>.
  */
 
  #include <sys/param.h>
@@ -40,9 +32,6 @@
  #endif
  #include <errno.h>
  #include <ruby.h>
- #if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__)
- # include <pthread.h>
- #endif
 
  #include <ffi.h>
  #include "rbffi.h"
@@ -57,10 +46,6 @@
  #include "ClosurePool.h"
 
 
- #if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__)
- # define USE_PTHREAD_LOCAL
- #endif
-
  #ifndef roundup
  # define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
  #endif
@@ -78,22 +63,11 @@ struct ClosurePool_ {
  void* ctx;
  int closureSize;
  bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize);
- #if defined (HAVE_NATIVETHREAD) && !defined(_WIN32)
- pthread_mutex_t mutex;
- #endif
  struct Memory* blocks; /* Keeps track of all the allocated memory for this pool */
  Closure* list;
  long refcnt;
  };
 
- #if defined(HAVE_NATIVETHREAD) && !defined(_WIN32)
- # define pool_lock(p) pthread_mutex_lock(&(p)->mutex)
- # define pool_unlock(p) pthread_mutex_unlock(&(p)->mutex)
- #else
- # define pool_lock(p)
- # define pool_unlock(p)
- #endif
-
  static int pageSize;
 
  static void* allocatePage(void);
@@ -113,10 +87,6 @@ rbffi_ClosurePool_New(int closureSize,
  pool->prep = prep;
  pool->refcnt = 1;
 
- #if defined(HAVE_NATIVETHREAD) && !defined(_WIN32)
- pthread_mutex_init(&pool->mutex, NULL);
- #endif
-
  return pool;
  }
@@ -140,10 +110,7 @@ rbffi_ClosurePool_Free(ClosurePool* pool)
  {
  if (pool != NULL) {
  int refcnt;
- pool_lock(pool);
  refcnt = --(pool->refcnt);
- pool_unlock(pool);
-
  if (refcnt == 0) {
  cleanup_closure_pool(pool);
  }
@@ -160,13 +127,11 @@ rbffi_Closure_Alloc(ClosurePool* pool)
  int nclosures, trampolineSize;
  int i;
 
- pool_lock(pool);
  if (pool->list != NULL) {
  Closure* closure = pool->list;
  pool->list = pool->list->next;
  pool->refcnt++;
- pool_unlock(pool);
-
+
  return closure;
  }
 
@@ -177,7 +142,6 @@ rbffi_Closure_Alloc(ClosurePool* pool)
  code = allocatePage();
 
  if (block == NULL || list == NULL || code == NULL) {
- pool_unlock(pool);
  snprintf(errmsg, sizeof(errmsg), "failed to allocate a page. errno=%d (%s)", errno, strerror(errno));
  goto error;
  }
@@ -208,13 +172,10 @@ rbffi_Closure_Alloc(ClosurePool* pool)
  pool->list = list->next;
  pool->refcnt++;
 
- pool_unlock(pool);
-
  /* Use the first one as the new handle */
  return list;
 
  error:
- pool_unlock(pool);
  free(block);
  free(list);
  if (code != NULL) {
@@ -232,13 +193,10 @@ rbffi_Closure_Free(Closure* closure)
  if (closure != NULL) {
  ClosurePool* pool = closure->pool;
  int refcnt;
- pool_lock(pool);
  // Just push it on the front of the free list
  closure->next = pool->list;
  pool->list = closure;
  refcnt = --(pool->refcnt);
- pool_unlock(pool);
-
  if (refcnt == 0) {
  cleanup_closure_pool(pool);
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2009, Wayne Meissner
+ * Copyright (c) 2009, 2010 Wayne Meissner
 
  * All rights reserved.
  *
@@ -35,6 +35,7 @@
  #if defined(HAVE_NATIVETHREAD) && !defined(_WIN32)
  #include <pthread.h>
  #endif
+ #include <fcntl.h>
 
  #include "rbffi.h"
  #include "compat.h"
@@ -49,6 +50,7 @@
  #include "ClosurePool.h"
  #include "Function.h"
  #include "MappedType.h"
+ #include "Thread.h"
 
  typedef struct Function_ {
  AbstractMemory memory;
@@ -67,12 +69,10 @@ static void callback_invoke(ffi_cif* cif, void* retval, void** parameters, void*
  static bool callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize);
  static void* callback_with_gvl(void* data);
 
- #if defined(HAVE_NATIVETHREAD) && defined(HAVE_RB_THREAD_BLOCKING_REGION)
- # define DEFER_ASYNC_CALLBACK 1
- #endif
+ #define DEFER_ASYNC_CALLBACK 1
 
  #if defined(DEFER_ASYNC_CALLBACK)
- static VALUE async_cb_event(void);
+ static VALUE async_cb_event(void *);
  static VALUE async_cb_call(void *);
  #endif
 
@@ -96,7 +96,7 @@ struct gvl_callback {
  Closure* closure;
  void* retval;
  void** parameters;
-
+ bool done;
  #if defined(DEFER_ASYNC_CALLBACK)
  struct gvl_callback* next;
  # ifndef _WIN32
@@ -112,8 +112,11 @@ struct gvl_callback {
  #if defined(DEFER_ASYNC_CALLBACK)
  static struct gvl_callback* async_cb_list = NULL;
  # ifndef _WIN32
- static pthread_mutex_t async_cb_mutex = PTHREAD_MUTEX_INITIALIZER;
- static pthread_cond_t async_cb_cond = PTHREAD_COND_INITIALIZER;
+ static pthread_mutex_t async_cb_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static pthread_cond_t async_cb_cond = PTHREAD_COND_INITIALIZER;
+ # if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
+ static int async_cb_pipe[2];
+ # endif
  # else
  static HANDLE async_cb_cond;
  static CRITICAL_SECTION async_cb_lock;
@@ -265,9 +268,15 @@ function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc)
  }
 
  #if defined(DEFER_ASYNC_CALLBACK)
+ # if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
+ pipe(async_cb_pipe);
+ fcntl(async_cb_pipe[0], F_SETFL, fcntl(async_cb_pipe[0], F_GETFL) | O_NONBLOCK);
+ fcntl(async_cb_pipe[1], F_SETFL, fcntl(async_cb_pipe[1], F_GETFL) | O_NONBLOCK);
+ # endif
  if (async_cb_thread == Qnil) {
  async_cb_thread = rb_thread_create(async_cb_event, NULL);
  }
+
  #endif
 
  fn->closure = rbffi_Closure_Alloc(fn->info->closurePool);
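
On rubies without `rb_thread_blocking_region()`, a pthread condition variable cannot wake the interpreter's scheduler, so the code above falls back to a self-pipe: the native callback thread writes a byte, and the Ruby-side event thread sleeps on the readable end. The trick in isolation (POSIX only, illustrative names; not the gem's code):

```c
#include <fcntl.h>
#include <unistd.h>

static int wake_pipe[2];

static int wake_init(void)
{
    if (pipe(wake_pipe) != 0) return -1;
    /* Non-blocking on both ends so a full pipe never stalls the signaller. */
    fcntl(wake_pipe[0], F_SETFL, fcntl(wake_pipe[0], F_GETFL) | O_NONBLOCK);
    fcntl(wake_pipe[1], F_SETFL, fcntl(wake_pipe[1], F_GETFL) | O_NONBLOCK);
    return 0;
}

static void wake_signal(void)
{
    char c = 0;
    write(wake_pipe[1], &c, 1);   /* wakes anyone reading/selecting on wake_pipe[0] */
}
```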
@@ -380,12 +389,9 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
  cb.closure = (Closure *) user_data;
  cb.retval = retval;
  cb.parameters = parameters;
+ cb.done = false;
 
- #ifdef HAVE_RUBY_THREAD_HAS_GVL_P
- if (ruby_thread_has_gvl_p()) {
- #else
- if (1) {
- #endif
+ if (rbffi_thread_has_gvl_p()) {
  callback_with_gvl(&cb);
 
  #if defined(HAVE_RUBY_NATIVE_THREAD_P) && defined (HAVE_RB_THREAD_CALL_WITH_GVL)
@@ -394,21 +400,38 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
  #endif
  #if defined(DEFER_ASYNC_CALLBACK) && !defined(_WIN32)
  } else {
+ bool empty = false;
+
  pthread_mutex_init(&cb.async_mutex, NULL);
  pthread_cond_init(&cb.async_cond, NULL);
 
- pthread_mutex_lock(&cb.async_mutex);
-
  // Now signal the async callback thread
  pthread_mutex_lock(&async_cb_mutex);
+ empty = async_cb_list == NULL;
  cb.next = async_cb_list;
  async_cb_list = &cb;
- pthread_cond_signal(&async_cb_cond);
  pthread_mutex_unlock(&async_cb_mutex);
 
+ #if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
+ // Only signal if the list was empty
+ if (empty) {
+ char c;
+ write(async_cb_pipe[1], &c, 1);
+ }
+ #else
+ pthread_cond_signal(&async_cb_cond);
+ #endif
+
  // Wait for the thread executing the ruby callback to signal it is done
- pthread_cond_wait(&cb.async_cond, &cb.async_mutex);
- #elif defined(DEFER_ASYNC_CALLBACK) && !defined(_WIN32)
+ pthread_mutex_lock(&cb.async_mutex);
+ while (!cb.done) {
+ pthread_cond_wait(&cb.async_cond, &cb.async_mutex);
+ }
+ pthread_mutex_unlock(&cb.async_mutex);
+ pthread_cond_destroy(&cb.async_cond);
+ pthread_mutex_destroy(&cb.async_mutex);
+
+ #elif defined(DEFER_ASYNC_CALLBACK) && defined(_WIN32)
  } else {
  cb.async_event = CreateEvent(NULL, FALSE, FALSE, NULL);
 
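The new `done` flag and the `while (!cb.done)` loop above are the standard guards for condition variables: they protect against spurious wakeups and against the signal arriving before the waiter reaches `pthread_cond_wait()`. The pattern in isolation (illustrative names):

```c
#include <pthread.h>
#include <stdbool.h>

struct waiter {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    bool done;
};

static void wait_until_done(struct waiter *w)
{
    pthread_mutex_lock(&w->mutex);
    while (!w->done) {                  /* re-check: cond_wait may wake spuriously */
        pthread_cond_wait(&w->cond, &w->mutex);
    }
    pthread_mutex_unlock(&w->mutex);
}

static void mark_done(struct waiter *w)
{
    pthread_mutex_lock(&w->mutex);
    w->done = true;                     /* set the predicate before signalling */
    pthread_cond_signal(&w->cond);
    pthread_mutex_unlock(&w->mutex);
}
```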
@@ -436,8 +459,9 @@ struct async_wait {
  static VALUE async_cb_wait(void *);
  static void async_cb_stop(void *);
 
+ #if defined(HAVE_RB_THREAD_BLOCKING_REGION)
  static VALUE
- async_cb_event(void)
+ async_cb_event(void* unused)
  {
  struct async_wait w = { 0 };
 
@@ -453,6 +477,40 @@ async_cb_event(void)
  return Qnil;
  }
 
+ #else
+ static VALUE
+ async_cb_event(void* unused)
+ {
+ while (true) {
+ struct gvl_callback* cb;
+ char buf[64];
+
+ if (read(async_cb_pipe[0], buf, sizeof(buf)) < 0) {
+ rb_thread_wait_fd(async_cb_pipe[0]);
+ while (read(async_cb_pipe[0], buf, sizeof (buf)) < 0) {
+ if (rb_io_wait_readable(async_cb_pipe[0]) != Qtrue) {
+ return Qfalse;
+ }
+ }
+ }
+
+ pthread_mutex_lock(&async_cb_mutex);
+ cb = async_cb_list;
+ async_cb_list = NULL;
+ pthread_mutex_unlock(&async_cb_mutex);
+
+ while (cb != NULL) {
+ struct gvl_callback* next = cb->next;
+ // Start up a new ruby thread to run the ruby callback
+ rb_thread_create(async_cb_call, cb);
+ cb = next;
+ }
+ }
+
+ return Qnil;
+ }
+ #endif
+
  #ifdef _WIN32
  static VALUE
  async_cb_wait(void *data)
@@ -538,6 +596,7 @@ async_cb_call(void *data)
  SetEvent(cb->async_event);
  #else
  pthread_mutex_lock(&cb->async_mutex);
+ cb->done = true;
  pthread_cond_signal(&cb->async_cond);
  pthread_mutex_unlock(&cb->async_mutex);
  #endif
@@ -624,7 +683,7 @@ callback_with_gvl(void* data)
  case NATIVE_FUNCTION:
  case NATIVE_CALLBACK:
  case NATIVE_STRUCT:
- param = rbffi_NativeValue_ToRuby(paramType, rbParamType, parameters[i], Qnil);
+ param = rbffi_NativeValue_ToRuby(paramType, rbParamType, parameters[i]);
  break;
 
  default: