ffi 1.1.3-x86-mingw32 → 1.1.4-x86-mingw32
Potentially problematic release: this version of ffi might be problematic.
- data/Rakefile +1 -1
- data/ext/ffi_c/AbstractMemory.c +16 -17
- data/ext/ffi_c/AbstractMemory.h +1 -1
- data/ext/ffi_c/Buffer.c +2 -2
- data/ext/ffi_c/Call.c +5 -4
- data/ext/ffi_c/ClosurePool.c +1 -1
- data/ext/ffi_c/DynamicLibrary.c +2 -2
- data/ext/ffi_c/Function.c +26 -25
- data/ext/ffi_c/Function.h +1 -1
- data/ext/ffi_c/MemoryPointer.c +2 -1
- data/ext/ffi_c/MethodHandle.c +1 -1
- data/ext/ffi_c/Pointer.c +1 -1
- data/ext/ffi_c/Struct.c +5 -5
- data/ext/ffi_c/StructByValue.c +1 -1
- data/ext/ffi_c/StructLayout.c +5 -5
- data/ext/ffi_c/Thread.c +2 -2
- data/ext/ffi_c/Type.c +3 -3
- data/ext/ffi_c/Types.c +4 -2
- data/ext/ffi_c/ffi.c +1 -1
- data/lib/1.8/ffi_c.so +0 -0
- data/lib/1.9/ffi_c.so +0 -0
- data/spec/ffi/rbx/memory_pointer_spec.rb +4 -0
- metadata +4 -4
data/Rakefile
CHANGED
data/ext/ffi_c/AbstractMemory.c
CHANGED
@@ -651,25 +651,24 @@ memory_op_put_strptr(AbstractMemory* ptr, long offset, VALUE value)
 
 static MemoryOp memory_op_strptr = { memory_op_get_strptr, memory_op_put_strptr };
 
-//static MemoryOp memory_op_pointer = { memory_op_get_pointer, memory_op_put_pointer };
 
 MemoryOps rbffi_AbstractMemoryOps = {
-    &memory_op_int8,
-    &memory_op_uint8,
-    &memory_op_int16,
-    &memory_op_uint16,
-    &memory_op_int32,
-    &memory_op_uint32,
-    &memory_op_int64,
-    &memory_op_uint64,
-    &memory_op_long,
-    &memory_op_ulong,
-    &memory_op_float32,
-    &memory_op_float64,
-    &memory_op_longdouble,
-    &memory_op_pointer,
-    &memory_op_strptr,
-    &memory_op_bool
+    &memory_op_int8,       /* .int8 */
+    &memory_op_uint8,      /* .uint8 */
+    &memory_op_int16,      /* .int16 */
+    &memory_op_uint16,     /* .uint16 */
+    &memory_op_int32,      /* .int32 */
+    &memory_op_uint32,     /* .uint32 */
+    &memory_op_int64,      /* .int64 */
+    &memory_op_uint64,     /* .uint64 */
+    &memory_op_long,       /* .slong */
+    &memory_op_ulong,      /* .uslong */
+    &memory_op_float32,    /* .float32 */
+    &memory_op_float64,    /* .float64 */
+    &memory_op_longdouble, /* .longdouble */
+    &memory_op_pointer,    /* .pointer */
+    &memory_op_strptr,     /* .strptr */
+    &memory_op_bool        /* .boolOp */
 };
 
 void
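The rbffi_AbstractMemoryOps table above backs the typed accessors that FFI memory objects expose to Ruby. A minimal usage sketch (not part of this diff; put_int32/get_int32 and friends are the public FFI::MemoryPointer API, values are illustrative):

    require 'ffi'

    ptr = FFI::MemoryPointer.new(:int32, 2)   # room for two 32-bit ints
    ptr.put_int32(0, 42)                      # typed 32-bit write at offset 0
    ptr.put_int32(4, -7)                      # and at offset 4
    ptr.get_int32(0)                          # => 42
    ptr.get_int32(4)                          # => -7

    fptr = FFI::MemoryPointer.new(:double)
    fptr.put_float64(0, 3.14)                 # typed double write
    fptr.get_float64(0)                       # => 3.14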
data/ext/ffi_c/AbstractMemory.h
CHANGED
@@ -70,7 +70,7 @@ typedef struct {
 } MemoryOps;
 
 struct AbstractMemory_ {
-    char* address;
+    char* address; /* Use char* instead of void* to ensure adding to it works correctly */
     long size;
     int flags;
     int typeSize;
data/ext/ffi_c/Buffer.c
CHANGED
@@ -41,7 +41,7 @@ typedef struct Buffer {
     union {
         VALUE rbParent; /* link to parent buffer */
        char* storage; /* start of malloc area */
-        long embed[BUFFER_EMBED_MAXLEN / sizeof(long)];
+        long embed[BUFFER_EMBED_MAXLEN / sizeof(long)]; /* storage for tiny allocations */
     } data;
 } Buffer;
 
@@ -151,7 +151,7 @@ buffer_initialize_copy(VALUE self, VALUE other)
     dst->memory.size = src->size;
     dst->memory.typeSize = src->typeSize;
 
-
+    /* finally, copy the actual buffer contents */
     memcpy(dst->memory.address, src->address, src->size);
 
     return self;
data/ext/ffi_c/Call.c
CHANGED
@@ -314,8 +314,10 @@ rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo)
     if (unlikely(fnInfo->blocking)) {
         BlockingCall* bc;
 
-
-
+        /*
+         * due to the way thread switching works on older ruby variants, we
+         * cannot allocate anything passed to the blocking function on the stack
+         */
         ffiValues = ALLOC_N(void *, fnInfo->parameterCount);
         params = ALLOC_N(FFIStorage, fnInfo->parameterCount);
         bc = ALLOC_N(BlockingCall, 1);
@@ -432,14 +434,13 @@ callback_param(VALUE proc, VALUE cbInfo)
         return NULL;
     }
 
-
+    /* Handle Function pointers here */
     if (rb_obj_is_kind_of(proc, rbffi_FunctionClass)) {
         AbstractMemory* ptr;
         Data_Get_Struct(proc, AbstractMemory, ptr);
         return ptr->address;
     }
 
-    //callback = rbffi_NativeCallback_ForProc(proc, cbInfo);
     callback = rbffi_Function_ForProc(cbInfo, proc);
     RB_GC_GUARD(callback);
 
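The heap allocations above matter because a blocking call releases the GVL while the native function runs, so nothing handed to it may live on the Ruby stack. A sketch of how a binding opts into this path, assuming a POSIX libc with sleep(3); :blocking => true is the option that sets fnInfo->blocking:

    require 'ffi'

    module Sleeper
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # :blocking => true lets other Ruby threads keep running while sleep(3) blocks
      attach_function :sleep, [ :uint ], :uint, :blocking => true
    end

    Sleeper.sleep(1)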
data/ext/ffi_c/ClosurePool.c
CHANGED
@@ -206,7 +206,7 @@ rbffi_Closure_Free(Closure* closure)
     if (closure != NULL) {
         ClosurePool* pool = closure->pool;
         long refcnt;
-
+        /* Just push it on the front of the free list */
        closure->next = pool->list;
        pool->list = closure;
        refcnt = --(pool->refcnt);
data/ext/ffi_c/DynamicLibrary.c
CHANGED
@@ -155,7 +155,7 @@ library_dlerror(VALUE self)
 static void
 library_free(Library* library)
 {
-
+    /* dlclose() on MacOS tends to segfault - avoid it */
 #ifndef __APPLE__
     if (library->handle != NULL) {
         dl_close(library->handle);
@@ -270,7 +270,7 @@ rbffi_DynamicLibrary_Init(VALUE moduleFFI)
  * Document-const: FFI::NativeLibrary
  * Backward compatibility for FFI::DynamicLibrary
  */
-    rb_define_const(moduleFFI, "NativeLibrary", LibraryClass);
+    rb_define_const(moduleFFI, "NativeLibrary", LibraryClass); /* backwards compat library */
     rb_define_alloc_func(LibraryClass, library_allocate);
     rb_define_singleton_method(LibraryClass, "open", library_open, 2);
     rb_define_singleton_method(LibraryClass, "last_error", library_dlerror, 0);
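FFI::NativeLibrary is kept only as an alias for FFI::DynamicLibrary, so both constants refer to the same class. A small sketch under assumptions (the library name is illustrative and Linux-specific; the RTLD flags and find_function are the DynamicLibrary API as shipped in this gem):

    require 'ffi'

    FFI::NativeLibrary.equal?(FFI::DynamicLibrary)   # => true, same class object

    lib = FFI::DynamicLibrary.open('libm.so.6',
            FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_GLOBAL)
    sym = lib.find_function('cos')                   # symbol handle, or nil if missing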
data/ext/ffi_c/Function.c
CHANGED
@@ -199,11 +199,11 @@ function_initialize(int argc, VALUE* argv, VALUE self)
 
     nargs = rb_scan_args(argc, argv, "22", &rbReturnType, &rbParamTypes, &rbProc, &rbOptions);
 
-
-
-
-
-
+    /*
+     * Callback with block,
+     * e.g. Function.new(:int, [ :int ]) { |i| blah }
+     * or   Function.new(:int, [ :int ], { :convention => :stdcall }) { |i| blah }
+     */
     if (rb_block_given_p()) {
         if (nargs > 3) {
             rb_raise(rb_eArgError, "cannot create function with both proc/address and block");
@@ -211,11 +211,12 @@ function_initialize(int argc, VALUE* argv, VALUE self)
         rbOptions = rbProc;
         rbProc = rb_block_proc();
     } else {
-
-
-
-
-
+        /* Callback with proc, or Function with address
+         * e.g. Function.new(:int, [ :int ], Proc.new { |i| })
+         *      Function.new(:int, [ :int ], Proc.new { |i| }, { :convention => :stdcall })
+         *      Function.new(:int, [ :int ], addr)
+         *      Function.new(:int, [ :int ], addr, { :convention => :stdcall })
+         */
     }
 
     infoArgv[0] = rbReturnType;
@@ -381,9 +382,9 @@ function_attach(VALUE self, VALUE module, VALUE name)
         fn->methodHandle = rbffi_MethodHandle_Alloc(fn->info, fn->base.memory.address);
     }
 
-
-
-
+    /*
+     * Stash the Function in a module variable so it does not get garbage collected
+     */
     snprintf(var, sizeof(var), "@@%s", StringValueCStr(name));
     rb_cv_set(module, var, self);
 
@@ -470,7 +471,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
     pthread_mutex_init(&cb.async_mutex, NULL);
     pthread_cond_init(&cb.async_cond, NULL);
 
-
+    /* Now signal the async callback thread */
     pthread_mutex_lock(&async_cb_mutex);
     empty = async_cb_list == NULL;
     cb.next = async_cb_list;
@@ -478,7 +479,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
     pthread_mutex_unlock(&async_cb_mutex);
 
 #if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
-
+    /* Only signal if the list was empty */
     if (empty) {
         char c;
         write(async_cb_pipe[1], &c, 1);
@@ -487,7 +488,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
     pthread_cond_signal(&async_cb_cond);
 #endif
 
-
+    /* Wait for the thread executing the ruby callback to signal it is done */
     pthread_mutex_lock(&cb.async_mutex);
     while (!cb.done) {
         pthread_cond_wait(&cb.async_cond, &cb.async_mutex);
@@ -502,7 +503,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
 
     cb.async_event = CreateEvent(NULL, FALSE, FALSE, NULL);
 
-
+    /* Now signal the async callback thread */
     EnterCriticalSection(&async_cb_lock);
     empty = async_cb_list == NULL;
     cb.next = async_cb_list;
@@ -510,7 +511,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
     LeaveCriticalSection(&async_cb_lock);
 
 #if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
-
+    /* Only signal if the list was empty */
     if (empty) {
         char c;
         write(async_cb_pipe[1], &c, 1);
@@ -519,7 +520,7 @@ callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
     SetEvent(async_cb_cond);
 #endif
 
-
+    /* Wait for the thread executing the ruby callback to signal it is done */
     WaitForSingleObject(cb.async_event, INFINITE);
     CloseHandle(cb.async_event);
 #endif
@@ -545,7 +546,7 @@ async_cb_event(void* unused)
     while (!w.stop) {
         rb_thread_blocking_region(async_cb_wait, &w, async_cb_stop, &w);
         if (w.cb != NULL) {
-
+            /* Start up a new ruby thread to run the ruby callback */
             rb_thread_create(async_cb_call, w.cb);
         }
     }
@@ -574,7 +575,7 @@ async_cb_event(void* unused)
 
     while (cb != NULL) {
         struct gvl_callback* next = cb->next;
-
+        /* Start up a new ruby thread to run the ruby callback */
         rb_thread_create(async_cb_call, cb);
         cb = next;
     }
@@ -606,7 +607,7 @@ async_cb_event(void* unused)
 
     while (cb != NULL) {
         struct gvl_callback* next = cb->next;
-
+        /* Start up a new ruby thread to run the ruby callback */
         rb_thread_create(async_cb_call, cb);
         cb = next;
     }
@@ -696,7 +697,7 @@ async_cb_call(void *data)
 
     callback_with_gvl(cb);
 
-
+    /* Signal the original native thread that the ruby code has completed */
 #ifdef _WIN32
     SetEvent(cb->async_event);
 #else
@@ -799,7 +800,7 @@ callback_with_gvl(void* data)
             break;
         }
 
-
+        /* Convert the native value into a custom ruby value */
         if (unlikely(cbInfo->parameterTypes[i]->nativeType == NATIVE_MAPPED)) {
             VALUE values[] = { param, Qnil };
             param = rb_funcall2(((MappedType *) cbInfo->parameterTypes[i])->rbConverter, id_from_native, 2, values);
@@ -853,7 +854,7 @@ callback_with_gvl(void* data)
         if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) {
             *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address;
         } else {
-
+            /* Default to returning NULL if not a value pointer object. handles nil case as well */
            *((void **) retval) = NULL;
        }
        break;
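The comments added to function_initialize spell out the accepted construction forms. A brief Ruby sketch of the same forms (values are illustrative; addr stands in for an existing FFI::Pointer to native code, and :convention => :stdcall is meaningful on Windows builds such as this x86-mingw32 gem):

    require 'ffi'

    # Callback built from a block
    add_one = FFI::Function.new(:int, [ :int ]) { |i| i + 1 }
    add_one.call(41)                                    # => 42

    # Callback built from an explicit Proc, with an options hash
    doubler = FFI::Function.new(:int, [ :int ], Proc.new { |i| i * 2 },
                                :convention => :stdcall)

    # Wrapping an existing native address (addr assumed to be an FFI::Pointer)
    # fn = FFI::Function.new(:int, [ :int ], addr)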
data/ext/ffi_c/Function.h
CHANGED
@@ -41,7 +41,7 @@ typedef struct FunctionType_ FunctionType;
 #include "ClosurePool.h"
 
 struct FunctionType_ {
-    Type type;
+    Type type; /* The native type of a FunctionInfo object */
     VALUE rbReturnType;
     VALUE rbParameterTypes;
 
data/ext/ffi_c/MemoryPointer.c
CHANGED
@@ -144,8 +144,9 @@ memptr_mark(Pointer* ptr)
 }
 
 static VALUE
-memptr_s_from_string(VALUE klass, VALUE s)
+memptr_s_from_string(VALUE klass, VALUE to_str)
 {
+    VALUE s = StringValue(to_str);
     VALUE args[] = { INT2FIX(1), LONG2NUM(RSTRING_LEN(s) + 1), Qfalse };
     VALUE obj = rb_class_new_instance(3, args, klass);
     rb_funcall(obj, rb_intern("put_string"), 2, INT2FIX(0), s);
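With the StringValue conversion in place, from_string now coerces its argument via to_str and raises TypeError for anything that cannot be coerced, which is the nil case the new spec further down covers. A small usage sketch (not from this diff):

    require 'ffi'

    m = FFI::MemoryPointer.from_string("hello")
    m.read_string                          # => "hello" (NUL-terminated copy, m.total == 6)

    FFI::MemoryPointer.from_string(nil)    # raises TypeError in 1.1.4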
data/ext/ffi_c/MethodHandle.c
CHANGED
@@ -311,7 +311,7 @@ prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t er
     caddr_t ptr = (caddr_t) code;
 
     memcpy(ptr, &ffi_trampoline, trampoline_size());
-
+    /* Patch the context and function addresses into the stub code */
     *(intptr_t *)(ptr + trampoline_ctx_offset) = (intptr_t) closure;
     *(intptr_t *)(ptr + trampoline_func_offset) = (intptr_t) custom_trampoline;
 
data/ext/ffi_c/Pointer.c
CHANGED
@@ -181,7 +181,7 @@ ptr_initialize_copy(VALUE self, VALUE other)
     dst->memory.size = src->size;
     dst->memory.typeSize = src->typeSize;
 
-
+    /* finally, copy the actual memory contents */
     memcpy(dst->memory.address, src->address, src->size);
 
     return self;
data/ext/ffi_c/Struct.c
CHANGED
@@ -139,11 +139,11 @@ struct_initialize_copy(VALUE self, VALUE other)
     dst->rbLayout = src->rbLayout;
     dst->layout = src->layout;
 
-
-
-
-
-
+    /*
+     * A new MemoryPointer instance is allocated here instead of just calling
+     * #dup on rbPointer, since the Pointer may not know its length, or may
+     * be longer than just this struct.
+     */
     if (src->pointer->address != NULL) {
         dst->rbPointer = rbffi_MemoryPointer_NewInstance(1, src->layout->size, false);
         dst->pointer = MEMORY(dst->rbPointer);
data/ext/ffi_c/StructByValue.c
CHANGED
@@ -87,7 +87,7 @@ sbv_initialize(VALUE self, VALUE rbStructClass)
     sbv->rbStructClass = rbStructClass;
     sbv->rbStructLayout = rbLayout;
 
-
+    /* We can just use everything from the ffi_type directly */
     *sbv->base.ffiType = *layout->base.ffiType;
 
     return self;
data/ext/ffi_c/StructLayout.c
CHANGED
@@ -281,7 +281,7 @@ array_field_put(VALUE self, VALUE pointer, VALUE value)
         rb_raise(rb_eIndexError, "array too large");
     }
 
-
+    /* clear the contents in case of a short write */
     checkWrite(memory);
     checkBounds(memory, f->offset, f->type->ffiType->size);
     if (count < array->length) {
@@ -289,7 +289,7 @@ array_field_put(VALUE self, VALUE pointer, VALUE value)
             0, (array->length - count) * array->componentType->ffiType->size);
     }
 
-
+    /* now copy each element in */
     if ((op = get_memory_op(array->componentType)) != NULL) {
 
         for (i = 0; i < count; ++i) {
@@ -357,7 +357,7 @@ struct_layout_initialize(VALUE self, VALUE fields, VALUE size, VALUE align)
     layout->fieldCount = (int) RARRAY_LEN(fields);
     layout->rbFieldMap = rb_hash_new();
     layout->rbFieldNames = rb_ary_new2(layout->fieldCount);
-    layout->size = FFI_ALIGN(NUM2INT(size), NUM2INT(align));
+    layout->size = (int) FFI_ALIGN(NUM2INT(size), NUM2INT(align));
     layout->align = NUM2INT(align);
     layout->fields = xcalloc(layout->fieldCount, sizeof(StructField *));
     layout->ffiTypes = xcalloc(layout->fieldCount + 1, sizeof(ffi_type *));
@@ -423,7 +423,7 @@ struct_layout_union_bang(VALUE self)
 
     for (i = 0; alignment_types[i] != NULL; ++i) {
         if (alignment_types[i]->alignment == layout->align) {
-            t = alignment_types[i];
+            t = (ffi_type *) alignment_types[i];
             break;
         }
     }
@@ -432,7 +432,7 @@ struct_layout_union_bang(VALUE self)
         return Qnil;
     }
 
-    count = layout->size / t->size;
+    count = (int) layout->size / t->size;
     xfree(layout->ffiTypes);
     layout->ffiTypes = xcalloc(count + 1, sizeof(ffi_type *));
     layout->base.ffiType->elements = layout->ffiTypes;
data/ext/ffi_c/Thread.c
CHANGED
@@ -76,7 +76,7 @@ rbffi_thread_has_gvl_p(void)
     return rbffi_active_thread.valid && pthread_equal(rbffi_active_thread.id, pthread_self());
 #endif
 }
-#endif
+#endif /* HAVE_RUBY_THREAD_HAS_GVL_P */
 
 #ifndef HAVE_RB_THREAD_BLOCKING_REGION
 
@@ -322,5 +322,5 @@ rbffi_thread_blocking_region(VALUE (*func)(void *), void *data1, void (*ubf)(voi
 
 #endif /* !_WIN32 */
 
-#endif
+#endif /* HAVE_RB_THREAD_BLOCKING_REGION */
 
data/ext/ffi_c/Type.c
CHANGED
@@ -193,7 +193,7 @@ rbffi_type_size(VALUE type)
             }
         }
 
-
+        /* Not found - call up to the ruby version to resolve */
         return NUM2INT(rb_funcall2(rbffi_FFIModule, id_type_size, 1, &type));
 
     } else {
@@ -321,14 +321,14 @@ rbffi_Type_Init(VALUE moduleFFI)
     rb_define_method(rbffi_TypeClass, "alignment", type_alignment, 0);
     rb_define_method(rbffi_TypeClass, "inspect", type_inspect, 0);
 
-
+    /* Make Type::Builtin non-allocatable */
     rb_undef_method(CLASS_OF(classBuiltinType), "new");
     rb_define_method(classBuiltinType, "inspect", builtin_type_inspect, 0);
 
     rb_global_variable(&rbffi_TypeClass);
     rb_global_variable(&classBuiltinType);
 
-
+    /* Define all the builtin types */
 #define T(x, ffiType) do { \
     VALUE t = Qnil; \
     rb_define_const(rbffi_TypeClass, #x, t = builtin_type_new(classBuiltinType, NATIVE_##x, ffiType, #x)); \
data/ext/ffi_c/Types.c
CHANGED
@@ -98,8 +98,10 @@ rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr)
         }
 
         case NATIVE_MAPPED: {
-
-
+            /*
+             * For mapped types, first convert to the real native type, then upcall to
+             * ruby to convert to the expected return type
+             */
             MappedType* m = (MappedType *) type;
             VALUE values[2], rbReturnValue;
 
data/ext/ffi_c/ffi.c
CHANGED
@@ -59,7 +59,7 @@ Init_ffi_c(void) {
     rb_global_variable(&moduleFFI);
 
 
-
+    /* FFI::Type needs to be initialized before most other classes */
     rbffi_Type_Init(moduleFFI);
 
     rbffi_DataConverter_Init(moduleFFI);
data/lib/1.8/ffi_c.so
CHANGED
Binary file
data/lib/1.9/ffi_c.so
CHANGED
Binary file
data/spec/ffi/rbx/memory_pointer_spec.rb
CHANGED
@@ -16,6 +16,10 @@ describe "MemoryPointer" do
     m.type_size.should eq 1
   end
 
+  it "does not make a pointer from non-strings" do
+    expect { FFI::MemoryPointer.from_string(nil) }.to raise_error(TypeError)
+  end
+
   it "makes a pointer from a string with multibyte characters" do
     m = FFI::MemoryPointer.from_string("ぱんだ")
     m.total.should eq 10
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: ffi
 version: !ruby/object:Gem::Version
-  hash:
+  hash: 27
   prerelease:
   segments:
   - 1
   - 1
-  - 3
-  version: 1.1.3
+  - 4
+  version: 1.1.4
 platform: x86-mingw32
 authors:
 - Wayne Meissner
@@ -15,7 +15,7 @@ autorequire:
 bindir: bin
 cert_chain: []
 
-date: 2012-
+date: 2012-08-04 00:00:00 +10:00
 default_executable:
 dependencies: []
 