grpc 0.15.0-x64-mingw32 → 1.0.0.pre1-x64-mingw32
Potentially problematic release.
This version of grpc might be problematic.
- checksums.yaml +4 -4
- data/etc/roots.pem +784 -509
- data/grpc_c.32.ruby +0 -0
- data/grpc_c.64.ruby +0 -0
- data/src/ruby/ext/grpc/rb_byte_buffer.c +4 -1
- data/src/ruby/ext/grpc/rb_call.c +87 -54
- data/src/ruby/ext/grpc/rb_call.h +1 -1
- data/src/ruby/ext/grpc/rb_call_credentials.c +1 -30
- data/src/ruby/ext/grpc/rb_channel.c +25 -50
- data/src/ruby/ext/grpc/rb_channel_credentials.c +1 -31
- data/src/ruby/ext/grpc/rb_completion_queue.c +15 -134
- data/src/ruby/ext/grpc/rb_completion_queue.h +3 -7
- data/src/ruby/ext/grpc/rb_grpc.c +2 -4
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +2 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +4 -1
- data/src/ruby/ext/grpc/rb_server.c +81 -133
- data/src/ruby/ext/grpc/rb_server_credentials.c +4 -33
- data/src/ruby/lib/grpc/2.0/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/2.1/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/2.2/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/2.3/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/generic/active_call.rb +40 -55
- data/src/ruby/lib/grpc/generic/bidi_call.rb +21 -23
- data/src/ruby/lib/grpc/generic/client_stub.rb +20 -15
- data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -37
- data/src/ruby/lib/grpc/generic/service.rb +1 -1
- data/src/ruby/lib/grpc/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/test/client.rb +25 -7
- data/src/ruby/pb/test/server.rb +7 -5
- data/src/ruby/spec/call_spec.rb +1 -2
- data/src/ruby/spec/channel_spec.rb +2 -3
- data/src/ruby/spec/client_server_spec.rb +74 -59
- data/src/ruby/spec/generic/active_call_spec.rb +66 -86
- data/src/ruby/spec/generic/client_stub_spec.rb +27 -48
- data/src/ruby/spec/generic/rpc_server_spec.rb +4 -34
- data/src/ruby/spec/pb/health/checker_spec.rb +0 -2
- data/src/ruby/spec/server_spec.rb +20 -24
- metadata +4 -6
- data/src/ruby/spec/completion_queue_spec.rb +0 -42
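
The hunks below show the main API change in this release: the GRPC::Core::CompletionQueue class is removed from the C extension, and each wrapped object now creates and drains its own grpc_completion_queue internally. A rough before/after sketch of the server API at the Ruby level, inferred from the removed and added call-seq comments in rb_server.c below (illustrative only; the channel argument, tag and timeout values are placeholders):

    require 'grpc'

    # grpc 0.15.0: the caller owned a completion queue and passed it around
    cq     = GRPC::Core::CompletionQueue.new
    server = GRPC::Core::Server.new(cq, 'arg1' => 'value1')
    server.start
    rpc = server.request_call(cq, Object.new, Time.now + 10)  # nil on timeout
    server.destroy(cq, 10)

    # grpc 1.0.0.pre1: the queue is internal to the wrapped server
    server = GRPC::Core::Server.new('arg1' => 'value1')
    server.start
    rpc = server.request_call   # blocks until a new call arrives
    server.destroy(10)          # or destroy() to wait indefinitely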
data/src/ruby/ext/grpc/rb_channel_credentials.c
CHANGED
@@ -126,36 +126,6 @@ VALUE grpc_rb_wrap_channel_credentials(grpc_channel_credentials *c, VALUE mark)
   return rb_wrapper;
 }
 
-/* Clones ChannelCredentials instances.
-   Gives ChannelCredentials a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_channel_credentials_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_channel_credentials *orig_cred = NULL;
-  grpc_rb_channel_credentials *copy_cred = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a credentials object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_credentials_free) {
-    rb_raise(rb_eTypeError, "not a %s",
-             rb_obj_classname(grpc_rb_cChannelCredentials));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_channel_credentials,
-                       &grpc_rb_channel_credentials_data_type, orig_cred);
-  TypedData_Get_Struct(copy, grpc_rb_channel_credentials,
-                       &grpc_rb_channel_credentials_data_type, copy_cred);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the credentials
-   * wrapper object. */
-  MEMCPY(copy_cred, orig_cred, grpc_rb_channel_credentials, 1);
-  return copy;
-}
-
-
 /* The attribute used on the mark object to hold the pem_root_certs. */
 static ID id_pem_root_certs;
 
@@ -271,7 +241,7 @@ void Init_grpc_channel_credentials() {
   rb_define_method(grpc_rb_cChannelCredentials, "initialize",
                    grpc_rb_channel_credentials_init, -1);
   rb_define_method(grpc_rb_cChannelCredentials, "initialize_copy",
-                   grpc_rb_channel_credentials_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
   rb_define_method(grpc_rb_cChannelCredentials, "compose",
                    grpc_rb_channel_credentials_compose, -1);
   rb_define_module_function(grpc_rb_cChannelCredentials,
data/src/ruby/ext/grpc/rb_completion_queue.c
CHANGED
@@ -40,12 +40,9 @@
 
 #include <grpc/grpc.h>
 #include <grpc/support/time.h>
+#include <grpc/support/log.h>
 #include "rb_grpc.h"
 
-/* grpc_rb_cCompletionQueue is the ruby class that proxies
- * grpc_completion_queue. */
-static VALUE grpc_rb_cCompletionQueue = Qnil;
-
 /* Used to allow grpc_completion_queue_next call to release the GIL */
 typedef struct next_call_stack {
   grpc_completion_queue *cq;
@@ -55,23 +52,6 @@ typedef struct next_call_stack {
   volatile int interrupted;
 } next_call_stack;
 
-/* Calls grpc_completion_queue_next without holding the ruby GIL */
-static void *grpc_rb_completion_queue_next_no_gil(void *param) {
-  next_call_stack *const next_call = (next_call_stack*)param;
-  gpr_timespec increment = gpr_time_from_millis(20, GPR_TIMESPAN);
-  gpr_timespec deadline;
-  do {
-    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment);
-    next_call->event = grpc_completion_queue_next(next_call->cq,
-                                                  deadline, NULL);
-    if (next_call->event.type != GRPC_QUEUE_TIMEOUT ||
-        gpr_time_cmp(deadline, next_call->timeout) > 0) {
-      break;
-    }
-  } while (!next_call->interrupted);
-  return NULL;
-}
-
 /* Calls grpc_completion_queue_pluck without holding the ruby GIL */
 static void *grpc_rb_completion_queue_pluck_no_gil(void *param) {
   next_call_stack *const next_call = (next_call_stack*)param;
@@ -90,107 +70,32 @@ static void *grpc_rb_completion_queue_pluck_no_gil(void *param) {
   return NULL;
 }
 
-/* Shuts down and drains the completion queue if necessary.
- *
- * This is done when the ruby completion queue object is about to be GCed.
- */
-static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
-  next_call_stack next_call;
-  grpc_completion_type type;
-  int drained = 0;
-  MEMZERO(&next_call, next_call_stack, 1);
-
-  grpc_completion_queue_shutdown(cq);
-  next_call.cq = cq;
-  next_call.event.type = GRPC_QUEUE_TIMEOUT;
-  /* TODO: the timeout should be a module level constant that defaults
-   * to gpr_inf_future(GPR_CLOCK_REALTIME).
-   *
-   * - at the moment this does not work, it stalls. Using a small timeout like
-   *   this one works, and leads to fast test run times; a longer timeout was
-   *   causing unnecessary delays in the test runs.
-   *
-   * - investigate further, this is probably another example of C-level cleanup
-   *   not working consistently in all cases.
-   */
-  next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
-                                   gpr_time_from_micros(5e3, GPR_TIMESPAN));
-  do {
-    rb_thread_call_without_gvl(grpc_rb_completion_queue_next_no_gil,
-                               (void *)&next_call, NULL, NULL);
-    type = next_call.event.type;
-    if (type == GRPC_QUEUE_TIMEOUT) break;
-    if (type != GRPC_QUEUE_SHUTDOWN) {
-      ++drained;
-      rb_warning("completion queue shutdown: %d undrained events", drained);
-    }
-  } while (type != GRPC_QUEUE_SHUTDOWN);
-}
-
 /* Helper function to free a completion queue. */
-static void grpc_rb_completion_queue_destroy(void *p) {
-  grpc_completion_queue *cq = NULL;
-  if (p == NULL) {
-    return;
-  }
-  cq = (grpc_completion_queue *)p;
-  grpc_rb_completion_queue_shutdown_drain(cq);
+void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq) {
+  /* Every function that adds an event to a queue also synchronously plucks
+     that event from the queue, and holds a reference to the Ruby object that
+     holds the queue, so we only get to this point if all of those functions
+     have completed, and the queue is empty */
+  grpc_completion_queue_shutdown(cq);
   grpc_completion_queue_destroy(cq);
 }
 
-static rb_data_type_t grpc_rb_completion_queue_data_type = {
-    "grpc_completion_queue",
-    {GRPC_RB_GC_NOT_MARKED, grpc_rb_completion_queue_destroy,
-     GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
-    NULL, NULL,
-#ifdef RUBY_TYPED_FREE_IMMEDIATELY
-    /* cannot immediately free because grpc_rb_completion_queue_shutdown_drain
-     * calls rb_thread_call_without_gvl. */
-    0,
-#endif
-};
-
-/* Releases the c-level resources associated with a completion queue */
-static VALUE grpc_rb_completion_queue_close(VALUE self) {
-  grpc_completion_queue* cq = grpc_rb_get_wrapped_completion_queue(self);
-  grpc_rb_completion_queue_destroy(cq);
-  RTYPEDDATA_DATA(self) = NULL;
-  return Qnil;
-}
-
-/* Allocates a completion queue. */
-static VALUE grpc_rb_completion_queue_alloc(VALUE cls) {
-  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
-  if (cq == NULL) {
-    rb_raise(rb_eArgError, "could not create a completion queue: not sure why");
-  }
-  return TypedData_Wrap_Struct(cls, &grpc_rb_completion_queue_data_type, cq);
-}
-
 static void unblock_func(void *param) {
   next_call_stack *const next_call = (next_call_stack*)param;
   next_call->interrupted = 1;
 }
 
-/*
-
-grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
-                                                VALUE timeout) {
+/* Does the same thing as grpc_completion_queue_pluck, while properly releasing
+   the GVL and handling interrupts */
+grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag,
+                                     gpr_timespec deadline, void *reserved) {
   next_call_stack next_call;
   MEMZERO(&next_call, next_call_stack, 1);
-  TypedData_Get_Struct(self, grpc_completion_queue,
-                       &grpc_rb_completion_queue_data_type, next_call.cq);
-  if (TYPE(timeout) == T_NIL) {
-    next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
-  } else {
-    next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
-  }
-  if (TYPE(tag) == T_NIL) {
-    next_call.tag = NULL;
-  } else {
-    next_call.tag = ROBJECT(tag);
-  }
+  next_call.cq = queue;
+  next_call.timeout = deadline;
+  next_call.tag = tag;
   next_call.event.type = GRPC_QUEUE_TIMEOUT;
+  (void)reserved;
   /* Loop until we finish a pluck without an interruption. The internal
      pluck function runs either until it is interrupted or it gets an
      event, or time runs out.
@@ -210,27 +115,3 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
            next_call.event.type == GRPC_QUEUE_TIMEOUT);
   return next_call.event;
 }
-
-void Init_grpc_completion_queue() {
-  grpc_rb_cCompletionQueue =
-      rb_define_class_under(grpc_rb_mGrpcCore, "CompletionQueue", rb_cObject);
-
-  /* constructor: uses an alloc func without an initializer. Using a simple
-     alloc func works here as the grpc header does not specify any args for
-     this func, so no separate initialization step is necessary. */
-  rb_define_alloc_func(grpc_rb_cCompletionQueue,
-                       grpc_rb_completion_queue_alloc);
-
-  /* close: Provides a way to close the underlying file descriptor without
-     waiting for ruby garbage collection. */
-  rb_define_method(grpc_rb_cCompletionQueue, "close",
-                   grpc_rb_completion_queue_close, 0);
-}
-
-/* Gets the wrapped completion queue from the ruby wrapper */
-grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v) {
-  grpc_completion_queue *cq = NULL;
-  TypedData_Get_Struct(v, grpc_completion_queue,
-                       &grpc_rb_completion_queue_data_type, cq);
-  return cq;
-}
data/src/ruby/ext/grpc/rb_completion_queue.h
CHANGED
@@ -38,18 +38,14 @@
 
 #include <grpc/grpc.h>
 
-/* Gets the wrapped completion queue from the ruby wrapper */
-grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v);
+void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq);
 
 /**
  * Makes the implementation of CompletionQueue#pluck available in other files
  *
  * This avoids having code that holds the GIL repeated at multiple sites.
  */
-grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
-                                                VALUE timeout);
-
-/* Initializes the CompletionQueue class. */
-void Init_grpc_completion_queue();
+grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag,
+                                     gpr_timespec deadline, void *reserved);
 
 #endif /* GRPC_RB_COMPLETION_QUEUE_H_ */
data/src/ruby/ext/grpc/rb_grpc.c
CHANGED
@@ -46,7 +46,6 @@
 #include "rb_call_credentials.h"
 #include "rb_channel.h"
 #include "rb_channel_credentials.h"
-#include "rb_completion_queue.h"
 #include "rb_loader.h"
 #include "rb_server.h"
 #include "rb_server_credentials.h"
@@ -85,7 +84,7 @@ VALUE grpc_rb_cannot_init(VALUE self) {
 VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self) {
   (void)self;
   rb_raise(rb_eTypeError,
-           "initialization of %s only allowed from the gRPC native layer",
+           "Copy initialization of %s is not supported",
            rb_obj_classname(copy));
   return Qnil;
 }
@@ -318,7 +317,7 @@ void Init_grpc_c() {
   grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");
   grpc_rb_sNewServerRpc =
       rb_struct_define("NewServerRpc", "method", "host",
-                       "deadline", "metadata", "call", "cq", NULL);
+                       "deadline", "metadata", "call", NULL);
   grpc_rb_sStatus =
       rb_struct_define("Status", "code", "details", "metadata", NULL);
   sym_code = ID2SYM(rb_intern("code"));
@@ -326,7 +325,6 @@ void Init_grpc_c() {
   sym_metadata = ID2SYM(rb_intern("metadata"));
 
   Init_grpc_channel();
-  Init_grpc_completion_queue();
   Init_grpc_call();
   Init_grpc_call_credentials();
   Init_grpc_channel_credentials();
data/src/ruby/ext/grpc/rb_grpc_imports.generated.c
CHANGED
@@ -128,6 +128,7 @@ grpc_is_binary_header_type grpc_is_binary_header_import;
 grpc_call_error_to_string_type grpc_call_error_to_string_import;
 grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
 grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
+grpc_use_signal_type grpc_use_signal_import;
 grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
 grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
 grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
@@ -399,6 +400,7 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
   grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
   grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
+  grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");
   grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next");
   grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator");
   grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity");
data/src/ruby/ext/grpc/rb_grpc_imports.generated.h
CHANGED
@@ -335,6 +335,9 @@ extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_fr
 typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd);
 extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
 #define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import
+typedef void(*grpc_use_signal_type)(int signum);
+extern grpc_use_signal_type grpc_use_signal_import;
+#define grpc_use_signal grpc_use_signal_import
 typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it);
 extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
 #define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import
@@ -467,7 +470,7 @@ extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
 typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer);
 extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
 #define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import
-typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
+typedef int(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
 extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
 #define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import
 typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader);
data/src/ruby/ext/grpc/rb_server.c
CHANGED
@@ -38,6 +38,7 @@
 
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
+#include <grpc/support/log.h>
 #include "rb_call.h"
 #include "rb_channel_args.h"
 #include "rb_completion_queue.h"
@@ -53,53 +54,51 @@ static ID id_at;
 /* id_insecure_server is used to indicate that a server is insecure */
 static VALUE id_insecure_server;
 
-/* grpc_rb_server wraps a grpc_server.  It provides a peer ruby object,
-   'mark' to minimize copying when a server is created from ruby. */
+/* grpc_rb_server wraps a grpc_server. */
 typedef struct grpc_rb_server {
-  /* Holder of ruby objects involved in constructing the server */
-  VALUE mark;
   /* The actual server */
   grpc_server *wrapped;
   grpc_completion_queue *queue;
 } grpc_rb_server;
 
+static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) {
+  grpc_event ev;
+  if (server->wrapped != NULL) {
+    grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL);
+    ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL);
+    if (ev.type == GRPC_QUEUE_TIMEOUT) {
+      grpc_server_cancel_all_calls(server->wrapped);
+      rb_completion_queue_pluck(server->queue, NULL,
+                                gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+    }
+    grpc_server_destroy(server->wrapped);
+    grpc_rb_completion_queue_destroy(server->queue);
+    server->wrapped = NULL;
+    server->queue = NULL;
+  }
+}
+
 /* Destroys server instances. */
 static void grpc_rb_server_free(void *p) {
   grpc_rb_server *svr = NULL;
+  gpr_timespec deadline;
   if (p == NULL) {
     return;
   };
   svr = (grpc_rb_server *)p;
 
-  /* Deletes the wrapped object if the mark object is Qnil, which indicates
-     that no other object is the actual owner. */
-  /* grpc_server_shutdown does not exist. Change this to something that does
-     or delete it */
-  if (svr->wrapped != NULL && svr->mark == Qnil) {
-    // grpc_server_shutdown(svr->wrapped);
-    // Aborting to indicate a bug
-    abort();
-    grpc_server_destroy(svr->wrapped);
-  }
+  deadline = gpr_time_add(
+      gpr_now(GPR_CLOCK_REALTIME),
+      gpr_time_from_seconds(2, GPR_TIMESPAN));
 
-  xfree(p);
-}
+  destroy_server(svr, deadline);
 
-/* Protects the mark object from GC */
-static void grpc_rb_server_mark(void *p) {
-  grpc_rb_server *server = NULL;
-  if (p == NULL) {
-    return;
-  }
-  server = (grpc_rb_server *)p;
-  if (server->mark != Qnil) {
-    rb_gc_mark(server->mark);
-  }
+  xfree(p);
 }
 
 static const rb_data_type_t grpc_rb_server_data_type = {
     "grpc_server",
-    {grpc_rb_server_mark, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE,
+    {GRPC_RB_GC_NOT_MARKED, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE,
     {NULL, NULL}},
    NULL,
    NULL,
@@ -116,23 +115,20 @@ static const rb_data_type_t grpc_rb_server_data_type = {
 static VALUE grpc_rb_server_alloc(VALUE cls) {
   grpc_rb_server *wrapper = ALLOC(grpc_rb_server);
   wrapper->wrapped = NULL;
-  wrapper->mark = Qnil;
   return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper);
 }
 
 /*
   call-seq:
-    cq = CompletionQueue.new
-    server = Server.new(cq, {'arg1': 'value1'})
+    server = Server.new({'arg1': 'value1'})
 
   Initializes server instances. */
-static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) {
-  grpc_completion_queue *cq = NULL;
+static VALUE grpc_rb_server_init(VALUE self, VALUE channel_args) {
+  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
   grpc_rb_server *wrapper = NULL;
   grpc_server *srv = NULL;
   grpc_channel_args args;
   MEMZERO(&args, grpc_channel_args, 1);
-  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type,
                        wrapper);
   grpc_rb_hash_convert_to_channel_args(channel_args, &args);
@@ -148,41 +144,9 @@ static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) {
   wrapper->wrapped = srv;
   wrapper->queue = cq;
 
-  /* Add the cq as the server's mark object. This ensures the ruby cq can't be
-     GCed before the server */
-  wrapper->mark = cqueue;
   return self;
 }
 
-/* Clones Server instances.
-
-   Gives Server a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_server_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_server *orig_srv = NULL;
-  grpc_rb_server *copy_srv = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a server object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_server_free) {
-    rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(grpc_rb_cServer));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_server, &grpc_rb_server_data_type,
-                       orig_srv);
-  TypedData_Get_Struct(copy, grpc_rb_server, &grpc_rb_server_data_type,
-                       copy_srv);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the server wrapper
-     object. */
-  MEMCPY(copy_srv, orig_srv, grpc_rb_server, 1);
-  return copy;
-}
-
 /* request_call_stack holds various values used by the
  * grpc_rb_server_request_call function */
 typedef struct request_call_stack {
@@ -208,65 +172,57 @@ static void grpc_request_call_stack_cleanup(request_call_stack* st) {
 }
 
 /* call-seq:
-   cq = CompletionQueue.new
-   tag = Object.new
-   timeout = 10
-   server.request_call(cqueue, tag, timeout)
+   server.request_call
 
    Requests notification of a new call on a server. */
-static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
-                                         VALUE tag_new, VALUE timeout) {
+static VALUE grpc_rb_server_request_call(VALUE self) {
   grpc_rb_server *s = NULL;
   grpc_call *call = NULL;
   grpc_event ev;
   grpc_call_error err;
   request_call_stack st;
   VALUE result;
+  void *tag = (void*)&st;
+  grpc_completion_queue *call_queue = grpc_completion_queue_create(NULL);
   gpr_timespec deadline;
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
   if (s->wrapped == NULL) {
     rb_raise(rb_eRuntimeError, "destroyed!");
     return Qnil;
-  } else {
-    grpc_completion_queue *call_queue =
-        grpc_rb_get_wrapped_completion_queue(cqueue);
-    grpc_request_call_stack_init(&st);
-    /* call grpc_server_request_call, then wait for it to complete using
-     * pluck_event */
-    err = grpc_server_request_call(
-        s->wrapped, &call, &st.details, &st.md_ary,
-        call_queue, s->queue, ROBJECT(tag_new));
-    if (err != GRPC_CALL_OK) {
-      grpc_request_call_stack_cleanup(&st);
-      rb_raise(grpc_rb_eCallError,
-               "grpc_server_request_call failed: %s (code=%d)",
-               grpc_call_error_detail_of(err), err);
-      return Qnil;
-    }
-
-    ev = grpc_rb_completion_queue_pluck_event(s->mark, tag_new, timeout);
-    if (ev.type == GRPC_QUEUE_TIMEOUT) {
-      grpc_request_call_stack_cleanup(&st);
-      return Qnil;
-    }
-    if (!ev.success) {
-      grpc_request_call_stack_cleanup(&st);
-      rb_raise(grpc_rb_eCallError, "request_call completion failed");
-      return Qnil;
-    }
+  }
+  grpc_request_call_stack_init(&st);
+  /* call grpc_server_request_call, then wait for it to complete using
+   * pluck_event */
+  err = grpc_server_request_call(
+      s->wrapped, &call, &st.details, &st.md_ary,
+      call_queue, s->queue, tag);
+  if (err != GRPC_CALL_OK) {
+    grpc_request_call_stack_cleanup(&st);
+    rb_raise(grpc_rb_eCallError,
+             "grpc_server_request_call failed: %s (code=%d)",
+             grpc_call_error_detail_of(err), err);
+    return Qnil;
+  }
 
-    /* build the NewServerRpc struct result */
-    deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
-    result = rb_struct_new(
-        grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
-        rb_str_new2(st.details.host),
-        rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
-                   INT2NUM(deadline.tv_nsec)),
-        grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), cqueue, NULL);
+  ev = rb_completion_queue_pluck(s->queue, tag,
+                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+  if (!ev.success) {
    grpc_request_call_stack_cleanup(&st);
-    return result;
+    rb_raise(grpc_rb_eCallError, "request_call completion failed");
+    return Qnil;
   }
-  return Qnil;
+
+  /* build the NewServerRpc struct result */
+  deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
+  result = rb_struct_new(
+      grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
+      rb_str_new2(st.details.host),
+      rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
+                 INT2NUM(deadline.tv_nsec)),
+      grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),
+      NULL);
+  grpc_request_call_stack_cleanup(&st);
+  return result;
 }
 
 static VALUE grpc_rb_server_start(VALUE self) {
@@ -282,41 +238,33 @@ static VALUE grpc_rb_server_start(VALUE self) {
 
 /*
   call-seq:
-    cq = CompletionQueue.new
-    server = Server.new(cq, {'arg1': 'value1'})
+    server = Server.new({'arg1': 'value1'})
     ... // do stuff with server
     ...
     ... // to shutdown the server
-    server.destroy(cq)
+    server.destroy()
 
     ... // to shutdown the server with a timeout
-    server.destroy(cq, timeout)
+    server.destroy(timeout)
 
   Destroys server instances. */
 static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) {
-  VALUE cqueue = Qnil;
   VALUE timeout = Qnil;
-  grpc_completion_queue *cq = NULL;
-  grpc_event ev;
+  gpr_timespec deadline;
  grpc_rb_server *s = NULL;
 
-  /* "11" == 1 mandatory arg, 1 (timeout) is optional */
-  rb_scan_args(argc, argv, "11", &cqueue, &timeout);
-  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
+  /* "01" == 0 mandatory args, 1 (timeout) is optional */
+  rb_scan_args(argc, argv, "01", &timeout);
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
-
-  if (s->wrapped != NULL) {
-    grpc_server_shutdown_and_notify(s->wrapped, cq, NULL);
-    ev = grpc_rb_completion_queue_pluck_event(cqueue, Qnil, timeout);
-    if (!ev.success) {
-      rb_warn("server shutdown failed, cancelling the calls, objects may leak");
-      grpc_server_cancel_all_calls(s->wrapped);
-      return Qfalse;
-    }
-    grpc_server_destroy(s->wrapped);
-    s->wrapped = NULL;
+  if (TYPE(timeout) == T_NIL) {
+    deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+  } else {
+    deadline = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
   }
-  return Qtrue;
+
+  destroy_server(s, deadline);
+
+  return Qnil;
 }
 
 /*
@@ -376,13 +324,13 @@ void Init_grpc_server() {
   rb_define_alloc_func(grpc_rb_cServer, grpc_rb_server_alloc);
 
   /* Provides a ruby constructor and support for dup/clone. */
-  rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 2);
+  rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 1);
   rb_define_method(grpc_rb_cServer, "initialize_copy",
-                   grpc_rb_server_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
 
   /* Add the server methods. */
   rb_define_method(grpc_rb_cServer, "request_call",
-                   grpc_rb_server_request_call, 3);
+                   grpc_rb_server_request_call, 0);
   rb_define_method(grpc_rb_cServer, "start", grpc_rb_server_start, 0);
   rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, -1);
   rb_define_alias(grpc_rb_cServer, "close", "destroy");