grpc 1.72.0-x86_64-darwin → 1.74.0.pre2-x86_64-darwin
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/src/ruby/ext/grpc/extconf.rb +3 -1
- data/src/ruby/ext/grpc/rb_call.c +1 -8
- data/src/ruby/ext/grpc/rb_channel.c +72 -568
- data/src/ruby/ext/grpc/rb_channel.h +0 -3
- data/src/ruby/ext/grpc/rb_completion_queue.c +26 -14
- data/src/ruby/ext/grpc/rb_completion_queue.h +1 -7
- data/src/ruby/ext/grpc/rb_grpc.c +9 -5
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +1 -1
- data/src/ruby/ext/grpc/rb_loader.c +0 -4
- data/src/ruby/ext/grpc/rb_server.c +31 -50
- data/src/ruby/lib/grpc/3.1/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/3.2/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/3.3/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/3.4/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/generic/client_stub.rb +4 -4
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/core_spec.rb +22 -0
- data/src/ruby/spec/generic/active_call_spec.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +2 -6
- data/src/ruby/spec/generic/rpc_server_spec.rb +1 -1
- metadata +5 -5
- data/grpc_c.64-msvcrt.ruby +0 -0
- data/src/ruby/lib/grpc/3.0/grpc_c.bundle +0 -0
--- data/src/ruby/ext/grpc/rb_channel.c (1.72.0)
+++ data/src/ruby/ext/grpc/rb_channel.c (1.74.0.pre2)
@@ -59,137 +59,30 @@ static VALUE grpc_rb_cChannel = Qnil;
 /* Used during the conversion of a hash to channel args during channel setup */
 static VALUE grpc_rb_cChannelArgs;
 
-typedef struct bg_watched_channel {
-  grpc_channel* channel;
-  // these fields must only be accessed under global_connection_polling_mu
-  struct bg_watched_channel* next;
-  int channel_destroyed;
-  int refcount;
-} bg_watched_channel;
-
 /* grpc_rb_channel wraps a grpc_channel. */
 typedef struct grpc_rb_channel {
-  VALUE credentials;
-  grpc_channel_args args;
-  /* The actual channel (protected in a wrapper to tell when it's safe to
-   * destroy) */
-  bg_watched_channel* bg_wrapped;
-} grpc_rb_channel;
-
-typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;
-
-typedef struct watch_state_op {
-  watch_state_op_type op_type;
-  // from event.success
-  union {
-    struct {
-      int success;
-      // has been called back due to a cq next call
-      int called_back;
-    } api_callback_args;
-    struct {
-      bg_watched_channel* bg;
-    } continuous_watch_callback_args;
-  } op;
-} watch_state_op;
-
-static bg_watched_channel* bg_watched_channel_list_head = NULL;
-
-static void grpc_rb_channel_try_register_connection_polling(
-    bg_watched_channel* bg);
-static void* channel_init_try_register_connection_polling_without_gil(
-    void* arg);
-
-typedef struct channel_init_try_register_stack {
   grpc_channel* channel;
-  grpc_rb_channel* wrapper;
-} channel_init_try_register_stack;
-
-static grpc_completion_queue* g_channel_polling_cq;
-static gpr_mu global_connection_polling_mu;
-static gpr_cv global_connection_polling_cv;
-static int g_abort_channel_polling = 0;
-static gpr_once g_once_init = GPR_ONCE_INIT;
-static VALUE g_channel_polling_thread = Qnil;
-
-static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
-static bg_watched_channel* bg_watched_channel_list_create_and_add(
-    grpc_channel* channel);
-static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
-static void run_poll_channels_loop_unblocking_func(void* arg);
-static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg);
-
-// Needs to be called under global_connection_polling_mu
-static void grpc_rb_channel_watch_connection_state_op_complete(
-    watch_state_op* op, int success) {
-  GRPC_RUBY_ASSERT(!op->op.api_callback_args.called_back);
-  op->op.api_callback_args.called_back = 1;
-  op->op.api_callback_args.success = success;
-  // wake up the watch API call that's waiting on this op
-  gpr_cv_broadcast(&global_connection_polling_cv);
-}
-
-/* Avoids destroying a channel twice. */
-static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
-  gpr_mu_lock(&global_connection_polling_mu);
-  GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
-  if (!bg->channel_destroyed) {
-    grpc_channel_destroy(bg->channel);
-    bg->channel_destroyed = 1;
-  }
-  bg->refcount--;
-  if (bg->refcount == 0) {
-    bg_watched_channel_list_free_and_remove(bg);
-  }
-  gpr_mu_unlock(&global_connection_polling_mu);
-}
-
-static void* channel_safe_destroy_without_gil(void* arg) {
-  grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
-  return NULL;
-}
+} grpc_rb_channel;
 
-static void grpc_rb_channel_free_internal(void* p) {
-  grpc_rb_channel* ch = NULL;
+static void grpc_rb_channel_free(void* p) {
   if (p == NULL) {
     return;
   };
-  ch = (grpc_rb_channel*)p;
-  if (ch->bg_wrapped != NULL) {
-    /* assumption made here: it's ok to directly gpr_mu_lock the global
-     * connection polling mutex because we're in a finalizer,
-     * and we can count on this thread to not be interrupted or
-     * yield the gil. */
-    grpc_rb_channel_safe_destroy(ch->bg_wrapped);
-    grpc_rb_channel_args_destroy(&ch->args);
+  grpc_rb_channel* wrapper = (grpc_rb_channel*)p;
+  if (wrapper->channel != NULL) {
+    grpc_channel_destroy(wrapper->channel);
+    wrapper->channel = NULL;
   }
   xfree(p);
 }
 
-/* Destroys Channel instances. */
-static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }
-
-/* Protects the mark object from GC */
-static void grpc_rb_channel_mark(void* p) {
-  grpc_rb_channel* channel = NULL;
-  if (p == NULL) {
-    return;
-  }
-  channel = (grpc_rb_channel*)p;
-  if (channel->credentials != Qnil) {
-    rb_gc_mark(channel->credentials);
-  }
-}
-
-static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
-                                                {grpc_rb_channel_mark,
-                                                 grpc_rb_channel_free,
-                                                 GRPC_RB_MEMSIZE_UNAVAILABLE,
-                                                 {NULL, NULL}},
-                                                NULL,
-                                                NULL,
+static rb_data_type_t grpc_channel_data_type = {
+    "grpc_channel",
+    {NULL, grpc_rb_channel_free, GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
+    NULL,
+    NULL,
 #ifdef RUBY_TYPED_FREE_IMMEDIATELY
-                                                RUBY_TYPED_FREE_IMMEDIATELY
+    RUBY_TYPED_FREE_IMMEDIATELY
 #endif
 };
 
@@ -197,9 +90,7 @@ static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
 static VALUE grpc_rb_channel_alloc(VALUE cls) {
   grpc_ruby_init();
   grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
-  wrapper->bg_wrapped = NULL;
-  wrapper->credentials = Qnil;
-  MEMZERO(&wrapper->args, grpc_channel_args, 1);
+  wrapper->channel = NULL;
   return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
 }
 
@@ -212,53 +103,45 @@ static VALUE grpc_rb_channel_alloc(VALUE cls) {
 
   Creates channel instances. */
 static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
-  VALUE channel_args = Qnil;
-  VALUE credentials = Qnil;
+  VALUE rb_channel_args = Qnil;
+  VALUE rb_credentials = Qnil;
   VALUE target = Qnil;
   grpc_rb_channel* wrapper = NULL;
-  grpc_channel* ch = NULL;
-  grpc_channel_credentials* creds = NULL;
   char* target_chars = NULL;
-  channel_init_try_register_stack stack;
-
   grpc_ruby_fork_guard();
   /* "3" == 3 mandatory args */
-  rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
-
+  rb_scan_args(argc, argv, "3", &target, &rb_channel_args, &rb_credentials);
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
   target_chars = StringValueCStr(target);
-  grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
-  if (TYPE(credentials) == T_SYMBOL) {
-    if (id_insecure_channel != SYM2ID(credentials)) {
+  grpc_channel_args channel_args;
+  memset(&channel_args, 0, sizeof(channel_args));
+  grpc_rb_hash_convert_to_channel_args(rb_channel_args, &channel_args);
+  if (TYPE(rb_credentials) == T_SYMBOL) {
+    if (id_insecure_channel != SYM2ID(rb_credentials)) {
       rb_raise(rb_eTypeError,
               "bad creds symbol, want :this_channel_is_insecure");
       return Qnil;
     }
     grpc_channel_credentials* insecure_creds =
         grpc_insecure_credentials_create();
-    ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
+    wrapper->channel =
+        grpc_channel_create(target_chars, insecure_creds, &channel_args);
     grpc_channel_credentials_release(insecure_creds);
   } else {
-    wrapper->credentials = credentials;
-    if (grpc_rb_is_channel_credentials(credentials)) {
-      creds = grpc_rb_get_wrapped_channel_credentials(credentials);
-    } else if (grpc_rb_is_xds_channel_credentials(credentials)) {
-      creds = grpc_rb_get_wrapped_xds_channel_credentials(credentials);
+    grpc_channel_credentials* creds;
+    if (grpc_rb_is_channel_credentials(rb_credentials)) {
+      creds = grpc_rb_get_wrapped_channel_credentials(rb_credentials);
+    } else if (grpc_rb_is_xds_channel_credentials(rb_credentials)) {
+      creds = grpc_rb_get_wrapped_xds_channel_credentials(rb_credentials);
     } else {
       rb_raise(rb_eTypeError,
                "bad creds, want ChannelCredentials or XdsChannelCredentials");
       return Qnil;
     }
-    ch = grpc_channel_create(target_chars, creds, &wrapper->args);
+    wrapper->channel = grpc_channel_create(target_chars, creds, &channel_args);
   }
-
-
-  stack.channel = ch;
-  stack.wrapper = wrapper;
-  rb_thread_call_without_gvl(
-      channel_init_try_register_connection_polling_without_gil, &stack, NULL,
-      NULL);
-  if (ch == NULL) {
+  grpc_rb_channel_args_destroy(&channel_args);
+  if (wrapper->channel == NULL) {
     rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
              target_chars);
     return Qnil;
@@ -268,27 +151,6 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
   return self;
 }
 
-typedef struct get_state_stack {
-  bg_watched_channel* bg;
-  int try_to_connect;
-  int out;
-} get_state_stack;
-
-static void* get_state_without_gil(void* arg) {
-  get_state_stack* stack = (get_state_stack*)arg;
-
-  gpr_mu_lock(&global_connection_polling_mu);
-  if (stack->bg->channel_destroyed) {
-    stack->out = GRPC_CHANNEL_SHUTDOWN;
-  } else {
-    stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
-                                                       stack->try_to_connect);
-  }
-  gpr_mu_unlock(&global_connection_polling_mu);
-
-  return NULL;
-}
-
 /*
   call-seq:
     ch.connectivity_state -> state
@@ -302,68 +164,17 @@ static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
                                                     VALUE self) {
   VALUE try_to_connect_param = Qfalse;
   grpc_rb_channel* wrapper = NULL;
-  get_state_stack stack;
-
   /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
   rb_scan_args(argc, argv, "01", &try_to_connect_param);
-
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-  if (wrapper->bg_wrapped == NULL) {
+  if (wrapper->channel == NULL) {
     rb_raise(rb_eRuntimeError, "closed!");
     return Qnil;
   }
-
-  stack.bg = wrapper->bg_wrapped;
-  stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
-  rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);
-
-  return LONG2NUM(stack.out);
-}
-
-typedef struct watch_state_stack {
-  bg_watched_channel* bg_wrapped;
-  gpr_timespec deadline;
-  int last_state;
-} watch_state_stack;
-
-static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
-  watch_state_stack* stack = (watch_state_stack*)arg;
-  watch_state_op* op = NULL;
-  void* success = (void*)0;
-
-  gpr_mu_lock(&global_connection_polling_mu);
-  // it's unsafe to do a "watch" after "channel polling abort" because the cq
-  // has been shut down.
-  if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
-    gpr_mu_unlock(&global_connection_polling_mu);
-    return (void*)0;
-  }
-  op = gpr_zalloc(sizeof(watch_state_op));
-  op->op_type = WATCH_STATE_API;
-  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
-                                        stack->last_state, stack->deadline,
-                                        g_channel_polling_cq, op);
-
-  while (!op->op.api_callback_args.called_back) {
-    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
-                gpr_inf_future(GPR_CLOCK_REALTIME));
-  }
-  if (op->op.api_callback_args.success) {
-    success = (void*)1;
-  }
-  gpr_free(op);
-  gpr_mu_unlock(&global_connection_polling_mu);
-
-  return success;
-}
-static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
-  bg_watched_channel* bg = (bg_watched_channel*)arg;
-  gpr_mu_lock(&global_connection_polling_mu);
-  if (!bg->channel_destroyed) {
-    grpc_channel_destroy(bg->channel);
-    bg->channel_destroyed = 1;
-  }
-  gpr_mu_unlock(&global_connection_polling_mu);
+  bool try_to_connect = RTEST(try_to_connect_param) ? true : false;
+  int state =
+      grpc_channel_check_connectivity_state(wrapper->channel, try_to_connect);
+  return LONG2NUM(state);
 }
 
 /* Wait until the channel's connectivity state becomes different from
@@ -375,79 +186,46 @@ static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
  * */
 static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
                                                       VALUE last_state,
-                                                      VALUE deadline) {
+                                                      VALUE rb_deadline) {
   grpc_rb_channel* wrapper = NULL;
-  watch_state_stack stack;
-  void* op_success = 0;
-
   grpc_ruby_fork_guard();
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-
-  if (wrapper->bg_wrapped == NULL) {
+  if (wrapper->channel == NULL) {
     rb_raise(rb_eRuntimeError, "closed!");
     return Qnil;
   }
-
   if (!FIXNUM_P(last_state)) {
     rb_raise(
         rb_eTypeError,
         "bad type for last_state. want a GRPC::Core::ChannelState constant");
     return Qnil;
   }
-
-  stack.bg_wrapped = wrapper->bg_wrapped;
-  stack.deadline = grpc_rb_time_timeval(deadline, 0);
-  stack.last_state = NUM2LONG(last_state);
-
-  op_success = rb_thread_call_without_gvl(
-      wait_for_watch_state_op_complete_without_gvl, &stack,
-      wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
-
-  return op_success ? Qtrue : Qfalse;
-}
-
-static void grpc_rb_channel_maybe_recreate_channel_after_fork(
-    grpc_rb_channel* wrapper, VALUE target) {
-
-
-
-
-  if (wrapper->bg_wrapped->channel_destroyed) {
-
-
-
-
-
-
-    const char* target_str = StringValueCStr(target);
-    grpc_channel* channel;
-    if (wrapper->credentials == Qnil) {
-      grpc_channel_credentials* insecure_creds =
-          grpc_insecure_credentials_create();
-      channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
-      grpc_channel_credentials_release(insecure_creds);
-    } else {
-      grpc_channel_credentials* creds;
-      if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
-        creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
-      } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
-        creds =
-            grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
-      } else {
-        rb_raise(rb_eTypeError,
-                 "failed to re-create channel after fork: bad creds, want "
-                 "ChannelCredentials or XdsChannelCredentials");
-        return;
-      }
-      channel = grpc_channel_create(target_str, creds, &wrapper->args);
-    }
-    // re-register with channel polling thread
-    channel_init_try_register_stack stack;
-    stack.channel = channel;
-    stack.wrapper = wrapper;
-    rb_thread_call_without_gvl(
-        channel_init_try_register_connection_polling_without_gil, &stack, NULL,
-        NULL);
+  const void* tag = &wrapper;
+  gpr_timespec deadline = grpc_rb_time_timeval(rb_deadline, 0);
+  grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(NULL);
+  grpc_channel_watch_connectivity_state(wrapper->channel, NUM2LONG(last_state),
+                                        deadline, cq, tag);
+  grpc_event event =
+      rb_completion_queue_pluck(cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                "grpc_channel_watch_connectivity_state");
+  // TODO(apolcyn): this CQ would leak if the thread were killed
+  // while polling queue_pluck, e.g. with Thread#kill. One fix may be
+  // to make this CQ owned by the channel object. Another fix could be to
+  // busy-poll watch_connectivity_state with a short deadline, without
+  // the GIL, rather than just polling CQ pluck, and destroy the CQ
+  // before exitting the no-GIL block.
+  grpc_completion_queue_shutdown(cq);
+  grpc_rb_completion_queue_destroy(cq);
+  if (event.type == GRPC_OP_COMPLETE) {
+    return Qtrue;
+  } else if (event.type == GRPC_QUEUE_TIMEOUT) {
+    return Qfalse;
+  } else {
+    grpc_absl_log_int(
+        GPR_ERROR,
+        "GRPC_RUBY: unexpected grpc_channel_watch_connectivity_state result:",
+        event.type);
+    return Qfalse;
   }
 }
 
@@ -466,7 +244,6 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
   grpc_slice host_slice;
   grpc_slice* host_slice_ptr = NULL;
   char* tmp_str = NULL;
-
   grpc_ruby_fork_guard();
   if (host != Qnil) {
     host_slice =
@@ -479,40 +256,29 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
   if (parent != Qnil) {
     parent_call = grpc_rb_get_wrapped_call(parent);
   }
-
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-  if (wrapper->bg_wrapped == NULL) {
+  if (wrapper->channel == NULL) {
     rb_raise(rb_eRuntimeError, "closed!");
     return Qnil;
   }
-  // TODO(apolcyn): only do this check if fork support is enabled
-  rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
-  grpc_rb_channel_maybe_recreate_channel_after_fork(
-      wrapper, rb_ivar_get(self, id_target));
-  rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));
-
   cq = grpc_completion_queue_create_for_pluck(NULL);
   method_slice =
       grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
-  call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
-                                  flags, cq, method_slice, host_slice_ptr,
+  call = grpc_channel_create_call(wrapper->channel, parent_call, flags, cq,
+                                  method_slice, host_slice_ptr,
                                   grpc_rb_time_timeval(deadline,
                                                        /* absolute time */ 0),
                                   NULL);
-
   if (call == NULL) {
     tmp_str = grpc_slice_to_c_string(method_slice);
     rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
     return Qnil;
   }
-
   grpc_slice_unref(method_slice);
   if (host_slice_ptr != NULL) {
     grpc_slice_unref(host_slice);
   }
-
   res = grpc_rb_wrap_call(call, cq);
-
   /* Make this channel an instance attribute of the call so that it is not GCed
    * before the call. */
   rb_ivar_set(res, id_channel, self);
@@ -524,14 +290,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
  * this */
 static VALUE grpc_rb_channel_destroy(VALUE self) {
   grpc_rb_channel* wrapper = NULL;
-
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-  if (wrapper->bg_wrapped != NULL) {
-    rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
-                               wrapper->bg_wrapped, NULL, NULL);
-    wrapper->bg_wrapped = NULL;
+  if (wrapper->channel != NULL) {
+    grpc_channel_destroy(wrapper->channel);
+    wrapper->channel = NULL;
   }
-
   return Qnil;
 }
 
@@ -540,271 +303,13 @@ static VALUE grpc_rb_channel_get_target(VALUE self) {
   grpc_rb_channel* wrapper = NULL;
   VALUE res = Qnil;
   char* target = NULL;
-
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-  target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
+  target = grpc_channel_get_target(wrapper->channel);
   res = rb_str_new2(target);
   gpr_free(target);
-
   return res;
 }
 
-/* Needs to be called under global_connection_polling_mu */
-static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
-  bg_watched_channel* cur = bg_watched_channel_list_head;
-
-  while (cur != NULL) {
-    if (cur == target) {
-      return 1;
-    }
-    cur = cur->next;
-  }
-
-  return 0;
-}
-
-/* Needs to be called under global_connection_polling_mu */
-static bg_watched_channel* bg_watched_channel_list_create_and_add(
-    grpc_channel* channel) {
-  bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));
-
-  watched->channel = channel;
-  watched->next = bg_watched_channel_list_head;
-  watched->refcount = 1;
-  bg_watched_channel_list_head = watched;
-  return watched;
-}
-
-/* Needs to be called under global_connection_polling_mu */
-static void bg_watched_channel_list_free_and_remove(
-    bg_watched_channel* target) {
-  bg_watched_channel* bg = NULL;
-
-  GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(target));
-  GRPC_RUBY_ASSERT(target->channel_destroyed && target->refcount == 0);
-  if (bg_watched_channel_list_head == target) {
-    bg_watched_channel_list_head = target->next;
-    gpr_free(target);
-    return;
-  }
-  bg = bg_watched_channel_list_head;
-  while (bg != NULL && bg->next != NULL) {
-    if (bg->next == target) {
-      bg->next = bg->next->next;
-      gpr_free(target);
-      return;
-    }
-    bg = bg->next;
-  }
-  GRPC_RUBY_ASSERT(0);
-}
-
-/* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
- * it onto the background thread for constant watches. */
-static void* channel_init_try_register_connection_polling_without_gil(
-    void* arg) {
-  channel_init_try_register_stack* stack =
-      (channel_init_try_register_stack*)arg;
-
-  gpr_mu_lock(&global_connection_polling_mu);
-  stack->wrapper->bg_wrapped =
-      bg_watched_channel_list_create_and_add(stack->channel);
-  grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
-  gpr_mu_unlock(&global_connection_polling_mu);
-  return NULL;
-}
-
-// Needs to be called under global_connection_poolling_mu
-static void grpc_rb_channel_try_register_connection_polling(
-    bg_watched_channel* bg) {
-  grpc_connectivity_state conn_state;
-  watch_state_op* op = NULL;
-  if (bg->refcount == 0) {
-    GRPC_RUBY_ASSERT(bg->channel_destroyed);
-    bg_watched_channel_list_free_and_remove(bg);
-    return;
-  }
-  GRPC_RUBY_ASSERT(bg->refcount == 1);
-  if (bg->channel_destroyed || g_abort_channel_polling) {
-    return;
-  }
-  conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
-  if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
-    return;
-  }
-  GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
-  // prevent bg from being free'd by GC while background thread is watching it
-  bg->refcount++;
-  op = gpr_zalloc(sizeof(watch_state_op));
-  op->op_type = CONTINUOUS_WATCH;
-  op->op.continuous_watch_callback_args.bg = bg;
-  grpc_channel_watch_connectivity_state(bg->channel, conn_state,
-                                        gpr_inf_future(GPR_CLOCK_REALTIME),
-                                        g_channel_polling_cq, op);
-}
-
-// Note this loop breaks out with a single call of
-// "run_poll_channels_loop_no_gil".
-// This assumes that a ruby call the unblocking func
-// indicates process shutdown.
-// In the worst case, this stops polling channel connectivity
-// early and falls back to current behavior.
-static void* run_poll_channels_loop_no_gil(void* arg) {
-  grpc_event event;
-  watch_state_op* op = NULL;
-  bg_watched_channel* bg = NULL;
-  (void)arg;
-  grpc_absl_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");
-
-  gpr_mu_lock(&global_connection_polling_mu);
-  gpr_cv_broadcast(&global_connection_polling_cv);
-  gpr_mu_unlock(&global_connection_polling_mu);
-
-  for (;;) {
-    event = grpc_completion_queue_next(
-        g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
-    if (event.type == GRPC_QUEUE_SHUTDOWN) {
-      break;
-    }
-    gpr_mu_lock(&global_connection_polling_mu);
-    if (event.type == GRPC_OP_COMPLETE) {
-      op = (watch_state_op*)event.tag;
-      if (op->op_type == CONTINUOUS_WATCH) {
-        bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
-        bg->refcount--;
-        grpc_rb_channel_try_register_connection_polling(bg);
-        gpr_free(op);
-      } else if (op->op_type == WATCH_STATE_API) {
-        grpc_rb_channel_watch_connection_state_op_complete(
-            (watch_state_op*)event.tag, event.success);
-      } else {
-        GRPC_RUBY_ASSERT(0);
-      }
-    }
-    gpr_mu_unlock(&global_connection_polling_mu);
-  }
-  grpc_completion_queue_destroy(g_channel_polling_cq);
-  grpc_absl_log(
-      GPR_DEBUG,
-      "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
-      "loop");
-  return NULL;
-}
-
-static void run_poll_channels_loop_unblocking_func(void* arg) {
-  run_poll_channels_loop_unblocking_func_wrapper(arg);
-}
-
-// Notify the channel polling loop to cleanup and shutdown.
-static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg) {
-  bg_watched_channel* bg = NULL;
-  (void)arg;
-
-  gpr_mu_lock(&global_connection_polling_mu);
-  grpc_absl_log(
-      GPR_DEBUG,
-      "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
-      "connection polling");
-  // early out after first time through
-  if (g_abort_channel_polling) {
-    gpr_mu_unlock(&global_connection_polling_mu);
-    return NULL;
-  }
-  g_abort_channel_polling = 1;
-
-  // force pending watches to end by switching to shutdown state
-  bg = bg_watched_channel_list_head;
-  while (bg != NULL) {
-    if (!bg->channel_destroyed) {
-      grpc_channel_destroy(bg->channel);
-      bg->channel_destroyed = 1;
-    }
-    bg = bg->next;
-  }
-
-  grpc_absl_log_int(
-      GPR_DEBUG,
-      "GRPC_RUBY: cq shutdown on global polling cq. pid: ", getpid());
-  grpc_completion_queue_shutdown(g_channel_polling_cq);
-  gpr_cv_broadcast(&global_connection_polling_cv);
-  gpr_mu_unlock(&global_connection_polling_mu);
-  grpc_absl_log(
-      GPR_DEBUG,
-      "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
-      "connection polling");
-  return NULL;
-}
-
-// Poll channel connectivity states in background thread without the GIL.
-static VALUE run_poll_channels_loop(void* arg) {
-  (void)arg;
-  grpc_absl_log(
-      GPR_DEBUG,
-      "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
-  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
-                             run_poll_channels_loop_unblocking_func, NULL);
-  return Qnil;
-}
-
-static void* set_abort_channel_polling_without_gil(void* arg) {
-  (void)arg;
-  gpr_mu_lock(&global_connection_polling_mu);
-  g_abort_channel_polling = 1;
-  gpr_cv_broadcast(&global_connection_polling_cv);
-  gpr_mu_unlock(&global_connection_polling_mu);
-  return NULL;
-}
-
-static void do_basic_init() {
-  gpr_mu_init(&global_connection_polling_mu);
-  gpr_cv_init(&global_connection_polling_cv);
-}
-
-/* Temporary fix for
- * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
- * Transports in idle channels can get destroyed. Normally c-core re-connects,
- * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
- * only calls c-core's "completion_queu_pluck" API.
- * This uses a global background thread that calls
- * "completion_queue_next" on registered "watch_channel_connectivity_state"
- * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
- * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
- */
-void grpc_rb_channel_polling_thread_start() {
-  gpr_once_init(&g_once_init, do_basic_init);
-  GRPC_RUBY_ASSERT(!RTEST(g_channel_polling_thread));
-  GRPC_RUBY_ASSERT(!g_abort_channel_polling);
-  GRPC_RUBY_ASSERT(g_channel_polling_cq == NULL);
-
-  g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
-  g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);
-
-  if (!RTEST(g_channel_polling_thread)) {
-    grpc_absl_log(GPR_ERROR,
-                  "GRPC_RUBY: failed to spawn channel polling thread");
-    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
-                               NULL, NULL);
-    return;
-  }
-}
-
-void grpc_rb_channel_polling_thread_stop() {
-  if (!RTEST(g_channel_polling_thread)) {
-    grpc_absl_log(
-        GPR_ERROR,
-        "GRPC_RUBY: channel polling thread stop: thread was not started");
-    return;
-  }
-  rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func_wrapper,
-                             NULL, NULL, NULL);
-  rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
-  // state associated with the channel polling thread is destroyed, reset so
-  // we can start again later
-  g_channel_polling_thread = Qnil;
-  g_abort_channel_polling = false;
-  g_channel_polling_cq = NULL;
-}
-
 static void Init_grpc_propagate_masks() {
   /* Constants representing call propagation masks in grpc.h */
   VALUE grpc_rb_mPropagateMasks =
@@ -838,7 +343,6 @@ static void Init_grpc_connectivity_states() {
 }
 
 void Init_grpc_channel() {
-  rb_global_variable(&g_channel_polling_thread);
   grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
   rb_undef_alloc_func(grpc_rb_cChannelArgs);
   grpc_rb_cChannel =
@@ -883,5 +387,5 @@ void Init_grpc_channel() {
 grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
   grpc_rb_channel* wrapper = NULL;
   TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
-  return wrapper->bg_wrapped->channel;
+  return wrapper->channel;
 }
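
For orientation, here is a minimal Ruby sketch (not part of the packaged diff) that exercises the GRPC::Core::Channel entry points whose C implementations are rewritten above. The target address is a placeholder, and the snippet is an illustration rather than an excerpt from the gem:

    # Illustrative only: 'localhost:50051' is a hypothetical target.
    require 'grpc'

    ch = GRPC::Core::Channel.new('localhost:50051', {},
                                 :this_channel_is_insecure)

    # grpc_rb_channel_get_connectivity_state: 1.74.0.pre2 calls
    # grpc_channel_check_connectivity_state directly on the wrapped channel.
    state = ch.connectivity_state(true) # true => try_to_connect

    # grpc_rb_channel_watch_connectivity_state: 1.74.0.pre2 creates a
    # short-lived completion queue per call and plucks it; returns true if
    # the state changed before the deadline, false on timeout.
    changed = ch.watch_connectivity_state(state, Time.now + 5)
    puts "state changed before deadline: #{changed}"

    ch.destroy # now destroys the wrapped grpc_channel directly

The Ruby-visible methods keep their signatures across 1.72.0 and 1.74.0.pre2; what the diff removes is the machinery behind them. The gem-wide connectivity-polling thread, its global completion queue, and the refcounted bg_watched_channel list are gone, and the wrapper now owns a bare grpc_channel* that is created in Channel#initialize and destroyed in Channel#destroy or when the object is garbage collected.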