grpc 1.73.0-x86-mingw32 → 1.74.0.pre2-x86-mingw32

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 0f009ac043665076f62192b00b239723381b8dc646373ac9314b71ae70ec7f5d
4
- data.tar.gz: 75a045bb329e1455d5ab433f0491669aa86eb3c2396609e6f5ea7a83e1b484c1
3
+ metadata.gz: e4ad97eb1656a9b7c064853b84ccf19f3744d809d09e2877dec7ab863d49a28b
4
+ data.tar.gz: 9d4bdc96e9061c35020824ea9a7ed56c0aa3e88d7e8d11f9da3607446771bdbc
5
5
  SHA512:
6
- metadata.gz: eac250764c78bf7f4f1c3616c7667d17993276ddf350144aaceaecd4179bb0fe083f9788de41d4dec6cb1a088a324c7b82d691efda79e12d5eae33bcd5c6e255
7
- data.tar.gz: 8ff2716597bc52dbb245e1b2439472ec25e53c818567237fa504ee7edae2e5de823a35f57e12ab1aeb8b75a6db86cbdb950dfc05612b4204c2ffc9c743c5f8e4
6
+ metadata.gz: 9b11b71e422079f1ba6d2e0d2302510dee943d4ef51e890660468b67bfa756a74d0a69ea69d15bceaab44bdf38fa5a44fbef5105ce265c94b81463ca72802b3f
7
+ data.tar.gz: b725f9dfebd0aba5c0b8dcd6f74c1bc18468892e0a2e0509565bd85d4f2d48f3b79aab461de49a384bebcc3c29d8bdb1650f9cc3f60588e3396c822001512806
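If you want to verify the new artifacts against checksums.yaml locally, here is a minimal Ruby sketch. It assumes you have already extracted metadata.gz and data.tar.gz from the downloaded .gem (a .gem file is a plain tar archive), and the file names are only illustrative of that layout:

require 'digest'

# metadata.gz and data.tar.gz extracted from the downloaded .gem,
# e.g. via `tar -xf grpc-1.74.0.pre2-x86-mingw32.gem`
%w[metadata.gz data.tar.gz].each do |name|
  puts "#{name}: #{Digest::SHA256.file(name).hexdigest}"
end

The printed digests should match the SHA256 entries above for the corresponding version.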
Binary file
@@ -114,6 +114,8 @@ env_append 'CPPFLAGS', '-DGRPC_XDS_USER_AGENT_NAME_SUFFIX="\"RUBY\""'
114
114
  require_relative '../../lib/grpc/version'
115
115
  env_append 'CPPFLAGS', '-DGRPC_XDS_USER_AGENT_VERSION_SUFFIX="\"' + GRPC::VERSION + '\""'
116
116
  env_append 'CPPFLAGS', '-DGRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK=1'
117
+ env_append 'CPPFLAGS', '-DGRPC_ENABLE_FORK_SUPPORT=1'
118
+ env_append 'CPPFLAGS', '-DGRPC_ENABLE_FORK_SUPPORT_DEFAULT=false'
117
119
 
118
120
  output_dir = File.expand_path(RbConfig::CONFIG['topdir'])
119
121
  grpc_lib_dir = File.join(output_dir, 'libs', grpc_config)
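The two new CPPFLAGS compile client-side fork support into the native extension while keeping it disabled by default. A minimal sketch of how a user might opt in at runtime is below; only GRPC.prefork appears in this diff, so the GRPC_ENABLE_FORK_SUPPORT environment variable and the postfork helper names are assumptions:

# Opt in before the library is loaded; fork support stays off otherwise.
ENV['GRPC_ENABLE_FORK_SUPPORT'] = '1'  # assumed runtime switch
require 'grpc'

GRPC.prefork             # quiesce gRPC background threads before forking
pid = fork do
  GRPC.postfork_child    # assumed helper: re-initialize gRPC in the child
  # ... child issues its own RPCs ...
end
GRPC.postfork_parent     # assumed helper: resume gRPC in the parent
Process.wait(pid)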
@@ -808,12 +808,6 @@ struct call_run_batch_args {
808
808
  run_batch_stack* st;
809
809
  };
810
810
 
811
- static void cancel_call_unblock_func(void* arg) {
812
- grpc_absl_log(GPR_DEBUG, "GRPC_RUBY: cancel_call_unblock_func");
813
- grpc_call* call = (grpc_call*)arg;
814
- grpc_call_cancel(call, NULL);
815
- }
816
-
817
811
  static VALUE grpc_rb_call_run_batch_try(VALUE value_args) {
818
812
  grpc_rb_fork_unsafe_begin();
819
813
  struct call_run_batch_args* args = (struct call_run_batch_args*)value_args;
@@ -836,8 +830,7 @@ static VALUE grpc_rb_call_run_batch_try(VALUE value_args) {
836
830
  grpc_call_error_detail_of(err), err);
837
831
  }
838
832
  ev = rb_completion_queue_pluck(args->call->queue, tag,
839
- gpr_inf_future(GPR_CLOCK_REALTIME),
840
- cancel_call_unblock_func, args->call->wrapped);
833
+ gpr_inf_future(GPR_CLOCK_REALTIME), "call op");
841
834
  if (!ev.success) {
842
835
  rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow");
843
836
  }
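The batch path now hands rb_completion_queue_pluck a descriptive reason string instead of a dedicated cancel unblock function; the Ruby-visible batch API is unchanged. A rough sketch of driving a call through GRPC::Core::Call#run_batch, assuming a reachable server at a hypothetical address (method path, deadline and ops are illustrative only):

require 'grpc'

deadline = Time.now + 5
ch = GRPC::Core::Channel.new('localhost:50051', nil, :this_channel_is_insecure)
call = ch.create_call(nil, nil, '/some.Service/SomeMethod', nil, deadline)

# Each op maps a GRPC::Core::CallOps constant to its payload.
call.run_batch(
  GRPC::Core::CallOps::SEND_INITIAL_METADATA => {},
  GRPC::Core::CallOps::SEND_CLOSE_FROM_CLIENT => nil
)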
@@ -59,137 +59,30 @@ static VALUE grpc_rb_cChannel = Qnil;
59
59
  /* Used during the conversion of a hash to channel args during channel setup */
60
60
  static VALUE grpc_rb_cChannelArgs;
61
61
 
62
- typedef struct bg_watched_channel {
63
- grpc_channel* channel;
64
- // these fields must only be accessed under global_connection_polling_mu
65
- struct bg_watched_channel* next;
66
- int channel_destroyed;
67
- int refcount;
68
- } bg_watched_channel;
69
-
70
62
  /* grpc_rb_channel wraps a grpc_channel. */
71
63
  typedef struct grpc_rb_channel {
72
- VALUE credentials;
73
- grpc_channel_args args;
74
- /* The actual channel (protected in a wrapper to tell when it's safe to
75
- * destroy) */
76
- bg_watched_channel* bg_wrapped;
77
- } grpc_rb_channel;
78
-
79
- typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;
80
-
81
- typedef struct watch_state_op {
82
- watch_state_op_type op_type;
83
- // from event.success
84
- union {
85
- struct {
86
- int success;
87
- // has been called back due to a cq next call
88
- int called_back;
89
- } api_callback_args;
90
- struct {
91
- bg_watched_channel* bg;
92
- } continuous_watch_callback_args;
93
- } op;
94
- } watch_state_op;
95
-
96
- static bg_watched_channel* bg_watched_channel_list_head = NULL;
97
-
98
- static void grpc_rb_channel_try_register_connection_polling(
99
- bg_watched_channel* bg);
100
- static void* channel_init_try_register_connection_polling_without_gil(
101
- void* arg);
102
-
103
- typedef struct channel_init_try_register_stack {
104
64
  grpc_channel* channel;
105
- grpc_rb_channel* wrapper;
106
- } channel_init_try_register_stack;
107
-
108
- static grpc_completion_queue* g_channel_polling_cq;
109
- static gpr_mu global_connection_polling_mu;
110
- static gpr_cv global_connection_polling_cv;
111
- static int g_abort_channel_polling = 0;
112
- static gpr_once g_once_init = GPR_ONCE_INIT;
113
- static VALUE g_channel_polling_thread = Qnil;
114
-
115
- static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
116
- static bg_watched_channel* bg_watched_channel_list_create_and_add(
117
- grpc_channel* channel);
118
- static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
119
- static void run_poll_channels_loop_unblocking_func(void* arg);
120
- static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg);
121
-
122
- // Needs to be called under global_connection_polling_mu
123
- static void grpc_rb_channel_watch_connection_state_op_complete(
124
- watch_state_op* op, int success) {
125
- GRPC_RUBY_ASSERT(!op->op.api_callback_args.called_back);
126
- op->op.api_callback_args.called_back = 1;
127
- op->op.api_callback_args.success = success;
128
- // wake up the watch API call that's waiting on this op
129
- gpr_cv_broadcast(&global_connection_polling_cv);
130
- }
131
-
132
- /* Avoids destroying a channel twice. */
133
- static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
134
- gpr_mu_lock(&global_connection_polling_mu);
135
- GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
136
- if (!bg->channel_destroyed) {
137
- grpc_channel_destroy(bg->channel);
138
- bg->channel_destroyed = 1;
139
- }
140
- bg->refcount--;
141
- if (bg->refcount == 0) {
142
- bg_watched_channel_list_free_and_remove(bg);
143
- }
144
- gpr_mu_unlock(&global_connection_polling_mu);
145
- }
146
-
147
- static void* channel_safe_destroy_without_gil(void* arg) {
148
- grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
149
- return NULL;
150
- }
65
+ } grpc_rb_channel;
151
66
 
152
- static void grpc_rb_channel_free_internal(void* p) {
153
- grpc_rb_channel* ch = NULL;
67
+ static void grpc_rb_channel_free(void* p) {
154
68
  if (p == NULL) {
155
69
  return;
156
70
  };
157
- ch = (grpc_rb_channel*)p;
158
- if (ch->bg_wrapped != NULL) {
159
- /* assumption made here: it's ok to directly gpr_mu_lock the global
160
- * connection polling mutex because we're in a finalizer,
161
- * and we can count on this thread to not be interrupted or
162
- * yield the gil. */
163
- grpc_rb_channel_safe_destroy(ch->bg_wrapped);
164
- grpc_rb_channel_args_destroy(&ch->args);
71
+ grpc_rb_channel* wrapper = (grpc_rb_channel*)p;
72
+ if (wrapper->channel != NULL) {
73
+ grpc_channel_destroy(wrapper->channel);
74
+ wrapper->channel = NULL;
165
75
  }
166
76
  xfree(p);
167
77
  }
168
78
 
169
- /* Destroys Channel instances. */
170
- static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }
171
-
172
- /* Protects the mark object from GC */
173
- static void grpc_rb_channel_mark(void* p) {
174
- grpc_rb_channel* channel = NULL;
175
- if (p == NULL) {
176
- return;
177
- }
178
- channel = (grpc_rb_channel*)p;
179
- if (channel->credentials != Qnil) {
180
- rb_gc_mark(channel->credentials);
181
- }
182
- }
183
-
184
- static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
185
- {grpc_rb_channel_mark,
186
- grpc_rb_channel_free,
187
- GRPC_RB_MEMSIZE_UNAVAILABLE,
188
- {NULL, NULL}},
189
- NULL,
190
- NULL,
79
+ static rb_data_type_t grpc_channel_data_type = {
80
+ "grpc_channel",
81
+ {NULL, grpc_rb_channel_free, GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
82
+ NULL,
83
+ NULL,
191
84
  #ifdef RUBY_TYPED_FREE_IMMEDIATELY
192
- RUBY_TYPED_FREE_IMMEDIATELY
85
+ RUBY_TYPED_FREE_IMMEDIATELY
193
86
  #endif
194
87
  };
195
88
 
@@ -197,9 +90,7 @@ static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
197
90
  static VALUE grpc_rb_channel_alloc(VALUE cls) {
198
91
  grpc_ruby_init();
199
92
  grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
200
- wrapper->bg_wrapped = NULL;
201
- wrapper->credentials = Qnil;
202
- MEMZERO(&wrapper->args, grpc_channel_args, 1);
93
+ wrapper->channel = NULL;
203
94
  return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
204
95
  }
205
96
 
@@ -212,53 +103,45 @@ static VALUE grpc_rb_channel_alloc(VALUE cls) {
212
103
 
213
104
  Creates channel instances. */
214
105
  static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
215
- VALUE channel_args = Qnil;
216
- VALUE credentials = Qnil;
106
+ VALUE rb_channel_args = Qnil;
107
+ VALUE rb_credentials = Qnil;
217
108
  VALUE target = Qnil;
218
109
  grpc_rb_channel* wrapper = NULL;
219
- grpc_channel* ch = NULL;
220
- grpc_channel_credentials* creds = NULL;
221
110
  char* target_chars = NULL;
222
- channel_init_try_register_stack stack;
223
-
224
111
  grpc_ruby_fork_guard();
225
112
  /* "3" == 3 mandatory args */
226
- rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
227
-
113
+ rb_scan_args(argc, argv, "3", &target, &rb_channel_args, &rb_credentials);
228
114
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
229
115
  target_chars = StringValueCStr(target);
230
- grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
231
- if (TYPE(credentials) == T_SYMBOL) {
232
- if (id_insecure_channel != SYM2ID(credentials)) {
116
+ grpc_channel_args channel_args;
117
+ memset(&channel_args, 0, sizeof(channel_args));
118
+ grpc_rb_hash_convert_to_channel_args(rb_channel_args, &channel_args);
119
+ if (TYPE(rb_credentials) == T_SYMBOL) {
120
+ if (id_insecure_channel != SYM2ID(rb_credentials)) {
233
121
  rb_raise(rb_eTypeError,
234
122
  "bad creds symbol, want :this_channel_is_insecure");
235
123
  return Qnil;
236
124
  }
237
125
  grpc_channel_credentials* insecure_creds =
238
126
  grpc_insecure_credentials_create();
239
- ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
127
+ wrapper->channel =
128
+ grpc_channel_create(target_chars, insecure_creds, &channel_args);
240
129
  grpc_channel_credentials_release(insecure_creds);
241
130
  } else {
242
- wrapper->credentials = credentials;
243
- if (grpc_rb_is_channel_credentials(credentials)) {
244
- creds = grpc_rb_get_wrapped_channel_credentials(credentials);
245
- } else if (grpc_rb_is_xds_channel_credentials(credentials)) {
246
- creds = grpc_rb_get_wrapped_xds_channel_credentials(credentials);
131
+ grpc_channel_credentials* creds;
132
+ if (grpc_rb_is_channel_credentials(rb_credentials)) {
133
+ creds = grpc_rb_get_wrapped_channel_credentials(rb_credentials);
134
+ } else if (grpc_rb_is_xds_channel_credentials(rb_credentials)) {
135
+ creds = grpc_rb_get_wrapped_xds_channel_credentials(rb_credentials);
247
136
  } else {
248
137
  rb_raise(rb_eTypeError,
249
138
  "bad creds, want ChannelCredentials or XdsChannelCredentials");
250
139
  return Qnil;
251
140
  }
252
- ch = grpc_channel_create(target_chars, creds, &wrapper->args);
141
+ wrapper->channel = grpc_channel_create(target_chars, creds, &channel_args);
253
142
  }
254
-
255
- GRPC_RUBY_ASSERT(ch);
256
- stack.channel = ch;
257
- stack.wrapper = wrapper;
258
- rb_thread_call_without_gvl(
259
- channel_init_try_register_connection_polling_without_gil, &stack, NULL,
260
- NULL);
261
- if (ch == NULL) {
143
+ grpc_rb_channel_args_destroy(&channel_args);
144
+ if (wrapper->channel == NULL) {
262
145
  rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
263
146
  target_chars);
264
147
  return Qnil;
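With the background-watcher plumbing removed, Channel#initialize now converts the args hash, creates the C-core channel directly, and frees the converted args before returning. The Ruby-visible constructor is unchanged; a minimal sketch of both credential forms (target, channel args and the certificate path are illustrative):

require 'grpc'

# Insecure channel: pass the :this_channel_is_insecure symbol.
insecure = GRPC::Core::Channel.new('localhost:50051',
                                   { 'grpc.enable_retries' => 0 },
                                   :this_channel_is_insecure)

# Secure channel: pass ChannelCredentials (or XdsChannelCredentials).
creds = GRPC::Core::ChannelCredentials.new(File.read('roots.pem'))  # hypothetical cert path
secure = GRPC::Core::Channel.new('myservice.example.com:443', nil, creds)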
@@ -268,27 +151,6 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
268
151
  return self;
269
152
  }
270
153
 
271
- typedef struct get_state_stack {
272
- bg_watched_channel* bg;
273
- int try_to_connect;
274
- int out;
275
- } get_state_stack;
276
-
277
- static void* get_state_without_gil(void* arg) {
278
- get_state_stack* stack = (get_state_stack*)arg;
279
-
280
- gpr_mu_lock(&global_connection_polling_mu);
281
- if (stack->bg->channel_destroyed) {
282
- stack->out = GRPC_CHANNEL_SHUTDOWN;
283
- } else {
284
- stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
285
- stack->try_to_connect);
286
- }
287
- gpr_mu_unlock(&global_connection_polling_mu);
288
-
289
- return NULL;
290
- }
291
-
292
154
  /*
293
155
  call-seq:
294
156
  ch.connectivity_state -> state
@@ -302,68 +164,17 @@ static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
302
164
  VALUE self) {
303
165
  VALUE try_to_connect_param = Qfalse;
304
166
  grpc_rb_channel* wrapper = NULL;
305
- get_state_stack stack;
306
-
307
167
  /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
308
168
  rb_scan_args(argc, argv, "01", &try_to_connect_param);
309
-
310
169
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
311
- if (wrapper->bg_wrapped == NULL) {
170
+ if (wrapper->channel == NULL) {
312
171
  rb_raise(rb_eRuntimeError, "closed!");
313
172
  return Qnil;
314
173
  }
315
-
316
- stack.bg = wrapper->bg_wrapped;
317
- stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
318
- rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);
319
-
320
- return LONG2NUM(stack.out);
321
- }
322
-
323
- typedef struct watch_state_stack {
324
- bg_watched_channel* bg_wrapped;
325
- gpr_timespec deadline;
326
- int last_state;
327
- } watch_state_stack;
328
-
329
- static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
330
- watch_state_stack* stack = (watch_state_stack*)arg;
331
- watch_state_op* op = NULL;
332
- void* success = (void*)0;
333
-
334
- gpr_mu_lock(&global_connection_polling_mu);
335
- // it's unsafe to do a "watch" after "channel polling abort" because the cq
336
- // has been shut down.
337
- if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
338
- gpr_mu_unlock(&global_connection_polling_mu);
339
- return (void*)0;
340
- }
341
- op = gpr_zalloc(sizeof(watch_state_op));
342
- op->op_type = WATCH_STATE_API;
343
- grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
344
- stack->last_state, stack->deadline,
345
- g_channel_polling_cq, op);
346
-
347
- while (!op->op.api_callback_args.called_back) {
348
- gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
349
- gpr_inf_future(GPR_CLOCK_REALTIME));
350
- }
351
- if (op->op.api_callback_args.success) {
352
- success = (void*)1;
353
- }
354
- gpr_free(op);
355
- gpr_mu_unlock(&global_connection_polling_mu);
356
-
357
- return success;
358
- }
359
- static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
360
- bg_watched_channel* bg = (bg_watched_channel*)arg;
361
- gpr_mu_lock(&global_connection_polling_mu);
362
- if (!bg->channel_destroyed) {
363
- grpc_channel_destroy(bg->channel);
364
- bg->channel_destroyed = 1;
365
- }
366
- gpr_mu_unlock(&global_connection_polling_mu);
174
+ bool try_to_connect = RTEST(try_to_connect_param) ? true : false;
175
+ int state =
176
+ grpc_channel_check_connectivity_state(wrapper->channel, try_to_connect);
177
+ return LONG2NUM(state);
367
178
  }
368
179
 
369
180
  /* Wait until the channel's connectivity state becomes different from
@@ -375,79 +186,46 @@ static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
375
186
  * */
376
187
  static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
377
188
  VALUE last_state,
378
- VALUE deadline) {
189
+ VALUE rb_deadline) {
379
190
  grpc_rb_channel* wrapper = NULL;
380
- watch_state_stack stack;
381
- void* op_success = 0;
382
-
383
191
  grpc_ruby_fork_guard();
384
192
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
385
-
386
- if (wrapper->bg_wrapped == NULL) {
193
+ if (wrapper->channel == NULL) {
387
194
  rb_raise(rb_eRuntimeError, "closed!");
388
195
  return Qnil;
389
196
  }
390
-
391
197
  if (!FIXNUM_P(last_state)) {
392
198
  rb_raise(
393
199
  rb_eTypeError,
394
200
  "bad type for last_state. want a GRPC::Core::ChannelState constant");
395
201
  return Qnil;
396
202
  }
397
-
398
- stack.bg_wrapped = wrapper->bg_wrapped;
399
- stack.deadline = grpc_rb_time_timeval(deadline, 0),
400
- stack.last_state = NUM2LONG(last_state);
401
-
402
- op_success = rb_thread_call_without_gvl(
403
- wait_for_watch_state_op_complete_without_gvl, &stack,
404
- wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
405
-
406
- return op_success ? Qtrue : Qfalse;
407
- }
408
-
409
- static void grpc_rb_channel_maybe_recreate_channel_after_fork(
410
- grpc_rb_channel* wrapper, VALUE target) {
411
- // TODO(apolcyn): maybe check if fork support is enabled here.
412
- // The only way we can get bg->channel_destroyed without bg itself being
413
- // NULL is if we destroyed the channel during GRPC::prefork.
414
- bg_watched_channel* bg = wrapper->bg_wrapped;
415
- if (bg->channel_destroyed) {
416
- // There must be one ref at this point, held by the ruby-level channel
417
- // object, drop this one here.
418
- GRPC_RUBY_ASSERT(bg->refcount == 1);
419
- rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
420
- NULL);
421
- // re-create C-core channel
422
- const char* target_str = StringValueCStr(target);
423
- grpc_channel* channel;
424
- if (wrapper->credentials == Qnil) {
425
- grpc_channel_credentials* insecure_creds =
426
- grpc_insecure_credentials_create();
427
- channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
428
- grpc_channel_credentials_release(insecure_creds);
429
- } else {
430
- grpc_channel_credentials* creds;
431
- if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
432
- creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
433
- } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
434
- creds =
435
- grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
436
- } else {
437
- rb_raise(rb_eTypeError,
438
- "failed to re-create channel after fork: bad creds, want "
439
- "ChannelCredentials or XdsChannelCredentials");
440
- return;
441
- }
442
- channel = grpc_channel_create(target_str, creds, &wrapper->args);
443
- }
444
- // re-register with channel polling thread
445
- channel_init_try_register_stack stack;
446
- stack.channel = channel;
447
- stack.wrapper = wrapper;
448
- rb_thread_call_without_gvl(
449
- channel_init_try_register_connection_polling_without_gil, &stack, NULL,
450
- NULL);
203
+ const void* tag = &wrapper;
204
+ gpr_timespec deadline = grpc_rb_time_timeval(rb_deadline, 0);
205
+ grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(NULL);
206
+ grpc_channel_watch_connectivity_state(wrapper->channel, NUM2LONG(last_state),
207
+ deadline, cq, tag);
208
+ grpc_event event =
209
+ rb_completion_queue_pluck(cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME),
210
+ "grpc_channel_watch_connectivity_state");
211
+ // TODO(apolcyn): this CQ would leak if the thread were killed
212
+ // while polling queue_pluck, e.g. with Thread#kill. One fix may be
213
+ // to make this CQ owned by the channel object. Another fix could be to
214
+ // busy-poll watch_connectivity_state with a short deadline, without
215
+ // the GIL, rather than just polling CQ pluck, and destroy the CQ
216
+ // before exitting the no-GIL block.
217
+ grpc_completion_queue_shutdown(cq);
218
+ grpc_rb_completion_queue_destroy(cq);
219
+ if (event.type == GRPC_OP_COMPLETE) {
220
+ return Qtrue;
221
+ } else if (event.type == GRPC_QUEUE_TIMEOUT) {
222
+ return Qfalse;
223
+ } else {
224
+ grpc_absl_log_int(
225
+ GPR_ERROR,
226
+ "GRPC_RUBY: unexpected grpc_channel_watch_connectivity_state result:",
227
+ event.type);
228
+ return Qfalse;
451
229
  }
452
230
  }
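watch_connectivity_state is now implemented with a short-lived pluck completion queue instead of the shared background polling thread; its Ruby signature (last observed state plus an absolute deadline) and return value are unchanged. A small usage sketch, with an illustrative target and timeout:

require 'grpc'

ch = GRPC::Core::Channel.new('localhost:50051', nil, :this_channel_is_insecure)

state = ch.connectivity_state(true)  # true => try to connect
if ch.watch_connectivity_state(state, Time.now + 10)
  puts "state changed from #{state} to #{ch.connectivity_state}"
else
  puts 'still in the same state after 10s'
end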
453
231
 
@@ -466,7 +244,6 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
466
244
  grpc_slice host_slice;
467
245
  grpc_slice* host_slice_ptr = NULL;
468
246
  char* tmp_str = NULL;
469
-
470
247
  grpc_ruby_fork_guard();
471
248
  if (host != Qnil) {
472
249
  host_slice =
@@ -479,40 +256,29 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
479
256
  if (parent != Qnil) {
480
257
  parent_call = grpc_rb_get_wrapped_call(parent);
481
258
  }
482
-
483
259
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
484
- if (wrapper->bg_wrapped == NULL) {
260
+ if (wrapper->channel == NULL) {
485
261
  rb_raise(rb_eRuntimeError, "closed!");
486
262
  return Qnil;
487
263
  }
488
- // TODO(apolcyn): only do this check if fork support is enabled
489
- rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
490
- grpc_rb_channel_maybe_recreate_channel_after_fork(
491
- wrapper, rb_ivar_get(self, id_target));
492
- rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));
493
-
494
264
  cq = grpc_completion_queue_create_for_pluck(NULL);
495
265
  method_slice =
496
266
  grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
497
- call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
498
- flags, cq, method_slice, host_slice_ptr,
267
+ call = grpc_channel_create_call(wrapper->channel, parent_call, flags, cq,
268
+ method_slice, host_slice_ptr,
499
269
  grpc_rb_time_timeval(deadline,
500
270
  /* absolute time */ 0),
501
271
  NULL);
502
-
503
272
  if (call == NULL) {
504
273
  tmp_str = grpc_slice_to_c_string(method_slice);
505
274
  rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
506
275
  return Qnil;
507
276
  }
508
-
509
277
  grpc_slice_unref(method_slice);
510
278
  if (host_slice_ptr != NULL) {
511
279
  grpc_slice_unref(host_slice);
512
280
  }
513
-
514
281
  res = grpc_rb_wrap_call(call, cq);
515
-
516
282
  /* Make this channel an instance attribute of the call so that it is not GCed
517
283
  * before the call. */
518
284
  rb_ivar_set(res, id_channel, self);
@@ -524,14 +290,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
524
290
  * this */
525
291
  static VALUE grpc_rb_channel_destroy(VALUE self) {
526
292
  grpc_rb_channel* wrapper = NULL;
527
-
528
293
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
529
- if (wrapper->bg_wrapped != NULL) {
530
- rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
531
- wrapper->bg_wrapped, NULL, NULL);
532
- wrapper->bg_wrapped = NULL;
294
+ if (wrapper->channel != NULL) {
295
+ grpc_channel_destroy(wrapper->channel);
296
+ wrapper->channel = NULL;
533
297
  }
534
-
535
298
  return Qnil;
536
299
  }
537
300
 
@@ -540,271 +303,13 @@ static VALUE grpc_rb_channel_get_target(VALUE self) {
540
303
  grpc_rb_channel* wrapper = NULL;
541
304
  VALUE res = Qnil;
542
305
  char* target = NULL;
543
-
544
306
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
545
- target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
307
+ target = grpc_channel_get_target(wrapper->channel);
546
308
  res = rb_str_new2(target);
547
309
  gpr_free(target);
548
-
549
310
  return res;
550
311
  }
551
312
 
552
- /* Needs to be called under global_connection_polling_mu */
553
- static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
554
- bg_watched_channel* cur = bg_watched_channel_list_head;
555
-
556
- while (cur != NULL) {
557
- if (cur == target) {
558
- return 1;
559
- }
560
- cur = cur->next;
561
- }
562
-
563
- return 0;
564
- }
565
-
566
- /* Needs to be called under global_connection_polling_mu */
567
- static bg_watched_channel* bg_watched_channel_list_create_and_add(
568
- grpc_channel* channel) {
569
- bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));
570
-
571
- watched->channel = channel;
572
- watched->next = bg_watched_channel_list_head;
573
- watched->refcount = 1;
574
- bg_watched_channel_list_head = watched;
575
- return watched;
576
- }
577
-
578
- /* Needs to be called under global_connection_polling_mu */
579
- static void bg_watched_channel_list_free_and_remove(
580
- bg_watched_channel* target) {
581
- bg_watched_channel* bg = NULL;
582
-
583
- GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(target));
584
- GRPC_RUBY_ASSERT(target->channel_destroyed && target->refcount == 0);
585
- if (bg_watched_channel_list_head == target) {
586
- bg_watched_channel_list_head = target->next;
587
- gpr_free(target);
588
- return;
589
- }
590
- bg = bg_watched_channel_list_head;
591
- while (bg != NULL && bg->next != NULL) {
592
- if (bg->next == target) {
593
- bg->next = bg->next->next;
594
- gpr_free(target);
595
- return;
596
- }
597
- bg = bg->next;
598
- }
599
- GRPC_RUBY_ASSERT(0);
600
- }
601
-
602
- /* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
603
- * it onto the background thread for constant watches. */
604
- static void* channel_init_try_register_connection_polling_without_gil(
605
- void* arg) {
606
- channel_init_try_register_stack* stack =
607
- (channel_init_try_register_stack*)arg;
608
-
609
- gpr_mu_lock(&global_connection_polling_mu);
610
- stack->wrapper->bg_wrapped =
611
- bg_watched_channel_list_create_and_add(stack->channel);
612
- grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
613
- gpr_mu_unlock(&global_connection_polling_mu);
614
- return NULL;
615
- }
616
-
617
- // Needs to be called under global_connection_poolling_mu
618
- static void grpc_rb_channel_try_register_connection_polling(
619
- bg_watched_channel* bg) {
620
- grpc_connectivity_state conn_state;
621
- watch_state_op* op = NULL;
622
- if (bg->refcount == 0) {
623
- GRPC_RUBY_ASSERT(bg->channel_destroyed);
624
- bg_watched_channel_list_free_and_remove(bg);
625
- return;
626
- }
627
- GRPC_RUBY_ASSERT(bg->refcount == 1);
628
- if (bg->channel_destroyed || g_abort_channel_polling) {
629
- return;
630
- }
631
- conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
632
- if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
633
- return;
634
- }
635
- GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
636
- // prevent bg from being free'd by GC while background thread is watching it
637
- bg->refcount++;
638
- op = gpr_zalloc(sizeof(watch_state_op));
639
- op->op_type = CONTINUOUS_WATCH;
640
- op->op.continuous_watch_callback_args.bg = bg;
641
- grpc_channel_watch_connectivity_state(bg->channel, conn_state,
642
- gpr_inf_future(GPR_CLOCK_REALTIME),
643
- g_channel_polling_cq, op);
644
- }
645
-
646
- // Note this loop breaks out with a single call of
647
- // "run_poll_channels_loop_no_gil".
648
- // This assumes that a ruby call the unblocking func
649
- // indicates process shutdown.
650
- // In the worst case, this stops polling channel connectivity
651
- // early and falls back to current behavior.
652
- static void* run_poll_channels_loop_no_gil(void* arg) {
653
- grpc_event event;
654
- watch_state_op* op = NULL;
655
- bg_watched_channel* bg = NULL;
656
- (void)arg;
657
- grpc_absl_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");
658
-
659
- gpr_mu_lock(&global_connection_polling_mu);
660
- gpr_cv_broadcast(&global_connection_polling_cv);
661
- gpr_mu_unlock(&global_connection_polling_mu);
662
-
663
- for (;;) {
664
- event = grpc_completion_queue_next(
665
- g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
666
- if (event.type == GRPC_QUEUE_SHUTDOWN) {
667
- break;
668
- }
669
- gpr_mu_lock(&global_connection_polling_mu);
670
- if (event.type == GRPC_OP_COMPLETE) {
671
- op = (watch_state_op*)event.tag;
672
- if (op->op_type == CONTINUOUS_WATCH) {
673
- bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
674
- bg->refcount--;
675
- grpc_rb_channel_try_register_connection_polling(bg);
676
- gpr_free(op);
677
- } else if (op->op_type == WATCH_STATE_API) {
678
- grpc_rb_channel_watch_connection_state_op_complete(
679
- (watch_state_op*)event.tag, event.success);
680
- } else {
681
- GRPC_RUBY_ASSERT(0);
682
- }
683
- }
684
- gpr_mu_unlock(&global_connection_polling_mu);
685
- }
686
- grpc_completion_queue_destroy(g_channel_polling_cq);
687
- grpc_absl_log(
688
- GPR_DEBUG,
689
- "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
690
- "loop");
691
- return NULL;
692
- }
693
-
694
- static void run_poll_channels_loop_unblocking_func(void* arg) {
695
- run_poll_channels_loop_unblocking_func_wrapper(arg);
696
- }
697
-
698
- // Notify the channel polling loop to cleanup and shutdown.
699
- static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg) {
700
- bg_watched_channel* bg = NULL;
701
- (void)arg;
702
-
703
- gpr_mu_lock(&global_connection_polling_mu);
704
- grpc_absl_log(
705
- GPR_DEBUG,
706
- "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
707
- "connection polling");
708
- // early out after first time through
709
- if (g_abort_channel_polling) {
710
- gpr_mu_unlock(&global_connection_polling_mu);
711
- return NULL;
712
- }
713
- g_abort_channel_polling = 1;
714
-
715
- // force pending watches to end by switching to shutdown state
716
- bg = bg_watched_channel_list_head;
717
- while (bg != NULL) {
718
- if (!bg->channel_destroyed) {
719
- grpc_channel_destroy(bg->channel);
720
- bg->channel_destroyed = 1;
721
- }
722
- bg = bg->next;
723
- }
724
-
725
- grpc_absl_log_int(
726
- GPR_DEBUG,
727
- "GRPC_RUBY: cq shutdown on global polling cq. pid: ", getpid());
728
- grpc_completion_queue_shutdown(g_channel_polling_cq);
729
- gpr_cv_broadcast(&global_connection_polling_cv);
730
- gpr_mu_unlock(&global_connection_polling_mu);
731
- grpc_absl_log(
732
- GPR_DEBUG,
733
- "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
734
- "connection polling");
735
- return NULL;
736
- }
737
-
738
- // Poll channel connectivity states in background thread without the GIL.
739
- static VALUE run_poll_channels_loop(void* arg) {
740
- (void)arg;
741
- grpc_absl_log(
742
- GPR_DEBUG,
743
- "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
744
- rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
745
- run_poll_channels_loop_unblocking_func, NULL);
746
- return Qnil;
747
- }
748
-
749
- static void* set_abort_channel_polling_without_gil(void* arg) {
750
- (void)arg;
751
- gpr_mu_lock(&global_connection_polling_mu);
752
- g_abort_channel_polling = 1;
753
- gpr_cv_broadcast(&global_connection_polling_cv);
754
- gpr_mu_unlock(&global_connection_polling_mu);
755
- return NULL;
756
- }
757
-
758
- static void do_basic_init() {
759
- gpr_mu_init(&global_connection_polling_mu);
760
- gpr_cv_init(&global_connection_polling_cv);
761
- }
762
-
763
- /* Temporary fix for
764
- * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
765
- * Transports in idle channels can get destroyed. Normally c-core re-connects,
766
- * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
767
- * only calls c-core's "completion_queu_pluck" API.
768
- * This uses a global background thread that calls
769
- * "completion_queue_next" on registered "watch_channel_connectivity_state"
770
- * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
771
- * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
772
- */
773
- void grpc_rb_channel_polling_thread_start() {
774
- gpr_once_init(&g_once_init, do_basic_init);
775
- GRPC_RUBY_ASSERT(!RTEST(g_channel_polling_thread));
776
- GRPC_RUBY_ASSERT(!g_abort_channel_polling);
777
- GRPC_RUBY_ASSERT(g_channel_polling_cq == NULL);
778
-
779
- g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
780
- g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);
781
-
782
- if (!RTEST(g_channel_polling_thread)) {
783
- grpc_absl_log(GPR_ERROR,
784
- "GRPC_RUBY: failed to spawn channel polling thread");
785
- rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
786
- NULL, NULL);
787
- return;
788
- }
789
- }
790
-
791
- void grpc_rb_channel_polling_thread_stop() {
792
- if (!RTEST(g_channel_polling_thread)) {
793
- grpc_absl_log(
794
- GPR_ERROR,
795
- "GRPC_RUBY: channel polling thread stop: thread was not started");
796
- return;
797
- }
798
- rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func_wrapper,
799
- NULL, NULL, NULL);
800
- rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
801
- // state associated with the channel polling thread is destroyed, reset so
802
- // we can start again later
803
- g_channel_polling_thread = Qnil;
804
- g_abort_channel_polling = false;
805
- g_channel_polling_cq = NULL;
806
- }
807
-
808
313
  static void Init_grpc_propagate_masks() {
809
314
  /* Constants representing call propagation masks in grpc.h */
810
315
  VALUE grpc_rb_mPropagateMasks =
@@ -838,7 +343,6 @@ static void Init_grpc_connectivity_states() {
838
343
  }
839
344
 
840
345
  void Init_grpc_channel() {
841
- rb_global_variable(&g_channel_polling_thread);
842
346
  grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
843
347
  rb_undef_alloc_func(grpc_rb_cChannelArgs);
844
348
  grpc_rb_cChannel =
@@ -883,5 +387,5 @@ void Init_grpc_channel() {
883
387
  grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
884
388
  grpc_rb_channel* wrapper = NULL;
885
389
  TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
886
- return wrapper->bg_wrapped->channel;
390
+ return wrapper->channel;
887
391
  }
@@ -26,9 +26,6 @@
26
26
  /* Initializes the Channel class. */
27
27
  void Init_grpc_channel();
28
28
 
29
- void grpc_rb_channel_polling_thread_start();
30
- void grpc_rb_channel_polling_thread_stop();
31
-
32
29
  /* Gets the wrapped channel from the ruby wrapper */
33
30
  grpc_channel* grpc_rb_get_wrapped_channel(VALUE v);
34
31
 
@@ -34,15 +34,22 @@ typedef struct next_call_stack {
34
34
  grpc_event event;
35
35
  gpr_timespec timeout;
36
36
  void* tag;
37
- void (*unblock_func)(void*);
38
- void* unblock_func_arg;
37
+ volatile int interrupted;
39
38
  } next_call_stack;
40
39
 
41
40
  /* Calls grpc_completion_queue_pluck without holding the ruby GIL */
42
41
  static void* grpc_rb_completion_queue_pluck_no_gil(void* param) {
43
42
  next_call_stack* const next_call = (next_call_stack*)param;
44
- next_call->event = grpc_completion_queue_pluck(next_call->cq, next_call->tag,
45
- next_call->timeout, NULL);
43
+ gpr_timespec increment = gpr_time_from_millis(200, GPR_TIMESPAN);
44
+ gpr_timespec deadline;
45
+ for (;;) {
46
+ deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment);
47
+ next_call->event = grpc_completion_queue_pluck(
48
+ next_call->cq, next_call->tag, deadline, NULL);
49
+ if (next_call->event.type != GRPC_QUEUE_TIMEOUT) break;
50
+ if (gpr_time_cmp(deadline, next_call->timeout) > 0) break;
51
+ if (next_call->interrupted) break;
52
+ }
46
53
  return NULL;
47
54
  }
48
55
 
@@ -56,28 +63,33 @@ void grpc_rb_completion_queue_destroy(grpc_completion_queue* cq) {
56
63
  grpc_completion_queue_destroy(cq);
57
64
  }
58
65
 
59
- static void outer_unblock_func(void* param) {
66
+ static void unblock_func(void* param) {
60
67
  next_call_stack* const next_call = (next_call_stack*)param;
61
- if (next_call->unblock_func == NULL) return;
62
- next_call->unblock_func(next_call->unblock_func_arg);
68
+ next_call->interrupted = 1;
63
69
  }
64
70
 
65
71
  /* Does the same thing as grpc_completion_queue_pluck, while properly releasing
66
72
  the GVL and handling interrupts */
67
73
  grpc_event rb_completion_queue_pluck(grpc_completion_queue* queue, void* tag,
68
74
  gpr_timespec deadline,
69
- void (*unblock_func)(void* param),
70
- void* unblock_func_arg) {
75
+ const char* reason) {
71
76
  next_call_stack next_call;
72
77
  MEMZERO(&next_call, next_call_stack, 1);
73
78
  next_call.cq = queue;
74
79
  next_call.timeout = deadline;
75
80
  next_call.tag = tag;
76
81
  next_call.event.type = GRPC_QUEUE_TIMEOUT;
77
- next_call.unblock_func = unblock_func;
78
- next_call.unblock_func_arg = unblock_func_arg;
79
- rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
80
- (void*)&next_call, outer_unblock_func,
81
- (void*)&next_call);
82
+ /* Loop until we finish a pluck without an interruption. See
83
+ * https://github.com/grpc/grpc/issues/38210 for an example of why
84
+ * this is necessary. */
85
+ grpc_absl_log_str(GPR_DEBUG, "CQ pluck loop begin: ", reason);
86
+ do {
87
+ next_call.interrupted = 0;
88
+ rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
89
+ (void*)&next_call, unblock_func,
90
+ (void*)&next_call);
91
+ if (next_call.event.type != GRPC_QUEUE_TIMEOUT) break;
92
+ } while (next_call.interrupted);
93
+ grpc_absl_log_str(GPR_DEBUG, "CQ pluck loop done: ", reason);
82
94
  return next_call.event;
83
95
  }
@@ -29,14 +29,8 @@ void grpc_rb_completion_queue_destroy(grpc_completion_queue* cq);
29
29
  * Makes the implementation of CompletionQueue#pluck available in other files
30
30
  *
31
31
  * This avoids having code that holds the GIL repeated at multiple sites.
32
- *
33
- * unblock_func is invoked with the provided argument to unblock the CQ
34
- * operation in the event of process termination (e.g. a signal), but
35
- * unblock_func may be NULL in which case it's unused.
36
32
  */
37
33
  grpc_event rb_completion_queue_pluck(grpc_completion_queue* queue, void* tag,
38
- gpr_timespec deadline,
39
- void (*unblock_func)(void* param),
40
- void* unblock_func_arg);
34
+ gpr_timespec deadline, const char* reason);
41
35
 
42
36
  #endif /* GRPC_RB_COMPLETION_QUEUE_H_ */
@@ -333,7 +333,6 @@ static void grpc_ruby_init_threads() {
333
333
  rb_mutex_lock(g_bg_thread_init_rb_mu);
334
334
  if (!g_bg_thread_init_done) {
335
335
  grpc_rb_event_queue_thread_start();
336
- grpc_rb_channel_polling_thread_start();
337
336
  g_bg_thread_init_done = true;
338
337
  }
339
338
  rb_mutex_unlock(g_bg_thread_init_rb_mu);
@@ -381,7 +380,7 @@ static VALUE grpc_rb_prefork(VALUE self) {
381
380
  rb_raise(rb_eRuntimeError,
382
381
  "GRPC.prefork and fork need to be called from the same thread "
383
382
  "that GRPC was initialized on (GRPC lazy-initializes when when "
384
- "the first GRPC object is created");
383
+ "the first GRPC object is created)");
385
384
  }
386
385
  if (g_grpc_rb_num_fork_unsafe_threads > 0) {
387
386
  rb_raise(
@@ -395,7 +394,6 @@ static VALUE grpc_rb_prefork(VALUE self) {
395
394
  g_grpc_rb_prefork_pending = true;
396
395
  rb_mutex_lock(g_bg_thread_init_rb_mu);
397
396
  if (g_bg_thread_init_done) {
398
- grpc_rb_channel_polling_thread_stop();
399
397
  grpc_rb_event_queue_thread_stop();
400
398
  // all ruby-level background threads joined at this point
401
399
  g_bg_thread_init_done = false;
@@ -449,9 +447,15 @@ void grpc_rb_fork_unsafe_begin() { g_grpc_rb_num_fork_unsafe_threads++; }
449
447
  void grpc_rb_fork_unsafe_end() { g_grpc_rb_num_fork_unsafe_threads--; }
450
448
 
451
449
  // APIs to mark fork-unsafe sections from ruby code
452
- static VALUE grpc_rb_fork_unsafe_begin_api() { grpc_rb_fork_unsafe_begin(); }
450
+ static VALUE grpc_rb_fork_unsafe_begin_api() {
451
+ grpc_rb_fork_unsafe_begin();
452
+ return Qnil;
453
+ }
453
454
 
454
- static VALUE grpc_rb_fork_unsafe_end_api() { grpc_rb_fork_unsafe_end(); }
455
+ static VALUE grpc_rb_fork_unsafe_end_api() {
456
+ grpc_rb_fork_unsafe_end();
457
+ return Qnil;
458
+ }
455
459
 
456
460
  // One-time initialization
457
461
  void Init_grpc_c() {
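The Ruby-visible wrappers now return Qnil explicitly instead of falling off the end of the function with an undefined VALUE. A minimal sketch of bracketing a fork-unsafe region from Ruby, matching the new core_spec.rb added later in this diff:

require 'grpc'

GRPC::Core.fork_unsafe_begin
begin
  # ... work that must not overlap with GRPC.prefork ...
ensure
  GRPC::Core.fork_unsafe_end
end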
@@ -132,7 +132,7 @@ extern grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentia
132
132
  typedef void(*grpc_call_credentials_release_type)(grpc_call_credentials* creds);
133
133
  extern grpc_call_credentials_release_type grpc_call_credentials_release_import;
134
134
  #define grpc_call_credentials_release grpc_call_credentials_release_import
135
- typedef grpc_channel_credentials*(*grpc_google_default_credentials_create_type)(grpc_call_credentials* call_credentials);
135
+ typedef grpc_channel_credentials*(*grpc_google_default_credentials_create_type)(grpc_call_credentials* call_creds_for_tls, grpc_call_credentials* call_creds_for_alts);
136
136
  extern grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import;
137
137
  #define grpc_google_default_credentials_create grpc_google_default_credentials_create_import
138
138
  typedef grpc_ssl_server_certificate_config*(*grpc_ssl_server_certificate_config_create_type)(const char* pem_root_certs, const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, size_t num_key_cert_pairs);
@@ -23,11 +23,7 @@
23
23
 
24
24
  int grpc_rb_load_core() {
25
25
  #if GPR_ARCH_64
26
- #if GRPC_RUBY_WINDOWS_UCRT
27
26
  TCHAR fname[] = _T("grpc_c.64-ucrt.ruby");
28
- #else
29
- TCHAR fname[] = _T("grpc_c.64-msvcrt.ruby");
30
- #endif
31
27
  #else
32
28
  TCHAR fname[] = _T("grpc_c.32-msvcrt.ruby");
33
29
  #endif
@@ -49,28 +49,32 @@ typedef struct grpc_rb_server {
49
49
  /* The actual server */
50
50
  grpc_server* wrapped;
51
51
  grpc_completion_queue* queue;
52
+ int shutdown_and_notify_done;
52
53
  int destroy_done;
53
54
  } grpc_rb_server;
54
55
 
55
- static void grpc_rb_server_shutdown_and_notify_internal(grpc_rb_server* server,
56
- gpr_timespec deadline) {
56
+ static void grpc_rb_server_maybe_shutdown_and_notify(grpc_rb_server* server,
57
+ gpr_timespec deadline) {
57
58
  grpc_event ev;
58
59
  void* tag = &ev;
59
- if (server->wrapped != NULL) {
60
- grpc_server_shutdown_and_notify(server->wrapped, server->queue, tag);
61
- // Following pluck calls will release the GIL and block but cannot
62
- // be interrupted. They should terminate quickly enough though b/c
63
- // we will cancel all server calls after the deadline.
64
- ev = rb_completion_queue_pluck(server->queue, tag, deadline, NULL, NULL);
65
- if (ev.type == GRPC_QUEUE_TIMEOUT) {
66
- grpc_server_cancel_all_calls(server->wrapped);
60
+ if (!server->shutdown_and_notify_done) {
61
+ server->shutdown_and_notify_done = 1;
62
+ if (server->wrapped != NULL) {
63
+ grpc_server_shutdown_and_notify(server->wrapped, server->queue, tag);
67
64
  ev = rb_completion_queue_pluck(
68
- server->queue, tag, gpr_inf_future(GPR_CLOCK_REALTIME), NULL, NULL);
69
- }
70
- if (ev.type != GRPC_OP_COMPLETE) {
71
- grpc_absl_log_int(
72
- GPR_DEBUG,
73
- "GRPC_RUBY: bad grpc_server_shutdown_and_notify result:", ev.type);
65
+ server->queue, tag, deadline,
66
+ "grpc_server_shutdown_and_notify first try");
67
+ if (ev.type == GRPC_QUEUE_TIMEOUT) {
68
+ grpc_server_cancel_all_calls(server->wrapped);
69
+ ev = rb_completion_queue_pluck(
70
+ server->queue, tag, gpr_inf_future(GPR_CLOCK_REALTIME),
71
+ "grpc_server_shutdown_and_notify second try");
72
+ }
73
+ if (ev.type != GRPC_OP_COMPLETE) {
74
+ grpc_absl_log_int(
75
+ GPR_DEBUG,
76
+ "GRPC_RUBY: bad grpc_server_shutdown_and_notify result: ", ev.type);
77
+ }
74
78
  }
75
79
  }
76
80
  }
@@ -89,19 +93,15 @@ static void grpc_rb_server_maybe_destroy(grpc_rb_server* server) {
89
93
  }
90
94
 
91
95
  static void grpc_rb_server_free_internal(void* p) {
92
- grpc_rb_server* svr = NULL;
93
- gpr_timespec deadline;
94
96
  if (p == NULL) {
95
97
  return;
96
98
  };
97
- svr = (grpc_rb_server*)p;
98
-
99
- deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
100
- gpr_time_from_seconds(2, GPR_TIMESPAN));
101
-
102
- grpc_rb_server_shutdown_and_notify_internal(svr, deadline);
103
- grpc_rb_server_maybe_destroy(svr);
104
-
99
+ grpc_rb_server* server = (grpc_rb_server*)p;
100
+ // Shutdown the server first if we haven't already
101
+ gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
102
+ gpr_time_from_seconds(2, GPR_TIMESPAN));
103
+ grpc_rb_server_maybe_shutdown_and_notify(server, deadline);
104
+ grpc_rb_server_maybe_destroy(server);
105
105
  xfree(p);
106
106
  }
107
107
 
@@ -131,6 +131,7 @@ static VALUE grpc_rb_server_alloc(VALUE cls) {
131
131
  grpc_rb_server* wrapper = ALLOC(grpc_rb_server);
132
132
  wrapper->wrapped = NULL;
133
133
  wrapper->destroy_done = 0;
134
+ wrapper->shutdown_and_notify_done = 0;
134
135
  return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper);
135
136
  }
136
137
 
@@ -190,26 +191,6 @@ struct server_request_call_args {
190
191
  request_call_stack st;
191
192
  };
192
193
 
193
- static void shutdown_server_unblock_func(void* arg) {
194
- grpc_rb_server* server = (grpc_rb_server*)arg;
195
- grpc_absl_log(GPR_DEBUG, "GRPC_RUBY: shutdown_server_unblock_func");
196
- GRPC_RUBY_ASSERT(server->wrapped != NULL);
197
- grpc_event event;
198
- void* tag = &event;
199
- grpc_server_shutdown_and_notify(server->wrapped, server->queue, tag);
200
- grpc_server_cancel_all_calls(server->wrapped);
201
- // Following call is blocking, but should finish quickly since we've
202
- // cancelled all calls.
203
- event = grpc_completion_queue_pluck(server->queue, tag,
204
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
205
- grpc_absl_log_int(
206
- GPR_DEBUG,
207
- "GRPC_RUBY: shutdown_server_unblock_func pluck event.type: ", event.type);
208
- grpc_absl_log_int(
209
- GPR_DEBUG,
210
- "GRPC_RUBY: shutdown_server_unblock_func event.success: ", event.success);
211
- }
212
-
213
194
  static VALUE grpc_rb_server_request_call_try(VALUE value_args) {
214
195
  grpc_rb_fork_unsafe_begin();
215
196
  struct server_request_call_args* args =
@@ -232,9 +213,9 @@ static VALUE grpc_rb_server_request_call_try(VALUE value_args) {
232
213
  grpc_call_error_detail_of(err), err);
233
214
  }
234
215
 
235
- grpc_event ev = rb_completion_queue_pluck(
236
- args->server->queue, tag, gpr_inf_future(GPR_CLOCK_REALTIME),
237
- shutdown_server_unblock_func, args->server);
216
+ grpc_event ev = rb_completion_queue_pluck(args->server->queue, tag,
217
+ gpr_inf_future(GPR_CLOCK_REALTIME),
218
+ "server request call");
238
219
  if (!ev.success) {
239
220
  rb_raise(grpc_rb_eCallError, "request_call completion failed");
240
221
  }
@@ -307,7 +288,7 @@ static VALUE grpc_rb_server_shutdown_and_notify(VALUE self, VALUE timeout) {
307
288
  deadline = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
308
289
  }
309
290
 
310
- grpc_rb_server_shutdown_and_notify_internal(s, deadline);
291
+ grpc_rb_server_maybe_shutdown_and_notify(s, deadline);
311
292
 
312
293
  return Qnil;
313
294
  }
Binary file
Binary file
Binary file
Binary file
@@ -60,7 +60,7 @@ module GRPC
60
60
  # Minimally, a stub is created with the just the host of the gRPC service
61
61
  # it wishes to access, e.g.,
62
62
  #
63
- # my_stub = ClientStub.new(example.host.com:50505,
63
+ # my_stub = ClientStub.new("example.host.com:50505",
64
64
  # :this_channel_is_insecure)
65
65
  #
66
66
  # If a channel_override argument is passed, it will be used as the
@@ -72,7 +72,7 @@ module GRPC
72
72
  #
73
73
  # - :channel_override
74
74
  # when present, this must be a pre-created GRPC::Core::Channel. If it's
75
- # present the host and arbitrary keyword arg areignored, and the RPC
75
+ # present the host and arbitrary keyword args are ignored, and the RPC
76
76
  # connection uses this channel.
77
77
  #
78
78
  # - :timeout
@@ -118,11 +118,11 @@ module GRPC
118
118
  #
119
119
  # * it does not return until a response is received.
120
120
  #
121
- # * the requests is sent only when GRPC core's flow control allows it to
121
+ # * the request is sent only when GRPC core's flow control allows it to
122
122
  # be sent.
123
123
  #
124
124
  # == Errors ==
125
- # An RuntimeError is raised if
125
+ # A RuntimeError is raised if
126
126
  #
127
127
  # * the server responds with a non-OK status
128
128
  #
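A short usage sketch of the options documented above, assuming a pre-created channel for channel_override (addresses are illustrative):

require 'grpc'

# Plain stub: host plus credentials.
stub = GRPC::ClientStub.new('example.host.com:50505', :this_channel_is_insecure)

# Reusing an existing channel: the host and other keyword args are ignored.
ch = GRPC::Core::Channel.new('example.host.com:50505', nil, :this_channel_is_insecure)
stub = GRPC::ClientStub.new('ignored.example.com', :this_channel_is_insecure,
                            channel_override: ch)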
@@ -14,5 +14,5 @@
14
14
 
15
15
  # GRPC contains the General RPC module.
16
16
  module GRPC
17
- VERSION = '1.73.0'
17
+ VERSION = '1.74.0.pre2'
18
18
  end
@@ -0,0 +1,22 @@
1
+ # Copyright 2015 gRPC authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ require 'spec_helper'
16
+
17
+ describe GRPC::Core do
18
+ it 'returns valid VALUEs from C functions' do
19
+ expect(GRPC::Core.fork_unsafe_begin).to be_nil
20
+ expect(GRPC::Core.fork_unsafe_end).to be_nil
21
+ end
22
+ end
@@ -60,8 +60,8 @@ describe GRPC::ActiveCall do
60
60
 
61
61
  after(:each) do
62
62
  @server.shutdown_and_notify(deadline)
63
- @server.close
64
63
  @server_thread.join
64
+ @server.close
65
65
  # Don't rely on GC to unref the call, since that can prevent
66
66
  # the channel connectivity state polling thread from shutting down.
67
67
  @call.close
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: grpc
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.73.0
4
+ version: 1.74.0.pre2
5
5
  platform: x86-mingw32
6
6
  authors:
7
7
  - gRPC Authors
@@ -205,7 +205,6 @@ extra_rdoc_files: []
205
205
  files:
206
206
  - etc/roots.pem
207
207
  - grpc_c.32-msvcrt.ruby
208
- - grpc_c.64-msvcrt.ruby
209
208
  - grpc_c.64-ucrt.ruby
210
209
  - src/ruby/bin/math_client.rb
211
210
  - src/ruby/bin/math_pb.rb
@@ -254,7 +253,6 @@ files:
254
253
  - src/ruby/ext/grpc/rb_xds_server_credentials.c
255
254
  - src/ruby/ext/grpc/rb_xds_server_credentials.h
256
255
  - src/ruby/lib/grpc.rb
257
- - src/ruby/lib/grpc/3.0/grpc_c.so
258
256
  - src/ruby/lib/grpc/3.1/grpc_c.so
259
257
  - src/ruby/lib/grpc/3.2/grpc_c.so
260
258
  - src/ruby/lib/grpc/3.3/grpc_c.so
@@ -299,6 +297,7 @@ files:
299
297
  - src/ruby/spec/client_auth_spec.rb
300
298
  - src/ruby/spec/client_server_spec.rb
301
299
  - src/ruby/spec/compression_options_spec.rb
300
+ - src/ruby/spec/core_spec.rb
302
301
  - src/ruby/spec/debug_message_spec.rb
303
302
  - src/ruby/spec/error_sanity_spec.rb
304
303
  - src/ruby/spec/errors_spec.rb
@@ -348,7 +347,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
348
347
  requirements:
349
348
  - - ">="
350
349
  - !ruby/object:Gem::Version
351
- version: '3.0'
350
+ version: '3.1'
352
351
  - - "<"
353
352
  - !ruby/object:Gem::Version
354
353
  version: 3.5.dev
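This release raises the required Ruby version to >= 3.1 (and < 3.5.dev), matching the removal of the 3.0 native library above. To try the prerelease, a Gemfile entry along these lines should work (the ruby directive with a requirement string assumes a recent Bundler):

# Gemfile
ruby '>= 3.1'
gem 'grpc', '1.74.0.pre2'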
@@ -358,7 +357,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
358
357
  - !ruby/object:Gem::Version
359
358
  version: '0'
360
359
  requirements: []
361
- rubygems_version: 3.6.9
360
+ rubygems_version: 3.7.0
362
361
  specification_version: 4
363
362
  summary: GRPC system in Ruby
364
363
  test_files:
@@ -373,6 +372,7 @@ test_files:
373
372
  - src/ruby/spec/client_auth_spec.rb
374
373
  - src/ruby/spec/client_server_spec.rb
375
374
  - src/ruby/spec/compression_options_spec.rb
375
+ - src/ruby/spec/core_spec.rb
376
376
  - src/ruby/spec/debug_message_spec.rb
377
377
  - src/ruby/spec/error_sanity_spec.rb
378
378
  - src/ruby/spec/errors_spec.rb
File without changes
Binary file