grpc 1.56.2-x86-linux → 1.57.0-x86-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,6 +47,9 @@ static ID id_channel;
  * GCed before the channel */
 static ID id_target;

+/* hidden ivar that synchronizes post-fork channel re-creation */
+static ID id_channel_recreation_mu;
+
 /* id_insecure_channel is used to indicate that a channel is insecure */
 static VALUE id_insecure_channel;

@@ -67,7 +70,7 @@ typedef struct bg_watched_channel {
 /* grpc_rb_channel wraps a grpc_channel. */
 typedef struct grpc_rb_channel {
   VALUE credentials;
-
+  grpc_channel_args args;
   /* The actual channel (protected in a wrapper to tell when it's safe to
    * destroy) */
   bg_watched_channel* bg_wrapped;
@@ -94,8 +97,6 @@ static bg_watched_channel* bg_watched_channel_list_head = NULL;

 static void grpc_rb_channel_try_register_connection_polling(
     bg_watched_channel* bg);
-static void* wait_until_channel_polling_thread_started_no_gil(void*);
-static void wait_until_channel_polling_thread_started_unblocking_func(void*);
 static void* channel_init_try_register_connection_polling_without_gil(
     void* arg);

@@ -104,11 +105,12 @@ typedef struct channel_init_try_register_stack {
   grpc_rb_channel* wrapper;
 } channel_init_try_register_stack;

-static grpc_completion_queue* channel_polling_cq;
+static grpc_completion_queue* g_channel_polling_cq;
 static gpr_mu global_connection_polling_mu;
 static gpr_cv global_connection_polling_cv;
-static int abort_channel_polling = 0;
-static int channel_polling_thread_started = 0;
+static int g_abort_channel_polling = 0;
+static gpr_once g_once_init = GPR_ONCE_INIT;
+static VALUE g_channel_polling_thread = Qnil;

 static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
 static bg_watched_channel* bg_watched_channel_list_create_and_add(
@@ -158,16 +160,13 @@ static void grpc_rb_channel_free_internal(void* p) {
      * and we can count on this thread to not be interrupted or
      * yield the gil. */
     grpc_rb_channel_safe_destroy(ch->bg_wrapped);
-    ch->bg_wrapped = NULL;
+    grpc_rb_channel_args_destroy(&ch->args);
   }
   xfree(p);
 }

 /* Destroys Channel instances. */
-static void grpc_rb_channel_free(void* p) {
-  grpc_rb_channel_free_internal(p);
-  grpc_ruby_shutdown();
-}
+static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }

 /* Protects the mark object from GC */
 static void grpc_rb_channel_mark(void* p) {
@@ -199,6 +198,7 @@ static VALUE grpc_rb_channel_alloc(VALUE cls) {
   grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
   wrapper->bg_wrapped = NULL;
   wrapper->credentials = Qnil;
+  MEMZERO(&wrapper->args, grpc_channel_args, 1);
   return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
 }

@@ -218,24 +218,15 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
   grpc_channel* ch = NULL;
   grpc_channel_credentials* creds = NULL;
   char* target_chars = NULL;
-  grpc_channel_args args;
   channel_init_try_register_stack stack;
-  int stop_waiting_for_thread_start = 0;
-  MEMZERO(&args, grpc_channel_args, 1);

   grpc_ruby_fork_guard();
-  rb_thread_call_without_gvl(
-      wait_until_channel_polling_thread_started_no_gil,
-      &stop_waiting_for_thread_start,
-      wait_until_channel_polling_thread_started_unblocking_func,
-      &stop_waiting_for_thread_start);
-
   /* "3" == 3 mandatory args */
   rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
   target_chars = StringValueCStr(target);
-  grpc_rb_hash_convert_to_channel_args(channel_args, &args);
+  grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
   if (TYPE(credentials) == T_SYMBOL) {
     if (id_insecure_channel != SYM2ID(credentials)) {
       rb_raise(rb_eTypeError,
@@ -244,7 +235,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
     }
     grpc_channel_credentials* insecure_creds =
         grpc_insecure_credentials_create();
-    ch = grpc_channel_create(target_chars, insecure_creds, &args);
+    ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
     grpc_channel_credentials_release(insecure_creds);
   } else {
     wrapper->credentials = credentials;
@@ -257,7 +248,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
                "bad creds, want ChannelCredentials or XdsChannelCredentials");
       return Qnil;
     }
-    ch = grpc_channel_create(target_chars, creds, &args);
+    ch = grpc_channel_create(target_chars, creds, &wrapper->args);
   }

   GPR_ASSERT(ch);
@@ -266,16 +257,13 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
   rb_thread_call_without_gvl(
       channel_init_try_register_connection_polling_without_gil, &stack, NULL,
       NULL);
-
-  if (args.args != NULL) {
-    xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
-  }
   if (ch == NULL) {
     rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
              target_chars);
     return Qnil;
   }
   rb_ivar_set(self, id_target, target);
+  rb_ivar_set(self, id_channel_recreation_mu, rb_mutex_new());
   return self;
 }

@@ -289,7 +277,6 @@ static void* get_state_without_gil(void* arg) {
   get_state_stack* stack = (get_state_stack*)arg;

   gpr_mu_lock(&global_connection_polling_mu);
-  GPR_ASSERT(abort_channel_polling || channel_polling_thread_started);
   if (stack->bg->channel_destroyed) {
     stack->out = GRPC_CHANNEL_SHUTDOWN;
   } else {
@@ -346,7 +333,7 @@ static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
   gpr_mu_lock(&global_connection_polling_mu);
   // it's unsafe to do a "watch" after "channel polling abort" because the cq
   // has been shut down.
-  if (abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
+  if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
     gpr_mu_unlock(&global_connection_polling_mu);
     return (void*)0;
   }
@@ -354,7 +341,7 @@ static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
   op->op_type = WATCH_STATE_API;
   grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                         stack->last_state, stack->deadline,
-                                        channel_polling_cq, op);
+                                        g_channel_polling_cq, op);

   while (!op->op.api_callback_args.called_back) {
     gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
@@ -418,6 +405,51 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
   return op_success ? Qtrue : Qfalse;
 }

+static void grpc_rb_channel_maybe_recreate_channel_after_fork(
+    grpc_rb_channel* wrapper, VALUE target) {
+  // TODO(apolcyn): maybe check if fork support is enabled here.
+  // The only way we can get bg->channel_destroyed without bg itself being
+  // NULL is if we destroyed the channel during GRPC::prefork.
+  bg_watched_channel* bg = wrapper->bg_wrapped;
+  if (bg->channel_destroyed) {
+    // There must be one ref at this point, held by the ruby-level channel
+    // object, drop this one here.
+    GPR_ASSERT(bg->refcount == 1);
+    rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
+                               NULL);
+    // re-create C-core channel
+    const char* target_str = StringValueCStr(target);
+    grpc_channel* channel;
+    if (wrapper->credentials == Qnil) {
+      grpc_channel_credentials* insecure_creds =
+          grpc_insecure_credentials_create();
+      channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
+      grpc_channel_credentials_release(insecure_creds);
+    } else {
+      grpc_channel_credentials* creds;
+      if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
+        creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
+      } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
+        creds =
+            grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
+      } else {
+        rb_raise(rb_eTypeError,
+                 "failed to re-create channel after fork: bad creds, want "
+                 "ChannelCredentials or XdsChannelCredentials");
+        return;
+      }
+      channel = grpc_channel_create(target_str, creds, &wrapper->args);
+    }
+    // re-register with channel polling thread
+    channel_init_try_register_stack stack;
+    stack.channel = channel;
+    stack.wrapper = wrapper;
+    rb_thread_call_without_gvl(
+        channel_init_try_register_connection_polling_without_gil, &stack, NULL,
+        NULL);
+  }
+}
+
 /* Create a call given a grpc_channel, in order to call method. The request
    is not sent until grpc_call_invoke is called. */
 static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
@@ -452,6 +484,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
     rb_raise(rb_eRuntimeError, "closed!");
     return Qnil;
   }
+  // TODO(apolcyn): only do this check if fork support is enabled
+  rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
+  grpc_rb_channel_maybe_recreate_channel_after_fork(
+      wrapper, rb_ivar_get(self, id_target));
+  rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));

   cq = grpc_completion_queue_create_for_pluck(NULL);
   method_slice =
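The two hunks above are the heart of the new fork handling: the channel wrapper keeps a copy of its grpc_channel_args and credentials, and every create_call first takes the per-channel __channel_recreation_mu mutex and lazily rebuilds the C-core channel if it was torn down during the GRPC::prefork path mentioned in the TODO comments. Reduced to its essentials, the pattern is a mutex-guarded check-and-recreate; the sketch below is illustrative only and uses hypothetical names (wrapped_resource, create_core_handle), not the gem's types:

    /* Illustrative sketch, not part of the diff. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct { int unused; } core_handle; /* stands in for grpc_channel */

    static core_handle* create_core_handle(void) { /* rebuild from saved args */
      return (core_handle*)malloc(sizeof(core_handle));
    }

    typedef struct {
      pthread_mutex_t mu;        /* role of the __channel_recreation_mu ivar */
      bool destroyed_in_prefork; /* role of bg->channel_destroyed */
      core_handle* handle;       /* role of the wrapped grpc_channel* */
    } wrapped_resource;

    /* Called at the start of every operation, as create_call now does: if the
     * underlying handle was torn down before fork, rebuild it before use. */
    static void maybe_recreate(wrapped_resource* r) {
      pthread_mutex_lock(&r->mu);
      if (r->destroyed_in_prefork) {
        r->handle = create_core_handle();
        r->destroyed_in_prefork = false;
      }
      pthread_mutex_unlock(&r->mu);
    }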
@@ -581,19 +618,15 @@ static void grpc_rb_channel_try_register_connection_polling(
     bg_watched_channel* bg) {
   grpc_connectivity_state conn_state;
   watch_state_op* op = NULL;
-
-  GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);
-
   if (bg->refcount == 0) {
     GPR_ASSERT(bg->channel_destroyed);
     bg_watched_channel_list_free_and_remove(bg);
     return;
   }
   GPR_ASSERT(bg->refcount == 1);
-  if (bg->channel_destroyed || abort_channel_polling) {
+  if (bg->channel_destroyed || g_abort_channel_polling) {
     return;
   }
-
   conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
   if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
     return;
@@ -601,13 +634,12 @@ static void grpc_rb_channel_try_register_connection_polling(
   GPR_ASSERT(bg_watched_channel_list_lookup(bg));
   // prevent bg from being free'd by GC while background thread is watching it
   bg->refcount++;
-
   op = gpr_zalloc(sizeof(watch_state_op));
   op->op_type = CONTINUOUS_WATCH;
   op->op.continuous_watch_callback_args.bg = bg;
   grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                         gpr_inf_future(GPR_CLOCK_REALTIME),
-                                        channel_polling_cq, op);
+                                        g_channel_polling_cq, op);
 }

 // Note this loop breaks out with a single call of
@@ -624,14 +656,12 @@ static void* run_poll_channels_loop_no_gil(void* arg) {
   gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

   gpr_mu_lock(&global_connection_polling_mu);
-  GPR_ASSERT(!channel_polling_thread_started);
-  channel_polling_thread_started = 1;
   gpr_cv_broadcast(&global_connection_polling_cv);
   gpr_mu_unlock(&global_connection_polling_mu);

   for (;;) {
     event = grpc_completion_queue_next(
-        channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+        g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
     if (event.type == GRPC_QUEUE_SHUTDOWN) {
       break;
     }
@@ -652,7 +682,7 @@ static void* run_poll_channels_loop_no_gil(void* arg) {
     }
     gpr_mu_unlock(&global_connection_polling_mu);
   }
-  grpc_completion_queue_destroy(channel_polling_cq);
+  grpc_completion_queue_destroy(g_channel_polling_cq);
   gpr_log(GPR_DEBUG,
           "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
           "loop");
@@ -669,11 +699,11 @@ static void run_poll_channels_loop_unblocking_func(void* arg) {
           "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
           "connection polling");
   // early out after first time through
-  if (abort_channel_polling) {
+  if (g_abort_channel_polling) {
     gpr_mu_unlock(&global_connection_polling_mu);
     return;
   }
-  abort_channel_polling = 1;
+  g_abort_channel_polling = 1;

   // force pending watches to end by switching to shutdown state
   bg = bg_watched_channel_list_head;
@@ -685,7 +715,9 @@ static void run_poll_channels_loop_unblocking_func(void* arg) {
     bg = bg->next;
   }

-  grpc_completion_queue_shutdown(channel_polling_cq);
+  gpr_log(GPR_DEBUG, "GRPC_RUBY: cq shutdown on global polling cq. pid: %d",
+          getpid());
+  grpc_completion_queue_shutdown(g_channel_polling_cq);
   gpr_cv_broadcast(&global_connection_polling_cv);
   gpr_mu_unlock(&global_connection_polling_mu);
   gpr_log(GPR_DEBUG,
@@ -699,47 +731,25 @@ static VALUE run_poll_channels_loop(VALUE arg) {
   gpr_log(
       GPR_DEBUG,
       "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
-  grpc_ruby_init();
   rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                              run_poll_channels_loop_unblocking_func, NULL);
-  grpc_ruby_shutdown();
   return Qnil;
 }

-static void* wait_until_channel_polling_thread_started_no_gil(void* arg) {
-  int* stop_waiting = (int*)arg;
-  gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
-  gpr_mu_lock(&global_connection_polling_mu);
-  while (!channel_polling_thread_started && !abort_channel_polling &&
-         !*stop_waiting) {
-    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
-                gpr_inf_future(GPR_CLOCK_REALTIME));
-  }
-  gpr_mu_unlock(&global_connection_polling_mu);
-
-  return NULL;
-}
-
-static void wait_until_channel_polling_thread_started_unblocking_func(
-    void* arg) {
-  int* stop_waiting = (int*)arg;
-  gpr_mu_lock(&global_connection_polling_mu);
-  gpr_log(GPR_DEBUG,
-          "GRPC_RUBY: interrupt wait for channel polling thread to start");
-  *stop_waiting = 1;
-  gpr_cv_broadcast(&global_connection_polling_cv);
-  gpr_mu_unlock(&global_connection_polling_mu);
-}
-
 static void* set_abort_channel_polling_without_gil(void* arg) {
   (void)arg;
   gpr_mu_lock(&global_connection_polling_mu);
-  abort_channel_polling = 1;
+  g_abort_channel_polling = 1;
   gpr_cv_broadcast(&global_connection_polling_cv);
   gpr_mu_unlock(&global_connection_polling_mu);
   return NULL;
 }

+static void do_basic_init() {
+  gpr_mu_init(&global_connection_polling_mu);
+  gpr_cv_init(&global_connection_polling_cv);
+}
+
 /* Temporary fix for
  * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
  * Transports in idle channels can get destroyed. Normally c-core re-connects,
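The new do_basic_init plus the gpr_once guard used in thread_start below replace the old "initialize the mutex/cv inside thread_start" logic: the synchronization primitives are now created exactly once per process, while the polling thread itself can be stopped and started again (its stop path below resets state so it "can start again later", per its comment). A minimal sketch of the gpr_once idiom in isolation, with illustrative names; gpr_once, gpr_mu and gpr_cv come from grpc/support/sync.h:

    /* Illustrative sketch, not part of the diff. */
    #include <grpc/support/sync.h>

    static gpr_once g_init_once = GPR_ONCE_INIT;
    static gpr_mu g_mu;
    static gpr_cv g_cv;

    static void one_time_init(void) {
      /* runs at most once per process, no matter how often start is called */
      gpr_mu_init(&g_mu);
      gpr_cv_init(&g_cv);
    }

    void start_worker(void) {
      gpr_once_init(&g_init_once, one_time_init); /* cheap after the first call */
      /* ... create the background thread here, as thread_start does ... */
    }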
@@ -751,23 +761,36 @@ static void* set_abort_channel_polling_without_gil(void* arg) {
  * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
  */
 void grpc_rb_channel_polling_thread_start() {
-  VALUE background_thread = Qnil;
-
-  GPR_ASSERT(!abort_channel_polling);
-  GPR_ASSERT(!channel_polling_thread_started);
-  GPR_ASSERT(channel_polling_cq == NULL);
-
-  gpr_mu_init(&global_connection_polling_mu);
-  gpr_cv_init(&global_connection_polling_cv);
+  gpr_once_init(&g_once_init, do_basic_init);
+  GPR_ASSERT(!RTEST(g_channel_polling_thread));
+  GPR_ASSERT(!g_abort_channel_polling);
+  GPR_ASSERT(g_channel_polling_cq == NULL);

-  channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
-  background_thread = rb_thread_create(run_poll_channels_loop, NULL);
+  g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
+  g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);

-  if (!RTEST(background_thread)) {
-    gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
+  if (!RTEST(g_channel_polling_thread)) {
+    gpr_log(GPR_ERROR, "GRPC_RUBY: failed to spawn channel polling thread");
     rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                                NULL, NULL);
+    return;
+  }
+}
+
+void grpc_rb_channel_polling_thread_stop() {
+  if (!RTEST(g_channel_polling_thread)) {
+    gpr_log(GPR_ERROR,
+            "GRPC_RUBY: channel polling thread stop: thread was not started");
+    return;
   }
+  rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func, NULL, NULL,
+                             NULL);
+  rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
+  // state associated with the channel polling thread is destroyed, reset so
+  // we can start again later
+  g_channel_polling_thread = Qnil;
+  g_abort_channel_polling = false;
+  g_channel_polling_cq = NULL;
 }

 static void Init_grpc_propagate_masks() {
  static void Init_grpc_propagate_masks() {
@@ -803,6 +826,7 @@ static void Init_grpc_connectivity_states() {
803
826
  }
804
827
 
805
828
  void Init_grpc_channel() {
829
+ rb_global_variable(&g_channel_polling_thread);
806
830
  grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
807
831
  rb_undef_alloc_func(grpc_rb_cChannelArgs);
808
832
  grpc_rb_cChannel =
@@ -829,6 +853,7 @@ void Init_grpc_channel() {

   id_channel = rb_intern("__channel");
   id_target = rb_intern("__target");
+  id_channel_recreation_mu = rb_intern("__channel_recreation_mu");
   rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                   ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
   rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
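Because the polling thread is now held in a C static (g_channel_polling_thread) so it can be joined later, Init_grpc_channel registers that slot with rb_global_variable; without that, Ruby's GC could collect the Thread object while the extension still references it. A minimal sketch of the same GC-rooting idiom, with illustrative names (worker_body, init_my_extension):

    /* Illustrative sketch, not part of the diff. */
    #include <ruby.h>

    static VALUE g_worker_thread = Qnil;

    static VALUE worker_body(void* arg) { return Qnil; /* placeholder */ }

    void init_my_extension(void) {
      rb_global_variable(&g_worker_thread); /* root the static VALUE for the GC */
      g_worker_thread = rb_thread_create(worker_body, NULL);
    }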
@@ -27,6 +27,7 @@
 void Init_grpc_channel();

 void grpc_rb_channel_polling_thread_start();
+void grpc_rb_channel_polling_thread_stop();

 /* Gets the wrapped channel from the ruby wrapper */
 grpc_channel* grpc_rb_get_wrapped_channel(VALUE v);
@@ -24,6 +24,7 @@
 #include "rb_grpc_imports.generated.h"

 #include <grpc/grpc.h>
+#include <grpc/support/log.h>

 static rb_data_type_t grpc_rb_channel_args_data_type = {
     "grpc_channel_args",
@@ -73,13 +74,14 @@ static int grpc_rb_channel_create_in_process_add_args_hash_cb(VALUE key,
     case T_SYMBOL:
       args->args[args->num_args - 1].type = GRPC_ARG_STRING;
       args->args[args->num_args - 1].value.string =
-          (char*)rb_id2name(SYM2ID(val));
+          strdup(rb_id2name(SYM2ID(val)));
       --args->num_args;
       return ST_CONTINUE;

     case T_STRING:
       args->args[args->num_args - 1].type = GRPC_ARG_STRING;
-      args->args[args->num_args - 1].value.string = StringValueCStr(val);
+      args->args[args->num_args - 1].value.string =
+          strdup(StringValueCStr(val));
       --args->num_args;
       return ST_CONTINUE;

@@ -154,3 +156,15 @@ void grpc_rb_hash_convert_to_channel_args(VALUE src_hash,
     rb_jump_tag(status);
   }
 }
+
+void grpc_rb_channel_args_destroy(grpc_channel_args* args) {
+  GPR_ASSERT(args != NULL);
+  if (args->args == NULL) return;
+  for (int i = 0; i < args->num_args; i++) {
+    if (args->args[i].type == GRPC_ARG_STRING) {
+      // we own string pointers, which were created with strdup
+      free(args->args[i].value.string);
+    }
+  }
+  xfree(args->args);
+}
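Together, the last two hunks fix the ownership of GRPC_ARG_STRING values: the conversion callback now strdup()s every string (previously it pointed into Ruby-owned memory via rb_id2name/StringValueCStr), and grpc_rb_channel_args_destroy frees those copies when the channel wrapper is released. That is what allows the args to be kept in the wrapper for post-fork re-creation. Reduced to its essentials, the convention looks like this (illustrative names only):

    /* Illustrative sketch, not part of the diff. */
    #include <stdlib.h>
    #include <string.h>

    typedef struct { char* value; } string_arg;

    static void string_arg_set(string_arg* a, const char* ruby_owned) {
      a->value = strdup(ruby_owned); /* take a copy that outlives the VALUE */
    }

    static void string_arg_destroy(string_arg* a) {
      free(a->value); /* we own the strdup'd copy, so plain free() is correct */
      a->value = NULL;
    }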
@@ -35,4 +35,8 @@
 void grpc_rb_hash_convert_to_channel_args(VALUE src_hash,
                                           grpc_channel_args* dst);

+/* Destroys inner fields of args (does not deallocate the args pointer itself)
+ */
+void grpc_rb_channel_args_destroy(grpc_channel_args* args);
+
 #endif /* GRPC_RB_CHANNEL_ARGS_H_ */
@@ -63,7 +63,6 @@ static void grpc_rb_channel_credentials_free_internal(void* p) {
 /* Destroys the credentials instances. */
 static void grpc_rb_channel_credentials_free(void* p) {
   grpc_rb_channel_credentials_free_internal(p);
-  grpc_ruby_shutdown();
 }

 /* Protects the mark object from GC */
@@ -70,7 +70,6 @@ static void grpc_rb_compression_options_free_internal(void* p) {
  * wrapped grpc compression options. */
 static void grpc_rb_compression_options_free(void* p) {
   grpc_rb_compression_options_free_internal(p);
-  grpc_ruby_shutdown();
 }

 /* Ruby recognized data type for the CompressionOptions class. */
@@ -51,6 +51,8 @@ typedef struct grpc_rb_event_queue {
 } grpc_rb_event_queue;

 static grpc_rb_event_queue event_queue;
+static VALUE g_event_thread = Qnil;
+static bool g_one_time_init_done = false;

 void grpc_rb_event_queue_enqueue(void (*callback)(void*), void* argument) {
   grpc_rb_event* event = gpr_malloc(sizeof(grpc_rb_event));
@@ -117,7 +119,6 @@ static void grpc_rb_event_unblocking_func(void* arg) {
 static VALUE grpc_rb_event_thread(VALUE arg) {
   grpc_rb_event* event;
   (void)arg;
-  grpc_ruby_init();
   while (true) {
     event = (grpc_rb_event*)rb_thread_call_without_gvl(
         grpc_rb_wait_for_event_no_gil, NULL, grpc_rb_event_unblocking_func,
@@ -131,15 +132,30 @@ static VALUE grpc_rb_event_thread(VALUE arg) {
     }
   }
   grpc_rb_event_queue_destroy();
-  grpc_ruby_shutdown();
   return Qnil;
 }

 void grpc_rb_event_queue_thread_start() {
-  event_queue.head = event_queue.tail = NULL;
+  if (!g_one_time_init_done) {
+    g_one_time_init_done = true;
+    gpr_mu_init(&event_queue.mu);
+    gpr_cv_init(&event_queue.cv);
+    rb_global_variable(&g_event_thread);
+    event_queue.head = event_queue.tail = NULL;
+  }
   event_queue.abort = false;
-  gpr_mu_init(&event_queue.mu);
-  gpr_cv_init(&event_queue.cv);
+  GPR_ASSERT(!RTEST(g_event_thread));
+  g_event_thread = rb_thread_create(grpc_rb_event_thread, NULL);
+}

-  rb_thread_create(grpc_rb_event_thread, NULL);
+void grpc_rb_event_queue_thread_stop() {
+  GPR_ASSERT(g_one_time_init_done);
+  if (!RTEST(g_event_thread)) {
+    gpr_log(GPR_ERROR,
+            "GRPC_RUBY: call credentials thread stop: thread not running");
+    return;
+  }
+  rb_thread_call_without_gvl(grpc_rb_event_unblocking_func, NULL, NULL, NULL);
+  rb_funcall(g_event_thread, rb_intern("join"), 0);
+  g_event_thread = Qnil;
 }
@@ -17,5 +17,6 @@
  */

 void grpc_rb_event_queue_thread_start();
+void grpc_rb_event_queue_thread_stop();

 void grpc_rb_event_queue_enqueue(void (*callback)(void*), void* argument);