grpc 1.56.2-x86-mingw32 → 1.57.0.pre1-x86-mingw32

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: e70ca163549807a35991b68ccdfffe1d71d4deebb9b199a85add0779e08c3517
-   data.tar.gz: b5ce3cc5bbc606f7d0286869eeff502faff66d04cbd6eea0f12e84de37c90630
+   metadata.gz: 411d3d54ffb301f1177d53a73997ff944f45c6b96c71d5d95d7f02d99631c102
+   data.tar.gz: c559ff364aed59c96f2a5cff232a3b532918b1e5669183a4ec6fd31f21852126
  SHA512:
-   metadata.gz: b467c259a59ab4717f0d17fb83c546672c77e561148954bb67d8ded3d360df542635c1210c8e7f0b2723603baaa6f6171c3924076f666153714f4c50bb6ac451
-   data.tar.gz: ced90a44b0d06708d1f552e10ab56f8261481e506e0ed7ae99ea5f5211a7063bf21cf1537d6b268d7324ee967081dcdd3e2cd117f2406ec4fdee636082e0e7eb
+   metadata.gz: 74663c184af28dd57506986a414d97c65892b868aa47346bacd5284e416bcb907c3b9c4f9af391cbcba3b0fa70ab22bcd08bbf9e947e78ce2e4ae191fdfffb38
+   data.tar.gz: 878fa72f18f81b0c2dcc620151e5d2e683267175918a964e5ebada5def7f580000fd33af22fa18d1e7c305c7fae294d56f33f5cdf51a99cf079ec6b193a95b78
Binary file
@@ -1,28 +1,34 @@
+ # frozen_string_literal: true
  # Generated by the protocol buffer compiler. DO NOT EDIT!
  # source: math.proto

  require 'google/protobuf'

- Google::Protobuf::DescriptorPool.generated_pool.build do
-   add_file("math.proto", :syntax => :proto3) do
-     add_message "math.DivArgs" do
-       optional :dividend, :int64, 1
-       optional :divisor, :int64, 2
-     end
-     add_message "math.DivReply" do
-       optional :quotient, :int64, 1
-       optional :remainder, :int64, 2
-     end
-     add_message "math.FibArgs" do
-       optional :limit, :int64, 1
-     end
-     add_message "math.Num" do
-       optional :num, :int64, 1
-     end
-     add_message "math.FibReply" do
-       optional :count, :int64, 1
+ 
+ descriptor_data = "\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x01(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x01(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x01(\x03\x12\x11\n\tremainder\x18\x02 \x01(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x01(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x01(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01\x62\x06proto3"
+ 
+ pool = Google::Protobuf::DescriptorPool.generated_pool
+ 
+ begin
+   pool.add_serialized_file(descriptor_data)
+ rescue TypeError => e
+   # Compatibility code: will be removed in the next major version.
+   require 'google/protobuf/descriptor_pb'
+   parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+   parsed.clear_dependency
+   serialized = parsed.class.encode(parsed)
+   file = pool.add_serialized_file(serialized)
+   warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+   imports = [
+   ]
+   imports.each do |type_name, expected_filename|
+     import_file = pool.lookup(type_name).file_descriptor
+     if import_file.name != expected_filename
+       warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
      end
    end
+   warn "Each proto file must use a consistent fully-qualified name."
+   warn "This will become an error in the next major version."
  end

  module Math
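
Note on the generated file above: the old `build do ... end` DSL is replaced by a serialized FileDescriptorProto registered with the generated descriptor pool. A minimal usage sketch, not part of the diff, assuming math_pb.rb has been loaded; it shows how message classes are then resolved from the pool by fully-qualified name, which is the same lookup the generated constants perform:

require 'google/protobuf'
require_relative 'math_pb'   # hypothetical load path for the generated file above

pool = Google::Protobuf::DescriptorPool.generated_pool
div_args_class = pool.lookup("math.DivArgs").msgclass

msg = div_args_class.new(dividend: 7, divisor: 2)
bytes = div_args_class.encode(msg)            # serialize
puts div_args_class.decode(bytes).inspect     # round-trip back to a message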
@@ -88,11 +88,15 @@ env_append 'CPPFLAGS', '-DGRPC_XDS_USER_AGENT_NAME_SUFFIX="\"RUBY\""'

  require_relative '../../lib/grpc/version'
  env_append 'CPPFLAGS', '-DGRPC_XDS_USER_AGENT_VERSION_SUFFIX="\"' + GRPC::VERSION + '\""'
+ env_append 'CPPFLAGS', '-DGRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK=1'

  output_dir = File.expand_path(RbConfig::CONFIG['topdir'])
  grpc_lib_dir = File.join(output_dir, 'libs', grpc_config)
  ENV['BUILDDIR'] = output_dir

+ strip_tool = RbConfig::CONFIG['STRIP']
+ strip_tool += ' -x' if apple_toolchain
+ 
  unless windows
    puts 'Building internal gRPC into ' + grpc_lib_dir
    nproc = 4
@@ -107,6 +111,17 @@ unless windows
    puts "Building grpc native library: #{cmd}"
    system(cmd)
    exit 1 unless $? == 0
+ 
+   if grpc_config == 'opt'
+     rm_obj_cmd = "rm -rf #{File.join(output_dir, 'objs')}"
+     puts "Removing grpc object files: #{rm_obj_cmd}"
+     system(rm_obj_cmd)
+     exit 1 unless $? == 0
+     strip_cmd = "#{strip_tool} #{grpc_lib_dir}/*.a"
+     puts "Stripping grpc native library: #{strip_cmd}"
+     system(strip_cmd)
+     exit 1 unless $? == 0
+   end
  end

  $CFLAGS << ' -DGRPC_RUBY_WINDOWS_UCRT' if windows_ucrt
@@ -141,7 +156,10 @@ end

  ext_export_file = File.join(grpc_root, 'src', 'ruby', 'ext', 'grpc', ext_export_filename())
  $LDFLAGS << ' -Wl,--version-script="' + ext_export_file + '.gcc"' if linux
- $LDFLAGS << ' -Wl,-exported_symbols_list,"' + ext_export_file + '.clang"' if apple_toolchain
+ if apple_toolchain
+   $LDFLAGS << ' -weak_framework CoreFoundation' if RUBY_PLATFORM =~ /arm64/
+   $LDFLAGS << ' -Wl,-exported_symbols_list,"' + ext_export_file + '.clang"'
+ end

  $LDFLAGS << ' ' + File.join(grpc_lib_dir, 'libgrpc.a') unless windows
  if grpc_config == 'gcov'
@@ -169,23 +187,6 @@ output = File.join('grpc', 'grpc_c')
  puts 'Generating Makefile for ' + output
  create_makefile(output)

- strip_tool = RbConfig::CONFIG['STRIP']
- strip_tool += ' -x' if apple_toolchain
- 
- if grpc_config == 'opt'
-   File.open('Makefile.new', 'w') do |o|
-     o.puts 'hijack: all strip'
-     o.puts
-     File.foreach('Makefile') do |i|
-       o.puts i
-     end
-     o.puts
-     o.puts 'strip: $(DLLIB)'
-     o.puts "\t$(ECHO) Stripping $(DLLIB)"
-     o.puts "\t$(Q) #{strip_tool} $(DLLIB)"
-   end
-   File.rename('Makefile.new', 'Makefile')
- end
  if ENV['GRPC_RUBY_TEST_ONLY_WORKAROUND_MAKE_INSTALL_BUG']
    # Note: this env var setting is intended to work around a problem observed
    # with the ginstall command on grpc's macos automated test infrastructure,
@@ -801,6 +801,56 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack* st) {
    return result;
  }

+ struct call_run_batch_args {
+   grpc_rb_call* call;
+   unsigned write_flag;
+   VALUE ops_hash;
+   run_batch_stack* st;
+ };
+ 
+ static VALUE grpc_rb_call_run_batch_try(VALUE value_args) {
+   grpc_rb_fork_unsafe_begin();
+   struct call_run_batch_args* args = (struct call_run_batch_args*)value_args;
+   void* tag = (void*)&args->st;
+ 
+   grpc_event ev;
+   grpc_call_error err;
+ 
+   args->st = gpr_malloc(sizeof(run_batch_stack));
+   grpc_run_batch_stack_init(args->st, args->write_flag);
+   grpc_run_batch_stack_fill_ops(args->st, args->ops_hash);
+ 
+   /* call grpc_call_start_batch, then wait for it to complete using
+    * pluck_event */
+   err = grpc_call_start_batch(args->call->wrapped, args->st->ops,
+                               args->st->op_num, tag, NULL);
+   if (err != GRPC_CALL_OK) {
+     rb_raise(grpc_rb_eCallError,
+              "grpc_call_start_batch failed with %s (code=%d)",
+              grpc_call_error_detail_of(err), err);
+   }
+   ev = rb_completion_queue_pluck(args->call->queue, tag,
+                                  gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+   if (!ev.success) {
+     rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow");
+   }
+   /* Build and return the BatchResult struct result,
+      if there is an error, it's reflected in the status */
+   return grpc_run_batch_stack_build_result(args->st);
+ }
+ 
+ static VALUE grpc_rb_call_run_batch_ensure(VALUE value_args) {
+   grpc_rb_fork_unsafe_end();
+   struct call_run_batch_args* args = (struct call_run_batch_args*)value_args;
+ 
+   if (args->st) {
+     grpc_run_batch_stack_cleanup(args->st);
+     gpr_free(args->st);
+   }
+ 
+   return Qnil;
+ }
+ 
  /* call-seq:
     ops = {
       GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
@@ -819,56 +869,29 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack* st) {
     Only one operation of each type can be active at once in any given
     batch */
  static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) {
-   run_batch_stack* st = NULL;
-   grpc_rb_call* call = NULL;
-   grpc_event ev;
-   grpc_call_error err;
-   VALUE result = Qnil;
-   VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
-   unsigned write_flag = 0;
-   void* tag = (void*)&st;
- 
    grpc_ruby_fork_guard();
    if (RTYPEDDATA_DATA(self) == NULL) {
      rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
-     return Qnil;
    }
+ 
+   grpc_rb_call* call = NULL;
    TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);

    /* Validate the ops args, adding them to a ruby array */
    if (TYPE(ops_hash) != T_HASH) {
      rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
-     return Qnil;
    }
-   if (rb_write_flag != Qnil) {
-     write_flag = NUM2UINT(rb_write_flag);
-   }
-   st = gpr_malloc(sizeof(run_batch_stack));
-   grpc_run_batch_stack_init(st, write_flag);
-   grpc_run_batch_stack_fill_ops(st, ops_hash);

-   /* call grpc_call_start_batch, then wait for it to complete using
-    * pluck_event */
-   err = grpc_call_start_batch(call->wrapped, st->ops, st->op_num, tag, NULL);
-   if (err != GRPC_CALL_OK) {
-     grpc_run_batch_stack_cleanup(st);
-     gpr_free(st);
-     rb_raise(grpc_rb_eCallError,
-              "grpc_call_start_batch failed with %s (code=%d)",
-              grpc_call_error_detail_of(err), err);
-     return Qnil;
-   }
-   ev = rb_completion_queue_pluck(call->queue, tag,
-                                  gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
-   if (!ev.success) {
-     rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow");
-   }
-   /* Build and return the BatchResult struct result,
-      if there is an error, it's reflected in the status */
-   result = grpc_run_batch_stack_build_result(st);
-   grpc_run_batch_stack_cleanup(st);
-   gpr_free(st);
-   return result;
+   VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
+ 
+   struct call_run_batch_args args = {
+       .call = call,
+       .write_flag = rb_write_flag == Qnil ? 0 : NUM2UINT(rb_write_flag),
+       .ops_hash = ops_hash,
+       .st = NULL};
+ 
+   return rb_ensure(grpc_rb_call_run_batch_try, (VALUE)&args,
+                    grpc_rb_call_run_batch_ensure, (VALUE)&args);
  }

  static void Init_grpc_write_flags() {
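
For context on the refactor above: the Ruby-visible behavior of call#run_batch does not change; the cleanup now runs via rb_ensure so the batch stack is freed even when an exception is raised. A hedged sketch of the Ruby surface this C function implements (assumes an already-created GRPC::Core::Call in `call`; the op values follow the call-seq comment above and are illustrative only):

ops = {
  GRPC::Core::CallOps::SEND_INITIAL_METADATA => {},
  GRPC::Core::CallOps::SEND_MESSAGE => 'raw request bytes',
  GRPC::Core::CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
batch_result = call.run_batch(ops)   # blocks until the whole batch completes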
@@ -193,7 +193,6 @@ static void grpc_rb_call_credentials_free_internal(void* p) {
  /* Destroys the credentials instances. */
  static void grpc_rb_call_credentials_free(void* p) {
    grpc_rb_call_credentials_free_internal(p);
-   grpc_ruby_shutdown();
  }

  /* Protects the mark object from GC */
@@ -47,6 +47,9 @@ static ID id_channel;
   * GCed before the channel */
  static ID id_target;

+ /* hidden ivar that synchronizes post-fork channel re-creation */
+ static ID id_channel_recreation_mu;
+ 
  /* id_insecure_channel is used to indicate that a channel is insecure */
  static VALUE id_insecure_channel;

@@ -67,7 +70,7 @@ typedef struct bg_watched_channel {
  /* grpc_rb_channel wraps a grpc_channel. */
  typedef struct grpc_rb_channel {
    VALUE credentials;
- 
+   grpc_channel_args args;
    /* The actual channel (protected in a wrapper to tell when it's safe to
     * destroy) */
    bg_watched_channel* bg_wrapped;
@@ -104,11 +107,13 @@ typedef struct channel_init_try_register_stack {
    grpc_rb_channel* wrapper;
  } channel_init_try_register_stack;

- static grpc_completion_queue* channel_polling_cq;
+ static grpc_completion_queue* g_channel_polling_cq;
  static gpr_mu global_connection_polling_mu;
  static gpr_cv global_connection_polling_cv;
- static int abort_channel_polling = 0;
- static int channel_polling_thread_started = 0;
+ static int g_abort_channel_polling = 0;
+ static int g_channel_polling_thread_started = 0;
+ static gpr_once g_once_init = GPR_ONCE_INIT;
+ static VALUE g_channel_polling_thread = Qnil;

  static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
  static bg_watched_channel* bg_watched_channel_list_create_and_add(
@@ -158,16 +163,13 @@ static void grpc_rb_channel_free_internal(void* p) {
     * and we can count on this thread to not be interrupted or
     * yield the gil. */
      grpc_rb_channel_safe_destroy(ch->bg_wrapped);
-     ch->bg_wrapped = NULL;
+     grpc_rb_channel_args_destroy(&ch->args);
    }
    xfree(p);
  }

  /* Destroys Channel instances. */
- static void grpc_rb_channel_free(void* p) {
-   grpc_rb_channel_free_internal(p);
-   grpc_ruby_shutdown();
- }
+ static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }

  /* Protects the mark object from GC */
  static void grpc_rb_channel_mark(void* p) {
@@ -199,6 +201,7 @@ static VALUE grpc_rb_channel_alloc(VALUE cls) {
    grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
    wrapper->bg_wrapped = NULL;
    wrapper->credentials = Qnil;
+   MEMZERO(&wrapper->args, grpc_channel_args, 1);
    return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
  }

@@ -218,24 +221,21 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
    grpc_channel* ch = NULL;
    grpc_channel_credentials* creds = NULL;
    char* target_chars = NULL;
-   grpc_channel_args args;
    channel_init_try_register_stack stack;
-   int stop_waiting_for_thread_start = 0;
-   MEMZERO(&args, grpc_channel_args, 1);

    grpc_ruby_fork_guard();
+   int stop_waiting_for_thread_start = 0;
    rb_thread_call_without_gvl(
        wait_until_channel_polling_thread_started_no_gil,
        &stop_waiting_for_thread_start,
        wait_until_channel_polling_thread_started_unblocking_func,
        &stop_waiting_for_thread_start);
- 
    /* "3" == 3 mandatory args */
    rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

    TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
    target_chars = StringValueCStr(target);
-   grpc_rb_hash_convert_to_channel_args(channel_args, &args);
+   grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
    if (TYPE(credentials) == T_SYMBOL) {
      if (id_insecure_channel != SYM2ID(credentials)) {
        rb_raise(rb_eTypeError,
@@ -244,7 +244,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
      }
      grpc_channel_credentials* insecure_creds =
          grpc_insecure_credentials_create();
-     ch = grpc_channel_create(target_chars, insecure_creds, &args);
+     ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
      grpc_channel_credentials_release(insecure_creds);
    } else {
      wrapper->credentials = credentials;
@@ -257,7 +257,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
               "bad creds, want ChannelCredentials or XdsChannelCredentials");
      return Qnil;
    }
-   ch = grpc_channel_create(target_chars, creds, &args);
+   ch = grpc_channel_create(target_chars, creds, &wrapper->args);
  }

  GPR_ASSERT(ch);
@@ -266,16 +266,13 @@ static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
    rb_thread_call_without_gvl(
        channel_init_try_register_connection_polling_without_gil, &stack, NULL,
        NULL);
- 
-   if (args.args != NULL) {
-     xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
-   }
    if (ch == NULL) {
      rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
               target_chars);
      return Qnil;
    }
    rb_ivar_set(self, id_target, target);
+   rb_ivar_set(self, id_channel_recreation_mu, rb_mutex_new());
    return self;
  }

@@ -289,7 +286,7 @@ static void* get_state_without_gil(void* arg) {
    get_state_stack* stack = (get_state_stack*)arg;

    gpr_mu_lock(&global_connection_polling_mu);
-   GPR_ASSERT(abort_channel_polling || channel_polling_thread_started);
+   GPR_ASSERT(g_abort_channel_polling || g_channel_polling_thread_started);
    if (stack->bg->channel_destroyed) {
      stack->out = GRPC_CHANNEL_SHUTDOWN;
    } else {
@@ -346,7 +343,7 @@ static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
    gpr_mu_lock(&global_connection_polling_mu);
    // it's unsafe to do a "watch" after "channel polling abort" because the cq
    // has been shut down.
-   if (abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
+   if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
      gpr_mu_unlock(&global_connection_polling_mu);
      return (void*)0;
    }
@@ -354,7 +351,7 @@ static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
    op->op_type = WATCH_STATE_API;
    grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                          stack->last_state, stack->deadline,
-                                         channel_polling_cq, op);
+                                         g_channel_polling_cq, op);

    while (!op->op.api_callback_args.called_back) {
      gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
@@ -418,6 +415,58 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
    return op_success ? Qtrue : Qfalse;
  }

+ static void grpc_rb_channel_maybe_recreate_channel_after_fork(
+     grpc_rb_channel* wrapper, VALUE target) {
+   // TODO(apolcyn): maybe check if fork support is enabled here.
+   // The only way we can get bg->channel_destroyed without bg itself being
+   // NULL is if we destroyed the channel during GRPC::prefork.
+   bg_watched_channel* bg = wrapper->bg_wrapped;
+   if (bg->channel_destroyed) {
+     // There must be one ref at this point, held by the ruby-level channel
+     // object.
+     GPR_ASSERT(bg->refcount == 1);
+     // Wait for channel polling thread to re-initialize
+     int stop_waiting_for_thread_start = 0;
+     rb_thread_call_without_gvl(
+         wait_until_channel_polling_thread_started_no_gil,
+         &stop_waiting_for_thread_start,
+         wait_until_channel_polling_thread_started_unblocking_func,
+         &stop_waiting_for_thread_start);
+     rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
+                                NULL);
+     // re-create C-core channel
+     const char* target_str = StringValueCStr(target);
+     grpc_channel* channel;
+     if (wrapper->credentials == Qnil) {
+       grpc_channel_credentials* insecure_creds =
+           grpc_insecure_credentials_create();
+       channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
+       grpc_channel_credentials_release(insecure_creds);
+     } else {
+       grpc_channel_credentials* creds;
+       if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
+         creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
+       } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
+         creds =
+             grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
+       } else {
+         rb_raise(rb_eTypeError,
+                  "failed to re-create channel after fork: bad creds, want "
+                  "ChannelCredentials or XdsChannelCredentials");
+         return;
+       }
+       channel = grpc_channel_create(target_str, creds, &wrapper->args);
+     }
+     // re-register with channel polling thread
+     channel_init_try_register_stack stack;
+     stack.channel = channel;
+     stack.wrapper = wrapper;
+     rb_thread_call_without_gvl(
+         channel_init_try_register_connection_polling_without_gil, &stack, NULL,
+         NULL);
+   }
+ }
+ 
  /* Create a call given a grpc_channel, in order to call method. The request
     is not sent until grpc_call_invoke is called. */
  static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
@@ -452,6 +501,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
      rb_raise(rb_eRuntimeError, "closed!");
      return Qnil;
    }
+   // TODO(apolcyn): only do this check if fork support is enabled
+   rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
+   grpc_rb_channel_maybe_recreate_channel_after_fork(
+       wrapper, rb_ivar_get(self, id_target));
+   rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));

    cq = grpc_completion_queue_create_for_pluck(NULL);
    method_slice =
@@ -582,7 +636,7 @@ static void grpc_rb_channel_try_register_connection_polling(
    grpc_connectivity_state conn_state;
    watch_state_op* op = NULL;

-   GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);
+   GPR_ASSERT(g_channel_polling_thread_started || g_abort_channel_polling);

    if (bg->refcount == 0) {
      GPR_ASSERT(bg->channel_destroyed);
@@ -590,7 +644,7 @@ static void grpc_rb_channel_try_register_connection_polling(
      return;
    }
    GPR_ASSERT(bg->refcount == 1);
-   if (bg->channel_destroyed || abort_channel_polling) {
+   if (bg->channel_destroyed || g_abort_channel_polling) {
      return;
    }

@@ -607,7 +661,7 @@ static void grpc_rb_channel_try_register_connection_polling(
    op->op.continuous_watch_callback_args.bg = bg;
    grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                          gpr_inf_future(GPR_CLOCK_REALTIME),
-                                         channel_polling_cq, op);
+                                         g_channel_polling_cq, op);
  }

  // Note this loop breaks out with a single call of
@@ -624,14 +678,15 @@ static void* run_poll_channels_loop_no_gil(void* arg) {
    gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

    gpr_mu_lock(&global_connection_polling_mu);
-   GPR_ASSERT(!channel_polling_thread_started);
-   channel_polling_thread_started = 1;
+   GPR_ASSERT(!g_abort_channel_polling);
+   GPR_ASSERT(!g_channel_polling_thread_started);
+   g_channel_polling_thread_started = 1;
    gpr_cv_broadcast(&global_connection_polling_cv);
    gpr_mu_unlock(&global_connection_polling_mu);

    for (;;) {
      event = grpc_completion_queue_next(
-         channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+         g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      if (event.type == GRPC_QUEUE_SHUTDOWN) {
        break;
      }
@@ -652,7 +707,7 @@ static void* run_poll_channels_loop_no_gil(void* arg) {
      }
      gpr_mu_unlock(&global_connection_polling_mu);
    }
-   grpc_completion_queue_destroy(channel_polling_cq);
+   grpc_completion_queue_destroy(g_channel_polling_cq);
    gpr_log(GPR_DEBUG,
            "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
            "loop");
@@ -669,11 +724,11 @@ static void run_poll_channels_loop_unblocking_func(void* arg) {
            "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
            "connection polling");
    // early out after first time through
-   if (abort_channel_polling) {
+   if (g_abort_channel_polling) {
      gpr_mu_unlock(&global_connection_polling_mu);
      return;
    }
-   abort_channel_polling = 1;
+   g_abort_channel_polling = 1;

    // force pending watches to end by switching to shutdown state
    bg = bg_watched_channel_list_head;
@@ -685,7 +740,9 @@ static void run_poll_channels_loop_unblocking_func(void* arg) {
      bg = bg->next;
    }

-   grpc_completion_queue_shutdown(channel_polling_cq);
+   gpr_log(GPR_DEBUG, "GRPC_RUBY: cq shutdown on global polling cq. pid: %d",
+           getpid());
+   grpc_completion_queue_shutdown(g_channel_polling_cq);
    gpr_cv_broadcast(&global_connection_polling_cv);
    gpr_mu_unlock(&global_connection_polling_mu);
    gpr_log(GPR_DEBUG,
@@ -699,10 +756,8 @@ static VALUE run_poll_channels_loop(VALUE arg) {
    gpr_log(
        GPR_DEBUG,
        "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
-   grpc_ruby_init();
    rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                               run_poll_channels_loop_unblocking_func, NULL);
-   grpc_ruby_shutdown();
    return Qnil;
  }

@@ -710,7 +765,7 @@ static void* wait_until_channel_polling_thread_started_no_gil(void* arg) {
    int* stop_waiting = (int*)arg;
    gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
    gpr_mu_lock(&global_connection_polling_mu);
-   while (!channel_polling_thread_started && !abort_channel_polling &&
+   while (!g_channel_polling_thread_started && !g_abort_channel_polling &&
           !*stop_waiting) {
      gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                  gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -734,12 +789,17 @@ static void wait_until_channel_polling_thread_started_unblocking_func(
  static void* set_abort_channel_polling_without_gil(void* arg) {
    (void)arg;
    gpr_mu_lock(&global_connection_polling_mu);
-   abort_channel_polling = 1;
+   g_abort_channel_polling = 1;
    gpr_cv_broadcast(&global_connection_polling_cv);
    gpr_mu_unlock(&global_connection_polling_mu);
    return NULL;
  }

+ static void do_basic_init() {
+   gpr_mu_init(&global_connection_polling_mu);
+   gpr_cv_init(&global_connection_polling_cv);
+ }
+ 
  /* Temporary fix for
   * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
   * Transports in idle channels can get destroyed. Normally c-core re-connects,
@@ -751,23 +811,38 @@ static void* set_abort_channel_polling_without_gil(void* arg) {
   * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
   */
  void grpc_rb_channel_polling_thread_start() {
-   VALUE background_thread = Qnil;
+   gpr_once_init(&g_once_init, do_basic_init);
+   GPR_ASSERT(!RTEST(g_channel_polling_thread));
+   GPR_ASSERT(!g_abort_channel_polling);
+   GPR_ASSERT(!g_channel_polling_thread_started);
+   GPR_ASSERT(g_channel_polling_cq == NULL);

-   GPR_ASSERT(!abort_channel_polling);
-   GPR_ASSERT(!channel_polling_thread_started);
-   GPR_ASSERT(channel_polling_cq == NULL);
+   g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
+   g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);

-   gpr_mu_init(&global_connection_polling_mu);
-   gpr_cv_init(&global_connection_polling_cv);
- 
-   channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
-   background_thread = rb_thread_create(run_poll_channels_loop, NULL);
- 
-   if (!RTEST(background_thread)) {
-     gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
+   if (!RTEST(g_channel_polling_thread)) {
+     gpr_log(GPR_ERROR, "GRPC_RUBY: failed to spawn channel polling thread");
      rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                                 NULL, NULL);
+     return;
+   }
+ }
+ 
+ void grpc_rb_channel_polling_thread_stop() {
+   if (!RTEST(g_channel_polling_thread)) {
+     gpr_log(GPR_ERROR,
+             "GRPC_RUBY: channel polling thread stop: thread was not started");
+     return;
    }
+   rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func, NULL, NULL,
+                              NULL);
+   rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
+   // state associated with the channel polling thread is destroyed, reset so
+   // we can start again later
+   g_channel_polling_thread = Qnil;
+   g_abort_channel_polling = false;
+   g_channel_polling_thread_started = false;
+   g_channel_polling_cq = NULL;
  }

  static void Init_grpc_propagate_masks() {
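
The polling-thread stop/restart above, together with the post-fork channel re-creation earlier in this file, backs the client-side fork support introduced in this release. A hedged usage sketch, illustrative only: the GRPC.prefork / GRPC.postfork_* method names follow the fork-support API referenced in the comments, the GRPC_ENABLE_FORK_SUPPORT variable is the opt-in switch, and the stub class assumes the gem's generated math services are loaded:

ENV['GRPC_ENABLE_FORK_SUPPORT'] = '1'   # fork support is opt-in
require 'grpc'

stub = Math::Math::Stub.new('localhost:50051', :this_channel_is_insecure)

GRPC.prefork                # tear down C-core state before forking
pid = fork do
  GRPC.postfork_child       # re-initialize gRPC in the child
  stub.div(Math::DivArgs.new(dividend: 7, divisor: 2))
end
GRPC.postfork_parent        # re-initialize gRPC in the parent
Process.wait(pid)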
@@ -803,6 +878,7 @@ static void Init_grpc_connectivity_states() {
  }

  void Init_grpc_channel() {
+   rb_global_variable(&g_channel_polling_thread);
    grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
    rb_undef_alloc_func(grpc_rb_cChannelArgs);
    grpc_rb_cChannel =
@@ -829,6 +905,7 @@ void Init_grpc_channel() {

    id_channel = rb_intern("__channel");
    id_target = rb_intern("__target");
+   id_channel_recreation_mu = rb_intern("__channel_recreation_mu");
    rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                    ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
    rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",