grpc 1.60.0-aarch64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132) hide show
  1. checksums.yaml +7 -0
  2. data/etc/roots.pem +4337 -0
  3. data/grpc_c.32-msvcrt.ruby +0 -0
  4. data/grpc_c.64-msvcrt.ruby +0 -0
  5. data/grpc_c.64-ucrt.ruby +0 -0
  6. data/src/ruby/bin/math_client.rb +140 -0
  7. data/src/ruby/bin/math_pb.rb +40 -0
  8. data/src/ruby/bin/math_server.rb +191 -0
  9. data/src/ruby/bin/math_services_pb.rb +51 -0
  10. data/src/ruby/bin/noproto_client.rb +93 -0
  11. data/src/ruby/bin/noproto_server.rb +97 -0
  12. data/src/ruby/ext/grpc/ext-export-truffleruby-with-ruby-abi-version.clang +2 -0
  13. data/src/ruby/ext/grpc/ext-export-truffleruby-with-ruby-abi-version.gcc +7 -0
  14. data/src/ruby/ext/grpc/ext-export-with-ruby-abi-version.clang +2 -0
  15. data/src/ruby/ext/grpc/ext-export-with-ruby-abi-version.gcc +7 -0
  16. data/src/ruby/ext/grpc/ext-export.clang +1 -0
  17. data/src/ruby/ext/grpc/ext-export.gcc +6 -0
  18. data/src/ruby/ext/grpc/extconf.rb +270 -0
  19. data/src/ruby/ext/grpc/rb_byte_buffer.c +65 -0
  20. data/src/ruby/ext/grpc/rb_byte_buffer.h +35 -0
  21. data/src/ruby/ext/grpc/rb_call.c +1075 -0
  22. data/src/ruby/ext/grpc/rb_call.h +57 -0
  23. data/src/ruby/ext/grpc/rb_call_credentials.c +340 -0
  24. data/src/ruby/ext/grpc/rb_call_credentials.h +31 -0
  25. data/src/ruby/ext/grpc/rb_channel.c +875 -0
  26. data/src/ruby/ext/grpc/rb_channel.h +35 -0
  27. data/src/ruby/ext/grpc/rb_channel_args.c +172 -0
  28. data/src/ruby/ext/grpc/rb_channel_args.h +42 -0
  29. data/src/ruby/ext/grpc/rb_channel_credentials.c +285 -0
  30. data/src/ruby/ext/grpc/rb_channel_credentials.h +37 -0
  31. data/src/ruby/ext/grpc/rb_completion_queue.c +101 -0
  32. data/src/ruby/ext/grpc/rb_completion_queue.h +36 -0
  33. data/src/ruby/ext/grpc/rb_compression_options.c +470 -0
  34. data/src/ruby/ext/grpc/rb_compression_options.h +29 -0
  35. data/src/ruby/ext/grpc/rb_enable_cpp.cc +22 -0
  36. data/src/ruby/ext/grpc/rb_event_thread.c +161 -0
  37. data/src/ruby/ext/grpc/rb_event_thread.h +22 -0
  38. data/src/ruby/ext/grpc/rb_grpc.c +496 -0
  39. data/src/ruby/ext/grpc/rb_grpc.h +83 -0
  40. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +603 -0
  41. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +910 -0
  42. data/src/ruby/ext/grpc/rb_loader.c +61 -0
  43. data/src/ruby/ext/grpc/rb_loader.h +25 -0
  44. data/src/ruby/ext/grpc/rb_server.c +405 -0
  45. data/src/ruby/ext/grpc/rb_server.h +32 -0
  46. data/src/ruby/ext/grpc/rb_server_credentials.c +258 -0
  47. data/src/ruby/ext/grpc/rb_server_credentials.h +37 -0
  48. data/src/ruby/ext/grpc/rb_xds_channel_credentials.c +217 -0
  49. data/src/ruby/ext/grpc/rb_xds_channel_credentials.h +37 -0
  50. data/src/ruby/ext/grpc/rb_xds_server_credentials.c +169 -0
  51. data/src/ruby/ext/grpc/rb_xds_server_credentials.h +37 -0
  52. data/src/ruby/lib/grpc/2.7/grpc_c.so +0 -0
  53. data/src/ruby/lib/grpc/3.0/grpc_c.so +0 -0
  54. data/src/ruby/lib/grpc/3.1/grpc_c.so +0 -0
  55. data/src/ruby/lib/grpc/3.2/grpc_c.so +0 -0
  56. data/src/ruby/lib/grpc/core/status_codes.rb +135 -0
  57. data/src/ruby/lib/grpc/core/time_consts.rb +56 -0
  58. data/src/ruby/lib/grpc/errors.rb +277 -0
  59. data/src/ruby/lib/grpc/generic/active_call.rb +670 -0
  60. data/src/ruby/lib/grpc/generic/bidi_call.rb +237 -0
  61. data/src/ruby/lib/grpc/generic/client_stub.rb +503 -0
  62. data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
  63. data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
  64. data/src/ruby/lib/grpc/generic/rpc_desc.rb +204 -0
  65. data/src/ruby/lib/grpc/generic/rpc_server.rb +551 -0
  66. data/src/ruby/lib/grpc/generic/service.rb +211 -0
  67. data/src/ruby/lib/grpc/google_rpc_status_utils.rb +40 -0
  68. data/src/ruby/lib/grpc/grpc.rb +24 -0
  69. data/src/ruby/lib/grpc/logconfig.rb +44 -0
  70. data/src/ruby/lib/grpc/notifier.rb +45 -0
  71. data/src/ruby/lib/grpc/structs.rb +15 -0
  72. data/src/ruby/lib/grpc/version.rb +18 -0
  73. data/src/ruby/lib/grpc.rb +37 -0
  74. data/src/ruby/pb/README.md +42 -0
  75. data/src/ruby/pb/generate_proto_ruby.sh +46 -0
  76. data/src/ruby/pb/grpc/health/checker.rb +75 -0
  77. data/src/ruby/pb/grpc/health/v1/health_pb.rb +42 -0
  78. data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +62 -0
  79. data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +44 -0
  80. data/src/ruby/pb/grpc/testing/metrics_pb.rb +28 -0
  81. data/src/ruby/pb/grpc/testing/metrics_services_pb.rb +49 -0
  82. data/src/ruby/pb/src/proto/grpc/testing/empty_pb.rb +38 -0
  83. data/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb +71 -0
  84. data/src/ruby/pb/src/proto/grpc/testing/test_pb.rb +40 -0
  85. data/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb +174 -0
  86. data/src/ruby/pb/test/client.rb +785 -0
  87. data/src/ruby/pb/test/server.rb +252 -0
  88. data/src/ruby/pb/test/xds_client.rb +415 -0
  89. data/src/ruby/spec/call_credentials_spec.rb +42 -0
  90. data/src/ruby/spec/call_spec.rb +180 -0
  91. data/src/ruby/spec/channel_connection_spec.rb +126 -0
  92. data/src/ruby/spec/channel_credentials_spec.rb +124 -0
  93. data/src/ruby/spec/channel_spec.rb +207 -0
  94. data/src/ruby/spec/client_auth_spec.rb +152 -0
  95. data/src/ruby/spec/client_server_spec.rb +676 -0
  96. data/src/ruby/spec/compression_options_spec.rb +149 -0
  97. data/src/ruby/spec/debug_message_spec.rb +134 -0
  98. data/src/ruby/spec/error_sanity_spec.rb +49 -0
  99. data/src/ruby/spec/errors_spec.rb +142 -0
  100. data/src/ruby/spec/generic/active_call_spec.rb +692 -0
  101. data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
  102. data/src/ruby/spec/generic/client_stub_spec.rb +1083 -0
  103. data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
  104. data/src/ruby/spec/generic/rpc_desc_spec.rb +374 -0
  105. data/src/ruby/spec/generic/rpc_server_pool_spec.rb +127 -0
  106. data/src/ruby/spec/generic/rpc_server_spec.rb +748 -0
  107. data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
  108. data/src/ruby/spec/generic/service_spec.rb +263 -0
  109. data/src/ruby/spec/google_rpc_status_utils_spec.rb +282 -0
  110. data/src/ruby/spec/pb/codegen/grpc/testing/package_options.proto +28 -0
  111. data/src/ruby/spec/pb/codegen/grpc/testing/package_options_import.proto +22 -0
  112. data/src/ruby/spec/pb/codegen/grpc/testing/package_options_import2.proto +23 -0
  113. data/src/ruby/spec/pb/codegen/grpc/testing/package_options_ruby_style.proto +41 -0
  114. data/src/ruby/spec/pb/codegen/grpc/testing/same_package_service_name.proto +27 -0
  115. data/src/ruby/spec/pb/codegen/grpc/testing/same_ruby_package_service_name.proto +29 -0
  116. data/src/ruby/spec/pb/codegen/package_option_spec.rb +98 -0
  117. data/src/ruby/spec/pb/duplicate/codegen_spec.rb +57 -0
  118. data/src/ruby/spec/pb/health/checker_spec.rb +236 -0
  119. data/src/ruby/spec/server_credentials_spec.rb +104 -0
  120. data/src/ruby/spec/server_spec.rb +231 -0
  121. data/src/ruby/spec/spec_helper.rb +61 -0
  122. data/src/ruby/spec/support/helpers.rb +107 -0
  123. data/src/ruby/spec/support/services.rb +160 -0
  124. data/src/ruby/spec/testdata/README +1 -0
  125. data/src/ruby/spec/testdata/ca.pem +20 -0
  126. data/src/ruby/spec/testdata/client.key +28 -0
  127. data/src/ruby/spec/testdata/client.pem +20 -0
  128. data/src/ruby/spec/testdata/server1.key +28 -0
  129. data/src/ruby/spec/testdata/server1.pem +22 -0
  130. data/src/ruby/spec/time_consts_spec.rb +74 -0
  131. data/src/ruby/spec/user_agent_spec.rb +74 -0
  132. metadata +405 -0
@@ -0,0 +1,875 @@
1
+ /*
2
+ *
3
+ * Copyright 2015 gRPC authors.
4
+ *
5
+ * Licensed under the Apache License, Version 2.0 (the "License");
6
+ * you may not use this file except in compliance with the License.
7
+ * You may obtain a copy of the License at
8
+ *
9
+ * http://www.apache.org/licenses/LICENSE-2.0
10
+ *
11
+ * Unless required by applicable law or agreed to in writing, software
12
+ * distributed under the License is distributed on an "AS IS" BASIS,
13
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ * See the License for the specific language governing permissions and
15
+ * limitations under the License.
16
+ *
17
+ */
18
+
19
+ #include <ruby/ruby.h>
20
+
21
+ #include "rb_channel.h"
22
+
23
+ #include <ruby/thread.h>
24
+
25
+ #include "rb_byte_buffer.h"
26
+ #include "rb_call.h"
27
+ #include "rb_channel_args.h"
28
+ #include "rb_channel_credentials.h"
29
+ #include "rb_completion_queue.h"
30
+ #include "rb_grpc.h"
31
+ #include "rb_grpc_imports.generated.h"
32
+ #include "rb_server.h"
33
+ #include "rb_xds_channel_credentials.h"
34
+
35
+ #include <grpc/grpc.h>
36
+ #include <grpc/grpc_security.h>
37
+ #include <grpc/support/alloc.h>
38
+ #include <grpc/support/log.h>
39
+ #include <grpc/support/time.h>
40
+
41
/* id_channel is the name of the hidden ivar that preserves a reference to the
 * channel on a call, so that calls are not GCed before their channel. */
static ID id_channel;

/* id_target is the name of the hidden ivar that preserves a reference to the
 * target string used to create the call, preserved so that it does not get
 * GCed before the channel */
static ID id_target;

/* hidden ivar that synchronizes post-fork channel re-creation */
static ID id_channel_recreation_mu;

/* id_insecure_channel is used to indicate that a channel is insecure */
static VALUE id_insecure_channel;

/* grpc_rb_cChannel is the ruby class that proxies grpc_channel. */
static VALUE grpc_rb_cChannel = Qnil;

/* Used during the conversion of a hash to channel args during channel setup */
static VALUE grpc_rb_cChannelArgs;

/* Tracking node for a C-core channel watched by the background
 * connectivity-polling thread. Nodes form a singly linked list rooted at
 * bg_watched_channel_list_head. */
typedef struct bg_watched_channel {
  grpc_channel* channel;
  // these fields must only be accessed under global_connection_polling_mu
  struct bg_watched_channel* next;
  int channel_destroyed;
  int refcount;
} bg_watched_channel;

/* grpc_rb_channel wraps a grpc_channel. */
typedef struct grpc_rb_channel {
  VALUE credentials;  /* Ruby creds object, marked to keep it alive */
  grpc_channel_args args;
  /* The actual channel (protected in a wrapper to tell when it's safe to
   * destroy) */
  bg_watched_channel* bg_wrapped;
} grpc_rb_channel;

typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;

/* Completion-queue tag used for connectivity watches; the union member in
 * use is selected by op_type. */
typedef struct watch_state_op {
  watch_state_op_type op_type;
  // from event.success
  union {
    struct {
      int success;
      // has been called back due to a cq next call
      int called_back;
    } api_callback_args;
    struct {
      bg_watched_channel* bg;
    } continuous_watch_callback_args;
  } op;
} watch_state_op;

static bg_watched_channel* bg_watched_channel_list_head = NULL;

static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg);
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg);

/* Arguments for registering a freshly created channel with the polling
 * thread while the GVL is released. */
typedef struct channel_init_try_register_stack {
  grpc_channel* channel;
  grpc_rb_channel* wrapper;
} channel_init_try_register_stack;

/* Global connectivity-polling state shared by all channels in the process;
 * mutable fields are protected by global_connection_polling_mu. */
static grpc_completion_queue* g_channel_polling_cq;
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
static int g_abort_channel_polling = 0;
static gpr_once g_once_init = GPR_ONCE_INIT;
static VALUE g_channel_polling_thread = Qnil;

static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel);
static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
static void run_poll_channels_loop_unblocking_func(void* arg);
121
// Needs to be called under global_connection_polling_mu.
// Marks a WATCH_STATE_API op as finished and records whether the watched
// state change happened (event.success) before the deadline.
static void grpc_rb_channel_watch_connection_state_op_complete(
    watch_state_op* op, int success) {
  GPR_ASSERT(!op->op.api_callback_args.called_back);
  op->op.api_callback_args.called_back = 1;
  op->op.api_callback_args.success = success;
  // wake up the watch API call that's waiting on this op
  gpr_cv_broadcast(&global_connection_polling_cv);
}
130
+
131
+ /* Avoids destroying a channel twice. */
132
+ static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
133
+ gpr_mu_lock(&global_connection_polling_mu);
134
+ GPR_ASSERT(bg_watched_channel_list_lookup(bg));
135
+ if (!bg->channel_destroyed) {
136
+ grpc_channel_destroy(bg->channel);
137
+ bg->channel_destroyed = 1;
138
+ }
139
+ bg->refcount--;
140
+ if (bg->refcount == 0) {
141
+ bg_watched_channel_list_free_and_remove(bg);
142
+ }
143
+ gpr_mu_unlock(&global_connection_polling_mu);
144
+ }
145
+
146
+ static void* channel_safe_destroy_without_gil(void* arg) {
147
+ grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
148
+ return NULL;
149
+ }
150
+
151
+ static void grpc_rb_channel_free_internal(void* p) {
152
+ grpc_rb_channel* ch = NULL;
153
+ if (p == NULL) {
154
+ return;
155
+ };
156
+ ch = (grpc_rb_channel*)p;
157
+ if (ch->bg_wrapped != NULL) {
158
+ /* assumption made here: it's ok to directly gpr_mu_lock the global
159
+ * connection polling mutex because we're in a finalizer,
160
+ * and we can count on this thread to not be interrupted or
161
+ * yield the gil. */
162
+ grpc_rb_channel_safe_destroy(ch->bg_wrapped);
163
+ grpc_rb_channel_args_destroy(&ch->args);
164
+ }
165
+ xfree(p);
166
+ }
167
+
168
/* Destroys Channel instances; registered as the TypedData free callback. */
static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }
170
+
171
+ /* Protects the mark object from GC */
172
+ static void grpc_rb_channel_mark(void* p) {
173
+ grpc_rb_channel* channel = NULL;
174
+ if (p == NULL) {
175
+ return;
176
+ }
177
+ channel = (grpc_rb_channel*)p;
178
+ if (channel->credentials != Qnil) {
179
+ rb_gc_mark(channel->credentials);
180
+ }
181
+ }
182
+
183
/* TypedData descriptor for GRPC::Core::Channel. memsize is unavailable
 * because the C-core channel's footprint is not exposed. */
static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
                                                {grpc_rb_channel_mark,
                                                 grpc_rb_channel_free,
                                                 GRPC_RB_MEMSIZE_UNAVAILABLE,
                                                 {NULL, NULL}},
                                                NULL,
                                                NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
                                                RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
194
+
195
+ /* Allocates grpc_rb_channel instances. */
196
+ static VALUE grpc_rb_channel_alloc(VALUE cls) {
197
+ grpc_ruby_init();
198
+ grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
199
+ wrapper->bg_wrapped = NULL;
200
+ wrapper->credentials = Qnil;
201
+ MEMZERO(&wrapper->args, grpc_channel_args, 1);
202
+ return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
203
+ }
204
+
205
/*
  call-seq:
    insecure_channel = Channel:new("myhost:8080", {'arg1': 'value1'},
                                   :this_channel_is_insecure)
    creds = ...
    secure_channel = Channel:new("myhost:443", {'arg1': 'value1'}, creds)

  Creates channel instances. Credentials may be the symbol
  :this_channel_is_insecure, a ChannelCredentials, or an
  XdsChannelCredentials; anything else raises TypeError. */
static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
  VALUE channel_args = Qnil;
  VALUE credentials = Qnil;
  VALUE target = Qnil;
  grpc_rb_channel* wrapper = NULL;
  grpc_channel* ch = NULL;
  grpc_channel_credentials* creds = NULL;
  char* target_chars = NULL;
  channel_init_try_register_stack stack;

  grpc_ruby_fork_guard();
  /* "3" == 3 mandatory args */
  rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  target_chars = StringValueCStr(target);
  grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
  if (TYPE(credentials) == T_SYMBOL) {
    if (id_insecure_channel != SYM2ID(credentials)) {
      rb_raise(rb_eTypeError,
               "bad creds symbol, want :this_channel_is_insecure");
      return Qnil;
    }
    /* The insecure creds object is only needed for channel creation. */
    grpc_channel_credentials* insecure_creds =
        grpc_insecure_credentials_create();
    ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
    grpc_channel_credentials_release(insecure_creds);
  } else {
    /* Keep the Ruby creds object reachable (marked in grpc_rb_channel_mark)
     * so the wrapped C creds are not freed under us. */
    wrapper->credentials = credentials;
    if (grpc_rb_is_channel_credentials(credentials)) {
      creds = grpc_rb_get_wrapped_channel_credentials(credentials);
    } else if (grpc_rb_is_xds_channel_credentials(credentials)) {
      creds = grpc_rb_get_wrapped_xds_channel_credentials(credentials);
    } else {
      rb_raise(rb_eTypeError,
               "bad creds, want ChannelCredentials or XdsChannelCredentials");
      return Qnil;
    }
    ch = grpc_channel_create(target_chars, creds, &wrapper->args);
  }

  GPR_ASSERT(ch);
  /* Hand the new channel to the background polling thread (off-GVL). */
  stack.channel = ch;
  stack.wrapper = wrapper;
  rb_thread_call_without_gvl(
      channel_init_try_register_connection_polling_without_gil, &stack, NULL,
      NULL);
  /* NOTE(review): GPR_ASSERT(ch) above aborts on NULL, so this error branch
   * looks unreachable — confirm before relying on the raise path. */
  if (ch == NULL) {
    rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
             target_chars);
    return Qnil;
  }
  /* Hidden ivars: keep the target string alive and create the mutex that
   * guards post-fork channel re-creation. */
  rb_ivar_set(self, id_target, target);
  rb_ivar_set(self, id_channel_recreation_mu, rb_mutex_new());
  return self;
}
269
+
270
/* In/out arguments for get_state_without_gil: "bg" is the watched channel,
 * "try_to_connect" requests a connection attempt when idle, and "out"
 * receives the resulting connectivity-state constant. */
typedef struct get_state_stack {
  bg_watched_channel* bg;
  int try_to_connect;
  int out;
} get_state_stack;
275
+
276
+ static void* get_state_without_gil(void* arg) {
277
+ get_state_stack* stack = (get_state_stack*)arg;
278
+
279
+ gpr_mu_lock(&global_connection_polling_mu);
280
+ if (stack->bg->channel_destroyed) {
281
+ stack->out = GRPC_CHANNEL_SHUTDOWN;
282
+ } else {
283
+ stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
284
+ stack->try_to_connect);
285
+ }
286
+ gpr_mu_unlock(&global_connection_polling_mu);
287
+
288
+ return NULL;
289
+ }
290
+
291
/*
  call-seq:
    ch.connectivity_state -> state
    ch.connectivity_state(true) -> state

  Indicates the current state of the channel, whose value is one of the
  constants defined in GRPC::Core::ConnectivityStates.

  It also tries to connect if the channel is idle in the second form.
  Raises RuntimeError if the channel has been closed. */
static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
                                                    VALUE self) {
  VALUE try_to_connect_param = Qfalse;
  grpc_rb_channel* wrapper = NULL;
  get_state_stack stack;

  /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
  rb_scan_args(argc, argv, "01", &try_to_connect_param);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }

  /* The actual state read happens off-GVL so Ruby threads keep running. */
  stack.bg = wrapper->bg_wrapped;
  stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
  rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);

  return LONG2NUM(stack.out);
}
321
+
322
/* Arguments for wait_for_watch_state_op_complete_without_gvl: the channel
 * to watch, the absolute deadline, and the state we are waiting to move
 * away from. */
typedef struct watch_state_stack {
  bg_watched_channel* bg_wrapped;
  gpr_timespec deadline;
  int last_state;
} watch_state_stack;
327
+
328
/* Runs without the GVL: arms a one-shot connectivity-state watch on the
 * global polling cq, then blocks on the global condition variable until the
 * polling loop marks the op as called back. Returns (void*)1 if the state
 * changed before the deadline, (void*)0 on timeout or when polling has
 * already been aborted. */
static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
  watch_state_stack* stack = (watch_state_stack*)arg;
  watch_state_op* op = NULL;
  void* success = (void*)0;

  gpr_mu_lock(&global_connection_polling_mu);
  // it's unsafe to do a "watch" after "channel polling abort" because the cq
  // has been shut down.
  if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return (void*)0;
  }
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = WATCH_STATE_API;
  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                        stack->last_state, stack->deadline,
                                        g_channel_polling_cq, op);

  /* The polling loop broadcasts the cv when any op completes; re-check our
   * own op's flag after every wakeup (classic predicate loop). */
  while (!op->op.api_callback_args.called_back) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (op->op.api_callback_args.success) {
    success = (void*)1;
  }
  /* This side owns the op's memory once called_back is set. */
  gpr_free(op);
  gpr_mu_unlock(&global_connection_polling_mu);

  return success;
}
358
/* Ruby unblocking function for the watch API: destroys the watched channel
 * so the pending connectivity watch completes immediately and the off-GVL
 * waiter above wakes up and returns. */
static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
  bg_watched_channel* bg = (bg_watched_channel*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}
367
+
368
+ /* Wait until the channel's connectivity state becomes different from
369
+ * "last_state", or "deadline" expires.
370
+ * Returns true if the channel's connectivity state becomes different
371
+ * from "last_state" within "deadline".
372
+ * Returns false if "deadline" expires before the channel's connectivity
373
+ * state changes from "last_state".
374
+ * */
375
+ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
376
+ VALUE last_state,
377
+ VALUE deadline) {
378
+ grpc_rb_channel* wrapper = NULL;
379
+ watch_state_stack stack;
380
+ void* op_success = 0;
381
+
382
+ grpc_ruby_fork_guard();
383
+ TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
384
+
385
+ if (wrapper->bg_wrapped == NULL) {
386
+ rb_raise(rb_eRuntimeError, "closed!");
387
+ return Qnil;
388
+ }
389
+
390
+ if (!FIXNUM_P(last_state)) {
391
+ rb_raise(
392
+ rb_eTypeError,
393
+ "bad type for last_state. want a GRPC::Core::ChannelState constant");
394
+ return Qnil;
395
+ }
396
+
397
+ stack.bg_wrapped = wrapper->bg_wrapped;
398
+ stack.deadline = grpc_rb_time_timeval(deadline, 0),
399
+ stack.last_state = NUM2LONG(last_state);
400
+
401
+ op_success = rb_thread_call_without_gvl(
402
+ wait_for_watch_state_op_complete_without_gvl, &stack,
403
+ wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
404
+
405
+ return op_success ? Qtrue : Qfalse;
406
+ }
407
+
408
/* Re-creates the wrapper's C-core channel if it was destroyed during
 * GRPC::prefork, re-registering it with the polling thread. Caller must
 * hold the channel's id_channel_recreation_mu Ruby mutex. */
static void grpc_rb_channel_maybe_recreate_channel_after_fork(
    grpc_rb_channel* wrapper, VALUE target) {
  // TODO(apolcyn): maybe check if fork support is enabled here.
  // The only way we can get bg->channel_destroyed without bg itself being
  // NULL is if we destroyed the channel during GRPC::prefork.
  bg_watched_channel* bg = wrapper->bg_wrapped;
  if (bg->channel_destroyed) {
    // There must be one ref at this point, held by the ruby-level channel
    // object, drop this one here.
    GPR_ASSERT(bg->refcount == 1);
    rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
                               NULL);
    // re-create C-core channel
    const char* target_str = StringValueCStr(target);
    grpc_channel* channel;
    if (wrapper->credentials == Qnil) {
      /* Qnil creds means the channel was originally created with
       * :this_channel_is_insecure (see grpc_rb_channel_init). */
      grpc_channel_credentials* insecure_creds =
          grpc_insecure_credentials_create();
      channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
      grpc_channel_credentials_release(insecure_creds);
    } else {
      grpc_channel_credentials* creds;
      if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
        creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
      } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
        creds =
            grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
      } else {
        /* NOTE(review): creds were validated at construction, so this branch
         * is presumably unreachable; if hit, bg_wrapped is left pointing at
         * the node freed above — confirm before changing creds handling. */
        rb_raise(rb_eTypeError,
                 "failed to re-create channel after fork: bad creds, want "
                 "ChannelCredentials or XdsChannelCredentials");
        return;
      }
      channel = grpc_channel_create(target_str, creds, &wrapper->args);
    }
    // re-register with channel polling thread
    channel_init_try_register_stack stack;
    stack.channel = channel;
    stack.wrapper = wrapper;
    rb_thread_call_without_gvl(
        channel_init_try_register_connection_polling_without_gil, &stack, NULL,
        NULL);
  }
}
452
+
453
+ /* Create a call given a grpc_channel, in order to call method. The request
454
+ is not sent until grpc_call_invoke is called. */
455
+ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
456
+ VALUE method, VALUE host,
457
+ VALUE deadline) {
458
+ VALUE res = Qnil;
459
+ grpc_rb_channel* wrapper = NULL;
460
+ grpc_call* call = NULL;
461
+ grpc_call* parent_call = NULL;
462
+ grpc_completion_queue* cq = NULL;
463
+ int flags = GRPC_PROPAGATE_DEFAULTS;
464
+ grpc_slice method_slice;
465
+ grpc_slice host_slice;
466
+ grpc_slice* host_slice_ptr = NULL;
467
+ char* tmp_str = NULL;
468
+
469
+ grpc_ruby_fork_guard();
470
+ if (host != Qnil) {
471
+ host_slice =
472
+ grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
473
+ host_slice_ptr = &host_slice;
474
+ }
475
+ if (mask != Qnil) {
476
+ flags = NUM2UINT(mask);
477
+ }
478
+ if (parent != Qnil) {
479
+ parent_call = grpc_rb_get_wrapped_call(parent);
480
+ }
481
+
482
+ TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
483
+ if (wrapper->bg_wrapped == NULL) {
484
+ rb_raise(rb_eRuntimeError, "closed!");
485
+ return Qnil;
486
+ }
487
+ // TODO(apolcyn): only do this check if fork support is enabled
488
+ rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
489
+ grpc_rb_channel_maybe_recreate_channel_after_fork(
490
+ wrapper, rb_ivar_get(self, id_target));
491
+ rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));
492
+
493
+ cq = grpc_completion_queue_create_for_pluck(NULL);
494
+ method_slice =
495
+ grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
496
+ call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
497
+ flags, cq, method_slice, host_slice_ptr,
498
+ grpc_rb_time_timeval(deadline,
499
+ /* absolute time */ 0),
500
+ NULL);
501
+
502
+ if (call == NULL) {
503
+ tmp_str = grpc_slice_to_c_string(method_slice);
504
+ rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
505
+ return Qnil;
506
+ }
507
+
508
+ grpc_slice_unref(method_slice);
509
+ if (host_slice_ptr != NULL) {
510
+ grpc_slice_unref(host_slice);
511
+ }
512
+
513
+ res = grpc_rb_wrap_call(call, cq);
514
+
515
+ /* Make this channel an instance attribute of the call so that it is not GCed
516
+ * before the call. */
517
+ rb_ivar_set(res, id_channel, self);
518
+ return res;
519
+ }
520
+
521
/* Closes the channel, calling its destroy method */
/* Note this is an API-level call; a wrapped channel's finalizer doesn't call
 * this */
static VALUE grpc_rb_channel_destroy(VALUE self) {
  grpc_rb_channel* wrapper = NULL;

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped != NULL) {
    /* Drop this wrapper's reference off-GVL, then clear bg_wrapped so later
     * method calls raise "closed!" instead of touching freed state. */
    rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
                               wrapper->bg_wrapped, NULL, NULL);
    wrapper->bg_wrapped = NULL;
  }

  return Qnil;
}
536
+
537
+ /* Called to obtain the target that this channel accesses. */
538
+ static VALUE grpc_rb_channel_get_target(VALUE self) {
539
+ grpc_rb_channel* wrapper = NULL;
540
+ VALUE res = Qnil;
541
+ char* target = NULL;
542
+
543
+ TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
544
+ target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
545
+ res = rb_str_new2(target);
546
+ gpr_free(target);
547
+
548
+ return res;
549
+ }
550
+
551
+ /* Needs to be called under global_connection_polling_mu */
552
+ static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
553
+ bg_watched_channel* cur = bg_watched_channel_list_head;
554
+
555
+ while (cur != NULL) {
556
+ if (cur == target) {
557
+ return 1;
558
+ }
559
+ cur = cur->next;
560
+ }
561
+
562
+ return 0;
563
+ }
564
+
565
+ /* Needs to be called under global_connection_polling_mu */
566
+ static bg_watched_channel* bg_watched_channel_list_create_and_add(
567
+ grpc_channel* channel) {
568
+ bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));
569
+
570
+ watched->channel = channel;
571
+ watched->next = bg_watched_channel_list_head;
572
+ watched->refcount = 1;
573
+ bg_watched_channel_list_head = watched;
574
+ return watched;
575
+ }
576
+
577
+ /* Needs to be called under global_connection_polling_mu */
578
+ static void bg_watched_channel_list_free_and_remove(
579
+ bg_watched_channel* target) {
580
+ bg_watched_channel* bg = NULL;
581
+
582
+ GPR_ASSERT(bg_watched_channel_list_lookup(target));
583
+ GPR_ASSERT(target->channel_destroyed && target->refcount == 0);
584
+ if (bg_watched_channel_list_head == target) {
585
+ bg_watched_channel_list_head = target->next;
586
+ gpr_free(target);
587
+ return;
588
+ }
589
+ bg = bg_watched_channel_list_head;
590
+ while (bg != NULL && bg->next != NULL) {
591
+ if (bg->next == target) {
592
+ bg->next = bg->next->next;
593
+ gpr_free(target);
594
+ return;
595
+ }
596
+ bg = bg->next;
597
+ }
598
+ GPR_ASSERT(0);
599
+ }
600
+
601
+ /* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
602
+ * it onto the background thread for constant watches. */
603
+ static void* channel_init_try_register_connection_polling_without_gil(
604
+ void* arg) {
605
+ channel_init_try_register_stack* stack =
606
+ (channel_init_try_register_stack*)arg;
607
+
608
+ gpr_mu_lock(&global_connection_polling_mu);
609
+ stack->wrapper->bg_wrapped =
610
+ bg_watched_channel_list_create_and_add(stack->channel);
611
+ grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
612
+ gpr_mu_unlock(&global_connection_polling_mu);
613
+ return NULL;
614
+ }
615
+
616
// Needs to be called under global_connection_polling_mu.
// Arms (or re-arms) the background continuous watch for "bg", taking an
// extra reference held by the polling thread; cleans up the node instead
// when the channel is gone or polling has been aborted.
static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg) {
  grpc_connectivity_state conn_state;
  watch_state_op* op = NULL;
  if (bg->refcount == 0) {
    // Last reference already dropped: the node only lingered for the
    // in-flight watch; reclaim it now.
    GPR_ASSERT(bg->channel_destroyed);
    bg_watched_channel_list_free_and_remove(bg);
    return;
  }
  GPR_ASSERT(bg->refcount == 1);
  if (bg->channel_destroyed || g_abort_channel_polling) {
    return;
  }
  conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
  if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
    return;
  }
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  // prevent bg from being free'd by GC while background thread is watching it
  bg->refcount++;
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = CONTINUOUS_WATCH;
  op->op.continuous_watch_callback_args.bg = bg;
  grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                        gpr_inf_future(GPR_CLOCK_REALTIME),
                                        g_channel_polling_cq, op);
}
644
+
645
// Note this loop breaks out with a single call of
// "run_poll_channels_loop_no_gil".
// This assumes that a Ruby call of the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
// early and falls back to current behavior.
static void* run_poll_channels_loop_no_gil(void* arg) {
  grpc_event event;
  watch_state_op* op = NULL;
  bg_watched_channel* bg = NULL;
  (void)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

  // Signal any thread waiting for the polling thread to come up.
  gpr_mu_lock(&global_connection_polling_mu);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);

  for (;;) {
    event = grpc_completion_queue_next(
        g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (event.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    gpr_mu_lock(&global_connection_polling_mu);
    if (event.type == GRPC_OP_COMPLETE) {
      op = (watch_state_op*)event.tag;
      if (op->op_type == CONTINUOUS_WATCH) {
        // Drop the polling thread's ref taken at registration, then re-arm
        // the watch (or free the node if the channel is gone). This side
        // owns and frees CONTINUOUS_WATCH ops.
        bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
        bg->refcount--;
        grpc_rb_channel_try_register_connection_polling(bg);
        gpr_free(op);
      } else if (op->op_type == WATCH_STATE_API) {
        // One-shot API watch: mark it done and wake the waiter, which frees
        // the op itself.
        grpc_rb_channel_watch_connection_state_op_complete(
            (watch_state_op*)event.tag, event.success);
      } else {
        GPR_ASSERT(0);
      }
    }
    gpr_mu_unlock(&global_connection_polling_mu);
  }
  // Loop exits only on cq shutdown (triggered by the unblocking func).
  grpc_completion_queue_destroy(g_channel_polling_cq);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
          "loop");
  return NULL;
}
691
+
692
// Notify the channel polling loop to cleanup and shutdown.
// Marks polling as aborted, destroys every still-live watched channel
// (forcing pending connectivity watches to complete with SHUTDOWN), and
// shuts down the global polling cq so run_poll_channels_loop_no_gil exits.
// Idempotent: only the first call does any work.
static void run_poll_channels_loop_unblocking_func(void* arg) {
  bg_watched_channel* bg = NULL;
  (void)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
          "connection polling");
  // early out after first time through
  if (g_abort_channel_polling) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return;
  }
  g_abort_channel_polling = 1;

  // force pending watches to end by switching to shutdown state
  bg = bg_watched_channel_list_head;
  while (bg != NULL) {
    if (!bg->channel_destroyed) {
      grpc_channel_destroy(bg->channel);
      bg->channel_destroyed = 1;
    }
    bg = bg->next;
  }

  gpr_log(GPR_DEBUG, "GRPC_RUBY: cq shutdown on global polling cq. pid: %d",
          getpid());
  grpc_completion_queue_shutdown(g_channel_polling_cq);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
          "connection polling");
}
727
+
728
// Poll channel connectivity states in background thread without the GIL.
// Ruby-thread entry point: releases the GVL and runs the polling loop until
// the unblocking func aborts it (e.g. at interpreter shutdown).
static VALUE run_poll_channels_loop(VALUE arg) {
  (void)arg;
  gpr_log(
      GPR_DEBUG,
      "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                             run_poll_channels_loop_unblocking_func, NULL);
  return Qnil;
}
738
+
739
// Sets the global abort flag (under the polling mutex) and wakes any
// waiters. Run without the GVL; used when spawning the polling thread fails
// so that nothing ever waits on a loop that will never start.
static void* set_abort_channel_polling_without_gil(void* arg) {
  (void)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  g_abort_channel_polling = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}
747
+
748
// One-time (via gpr_once) initialization of the global polling mutex and
// condition variable.
static void do_basic_init() {
  gpr_mu_init(&global_connection_polling_mu);
  gpr_cv_init(&global_connection_polling_cv);
}
752
+
753
/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core re-connects,
 * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
 * only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls
 * "completion_queue_next" on registered "watch_channel_connectivity_state"
 * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
void grpc_rb_channel_polling_thread_start() {
  gpr_once_init(&g_once_init, do_basic_init);
  // Must not be called while a polling thread is already running.
  GPR_ASSERT(!RTEST(g_channel_polling_thread));
  GPR_ASSERT(!g_abort_channel_polling);
  GPR_ASSERT(g_channel_polling_cq == NULL);

  g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
  g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);

  if (!RTEST(g_channel_polling_thread)) {
    // Thread creation failed: abort polling so nothing blocks waiting for
    // the loop to start.
    gpr_log(GPR_ERROR, "GRPC_RUBY: failed to spawn channel polling thread");
    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                               NULL, NULL);
    return;
  }
}
779
+
780
// Stops the background polling thread: triggers the unblocking func (which
// shuts down the polling cq), joins the Ruby thread, then resets the global
// state so grpc_rb_channel_polling_thread_start can be called again.
void grpc_rb_channel_polling_thread_stop() {
  if (!RTEST(g_channel_polling_thread)) {
    gpr_log(GPR_ERROR,
            "GRPC_RUBY: channel polling thread stop: thread was not started");
    return;
  }
  rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func, NULL, NULL,
                             NULL);
  rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
  // state associated with the channel polling thread is destroyed, reset so
  // we can start again later
  g_channel_polling_thread = Qnil;
  g_abort_channel_polling = false;
  g_channel_polling_cq = NULL;
}
795
+
796
+ static void Init_grpc_propagate_masks() {
797
+ /* Constants representing call propagation masks in grpc.h */
798
+ VALUE grpc_rb_mPropagateMasks =
799
+ rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
800
+ rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
801
+ UINT2NUM(GRPC_PROPAGATE_DEADLINE));
802
+ rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
803
+ UINT2NUM(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
804
+ rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_TRACING_CONTEXT",
805
+ UINT2NUM(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
806
+ rb_define_const(grpc_rb_mPropagateMasks, "CANCELLATION",
807
+ UINT2NUM(GRPC_PROPAGATE_CANCELLATION));
808
+ rb_define_const(grpc_rb_mPropagateMasks, "DEFAULTS",
809
+ UINT2NUM(GRPC_PROPAGATE_DEFAULTS));
810
+ }
811
+
812
+ static void Init_grpc_connectivity_states() {
813
+ /* Constants representing call propagation masks in grpc.h */
814
+ VALUE grpc_rb_mConnectivityStates =
815
+ rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
816
+ rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
817
+ LONG2NUM(GRPC_CHANNEL_IDLE));
818
+ rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
819
+ LONG2NUM(GRPC_CHANNEL_CONNECTING));
820
+ rb_define_const(grpc_rb_mConnectivityStates, "READY",
821
+ LONG2NUM(GRPC_CHANNEL_READY));
822
+ rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
823
+ LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
824
+ rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
825
+ LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
826
+ }
827
+
828
// Registers the GRPC::Core::Channel class and its methods, aliases,
// constants, and interned symbol IDs with the Ruby runtime.
void Init_grpc_channel() {
  // Keep the polling-thread VALUE visible to the Ruby GC.
  rb_global_variable(&g_channel_polling_thread);
  grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
  rb_undef_alloc_func(grpc_rb_cChannelArgs);
  grpc_rb_cChannel =
      rb_define_class_under(grpc_rb_mGrpcCore, "Channel", rb_cObject);

  /* Allocates an object managed by the ruby runtime */
  rb_define_alloc_func(grpc_rb_cChannel, grpc_rb_channel_alloc);

  /* Provides a ruby constructor and support for dup/clone. */
  rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
  rb_define_method(grpc_rb_cChannel, "initialize_copy",
                   grpc_rb_cannot_init_copy, 1);

  /* Add ruby analogues of the Channel methods. */
  rb_define_method(grpc_rb_cChannel, "connectivity_state",
                   grpc_rb_channel_get_connectivity_state, -1);
  rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
                   grpc_rb_channel_watch_connectivity_state, 2);
  rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
                   5);
  rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
  rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
  rb_define_alias(grpc_rb_cChannel, "close", "destroy");

  // Interned IDs for instance-variable access elsewhere in this file.
  id_channel = rb_intern("__channel");
  id_target = rb_intern("__target");
  id_channel_recreation_mu = rb_intern("__channel_recreation_mu");
  // Symbol constants for well-known channel-arg keys.
  rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                  ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
  rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
                  ID2SYM(rb_intern(GRPC_ARG_ENABLE_CENSUS)));
  rb_define_const(grpc_rb_cChannel, "MAX_CONCURRENT_STREAMS",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_CONCURRENT_STREAMS)));
  rb_define_const(grpc_rb_cChannel, "MAX_MESSAGE_LENGTH",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)));
  id_insecure_channel = rb_intern("this_channel_is_insecure");
  Init_grpc_propagate_masks();
  Init_grpc_connectivity_states();
}
869
+
870
+ /* Gets the wrapped channel from the ruby wrapper */
871
+ grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
872
+ grpc_rb_channel* wrapper = NULL;
873
+ TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
874
+ return wrapper->bg_wrapped->channel;
875
+ }