grpc 1.15.0 → 1.16.0.pre1


Potentially problematic release: this version of grpc has been flagged as possibly problematic.

Files changed (138)
  1. checksums.yaml +4 -4
  2. data/Makefile +158 -80
  3. data/etc/roots.pem +23 -0
  4. data/include/grpc/grpc.h +13 -1
  5. data/include/grpc/grpc_security.h +2 -2
  6. data/include/grpc/grpc_security_constants.h +24 -19
  7. data/include/grpc/impl/codegen/grpc_types.h +23 -5
  8. data/include/grpc/impl/codegen/port_platform.h +1 -0
  9. data/src/core/ext/filters/client_channel/client_channel.cc +95 -10
  10. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +71 -0
  11. data/src/core/ext/filters/client_channel/client_channel_channelz.h +45 -11
  12. data/src/core/ext/filters/client_channel/connector.h +3 -0
  13. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -1
  14. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +5 -3
  15. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +12 -32
  16. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +6 -5
  17. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +20 -15
  18. data/src/core/ext/filters/client_channel/lb_policy_factory.h +2 -4
  19. data/src/core/ext/filters/client_channel/parse_address.cc +27 -4
  20. data/src/core/ext/filters/client_channel/parse_address.h +3 -0
  21. data/src/core/ext/filters/client_channel/resolver.h +1 -12
  22. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +1 -11
  23. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +80 -19
  24. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +9 -3
  25. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +5 -0
  26. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +70 -0
  27. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +1 -11
  28. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +2 -16
  29. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +2 -1
  30. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +0 -7
  31. data/src/core/ext/filters/client_channel/subchannel.cc +45 -7
  32. data/src/core/ext/filters/client_channel/subchannel.h +16 -1
  33. data/src/core/ext/filters/client_channel/subchannel_index.cc +2 -1
  34. data/src/core/ext/filters/client_channel/subchannel_index.h +1 -4
  35. data/src/core/ext/filters/http/client/http_client_filter.cc +32 -3
  36. data/src/core/ext/filters/http/server/http_server_filter.cc +59 -1
  37. data/src/core/ext/filters/max_age/max_age_filter.cc +1 -2
  38. data/src/core/ext/filters/message_size/message_size_filter.cc +59 -3
  39. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +2 -0
  40. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
  41. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
  42. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +286 -228
  43. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -0
  44. data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -0
  45. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +14 -3
  46. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +29 -0
  47. data/src/core/ext/transport/chttp2/transport/hpack_table.h +9 -0
  48. data/src/core/ext/transport/chttp2/transport/internal.h +10 -0
  49. data/src/core/ext/transport/chttp2/transport/parsing.cc +85 -54
  50. data/src/core/ext/transport/chttp2/transport/writing.cc +6 -0
  51. data/src/core/lib/channel/channel_trace.cc +51 -56
  52. data/src/core/lib/channel/channel_trace.h +30 -25
  53. data/src/core/lib/channel/channelz.cc +235 -61
  54. data/src/core/lib/channel/channelz.h +179 -48
  55. data/src/core/lib/channel/channelz_registry.cc +95 -23
  56. data/src/core/lib/channel/channelz_registry.h +15 -42
  57. data/src/core/lib/gpr/sync_posix.cc +42 -0
  58. data/src/core/lib/http/httpcli.cc +1 -1
  59. data/src/core/lib/iomgr/buffer_list.cc +134 -0
  60. data/src/core/lib/iomgr/buffer_list.h +96 -0
  61. data/src/core/lib/iomgr/endpoint.cc +2 -2
  62. data/src/core/lib/iomgr/endpoint.h +6 -2
  63. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
  64. data/src/core/lib/iomgr/error.cc +29 -18
  65. data/src/core/lib/iomgr/error.h +8 -0
  66. data/src/core/lib/iomgr/ev_epoll1_linux.cc +4 -0
  67. data/src/core/lib/iomgr/ev_epollex_linux.cc +4 -0
  68. data/src/core/lib/iomgr/ev_posix.cc +16 -10
  69. data/src/core/lib/iomgr/exec_ctx.h +0 -7
  70. data/src/core/lib/iomgr/{ev_epollsig_linux.h → internal_errqueue.cc} +13 -12
  71. data/src/core/lib/iomgr/internal_errqueue.h +83 -0
  72. data/src/core/lib/iomgr/port.h +11 -2
  73. data/src/core/lib/iomgr/socket_utils_common_posix.cc +90 -0
  74. data/src/core/lib/iomgr/socket_utils_posix.h +7 -0
  75. data/src/core/lib/iomgr/tcp_client_posix.cc +4 -1
  76. data/src/core/lib/iomgr/tcp_custom.cc +1 -1
  77. data/src/core/lib/iomgr/tcp_posix.cc +306 -13
  78. data/src/core/lib/iomgr/tcp_posix.h +3 -0
  79. data/src/core/lib/iomgr/tcp_server_posix.cc +2 -2
  80. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +4 -1
  81. data/src/core/lib/iomgr/tcp_windows.cc +1 -1
  82. data/src/core/lib/iomgr/timer_generic.cc +13 -12
  83. data/src/core/lib/iomgr/timer_heap.cc +2 -2
  84. data/src/core/lib/iomgr/timer_heap.h +3 -3
  85. data/src/core/lib/iomgr/timer_manager.cc +28 -3
  86. data/src/core/lib/iomgr/timer_manager.h +2 -2
  87. data/src/core/lib/iomgr/udp_server.cc +1 -1
  88. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +2 -1
  89. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc +2 -1
  90. data/src/core/lib/security/security_connector/security_connector.cc +7 -7
  91. data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
  92. data/src/core/lib/security/transport/security_handshaker.cc +1 -1
  93. data/src/core/lib/security/transport/server_auth_filter.cc +53 -4
  94. data/src/core/lib/slice/slice.cc +8 -0
  95. data/src/core/lib/slice/slice_internal.h +5 -0
  96. data/src/core/lib/surface/call.cc +149 -253
  97. data/src/core/lib/surface/call.h +1 -0
  98. data/src/core/lib/surface/channel.cc +17 -13
  99. data/src/core/lib/surface/completion_queue.cc +21 -17
  100. data/src/core/lib/surface/completion_queue.h +1 -18
  101. data/src/core/lib/surface/completion_queue_factory.cc +3 -3
  102. data/src/core/lib/surface/init_secure.cc +1 -1
  103. data/src/core/lib/surface/server.cc +77 -4
  104. data/src/core/lib/surface/server.h +4 -0
  105. data/src/core/lib/surface/version.cc +2 -2
  106. data/src/core/lib/transport/metadata.cc +0 -18
  107. data/src/core/lib/transport/metadata.h +0 -3
  108. data/src/core/lib/transport/metadata_batch.cc +2 -2
  109. data/src/core/lib/transport/metadata_batch.h +2 -0
  110. data/src/core/lib/transport/static_metadata.cc +220 -249
  111. data/src/core/lib/transport/static_metadata.h +189 -191
  112. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +5 -4
  113. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc +3 -1
  114. data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +4 -2
  115. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +6 -5
  116. data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +3 -1
  117. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +2 -2
  118. data/src/core/tsi/alts_transport_security.cc +3 -1
  119. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +2 -1
  120. data/src/ruby/ext/grpc/rb_call.c +1 -0
  121. data/src/ruby/ext/grpc/rb_channel.c +3 -0
  122. data/src/ruby/ext/grpc/rb_grpc.c +31 -1
  123. data/src/ruby/ext/grpc/rb_grpc.h +2 -0
  124. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +6 -0
  125. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +12 -3
  126. data/src/ruby/ext/grpc/rb_server.c +2 -0
  127. data/src/ruby/lib/grpc/errors.rb +0 -1
  128. data/src/ruby/lib/grpc/generic/rpc_desc.rb +3 -3
  129. data/src/ruby/lib/grpc/generic/rpc_server.rb +1 -1
  130. data/src/ruby/lib/grpc/version.rb +1 -1
  131. data/src/ruby/spec/channel_spec.rb +44 -0
  132. data/src/ruby/spec/client_auth_spec.rb +5 -5
  133. data/src/ruby/spec/generic/client_stub_spec.rb +13 -9
  134. data/src/ruby/spec/generic/rpc_server_spec.rb +3 -3
  135. data/src/ruby/spec/pb/codegen/package_option_spec.rb +53 -0
  136. data/src/ruby/spec/support/services.rb +28 -22
  137. metadata +35 -31
  138. data/src/core/lib/iomgr/ev_epollsig_linux.cc +0 -1743
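
Themes visible in the file list: a substantial channelz buildout (channelz.cc/h, channelz_registry, client_channel_channelz), a native c-ares resolver path for Windows (grpc_ares_wrapper_windows.cc), new kernel error-queue plumbing for TCP write timestamps (buffer_list.cc/h, internal_errqueue.h, and the large tcp_posix.cc diff), removal of the epollsig poller (ev_epollsig_linux.cc, -1743 lines), and the Ruby version bump to 1.16.0.pre1. The hunks reproduced below cover the iomgr, timer, and security portions of the diff.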
@@ -31,7 +31,10 @@

 #include <grpc/support/port_platform.h>

+#include "src/core/lib/iomgr/port.h"
+
 #include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/buffer_list.h"
 #include "src/core/lib/iomgr/endpoint.h"
 #include "src/core/lib/iomgr/ev_posix.h"

@@ -226,7 +226,7 @@ static void on_read(void* arg, grpc_error* err) {
     gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str);
   }

-  grpc_fd* fdobj = grpc_fd_create(fd, name, false);
+  grpc_fd* fdobj = grpc_fd_create(fd, name, true);

   read_notifier_pollset =
       sp->server->pollsets[static_cast<size_t>(gpr_atm_no_barrier_fetch_add(
@@ -362,7 +362,7 @@ static grpc_error* clone_port(grpc_tcp_listener* listener, unsigned count) {
     listener->sibling = sp;
     sp->server = listener->server;
     sp->fd = fd;
-    sp->emfd = grpc_fd_create(fd, name, false);
+    sp->emfd = grpc_fd_create(fd, name, true);
     memcpy(&sp->addr, &listener->addr, sizeof(grpc_resolved_address));
     sp->port = port;
     sp->port_index = listener->port_index;
@@ -105,7 +105,7 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s, int fd,
   s->tail = sp;
   sp->server = s;
   sp->fd = fd;
-  sp->emfd = grpc_fd_create(fd, name, false);
+  sp->emfd = grpc_fd_create(fd, name, true);
   memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
   sp->port = port;
   sp->port_index = port_index;
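
The false → true flips here and in the on_read/clone_port/udp_server hunks all feed the same new third parameter of grpc_fd_create. Its name isn't visible in this diff; judging by the new buffer_list/internal_errqueue files, it plausibly opts the descriptor into error-queue tracking for the TCP timestamp work. A hedged sketch of the assumed signature:

// Assumed declaration (the parameter name is a guess; verify against
// src/core/lib/iomgr/ev_posix.h in this release):
grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err);

// Server listeners and accepted connections now pass true, opting in.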
@@ -166,6 +166,9 @@ grpc_error* grpc_tcp_server_prepare_socket(grpc_tcp_server* s, int fd,
     if (err != GRPC_ERROR_NONE) goto error;
     err = grpc_set_socket_reuse_addr(fd, 1);
     if (err != GRPC_ERROR_NONE) goto error;
+    err = grpc_set_socket_tcp_user_timeout(fd, s->channel_args,
+                                           false /* is_client */);
+    if (err != GRPC_ERROR_NONE) goto error;
   }
   err = grpc_set_socket_no_sigpipe_if_possible(fd);
   if (err != GRPC_ERROR_NONE) goto error;
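
Server sockets are now prepared with a TCP user timeout taken from channel args (this pairs with the new grpc_set_socket_tcp_user_timeout helper added in socket_utils_common_posix.cc). On Linux the underlying mechanism is the TCP_USER_TIMEOUT socket option, which bounds how long written data may remain unacknowledged before the kernel aborts the connection. A minimal sketch of what such a helper boils down to, assuming a Linux target (this is not the gRPC helper itself):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

// Set TCP_USER_TIMEOUT: milliseconds that transmitted data may stay
// unacknowledged before the connection is forcibly closed.
int set_tcp_user_timeout(int fd, unsigned timeout_ms) {
  return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_ms,
                    sizeof(timeout_ms));  // 0 on success, -1 on error
}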
@@ -296,7 +296,7 @@ static void on_write(void* tcpp, grpc_error* error) {

 /* Initiates a write. */
 static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
-                      grpc_closure* cb) {
+                      grpc_closure* cb, void* arg) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   grpc_winsocket* socket = tcp->socket;
   grpc_winsocket_callback_info* info = &socket->write_info;
@@ -48,22 +48,22 @@ grpc_core::TraceFlag grpc_timer_trace(false, "timer");
48
48
  grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check");
49
49
 
50
50
  /* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
51
- * deadlines earlier than 'queue_deadline" cap are maintained in the heap and
51
+ * deadlines earlier than 'queue_deadline_cap' are maintained in the heap and
52
52
  * others are maintained in the list (unordered). This helps to keep the number
53
53
  * of elements in the heap low.
54
54
  *
55
55
  * The 'queue_deadline_cap' gets recomputed periodically based on the timer
56
56
  * stats maintained in 'stats' and the relevant timers are then moved from the
57
- * 'list' to 'heap'
57
+ * 'list' to 'heap'.
58
58
  */
59
59
  typedef struct {
60
60
  gpr_mu mu;
61
61
  grpc_time_averaged_stats stats;
62
- /* All and only timers with deadlines <= this will be in the heap. */
62
+ /* All and only timers with deadlines < this will be in the heap. */
63
63
  grpc_millis queue_deadline_cap;
64
- /* The deadline of the next timer due in this shard */
64
+ /* The deadline of the next timer due in this shard. */
65
65
  grpc_millis min_deadline;
66
- /* Index of this timer_shard in the g_shard_queue */
66
+ /* Index of this timer_shard in the g_shard_queue. */
67
67
  uint32_t shard_queue_index;
68
68
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
69
69
  list have the top bit of their deadline set to 0. */
@@ -85,7 +85,7 @@ static timer_shard** g_shard_queue;

 #ifndef NDEBUG

-/* == Hash table for duplicate timer detection == */
+/* == DEBUG ONLY: hash table for duplicate timer detection == */

 #define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */

@@ -177,7 +177,7 @@ static void remove_from_ht(grpc_timer* t) {
   t->hash_table_next = nullptr;
 }

-/* If a timer is added to a timer shard (either heap or a list), it cannot
+/* If a timer is added to a timer shard (either heap or a list), it must
  * be pending. A timer is added to hash table only-if it is added to the
  * timer shard.
  * Therefore, if timer->pending is false, it cannot be in hash table */
@@ -256,7 +256,7 @@ static grpc_millis compute_min_deadline(timer_shard* shard) {
 static void timer_list_init() {
   uint32_t i;

-  g_num_shards = GPR_MIN(1, 2 * gpr_cpu_num_cores());
+  g_num_shards = GPR_CLAMP(2 * gpr_cpu_num_cores(), 1, 32);
   g_shards =
       static_cast<timer_shard*>(gpr_zalloc(g_num_shards * sizeof(*g_shards)));
   g_shard_queue = static_cast<timer_shard**>(
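
This one-liner is a real bug fix: GPR_MIN(1, 2 * gpr_cpu_num_cores()) always evaluates to 1 (the core count is at least 1, so the second operand is at least 2), meaning the timer list was never actually sharded. The replacement clamps the shard count into [1, 32]. A worked illustration, with the macros spelled out for the example (the gpr definitions are equivalent in effect):

#define GPR_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GPR_MAX(a, b) ((a) > (b) ? (a) : (b))
#define GPR_CLAMP(a, min, max) GPR_MAX(min, GPR_MIN(a, max))

// On an 8-core machine:
//   old: GPR_MIN(1, 2 * 8)       == 1   (single shard, always)
//   new: GPR_CLAMP(2 * 8, 1, 32) == 16  (one shard per hardware thread)
static_assert(GPR_CLAMP(16, 1, 32) == 16, "within range: unchanged");
static_assert(GPR_CLAMP(80, 1, 32) == 32, "clamped to upper bound");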
@@ -291,7 +291,7 @@ static void timer_list_init() {
 static void timer_list_shutdown() {
   size_t i;
   run_some_expired_timers(
-      GPR_ATM_MAX, nullptr,
+      GRPC_MILLIS_INF_FUTURE, nullptr,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
   for (i = 0; i < g_num_shards; i++) {
     timer_shard* shard = &g_shards[i];
@@ -489,7 +489,7 @@ static void timer_cancel(grpc_timer* timer) {
    'queue_deadline_cap') into into shard->heap.
    Returns 'true' if shard->heap has atleast ONE element
    REQUIRES: shard->mu locked */
-static int refill_heap(timer_shard* shard, grpc_millis now) {
+static bool refill_heap(timer_shard* shard, grpc_millis now) {
   /* Compute the new queue window width and bound by the limits: */
   double computed_deadline_delta =
       grpc_time_averaged_stats_update_average(&shard->stats) *
@@ -714,9 +714,10 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
 #if GPR_ARCH_64
     gpr_log(GPR_INFO,
             "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
-            " glob_min=%" PRIdPTR,
+            " glob_min=%" PRId64,
             now, next_str, min_timer,
-            gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
+            static_cast<grpc_millis>(gpr_atm_no_barrier_load(
+                (gpr_atm*)(&g_shared_mutables.min_timer))));
 #else
     gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
             now, next_str, min_timer);
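
The old format string printed a gpr_atm (an intptr_t-sized integer) with PRIdPTR while the surrounding values are grpc_millis (int64_t); on 64-bit platforms the widths happen to match, but the types don't, so the fix casts the atomic load back to the logical type and uses that type's own format macro. The general pattern:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  intptr_t cell = 42;                           // stand-in for a gpr_atm slot
  int64_t millis = static_cast<int64_t>(cell);  // recover the logical type
  std::printf("glob_min=%" PRId64 "\n", millis);
}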
@@ -95,7 +95,7 @@ void grpc_timer_heap_init(grpc_timer_heap* heap) {

 void grpc_timer_heap_destroy(grpc_timer_heap* heap) { gpr_free(heap->timers); }

-int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer) {
+bool grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer) {
   if (heap->timer_count == heap->timer_capacity) {
     heap->timer_capacity =
         GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
@@ -122,7 +122,7 @@ void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer) {
   note_changed_priority(heap, heap->timers[i]);
 }

-int grpc_timer_heap_is_empty(grpc_timer_heap* heap) {
+bool grpc_timer_heap_is_empty(grpc_timer_heap* heap) {
   return heap->timer_count == 0;
 }

@@ -29,8 +29,8 @@ typedef struct {
   uint32_t timer_capacity;
 } grpc_timer_heap;

-/* return 1 if the new timer is the first timer in the heap */
-int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer);
+/* return true if the new timer is the first timer in the heap */
+bool grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer);

 void grpc_timer_heap_init(grpc_timer_heap* heap);
 void grpc_timer_heap_destroy(grpc_timer_heap* heap);
@@ -39,6 +39,6 @@ void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer);
 grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap);
 void grpc_timer_heap_pop(grpc_timer_heap* heap);

-int grpc_timer_heap_is_empty(grpc_timer_heap* heap);
+bool grpc_timer_heap_is_empty(grpc_timer_heap* heap);

 #endif /* GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H */
@@ -61,6 +61,14 @@ static uint64_t g_timed_waiter_generation;

 static void timer_thread(void* completed_thread_ptr);

+// For debug of the timer manager crash only.
+// TODO (mxyan): remove after bug is fixed.
+#ifdef GRPC_DEBUG_TIMER_MANAGER
+extern int64_t g_timer_manager_init_count;
+extern int64_t g_timer_manager_shutdown_count;
+extern int64_t g_fork_count;
+#endif  // GRPC_DEBUG_TIMER_MANAGER
+
 static void gc_completed_threads(void) {
   if (g_completed_threads != nullptr) {
     completed_thread* to_gc = g_completed_threads;
@@ -92,8 +100,7 @@ static void start_timer_thread_and_unlock(void) {

 void grpc_timer_manager_tick() {
   grpc_core::ExecCtx exec_ctx;
-  grpc_millis next = GRPC_MILLIS_INF_FUTURE;
-  grpc_timer_check(&next);
+  grpc_timer_check(nullptr);
 }

 static void run_some_timers() {
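
This works because grpc_timer_check's out-parameter is evidently optional: passing nullptr says the caller does not need the next-deadline estimate, so the tick path no longer fabricates a throwaway grpc_millis just to discard it.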
@@ -102,9 +109,12 @@ static void run_some_timers() {
   // remove a waiter from the pool, and start another thread if necessary
   --g_waiter_count;
   if (g_waiter_count == 0 && g_threaded) {
+    // The number of timer threads is always increasing until all the threads
+    // are stopped. In rare cases, if a large number of timers fire
+    // simultaneously, we may end up using a large number of threads.
     start_timer_thread_and_unlock();
   } else {
-    // if there's no thread waiting with a timeout, kick an existing
+    // if there's no thread waiting with a timeout, kick an existing untimed
     // waiter so that the next deadline is not missed
     if (!g_has_timed_waiter) {
       if (grpc_timer_check_trace.enabled()) {
@@ -284,6 +294,11 @@ static void start_threads(void) {
 void grpc_timer_manager_init(void) {
   gpr_mu_init(&g_mu);
   gpr_cv_init(&g_cv_wait);
+#ifdef GRPC_DEBUG_TIMER_MANAGER
+  // For debug of the timer manager crash only.
+  // TODO (mxyan): remove after bug is fixed.
+  g_timer_manager_init_count++;
+#endif
   gpr_cv_init(&g_cv_shutdown);
   g_threaded = false;
   g_thread_count = 0;
@@ -319,6 +334,11 @@ static void stop_threads(void) {
 }

 void grpc_timer_manager_shutdown(void) {
+#ifdef GRPC_DEBUG_TIMER_MANAGER
+  // For debug of the timer manager crash only.
+  // TODO (mxyan): remove after bug is fixed.
+  g_timer_manager_shutdown_count++;
+#endif
   stop_threads();

   gpr_mu_destroy(&g_mu);
@@ -327,6 +347,11 @@ void grpc_timer_manager_shutdown(void) {
 }

 void grpc_timer_manager_set_threading(bool threaded) {
+#ifdef GRPC_DEBUG_TIMER_MANAGER
+  // For debug of the timer manager crash only.
+  // TODO (mxyan): remove after bug is fixed.
+  g_fork_count++;
+#endif
   if (threaded) {
     start_threads();
   } else {
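
These three hunks increment the debug-only counters declared in the earlier timer_manager.cc hunk: each lifecycle entry point (init, shutdown, fork-driven threading changes) bumps its counter, presumably so a crash dump can show how many such transitions preceded the crash under investigation. All of it compiles away unless the build defines GRPC_DEBUG_TIMER_MANAGER (e.g. via -DGRPC_DEBUG_TIMER_MANAGER in the compiler flags).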
@@ -23,8 +23,8 @@

 #include <stdbool.h>

-/* Timer Manager tries to keep one thread waiting for the next timeout at all
-   times */
+/* Timer Manager tries to keep only one thread waiting for the next timeout at
+   all times, and thus effectively preventing the thundering herd problem. */

 void grpc_timer_manager_init(void);
 void grpc_timer_manager_shutdown(void);
@@ -152,7 +152,7 @@ GrpcUdpListener::GrpcUdpListener(grpc_udp_server* server, int fd,
   grpc_sockaddr_to_string(&addr_str, addr, 1);
   gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
   gpr_free(addr_str);
-  emfd_ = grpc_fd_create(fd, name, false);
+  emfd_ = grpc_fd_create(fd, name, true);
   memcpy(&addr_, addr, sizeof(grpc_resolved_address));
   GPR_ASSERT(emfd_);
   gpr_free(name);
@@ -72,7 +72,8 @@ static void target_service_account_destroy(
 static const grpc_alts_credentials_options_vtable vtable = {
     alts_client_options_copy, alts_client_options_destroy};

-grpc_alts_credentials_options* grpc_alts_credentials_client_options_create() {
+grpc_alts_credentials_options* grpc_alts_credentials_client_options_create(
+    void) {
   auto client_options = static_cast<grpc_alts_credentials_client_options*>(
       gpr_zalloc(sizeof(grpc_alts_credentials_client_options)));
   client_options->base.vtable = &vtable;
@@ -36,7 +36,8 @@ static void alts_server_options_destroy(
 static const grpc_alts_credentials_options_vtable vtable = {
     alts_server_options_copy, alts_server_options_destroy};

-grpc_alts_credentials_options* grpc_alts_credentials_server_options_create() {
+grpc_alts_credentials_options* grpc_alts_credentials_server_options_create(
+    void) {
   grpc_alts_credentials_server_options* server_options =
       static_cast<grpc_alts_credentials_server_options*>(
           gpr_zalloc(sizeof(*server_options)));
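
Both ALTS option factories now spell their empty parameter list as (void). The definitions are C++ (where () and (void) are equivalent), but these functions belong to gRPC's exported C API, and in C a bare () declares a function taking unspecified arguments while (void) declares one taking none; writing (void) keeps the definitions textually in line with the C-header prototypes (an assumption consistent with the grpc_security.h +2 -2 entry in the file list, but worth verifying against that header).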
@@ -59,8 +59,8 @@ static const char* installed_roots_path =

 /** Environment variable used as a flag to enable/disable loading system root
     certificates from the OS trust store. */
-#ifndef GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR
-#define GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR "GRPC_USE_SYSTEM_SSL_ROOTS"
+#ifndef GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR
+#define GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR "GRPC_NOT_USE_SYSTEM_SSL_ROOTS"
 #endif

 #ifndef TSI_OPENSSL_ALPN_SUPPORT
@@ -1192,10 +1192,10 @@ const char* DefaultSslRootStore::GetPemRootCerts() {

 grpc_slice DefaultSslRootStore::ComputePemRootCerts() {
   grpc_slice result = grpc_empty_slice();
-  char* use_system_roots_env_value =
-      gpr_getenv(GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR);
-  const bool use_system_roots = gpr_is_true(use_system_roots_env_value);
-  gpr_free(use_system_roots_env_value);
+  char* not_use_system_roots_env_value =
+      gpr_getenv(GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR);
+  const bool not_use_system_roots = gpr_is_true(not_use_system_roots_env_value);
+  gpr_free(not_use_system_roots_env_value);
   // First try to load the roots from the environment.
   char* default_root_certs_path =
       gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR);
@@ -1218,7 +1218,7 @@ grpc_slice DefaultSslRootStore::ComputePemRootCerts() {
     gpr_free(pem_root_certs);
   }
   // Try loading roots from OS trust store if flag is enabled.
-  if (GRPC_SLICE_IS_EMPTY(result) && use_system_roots) {
+  if (GRPC_SLICE_IS_EMPTY(result) && !not_use_system_roots) {
     result = LoadSystemRootCerts();
   }
   // Fallback to roots manually shipped with gRPC.
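
Taken together, these three hunks flip the system-roots feature from opt-in to opt-out: loading roots from the OS trust store is now the default, and setting the GRPC_NOT_USE_SYSTEM_SSL_ROOTS environment variable to a truthy value disables it. A minimal sketch of the resulting decision, with an illustrative truthiness helper standing in for gpr_is_true (whose exact accepted spellings should be checked in the gpr source):

#include <cstdlib>
#include <cstring>

// Illustrative stand-in for gpr_is_true; the real helper may accept more
// spellings than these two.
static bool env_is_true(const char* value) {
  return value != nullptr &&
         (std::strcmp(value, "true") == 0 || std::strcmp(value, "1") == 0);
}

bool should_try_system_roots() {
  // Default is true; only an explicit opt-out turns system roots off.
  return !env_is_true(std::getenv("GRPC_NOT_USE_SYSTEM_SSL_ROOTS"));
}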
@@ -254,7 +254,7 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
 }

 static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
-                           grpc_closure* cb) {
+                           grpc_closure* cb, void* arg) {
   GPR_TIMER_SCOPE("secure_endpoint.endpoint_write", 0);

   unsigned i;
@@ -342,7 +342,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
     return;
   }

-  grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb);
+  grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb, arg);
 }

 static void endpoint_shutdown(grpc_endpoint* secure_ep, grpc_error* why) {
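
The win_write and secure-endpoint hunks are all fallout from one interface change: grpc_endpoint_write now takes a trailing void* arg (plausibly the hook through which the new tcp_posix.cc timestamp/traced-buffer work receives per-write data; the diff itself doesn't say). Every endpoint implementation must accept it, and every wrapping endpoint must forward it untouched, as secure_endpoint does above. The pattern, sketched with illustrative names against this release's headers:

#include "src/core/lib/iomgr/endpoint.h"

// Illustrative delegating endpoint (the struct is made up; only the
// forwarding pattern matters): a wrapper's write vtable entry must pass
// 'arg' through to the wrapped endpoint unchanged.
struct wrapping_endpoint {
  grpc_endpoint base;    // vtable lives here, so casts below are valid
  grpc_endpoint* inner;  // the endpoint being wrapped
};

static void wrapping_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
                           grpc_closure* cb, void* arg) {
  wrapping_endpoint* w = reinterpret_cast<wrapping_endpoint*>(ep);
  grpc_endpoint_write(w->inner, slices, cb, arg);  // forward, don't drop
}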
@@ -259,7 +259,7 @@ static grpc_error* on_handshake_next_done_locked(
     grpc_slice_buffer_reset_and_unref_internal(&h->outgoing);
     grpc_slice_buffer_add(&h->outgoing, to_send);
     grpc_endpoint_write(h->args->endpoint, &h->outgoing,
-                        &h->on_handshake_data_sent_to_peer);
+                        &h->on_handshake_data_sent_to_peer, nullptr);
   } else if (handshaker_result == nullptr) {
     // There is nothing to send, but need to read from peer.
     grpc_endpoint_read(h->args->endpoint, h->args->read_buffer,
@@ -41,6 +41,11 @@ struct call_data {
   grpc_transport_stream_op_batch* recv_initial_metadata_batch;
   grpc_closure* original_recv_initial_metadata_ready;
   grpc_closure recv_initial_metadata_ready;
+  grpc_error* recv_initial_metadata_error;
+  grpc_closure recv_trailing_metadata_ready;
+  grpc_closure* original_recv_trailing_metadata_ready;
+  grpc_error* recv_trailing_metadata_error;
+  bool seen_recv_trailing_metadata_ready;
   grpc_metadata_array md;
   const grpc_metadata* consumed_md;
   size_t num_consumed_md;
@@ -111,7 +116,16 @@ static void on_md_processing_done_inner(grpc_call_element* elem,
         batch->payload->recv_initial_metadata.recv_initial_metadata,
         remove_consumed_md, elem, "Response metadata filtering error");
   }
-  GRPC_CLOSURE_SCHED(calld->original_recv_initial_metadata_ready, error);
+  calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
+  grpc_closure* closure = calld->original_recv_initial_metadata_ready;
+  calld->original_recv_initial_metadata_ready = nullptr;
+  if (calld->seen_recv_trailing_metadata_ready) {
+    GRPC_CALL_COMBINER_START(calld->call_combiner,
+                             &calld->recv_trailing_metadata_ready,
+                             calld->recv_trailing_metadata_error,
+                             "continue recv_trailing_metadata_ready");
+  }
+  GRPC_CLOSURE_SCHED(closure, error);
 }

 // Called from application code.
@@ -180,8 +194,31 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
       return;
     }
   }
-  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready,
-                   GRPC_ERROR_REF(error));
+  grpc_closure* closure = calld->original_recv_initial_metadata_ready;
+  calld->original_recv_initial_metadata_ready = nullptr;
+  if (calld->seen_recv_trailing_metadata_ready) {
+    GRPC_CALL_COMBINER_START(calld->call_combiner,
+                             &calld->recv_trailing_metadata_ready,
+                             calld->recv_trailing_metadata_error,
+                             "continue recv_trailing_metadata_ready");
+  }
+  GRPC_CLOSURE_RUN(closure, GRPC_ERROR_REF(error));
+}
+
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* err) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (calld->original_recv_initial_metadata_ready != nullptr) {
+    calld->recv_trailing_metadata_error = GRPC_ERROR_REF(err);
+    calld->seen_recv_trailing_metadata_ready = true;
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+                            "deferring recv_trailing_metadata_ready until "
+                            "after recv_initial_metadata_ready");
+    return;
+  }
+  err = grpc_error_add_child(
+      GRPC_ERROR_REF(err), GRPC_ERROR_REF(calld->recv_initial_metadata_error));
+  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, err);
 }

 static void auth_start_transport_stream_op_batch(
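
The server auth filter now intercepts recv_trailing_metadata as well, and the callback pair above implements an ordering protocol on the call combiner: if trailing metadata becomes ready while initial-metadata processing is still in flight, its error is stashed and the closure is re-queued only after recv_initial_metadata_ready completes, with the initial-metadata error attached as a child. Stripped of gRPC's combiner machinery, the state machine reduces to this plain C++ sketch (illustrative names, single-threaded for clarity):

#include <functional>

// Two events, A ("initial ready") and B ("trailing ready"), where B's
// user-visible callback must never run before A's has finished.
struct OrderedPair {
  std::function<void(int)> on_b_done;  // user callback for event B
  bool a_outstanding = true;           // A's callback has not yet run
  bool b_deferred = false;
  int b_err = 0;

  void AFinished(int /*a_err*/) {
    a_outstanding = false;
    if (b_deferred) on_b_done(b_err);  // replay the deferred B event
  }
  void BReady(int err) {
    if (a_outstanding) {  // too early: remember the error, wait for A
      b_deferred = true;
      b_err = err;
      return;
    }
    on_b_done(err);
  }
};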
@@ -195,6 +232,12 @@ static void auth_start_transport_stream_op_batch(
     batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
         &calld->recv_initial_metadata_ready;
   }
+  if (batch->recv_trailing_metadata) {
+    calld->original_recv_trailing_metadata_ready =
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+    batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+        &calld->recv_trailing_metadata_ready;
+  }
   grpc_call_next_op(elem, batch);
 }

@@ -208,6 +251,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
   GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                     recv_initial_metadata_ready, elem,
                     grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
+                    recv_trailing_metadata_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   // Create server security context. Set its auth context from channel
   // data and save it in the call context.
   grpc_server_security_context* server_ctx =
@@ -227,7 +273,10 @@
 /* Destructor for call_data */
 static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
-                              grpc_closure* ignored) {}
+                              grpc_closure* ignored) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+}

 /* Constructor for channel_data */
 static grpc_error* init_channel_elem(grpc_channel_element* elem,
233
282
  static grpc_error* init_channel_elem(grpc_channel_element* elem,