grpc 1.11.1 → 1.12.0

Potentially problematic release.

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/Makefile +225 -87
  3. data/etc/roots.pem +0 -33
  4. data/include/grpc/grpc_security.h +70 -0
  5. data/include/grpc/impl/codegen/port_platform.h +11 -0
  6. data/include/grpc/support/log.h +9 -1
  7. data/src/core/ext/filters/client_channel/client_channel.cc +305 -210
  8. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -1
  9. data/src/core/ext/filters/client_channel/lb_policy.cc +2 -2
  10. data/src/core/ext/filters/client_channel/lb_policy.h +4 -0
  11. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +12 -9
  12. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +168 -197
  13. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +368 -373
  14. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +498 -98
  15. data/src/core/ext/filters/client_channel/method_params.h +4 -0
  16. data/src/core/ext/filters/client_channel/resolver.h +4 -0
  17. data/src/core/ext/filters/client_channel/retry_throttle.h +4 -0
  18. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -2
  19. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +40 -15
  20. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +3 -3
  21. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -2
  22. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
  23. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -2
  24. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +3 -3
  25. data/src/core/ext/transport/chttp2/transport/writing.cc +5 -5
  26. data/src/core/ext/transport/inproc/inproc_transport.cc +41 -43
  27. data/src/core/lib/channel/channel_args.cc +28 -0
  28. data/src/core/lib/channel/channel_args.h +4 -0
  29. data/src/core/lib/channel/handshaker.cc +47 -0
  30. data/src/core/lib/channel/handshaker.h +4 -0
  31. data/src/core/lib/debug/trace.cc +2 -1
  32. data/src/core/lib/debug/trace.h +10 -1
  33. data/src/core/lib/gpr/log.cc +8 -2
  34. data/src/core/lib/gpr/log_android.cc +4 -0
  35. data/src/core/lib/gpr/log_linux.cc +4 -0
  36. data/src/core/lib/gpr/log_posix.cc +4 -0
  37. data/src/core/lib/gpr/log_windows.cc +5 -0
  38. data/src/core/lib/gprpp/inlined_vector.h +30 -34
  39. data/src/core/lib/gprpp/orphanable.h +4 -4
  40. data/src/core/lib/gprpp/ref_counted.h +4 -4
  41. data/src/core/lib/iomgr/call_combiner.cc +13 -13
  42. data/src/core/lib/iomgr/closure.h +3 -3
  43. data/src/core/lib/iomgr/combiner.cc +11 -11
  44. data/src/core/lib/iomgr/ev_epoll1_linux.cc +24 -24
  45. data/src/core/lib/iomgr/ev_epollex_linux.cc +48 -29
  46. data/src/core/lib/iomgr/ev_epollsig_linux.cc +2 -2
  47. data/src/core/lib/iomgr/ev_poll_posix.cc +9 -3
  48. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  49. data/src/core/lib/iomgr/executor.cc +6 -6
  50. data/src/core/lib/iomgr/resource_quota.cc +10 -11
  51. data/src/core/lib/iomgr/socket_utils_common_posix.cc +24 -0
  52. data/src/core/lib/iomgr/socket_utils_linux.cc +0 -1
  53. data/src/core/lib/iomgr/socket_utils_posix.cc +2 -3
  54. data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
  55. data/src/core/lib/iomgr/tcp_client_custom.cc +2 -2
  56. data/src/core/lib/iomgr/tcp_client_posix.cc +4 -4
  57. data/src/core/lib/iomgr/tcp_custom.cc +10 -10
  58. data/src/core/lib/iomgr/tcp_posix.cc +25 -25
  59. data/src/core/lib/iomgr/tcp_server_custom.cc +5 -5
  60. data/src/core/lib/iomgr/tcp_server_posix.cc +4 -25
  61. data/src/core/lib/iomgr/tcp_server_windows.cc +1 -0
  62. data/src/core/lib/iomgr/tcp_uv.cc +3 -0
  63. data/src/core/lib/iomgr/tcp_windows.cc +16 -0
  64. data/src/core/lib/iomgr/timer_generic.cc +27 -17
  65. data/src/core/lib/iomgr/timer_manager.cc +11 -12
  66. data/src/core/lib/iomgr/timer_uv.cc +3 -0
  67. data/src/core/lib/iomgr/udp_server.cc +104 -49
  68. data/src/core/lib/iomgr/udp_server.h +8 -4
  69. data/src/core/lib/profiling/basic_timers.cc +1 -0
  70. data/src/core/lib/security/credentials/alts/alts_credentials.h +0 -20
  71. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +7 -7
  72. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +1 -38
  73. data/src/core/lib/security/security_connector/security_connector.cc +19 -16
  74. data/src/core/lib/security/security_connector/security_connector.h +4 -3
  75. data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
  76. data/src/core/lib/security/transport/security_handshaker.cc +6 -2
  77. data/src/core/lib/slice/slice.cc +6 -2
  78. data/src/core/lib/slice/slice_buffer.cc +12 -4
  79. data/src/core/lib/slice/slice_hash_table.h +4 -0
  80. data/src/core/lib/slice/slice_weak_hash_table.h +4 -0
  81. data/src/core/lib/surface/call.cc +6 -6
  82. data/src/core/lib/surface/server.cc +16 -0
  83. data/src/core/lib/surface/version.cc +1 -1
  84. data/src/core/lib/transport/bdp_estimator.cc +3 -3
  85. data/src/core/lib/transport/bdp_estimator.h +2 -2
  86. data/src/core/lib/transport/connectivity_state.cc +6 -7
  87. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +4 -0
  88. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -0
  89. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +21 -0
  90. data/src/ruby/lib/grpc/version.rb +1 -1
  91. data/src/ruby/pb/generate_proto_ruby.sh +7 -1
  92. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +2 -5
  93. data/third_party/address_sorting/address_sorting.c +10 -9
  94. metadata +27 -28
  95. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +0 -253
@@ -222,10 +222,10 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
   }
   if (grpc_tcp_trace.enabled()) {
     if (peer_name_string) {
-      gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
+      gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s",
               sp->server, peer_name_string);
     } else {
-      gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
+      gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection", sp->server);
     }
   }
   ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
@@ -377,10 +377,10 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
     grpc_sockaddr_to_string(&port_string, addr, 0);
     const char* str = grpc_error_string(error);
     if (port_string) {
-      gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
+      gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s, port_string, str);
       gpr_free(port_string);
     } else {
-      gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
+      gpr_log(GPR_INFO, "SERVER %p add_port error=%s", s, str);
     }
   }
 
@@ -419,7 +419,7 @@ static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
   (void)pollset_count;
   GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
   if (grpc_tcp_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "SERVER_START %p", server);
+    gpr_log(GPR_INFO, "SERVER_START %p", server);
   }
   GPR_ASSERT(on_accept_cb);
   GPR_ASSERT(!server->on_accept_cb);
@@ -55,39 +55,18 @@
 #include "src/core/lib/iomgr/tcp_server_utils_posix.h"
 #include "src/core/lib/iomgr/unix_sockets_posix.h"
 
-static gpr_once check_init = GPR_ONCE_INIT;
-static bool has_so_reuseport = false;
-
-static void init(void) {
-#ifndef GPR_MANYLINUX1
-  int s = socket(AF_INET, SOCK_STREAM, 0);
-  if (s < 0) {
-    /* This might be an ipv6-only environment in which case 'socket(AF_INET,..)'
-       call would fail. Try creating IPv6 socket in that case */
-    s = socket(AF_INET6, SOCK_STREAM, 0);
-  }
-  if (s >= 0) {
-    has_so_reuseport = GRPC_LOG_IF_ERROR("check for SO_REUSEPORT",
-                                         grpc_set_socket_reuse_port(s, 1));
-    close(s);
-  }
-#endif
-}
-
 static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
                                      const grpc_channel_args* args,
                                      grpc_tcp_server** server) {
-  gpr_once_init(&check_init, init);
-
   grpc_tcp_server* s =
       static_cast<grpc_tcp_server*>(gpr_zalloc(sizeof(grpc_tcp_server)));
-  s->so_reuseport = has_so_reuseport;
+  s->so_reuseport = grpc_is_socket_reuse_port_supported();
   s->expand_wildcard_addrs = false;
   for (size_t i = 0; i < (args == nullptr ? 0 : args->num_args); i++) {
     if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
       if (args->args[i].type == GRPC_ARG_INTEGER) {
-        s->so_reuseport =
-            has_so_reuseport && (args->args[i].value.integer != 0);
+        s->so_reuseport = grpc_is_socket_reuse_port_supported() &&
+                          (args->args[i].value.integer != 0);
       } else {
         gpr_free(s);
         return GRPC_ERROR_CREATE_FROM_STATIC_STRING(GRPC_ARG_ALLOW_REUSEPORT
@@ -249,7 +228,7 @@ static void on_read(void* arg, grpc_error* err) {
     gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
 
     if (grpc_tcp_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
+      gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str);
     }
 
     grpc_fd* fdobj = grpc_fd_create(fd, name);
@@ -129,6 +129,7 @@ static void destroy_server(void* arg, grpc_error* error) {
     gpr_free(sp);
   }
   grpc_channel_args_destroy(s->channel_args);
+  gpr_mu_destroy(&s->mu);
   gpr_free(s);
 }
 
@@ -204,6 +204,9 @@ static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) {
   uv_socket->write_buffers = nullptr;
   uv_socket->read_len = 0;
   uv_tcp_nodelay(uv_socket->handle, 1);
+  // Node uses a garbage collector to call destructors, so we don't
+  // want to hold the uv loop open with active gRPC objects.
+  uv_unref((uv_handle_t*)uv_socket->handle);
   uv_socket->pending_connection = false;
   uv_socket->accept_socket = nullptr;
   uv_socket->accept_error = GRPC_ERROR_NONE;
@@ -74,12 +74,28 @@ static grpc_error* set_dualstack(SOCKET sock) {
              : GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
 }
 
+static grpc_error* enable_loopback_fast_path(SOCKET sock) {
+  int status;
+  uint32_t param = 1;
+  DWORD ret;
+  status = WSAIoctl(sock, /*SIO_LOOPBACK_FAST_PATH==*/_WSAIOW(IOC_VENDOR, 16),
+                    &param, sizeof(param), NULL, 0, &ret, 0, 0);
+  if (status == SOCKET_ERROR) {
+    status = WSAGetLastError();
+  }
+  return status == 0 || status == WSAEOPNOTSUPP
+             ? GRPC_ERROR_NONE
+             : GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)");
+}
+
 grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
   grpc_error* err;
   err = set_non_block(sock);
   if (err != GRPC_ERROR_NONE) return err;
   err = set_dualstack(sock);
   if (err != GRPC_ERROR_NONE) return err;
+  err = enable_loopback_fast_path(sock);
+  if (err != GRPC_ERROR_NONE) return err;
   return GRPC_ERROR_NONE;
 }
 
@@ -97,6 +97,12 @@ static void init_timer_ht() {
   }
 }
 
+static void destroy_timer_ht() {
+  for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
+    gpr_mu_destroy(&g_hash_mu[i]);
+  }
+}
+
 static bool is_in_ht(grpc_timer* t) {
   size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
 
@@ -188,6 +194,7 @@ static void validate_non_pending_timer(grpc_timer* t) {
 }
 
 #define INIT_TIMER_HASH_TABLE() init_timer_ht()
+#define DESTROY_TIMER_HASH_TABLE() destroy_timer_ht()
 #define ADD_TO_HASH_TABLE(t) add_to_ht((t))
 #define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
 #define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
@@ -195,6 +202,7 @@ static void validate_non_pending_timer(grpc_timer* t) {
 #else
 
 #define INIT_TIMER_HASH_TABLE()
+#define DESTROY_TIMER_HASH_TABLE()
 #define ADD_TO_HASH_TABLE(t)
 #define REMOVE_FROM_HASH_TABLE(t)
 #define VALIDATE_NON_PENDING_TIMER(t)
@@ -283,6 +291,8 @@ static void timer_list_shutdown() {
   gpr_free(g_shards);
   gpr_free(g_shard_queue);
   g_shared_mutables.initialized = false;
+
+  DESTROY_TIMER_HASH_TABLE();
 }
 
 /* returns true if the first element in the list */
@@ -336,9 +346,9 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
 #endif
 
   if (grpc_timer_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
-            "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
-            deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
+    gpr_log(GPR_INFO, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]",
+            timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
+            closure->cb);
   }
 
   if (!g_shared_mutables.initialized) {
@@ -372,7 +382,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
     list_join(&shard->list, timer);
   }
   if (grpc_timer_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
             " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
             " => is_first_timer=%s",
             static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
@@ -394,7 +404,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
   if (is_first_timer) {
     gpr_mu_lock(&g_shared_mutables.mu);
     if (grpc_timer_trace.enabled()) {
-      gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
+      gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRIdPTR,
               shard->min_deadline);
     }
     if (deadline < shard->min_deadline) {
@@ -424,7 +434,7 @@ static void timer_cancel(grpc_timer* timer) {
   timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
   gpr_mu_lock(&shard->mu);
   if (grpc_timer_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
+    gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
             timer->pending ? "true" : "false");
   }
 
@@ -465,7 +475,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
                    static_cast<gpr_atm>(deadline_delta * 1000.0));
 
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
+    gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
             static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
   }
   for (timer = shard->list.next; timer != &shard->list; timer = next) {
@@ -473,7 +483,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
 
     if (timer->deadline < shard->queue_deadline_cap) {
       if (grpc_timer_check_trace.enabled()) {
-        gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
+        gpr_log(GPR_INFO, " .. add timer with deadline %" PRIdPTR " to heap",
                 timer->deadline);
       }
       list_remove(timer);
@@ -490,7 +500,7 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
   grpc_timer* timer;
   for (;;) {
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
+      gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
              static_cast<int>(shard - g_shards),
              grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
     }
@@ -500,13 +510,13 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
     }
     timer = grpc_timer_heap_top(&shard->heap);
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
              timer->deadline, now);
     }
     if (timer->deadline > now) return nullptr;
     if (grpc_timer_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+      gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
              timer, now - timer->deadline,
              timer->closure->scheduler->vtable->name);
     }
@@ -530,7 +540,7 @@ static size_t pop_timers(timer_shard* shard, gpr_atm now,
   *new_min_deadline = compute_min_deadline(shard);
   gpr_mu_unlock(&shard->mu);
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
+    gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
            static_cast<int>(shard - g_shards), n);
   }
   return n;
@@ -553,7 +563,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
     result = GRPC_TIMERS_CHECKED_AND_EMPTY;
 
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
+      gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRIdPTR,
              static_cast<int>(g_shard_queue[0] - g_shards),
              g_shard_queue[0]->min_deadline);
     }
@@ -570,7 +580,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
     }
 
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              " .. result --> %d"
              ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
              ", now=%" PRIdPTR,
@@ -614,7 +624,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
       *next = GPR_MIN(*next, min_timer);
     }
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
              min_timer);
     }
@@ -634,7 +644,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
     } else {
       gpr_asprintf(&next_str, "%" PRIdPTR, *next);
     }
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
            " glob_min=%" PRIdPTR,
            now, next_str, gpr_tls_get(&g_last_seen_min_timer),
@@ -652,7 +662,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
     } else {
      gpr_asprintf(&next_str, "%" PRIdPTR, *next);
     }
-    gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
+    gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str);
     gpr_free(next_str);
   }
   return r;
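
Note that the timer hunks above (and the timer_manager.cc hunks that follow) only change the level passed to gpr_log for messages already gated on grpc_timer_trace / grpc_timer_check_trace. With those tracers switched on via the GRPC_TRACE environment variable (for example GRPC_TRACE=timer,timer_check), the messages now surface at GRPC_VERBOSITY=INFO instead of requiring DEBUG verbosity.
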
@@ -82,7 +82,7 @@ static void start_timer_thread_and_unlock(void) {
   ++g_thread_count;
   gpr_mu_unlock(&g_mu);
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "Spawn timer thread");
+    gpr_log(GPR_INFO, "Spawn timer thread");
   }
   completed_thread* ct =
       static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
@@ -108,7 +108,7 @@ static void run_some_timers() {
     // waiter so that the next deadline is not missed
     if (!g_has_timed_waiter) {
       if (grpc_timer_check_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "kick untimed waiter");
+        gpr_log(GPR_INFO, "kick untimed waiter");
       }
       gpr_cv_signal(&g_cv_wait);
     }
@@ -116,7 +116,7 @@ static void run_some_timers() {
   }
   // without our lock, flush the exec_ctx
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "flush exec_ctx");
+    gpr_log(GPR_INFO, "flush exec_ctx");
   }
   grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(&g_mu);
@@ -172,8 +172,7 @@ static bool wait_until(grpc_millis next) {
 
       if (grpc_timer_check_trace.enabled()) {
         grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
-        gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
-                wait_time);
+        gpr_log(GPR_INFO, "sleep for a %" PRIdPTR " milliseconds", wait_time);
       }
     } else {  // g_timed_waiter == true && next >= g_timed_waiter_deadline
       next = GRPC_MILLIS_INF_FUTURE;
@@ -181,14 +180,14 @@ static bool wait_until(grpc_millis next) {
     }
 
     if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) {
-      gpr_log(GPR_DEBUG, "sleep until kicked");
+      gpr_log(GPR_INFO, "sleep until kicked");
     }
 
     gpr_cv_wait(&g_cv_wait, &g_mu,
                 grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
 
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
+      gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d",
              my_timed_waiter_generation == g_timed_waiter_generation,
              g_kicked);
     }
@@ -233,7 +232,7 @@ static void timer_main_loop() {
          Consequently, we can just sleep forever here and be happy at some
          saved wakeup cycles. */
       if (grpc_timer_check_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
+        gpr_log(GPR_INFO, "timers not checked: expect another thread to");
       }
       next = GRPC_MILLIS_INF_FUTURE;
       /* fall through */
@@ -259,7 +258,7 @@ static void timer_thread_cleanup(completed_thread* ct) {
   g_completed_threads = ct;
   gpr_mu_unlock(&g_mu);
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "End timer thread");
+    gpr_log(GPR_INFO, "End timer thread");
  }
 }
 
@@ -301,18 +300,18 @@ void grpc_timer_manager_init(void) {
 static void stop_threads(void) {
   gpr_mu_lock(&g_mu);
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded);
+    gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded);
   }
   if (g_threaded) {
     g_threaded = false;
     gpr_cv_broadcast(&g_cv_wait);
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
+      gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
    }
    while (g_thread_count > 0) {
      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
      if (grpc_timer_check_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
+        gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
      }
      gc_completed_threads();
    }
@@ -52,6 +52,9 @@ static void timer_start(grpc_custom_timer* t) {
   uv_timer->data = t;
   t->timer = (void*)uv_timer;
   uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0);
+  // Node uses a garbage collector to call destructors, so we don't
+  // want to hold the uv loop open with active gRPC objects.
+  uv_unref((uv_handle_t*)uv_timer);
 }
 
 static void timer_stop(grpc_custom_timer* t) {
@@ -191,6 +191,9 @@ struct grpc_udp_server {
   size_t pollset_count;
   /* opaque object to pass to callbacks */
   void* user_data;
+
+  /* latch has_so_reuseport during server creation */
+  bool so_reuseport;
 };
 
 static grpc_socket_factory* get_socket_factory(const grpc_channel_args* args) {
@@ -214,6 +217,7 @@ grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) {
   s->active_ports = 0;
   s->destroyed_ports = 0;
   s->shutdown = 0;
+  s->so_reuseport = grpc_is_socket_reuse_port_supported();
   return s;
 }
 
@@ -353,7 +357,7 @@ static int bind_socket(grpc_socket_factory* socket_factory, int sockfd,
 /* Prepare a recently-created socket for listening. */
 static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
                           const grpc_resolved_address* addr, int rcv_buf_size,
-                          int snd_buf_size) {
+                          int snd_buf_size, bool so_reuseport) {
   grpc_resolved_address sockname_temp;
   grpc_sockaddr* addr_ptr =
       reinterpret_cast<grpc_sockaddr*>(const_cast<char*>(addr->addr));
@@ -381,21 +385,6 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
     }
   }
 
-  if (bind_socket(socket_factory, fd, addr) < 0) {
-    char* addr_str;
-    grpc_sockaddr_to_string(&addr_str, addr, 0);
-    gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
-    gpr_free(addr_str);
-    goto error;
-  }
-
-  sockname_temp.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
-
-  if (getsockname(fd, reinterpret_cast<grpc_sockaddr*>(sockname_temp.addr),
-                  &sockname_temp.len) < 0) {
-    goto error;
-  }
-
   if (grpc_set_socket_sndbuf(fd, snd_buf_size) != GRPC_ERROR_NONE) {
     gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes",
             snd_buf_size);
@@ -415,6 +404,30 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
       gpr_log(GPR_INFO, "Failed to set socket overflow support");
     }
   }
+
+  if (so_reuseport && !grpc_is_unix_socket(addr) &&
+      grpc_set_socket_reuse_port(fd, 1) != GRPC_ERROR_NONE) {
+    gpr_log(GPR_ERROR, "Failed to set SO_REUSEPORT for fd %d", fd);
+    goto error;
+  }
+
+  if (bind_socket(socket_factory, fd, addr) < 0) {
+    char* addr_str;
+    grpc_sockaddr_to_string(&addr_str, addr, 0);
+    gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
+    gpr_free(addr_str);
+    goto error;
+  }
+
+  sockname_temp.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
+
+  if (getsockname(fd, reinterpret_cast<grpc_sockaddr*>(sockname_temp.addr),
+                  &sockname_temp.len) < 0) {
+    gpr_log(GPR_ERROR, "Unable to get the address socket %d is bound to: %s",
+            fd, strerror(errno));
+    goto error;
+  }
+
   return grpc_sockaddr_get_port(&sockname_temp);
 
 error:
@@ -541,8 +554,8 @@ static int add_socket_to_server(grpc_udp_server* s, int fd,
                                 int rcv_buf_size, int snd_buf_size) {
   gpr_log(GPR_DEBUG, "add socket %d to server", fd);
 
-  int port =
-      prepare_socket(s->socket_factory, fd, addr, rcv_buf_size, snd_buf_size);
+  int port = prepare_socket(s->socket_factory, fd, addr, rcv_buf_size,
                             snd_buf_size, s->so_reuseport);
   if (port >= 0) {
     gpr_mu_lock(&s->mu);
     s->listeners.emplace_back(s, fd, addr);
@@ -557,7 +570,18 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
 int grpc_udp_server_add_port(grpc_udp_server* s,
                              const grpc_resolved_address* addr,
                              int rcv_buf_size, int snd_buf_size,
-                             GrpcUdpHandlerFactory* handler_factory) {
+                             GrpcUdpHandlerFactory* handler_factory,
+                             size_t num_listeners) {
+  if (num_listeners > 1 && !s->so_reuseport) {
+    gpr_log(GPR_ERROR,
+            "Try to have multiple listeners on same port, but SO_REUSEPORT is "
+            "not supported. Only create 1 listener.");
+  }
+  char* addr_str;
+  grpc_sockaddr_to_string(&addr_str, addr, 1);
+  gpr_log(GPR_DEBUG, "add address: %s to server", addr_str);
+  gpr_free(addr_str);
+
   int allocated_port1 = -1;
   int allocated_port2 = -1;
   int fd;
@@ -568,11 +592,12 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
   grpc_resolved_address addr4_copy;
   grpc_resolved_address* allocated_addr = nullptr;
   grpc_resolved_address sockname_temp;
-  int port;
+  int port = 0;
 
   /* Check if this is a wildcard port, and if so, try to keep the port the same
      as some previously created listener. */
   if (grpc_sockaddr_get_port(addr) == 0) {
+    /* Loop through existing listeners to find the port in use. */
     for (size_t i = 0; i < s->listeners.size(); ++i) {
       sockname_temp.len =
           static_cast<socklen_t>(sizeof(struct sockaddr_storage));
@@ -581,6 +606,7 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
                            &sockname_temp.len)) {
         port = grpc_sockaddr_get_port(&sockname_temp);
         if (port > 0) {
+          /* Found such a port, update |addr| to reflects this port. */
          allocated_addr = static_cast<grpc_resolved_address*>(
              gpr_malloc(sizeof(grpc_resolved_address)));
          memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
@@ -597,44 +623,73 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
   }
 
   s->handler_factory = handler_factory;
-  /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
-  if (grpc_sockaddr_is_wildcard(addr, &port)) {
-    grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
+  for (size_t i = 0; i < num_listeners; ++i) {
+    /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
+    if (grpc_sockaddr_is_wildcard(addr, &port)) {
+      grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
+
+      /* Try listening on IPv6 first. */
+      addr = &wild6;
+      // TODO(rjshade): Test and propagate the returned grpc_error*:
+      GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
+          s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
+      allocated_port1 =
+          add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size);
+      if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
+        if (port == 0) {
+          /* This is the first time to bind to |addr|. If its port is still
+           * wildcard port, update |addr| with the ephermeral port returned by
+           * kernel. Thus |addr| can have a specific port in following
+           * iterations. */
+          grpc_sockaddr_set_port(addr, allocated_port1);
+          port = allocated_port1;
+        } else if (allocated_port1 >= 0) {
+          /* The following sucessfully created socket should have same port as
+           * the first one. */
+          GPR_ASSERT(port == allocated_port1);
+        }
+        /* A dualstack socket is created, no need to create corresponding IPV4
+         * socket. */
+        continue;
+      }
+
+      /* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
+      if (port == 0 && allocated_port1 > 0) {
+        /* |port| hasn't been assigned to an emphemeral port yet, |wild4| must
+         * have a wildcard port. Update it with the emphemeral port created
+         * during binding.*/
+        grpc_sockaddr_set_port(&wild4, allocated_port1);
+        port = allocated_port1;
+      }
+      /* |wild4| should have been updated with an emphemeral port by now. Use
+       * this IPV4 address to create a IPV4 socket. */
+      addr = &wild4;
+    }
 
-    /* Try listening on IPv6 first. */
-    addr = &wild6;
     // TODO(rjshade): Test and propagate the returned grpc_error*:
     GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
         s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
-    allocated_port1 =
-        add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size);
-    if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
-      goto done;
+    if (fd < 0) {
+      gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
     }
-
-    /* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
-    if (port == 0 && allocated_port1 > 0) {
-      grpc_sockaddr_set_port(&wild4, allocated_port1);
+    if (dsmode == GRPC_DSMODE_IPV4 &&
+        grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
+      addr = &addr4_copy;
+    }
+    allocated_port2 =
+        add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size);
+    if (port == 0) {
+      /* Update |addr| with the ephermeral port returned by kernel. So |addr|
+       * can have a specific port in following iterations. */
+      grpc_sockaddr_set_port(addr, allocated_port2);
+      port = allocated_port2;
+    } else if (allocated_port2 >= 0) {
+      GPR_ASSERT(port == allocated_port2);
     }
-    addr = &wild4;
-  }
-
-  // TODO(rjshade): Test and propagate the returned grpc_error*:
-  GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
-      s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
-  if (fd < 0) {
-    gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
-  }
-  if (dsmode == GRPC_DSMODE_IPV4 &&
-      grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
-    addr = &addr4_copy;
   }
-  allocated_port2 =
-      add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size);
 
- done:
   gpr_free(allocated_addr);
-  return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
+  return port;
 }
 
 int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index) {