grpc 1.11.1 → 1.12.0

This release has been flagged as potentially problematic.

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/Makefile +225 -87
  3. data/etc/roots.pem +0 -33
  4. data/include/grpc/grpc_security.h +70 -0
  5. data/include/grpc/impl/codegen/port_platform.h +11 -0
  6. data/include/grpc/support/log.h +9 -1
  7. data/src/core/ext/filters/client_channel/client_channel.cc +305 -210
  8. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -1
  9. data/src/core/ext/filters/client_channel/lb_policy.cc +2 -2
  10. data/src/core/ext/filters/client_channel/lb_policy.h +4 -0
  11. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +12 -9
  12. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +168 -197
  13. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +368 -373
  14. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +498 -98
  15. data/src/core/ext/filters/client_channel/method_params.h +4 -0
  16. data/src/core/ext/filters/client_channel/resolver.h +4 -0
  17. data/src/core/ext/filters/client_channel/retry_throttle.h +4 -0
  18. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -2
  19. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +40 -15
  20. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +3 -3
  21. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -2
  22. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
  23. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -2
  24. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +3 -3
  25. data/src/core/ext/transport/chttp2/transport/writing.cc +5 -5
  26. data/src/core/ext/transport/inproc/inproc_transport.cc +41 -43
  27. data/src/core/lib/channel/channel_args.cc +28 -0
  28. data/src/core/lib/channel/channel_args.h +4 -0
  29. data/src/core/lib/channel/handshaker.cc +47 -0
  30. data/src/core/lib/channel/handshaker.h +4 -0
  31. data/src/core/lib/debug/trace.cc +2 -1
  32. data/src/core/lib/debug/trace.h +10 -1
  33. data/src/core/lib/gpr/log.cc +8 -2
  34. data/src/core/lib/gpr/log_android.cc +4 -0
  35. data/src/core/lib/gpr/log_linux.cc +4 -0
  36. data/src/core/lib/gpr/log_posix.cc +4 -0
  37. data/src/core/lib/gpr/log_windows.cc +5 -0
  38. data/src/core/lib/gprpp/inlined_vector.h +30 -34
  39. data/src/core/lib/gprpp/orphanable.h +4 -4
  40. data/src/core/lib/gprpp/ref_counted.h +4 -4
  41. data/src/core/lib/iomgr/call_combiner.cc +13 -13
  42. data/src/core/lib/iomgr/closure.h +3 -3
  43. data/src/core/lib/iomgr/combiner.cc +11 -11
  44. data/src/core/lib/iomgr/ev_epoll1_linux.cc +24 -24
  45. data/src/core/lib/iomgr/ev_epollex_linux.cc +48 -29
  46. data/src/core/lib/iomgr/ev_epollsig_linux.cc +2 -2
  47. data/src/core/lib/iomgr/ev_poll_posix.cc +9 -3
  48. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  49. data/src/core/lib/iomgr/executor.cc +6 -6
  50. data/src/core/lib/iomgr/resource_quota.cc +10 -11
  51. data/src/core/lib/iomgr/socket_utils_common_posix.cc +24 -0
  52. data/src/core/lib/iomgr/socket_utils_linux.cc +0 -1
  53. data/src/core/lib/iomgr/socket_utils_posix.cc +2 -3
  54. data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
  55. data/src/core/lib/iomgr/tcp_client_custom.cc +2 -2
  56. data/src/core/lib/iomgr/tcp_client_posix.cc +4 -4
  57. data/src/core/lib/iomgr/tcp_custom.cc +10 -10
  58. data/src/core/lib/iomgr/tcp_posix.cc +25 -25
  59. data/src/core/lib/iomgr/tcp_server_custom.cc +5 -5
  60. data/src/core/lib/iomgr/tcp_server_posix.cc +4 -25
  61. data/src/core/lib/iomgr/tcp_server_windows.cc +1 -0
  62. data/src/core/lib/iomgr/tcp_uv.cc +3 -0
  63. data/src/core/lib/iomgr/tcp_windows.cc +16 -0
  64. data/src/core/lib/iomgr/timer_generic.cc +27 -17
  65. data/src/core/lib/iomgr/timer_manager.cc +11 -12
  66. data/src/core/lib/iomgr/timer_uv.cc +3 -0
  67. data/src/core/lib/iomgr/udp_server.cc +104 -49
  68. data/src/core/lib/iomgr/udp_server.h +8 -4
  69. data/src/core/lib/profiling/basic_timers.cc +1 -0
  70. data/src/core/lib/security/credentials/alts/alts_credentials.h +0 -20
  71. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +7 -7
  72. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +1 -38
  73. data/src/core/lib/security/security_connector/security_connector.cc +19 -16
  74. data/src/core/lib/security/security_connector/security_connector.h +4 -3
  75. data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
  76. data/src/core/lib/security/transport/security_handshaker.cc +6 -2
  77. data/src/core/lib/slice/slice.cc +6 -2
  78. data/src/core/lib/slice/slice_buffer.cc +12 -4
  79. data/src/core/lib/slice/slice_hash_table.h +4 -0
  80. data/src/core/lib/slice/slice_weak_hash_table.h +4 -0
  81. data/src/core/lib/surface/call.cc +6 -6
  82. data/src/core/lib/surface/server.cc +16 -0
  83. data/src/core/lib/surface/version.cc +1 -1
  84. data/src/core/lib/transport/bdp_estimator.cc +3 -3
  85. data/src/core/lib/transport/bdp_estimator.h +2 -2
  86. data/src/core/lib/transport/connectivity_state.cc +6 -7
  87. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +4 -0
  88. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -0
  89. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +21 -0
  90. data/src/ruby/lib/grpc/version.rb +1 -1
  91. data/src/ruby/pb/generate_proto_ruby.sh +7 -1
  92. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +2 -5
  93. data/third_party/address_sorting/address_sorting.c +10 -9
  94. metadata +27 -28
  95. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +0 -253
--- a/data/src/core/ext/filters/client_channel/method_params.h
+++ b/data/src/core/ext/filters/client_channel/method_params.h
@@ -60,6 +60,10 @@ class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
   template <typename T, typename... Args>
   friend T* grpc_core::New(Args&&... args);
 
+  // So Delete() can call our private dtor.
+  template <typename T>
+  friend void grpc_core::Delete(T*);
+
   ClientChannelMethodParams() {}
   virtual ~ClientChannelMethodParams() {}
 
--- a/data/src/core/ext/filters/client_channel/resolver.h
+++ b/data/src/core/ext/filters/client_channel/resolver.h
@@ -105,6 +105,10 @@ class Resolver : public InternallyRefCountedWithTracing<Resolver> {
   GRPC_ABSTRACT_BASE_CLASS
 
  protected:
+  // So Delete() can access our protected dtor.
+  template <typename T>
+  friend void Delete(T*);
+
   /// Does NOT take ownership of the reference to \a combiner.
   // TODO(roth): Once we have a C++-like interface for combiners, this
   // API should change to take a RefCountedPtr<>, so that we always take
--- a/data/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/data/src/core/ext/filters/client_channel/retry_throttle.h
@@ -42,6 +42,10 @@ class ServerRetryThrottleData : public RefCounted<ServerRetryThrottleData> {
   intptr_t milli_token_ratio() const { return milli_token_ratio_; }
 
  private:
+  // So Delete() can call our private dtor.
+  template <typename T>
+  friend void grpc_core::Delete(T*);
+
   ~ServerRetryThrottleData();
 
   void GetReplacementThrottleDataIfNeeded(
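
The three hunks above apply the same C++ idiom: these classes are created and destroyed only through grpc_core::New() and grpc_core::Delete(), and keep their destructors private (or protected), so Delete() has to be befriended before it can invoke the destructor. A minimal self-contained sketch of the idiom; the New/Delete helpers here are simplified stand-ins for the ones in gprpp, not grpc's actual implementations:

```cpp
#include <utility>

namespace grpc_core {

// Simplified stand-ins for grpc_core::New / grpc_core::Delete.
template <typename T, typename... Args>
T* New(Args&&... args) {
  return new T(std::forward<Args>(args)...);
}

template <typename T>
void Delete(T* p) {
  delete p;  // needs access to T's destructor
}

class RefCountedThing {
 public:
  template <typename T, typename... Args>
  friend T* grpc_core::New(Args&&... args);

 private:
  // Without this friend declaration, Delete<RefCountedThing>() could not
  // invoke the private destructor and compilation would fail.
  template <typename T>
  friend void grpc_core::Delete(T*);

  RefCountedThing() {}
  ~RefCountedThing() {}
};

}  // namespace grpc_core

int main() {
  auto* thing = grpc_core::New<grpc_core::RefCountedThing>();
  grpc_core::Delete(thing);
}
```

This is exactly why the hunks exist: before them, Delete<ServerRetryThrottleData>() (and the other two) could not reach the non-public destructor.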
--- a/data/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/data/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -234,7 +234,7 @@ static void finish_send_message(grpc_call_element* elem) {
                           static_cast<float>(before_size);
     GPR_ASSERT(grpc_message_compression_algorithm_name(
         calld->message_compression_algorithm, &algo_name));
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
             "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
             " bytes (%.2f%% savings)",
             algo_name, before_size, after_size, 100 * savings_ratio);
@@ -246,7 +246,7 @@ static void finish_send_message(grpc_call_element* elem) {
     const char* algo_name;
     GPR_ASSERT(grpc_message_compression_algorithm_name(
         calld->message_compression_algorithm, &algo_name));
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
             "Algorithm '%s' enabled but decided not to compress. Input size: "
             "%" PRIuPTR,
             algo_name, calld->slices.length);
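
The two hunks above show the pattern that accounts for most of the rest of this diff: trace messages that are already gated behind a tracer flag (grpc_http_trace, grpc_resource_quota_trace, INPROC_LOG, and so on) move from GPR_DEBUG to GPR_INFO severity, so enabling a tracer surfaces its output at info verbosity (GRPC_VERBOSITY=info) rather than requiring debug verbosity. A small sketch of the gpr logging call as used here, assuming grpc's gpr headers are installed:

```cpp
#include <grpc/support/log.h>

int main() {
  // gpr_log takes a severity macro (GPR_DEBUG, GPR_INFO, GPR_ERROR) plus a
  // printf-style format string. After this release, tracer output like the
  // compression stats above is emitted at INFO, which is visible whenever
  // the GRPC_VERBOSITY environment variable is "info" or "debug".
  gpr_log(GPR_INFO, "Compressed[%s] %d bytes vs. %d bytes", "gzip", 100, 42);
  return 0;
}
```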
--- a/data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -807,7 +807,7 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
 
 static void set_write_state(grpc_chttp2_transport* t,
                             grpc_chttp2_write_state st, const char* reason) {
-  GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
+  GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t,
                                  t->is_client ? "CLIENT" : "SERVER",
                                  write_state_name(t->write_state),
                                  write_state_name(st), reason));
@@ -1072,7 +1072,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
                                      uint32_t goaway_error,
                                      grpc_slice goaway_text) {
   // GRPC_CHTTP2_IF_TRACING(
-  //     gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
+  //     gpr_log(GPR_INFO, "got goaway [%d]: %s", goaway_error, msg));
 
   // Discard the error from a previous goaway frame (if any)
   if (t->goaway_error != GRPC_ERROR_NONE) {
@@ -1118,7 +1118,7 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
          grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
     /* safe since we can't (legally) be parsing this stream yet */
     GRPC_CHTTP2_IF_TRACING(gpr_log(
-        GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
+        GPR_INFO, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
         t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
 
     GPR_ASSERT(s->id == 0);
@@ -1183,7 +1183,7 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
   if (grpc_http_trace.enabled()) {
     const char* errstr = grpc_error_string(error);
     gpr_log(
-        GPR_DEBUG,
+        GPR_INFO,
         "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
         "write_state=%s",
         t, closure,
@@ -1336,7 +1336,7 @@ static void perform_stream_op_locked(void* stream_op,
 
   if (grpc_http_trace.enabled()) {
     char* str = grpc_transport_stream_op_batch_string(op);
-    gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
+    gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str,
             op->on_complete);
     gpr_free(str);
     if (op->send_initial_metadata) {
@@ -1638,7 +1638,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
 
   if (grpc_http_trace.enabled()) {
     char* str = grpc_transport_stream_op_batch_string(op);
-    gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
+    gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s, str);
     gpr_free(str);
   }
 
@@ -1676,6 +1676,33 @@ static void send_ping_locked(grpc_chttp2_transport* t,
                     GRPC_ERROR_NONE);
 }
 
+/*
+ * Specialized form of send_ping_locked for keepalive ping. If there is already
+ * a ping in progress, the keepalive ping would piggyback onto that ping,
+ * instead of waiting for that ping to complete and then starting a new ping.
+ */
+static void send_keepalive_ping_locked(grpc_chttp2_transport* t) {
+  if (t->closed_with_error != GRPC_ERROR_NONE) {
+    GRPC_CLOSURE_SCHED(&t->start_keepalive_ping_locked,
+                       GRPC_ERROR_REF(t->closed_with_error));
+    GRPC_CLOSURE_SCHED(&t->finish_keepalive_ping_locked,
+                       GRPC_ERROR_REF(t->closed_with_error));
+    return;
+  }
+  grpc_chttp2_ping_queue* pq = &t->ping_queue;
+  if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
+    /* There is a ping in flight. Add yourself to the inflight closure list. */
+    GRPC_CLOSURE_SCHED(&t->start_keepalive_ping_locked, GRPC_ERROR_NONE);
+    grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT],
+                             &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE);
+    return;
+  }
+  grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE],
+                           &t->start_keepalive_ping_locked, GRPC_ERROR_NONE);
+  grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
+                           &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE);
+}
+
 static void retry_initiate_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
   t->ping_state.is_delayed_ping_timer_set = false;
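
This is the main behavioral change to chttp2_transport.cc in this release: if a ping (for example a BDP probe) is already in flight when the keepalive timer fires, the keepalive's start/finish closures now attach to that ping rather than queueing a second one. A self-contained sketch of the piggybacking idea; the PingQueue class below is illustrative, not grpc's ping_queue:

```cpp
#include <functional>
#include <iostream>
#include <vector>

// Illustrative only: a queue that lets a new "keepalive" request piggyback
// on an operation already in flight, mirroring the INFLIGHT-list handling
// in send_keepalive_ping_locked above.
class PingQueue {
 public:
  void SendPing(std::function<void()> on_start, std::function<void()> on_ack) {
    if (inflight_) {
      // A ping is already on the wire: run on_start now, and have on_ack
      // fire when the existing ping is acknowledged.
      on_start();
      inflight_acks_.push_back(std::move(on_ack));
      return;
    }
    inflight_ = true;
    on_start();
    inflight_acks_.push_back(std::move(on_ack));
    std::cout << "ping written to the wire\n";
  }

  void OnPingAck() {
    for (auto& cb : inflight_acks_) cb();
    inflight_acks_.clear();
    inflight_ = false;
  }

 private:
  bool inflight_ = false;
  std::vector<std::function<void()>> inflight_acks_;
};

int main() {
  PingQueue q;
  q.SendPing([] { std::cout << "bdp ping started\n"; },
             [] { std::cout << "bdp ping acked\n"; });
  // Keepalive fires while the first ping is still in flight: it piggybacks.
  q.SendPing([] { std::cout << "keepalive started\n"; },
             [] { std::cout << "keepalive acked (piggybacked)\n"; });
  q.OnPingAck();  // one ack from the peer completes both waiters
}
```

One ack then completes every waiter, avoiding redundant PING frames when keepalive and BDP probes coincide.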
@@ -2502,7 +2529,7 @@ static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
 static void start_bdp_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
   if (grpc_http_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
+    gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string,
             grpc_error_string(error));
   }
   /* Reset the keepalive ping timer */
@@ -2515,7 +2542,7 @@ static void start_bdp_ping_locked(void* tp, grpc_error* error) {
 static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
   if (grpc_http_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
+    gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string,
             grpc_error_string(error));
   }
   if (error != GRPC_ERROR_NONE) {
@@ -2619,8 +2646,7 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
       grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
     GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
-    send_ping_locked(t, &t->start_keepalive_ping_locked,
-                     &t->finish_keepalive_ping_locked);
+    send_keepalive_ping_locked(t);
     grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
   } else {
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
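
init_keepalive_ping_locked is the only call site switched to the new helper. For context, keepalive pings are enabled and tuned through channel arguments; a hedged sketch of a client-side configuration using grpc's C++ API (the argument macros are the ones defined in grpc_types.h; the values are illustrative):

```cpp
#include <grpcpp/create_channel.h>
#include <grpcpp/grpcpp.h>

int main() {
  grpc::ChannelArguments args;
  // Send a keepalive ping after 30 seconds without activity...
  args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 30 * 1000);
  // ...and consider the connection dead if no ack arrives within 10 seconds.
  args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 10 * 1000);
  // Allow keepalive pings even when there are no active calls.
  args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);
  auto channel = grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);
  (void)channel;
  return 0;
}
```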
@@ -2690,8 +2716,7 @@ static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
 static void connectivity_state_set(grpc_chttp2_transport* t,
                                    grpc_connectivity_state state,
                                    grpc_error* error, const char* reason) {
-  GRPC_CHTTP2_IF_TRACING(
-      gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
+  GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "set connectivity_state=%d", state));
   grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error,
                               reason);
 }
@@ -2958,7 +2983,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
     /* Channel with no active streams: send a goaway to try and make it
      * disconnect cleanly */
     if (grpc_resource_quota_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+      gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
              t->peer_string);
     }
     send_goaway(t,
@@ -2966,7 +2991,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
                 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
                 GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
   } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
             "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
             " streams",
             t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
@@ -2987,7 +3012,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
     grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
         grpc_chttp2_stream_map_rand(&t->stream_map));
     if (grpc_resource_quota_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+      gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d", t->peer_string,
              s->id);
     }
     grpc_chttp2_cancel_stream(
@@ -217,14 +217,14 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
217
217
  t->initial_window_update += static_cast<int64_t>(parser->value) -
218
218
  parser->incoming_settings[id];
219
219
  if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) {
220
- gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change",
221
- t, t->is_client ? "cli" : "svr",
220
+ gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
221
+ t->is_client ? "cli" : "svr",
222
222
  static_cast<int>(t->initial_window_update));
223
223
  }
224
224
  }
225
225
  parser->incoming_settings[id] = parser->value;
226
226
  if (grpc_http_trace.enabled()) {
227
- gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d",
227
+ gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
228
228
  t->is_client ? "CLI" : "SVR", t->peer_string, sp->name,
229
229
  parser->value);
230
230
  }
--- a/data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -470,7 +470,7 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
       v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
     }
     gpr_log(
-        GPR_DEBUG,
+        GPR_INFO,
         "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
         k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
         grpc_slice_is_interned(GRPC_MDKEY(elem)),
@@ -654,7 +654,7 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
   }
   c->advertise_table_size_change = 1;
   if (grpc_http_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
+    gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
   }
 }
 
--- a/data/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/data/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -633,7 +633,7 @@ static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md,
       v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
     }
     gpr_log(
-        GPR_DEBUG,
+        GPR_INFO,
         "Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
         k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md),
         grpc_slice_is_interned(GRPC_MDKEY(md)),
--- a/data/src/core/ext/transport/chttp2/transport/hpack_table.cc
+++ b/data/src/core/ext/transport/chttp2/transport/hpack_table.cc
@@ -247,7 +247,7 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
     return;
   }
   if (grpc_http_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+    gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes);
   }
   while (tbl->mem_used > max_bytes) {
     evict1(tbl);
@@ -270,7 +270,7 @@ grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
     return err;
   }
   if (grpc_http_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
+    gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes);
   }
   while (tbl->mem_used > bytes) {
     evict1(tbl);
--- a/data/src/core/ext/transport/chttp2/transport/stream_lists.cc
+++ b/data/src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -68,7 +68,7 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
   }
   *stream = s;
   if (s && grpc_trace_http2_stream_state.enabled()) {
-    gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+    gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
             t->is_client ? "cli" : "svr", stream_list_id_string(id));
   }
   return s != nullptr;
@@ -90,7 +90,7 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
     t->lists[id].tail = s->links[id].prev;
   }
   if (grpc_trace_http2_stream_state.enabled()) {
-    gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+    gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
             t->is_client ? "cli" : "svr", stream_list_id_string(id));
   }
 }
@@ -122,7 +122,7 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
   t->lists[id].tail = s;
   s->included[id] = 1;
   if (grpc_trace_http2_stream_state.enabled()) {
-    gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+    gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
             t->is_client ? "cli" : "svr", stream_list_id_string(id));
   }
 }
--- a/data/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/data/src/core/ext/transport/chttp2/transport/writing.cc
@@ -52,7 +52,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
   if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
     /* ping already in-flight: wait */
     if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
+      gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging",
              t->is_client ? "CLIENT" : "SERVER", t->peer_string);
     }
     return;
@@ -61,7 +61,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
       t->ping_policy.max_pings_without_data != 0) {
     /* need to receive something of substance before sending a ping again */
     if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
+      gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
              t->is_client ? "CLIENT" : "SERVER", t->peer_string,
              t->ping_state.pings_before_data_required,
              t->ping_policy.max_pings_without_data);
@@ -81,7 +81,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
   if (next_allowed_ping > now) {
     /* not enough elapsed time between successive pings */
     if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "%s: Ping delayed [%p]: not enough time elapsed since last ping. "
              " Last ping %f: Next ping %f: Now %f",
              t->is_client ? "CLIENT" : "SERVER", t->peer_string,
@@ -107,7 +107,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
   GRPC_STATS_INC_HTTP2_PINGS_SENT();
   t->ping_state.last_ping_sent_time = now;
   if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
+    gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d",
            t->is_client ? "CLIENT" : "SERVER", t->peer_string,
            t->ping_state.pings_before_data_required,
            t->ping_policy.max_pings_without_data);
@@ -401,7 +401,7 @@ class StreamWriteContext {
   StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
       : write_context_(write_context), t_(write_context->transport()), s_(s) {
     GRPC_CHTTP2_IF_TRACING(
-        gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
+        gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
                 t_->is_client ? "CLIENT" : "SERVER", s->id,
                 s->sent_initial_metadata, s->send_initial_metadata != nullptr,
                 (int)(s->flow_control->local_window_delta() -
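
The maybe_initiate_ping hunks above trace the two throttling conditions under which an outgoing ping is withheld: too many pings since data last flowed, and too little time since the previous ping. Both limits are driven by channel arguments; a hedged sketch of tuning them from a C++ client (the macros are the ones defined in grpc_types.h; the values are illustrative):

```cpp
#include <grpcpp/create_channel.h>
#include <grpcpp/grpcpp.h>

int main() {
  grpc::ChannelArguments args;
  // Lift the cap on pings sent without intervening data frames
  // (the "too many recent pings" condition); 0 disables the limit.
  args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0);
  // Require at least 10 seconds between pings while no data is flowing
  // (the "not enough time elapsed since last ping" condition).
  args.SetInt(GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS,
              10 * 1000);
  auto channel = grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);
  (void)channel;
  return 0;
}
```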
--- a/data/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/data/src/core/ext/transport/inproc/inproc_transport.cc
@@ -125,12 +125,12 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
 static void op_state_machine(void* arg, grpc_error* error);
 
 static void ref_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
+  INPROC_LOG(GPR_INFO, "ref_transport %p", t);
   gpr_ref(&t->refs);
 }
 
 static void really_destroy_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t);
+  INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
   grpc_connectivity_state_destroy(&t->connectivity);
   if (gpr_unref(&t->mu->refs)) {
     gpr_free(t->mu);
@@ -139,7 +139,7 @@ static void really_destroy_transport(inproc_transport* t) {
 }
 
 static void unref_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_DEBUG, "unref_transport %p", t);
+  INPROC_LOG(GPR_INFO, "unref_transport %p", t);
   if (gpr_unref(&t->refs)) {
     really_destroy_transport(t);
   }
@@ -154,17 +154,17 @@ static void unref_transport(inproc_transport* t) {
 #endif
 
 static void ref_stream(inproc_stream* s, const char* reason) {
-  INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason);
+  INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
   STREAM_REF(s->refs, reason);
 }
 
 static void unref_stream(inproc_stream* s, const char* reason) {
-  INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason);
+  INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
   STREAM_UNREF(s->refs, reason);
 }
 
 static void really_destroy_stream(inproc_stream* s) {
-  INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
+  INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
 
   GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
   GRPC_ERROR_UNREF(s->cancel_self_error);
@@ -225,7 +225,7 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
 static int init_stream(grpc_transport* gt, grpc_stream* gs,
                        grpc_stream_refcount* refcount, const void* server_data,
                        gpr_arena* arena) {
-  INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
+  INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
   inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
   s->arena = arena;
@@ -282,8 +282,8 @@ static int init_stream(grpc_transport* gt, grpc_stream* gs,
     // Pass the client-side stream address to the server-side for a ref
     ref_stream(s, "inproc_init_stream:clt");  // ref it now on behalf of server
                                               // side to avoid destruction
-    INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
-               st->accept_stream_cb, st->accept_stream_data);
+    INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
+               st->accept_stream_data);
     (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
   } else {
     // This is the server-side and is being called through accept_stream_cb
@@ -378,7 +378,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
   int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
 
   if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
-    INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
+    INPROC_LOG(GPR_INFO, "%s %p %p %p", msg, s, op, error);
     GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error));
   }
 }
@@ -393,7 +393,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
 }
 
 static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
-  INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
+  INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
   // If we're failing this side, we need to make sure that
   // we also send or have already sent trailing metadata
   if (!s->trailing_md_sent) {
@@ -458,7 +458,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
       *s->recv_initial_md_op->payload->recv_initial_metadata
            .trailing_metadata_available = true;
     }
-    INPROC_LOG(GPR_DEBUG,
+    INPROC_LOG(GPR_INFO,
                "fail_helper %p scheduling initial-metadata-ready %p %p", s,
                error, err);
     GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@@ -472,7 +472,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
     s->recv_initial_md_op = nullptr;
   }
   if (s->recv_message_op) {
-    INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
+    INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %p", s,
                error);
     GRPC_CLOSURE_SCHED(
         s->recv_message_op->payload->recv_message.recv_message_ready,
@@ -496,9 +496,8 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
     s->send_trailing_md_op = nullptr;
   }
   if (s->recv_trailing_md_op) {
-    INPROC_LOG(GPR_DEBUG,
-               "fail_helper %p scheduling trailing-md-on-complete %p", s,
-               error);
+    INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p",
+               s, error);
     complete_if_batch_end_locked(
         s, error, s->recv_trailing_md_op,
         "fail_helper scheduling recv-trailing-metadata-on-complete");
@@ -549,7 +548,7 @@ static void message_transfer_locked(inproc_stream* sender,
   receiver->recv_stream.Init(&receiver->recv_message, 0);
   receiver->recv_message_op->payload->recv_message.recv_message->reset(
       receiver->recv_stream.get());
-  INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
+  INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready",
              receiver);
   GRPC_CLOSURE_SCHED(
       receiver->recv_message_op->payload->recv_message.recv_message_ready,
@@ -577,7 +576,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
 
   bool needs_close = false;
 
-  INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
+  INPROC_LOG(GPR_INFO, "op_state_machine %p", arg);
   inproc_stream* s = static_cast<inproc_stream*>(arg);
   gpr_mu* mu = &s->t->mu->mu;  // keep aside in case s gets closed
   gpr_mu_lock(mu);
@@ -626,7 +625,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
                           : &other->to_read_trailing_md_filled;
     if (*destfilled || s->trailing_md_sent) {
       // The buffer is already in use; that's an error!
-      INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
+      INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
       new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
       fail_helper_locked(s, GRPC_ERROR_REF(new_err));
       goto done;
@@ -639,7 +638,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
     }
     s->trailing_md_sent = true;
     if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
-      INPROC_LOG(GPR_DEBUG,
+      INPROC_LOG(GPR_INFO,
                  "op_state_machine %p scheduling trailing-md-on-complete", s);
       GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
                          GRPC_ERROR_NONE);
@@ -658,7 +657,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
       new_err =
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
       INPROC_LOG(
-          GPR_DEBUG,
+          GPR_INFO,
           "op_state_machine %p scheduling on_complete errors for already "
           "recvd initial md %p",
           s, new_err);
@@ -684,7 +683,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
     }
     grpc_metadata_batch_clear(&s->to_read_initial_md);
     s->to_read_initial_md_filled = false;
-    INPROC_LOG(GPR_DEBUG,
+    INPROC_LOG(GPR_INFO,
                "op_state_machine %p scheduling initial-metadata-ready %p", s,
                new_err);
     GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@@ -696,7 +695,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
     s->recv_initial_md_op = nullptr;
 
     if (new_err != GRPC_ERROR_NONE) {
-      INPROC_LOG(GPR_DEBUG,
+      INPROC_LOG(GPR_INFO,
                  "op_state_machine %p scheduling on_complete errors2 %p", s,
                  new_err);
       fail_helper_locked(s, GRPC_ERROR_REF(new_err));
@@ -719,7 +718,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
       new_err =
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
       INPROC_LOG(
-          GPR_DEBUG,
+          GPR_INFO,
           "op_state_machine %p scheduling on_complete errors for already "
          "recvd trailing md %p",
          s, new_err);
@@ -729,7 +728,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
     if (s->recv_message_op != nullptr) {
       // This message needs to be wrapped up because it will never be
       // satisfied
-      INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
+      INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
       GRPC_CLOSURE_SCHED(
           s->recv_message_op->payload->recv_message.recv_message_ready,
           GRPC_ERROR_NONE);
@@ -764,7 +763,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
       // (If the server hasn't already sent its trailing md, it doesn't have
       // a final status, so don't mark this op complete)
       if (s->t->is_client || s->trailing_md_sent) {
-        INPROC_LOG(GPR_DEBUG,
+        INPROC_LOG(GPR_INFO,
                    "op_state_machine %p scheduling trailing-md-on-complete %p",
                    s, new_err);
         GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
@@ -772,21 +771,21 @@ static void op_state_machine(void* arg, grpc_error* error) {
         s->recv_trailing_md_op = nullptr;
         needs_close = true;
       } else {
-        INPROC_LOG(GPR_DEBUG,
+        INPROC_LOG(GPR_INFO,
                    "op_state_machine %p server needs to delay handling "
                    "trailing-md-on-complete %p",
                    s, new_err);
       }
     } else {
       INPROC_LOG(
-          GPR_DEBUG,
+          GPR_INFO,
           "op_state_machine %p has trailing md but not yet waiting for it", s);
     }
   }
   if (s->trailing_md_recvd && s->recv_message_op) {
     // No further message will come on this stream, so finish off the
     // recv_message_op
-    INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
+    INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
     GRPC_CLOSURE_SCHED(
         s->recv_message_op->payload->recv_message.recv_message_ready,
         GRPC_ERROR_NONE);
@@ -810,7 +809,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
     // Didn't get the item we wanted so we still need to get
     // rescheduled
     INPROC_LOG(
-        GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s,
+        GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s,
         s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
         s->recv_message_op, s->recv_trailing_md_op);
     s->ops_needed = true;
@@ -826,8 +825,7 @@ done:
 
 static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
   bool ret = false;  // was the cancel accepted
-  INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
-             grpc_error_string(error));
+  INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
   if (s->cancel_self_error == GRPC_ERROR_NONE) {
     ret = true;
     s->cancel_self_error = GRPC_ERROR_REF(error);
@@ -877,7 +875,7 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
 
 static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                               grpc_transport_stream_op_batch* op) {
-  INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op);
+  INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
   inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
   gpr_mu* mu = &s->t->mu->mu;  // save aside in case s gets closed
   gpr_mu_lock(mu);
@@ -907,7 +905,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
     // already self-canceled so still give it an error
     error = GRPC_ERROR_REF(s->cancel_self_error);
   } else {
-    INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s,
+    INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
                s->t->is_client ? "client" : "server",
                op->send_initial_metadata ? " send_initial_metadata" : "",
                op->send_message ? " send_message" : "",
@@ -936,7 +934,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                            : &other->to_read_initial_md_filled;
     if (*destfilled || s->initial_md_sent) {
       // The buffer is already in use; that's an error!
-      INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s);
+      INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s);
       error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
     } else {
       if (!other || !other->closed) {
@@ -1013,7 +1011,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
               true;
         }
         INPROC_LOG(
-            GPR_DEBUG,
+            GPR_INFO,
             "perform_stream_op error %p scheduling initial-metadata-ready %p",
             s, error);
         GRPC_CLOSURE_SCHED(
@@ -1022,14 +1020,14 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
       }
       if (op->recv_message) {
         INPROC_LOG(
-            GPR_DEBUG,
+            GPR_INFO,
             "perform_stream_op error %p scheduling recv message-ready %p", s,
             error);
         GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
                            GRPC_ERROR_REF(error));
       }
     }
-    INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s,
+    INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s,
                error);
     GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error));
   }
@@ -1042,7 +1040,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
 }
 
 static void close_transport_locked(inproc_transport* t) {
-  INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed);
+  INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
   grpc_connectivity_state_set(
       &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."),
@@ -1063,7 +1061,7 @@ static void close_transport_locked(inproc_transport* t) {
 
 static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
-  INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
+  INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
   gpr_mu_lock(&t->mu->mu);
   if (op->on_connectivity_state_change) {
     grpc_connectivity_state_notify_on_state_change(
@@ -1096,7 +1094,7 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
 
 static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
                            grpc_closure* then_schedule_closure) {
-  INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure);
+  INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
   inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
   s->closure_at_destroy = then_schedule_closure;
   really_destroy_stream(s);
@@ -1104,7 +1102,7 @@ static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
 
 static void destroy_transport(grpc_transport* gt) {
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
-  INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t);
+  INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
   gpr_mu_lock(&t->mu->mu);
   close_transport_locked(t);
   gpr_mu_unlock(&t->mu->mu);
@@ -1165,7 +1163,7 @@ static void inproc_transports_create(grpc_transport** server_transport,
                                      const grpc_channel_args* server_args,
                                      grpc_transport** client_transport,
                                      const grpc_channel_args* client_args) {
-  INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
+  INPROC_LOG(GPR_INFO, "inproc_transports_create");
   inproc_transport* st =
       static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
   inproc_transport* ct =
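
The inproc hunks above are all log-severity changes to the in-process transport's state machine. For reference, recent grpc versions expose this transport through grpc::Server::InProcessChannel() in the C++ API; a minimal sketch of the wiring (no services registered, purely illustrative):

```cpp
#include <grpcpp/grpcpp.h>
#include <grpcpp/server_builder.h>

int main() {
  grpc::ServerBuilder builder;
  // No listening port is needed: the in-process transport connects client
  // and server inside one process, without sockets.
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

  grpc::ChannelArguments args;
  std::shared_ptr<grpc::Channel> channel = server->InProcessChannel(args);
  // Stubs created on `channel` would talk to `server` through the
  // inproc_transport code touched by the hunks above.

  server->Shutdown();
  return 0;
}
```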