grpc 1.23.0.pre1 → 1.23.0

Sign up to get free protection for your applications and access to all of the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 615c68ac481aac7f9ee4b892c3771195cff0010ae19368dde61829db1cacd662
4
- data.tar.gz: fa88df8e55e0f8d4bb210d4691e2711009742d6afe32572cc934511d82263108
3
+ metadata.gz: 80bb6eba87011feccebf84a90582f4b30aa4787d27c16d0355d2b8b84c37a491
4
+ data.tar.gz: a96dc9f4ec62474f7dd46a601a1eb91c1e7464ae4a42e08e6ddc6c7db8bc4c4d
5
5
  SHA512:
6
- metadata.gz: 4551f2eb88ec810b1b71e8b8dc942aba4a72d4ebd720d509b33f6b1b97fc1c0c2d459feb2f8820e007c26560ec9e1949f0fb06c1decdc00f23a93f881731f795
7
- data.tar.gz: d4b19292fad08b0b98647ba70bb50817144bb1d43d91d7e281fad0ee41b181fb6d942092ed9b538c20c84db7eea8dc5cf55c3d156b09704ac65046392ef83e70
6
+ metadata.gz: 5271f4ee3b1f7255e7475bb7234e0c70facae888a8bc1ca050642ff334a68afddc900545521ceb755c114c5e4a1189652be9940267a0d111584716f962a5a19d
7
+ data.tar.gz: d9b41a8aa65b9baf6a5f3779ed46eabe0f8b80ce7c6122ff81d4dc24ea1ab098198eb8a836eddd9d1670429f8a4831e0e6e99379989c34be1f522a83fb403de6
data/Makefile CHANGED
@@ -461,8 +461,8 @@ Q = @
461
461
  endif
462
462
 
463
463
  CORE_VERSION = 7.0.0
464
- CPP_VERSION = 1.23.0-pre1
465
- CSHARP_VERSION = 2.23.0-pre1
464
+ CPP_VERSION = 1.23.0
465
+ CSHARP_VERSION = 2.23.0
466
466
 
467
467
  CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
468
468
  CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -193,8 +193,6 @@ void ChannelData::EnterIdle() {
193
193
  GRPC_IDLE_FILTER_LOG("the channel will enter IDLE");
194
194
  // Hold a ref to the channel stack for the transport op.
195
195
  GRPC_CHANNEL_STACK_REF(channel_stack_, "idle transport op");
196
- // Initialize the transport op.
197
- memset(&idle_transport_op_, 0, sizeof(idle_transport_op_));
198
196
  idle_transport_op_.disconnect_with_error = grpc_error_set_int(
199
197
  GRPC_ERROR_CREATE_FROM_STATIC_STRING("enter idle"),
200
198
  GRPC_ERROR_INT_CHANNEL_CONNECTIVITY_STATE, GRPC_CHANNEL_IDLE);
@@ -74,6 +74,8 @@
74
74
  #define DEFAULT_MAX_PINGS_BETWEEN_DATA 2
75
75
  #define DEFAULT_MAX_PING_STRIKES 2
76
76
 
77
+ #define DEFAULT_MAX_PENDING_INDUCED_FRAMES 10000
78
+
77
79
  static int g_default_client_keepalive_time_ms =
78
80
  DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
79
81
  static int g_default_client_keepalive_timeout_ms =
@@ -105,6 +107,7 @@ static void write_action(void* t, grpc_error* error);
105
107
  static void write_action_end_locked(void* t, grpc_error* error);
106
108
 
107
109
  static void read_action_locked(void* t, grpc_error* error);
110
+ static void continue_read_action_locked(grpc_chttp2_transport* t);
108
111
 
109
112
  static void complete_fetch_locked(void* gs, grpc_error* error);
110
113
  /** Set a transport level setting, and push it to our peer */
@@ -797,10 +800,8 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
797
800
  !grpc_resource_user_safe_alloc(t->resource_user,
798
801
  GRPC_RESOURCE_QUOTA_CALL_SIZE)) {
799
802
  gpr_log(GPR_ERROR, "Memory exhausted, rejecting the stream.");
800
- grpc_slice_buffer_add(
801
- &t->qbuf,
802
- grpc_chttp2_rst_stream_create(
803
- id, static_cast<uint32_t>(GRPC_HTTP2_REFUSED_STREAM), nullptr));
803
+ grpc_chttp2_add_rst_stream_to_next_write(t, id, GRPC_HTTP2_REFUSED_STREAM,
804
+ nullptr);
804
805
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
805
806
  return nullptr;
806
807
  }
@@ -1045,6 +1046,19 @@ static void write_action_begin_locked(void* gt, grpc_error* error_ignored) {
1045
1046
  GRPC_CLOSURE_SCHED(
1046
1047
  GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
1047
1048
  GRPC_ERROR_NONE);
1049
+ if (t->reading_paused_on_pending_induced_frames) {
1050
+ GPR_ASSERT(t->num_pending_induced_frames == 0);
1051
+ /* We had paused reading, because we had many induced frames (SETTINGS
1052
+ * ACK, PINGS ACK and RST_STREAMS) pending in t->qbuf. Now that we have
1053
+ * been able to flush qbuf, we can resume reading. */
1054
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
1055
+ GPR_INFO,
1056
+ "transport %p : Resuming reading after being paused due to too "
1057
+ "many unwritten SETTINGS ACK, PINGS ACK and RST_STREAM frames",
1058
+ t));
1059
+ t->reading_paused_on_pending_induced_frames = false;
1060
+ continue_read_action_locked(t);
1061
+ }
1048
1062
  } else {
1049
1063
  GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN();
1050
1064
  set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing");
@@ -1114,7 +1128,6 @@ static void write_action_end_locked(void* tp, grpc_error* error) {
1114
1128
  }
1115
1129
 
1116
1130
  grpc_chttp2_end_write(t, GRPC_ERROR_REF(error));
1117
-
1118
1131
  GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
1119
1132
  }
1120
1133
 
@@ -2113,10 +2126,8 @@ void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
2113
2126
  grpc_http2_error_code http_error;
2114
2127
  grpc_error_get_status(due_to_error, s->deadline, nullptr, nullptr,
2115
2128
  &http_error, nullptr);
2116
- grpc_slice_buffer_add(
2117
- &t->qbuf,
2118
- grpc_chttp2_rst_stream_create(
2119
- s->id, static_cast<uint32_t>(http_error), &s->stats.outgoing));
2129
+ grpc_chttp2_add_rst_stream_to_next_write(
2130
+ t, s->id, static_cast<uint32_t>(http_error), &s->stats.outgoing);
2120
2131
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
2121
2132
  }
2122
2133
  }
@@ -2427,9 +2438,8 @@ static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
2427
2438
  grpc_slice_buffer_add(&t->qbuf, status_hdr);
2428
2439
  grpc_slice_buffer_add(&t->qbuf, message_pfx);
2429
2440
  grpc_slice_buffer_add(&t->qbuf, grpc_slice_ref_internal(slice));
2430
- grpc_slice_buffer_add(
2431
- &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
2432
- &s->stats.outgoing));
2441
+ grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
2442
+ &s->stats.outgoing);
2433
2443
 
2434
2444
  grpc_chttp2_mark_stream_closed(t, s, 1, 1, error);
2435
2445
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
@@ -2600,10 +2610,16 @@ static void read_action_locked(void* tp, grpc_error* error) {
2600
2610
  grpc_slice_buffer_reset_and_unref_internal(&t->read_buffer);
2601
2611
 
2602
2612
  if (keep_reading) {
2603
- const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
2604
- grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
2605
- grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t,
2606
- nullptr);
2613
+ if (t->num_pending_induced_frames >= DEFAULT_MAX_PENDING_INDUCED_FRAMES) {
2614
+ t->reading_paused_on_pending_induced_frames = true;
2615
+ GRPC_CHTTP2_IF_TRACING(
2616
+ gpr_log(GPR_INFO,
2617
+ "transport %p : Pausing reading due to too "
2618
+ "many unwritten SETTINGS ACK and RST_STREAM frames",
2619
+ t));
2620
+ } else {
2621
+ continue_read_action_locked(t);
2622
+ }
2607
2623
  GRPC_CHTTP2_UNREF_TRANSPORT(t, "keep_reading");
2608
2624
  } else {
2609
2625
  GRPC_CHTTP2_UNREF_TRANSPORT(t, "reading_action");
@@ -2612,6 +2628,12 @@ static void read_action_locked(void* tp, grpc_error* error) {
2612
2628
  GRPC_ERROR_UNREF(error);
2613
2629
  }
2614
2630
 
2631
+ static void continue_read_action_locked(grpc_chttp2_transport* t) {
2632
+ const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
2633
+ grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
2634
+ grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t, nullptr);
2635
+ }
2636
+
2615
2637
  // t is reffed prior to calling the first time, and once the callback chain
2616
2638
  // that kicks off finishes, it's unreffed
2617
2639
  static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
@@ -118,6 +118,7 @@ grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
118
118
  t->ping_acks = static_cast<uint64_t*>(gpr_realloc(
119
119
  t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks)));
120
120
  }
121
+ t->num_pending_induced_frames++;
121
122
  t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
122
123
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
123
124
  }
@@ -58,6 +58,14 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
58
58
  return slice;
59
59
  }
60
60
 
61
+ void grpc_chttp2_add_rst_stream_to_next_write(
62
+ grpc_chttp2_transport* t, uint32_t id, uint32_t code,
63
+ grpc_transport_one_way_stats* stats) {
64
+ t->num_pending_induced_frames++;
65
+ grpc_slice_buffer_add(&t->qbuf,
66
+ grpc_chttp2_rst_stream_create(id, code, stats));
67
+ }
68
+
61
69
  grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
62
70
  grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags) {
63
71
  if (length != 4) {
@@ -33,6 +33,13 @@ typedef struct {
33
33
  grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
34
34
  grpc_transport_one_way_stats* stats);
35
35
 
36
+ // Adds RST_STREAM frame to t->qbuf (buffer for the next write). Should be
37
+ // called when we want to add RST_STREAM and we are not in
38
+ // write_action_begin_locked.
39
+ void grpc_chttp2_add_rst_stream_to_next_write(
40
+ grpc_chttp2_transport* t, uint32_t id, uint32_t code,
41
+ grpc_transport_one_way_stats* stats);
42
+
36
43
  grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
37
44
  grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags);
38
45
  grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
@@ -132,6 +132,7 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
132
132
  if (is_last) {
133
133
  memcpy(parser->target_settings, parser->incoming_settings,
134
134
  GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
135
+ t->num_pending_induced_frames++;
135
136
  grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
136
137
  if (t->notify_on_receive_settings != nullptr) {
137
138
  GRPC_CLOSURE_SCHED(t->notify_on_receive_settings,
@@ -1668,9 +1668,8 @@ static void force_client_rst_stream(void* sp, grpc_error* error) {
1668
1668
  grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
1669
1669
  grpc_chttp2_transport* t = s->t;
1670
1670
  if (!s->write_closed) {
1671
- grpc_slice_buffer_add(
1672
- &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
1673
- &s->stats.outgoing));
1671
+ grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
1672
+ &s->stats.outgoing);
1674
1673
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
1675
1674
  grpc_chttp2_mark_stream_closed(t, s, true, true, GRPC_ERROR_NONE);
1676
1675
  }
@@ -493,6 +493,13 @@ struct grpc_chttp2_transport {
493
493
  grpc_core::ContextList* cl = nullptr;
494
494
  grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> channelz_socket;
495
495
  uint32_t num_messages_in_next_write = 0;
496
+ /** The number of pending induced frames (SETTINGS_ACK, PINGS_ACK and
497
+ * RST_STREAM) in the outgoing buffer (t->qbuf). If this number goes beyond
498
+ * DEFAULT_MAX_PENDING_INDUCED_FRAMES, we pause reading new frames. We would
499
+ * only continue reading when we are able to write to the socket again,
500
+ * thereby reducing the number of induced frames. */
501
+ uint32_t num_pending_induced_frames = 0;
502
+ bool reading_paused_on_pending_induced_frames = false;
496
503
  };
497
504
 
498
505
  typedef enum {
@@ -382,10 +382,9 @@ error_handler:
382
382
  if (s != nullptr) {
383
383
  grpc_chttp2_mark_stream_closed(t, s, true, false, err);
384
384
  }
385
- grpc_slice_buffer_add(
386
- &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
387
- GRPC_HTTP2_PROTOCOL_ERROR,
388
- &s->stats.outgoing));
385
+ grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
386
+ GRPC_HTTP2_PROTOCOL_ERROR,
387
+ &s->stats.outgoing);
389
388
  return init_skip_frame_parser(t, 0);
390
389
  } else {
391
390
  return err;
@@ -765,10 +764,9 @@ static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
765
764
  grpc_chttp2_parsing_become_skip_parser(t);
766
765
  if (s) {
767
766
  s->forced_close_error = err;
768
- grpc_slice_buffer_add(
769
- &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
770
- GRPC_HTTP2_PROTOCOL_ERROR,
771
- &s->stats.outgoing));
767
+ grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
768
+ GRPC_HTTP2_PROTOCOL_ERROR,
769
+ &s->stats.outgoing);
772
770
  } else {
773
771
  GRPC_ERROR_UNREF(err);
774
772
  }
@@ -219,6 +219,7 @@ class WriteContext {
219
219
  void FlushQueuedBuffers() {
220
220
  /* simple writes are queued to qbuf, and flushed here */
221
221
  grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
222
+ t_->num_pending_induced_frames = 0;
222
223
  GPR_ASSERT(t_->qbuf.count == 0);
223
224
  }
224
225
 
@@ -14,5 +14,5 @@
14
14
 
15
15
  # GRPC contains the General RPC module.
16
16
  module GRPC
17
- VERSION = '1.23.0.pre1'
17
+ VERSION = '1.23.0'
18
18
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: grpc
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.23.0.pre1
4
+ version: 1.23.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - gRPC Authors
8
8
  autorequire:
9
9
  bindir: src/ruby/bin
10
10
  cert_chain: []
11
- date: 2019-08-02 00:00:00.000000000 Z
11
+ date: 2019-08-15 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: google-protobuf
@@ -1622,48 +1622,48 @@ required_ruby_version: !ruby/object:Gem::Requirement
1622
1622
  version: 2.3.0
1623
1623
  required_rubygems_version: !ruby/object:Gem::Requirement
1624
1624
  requirements:
1625
- - - ">"
1625
+ - - ">="
1626
1626
  - !ruby/object:Gem::Version
1627
- version: 1.3.1
1627
+ version: '0'
1628
1628
  requirements: []
1629
1629
  rubygems_version: 3.0.4
1630
1630
  signing_key:
1631
1631
  specification_version: 4
1632
1632
  summary: GRPC system in Ruby
1633
1633
  test_files:
1634
- - src/ruby/spec/client_auth_spec.rb
1635
- - src/ruby/spec/errors_spec.rb
1634
+ - src/ruby/spec/server_credentials_spec.rb
1635
+ - src/ruby/spec/channel_connection_spec.rb
1636
1636
  - src/ruby/spec/channel_credentials_spec.rb
1637
+ - src/ruby/spec/google_rpc_status_utils_spec.rb
1638
+ - src/ruby/spec/testdata/client.key
1639
+ - src/ruby/spec/testdata/server1.pem
1640
+ - src/ruby/spec/testdata/ca.pem
1641
+ - src/ruby/spec/testdata/server1.key
1642
+ - src/ruby/spec/testdata/README
1643
+ - src/ruby/spec/testdata/client.pem
1644
+ - src/ruby/spec/client_auth_spec.rb
1645
+ - src/ruby/spec/call_credentials_spec.rb
1637
1646
  - src/ruby/spec/compression_options_spec.rb
1638
- - src/ruby/spec/spec_helper.rb
1639
- - src/ruby/spec/channel_connection_spec.rb
1640
1647
  - src/ruby/spec/server_spec.rb
1641
- - src/ruby/spec/support/services.rb
1642
- - src/ruby/spec/support/helpers.rb
1643
- - src/ruby/spec/call_credentials_spec.rb
1644
- - src/ruby/spec/client_server_spec.rb
1645
- - src/ruby/spec/server_credentials_spec.rb
1646
- - src/ruby/spec/channel_spec.rb
1647
1648
  - src/ruby/spec/call_spec.rb
1648
- - src/ruby/spec/time_consts_spec.rb
1649
- - src/ruby/spec/testdata/server1.key
1650
- - src/ruby/spec/testdata/ca.pem
1651
- - src/ruby/spec/testdata/server1.pem
1652
- - src/ruby/spec/testdata/client.pem
1653
- - src/ruby/spec/testdata/client.key
1654
- - src/ruby/spec/testdata/README
1655
- - src/ruby/spec/pb/health/checker_spec.rb
1656
- - src/ruby/spec/pb/duplicate/codegen_spec.rb
1657
- - src/ruby/spec/pb/codegen/package_option_spec.rb
1658
- - src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
1659
- - src/ruby/spec/error_sanity_spec.rb
1660
- - src/ruby/spec/generic/active_call_spec.rb
1661
- - src/ruby/spec/generic/rpc_desc_spec.rb
1662
- - src/ruby/spec/generic/rpc_server_pool_spec.rb
1663
- - src/ruby/spec/generic/service_spec.rb
1664
- - src/ruby/spec/generic/client_stub_spec.rb
1649
+ - src/ruby/spec/errors_spec.rb
1665
1650
  - src/ruby/spec/generic/server_interceptors_spec.rb
1666
1651
  - src/ruby/spec/generic/interceptor_registry_spec.rb
1652
+ - src/ruby/spec/generic/rpc_desc_spec.rb
1667
1653
  - src/ruby/spec/generic/rpc_server_spec.rb
1668
1654
  - src/ruby/spec/generic/client_interceptors_spec.rb
1669
- - src/ruby/spec/google_rpc_status_utils_spec.rb
1655
+ - src/ruby/spec/generic/active_call_spec.rb
1656
+ - src/ruby/spec/generic/service_spec.rb
1657
+ - src/ruby/spec/generic/client_stub_spec.rb
1658
+ - src/ruby/spec/generic/rpc_server_pool_spec.rb
1659
+ - src/ruby/spec/channel_spec.rb
1660
+ - src/ruby/spec/error_sanity_spec.rb
1661
+ - src/ruby/spec/spec_helper.rb
1662
+ - src/ruby/spec/client_server_spec.rb
1663
+ - src/ruby/spec/pb/health/checker_spec.rb
1664
+ - src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
1665
+ - src/ruby/spec/pb/codegen/package_option_spec.rb
1666
+ - src/ruby/spec/pb/duplicate/codegen_spec.rb
1667
+ - src/ruby/spec/time_consts_spec.rb
1668
+ - src/ruby/spec/support/helpers.rb
1669
+ - src/ruby/spec/support/services.rb