grpc 1.22.0 → 1.22.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 6a2f7166a040e398aa494db46829ce9aafec84f97936e8de954a310e177509cd
-  data.tar.gz: 39b0dbdc11865fe94ac5f4a57dcbb29da141e29beef5c7432d12c035e9516d3f
+  metadata.gz: 59a46a4356ab4bc01552c37a23751a5d6bfcc36b3c4cf0bb2a720c858501f398
+  data.tar.gz: 57bccc2c3faeb63304d3c81a50a86264b2cb21a755b0f92d3a3abe7fd617857d
 SHA512:
-  metadata.gz: 4d7e62c1c28fd7e18896b286af756cec2b8b80aac5dd1c92a9ba5c5c26175e1955714bda5ad9347677ffd40c4a3517ca646f8b5fbc156999969ef716dd558521
-  data.tar.gz: ac8bc19476eab20e036291d94eaf802c073b650672fcd1856c6f139d94db9a993c3aba1a9c8907a99a5732dbd39f1bb0cac9eca9cef6f4a04766f246c0cfaa7d
+  metadata.gz: 768b080bb77d979bf4a20e9148227ab7830d4c0f786ca940c157f2f4755098456ac6045cc04f5215f1494e7b5103155ff184561cf822bd751bf527ab6ae5937d
+  data.tar.gz: 846f5302f8098027e2e59d72678dfdb97700a8b00d8c8984ea3699d5f098a10896a7147b085f1d671be1e2258925f3c7ec8a0cd14437d7539eb090b8f1db3743
data/Makefile CHANGED
@@ -460,8 +460,8 @@ Q = @
 endif
 
 CORE_VERSION = 7.0.0
-CPP_VERSION = 1.22.0
-CSHARP_VERSION = 1.22.0
+CPP_VERSION = 1.22.1
+CSHARP_VERSION = 1.22.1
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -74,6 +74,8 @@
 #define DEFAULT_MAX_PINGS_BETWEEN_DATA 2
 #define DEFAULT_MAX_PING_STRIKES 2
 
+#define DEFAULT_MAX_PENDING_INDUCED_FRAMES 10000
+
 static int g_default_client_keepalive_time_ms =
     DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
 static int g_default_client_keepalive_timeout_ms =
@@ -105,6 +107,7 @@ static void write_action(void* t, grpc_error* error);
 static void write_action_end_locked(void* t, grpc_error* error);
 
 static void read_action_locked(void* t, grpc_error* error);
+static void continue_read_action_locked(grpc_chttp2_transport* t);
 
 static void complete_fetch_locked(void* gs, grpc_error* error);
 /** Set a transport level setting, and push it to our peer */
@@ -800,10 +803,8 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
       !grpc_resource_user_safe_alloc(t->resource_user,
                                      GRPC_RESOURCE_QUOTA_CALL_SIZE)) {
     gpr_log(GPR_ERROR, "Memory exhausted, rejecting the stream.");
-    grpc_slice_buffer_add(
-        &t->qbuf,
-        grpc_chttp2_rst_stream_create(
-            id, static_cast<uint32_t>(GRPC_HTTP2_REFUSED_STREAM), nullptr));
+    grpc_chttp2_add_rst_stream_to_next_write(t, id, GRPC_HTTP2_REFUSED_STREAM,
+                                             nullptr);
     grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
     return nullptr;
   }
@@ -1048,6 +1049,19 @@ static void write_action_begin_locked(void* gt, grpc_error* error_ignored) {
     GRPC_CLOSURE_SCHED(
         GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
         GRPC_ERROR_NONE);
+    if (t->reading_paused_on_pending_induced_frames) {
+      GPR_ASSERT(t->num_pending_induced_frames == 0);
+      /* We had paused reading, because we had many induced frames (SETTINGS
+       * ACK, PINGS ACK and RST_STREAMS) pending in t->qbuf. Now that we have
+       * been able to flush qbuf, we can resume reading. */
+      GRPC_CHTTP2_IF_TRACING(gpr_log(
+          GPR_INFO,
+          "transport %p : Resuming reading after being paused due to too "
+          "many unwritten SETTINGS ACK, PINGS ACK and RST_STREAM frames",
+          t));
+      t->reading_paused_on_pending_induced_frames = false;
+      continue_read_action_locked(t);
+    }
   } else {
     GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN();
     set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing");
@@ -1117,7 +1131,6 @@ static void write_action_end_locked(void* tp, grpc_error* error) {
   }
 
   grpc_chttp2_end_write(t, GRPC_ERROR_REF(error));
-
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
 }
 
@@ -2111,10 +2124,8 @@ void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
     grpc_http2_error_code http_error;
     grpc_error_get_status(due_to_error, s->deadline, nullptr, nullptr,
                           &http_error, nullptr);
-    grpc_slice_buffer_add(
-        &t->qbuf,
-        grpc_chttp2_rst_stream_create(
-            s->id, static_cast<uint32_t>(http_error), &s->stats.outgoing));
+    grpc_chttp2_add_rst_stream_to_next_write(
+        t, s->id, static_cast<uint32_t>(http_error), &s->stats.outgoing);
     grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
   }
 }
@@ -2425,9 +2436,8 @@ static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
   grpc_slice_buffer_add(&t->qbuf, status_hdr);
   grpc_slice_buffer_add(&t->qbuf, message_pfx);
   grpc_slice_buffer_add(&t->qbuf, grpc_slice_ref_internal(slice));
-  grpc_slice_buffer_add(
-      &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
-                                              &s->stats.outgoing));
+  grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
+                                           &s->stats.outgoing);
 
   grpc_chttp2_mark_stream_closed(t, s, 1, 1, error);
   grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
@@ -2598,10 +2608,16 @@ static void read_action_locked(void* tp, grpc_error* error) {
     grpc_slice_buffer_reset_and_unref_internal(&t->read_buffer);
 
     if (keep_reading) {
-      const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
-      grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
-      grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t,
-                                        nullptr);
+      if (t->num_pending_induced_frames >= DEFAULT_MAX_PENDING_INDUCED_FRAMES) {
+        t->reading_paused_on_pending_induced_frames = true;
+        GRPC_CHTTP2_IF_TRACING(
+            gpr_log(GPR_INFO,
+                    "transport %p : Pausing reading due to too "
+                    "many unwritten SETTINGS ACK and RST_STREAM frames",
+                    t));
+      } else {
+        continue_read_action_locked(t);
+      }
       GRPC_CHTTP2_UNREF_TRANSPORT(t, "keep_reading");
     } else {
       GRPC_CHTTP2_UNREF_TRANSPORT(t, "reading_action");
@@ -2610,6 +2626,12 @@ static void read_action_locked(void* tp, grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
+static void continue_read_action_locked(grpc_chttp2_transport* t) {
+  const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
+  grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
+  grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t, nullptr);
+}
+
 // t is reffed prior to calling the first time, and once the callback chain
 // that kicks off finishes, it's unreffed
 static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
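The hunks above are the core of this release: the transport now tracks how many "induced" frames (SETTINGS ACK, PING ACK, RST_STREAM) are queued in t->qbuf waiting to be written, read_action_locked stops issuing new endpoint reads once that backlog reaches DEFAULT_MAX_PENDING_INDUCED_FRAMES (10000), and write_action_begin_locked resumes reading via continue_read_action_locked once the queue has been flushed; the parser-side hunks further down do the counting. The standalone C++ sketch below is only a rough model of that throttle, not the transport's actual control flow, and every name in it (TransportModel, QueueInducedFrame, ShouldContinueReading, FlushAndMaybeResume) is hypothetical rather than a gRPC API.

// Sketch only: a toy model of the read throttle, with hypothetical names.
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors DEFAULT_MAX_PENDING_INDUCED_FRAMES from the hunk above.
constexpr uint32_t kMaxPendingInducedFrames = 10000;

struct TransportModel {
  std::vector<int> qbuf;  // stands in for t->qbuf (frames queued for the next write)
  uint32_t num_pending_induced_frames = 0;
  bool reading_paused_on_pending_induced_frames = false;

  // What the parsers do: every SETTINGS ACK, PING ACK or RST_STREAM queued in
  // response to a peer frame bumps the counter.
  void QueueInducedFrame(int frame) {
    ++num_pending_induced_frames;
    qbuf.push_back(frame);
  }

  // End of read_action_locked: only keep issuing endpoint reads while the
  // backlog of unwritten induced frames stays below the cap.
  bool ShouldContinueReading() {
    if (num_pending_induced_frames >= kMaxPendingInducedFrames) {
      reading_paused_on_pending_induced_frames = true;
      return false;
    }
    return true;
  }

  // write_action_begin_locked + WriteContext::FlushQueuedBuffers: flushing
  // qbuf zeroes the counter; if reading had been paused, it resumes.
  bool FlushAndMaybeResume() {
    qbuf.clear();
    num_pending_induced_frames = 0;
    if (reading_paused_on_pending_induced_frames) {
      reading_paused_on_pending_induced_frames = false;
      return true;  // the real transport calls continue_read_action_locked here
    }
    return false;
  }
};

int main() {
  TransportModel t;
  int frames_read = 0;
  // A peer floods us with frames that each require an ACK or RST_STREAM reply.
  while (t.ShouldContinueReading()) {
    t.QueueInducedFrame(frames_read);
    ++frames_read;
  }
  std::printf("paused after %d frames, %u induced frames pending\n",
              frames_read, t.num_pending_induced_frames);
  std::printf("resumed after flush: %s\n",
              t.FlushAndMaybeResume() ? "yes" : "no");
  return 0;
}

In this model the loop pauses after exactly 10000 queued replies and a single flush resumes reading, which is the bound the release is after: the unwritten-reply backlog can no longer grow without limit when a peer sends frames faster than we can write acknowledgements.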
@@ -118,6 +118,7 @@ grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
       t->ping_acks = static_cast<uint64_t*>(gpr_realloc(
           t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks)));
     }
+    t->num_pending_induced_frames++;
     t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
     grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
   }
@@ -58,6 +58,14 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
   return slice;
 }
 
+void grpc_chttp2_add_rst_stream_to_next_write(
+    grpc_chttp2_transport* t, uint32_t id, uint32_t code,
+    grpc_transport_one_way_stats* stats) {
+  t->num_pending_induced_frames++;
+  grpc_slice_buffer_add(&t->qbuf,
+                        grpc_chttp2_rst_stream_create(id, code, stats));
+}
+
 grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
     grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags) {
   if (length != 4) {
@@ -33,6 +33,13 @@ typedef struct {
 grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
                                          grpc_transport_one_way_stats* stats);
 
+// Adds RST_STREAM frame to t->qbuf (buffer for the next write). Should be
+// called when we want to add RST_STREAM and we are not in
+// write_action_begin_locked.
+void grpc_chttp2_add_rst_stream_to_next_write(
+    grpc_chttp2_transport* t, uint32_t id, uint32_t code,
+    grpc_transport_one_way_stats* stats);
+
 grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
     grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags);
 grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
@@ -132,6 +132,7 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
         if (is_last) {
           memcpy(parser->target_settings, parser->incoming_settings,
                  GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
+          t->num_pending_induced_frames++;
           grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
           if (t->notify_on_receive_settings != nullptr) {
             GRPC_CLOSURE_SCHED(t->notify_on_receive_settings,
@@ -1612,9 +1612,8 @@ static void force_client_rst_stream(void* sp, grpc_error* error) {
   grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
   grpc_chttp2_transport* t = s->t;
   if (!s->write_closed) {
-    grpc_slice_buffer_add(
-        &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
-                                                &s->stats.outgoing));
+    grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
+                                             &s->stats.outgoing);
     grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
     grpc_chttp2_mark_stream_closed(t, s, true, true, GRPC_ERROR_NONE);
   }
@@ -493,6 +493,13 @@ struct grpc_chttp2_transport {
   grpc_core::ContextList* cl = nullptr;
   grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> channelz_socket;
   uint32_t num_messages_in_next_write = 0;
+  /** The number of pending induced frames (SETTINGS_ACK, PINGS_ACK and
+   * RST_STREAM) in the outgoing buffer (t->qbuf). If this number goes beyond
+   * DEFAULT_MAX_PENDING_INDUCED_FRAMES, we pause reading new frames. We would
+   * only continue reading when we are able to write to the socket again,
+   * thereby reducing the number of induced frames. */
+  uint32_t num_pending_induced_frames = 0;
+  bool reading_paused_on_pending_induced_frames = false;
 };
 
 typedef enum {
@@ -382,10 +382,9 @@ error_handler:
     if (s != nullptr) {
       grpc_chttp2_mark_stream_closed(t, s, true, false, err);
     }
-    grpc_slice_buffer_add(
-        &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
-                                                GRPC_HTTP2_PROTOCOL_ERROR,
-                                                &s->stats.outgoing));
+    grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
+                                             GRPC_HTTP2_PROTOCOL_ERROR,
+                                             &s->stats.outgoing);
     return init_skip_frame_parser(t, 0);
   } else {
     return err;
@@ -750,10 +749,9 @@ static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
     grpc_chttp2_parsing_become_skip_parser(t);
     if (s) {
       s->forced_close_error = err;
-      grpc_slice_buffer_add(
-          &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
-                                                  GRPC_HTTP2_PROTOCOL_ERROR,
-                                                  &s->stats.outgoing));
+      grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
+                                               GRPC_HTTP2_PROTOCOL_ERROR,
+                                               &s->stats.outgoing);
     } else {
       GRPC_ERROR_UNREF(err);
     }
@@ -219,6 +219,7 @@ class WriteContext {
   void FlushQueuedBuffers() {
     /* simple writes are queued to qbuf, and flushed here */
     grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
+    t_->num_pending_induced_frames = 0;
     GPR_ASSERT(t_->qbuf.count == 0);
   }
 
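The one-line change above is the flush side of the throttle: moving qbuf into outbuf and zeroing the counter happen in the same step, which is what lets write_action_begin_locked assert that num_pending_induced_frames is zero before it resumes reading. A minimal sketch of that contract, using a hypothetical Buffers type rather than gRPC's real structures:

// Sketch only (hypothetical names, not gRPC code): drain the queued frames
// and reset the induced-frame counter together.
#include <cassert>
#include <cstdint>
#include <vector>

struct Buffers {
  std::vector<int> qbuf;    // frames queued for the next write
  std::vector<int> outbuf;  // frames about to be handed to the endpoint
  uint32_t num_pending_induced_frames = 0;
};

void FlushQueuedBuffers(Buffers* b) {
  // Everything queued so far becomes part of the outgoing write...
  b->outbuf.insert(b->outbuf.end(), b->qbuf.begin(), b->qbuf.end());
  b->qbuf.clear();
  // ...so no induced frame is pending any more; reset before the transport
  // decides whether to resume reading.
  b->num_pending_induced_frames = 0;
  assert(b->qbuf.empty());
}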
@@ -14,5 +14,5 @@
 
 # GRPC contains the General RPC module.
 module GRPC
-  VERSION = '1.22.0'
+  VERSION = '1.22.1'
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: grpc
 version: !ruby/object:Gem::Version
-  version: 1.22.0
+  version: 1.22.1
 platform: ruby
 authors:
 - gRPC Authors
 autorequire:
 bindir: src/ruby/bin
 cert_chain: []
-date: 2019-07-02 00:00:00.000000000 Z
+date: 2019-08-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: google-protobuf
@@ -1625,39 +1625,39 @@ signing_key:
 specification_version: 4
 summary: GRPC system in Ruby
 test_files:
-- src/ruby/spec/error_sanity_spec.rb
-- src/ruby/spec/channel_spec.rb
 - src/ruby/spec/server_spec.rb
-- src/ruby/spec/client_server_spec.rb
-- src/ruby/spec/pb/health/checker_spec.rb
-- src/ruby/spec/pb/codegen/package_option_spec.rb
-- src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
-- src/ruby/spec/pb/duplicate/codegen_spec.rb
-- src/ruby/spec/call_credentials_spec.rb
-- src/ruby/spec/support/helpers.rb
-- src/ruby/spec/support/services.rb
-- src/ruby/spec/errors_spec.rb
-- src/ruby/spec/google_rpc_status_utils_spec.rb
 - src/ruby/spec/server_credentials_spec.rb
-- src/ruby/spec/compression_options_spec.rb
-- src/ruby/spec/client_auth_spec.rb
-- src/ruby/spec/call_spec.rb
-- src/ruby/spec/testdata/README
-- src/ruby/spec/testdata/client.pem
-- src/ruby/spec/testdata/server1.key
+- src/ruby/spec/call_credentials_spec.rb
 - src/ruby/spec/testdata/ca.pem
+- src/ruby/spec/testdata/client.pem
 - src/ruby/spec/testdata/client.key
+- src/ruby/spec/testdata/README
 - src/ruby/spec/testdata/server1.pem
-- src/ruby/spec/channel_connection_spec.rb
-- src/ruby/spec/spec_helper.rb
-- src/ruby/spec/time_consts_spec.rb
-- src/ruby/spec/channel_credentials_spec.rb
-- src/ruby/spec/generic/rpc_server_pool_spec.rb
+- src/ruby/spec/testdata/server1.key
+- src/ruby/spec/support/helpers.rb
+- src/ruby/spec/support/services.rb
+- src/ruby/spec/errors_spec.rb
+- src/ruby/spec/generic/client_stub_spec.rb
 - src/ruby/spec/generic/client_interceptors_spec.rb
 - src/ruby/spec/generic/server_interceptors_spec.rb
 - src/ruby/spec/generic/rpc_server_spec.rb
-- src/ruby/spec/generic/service_spec.rb
-- src/ruby/spec/generic/client_stub_spec.rb
-- src/ruby/spec/generic/interceptor_registry_spec.rb
 - src/ruby/spec/generic/rpc_desc_spec.rb
 - src/ruby/spec/generic/active_call_spec.rb
+- src/ruby/spec/generic/interceptor_registry_spec.rb
+- src/ruby/spec/generic/rpc_server_pool_spec.rb
+- src/ruby/spec/generic/service_spec.rb
+- src/ruby/spec/error_sanity_spec.rb
+- src/ruby/spec/time_consts_spec.rb
+- src/ruby/spec/google_rpc_status_utils_spec.rb
+- src/ruby/spec/client_auth_spec.rb
+- src/ruby/spec/channel_credentials_spec.rb
+- src/ruby/spec/channel_connection_spec.rb
+- src/ruby/spec/client_server_spec.rb
+- src/ruby/spec/compression_options_spec.rb
+- src/ruby/spec/call_spec.rb
+- src/ruby/spec/channel_spec.rb
+- src/ruby/spec/spec_helper.rb
+- src/ruby/spec/pb/duplicate/codegen_spec.rb
+- src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
+- src/ruby/spec/pb/codegen/package_option_spec.rb
+- src/ruby/spec/pb/health/checker_spec.rb