grpc 1.59.0 → 1.59.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. checksums.yaml +4 -4
  2. data/Makefile +7 -1
  3. data/src/core/ext/filters/http/server/http_server_filter.cc +21 -17
  4. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +504 -361
  5. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +11 -1
  6. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +9 -0
  7. data/src/core/ext/transport/chttp2/transport/internal.h +92 -28
  8. data/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc +44 -0
  9. data/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.h +67 -0
  10. data/src/core/ext/transport/chttp2/transport/parsing.cc +103 -14
  11. data/src/core/ext/transport/chttp2/transport/ping_callbacks.cc +108 -0
  12. data/src/core/ext/transport/chttp2/transport/ping_callbacks.h +115 -0
  13. data/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc +26 -4
  14. data/src/core/ext/transport/chttp2/transport/ping_rate_policy.h +16 -1
  15. data/src/core/ext/transport/chttp2/transport/write_size_policy.cc +60 -0
  16. data/src/core/ext/transport/chttp2/transport/write_size_policy.h +66 -0
  17. data/src/core/ext/transport/chttp2/transport/writing.cc +149 -77
  18. data/src/core/lib/channel/promise_based_filter.cc +9 -4
  19. data/src/core/lib/channel/promise_based_filter.h +2 -1
  20. data/src/core/lib/experiments/experiments.cc +222 -0
  21. data/src/core/lib/experiments/experiments.h +135 -0
  22. data/src/core/lib/iomgr/combiner.cc +3 -0
  23. data/src/core/lib/transport/metadata_batch.h +11 -1
  24. data/src/core/lib/transport/transport.h +6 -0
  25. data/src/ruby/lib/grpc/version.rb +1 -1
  26. metadata +9 -3
@@ -0,0 +1,66 @@
1
+ // Copyright 2023 gRPC authors.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ #ifndef GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_WRITE_SIZE_POLICY_H
16
+ #define GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_WRITE_SIZE_POLICY_H
17
+
18
+ #include <grpc/support/port_platform.h>
19
+
20
+ #include <stddef.h>
21
+ #include <stdint.h>
22
+
23
+ #include "src/core/lib/gprpp/time.h"
24
+
25
+ namespace grpc_core {
26
+
27
+ class Chttp2WriteSizePolicy {
28
+ public:
29
+ // Smallest possible WriteTargetSize
30
+ static constexpr size_t MinTarget() { return 32 * 1024; }
31
+ // Largest possible WriteTargetSize
32
+ static constexpr size_t MaxTarget() { return 16 * 1024 * 1024; }
33
+ // How long should a write take to be considered "fast"
34
+ static constexpr Duration FastWrite() { return Duration::Milliseconds(100); }
35
+ // How long should a write take to be considered "slow"
36
+ static constexpr Duration SlowWrite() { return Duration::Seconds(1); }
37
+ // If a read is slow, what target time should we use to try and adjust back
38
+ // to?
39
+ static constexpr Duration TargetWriteTime() {
40
+ return Duration::Milliseconds(300);
41
+ }
42
+
43
+ // What size should be targetted for the next write.
44
+ size_t WriteTargetSize();
45
+ // Notify the policy that a write of some size has begun.
46
+ // EndWrite must be called when the write completes.
47
+ void BeginWrite(size_t size);
48
+ // Notify the policy that a write of some size has ended.
49
+ void EndWrite(bool success);
50
+
51
+ private:
52
+ size_t current_target_ = 128 * 1024;
53
+ Timestamp experiment_start_time_ = Timestamp::InfFuture();
54
+ // State varies from -2...2
55
+ // Every time we do a write faster than kFastWrite, we decrement
56
+ // Every time we do a write slower than kSlowWrite, we increment
57
+ // If we hit -2, we increase the target size and reset state to 0
58
+ // If we hit 2, we decrease the target size and reset state to 0
59
+ // In this way, we need two consecutive fast/slow operations to adjust,
60
+ // denoising the signal significantly
61
+ int8_t state_ = 0;
62
+ };
63
+
64
+ } // namespace grpc_core
65
+
66
+ #endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_WRITE_SIZE_POLICY_H
@@ -22,6 +22,7 @@
22
22
  #include <stddef.h>
23
23
 
24
24
  #include <algorithm>
25
+ #include <limits>
25
26
  #include <memory>
26
27
  #include <string>
27
28
  #include <utility>
@@ -49,21 +50,25 @@
49
50
  #include "src/core/ext/transport/chttp2/transport/http_trace.h"
50
51
  #include "src/core/ext/transport/chttp2/transport/internal.h"
51
52
  #include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
53
+ #include "src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.h"
54
+ #include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
52
55
  #include "src/core/ext/transport/chttp2/transport/ping_rate_policy.h"
56
+ #include "src/core/ext/transport/chttp2/transport/write_size_policy.h"
53
57
  #include "src/core/lib/channel/channelz.h"
54
58
  #include "src/core/lib/debug/stats.h"
55
59
  #include "src/core/lib/debug/stats_data.h"
56
60
  #include "src/core/lib/debug/trace.h"
57
- #include "src/core/lib/gprpp/debug_location.h"
61
+ #include "src/core/lib/experiments/experiments.h"
62
+ #include "src/core/lib/gpr/useful.h"
58
63
  #include "src/core/lib/gprpp/match.h"
59
64
  #include "src/core/lib/gprpp/ref_counted.h"
60
65
  #include "src/core/lib/gprpp/ref_counted_ptr.h"
61
66
  #include "src/core/lib/gprpp/time.h"
62
- #include "src/core/lib/iomgr/closure.h"
63
67
  #include "src/core/lib/iomgr/endpoint.h"
64
68
  #include "src/core/lib/iomgr/error.h"
65
69
  #include "src/core/lib/iomgr/exec_ctx.h"
66
70
  #include "src/core/lib/slice/slice.h"
71
+ #include "src/core/lib/slice/slice_buffer.h"
67
72
  #include "src/core/lib/transport/bdp_estimator.h"
68
73
  #include "src/core/lib/transport/http2_errors.h"
69
74
  #include "src/core/lib/transport/metadata_batch.h"
@@ -97,6 +102,9 @@ static grpc_core::Duration NextAllowedPingInterval(grpc_chttp2_transport* t) {
97
102
  // The gRPC keepalive spec doesn't call for any throttling on the server
98
103
  // side, but we are adding some throttling for protection anyway, unless
99
104
  // we are doing a graceful GOAWAY in which case we don't want to wait.
105
+ if (grpc_core::IsMultipingEnabled()) {
106
+ return grpc_core::Duration::Seconds(1);
107
+ }
100
108
  return t->keepalive_time == grpc_core::Duration::Infinity()
101
109
  ? grpc_core::Duration::Seconds(20)
102
110
  : t->keepalive_time / 2;
@@ -105,43 +113,33 @@ static grpc_core::Duration NextAllowedPingInterval(grpc_chttp2_transport* t) {
105
113
  }
106
114
 
107
115
  static void maybe_initiate_ping(grpc_chttp2_transport* t) {
108
- grpc_chttp2_ping_queue* pq = &t->ping_queue;
109
- if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
116
+ if (!t->ping_callbacks.ping_requested()) {
110
117
  // no ping needed: wait
111
118
  return;
112
119
  }
113
- if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
114
- // ping already in-flight: wait
115
- if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
116
- GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
117
- GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
118
- gpr_log(GPR_INFO, "%s: Ping delayed [%s]: already pinging",
119
- t->is_client ? "CLIENT" : "SERVER",
120
- std::string(t->peer_string.as_string_view()).c_str());
121
- }
122
- return;
123
- }
124
120
  // InvalidateNow to avoid getting stuck re-initializing the ping timer
125
121
  // in a loop while draining the currently-held combiner. Also see
126
122
  // https://github.com/grpc/grpc/issues/26079.
127
123
  grpc_core::ExecCtx::Get()->InvalidateNow();
128
124
  Match(
129
- t->ping_rate_policy.RequestSendPing(NextAllowedPingInterval(t)),
130
- [pq, t](grpc_core::Chttp2PingRatePolicy::SendGranted) {
131
- pq->inflight_id = t->ping_ctr;
132
- t->ping_ctr++;
133
- grpc_core::ExecCtx::RunList(DEBUG_LOCATION,
134
- &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
135
- grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
136
- &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
137
- grpc_slice_buffer_add(&t->outbuf,
138
- grpc_chttp2_ping_create(false, pq->inflight_id));
125
+ t->ping_rate_policy.RequestSendPing(NextAllowedPingInterval(t),
126
+ t->ping_callbacks.pings_inflight()),
127
+ [t](grpc_core::Chttp2PingRatePolicy::SendGranted) {
128
+ t->ping_rate_policy.SentPing();
129
+ const uint64_t id = t->ping_callbacks.StartPing(t->bitgen);
130
+ grpc_slice_buffer_add(t->outbuf.c_slice_buffer(),
131
+ grpc_chttp2_ping_create(false, id));
132
+ t->keepalive_incoming_data_wanted = true;
133
+ if (t->channelz_socket != nullptr) {
134
+ t->channelz_socket->RecordKeepaliveSent();
135
+ }
139
136
  grpc_core::global_stats().IncrementHttp2PingsSent();
140
137
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
141
138
  GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
142
- GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
143
- gpr_log(GPR_INFO, "%s: Ping sent [%s]: %s",
144
- t->is_client ? "CLIENT" : "SERVER",
139
+ GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
140
+ GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
141
+ gpr_log(GPR_INFO, "%s[%p]: Ping %" PRIx64 " sent [%s]: %s",
142
+ t->is_client ? "CLIENT" : "SERVER", t, id,
145
143
  std::string(t->peer_string.as_string_view()).c_str(),
146
144
  t->ping_rate_policy.GetDebugString().c_str());
147
145
  }
@@ -150,9 +148,11 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
150
148
  // need to receive something of substance before sending a ping again
151
149
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
152
150
  GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
153
- GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
151
+ GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
152
+ GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
154
153
  gpr_log(GPR_INFO,
155
- "CLIENT: Ping delayed [%s]: too many recent pings: %s",
154
+ "%s[%p]: Ping delayed [%s]: too many recent pings: %s",
155
+ t->is_client ? "CLIENT" : "SERVER", t,
156
156
  std::string(t->peer_string.as_string_view()).c_str(),
157
157
  t->ping_rate_policy.GetDebugString().c_str());
158
158
  }
@@ -161,16 +161,17 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
161
161
  // not enough elapsed time between successive pings
162
162
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
163
163
  GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
164
- GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
165
- gpr_log(GPR_INFO,
166
- "%s: Ping delayed [%s]: not enough time elapsed since last "
167
- "ping. "
168
- " Last ping:%s, minimum wait:%s need to wait:%s",
169
- t->is_client ? "CLIENT" : "SERVER",
170
- std::string(t->peer_string.as_string_view()).c_str(),
171
- too_soon.last_ping.ToString().c_str(),
172
- too_soon.next_allowed_ping_interval.ToString().c_str(),
173
- too_soon.wait.ToString().c_str());
164
+ GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
165
+ GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
166
+ gpr_log(
167
+ GPR_INFO,
168
+ "%s[%p]: Ping delayed [%s]: not enough time elapsed since last "
169
+ "ping. Last ping:%s, minimum wait:%s need to wait:%s",
170
+ t->is_client ? "CLIENT" : "SERVER", t,
171
+ std::string(t->peer_string.as_string_view()).c_str(),
172
+ too_soon.last_ping.ToString().c_str(),
173
+ too_soon.next_allowed_ping_interval.ToString().c_str(),
174
+ too_soon.wait.ToString().c_str());
174
175
  }
175
176
  if (!t->delayed_ping_timer_handle.has_value()) {
176
177
  t->delayed_ping_timer_handle = t->event_engine->RunAfter(
@@ -228,11 +229,6 @@ static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
228
229
  }
229
230
  }
230
231
 
231
- // How many bytes would we like to put on the wire during a single syscall
232
- static uint32_t target_write_size(grpc_chttp2_transport* /*t*/) {
233
- return 1024 * 1024;
234
- }
235
-
236
232
  namespace {
237
233
 
238
234
  class CountDefaultMetadataEncoder {
@@ -268,42 +264,66 @@ class WriteContext {
268
264
  }
269
265
 
270
266
  void FlushSettings() {
271
- if (t_->dirtied_local_settings && !t_->sent_local_settings) {
267
+ const bool dirty =
268
+ t_->dirtied_local_settings ||
269
+ t_->settings[GRPC_SENT_SETTINGS]
270
+ [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] !=
271
+ t_->max_concurrent_streams_policy.AdvertiseValue();
272
+ if (dirty && !t_->sent_local_settings) {
273
+ t_->settings[GRPC_LOCAL_SETTINGS]
274
+ [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] =
275
+ t_->max_concurrent_streams_policy.AdvertiseValue();
272
276
  grpc_slice_buffer_add(
273
- &t_->outbuf, grpc_chttp2_settings_create(
274
- t_->settings[GRPC_SENT_SETTINGS],
275
- t_->settings[GRPC_LOCAL_SETTINGS],
276
- t_->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
277
+ t_->outbuf.c_slice_buffer(),
278
+ grpc_chttp2_settings_create(t_->settings[GRPC_SENT_SETTINGS],
279
+ t_->settings[GRPC_LOCAL_SETTINGS],
280
+ t_->force_send_settings,
281
+ GRPC_CHTTP2_NUM_SETTINGS));
282
+ if (grpc_core::IsSettingsTimeoutEnabled() &&
283
+ t_->keepalive_timeout != grpc_core::Duration::Infinity()) {
284
+ GPR_ASSERT(
285
+ t_->settings_ack_watchdog ==
286
+ grpc_event_engine::experimental::EventEngine::TaskHandle::kInvalid);
287
+ // We base settings timeout on keepalive timeout, but double it to allow
288
+ // for implementations taking some more time about acking a setting.
289
+ t_->settings_ack_watchdog = t_->event_engine->RunAfter(
290
+ t_->settings_timeout, [t = t_->Ref()]() mutable {
291
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
292
+ grpc_core::ExecCtx exec_ctx;
293
+ grpc_chttp2_settings_timeout(std::move(t));
294
+ });
295
+ }
277
296
  t_->force_send_settings = false;
278
297
  t_->dirtied_local_settings = false;
279
298
  t_->sent_local_settings = true;
280
299
  t_->flow_control.FlushedSettings();
300
+ t_->max_concurrent_streams_policy.FlushedSettings();
281
301
  grpc_core::global_stats().IncrementHttp2SettingsWrites();
282
302
  }
283
303
  }
284
304
 
285
305
  void FlushQueuedBuffers() {
286
306
  // simple writes are queued to qbuf, and flushed here
287
- grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
307
+ grpc_slice_buffer_move_into(&t_->qbuf, t_->outbuf.c_slice_buffer());
288
308
  t_->num_pending_induced_frames = 0;
289
309
  GPR_ASSERT(t_->qbuf.count == 0);
290
310
  }
291
311
 
292
312
  void FlushWindowUpdates() {
293
- uint32_t transport_announce =
294
- t_->flow_control.MaybeSendUpdate(t_->outbuf.count > 0);
313
+ uint32_t transport_announce = t_->flow_control.MaybeSendUpdate(
314
+ t_->outbuf.c_slice_buffer()->count > 0);
295
315
  if (transport_announce) {
296
316
  grpc_transport_one_way_stats throwaway_stats;
297
- grpc_slice_buffer_add(
298
- &t_->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
299
- &throwaway_stats));
317
+ grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
318
+ grpc_chttp2_window_update_create(
319
+ 0, transport_announce, &throwaway_stats));
300
320
  grpc_chttp2_reset_ping_clock(t_);
301
321
  }
302
322
  }
303
323
 
304
324
  void FlushPingAcks() {
305
325
  for (size_t i = 0; i < t_->ping_ack_count; i++) {
306
- grpc_slice_buffer_add(&t_->outbuf,
326
+ grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
307
327
  grpc_chttp2_ping_create(true, t_->ping_acks[i]));
308
328
  }
309
329
  t_->ping_ack_count = 0;
@@ -328,7 +348,7 @@ class WriteContext {
328
348
  }
329
349
 
330
350
  grpc_chttp2_stream* NextStream() {
331
- if (t_->outbuf.length > target_write_size(t_)) {
351
+ if (t_->outbuf.c_slice_buffer()->length > target_write_size()) {
332
352
  result_.partial = true;
333
353
  return nullptr;
334
354
  }
@@ -351,12 +371,17 @@ class WriteContext {
351
371
  grpc_chttp2_transport* transport() const { return t_; }
352
372
 
353
373
  grpc_chttp2_begin_write_result Result() {
354
- result_.writing = t_->outbuf.count > 0;
374
+ result_.writing = t_->outbuf.c_slice_buffer()->count > 0;
355
375
  return result_;
356
376
  }
357
377
 
378
+ size_t target_write_size() const { return target_write_size_; }
379
+
358
380
  private:
359
381
  grpc_chttp2_transport* const t_;
382
+ size_t target_write_size_ = grpc_core::IsWriteSizePolicyEnabled()
383
+ ? t_->write_size_policy.WriteTargetSize()
384
+ : 1024 * 1024;
360
385
 
361
386
  // stats histogram counters: we increment these throughout this function,
362
387
  // and at the end publish to the central stats histograms
@@ -386,11 +411,15 @@ class DataSendContext {
386
411
  }
387
412
 
388
413
  uint32_t max_outgoing() const {
389
- return static_cast<uint32_t>(std::min(
390
- t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
391
- static_cast<uint32_t>(
392
- std::min(static_cast<int64_t>(stream_remote_window()),
393
- t_->flow_control.remote_window()))));
414
+ return grpc_core::Clamp<uint32_t>(
415
+ std::min<int64_t>(
416
+ {t_->settings[GRPC_PEER_SETTINGS]
417
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
418
+ stream_remote_window(), t_->flow_control.remote_window(),
419
+ grpc_core::IsWriteSizeCapEnabled()
420
+ ? static_cast<int64_t>(write_context_->target_write_size())
421
+ : std::numeric_limits<uint32_t>::max()}),
422
+ 0, std::numeric_limits<uint32_t>::max());
394
423
  }
395
424
 
396
425
  bool AnyOutgoing() const { return max_outgoing() > 0; }
@@ -403,7 +432,8 @@ class DataSendContext {
403
432
  s_->send_trailing_metadata != nullptr &&
404
433
  s_->send_trailing_metadata->empty();
405
434
  grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, send_bytes,
406
- is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
435
+ is_last_frame_, &s_->stats.outgoing,
436
+ t_->outbuf.c_slice_buffer());
407
437
  sfc_upd_.SentData(send_bytes);
408
438
  s_->sending_bytes += send_bytes;
409
439
  }
@@ -468,7 +498,7 @@ class StreamWriteContext {
468
498
  [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], // max_frame_size
469
499
  &s_->stats.outgoing // stats
470
500
  },
471
- *s_->send_initial_metadata, &t_->outbuf);
501
+ *s_->send_initial_metadata, t_->outbuf.c_slice_buffer());
472
502
  grpc_chttp2_reset_ping_clock(t_);
473
503
  write_context_->IncInitialMetadataWrites();
474
504
  }
@@ -488,9 +518,9 @@ class StreamWriteContext {
488
518
  const uint32_t stream_announce = s_->flow_control.MaybeSendUpdate();
489
519
  if (stream_announce == 0) return;
490
520
 
491
- grpc_slice_buffer_add(
492
- &t_->outbuf, grpc_chttp2_window_update_create(s_->id, stream_announce,
493
- &s_->stats.outgoing));
521
+ grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
522
+ grpc_chttp2_window_update_create(
523
+ s_->id, stream_announce, &s_->stats.outgoing));
494
524
  grpc_chttp2_reset_ping_clock(t_);
495
525
  write_context_->IncWindowUpdateWrites();
496
526
  }
@@ -543,7 +573,7 @@ class StreamWriteContext {
543
573
  GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
544
574
  if (s_->send_trailing_metadata->empty()) {
545
575
  grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true,
546
- &s_->stats.outgoing, &t_->outbuf);
576
+ &s_->stats.outgoing, t_->outbuf.c_slice_buffer());
547
577
  } else {
548
578
  if (send_status_.has_value()) {
549
579
  s_->send_trailing_metadata->Set(grpc_core::HttpStatusMetadata(),
@@ -563,7 +593,7 @@ class StreamWriteContext {
563
593
  t_->settings[GRPC_PEER_SETTINGS]
564
594
  [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
565
595
  &s_->stats.outgoing},
566
- *s_->send_trailing_metadata, &t_->outbuf);
596
+ *s_->send_trailing_metadata, t_->outbuf.c_slice_buffer());
567
597
  }
568
598
  write_context_->IncTrailingMetadataWrites();
569
599
  grpc_chttp2_reset_ping_clock(t_);
@@ -600,8 +630,9 @@ class StreamWriteContext {
600
630
 
601
631
  if (!t_->is_client && !s_->read_closed) {
602
632
  grpc_slice_buffer_add(
603
- &t_->outbuf, grpc_chttp2_rst_stream_create(
604
- s_->id, GRPC_HTTP2_NO_ERROR, &s_->stats.outgoing));
633
+ t_->outbuf.c_slice_buffer(),
634
+ grpc_chttp2_rst_stream_create(s_->id, GRPC_HTTP2_NO_ERROR,
635
+ &s_->stats.outgoing));
605
636
  }
606
637
  grpc_chttp2_mark_stream_closed(t_, s_, !t_->is_client, true,
607
638
  absl::OkStatus());
@@ -634,15 +665,15 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
634
665
  // (according to available window sizes) and add to the output buffer
635
666
  while (grpc_chttp2_stream* s = ctx.NextStream()) {
636
667
  StreamWriteContext stream_ctx(&ctx, s);
637
- size_t orig_len = t->outbuf.length;
668
+ size_t orig_len = t->outbuf.c_slice_buffer()->length;
638
669
  int64_t num_stream_bytes = 0;
639
670
  stream_ctx.FlushInitialMetadata();
640
671
  stream_ctx.FlushWindowUpdates();
641
672
  stream_ctx.FlushData();
642
673
  stream_ctx.FlushTrailingMetadata();
643
- if (t->outbuf.length > orig_len) {
674
+ if (t->outbuf.c_slice_buffer()->length > orig_len) {
644
675
  // Add this stream to the list of the contexts to be traced at TCP
645
- num_stream_bytes = t->outbuf.length - orig_len;
676
+ num_stream_bytes = t->outbuf.c_slice_buffer()->length - orig_len;
646
677
  s->byte_counter += static_cast<size_t>(num_stream_bytes);
647
678
  if (s->traced && grpc_endpoint_can_track_err(t->ep)) {
648
679
  grpc_core::CopyContextFn copy_context_fn =
@@ -683,6 +714,47 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
683
714
  }
684
715
  t->num_messages_in_next_write = 0;
685
716
 
717
+ if (t->ping_callbacks.started_new_ping_without_setting_timeout() &&
718
+ t->keepalive_timeout != grpc_core::Duration::Infinity()) {
719
+ // Set ping timeout after finishing write so we don't measure our own send
720
+ // time.
721
+ const auto timeout = grpc_core::IsSeparatePingFromKeepaliveEnabled()
722
+ ? t->ping_timeout
723
+ : t->keepalive_timeout;
724
+ auto id = t->ping_callbacks.OnPingTimeout(
725
+ timeout, t->event_engine.get(), [t = t->Ref()] {
726
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
727
+ grpc_core::ExecCtx exec_ctx;
728
+ grpc_chttp2_ping_timeout(t);
729
+ });
730
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace) && id.has_value()) {
731
+ gpr_log(GPR_INFO,
732
+ "%s[%p]: Set ping timeout timer of %s for ping id %" PRIx64,
733
+ t->is_client ? "CLIENT" : "SERVER", t, timeout.ToString().c_str(),
734
+ id.value());
735
+ }
736
+
737
+ if (grpc_core::IsSeparatePingFromKeepaliveEnabled() &&
738
+ t->keepalive_incoming_data_wanted &&
739
+ t->keepalive_timeout < t->ping_timeout &&
740
+ t->keepalive_ping_timeout_handle !=
741
+ grpc_event_engine::experimental::EventEngine::TaskHandle::
742
+ kInvalid) {
743
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace) ||
744
+ GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
745
+ gpr_log(GPR_INFO, "%s[%p]: Set keepalive ping timeout timer of %s",
746
+ t->is_client ? "CLIENT" : "SERVER", t,
747
+ t->keepalive_timeout.ToString().c_str());
748
+ }
749
+ t->keepalive_ping_timeout_handle =
750
+ t->event_engine->RunAfter(t->keepalive_timeout, [t = t->Ref()] {
751
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
752
+ grpc_core::ExecCtx exec_ctx;
753
+ grpc_chttp2_keepalive_timeout(t);
754
+ });
755
+ }
756
+ }
757
+
686
758
  while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
687
759
  if (s->sending_bytes != 0) {
688
760
  update_list(t, s, static_cast<int64_t>(s->sending_bytes),
@@ -692,5 +764,5 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
692
764
  }
693
765
  GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:end");
694
766
  }
695
- grpc_slice_buffer_reset_and_unref(&t->outbuf);
767
+ grpc_slice_buffer_reset_and_unref(t->outbuf.c_slice_buffer());
696
768
  }
@@ -2047,7 +2047,8 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
2047
2047
  !batch->recv_initial_metadata && !batch->recv_message &&
2048
2048
  !batch->recv_trailing_metadata);
2049
2049
  PollContext poll_ctx(this, &flusher);
2050
- Completed(batch->payload->cancel_stream.cancel_error, &flusher);
2050
+ Completed(batch->payload->cancel_stream.cancel_error,
2051
+ batch->payload->cancel_stream.tarpit, &flusher);
2051
2052
  if (is_last()) {
2052
2053
  batch.CompleteWith(&flusher);
2053
2054
  } else {
@@ -2166,7 +2167,8 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
2166
2167
  }
2167
2168
 
2168
2169
  // Handle cancellation.
2169
- void ServerCallData::Completed(grpc_error_handle error, Flusher* flusher) {
2170
+ void ServerCallData::Completed(grpc_error_handle error,
2171
+ bool tarpit_cancellation, Flusher* flusher) {
2170
2172
  if (grpc_trace_channel.enabled()) {
2171
2173
  gpr_log(
2172
2174
  GPR_DEBUG,
@@ -2196,6 +2198,7 @@ void ServerCallData::Completed(grpc_error_handle error, Flusher* flusher) {
2196
2198
  }));
2197
2199
  batch->cancel_stream = true;
2198
2200
  batch->payload->cancel_stream.cancel_error = error;
2201
+ batch->payload->cancel_stream.tarpit = tarpit_cancellation;
2199
2202
  flusher->Resume(batch);
2200
2203
  }
2201
2204
  break;
@@ -2331,7 +2334,8 @@ void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
2331
2334
  }
2332
2335
  Flusher flusher(this);
2333
2336
  PollContext poll_ctx(this, &flusher);
2334
- Completed(error, &flusher);
2337
+ Completed(error, recv_trailing_metadata_->get(GrpcTarPit()).has_value(),
2338
+ &flusher);
2335
2339
  flusher.AddClosure(original_recv_trailing_metadata_ready_, std::move(error),
2336
2340
  "continue recv trailing");
2337
2341
  }
@@ -2551,7 +2555,8 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
2551
2555
  break;
2552
2556
  case SendTrailingState::kInitial: {
2553
2557
  GPR_ASSERT(*md->get_pointer(GrpcStatusMetadata()) != GRPC_STATUS_OK);
2554
- Completed(StatusFromMetadata(*md), flusher);
2558
+ Completed(StatusFromMetadata(*md), md->get(GrpcTarPit()).has_value(),
2559
+ flusher);
2555
2560
  } break;
2556
2561
  case SendTrailingState::kCancelled:
2557
2562
  // Nothing to do.
@@ -739,7 +739,8 @@ class ServerCallData : public BaseCallData {
739
739
  struct SendInitialMetadata;
740
740
 
741
741
  // Shut things down when the call completes.
742
- void Completed(grpc_error_handle error, Flusher* flusher);
742
+ void Completed(grpc_error_handle error, bool tarpit_cancellation,
743
+ Flusher* flusher);
743
744
  // Construct a promise that will "call" the next filter.
744
745
  // Effectively:
745
746
  // - put the modified initial metadata into the batch being sent up.