grpc 1.39.0.pre1 → 1.40.0.pre1
Potentially problematic release.
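For reference, a minimal Gemfile sketch for trying this prerelease, assuming Bundler is used; prerelease versions such as 1.40.0.pre1 are never picked up automatically, so the version has to be pinned explicitly (or installed directly with gem install grpc -v 1.40.0.pre1).

# Gemfile
source 'https://rubygems.org'

# Prerelease versions are skipped by the resolver unless pinned.
gem 'grpc', '1.40.0.pre1'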
- checksums.yaml +4 -4
- data/Makefile +34 -18
- data/include/grpc/event_engine/event_engine.h +10 -14
- data/include/grpc/event_engine/slice_allocator.h +8 -33
- data/include/grpc/impl/codegen/grpc_types.h +18 -8
- data/include/grpc/impl/codegen/port_platform.h +24 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +413 -247
- data/src/core/ext/filters/client_channel/client_channel.h +42 -18
- data/src/core/ext/filters/client_channel/config_selector.h +19 -6
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +7 -8
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +12 -21
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +3 -5
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +17 -38
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +8 -15
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +3 -6
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +8 -12
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +14 -22
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +2 -9
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +5 -8
- data/src/core/ext/filters/client_channel/lb_policy.cc +1 -15
- data/src/core/ext/filters/client_channel/lb_policy.h +70 -46
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +101 -73
- data/src/core/ext/filters/client_channel/retry_filter.cc +392 -243
- data/src/core/ext/filters/client_channel/retry_service_config.cc +36 -26
- data/src/core/ext/filters/client_channel/retry_service_config.h +1 -1
- data/src/core/ext/filters/client_channel/service_config_call_data.h +45 -5
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +0 -6
- data/src/core/ext/filters/http/client/http_client_filter.cc +5 -2
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +5 -1
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +1 -1
- data/src/core/{lib/event_engine/slice_allocator.cc → ext/transport/chttp2/transport/chttp2_slice_allocator.cc} +15 -38
- data/src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.h +74 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +2 -6
- data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -4
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +8 -8
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +5 -5
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +639 -752
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +190 -69
- data/src/core/ext/transport/chttp2/transport/internal.h +1 -1
- data/src/core/ext/transport/chttp2/transport/parsing.cc +70 -54
- data/src/core/ext/transport/chttp2/transport/varint.cc +6 -4
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +56 -35
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +180 -76
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +35 -27
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +97 -48
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +45 -9
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +67 -7
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +66 -9
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +227 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c +46 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.h +121 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +1 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c +35 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.h +90 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +32 -24
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +120 -73
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +4 -2
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +15 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c +48 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.h +171 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +8 -6
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +27 -19
- data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +1 -0
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +24 -7
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +57 -0
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +29 -17
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +72 -0
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +3 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +4 -0
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +6 -5
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +15 -11
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +85 -43
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +274 -91
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +11 -8
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +30 -13
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +33 -5
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +115 -0
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c +60 -0
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.h +181 -0
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +1 -0
- data/src/core/ext/upb-generated/validate/validate.upb.c +82 -66
- data/src/core/ext/upb-generated/validate/validate.upb.h +220 -124
- data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c +15 -7
- data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +53 -52
- data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +318 -277
- data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +437 -410
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +198 -170
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c +9 -8
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +219 -163
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +15 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c +59 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.h +40 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +29 -25
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c +52 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +135 -125
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c +131 -123
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c +90 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +32 -24
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +69 -55
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +684 -664
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c +13 -10
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c +13 -10
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +441 -375
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +122 -114
- data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +1 -1
- data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +112 -79
- data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c +64 -0
- data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.h +50 -0
- data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +35 -32
- data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c +4 -4
- data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c +182 -160
- data/src/core/ext/xds/certificate_provider_store.h +1 -1
- data/src/core/ext/xds/xds_api.cc +320 -121
- data/src/core/ext/xds/xds_api.h +31 -2
- data/src/core/ext/xds/xds_bootstrap.cc +4 -1
- data/src/core/ext/xds/xds_client.cc +66 -43
- data/src/core/ext/xds/xds_client.h +0 -4
- data/src/core/ext/xds/xds_http_filters.cc +3 -2
- data/src/core/ext/xds/xds_http_filters.h +3 -0
- data/src/core/lib/channel/call_tracer.h +85 -0
- data/src/core/lib/channel/channel_stack.h +1 -1
- data/src/core/lib/channel/context.h +3 -0
- data/src/core/lib/channel/status_util.h +4 -0
- data/src/core/lib/compression/stream_compression.h +1 -1
- data/src/core/lib/compression/stream_compression_gzip.h +1 -1
- data/src/core/lib/compression/stream_compression_identity.h +1 -1
- data/src/core/lib/debug/stats.h +1 -1
- data/src/core/lib/gpr/murmur_hash.cc +4 -2
- data/src/core/lib/gprpp/manual_constructor.h +1 -1
- data/src/core/lib/gprpp/orphanable.h +3 -3
- data/src/core/lib/gprpp/sync.h +2 -30
- data/src/core/lib/iomgr/buffer_list.cc +1 -1
- data/src/core/lib/iomgr/ev_apple.h +1 -1
- data/src/core/lib/iomgr/event_engine/endpoint.cc +6 -8
- data/src/core/lib/iomgr/event_engine/tcp.cc +30 -10
- data/src/core/lib/iomgr/python_util.h +1 -1
- data/src/core/lib/iomgr/resource_quota.cc +2 -0
- data/src/core/lib/iomgr/tcp_client_windows.cc +2 -0
- data/src/core/lib/iomgr/tcp_server_posix.cc +1 -0
- data/src/core/lib/iomgr/timer_manager.cc +1 -1
- data/src/core/lib/json/json_reader.cc +1 -2
- data/src/core/lib/matchers/matchers.cc +8 -20
- data/src/core/lib/matchers/matchers.h +2 -1
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +49 -0
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +7 -0
- data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +6 -18
- data/src/core/lib/security/transport/security_handshaker.cc +12 -4
- data/src/core/lib/security/transport/server_auth_filter.cc +0 -7
- data/src/core/lib/slice/slice_internal.h +1 -0
- data/src/core/lib/surface/call.cc +5 -6
- data/src/core/lib/surface/server.cc +3 -1
- data/src/core/lib/surface/server.h +3 -3
- data/src/core/lib/surface/version.cc +2 -4
- data/src/ruby/ext/grpc/extconf.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/xxhash/xxhash.h +77 -195
- metadata +57 -40
- data/src/core/lib/gpr/arena.h +0 -47
@@ -216,6 +216,8 @@ class RetryFilter::CallData {
|
|
216
216
|
explicit CallAttempt(CallData* calld);
|
217
217
|
~CallAttempt() override;
|
218
218
|
|
219
|
+
bool lb_call_committed() const { return lb_call_committed_; }
|
220
|
+
|
219
221
|
// Constructs and starts whatever batches are needed on this call
|
220
222
|
// attempt.
|
221
223
|
void StartRetriableBatches();
|
@@ -255,7 +257,7 @@ class RetryFilter::CallData {
|
|
255
257
|
// Adds retriable recv_trailing_metadata op.
|
256
258
|
void AddRetriableRecvTrailingMetadataOp();
|
257
259
|
// Adds cancel_stream op.
|
258
|
-
void AddCancelStreamOp();
|
260
|
+
void AddCancelStreamOp(grpc_error_handle error);
|
259
261
|
|
260
262
|
private:
|
261
263
|
// Frees cached send ops that were completed by the completed batch in
|
@@ -263,21 +265,25 @@ class RetryFilter::CallData {
|
|
263
265
|
// committed.
|
264
266
|
void FreeCachedSendOpDataForCompletedBatch();
|
265
267
|
|
266
|
-
//
|
267
|
-
|
268
|
-
|
268
|
+
// If there is a pending recv_initial_metadata op, adds a closure
|
269
|
+
// to closures for recv_initial_metadata_ready.
|
270
|
+
void MaybeAddClosureForRecvInitialMetadataCallback(
|
271
|
+
grpc_error_handle error, CallCombinerClosureList* closures);
|
269
272
|
// Intercepts recv_initial_metadata_ready callback for retries.
|
270
273
|
// Commits the call and returns the initial metadata up the stack.
|
271
274
|
static void RecvInitialMetadataReady(void* arg, grpc_error_handle error);
|
272
275
|
|
273
|
-
//
|
274
|
-
|
276
|
+
// If there is a pending recv_message op, adds a closure to closures
|
277
|
+
// for recv_message_ready.
|
278
|
+
void MaybeAddClosureForRecvMessageCallback(
|
279
|
+
grpc_error_handle error, CallCombinerClosureList* closures);
|
275
280
|
// Intercepts recv_message_ready callback for retries.
|
276
281
|
// Commits the call and returns the message up the stack.
|
277
282
|
static void RecvMessageReady(void* arg, grpc_error_handle error);
|
278
283
|
|
279
|
-
//
|
280
|
-
|
284
|
+
// If there is a pending recv_trailing_metadata op, adds a closure to
|
285
|
+
// closures for recv_trailing_metadata_ready.
|
286
|
+
void MaybeAddClosureForRecvTrailingMetadataReady(
|
281
287
|
grpc_error_handle error, CallCombinerClosureList* closures);
|
282
288
|
// Adds any necessary closures for deferred batch completion
|
283
289
|
// callbacks to closures.
|
@@ -306,6 +312,10 @@ class RetryFilter::CallData {
|
|
306
312
|
// Callback used to intercept on_complete from LB calls.
|
307
313
|
static void OnComplete(void* arg, grpc_error_handle error);
|
308
314
|
|
315
|
+
// Callback used to handle on_complete for internally generated
|
316
|
+
// cancel_stream op.
|
317
|
+
static void OnCompleteForCancelOp(void* arg, grpc_error_handle error);
|
318
|
+
|
309
319
|
RefCountedPtr<CallAttempt> call_attempt_;
|
310
320
|
// The batch to use in the LB call.
|
311
321
|
// Its payload field points to CallAttempt::batch_payload_.
|
@@ -314,6 +324,30 @@ class RetryFilter::CallData {
|
|
314
324
|
grpc_closure on_complete_;
|
315
325
|
};
|
316
326
|
|
327
|
+
class AttemptDispatchController
|
328
|
+
: public ConfigSelector::CallDispatchController {
|
329
|
+
public:
|
330
|
+
explicit AttemptDispatchController(CallAttempt* call_attempt)
|
331
|
+
: call_attempt_(call_attempt) {}
|
332
|
+
|
333
|
+
// Will never be called.
|
334
|
+
bool ShouldRetry() override { return false; }
|
335
|
+
|
336
|
+
void Commit() override {
|
337
|
+
call_attempt_->lb_call_committed_ = true;
|
338
|
+
auto* calld = call_attempt_->calld_;
|
339
|
+
if (calld->retry_committed_) {
|
340
|
+
auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
|
341
|
+
calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA]
|
342
|
+
.value);
|
343
|
+
service_config_call_data->call_dispatch_controller()->Commit();
|
344
|
+
}
|
345
|
+
}
|
346
|
+
|
347
|
+
private:
|
348
|
+
CallAttempt* call_attempt_;
|
349
|
+
};
|
350
|
+
|
317
351
|
// Creates a BatchData object on the call's arena with the
|
318
352
|
// specified refcount. If set_on_complete is true, the batch's
|
319
353
|
// on_complete callback will be set to point to on_complete();
|
@@ -333,14 +367,26 @@ class RetryFilter::CallData {
|
|
333
367
|
const char* reason,
|
334
368
|
CallCombinerClosureList* closures);
|
335
369
|
|
370
|
+
// Helper function used to start a recv_trailing_metadata batch. This
|
371
|
+
// is used in the case where a recv_initial_metadata or recv_message
|
372
|
+
// op fails in a way that we know the call is over but when the application
|
373
|
+
// has not yet started its own recv_trailing_metadata op.
|
374
|
+
void AddBatchForInternalRecvTrailingMetadata(
|
375
|
+
CallCombinerClosureList* closures);
|
376
|
+
|
377
|
+
// Adds a batch to closures to cancel this call attempt.
|
378
|
+
void AddBatchForCancelOp(grpc_error_handle error,
|
379
|
+
CallCombinerClosureList* closures);
|
380
|
+
|
336
381
|
// Adds batches for pending batches to closures.
|
337
382
|
void AddBatchesForPendingBatches(CallCombinerClosureList* closures);
|
338
383
|
|
339
384
|
// Adds whatever batches are needed on this attempt to closures.
|
340
385
|
void AddRetriableBatches(CallCombinerClosureList* closures);
|
341
386
|
|
342
|
-
// Returns true if any op in the batch was not yet started on this
|
343
|
-
|
387
|
+
// Returns true if any send op in the batch was not yet started on this
|
388
|
+
// attempt.
|
389
|
+
bool PendingBatchContainsUnstartedSendOps(PendingBatch* pending);
|
344
390
|
|
345
391
|
// Returns true if there are cached send ops to replay.
|
346
392
|
bool HaveSendOpsToReplay();
|
@@ -350,28 +396,23 @@ class RetryFilter::CallData {
|
|
350
396
|
// its ref to us.
|
351
397
|
void MaybeSwitchToFastPath();
|
352
398
|
|
353
|
-
// Helper function used to start a recv_trailing_metadata batch. This
|
354
|
-
// is used in the case where a recv_initial_metadata or recv_message
|
355
|
-
// op fails in a way that we know the call is over but when the application
|
356
|
-
// has not yet started its own recv_trailing_metadata op.
|
357
|
-
void StartInternalRecvTrailingMetadata();
|
358
|
-
|
359
399
|
// Returns true if the call should be retried.
|
360
400
|
// If server_pushback_md is non-null, sets *server_pushback_ms.
|
361
401
|
bool ShouldRetry(absl::optional<grpc_status_code> status, bool is_lb_drop,
|
362
402
|
grpc_mdelem* server_pushback_md,
|
363
403
|
grpc_millis* server_pushback_ms);
|
364
404
|
|
365
|
-
//
|
366
|
-
|
367
|
-
void Cancel(CallCombinerClosureList* closures);
|
405
|
+
// Abandons the call attempt. Unrefs any deferred batches.
|
406
|
+
void Abandon();
|
368
407
|
|
369
408
|
static void OnPerAttemptRecvTimer(void* arg, grpc_error_handle error);
|
370
409
|
static void OnPerAttemptRecvTimerLocked(void* arg, grpc_error_handle error);
|
371
410
|
void MaybeCancelPerAttemptRecvTimer();
|
372
411
|
|
373
412
|
CallData* calld_;
|
374
|
-
|
413
|
+
AttemptDispatchController attempt_dispatch_controller_;
|
414
|
+
OrphanablePtr<ClientChannel::LoadBalancedCall> lb_call_;
|
415
|
+
bool lb_call_committed_ = false;
|
375
416
|
|
376
417
|
grpc_timer per_attempt_recv_timer_;
|
377
418
|
grpc_closure on_per_attempt_recv_timer_;
|
@@ -422,8 +463,16 @@ class RetryFilter::CallData {
|
|
422
463
|
grpc_error_handle recv_initial_metadata_error_ = GRPC_ERROR_NONE;
|
423
464
|
RefCountedPtr<BatchData> recv_message_ready_deferred_batch_;
|
424
465
|
grpc_error_handle recv_message_error_ = GRPC_ERROR_NONE;
|
425
|
-
|
426
|
-
|
466
|
+
struct OnCompleteDeferredBatch {
|
467
|
+
OnCompleteDeferredBatch(RefCountedPtr<BatchData> batch,
|
468
|
+
grpc_error_handle error)
|
469
|
+
: batch(std::move(batch)), error(error) {}
|
470
|
+
RefCountedPtr<BatchData> batch;
|
471
|
+
grpc_error_handle error;
|
472
|
+
};
|
473
|
+
// There cannot be more than 3 pending send op batches at a time.
|
474
|
+
absl::InlinedVector<OnCompleteDeferredBatch, 3>
|
475
|
+
on_complete_deferred_batches_;
|
427
476
|
RefCountedPtr<BatchData> recv_trailing_metadata_internal_batch_;
|
428
477
|
grpc_error_handle recv_trailing_metadata_error_ = GRPC_ERROR_NONE;
|
429
478
|
bool seen_recv_trailing_metadata_from_surface_ : 1;
|
@@ -431,7 +480,7 @@ class RetryFilter::CallData {
|
|
431
480
|
// save space but will also result in a data race because compiler
|
432
481
|
// will generate a 2 byte store which overwrites the meta-data
|
433
482
|
// fields upon setting this field.
|
434
|
-
bool
|
483
|
+
bool abandoned_ : 1;
|
435
484
|
};
|
436
485
|
|
437
486
|
CallData(RetryFilter* chand, const grpc_call_element_args& args);
|
@@ -472,7 +521,8 @@ class RetryFilter::CallData {
|
|
472
521
|
static void OnRetryTimer(void* arg, grpc_error_handle error);
|
473
522
|
static void OnRetryTimerLocked(void* arg, grpc_error_handle error);
|
474
523
|
|
475
|
-
|
524
|
+
OrphanablePtr<ClientChannel::LoadBalancedCall> CreateLoadBalancedCall(
|
525
|
+
ConfigSelector::CallDispatchController* call_dispatch_controller);
|
476
526
|
|
477
527
|
void CreateCallAttempt();
|
478
528
|
|
@@ -483,7 +533,6 @@ class RetryFilter::CallData {
|
|
483
533
|
BackOff retry_backoff_;
|
484
534
|
|
485
535
|
grpc_slice path_; // Request path.
|
486
|
-
gpr_cycle_counter call_start_time_;
|
487
536
|
grpc_millis deadline_;
|
488
537
|
Arena* arena_;
|
489
538
|
grpc_call_stack* owning_call_;
|
@@ -500,7 +549,7 @@ class RetryFilter::CallData {
|
|
500
549
|
// LB call used when we've committed to a call attempt and the retry
|
501
550
|
// state for that attempt is no longer needed. This provides a fast
|
502
551
|
// path for long-running streaming calls that minimizes overhead.
|
503
|
-
|
552
|
+
OrphanablePtr<ClientChannel::LoadBalancedCall> committed_call_;
|
504
553
|
|
505
554
|
// When are are not yet fully committed to a particular call (i.e.,
|
506
555
|
// either we might still retry or we have committed to the call but
|
@@ -614,6 +663,7 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld)
|
|
614
663
|
: RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace) ? "CallAttempt"
|
615
664
|
: nullptr),
|
616
665
|
calld_(calld),
|
666
|
+
attempt_dispatch_controller_(this),
|
617
667
|
batch_payload_(calld->call_context_),
|
618
668
|
started_send_initial_metadata_(false),
|
619
669
|
completed_send_initial_metadata_(false),
|
@@ -624,8 +674,8 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld)
|
|
624
674
|
started_recv_trailing_metadata_(false),
|
625
675
|
completed_recv_trailing_metadata_(false),
|
626
676
|
seen_recv_trailing_metadata_from_surface_(false),
|
627
|
-
|
628
|
-
lb_call_ = calld->CreateLoadBalancedCall();
|
677
|
+
abandoned_(false) {
|
678
|
+
lb_call_ = calld->CreateLoadBalancedCall(&attempt_dispatch_controller_);
|
629
679
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
630
680
|
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: create lb_call=%p",
|
631
681
|
calld->chand_, calld, this, lb_call_.get());
|
@@ -677,13 +727,9 @@ void RetryFilter::CallData::CallAttempt::FreeCachedSendOpDataAfterCommit() {
|
|
677
727
|
}
|
678
728
|
}
|
679
729
|
|
680
|
-
bool RetryFilter::CallData::CallAttempt::
|
730
|
+
bool RetryFilter::CallData::CallAttempt::PendingBatchContainsUnstartedSendOps(
|
681
731
|
PendingBatch* pending) {
|
682
|
-
|
683
|
-
// only recv ops are always started immediately.
|
684
|
-
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
685
|
-
return false;
|
686
|
-
}
|
732
|
+
if (pending->batch->on_complete == nullptr) return false;
|
687
733
|
if (pending->batch->send_initial_metadata &&
|
688
734
|
!started_send_initial_metadata_) {
|
689
735
|
return true;
|
@@ -712,8 +758,8 @@ void RetryFilter::CallData::CallAttempt::MaybeSwitchToFastPath() {
|
|
712
758
|
// If we're not yet committed, we can't switch yet.
|
713
759
|
// TODO(roth): As part of implementing hedging, this logic needs to
|
714
760
|
// check that *this* call attempt is the one that we've committed to.
|
715
|
-
// Might need to replace
|
716
|
-
// in flight,
|
761
|
+
// Might need to replace abandoned_ with an enum indicating whether we're
|
762
|
+
// in flight, abandoned, or the winning call attempt.
|
717
763
|
if (!calld_->retry_committed_) return;
|
718
764
|
// If we've already switched to fast path, there's nothing to do here.
|
719
765
|
if (calld_->committed_call_ != nullptr) return;
|
@@ -735,24 +781,6 @@ void RetryFilter::CallData::CallAttempt::MaybeSwitchToFastPath() {
|
|
735
781
|
calld_->call_attempt_.reset(DEBUG_LOCATION, "MaybeSwitchToFastPath");
|
736
782
|
}
|
737
783
|
|
738
|
-
void RetryFilter::CallData::CallAttempt::StartInternalRecvTrailingMetadata() {
|
739
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
740
|
-
gpr_log(GPR_INFO,
|
741
|
-
"chand=%p calld=%p attempt=%p: call failed but "
|
742
|
-
"recv_trailing_metadata not started; starting it internally",
|
743
|
-
calld_->chand_, calld_, this);
|
744
|
-
}
|
745
|
-
// Create batch_data with 2 refs, since this batch will be unreffed twice:
|
746
|
-
// once for the recv_trailing_metadata_ready callback when the batch
|
747
|
-
// completes, and again when we actually get a recv_trailing_metadata
|
748
|
-
// op from the surface.
|
749
|
-
BatchData* batch_data = CreateBatch(2, false /* set_on_complete */);
|
750
|
-
batch_data->AddRetriableRecvTrailingMetadataOp();
|
751
|
-
recv_trailing_metadata_internal_batch_.reset(batch_data);
|
752
|
-
// Note: This will release the call combiner.
|
753
|
-
lb_call_->StartTransportStreamOpBatch(batch_data->batch());
|
754
|
-
}
|
755
|
-
|
756
784
|
// If there are any cached send ops that need to be replayed on the
|
757
785
|
// current call attempt, creates and returns a new batch to replay those ops.
|
758
786
|
// Otherwise, returns nullptr.
|
@@ -836,12 +864,40 @@ void RetryFilter::CallData::CallAttempt::AddClosureForBatch(
|
|
836
864
|
closures->Add(&batch->handler_private.closure, GRPC_ERROR_NONE, reason);
|
837
865
|
}
|
838
866
|
|
867
|
+
void RetryFilter::CallData::CallAttempt::
|
868
|
+
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
|
869
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
870
|
+
gpr_log(GPR_INFO,
|
871
|
+
"chand=%p calld=%p attempt=%p: call failed but "
|
872
|
+
"recv_trailing_metadata not started; starting it internally",
|
873
|
+
calld_->chand_, calld_, this);
|
874
|
+
}
|
875
|
+
// Create batch_data with 2 refs, since this batch will be unreffed twice:
|
876
|
+
// once for the recv_trailing_metadata_ready callback when the batch
|
877
|
+
// completes, and again when we actually get a recv_trailing_metadata
|
878
|
+
// op from the surface.
|
879
|
+
BatchData* batch_data = CreateBatch(2, false /* set_on_complete */);
|
880
|
+
batch_data->AddRetriableRecvTrailingMetadataOp();
|
881
|
+
recv_trailing_metadata_internal_batch_.reset(batch_data);
|
882
|
+
AddClosureForBatch(batch_data->batch(),
|
883
|
+
"starting internal recv_trailing_metadata", closures);
|
884
|
+
}
|
885
|
+
|
886
|
+
void RetryFilter::CallData::CallAttempt::AddBatchForCancelOp(
|
887
|
+
grpc_error_handle error, CallCombinerClosureList* closures) {
|
888
|
+
BatchData* cancel_batch_data = CreateBatch(1, /*set_on_complete=*/true);
|
889
|
+
cancel_batch_data->AddCancelStreamOp(error);
|
890
|
+
AddClosureForBatch(cancel_batch_data->batch(),
|
891
|
+
"start cancellation batch on call attempt", closures);
|
892
|
+
}
|
893
|
+
|
839
894
|
void RetryFilter::CallData::CallAttempt::AddBatchesForPendingBatches(
|
840
895
|
CallCombinerClosureList* closures) {
|
841
896
|
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld_->pending_batches_); ++i) {
|
842
897
|
PendingBatch* pending = &calld_->pending_batches_[i];
|
843
898
|
grpc_transport_stream_op_batch* batch = pending->batch;
|
844
899
|
if (batch == nullptr) continue;
|
900
|
+
bool has_send_ops = false;
|
845
901
|
// Skip any batch that either (a) has already been started on this
|
846
902
|
// call attempt or (b) we can't start yet because we're still
|
847
903
|
// replaying send ops that need to be completed first.
|
@@ -852,60 +908,84 @@ void RetryFilter::CallData::CallAttempt::AddBatchesForPendingBatches(
|
|
852
908
|
// starting a recv op due to it being in the same batch with a send
|
853
909
|
// op. If/when we revamp the callback protocol in
|
854
910
|
// transport_stream_op_batch, we may be able to fix this.
|
855
|
-
if (batch->send_initial_metadata
|
856
|
-
continue;
|
911
|
+
if (batch->send_initial_metadata) {
|
912
|
+
if (started_send_initial_metadata_) continue;
|
913
|
+
has_send_ops = true;
|
857
914
|
}
|
858
|
-
if (batch->send_message
|
859
|
-
|
860
|
-
|
915
|
+
if (batch->send_message) {
|
916
|
+
if (completed_send_message_count_ < started_send_message_count_) {
|
917
|
+
continue;
|
918
|
+
}
|
919
|
+
has_send_ops = true;
|
861
920
|
}
|
862
921
|
// Note that we only start send_trailing_metadata if we have no more
|
863
922
|
// send_message ops to start, since we can't send down any more
|
864
923
|
// send_message ops after send_trailing_metadata.
|
865
|
-
if (batch->send_trailing_metadata
|
866
|
-
|
867
|
-
|
868
|
-
|
869
|
-
|
924
|
+
if (batch->send_trailing_metadata) {
|
925
|
+
if (started_send_message_count_ + batch->send_message <
|
926
|
+
calld_->send_messages_.size() ||
|
927
|
+
started_send_trailing_metadata_) {
|
928
|
+
continue;
|
929
|
+
}
|
930
|
+
has_send_ops = true;
|
870
931
|
}
|
871
|
-
|
872
|
-
|
932
|
+
int num_callbacks = has_send_ops; // All send ops share one callback.
|
933
|
+
if (batch->recv_initial_metadata) {
|
934
|
+
if (started_recv_initial_metadata_) continue;
|
935
|
+
++num_callbacks;
|
873
936
|
}
|
874
|
-
if (batch->recv_message
|
875
|
-
|
876
|
-
|
937
|
+
if (batch->recv_message) {
|
938
|
+
if (completed_recv_message_count_ < started_recv_message_count_) {
|
939
|
+
continue;
|
940
|
+
}
|
941
|
+
++num_callbacks;
|
877
942
|
}
|
878
|
-
if (batch->recv_trailing_metadata
|
879
|
-
|
880
|
-
|
881
|
-
|
882
|
-
|
883
|
-
|
884
|
-
|
885
|
-
|
886
|
-
|
887
|
-
|
888
|
-
|
889
|
-
|
890
|
-
|
891
|
-
|
892
|
-
|
893
|
-
|
894
|
-
|
895
|
-
|
896
|
-
|
897
|
-
|
898
|
-
|
899
|
-
|
900
|
-
|
943
|
+
if (batch->recv_trailing_metadata) {
|
944
|
+
if (started_recv_trailing_metadata_) {
|
945
|
+
seen_recv_trailing_metadata_from_surface_ = true;
|
946
|
+
// If we previously completed a recv_trailing_metadata op
|
947
|
+
// initiated by AddBatchForInternalRecvTrailingMetadata(), use the
|
948
|
+
// result of that instead of trying to re-start this op.
|
949
|
+
if (GPR_UNLIKELY(recv_trailing_metadata_internal_batch_ != nullptr)) {
|
950
|
+
// If the batch completed, then trigger the completion callback
|
951
|
+
// directly, so that we return the previously returned results to
|
952
|
+
// the application. Otherwise, just unref the internally started
|
953
|
+
// batch, since we'll propagate the completion when it completes.
|
954
|
+
if (completed_recv_trailing_metadata_) {
|
955
|
+
closures->Add(
|
956
|
+
&recv_trailing_metadata_ready_, recv_trailing_metadata_error_,
|
957
|
+
"re-executing recv_trailing_metadata_ready to propagate "
|
958
|
+
"internally triggered result");
|
959
|
+
// Ref will be released by callback.
|
960
|
+
recv_trailing_metadata_internal_batch_.release();
|
961
|
+
} else {
|
962
|
+
recv_trailing_metadata_internal_batch_.reset(
|
963
|
+
DEBUG_LOCATION,
|
964
|
+
"internally started recv_trailing_metadata batch pending and "
|
965
|
+
"recv_trailing_metadata started from surface");
|
966
|
+
GRPC_ERROR_UNREF(recv_trailing_metadata_error_);
|
967
|
+
}
|
968
|
+
recv_trailing_metadata_error_ = GRPC_ERROR_NONE;
|
901
969
|
}
|
902
|
-
|
970
|
+
// We don't want the fact that we've already started this op internally
|
971
|
+
// to prevent us from adding a batch that may contain other ops.
|
972
|
+
// Instead, we'll just skip adding this op below.
|
973
|
+
if (num_callbacks == 0) continue;
|
974
|
+
} else {
|
975
|
+
++num_callbacks;
|
903
976
|
}
|
904
|
-
continue;
|
905
977
|
}
|
906
|
-
// If we're already committed and
|
907
|
-
// the batch as-is
|
908
|
-
|
978
|
+
// If we're already committed and the following conditions are met,
|
979
|
+
// just send the batch down as-is:
|
980
|
+
// - The batch contains no cached send ops. (If it does, we need
|
981
|
+
// the logic below to use the cached payloads.)
|
982
|
+
// - The batch does not contain recv_trailing_metadata when we have
|
983
|
+
// already started an internal recv_trailing_metadata batch. (If
|
984
|
+
// we've already started an internal recv_trailing_metadata batch,
|
985
|
+
// then we need the logic below to send all ops in the batch
|
986
|
+
// *except* the recv_trailing_metadata op.)
|
987
|
+
if (calld_->retry_committed_ && !pending->send_ops_cached &&
|
988
|
+
(!batch->recv_trailing_metadata || !started_recv_trailing_metadata_)) {
|
909
989
|
AddClosureForBatch(
|
910
990
|
batch,
|
911
991
|
"start non-replayable pending batch on call attempt after commit",
|
@@ -914,12 +994,6 @@ void RetryFilter::CallData::CallAttempt::AddBatchesForPendingBatches(
|
|
914
994
|
continue;
|
915
995
|
}
|
916
996
|
// Create batch with the right number of callbacks.
|
917
|
-
const bool has_send_ops = batch->send_initial_metadata ||
|
918
|
-
batch->send_message ||
|
919
|
-
batch->send_trailing_metadata;
|
920
|
-
const int num_callbacks = has_send_ops + batch->recv_initial_metadata +
|
921
|
-
batch->recv_message +
|
922
|
-
batch->recv_trailing_metadata;
|
923
997
|
BatchData* batch_data =
|
924
998
|
CreateBatch(num_callbacks, has_send_ops /* set_on_complete */);
|
925
999
|
// Cache send ops if needed.
|
@@ -947,7 +1021,7 @@ void RetryFilter::CallData::CallAttempt::AddBatchesForPendingBatches(
|
|
947
1021
|
batch_data->AddRetriableRecvMessageOp();
|
948
1022
|
}
|
949
1023
|
// recv_trailing_metadata.
|
950
|
-
if (batch->recv_trailing_metadata) {
|
1024
|
+
if (batch->recv_trailing_metadata && !started_recv_trailing_metadata_) {
|
951
1025
|
batch_data->AddRetriableRecvTrailingMetadataOp();
|
952
1026
|
}
|
953
1027
|
AddClosureForBatch(batch_data->batch(),
|
@@ -1084,14 +1158,24 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
|
|
1084
1158
|
*server_pushback_ms = static_cast<grpc_millis>(ms);
|
1085
1159
|
}
|
1086
1160
|
}
|
1161
|
+
// Check with call dispatch controller.
|
1162
|
+
auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
|
1163
|
+
calld_->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
|
1164
|
+
if (!service_config_call_data->call_dispatch_controller()->ShouldRetry()) {
|
1165
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
1166
|
+
gpr_log(
|
1167
|
+
GPR_INFO,
|
1168
|
+
"chand=%p calld=%p attempt=%p: call dispatch controller denied retry",
|
1169
|
+
calld_->chand_, calld_, this);
|
1170
|
+
}
|
1171
|
+
return false;
|
1172
|
+
}
|
1087
1173
|
// We should retry.
|
1088
1174
|
return true;
|
1089
1175
|
}
|
1090
1176
|
|
1091
|
-
void RetryFilter::CallData::CallAttempt::
|
1092
|
-
|
1093
|
-
// Record that this attempt has been cancelled.
|
1094
|
-
cancelled_ = true;
|
1177
|
+
void RetryFilter::CallData::CallAttempt::Abandon() {
|
1178
|
+
abandoned_ = true;
|
1095
1179
|
// Unref batches for deferred completion callbacks that will now never
|
1096
1180
|
// be invoked.
|
1097
1181
|
if (started_recv_trailing_metadata_ &&
|
@@ -1112,17 +1196,12 @@ void RetryFilter::CallData::CallAttempt::Cancel(
|
|
1112
1196
|
DEBUG_LOCATION, "unref deferred recv_message_ready batch due to retry");
|
1113
1197
|
GRPC_ERROR_UNREF(recv_message_error_);
|
1114
1198
|
recv_message_error_ = GRPC_ERROR_NONE;
|
1115
|
-
|
1116
|
-
|
1117
|
-
|
1118
|
-
|
1119
|
-
|
1120
|
-
|
1121
|
-
// hasn't received any ops.
|
1122
|
-
BatchData* cancel_batch_data = CreateBatch(1, /*set_on_complete=*/true);
|
1123
|
-
cancel_batch_data->AddCancelStreamOp();
|
1124
|
-
AddClosureForBatch(cancel_batch_data->batch(),
|
1125
|
-
"start cancellation batch on call attempt", closures);
|
1199
|
+
for (auto& on_complete_deferred_batch : on_complete_deferred_batches_) {
|
1200
|
+
on_complete_deferred_batch.batch.reset(
|
1201
|
+
DEBUG_LOCATION, "unref deferred on_complete batch due to retry");
|
1202
|
+
GRPC_ERROR_UNREF(on_complete_deferred_batch.error);
|
1203
|
+
}
|
1204
|
+
on_complete_deferred_batches_.clear();
|
1126
1205
|
}
|
1127
1206
|
|
1128
1207
|
void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimer(
|
@@ -1154,11 +1233,17 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
|
|
1154
1233
|
// Cancel this attempt.
|
1155
1234
|
// TODO(roth): When implementing hedging, we should not cancel the
|
1156
1235
|
// current attempt.
|
1157
|
-
call_attempt->
|
1236
|
+
call_attempt->AddBatchForCancelOp(
|
1237
|
+
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
|
1238
|
+
"retry perAttemptRecvTimeout exceeded"),
|
1239
|
+
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED),
|
1240
|
+
&closures);
|
1158
1241
|
// Check whether we should retry.
|
1159
1242
|
if (call_attempt->ShouldRetry(
|
1160
1243
|
/*status=*/absl::nullopt, /*is_lb_drop=*/false,
|
1161
1244
|
/*server_pushback_md=*/nullptr, /*server_pushback_ms=*/nullptr)) {
|
1245
|
+
// Mark current attempt as abandoned.
|
1246
|
+
call_attempt->Abandon();
|
1162
1247
|
// We are retrying. Start backoff timer.
|
1163
1248
|
calld->StartRetryTimer(/*server_pushback_ms=*/-1);
|
1164
1249
|
} else {
|
@@ -1212,8 +1297,7 @@ RetryFilter::CallData::CallAttempt::BatchData::BatchData(
|
|
1212
1297
|
GRPC_CALL_STACK_REF(call_attempt_->calld_->owning_call_, "Retry BatchData");
|
1213
1298
|
batch_.payload = &call_attempt_->batch_payload_;
|
1214
1299
|
if (set_on_complete) {
|
1215
|
-
GRPC_CLOSURE_INIT(&on_complete_, OnComplete, this,
|
1216
|
-
grpc_schedule_on_exec_ctx);
|
1300
|
+
GRPC_CLOSURE_INIT(&on_complete_, OnComplete, this, nullptr);
|
1217
1301
|
batch_.on_complete = &on_complete_;
|
1218
1302
|
}
|
1219
1303
|
}
|
@@ -1264,22 +1348,27 @@ void RetryFilter::CallData::CallAttempt::BatchData::
|
|
1264
1348
|
//
|
1265
1349
|
|
1266
1350
|
void RetryFilter::CallData::CallAttempt::BatchData::
|
1267
|
-
|
1268
|
-
|
1269
|
-
auto* call_attempt = batch_data->call_attempt_.get();
|
1351
|
+
MaybeAddClosureForRecvInitialMetadataCallback(
|
1352
|
+
grpc_error_handle error, CallCombinerClosureList* closures) {
|
1270
1353
|
// Find pending batch.
|
1271
|
-
PendingBatch* pending =
|
1354
|
+
PendingBatch* pending = call_attempt_->calld_->PendingBatchFind(
|
1272
1355
|
"invoking recv_initial_metadata_ready for",
|
1273
1356
|
[](grpc_transport_stream_op_batch* batch) {
|
1274
1357
|
return batch->recv_initial_metadata &&
|
1275
1358
|
batch->payload->recv_initial_metadata
|
1276
1359
|
.recv_initial_metadata_ready != nullptr;
|
1277
1360
|
});
|
1278
|
-
|
1361
|
+
if (pending == nullptr) {
|
1362
|
+
GRPC_ERROR_UNREF(error);
|
1363
|
+
return;
|
1364
|
+
}
|
1279
1365
|
// Return metadata.
|
1280
1366
|
grpc_metadata_batch_move(
|
1281
|
-
&
|
1367
|
+
&call_attempt_->recv_initial_metadata_,
|
1282
1368
|
pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
|
1369
|
+
// Propagate trailing_metadata_available.
|
1370
|
+
*pending->batch->payload->recv_initial_metadata.trailing_metadata_available =
|
1371
|
+
call_attempt_->trailing_metadata_available_;
|
1283
1372
|
// Update bookkeeping.
|
1284
1373
|
// Note: Need to do this before invoking the callback, since invoking
|
1285
1374
|
// the callback will result in yielding the call combiner.
|
@@ -1288,11 +1377,10 @@ void RetryFilter::CallData::CallAttempt::BatchData::
|
|
1288
1377
|
.recv_initial_metadata_ready;
|
1289
1378
|
pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
|
1290
1379
|
nullptr;
|
1291
|
-
|
1292
|
-
|
1293
|
-
|
1294
|
-
|
1295
|
-
GRPC_ERROR_REF(error));
|
1380
|
+
call_attempt_->calld_->MaybeClearPendingBatch(pending);
|
1381
|
+
// Add callback to closures.
|
1382
|
+
closures->Add(recv_initial_metadata_ready, error,
|
1383
|
+
"recv_initial_metadata_ready for pending batch");
|
1296
1384
|
}
|
1297
1385
|
|
1298
1386
|
void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
|
@@ -1302,17 +1390,18 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
|
|
1302
1390
|
CallData* calld = call_attempt->calld_;
|
1303
1391
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
1304
1392
|
gpr_log(GPR_INFO,
|
1305
|
-
"chand=%p calld=%p attempt=%p:
|
1306
|
-
"error=%s",
|
1307
|
-
calld->chand_, calld, call_attempt,
|
1393
|
+
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
1394
|
+
"got recv_initial_metadata_ready, error=%s",
|
1395
|
+
calld->chand_, calld, call_attempt, batch_data.get(),
|
1308
1396
|
grpc_error_std_string(error).c_str());
|
1309
1397
|
}
|
1310
1398
|
call_attempt->completed_recv_initial_metadata_ = true;
|
1311
|
-
// If this attempt has been
|
1399
|
+
// If this attempt has been abandoned, then we're not going to use the
|
1312
1400
|
// result of this recv_initial_metadata op, so do nothing.
|
1313
|
-
if (call_attempt->
|
1314
|
-
GRPC_CALL_COMBINER_STOP(
|
1315
|
-
|
1401
|
+
if (call_attempt->abandoned_) {
|
1402
|
+
GRPC_CALL_COMBINER_STOP(
|
1403
|
+
calld->call_combiner_,
|
1404
|
+
"recv_initial_metadata_ready for abandoned attempt");
|
1316
1405
|
return;
|
1317
1406
|
}
|
1318
1407
|
// Cancel per-attempt recv timer, if any.
|
@@ -1335,15 +1424,16 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
|
|
1335
1424
|
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
|
1336
1425
|
std::move(batch_data);
|
1337
1426
|
call_attempt->recv_initial_metadata_error_ = GRPC_ERROR_REF(error);
|
1427
|
+
CallCombinerClosureList closures;
|
1428
|
+
if (error != GRPC_ERROR_NONE) {
|
1429
|
+
call_attempt->AddBatchForCancelOp(GRPC_ERROR_REF(error), &closures);
|
1430
|
+
}
|
1338
1431
|
if (!call_attempt->started_recv_trailing_metadata_) {
|
1339
1432
|
// recv_trailing_metadata not yet started by application; start it
|
1340
1433
|
// ourselves to get status.
|
1341
|
-
call_attempt->
|
1342
|
-
} else {
|
1343
|
-
GRPC_CALL_COMBINER_STOP(
|
1344
|
-
calld->call_combiner_,
|
1345
|
-
"recv_initial_metadata_ready trailers-only or error");
|
1434
|
+
call_attempt->AddBatchForInternalRecvTrailingMetadata(&closures);
|
1346
1435
|
}
|
1436
|
+
closures.RunClosures(calld->call_combiner_);
|
1347
1437
|
return;
|
1348
1438
|
}
|
1349
1439
|
// Received valid initial metadata, so commit the call.
|
@@ -1353,40 +1443,43 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
|
|
1353
1443
|
call_attempt->MaybeSwitchToFastPath();
|
1354
1444
|
}
|
1355
1445
|
// Invoke the callback to return the result to the surface.
|
1356
|
-
|
1357
|
-
|
1446
|
+
CallCombinerClosureList closures;
|
1447
|
+
batch_data->MaybeAddClosureForRecvInitialMetadataCallback(
|
1448
|
+
GRPC_ERROR_REF(error), &closures);
|
1449
|
+
closures.RunClosures(calld->call_combiner_);
|
1358
1450
|
}
|
1359
1451
|
|
1360
1452
|
//
|
1361
1453
|
// recv_message callback handling
|
1362
1454
|
//
|
1363
1455
|
|
1364
|
-
void RetryFilter::CallData::CallAttempt::BatchData::
|
1365
|
-
|
1366
|
-
|
1367
|
-
CallAttempt* call_attempt = batch_data->call_attempt_.get();
|
1368
|
-
CallData* calld = call_attempt->calld_;
|
1456
|
+
void RetryFilter::CallData::CallAttempt::BatchData::
|
1457
|
+
MaybeAddClosureForRecvMessageCallback(grpc_error_handle error,
|
1458
|
+
CallCombinerClosureList* closures) {
|
1369
1459
|
// Find pending op.
|
1370
|
-
PendingBatch* pending =
|
1460
|
+
PendingBatch* pending = call_attempt_->calld_->PendingBatchFind(
|
1371
1461
|
"invoking recv_message_ready for",
|
1372
1462
|
[](grpc_transport_stream_op_batch* batch) {
|
1373
1463
|
return batch->recv_message &&
|
1374
1464
|
batch->payload->recv_message.recv_message_ready != nullptr;
|
1375
1465
|
});
|
1376
|
-
|
1466
|
+
if (pending == nullptr) {
|
1467
|
+
GRPC_ERROR_UNREF(error);
|
1468
|
+
return;
|
1469
|
+
}
|
1377
1470
|
// Return payload.
|
1378
1471
|
*pending->batch->payload->recv_message.recv_message =
|
1379
|
-
std::move(
|
1472
|
+
std::move(call_attempt_->recv_message_);
|
1380
1473
|
// Update bookkeeping.
|
1381
1474
|
// Note: Need to do this before invoking the callback, since invoking
|
1382
1475
|
// the callback will result in yielding the call combiner.
|
1383
1476
|
grpc_closure* recv_message_ready =
|
1384
1477
|
pending->batch->payload->recv_message.recv_message_ready;
|
1385
1478
|
pending->batch->payload->recv_message.recv_message_ready = nullptr;
|
1386
|
-
|
1387
|
-
|
1388
|
-
|
1389
|
-
|
1479
|
+
call_attempt_->calld_->MaybeClearPendingBatch(pending);
|
1480
|
+
// Add callback to closures.
|
1481
|
+
closures->Add(recv_message_ready, error,
|
1482
|
+
"recv_message_ready for pending batch");
|
1390
1483
|
}
|
1391
1484
|
|
1392
1485
|
void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
|
@@ -1396,16 +1489,17 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
|
|
1396
1489
|
CallData* calld = call_attempt->calld_;
|
1397
1490
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
1398
1491
|
gpr_log(GPR_INFO,
|
1399
|
-
"chand=%p calld=%p attempt=%p:
|
1400
|
-
|
1492
|
+
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
1493
|
+
"got recv_message_ready, error=%s",
|
1494
|
+
calld->chand_, calld, call_attempt, batch_data.get(),
|
1401
1495
|
grpc_error_std_string(error).c_str());
|
1402
1496
|
}
|
1403
1497
|
++call_attempt->completed_recv_message_count_;
|
1404
|
-
// If this attempt has been
|
1498
|
+
// If this attempt has been abandoned, then we're not going to use the
|
1405
1499
|
// result of this recv_message op, so do nothing.
|
1406
|
-
if (call_attempt->
|
1500
|
+
if (call_attempt->abandoned_) {
|
1407
1501
|
GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
|
1408
|
-
"recv_message_ready
|
1502
|
+
"recv_message_ready for abandoned attempt");
|
1409
1503
|
return;
|
1410
1504
|
}
|
1411
1505
|
// Cancel per-attempt recv timer, if any.
|
@@ -1427,14 +1521,16 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
|
|
1427
1521
|
}
|
1428
1522
|
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
|
1429
1523
|
call_attempt->recv_message_error_ = GRPC_ERROR_REF(error);
|
1524
|
+
CallCombinerClosureList closures;
|
1525
|
+
if (error != GRPC_ERROR_NONE) {
|
1526
|
+
call_attempt->AddBatchForCancelOp(GRPC_ERROR_REF(error), &closures);
|
1527
|
+
}
|
1430
1528
|
if (!call_attempt->started_recv_trailing_metadata_) {
|
1431
1529
|
// recv_trailing_metadata not yet started by application; start it
|
1432
1530
|
// ourselves to get status.
|
1433
|
-
call_attempt->
|
1434
|
-
} else {
|
1435
|
-
GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
|
1436
|
-
"recv_message_ready null");
|
1531
|
+
call_attempt->AddBatchForInternalRecvTrailingMetadata(&closures);
|
1437
1532
|
}
|
1533
|
+
closures.RunClosures(calld->call_combiner_);
|
1438
1534
|
return;
|
1439
1535
|
}
|
1440
1536
|
// Received a valid message, so commit the call.
|
@@ -1444,8 +1540,10 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
|
|
1444
1540
|
call_attempt->MaybeSwitchToFastPath();
|
1445
1541
|
}
|
1446
1542
|
// Invoke the callback to return the result to the surface.
|
1447
|
-
|
1448
|
-
|
1543
|
+
CallCombinerClosureList closures;
|
1544
|
+
batch_data->MaybeAddClosureForRecvMessageCallback(GRPC_ERROR_REF(error),
|
1545
|
+
&closures);
|
1546
|
+
closures.RunClosures(calld->call_combiner_);
|
1449
1547
|
}
|
1450
1548
|
|
1451
1549
|
//
|
@@ -1480,23 +1578,28 @@ void GetCallStatus(grpc_millis deadline, grpc_metadata_batch* md_batch,
|
|
1480
1578
|
} // namespace
|
1481
1579
|
|
1482
1580
|
void RetryFilter::CallData::CallAttempt::BatchData::
|
1483
|
-
|
1484
|
-
|
1581
|
+
MaybeAddClosureForRecvTrailingMetadataReady(
|
1582
|
+
grpc_error_handle error, CallCombinerClosureList* closures) {
|
1485
1583
|
auto* calld = call_attempt_->calld_;
|
1486
1584
|
// Find pending batch.
|
1487
1585
|
PendingBatch* pending = calld->PendingBatchFind(
|
1488
|
-
"invoking
|
1586
|
+
"invoking recv_trailing_metadata_ready for",
|
1489
1587
|
[](grpc_transport_stream_op_batch* batch) {
|
1490
1588
|
return batch->recv_trailing_metadata &&
|
1491
1589
|
batch->payload->recv_trailing_metadata
|
1492
1590
|
.recv_trailing_metadata_ready != nullptr;
|
1493
1591
|
});
|
1494
1592
|
// If we generated the recv_trailing_metadata op internally via
|
1495
|
-
//
|
1593
|
+
// AddBatchForInternalRecvTrailingMetadata(), then there will be no
|
1594
|
+
// pending batch.
|
1496
1595
|
if (pending == nullptr) {
|
1497
1596
|
call_attempt_->recv_trailing_metadata_error_ = error;
|
1498
1597
|
return;
|
1499
1598
|
}
|
1599
|
+
// Copy transport stats to be delivered up to the surface.
|
1600
|
+
grpc_transport_move_stats(
|
1601
|
+
&call_attempt_->collect_stats_,
|
1602
|
+
pending->batch->payload->recv_trailing_metadata.collect_stats);
|
1500
1603
|
// Return metadata.
|
1501
1604
|
grpc_metadata_batch_move(
|
1502
1605
|
&call_attempt_->recv_trailing_metadata_,
|
@@ -1514,38 +1617,32 @@ void RetryFilter::CallData::CallAttempt::BatchData::
|
|
1514
1617
|
void RetryFilter::CallData::CallAttempt::BatchData::
|
1515
1618
|
AddClosuresForDeferredCompletionCallbacks(
|
1516
1619
|
CallCombinerClosureList* closures) {
|
1517
|
-
|
1518
|
-
|
1519
|
-
|
1520
|
-
|
1521
|
-
|
1522
|
-
|
1523
|
-
|
1524
|
-
|
1525
|
-
|
1526
|
-
|
1527
|
-
|
1528
|
-
|
1529
|
-
|
1530
|
-
|
1531
|
-
|
1532
|
-
|
1533
|
-
|
1534
|
-
|
1535
|
-
|
1536
|
-
|
1537
|
-
|
1538
|
-
|
1539
|
-
|
1540
|
-
|
1541
|
-
|
1542
|
-
|
1543
|
-
if (GPR_UNLIKELY(call_attempt_->on_complete_deferred_batch_ != nullptr)) {
|
1544
|
-
closures->Add(&call_attempt_->on_complete_deferred_batch_->on_complete_,
|
1545
|
-
call_attempt_->on_complete_error_, "resuming on_complete");
|
1546
|
-
call_attempt_->on_complete_deferred_batch_.release();
|
1547
|
-
}
|
1548
|
-
}
|
1620
|
+
// Add closure for deferred recv_initial_metadata_ready.
|
1621
|
+
if (GPR_UNLIKELY(call_attempt_->recv_initial_metadata_ready_deferred_batch_ !=
|
1622
|
+
nullptr)) {
|
1623
|
+
MaybeAddClosureForRecvInitialMetadataCallback(
|
1624
|
+
call_attempt_->recv_initial_metadata_error_, closures);
|
1625
|
+
call_attempt_->recv_initial_metadata_ready_deferred_batch_.reset(
|
1626
|
+
DEBUG_LOCATION, "resuming deferred recv_initial_metadata_ready");
|
1627
|
+
call_attempt_->recv_initial_metadata_error_ = GRPC_ERROR_NONE;
|
1628
|
+
}
|
1629
|
+
// Add closure for deferred recv_message_ready.
|
1630
|
+
if (GPR_UNLIKELY(call_attempt_->recv_message_ready_deferred_batch_ !=
|
1631
|
+
nullptr)) {
|
1632
|
+
MaybeAddClosureForRecvMessageCallback(call_attempt_->recv_message_error_,
|
1633
|
+
closures);
|
1634
|
+
call_attempt_->recv_message_ready_deferred_batch_.reset(
|
1635
|
+
DEBUG_LOCATION, "resuming deferred recv_message_ready");
|
1636
|
+
call_attempt_->recv_message_error_ = GRPC_ERROR_NONE;
|
1637
|
+
}
|
1638
|
+
// Add closures for deferred on_complete callbacks.
|
1639
|
+
for (auto& on_complete_deferred_batch :
|
1640
|
+
call_attempt_->on_complete_deferred_batches_) {
|
1641
|
+
closures->Add(&on_complete_deferred_batch.batch->on_complete_,
|
1642
|
+
on_complete_deferred_batch.error, "resuming on_complete");
|
1643
|
+
on_complete_deferred_batch.batch.release();
|
1644
|
+
}
|
1645
|
+
call_attempt_->on_complete_deferred_batches_.clear();
|
1549
1646
|
}
|
1550
1647
|
|
1551
1648
|
void RetryFilter::CallData::CallAttempt::BatchData::
|
@@ -1554,13 +1651,8 @@ void RetryFilter::CallData::CallAttempt::BatchData::
|
|
1554
1651
|
auto* calld = call_attempt_->calld_;
|
1555
1652
|
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches_); ++i) {
|
1556
1653
|
PendingBatch* pending = &calld->pending_batches_[i];
|
1557
|
-
if (
|
1558
|
-
|
1559
|
-
gpr_log(GPR_INFO,
|
1560
|
-
"chand=%p calld=%p attempt=%p: failing unstarted pending "
|
1561
|
-
"batch at index %" PRIuPTR,
|
1562
|
-
calld->chand_, calld, call_attempt_.get(), i);
|
1563
|
-
}
|
1654
|
+
if (pending->batch == nullptr) continue;
|
1655
|
+
if (call_attempt_->PendingBatchContainsUnstartedSendOps(pending)) {
|
1564
1656
|
closures->Add(pending->batch->on_complete, GRPC_ERROR_REF(error),
|
1565
1657
|
"failing on_complete for pending batch");
|
1566
1658
|
pending->batch->on_complete = nullptr;
|
@@ -1575,7 +1667,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RunClosuresForCompletedCall(
|
|
1575
1667
|
// Construct list of closures to execute.
|
1576
1668
|
CallCombinerClosureList closures;
|
1577
1669
|
// First, add closure for recv_trailing_metadata_ready.
|
1578
|
-
|
1670
|
+
MaybeAddClosureForRecvTrailingMetadataReady(GRPC_ERROR_REF(error), &closures);
|
1579
1671
|
// If there are deferred batch completion callbacks, add them to closures.
|
1580
1672
|
AddClosuresForDeferredCompletionCallbacks(&closures);
|
1581
1673
|
// Add closures to fail any pending batches that have not yet been started.
|
@@ -1593,17 +1685,18 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
|
|
1593
1685
|
CallData* calld = call_attempt->calld_;
|
1594
1686
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
|
1595
1687
|
gpr_log(GPR_INFO,
|
1596
|
-
"chand=%p calld=%p attempt=%p:
|
1597
|
-
"error=%s",
|
1598
|
-
calld->chand_, calld, call_attempt,
|
1688
|
+
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
1689
|
+
"got recv_trailing_metadata_ready, error=%s",
|
1690
|
+
calld->chand_, calld, call_attempt, batch_data.get(),
|
1599
1691
|
grpc_error_std_string(error).c_str());
|
1600
1692
|
}
|
1601
1693
|
call_attempt->completed_recv_trailing_metadata_ = true;
|
1602
|
-
// If this attempt has been
|
1694
|
+
// If this attempt has been abandoned, then we're not going to use the
|
1603
1695
|
// result of this recv_trailing_metadata op, so do nothing.
|
1604
|
-
if (call_attempt->
|
1605
|
-
GRPC_CALL_COMBINER_STOP(
|
1606
|
-
|
1696
|
+
if (call_attempt->abandoned_) {
|
1697
|
+
GRPC_CALL_COMBINER_STOP(
|
1698
|
+
calld->call_combiner_,
|
1699
|
+
"recv_trailing_metadata_ready for abandoned attempt");
|
1607
1700
|
return;
|
1608
1701
|
}
|
1609
1702
|
// Cancel per-attempt recv timer, if any.
|
@@ -1631,7 +1724,15 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
|
|
1631
1724
|
calld->StartRetryTimer(server_pushback_ms);
|
1632
1725
|
// Cancel call attempt.
|
1633
1726
|
CallCombinerClosureList closures;
|
1634
|
-
call_attempt->
|
1727
|
+
call_attempt->AddBatchForCancelOp(
|
1728
|
+
error == GRPC_ERROR_NONE
|
1729
|
+
? grpc_error_set_int(
|
1730
|
+
GRPC_ERROR_CREATE_FROM_STATIC_STRING("call attempt failed"),
|
1731
|
+
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED)
|
1732
|
+
: GRPC_ERROR_REF(error),
|
1733
|
+
&closures);
|
1734
|
+
// Record that this attempt has been abandoned.
|
1735
|
+
call_attempt->Abandon();
|
1635
1736
|
// Yields call combiner.
|
1636
1737
|
closures.RunClosures(calld->call_combiner_);
|
1637
1738
|
return;
|
@@ -1668,6 +1769,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::
|
|
1668
1769
|
GRPC_ERROR_UNREF(error);
|
1669
1770
|
return;
|
1670
1771
|
}
|
1772
|
+
// Propagate payload.
|
1773
|
+
if (batch_.send_message) {
|
1774
|
+
pending->batch->payload->send_message.stream_write_closed =
|
1775
|
+
batch_.payload->send_message.stream_write_closed;
|
1776
|
+
}
|
1671
1777
|
// Add closure.
|
1672
1778
|
closures->Add(pending->batch->on_complete, error,
|
1673
1779
|
"on_complete for pending batch");
|
@@ -1711,16 +1817,17 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
   CallData* calld = call_attempt->calld_;
   if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
     gpr_log(GPR_INFO,
-            "chand=%p calld=%p attempt=%p
-
+            "chand=%p calld=%p attempt=%p batch_data=%p: "
+            "got on_complete, error=%s, batch=%s",
+            calld->chand_, calld, call_attempt, batch_data.get(),
             grpc_error_std_string(error).c_str(),
             grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
   }
-  // If this attempt has been
+  // If this attempt has been abandoned, then we're not going to propagate
   // the completion of this batch, so do nothing.
-  if (call_attempt->
+  if (call_attempt->abandoned_) {
     GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
-                            "on_complete
+                            "on_complete for abandoned attempt");
     return;
   }
   // If we got an error and have not yet gotten the
@@ -1733,17 +1840,16 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
       gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
               calld->chand_, calld, call_attempt);
     }
-    call_attempt->
-
+    call_attempt->on_complete_deferred_batches_.emplace_back(
+        std::move(batch_data), GRPC_ERROR_REF(error));
+    CallCombinerClosureList closures;
+    call_attempt->AddBatchForCancelOp(GRPC_ERROR_REF(error), &closures);
     if (!call_attempt->started_recv_trailing_metadata_) {
       // recv_trailing_metadata not yet started by application; start it
       // ourselves to get status.
-      call_attempt->
-    } else {
-      GRPC_CALL_COMBINER_STOP(
-          calld->call_combiner_,
-          "on_complete failure before recv_trailing_metadata_ready");
+      call_attempt->AddBatchForInternalRecvTrailingMetadata(&closures);
     }
+    closures.RunClosures(calld->call_combiner_);
     return;
   }
   // Update bookkeeping in call_attempt.
@@ -1780,6 +1886,24 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
   closures.RunClosures(calld->call_combiner_);
 }

+void RetryFilter::CallData::CallAttempt::BatchData::OnCompleteForCancelOp(
+    void* arg, grpc_error_handle error) {
+  RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
+  CallAttempt* call_attempt = batch_data->call_attempt_.get();
+  CallData* calld = call_attempt->calld_;
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p attempt=%p batch_data=%p: "
+            "got on_complete for cancel_stream batch, error=%s, batch=%s",
+            calld->chand_, calld, call_attempt, batch_data.get(),
+            grpc_error_std_string(error).c_str(),
+            grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
+  }
+  GRPC_CALL_COMBINER_STOP(
+      calld->call_combiner_,
+      "on_complete for internally generated cancel_stream op");
+}
+
 //
 // retriable batch construction
 //
@@ -1914,10 +2038,12 @@ void RetryFilter::CallData::CallAttempt::BatchData::
       &call_attempt_->recv_trailing_metadata_ready_;
 }

-void RetryFilter::CallData::CallAttempt::BatchData::AddCancelStreamOp(
+void RetryFilter::CallData::CallAttempt::BatchData::AddCancelStreamOp(
+    grpc_error_handle error) {
   batch_.cancel_stream = true;
-  batch_.payload->cancel_stream.cancel_error =
-
+  batch_.payload->cancel_stream.cancel_error = error;
+  // Override on_complete callback.
+  GRPC_CLOSURE_INIT(&on_complete_, OnCompleteForCancelOp, this, nullptr);
 }

 //
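The two hunks above work together: AddCancelStreamOp turns an attempt's batch into a cancel_stream op and re-points its on_complete closure at OnCompleteForCancelOp, which only has to release the call combiner because no application callback is waiting on an internally generated cancellation. A rough sketch of that closure-override idiom, assuming the internal closure API (every name here other than GRPC_CLOSURE_INIT is invented for illustration):

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/error.h"

    // Callback invoked when the internally generated op completes; it only
    // needs to do internal cleanup, since nothing is propagated upward.
    static void OnInternalOpComplete(void* arg, grpc_error_handle /*error*/) {
      // Release per-op resources and yield the call combiner here.
      (void)arg;
    }

    // Re-initialize an existing closure so the transport invokes the
    // internal callback instead of the one originally installed.
    void OverrideOnComplete(grpc_closure* on_complete, void* arg) {
      GRPC_CLOSURE_INIT(on_complete, OnInternalOpComplete, arg, nullptr);
    }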
@@ -1996,7 +2122,6 @@ RetryFilter::CallData::CallData(RetryFilter* chand,
               .set_max_backoff(
                   retry_policy_ == nullptr ? 0 : retry_policy_->max_backoff())),
       path_(grpc_slice_ref_internal(args.path)),
-      call_start_time_(args.start_time),
       deadline_(args.deadline),
       arena_(args.arena),
       owning_call_(args.call_stack),
@@ -2076,6 +2201,11 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
   }
   // If we do not yet have a call attempt, create one.
   if (call_attempt_ == nullptr) {
+    // If there is no retry policy, then commit retries immediately.
+    // This ensures that the code below will always jump to the fast path.
+    // TODO(roth): Remove this special case when we implement
+    // transparent retries.
+    if (retry_policy_ == nullptr) retry_committed_ = true;
     // If this is the first batch and retries are already committed
     // (e.g., if this batch put the call above the buffer size limit), then
     // immediately create an LB call and delegate the batch to it. This
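The fast-path change above hinges on whether a retry policy was configured for the method in the service config; when none is present, the call is committed immediately and delegated to a single LB call. For context, the sketch below shows a client-side retry policy wired in through the C++ channel API (the target address and service name are placeholders, and the policy values are arbitrary examples):

    #include <memory>
    #include <grpcpp/grpcpp.h>

    std::shared_ptr<grpc::Channel> MakeChannelWithRetries() {
      grpc::ChannelArguments args;
      // Per-method retry policy; without a block like this, the retry filter
      // takes the committed fast path shown in the hunk above.
      args.SetServiceConfigJSON(R"({
        "methodConfig": [{
          "name": [{"service": "example.Echo"}],
          "retryPolicy": {
            "maxAttempts": 3,
            "initialBackoff": "0.1s",
            "maxBackoff": "1s",
            "backoffMultiplier": 2,
            "retryableStatusCodes": ["UNAVAILABLE"]
          }
        }]
      })");
      return grpc::CreateCustomChannel(
          "localhost:50051", grpc::InsecureChannelCredentials(), args);
    }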
@@ -2101,7 +2231,10 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
                 chand_, this);
       }
       PendingBatchClear(pending);
-
+      auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
+          call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
+      committed_call_ = CreateLoadBalancedCall(
+          service_config_call_data->call_dispatch_controller());
       committed_call_->StartTransportStreamOpBatch(batch);
       return;
     }
@@ -2123,16 +2256,20 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
   call_attempt_->StartRetriableBatches();
 }

-
-RetryFilter::CallData::CreateLoadBalancedCall(
+OrphanablePtr<ClientChannel::LoadBalancedCall>
+RetryFilter::CallData::CreateLoadBalancedCall(
+    ConfigSelector::CallDispatchController* call_dispatch_controller) {
   grpc_call_element_args args = {owning_call_, nullptr, call_context_,
-                                 path_,
+                                 path_, /*start_time=*/0, deadline_,
                                  arena_, call_combiner_};
   return chand_->client_channel_->CreateLoadBalancedCall(
       args, pollent_,
       // This callback holds a ref to the CallStackDestructionBarrier
       // object until the LB call is destroyed.
-      call_stack_destruction_barrier_->MakeLbCallDestructionClosure(this)
+      call_stack_destruction_barrier_->MakeLbCallDestructionClosure(this),
+      call_dispatch_controller,
+      // TODO(roth): Change this when we support transparent retries.
+      /*is_transparent_retry=*/false);
 }

 void RetryFilter::CallData::CreateCallAttempt() {
@@ -2384,6 +2521,18 @@ void RetryFilter::CallData::RetryCommit(CallAttempt* call_attempt) {
     gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand_, this);
   }
   if (call_attempt != nullptr) {
+    // If the call attempt's LB call has been committed, inform the call
+    // dispatch controller that the call has been committed.
+    // Note: If call_attempt is null, this is happening before the first
+    // retry attempt is started, in which case we'll just pass the real
+    // call dispatch controller down into the LB call, and it won't be
+    // our problem anymore.
+    if (call_attempt->lb_call_committed()) {
+      auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
+          call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
+      service_config_call_data->call_dispatch_controller()->Commit();
+    }
+    // Free cached send ops.
     call_attempt->FreeCachedSendOpDataAfterCommit();
   }
 }
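Both this hunk and the StartTransportStreamOpBatch change above fetch the per-call ServiceConfigCallData out of the call context in order to reach the ConfigSelector's CallDispatchController and notify it once retries are committed. A small sketch of that lookup, assuming the internal call-context API from src/core/lib/channel/context.h (the free-function helper is invented here for illustration):

    #include "src/core/ext/filters/client_channel/service_config_call_data.h"
    #include "src/core/lib/channel/context.h"

    // Fetch the ServiceConfigCallData stashed in the call context by the
    // client channel; it exposes the CallDispatchController that
    // RetryCommit() notifies once retries are committed.
    grpc_core::ServiceConfigCallData* GetServiceConfigCallData(
        grpc_call_context_element* call_context) {
      return static_cast<grpc_core::ServiceConfigCallData*>(
          call_context[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
    }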