grpc 1.6.7 → 1.7.0.pre1
- checksums.yaml +4 -4
- data/Makefile +579 -77
- data/include/grpc/byte_buffer.h +1 -63
- data/include/grpc/compression.h +27 -5
- data/include/grpc/fork.h +24 -0
- data/include/grpc/grpc.h +12 -6
- data/include/grpc/grpc_security.h +28 -7
- data/include/grpc/impl/codegen/atm.h +1 -0
- data/include/grpc/impl/codegen/byte_buffer.h +86 -0
- data/include/grpc/impl/codegen/compression_types.h +63 -5
- data/include/grpc/impl/codegen/fork.h +48 -0
- data/include/grpc/impl/codegen/grpc_types.h +26 -9
- data/include/grpc/impl/codegen/port_platform.h +11 -4
- data/include/grpc/impl/codegen/slice.h +6 -1
- data/include/grpc/impl/codegen/sync.h +3 -1
- data/include/grpc/impl/codegen/sync_custom.h +36 -0
- data/include/grpc/module.modulemap +75 -3
- data/include/grpc/slice.h +1 -5
- data/include/grpc/support/sync_custom.h +24 -0
- data/src/core/ext/census/base_resources.c +14 -14
- data/src/core/ext/census/context.c +7 -5
- data/src/core/ext/census/grpc_filter.c +12 -14
- data/src/core/ext/census/mlog.c +2 -1
- data/src/core/ext/census/resource.c +13 -9
- data/src/core/ext/filters/client_channel/channel_connectivity.c +15 -8
- data/src/core/ext/filters/client_channel/client_channel.c +418 -439
- data/src/core/ext/filters/client_channel/client_channel_factory.c +4 -5
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +2 -2
- data/src/core/ext/filters/client_channel/http_connect_handshaker.c +7 -5
- data/src/core/ext/filters/client_channel/http_proxy.c +17 -21
- data/src/core/ext/filters/client_channel/lb_policy.c +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +7 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +371 -257
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +7 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +25 -14
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +16 -16
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +33 -28
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +10 -8
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.c +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +7 -6
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +62 -28
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +29 -23
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +25 -14
- data/src/core/ext/filters/client_channel/retry_throttle.c +9 -6
- data/src/core/ext/filters/client_channel/subchannel.c +30 -30
- data/src/core/ext/filters/client_channel/subchannel.h +1 -4
- data/src/core/ext/filters/client_channel/subchannel_index.c +31 -15
- data/src/core/ext/filters/client_channel/subchannel_index.h +7 -0
- data/src/core/ext/filters/client_channel/uri_parser.c +4 -3
- data/src/core/ext/filters/deadline/deadline_filter.c +78 -39
- data/src/core/ext/filters/deadline/deadline_filter.h +7 -1
- data/src/core/ext/filters/http/client/http_client_filter.c +14 -14
- data/src/core/ext/filters/http/http_filters_plugin.c +1 -1
- data/src/core/ext/filters/http/message_compress/message_compress_filter.c +240 -175
- data/src/core/ext/filters/http/server/http_server_filter.c +48 -36
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.c → server_load_reporting_filter.c} +11 -12
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.h → server_load_reporting_filter.h} +6 -5
- data/src/core/ext/filters/load_reporting/{load_reporting.c → server_load_reporting_plugin.c} +19 -13
- data/src/core/ext/filters/load_reporting/{load_reporting.h → server_load_reporting_plugin.h} +4 -3
- data/src/core/ext/filters/max_age/max_age_filter.c +2 -3
- data/src/core/ext/filters/message_size/message_size_filter.c +4 -2
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +0 -1
- data/src/core/ext/transport/chttp2/client/chttp2_connector.c +5 -5
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +20 -18
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +1 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +493 -210
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +1 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.c +9 -8
- data/src/core/ext/transport/chttp2/transport/frame_data.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +5 -4
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +10 -9
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +9 -5
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +62 -41
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +52 -8
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +2 -2
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.c +3 -2
- data/src/core/ext/transport/chttp2/transport/internal.h +60 -30
- data/src/core/ext/transport/chttp2/transport/parsing.c +16 -5
- data/src/core/ext/transport/chttp2/transport/stream_lists.c +36 -16
- data/src/core/ext/transport/chttp2/transport/stream_map.c +6 -4
- data/src/core/ext/transport/chttp2/transport/writing.c +133 -105
- data/src/core/ext/transport/inproc/inproc_transport.c +61 -65
- data/src/core/lib/channel/channel_args.c +112 -12
- data/src/core/lib/channel/channel_args.h +31 -0
- data/src/core/lib/channel/channel_stack.c +1 -15
- data/src/core/lib/channel/channel_stack.h +3 -10
- data/src/core/lib/channel/channel_stack_builder.c +41 -10
- data/src/core/lib/channel/channel_stack_builder.h +10 -0
- data/src/core/lib/channel/connected_channel.c +94 -23
- data/src/core/lib/channel/handshaker.c +8 -6
- data/src/core/lib/channel/handshaker_registry.c +1 -1
- data/src/core/lib/compression/algorithm_metadata.h +14 -0
- data/src/core/lib/compression/compression.c +101 -1
- data/src/core/lib/compression/stream_compression.c +32 -146
- data/src/core/lib/compression/stream_compression.h +28 -4
- data/src/core/lib/compression/stream_compression_gzip.c +228 -0
- data/src/core/lib/{iomgr/ev_epoll_thread_pool_linux.h → compression/stream_compression_gzip.h} +5 -7
- data/src/core/lib/compression/stream_compression_identity.c +94 -0
- data/src/core/lib/{iomgr/ev_epoll_limited_pollers_linux.h → compression/stream_compression_identity.h} +7 -8
- data/src/core/lib/debug/stats.c +174 -0
- data/src/core/lib/debug/stats.h +61 -0
- data/src/core/lib/debug/stats_data.c +687 -0
- data/src/core/lib/debug/stats_data.h +470 -0
- data/src/core/lib/debug/trace.c +3 -3
- data/src/core/lib/debug/trace.h +1 -1
- data/src/core/lib/http/format_request.c +1 -1
- data/src/core/lib/http/httpcli.c +8 -7
- data/src/core/lib/http/httpcli_security_connector.c +2 -1
- data/src/core/lib/http/parser.c +4 -3
- data/src/core/lib/iomgr/call_combiner.c +202 -0
- data/src/core/lib/iomgr/call_combiner.h +121 -0
- data/src/core/lib/iomgr/closure.c +18 -4
- data/src/core/lib/iomgr/combiner.c +11 -4
- data/src/core/lib/iomgr/error.c +26 -24
- data/src/core/lib/iomgr/ev_epoll1_linux.c +395 -212
- data/src/core/lib/iomgr/ev_epollex_linux.c +141 -128
- data/src/core/lib/iomgr/ev_epollsig_linux.c +44 -41
- data/src/core/lib/iomgr/ev_poll_posix.c +99 -75
- data/src/core/lib/iomgr/ev_posix.c +5 -9
- data/src/core/lib/iomgr/ev_posix.h +1 -1
- data/src/core/lib/iomgr/exec_ctx.h +6 -1
- data/src/core/lib/iomgr/executor.c +142 -36
- data/src/core/lib/iomgr/executor.h +6 -1
- data/src/core/lib/iomgr/fork_posix.c +88 -0
- data/src/core/lib/iomgr/fork_windows.c +39 -0
- data/src/core/lib/iomgr/iocp_windows.c +2 -0
- data/src/core/lib/iomgr/iomgr.c +2 -8
- data/src/core/lib/iomgr/is_epollexclusive_available.c +6 -6
- data/src/core/lib/iomgr/load_file.c +2 -1
- data/src/core/lib/iomgr/polling_entity.c +9 -9
- data/src/core/lib/iomgr/polling_entity.h +7 -1
- data/src/core/lib/iomgr/pollset.h +1 -1
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/port.h +4 -0
- data/src/core/lib/iomgr/resolve_address_posix.c +8 -7
- data/src/core/lib/iomgr/resolve_address_windows.c +1 -1
- data/src/core/lib/iomgr/resource_quota.c +24 -19
- data/src/core/lib/iomgr/socket_factory_posix.c +4 -4
- data/src/core/lib/iomgr/socket_mutator.c +4 -4
- data/src/core/lib/iomgr/socket_utils_windows.c +0 -4
- data/src/core/lib/iomgr/tcp_client_posix.c +5 -4
- data/src/core/lib/iomgr/tcp_posix.c +181 -20
- data/src/core/lib/iomgr/tcp_server_posix.c +8 -7
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.c +1 -1
- data/src/core/lib/iomgr/timer.h +4 -0
- data/src/core/lib/iomgr/timer_generic.c +138 -3
- data/src/core/lib/iomgr/timer_generic.h +3 -0
- data/src/core/lib/iomgr/timer_heap.c +4 -4
- data/src/core/lib/iomgr/timer_manager.c +2 -2
- data/src/core/lib/iomgr/timer_uv.c +2 -0
- data/src/core/lib/iomgr/udp_server.c +10 -8
- data/src/core/lib/iomgr/unix_sockets_posix.c +4 -2
- data/src/core/lib/iomgr/wakeup_fd_cv.c +9 -8
- data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -2
- data/src/core/lib/json/json.c +1 -1
- data/src/core/lib/json/json_string.c +13 -13
- data/src/core/lib/profiling/timers.h +18 -8
- data/src/core/lib/security/credentials/composite/composite_credentials.c +4 -10
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +2 -1
- data/src/core/lib/security/credentials/jwt/jwt_verifier.c +11 -6
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +4 -4
- data/src/core/lib/security/credentials/plugin/plugin_credentials.c +132 -50
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
- data/src/core/lib/security/transport/client_auth_filter.c +68 -135
- data/src/core/lib/security/transport/secure_endpoint.c +110 -90
- data/src/core/lib/security/transport/secure_endpoint.h +8 -3
- data/src/core/lib/security/transport/security_connector.c +10 -12
- data/src/core/lib/security/transport/security_handshaker.c +45 -24
- data/src/core/lib/security/transport/server_auth_filter.c +71 -20
- data/src/core/lib/slice/b64.c +2 -2
- data/src/core/lib/slice/slice.c +16 -14
- data/src/core/lib/slice/slice_buffer.c +5 -4
- data/src/core/lib/slice/slice_hash_table.c +3 -2
- data/src/core/lib/slice/slice_intern.c +8 -5
- data/src/core/lib/support/block_annotate.h +22 -0
- data/src/core/lib/support/fork.c +62 -0
- data/src/core/lib/support/fork.h +35 -0
- data/src/core/lib/support/log_linux.c +1 -1
- data/src/core/lib/support/string.c +15 -1
- data/src/core/lib/support/string.h +3 -0
- data/src/core/lib/support/thd_internal.h +6 -0
- data/src/core/lib/support/thd_posix.c +56 -0
- data/src/core/lib/support/thd_windows.c +2 -0
- data/src/core/lib/surface/alarm.c +22 -15
- data/src/core/lib/surface/byte_buffer.c +4 -2
- data/src/core/lib/surface/call.c +442 -141
- data/src/core/lib/surface/call.h +6 -6
- data/src/core/lib/surface/call_log_batch.c +1 -1
- data/src/core/lib/surface/call_test_only.h +12 -0
- data/src/core/lib/surface/channel.c +39 -4
- data/src/core/lib/surface/channel_init.c +6 -6
- data/src/core/lib/surface/channel_ping.c +2 -2
- data/src/core/lib/surface/completion_queue.c +56 -57
- data/src/core/lib/surface/init.c +17 -3
- data/src/core/lib/surface/init_secure.c +5 -1
- data/src/core/lib/surface/lame_client.cc +9 -10
- data/src/core/lib/surface/server.c +81 -72
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/byte_stream.c +1 -0
- data/src/core/lib/transport/byte_stream.h +3 -1
- data/src/core/lib/transport/connectivity_state.c +2 -1
- data/src/core/lib/transport/metadata.c +7 -4
- data/src/core/lib/transport/metadata_batch.c +18 -16
- data/src/core/lib/transport/metadata_batch.h +1 -0
- data/src/core/lib/transport/service_config.c +5 -3
- data/src/core/lib/transport/static_metadata.c +395 -614
- data/src/core/lib/transport/static_metadata.h +165 -133
- data/src/core/lib/transport/status_conversion.c +1 -1
- data/src/core/lib/transport/transport.c +20 -20
- data/src/core/lib/transport/transport.h +8 -5
- data/src/core/lib/transport/transport_impl.h +0 -3
- data/src/core/lib/transport/transport_op_string.c +8 -1
- data/src/core/plugin_registry/grpc_plugin_registry.c +4 -4
- data/src/core/tsi/fake_transport_security.c +133 -2
- data/src/core/tsi/fake_transport_security.h +5 -0
- data/src/core/tsi/ssl_transport_security.c +105 -8
- data/src/core/tsi/ssl_transport_security.h +30 -7
- data/src/core/tsi/transport_security.h +8 -2
- data/src/core/tsi/transport_security_grpc.c +20 -13
- data/src/core/tsi/transport_security_grpc.h +13 -9
- data/src/ruby/ext/grpc/rb_call_credentials.c +6 -2
- data/src/ruby/ext/grpc/rb_grpc.c +1 -1
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +30 -20
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +50 -35
- data/src/ruby/lib/grpc.rb +1 -0
- data/src/ruby/lib/grpc/generic/active_call.rb +34 -9
- data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -10
- data/src/ruby/lib/grpc/generic/client_stub.rb +95 -38
- data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
- data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
- data/src/ruby/lib/grpc/generic/rpc_desc.rb +66 -20
- data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -3
- data/src/ruby/lib/grpc/google_rpc_status_utils.rb +1 -2
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +1 -0
- data/src/ruby/spec/channel_connection_spec.rb +1 -34
- data/src/ruby/spec/client_server_spec.rb +188 -82
- data/src/ruby/spec/generic/active_call_spec.rb +65 -11
- data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
- data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
- data/src/ruby/spec/generic/rpc_desc_spec.rb +38 -0
- data/src/ruby/spec/generic/rpc_server_spec.rb +1 -34
- data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
- data/src/ruby/spec/spec_helper.rb +4 -0
- data/src/ruby/spec/support/helpers.rb +73 -0
- data/src/ruby/spec/support/services.rb +147 -0
- data/third_party/cares/ares_build.h +21 -62
- data/third_party/cares/cares/ares.h +23 -1
- data/third_party/cares/cares/ares__close_sockets.c +2 -2
- data/third_party/cares/cares/ares_create_query.c +3 -3
- data/third_party/cares/cares/ares_expand_name.c +6 -2
- data/third_party/cares/cares/ares_expand_string.c +1 -1
- data/third_party/cares/cares/ares_getnameinfo.c +27 -7
- data/third_party/cares/cares/ares_init.c +407 -39
- data/third_party/cares/cares/ares_library_init.c +10 -0
- data/third_party/cares/cares/ares_library_init.h +2 -1
- data/third_party/cares/cares/ares_nowarn.c +6 -6
- data/third_party/cares/cares/ares_nowarn.h +2 -2
- data/third_party/cares/cares/ares_parse_naptr_reply.c +6 -1
- data/third_party/cares/cares/ares_private.h +11 -0
- data/third_party/cares/cares/ares_process.c +126 -37
- data/third_party/cares/cares/ares_version.h +2 -2
- data/third_party/cares/cares/ares_writev.c +2 -2
- data/third_party/cares/cares/config-win32.h +8 -34
- data/third_party/cares/cares/inet_net_pton.c +2 -2
- data/third_party/cares/cares/setup_once.h +5 -5
- data/third_party/cares/config_darwin/ares_config.h +98 -196
- data/third_party/cares/config_linux/ares_config.h +103 -203
- metadata +47 -20
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +0 -1957
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +0 -1182
data/src/core/ext/census/mlog.c
CHANGED
@@ -467,7 +467,8 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
   g_log.blocks = (cl_block*)gpr_malloc_aligned(
       g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
   memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
-  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+  g_log.buffer =
+      (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
   memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
   cl_block_list_initialize(&g_log.free_block_list);
   cl_block_list_initialize(&g_log.dirty_block_list);
data/src/core/ext/census/resource.c
CHANGED
@@ -87,7 +87,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
         gpr_log(GPR_INFO, "Zero-length Resource name.");
         return false;
       }
-      vresource->name = gpr_malloc(stream->bytes_left + 1);
+      vresource->name = (char *)gpr_malloc(stream->bytes_left + 1);
       vresource->name[stream->bytes_left] = '\0';
       if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
         return false;
@@ -106,7 +106,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
       if (stream->bytes_left == 0) {
        return true;
      }
-      vresource->description = gpr_malloc(stream->bytes_left + 1);
+      vresource->description = (char *)gpr_malloc(stream->bytes_left + 1);
       vresource->description[stream->bytes_left] = '\0';
       if (!pb_read(stream, (uint8_t *)vresource->description,
                    stream->bytes_left)) {
@@ -134,7 +134,8 @@ static bool validate_units_helper(pb_istream_t *stream, int *count,
     // Have to allocate a new array of values. Normal case is 0 or 1, so
     // this should normally not be an issue.
     google_census_Resource_BasicUnit *new_bup =
-        gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit));
+        (google_census_Resource_BasicUnit *)gpr_malloc(
+            (size_t)*count * sizeof(google_census_Resource_BasicUnit));
     if (*count != 1) {
       memcpy(new_bup, *bup,
              (size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
@@ -207,7 +208,8 @@ size_t allocate_resource(void) {
   // Expand resources if needed.
   if (n_resources == n_defined_resources) {
     size_t new_n_resources = n_resources ? n_resources * 2 : 2;
-    resource **new_resources = gpr_malloc(new_n_resources * sizeof(resource *));
+    resource **new_resources =
+        (resource **)gpr_malloc(new_n_resources * sizeof(resource *));
     if (n_resources != 0) {
       memcpy(new_resources, resources, n_resources * sizeof(resource *));
     }
@@ -226,7 +228,7 @@ size_t allocate_resource(void) {
     }
   }
   GPR_ASSERT(id < n_resources && resources[id] == NULL);
-  resources[id] = gpr_malloc(sizeof(resource));
+  resources[id] = (resource *)gpr_malloc(sizeof(resource));
   memset(resources[id], 0, sizeof(resource));
   n_defined_resources++;
   next_id = (id + 1) % n_resources;
@@ -276,22 +278,24 @@ int32_t define_resource(const resource *base) {
   gpr_mu_lock(&resource_lock);
   size_t id = allocate_resource();
   size_t len = strlen(base->name) + 1;
-  resources[id]->name = gpr_malloc(len);
+  resources[id]->name = (char *)gpr_malloc(len);
   memcpy(resources[id]->name, base->name, len);
   if (base->description) {
     len = strlen(base->description) + 1;
-    resources[id]->description = gpr_malloc(len);
+    resources[id]->description = (char *)gpr_malloc(len);
     memcpy(resources[id]->description, base->description, len);
   }
   resources[id]->prefix = base->prefix;
   resources[id]->n_numerators = base->n_numerators;
   len = (size_t)base->n_numerators * sizeof(*base->numerators);
-  resources[id]->numerators = gpr_malloc(len);
+  resources[id]->numerators =
+      (google_census_Resource_BasicUnit *)gpr_malloc(len);
   memcpy(resources[id]->numerators, base->numerators, len);
   resources[id]->n_denominators = base->n_denominators;
   if (base->n_denominators != 0) {
     len = (size_t)base->n_denominators * sizeof(*base->denominators);
-    resources[id]->denominators = gpr_malloc(len);
+    resources[id]->denominators =
+        (google_census_Resource_BasicUnit *)gpr_malloc(len);
     memcpy(resources[id]->denominators, base->denominators, len);
   }
   gpr_mu_unlock(&resource_lock);
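The census changes above are almost entirely mechanical: every gpr_malloc/gpr_malloc_aligned result gains an explicit cast. Plain C converts void * implicitly, but C++ does not, and 1.7 starts preparing the C core to also build as C++. A standalone sketch of the difference, with plain malloc standing in for gpr_malloc and a hypothetical resource-like struct (not the gRPC type):

#include <stdlib.h>
#include <string.h>

typedef struct { char *name; } resource;

int main(void) {
  /* Accepted in C (implicit void* conversion), rejected by a C++ compiler: */
  /* resource *r = malloc(sizeof(resource)); */

  /* Compiles as both C and C++, which is the form the diff switches to: */
  resource *r = (resource *)malloc(sizeof(resource));
  memset(r, 0, sizeof(resource));
  free(r);
  return 0;
}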
data/src/core/ext/filters/client_channel/channel_connectivity.c
CHANGED
@@ -86,20 +86,20 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
 
 static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
                                 grpc_cq_completion *ignored) {
-  int delete = 0;
-  state_watcher *w = pw;
+  bool should_delete = false;
+  state_watcher *w = (state_watcher *)pw;
   gpr_mu_lock(&w->mu);
   switch (w->phase) {
     case WAITING:
     case READY_TO_CALL_BACK:
       GPR_UNREACHABLE_CODE(return );
     case CALLING_BACK_AND_FINISHED:
-      delete = 1;
+      should_delete = true;
       break;
   }
   gpr_mu_unlock(&w->mu);
 
-  if (delete) {
+  if (should_delete) {
     delete_state_watcher(exec_ctx, w);
   }
 }
@@ -161,12 +161,12 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
 
 static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
                            grpc_error *error) {
-  partly_done(exec_ctx, pw, true, GRPC_ERROR_REF(error));
+  partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
 }
 
 static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
                              grpc_error *error) {
-  partly_done(exec_ctx, pw, false, GRPC_ERROR_REF(error));
+  partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
 }
 
 int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
@@ -191,13 +191,19 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
   gpr_free(wa);
 }
 
+int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
+  grpc_channel_element *client_channel_elem =
+      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+  return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
+}
+
 void grpc_channel_watch_connectivity_state(
     grpc_channel *channel, grpc_connectivity_state last_observed_state,
     gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
   grpc_channel_element *client_channel_elem =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  state_watcher *w = gpr_malloc(sizeof(*w));
+  state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
 
   GRPC_API_TRACE(
       "grpc_channel_watch_connectivity_state("
@@ -222,7 +228,8 @@ void grpc_channel_watch_connectivity_state(
   w->channel = channel;
   w->error = NULL;
 
-  watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
+  watcher_timer_init_arg *wa =
+      (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
   wa->w = w;
   wa->deadline = deadline;
   GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
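Besides the casts, channel_connectivity.c gains a public probe, grpc_channel_support_connectivity_watcher(), which returns 1 only when the channel's last filter is the client-channel filter (lame and server channels are not watchable). A hedged usage sketch against the C core API: the helper name watch_if_supported and the five-second deadline are illustrative; only the grpc_*/gpr_* calls come from the library, with the five-argument watch signature taken from the hunk above.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Sketch: only ask for connectivity updates when the channel supports them. */
void watch_if_supported(grpc_channel *channel, grpc_completion_queue *cq,
                        void *tag) {
  if (!grpc_channel_support_connectivity_watcher(channel)) {
    return; /* e.g. a lame channel: the watch would never fire usefully */
  }
  grpc_connectivity_state state =
      grpc_channel_check_connectivity_state(channel, 0 /* try_to_connect */);
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
  grpc_channel_watch_connectivity_state(channel, state, deadline, cq, tag);
}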
data/src/core/ext/filters/client_channel/client_channel.c
CHANGED
@@ -85,7 +85,7 @@ static void method_parameters_unref(method_parameters *method_params) {
 }
 
 static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
-  method_parameters_unref(value);
+  method_parameters_unref((method_parameters *)value);
 }
 
 static bool parse_wait_for_ready(grpc_json *field,
@@ -148,7 +148,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
       if (!parse_timeout(field, &timeout)) return NULL;
     }
   }
-  method_parameters *value = gpr_malloc(sizeof(method_parameters));
+  method_parameters *value =
+      (method_parameters *)gpr_malloc(sizeof(method_parameters));
   gpr_ref_init(&value->refs, 1);
   value->timeout = timeout;
   value->wait_for_ready = wait_for_ready;
@@ -254,7 +255,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
 
 static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
-  lb_policy_connectivity_watcher *w = arg;
+  lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
   grpc_connectivity_state publish_state = w->state;
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy) {
@@ -281,7 +282,8 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
 static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
                                    grpc_lb_policy *lb_policy,
                                    grpc_connectivity_state current_state) {
-  lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+  lb_policy_connectivity_watcher *w =
+      (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
   w->chand = chand;
   GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +312,8 @@ typedef struct {
 } service_config_parsing_state;
 
 static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
-  service_config_parsing_state *parsing_state = arg;
+  service_config_parsing_state *parsing_state =
+      (service_config_parsing_state *)arg;
   if (strcmp(field->key, "retryThrottling") == 0) {
     if (parsing_state->retry_throttle_data != NULL) return;  // Duplicate.
     if (field->type != GRPC_JSON_OBJECT) return;
@@ -365,14 +368,14 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
 
 static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
                                               void *arg, grpc_error *error) {
-  channel_data *chand = arg;
+  channel_data *chand = (channel_data *)arg;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
             grpc_error_string(error));
   }
   // Extract the following fields from the resolver result, if non-NULL.
   bool lb_policy_updated = false;
-  char *lb_policy_name = NULL;
+  char *lb_policy_name_dup = NULL;
   bool lb_policy_name_changed = false;
   grpc_lb_policy *new_lb_policy = NULL;
   char *service_config_json = NULL;
@@ -380,6 +383,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
   grpc_slice_hash_table *method_params_table = NULL;
   if (chand->resolver_result != NULL) {
     // Find LB policy name.
+    const char *lb_policy_name = NULL;
     const grpc_arg *channel_arg =
         grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
     if (channel_arg != NULL) {
@@ -391,7 +395,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
     channel_arg =
         grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
     if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
-      grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+      grpc_lb_addresses *addresses =
+          (grpc_lb_addresses *)channel_arg->value.pointer.p;
       bool found_balancer_address = false;
       for (size_t i = 0; i < addresses->num_addresses; ++i) {
         if (addresses->addresses[i].is_balancer) {
@@ -469,7 +474,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
     // Before we clean up, save a copy of lb_policy_name, since it might
     // be pointing to data inside chand->resolver_result.
     // The copy will be saved in chand->lb_policy_name below.
-    lb_policy_name = gpr_strdup(lb_policy_name);
+    lb_policy_name_dup = gpr_strdup(lb_policy_name);
     grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
     chand->resolver_result = NULL;
   }
@@ -477,8 +482,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
     gpr_log(GPR_DEBUG,
             "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
             "service_config=\"%s\"",
-            chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
-            service_config_json);
+            chand, lb_policy_name_dup,
+            lb_policy_name_changed ? " (changed)" : "", service_config_json);
   }
   // Now swap out fields in chand.  Note that the new values may still
   // be NULL if (e.g.) the resolver failed to return results or the
@@ -486,9 +491,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
   //
   // First, swap out the data used by cc_get_channel_info().
   gpr_mu_lock(&chand->info_mu);
-  if (lb_policy_name != NULL) {
+  if (lb_policy_name_dup != NULL) {
     gpr_free(chand->info_lb_policy_name);
-    chand->info_lb_policy_name = lb_policy_name;
+    chand->info_lb_policy_name = lb_policy_name_dup;
   }
   if (service_config_json != NULL) {
     gpr_free(chand->info_service_config_json);
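The lb_policy_name_dup rename above makes the ownership explicit: the original lb_policy_name points into chand->resolver_result, so it must be copied with gpr_strdup() before grpc_channel_args_destroy() frees the backing memory. The same rule in miniature, with plain strdup/free standing in for the gpr helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  char *args = strdup("round_robin");  /* stands in for resolver_result */
  const char *name = args;             /* borrowed pointer into args */

  char *name_dup = strdup(name);       /* copy BEFORE the owner goes away */
  free(args);                          /* resolver_result destroyed */

  printf("lb policy: %s\n", name_dup); /* safe: we own name_dup */
  free(name_dup);
  return 0;
}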
@@ -586,9 +591,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
 
 static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error_ignored) {
-  grpc_transport_op *op = arg;
-  grpc_channel_element *elem = op->handler_private.extra_arg;
-  channel_data *chand = elem->channel_data;
+  grpc_transport_op *op = (grpc_transport_op *)arg;
+  grpc_channel_element *elem =
+      (grpc_channel_element *)op->handler_private.extra_arg;
+  channel_data *chand = (channel_data *)elem->channel_data;
 
   if (op->on_connectivity_state_change != NULL) {
     grpc_connectivity_state_notify_on_state_change(
@@ -642,7 +648,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
 static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                   grpc_channel_element *elem,
                                   grpc_transport_op *op) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
 
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != NULL) {
@@ -662,7 +668,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
 static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
                                 grpc_channel_element *elem,
                                 const grpc_channel_info *info) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   gpr_mu_lock(&chand->info_mu);
   if (info->lb_policy_name != NULL) {
     *info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -682,7 +688,7 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
 static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                         grpc_channel_element *elem,
                                         grpc_channel_element_args *args) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   GPR_ASSERT(args->is_last);
   GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
   // Initialize data members.
@@ -712,8 +718,10 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "client channel factory arg must be a pointer");
   }
-  grpc_client_channel_factory_ref(arg->value.pointer.p);
-  chand->client_channel_factory = arg->value.pointer.p;
+  grpc_client_channel_factory_ref(
+      (grpc_client_channel_factory *)arg->value.pointer.p);
+  chand->client_channel_factory =
+      (grpc_client_channel_factory *)arg->value.pointer.p;
   // Get server name to resolve, using proxy mapper if needed.
   arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
   if (arg == NULL) {
@@ -745,7 +753,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
 
 static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                      grpc_error *error) {
-  grpc_resolver *resolver = arg;
+  grpc_resolver *resolver = (grpc_resolver *)arg;
   grpc_resolver_shutdown_locked(exec_ctx, resolver);
   GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
 }
@@ -753,7 +761,7 @@ static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
 /* Destructor for channel_data */
 static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_channel_element *elem) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->resolver != NULL) {
     GRPC_CLOSURE_SCHED(
         exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
@@ -796,7 +804,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
 // send_message
 // recv_trailing_metadata
 // send_trailing_metadata
-#define MAX_WAITING_BATCHES 6
+// We also add room for a single cancel_stream batch.
+#define MAX_WAITING_BATCHES 7
 
 /** Call data.  Holds a pointer to grpc_subchannel_call and the
     associated machinery to create such a pointer.
@@ -807,24 +816,27 @@ typedef struct client_channel_call_data {
   // State for handling deadlines.
   //   The code in deadline_filter.c requires this to be the first field.
   // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
-  // and this struct both independently store a pointer to the call
-  // stack.  If/when we have time, find a way to avoid this without
-  // breaking the grpc_deadline_state abstraction.
+  // and this struct both independently store pointers to the call stack
+  // and call combiner.  If/when we have time, find a way to avoid this
+  // without breaking the grpc_deadline_state abstraction.
   grpc_deadline_state deadline_state;
 
   grpc_slice path;  // Request path.
   gpr_timespec call_start_time;
   gpr_timespec deadline;
+  gpr_arena *arena;
+  grpc_call_stack *owning_call;
+  grpc_call_combiner *call_combiner;
+
   grpc_server_retry_throttle_data *retry_throttle_data;
   method_parameters *method_params;
 
-  /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
-      bit is 0), or a pointer to an error (if the lowest bit is 1) */
-  gpr_atm subchannel_call_or_error;
-  gpr_arena *arena;
+  grpc_subchannel_call *subchannel_call;
+  grpc_error *error;
 
   grpc_lb_policy *lb_policy;  // Holds ref while LB pick is pending.
   grpc_closure lb_pick_closure;
+  grpc_closure lb_pick_cancel_closure;
 
   grpc_connected_subchannel *connected_subchannel;
   grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -832,10 +844,9 @@ typedef struct client_channel_call_data {
 
   grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
   size_t waiting_for_pick_batches_count;
+  grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
 
-  grpc_transport_stream_op_batch_payload *initial_metadata_payload;
-
-  grpc_call_stack *owning_call;
+  grpc_transport_stream_op_batch *initial_metadata_batch;
 
   grpc_linked_mdelem lb_token_mdelem;
 
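The struct changes above retire a lock-free encoding: 1.6 packed either a grpc_subchannel_call * or a grpc_error * into one gpr_atm, using the low bit as the type tag (allocated pointers are at least 2-byte aligned, so bit 0 is free); with the new call combiner serializing access, 1.7 can keep two plain fields instead. A self-contained sketch of the retired trick, with hypothetical call_t/err_t types rather than the gRPC ones:

#include <stdint.h>
#include <stdio.h>

typedef struct { int id; } call_t;
typedef struct { const char *msg; } err_t;

/* Low-bit tagging: bit 0 set => err_t*, bit 0 clear => call_t*, 0 => empty. */
static uintptr_t encode_call(call_t *c) { return (uintptr_t)c; }
static uintptr_t encode_err(err_t *e) { return (uintptr_t)e | 1u; }

static void describe(uintptr_t slot) {
  if (slot == 0) {
    puts("empty");
  } else if (slot & 1u) {
    printf("error: %s\n", ((err_t *)(slot & ~(uintptr_t)1))->msg);
  } else {
    printf("call #%d\n", ((call_t *)slot)->id);
  }
}

int main(void) {
  call_t call = {42};
  static err_t oops = {"deadline exceeded"};
  describe(0);
  describe(encode_call(&call));
  describe(encode_err(&oops));
  return 0;
}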
@@ -843,56 +854,43 @@ typedef struct client_channel_call_data {
   grpc_closure *original_on_complete;
 } call_data;
 
-typedef struct {
-  grpc_subchannel_call *subchannel_call;
-  grpc_error *error;
-} call_or_error;
-
-static call_or_error get_call_or_error(call_data *p) {
-  gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
-  if (c == 0)
-    return (call_or_error){NULL, NULL};
-  else if (c & 1)
-    return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
-  else
-    return (call_or_error){(grpc_subchannel_call *)c, NULL};
+grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
+    grpc_call_element *elem) {
+  call_data *calld = (call_data *)elem->call_data;
+  return calld->subchannel_call;
 }
 
-static bool set_call_or_error(call_data *p, call_or_error coe) {
-  // this should always be under a lock
-  call_or_error existing = get_call_or_error(p);
-  if (existing.error != GRPC_ERROR_NONE) {
-    GRPC_ERROR_UNREF(coe.error);
-    return false;
-  }
-  GPR_ASSERT(existing.subchannel_call == NULL);
-  if (coe.error != GRPC_ERROR_NONE) {
-    GPR_ASSERT(coe.subchannel_call == NULL);
-    gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_add(
+    call_data *calld, grpc_transport_stream_op_batch *batch) {
+  if (batch->send_initial_metadata) {
+    GPR_ASSERT(calld->initial_metadata_batch == NULL);
+    calld->initial_metadata_batch = batch;
   } else {
-    GPR_ASSERT(coe.subchannel_call != NULL);
-    gpr_atm_rel_store(&p->subchannel_call_or_error,
-                      (gpr_atm)coe.subchannel_call);
+    GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+    calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+        batch;
   }
-  return true;
 }
 
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
-    grpc_call_element *call_elem) {
-  return get_call_or_error(call_elem->call_data).subchannel_call;
-}
-
-static void waiting_for_pick_batches_add_locked(
-    call_data *calld, grpc_transport_stream_op_batch *batch) {
-  GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
-  calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
-      batch;
+// This is called via the call combiner, so access to calld is synchronized.
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+                                                void *arg, grpc_error *error) {
+  call_data *calld = (call_data *)arg;
+  if (calld->waiting_for_pick_batches_count > 0) {
+    --calld->waiting_for_pick_batches_count;
+    grpc_transport_stream_op_batch_finish_with_failure(
+        exec_ctx,
+        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
+        GRPC_ERROR_REF(error), calld->call_combiner);
+  }
 }
 
-static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
-                                                 grpc_call_element *elem,
-                                                 grpc_error *error) {
-  call_data *calld = elem->call_data;
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
+                                          grpc_call_element *elem,
+                                          grpc_error *error) {
+  call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG,
             "chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
@@ -900,42 +898,68 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
             grpc_error_string(error));
   }
   for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+                      fail_pending_batch_in_call_combiner, calld,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+                             &calld->handle_pending_batch_in_call_combiner[i],
+                             GRPC_ERROR_REF(error),
+                             "waiting_for_pick_batches_fail");
+  }
+  if (calld->initial_metadata_batch != NULL) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
+        exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+        calld->call_combiner);
+  } else {
+    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+                            "waiting_for_pick_batches_fail");
   }
-  calld->waiting_for_pick_batches_count = 0;
   GRPC_ERROR_UNREF(error);
 }
 
-static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
-                                                   grpc_call_element *elem) {
-  call_data *calld = elem->call_data;
-  if (calld->waiting_for_pick_batches_count == 0) return;
-  call_or_error coe = get_call_or_error(calld);
-  if (coe.error != GRPC_ERROR_NONE) {
-    waiting_for_pick_batches_fail_locked(exec_ctx, elem,
-                                         GRPC_ERROR_REF(coe.error));
-    return;
+// This is called via the call combiner, so access to calld is synchronized.
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+                                               void *arg, grpc_error *ignored) {
+  call_data *calld = (call_data *)arg;
+  if (calld->waiting_for_pick_batches_count > 0) {
+    --calld->waiting_for_pick_batches_count;
+    grpc_subchannel_call_process_op(
+        exec_ctx, calld->subchannel_call,
+        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
   }
+}
+
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
+                                            grpc_call_element *elem) {
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
                        " pending batches to subchannel_call=%p",
-            elem->channel_data, calld, calld->waiting_for_pick_batches_count,
-            coe.subchannel_call);
+            chand, calld, calld->waiting_for_pick_batches_count,
+            calld->subchannel_call);
   }
   for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
-    grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
-                                    calld->waiting_for_pick_batches[i]);
-  }
-  calld->waiting_for_pick_batches_count = 0;
+    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+                      run_pending_batch_in_call_combiner, calld,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+                             &calld->handle_pending_batch_in_call_combiner[i],
+                             GRPC_ERROR_NONE,
+                             "waiting_for_pick_batches_resume");
+  }
+  GPR_ASSERT(calld->initial_metadata_batch != NULL);
+  grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+                                  calld->initial_metadata_batch);
 }
 
 // Applies service config to the call.  Must be invoked once we know
 // that the resolver has returned results to the channel.
 static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
                                                 grpc_call_element *elem) {
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
             chand, calld);
@@ -945,7 +969,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
     grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
   }
   if (chand->method_params_table != NULL) {
-    calld->method_params = grpc_method_config_table_get(
+    calld->method_params = (method_parameters *)grpc_method_config_table_get(
        exec_ctx, chand->method_params_table, calld->path);
     if (calld->method_params != NULL) {
       method_parameters_ref(calld->method_params);
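The pending-batch helpers above all follow the same call-combiner idiom this release introduces (see the new src/core/lib/iomgr/call_combiner.c in the file list): wrap the work in a closure with GRPC_CLOSURE_INIT, then hand it to GRPC_CALL_COMBINER_START, which runs the closures for one call strictly one at a time. A simplified core-internal sketch of that idiom, using the same macros as the diff; the process_one callback and my_state struct are illustrative, not part of the gRPC source:

typedef struct {
  grpc_call_combiner *call_combiner;
  grpc_closure closure;
} my_state;

/* Runs inside the call combiner: no other closure for this call is active,
   so per-call state can be touched without extra locking. */
static void process_one(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_state *st = (my_state *)arg;
  /* ... do the per-call work here ... */
  /* Each START must eventually be balanced by a STOP to yield the combiner. */
  GRPC_CALL_COMBINER_STOP(exec_ctx, st->call_combiner, "process_one done");
}

static void schedule(grpc_exec_ctx *exec_ctx, my_state *st) {
  GRPC_CLOSURE_INIT(&st->closure, process_one, st, grpc_schedule_on_exec_ctx);
  GRPC_CALL_COMBINER_START(exec_ctx, st->call_combiner, &st->closure,
                           GRPC_ERROR_NONE, "schedule process_one");
}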
@@ -968,194 +992,97 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
 static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
                                           grpc_call_element *elem,
                                           grpc_error *error) {
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
   const grpc_connected_subchannel_call_args call_args = {
       .pollent = calld->pollent,
       .path = calld->path,
       .start_time = calld->call_start_time,
       .deadline = calld->deadline,
       .arena = calld->arena,
-      .context = calld->subchannel_call_context};
+      .context = calld->subchannel_call_context,
+      .call_combiner = calld->call_combiner};
   grpc_error *new_error = grpc_connected_subchannel_create_call(
-      exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+      exec_ctx, calld->connected_subchannel, &call_args,
+      &calld->subchannel_call);
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
-            chand, calld, subchannel_call,
-            grpc_error_string(new_error));
+            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
-  GPR_ASSERT(set_call_or_error(
-      calld, (call_or_error){.subchannel_call = subchannel_call}));
   if (new_error != GRPC_ERROR_NONE) {
     new_error = grpc_error_add_child(new_error, error);
-    waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
+    waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
   } else {
-    waiting_for_pick_batches_resume_locked(exec_ctx, elem);
+    waiting_for_pick_batches_resume(exec_ctx, elem);
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
-                                    grpc_call_element *elem,
-                                    grpc_error *error) {
-  call_data *calld = elem->call_data;
-  channel_data *chand = elem->channel_data;
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
-                                           chand->interested_parties);
-  call_or_error coe = get_call_or_error(calld);
+// Invoked when a pick is completed, on both success or failure.
+static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+                             grpc_error *error) {
+  call_data *calld = (call_data *)elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   if (calld->connected_subchannel == NULL) {
     // Failed to create subchannel.
-    grpc_error *failure =
-        error == GRPC_ERROR_NONE
-            ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                  "Call dropped by load balancing policy")
-            : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                  "Failed to create subchannel", &error, 1);
+    GRPC_ERROR_UNREF(calld->error);
+    calld->error = error == GRPC_ERROR_NONE
+                       ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                             "Call dropped by load balancing policy")
+                       : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                             "Failed to create subchannel", &error, 1);
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG,
               "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
-              calld, grpc_error_string(failure));
-    }
-    set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
-    waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
-  } else if (coe.error != GRPC_ERROR_NONE) {
-    /* already cancelled before subchannel became ready */
-    grpc_error *child_errors[] = {error, coe.error};
-    grpc_error *cancellation_error =
-        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-            "Cancelled before creating subchannel", child_errors,
-            GPR_ARRAY_SIZE(child_errors));
-    /* if due to deadline, attach the deadline exceeded status to the error */
-    if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
-      cancellation_error =
-          grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
-                             GRPC_STATUS_DEADLINE_EXCEEDED);
+              calld, grpc_error_string(calld->error));
     }
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: cancelled before subchannel became ready: %s",
-              chand, calld, grpc_error_string(cancellation_error));
-    }
-    waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
+    waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
   } else {
     /* Create call on subchannel. */
     create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
   GRPC_ERROR_UNREF(error);
 }
 
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
-  call_data *calld = elem->call_data;
-  grpc_subchannel_call *subchannel_call =
-      get_call_or_error(calld).subchannel_call;
-  if (subchannel_call == NULL) {
-    return NULL;
-  } else {
-    return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
-  }
+// A wrapper around pick_done_locked() that is used in cases where
+// either (a) the pick was deferred pending a resolver result or (b) the
+// pick was done asynchronously.  Removes the call's polling entity from
+// chand->interested_parties before invoking pick_done_locked().
+static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
+                                   grpc_call_element *elem, grpc_error *error) {
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+                                           chand->interested_parties);
+  pick_done_locked(exec_ctx, elem, error);
 }
 
-/** Return true if the pick is complete, or false if it is pending
-    (in which case subchannel_ready_locked() will be invoked when the
-    pick is complete). */
-static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
-                                   grpc_call_element *elem);
-
-typedef struct {
-  grpc_call_element *elem;
-  bool cancelled;
-  grpc_closure closure;
-} pick_after_resolver_result_args;
-
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
-                                                   void *arg,
-                                                   grpc_error *error) {
-  pick_after_resolver_result_args *args = arg;
-  if (args->cancelled) {
-    /* cancelled, do nothing */
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                        grpc_error *error) {
+  grpc_call_element *elem = (grpc_call_element *)arg;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  if (calld->lb_policy != NULL) {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "call cancelled before resolver result");
-    }
-  } else {
-    channel_data *chand = args->elem->channel_data;
-    call_data *calld = args->elem->call_data;
-    if (error != GRPC_ERROR_NONE) {
-      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-        gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
-                chand, calld);
-      }
-      subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
-    } else {
-      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-        gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
-                chand, calld);
-      }
-      if (pick_subchannel_locked(exec_ctx, args->elem)) {
-        subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
-      }
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+              chand, calld, calld->lb_policy);
     }
+    grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+                                      &calld->connected_subchannel,
+                                      GRPC_ERROR_REF(error));
   }
-}
-
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
-                                                    grpc_call_element *elem) {
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-    gpr_log(GPR_DEBUG,
-            "chand=%p calld=%p: deferring pick pending resolver result", chand,
-            calld);
-  }
-  pick_after_resolver_result_args *args =
-      (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
-  args->elem = elem;
-  GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
-                    args, grpc_combiner_scheduler(chand->combiner));
-  grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
-                           &args->closure, GRPC_ERROR_NONE);
-}
-
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
-                                                     grpc_call_element *elem,
-                                                     grpc_error *error) {
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  // If we don't yet have a resolver result, then a closure for
-  // pick_after_resolver_result_done_locked() will have been added to
-  // chand->waiting_for_resolver_result_closures, and it may not be invoked
-  // until after this call has been destroyed.  We mark the operation as
-  // cancelled, so that when pick_after_resolver_result_done_locked()
-  // is called, it will be a no-op.  We also immediately invoke
-  // subchannel_ready_locked() to propagate the error back to the caller.
-  for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
-       closure != NULL; closure = closure->next_data.next) {
-    pick_after_resolver_result_args *args = closure->cb_arg;
-    if (!args->cancelled && args->elem == elem) {
-      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: "
-                "cancelling pick waiting for resolver result",
-                chand, calld);
-      }
-      args->cancelled = true;
-      subchannel_ready_locked(exec_ctx, elem,
-                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                                  "Pick cancelled", &error, 1));
-    }
-  }
-  GRPC_ERROR_UNREF(error);
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
 }
 
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy and invokes subchannel_ready_locked().
+// Unrefs the LB policy and invokes async_pick_done_locked().
 static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
+  grpc_call_element *elem = (grpc_call_element *)arg;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
             chand, calld);
@@ -1163,28 +1090,51 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
   GPR_ASSERT(calld->lb_policy != NULL);
   GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
   calld->lb_policy = NULL;
-
+  async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
 }
 
 // Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
 // If the pick was completed synchronously, unrefs the LB policy and
 // returns true.
 static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
-                                       grpc_call_element *elem
-
-
-  call_data *calld = elem->call_data;
+                                       grpc_call_element *elem) {
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
             chand, calld, chand->lb_policy);
   }
+  apply_service_config_to_call_locked(exec_ctx, elem);
+  // If the application explicitly set wait_for_ready, use that.
+  // Otherwise, if the service config specified a value for this
+  // method, use that.
+  uint32_t initial_metadata_flags =
+      calld->initial_metadata_batch->payload->send_initial_metadata
+          .send_initial_metadata_flags;
+  const bool wait_for_ready_set_from_api =
+      initial_metadata_flags &
+      GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
+  const bool wait_for_ready_set_from_service_config =
+      calld->method_params != NULL &&
+      calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
+  if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
+    if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
+      initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+    } else {
+      initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+    }
+  }
+  const grpc_lb_policy_pick_args inputs = {
+      calld->initial_metadata_batch->payload->send_initial_metadata
+          .send_initial_metadata,
+      initial_metadata_flags, &calld->lb_token_mdelem};
   // Keep a ref to the LB policy in calld while the pick is pending.
   GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
   calld->lb_policy = chand->lb_policy;
   GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
                     grpc_combiner_scheduler(chand->combiner));
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
+      exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
       calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
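The new wait_for_ready block above merges the per-call flag with the service-config default: a value the application set explicitly always wins; otherwise the configured default is ORed in or masked out. A runnable sketch of the same precedence rule, using toy flag constants rather than grpc's real ones:

/* Illustrative sketch of the precedence logic above (toy flag values,
   not grpc's constants): the API-set bit wins; otherwise the
   service-config default is applied. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAIT_FOR_READY (1u << 0)
#define WAIT_FOR_READY_EXPLICITLY_SET (1u << 1)

typedef enum { WFR_UNSET, WFR_FALSE, WFR_TRUE } wfr_config;

static uint32_t merge_wait_for_ready(uint32_t flags, wfr_config cfg) {
  bool set_from_api = flags & WAIT_FOR_READY_EXPLICITLY_SET;
  bool set_from_config = cfg != WFR_UNSET;
  if (!set_from_api && set_from_config) {
    if (cfg == WFR_TRUE) {
      flags |= WAIT_FOR_READY;
    } else {
      flags &= ~WAIT_FOR_READY;
    }
  }
  return flags;
}

int main(void) {
  /* Application said nothing; service config says true: flag is set. */
  printf("%u\n", (unsigned)merge_wait_for_ready(0, WFR_TRUE));
  /* Application explicitly chose; config cannot override. */
  printf("%u\n",
         (unsigned)merge_wait_for_ready(WAIT_FOR_READY_EXPLICITLY_SET,
                                        WFR_TRUE));
  return 0;
}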
@@ -1194,160 +1144,160 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
     calld->lb_policy = NULL;
+  } else {
+    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+    grpc_call_combiner_set_notify_on_cancel(
+        exec_ctx, calld->call_combiner,
+        GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+                          pick_callback_cancel_locked, elem,
+                          grpc_combiner_scheduler(chand->combiner)));
   }
   return pick_done;
 }
 
-
-
-
-
-
-
-  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
-            chand, calld, calld->lb_policy);
-  }
-  grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
-                                    &calld->connected_subchannel, error);
-}
+typedef struct {
+  grpc_call_element *elem;
+  bool finished;
+  grpc_closure closure;
+  grpc_closure cancel_closure;
+} pick_after_resolver_result_args;
 
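pick_after_resolver_result_args introduced here is heap-allocated and shared by two closures, the resolver-result callback and the cancellation callback, which the combiner runs serialized. Whichever runs first marks the operation `finished`; whichever runs second only frees the struct. A simplified single-threaded sketch of that handoff (illustrative names; the real code relies on the combiner for the serialization that makes the unlocked flag safe):

/* Illustrative sketch (not grpc code) of the `finished` handoff:
   two callbacks share one heap-allocated args struct, the first to run
   marks it finished, and the second frees it. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  bool finished;
  int call_id;
} pick_args;

static void result_cb(pick_args *args) {
  if (args->finished) { /* lost the race: just clean up */
    free(args);
    return;
  }
  args->finished = true;
  printf("call %d: proceeding with pick\n", args->call_id);
}

static void cancel_cb(pick_args *args) {
  if (args->finished) { /* pick already ran: just clean up */
    free(args);
    return;
  }
  args->finished = true;
  printf("call %d: pick cancelled\n", args->call_id);
}

int main(void) {
  pick_args *args = calloc(1, sizeof(*args));
  if (args == NULL) return 1;
  args->call_id = 7;
  cancel_cb(args); /* first invocation marks finished */
  result_cb(args); /* second invocation frees the struct */
  return 0;
}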
-
-
-
-
-
-
-
-
-
-
-    // method, use that.
-    uint32_t initial_metadata_flags =
-        calld->initial_metadata_payload->send_initial_metadata
-            .send_initial_metadata_flags;
-    const bool wait_for_ready_set_from_api =
-        initial_metadata_flags &
-        GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
-    const bool wait_for_ready_set_from_service_config =
-        calld->method_params != NULL &&
-        calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
-    if (!wait_for_ready_set_from_api &&
-        wait_for_ready_set_from_service_config) {
-      if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
-        initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-      } else {
-        initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-      }
-    }
-    const grpc_lb_policy_pick_args inputs = {
-        calld->initial_metadata_payload->send_initial_metadata
-            .send_initial_metadata,
-        initial_metadata_flags, &calld->lb_token_mdelem};
-    pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
-  } else if (chand->resolver != NULL) {
-    if (!chand->started_resolving) {
-      start_resolving_locked(exec_ctx, chand);
-    }
-    pick_after_resolver_result_start_locked(exec_ctx, elem);
-  } else {
-    subchannel_ready_locked(
-        exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+                                                     void *arg,
+                                                     grpc_error *error) {
+  pick_after_resolver_result_args *args =
+      (pick_after_resolver_result_args *)arg;
+  if (args->finished) {
+    gpr_free(args);
+    return;
   }
-
-
+  // If we don't yet have a resolver result, then a closure for
+  // pick_after_resolver_result_done_locked() will have been added to
+  // chand->waiting_for_resolver_result_closures, and it may not be invoked
+  // until after this call has been destroyed.  We mark the operation as
+  // finished, so that when pick_after_resolver_result_done_locked()
+  // is called, it will be a no-op.  We also immediately invoke
+  // async_pick_done_locked() to propagate the error back to the caller.
+  args->finished = true;
+  grpc_call_element *elem = args->elem;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+    gpr_log(GPR_DEBUG,
+            "chand=%p calld=%p: cancelling pick waiting for resolver result",
+            chand, calld);
+  }
+  // Note: Although we are not in the call combiner here, we are
+  // basically stealing the call combiner from the pending pick, so
+  // it's safe to call async_pick_done_locked() here -- we are
+  // essentially calling it here instead of calling it in
+  // pick_after_resolver_result_done_locked().
+  async_pick_done_locked(exec_ctx, elem,
+                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                             "Pick cancelled", &error, 1));
 }
 
-static void
+static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
                                                    void *arg,
-                                                   grpc_error *
-
-
-
-
-  channel_data *chand = elem->channel_data;
-  /* need to recheck that another thread hasn't set the call */
-  call_or_error coe = get_call_or_error(calld);
-  if (coe.error != GRPC_ERROR_NONE) {
+                                                   grpc_error *error) {
+  pick_after_resolver_result_args *args =
+      (pick_after_resolver_result_args *)arg;
+  if (args->finished) {
+    /* cancelled, do nothing */
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "
-              chand, calld, grpc_error_string(coe.error));
+      gpr_log(GPR_DEBUG, "call cancelled before resolver result");
     }
-
-
-    goto done;
+    gpr_free(args);
+    return;
   }
-
+  args->finished = true;
+  grpc_call_element *elem = args->elem;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  if (error != GRPC_ERROR_NONE) {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG,
-
-              calld, coe.subchannel_call);
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+              chand, calld);
     }
-
-
-  }
-  // Add to waiting-for-pick list.  If we succeed in getting a
-  // subchannel call below, we'll handle this batch (along with any
-  // other waiting batches) in waiting_for_pick_batches_resume_locked().
-  waiting_for_pick_batches_add_locked(calld, batch);
-  // If this is a cancellation, cancel the pending pick (if any) and
-  // fail any pending batches.
-  if (batch->cancel_stream) {
-    grpc_error *error = batch->payload->cancel_stream.cancel_error;
+    async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+  } else {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p:
-
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+              chand, calld);
     }
-
-
-
-
-
-
-
-      pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
-    } else {
-      pick_after_resolver_result_cancel_locked(exec_ctx, elem,
-                                               GRPC_ERROR_REF(error));
+    if (pick_callback_start_locked(exec_ctx, elem)) {
+      // Even if the LB policy returns a result synchronously, we have
+      // already added our polling entity to chand->interested_parties
+      // in order to wait for the resolver result, so we need to
+      // remove it here.  Therefore, we call async_pick_done_locked()
+      // instead of pick_done_locked().
+      async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
     }
-    waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
-    goto done;
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+}
+
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+                                                    grpc_call_element *elem) {
+  channel_data *chand = (channel_data *)elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+    gpr_log(GPR_DEBUG,
+            "chand=%p calld=%p: deferring pick pending resolver result", chand,
+            calld);
+  }
+  pick_after_resolver_result_args *args =
+      (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+  args->elem = elem;
+  GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
+                    args, grpc_combiner_scheduler(chand->combiner));
+  grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+                           &args->closure, GRPC_ERROR_NONE);
+  grpc_call_combiner_set_notify_on_cancel(
+      exec_ctx, calld->call_combiner,
+      GRPC_CLOSURE_INIT(&args->cancel_closure,
+                        pick_after_resolver_result_cancel_locked, args,
+                        grpc_combiner_scheduler(chand->combiner)));
+}
+
+static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                              grpc_error *ignored) {
+  grpc_call_element *elem = (grpc_call_element *)arg;
+  call_data *calld = (call_data *)elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
+  GPR_ASSERT(calld->connected_subchannel == NULL);
+  if (chand->lb_policy != NULL) {
+    // We already have an LB policy, so ask it for a pick.
+    if (pick_callback_start_locked(exec_ctx, elem)) {
+      // Pick completed synchronously.
+      pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      return;
+    }
+  } else {
+    // We do not yet have an LB policy, so wait for a resolver result.
+    if (chand->resolver == NULL) {
+      pick_done_locked(exec_ctx, elem,
+                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+      return;
+    }
+    if (!chand->started_resolving) {
+      start_resolving_locked(exec_ctx, chand);
     }
+    pick_after_resolver_result_start_locked(exec_ctx, elem);
   }
-
-
-
-
+  // We need to wait for either a resolver result or for an async result
+  // from the LB policy.  Add the polling entity from call_data to the
+  // channel_data's interested_parties, so that the I/O of the LB policy
+  // and resolver can be done under it.  The polling entity will be
+  // removed in async_pick_done_locked().
+  grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+                                         chand->interested_parties);
 }
 
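start_pick_locked above reduces to a three-way dispatch: pick immediately when an LB policy exists, fail fast when the channel has no resolver (disconnected), or kick the resolver and defer. A stub sketch of just that control flow (all names hypothetical; the real function additionally parks the call's polling entity on interested_parties for the async cases):

/* Illustrative sketch of the dispatch in start_pick_locked (stub
   helpers, not grpc's API): pick now if a policy exists, fail if the
   channel is disconnected, otherwise defer until the resolver reports. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool have_lb_policy;
  bool have_resolver;
  bool started_resolving;
} channel_state;

static bool try_pick_now(void) { return true; } /* pretend sync pick */

static void start_pick(channel_state *chand) {
  if (chand->have_lb_policy) {
    if (try_pick_now()) {
      printf("pick completed synchronously\n");
      return;
    }
  } else {
    if (!chand->have_resolver) {
      printf("fail: disconnected\n");
      return;
    }
    if (!chand->started_resolving) {
      chand->started_resolving = true;
      printf("starting resolver\n");
    }
    printf("deferring pick until resolver result\n");
  }
  /* Async path: the real code adds the call's polling entity to the
     channel's interested_parties here and removes it on completion. */
}

int main(void) {
  channel_state chand = {false, true, false};
  start_pick(&chand);
  return 0;
}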
 static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_call_element *elem = arg;
-  call_data *calld = elem->call_data;
+  grpc_call_element *elem = (grpc_call_element *)arg;
+  call_data *calld = (call_data *)elem->call_data;
   if (calld->retry_throttle_data != NULL) {
     if (error == GRPC_ERROR_NONE) {
       grpc_server_retry_throttle_data_record_success(
@@ -1365,27 +1315,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
           GRPC_ERROR_REF(error));
   }
 
-  /* The logic here is fairly complicated, due to (a) the fact that we
-     need to handle the case where we receive the send op before the
-     initial metadata op, and (b) the need for efficiency, especially in
-     the streaming case.
-
-     We use double-checked locking to initially see if initialization has been
-     performed. If it has not, we acquire the combiner and perform initialization.
-     If it has, we proceed on the fast path. */
 static void cc_start_transport_stream_op_batch(
     grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     grpc_transport_stream_op_batch *batch) {
-  call_data *calld = elem->call_data;
-  channel_data *chand = elem->channel_data;
-  if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
-      GRPC_TRACER_ON(grpc_trace_channel)) {
-    grpc_call_log_op(GPR_INFO, elem, batch);
-  }
+  call_data *calld = (call_data *)elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
                                                                batch);
   }
+  GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+  // If we've previously been cancelled, immediately fail any new batches.
+  if (calld->error != GRPC_ERROR_NONE) {
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+              chand, calld, grpc_error_string(calld->error));
+    }
+    grpc_transport_stream_op_batch_finish_with_failure(
+        exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+    goto done;
+  }
+  if (batch->cancel_stream) {
+    // Stash a copy of cancel_error in our call data, so that we can use
+    // it for subsequent operations.  This ensures that if the call is
+    // cancelled before any batches are passed down (e.g., if the deadline
+    // is in the past when the call starts), we can return the right
+    // error to the caller when the first batch does get passed down.
+    GRPC_ERROR_UNREF(calld->error);
+    calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+              calld, grpc_error_string(calld->error));
+    }
+    // If we have a subchannel call, send the cancellation batch down.
+    // Otherwise, fail all pending batches.
+    if (calld->subchannel_call != NULL) {
+      grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+    } else {
+      waiting_for_pick_batches_add(calld, batch);
+      waiting_for_pick_batches_fail(exec_ctx, elem,
+                                    GRPC_ERROR_REF(calld->error));
+    }
+    goto done;
+  }
   // Intercept on_complete for recv_trailing_metadata so that we can
   // check retry throttle status.
   if (batch->recv_trailing_metadata) {
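The new cancel_stream handling above stashes the cancellation error in call data so that any batch arriving after the cancellation fails with the original cause rather than a generic error. A tiny sketch of that stash-and-replay idea (hypothetical types, not grpc's API):

/* Illustrative sketch (not grpc code) of the cancel-error stashing:
   remember the first cancellation reason and fail every later batch
   with it. */
#include <stdio.h>

typedef struct {
  const char *cancel_error; /* NULL means not cancelled */
} call_state;

static void start_batch(call_state *calld, const char *op) {
  if (calld->cancel_error != NULL) {
    printf("batch '%s' failed: %s\n", op, calld->cancel_error);
    return;
  }
  printf("batch '%s' accepted\n", op);
}

int main(void) {
  call_state calld = {NULL};
  start_batch(&calld, "send_initial_metadata");
  calld.cancel_error = "deadline exceeded"; /* cancellation arrives */
  start_batch(&calld, "recv_message"); /* replays the stored cause */
  return 0;
}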
@@ -1395,38 +1367,44 @@ static void cc_start_transport_stream_op_batch(
                       grpc_schedule_on_exec_ctx);
     batch->on_complete = &calld->on_complete;
   }
-
-
-
-
+  // Check if we've already gotten a subchannel call.
+  // Note that once we have completed the pick, we do not need to enter
+  // the channel combiner, which is more efficient (especially for
+  // streaming calls).
+  if (calld->subchannel_call != NULL) {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG,
-              chand
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+              calld, calld->subchannel_call);
     }
-
-        exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+    grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
     goto done;
   }
-
+  // We do not yet have a subchannel call.
+  // Add the batch to the waiting-for-pick list.
+  waiting_for_pick_batches_add(calld, batch);
+  // For batches containing a send_initial_metadata op, enter the channel
+  // combiner to start a pick.
+  if (batch->send_initial_metadata) {
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
+              chand, calld);
+    }
+    GRPC_CLOSURE_SCHED(
+        exec_ctx,
+        GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+                          elem, grpc_combiner_scheduler(chand->combiner)),
+        GRPC_ERROR_NONE);
+  } else {
+    // For all other batches, release the call combiner.
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p:
-              calld
+              "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
+              calld);
     }
-
-
+    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+                            "batch does not include send_initial_metadata");
   }
-  /* we failed; lock and figure out what to do */
-  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
-  }
-  GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
-  batch->handler_private.extra_arg = elem;
-  GRPC_CLOSURE_SCHED(
-      exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
-                                  start_transport_stream_op_batch_locked, batch,
-                                  grpc_combiner_scheduler(chand->combiner)),
-      GRPC_ERROR_NONE);
 done:
   GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
 }
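The routing logic above follows one rule: batches go straight down once a subchannel call exists; before that they are queued, and only a batch carrying send_initial_metadata enters the channel combiner to trigger a pick, while everything else simply yields the call combiner. A stub sketch of that decision (illustrative names only):

/* Illustrative sketch of the batch routing (stub types, not grpc's
   API): send down once a subchannel call exists; otherwise queue, and
   only a send_initial_metadata batch kicks off a pick. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool send_initial_metadata;
  const char *name;
} batch_t;

static void route_batch(bool have_subchannel_call, const batch_t *b) {
  if (have_subchannel_call) {
    printf("%s: sent down to subchannel call\n", b->name);
    return;
  }
  printf("%s: queued while pick is pending\n", b->name);
  if (b->send_initial_metadata) {
    printf("%s: entering channel combiner to start pick\n", b->name);
  } else {
    printf("%s: yielding call combiner\n", b->name);
  }
}

int main(void) {
  batch_t first = {true, "batch[send_initial_metadata]"};
  batch_t second = {false, "batch[recv_message]"};
  route_batch(false, &first);
  route_batch(false, &second);
  route_batch(true, &second);
  return 0;
}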
@@ -1435,16 +1413,18 @@ done:
 static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_element_args *args) {
-  call_data *calld = elem->call_data;
-  channel_data *chand = elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   // Initialize data members.
   calld->path = grpc_slice_ref_internal(args->path);
   calld->call_start_time = args->start_time;
   calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
-  calld->owning_call = args->call_stack;
   calld->arena = args->arena;
+  calld->owning_call = args->call_stack;
+  calld->call_combiner = args->call_combiner;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+    grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+                             args->call_combiner, calld->deadline);
   }
   return GRPC_ERROR_NONE;
 }
@@ -1454,8 +1434,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                  grpc_call_element *elem,
                                  const grpc_call_final_info *final_info,
                                  grpc_closure *then_schedule_closure) {
-  call_data *calld = elem->call_data;
-  channel_data *chand = elem->channel_data;
+  call_data *calld = (call_data *)elem->call_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_destroy(exec_ctx, elem);
   }
@@ -1463,13 +1443,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
   if (calld->method_params != NULL) {
     method_parameters_unref(calld->method_params);
   }
-
-
-
-    grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+  GRPC_ERROR_UNREF(calld->error);
+  if (calld->subchannel_call != NULL) {
+    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = NULL;
-    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx,
+    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
                                "client_channel_destroy_call");
   }
   GPR_ASSERT(calld->lb_policy == NULL);
@@ -1490,7 +1469,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
 static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
                                           grpc_call_element *elem,
                                           grpc_polling_entity *pollent) {
-  call_data *calld = elem->call_data;
+  call_data *calld = (call_data *)elem->call_data;
   calld->pollent = pollent;
 }
 
@@ -1508,14 +1487,13 @@ const grpc_channel_filter grpc_client_channel_filter = {
     sizeof(channel_data),
     cc_init_channel_elem,
     cc_destroy_channel_elem,
-    cc_get_peer,
     cc_get_channel_info,
     "client-channel",
 };
 
 static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                   grpc_error *error_ignored) {
-  channel_data *chand = arg;
+  channel_data *chand = (channel_data *)arg;
   if (chand->lb_policy != NULL) {
     grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
   } else {
@@ -1529,7 +1507,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
     grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   grpc_connectivity_state out =
       grpc_connectivity_state_check(&chand->state_tracker);
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1600,7 +1578,7 @@ static void external_connectivity_watcher_list_remove(
 
 int grpc_client_channel_num_external_connectivity_watchers(
     grpc_channel_element *elem) {
-  channel_data *chand = elem->channel_data;
+  channel_data *chand = (channel_data *)elem->channel_data;
   int count = 0;
 
   gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1617,7 +1595,7 @@ int grpc_client_channel_num_external_connectivity_watchers(
 
 static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
                                        grpc_error *error) {
-  external_connectivity_watcher *w = arg;
+  external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
   grpc_closure *follow_up = w->on_complete;
   grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
                                            w->chand->interested_parties);
@@ -1630,7 +1608,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
 
 static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                             grpc_error *error_ignored) {
-  external_connectivity_watcher *w = arg;
+  external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
   external_connectivity_watcher *found = NULL;
   if (w->state != NULL) {
     external_connectivity_watcher_list_append(w->chand, w);
@@ -1659,8 +1637,9 @@ void grpc_client_channel_watch_connectivity_state(
     grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
     grpc_polling_entity pollent, grpc_connectivity_state *state,
     grpc_closure *closure, grpc_closure *watcher_timer_init) {
-  channel_data *chand = elem->channel_data;
-  external_connectivity_watcher *w =
+  channel_data *chand = (channel_data *)elem->channel_data;
+  external_connectivity_watcher *w =
+      (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
   w->chand = chand;
   w->pollent = pollent;
   w->on_complete = closure;