grpc 1.14.2 → 1.15.0.pre1
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of grpc might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Makefile +307 -12
- data/etc/roots.pem +40 -163
- data/include/grpc/grpc.h +49 -0
- data/include/grpc/grpc_security.h +0 -6
- data/include/grpc/grpc_security_constants.h +6 -0
- data/include/grpc/impl/codegen/grpc_types.h +17 -2
- data/include/grpc/impl/codegen/port_platform.h +41 -4
- data/include/grpc/support/sync.h +0 -16
- data/src/{cpp → core}/ext/filters/census/grpc_context.cc +0 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +40 -11
- data/src/core/ext/filters/client_channel/client_channel_channelz.cc +11 -9
- data/src/core/ext/filters/client_channel/client_channel_channelz.h +4 -2
- data/src/core/ext/filters/client_channel/lb_policy.h +14 -11
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +67 -90
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +108 -91
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +79 -25
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +40 -0
- data/src/core/ext/filters/client_channel/resolver.h +8 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +11 -3
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +13 -10
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +18 -4
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +13 -5
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +537 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +6 -5
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +11 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +29 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +29 -0
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +9 -0
- data/src/core/ext/filters/client_channel/subchannel.cc +21 -8
- data/src/core/ext/filters/client_channel/subchannel.h +7 -0
- data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +24 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +10 -7
- data/src/core/lib/channel/channel_stack.h +1 -1
- data/src/core/lib/channel/channel_trace.cc +1 -1
- data/src/core/lib/channel/channel_trace.h +1 -1
- data/src/core/lib/channel/channelz.cc +37 -27
- data/src/core/lib/channel/channelz.h +13 -4
- data/src/core/lib/channel/channelz_registry.cc +89 -4
- data/src/core/lib/channel/channelz_registry.h +56 -39
- data/src/core/lib/gpr/arena.cc +33 -40
- data/src/core/lib/gprpp/fork.cc +41 -33
- data/src/core/lib/gprpp/fork.h +13 -4
- data/src/core/lib/gprpp/mutex_lock.h +42 -0
- data/src/core/lib/gprpp/orphanable.h +4 -2
- data/src/core/lib/gprpp/ref_counted.h +4 -2
- data/src/core/lib/gprpp/ref_counted_ptr.h +65 -13
- data/src/core/lib/iomgr/call_combiner.h +4 -1
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +77 -17
- data/src/core/lib/iomgr/ev_epollex_linux.cc +8 -26
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +10 -28
- data/src/core/lib/iomgr/ev_poll_posix.cc +144 -35
- data/src/core/lib/iomgr/ev_posix.cc +58 -9
- data/src/core/lib/iomgr/ev_posix.h +22 -8
- data/src/core/lib/iomgr/exec_ctx.cc +6 -0
- data/src/core/lib/iomgr/exec_ctx.h +2 -0
- data/src/core/lib/iomgr/executor.cc +148 -72
- data/src/core/lib/iomgr/executor.h +39 -6
- data/src/core/lib/iomgr/fork_posix.cc +12 -1
- data/src/core/lib/iomgr/iocp_windows.cc +9 -4
- data/src/core/lib/iomgr/lockfree_event.cc +5 -1
- data/src/core/lib/iomgr/port.h +15 -2
- data/src/core/lib/iomgr/resolve_address_posix.cc +3 -2
- data/src/core/lib/iomgr/resolve_address_windows.cc +3 -2
- data/src/core/lib/iomgr/resource_quota.cc +78 -0
- data/src/core/lib/iomgr/resource_quota.h +16 -0
- data/src/core/lib/iomgr/socket_mutator.cc +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +1 -1
- data/src/core/lib/iomgr/socket_windows.cc +33 -0
- data/src/core/lib/iomgr/socket_windows.h +6 -0
- data/src/core/lib/iomgr/tcp_windows.cc +2 -2
- data/src/core/lib/iomgr/tcp_windows.h +2 -0
- data/src/core/lib/iomgr/timer.h +3 -2
- data/src/core/lib/json/json.cc +2 -1
- data/src/core/lib/security/credentials/jwt/json_token.h +2 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +1 -1
- data/src/core/lib/security/security_connector/load_system_roots.h +29 -0
- data/src/core/lib/security/security_connector/load_system_roots_fallback.cc +32 -0
- data/src/core/lib/security/security_connector/load_system_roots_linux.cc +165 -0
- data/src/core/lib/security/security_connector/load_system_roots_linux.h +44 -0
- data/src/core/lib/security/security_connector/security_connector.cc +23 -4
- data/src/core/lib/security/transport/client_auth_filter.cc +0 -4
- data/src/core/lib/security/transport/server_auth_filter.cc +0 -2
- data/src/core/lib/surface/call.cc +7 -3
- data/src/core/lib/surface/channel.cc +18 -2
- data/src/core/lib/surface/completion_queue.cc +152 -15
- data/src/core/lib/surface/completion_queue.h +20 -1
- data/src/core/lib/surface/completion_queue_factory.cc +13 -4
- data/src/core/lib/surface/init.cc +2 -2
- data/src/core/lib/surface/init.h +0 -1
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/service_config.cc +2 -2
- data/src/core/lib/transport/service_config.h +3 -3
- data/src/core/lib/transport/transport.h +2 -0
- data/src/core/tsi/alts/crypt/aes_gcm.cc +2 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +8 -0
- data/src/core/tsi/grpc_shadow_boringssl.h +3006 -0
- data/src/core/tsi/ssl/session_cache/ssl_session.h +2 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +5 -5
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +2 -0
- data/src/core/tsi/ssl_transport_security.cc +5 -3
- data/src/core/tsi/ssl_types.h +2 -0
- data/src/ruby/ext/grpc/extconf.rb +1 -26
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +12 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +18 -0
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +3 -3
- data/third_party/address_sorting/address_sorting.c +7 -2
- data/third_party/address_sorting/address_sorting_windows.c +43 -3
- data/third_party/address_sorting/include/address_sorting/address_sorting.h +3 -0
- metadata +40 -31
@@ -107,6 +107,11 @@ class SubchannelData {
|
|
107
107
|
// being unreffed.
|
108
108
|
virtual void UnrefSubchannelLocked(const char* reason);
|
109
109
|
|
110
|
+
// Resets the connection backoff.
|
111
|
+
// TODO(roth): This method should go away when we move the backoff
|
112
|
+
// code out of the subchannel and into the LB policies.
|
113
|
+
void ResetBackoffLocked();
|
114
|
+
|
110
115
|
// Starts watching the connectivity state of the subchannel.
|
111
116
|
// ProcessConnectivityChangeLocked() will be called when the
|
112
117
|
// connectivity state changes.
|
@@ -189,10 +194,28 @@ class SubchannelList
|
|
189
194
|
// Returns true if the subchannel list is shutting down.
|
190
195
|
bool shutting_down() const { return shutting_down_; }
|
191
196
|
|
197
|
+
// Populates refs_list with the uuids of this SubchannelLists's subchannels.
|
198
|
+
void PopulateChildRefsList(ChildRefsList* refs_list) {
|
199
|
+
for (size_t i = 0; i < subchannels_.size(); ++i) {
|
200
|
+
if (subchannels_[i].subchannel() != nullptr) {
|
201
|
+
grpc_core::channelz::SubchannelNode* subchannel_node =
|
202
|
+
grpc_subchannel_get_channelz_node(subchannels_[i].subchannel());
|
203
|
+
if (subchannel_node != nullptr) {
|
204
|
+
refs_list->push_back(subchannel_node->subchannel_uuid());
|
205
|
+
}
|
206
|
+
}
|
207
|
+
}
|
208
|
+
}
|
209
|
+
|
192
210
|
// Accessors.
|
193
211
|
LoadBalancingPolicy* policy() const { return policy_; }
|
194
212
|
TraceFlag* tracer() const { return tracer_; }
|
195
213
|
|
214
|
+
// Resets connection backoff of all subchannels.
|
215
|
+
// TODO(roth): We will probably need to rethink this as part of moving
|
216
|
+
// the backoff code out of subchannels and into LB policies.
|
217
|
+
void ResetBackoffLocked();
|
218
|
+
|
196
219
|
// Note: Caller must ensure that this is invoked inside of the combiner.
|
197
220
|
void Orphan() override {
|
198
221
|
ShutdownLocked();
|
@@ -285,6 +308,14 @@ void SubchannelData<SubchannelListType, SubchannelDataType>::
|
|
285
308
|
}
|
286
309
|
}
|
287
310
|
|
311
|
+
template <typename SubchannelListType, typename SubchannelDataType>
|
312
|
+
void SubchannelData<SubchannelListType,
|
313
|
+
SubchannelDataType>::ResetBackoffLocked() {
|
314
|
+
if (subchannel_ != nullptr) {
|
315
|
+
grpc_subchannel_reset_backoff(subchannel_);
|
316
|
+
}
|
317
|
+
}
|
318
|
+
|
288
319
|
template <typename SubchannelListType, typename SubchannelDataType>
|
289
320
|
void SubchannelData<SubchannelListType,
|
290
321
|
SubchannelDataType>::StartConnectivityWatchLocked() {
|
@@ -531,6 +562,15 @@ void SubchannelList<SubchannelListType, SubchannelDataType>::ShutdownLocked() {
|
|
531
562
|
}
|
532
563
|
}
|
533
564
|
|
565
|
+
template <typename SubchannelListType, typename SubchannelDataType>
|
566
|
+
void SubchannelList<SubchannelListType,
|
567
|
+
SubchannelDataType>::ResetBackoffLocked() {
|
568
|
+
for (size_t i = 0; i < subchannels_.size(); i++) {
|
569
|
+
SubchannelDataType* sd = &subchannels_[i];
|
570
|
+
sd->ResetBackoffLocked();
|
571
|
+
}
|
572
|
+
}
|
573
|
+
|
534
574
|
} // namespace grpc_core
|
535
575
|
|
536
576
|
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */
|
@@ -94,6 +94,14 @@ class Resolver : public InternallyRefCountedWithTracing<Resolver> {
|
|
94
94
|
/// throw away unselected subchannels.
|
95
95
|
virtual void RequestReresolutionLocked() GRPC_ABSTRACT;
|
96
96
|
|
97
|
+
/// Resets the re-resolution backoff, if any.
|
98
|
+
/// This needs to be implemented only by pull-based implementations;
|
99
|
+
/// for push-based implementations, it will be a no-op.
|
100
|
+
/// TODO(roth): Pull the backoff code out of resolver and into
|
101
|
+
/// client_channel, so that it can be shared across resolver
|
102
|
+
/// implementations. At that point, this method can go away.
|
103
|
+
virtual void ResetBackoffLocked() {}
|
104
|
+
|
97
105
|
void Orphan() override {
|
98
106
|
// Invoke ShutdownAndUnrefLocked() inside of the combiner.
|
99
107
|
GRPC_CLOSURE_SCHED(
|
@@ -23,7 +23,6 @@
|
|
23
23
|
#include <limits.h>
|
24
24
|
#include <stdio.h>
|
25
25
|
#include <string.h>
|
26
|
-
#include <unistd.h>
|
27
26
|
|
28
27
|
#include <grpc/support/alloc.h>
|
29
28
|
#include <grpc/support/string_util.h>
|
@@ -67,6 +66,8 @@ class AresDnsResolver : public Resolver {
|
|
67
66
|
|
68
67
|
void RequestReresolutionLocked() override;
|
69
68
|
|
69
|
+
void ResetBackoffLocked() override;
|
70
|
+
|
70
71
|
void ShutdownLocked() override;
|
71
72
|
|
72
73
|
private:
|
@@ -142,8 +143,8 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
|
|
142
143
|
channel_args_ = grpc_channel_args_copy(args.args);
|
143
144
|
const grpc_arg* arg = grpc_channel_args_find(
|
144
145
|
channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
|
145
|
-
|
146
|
-
|
146
|
+
grpc_integer_options integer_options = {false, false, true};
|
147
|
+
request_service_config_ = !grpc_channel_arg_get_integer(arg, integer_options);
|
147
148
|
arg = grpc_channel_args_find(channel_args_,
|
148
149
|
GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS);
|
149
150
|
min_time_between_resolutions_ =
|
@@ -188,6 +189,13 @@ void AresDnsResolver::RequestReresolutionLocked() {
|
|
188
189
|
}
|
189
190
|
}
|
190
191
|
|
192
|
+
void AresDnsResolver::ResetBackoffLocked() {
|
193
|
+
if (have_next_resolution_timer_) {
|
194
|
+
grpc_timer_cancel(&next_resolution_timer_);
|
195
|
+
}
|
196
|
+
backoff_.Reset();
|
197
|
+
}
|
198
|
+
|
191
199
|
void AresDnsResolver::ShutdownLocked() {
|
192
200
|
if (have_next_resolution_timer_) {
|
193
201
|
grpc_timer_cancel(&next_resolution_timer_);
|
@@ -18,11 +18,10 @@
|
|
18
18
|
#include <grpc/support/port_platform.h>
|
19
19
|
|
20
20
|
#include "src/core/lib/iomgr/port.h"
|
21
|
-
#if GRPC_ARES == 1 && defined(
|
21
|
+
#if GRPC_ARES == 1 && !defined(GRPC_UV)
|
22
22
|
|
23
23
|
#include <ares.h>
|
24
24
|
#include <string.h>
|
25
|
-
#include <sys/ioctl.h>
|
26
25
|
|
27
26
|
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
|
28
27
|
|
@@ -32,7 +31,6 @@
|
|
32
31
|
#include <grpc/support/time.h>
|
33
32
|
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
|
34
33
|
#include "src/core/lib/gpr/string.h"
|
35
|
-
#include "src/core/lib/iomgr/ev_posix.h"
|
36
34
|
#include "src/core/lib/iomgr/iomgr_internal.h"
|
37
35
|
#include "src/core/lib/iomgr/sockaddr_utils.h"
|
38
36
|
|
@@ -76,6 +74,8 @@ struct grpc_ares_ev_driver {
|
|
76
74
|
bool shutting_down;
|
77
75
|
/** request object that's using this ev driver */
|
78
76
|
grpc_ares_request* request;
|
77
|
+
/** Owned by the ev_driver. Creates new GrpcPolledFd's */
|
78
|
+
grpc_core::UniquePtr<grpc_core::GrpcPolledFdFactory> polled_fd_factory;
|
79
79
|
};
|
80
80
|
|
81
81
|
static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver);
|
@@ -95,7 +95,7 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
|
|
95
95
|
GRPC_COMBINER_UNREF(ev_driver->combiner, "free ares event driver");
|
96
96
|
ares_destroy(ev_driver->channel);
|
97
97
|
grpc_ares_complete_request_locked(ev_driver->request);
|
98
|
-
|
98
|
+
grpc_core::Delete(ev_driver);
|
99
99
|
}
|
100
100
|
}
|
101
101
|
|
@@ -120,13 +120,11 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
|
|
120
120
|
grpc_pollset_set* pollset_set,
|
121
121
|
grpc_combiner* combiner,
|
122
122
|
grpc_ares_request* request) {
|
123
|
-
*ev_driver =
|
124
|
-
gpr_malloc(sizeof(grpc_ares_ev_driver)));
|
123
|
+
*ev_driver = grpc_core::New<grpc_ares_ev_driver>();
|
125
124
|
ares_options opts;
|
126
125
|
memset(&opts, 0, sizeof(opts));
|
127
126
|
opts.flags |= ARES_FLAG_STAYOPEN;
|
128
127
|
int status = ares_init_options(&(*ev_driver)->channel, &opts, ARES_OPT_FLAGS);
|
129
|
-
grpc_core::ConfigureAresChannelLocked(&(*ev_driver)->channel);
|
130
128
|
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create_locked");
|
131
129
|
if (status != ARES_SUCCESS) {
|
132
130
|
char* err_msg;
|
@@ -144,6 +142,10 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
|
|
144
142
|
(*ev_driver)->working = false;
|
145
143
|
(*ev_driver)->shutting_down = false;
|
146
144
|
(*ev_driver)->request = request;
|
145
|
+
(*ev_driver)->polled_fd_factory =
|
146
|
+
grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner);
|
147
|
+
(*ev_driver)
|
148
|
+
->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel);
|
147
149
|
return GRPC_ERROR_NONE;
|
148
150
|
}
|
149
151
|
|
@@ -247,8 +249,9 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
|
|
247
249
|
// Create a new fd_node if sock[i] is not in the fd_node list.
|
248
250
|
if (fdn == nullptr) {
|
249
251
|
fdn = static_cast<fd_node*>(gpr_malloc(sizeof(fd_node)));
|
250
|
-
fdn->grpc_polled_fd =
|
251
|
-
|
252
|
+
fdn->grpc_polled_fd =
|
253
|
+
ev_driver->polled_fd_factory->NewGrpcPolledFdLocked(
|
254
|
+
socks[i], ev_driver->pollset_set, ev_driver->combiner);
|
252
255
|
gpr_log(GPR_DEBUG, "new fd: %s", fdn->grpc_polled_fd->GetName());
|
253
256
|
fdn->ev_driver = ev_driver;
|
254
257
|
fdn->readable_registered = false;
|
@@ -314,4 +317,4 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) {
|
|
314
317
|
}
|
315
318
|
}
|
316
319
|
|
317
|
-
#endif /* GRPC_ARES == 1 && defined(
|
320
|
+
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
|
@@ -81,10 +81,24 @@ class GrpcPolledFd {
|
|
81
81
|
GRPC_ABSTRACT_BASE_CLASS
|
82
82
|
};
|
83
83
|
|
84
|
-
/*
|
85
|
-
|
86
|
-
|
87
|
-
|
84
|
+
/* A GrpcPolledFdFactory is 1-to-1 with and owned by the
|
85
|
+
* ares event driver. It knows how to create GrpcPolledFd's
|
86
|
+
* for the current platform, and the ares driver uses it for all of
|
87
|
+
* its fd's. */
|
88
|
+
class GrpcPolledFdFactory {
|
89
|
+
public:
|
90
|
+
virtual ~GrpcPolledFdFactory() {}
|
91
|
+
/* Creates a new wrapped fd for the current platform */
|
92
|
+
virtual GrpcPolledFd* NewGrpcPolledFdLocked(
|
93
|
+
ares_socket_t as, grpc_pollset_set* driver_pollset_set,
|
94
|
+
grpc_combiner* combiner) GRPC_ABSTRACT;
|
95
|
+
/* Optionally configures the ares channel after creation */
|
96
|
+
virtual void ConfigureAresChannelLocked(ares_channel channel) GRPC_ABSTRACT;
|
97
|
+
|
98
|
+
GRPC_ABSTRACT_BASE_CLASS
|
99
|
+
};
|
100
|
+
|
101
|
+
UniquePtr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(grpc_combiner* combiner);
|
88
102
|
|
89
103
|
} // namespace grpc_core
|
90
104
|
|
@@ -86,12 +86,20 @@ class GrpcPolledFdPosix : public GrpcPolledFd {
|
|
86
86
|
grpc_pollset_set* driver_pollset_set_;
|
87
87
|
};
|
88
88
|
|
89
|
-
|
90
|
-
|
91
|
-
|
92
|
-
|
89
|
+
class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory {
|
90
|
+
public:
|
91
|
+
GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as,
|
92
|
+
grpc_pollset_set* driver_pollset_set,
|
93
|
+
grpc_combiner* combiner) override {
|
94
|
+
return New<GrpcPolledFdPosix>(as, driver_pollset_set);
|
95
|
+
}
|
93
96
|
|
94
|
-
void ConfigureAresChannelLocked(ares_channel
|
97
|
+
void ConfigureAresChannelLocked(ares_channel channel) override {}
|
98
|
+
};
|
99
|
+
|
100
|
+
UniquePtr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(grpc_combiner* combiner) {
|
101
|
+
return UniquePtr<GrpcPolledFdFactory>(New<GrpcPolledFdFactoryPosix>());
|
102
|
+
}
|
95
103
|
|
96
104
|
} // namespace grpc_core
|
97
105
|
|
@@ -0,0 +1,537 @@
|
|
1
|
+
/*
|
2
|
+
*
|
3
|
+
* Copyright 2016 gRPC authors.
|
4
|
+
*
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
*
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
*
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*
|
17
|
+
*/
|
18
|
+
#include <grpc/support/port_platform.h>
|
19
|
+
|
20
|
+
#include "src/core/lib/iomgr/port.h"
|
21
|
+
#if GRPC_ARES == 1 && defined(GPR_WINDOWS)
|
22
|
+
|
23
|
+
#include <ares.h>
|
24
|
+
|
25
|
+
#include <grpc/support/alloc.h>
|
26
|
+
#include <grpc/support/log.h>
|
27
|
+
#include <grpc/support/log_windows.h>
|
28
|
+
#include <grpc/support/string_util.h>
|
29
|
+
#include <grpc/support/time.h>
|
30
|
+
#include <string.h>
|
31
|
+
#include "src/core/lib/gpr/string.h"
|
32
|
+
#include "src/core/lib/gprpp/memory.h"
|
33
|
+
#include "src/core/lib/iomgr/combiner.h"
|
34
|
+
#include "src/core/lib/iomgr/socket_windows.h"
|
35
|
+
#include "src/core/lib/iomgr/tcp_windows.h"
|
36
|
+
#include "src/core/lib/slice/slice_internal.h"
|
37
|
+
|
38
|
+
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
|
39
|
+
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
|
40
|
+
|
41
|
+
/* TODO(apolcyn): remove this hack after fixing upstream.
|
42
|
+
* Our grpc/c-ares code on Windows uses the ares_set_socket_functions API,
|
43
|
+
* which uses "struct iovec" type, which on Windows is defined inside of
|
44
|
+
* a c-ares header that is not public.
|
45
|
+
* See https://github.com/c-ares/c-ares/issues/206. */
|
46
|
+
struct iovec {
|
47
|
+
void* iov_base;
|
48
|
+
size_t iov_len;
|
49
|
+
};
|
50
|
+
|
51
|
+
namespace grpc_core {
|
52
|
+
|
53
|
+
/* c-ares creates its own sockets and is meant to read them when readable and
|
54
|
+
* write them when writeable. To fit this socket usage model into the grpc
|
55
|
+
* windows poller (which gives notifications when attempted reads and writes are
|
56
|
+
* actually fulfilled rather than possible), this GrpcPolledFdWindows class
|
57
|
+
* takes advantage of the ares_set_socket_functions API and acts as a virtual
|
58
|
+
* socket. It holds its own read and write buffers which are written to and read
|
59
|
+
* from c-ares and are used with the grpc windows poller, and it, e.g.,
|
60
|
+
* manufactures virtual socket error codes when it e.g. needs to tell the c-ares
|
61
|
+
* library to wait for an async read. */
|
62
|
+
class GrpcPolledFdWindows : public GrpcPolledFd {
|
63
|
+
public:
|
64
|
+
enum WriteState {
|
65
|
+
WRITE_IDLE,
|
66
|
+
WRITE_REQUESTED,
|
67
|
+
WRITE_PENDING,
|
68
|
+
WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY,
|
69
|
+
};
|
70
|
+
|
71
|
+
GrpcPolledFdWindows(ares_socket_t as, grpc_combiner* combiner)
|
72
|
+
: read_buf_(grpc_empty_slice()),
|
73
|
+
write_buf_(grpc_empty_slice()),
|
74
|
+
write_state_(WRITE_IDLE),
|
75
|
+
gotten_into_driver_list_(false) {
|
76
|
+
gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, as);
|
77
|
+
winsocket_ = grpc_winsocket_create(as, name_);
|
78
|
+
combiner_ = GRPC_COMBINER_REF(combiner, name_);
|
79
|
+
GRPC_CLOSURE_INIT(&outer_read_closure_,
|
80
|
+
&GrpcPolledFdWindows::OnIocpReadable, this,
|
81
|
+
grpc_combiner_scheduler(combiner_));
|
82
|
+
GRPC_CLOSURE_INIT(&outer_write_closure_,
|
83
|
+
&GrpcPolledFdWindows::OnIocpWriteable, this,
|
84
|
+
grpc_combiner_scheduler(combiner_));
|
85
|
+
}
|
86
|
+
|
87
|
+
~GrpcPolledFdWindows() {
|
88
|
+
GRPC_COMBINER_UNREF(combiner_, name_);
|
89
|
+
grpc_slice_unref_internal(read_buf_);
|
90
|
+
grpc_slice_unref_internal(write_buf_);
|
91
|
+
GPR_ASSERT(read_closure_ == nullptr);
|
92
|
+
GPR_ASSERT(write_closure_ == nullptr);
|
93
|
+
grpc_winsocket_destroy(winsocket_);
|
94
|
+
gpr_free(name_);
|
95
|
+
}
|
96
|
+
|
97
|
+
void ScheduleAndNullReadClosure(grpc_error* error) {
|
98
|
+
GRPC_CLOSURE_SCHED(read_closure_, error);
|
99
|
+
read_closure_ = nullptr;
|
100
|
+
}
|
101
|
+
|
102
|
+
void ScheduleAndNullWriteClosure(grpc_error* error) {
|
103
|
+
GRPC_CLOSURE_SCHED(write_closure_, error);
|
104
|
+
write_closure_ = nullptr;
|
105
|
+
}
|
106
|
+
|
107
|
+
void RegisterForOnReadableLocked(grpc_closure* read_closure) override {
|
108
|
+
GPR_ASSERT(read_closure_ == nullptr);
|
109
|
+
read_closure_ = read_closure;
|
110
|
+
GPR_ASSERT(GRPC_SLICE_LENGTH(read_buf_) == 0);
|
111
|
+
grpc_slice_unref_internal(read_buf_);
|
112
|
+
read_buf_ = GRPC_SLICE_MALLOC(4192);
|
113
|
+
WSABUF buffer;
|
114
|
+
buffer.buf = (char*)GRPC_SLICE_START_PTR(read_buf_);
|
115
|
+
buffer.len = GRPC_SLICE_LENGTH(read_buf_);
|
116
|
+
memset(&winsocket_->read_info.overlapped, 0, sizeof(OVERLAPPED));
|
117
|
+
recv_from_source_addr_len_ = sizeof(recv_from_source_addr_);
|
118
|
+
DWORD flags = 0;
|
119
|
+
if (WSARecvFrom(grpc_winsocket_wrapped_socket(winsocket_), &buffer, 1,
|
120
|
+
nullptr, &flags, (sockaddr*)recv_from_source_addr_,
|
121
|
+
&recv_from_source_addr_len_,
|
122
|
+
&winsocket_->read_info.overlapped, nullptr)) {
|
123
|
+
char* msg = gpr_format_message(WSAGetLastError());
|
124
|
+
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
|
125
|
+
GRPC_CARES_TRACE_LOG(
|
126
|
+
"RegisterForOnReadableLocked: WSARecvFrom error:|%s|. fd:|%s|", msg,
|
127
|
+
GetName());
|
128
|
+
gpr_free(msg);
|
129
|
+
if (WSAGetLastError() != WSA_IO_PENDING) {
|
130
|
+
ScheduleAndNullReadClosure(error);
|
131
|
+
return;
|
132
|
+
}
|
133
|
+
}
|
134
|
+
grpc_socket_notify_on_read(winsocket_, &outer_read_closure_);
|
135
|
+
}
|
136
|
+
|
137
|
+
void RegisterForOnWriteableLocked(grpc_closure* write_closure) override {
|
138
|
+
GRPC_CARES_TRACE_LOG(
|
139
|
+
"RegisterForOnWriteableLocked. fd:|%s|. Current write state: %d",
|
140
|
+
GetName(), write_state_);
|
141
|
+
GPR_ASSERT(write_closure_ == nullptr);
|
142
|
+
write_closure_ = write_closure;
|
143
|
+
switch (write_state_) {
|
144
|
+
case WRITE_IDLE:
|
145
|
+
ScheduleAndNullWriteClosure(GRPC_ERROR_NONE);
|
146
|
+
break;
|
147
|
+
case WRITE_REQUESTED:
|
148
|
+
write_state_ = WRITE_PENDING;
|
149
|
+
SendWriteBuf(nullptr, &winsocket_->write_info.overlapped);
|
150
|
+
grpc_socket_notify_on_write(winsocket_, &outer_write_closure_);
|
151
|
+
break;
|
152
|
+
case WRITE_PENDING:
|
153
|
+
case WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY:
|
154
|
+
abort();
|
155
|
+
}
|
156
|
+
}
|
157
|
+
|
158
|
+
bool IsFdStillReadableLocked() override {
|
159
|
+
return GRPC_SLICE_LENGTH(read_buf_) > 0;
|
160
|
+
}
|
161
|
+
|
162
|
+
void ShutdownLocked(grpc_error* error) override {
|
163
|
+
grpc_winsocket_shutdown(winsocket_);
|
164
|
+
}
|
165
|
+
|
166
|
+
ares_socket_t GetWrappedAresSocketLocked() override {
|
167
|
+
return grpc_winsocket_wrapped_socket(winsocket_);
|
168
|
+
}
|
169
|
+
|
170
|
+
const char* GetName() override { return name_; }
|
171
|
+
|
172
|
+
ares_ssize_t RecvFrom(void* data, ares_socket_t data_len, int flags,
|
173
|
+
struct sockaddr* from, ares_socklen_t* from_len) {
|
174
|
+
GRPC_CARES_TRACE_LOG(
|
175
|
+
"RecvFrom called on fd:|%s|. Current read buf length:|%d|", GetName(),
|
176
|
+
GRPC_SLICE_LENGTH(read_buf_));
|
177
|
+
if (GRPC_SLICE_LENGTH(read_buf_) == 0) {
|
178
|
+
WSASetLastError(WSAEWOULDBLOCK);
|
179
|
+
return -1;
|
180
|
+
}
|
181
|
+
ares_ssize_t bytes_read = 0;
|
182
|
+
for (size_t i = 0; i < GRPC_SLICE_LENGTH(read_buf_) && i < data_len; i++) {
|
183
|
+
((char*)data)[i] = GRPC_SLICE_START_PTR(read_buf_)[i];
|
184
|
+
bytes_read++;
|
185
|
+
}
|
186
|
+
read_buf_ = grpc_slice_sub_no_ref(read_buf_, bytes_read,
|
187
|
+
GRPC_SLICE_LENGTH(read_buf_));
|
188
|
+
/* c-ares overloads this recv_from virtual socket function to receive
|
189
|
+
* data on both UDP and TCP sockets, and from is nullptr for TCP. */
|
190
|
+
if (from != nullptr) {
|
191
|
+
GPR_ASSERT(*from_len <= recv_from_source_addr_len_);
|
192
|
+
memcpy(from, &recv_from_source_addr_, recv_from_source_addr_len_);
|
193
|
+
*from_len = recv_from_source_addr_len_;
|
194
|
+
}
|
195
|
+
return bytes_read;
|
196
|
+
}
|
197
|
+
|
198
|
+
grpc_slice FlattenIovec(const struct iovec* iov, int iov_count) {
|
199
|
+
int total = 0;
|
200
|
+
for (int i = 0; i < iov_count; i++) {
|
201
|
+
total += iov[i].iov_len;
|
202
|
+
}
|
203
|
+
grpc_slice out = GRPC_SLICE_MALLOC(total);
|
204
|
+
size_t cur = 0;
|
205
|
+
for (int i = 0; i < iov_count; i++) {
|
206
|
+
for (int k = 0; k < iov[i].iov_len; k++) {
|
207
|
+
GRPC_SLICE_START_PTR(out)[cur++] = ((char*)iov[i].iov_base)[k];
|
208
|
+
}
|
209
|
+
}
|
210
|
+
return out;
|
211
|
+
}
|
212
|
+
|
213
|
+
int SendWriteBuf(LPDWORD bytes_sent_ptr, LPWSAOVERLAPPED overlapped) {
|
214
|
+
WSABUF buf;
|
215
|
+
buf.len = GRPC_SLICE_LENGTH(write_buf_);
|
216
|
+
buf.buf = (char*)GRPC_SLICE_START_PTR(write_buf_);
|
217
|
+
DWORD flags = 0;
|
218
|
+
int out = WSASend(grpc_winsocket_wrapped_socket(winsocket_), &buf, 1,
|
219
|
+
bytes_sent_ptr, flags, overlapped, nullptr);
|
220
|
+
GRPC_CARES_TRACE_LOG(
|
221
|
+
"WSASend: name:%s. buf len:%d. bytes sent: %d. overlapped %p. return "
|
222
|
+
"val: %d",
|
223
|
+
GetName(), buf.len, *bytes_sent_ptr, overlapped, out);
|
224
|
+
return out;
|
225
|
+
}
|
226
|
+
|
227
|
+
ares_ssize_t TrySendWriteBufSyncNonBlocking() {
|
228
|
+
GPR_ASSERT(write_state_ == WRITE_IDLE);
|
229
|
+
ares_ssize_t total_sent;
|
230
|
+
DWORD bytes_sent = 0;
|
231
|
+
if (SendWriteBuf(&bytes_sent, nullptr) != 0) {
|
232
|
+
char* msg = gpr_format_message(WSAGetLastError());
|
233
|
+
GRPC_CARES_TRACE_LOG(
|
234
|
+
"TrySendWriteBufSyncNonBlocking: SendWriteBuf error:|%s|. fd:|%s|",
|
235
|
+
msg, GetName());
|
236
|
+
gpr_free(msg);
|
237
|
+
if (WSAGetLastError() == WSA_IO_PENDING) {
|
238
|
+
WSASetLastError(WSAEWOULDBLOCK);
|
239
|
+
write_state_ = WRITE_REQUESTED;
|
240
|
+
}
|
241
|
+
}
|
242
|
+
write_buf_ = grpc_slice_sub_no_ref(write_buf_, bytes_sent,
|
243
|
+
GRPC_SLICE_LENGTH(write_buf_));
|
244
|
+
return bytes_sent;
|
245
|
+
}
|
246
|
+
|
247
|
+
ares_ssize_t SendV(const struct iovec* iov, int iov_count) {
|
248
|
+
GRPC_CARES_TRACE_LOG("SendV called on fd:|%s|. Current write state: %d",
|
249
|
+
GetName(), write_state_);
|
250
|
+
switch (write_state_) {
|
251
|
+
case WRITE_IDLE:
|
252
|
+
GPR_ASSERT(GRPC_SLICE_LENGTH(write_buf_) == 0);
|
253
|
+
grpc_slice_unref_internal(write_buf_);
|
254
|
+
write_buf_ = FlattenIovec(iov, iov_count);
|
255
|
+
return TrySendWriteBufSyncNonBlocking();
|
256
|
+
case WRITE_REQUESTED:
|
257
|
+
case WRITE_PENDING:
|
258
|
+
WSASetLastError(WSAEWOULDBLOCK);
|
259
|
+
return -1;
|
260
|
+
case WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY:
|
261
|
+
grpc_slice currently_attempted = FlattenIovec(iov, iov_count);
|
262
|
+
GPR_ASSERT(GRPC_SLICE_LENGTH(currently_attempted) >=
|
263
|
+
GRPC_SLICE_LENGTH(write_buf_));
|
264
|
+
ares_ssize_t total_sent = 0;
|
265
|
+
for (size_t i = 0; i < GRPC_SLICE_LENGTH(write_buf_); i++) {
|
266
|
+
GPR_ASSERT(GRPC_SLICE_START_PTR(currently_attempted)[i] ==
|
267
|
+
GRPC_SLICE_START_PTR(write_buf_)[i]);
|
268
|
+
total_sent++;
|
269
|
+
}
|
270
|
+
grpc_slice_unref_internal(write_buf_);
|
271
|
+
write_buf_ =
|
272
|
+
grpc_slice_sub_no_ref(currently_attempted, total_sent,
|
273
|
+
GRPC_SLICE_LENGTH(currently_attempted));
|
274
|
+
write_state_ = WRITE_IDLE;
|
275
|
+
total_sent += TrySendWriteBufSyncNonBlocking();
|
276
|
+
return total_sent;
|
277
|
+
}
|
278
|
+
abort();
|
279
|
+
}
|
280
|
+
|
281
|
+
int Connect(const struct sockaddr* target, ares_socklen_t target_len) {
|
282
|
+
SOCKET s = grpc_winsocket_wrapped_socket(winsocket_);
|
283
|
+
GRPC_CARES_TRACE_LOG("Connect: fd:|%s|", GetName());
|
284
|
+
int out =
|
285
|
+
WSAConnect(s, target, target_len, nullptr, nullptr, nullptr, nullptr);
|
286
|
+
if (out != 0) {
|
287
|
+
char* msg = gpr_format_message(WSAGetLastError());
|
288
|
+
GRPC_CARES_TRACE_LOG("Connect error code:|%d|, msg:|%s|. fd:|%s|",
|
289
|
+
WSAGetLastError(), msg, GetName());
|
290
|
+
gpr_free(msg);
|
291
|
+
// c-ares expects a posix-style connect API
|
292
|
+
out = -1;
|
293
|
+
}
|
294
|
+
return out;
|
295
|
+
}
|
296
|
+
|
297
|
+
static void OnIocpReadable(void* arg, grpc_error* error) {
|
298
|
+
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
|
299
|
+
polled_fd->OnIocpReadableInner(error);
|
300
|
+
}
|
301
|
+
|
302
|
+
void OnIocpReadableInner(grpc_error* error) {
|
303
|
+
if (error == GRPC_ERROR_NONE) {
|
304
|
+
if (winsocket_->read_info.wsa_error != 0) {
|
305
|
+
/* WSAEMSGSIZE would be due to receiving more data
|
306
|
+
* than our read buffer's fixed capacity. Assume that
|
307
|
+
* the connection is TCP and read the leftovers
|
308
|
+
* in subsequent c-ares reads. */
|
309
|
+
if (winsocket_->read_info.wsa_error != WSAEMSGSIZE) {
|
310
|
+
GRPC_ERROR_UNREF(error);
|
311
|
+
char* msg = gpr_format_message(winsocket_->read_info.wsa_error);
|
312
|
+
GRPC_CARES_TRACE_LOG(
|
313
|
+
"OnIocpReadableInner. winsocket error:|%s|. fd:|%s|", msg,
|
314
|
+
GetName());
|
315
|
+
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
|
316
|
+
gpr_free(msg);
|
317
|
+
}
|
318
|
+
}
|
319
|
+
}
|
320
|
+
if (error == GRPC_ERROR_NONE) {
|
321
|
+
read_buf_ = grpc_slice_sub_no_ref(read_buf_, 0,
|
322
|
+
winsocket_->read_info.bytes_transfered);
|
323
|
+
} else {
|
324
|
+
grpc_slice_unref_internal(read_buf_);
|
325
|
+
read_buf_ = grpc_empty_slice();
|
326
|
+
}
|
327
|
+
GRPC_CARES_TRACE_LOG(
|
328
|
+
"OnIocpReadable finishing. read buf length now:|%d|. :fd:|%s|",
|
329
|
+
GRPC_SLICE_LENGTH(read_buf_), GetName());
|
330
|
+
ScheduleAndNullReadClosure(error);
|
331
|
+
}
|
332
|
+
|
333
|
+
static void OnIocpWriteable(void* arg, grpc_error* error) {
|
334
|
+
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
|
335
|
+
polled_fd->OnIocpWriteableInner(error);
|
336
|
+
}
|
337
|
+
|
338
|
+
// Completion handler for an overlapped write: translates any winsock error
// recorded on the socket into a grpc_error, trims write_buf_ down to the
// bytes actually sent, and hands the result to the pending c-ares write
// closure via ScheduleAndNullWriteClosure.
void OnIocpWriteableInner(grpc_error* error) {
  GRPC_CARES_TRACE_LOG("OnIocpWriteableInner. fd:|%s|", GetName());
  if (error == GRPC_ERROR_NONE) {
    if (winsocket_->write_info.wsa_error != 0) {
      char* msg = gpr_format_message(winsocket_->write_info.wsa_error);
      GRPC_CARES_TRACE_LOG(
          "OnIocpWriteableInner. winsocket error:|%s|. fd:|%s|", msg,
          GetName());
      // Replace the (currently GRPC_ERROR_NONE) error with one describing
      // the winsock failure.
      GRPC_ERROR_UNREF(error);
      error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
      gpr_free(msg);
    }
  }
  // This callback only runs while an overlapped send is outstanding.
  GPR_ASSERT(write_state_ == WRITE_PENDING);
  if (error == GRPC_ERROR_NONE) {
    write_state_ = WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY;
    // Keep only the bytes the overlapped send actually transferred.
    write_buf_ = grpc_slice_sub_no_ref(
        write_buf_, 0, winsocket_->write_info.bytes_transfered);
  } else {
    grpc_slice_unref_internal(write_buf_);
    write_buf_ = grpc_empty_slice();
  }
  ScheduleAndNullWriteClosure(error);
}
// Whether the event driver has taken ownership of this fd (set from
// GrpcPolledFdFactoryWindows::NewGrpcPolledFdLocked; consulted by the
// virtual "close" socket function).
bool gotten_into_driver_list() const { return gotten_into_driver_list_; }
void set_gotten_into_driver_list() { gotten_into_driver_list_ = true; }

grpc_combiner* combiner_;
// Scratch storage for the source address of the most recent recvfrom.
char recv_from_source_addr_[200];
ares_socklen_t recv_from_source_addr_len_;
// Buffers backing the overlapped read/write currently in flight.
grpc_slice read_buf_;
grpc_slice write_buf_;
// Closures supplied by the c-ares polling layer; nulled once scheduled.
grpc_closure* read_closure_ = nullptr;
grpc_closure* write_closure_ = nullptr;
// Wrappers that run OnIocpReadable/OnIocpWriteable on IOCP completion.
grpc_closure outer_read_closure_;
grpc_closure outer_write_closure_;
grpc_winsocket* winsocket_;
WriteState write_state_;
// Human-readable name used in trace logs.
char* name_ = nullptr;
bool gotten_into_driver_list_;
};
/* One node of SockToPolledFdMap's singly-linked list, associating a
 * SOCKET with the GrpcPolledFdWindows wrapper that owns it. */
struct SockToPolledFdEntry {
  SockToPolledFdEntry(SOCKET s, GrpcPolledFdWindows* fd)
      : socket(s), polled_fd(fd) {}
  SOCKET socket;
  GrpcPolledFdWindows* polled_fd;
  SockToPolledFdEntry* next = nullptr;
};
/* A SockToPolledFdMap maps ares_socket_t types (SOCKET's on windows)
 * to GrpcPolledFdWindows objects, and is used to find the appropriate
 * GrpcPolledFdWindows to handle a virtual socket call when c-ares makes that
 * socket call on the ares_socket_t type. Instances are owned by and one-to-one
 * with a GrpcPolledFdWindows factory and event driver */
class SockToPolledFdMap {
|
395
|
+
public:
|
396
|
+
SockToPolledFdMap(grpc_combiner* combiner) {
|
397
|
+
combiner_ = GRPC_COMBINER_REF(combiner, "sock to polled fd map");
|
398
|
+
}
|
399
|
+
|
400
|
+
~SockToPolledFdMap() {
|
401
|
+
GPR_ASSERT(head_ == nullptr);
|
402
|
+
GRPC_COMBINER_UNREF(combiner_, "sock to polled fd map");
|
403
|
+
}
|
404
|
+
|
405
|
+
void AddNewSocket(SOCKET s, GrpcPolledFdWindows* polled_fd) {
|
406
|
+
SockToPolledFdEntry* new_node = New<SockToPolledFdEntry>(s, polled_fd);
|
407
|
+
new_node->next = head_;
|
408
|
+
head_ = new_node;
|
409
|
+
}
|
410
|
+
|
411
|
+
GrpcPolledFdWindows* LookupPolledFd(SOCKET s) {
|
412
|
+
for (SockToPolledFdEntry* node = head_; node != nullptr;
|
413
|
+
node = node->next) {
|
414
|
+
if (node->socket == s) {
|
415
|
+
GPR_ASSERT(node->polled_fd != nullptr);
|
416
|
+
return node->polled_fd;
|
417
|
+
}
|
418
|
+
}
|
419
|
+
abort();
|
420
|
+
}
|
421
|
+
|
422
|
+
void RemoveEntry(SOCKET s) {
|
423
|
+
GPR_ASSERT(head_ != nullptr);
|
424
|
+
SockToPolledFdEntry** prev = &head_;
|
425
|
+
for (SockToPolledFdEntry* node = head_; node != nullptr;
|
426
|
+
node = node->next) {
|
427
|
+
if (node->socket == s) {
|
428
|
+
*prev = node->next;
|
429
|
+
Delete(node);
|
430
|
+
return;
|
431
|
+
}
|
432
|
+
prev = &node->next;
|
433
|
+
}
|
434
|
+
abort();
|
435
|
+
}
|
436
|
+
|
437
|
+
/* These virtual socket functions are called from within the c-ares
|
438
|
+
* library. These methods generally dispatch those socket calls to the
|
439
|
+
* appropriate methods. The virtual "socket" and "close" methods are
|
440
|
+
* special and instead create/add and remove/destroy GrpcPolledFdWindows
|
441
|
+
* objects.
|
442
|
+
*/
|
443
|
+
static ares_socket_t Socket(int af, int type, int protocol, void* user_data) {
|
444
|
+
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
|
445
|
+
SOCKET s = WSASocket(af, type, protocol, nullptr, 0, WSA_FLAG_OVERLAPPED);
|
446
|
+
if (s == INVALID_SOCKET) {
|
447
|
+
return s;
|
448
|
+
}
|
449
|
+
grpc_tcp_set_non_block(s);
|
450
|
+
GrpcPolledFdWindows* polled_fd =
|
451
|
+
New<GrpcPolledFdWindows>(s, map->combiner_);
|
452
|
+
map->AddNewSocket(s, polled_fd);
|
453
|
+
return s;
|
454
|
+
}
|
455
|
+
|
456
|
+
static int Connect(ares_socket_t as, const struct sockaddr* target,
|
457
|
+
ares_socklen_t target_len, void* user_data) {
|
458
|
+
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
|
459
|
+
GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as);
|
460
|
+
return polled_fd->Connect(target, target_len);
|
461
|
+
}
|
462
|
+
|
463
|
+
static ares_ssize_t SendV(ares_socket_t as, const struct iovec* iov,
|
464
|
+
int iovec_count, void* user_data) {
|
465
|
+
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
|
466
|
+
GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as);
|
467
|
+
return polled_fd->SendV(iov, iovec_count);
|
468
|
+
}
|
469
|
+
|
470
|
+
static ares_ssize_t RecvFrom(ares_socket_t as, void* data, size_t data_len,
|
471
|
+
int flags, struct sockaddr* from,
|
472
|
+
ares_socklen_t* from_len, void* user_data) {
|
473
|
+
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
|
474
|
+
GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as);
|
475
|
+
return polled_fd->RecvFrom(data, data_len, flags, from, from_len);
|
476
|
+
}
|
477
|
+
|
478
|
+
static int CloseSocket(SOCKET s, void* user_data) {
|
479
|
+
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
|
480
|
+
GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(s);
|
481
|
+
map->RemoveEntry(s);
|
482
|
+
// If a gRPC polled fd has not made it in to the driver's list yet, then
|
483
|
+
// the driver has not and will never see this socket.
|
484
|
+
if (!polled_fd->gotten_into_driver_list()) {
|
485
|
+
polled_fd->ShutdownLocked(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
|
486
|
+
"Shut down c-ares fd before without it ever having made it into the "
|
487
|
+
"driver's list"));
|
488
|
+
return 0;
|
489
|
+
}
|
490
|
+
return 0;
|
491
|
+
}
|
492
|
+
|
493
|
+
private:
|
494
|
+
SockToPolledFdEntry* head_ = nullptr;
|
495
|
+
grpc_combiner* combiner_;
|
496
|
+
};
|
497
|
+
|
498
|
+
/* Virtual socket function table installed on the ares channel via
 * ares_set_socket_functions (see ConfigureAresChannelLocked below). */
const struct ares_socket_functions custom_ares_sock_funcs = {
    &SockToPolledFdMap::Socket /* socket */,
    &SockToPolledFdMap::CloseSocket /* close */,
    &SockToPolledFdMap::Connect /* connect */,
    &SockToPolledFdMap::RecvFrom /* recvfrom */,
    &SockToPolledFdMap::SendV /* sendv */,
};
class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory {
|
507
|
+
public:
|
508
|
+
GrpcPolledFdFactoryWindows(grpc_combiner* combiner)
|
509
|
+
: sock_to_polled_fd_map_(combiner) {}
|
510
|
+
|
511
|
+
GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as,
|
512
|
+
grpc_pollset_set* driver_pollset_set,
|
513
|
+
grpc_combiner* combiner) override {
|
514
|
+
GrpcPolledFdWindows* polled_fd = sock_to_polled_fd_map_.LookupPolledFd(as);
|
515
|
+
// Set a flag so that the virtual socket "close" method knows it
|
516
|
+
// doesn't need to call ShutdownLocked, since now the driver will.
|
517
|
+
polled_fd->set_gotten_into_driver_list();
|
518
|
+
return polled_fd;
|
519
|
+
}
|
520
|
+
|
521
|
+
void ConfigureAresChannelLocked(ares_channel channel) override {
|
522
|
+
ares_set_socket_functions(channel, &custom_ares_sock_funcs,
|
523
|
+
&sock_to_polled_fd_map_);
|
524
|
+
}
|
525
|
+
|
526
|
+
private:
|
527
|
+
SockToPolledFdMap sock_to_polled_fd_map_;
|
528
|
+
};
|
529
|
+
|
530
|
+
// Entry point used by the shared c-ares resolver code to obtain the
// platform-specific polled-fd factory.
UniquePtr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(grpc_combiner* combiner) {
  GrpcPolledFdFactoryWindows* factory =
      New<GrpcPolledFdFactoryWindows>(combiner);
  return UniquePtr<GrpcPolledFdFactory>(factory);
}
} // namespace grpc_core
|
536
|
+
|
537
|
+
#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */
|