grpc 1.17.0 → 1.17.1
- checksums.yaml +4 -4
- data/Makefile +2 -2
- data/include/grpc/impl/codegen/grpc_types.h +5 -0
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +9 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +36 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +1 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +7 -5
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +3 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +2 -1
- data/src/core/lib/iomgr/resolve_address.h +1 -1
- data/src/core/lib/iomgr/tcp_windows.cc +78 -16
- data/src/ruby/lib/grpc/version.rb +1 -1
- metadata +25 -25
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f20c914a8b23d7c5f52b56f8c36ff67bb6cb3814e0a3df616699af08c9a6120b
+  data.tar.gz: c8f6e41a1ff2d6d84c8cb01dcaeea96bcd2b1c33fb93f9989e499f1159aa4c0c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6ad90b8a1d52eebe5da927096542a012c0c9ffa7430476663b6d227e0ae8fd8e164a75b3c29ce6d076a0dcadae37ce80fa45faf1f24a2b0b7a8e314ad7180b9e
+  data.tar.gz: 12aa5db82f444068b395fd4e7370cfba1afd30fdb3a3c54eab97b09b2b2b04749f7c5db812b1f0aa3b04771383cf0c34b5fb0657573c552c35405456ba60a23f
data/Makefile
CHANGED
@@ -438,8 +438,8 @@ Q = @
 endif
 
 CORE_VERSION = 7.0.0
-CPP_VERSION = 1.17.0
-CSHARP_VERSION = 1.17.0
+CPP_VERSION = 1.17.1
+CSHARP_VERSION = 1.17.1
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
data/include/grpc/impl/codegen/grpc_types.h
CHANGED
@@ -350,6 +350,11 @@ typedef struct {
 /** If set, inhibits health checking (which may be enabled via the
  * service config.) */
 #define GRPC_ARG_INHIBIT_HEALTH_CHECKING "grpc.inhibit_health_checking"
+/** If set, determines the number of milliseconds that the c-ares based
+ * DNS resolver will wait on queries before cancelling them. The default value
+ * is 10000. Setting this to "0" will disable c-ares query timeouts
+ * entirely. */
+#define GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS "grpc.dns_ares_query_timeout"
 /** \} */
 
 /** Result of a grpc call. If the caller satisfies the prerequisites of a
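The new channel argument documented above is an ordinary integer channel arg, so applications can set it when creating a channel. A minimal sketch, assuming the public C++ API (grpc::ChannelArguments::SetInt and grpc::CreateCustomChannel); the helper name and the 3000 ms value are illustrative only and are not part of this release, and the setting only takes effect when the c-ares DNS resolver is in use:

#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

// Hypothetical helper: builds a channel whose c-ares DNS queries are cancelled
// after 3000 ms instead of the 10000 ms default introduced in this release.
// Passing 0 would disable the query timeout entirely.
std::shared_ptr<grpc::Channel> MakeChannelWithDnsTimeout(const std::string& target) {
  grpc::ChannelArguments args;
  args.SetInt(GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS, 3000);
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(), args);
}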
data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
CHANGED
@@ -1811,7 +1811,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
     return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(addresses, args));
   }
 
-  const char* name() const override { return "
+  const char* name() const override { return "xds_experimental"; }
 };
 
 }  // namespace
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
CHANGED
@@ -122,6 +122,8 @@ class AresDnsResolver : public Resolver {
   char* service_config_json_ = nullptr;
   // has shutdown been initiated
   bool shutdown_initiated_ = false;
+  // timeout in milliseconds for active DNS queries
+  int query_timeout_ms_;
 };
 
 AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
@@ -159,6 +161,11 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
       grpc_combiner_scheduler(combiner()));
   GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this,
                     grpc_combiner_scheduler(combiner()));
+  const grpc_arg* query_timeout_ms_arg =
+      grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS);
+  query_timeout_ms_ = grpc_channel_arg_get_integer(
+      query_timeout_ms_arg,
+      {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX});
 }
 
 AresDnsResolver::~AresDnsResolver() {
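The {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX} triple passed to grpc_channel_arg_get_integer above supplies a default plus clamping bounds: the 10000 ms default is used when the argument is absent, otherwise the supplied value is clamped into [0, INT_MAX]. A standalone sketch of that behaviour, re-implemented here purely for illustration rather than calling the internal gRPC helper:

#include <algorithm>
#include <climits>

struct IntegerOptions {
  int default_value;
  int min_value;
  int max_value;
};

// Returns default_value when no channel argument was supplied, otherwise the
// supplied value clamped into [min_value, max_value].
int ResolveIntegerArg(const int* supplied, IntegerOptions options) {
  if (supplied == nullptr) return options.default_value;
  return std::min(std::max(*supplied, options.min_value), options.max_value);
}

// Example: ResolveIntegerArg(nullptr, {10000, 0, INT_MAX}) yields 10000,
// while a negative user-supplied value is clamped up to 0.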
@@ -410,7 +417,8 @@ void AresDnsResolver::StartResolvingLocked() {
   pending_request_ = grpc_dns_lookup_ares_locked(
       dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
       &on_resolved_, &lb_addresses_, true /* check_grpclb */,
-      request_service_config_ ? &service_config_json_ : nullptr,
+      request_service_config_ ? &service_config_json_ : nullptr,
+      query_timeout_ms_, combiner());
   last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
 }
 
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
CHANGED
@@ -33,6 +33,7 @@
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/iomgr/iomgr_internal.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
 
 typedef struct fd_node {
   /** the owner of this fd node */
@@ -76,6 +77,12 @@ struct grpc_ares_ev_driver {
   grpc_ares_request* request;
   /** Owned by the ev_driver. Creates new GrpcPolledFd's */
   grpc_core::UniquePtr<grpc_core::GrpcPolledFdFactory> polled_fd_factory;
+  /** query timeout in milliseconds */
+  int query_timeout_ms;
+  /** alarm to cancel active queries */
+  grpc_timer query_timeout;
+  /** cancels queries on a timeout */
+  grpc_closure on_timeout_locked;
 };
 
 static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver);
@@ -116,8 +123,11 @@ static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) {
   }
 }
 
+static void on_timeout_locked(void* arg, grpc_error* error);
+
 grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
                                               grpc_pollset_set* pollset_set,
+                                              int query_timeout_ms,
                                               grpc_combiner* combiner,
                                               grpc_ares_request* request) {
   *ev_driver = grpc_core::New<grpc_ares_ev_driver>();
@@ -146,6 +156,9 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
       grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner);
   (*ev_driver)
       ->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel);
+  GRPC_CLOSURE_INIT(&(*ev_driver)->on_timeout_locked, on_timeout_locked,
+                    *ev_driver, grpc_combiner_scheduler(combiner));
+  (*ev_driver)->query_timeout_ms = query_timeout_ms;
   return GRPC_ERROR_NONE;
 }
 
@@ -155,6 +168,7 @@ void grpc_ares_ev_driver_on_queries_complete_locked(
   // is working, grpc_ares_notify_on_event_locked will shut down the
   // fds; if it's not working, there are no fds to shut down.
   ev_driver->shutting_down = true;
+  grpc_timer_cancel(&ev_driver->query_timeout);
   grpc_ares_ev_driver_unref(ev_driver);
 }
 
@@ -185,6 +199,17 @@ static fd_node* pop_fd_node_locked(fd_node** head, ares_socket_t as) {
   return nullptr;
 }
 
+static void on_timeout_locked(void* arg, grpc_error* error) {
+  grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
+  GRPC_CARES_TRACE_LOG(
+      "ev_driver=%p on_timeout_locked. driver->shutting_down=%d. err=%s",
+      driver, driver->shutting_down, grpc_error_string(error));
+  if (!driver->shutting_down && error == GRPC_ERROR_NONE) {
+    grpc_ares_ev_driver_shutdown_locked(driver);
+  }
+  grpc_ares_ev_driver_unref(driver);
+}
+
 static void on_readable_locked(void* arg, grpc_error* error) {
   fd_node* fdn = static_cast<fd_node*>(arg);
   grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
@@ -314,6 +339,17 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) {
   if (!ev_driver->working) {
     ev_driver->working = true;
     grpc_ares_notify_on_event_locked(ev_driver);
+    grpc_millis timeout =
+        ev_driver->query_timeout_ms == 0
+            ? GRPC_MILLIS_INF_FUTURE
+            : ev_driver->query_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+    GRPC_CARES_TRACE_LOG(
+        "ev_driver=%p grpc_ares_ev_driver_start_locked. timeout in %" PRId64
+        " ms",
+        ev_driver, timeout);
+    grpc_ares_ev_driver_ref(ev_driver);
+    grpc_timer_init(&ev_driver->query_timeout, timeout,
+                    &ev_driver->on_timeout_locked);
   }
 }
 
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
CHANGED
@@ -43,6 +43,7 @@ ares_channel* grpc_ares_ev_driver_get_channel_locked(
    created successfully. */
 grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
                                               grpc_pollset_set* pollset_set,
+                                              int query_timeout_ms,
                                               grpc_combiner* combiner,
                                               grpc_ares_request* request);
 
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
CHANGED
@@ -359,7 +359,7 @@ done:
 void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
     grpc_ares_request* r, const char* dns_server, const char* name,
     const char* default_port, grpc_pollset_set* interested_parties,
-    bool check_grpclb, grpc_combiner* combiner) {
+    bool check_grpclb, int query_timeout_ms, grpc_combiner* combiner) {
   grpc_error* error = GRPC_ERROR_NONE;
   grpc_ares_hostbyname_request* hr = nullptr;
   ares_channel* channel = nullptr;
@@ -388,7 +388,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
     port = gpr_strdup(default_port);
   }
   error = grpc_ares_ev_driver_create_locked(&r->ev_driver, interested_parties,
-                                            combiner, r);
+                                            query_timeout_ms, combiner, r);
   if (error != GRPC_ERROR_NONE) goto error_cleanup;
   channel = grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
   // If dns_server is specified, use it.
@@ -522,7 +522,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
     const char* dns_server, const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
-    grpc_combiner* combiner) {
+    int query_timeout_ms, grpc_combiner* combiner) {
   grpc_ares_request* r =
       static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
   r->ev_driver = nullptr;
@@ -546,7 +546,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
   // Look up name using c-ares lib.
   grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
       r, dns_server, name, default_port, interested_parties, check_grpclb,
-      combiner);
+      query_timeout_ms, combiner);
   return r;
 }
 
@@ -554,6 +554,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
    const char* dns_server, const char* name, const char* default_port,
    grpc_pollset_set* interested_parties, grpc_closure* on_done,
    grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+   int query_timeout_ms,
    grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
 
 static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {
@@ -648,7 +649,8 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
   r->ares_request = grpc_dns_lookup_ares_locked(
       nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
       &r->on_dns_lookup_done_locked, &r->lb_addrs, false /* check_grpclb */,
-      nullptr /* service_config_json */,
+      nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
+      r->combiner);
 }
 
 static void grpc_resolve_address_ares_impl(const char* name,
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
CHANGED
@@ -26,6 +26,8 @@
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
+#define GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS 10000
+
 extern grpc_core::TraceFlag grpc_trace_cares_address_sorting;
 
 extern grpc_core::TraceFlag grpc_trace_cares_resolver;
@@ -60,7 +62,7 @@ extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
     const char* dns_server, const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_lb_addresses** addresses, bool check_grpclb,
-    char** service_config_json, grpc_combiner* combiner);
+    char** service_config_json, int query_timeout_ms, grpc_combiner* combiner);
 
 /* Cancel the pending grpc_ares_request \a request */
 extern void (*grpc_cancel_ares_request_locked)(grpc_ares_request* request);
data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
CHANGED
@@ -30,7 +30,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
     const char* dns_server, const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
-    grpc_combiner* combiner) {
+    int query_timeout_ms, grpc_combiner* combiner) {
   return NULL;
 }
 
@@ -38,6 +38,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
    const char* dns_server, const char* name, const char* default_port,
    grpc_pollset_set* interested_parties, grpc_closure* on_done,
    grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+   int query_timeout_ms,
    grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
 
 static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {}
data/src/core/lib/iomgr/resolve_address.h
CHANGED
@@ -65,7 +65,7 @@ void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable);
 
 /* Asynchronously resolve addr. Use default_port if a port isn't designated
    in addr, otherwise use the port in addr. */
-/* TODO(
+/* TODO(apolcyn): add a timeout here */
 void grpc_resolve_address(const char* addr, const char* default_port,
                           grpc_pollset_set* interested_parties,
                           grpc_closure* on_done,
data/src/core/lib/iomgr/tcp_windows.cc
CHANGED
@@ -42,6 +42,7 @@
 #include "src/core/lib/iomgr/tcp_windows.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
 
 #if defined(__MSYS__) && defined(GPR_ARCH_64)
 /* Nasty workaround for nasty bug when using the 64 bits msys compiler
@@ -112,7 +113,10 @@ typedef struct grpc_tcp {
 
   grpc_closure* read_cb;
   grpc_closure* write_cb;
-
+
+  /* garbage after the last read */
+  grpc_slice_buffer last_read_buffer;
+
   grpc_slice_buffer* write_slices;
   grpc_slice_buffer* read_slices;
 
@@ -131,6 +135,7 @@ static void tcp_free(grpc_tcp* tcp) {
   grpc_winsocket_destroy(tcp->socket);
   gpr_mu_destroy(&tcp->mu);
   gpr_free(tcp->peer_string);
+  grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
   grpc_resource_user_unref(tcp->resource_user);
   if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
   gpr_free(tcp);
@@ -179,9 +184,12 @@ static void on_read(void* tcpp, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   grpc_closure* cb = tcp->read_cb;
   grpc_winsocket* socket = tcp->socket;
-  grpc_slice sub;
   grpc_winsocket_callback_info* info = &socket->read_info;
 
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
+  }
+
   GRPC_ERROR_REF(error);
 
   if (error == GRPC_ERROR_NONE) {
@@ -189,13 +197,35 @@ static void on_read(void* tcpp, grpc_error* error) {
       char* utf8_message = gpr_format_message(info->wsa_error);
       error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
       gpr_free(utf8_message);
-
+      grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
     } else {
       if (info->bytes_transfered != 0 && !tcp->shutting_down) {
-
-
+        GPR_ASSERT((size_t)info->bytes_transfered <= tcp->read_slices->length);
+        if (static_cast<size_t>(info->bytes_transfered) !=
+            tcp->read_slices->length) {
+          grpc_slice_buffer_trim_end(
+              tcp->read_slices,
+              tcp->read_slices->length -
+                  static_cast<size_t>(info->bytes_transfered),
+              &tcp->last_read_buffer);
+        }
+        GPR_ASSERT((size_t)info->bytes_transfered == tcp->read_slices->length);
+
+        if (grpc_tcp_trace.enabled()) {
+          size_t i;
+          for (i = 0; i < tcp->read_slices->count; i++) {
+            char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
+                                         GPR_DUMP_HEX | GPR_DUMP_ASCII);
+            gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+                    dump);
+            gpr_free(dump);
+          }
+        }
       } else {
-
+        if (grpc_tcp_trace.enabled()) {
+          gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
+        }
+        grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
         error = tcp->shutting_down
                     ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -209,6 +239,8 @@ static void on_read(void* tcpp, grpc_error* error) {
   GRPC_CLOSURE_SCHED(cb, error);
 }
 
+#define DEFAULT_TARGET_READ_SIZE 8192
+#define MAX_WSABUF_COUNT 16
 static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
                      grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -217,7 +249,12 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
   int status;
   DWORD bytes_read = 0;
   DWORD flags = 0;
-  WSABUF
+  WSABUF buffers[MAX_WSABUF_COUNT];
+  size_t i;
+
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
+  }
 
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
@@ -229,18 +266,27 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
   tcp->read_cb = cb;
   tcp->read_slices = read_slices;
   grpc_slice_buffer_reset_and_unref_internal(read_slices);
+  grpc_slice_buffer_swap(read_slices, &tcp->last_read_buffer);
 
-  tcp->
+  if (tcp->read_slices->length < DEFAULT_TARGET_READ_SIZE / 2 &&
+      tcp->read_slices->count < MAX_WSABUF_COUNT) {
+    // TODO(jtattermusch): slice should be allocated using resource quota
+    grpc_slice_buffer_add(tcp->read_slices,
+                          GRPC_SLICE_MALLOC(DEFAULT_TARGET_READ_SIZE));
+  }
 
-
-
-
+  GPR_ASSERT(tcp->read_slices->count <= MAX_WSABUF_COUNT);
+  for (i = 0; i < tcp->read_slices->count; i++) {
+    buffers[i].len = (ULONG)GRPC_SLICE_LENGTH(
+        tcp->read_slices->slices[i]);  // we know slice size fits in 32bit.
+    buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[i]);
+  }
 
   TCP_REF(tcp, "read");
 
   /* First let's try a synchronous, non-blocking read. */
-  status =
-
+  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+                   &bytes_read, &flags, NULL, NULL);
   info->wsa_error = status == 0 ? 0 : WSAGetLastError();
 
   /* Did we get data immediately ? Yay. */
@@ -252,8 +298,8 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
 
   /* Otherwise, let's retry, by queuing a read. */
   memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
-  status = WSARecv(tcp->socket->socket,
-                   &info->overlapped, NULL);
+  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+                   &bytes_read, &flags, &info->overlapped, NULL);
 
   if (status != 0) {
     int wsa_error = WSAGetLastError();
@@ -275,6 +321,10 @@ static void on_write(void* tcpp, grpc_error* error) {
   grpc_winsocket_callback_info* info = &handle->write_info;
   grpc_closure* cb;
 
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
+  }
+
   GRPC_ERROR_REF(error);
 
   gpr_mu_lock(&tcp->mu);
@@ -303,11 +353,21 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
   unsigned i;
   DWORD bytes_sent;
   int status;
-  WSABUF local_buffers[
+  WSABUF local_buffers[MAX_WSABUF_COUNT];
   WSABUF* allocated = NULL;
   WSABUF* buffers = local_buffers;
   size_t len;
 
+  if (grpc_tcp_trace.enabled()) {
+    size_t i;
+    for (i = 0; i < slices->count; i++) {
+      char* data =
+          grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+      gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+      gpr_free(data);
+    }
+  }
+
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
         cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -412,6 +472,7 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
 static void win_destroy(grpc_endpoint* ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
+  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
   TCP_UNREF(tcp, "destroy");
 }
 
@@ -460,6 +521,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
   GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
   tcp->peer_string = gpr_strdup(peer_string);
+  grpc_slice_buffer_init(&tcp->last_read_buffer);
   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   /* Tell network status tracking code about the new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: grpc
 version: !ruby/object:Gem::Version
-  version: 1.17.0
+  version: 1.17.1
 platform: ruby
 authors:
 - gRPC Authors
 autorequire:
 bindir: src/ruby/bin
 cert_chain: []
-date: 2018-12-
+date: 2018-12-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: google-protobuf
@@ -1586,38 +1586,38 @@ signing_key:
 specification_version: 4
 summary: GRPC system in Ruby
 test_files:
+- src/ruby/spec/google_rpc_status_utils_spec.rb
 - src/ruby/spec/channel_spec.rb
-- src/ruby/spec/error_sanity_spec.rb
-- src/ruby/spec/channel_connection_spec.rb
-- src/ruby/spec/server_credentials_spec.rb
-- src/ruby/spec/pb/health/checker_spec.rb
-- src/ruby/spec/pb/codegen/package_option_spec.rb
-- src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
-- src/ruby/spec/pb/duplicate/codegen_spec.rb
 - src/ruby/spec/compression_options_spec.rb
+- src/ruby/spec/time_consts_spec.rb
+- src/ruby/spec/spec_helper.rb
 - src/ruby/spec/client_server_spec.rb
+- src/ruby/spec/generic/rpc_server_pool_spec.rb
+- src/ruby/spec/generic/interceptor_registry_spec.rb
 - src/ruby/spec/generic/rpc_desc_spec.rb
-- src/ruby/spec/generic/
-- src/ruby/spec/generic/rpc_server_spec.rb
-- src/ruby/spec/generic/server_interceptors_spec.rb
+- src/ruby/spec/generic/active_call_spec.rb
 - src/ruby/spec/generic/client_interceptors_spec.rb
+- src/ruby/spec/generic/server_interceptors_spec.rb
+- src/ruby/spec/generic/rpc_server_spec.rb
+- src/ruby/spec/generic/client_stub_spec.rb
 - src/ruby/spec/generic/service_spec.rb
-- src/ruby/spec/generic/interceptor_registry_spec.rb
-- src/ruby/spec/generic/active_call_spec.rb
-- src/ruby/spec/generic/rpc_server_pool_spec.rb
-- src/ruby/spec/call_spec.rb
-- src/ruby/spec/server_spec.rb
-- src/ruby/spec/time_consts_spec.rb
-- src/ruby/spec/google_rpc_status_utils_spec.rb
-- src/ruby/spec/support/helpers.rb
 - src/ruby/spec/support/services.rb
+- src/ruby/spec/support/helpers.rb
+- src/ruby/spec/server_spec.rb
 - src/ruby/spec/client_auth_spec.rb
+- src/ruby/spec/server_credentials_spec.rb
 - src/ruby/spec/channel_credentials_spec.rb
+- src/ruby/spec/channel_connection_spec.rb
+- src/ruby/spec/pb/codegen/grpc/testing/package_options.proto
+- src/ruby/spec/pb/codegen/package_option_spec.rb
+- src/ruby/spec/pb/duplicate/codegen_spec.rb
+- src/ruby/spec/pb/health/checker_spec.rb
+- src/ruby/spec/error_sanity_spec.rb
+- src/ruby/spec/call_credentials_spec.rb
+- src/ruby/spec/call_spec.rb
+- src/ruby/spec/testdata/README
 - src/ruby/spec/testdata/ca.pem
 - src/ruby/spec/testdata/server1.key
-- src/ruby/spec/testdata/README
-- src/ruby/spec/testdata/server1.pem
-- src/ruby/spec/testdata/client.key
 - src/ruby/spec/testdata/client.pem
-- src/ruby/spec/
-- src/ruby/spec/
+- src/ruby/spec/testdata/client.key
+- src/ruby/spec/testdata/server1.pem