grpc 0.15.0 → 1.0.0.pre1
Potentially problematic release: this version of grpc might be problematic.
- checksums.yaml +4 -4
- data/Makefile +127 -159
- data/etc/roots.pem +784 -509
- data/include/grpc/grpc_posix.h +8 -0
- data/include/grpc/impl/codegen/byte_buffer.h +5 -4
- data/include/grpc/impl/codegen/grpc_types.h +2 -0
- data/include/grpc/impl/codegen/port_platform.h +2 -1
- data/include/grpc/module.modulemap +15 -0
- data/src/core/ext/census/grpc_filter.c +3 -0
- data/src/core/ext/client_config/channel_connectivity.c +4 -3
- data/src/core/ext/client_config/client_channel.c +6 -0
- data/src/core/ext/client_config/subchannel.c +2 -0
- data/src/core/ext/client_config/subchannel_call_holder.c +2 -5
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c +2 -1
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +2 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +171 -104
- data/src/core/ext/transport/chttp2/transport/internal.h +5 -3
- data/src/core/ext/transport/chttp2/transport/parsing.c +4 -3
- data/src/core/ext/transport/chttp2/transport/status_conversion.c +8 -2
- data/src/core/ext/transport/chttp2/transport/status_conversion.h +1 -1
- data/src/core/lib/channel/channel_stack.c +12 -1
- data/src/core/lib/channel/channel_stack.h +5 -0
- data/src/core/lib/channel/http_client_filter.c +7 -1
- data/src/core/lib/debug/trace.c +6 -2
- data/src/core/lib/iomgr/error.c +62 -19
- data/src/core/lib/iomgr/error.h +10 -6
- data/src/core/lib/iomgr/ev_epoll_linux.c +1872 -0
- data/src/core/lib/{surface/surface_trace.h → iomgr/ev_epoll_linux.h} +11 -12
- data/src/core/lib/iomgr/ev_posix.c +9 -6
- data/src/core/lib/iomgr/ev_posix.h +3 -0
- data/src/core/lib/iomgr/network_status_tracker.c +121 -0
- data/{include/grpc/grpc_zookeeper.h → src/core/lib/iomgr/network_status_tracker.h} +8 -26
- data/src/core/lib/iomgr/socket_utils_common_posix.c +22 -0
- data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
- data/src/core/lib/iomgr/tcp_posix.c +6 -2
- data/src/core/lib/iomgr/tcp_server.h +3 -0
- data/src/core/lib/iomgr/tcp_server_posix.c +114 -16
- data/src/core/lib/iomgr/tcp_server_windows.c +1 -0
- data/src/core/lib/iomgr/tcp_windows.c +5 -0
- data/src/core/lib/iomgr/udp_server.c +28 -16
- data/src/core/lib/iomgr/wakeup_fd_eventfd.c +4 -2
- data/src/core/lib/profiling/basic_timers.c +4 -4
- data/src/core/lib/security/credentials/composite/composite_credentials.c +4 -3
- data/src/core/lib/security/credentials/credentials.c +1 -1
- data/src/core/lib/security/credentials/credentials.h +4 -5
- data/src/core/lib/security/credentials/fake/fake_credentials.c +2 -2
- data/src/core/lib/security/credentials/iam/iam_credentials.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.c +7 -6
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +6 -4
- data/src/core/lib/security/credentials/plugin/plugin_credentials.c +4 -3
- data/src/core/lib/security/transport/client_auth_filter.c +10 -7
- data/src/core/lib/surface/byte_buffer_reader.c +6 -4
- data/src/core/lib/surface/call.c +64 -51
- data/src/core/lib/surface/call.h +0 -1
- data/src/core/lib/surface/channel.c +10 -8
- data/src/core/lib/surface/completion_queue.c +26 -12
- data/src/core/lib/surface/completion_queue.h +4 -0
- data/src/core/lib/surface/init.c +6 -1
- data/src/core/lib/surface/version.c +1 -1
- data/src/core/lib/transport/transport.c +62 -29
- data/src/core/lib/transport/transport.h +8 -5
- data/src/core/lib/transport/transport_op_string.c +14 -3
- data/src/ruby/ext/grpc/rb_byte_buffer.c +4 -1
- data/src/ruby/ext/grpc/rb_call.c +87 -54
- data/src/ruby/ext/grpc/rb_call.h +1 -1
- data/src/ruby/ext/grpc/rb_call_credentials.c +1 -30
- data/src/ruby/ext/grpc/rb_channel.c +25 -50
- data/src/ruby/ext/grpc/rb_channel_credentials.c +1 -31
- data/src/ruby/ext/grpc/rb_completion_queue.c +15 -134
- data/src/ruby/ext/grpc/rb_completion_queue.h +3 -7
- data/src/ruby/ext/grpc/rb_grpc.c +2 -4
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +2 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +4 -1
- data/src/ruby/ext/grpc/rb_server.c +81 -133
- data/src/ruby/ext/grpc/rb_server_credentials.c +4 -33
- data/src/ruby/lib/grpc/generic/active_call.rb +40 -55
- data/src/ruby/lib/grpc/generic/bidi_call.rb +21 -23
- data/src/ruby/lib/grpc/generic/client_stub.rb +20 -15
- data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -37
- data/src/ruby/lib/grpc/generic/service.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/test/client.rb +25 -7
- data/src/ruby/pb/test/server.rb +7 -5
- data/src/ruby/spec/call_spec.rb +1 -2
- data/src/ruby/spec/channel_spec.rb +2 -3
- data/src/ruby/spec/client_server_spec.rb +74 -59
- data/src/ruby/spec/generic/active_call_spec.rb +66 -86
- data/src/ruby/spec/generic/client_stub_spec.rb +27 -48
- data/src/ruby/spec/generic/rpc_server_spec.rb +4 -34
- data/src/ruby/spec/pb/health/checker_spec.rb +0 -2
- data/src/ruby/spec/server_spec.rb +20 -24
- metadata +9 -8
- data/src/ruby/spec/completion_queue_spec.rb +0 -42
data/src/core/lib/{surface/surface_trace.h → iomgr/ev_epoll_linux.h}
@@ -31,18 +31,17 @@
  *
  */
 
-#ifndef
-#define
+#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
+#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
 
-#include
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/iomgr/ev_posix.h"
 
-
-  if (grpc_api_trace) {                                 \
-    char *_ev = grpc_event_string(event);               \
-    gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \
-    gpr_free(_ev);                                      \
-  }
+const grpc_event_engine_vtable *grpc_init_epoll_linux(void);
 
-#
+#ifdef GPR_LINUX_EPOLL
+void *grpc_fd_get_polling_island(grpc_fd *fd);
+void *grpc_pollset_get_polling_island(grpc_pollset *ps);
+bool grpc_are_polling_islands_equal(void *p, void *q);
+#endif /* defined(GPR_LINUX_EPOLL) */
+
+#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */
data/src/core/lib/iomgr/ev_posix.c
@@ -44,6 +44,7 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/useful.h>
 
+#include "src/core/lib/iomgr/ev_epoll_linux.h"
 #include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
 #include "src/core/lib/iomgr/ev_poll_posix.h"
 #include "src/core/lib/support/env.h"
@@ -53,6 +54,7 @@
 grpc_poll_function_type grpc_poll_function = poll;
 
 static const grpc_event_engine_vtable *g_event_engine;
+static const char *g_poll_strategy_name = NULL;
 
 typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(void);
 
@@ -62,7 +64,9 @@ typedef struct {
 } event_engine_factory;
 
 static const event_engine_factory g_factories[] = {
-    {"
+    {"epoll", grpc_init_epoll_linux},
+    {"poll", grpc_init_poll_posix},
+    {"legacy", grpc_init_poll_and_epoll_posix},
 };
 
 static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
@@ -98,6 +102,7 @@ static void try_engine(const char *engine) {
   for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) {
     if (is(engine, g_factories[i].name)) {
       if ((g_event_engine = g_factories[i].factory())) {
+        g_poll_strategy_name = g_factories[i].name;
         gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name);
         return;
       }
@@ -105,6 +110,9 @@ static void try_engine(const char *engine) {
   }
 }
 
+/* Call this only after calling grpc_event_engine_init() */
+const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
+
 void grpc_event_engine_init(void) {
   char *s = gpr_getenv("GRPC_POLL_STRATEGY");
   if (s == NULL) {
@@ -167,11 +175,6 @@ void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
   g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
 }
 
-grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
-                                                grpc_fd *fd) {
-  return g_event_engine->fd_get_read_notifier_pollset(exec_ctx, fd);
-}
-
 size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
 
 void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
data/src/core/lib/iomgr/ev_posix.h
@@ -99,6 +99,9 @@ typedef struct grpc_event_engine_vtable {
 void grpc_event_engine_init(void);
 void grpc_event_engine_shutdown(void);
 
+/* Return the name of the poll strategy */
+const char *grpc_get_poll_strategy_name();
+
 /* Create a wrapped file descriptor.
    Requires fd is a non-blocking file descriptor.
    This takes ownership of closing fd. */
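The ev_posix.c changes above mean the polling engine is now chosen at startup from the "epoll", "poll" and "legacy" factories, honoring the GRPC_POLL_STRATEGY environment variable, and the winning name can be read back afterwards. A minimal sketch of querying it, with the caveats that ev_posix.h is an internal header (so this only compiles against the gRPC core source tree) and that it assumes grpc_init() has already run the event-engine initialization:

#include <stdio.h>

#include <grpc/grpc.h>
#include "src/core/lib/iomgr/ev_posix.h" /* internal header, not public API */

int main(void) {
  /* grpc_init() triggers grpc_event_engine_init(), which walks the factory
     list (optionally filtered by GRPC_POLL_STRATEGY, e.g. "epoll" or "poll")
     and records the name of the first engine that initializes successfully. */
  grpc_init();
  printf("active poll strategy: %s\n", grpc_get_poll_strategy_name());
  grpc_shutdown();
  return 0;
}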
data/src/core/lib/iomgr/network_status_tracker.c (new file)
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include "src/core/lib/iomgr/endpoint.h"
+
+typedef struct endpoint_ll_node {
+  grpc_endpoint *ep;
+  struct endpoint_ll_node *next;
+} endpoint_ll_node;
+
+static endpoint_ll_node *head = NULL;
+static gpr_mu g_endpoint_mutex;
+static bool g_init_done = false;
+
+void grpc_initialize_network_status_monitor() {
+  g_init_done = true;
+  gpr_mu_init(&g_endpoint_mutex);
+  // TODO(makarandd): Install callback with OS to monitor network status.
+}
+
+void grpc_destroy_network_status_monitor() {
+  for (endpoint_ll_node *curr = head; curr != NULL;) {
+    endpoint_ll_node *next = curr->next;
+    gpr_free(curr);
+    curr = next;
+  }
+  gpr_mu_destroy(&g_endpoint_mutex);
+}
+
+void grpc_network_status_register_endpoint(grpc_endpoint *ep) {
+  if (!g_init_done) {
+    grpc_initialize_network_status_monitor();
+  }
+  gpr_mu_lock(&g_endpoint_mutex);
+  if (head == NULL) {
+    head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
+    head->ep = ep;
+    head->next = NULL;
+  } else {
+    endpoint_ll_node *prev_head = head;
+    head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
+    head->ep = ep;
+    head->next = prev_head;
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+}
+
+void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) {
+  gpr_mu_lock(&g_endpoint_mutex);
+  GPR_ASSERT(head);
+  bool found = false;
+  endpoint_ll_node *prev = head;
+  // if we're unregistering the head, just move head to the next
+  if (ep == head->ep) {
+    head = head->next;
+    gpr_free(prev);
+    found = true;
+  } else {
+    for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) {
+      if (ep == curr->ep) {
+        prev->next = curr->next;
+        gpr_free(curr);
+        found = true;
+        break;
+      }
+      prev = curr;
+    }
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+  GPR_ASSERT(found);
+}
+
+// Walk the linked-list from head and execute shutdown. It is possible that
+// other threads might be in the process of shutdown as well, but that has
+// no side effect since endpoint shutdown is idempotent.
+void grpc_network_status_shutdown_all_endpoints() {
+  gpr_mu_lock(&g_endpoint_mutex);
+  if (head == NULL) {
+    gpr_mu_unlock(&g_endpoint_mutex);
+    return;
+  }
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+  for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) {
+    curr->ep->vtable->shutdown(&exec_ctx, curr->ep);
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+  grpc_exec_ctx_finish(&exec_ctx);
+}
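The TODO in grpc_initialize_network_status_monitor() leaves the OS integration open; a hypothetical platform hook (not part of this release) would presumably just forward to the new shutdown helper:

/* Hypothetical sketch only: how an OS-level connectivity callback might use
   the tracker. No such callback is installed in 1.0.0.pre1 (see the TODO
   above); the name on_network_status_changed is invented for illustration. */
static void on_network_status_changed(void) {
  grpc_network_status_shutdown_all_endpoints();
}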
data/{include/grpc/grpc_zookeeper.h → src/core/lib/iomgr/network_status_tracker.h}
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright
+ * Copyright 2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,29 +31,11 @@
  *
  */
 
-
-
-
- *
- * Where zookeeper is the name system scheme
- * host:port is the address of a zookeeper server
- * /path/service/instance is the zookeeper name to be resolved
- *
- * Refer doc/naming.md for more details
- */
-
-#ifndef GRPC_GRPC_ZOOKEEPER_H
-#define GRPC_GRPC_ZOOKEEPER_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** Register zookeeper name resolver in grpc */
-void grpc_zookeeper_register();
-
-#ifdef __cplusplus
-}
-#endif
+#ifndef GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
+#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
+#include "src/core/lib/iomgr/endpoint.h"
 
-
+void grpc_network_status_register_endpoint(grpc_endpoint *ep);
+void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
+void grpc_network_status_shutdown_all_endpoints();
+#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
data/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -169,6 +169,28 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
   return GRPC_ERROR_NONE;
 }
 
+/* set a socket to reuse old addresses */
+grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
+#ifndef SO_REUSEPORT
+  return GRPC_ERROR_CREATE("SO_REUSEPORT unavailable on compiling system");
+#else
+  int val = (reuse != 0);
+  int newval;
+  socklen_t intlen = sizeof(newval);
+  if (0 != setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val))) {
+    return GRPC_OS_ERROR(errno, "setsockopt(SO_REUSEPORT)");
+  }
+  if (0 != getsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &newval, &intlen)) {
+    return GRPC_OS_ERROR(errno, "getsockopt(SO_REUSEPORT)");
+  }
+  if ((newval != 0) != val) {
+    return GRPC_ERROR_CREATE("Failed to set SO_REUSEPORT");
+  }
+
+  return GRPC_ERROR_NONE;
+#endif
+}
+
 /* disable nagle */
 grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
   int val = (low_latency != 0);
data/src/core/lib/iomgr/socket_utils_posix.h
@@ -55,6 +55,9 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse);
 /* disable nagle */
 grpc_error *grpc_set_socket_low_latency(int fd, int low_latency);
 
+/* set SO_REUSEPORT */
+grpc_error *grpc_set_socket_reuse_port(int fd, int reuse);
+
 /* Returns true if this system can create AF_INET6 sockets bound to ::1.
    The value is probed once, and cached for the life of the process.
 
data/src/core/lib/iomgr/tcp_posix.c
@@ -35,6 +35,7 @@
 
 #ifdef GPR_POSIX_SOCKET
 
+#include "src/core/lib/iomgr/network_status_tracker.h"
 #include "src/core/lib/iomgr/tcp_posix.h"
 
 #include <errno.h>
@@ -152,6 +153,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
 #endif
 
 static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+  grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   TCP_UNREF(exec_ctx, tcp, "destroy");
 }
@@ -160,7 +162,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
                          grpc_error *error) {
   grpc_closure *cb = tcp->read_cb;
 
-  if (
+  if (grpc_tcp_trace) {
     size_t i;
     const char *str = grpc_error_string(error);
     gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -394,7 +396,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_error *error = GRPC_ERROR_NONE;
 
-  if (
+  if (grpc_tcp_trace) {
     size_t i;
 
     for (i = 0; i < buf->count; i++) {
@@ -474,6 +476,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
   gpr_slice_buffer_init(&tcp->last_read_buffer);
+  /* Tell network status tracker about new endpoint */
+  grpc_network_status_register_endpoint(&tcp->base);
 
   return &tcp->base;
 }
data/src/core/lib/iomgr/tcp_server.h
@@ -34,6 +34,8 @@
 #ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_H
 #define GRPC_CORE_LIB_IOMGR_TCP_SERVER_H
 
+#include <grpc/grpc.h>
+
 #include "src/core/lib/iomgr/closure.h"
 #include "src/core/lib/iomgr/endpoint.h"
 
@@ -59,6 +61,7 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
    If shutdown_complete is not NULL, it will be used by
    grpc_tcp_server_unref() when the ref count reaches zero. */
 grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+                                   const grpc_channel_args *args,
                                    grpc_tcp_server **server);
 
 /* Start listening to bound ports */
data/src/core/lib/iomgr/tcp_server_posix.c
@@ -112,8 +112,10 @@ struct grpc_tcp_server {
   /* destroyed port count: how many ports are completely destroyed */
   size_t destroyed_ports;
 
-  /* is this server shutting down?
-
+  /* is this server shutting down? */
+  bool shutdown;
+  /* use SO_REUSEPORT */
+  bool so_reuseport;
 
   /* linked list of server ports */
   grpc_tcp_listener *head;
@@ -132,17 +134,45 @@ struct grpc_tcp_server {
   size_t pollset_count;
 
   /* next pollset to assign a channel to */
-
+  gpr_atm next_pollset_to_assign;
 };
 
+static gpr_once check_init = GPR_ONCE_INIT;
+static bool has_so_reuseport;
+
+static void init(void) {
+  int s = socket(AF_INET, SOCK_STREAM, 0);
+  if (s >= 0) {
+    has_so_reuseport = GRPC_LOG_IF_ERROR("check for SO_REUSEPORT",
+                                         grpc_set_socket_reuse_port(s, 1));
+    close(s);
+  }
+}
+
 grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+                                   const grpc_channel_args *args,
                                    grpc_tcp_server **server) {
+  gpr_once_init(&check_init, init);
+
   grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+  s->so_reuseport = has_so_reuseport;
+  for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+    if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
+      if (args->args[i].type == GRPC_ARG_INTEGER) {
+        s->so_reuseport =
+            has_so_reuseport && (args->args[i].value.integer != 0);
+      } else {
+        gpr_free(s);
+        return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
+                                 " must be an integer");
+      }
+    }
+  }
   gpr_ref_init(&s->refs, 1);
   gpr_mu_init(&s->mu);
   s->active_ports = 0;
   s->destroyed_ports = 0;
-  s->shutdown =
+  s->shutdown = false;
   s->shutdown_starting.head = NULL;
   s->shutdown_starting.tail = NULL;
   s->shutdown_complete = shutdown_complete;
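The grpc_tcp_server_create() change above means servers now turn on SO_REUSEPORT by default where the platform supports it, and applications can override that through the GRPC_ARG_ALLOW_REUSEPORT channel argument parsed here. A minimal sketch of disabling it at server creation, assuming GRPC_ARG_ALLOW_REUSEPORT is the key added to grpc_types.h in this release:

#include <grpc/grpc.h>

/* Sketch: create a server with SO_REUSEPORT disabled via the channel argument
   that grpc_tcp_server_create() now inspects. */
static grpc_server *create_server_without_reuseport(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_ALLOW_REUSEPORT; /* key defined in grpc/impl/codegen/grpc_types.h */
  arg.value.integer = 0;              /* 0 disables reuse; non-zero (the default) enables it */
  grpc_channel_args args = {1, &arg};
  return grpc_server_create(&args, NULL);
}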
@@ -151,7 +181,7 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
   s->head = NULL;
   s->tail = NULL;
   s->nports = 0;
-  s->next_pollset_to_assign
+  gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0);
   *server = s;
   return GRPC_ERROR_NONE;
 }
@@ -218,7 +248,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
   gpr_mu_lock(&s->mu);
 
   GPR_ASSERT(!s->shutdown);
-  s->shutdown =
+  s->shutdown = true;
 
   /* shutdown all fd's */
   if (s->active_ports) {
@@ -268,13 +298,19 @@ static int get_max_accept_queue_size(void) {
 
 /* Prepare a recently-created socket for listening. */
 static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
-                                  size_t addr_len,
+                                  size_t addr_len, bool so_reuseport,
+                                  int *port) {
   struct sockaddr_storage sockname_temp;
   socklen_t sockname_len;
   grpc_error *err = GRPC_ERROR_NONE;
 
   GPR_ASSERT(fd >= 0);
 
+  if (so_reuseport) {
+    err = grpc_set_socket_reuse_port(fd, 1);
+    if (err != GRPC_ERROR_NONE) goto error;
+  }
+
   err = grpc_set_socket_nonblocking(fd, 1);
   if (err != GRPC_ERROR_NONE) goto error;
   err = grpc_set_socket_cloexec(fd, 1);
@@ -333,7 +369,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
   }
 
   read_notifier_pollset =
-      sp->server->pollsets[(
+      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
+                               &sp->server->next_pollset_to_assign, 1) %
                            sp->server->pollset_count];
 
   /* loop until accept4 returns EAGAIN, and then re-arm notification */
@@ -407,7 +444,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
   char *addr_str;
   char *name;
 
-  grpc_error *err = prepare_socket(fd, addr, addr_len, &port);
+  grpc_error *err = prepare_socket(fd, addr, addr_len, s->so_reuseport, &port);
   if (err == GRPC_ERROR_NONE) {
     GPR_ASSERT(port > 0);
     grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
@@ -443,6 +480,52 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
   return err;
 }
 
+static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
+  grpc_tcp_listener *sp = NULL;
+  char *addr_str;
+  char *name;
+  grpc_error *err;
+
+  for (grpc_tcp_listener *l = listener->next; l && l->is_sibling; l = l->next) {
+    l->fd_index += count;
+  }
+
+  for (unsigned i = 0; i < count; i++) {
+    int fd, port;
+    grpc_dualstack_mode dsmode;
+    err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
+                                       &dsmode, &fd);
+    if (err != GRPC_ERROR_NONE) return err;
+    err = prepare_socket(fd, &listener->addr.sockaddr, listener->addr_len, true,
+                         &port);
+    if (err != GRPC_ERROR_NONE) return err;
+    listener->server->nports++;
+    grpc_sockaddr_to_string(&addr_str, &listener->addr.sockaddr, 1);
+    gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
+    sp = gpr_malloc(sizeof(grpc_tcp_listener));
+    sp->next = listener->next;
+    listener->next = sp;
+    sp->server = listener->server;
+    sp->fd = fd;
+    sp->emfd = grpc_fd_create(fd, name);
+    memcpy(sp->addr.untyped, listener->addr.untyped, listener->addr_len);
+    sp->addr_len = listener->addr_len;
+    sp->port = port;
+    sp->port_index = listener->port_index;
+    sp->fd_index = listener->fd_index + count - i;
+    sp->is_sibling = 1;
+    sp->sibling = listener->is_sibling ? listener->sibling : listener;
+    GPR_ASSERT(sp->emfd);
+    while (listener->server->tail->next != NULL) {
+      listener->server->tail = listener->server->tail->next;
+    }
+    gpr_free(addr_str);
+    gpr_free(name);
+  }
+
+  return GRPC_ERROR_NONE;
+}
+
 grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
                                      size_t addr_len, int *out_port) {
   grpc_tcp_listener *sp;
@@ -599,14 +682,29 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
   s->on_accept_cb_arg = on_accept_cb_arg;
   s->pollsets = pollsets;
   s->pollset_count = pollset_count;
-
-
-
+  sp = s->head;
+  while (sp != NULL) {
+    if (s->so_reuseport && pollset_count > 1) {
+      GPR_ASSERT(GRPC_LOG_IF_ERROR(
+          "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
+      for (i = 0; i < pollset_count; i++) {
+        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+        sp->read_closure.cb = on_read;
+        sp->read_closure.cb_arg = sp;
+        grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+        s->active_ports++;
+        sp = sp->next;
+      }
+    } else {
+      for (i = 0; i < pollset_count; i++) {
+        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+      }
+      sp->read_closure.cb = on_read;
+      sp->read_closure.cb_arg = sp;
+      grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+      s->active_ports++;
+      sp = sp->next;
     }
-    sp->read_closure.cb = on_read;
-    sp->read_closure.cb_arg = sp;
-    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
-    s->active_ports++;
   }
   gpr_mu_unlock(&s->mu);
 }