grpc 1.0.0.pre1 → 1.0.0.pre2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of grpc might be problematic.

Files changed (59)
  1. checksums.yaml +4 -4
  2. data/Makefile +1 -37
  3. data/include/grpc/impl/codegen/compression_types.h +16 -1
  4. data/include/grpc/impl/codegen/grpc_types.h +23 -15
  5. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +4 -2
  6. data/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +3 -0
  7. data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +305 -64
  8. data/src/core/ext/transport/chttp2/transport/internal.h +46 -19
  9. data/src/core/ext/transport/chttp2/transport/parsing.c +6 -5
  10. data/src/core/ext/transport/chttp2/transport/stream_lists.c +11 -9
  11. data/src/core/ext/transport/chttp2/transport/writing.c +13 -3
  12. data/src/core/lib/iomgr/endpoint.c +4 -0
  13. data/src/core/lib/iomgr/endpoint.h +4 -0
  14. data/src/core/lib/iomgr/ev_epoll_linux.c +161 -116
  15. data/src/core/lib/iomgr/ev_poll_and_epoll_posix.c +3 -0
  16. data/src/core/lib/iomgr/ev_poll_posix.c +3 -0
  17. data/src/core/lib/iomgr/ev_posix.c +4 -0
  18. data/src/core/lib/iomgr/ev_posix.h +4 -0
  19. data/src/core/lib/iomgr/exec_ctx.c +7 -3
  20. data/src/core/lib/iomgr/exec_ctx.h +5 -1
  21. data/src/core/lib/iomgr/iomgr.c +3 -0
  22. data/src/core/lib/iomgr/network_status_tracker.c +9 -6
  23. data/src/core/lib/iomgr/network_status_tracker.h +4 -0
  24. data/src/core/lib/iomgr/tcp_posix.c +14 -4
  25. data/src/core/lib/iomgr/tcp_server_posix.c +2 -1
  26. data/src/core/lib/iomgr/tcp_windows.c +10 -3
  27. data/src/core/lib/iomgr/workqueue.h +25 -14
  28. data/src/core/lib/iomgr/workqueue_posix.c +1 -7
  29. data/src/core/lib/iomgr/workqueue_posix.h +5 -0
  30. data/src/core/lib/iomgr/workqueue_windows.c +22 -0
  31. data/src/core/lib/security/transport/secure_endpoint.c +13 -5
  32. data/src/core/lib/support/log.c +10 -9
  33. data/src/core/lib/surface/server.c +45 -31
  34. data/src/core/lib/surface/version.c +1 -1
  35. data/src/core/lib/transport/connectivity_state.c +3 -0
  36. data/src/ruby/bin/math_client.rb +1 -1
  37. data/src/ruby/bin/{math.rb → math_pb.rb} +0 -0
  38. data/src/ruby/bin/math_server.rb +1 -1
  39. data/src/ruby/bin/{math_services.rb → math_services_pb.rb} +4 -4
  40. data/src/ruby/lib/grpc/version.rb +1 -1
  41. data/src/ruby/pb/grpc/health/checker.rb +1 -1
  42. data/src/ruby/pb/grpc/health/v1/{health.rb → health_pb.rb} +0 -0
  43. data/src/ruby/pb/grpc/health/v1/{health_services.rb → health_services_pb.rb} +1 -1
  44. data/src/ruby/pb/grpc/testing/duplicate/{echo_duplicate_services.rb → echo_duplicate_services_pb.rb} +2 -2
  45. data/src/ruby/pb/grpc/testing/{metrics.rb → metrics_pb.rb} +1 -1
  46. data/src/ruby/pb/grpc/testing/{metrics_services.rb → metrics_services_pb.rb} +2 -2
  47. data/src/ruby/pb/src/proto/grpc/testing/{empty.rb → empty_pb.rb} +0 -0
  48. data/src/ruby/pb/src/proto/grpc/testing/{messages.rb → messages_pb.rb} +8 -10
  49. data/src/ruby/pb/src/proto/grpc/testing/{test.rb → test_pb.rb} +2 -2
  50. data/src/ruby/pb/src/proto/grpc/testing/{test_services.rb → test_services_pb.rb} +1 -1
  51. data/src/ruby/pb/test/client.rb +3 -3
  52. data/src/ruby/pb/test/server.rb +3 -3
  53. data/src/ruby/spec/pb/duplicate/codegen_spec.rb +2 -2
  54. data/src/ruby/spec/pb/health/checker_spec.rb +4 -4
  55. metadata +15 -19
  56. data/src/ruby/pb/test/proto/empty.rb +0 -15
  57. data/src/ruby/pb/test/proto/messages.rb +0 -80
  58. data/src/ruby/pb/test/proto/test.rb +0 -14
  59. data/src/ruby/pb/test/proto/test_services.rb +0 -64
@@ -725,6 +725,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
   GRPC_FD_UNREF(fd, "poll");
 }
 
+static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
+
 /*******************************************************************************
  * pollset_posix.c
  */
@@ -2006,6 +2008,7 @@ static const grpc_event_engine_vtable vtable = {
     .fd_notify_on_read = fd_notify_on_read,
     .fd_notify_on_write = fd_notify_on_write,
     .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+    .fd_get_workqueue = fd_get_workqueue,
 
     .pollset_init = pollset_init,
     .pollset_shutdown = pollset_shutdown,
@@ -617,6 +617,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
   GRPC_FD_UNREF(fd, "poll");
 }
 
+static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
+
 /*******************************************************************************
  * pollset_posix.c
  */
@@ -1234,6 +1236,7 @@ static const grpc_event_engine_vtable vtable = {
     .fd_notify_on_read = fd_notify_on_read,
     .fd_notify_on_write = fd_notify_on_write,
     .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+    .fd_get_workqueue = fd_get_workqueue,
 
     .pollset_init = pollset_init,
     .pollset_shutdown = pollset_shutdown,
@@ -148,6 +148,10 @@ grpc_fd *grpc_fd_create(int fd, const char *name) {
   return g_event_engine->fd_create(fd, name);
 }
 
+grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) {
+  return g_event_engine->fd_get_workqueue(fd);
+}
+
 int grpc_fd_wrapped_fd(grpc_fd *fd) {
   return g_event_engine->fd_wrapped_fd(fd);
 }
@@ -56,6 +56,7 @@ typedef struct grpc_event_engine_vtable {
   void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure);
   bool (*fd_is_shutdown)(grpc_fd *fd);
+  grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd);
   grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
                                                 grpc_fd *fd);
 
@@ -107,6 +108,9 @@ const char *grpc_get_poll_strategy_name();
    This takes ownership of closing fd. */
 grpc_fd *grpc_fd_create(int fd, const char *name);
 
+/* Get a workqueue that's associated with this fd */
+grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd);
+
 /* Return the wrapped fd, or -1 if it has been released or closed. */
 int grpc_fd_wrapped_fd(grpc_fd *fd);
 
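Note: the new accessor threads through the polling-engine vtable: grpc_fd_get_workqueue in ev_posix.c dispatches to whichever event engine is active, and the poll-based engines above simply return NULL. A minimal, self-contained sketch of that dispatch pattern (stand-in types and names, not grpc's actual definitions):

    #include <stdio.h>

    /* Hypothetical stand-ins for grpc_fd / grpc_workqueue. */
    typedef struct fd fd;
    typedef struct workqueue workqueue;

    typedef struct engine_vtable {
      workqueue *(*fd_get_workqueue)(fd *f);
    } engine_vtable;

    /* A poll-style engine has no per-fd workqueue, so it returns NULL. */
    static workqueue *poll_fd_get_workqueue(fd *f) { return NULL; }

    static const engine_vtable poll_engine = {poll_fd_get_workqueue};
    static const engine_vtable *g_event_engine = &poll_engine;

    /* Mirrors grpc_fd_get_workqueue: dispatch through the active engine. */
    static workqueue *get_workqueue(fd *f) {
      return g_event_engine->fd_get_workqueue(f);
    }

    int main(void) {
      printf("workqueue: %p\n", (void *)get_workqueue(NULL));
      return 0;
    }
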
@@ -37,6 +37,7 @@
 #include <grpc/support/sync.h>
 #include <grpc/support/thd.h>
 
+#include "src/core/lib/iomgr/workqueue.h"
 #include "src/core/lib/profiling/timers.h"
 
 bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
@@ -85,14 +86,17 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
 void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error,
                          grpc_workqueue *offload_target_or_null) {
-  GPR_ASSERT(offload_target_or_null == NULL);
-  grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+  if (offload_target_or_null == NULL) {
+    grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+  } else {
+    grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
+    GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
+  }
 }
 
 void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
                                 grpc_closure_list *list,
                                 grpc_workqueue *offload_target_or_null) {
-  GPR_ASSERT(offload_target_or_null == NULL);
   grpc_closure_list_move(list, &exec_ctx->closure_list);
 }
 
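Note: grpc_exec_ctx_sched now has a real offload path instead of asserting that the target is NULL. A hedged usage sketch, assuming it is compiled inside a grpc source tree of this vintage (my_cb and sched_example are hypothetical, not part of the package):

    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "src/core/lib/iomgr/workqueue.h"

    static void my_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}

    static void sched_example(grpc_workqueue *wq) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_closure c1, c2;
      grpc_closure_init(&c1, my_cb, NULL);
      grpc_closure_init(&c2, my_cb, NULL);
      /* NULL target: c1 runs at the next exec_ctx flush/finish point. */
      grpc_exec_ctx_sched(&exec_ctx, &c1, GRPC_ERROR_NONE, NULL);
      /* Non-NULL target: c2 is enqueued onto the workqueue; the reference
         passed in is consumed, so take one explicitly for the call. */
      grpc_exec_ctx_sched(&exec_ctx, &c2, GRPC_ERROR_NONE,
                          GRPC_WORKQUEUE_REF(wq, "sched_example"));
      grpc_exec_ctx_finish(&exec_ctx);
    }
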
@@ -93,7 +93,11 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
 /** Finish any pending work for a grpc_exec_ctx. Must be called before
  *  the instance is destroyed, or work may be lost. */
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed at the next flush/finish point */
+/** Add a closure to be executed in the future.
+    If \a offload_target_or_null is NULL, the closure will be executed at the
+    next exec_ctx.{finish,flush} point.
+    If \a offload_target_or_null is non-NULL, the closure will be scheduled
+    against the workqueue, and a reference to the workqueue will be consumed. */
 void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error,
                          grpc_workqueue *offload_target_or_null);
@@ -45,6 +45,7 @@
 
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/network_status_tracker.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/support/env.h"
 #include "src/core/lib/support/string.h"
@@ -62,6 +63,7 @@ void grpc_iomgr_init(void) {
   grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
   g_root_object.next = g_root_object.prev = &g_root_object;
   g_root_object.name = "root";
+  grpc_network_status_init();
   grpc_iomgr_platform_init();
 }
 
@@ -140,6 +142,7 @@ void grpc_iomgr_shutdown(void) {
 
   grpc_iomgr_platform_shutdown();
   grpc_exec_ctx_global_shutdown();
+  grpc_network_status_shutdown();
   gpr_mu_destroy(&g_mu);
   gpr_cv_destroy(&g_rcv);
 }
@@ -42,10 +42,16 @@ typedef struct endpoint_ll_node {
 
 static endpoint_ll_node *head = NULL;
 static gpr_mu g_endpoint_mutex;
-static bool g_init_done = false;
 
-void grpc_initialize_network_status_monitor() {
-  g_init_done = true;
+void grpc_network_status_shutdown(void) {
+  if (head != NULL) {
+    gpr_log(GPR_ERROR,
+            "Memory leaked as all network endpoints were not shut down");
+  }
+  gpr_mu_destroy(&g_endpoint_mutex);
+}
+
+void grpc_network_status_init(void) {
   gpr_mu_init(&g_endpoint_mutex);
   // TODO(makarandd): Install callback with OS to monitor network status.
 }
@@ -60,9 +66,6 @@ void grpc_destroy_network_status_monitor() {
 }
 
 void grpc_network_status_register_endpoint(grpc_endpoint *ep) {
-  if (!g_init_done) {
-    grpc_initialize_network_status_monitor();
-  }
   gpr_mu_lock(&g_endpoint_mutex);
   if (head == NULL) {
     head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
@@ -35,7 +35,11 @@
 #define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
 #include "src/core/lib/iomgr/endpoint.h"
 
+void grpc_network_status_init(void);
+void grpc_network_status_shutdown(void);
+
 void grpc_network_status_register_endpoint(grpc_endpoint *ep);
 void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
 void grpc_network_status_shutdown_all_endpoints();
+
 #endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
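
Note: the tracker now has an explicit lifecycle, wired up in iomgr.c above: grpc_network_status_init() runs once from grpc_iomgr_init(), grpc_network_status_shutdown() runs once from grpc_iomgr_shutdown(), and any endpoint still registered at shutdown is logged as a leak. A single-threaded, self-contained sketch of the same register/unregister/leak-check pattern (mutex omitted; names are illustrative, not grpc's):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node { void *ep; struct node *next; } node;

    static node *head = NULL;

    static void registry_register(void *ep) {
      node *n = malloc(sizeof(node));
      n->ep = ep;
      n->next = head;
      head = n;
    }

    static void registry_unregister(void *ep) {
      for (node **p = &head; *p; p = &(*p)->next) {
        if ((*p)->ep == ep) {
          node *dead = *p;
          *p = dead->next;
          free(dead);
          return;
        }
      }
    }

    static void registry_shutdown(void) {
      /* Mirrors the new shutdown check: anything still registered leaks. */
      if (head != NULL) fprintf(stderr, "leak: endpoints still registered\n");
    }

    int main(void) {
      int ep;
      registry_register(&ep);
      registry_unregister(&ep);
      registry_shutdown(); /* silent: everything was unregistered */
      return 0;
    }
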
@@ -284,7 +284,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 }
 
 /* returns true if done, false if pending; if returning true, *error is set */
-#define MAX_WRITE_IOVEC 16
+#define MAX_WRITE_IOVEC 1024
 static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
   struct msghdr msg;
   struct iovec iov[MAX_WRITE_IOVEC];
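
Note: raising MAX_WRITE_IOVEC from 16 to 1024 lets tcp_flush hand many more slices to a single sendmsg call. POSIX caps the per-call iovec count at IOV_MAX, and 1024 is the common Linux value, which presumably motivates the new constant; the actual ceiling on a given system can be checked at runtime:

    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
      /* sysconf(_SC_IOV_MAX) reports the per-call iovec ceiling. */
      long iov_max = sysconf(_SC_IOV_MAX);
      printf("IOV_MAX on this system: %ld\n", iov_max);
      return 0;
    }
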
@@ -450,9 +450,19 @@ static char *tcp_get_peer(grpc_endpoint *ep) {
   return gpr_strdup(tcp->peer_string);
 }
 
-static const grpc_endpoint_vtable vtable = {
-    tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
-    tcp_shutdown, tcp_destroy, tcp_get_peer};
+static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
+  grpc_tcp *tcp = (grpc_tcp *)ep;
+  return grpc_fd_get_workqueue(tcp->em_fd);
+}
+
+static const grpc_endpoint_vtable vtable = {tcp_read,
+                                            tcp_write,
+                                            tcp_get_workqueue,
+                                            tcp_add_to_pollset,
+                                            tcp_add_to_pollset_set,
+                                            tcp_shutdown,
+                                            tcp_destroy,
+                                            tcp_get_peer};
 
 grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
                                const char *peer_string) {
@@ -491,7 +491,8 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
   }
 
   for (unsigned i = 0; i < count; i++) {
-    int fd, port;
+    int fd = -1;
+    int port = -1;
     grpc_dualstack_mode dsmode;
     err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
                                        &dsmode, &fd);
@@ -389,9 +389,16 @@ static char *win_get_peer(grpc_endpoint *ep) {
   return gpr_strdup(tcp->peer_string);
 }
 
-static grpc_endpoint_vtable vtable = {
-    win_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
-    win_shutdown, win_destroy, win_get_peer};
+static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
+static grpc_endpoint_vtable vtable = {win_read,
+                                      win_write,
+                                      win_get_workqueue,
+                                      win_add_to_pollset,
+                                      win_add_to_pollset_set,
+                                      win_shutdown,
+                                      win_destroy,
+                                      win_get_peer};
 
 grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
   grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
@@ -38,6 +38,7 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_set.h"
 
 #ifdef GPR_POSIX_SOCKET
 #include "src/core/lib/iomgr/workqueue_posix.h"
@@ -49,35 +50,45 @@
 
 /* grpc_workqueue is forward declared in exec_ctx.h */
 
-/** Create a work queue */
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
-                                  grpc_workqueue **workqueue);
-
+/* Deprecated: do not use.
+   This has *already* been removed in a future commit. */
 void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
 
-#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+/* Reference counting functions. Use the macro's always
+   (GRPC_WORKQUEUE_{REF,UNREF}).
+
+   Pass in a descriptive reason string for reffing/unreffing as the last
+   argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that
+   string will be printed alongside the refcount. When it is not defined, the
+   string will be discarded at compilation time. */
+
+//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
 #ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
 #define GRPC_WORKQUEUE_REF(p, r) \
-  grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_WORKQUEUE_UNREF(cl, p, r) \
-  grpc_workqueue_unref((cl), (p), __FILE__, __LINE__, (r))
+  (grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
+#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
+  grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
 void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
                         const char *reason);
 void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                           const char *file, int line, const char *reason);
 #else
-#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
+#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
 #define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
 void grpc_workqueue_ref(grpc_workqueue *workqueue);
 void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
 #endif
 
-/** Bind this workqueue to a pollset */
-void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
-                                   grpc_workqueue *workqueue,
-                                   grpc_pollset *pollset);
+/** Add a work item to a workqueue. Items added to a work queue will be started
+    in approximately the order they were enqueued, on some thread that may or
+    may not be the current thread. Successive closures enqueued onto a workqueue
+    MAY be executed concurrently.
+
+    It is generally more expensive to add a closure to a workqueue than to the
+    execution context, both in terms of CPU work and in execution latency.
 
-/** Add a work item to a workqueue */
+    Use work queues when it's important that other threads be given a chance to
+    tackle some workload. */
 void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                             grpc_closure *closure, grpc_error *error);
 
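Note: GRPC_WORKQUEUE_REF now expands to a comma expression that performs the ref and still evaluates to the pointer, so a reference can be taken inline at a call site that consumes it (as grpc_exec_ctx_sched above does). A self-contained illustration of that macro idiom (hypothetical thing type, not grpc's):

    #include <stdio.h>

    typedef struct thing { int refs; } thing;

    static void thing_ref(thing *t) { t->refs++; }

    /* The comma expression increments the refcount and then evaluates to
       the pointer itself, so the macro works as an argument expression. */
    #define THING_REF(p, r) (thing_ref((p)), (p))

    static void consume(thing *t) { printf("refs after ref: %d\n", t->refs); }

    int main(void) {
      thing t = {1};
      consume(THING_REF(&t, "example")); /* prints: refs after ref: 2 */
      return 0;
    }
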
@@ -70,7 +70,7 @@ grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
 
 static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
                               grpc_workqueue *workqueue) {
-  GPR_ASSERT(grpc_closure_list_empty(workqueue->closure_list));
+  grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
   grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
 }
 
@@ -100,12 +100,6 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
   }
 }
 
-void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
-                                   grpc_workqueue *workqueue,
-                                   grpc_pollset *pollset) {
-  grpc_pollset_add_fd(exec_ctx, pollset, workqueue->wakeup_read_fd);
-}
-
 void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
   gpr_mu_lock(&workqueue->mu);
   grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
@@ -50,4 +50,9 @@ struct grpc_workqueue {
   grpc_closure read_closure;
 };
 
+/** Create a work queue. Returns an error if creation fails. If creation
+    succeeds, sets *workqueue to point to it. */
+grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
+                                  grpc_workqueue **workqueue);
+
 #endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
@@ -37,4 +37,26 @@
 
 #include "src/core/lib/iomgr/workqueue.h"
 
+// Minimal implementation of grpc_workqueue for Windows
+// Works by directly enqueuing workqueue items onto the current execution
+// context, which is at least correct, if not performant or in the spirit of
+// workqueues.
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+                        const char *reason) {}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+                          const char *file, int line, const char *reason) {}
+#else
+void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+                            grpc_closure *closure, grpc_error *error) {
+  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
 #endif /* GPR_WINDOWS */
@@ -360,11 +360,19 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
   return grpc_endpoint_get_peer(ep->wrapped_ep);
 }
 
-static const grpc_endpoint_vtable vtable = {
-    endpoint_read, endpoint_write,
-    endpoint_add_to_pollset, endpoint_add_to_pollset_set,
-    endpoint_shutdown, endpoint_destroy,
-    endpoint_get_peer};
+static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
+  secure_endpoint *ep = (secure_endpoint *)secure_ep;
+  return grpc_endpoint_get_workqueue(ep->wrapped_ep);
+}
+
+static const grpc_endpoint_vtable vtable = {endpoint_read,
+                                            endpoint_write,
+                                            endpoint_get_workqueue,
+                                            endpoint_add_to_pollset,
+                                            endpoint_add_to_pollset_set,
+                                            endpoint_shutdown,
+                                            endpoint_destroy,
+                                            endpoint_get_peer};
 
 grpc_endpoint *grpc_secure_endpoint_create(
     struct tsi_frame_protector *protector, grpc_endpoint *transport,
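
Note: the secure endpoint is a wrapper, so its get_workqueue entry simply forwards to the wrapped transport endpoint, the same delegation its other vtable slots use. A minimal, self-contained sketch of that decorator shape (simplified types, not grpc's):

    #include <stdio.h>

    typedef struct workqueue workqueue;

    typedef struct endpoint {
      workqueue *(*get_workqueue)(struct endpoint *ep);
    } endpoint;

    /* A wrapper endpoint holds the endpoint it decorates... */
    typedef struct {
      endpoint base;
      endpoint *wrapped;
    } secure_ep;

    /* ...and forwards the query to it. */
    static workqueue *secure_get_workqueue(endpoint *ep) {
      secure_ep *s = (secure_ep *)ep;
      return s->wrapped->get_workqueue(s->wrapped);
    }

    static workqueue *plain_get_workqueue(endpoint *ep) { return NULL; }

    int main(void) {
      endpoint plain = {plain_get_workqueue};
      secure_ep sec = {{secure_get_workqueue}, &plain};
      printf("%p\n", (void *)sec.base.get_workqueue(&sec.base));
      return 0;
    }
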
@@ -79,17 +79,18 @@ void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) {
 
 void gpr_log_verbosity_init() {
   char *verbosity = gpr_getenv("GRPC_VERBOSITY");
-  if (verbosity == NULL) return;
 
-  gpr_atm min_severity_to_print = GPR_LOG_VERBOSITY_UNSET;
-  if (strcmp(verbosity, "DEBUG") == 0) {
-    min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
-  } else if (strcmp(verbosity, "INFO") == 0) {
-    min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
-  } else if (strcmp(verbosity, "ERROR") == 0) {
-    min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
+  gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
+  if (verbosity != NULL) {
+    if (strcmp(verbosity, "DEBUG") == 0) {
+      min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
+    } else if (strcmp(verbosity, "INFO") == 0) {
+      min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
+    } else if (strcmp(verbosity, "ERROR") == 0) {
+      min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
+    }
+    gpr_free(verbosity);
   }
-  gpr_free(verbosity);
   if ((gpr_atm_no_barrier_load(&g_min_severity_to_print)) ==
       GPR_LOG_VERBOSITY_UNSET) {
     gpr_atm_no_barrier_store(&g_min_severity_to_print, min_severity_to_print);
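
Note the behavior change: previously an unset GRPC_VERBOSITY returned early and left the minimum severity unset; now unset or unrecognized values fall back to ERROR, and the string is freed only on the path where gpr_getenv actually allocated one. A standalone replica of the new parsing logic (plain getenv in place of gpr_getenv):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
      const char *verbosity = getenv("GRPC_VERBOSITY");
      const char *level = "ERROR"; /* new default when unset/unrecognized */
      if (verbosity != NULL) {
        if (strcmp(verbosity, "DEBUG") == 0) level = "DEBUG";
        else if (strcmp(verbosity, "INFO") == 0) level = "INFO";
        else if (strcmp(verbosity, "ERROR") == 0) level = "ERROR";
      }
      printf("min severity to print: %s\n", level);
      return 0;
    }
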
@@ -73,6 +73,7 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
 
 typedef struct requested_call {
   requested_call_type type;
+  size_t cq_idx;
   void *tag;
   grpc_server *server;
   grpc_completion_queue *cq_bound_to_call;
@@ -206,11 +207,11 @@ struct grpc_server {
   registered_method *registered_methods;
   /** one request matcher for unregistered methods */
   request_matcher unregistered_request_matcher;
-  /** free list of available requested_calls indices */
-  gpr_stack_lockfree *request_freelist;
+  /** free list of available requested_calls_per_cq indices */
+  gpr_stack_lockfree **request_freelist_per_cq;
   /** requested call backing data */
-  requested_call *requested_calls;
-  size_t max_requested_calls;
+  requested_call **requested_calls_per_cq;
+  int max_requested_calls_per_cq;
 
   gpr_atm shutdown_flag;
   uint8_t shutdown_published;
@@ -357,7 +358,8 @@ static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
   for (size_t i = 0; i < server->cq_count; i++) {
     while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
            -1) {
-      fail_call(exec_ctx, server, i, &server->requested_calls[request_id],
+      fail_call(exec_ctx, server, i,
+                &server->requested_calls_per_cq[i][request_id],
                 GRPC_ERROR_REF(error));
     }
   }
@@ -392,12 +394,16 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
   }
   for (i = 0; i < server->cq_count; i++) {
     GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
+    if (server->started) {
+      gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]);
+      gpr_free(server->requested_calls_per_cq[i]);
+    }
   }
-  gpr_stack_lockfree_destroy(server->request_freelist);
+  gpr_free(server->request_freelist_per_cq);
+  gpr_free(server->requested_calls_per_cq);
   gpr_free(server->cqs);
   gpr_free(server->pollsets);
   gpr_free(server->shutdown_tags);
-  gpr_free(server->requested_calls);
   gpr_free(server);
 }
 
@@ -460,11 +466,13 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
   requested_call *rc = req;
   grpc_server *server = rc->server;
 
-  if (rc >= server->requested_calls &&
-      rc < server->requested_calls + server->max_requested_calls) {
-    GPR_ASSERT(rc - server->requested_calls <= INT_MAX);
-    gpr_stack_lockfree_push(server->request_freelist,
-                            (int)(rc - server->requested_calls));
+  if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
+      rc < server->requested_calls_per_cq[rc->cq_idx] +
+               server->max_requested_calls_per_cq) {
+    GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX);
+    gpr_stack_lockfree_push(
+        server->request_freelist_per_cq[rc->cq_idx],
+        (int)(rc - server->requested_calls_per_cq[rc->cq_idx]));
   } else {
     gpr_free(req);
   }
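
Note: the requested_calls backing store is now sharded per completion queue, so done_request_event first tests whether rc points into that cq's array and, if so, recovers the freelist slot by pointer arithmetic. The index-recovery trick in isolation (self-contained sketch):

    #include <stdio.h>

    #define SLOTS 4

    typedef struct { int payload; } call;

    int main(void) {
      call calls[SLOTS];
      call *rc = &calls[2];
      /* If rc points into the backing array, its freelist index is just
         the pointer difference from the array base. */
      if (rc >= calls && rc < calls + SLOTS) {
        printf("slot index = %d\n", (int)(rc - calls)); /* prints 2 */
      }
      return 0;
    }
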
@@ -540,7 +548,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
       calld->state = ACTIVATED;
       gpr_mu_unlock(&calld->mu_state);
       publish_call(exec_ctx, server, calld, cq_idx,
-                   &server->requested_calls[request_id]);
+                   &server->requested_calls_per_cq[cq_idx][request_id]);
       return; /* early out */
     }
   }
@@ -979,8 +987,6 @@ void grpc_server_register_non_listening_completion_queue(
 }
 
 grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
-  size_t i;
-
   GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
 
   grpc_server *server = gpr_malloc(sizeof(grpc_server));
@@ -998,15 +1004,7 @@ grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
       &server->root_channel_data;
 
   /* TODO(ctiller): expose a channel_arg for this */
-  server->max_requested_calls = 32768;
-  server->request_freelist =
-      gpr_stack_lockfree_create(server->max_requested_calls);
-  for (i = 0; i < (size_t)server->max_requested_calls; i++) {
-    gpr_stack_lockfree_push(server->request_freelist, (int)i);
-  }
-  server->requested_calls = gpr_malloc(server->max_requested_calls *
-                                       sizeof(*server->requested_calls));
-
+  server->max_requested_calls_per_cq = 32768;
   server->channel_args = grpc_channel_args_copy(args);
 
   return server;
@@ -1066,16 +1064,28 @@ void grpc_server_start(grpc_server *server) {
   server->started = true;
   size_t pollset_count = 0;
   server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+  server->request_freelist_per_cq =
+      gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
+  server->requested_calls_per_cq =
+      gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
   for (i = 0; i < server->cq_count; i++) {
     if (!grpc_cq_is_non_listening_server_cq(server->cqs[i])) {
      server->pollsets[pollset_count++] = grpc_cq_pollset(server->cqs[i]);
    }
+    server->request_freelist_per_cq[i] =
+        gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
+    for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
+      gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
+    }
+    server->requested_calls_per_cq[i] =
+        gpr_malloc((size_t)server->max_requested_calls_per_cq *
+                   sizeof(*server->requested_calls_per_cq[i]));
   }
   request_matcher_init(&server->unregistered_request_matcher,
-                       server->max_requested_calls, server);
+                       (size_t)server->max_requested_calls_per_cq, server);
   for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
-    request_matcher_init(&rm->request_matcher, server->max_requested_calls,
-                         server);
+    request_matcher_init(&rm->request_matcher,
+                         (size_t)server->max_requested_calls_per_cq, server);
   }
 
   for (l = server->listeners; l; l = l->next) {
@@ -1307,11 +1317,13 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
               GRPC_ERROR_CREATE("Server Shutdown"));
     return GRPC_CALL_OK;
   }
-  request_id = gpr_stack_lockfree_pop(server->request_freelist);
+  request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
   if (request_id == -1) {
     /* out of request ids: just fail this one */
     fail_call(exec_ctx, server, cq_idx, rc,
-              GRPC_ERROR_CREATE("Server Shutdown"));
+              grpc_error_set_int(GRPC_ERROR_CREATE("Out of request ids"),
                                 GRPC_ERROR_INT_LIMIT,
                                 server->max_requested_calls_per_cq));
     return GRPC_CALL_OK;
   }
   switch (rc->type) {
@@ -1322,7 +1334,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
       rm = &rc->data.registered.registered_method->request_matcher;
       break;
   }
-  server->requested_calls[request_id] = *rc;
+  server->requested_calls_per_cq[cq_idx][request_id] = *rc;
   gpr_free(rc);
   if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
     /* this was the first queued request: we need to lock and start
@@ -1346,7 +1358,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
       calld->state = ACTIVATED;
       gpr_mu_unlock(&calld->mu_state);
       publish_call(exec_ctx, server, calld, cq_idx,
-                   &server->requested_calls[request_id]);
+                   &server->requested_calls_per_cq[cq_idx][request_id]);
     }
     gpr_mu_lock(&server->mu_call);
   }
@@ -1382,6 +1394,7 @@ grpc_call_error grpc_server_request_call(
   }
   grpc_cq_begin_op(cq_for_notification, tag);
   details->reserved = NULL;
+  rc->cq_idx = cq_idx;
   rc->type = BATCH_CALL;
   rc->server = server;
   rc->tag = tag;
@@ -1430,6 +1443,7 @@ grpc_call_error grpc_server_request_registered_call(
     goto done;
   }
   grpc_cq_begin_op(cq_for_notification, tag);
+  rc->cq_idx = cq_idx;
   rc->type = REGISTERED_CALL;
   rc->server = server;
   rc->tag = tag;