grpc 0.13.0 → 0.13.1.pre1

Potentially problematic release.

Files changed (155)
  1. checksums.yaml +4 -4
  2. data/.yardopts +1 -0
  3. data/Makefile +1114 -937
  4. data/include/grpc/census.h +71 -89
  5. data/include/grpc/compression.h +7 -7
  6. data/include/grpc/grpc.h +65 -68
  7. data/include/grpc/grpc_security.h +38 -38
  8. data/include/grpc/impl/codegen/alloc.h +7 -7
  9. data/include/grpc/impl/codegen/byte_buffer.h +13 -13
  10. data/include/grpc/impl/codegen/grpc_types.h +7 -2
  11. data/include/grpc/impl/codegen/log.h +5 -5
  12. data/include/grpc/impl/codegen/port_platform.h +14 -6
  13. data/include/grpc/impl/codegen/slice.h +15 -15
  14. data/include/grpc/impl/codegen/slice_buffer.h +17 -17
  15. data/include/grpc/impl/codegen/sync.h +26 -22
  16. data/include/grpc/impl/codegen/time.h +22 -24
  17. data/include/grpc/support/avl.h +9 -8
  18. data/include/grpc/support/cmdline.h +12 -12
  19. data/include/grpc/support/cpu.h +2 -2
  20. data/include/grpc/support/histogram.h +22 -22
  21. data/include/grpc/support/host_port.h +2 -2
  22. data/include/grpc/support/log_win32.h +1 -1
  23. data/include/grpc/support/string_util.h +2 -2
  24. data/include/grpc/support/subprocess.h +5 -5
  25. data/include/grpc/support/thd.h +9 -9
  26. data/include/grpc/support/useful.h +3 -1
  27. data/src/core/census/context.c +64 -85
  28. data/src/core/census/grpc_filter.c +2 -2
  29. data/src/core/census/mlog.c +600 -0
  30. data/src/core/census/mlog.h +95 -0
  31. data/src/core/channel/channel_args.c +67 -6
  32. data/src/core/channel/channel_args.h +7 -1
  33. data/src/core/channel/client_channel.c +26 -36
  34. data/src/core/channel/client_uchannel.c +1 -1
  35. data/src/core/channel/http_client_filter.c +2 -2
  36. data/src/core/channel/http_server_filter.c +2 -2
  37. data/src/core/channel/subchannel_call_holder.c +5 -7
  38. data/src/core/client_config/connector.c +3 -2
  39. data/src/core/client_config/connector.h +2 -2
  40. data/src/core/client_config/lb_policies/load_balancer_api.c +163 -0
  41. data/src/core/client_config/lb_policies/load_balancer_api.h +85 -0
  42. data/src/core/client_config/lb_policies/pick_first.c +10 -11
  43. data/src/core/client_config/lb_policies/round_robin.c +7 -8
  44. data/src/core/client_config/lb_policy.c +3 -3
  45. data/src/core/client_config/lb_policy.h +3 -2
  46. data/src/core/client_config/subchannel.c +51 -21
  47. data/src/core/client_config/subchannel.h +15 -6
  48. data/src/core/client_config/subchannel_index.c +261 -0
  49. data/src/core/client_config/subchannel_index.h +77 -0
  50. data/src/core/compression/{algorithm.c → compression_algorithm.c} +0 -0
  51. data/src/core/httpcli/httpcli.c +13 -11
  52. data/src/core/httpcli/httpcli.h +3 -2
  53. data/src/core/httpcli/httpcli_security_connector.c +7 -7
  54. data/src/core/iomgr/fd_posix.c +4 -2
  55. data/src/core/iomgr/iocp_windows.c +10 -6
  56. data/src/core/iomgr/iocp_windows.h +9 -2
  57. data/src/core/iomgr/iomgr.c +18 -2
  58. data/src/core/iomgr/iomgr_internal.h +5 -1
  59. data/src/core/iomgr/pollset.h +9 -10
  60. data/src/core/iomgr/pollset_multipoller_with_epoll.c +1 -0
  61. data/src/core/iomgr/pollset_multipoller_with_poll_posix.c +10 -5
  62. data/src/core/iomgr/pollset_posix.c +30 -35
  63. data/src/core/iomgr/pollset_posix.h +10 -6
  64. data/src/core/iomgr/pollset_set.h +3 -9
  65. data/src/core/iomgr/pollset_set_posix.c +23 -3
  66. data/src/core/iomgr/pollset_set_posix.h +2 -18
  67. data/src/core/iomgr/pollset_set_windows.c +3 -3
  68. data/src/core/iomgr/pollset_set_windows.h +2 -2
  69. data/src/core/iomgr/pollset_windows.c +24 -21
  70. data/src/core/iomgr/pollset_windows.h +1 -5
  71. data/src/core/iomgr/tcp_client_posix.c +7 -5
  72. data/src/core/iomgr/tcp_posix.c +4 -2
  73. data/src/core/iomgr/tcp_server_windows.c +1 -2
  74. data/src/core/iomgr/timer.c +2 -3
  75. data/src/core/iomgr/timer.h +21 -1
  76. data/src/core/iomgr/timer_heap.c +10 -12
  77. data/src/core/iomgr/udp_server.c +5 -4
  78. data/src/core/iomgr/udp_server.h +1 -0
  79. data/src/core/iomgr/workqueue_posix.c +1 -0
  80. data/src/core/iomgr/workqueue_posix.h +3 -1
  81. data/src/core/proto/grpc/lb/v0/load_balancer.pb.c +119 -0
  82. data/src/core/proto/grpc/lb/v0/load_balancer.pb.h +182 -0
  83. data/src/core/security/{base64.c → b64.c} +1 -1
  84. data/src/core/security/{base64.h → b64.h} +1 -1
  85. data/src/core/security/client_auth_filter.c +0 -1
  86. data/src/core/security/credentials.c +12 -5
  87. data/src/core/security/credentials.h +3 -3
  88. data/src/core/security/google_default_credentials.c +24 -19
  89. data/src/core/security/handshake.c +15 -7
  90. data/src/core/security/handshake.h +2 -1
  91. data/src/core/security/json_token.c +1 -1
  92. data/src/core/security/jwt_verifier.c +1 -1
  93. data/src/core/security/security_connector.c +84 -64
  94. data/src/core/security/security_connector.h +42 -22
  95. data/src/core/security/security_context.c +8 -3
  96. data/src/core/security/server_auth_filter.c +2 -2
  97. data/src/core/security/server_secure_chttp2.c +7 -7
  98. data/src/core/support/avl.c +2 -2
  99. data/src/core/support/env_linux.c +17 -0
  100. data/src/core/support/{file.c → load_file.c} +2 -2
  101. data/src/core/support/{file.h → load_file.h} +4 -12
  102. data/src/core/support/sync.c +6 -1
  103. data/src/core/support/time_posix.c +1 -1
  104. data/src/core/{iomgr/timer_internal.h → support/tmpfile.h} +17 -23
  105. data/src/core/support/{file_posix.c → tmpfile_posix.c} +2 -2
  106. data/src/core/support/{file_win32.c → tmpfile_win32.c} +2 -2
  107. data/src/core/surface/alarm.c +3 -2
  108. data/src/core/surface/call.c +102 -52
  109. data/src/core/surface/channel_create.c +1 -1
  110. data/src/core/surface/completion_queue.c +73 -41
  111. data/src/core/surface/init.c +4 -0
  112. data/src/core/surface/lame_client.c +1 -2
  113. data/src/core/surface/secure_channel_create.c +6 -7
  114. data/src/core/surface/server.c +13 -5
  115. data/src/core/surface/validate_metadata.c +1 -1
  116. data/src/core/surface/version.c +1 -1
  117. data/src/core/transport/chttp2/internal.h +22 -10
  118. data/src/core/transport/chttp2/parsing.c +3 -3
  119. data/src/core/transport/chttp2/stream_lists.c +39 -21
  120. data/src/core/transport/chttp2/writing.c +19 -28
  121. data/src/core/transport/chttp2_transport.c +80 -37
  122. data/src/core/transport/metadata.c +8 -0
  123. data/src/core/transport/static_metadata.c +17 -17
  124. data/src/core/transport/static_metadata.h +3 -3
  125. data/src/core/transport/transport.c +2 -1
  126. data/src/core/transport/transport.h +12 -5
  127. data/src/ruby/ext/grpc/extconf.rb +1 -0
  128. data/src/ruby/ext/grpc/rb_call.c +6 -0
  129. data/src/ruby/ext/grpc/rb_call_credentials.c +12 -14
  130. data/src/ruby/ext/grpc/rb_channel.c +8 -14
  131. data/src/ruby/ext/grpc/rb_channel_credentials.c +11 -12
  132. data/src/ruby/ext/grpc/rb_grpc.c +19 -18
  133. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -0
  134. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +8 -2
  135. data/src/ruby/lib/grpc/core/time_consts.rb +2 -2
  136. data/src/ruby/lib/grpc/errors.rb +2 -2
  137. data/src/ruby/lib/grpc/generic/rpc_server.rb +58 -39
  138. data/src/ruby/lib/grpc/version.rb +1 -1
  139. data/src/ruby/pb/README.md +2 -2
  140. data/src/ruby/pb/generate_proto_ruby.sh +2 -2
  141. data/src/ruby/pb/grpc/health/checker.rb +11 -11
  142. data/src/ruby/pb/grpc/health/v1/health.rb +28 -0
  143. data/src/ruby/pb/grpc/health/{v1alpha → v1}/health_services.rb +4 -4
  144. data/src/ruby/spec/client_server_spec.rb +2 -1
  145. data/src/ruby/spec/generic/rpc_server_spec.rb +3 -22
  146. data/src/ruby/spec/pb/health/checker_spec.rb +22 -36
  147. data/third_party/nanopb/pb.h +547 -0
  148. data/third_party/nanopb/pb_common.c +97 -0
  149. data/third_party/nanopb/pb_common.h +42 -0
  150. data/third_party/nanopb/pb_decode.c +1319 -0
  151. data/third_party/nanopb/pb_decode.h +149 -0
  152. data/third_party/nanopb/pb_encode.c +690 -0
  153. data/third_party/nanopb/pb_encode.h +154 -0
  154. metadata +32 -16
  155. data/src/ruby/pb/grpc/health/v1alpha/health.rb +0 -29
@@ -63,9 +63,10 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
   alarm->cq = cq;
   alarm->tag = tag;
 
-  grpc_timer_init(&exec_ctx, &alarm->alarm, deadline, alarm_cb, alarm,
-                  gpr_now(GPR_CLOCK_MONOTONIC));
   grpc_cq_begin_op(cq, tag);
+  grpc_timer_init(&exec_ctx, &alarm->alarm,
+                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+                  alarm_cb, alarm, gpr_now(GPR_CLOCK_MONOTONIC));
   grpc_exec_ctx_finish(&exec_ctx);
   return alarm;
 }
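
The alarm fix above has two parts: grpc_cq_begin_op now runs before the timer is armed, so a timer that fires immediately cannot complete an operation that was never registered with the completion queue; and the caller's deadline is converted to the monotonic clock before use, since the timer code compares it against gpr_now(GPR_CLOCK_MONOTONIC). The sketch below shows the clock-conversion idea in plain POSIX C; realtime_to_monotonic is an illustrative name, not part of the gpr API.

    #include <time.h>

    /* Minimal sketch of what a realtime-to-monotonic conversion does:
       re-express a realtime deadline on the monotonic clock by carrying
       over the remaining time-to-deadline. */
    static struct timespec realtime_to_monotonic(struct timespec deadline) {
      struct timespec rt, mono, out;
      clock_gettime(CLOCK_REALTIME, &rt);    /* now, realtime clock */
      clock_gettime(CLOCK_MONOTONIC, &mono); /* now, monotonic clock */
      /* out = now_monotonic + (deadline - now_realtime) */
      out.tv_sec = mono.tv_sec + (deadline.tv_sec - rt.tv_sec);
      out.tv_nsec = mono.tv_nsec + (deadline.tv_nsec - rt.tv_nsec);
      if (out.tv_nsec >= 1000000000L) { out.tv_sec += 1; out.tv_nsec -= 1000000000L; }
      if (out.tv_nsec < 0) { out.tv_sec -= 1; out.tv_nsec += 1000000000L; }
      return out;
    }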
@@ -159,6 +159,9 @@ struct grpc_call {
   uint8_t receiving_message;
   uint8_t received_final_op;
 
+  /* have we received initial metadata */
+  bool has_initial_md_been_received;
+
   batch_control active_batches[MAX_CONCURRENT_BATCHES];
 
   /* first idx: is_receiving, second idx: is_trailing */
@@ -200,6 +203,7 @@ struct grpc_call {
   gpr_slice receiving_slice;
   grpc_closure receiving_slice_ready;
   grpc_closure receiving_stream_ready;
+  grpc_closure receiving_initial_metadata_ready;
   uint32_t test_only_last_message_flags;
 
   union {
@@ -212,6 +216,11 @@ struct grpc_call {
       int *cancelled;
     } server;
   } final_op;
+
+  struct {
+    void *bctlp;
+    bool success;
+  } saved_receiving_stream_ready_ctx;
 };
 
 #define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
@@ -993,6 +1002,94 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
   }
 }
 
+static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl,
+                                  bool success) {
+  grpc_call *call = bctl->call;
+  if (call->receiving_stream == NULL) {
+    *call->receiving_buffer = NULL;
+    call->receiving_message = 0;
+    if (gpr_unref(&bctl->steps_to_complete)) {
+      post_batch_completion(exec_ctx, bctl);
+    }
+  } else if (call->receiving_stream->length >
+             grpc_channel_get_max_message_length(call->channel)) {
+    cancel_with_status(exec_ctx, call, GRPC_STATUS_INTERNAL,
+                       "Max message size exceeded");
+    grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+    call->receiving_stream = NULL;
+    *call->receiving_buffer = NULL;
+    call->receiving_message = 0;
+    if (gpr_unref(&bctl->steps_to_complete)) {
+      post_batch_completion(exec_ctx, bctl);
+    }
+  } else {
+    call->test_only_last_message_flags = call->receiving_stream->flags;
+    if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
+        (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
+      *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
+          NULL, 0, call->compression_algorithm);
+    } else {
+      *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
+    }
+    grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
+                      bctl);
+    continue_receiving_slices(exec_ctx, bctl);
+    /* early out */
+    return;
+  }
+}
+
+static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+                                   bool success) {
+  batch_control *bctl = bctlp;
+  grpc_call *call = bctl->call;
+
+  gpr_mu_lock(&bctl->call->mu);
+  if (bctl->call->has_initial_md_been_received) {
+    gpr_mu_unlock(&bctl->call->mu);
+    process_data_after_md(exec_ctx, bctlp, success);
+  } else {
+    call->saved_receiving_stream_ready_ctx.bctlp = bctlp;
+    call->saved_receiving_stream_ready_ctx.success = success;
+    gpr_mu_unlock(&bctl->call->mu);
+  }
+}
+
+static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
+                                             void *bctlp, bool success) {
+  batch_control *bctl = bctlp;
+  grpc_call *call = bctl->call;
+
+  gpr_mu_lock(&call->mu);
+
+  grpc_metadata_batch *md =
+      &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+  grpc_metadata_batch_filter(md, recv_initial_filter, call);
+  call->has_initial_md_been_received = true;
+
+  if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
+          0 &&
+      !call->is_client) {
+    GPR_TIMER_BEGIN("set_deadline_alarm", 0);
+    set_deadline_alarm(exec_ctx, call, md->deadline);
+    GPR_TIMER_END("set_deadline_alarm", 0);
+  }
+
+  if (call->saved_receiving_stream_ready_ctx.bctlp != NULL) {
+    grpc_closure *saved_rsr_closure = grpc_closure_create(
+        receiving_stream_ready, call->saved_receiving_stream_ready_ctx.bctlp);
+    grpc_exec_ctx_enqueue(exec_ctx, saved_rsr_closure,
+                          call->saved_receiving_stream_ready_ctx.success, NULL);
+    call->saved_receiving_stream_ready_ctx.bctlp = NULL;
+  }
+
+  gpr_mu_unlock(&call->mu);
+
+  if (gpr_unref(&bctl->steps_to_complete)) {
+    post_batch_completion(exec_ctx, bctl);
+  }
+}
+
 static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
   batch_control *bctl = bctlp;
   grpc_call *call = bctl->call;
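
The two handlers added above close an ordering race: with initial metadata completing through its own closure, a message can finish arriving before the initial metadata batch has been filtered. receiving_stream_ready therefore checks has_initial_md_been_received under the call lock and, if metadata is still pending, parks its arguments in saved_receiving_stream_ready_ctx; receiving_initial_metadata_ready replays the parked callback once the metadata has been processed. Below is a self-contained sketch of the same park-and-replay pattern, with illustrative names rather than the grpc internals.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative sketch: if the data callback fires before the metadata
       callback, park its arguments and let the metadata callback replay it. */
    typedef struct {
      pthread_mutex_t mu;
      bool metadata_seen;
      void (*saved_cb)(void *arg); /* parked data callback, if any */
      void *saved_arg;
    } ordered_call;

    void on_data(ordered_call *c, void (*cb)(void *), void *arg) {
      pthread_mutex_lock(&c->mu);
      if (c->metadata_seen) {
        pthread_mutex_unlock(&c->mu);
        cb(arg); /* metadata already in: run immediately */
      } else {
        c->saved_cb = cb; /* too early: park until metadata arrives */
        c->saved_arg = arg;
        pthread_mutex_unlock(&c->mu);
      }
    }

    void on_metadata(ordered_call *c) {
      pthread_mutex_lock(&c->mu);
      c->metadata_seen = true;
      void (*cb)(void *) = c->saved_cb;
      void *arg = c->saved_arg;
      c->saved_cb = NULL;
      pthread_mutex_unlock(&c->mu);
      if (cb != NULL) cb(arg); /* replay the parked data callback */
    }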
@@ -1011,19 +1108,6 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
     grpc_metadata_batch_destroy(
         &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
   }
-  if (bctl->recv_initial_metadata) {
-    grpc_metadata_batch *md =
-        &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
-    grpc_metadata_batch_filter(md, recv_initial_filter, call);
-
-    if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
-            0 &&
-        !call->is_client) {
-      GPR_TIMER_BEGIN("set_deadline_alarm", 0);
-      set_deadline_alarm(exec_ctx, call, md->deadline);
-      GPR_TIMER_END("set_deadline_alarm", 0);
-    }
-  }
   if (bctl->recv_final_op) {
     grpc_metadata_batch *md =
         &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
@@ -1065,45 +1149,6 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
   }
 }
 
-static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
-                                   bool success) {
-  batch_control *bctl = bctlp;
-  grpc_call *call = bctl->call;
-
-  if (call->receiving_stream == NULL) {
-    *call->receiving_buffer = NULL;
-    call->receiving_message = 0;
-    if (gpr_unref(&bctl->steps_to_complete)) {
-      post_batch_completion(exec_ctx, bctl);
-    }
-  } else if (call->receiving_stream->length >
-             grpc_channel_get_max_message_length(call->channel)) {
-    cancel_with_status(exec_ctx, call, GRPC_STATUS_INTERNAL,
-                       "Max message size exceeded");
-    grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
-    call->receiving_stream = NULL;
-    *call->receiving_buffer = NULL;
-    call->receiving_message = 0;
-    if (gpr_unref(&bctl->steps_to_complete)) {
-      post_batch_completion(exec_ctx, bctl);
-    }
-  } else {
-    call->test_only_last_message_flags = call->receiving_stream->flags;
-    if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
-        (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
-      *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
-          NULL, 0, call->compression_algorithm);
-    } else {
-      *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
-    }
-    grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
-                      bctl);
-    continue_receiving_slices(exec_ctx, bctl);
-    /* early out */
-    return;
-  }
-}
-
 static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
                                         grpc_call *call, const grpc_op *ops,
                                         size_t nops, void *notify_tag,
@@ -1273,9 +1318,14 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
         }
         call->received_initial_metadata = 1;
         call->buffered_metadata[0] = op->data.recv_initial_metadata;
+        grpc_closure_init(&call->receiving_initial_metadata_ready,
+                          receiving_initial_metadata_ready, bctl);
         bctl->recv_initial_metadata = 1;
         stream_op.recv_initial_metadata =
             &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+        stream_op.recv_initial_metadata_ready =
+            &call->receiving_initial_metadata_ready;
+        num_completion_callbacks_needed++;
         break;
       case GRPC_OP_RECV_MESSAGE:
         /* Flag validation: currently allow no flags */
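
Because initial metadata now completes through its own closure, the batch has one more step to count down before it can be reported, hence num_completion_callbacks_needed++. The completion gate itself is just a shared counter: each finished callback decrements it, and whichever callback reaches zero posts the batch, mirroring the role of steps_to_complete above. A tiny C11 sketch of that gate, with illustrative names:

    #include <stdatomic.h>
    #include <stdio.h>

    /* N callbacks share one counter; the last one to finish publishes
       the batch result. Initialize with atomic_init(&g->steps_remaining, n). */
    typedef struct {
      atomic_int steps_remaining;
    } batch_gate;

    static void step_done(batch_gate *g) {
      /* fetch_sub returns the previous value; 1 means we were last */
      if (atomic_fetch_sub_explicit(&g->steps_remaining, 1,
                                    memory_order_acq_rel) == 1) {
        printf("batch complete: post to completion queue\n");
      }
    }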
@@ -172,7 +172,7 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
   c->base.vtable = &connector_vtable;
   gpr_ref_init(&c->refs, 1);
   args->args = final_args;
-  s = grpc_subchannel_create(&c->base, args);
+  s = grpc_subchannel_create(exec_ctx, &c->base, args);
   grpc_connector_unref(exec_ctx, &c->base);
   grpc_channel_args_destroy(final_args);
   return s;
@@ -36,26 +36,29 @@
 #include <stdio.h>
 #include <string.h>
 
-#include "src/core/iomgr/timer.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
 #include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/profiling/timers.h"
 #include "src/core/support/string.h"
 #include "src/core/surface/api_trace.h"
 #include "src/core/surface/call.h"
 #include "src/core/surface/event_string.h"
 #include "src/core/surface/surface_trace.h"
-#include "src/core/profiling/timers.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
 
 typedef struct {
-  grpc_pollset_worker *worker;
+  grpc_pollset_worker **worker;
   void *tag;
 } plucker;
 
 /* Completion queue structure */
 struct grpc_completion_queue {
+  /** owned by pollset */
+  gpr_mu *mu;
   /** completed events */
   grpc_cq_completion completed_head;
   grpc_cq_completion *completed_tail;
@@ -63,8 +66,6 @@ struct grpc_completion_queue {
   gpr_refcount pending_events;
   /** Once owning_refs drops to zero, we will destroy the cq */
   gpr_refcount owning_refs;
-  /** the set of low level i/o things that concern this cq */
-  grpc_pollset pollset;
   /** 0 initially, 1 once we've begun shutting down */
   int shutdown;
   int shutdown_called;
@@ -82,6 +83,8 @@ struct grpc_completion_queue {
   grpc_completion_queue *next_free;
 };
 
+#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
+
 static gpr_mu g_freelist_mu;
 grpc_completion_queue *g_freelist;
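
POLLSET_FROM_CQ reflects a representation change in this release: the completion queue no longer embeds a grpc_pollset by value. The pollset is now an opaque region of grpc_pollset_size() bytes allocated in the same block immediately after the struct (see grpc_completion_queue_create below), and the queue keeps only mu, a pointer to the pollset-owned mutex. A standalone sketch of this trailing-allocation idiom, with illustrative names rather than the grpc API:

    #include <stdlib.h>
    #include <string.h>

    /* An opaque sub-object whose size is only known at runtime lives in
       the same heap block, right after its owner, so one malloc/free
       manages both and no separate pointer field is needed. */
    typedef struct owner {
      int shutdown;
      /* opaque region of runtime-determined size follows */
    } owner;

    #define SUBOBJECT_FROM_OWNER(o) ((void *)((o) + 1))

    static size_t subobject_size(void) { return 64; } /* stand-in for grpc_pollset_size() */

    static owner *owner_create(void) {
      size_t total = sizeof(owner) + subobject_size();
      owner *o = malloc(total);
      if (o != NULL) memset(o, 0, total);
      return o; /* free(o) releases the owner and the sub-object together */
    }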
 
@@ -94,7 +97,7 @@ void grpc_cq_global_shutdown(void) {
   gpr_mu_destroy(&g_freelist_mu);
   while (g_freelist) {
     grpc_completion_queue *next = g_freelist->next_free;
-    grpc_pollset_destroy(&g_freelist->pollset);
+    grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
 #ifndef NDEBUG
     gpr_free(g_freelist->outstanding_tags);
 #endif
@@ -124,8 +127,8 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
   if (g_freelist == NULL) {
     gpr_mu_unlock(&g_freelist_mu);
 
-    cc = gpr_malloc(sizeof(grpc_completion_queue));
-    grpc_pollset_init(&cc->pollset);
+    cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
+    grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
 #ifndef NDEBUG
     cc->outstanding_tags = NULL;
     cc->outstanding_tag_capacity = 0;
@@ -184,7 +187,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
 #endif
   if (gpr_unref(&cc->owning_refs)) {
     GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
-    grpc_pollset_reset(&cc->pollset);
+    grpc_pollset_reset(POLLSET_FROM_CQ(cc));
     gpr_mu_lock(&g_freelist_mu);
     cc->next_free = g_freelist;
     g_freelist = cc;
@@ -194,7 +197,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
 
 void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
 #ifndef NDEBUG
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_lock(cc->mu);
   GPR_ASSERT(!cc->shutdown_called);
   if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
     cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
@@ -203,7 +206,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
                                              cc->outstanding_tag_capacity);
   }
   cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
-  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_unlock(cc->mu);
 #endif
   gpr_ref(&cc->pending_events);
 }
@@ -231,7 +234,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
   storage->next =
       ((uintptr_t)&cc->completed_head) | ((uintptr_t)(success != 0));
 
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_lock(cc->mu);
 #ifndef NDEBUG
   for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
     if (cc->outstanding_tags[i] == tag) {
@@ -252,12 +255,12 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
     pluck_worker = NULL;
     for (i = 0; i < cc->num_pluckers; i++) {
       if (cc->pluckers[i].tag == tag) {
-        pluck_worker = cc->pluckers[i].worker;
+        pluck_worker = *cc->pluckers[i].worker;
         break;
       }
     }
-    grpc_pollset_kick(&cc->pollset, pluck_worker);
-    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+    grpc_pollset_kick(POLLSET_FROM_CQ(cc), pluck_worker);
+    gpr_mu_unlock(cc->mu);
   } else {
     cc->completed_tail->next =
         ((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@@ -265,8 +268,9 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
     GPR_ASSERT(!cc->shutdown);
     GPR_ASSERT(cc->shutdown_called);
     cc->shutdown = 1;
-    grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
-    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+    grpc_pollset_shutdown(exec_ctx, POLLSET_FROM_CQ(cc),
+                          &cc->pollset_shutdown_done);
+    gpr_mu_unlock(cc->mu);
   }
 
   GPR_TIMER_END("grpc_cq_end_op", 0);
@@ -275,7 +279,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
 grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline, void *reserved) {
   grpc_event ret;
-  grpc_pollset_worker worker;
+  grpc_pollset_worker *worker = NULL;
   int first_loop = 1;
   gpr_timespec now;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -294,7 +298,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
 
   GRPC_CQ_INTERNAL_REF(cc, "next");
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_lock(cc->mu);
   for (;;) {
     if (cc->completed_tail != &cc->completed_head) {
       grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
@@ -302,7 +306,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
       if (c == cc->completed_tail) {
         cc->completed_tail = &cc->completed_head;
       }
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
       ret.type = GRPC_OP_COMPLETE;
       ret.success = c->next & 1u;
       ret.tag = c->tag;
@@ -310,20 +314,34 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
       break;
     }
     if (cc->shutdown) {
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    now = gpr_now(GPR_CLOCK_MONOTONIC);
    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    first_loop = 0;
-    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
+    /* Check alarms - these are a global resource so we just ping
+       each time through on every pollset.
+       May update deadline to ensure timely wakeups.
+       TODO(ctiller): can this work be localized? */
+    gpr_timespec iteration_deadline = deadline;
+    if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+      GPR_TIMER_MARK("alarm_triggered", 0);
+      gpr_mu_unlock(cc->mu);
+      grpc_exec_ctx_flush(&exec_ctx);
+      gpr_mu_lock(cc->mu);
+      continue;
+    } else {
+      grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+                        iteration_deadline);
+    }
  }
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
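
Both wait loops (next and pluck) now share the shape shown above: each pass first runs any due timers, a global resource, and only then blocks in the poller, clamping the poll deadline to the next timer expiry so a sleeping thread still wakes in time. A schematic of that loop follows; the types and helpers are declared here as stand-ins, not real gpr/grpc calls.

    #include <stdbool.h>

    /* Assumed illustrative types and helpers (not the grpc API). */
    typedef long long deadline_t;
    extern bool event_ready(void);
    extern deadline_t now(void);
    extern bool run_due_timers(deadline_t now, deadline_t *next_wakeup);
    extern void poll_until(deadline_t deadline);

    void wait_for_event(deadline_t caller_deadline) {
      for (;;) {
        if (event_ready()) return;            /* completion available */
        if (now() >= caller_deadline) return; /* caller timed out */
        deadline_t iter_deadline = caller_deadline;
        if (run_due_timers(now(), &iter_deadline)) {
          continue;                /* timers ran: re-check the queue */
        }
        poll_until(iter_deadline); /* wake in time for the next timer */
      }
    }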
@@ -335,7 +353,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
 }
 
 static int add_plucker(grpc_completion_queue *cc, void *tag,
-                       grpc_pollset_worker *worker) {
+                       grpc_pollset_worker **worker) {
   if (cc->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
     return 0;
   }
@@ -346,7 +364,7 @@ static int add_plucker(grpc_completion_queue *cc, void *tag,
 }
 
 static void del_plucker(grpc_completion_queue *cc, void *tag,
-                        grpc_pollset_worker *worker) {
+                        grpc_pollset_worker **worker) {
   int i;
   for (i = 0; i < cc->num_pluckers; i++) {
     if (cc->pluckers[i].tag == tag && cc->pluckers[i].worker == worker) {
@@ -363,7 +381,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   grpc_event ret;
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
-  grpc_pollset_worker worker;
+  grpc_pollset_worker *worker = NULL;
   gpr_timespec now;
   int first_loop = 1;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -382,7 +400,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
 
   GRPC_CQ_INTERNAL_REF(cc, "pluck");
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_lock(cc->mu);
   for (;;) {
     prev = &cc->completed_head;
     while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
@@ -392,7 +410,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
         if (c == cc->completed_tail) {
           cc->completed_tail = prev;
         }
-        gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+        gpr_mu_unlock(cc->mu);
         ret.type = GRPC_OP_COMPLETE;
         ret.success = c->next & 1u;
         ret.tag = c->tag;
@@ -402,7 +420,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
       prev = c;
     }
     if (cc->shutdown) {
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_SHUTDOWN;
       break;
@@ -412,7 +430,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
           "Too many outstanding grpc_completion_queue_pluck calls: maximum "
           "is %d",
           GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
       memset(&ret, 0, sizeof(ret));
       /* TODO(ctiller): should we use a different result here */
       ret.type = GRPC_QUEUE_TIMEOUT;
@@ -421,13 +439,26 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
     now = gpr_now(GPR_CLOCK_MONOTONIC);
     if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
       del_plucker(cc, tag, &worker);
-      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      gpr_mu_unlock(cc->mu);
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
       break;
     }
     first_loop = 0;
-    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
+    /* Check alarms - these are a global resource so we just ping
+       each time through on every pollset.
+       May update deadline to ensure timely wakeups.
+       TODO(ctiller): can this work be localized? */
+    gpr_timespec iteration_deadline = deadline;
+    if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+      GPR_TIMER_MARK("alarm_triggered", 0);
+      gpr_mu_unlock(cc->mu);
+      grpc_exec_ctx_flush(&exec_ctx);
+      gpr_mu_lock(cc->mu);
+    } else {
+      grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+                        iteration_deadline);
+    }
     del_plucker(cc, tag, &worker);
   }
 done:
@@ -446,9 +477,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
   GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_lock(cc->mu);
   if (cc->shutdown_called) {
-    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+    gpr_mu_unlock(cc->mu);
     GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
     return;
   }
@@ -456,9 +487,10 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
   if (gpr_unref(&cc->pending_events)) {
     GPR_ASSERT(!cc->shutdown);
     cc->shutdown = 1;
-    grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
+    grpc_pollset_shutdown(&exec_ctx, POLLSET_FROM_CQ(cc),
+                          &cc->pollset_shutdown_done);
   }
-  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+  gpr_mu_unlock(cc->mu);
   grpc_exec_ctx_finish(&exec_ctx);
   GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
 }
@@ -472,7 +504,7 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
 }
 
 grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
-  return &cc->pollset;
+  return POLLSET_FROM_CQ(cc);
 }
 
 void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }