grpc 0.13.0 → 0.13.1.pre1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (155)
  1. checksums.yaml +4 -4
  2. data/.yardopts +1 -0
  3. data/Makefile +1114 -937
  4. data/include/grpc/census.h +71 -89
  5. data/include/grpc/compression.h +7 -7
  6. data/include/grpc/grpc.h +65 -68
  7. data/include/grpc/grpc_security.h +38 -38
  8. data/include/grpc/impl/codegen/alloc.h +7 -7
  9. data/include/grpc/impl/codegen/byte_buffer.h +13 -13
  10. data/include/grpc/impl/codegen/grpc_types.h +7 -2
  11. data/include/grpc/impl/codegen/log.h +5 -5
  12. data/include/grpc/impl/codegen/port_platform.h +14 -6
  13. data/include/grpc/impl/codegen/slice.h +15 -15
  14. data/include/grpc/impl/codegen/slice_buffer.h +17 -17
  15. data/include/grpc/impl/codegen/sync.h +26 -22
  16. data/include/grpc/impl/codegen/time.h +22 -24
  17. data/include/grpc/support/avl.h +9 -8
  18. data/include/grpc/support/cmdline.h +12 -12
  19. data/include/grpc/support/cpu.h +2 -2
  20. data/include/grpc/support/histogram.h +22 -22
  21. data/include/grpc/support/host_port.h +2 -2
  22. data/include/grpc/support/log_win32.h +1 -1
  23. data/include/grpc/support/string_util.h +2 -2
  24. data/include/grpc/support/subprocess.h +5 -5
  25. data/include/grpc/support/thd.h +9 -9
  26. data/include/grpc/support/useful.h +3 -1
  27. data/src/core/census/context.c +64 -85
  28. data/src/core/census/grpc_filter.c +2 -2
  29. data/src/core/census/mlog.c +600 -0
  30. data/src/core/census/mlog.h +95 -0
  31. data/src/core/channel/channel_args.c +67 -6
  32. data/src/core/channel/channel_args.h +7 -1
  33. data/src/core/channel/client_channel.c +26 -36
  34. data/src/core/channel/client_uchannel.c +1 -1
  35. data/src/core/channel/http_client_filter.c +2 -2
  36. data/src/core/channel/http_server_filter.c +2 -2
  37. data/src/core/channel/subchannel_call_holder.c +5 -7
  38. data/src/core/client_config/connector.c +3 -2
  39. data/src/core/client_config/connector.h +2 -2
  40. data/src/core/client_config/lb_policies/load_balancer_api.c +163 -0
  41. data/src/core/client_config/lb_policies/load_balancer_api.h +85 -0
  42. data/src/core/client_config/lb_policies/pick_first.c +10 -11
  43. data/src/core/client_config/lb_policies/round_robin.c +7 -8
  44. data/src/core/client_config/lb_policy.c +3 -3
  45. data/src/core/client_config/lb_policy.h +3 -2
  46. data/src/core/client_config/subchannel.c +51 -21
  47. data/src/core/client_config/subchannel.h +15 -6
  48. data/src/core/client_config/subchannel_index.c +261 -0
  49. data/src/core/client_config/subchannel_index.h +77 -0
  50. data/src/core/compression/{algorithm.c → compression_algorithm.c} +0 -0
  51. data/src/core/httpcli/httpcli.c +13 -11
  52. data/src/core/httpcli/httpcli.h +3 -2
  53. data/src/core/httpcli/httpcli_security_connector.c +7 -7
  54. data/src/core/iomgr/fd_posix.c +4 -2
  55. data/src/core/iomgr/iocp_windows.c +10 -6
  56. data/src/core/iomgr/iocp_windows.h +9 -2
  57. data/src/core/iomgr/iomgr.c +18 -2
  58. data/src/core/iomgr/iomgr_internal.h +5 -1
  59. data/src/core/iomgr/pollset.h +9 -10
  60. data/src/core/iomgr/pollset_multipoller_with_epoll.c +1 -0
  61. data/src/core/iomgr/pollset_multipoller_with_poll_posix.c +10 -5
  62. data/src/core/iomgr/pollset_posix.c +30 -35
  63. data/src/core/iomgr/pollset_posix.h +10 -6
  64. data/src/core/iomgr/pollset_set.h +3 -9
  65. data/src/core/iomgr/pollset_set_posix.c +23 -3
  66. data/src/core/iomgr/pollset_set_posix.h +2 -18
  67. data/src/core/iomgr/pollset_set_windows.c +3 -3
  68. data/src/core/iomgr/pollset_set_windows.h +2 -2
  69. data/src/core/iomgr/pollset_windows.c +24 -21
  70. data/src/core/iomgr/pollset_windows.h +1 -5
  71. data/src/core/iomgr/tcp_client_posix.c +7 -5
  72. data/src/core/iomgr/tcp_posix.c +4 -2
  73. data/src/core/iomgr/tcp_server_windows.c +1 -2
  74. data/src/core/iomgr/timer.c +2 -3
  75. data/src/core/iomgr/timer.h +21 -1
  76. data/src/core/iomgr/timer_heap.c +10 -12
  77. data/src/core/iomgr/udp_server.c +5 -4
  78. data/src/core/iomgr/udp_server.h +1 -0
  79. data/src/core/iomgr/workqueue_posix.c +1 -0
  80. data/src/core/iomgr/workqueue_posix.h +3 -1
  81. data/src/core/proto/grpc/lb/v0/load_balancer.pb.c +119 -0
  82. data/src/core/proto/grpc/lb/v0/load_balancer.pb.h +182 -0
  83. data/src/core/security/{base64.c → b64.c} +1 -1
  84. data/src/core/security/{base64.h → b64.h} +1 -1
  85. data/src/core/security/client_auth_filter.c +0 -1
  86. data/src/core/security/credentials.c +12 -5
  87. data/src/core/security/credentials.h +3 -3
  88. data/src/core/security/google_default_credentials.c +24 -19
  89. data/src/core/security/handshake.c +15 -7
  90. data/src/core/security/handshake.h +2 -1
  91. data/src/core/security/json_token.c +1 -1
  92. data/src/core/security/jwt_verifier.c +1 -1
  93. data/src/core/security/security_connector.c +84 -64
  94. data/src/core/security/security_connector.h +42 -22
  95. data/src/core/security/security_context.c +8 -3
  96. data/src/core/security/server_auth_filter.c +2 -2
  97. data/src/core/security/server_secure_chttp2.c +7 -7
  98. data/src/core/support/avl.c +2 -2
  99. data/src/core/support/env_linux.c +17 -0
  100. data/src/core/support/{file.c → load_file.c} +2 -2
  101. data/src/core/support/{file.h → load_file.h} +4 -12
  102. data/src/core/support/sync.c +6 -1
  103. data/src/core/support/time_posix.c +1 -1
  104. data/src/core/{iomgr/timer_internal.h → support/tmpfile.h} +17 -23
  105. data/src/core/support/{file_posix.c → tmpfile_posix.c} +2 -2
  106. data/src/core/support/{file_win32.c → tmpfile_win32.c} +2 -2
  107. data/src/core/surface/alarm.c +3 -2
  108. data/src/core/surface/call.c +102 -52
  109. data/src/core/surface/channel_create.c +1 -1
  110. data/src/core/surface/completion_queue.c +73 -41
  111. data/src/core/surface/init.c +4 -0
  112. data/src/core/surface/lame_client.c +1 -2
  113. data/src/core/surface/secure_channel_create.c +6 -7
  114. data/src/core/surface/server.c +13 -5
  115. data/src/core/surface/validate_metadata.c +1 -1
  116. data/src/core/surface/version.c +1 -1
  117. data/src/core/transport/chttp2/internal.h +22 -10
  118. data/src/core/transport/chttp2/parsing.c +3 -3
  119. data/src/core/transport/chttp2/stream_lists.c +39 -21
  120. data/src/core/transport/chttp2/writing.c +19 -28
  121. data/src/core/transport/chttp2_transport.c +80 -37
  122. data/src/core/transport/metadata.c +8 -0
  123. data/src/core/transport/static_metadata.c +17 -17
  124. data/src/core/transport/static_metadata.h +3 -3
  125. data/src/core/transport/transport.c +2 -1
  126. data/src/core/transport/transport.h +12 -5
  127. data/src/ruby/ext/grpc/extconf.rb +1 -0
  128. data/src/ruby/ext/grpc/rb_call.c +6 -0
  129. data/src/ruby/ext/grpc/rb_call_credentials.c +12 -14
  130. data/src/ruby/ext/grpc/rb_channel.c +8 -14
  131. data/src/ruby/ext/grpc/rb_channel_credentials.c +11 -12
  132. data/src/ruby/ext/grpc/rb_grpc.c +19 -18
  133. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -0
  134. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +8 -2
  135. data/src/ruby/lib/grpc/core/time_consts.rb +2 -2
  136. data/src/ruby/lib/grpc/errors.rb +2 -2
  137. data/src/ruby/lib/grpc/generic/rpc_server.rb +58 -39
  138. data/src/ruby/lib/grpc/version.rb +1 -1
  139. data/src/ruby/pb/README.md +2 -2
  140. data/src/ruby/pb/generate_proto_ruby.sh +2 -2
  141. data/src/ruby/pb/grpc/health/checker.rb +11 -11
  142. data/src/ruby/pb/grpc/health/v1/health.rb +28 -0
  143. data/src/ruby/pb/grpc/health/{v1alpha → v1}/health_services.rb +4 -4
  144. data/src/ruby/spec/client_server_spec.rb +2 -1
  145. data/src/ruby/spec/generic/rpc_server_spec.rb +3 -22
  146. data/src/ruby/spec/pb/health/checker_spec.rb +22 -36
  147. data/third_party/nanopb/pb.h +547 -0
  148. data/third_party/nanopb/pb_common.c +97 -0
  149. data/third_party/nanopb/pb_common.h +42 -0
  150. data/third_party/nanopb/pb_decode.c +1319 -0
  151. data/third_party/nanopb/pb_decode.h +149 -0
  152. data/third_party/nanopb/pb_encode.c +690 -0
  153. data/third_party/nanopb/pb_encode.h +154 -0
  154. metadata +32 -16
  155. data/src/ruby/pb/grpc/health/v1alpha/health.rb +0 -29
@@ -45,6 +45,7 @@
45
45
  #include <grpc/support/log.h>
46
46
  #include <grpc/support/useful.h>
47
47
  #include "src/core/iomgr/fd_posix.h"
48
+ #include "src/core/iomgr/pollset_posix.h"
48
49
  #include "src/core/profiling/timers.h"
49
50
  #include "src/core/support/block_annotate.h"
50
51
 
@@ -42,13 +42,15 @@
42
42
  #include <stdlib.h>
43
43
  #include <string.h>
44
44
 
45
- #include "src/core/iomgr/fd_posix.h"
46
- #include "src/core/iomgr/iomgr_internal.h"
47
- #include "src/core/support/block_annotate.h"
48
45
  #include <grpc/support/alloc.h>
49
46
  #include <grpc/support/log.h>
50
47
  #include <grpc/support/useful.h>
51
48
 
49
+ #include "src/core/iomgr/fd_posix.h"
50
+ #include "src/core/iomgr/iomgr_internal.h"
51
+ #include "src/core/iomgr/pollset_posix.h"
52
+ #include "src/core/support/block_annotate.h"
53
+
52
54
  typedef struct {
53
55
  /* all polled fds */
54
56
  size_t fd_count;
@@ -120,6 +122,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
120
122
  } else {
121
123
  h->fds[fd_count++] = h->fds[i];
122
124
  watchers[pfd_count].fd = h->fds[i];
125
+ GRPC_FD_REF(watchers[pfd_count].fd, "multipoller_start");
123
126
  pfds[pfd_count].fd = h->fds[i]->fd;
124
127
  pfds[pfd_count].revents = 0;
125
128
  pfd_count++;
@@ -133,8 +136,10 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
133
136
  gpr_mu_unlock(&pollset->mu);
134
137
 
135
138
  for (i = 2; i < pfd_count; i++) {
136
- pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
137
- POLLIN, POLLOUT, &watchers[i]);
139
+ grpc_fd *fd = watchers[i].fd;
140
+ pfds[i].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
141
+ POLLOUT, &watchers[i]);
142
+ GRPC_FD_UNREF(fd, "multipoller_start");
138
143
  }
139
144
 
140
145
  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
@@ -42,17 +42,16 @@
42
42
  #include <string.h>
43
43
  #include <unistd.h>
44
44
 
45
- #include "src/core/iomgr/timer_internal.h"
46
- #include "src/core/iomgr/fd_posix.h"
47
- #include "src/core/iomgr/iomgr_internal.h"
48
- #include "src/core/iomgr/socket_utils_posix.h"
49
- #include "src/core/profiling/timers.h"
50
- #include "src/core/support/block_annotate.h"
51
45
  #include <grpc/support/alloc.h>
52
46
  #include <grpc/support/log.h>
53
47
  #include <grpc/support/thd.h>
54
48
  #include <grpc/support/tls.h>
55
49
  #include <grpc/support/useful.h>
50
+ #include "src/core/iomgr/fd_posix.h"
51
+ #include "src/core/iomgr/iomgr_internal.h"
52
+ #include "src/core/iomgr/socket_utils_posix.h"
53
+ #include "src/core/profiling/timers.h"
54
+ #include "src/core/support/block_annotate.h"
56
55
 
57
56
  GPR_TLS_DECL(g_current_thread_poller);
58
57
  GPR_TLS_DECL(g_current_thread_worker);
@@ -98,6 +97,8 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
98
97
  worker->prev->next = worker->next->prev = worker;
99
98
  }
100
99
 
100
+ size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
101
+
101
102
  void grpc_pollset_kick_ext(grpc_pollset *p,
102
103
  grpc_pollset_worker *specific_worker,
103
104
  uint32_t flags) {
@@ -187,8 +188,9 @@ void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
187
188
 
188
189
  static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
189
190
 
190
- void grpc_pollset_init(grpc_pollset *pollset) {
191
+ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
191
192
  gpr_mu_init(&pollset->mu);
193
+ *mu = &pollset->mu;
192
194
  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
193
195
  pollset->in_flight_cbs = 0;
194
196
  pollset->shutting_down = 0;
@@ -205,7 +207,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
205
207
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));
206
208
  GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
207
209
  pollset->vtable->destroy(pollset);
208
- gpr_mu_destroy(&pollset->mu);
209
210
  while (pollset->local_wakeup_cache) {
210
211
  grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
211
212
  grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
@@ -247,8 +248,11 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
247
248
  }
248
249
 
249
250
  void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
250
- grpc_pollset_worker *worker, gpr_timespec now,
251
+ grpc_pollset_worker **worker_hdl, gpr_timespec now,
251
252
  gpr_timespec deadline) {
253
+ grpc_pollset_worker worker;
254
+ *worker_hdl = &worker;
255
+
252
256
  /* pollset->mu already held */
253
257
  int added_worker = 0;
254
258
  int locked = 1;
@@ -256,16 +260,16 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
256
260
  int keep_polling = 0;
257
261
  GPR_TIMER_BEGIN("grpc_pollset_work", 0);
258
262
  /* this must happen before we (potentially) drop pollset->mu */
259
- worker->next = worker->prev = NULL;
260
- worker->reevaluate_polling_on_wakeup = 0;
263
+ worker.next = worker.prev = NULL;
264
+ worker.reevaluate_polling_on_wakeup = 0;
261
265
  if (pollset->local_wakeup_cache != NULL) {
262
- worker->wakeup_fd = pollset->local_wakeup_cache;
263
- pollset->local_wakeup_cache = worker->wakeup_fd->next;
266
+ worker.wakeup_fd = pollset->local_wakeup_cache;
267
+ pollset->local_wakeup_cache = worker.wakeup_fd->next;
264
268
  } else {
265
- worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
266
- grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
269
+ worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
270
+ grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
267
271
  }
268
- worker->kicked_specifically = 0;
272
+ worker.kicked_specifically = 0;
269
273
  /* If there's work waiting for the pollset to be idle, and the
270
274
  pollset is idle, then do that work */
271
275
  if (!grpc_pollset_has_workers(pollset) &&
@@ -274,16 +278,6 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
274
278
  grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
275
279
  goto done;
276
280
  }
277
- /* Check alarms - these are a global resource so we just ping
278
- each time through on every pollset.
279
- May update deadline to ensure timely wakeups.
280
- TODO(ctiller): can this work be localized? */
281
- if (grpc_timer_check(exec_ctx, now, &deadline)) {
282
- GPR_TIMER_MARK("grpc_pollset_work.alarm_triggered", 0);
283
- gpr_mu_unlock(&pollset->mu);
284
- locked = 0;
285
- goto done;
286
- }
287
281
  /* If we're shutting down then we don't execute any extended work */
288
282
  if (pollset->shutting_down) {
289
283
  GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
@@ -304,13 +298,13 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
304
298
  keep_polling = 0;
305
299
  if (!pollset->kicked_without_pollers) {
306
300
  if (!added_worker) {
307
- push_front_worker(pollset, worker);
301
+ push_front_worker(pollset, &worker);
308
302
  added_worker = 1;
309
- gpr_tls_set(&g_current_thread_worker, (intptr_t)worker);
303
+ gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
310
304
  }
311
305
  gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
312
306
  GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
313
- pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
307
+ pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
314
308
  deadline, now);
315
309
  GPR_TIMER_END("maybe_work_and_unlock", 0);
316
310
  locked = 0;
@@ -332,10 +326,10 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
332
326
  /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
333
327
  GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
334
328
  a loop */
335
- if (worker->reevaluate_polling_on_wakeup) {
336
- worker->reevaluate_polling_on_wakeup = 0;
329
+ if (worker.reevaluate_polling_on_wakeup) {
330
+ worker.reevaluate_polling_on_wakeup = 0;
337
331
  pollset->kicked_without_pollers = 0;
338
- if (queued_work || worker->kicked_specifically) {
332
+ if (queued_work || worker.kicked_specifically) {
339
333
  /* If there's queued work on the list, then set the deadline to be
340
334
  immediate so we get back out of the polling loop quickly */
341
335
  deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -344,12 +338,12 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
344
338
  }
345
339
  }
346
340
  if (added_worker) {
347
- remove_worker(pollset, worker);
341
+ remove_worker(pollset, &worker);
348
342
  gpr_tls_set(&g_current_thread_worker, 0);
349
343
  }
350
344
  /* release wakeup fd to the local pool */
351
- worker->wakeup_fd->next = pollset->local_wakeup_cache;
352
- pollset->local_wakeup_cache = worker->wakeup_fd;
345
+ worker.wakeup_fd->next = pollset->local_wakeup_cache;
346
+ pollset->local_wakeup_cache = worker.wakeup_fd;
353
347
  /* check shutdown conditions */
354
348
  if (pollset->shutting_down) {
355
349
  if (grpc_pollset_has_workers(pollset)) {
@@ -371,6 +365,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
371
365
  gpr_mu_lock(&pollset->mu);
372
366
  }
373
367
  }
368
+ *worker_hdl = NULL;
374
369
  GPR_TIMER_END("grpc_pollset_work", 0);
375
370
  }
376
371
 
@@ -37,8 +37,10 @@
37
37
  #include <poll.h>
38
38
 
39
39
  #include <grpc/support/sync.h>
40
+
40
41
  #include "src/core/iomgr/exec_ctx.h"
41
42
  #include "src/core/iomgr/iomgr.h"
43
+ #include "src/core/iomgr/pollset.h"
42
44
  #include "src/core/iomgr/wakeup_fd_posix.h"
43
45
 
44
46
  typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@@ -53,15 +55,15 @@ typedef struct grpc_cached_wakeup_fd {
53
55
  struct grpc_cached_wakeup_fd *next;
54
56
  } grpc_cached_wakeup_fd;
55
57
 
56
- typedef struct grpc_pollset_worker {
58
+ struct grpc_pollset_worker {
57
59
  grpc_cached_wakeup_fd *wakeup_fd;
58
60
  int reevaluate_polling_on_wakeup;
59
61
  int kicked_specifically;
60
62
  struct grpc_pollset_worker *next;
61
63
  struct grpc_pollset_worker *prev;
62
- } grpc_pollset_worker;
64
+ };
63
65
 
64
- typedef struct grpc_pollset {
66
+ struct grpc_pollset {
65
67
  /* pollsets under posix can mutate representation as fds are added and
66
68
  removed.
67
69
  For example, we may choose a poll() based implementation on linux for
@@ -81,7 +83,7 @@ typedef struct grpc_pollset {
81
83
  } data;
82
84
  /* Local cache of eventfds for workers */
83
85
  grpc_cached_wakeup_fd *local_wakeup_cache;
84
- } grpc_pollset;
86
+ };
85
87
 
86
88
  struct grpc_pollset_vtable {
87
89
  void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -93,8 +95,6 @@ struct grpc_pollset_vtable {
93
95
  void (*destroy)(grpc_pollset *pollset);
94
96
  };
95
97
 
96
- #define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
97
-
98
98
  /* Add an fd to a pollset */
99
99
  void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
100
100
  struct grpc_fd *fd);
@@ -142,6 +142,10 @@ int grpc_pollset_has_workers(grpc_pollset *pollset);
142
142
  void grpc_remove_fd_from_all_epoll_sets(int fd);
143
143
 
144
144
  /* override to allow tests to hook poll() usage */
145
+ /* NOTE: Any changes to grpc_poll_function must take place when the gRPC
146
+ is certainly not doing any polling anywhere.
147
+ Otherwise, there might be a race between changing the variable and actually
148
+ doing a polling operation */
145
149
  typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
146
150
  extern grpc_poll_function_type grpc_poll_function;
147
151
  extern grpc_wakeup_fd grpc_global_wakeup_fd;
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015, Google Inc.
3
+ * Copyright 2015-2016, Google Inc.
4
4
  * All rights reserved.
5
5
  *
6
6
  * Redistribution and use in source and binary forms, with or without
@@ -41,15 +41,9 @@
41
41
  fd's (etc) that have been registered with the set_set to that pollset.
42
42
  Registering fd's automatically adds them to all current pollsets. */
43
43
 
44
- #ifdef GPR_POSIX_SOCKET
45
- #include "src/core/iomgr/pollset_set_posix.h"
46
- #endif
44
+ typedef struct grpc_pollset_set grpc_pollset_set;
47
45
 
48
- #ifdef GPR_WIN32
49
- #include "src/core/iomgr/pollset_set_windows.h"
50
- #endif
51
-
52
- void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
46
+ grpc_pollset_set *grpc_pollset_set_create(void);
53
47
  void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
54
48
  void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
55
49
  grpc_pollset_set *pollset_set,
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015, Google Inc.
3
+ * Copyright 2015-2016, Google Inc.
4
4
  * All rights reserved.
5
5
  *
6
6
  * Redistribution and use in source and binary forms, with or without
@@ -41,11 +41,30 @@
41
41
  #include <grpc/support/alloc.h>
42
42
  #include <grpc/support/useful.h>
43
43
 
44
- #include "src/core/iomgr/pollset_set.h"
44
+ #include "src/core/iomgr/pollset_posix.h"
45
+ #include "src/core/iomgr/pollset_set_posix.h"
45
46
 
46
- void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {
47
+ struct grpc_pollset_set {
48
+ gpr_mu mu;
49
+
50
+ size_t pollset_count;
51
+ size_t pollset_capacity;
52
+ grpc_pollset **pollsets;
53
+
54
+ size_t pollset_set_count;
55
+ size_t pollset_set_capacity;
56
+ struct grpc_pollset_set **pollset_sets;
57
+
58
+ size_t fd_count;
59
+ size_t fd_capacity;
60
+ grpc_fd **fds;
61
+ };
62
+
63
+ grpc_pollset_set *grpc_pollset_set_create(void) {
64
+ grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
47
65
  memset(pollset_set, 0, sizeof(*pollset_set));
48
66
  gpr_mu_init(&pollset_set->mu);
67
+ return pollset_set;
49
68
  }
50
69
 
51
70
  void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
@@ -57,6 +76,7 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
57
76
  gpr_free(pollset_set->pollsets);
58
77
  gpr_free(pollset_set->pollset_sets);
59
78
  gpr_free(pollset_set->fds);
79
+ gpr_free(pollset_set);
60
80
  }
61
81
 
62
82
  void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015, Google Inc.
3
+ * Copyright 2015-2016, Google Inc.
4
4
  * All rights reserved.
5
5
  *
6
6
  * Redistribution and use in source and binary forms, with or without
@@ -35,23 +35,7 @@
35
35
  #define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
36
36
 
37
37
  #include "src/core/iomgr/fd_posix.h"
38
- #include "src/core/iomgr/pollset_posix.h"
39
-
40
- typedef struct grpc_pollset_set {
41
- gpr_mu mu;
42
-
43
- size_t pollset_count;
44
- size_t pollset_capacity;
45
- grpc_pollset **pollsets;
46
-
47
- size_t pollset_set_count;
48
- size_t pollset_set_capacity;
49
- struct grpc_pollset_set **pollset_sets;
50
-
51
- size_t fd_count;
52
- size_t fd_capacity;
53
- grpc_fd **fds;
54
- } grpc_pollset_set;
38
+ #include "src/core/iomgr/pollset_set.h"
55
39
 
56
40
  void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
57
41
  grpc_pollset_set *pollset_set, grpc_fd *fd);
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015, Google Inc.
3
+ * Copyright 2015-2016, Google Inc.
4
4
  * All rights reserved.
5
5
  *
6
6
  * Redistribution and use in source and binary forms, with or without
@@ -35,9 +35,9 @@
35
35
 
36
36
  #ifdef GPR_WINSOCK_SOCKET
37
37
 
38
- #include "src/core/iomgr/pollset_set.h"
38
+ #include "src/core/iomgr/pollset_set_windows.h"
39
39
 
40
- void grpc_pollset_set_init(grpc_pollset_set* pollset_set) {}
40
+ grpc_pollset_set* grpc_pollset_set_create(void) { return NULL; }
41
41
 
42
42
  void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
43
43
 
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015, Google Inc.
3
+ * Copyright 2015-2016, Google Inc.
4
4
  * All rights reserved.
5
5
  *
6
6
  * Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,6 @@
34
34
  #ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
35
35
  #define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
36
36
 
37
- typedef struct grpc_pollset_set { void *unused; } grpc_pollset_set;
37
+ #include "src/core/iomgr/pollset_set.h"
38
38
 
39
39
  #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
@@ -38,7 +38,6 @@
38
38
  #include <grpc/support/log.h>
39
39
  #include <grpc/support/thd.h>
40
40
 
41
- #include "src/core/iomgr/timer_internal.h"
42
41
  #include "src/core/iomgr/iomgr_internal.h"
43
42
  #include "src/core/iomgr/iocp_windows.h"
44
43
  #include "src/core/iomgr/pollset.h"
@@ -90,12 +89,15 @@ static void push_front_worker(grpc_pollset_worker *root,
90
89
  worker->links[type].next->links[type].prev = worker;
91
90
  }
92
91
 
92
+ size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
93
+
93
94
  /* There isn't really any such thing as a pollset under Windows, due to the
94
95
  nature of the IO completion ports. We're still going to provide a minimal
95
96
  set of features for the sake of the rest of grpc. But grpc_pollset_work
96
97
  won't actually do any polling, and return as quickly as possible. */
97
98
 
98
- void grpc_pollset_init(grpc_pollset *pollset) {
99
+ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
100
+ *mu = &grpc_polling_mu;
99
101
  memset(pollset, 0, sizeof(*pollset));
100
102
  pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
101
103
  pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
@@ -126,25 +128,25 @@ void grpc_pollset_reset(grpc_pollset *pollset) {
126
128
  }
127
129
 
128
130
  void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
129
- grpc_pollset_worker *worker, gpr_timespec now,
131
+ grpc_pollset_worker **worker_hdl, gpr_timespec now,
130
132
  gpr_timespec deadline) {
133
+ grpc_pollset_worker worker;
134
+ *worker_hdl = &worker;
135
+
131
136
  int added_worker = 0;
132
- worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
133
- worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
134
- worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
135
- worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
136
- worker->kicked = 0;
137
- worker->pollset = pollset;
138
- gpr_cv_init(&worker->cv);
139
- if (grpc_timer_check(exec_ctx, now, &deadline)) {
140
- goto done;
141
- }
137
+ worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
138
+ worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
139
+ worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
140
+ worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
141
+ worker.kicked = 0;
142
+ worker.pollset = pollset;
143
+ gpr_cv_init(&worker.cv);
142
144
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
143
145
  if (g_active_poller == NULL) {
144
146
  grpc_pollset_worker *next_worker;
145
147
  /* become poller */
146
148
  pollset->is_iocp_worker = 1;
147
- g_active_poller = worker;
149
+ g_active_poller = &worker;
148
150
  gpr_mu_unlock(&grpc_polling_mu);
149
151
  grpc_iocp_work(exec_ctx, deadline);
150
152
  grpc_exec_ctx_flush(exec_ctx);
@@ -171,12 +173,12 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
171
173
  goto done;
172
174
  }
173
175
  push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
174
- worker);
176
+ &worker);
175
177
  push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
176
- worker);
178
+ &worker);
177
179
  added_worker = 1;
178
- while (!worker->kicked) {
179
- if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
180
+ while (!worker.kicked) {
181
+ if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) {
180
182
  break;
181
183
  }
182
184
  }
@@ -190,10 +192,11 @@ done:
190
192
  gpr_mu_lock(&grpc_polling_mu);
191
193
  }
192
194
  if (added_worker) {
193
- remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
194
- remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
195
+ remove_worker(&worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
196
+ remove_worker(&worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
195
197
  }
196
- gpr_cv_destroy(&worker->cv);
198
+ gpr_cv_destroy(&worker.cv);
199
+ *worker_hdl = NULL;
197
200
  }
198
201
 
199
202
  void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {