grpc 1.6.7 → 1.7.0.pre1

Potentially problematic release: this version of grpc is a pre-release and might be problematic.

Files changed (277)
  1. checksums.yaml +4 -4
  2. data/Makefile +579 -77
  3. data/include/grpc/byte_buffer.h +1 -63
  4. data/include/grpc/compression.h +27 -5
  5. data/include/grpc/fork.h +24 -0
  6. data/include/grpc/grpc.h +12 -6
  7. data/include/grpc/grpc_security.h +28 -7
  8. data/include/grpc/impl/codegen/atm.h +1 -0
  9. data/include/grpc/impl/codegen/byte_buffer.h +86 -0
  10. data/include/grpc/impl/codegen/compression_types.h +63 -5
  11. data/include/grpc/impl/codegen/fork.h +48 -0
  12. data/include/grpc/impl/codegen/grpc_types.h +26 -9
  13. data/include/grpc/impl/codegen/port_platform.h +11 -4
  14. data/include/grpc/impl/codegen/slice.h +6 -1
  15. data/include/grpc/impl/codegen/sync.h +3 -1
  16. data/include/grpc/impl/codegen/sync_custom.h +36 -0
  17. data/include/grpc/module.modulemap +75 -3
  18. data/include/grpc/slice.h +1 -5
  19. data/include/grpc/support/sync_custom.h +24 -0
  20. data/src/core/ext/census/base_resources.c +14 -14
  21. data/src/core/ext/census/context.c +7 -5
  22. data/src/core/ext/census/grpc_filter.c +12 -14
  23. data/src/core/ext/census/mlog.c +2 -1
  24. data/src/core/ext/census/resource.c +13 -9
  25. data/src/core/ext/filters/client_channel/channel_connectivity.c +15 -8
  26. data/src/core/ext/filters/client_channel/client_channel.c +418 -439
  27. data/src/core/ext/filters/client_channel/client_channel_factory.c +4 -5
  28. data/src/core/ext/filters/client_channel/client_channel_plugin.c +2 -2
  29. data/src/core/ext/filters/client_channel/http_connect_handshaker.c +7 -5
  30. data/src/core/ext/filters/client_channel/http_proxy.c +17 -21
  31. data/src/core/ext/filters/client_channel/lb_policy.c +1 -1
  32. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +7 -7
  33. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +371 -257
  34. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +7 -5
  35. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +25 -14
  36. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +16 -16
  37. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +33 -28
  38. data/src/core/ext/filters/client_channel/lb_policy_factory.c +10 -8
  39. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
  40. data/src/core/ext/filters/client_channel/proxy_mapper_registry.c +1 -1
  41. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +7 -6
  42. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +62 -28
  43. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +29 -23
  44. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +25 -14
  45. data/src/core/ext/filters/client_channel/retry_throttle.c +9 -6
  46. data/src/core/ext/filters/client_channel/subchannel.c +30 -30
  47. data/src/core/ext/filters/client_channel/subchannel.h +1 -4
  48. data/src/core/ext/filters/client_channel/subchannel_index.c +31 -15
  49. data/src/core/ext/filters/client_channel/subchannel_index.h +7 -0
  50. data/src/core/ext/filters/client_channel/uri_parser.c +4 -3
  51. data/src/core/ext/filters/deadline/deadline_filter.c +78 -39
  52. data/src/core/ext/filters/deadline/deadline_filter.h +7 -1
  53. data/src/core/ext/filters/http/client/http_client_filter.c +14 -14
  54. data/src/core/ext/filters/http/http_filters_plugin.c +1 -1
  55. data/src/core/ext/filters/http/message_compress/message_compress_filter.c +240 -175
  56. data/src/core/ext/filters/http/server/http_server_filter.c +48 -36
  57. data/src/core/ext/filters/load_reporting/{load_reporting_filter.c → server_load_reporting_filter.c} +11 -12
  58. data/src/core/ext/filters/load_reporting/{load_reporting_filter.h → server_load_reporting_filter.h} +6 -5
  59. data/src/core/ext/filters/load_reporting/{load_reporting.c → server_load_reporting_plugin.c} +19 -13
  60. data/src/core/ext/filters/load_reporting/{load_reporting.h → server_load_reporting_plugin.h} +4 -3
  61. data/src/core/ext/filters/max_age/max_age_filter.c +2 -3
  62. data/src/core/ext/filters/message_size/message_size_filter.c +4 -2
  63. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +0 -1
  64. data/src/core/ext/transport/chttp2/client/chttp2_connector.c +5 -5
  65. data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
  66. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +1 -1
  67. data/src/core/ext/transport/chttp2/server/chttp2_server.c +20 -18
  68. data/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +1 -0
  69. data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +493 -210
  70. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +1 -0
  71. data/src/core/ext/transport/chttp2/transport/flow_control.c +9 -8
  72. data/src/core/ext/transport/chttp2/transport/frame_data.c +2 -2
  73. data/src/core/ext/transport/chttp2/transport/frame_goaway.c +2 -2
  74. data/src/core/ext/transport/chttp2/transport/frame_ping.c +5 -4
  75. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
  76. data/src/core/ext/transport/chttp2/transport/frame_settings.c +10 -9
  77. data/src/core/ext/transport/chttp2/transport/frame_window_update.c +9 -5
  78. data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +62 -41
  79. data/src/core/ext/transport/chttp2/transport/hpack_parser.c +52 -8
  80. data/src/core/ext/transport/chttp2/transport/hpack_table.c +2 -2
  81. data/src/core/ext/transport/chttp2/transport/incoming_metadata.c +3 -2
  82. data/src/core/ext/transport/chttp2/transport/internal.h +60 -30
  83. data/src/core/ext/transport/chttp2/transport/parsing.c +16 -5
  84. data/src/core/ext/transport/chttp2/transport/stream_lists.c +36 -16
  85. data/src/core/ext/transport/chttp2/transport/stream_map.c +6 -4
  86. data/src/core/ext/transport/chttp2/transport/writing.c +133 -105
  87. data/src/core/ext/transport/inproc/inproc_transport.c +61 -65
  88. data/src/core/lib/channel/channel_args.c +112 -12
  89. data/src/core/lib/channel/channel_args.h +31 -0
  90. data/src/core/lib/channel/channel_stack.c +1 -15
  91. data/src/core/lib/channel/channel_stack.h +3 -10
  92. data/src/core/lib/channel/channel_stack_builder.c +41 -10
  93. data/src/core/lib/channel/channel_stack_builder.h +10 -0
  94. data/src/core/lib/channel/connected_channel.c +94 -23
  95. data/src/core/lib/channel/handshaker.c +8 -6
  96. data/src/core/lib/channel/handshaker_registry.c +1 -1
  97. data/src/core/lib/compression/algorithm_metadata.h +14 -0
  98. data/src/core/lib/compression/compression.c +101 -1
  99. data/src/core/lib/compression/stream_compression.c +32 -146
  100. data/src/core/lib/compression/stream_compression.h +28 -4
  101. data/src/core/lib/compression/stream_compression_gzip.c +228 -0
  102. data/src/core/lib/{iomgr/ev_epoll_thread_pool_linux.h → compression/stream_compression_gzip.h} +5 -7
  103. data/src/core/lib/compression/stream_compression_identity.c +94 -0
  104. data/src/core/lib/{iomgr/ev_epoll_limited_pollers_linux.h → compression/stream_compression_identity.h} +7 -8
  105. data/src/core/lib/debug/stats.c +174 -0
  106. data/src/core/lib/debug/stats.h +61 -0
  107. data/src/core/lib/debug/stats_data.c +687 -0
  108. data/src/core/lib/debug/stats_data.h +470 -0
  109. data/src/core/lib/debug/trace.c +3 -3
  110. data/src/core/lib/debug/trace.h +1 -1
  111. data/src/core/lib/http/format_request.c +1 -1
  112. data/src/core/lib/http/httpcli.c +8 -7
  113. data/src/core/lib/http/httpcli_security_connector.c +2 -1
  114. data/src/core/lib/http/parser.c +4 -3
  115. data/src/core/lib/iomgr/call_combiner.c +202 -0
  116. data/src/core/lib/iomgr/call_combiner.h +121 -0
  117. data/src/core/lib/iomgr/closure.c +18 -4
  118. data/src/core/lib/iomgr/combiner.c +11 -4
  119. data/src/core/lib/iomgr/error.c +26 -24
  120. data/src/core/lib/iomgr/ev_epoll1_linux.c +395 -212
  121. data/src/core/lib/iomgr/ev_epollex_linux.c +141 -128
  122. data/src/core/lib/iomgr/ev_epollsig_linux.c +44 -41
  123. data/src/core/lib/iomgr/ev_poll_posix.c +99 -75
  124. data/src/core/lib/iomgr/ev_posix.c +5 -9
  125. data/src/core/lib/iomgr/ev_posix.h +1 -1
  126. data/src/core/lib/iomgr/exec_ctx.h +6 -1
  127. data/src/core/lib/iomgr/executor.c +142 -36
  128. data/src/core/lib/iomgr/executor.h +6 -1
  129. data/src/core/lib/iomgr/fork_posix.c +88 -0
  130. data/src/core/lib/iomgr/fork_windows.c +39 -0
  131. data/src/core/lib/iomgr/iocp_windows.c +2 -0
  132. data/src/core/lib/iomgr/iomgr.c +2 -8
  133. data/src/core/lib/iomgr/is_epollexclusive_available.c +6 -6
  134. data/src/core/lib/iomgr/load_file.c +2 -1
  135. data/src/core/lib/iomgr/polling_entity.c +9 -9
  136. data/src/core/lib/iomgr/polling_entity.h +7 -1
  137. data/src/core/lib/iomgr/pollset.h +1 -1
  138. data/src/core/lib/iomgr/pollset_uv.c +1 -1
  139. data/src/core/lib/iomgr/pollset_windows.c +3 -3
  140. data/src/core/lib/iomgr/port.h +4 -0
  141. data/src/core/lib/iomgr/resolve_address_posix.c +8 -7
  142. data/src/core/lib/iomgr/resolve_address_windows.c +1 -1
  143. data/src/core/lib/iomgr/resource_quota.c +24 -19
  144. data/src/core/lib/iomgr/socket_factory_posix.c +4 -4
  145. data/src/core/lib/iomgr/socket_mutator.c +4 -4
  146. data/src/core/lib/iomgr/socket_utils_windows.c +0 -4
  147. data/src/core/lib/iomgr/tcp_client_posix.c +5 -4
  148. data/src/core/lib/iomgr/tcp_posix.c +181 -20
  149. data/src/core/lib/iomgr/tcp_server_posix.c +8 -7
  150. data/src/core/lib/iomgr/tcp_server_utils_posix_common.c +1 -1
  151. data/src/core/lib/iomgr/timer.h +4 -0
  152. data/src/core/lib/iomgr/timer_generic.c +138 -3
  153. data/src/core/lib/iomgr/timer_generic.h +3 -0
  154. data/src/core/lib/iomgr/timer_heap.c +4 -4
  155. data/src/core/lib/iomgr/timer_manager.c +2 -2
  156. data/src/core/lib/iomgr/timer_uv.c +2 -0
  157. data/src/core/lib/iomgr/udp_server.c +10 -8
  158. data/src/core/lib/iomgr/unix_sockets_posix.c +4 -2
  159. data/src/core/lib/iomgr/wakeup_fd_cv.c +9 -8
  160. data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -2
  161. data/src/core/lib/json/json.c +1 -1
  162. data/src/core/lib/json/json_string.c +13 -13
  163. data/src/core/lib/profiling/timers.h +18 -8
  164. data/src/core/lib/security/credentials/composite/composite_credentials.c +4 -10
  165. data/src/core/lib/security/credentials/google_default/google_default_credentials.c +2 -1
  166. data/src/core/lib/security/credentials/jwt/jwt_verifier.c +11 -6
  167. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +4 -4
  168. data/src/core/lib/security/credentials/plugin/plugin_credentials.c +132 -50
  169. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
  170. data/src/core/lib/security/transport/client_auth_filter.c +68 -135
  171. data/src/core/lib/security/transport/secure_endpoint.c +110 -90
  172. data/src/core/lib/security/transport/secure_endpoint.h +8 -3
  173. data/src/core/lib/security/transport/security_connector.c +10 -12
  174. data/src/core/lib/security/transport/security_handshaker.c +45 -24
  175. data/src/core/lib/security/transport/server_auth_filter.c +71 -20
  176. data/src/core/lib/slice/b64.c +2 -2
  177. data/src/core/lib/slice/slice.c +16 -14
  178. data/src/core/lib/slice/slice_buffer.c +5 -4
  179. data/src/core/lib/slice/slice_hash_table.c +3 -2
  180. data/src/core/lib/slice/slice_intern.c +8 -5
  181. data/src/core/lib/support/block_annotate.h +22 -0
  182. data/src/core/lib/support/fork.c +62 -0
  183. data/src/core/lib/support/fork.h +35 -0
  184. data/src/core/lib/support/log_linux.c +1 -1
  185. data/src/core/lib/support/string.c +15 -1
  186. data/src/core/lib/support/string.h +3 -0
  187. data/src/core/lib/support/thd_internal.h +6 -0
  188. data/src/core/lib/support/thd_posix.c +56 -0
  189. data/src/core/lib/support/thd_windows.c +2 -0
  190. data/src/core/lib/surface/alarm.c +22 -15
  191. data/src/core/lib/surface/byte_buffer.c +4 -2
  192. data/src/core/lib/surface/call.c +442 -141
  193. data/src/core/lib/surface/call.h +6 -6
  194. data/src/core/lib/surface/call_log_batch.c +1 -1
  195. data/src/core/lib/surface/call_test_only.h +12 -0
  196. data/src/core/lib/surface/channel.c +39 -4
  197. data/src/core/lib/surface/channel_init.c +6 -6
  198. data/src/core/lib/surface/channel_ping.c +2 -2
  199. data/src/core/lib/surface/completion_queue.c +56 -57
  200. data/src/core/lib/surface/init.c +17 -3
  201. data/src/core/lib/surface/init_secure.c +5 -1
  202. data/src/core/lib/surface/lame_client.cc +9 -10
  203. data/src/core/lib/surface/server.c +81 -72
  204. data/src/core/lib/surface/version.c +2 -2
  205. data/src/core/lib/transport/byte_stream.c +1 -0
  206. data/src/core/lib/transport/byte_stream.h +3 -1
  207. data/src/core/lib/transport/connectivity_state.c +2 -1
  208. data/src/core/lib/transport/metadata.c +7 -4
  209. data/src/core/lib/transport/metadata_batch.c +18 -16
  210. data/src/core/lib/transport/metadata_batch.h +1 -0
  211. data/src/core/lib/transport/service_config.c +5 -3
  212. data/src/core/lib/transport/static_metadata.c +395 -614
  213. data/src/core/lib/transport/static_metadata.h +165 -133
  214. data/src/core/lib/transport/status_conversion.c +1 -1
  215. data/src/core/lib/transport/transport.c +20 -20
  216. data/src/core/lib/transport/transport.h +8 -5
  217. data/src/core/lib/transport/transport_impl.h +0 -3
  218. data/src/core/lib/transport/transport_op_string.c +8 -1
  219. data/src/core/plugin_registry/grpc_plugin_registry.c +4 -4
  220. data/src/core/tsi/fake_transport_security.c +133 -2
  221. data/src/core/tsi/fake_transport_security.h +5 -0
  222. data/src/core/tsi/ssl_transport_security.c +105 -8
  223. data/src/core/tsi/ssl_transport_security.h +30 -7
  224. data/src/core/tsi/transport_security.h +8 -2
  225. data/src/core/tsi/transport_security_grpc.c +20 -13
  226. data/src/core/tsi/transport_security_grpc.h +13 -9
  227. data/src/ruby/ext/grpc/rb_call_credentials.c +6 -2
  228. data/src/ruby/ext/grpc/rb_grpc.c +1 -1
  229. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +30 -20
  230. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +50 -35
  231. data/src/ruby/lib/grpc.rb +1 -0
  232. data/src/ruby/lib/grpc/generic/active_call.rb +34 -9
  233. data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -10
  234. data/src/ruby/lib/grpc/generic/client_stub.rb +95 -38
  235. data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
  236. data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
  237. data/src/ruby/lib/grpc/generic/rpc_desc.rb +66 -20
  238. data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -3
  239. data/src/ruby/lib/grpc/google_rpc_status_utils.rb +1 -2
  240. data/src/ruby/lib/grpc/version.rb +1 -1
  241. data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +1 -0
  242. data/src/ruby/spec/channel_connection_spec.rb +1 -34
  243. data/src/ruby/spec/client_server_spec.rb +188 -82
  244. data/src/ruby/spec/generic/active_call_spec.rb +65 -11
  245. data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
  246. data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
  247. data/src/ruby/spec/generic/rpc_desc_spec.rb +38 -0
  248. data/src/ruby/spec/generic/rpc_server_spec.rb +1 -34
  249. data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
  250. data/src/ruby/spec/spec_helper.rb +4 -0
  251. data/src/ruby/spec/support/helpers.rb +73 -0
  252. data/src/ruby/spec/support/services.rb +147 -0
  253. data/third_party/cares/ares_build.h +21 -62
  254. data/third_party/cares/cares/ares.h +23 -1
  255. data/third_party/cares/cares/ares__close_sockets.c +2 -2
  256. data/third_party/cares/cares/ares_create_query.c +3 -3
  257. data/third_party/cares/cares/ares_expand_name.c +6 -2
  258. data/third_party/cares/cares/ares_expand_string.c +1 -1
  259. data/third_party/cares/cares/ares_getnameinfo.c +27 -7
  260. data/third_party/cares/cares/ares_init.c +407 -39
  261. data/third_party/cares/cares/ares_library_init.c +10 -0
  262. data/third_party/cares/cares/ares_library_init.h +2 -1
  263. data/third_party/cares/cares/ares_nowarn.c +6 -6
  264. data/third_party/cares/cares/ares_nowarn.h +2 -2
  265. data/third_party/cares/cares/ares_parse_naptr_reply.c +6 -1
  266. data/third_party/cares/cares/ares_private.h +11 -0
  267. data/third_party/cares/cares/ares_process.c +126 -37
  268. data/third_party/cares/cares/ares_version.h +2 -2
  269. data/third_party/cares/cares/ares_writev.c +2 -2
  270. data/third_party/cares/cares/config-win32.h +8 -34
  271. data/third_party/cares/cares/inet_net_pton.c +2 -2
  272. data/third_party/cares/cares/setup_once.h +5 -5
  273. data/third_party/cares/config_darwin/ares_config.h +98 -196
  274. data/third_party/cares/config_linux/ares_config.h +103 -203
  275. metadata +47 -20
  276. data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +0 -1957
  277. data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +0 -1182

data/src/core/lib/iomgr/tcp_server_utils_posix_common.c
@@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
   gpr_mu_lock(&s->mu);
   s->nports++;
   GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
-  sp = gpr_malloc(sizeof(grpc_tcp_listener));
+  sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
   sp->next = NULL;
   if (s->head == NULL) {
     s->head = sp;
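
Many of the hunks below repeat the pattern shown above: an explicit cast added in front of gpr_malloc / gpr_realloc results and other void * values, presumably so the C sources also build cleanly as C++, where the implicit conversion from void * is an error. A minimal standalone illustration (hypothetical node type, not taken from the gem):

#include <stdlib.h>

typedef struct node { struct node *next; } node;

int main(void) {
  node *a = malloc(sizeof(node));          /* fine in C, a compile error in C++ */
  node *b = (node *)malloc(sizeof(node));  /* accepted by both C and C++ */
  free(a);
  free(b);
  return 0;
}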

data/src/core/lib/iomgr/timer.h
@@ -44,6 +44,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                      gpr_timespec deadline, grpc_closure *closure,
                      gpr_timespec now);
 
+/* Initialize *timer without setting it. This can later be passed through
+   the regular init or cancel */
+void grpc_timer_init_unset(grpc_timer *timer);
+
 /* Note that there is no timer destroy function. This is because the
    timer is a one-time occurrence with a guarantee that the callback will
    be called exactly once, either at expiration or cancellation. Thus, all
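
The new grpc_timer_init_unset() appears intended for owners of an optionally-armed timer, so that teardown can cancel unconditionally. A hedged sketch of that pattern, assuming a hypothetical my_op type (closure and exec_ctx plumbing elided, not code from the gem):

typedef struct {
  grpc_timer deadline_timer;
  bool have_deadline;
} my_op;

static void my_op_start(grpc_exec_ctx *exec_ctx, my_op *op,
                        gpr_timespec deadline, grpc_closure *on_deadline,
                        gpr_timespec now) {
  /* Mark the timer as not pending up front... */
  grpc_timer_init_unset(&op->deadline_timer);
  if (op->have_deadline) {
    /* ...and arm it only when a deadline actually exists. */
    grpc_timer_init(exec_ctx, &op->deadline_timer, deadline, on_deadline, now);
  }
}

static void my_op_destroy(grpc_exec_ctx *exec_ctx, my_op *op) {
  /* Safe whether or not the timer was ever armed. */
  grpc_timer_cancel(exec_ctx, &op->deadline_timer);
}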

data/src/core/lib/iomgr/timer_generic.c
@@ -79,6 +79,125 @@ static timer_shard g_shards[NUM_SHARDS];
  * Access to this is protected by g_shared_mutables.mu */
 static timer_shard *g_shard_queue[NUM_SHARDS];
 
+#ifndef NDEBUG
+
+/* == Hash table for duplicate timer detection == */
+
+#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
+
+static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
+static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
+
+static void init_timer_ht() {
+  for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
+    gpr_mu_init(&g_hash_mu[i]);
+  }
+}
+
+static bool is_in_ht(grpc_timer *t) {
+  size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+
+  gpr_mu_lock(&g_hash_mu[i]);
+  grpc_timer *p = g_timer_ht[i];
+  while (p != NULL && p != t) {
+    p = p->hash_table_next;
+  }
+  gpr_mu_unlock(&g_hash_mu[i]);
+
+  return (p == t);
+}
+
+static void add_to_ht(grpc_timer *t) {
+  GPR_ASSERT(!t->hash_table_next);
+  size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+
+  gpr_mu_lock(&g_hash_mu[i]);
+  grpc_timer *p = g_timer_ht[i];
+  while (p != NULL && p != t) {
+    p = p->hash_table_next;
+  }
+
+  if (p == t) {
+    grpc_closure *c = t->closure;
+    gpr_log(GPR_ERROR,
+            "** Duplicate timer (%p) being added. Closure: (%p), created at: "
+            "(%s:%d), scheduled at: (%s:%d) **",
+            t, c, c->file_created, c->line_created, c->file_initiated,
+            c->line_initiated);
+    abort();
+  }
+
+  /* Timer not present in the bucket. Insert at head of the list */
+  t->hash_table_next = g_timer_ht[i];
+  g_timer_ht[i] = t;
+  gpr_mu_unlock(&g_hash_mu[i]);
+}
+
+static void remove_from_ht(grpc_timer *t) {
+  size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+  bool removed = false;
+
+  gpr_mu_lock(&g_hash_mu[i]);
+  if (g_timer_ht[i] == t) {
+    g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
+    removed = true;
+  } else if (g_timer_ht[i] != NULL) {
+    grpc_timer *p = g_timer_ht[i];
+    while (p->hash_table_next != NULL && p->hash_table_next != t) {
+      p = p->hash_table_next;
+    }
+
+    if (p->hash_table_next == t) {
+      p->hash_table_next = t->hash_table_next;
+      removed = true;
+    }
+  }
+  gpr_mu_unlock(&g_hash_mu[i]);
+
+  if (!removed) {
+    grpc_closure *c = t->closure;
+    gpr_log(GPR_ERROR,
+            "** Removing timer (%p) that is not added to hash table. Closure "
+            "(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
+            t, c, c->file_created, c->line_created, c->file_initiated,
+            c->line_initiated);
+    abort();
+  }
+
+  t->hash_table_next = NULL;
+}
+
+/* If a timer is added to a timer shard (either heap or a list), it cannot
+ * be pending. A timer is added to hash table only-if it is added to the
+ * timer shard.
+ * Therefore, if timer->pending is false, it cannot be in hash table */
+static void validate_non_pending_timer(grpc_timer *t) {
+  if (!t->pending && is_in_ht(t)) {
+    grpc_closure *c = t->closure;
+    gpr_log(GPR_ERROR,
+            "** gpr_timer_cancel() called on a non-pending timer (%p) which "
+            "is in the hash table. Closure: (%p), created at: (%s:%d), "
+            "scheduled at: (%s:%d) **",
+            t, c, c->file_created, c->line_created, c->file_initiated,
+            c->line_initiated);
+    abort();
+  }
+}
+
+#define INIT_TIMER_HASH_TABLE() init_timer_ht()
+#define ADD_TO_HASH_TABLE(t) add_to_ht((t))
+#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
+#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
+
+#else
+
+#define INIT_TIMER_HASH_TABLE()
+#define ADD_TO_HASH_TABLE(t)
+#define REMOVE_FROM_HASH_TABLE(t)
+#define VALIDATE_NON_PENDING_TIMER(t)
+
+#endif
+
 /* Thread local variable that stores the deadline of the next timer the thread
  * has last-seen. This is an optimization to prevent the thread from checking
  * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
@@ -95,9 +214,7 @@ struct shared_mutables {
   gpr_mu mu;
 } GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
 
-static struct shared_mutables g_shared_mutables = {
-    .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
-};
+static struct shared_mutables g_shared_mutables;
 
 static gpr_clock_type g_clock_type;
 static gpr_timespec g_start_time;
@@ -155,6 +272,7 @@ void grpc_timer_list_init(gpr_timespec now) {
   uint32_t i;
 
   g_shared_mutables.initialized = true;
+  g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
   gpr_mu_init(&g_shared_mutables.mu);
   g_clock_type = now.clock_type;
   g_start_time = now;
@@ -176,6 +294,8 @@ void grpc_timer_list_init(gpr_timespec now) {
     shard->min_deadline = compute_min_deadline(shard);
     g_shard_queue[i] = shard;
   }
+
+  INIT_TIMER_HASH_TABLE();
 }
 
 void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
@@ -234,6 +354,8 @@ static void note_deadline_change(timer_shard *shard) {
   }
 }
 
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
+
 void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                      gpr_timespec deadline, grpc_closure *closure,
                      gpr_timespec now) {
@@ -244,6 +366,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
   timer->closure = closure;
   gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);
 
+#ifndef NDEBUG
+  timer->hash_table_next = NULL;
+#endif
+
   if (GRPC_TRACER_ON(grpc_timer_trace)) {
     gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
                        "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
@@ -271,6 +397,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
 
   grpc_time_averaged_stats_add_sample(&shard->stats,
                                       ts_to_dbl(gpr_time_sub(deadline, now)));
+
+  ADD_TO_HASH_TABLE(timer);
+
   if (deadline_atm < shard->queue_deadline_cap) {
     is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
   } else {
@@ -332,7 +461,10 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
     gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
             timer->pending ? "true" : "false");
   }
+
   if (timer->pending) {
+    REMOVE_FROM_HASH_TABLE(timer);
+
     GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
     timer->pending = false;
     if (timer->heap_index == INVALID_HEAP_INDEX) {
@@ -340,6 +472,8 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
     } else {
       grpc_timer_heap_remove(&shard->heap, timer);
     }
+  } else {
+    VALIDATE_NON_PENDING_TIMER(timer);
   }
   gpr_mu_unlock(&shard->mu);
 }
@@ -423,6 +557,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
   grpc_timer *timer;
   gpr_mu_lock(&shard->mu);
   while ((timer = pop_one(shard, now))) {
+    REMOVE_FROM_HASH_TABLE(timer);
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
     n++;
   }
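
The hash table added above is purely a debug aid: under NDEBUG the macros compile to nothing, while debug builds track every armed timer so that misuse aborts with the closure's creation site in the log. A hedged sketch of the kind of caller bug it is meant to catch (hypothetical schedule_twice helper, not code from the gem):

static grpc_timer g_t;

static void schedule_twice(grpc_exec_ctx *exec_ctx, grpc_closure *cb,
                           gpr_timespec now) {
  grpc_timer_init(exec_ctx, &g_t,
                  gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN)),
                  cb, now);
  /* BUG (for illustration): re-initializing a timer that is still pending.
     In a debug build, add_to_ht() finds &g_t already in its bucket and
     aborts with the "Duplicate timer" error shown above. */
  grpc_timer_init(exec_ctx, &g_t,
                  gpr_time_add(now, gpr_time_from_seconds(2, GPR_TIMESPAN)),
                  cb, now);
}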

data/src/core/lib/iomgr/timer_generic.h
@@ -29,6 +29,9 @@ struct grpc_timer {
   struct grpc_timer *next;
   struct grpc_timer *prev;
   grpc_closure *closure;
+#ifndef NDEBUG
+  struct grpc_timer *hash_table_next;
+#endif
 };
 
 #endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */

data/src/core/lib/iomgr/timer_heap.c
@@ -74,8 +74,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
   if (heap->timer_count >= 8 &&
       heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
     heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
-    heap->timers =
-        gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+    heap->timers = (grpc_timer **)gpr_realloc(
+        heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
   }
 }
 
@@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
   if (heap->timer_count == heap->timer_capacity) {
     heap->timer_capacity =
         GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
-    heap->timers =
-        gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+    heap->timers = (grpc_timer **)gpr_realloc(
+        heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
   }
   timer->heap_index = heap->timer_count;
   adjust_upwards(heap->timers, heap->timer_count, timer);
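
The growth rule above (grow by at least one element, otherwise by 50%) lets small heaps grow cheaply before switching to geometric growth. A tiny standalone check of the resulting capacity sequence, with GPR_MAX redefined locally so the snippet compiles on its own:

#include <stdio.h>

#define GPR_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
  size_t capacity = 0;
  for (int i = 0; i < 8; i++) {
    capacity = GPR_MAX(capacity + 1, capacity * 3 / 2);
    printf("%zu ", capacity); /* prints: 1 2 3 4 6 9 13 19 */
  }
  printf("\n");
  return 0;
}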

data/src/core/lib/iomgr/timer_manager.c
@@ -83,7 +83,7 @@ static void start_timer_thread_and_unlock(void) {
   }
   gpr_thd_options opt = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&opt);
-  completed_thread *ct = gpr_malloc(sizeof(*ct));
+  completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
   // The call to gpr_thd_new() has to be under the same lock used by
   // gc_completed_threads(), particularly due to ct->t, which is written here
   // (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
@@ -276,7 +276,7 @@ static void timer_thread(void *completed_thread_ptr) {
       GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
   timer_main_loop(&exec_ctx);
   grpc_exec_ctx_finish(&exec_ctx);
-  timer_thread_cleanup(completed_thread_ptr);
+  timer_thread_cleanup((completed_thread *)completed_thread_ptr);
 }
 
 static void start_threads(void) {

data/src/core/lib/iomgr/timer_uv.c
@@ -77,6 +77,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
   uv_unref((uv_handle_t *)uv_timer);
 }
 
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; }
+
 void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
   GRPC_UV_ASSERT_SAME_THREAD();
   if (timer->pending) {

data/src/core/lib/iomgr/udp_server.c
@@ -118,14 +118,14 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
     const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
     if (arg) {
       GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
-      return arg->value.pointer.p;
+      return (grpc_socket_factory *)arg->value.pointer.p;
     }
   }
   return NULL;
 }
 
 grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
-  grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
+  grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
   gpr_mu_init(&s->mu);
   s->socket_factory = get_socket_factory(args);
   if (s->socket_factory) {
@@ -176,7 +176,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
 
 static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
                            grpc_error *error) {
-  grpc_udp_server *s = server;
+  grpc_udp_server *s = (grpc_udp_server *)server;
   gpr_mu_lock(&s->mu);
   s->destroyed_ports++;
   if (s->destroyed_ports == s->nports) {
@@ -237,7 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
   if (s->active_ports) {
     for (sp = s->head; sp; sp = sp->next) {
       GPR_ASSERT(sp->orphan_cb);
-      struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
+      struct shutdown_fd_args *args =
+          (struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
       args->fd = sp->emfd;
       args->server_mu = &s->mu;
       GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -331,7 +332,7 @@ error:
 
 /* event manager callback when reads are ready */
 static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_udp_listener *sp = arg;
+  grpc_udp_listener *sp = (grpc_udp_listener *)arg;
 
   gpr_mu_lock(&sp->server->mu);
   if (error != GRPC_ERROR_NONE) {
@@ -354,7 +355,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 }
 
 static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_udp_listener *sp = arg;
+  grpc_udp_listener *sp = (grpc_udp_listener *)arg;
 
   gpr_mu_lock(&(sp->server->mu));
   if (error != GRPC_ERROR_NONE) {
@@ -393,7 +394,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
   gpr_free(addr_str);
   gpr_mu_lock(&s->mu);
   s->nports++;
-  sp = gpr_malloc(sizeof(grpc_udp_listener));
+  sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
   sp->next = NULL;
   if (s->head == NULL) {
     s->head = sp;
@@ -444,7 +445,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
                            (socklen_t *)&sockname_temp.len)) {
         port = grpc_sockaddr_get_port(&sockname_temp);
         if (port > 0) {
-          allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+          allocated_addr = (grpc_resolved_address *)gpr_malloc(
+              sizeof(grpc_resolved_address));
           memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
           grpc_sockaddr_set_port(allocated_addr, port);
           addr = allocated_addr;

data/src/core/lib/iomgr/unix_sockets_posix.c
@@ -49,9 +49,11 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
     gpr_free(err_msg);
     return err;
   }
-  *addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+  *addrs =
+      (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
   (*addrs)->naddrs = 1;
-  (*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
+  (*addrs)->addrs =
+      (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
   un = (struct sockaddr_un *)(*addrs)->addrs->addr;
   un->sun_family = AF_UNIX;
   strcpy(un->sun_path, name);

data/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -42,7 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
   gpr_mu_lock(&g_cvfds.mu);
   if (!g_cvfds.free_fds) {
     newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
-    g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+    g_cvfds.cvfds =
+        (fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
     for (i = g_cvfds.size; i < newsize; i++) {
       g_cvfds.cvfds[i].is_set = 0;
       g_cvfds.cvfds[i].cvs = NULL;
@@ -56,7 +57,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
   g_cvfds.free_fds = g_cvfds.free_fds->next_free;
   g_cvfds.cvfds[idx].cvs = NULL;
   g_cvfds.cvfds[idx].is_set = 0;
-  fd_info->read_fd = IDX_TO_FD(idx);
+  fd_info->read_fd = GRPC_IDX_TO_FD(idx);
   fd_info->write_fd = -1;
   gpr_mu_unlock(&g_cvfds.mu);
   return GRPC_ERROR_NONE;
@@ -65,8 +66,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
 static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
   cv_node* cvn;
   gpr_mu_lock(&g_cvfds.mu);
-  g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
-  cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+  g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+  cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
   while (cvn) {
     gpr_cv_signal(cvn->cv);
     cvn = cvn->next;
@@ -77,7 +78,7 @@ static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
 
 static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
   gpr_mu_lock(&g_cvfds.mu);
-  g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+  g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
   gpr_mu_unlock(&g_cvfds.mu);
   return GRPC_ERROR_NONE;
 }
@@ -88,9 +89,9 @@ static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
   }
   gpr_mu_lock(&g_cvfds.mu);
   // Assert that there are no active pollers
-  GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
-  g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
-  g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+  GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
+  g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+  g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
   gpr_mu_unlock(&g_cvfds.mu);
 }
 

data/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -37,8 +37,8 @@
 
 #include "src/core/lib/iomgr/ev_posix.h"
 
-#define FD_TO_IDX(fd) (-(fd)-1)
-#define IDX_TO_FD(idx) (-(idx)-1)
+#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
+#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
 
 typedef struct cv_node {
   gpr_cv* cv;
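
The rename only adds a GRPC_ prefix, presumably to keep these macros from clashing with other headers; the encoding is unchanged: condition-variable wakeup "fds" are negative numbers mapped onto non-negative table slots. A quick standalone check, with the macros duplicated locally so it compiles on its own:

#include <assert.h>

#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
#define GRPC_IDX_TO_FD(idx) (-(idx)-1)

int main(void) {
  assert(GRPC_FD_TO_IDX(-1) == 0);  /* smallest wakeup fd maps to slot 0 */
  assert(GRPC_FD_TO_IDX(-5) == 4);
  assert(GRPC_IDX_TO_FD(0) == -1);
  assert(GRPC_IDX_TO_FD(GRPC_FD_TO_IDX(-7)) == -7);  /* round-trips */
  return 0;
}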

data/src/core/lib/json/json.c
@@ -23,7 +23,7 @@
 #include "src/core/lib/json/json.h"
 
 grpc_json* grpc_json_create(grpc_json_type type) {
-  grpc_json* json = gpr_zalloc(sizeof(*json));
+  grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json));
   json->type = type;
 
   return json;

data/src/core/lib/json/json_string.c
@@ -63,19 +63,19 @@ typedef struct {
  * bytes at a time (or multiples thereof).
  */
 static void json_writer_output_check(void *userdata, size_t needed) {
-  json_writer_userdata *state = userdata;
+  json_writer_userdata *state = (json_writer_userdata *)userdata;
   if (state->free_space >= needed) return;
   needed -= state->free_space;
   /* Round up by 256 bytes. */
   needed = (needed + 0xff) & ~0xffU;
-  state->output = gpr_realloc(state->output, state->allocated + needed);
+  state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
   state->free_space += needed;
   state->allocated += needed;
 }
 
 /* These are needed by the writer's implementation. */
 static void json_writer_output_char(void *userdata, char c) {
-  json_writer_userdata *state = userdata;
+  json_writer_userdata *state = (json_writer_userdata *)userdata;
   json_writer_output_check(userdata, 1);
   state->output[state->string_len++] = c;
   state->free_space--;
@@ -83,7 +83,7 @@ static void json_writer_output_char(void *userdata, char c) {
 
 static void json_writer_output_string_with_len(void *userdata, const char *str,
                                                size_t len) {
-  json_writer_userdata *state = userdata;
+  json_writer_userdata *state = (json_writer_userdata *)userdata;
   json_writer_output_check(userdata, len);
   memcpy(state->output + state->string_len, str, len);
   state->string_len += len;
@@ -99,7 +99,7 @@ static void json_writer_output_string(void *userdata, const char *str) {
  * the end of the current string, and advance our output pointer.
  */
 static void json_reader_string_clear(void *userdata) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   if (state->string) {
     GPR_ASSERT(state->string_ptr < state->input);
     *state->string_ptr++ = 0;
@@ -108,7 +108,7 @@ static void json_reader_string_clear(void *userdata) {
 }
 
 static void json_reader_string_add_char(void *userdata, uint32_t c) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   GPR_ASSERT(state->string_ptr < state->input);
   GPR_ASSERT(c <= 0xff);
   *state->string_ptr++ = (uint8_t)c;
@@ -149,7 +149,7 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
  */
 static uint32_t json_reader_read_char(void *userdata) {
   uint32_t r;
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
 
   if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
 
@@ -168,7 +168,7 @@ static uint32_t json_reader_read_char(void *userdata) {
  * our tree-in-progress inside our opaque structure.
  */
 static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   grpc_json *json = grpc_json_create(type);
 
   json->parent = state->current_container;
@@ -194,7 +194,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
   }
 }
 
 static void json_reader_container_begins(void *userdata, grpc_json_type type) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   grpc_json *container;
 
   GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -215,7 +215,7 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
  */
 static grpc_json_type json_reader_container_ends(void *userdata) {
   grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
 
   GPR_ASSERT(state->current_container);
 
@@ -236,18 +236,18 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
  * We'll keep it as a string, and leave it to the caller to evaluate it.
  */
 static void json_reader_set_key(void *userdata) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   state->key = state->string;
 }
 
 static void json_reader_set_string(void *userdata) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
   json->value = (char *)state->string;
 }
 
 static int json_reader_set_number(void *userdata) {
-  json_reader_userdata *state = userdata;
+  json_reader_userdata *state = (json_reader_userdata *)userdata;
   grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
   json->value = (char *)state->string;
   return 1;