grpc 1.19.0 → 1.20.0.pre1

Potentially problematic release.

Files changed (224)
  1. checksums.yaml +4 -4
  2. data/Makefile +4131 -7903
  3. data/include/grpc/grpc.h +11 -6
  4. data/include/grpc/grpc_security.h +51 -9
  5. data/include/grpc/impl/codegen/byte_buffer.h +13 -0
  6. data/include/grpc/impl/codegen/grpc_types.h +4 -0
  7. data/include/grpc/impl/codegen/port_platform.h +37 -6
  8. data/include/grpc/impl/codegen/sync_posix.h +18 -0
  9. data/src/core/ext/filters/client_channel/client_channel.cc +560 -236
  10. data/src/core/ext/filters/client_channel/client_channel_channelz.h +2 -2
  11. data/src/core/ext/filters/client_channel/client_channel_factory.cc +22 -34
  12. data/src/core/ext/filters/client_channel/client_channel_factory.h +19 -38
  13. data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +7 -4
  14. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +2 -2
  15. data/src/core/ext/filters/client_channel/lb_policy.cc +105 -28
  16. data/src/core/ext/filters/client_channel/lb_policy.h +259 -141
  17. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +29 -32
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +789 -803
  19. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +3 -1
  20. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +2 -6
  21. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +1 -1
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +7 -1
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +8 -8
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +2 -2
  25. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +127 -219
  26. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +103 -282
  27. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +4 -10
  28. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +709 -906
  29. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +0 -43
  30. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +8 -8
  31. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +2 -2
  32. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -6
  33. data/src/core/ext/filters/client_channel/resolver.cc +54 -1
  34. data/src/core/ext/filters/client_channel/resolver.h +51 -22
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +34 -86
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +29 -41
  37. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +32 -78
  38. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +109 -72
  39. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +13 -8
  40. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +28 -63
  41. data/src/core/ext/filters/client_channel/resolver_factory.h +3 -1
  42. data/src/core/ext/filters/client_channel/resolver_registry.cc +5 -2
  43. data/src/core/ext/filters/client_channel/resolver_registry.h +5 -4
  44. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +69 -49
  45. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +11 -8
  46. data/src/core/ext/filters/client_channel/resolving_lb_policy.cc +568 -0
  47. data/src/core/ext/filters/client_channel/resolving_lb_policy.h +141 -0
  48. data/src/core/ext/filters/client_channel/server_address.cc +0 -48
  49. data/src/core/ext/filters/client_channel/server_address.h +0 -10
  50. data/src/core/{lib/transport → ext/filters/client_channel}/service_config.cc +10 -5
  51. data/src/core/{lib/transport → ext/filters/client_channel}/service_config.h +16 -12
  52. data/src/core/ext/filters/client_channel/subchannel.cc +11 -16
  53. data/src/core/ext/filters/client_channel/subchannel.h +3 -0
  54. data/src/core/ext/filters/max_age/max_age_filter.cc +4 -1
  55. data/src/core/ext/filters/message_size/message_size_filter.cc +2 -2
  56. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +45 -45
  57. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +133 -134
  58. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +4 -4
  59. data/src/core/ext/transport/chttp2/transport/bin_decoder.h +4 -4
  60. data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +7 -6
  61. data/src/core/ext/transport/chttp2/transport/bin_encoder.h +4 -3
  62. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +37 -29
  63. data/src/core/ext/transport/chttp2/transport/flow_control.cc +1 -1
  64. data/src/core/ext/transport/chttp2/transport/frame_data.cc +2 -1
  65. data/src/core/ext/transport/chttp2/transport/frame_data.h +1 -1
  66. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +6 -5
  67. data/src/core/ext/transport/chttp2/transport/frame_goaway.h +3 -2
  68. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +5 -4
  69. data/src/core/ext/transport/chttp2/transport/frame_ping.h +1 -1
  70. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +5 -4
  71. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +2 -1
  72. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +2 -1
  73. data/src/core/ext/transport/chttp2/transport/frame_settings.h +2 -1
  74. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +4 -4
  75. data/src/core/ext/transport/chttp2/transport/frame_window_update.h +1 -1
  76. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +7 -6
  77. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +3 -2
  78. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +9 -5
  79. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +6 -1
  80. data/src/core/ext/transport/chttp2/transport/internal.h +5 -4
  81. data/src/core/ext/transport/chttp2/transport/parsing.cc +9 -9
  82. data/src/core/ext/transport/chttp2/transport/writing.cc +1 -1
  83. data/src/core/ext/transport/inproc/inproc_transport.cc +8 -0
  84. data/src/core/lib/channel/channel_args.cc +2 -0
  85. data/src/core/lib/channel/channel_args.h +3 -0
  86. data/src/core/lib/channel/channel_stack.h +1 -1
  87. data/src/core/lib/channel/channel_trace.cc +4 -4
  88. data/src/core/lib/channel/channel_trace.h +4 -4
  89. data/src/core/lib/channel/channelz.cc +32 -19
  90. data/src/core/lib/channel/channelz.h +4 -4
  91. data/src/core/lib/channel/channelz_registry.cc +1 -1
  92. data/src/core/lib/channel/context.h +0 -3
  93. data/src/core/lib/channel/handshaker_registry.cc +7 -3
  94. data/src/core/lib/compression/algorithm_metadata.h +3 -3
  95. data/src/core/lib/compression/compression.cc +1 -1
  96. data/src/core/lib/compression/compression_internal.cc +2 -2
  97. data/src/core/lib/compression/stream_compression_gzip.cc +1 -1
  98. data/src/core/lib/debug/trace.h +2 -1
  99. data/src/core/lib/gpr/cpu_posix.cc +5 -3
  100. data/src/core/lib/gpr/sync_posix.cc +65 -4
  101. data/src/core/lib/gprpp/atomic.h +75 -5
  102. data/src/core/lib/gprpp/fork.cc +0 -2
  103. data/src/core/lib/gprpp/orphanable.h +3 -2
  104. data/src/core/lib/gprpp/ref_counted.h +9 -11
  105. data/src/core/lib/gprpp/thd.h +42 -7
  106. data/src/core/lib/gprpp/thd_posix.cc +31 -13
  107. data/src/core/lib/gprpp/thd_windows.cc +47 -34
  108. data/src/core/lib/http/httpcli.cc +3 -2
  109. data/src/core/lib/http/httpcli_security_connector.cc +0 -1
  110. data/src/core/lib/http/parser.cc +2 -1
  111. data/src/core/lib/http/parser.h +2 -1
  112. data/src/core/lib/iomgr/buffer_list.h +1 -1
  113. data/src/core/lib/iomgr/endpoint.cc +2 -2
  114. data/src/core/lib/iomgr/endpoint.h +3 -2
  115. data/src/core/lib/iomgr/error.cc +9 -9
  116. data/src/core/lib/iomgr/error.h +4 -3
  117. data/src/core/lib/iomgr/ev_epoll1_linux.cc +6 -0
  118. data/src/core/lib/iomgr/ev_epollex_linux.cc +14 -9
  119. data/src/core/lib/iomgr/ev_poll_posix.cc +7 -481
  120. data/src/core/lib/iomgr/ev_posix.cc +7 -3
  121. data/src/core/lib/iomgr/ev_posix.h +8 -0
  122. data/src/core/lib/iomgr/executor.cc +13 -0
  123. data/src/core/lib/iomgr/executor.h +2 -1
  124. data/src/core/lib/iomgr/internal_errqueue.cc +2 -4
  125. data/src/core/lib/iomgr/iomgr.cc +5 -0
  126. data/src/core/lib/iomgr/iomgr.h +7 -0
  127. data/src/core/lib/iomgr/iomgr_custom.cc +9 -2
  128. data/src/core/lib/iomgr/iomgr_internal.cc +6 -0
  129. data/src/core/lib/iomgr/iomgr_internal.h +9 -1
  130. data/src/core/lib/iomgr/iomgr_posix.cc +10 -2
  131. data/src/core/lib/iomgr/iomgr_windows.cc +10 -2
  132. data/src/core/lib/iomgr/port.h +19 -0
  133. data/src/core/lib/iomgr/tcp_client_windows.cc +6 -4
  134. data/src/core/lib/iomgr/tcp_custom.cc +1 -1
  135. data/src/core/lib/iomgr/tcp_posix.cc +158 -54
  136. data/src/core/lib/iomgr/tcp_windows.cc +1 -1
  137. data/src/core/lib/iomgr/wakeup_fd_posix.cc +1 -19
  138. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +10 -6
  139. data/src/core/lib/security/credentials/jwt/jwt_verifier.h +2 -1
  140. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h +3 -6
  141. data/src/core/lib/security/credentials/tls/spiffe_credentials.cc +129 -0
  142. data/src/core/lib/security/credentials/tls/spiffe_credentials.h +62 -0
  143. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +7 -2
  144. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +28 -17
  145. data/src/core/lib/security/security_connector/ssl_utils.cc +134 -0
  146. data/src/core/lib/security/security_connector/ssl_utils.h +32 -0
  147. data/src/core/lib/security/security_connector/tls/spiffe_security_connector.cc +426 -0
  148. data/src/core/lib/security/security_connector/tls/spiffe_security_connector.h +122 -0
  149. data/src/core/lib/security/transport/auth_filters.h +2 -2
  150. data/src/core/lib/security/transport/client_auth_filter.cc +35 -39
  151. data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
  152. data/src/core/lib/security/transport/security_handshaker.cc +4 -3
  153. data/src/core/lib/slice/percent_encoding.cc +3 -3
  154. data/src/core/lib/slice/percent_encoding.h +3 -3
  155. data/src/core/lib/slice/slice.cc +27 -30
  156. data/src/core/lib/slice/slice_hash_table.h +2 -2
  157. data/src/core/lib/slice/slice_intern.cc +1 -1
  158. data/src/core/lib/slice/slice_internal.h +14 -3
  159. data/src/core/lib/slice/slice_weak_hash_table.h +4 -4
  160. data/src/core/lib/surface/byte_buffer_reader.cc +17 -0
  161. data/src/core/lib/surface/call.cc +8 -3
  162. data/src/core/lib/surface/completion_queue.cc +134 -148
  163. data/src/core/lib/surface/init.cc +78 -30
  164. data/src/core/lib/surface/init.h +1 -0
  165. data/src/core/lib/surface/lame_client.cc +4 -6
  166. data/src/core/lib/surface/version.cc +1 -1
  167. data/src/core/lib/transport/metadata.cc +66 -33
  168. data/src/core/lib/transport/metadata_batch.cc +1 -1
  169. data/src/core/lib/transport/metadata_batch.h +1 -1
  170. data/src/core/lib/transport/timeout_encoding.cc +1 -1
  171. data/src/core/lib/transport/timeout_encoding.h +1 -1
  172. data/src/core/lib/transport/transport.h +4 -3
  173. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +3 -3
  174. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +1 -1
  175. data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +4 -3
  176. data/src/core/tsi/alts/handshaker/transport_security_common_api.h +1 -1
  177. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +1 -1
  178. data/src/core/tsi/ssl_transport_security.cc +1 -5
  179. data/src/core/tsi/ssl_transport_security.h +24 -4
  180. data/src/ruby/bin/math_pb.rb +18 -16
  181. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -0
  182. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +6 -0
  183. data/src/ruby/lib/grpc/generic/rpc_server.rb +1 -1
  184. data/src/ruby/lib/grpc/version.rb +1 -1
  185. data/src/ruby/pb/README.md +1 -1
  186. data/src/ruby/pb/grpc/health/v1/health_pb.rb +13 -10
  187. data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +18 -0
  188. data/src/ruby/pb/src/proto/grpc/testing/empty_pb.rb +3 -1
  189. data/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb +58 -56
  190. data/src/ruby/pb/src/proto/grpc/testing/test_pb.rb +2 -0
  191. data/third_party/cares/cares/ares.h +12 -0
  192. data/third_party/cares/cares/ares_create_query.c +5 -1
  193. data/third_party/cares/cares/ares_data.c +74 -73
  194. data/third_party/cares/cares/ares_destroy.c +6 -1
  195. data/third_party/cares/cares/ares_gethostbyaddr.c +5 -5
  196. data/third_party/cares/cares/ares_gethostbyname.c +15 -4
  197. data/third_party/cares/cares/ares_getnameinfo.c +11 -0
  198. data/third_party/cares/cares/ares_init.c +274 -173
  199. data/third_party/cares/cares/ares_library_init.c +21 -3
  200. data/third_party/cares/cares/ares_options.c +6 -2
  201. data/third_party/cares/cares/ares_parse_naptr_reply.c +7 -6
  202. data/third_party/cares/cares/ares_parse_ptr_reply.c +4 -2
  203. data/third_party/cares/cares/ares_platform.c +7 -0
  204. data/third_party/cares/cares/ares_private.h +19 -11
  205. data/third_party/cares/cares/ares_process.c +27 -2
  206. data/third_party/cares/cares/ares_rules.h +1 -1
  207. data/third_party/cares/cares/ares_search.c +7 -0
  208. data/third_party/cares/cares/ares_send.c +6 -0
  209. data/third_party/cares/cares/ares_strsplit.c +174 -0
  210. data/third_party/cares/cares/ares_strsplit.h +43 -0
  211. data/third_party/cares/cares/ares_version.h +4 -4
  212. data/third_party/cares/cares/config-win32.h +1 -1
  213. data/third_party/cares/cares/inet_ntop.c +2 -3
  214. data/third_party/cares/config_darwin/ares_config.h +3 -0
  215. data/third_party/cares/config_freebsd/ares_config.h +3 -0
  216. data/third_party/cares/config_linux/ares_config.h +3 -0
  217. data/third_party/cares/config_openbsd/ares_config.h +3 -0
  218. metadata +39 -37
  219. data/src/core/ext/filters/client_channel/request_routing.cc +0 -946
  220. data/src/core/ext/filters/client_channel/request_routing.h +0 -181
  221. data/src/core/lib/gprpp/atomic_with_atm.h +0 -57
  222. data/src/core/lib/gprpp/atomic_with_std.h +0 -35
  223. data/src/core/lib/iomgr/wakeup_fd_cv.cc +0 -107
  224. data/src/core/lib/iomgr/wakeup_fd_cv.h +0 -69
@@ -196,7 +196,7 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
   return slice;
 }
 
-bool grpc_slice_is_interned(grpc_slice slice) {
+bool grpc_slice_is_interned(const grpc_slice& slice) {
   return (slice.refcount && slice.refcount->vtable == &interned_slice_vtable) ||
          GRPC_IS_STATIC_METADATA_STRING(slice);
 }
@@ -24,15 +24,26 @@
 #include <grpc/slice.h>
 #include <grpc/slice_buffer.h>
 
-grpc_slice grpc_slice_ref_internal(grpc_slice slice);
-void grpc_slice_unref_internal(grpc_slice slice);
+inline const grpc_slice& grpc_slice_ref_internal(const grpc_slice& slice) {
+  if (slice.refcount) {
+    slice.refcount->vtable->ref(slice.refcount);
+  }
+  return slice;
+}
+
+inline void grpc_slice_unref_internal(const grpc_slice& slice) {
+  if (slice.refcount) {
+    slice.refcount->vtable->unref(slice.refcount);
+  }
+}
+
 void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb);
 void grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb,
                                               size_t idx);
 void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb);
 
 /* Check if a slice is interned */
-bool grpc_slice_is_interned(grpc_slice slice);
+bool grpc_slice_is_interned(const grpc_slice& slice);
 
 void grpc_slice_intern_init(void);
 void grpc_slice_intern_shutdown(void);
@@ -46,7 +46,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
 
   /// Add a mapping from \a key to \a value, taking ownership of \a key. This
   /// operation will always succeed. It may discard older entries.
-  void Add(grpc_slice key, T value) {
+  void Add(const grpc_slice& key, T value) {
     const size_t idx = grpc_slice_hash(key) % Size;
     entries_[idx].Set(key, std::move(value));
     return;
@@ -54,7 +54,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
 
   /// Returns the value from the table associated with \a key or null if not
   /// found.
-  const T* Get(const grpc_slice key) const {
+  const T* Get(const grpc_slice& key) const {
     const size_t idx = grpc_slice_hash(key) % Size;
     const auto& entry = entries_[idx];
     return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr;
@@ -79,7 +79,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
     ~Entry() {
      if (is_set_) grpc_slice_unref_internal(key_);
     }
-    grpc_slice key() const { return key_; }
+    const grpc_slice& key() const { return key_; }
 
     /// Return the entry's value, or null if unset.
     const T* value() const {
@@ -88,7 +88,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
     }
 
     /// Set the \a key and \a value (which is moved) for the entry.
-    void Set(grpc_slice key, T&& value) {
+    void Set(const grpc_slice& key, T&& value) {
       if (is_set_) grpc_slice_unref_internal(key_);
       key_ = key;
       value_ = std::move(value);
@@ -91,6 +91,23 @@ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader) {
   }
 }
 
+int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
+                                 grpc_slice** slice) {
+  switch (reader->buffer_in->type) {
+    case GRPC_BB_RAW: {
+      grpc_slice_buffer* slice_buffer;
+      slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
+      if (reader->current.index < slice_buffer->count) {
+        *slice = &slice_buffer->slices[reader->current.index];
+        reader->current.index += 1;
+        return 1;
+      }
+      break;
+    }
+  }
+  return 0;
+}
+
 int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
                                  grpc_slice* slice) {
   switch (reader->buffer_in->type) {
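The new grpc_byte_buffer_reader_peek added above hands back a pointer into the reader's own slice buffer rather than a referenced copy, so, unlike grpc_byte_buffer_reader_next, the caller should not unref what it receives. A minimal caller sketch under that reading of the hunk (dump_slice_lengths is a hypothetical helper, not part of this gem):

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/slice.h>
    #include <stdio.h>

    // Walk every slice of a byte buffer without copying or ref-counting it.
    static void dump_slice_lengths(grpc_byte_buffer* bb) {
      grpc_byte_buffer_reader reader;
      grpc_byte_buffer_reader_init(&reader, bb);
      grpc_slice* slice;  // borrowed pointer into the reader; do not unref
      while (grpc_byte_buffer_reader_peek(&reader, &slice)) {
        printf("slice of %zu bytes\n", GRPC_SLICE_LENGTH(*slice));
      }
      grpc_byte_buffer_reader_destroy(&reader);
    }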
@@ -1035,9 +1035,14 @@ static void recv_trailing_filter(void* args, grpc_metadata_batch* b,
         grpc_get_status_code_from_metadata(b->idx.named.grpc_status->md);
     grpc_error* error = GRPC_ERROR_NONE;
     if (status_code != GRPC_STATUS_OK) {
-      error = grpc_error_set_int(
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error received from peer"),
-          GRPC_ERROR_INT_GRPC_STATUS, static_cast<intptr_t>(status_code));
+      char* peer_msg = nullptr;
+      char* peer = grpc_call_get_peer(call);
+      gpr_asprintf(&peer_msg, "Error received from peer %s", peer);
+      error = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(peer_msg),
+                                 GRPC_ERROR_INT_GRPC_STATUS,
+                                 static_cast<intptr_t>(status_code));
+      gpr_free(peer);
+      gpr_free(peer_msg);
     }
     if (b->idx.named.grpc_message != nullptr) {
       error = grpc_error_set_str(
@@ -33,6 +33,7 @@
 #include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/gpr/tls.h"
+#include "src/core/lib/gprpp/atomic.h"
 #include "src/core/lib/iomgr/pollset.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/profiling/timers.h"
@@ -44,6 +45,8 @@ grpc_core::TraceFlag grpc_trace_operation_failures(false, "op_failure");
 grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags(false, "pending_tags");
 grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount(false, "cq_refcount");
 
+namespace {
+
 // Specifies a cq thread local cache.
 // The first event that occurs on a thread
 // with a cq cache will go into that cache, and
@@ -84,24 +87,22 @@ typedef struct {
   grpc_closure* shutdown;
 } non_polling_poller;
 
-static size_t non_polling_poller_size(void) {
-  return sizeof(non_polling_poller);
-}
+size_t non_polling_poller_size(void) { return sizeof(non_polling_poller); }
 
-static void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) {
+void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) {
   non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
   gpr_mu_init(&npp->mu);
   *mu = &npp->mu;
 }
 
-static void non_polling_poller_destroy(grpc_pollset* pollset) {
+void non_polling_poller_destroy(grpc_pollset* pollset) {
   non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
   gpr_mu_destroy(&npp->mu);
 }
 
-static grpc_error* non_polling_poller_work(grpc_pollset* pollset,
-                                           grpc_pollset_worker** worker,
-                                           grpc_millis deadline) {
+grpc_error* non_polling_poller_work(grpc_pollset* pollset,
+                                    grpc_pollset_worker** worker,
+                                    grpc_millis deadline) {
   non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
   if (npp->shutdown) return GRPC_ERROR_NONE;
   if (npp->kicked_without_poller) {
@@ -141,8 +142,8 @@ static grpc_error* non_polling_poller_work(grpc_pollset* pollset,
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* non_polling_poller_kick(
-    grpc_pollset* pollset, grpc_pollset_worker* specific_worker) {
+grpc_error* non_polling_poller_kick(grpc_pollset* pollset,
+                                    grpc_pollset_worker* specific_worker) {
   non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
   if (specific_worker == nullptr)
     specific_worker = reinterpret_cast<grpc_pollset_worker*>(p->root);
@@ -159,8 +160,7 @@ static grpc_error* non_polling_poller_kick(
   return GRPC_ERROR_NONE;
 }
 
-static void non_polling_poller_shutdown(grpc_pollset* pollset,
-                                        grpc_closure* closure) {
+void non_polling_poller_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
   GPR_ASSERT(closure != nullptr);
   p->shutdown = closure;
@@ -175,7 +175,7 @@ static void non_polling_poller_shutdown(grpc_pollset* pollset,
   }
 }
 
-static const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
+const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
     /* GRPC_CQ_DEFAULT_POLLING */
     {true, true, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
      grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
@@ -188,7 +188,9 @@ static const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
      non_polling_poller_shutdown, non_polling_poller_destroy},
 };
 
-typedef struct cq_vtable {
+}  // namespace
+
+struct cq_vtable {
   grpc_cq_completion_type cq_completion_type;
   size_t data_size;
   void (*init)(void* data,
@@ -203,80 +205,116 @@ typedef struct cq_vtable {
                void* reserved);
   grpc_event (*pluck)(grpc_completion_queue* cq, void* tag,
                       gpr_timespec deadline, void* reserved);
-} cq_vtable;
+};
+
+namespace {
 
 /* Queue that holds the cq_completion_events. Internally uses gpr_mpscq queue
  * (a lockfree multiproducer single consumer queue). It uses a queue_lock
  * to support multiple consumers.
  * Only used in completion queues whose completion_type is GRPC_CQ_NEXT */
-typedef struct grpc_cq_event_queue {
+class CqEventQueue {
+ public:
+  CqEventQueue() { gpr_mpscq_init(&queue_); }
+  ~CqEventQueue() { gpr_mpscq_destroy(&queue_); }
+
+  /* Note: The counter is not incremented/decremented atomically with push/pop.
+   * The count is only eventually consistent */
+  intptr_t num_items() const {
+    return num_queue_items_.Load(grpc_core::MemoryOrder::RELAXED);
+  }
+
+  bool Push(grpc_cq_completion* c);
+  grpc_cq_completion* Pop();
+
+ private:
   /* Spinlock to serialize consumers i.e pop() operations */
-  gpr_spinlock queue_lock;
+  gpr_spinlock queue_lock_ = GPR_SPINLOCK_INITIALIZER;
 
-  gpr_mpscq queue;
+  gpr_mpscq queue_;
 
   /* A lazy counter of number of items in the queue. This is NOT atomically
      incremented/decremented along with push/pop operations and hence is only
      eventually consistent */
-  gpr_atm num_queue_items;
-} grpc_cq_event_queue;
+  grpc_core::Atomic<intptr_t> num_queue_items_{0};
+};
+
+struct cq_next_data {
+  ~cq_next_data() { GPR_ASSERT(queue.num_items() == 0); }
 
-typedef struct cq_next_data {
   /** Completed events for completion-queues of type GRPC_CQ_NEXT */
-  grpc_cq_event_queue queue;
+  CqEventQueue queue;
 
   /** Counter of how many things have ever been queued on this completion queue
      useful for avoiding locks to check the queue */
-  gpr_atm things_queued_ever;
+  grpc_core::Atomic<intptr_t> things_queued_ever{0};
 
-  /* Number of outstanding events (+1 if not shut down) */
-  gpr_atm pending_events;
+  /** Number of outstanding events (+1 if not shut down)
+      Initial count is dropped by grpc_completion_queue_shutdown */
+  grpc_core::Atomic<intptr_t> pending_events{1};
 
   /** 0 initially. 1 once we initiated shutdown */
-  bool shutdown_called;
-} cq_next_data;
+  bool shutdown_called = false;
+};
+
+struct cq_pluck_data {
+  cq_pluck_data() {
+    completed_tail = &completed_head;
+    completed_head.next = reinterpret_cast<uintptr_t>(completed_tail);
+  }
+
+  ~cq_pluck_data() {
+    GPR_ASSERT(completed_head.next ==
+               reinterpret_cast<uintptr_t>(&completed_head));
+  }
 
-typedef struct cq_pluck_data {
   /** Completed events for completion-queues of type GRPC_CQ_PLUCK */
   grpc_cq_completion completed_head;
   grpc_cq_completion* completed_tail;
 
-  /** Number of pending events (+1 if we're not shutdown) */
-  gpr_atm pending_events;
+  /** Number of pending events (+1 if we're not shutdown).
+      Initial count is dropped by grpc_completion_queue_shutdown. */
+  grpc_core::Atomic<intptr_t> pending_events{1};
 
   /** Counter of how many things have ever been queued on this completion queue
      useful for avoiding locks to check the queue */
-  gpr_atm things_queued_ever;
+  grpc_core::Atomic<intptr_t> things_queued_ever{0};
 
   /** 0 initially. 1 once we completed shutting */
   /* TODO: (sreek) This is not needed since (shutdown == 1) if and only if
    * (pending_events == 0). So consider removing this in future and use
    * pending_events */
-  gpr_atm shutdown;
+  grpc_core::Atomic<bool> shutdown{false};
 
   /** 0 initially. 1 once we initiated shutdown */
-  bool shutdown_called;
+  bool shutdown_called = false;
 
-  int num_pluckers;
+  int num_pluckers = 0;
   plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
-} cq_pluck_data;
+};
 
-typedef struct cq_callback_data {
+struct cq_callback_data {
+  cq_callback_data(
+      grpc_experimental_completion_queue_functor* shutdown_callback)
+      : shutdown_callback(shutdown_callback) {}
   /** No actual completed events queue, unlike other types */
 
-  /** Number of pending events (+1 if we're not shutdown) */
-  gpr_atm pending_events;
+  /** Number of pending events (+1 if we're not shutdown).
+      Initial count is dropped by grpc_completion_queue_shutdown. */
+  grpc_core::Atomic<intptr_t> pending_events{1};
 
   /** Counter of how many things have ever been queued on this completion queue
      useful for avoiding locks to check the queue */
-  gpr_atm things_queued_ever;
+  grpc_core::Atomic<intptr_t> things_queued_ever{0};
 
   /** 0 initially. 1 once we initiated shutdown */
-  bool shutdown_called;
+  bool shutdown_called = false;
 
   /** A callback that gets invoked when the CQ completes shutdown */
   grpc_experimental_completion_queue_functor* shutdown_callback;
-} cq_callback_data;
+};
+
+}  // namespace
 
 /* Completion queue structure */
 struct grpc_completion_queue {
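In the rewritten block above, the raw gpr_atm fields become grpc_core::Atomic<> members with explicit MemoryOrder arguments, and num_queue_items_ remains a deliberately loose hint rather than a synchronization point. A standalone sketch of that eventually-consistent-counter pattern using std::atomic (an illustration of the idea only, not the gem's grpc_core::Atomic wrapper):

    #include <atomic>
    #include <cstdint>

    // Eventually-consistent size hint: pushes and pops update the counter with
    // relaxed ordering, so readers may briefly see a stale count, which is fine
    // for "is there probably work queued?" checks like the ones in cq_next().
    class ApproxCounter {
     public:
      void Incr() { n_.fetch_add(1, std::memory_order_relaxed); }
      void Decr() { n_.fetch_sub(1, std::memory_order_relaxed); }
      intptr_t Get() const { return n_.load(std::memory_order_relaxed); }

     private:
      std::atomic<intptr_t> n_{0};
    };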
@@ -408,7 +446,7 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
     storage->done(storage->done_arg, storage);
     ret = 1;
     cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
-    if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+    if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
       GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
       gpr_mu_lock(cq->mu);
       cq_finish_shutdown_next(cq);
@@ -422,31 +460,21 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
   return ret;
 }
 
-static void cq_event_queue_init(grpc_cq_event_queue* q) {
-  gpr_mpscq_init(&q->queue);
-  q->queue_lock = GPR_SPINLOCK_INITIALIZER;
-  gpr_atm_no_barrier_store(&q->num_queue_items, 0);
+bool CqEventQueue::Push(grpc_cq_completion* c) {
+  gpr_mpscq_push(&queue_, reinterpret_cast<gpr_mpscq_node*>(c));
+  return num_queue_items_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED) == 0;
 }
 
-static void cq_event_queue_destroy(grpc_cq_event_queue* q) {
-  gpr_mpscq_destroy(&q->queue);
-}
-
-static bool cq_event_queue_push(grpc_cq_event_queue* q, grpc_cq_completion* c) {
-  gpr_mpscq_push(&q->queue, reinterpret_cast<gpr_mpscq_node*>(c));
-  return gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1) == 0;
-}
-
-static grpc_cq_completion* cq_event_queue_pop(grpc_cq_event_queue* q) {
+grpc_cq_completion* CqEventQueue::Pop() {
   grpc_cq_completion* c = nullptr;
 
-  if (gpr_spinlock_trylock(&q->queue_lock)) {
+  if (gpr_spinlock_trylock(&queue_lock_)) {
     GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES();
 
     bool is_empty = false;
     c = reinterpret_cast<grpc_cq_completion*>(
-        gpr_mpscq_pop_and_check_end(&q->queue, &is_empty));
-    gpr_spinlock_unlock(&q->queue_lock);
+        gpr_mpscq_pop_and_check_end(&queue_, &is_empty));
+    gpr_spinlock_unlock(&queue_lock_);
 
     if (c == nullptr && !is_empty) {
       GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES();
@@ -456,18 +484,12 @@ static grpc_cq_completion* cq_event_queue_pop(grpc_cq_event_queue* q) {
   }
 
   if (c) {
-    gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1);
+    num_queue_items_.FetchSub(1, grpc_core::MemoryOrder::RELAXED);
   }
 
   return c;
 }
 
-/* Note: The counter is not incremented/decremented atomically with push/pop.
- * The count is only eventually consistent */
-static long cq_event_queue_num_items(grpc_cq_event_queue* q) {
-  return static_cast<long>(gpr_atm_no_barrier_load(&q->num_queue_items));
-}
-
 grpc_completion_queue* grpc_completion_queue_create_internal(
     grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
     grpc_experimental_completion_queue_functor* shutdown_callback) {
@@ -507,49 +529,33 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
 
 static void cq_init_next(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
-  cq_next_data* cqd = static_cast<cq_next_data*>(data);
-  /* Initial count is dropped by grpc_completion_queue_shutdown */
-  gpr_atm_no_barrier_store(&cqd->pending_events, 1);
-  cqd->shutdown_called = false;
-  gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0);
-  cq_event_queue_init(&cqd->queue);
+  new (data) cq_next_data();
 }
 
 static void cq_destroy_next(void* data) {
   cq_next_data* cqd = static_cast<cq_next_data*>(data);
-  GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0);
-  cq_event_queue_destroy(&cqd->queue);
+  cqd->~cq_next_data();
 }
 
 static void cq_init_pluck(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
-  cq_pluck_data* cqd = static_cast<cq_pluck_data*>(data);
-  /* Initial count is dropped by grpc_completion_queue_shutdown */
-  gpr_atm_no_barrier_store(&cqd->pending_events, 1);
-  cqd->completed_tail = &cqd->completed_head;
-  cqd->completed_head.next = (uintptr_t)cqd->completed_tail;
-  gpr_atm_no_barrier_store(&cqd->shutdown, 0);
-  cqd->shutdown_called = false;
-  cqd->num_pluckers = 0;
-  gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0);
+  new (data) cq_pluck_data();
 }
 
 static void cq_destroy_pluck(void* data) {
   cq_pluck_data* cqd = static_cast<cq_pluck_data*>(data);
-  GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
+  cqd->~cq_pluck_data();
 }
 
 static void cq_init_callback(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
-  cq_callback_data* cqd = static_cast<cq_callback_data*>(data);
-  /* Initial count is dropped by grpc_completion_queue_shutdown */
-  gpr_atm_no_barrier_store(&cqd->pending_events, 1);
-  cqd->shutdown_called = false;
-  gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0);
-  cqd->shutdown_callback = shutdown_callback;
+  new (data) cq_callback_data(shutdown_callback);
 }
 
-static void cq_destroy_callback(void* data) {}
+static void cq_destroy_callback(void* data) {
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(data);
+  cqd->~cq_callback_data();
+}
 
 grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
   return cq->vtable->cq_completion_type;
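With the per-type completion queue data now being real C++ classes with constructors and destructors, the cq_init_*/cq_destroy_* hooks above reduce to a placement new into memory the completion queue already owns plus an explicit destructor call. A minimal standalone sketch of that pattern (cq_data_sketch is a hypothetical stand-in, not a type from this gem):

    #include <new>       // placement new
    #include <atomic>
    #include <cstdint>

    struct cq_data_sketch {              // stand-in for cq_next_data and friends
      std::atomic<intptr_t> pending_events{1};
      bool shutdown_called = false;
    };

    void init_in_place(void* storage) {
      new (storage) cq_data_sketch();    // run the constructor on pre-allocated memory
    }

    void destroy_in_place(void* storage) {
      // Explicit destructor call, no delete: the memory belongs to the enclosing object.
      static_cast<cq_data_sketch*>(storage)->~cq_data_sketch();
    }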
@@ -632,37 +638,19 @@ static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {
 static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {}
 #endif
 
-/* Atomically increments a counter only if the counter is not zero. Returns
- * true if the increment was successful; false if the counter is zero */
-static bool atm_inc_if_nonzero(gpr_atm* counter) {
-  while (true) {
-    gpr_atm count = gpr_atm_acq_load(counter);
-    /* If zero, we are done. If not, we must to a CAS (instead of an atomic
-     * increment) to maintain the contract: do not increment the counter if it
-     * is zero. */
-    if (count == 0) {
-      return false;
-    } else if (gpr_atm_full_cas(counter, count, count + 1)) {
-      break;
-    }
-  }
-
-  return true;
-}
-
 static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag) {
   cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
-  return atm_inc_if_nonzero(&cqd->pending_events);
+  return cqd->pending_events.IncrementIfNonzero();
 }
 
 static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag) {
   cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
-  return atm_inc_if_nonzero(&cqd->pending_events);
+  return cqd->pending_events.IncrementIfNonzero();
 }
 
 static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag) {
   cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
-  return atm_inc_if_nonzero(&cqd->pending_events);
+  return cqd->pending_events.IncrementIfNonzero();
 }
 
 bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
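The hand-rolled atm_inc_if_nonzero CAS loop removed above is now provided by grpc_core::Atomic<T>::IncrementIfNonzero(). A sketch of the same contract written against std::atomic, assuming only what the deleted code shows (this is not the gem's actual implementation):

    #include <atomic>
    #include <cstdint>

    // Increment the counter only if it is currently non-zero; mirrors the
    // contract of the removed atm_inc_if_nonzero: never revive a counter that
    // has already dropped to zero (i.e. a CQ that has finished shutting down).
    bool increment_if_nonzero(std::atomic<intptr_t>& counter) {
      intptr_t count = counter.load(std::memory_order_acquire);
      do {
        if (count == 0) return false;
      } while (!counter.compare_exchange_weak(count, count + 1,
                                              std::memory_order_acq_rel,
                                              std::memory_order_acquire));
      return true;
    }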
@@ -716,17 +704,14 @@ static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag,
     gpr_tls_set(&g_cached_event, (intptr_t)storage);
   } else {
     /* Add the completion to the queue */
-    bool is_first = cq_event_queue_push(&cqd->queue, storage);
-    gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
-
+    bool is_first = cqd->queue.Push(storage);
+    cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
     /* Since we do not hold the cq lock here, it is important to do an 'acquire'
        load here (instead of a 'no_barrier' load) to match with the release
        store
-       (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next
+       (done via pending_events.FetchSub(1, ACQ_REL)) in cq_shutdown_next
     */
-    bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
-
-    if (!will_definitely_shutdown) {
+    if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 1) {
       /* Only kick if this is the first item queued */
       if (is_first) {
         gpr_mu_lock(cq->mu);
@@ -740,7 +725,8 @@ static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag,
         GRPC_ERROR_UNREF(kick_error);
       }
     }
-    if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+    if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) ==
+        1) {
       GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
       gpr_mu_lock(cq->mu);
       cq_finish_shutdown_next(cq);
@@ -749,7 +735,7 @@ static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag,
     }
   } else {
     GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
-    gpr_atm_rel_store(&cqd->pending_events, 0);
+    cqd->pending_events.Store(0, grpc_core::MemoryOrder::RELEASE);
     gpr_mu_lock(cq->mu);
     cq_finish_shutdown_next(cq);
     gpr_mu_unlock(cq->mu);
@@ -795,12 +781,12 @@ static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag,
   cq_check_tag(cq, tag, false); /* Used in debug builds only */
 
   /* Add to the list of completions */
-  gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
+  cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
   cqd->completed_tail->next =
       ((uintptr_t)storage) | (1u & cqd->completed_tail->next);
   cqd->completed_tail = storage;
 
-  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
     cq_finish_shutdown_pluck(cq);
     gpr_mu_unlock(cq->mu);
   } else {
@@ -856,8 +842,8 @@ static void cq_end_op_for_callback(
 
   cq_check_tag(cq, tag, true); /* Used in debug builds only */
 
-  gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
-  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+  cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
+  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
     cq_finish_shutdown_callback(cq);
   }
 
@@ -893,20 +879,20 @@ class ExecCtxNext : public grpc_core::ExecCtx {
     cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
     GPR_ASSERT(a->stolen_completion == nullptr);
 
-    gpr_atm current_last_seen_things_queued_ever =
-        gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+    intptr_t current_last_seen_things_queued_ever =
+        cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
 
     if (current_last_seen_things_queued_ever !=
         a->last_seen_things_queued_ever) {
       a->last_seen_things_queued_ever =
-          gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+          cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
 
       /* Pop a cq_completion from the queue. Returns NULL if the queue is empty
        * might return NULL in some cases even if the queue is not empty; but
        * that
        * is ok and doesn't affect correctness. Might effect the tail latencies a
        * bit) */
-      a->stolen_completion = cq_event_queue_pop(&cqd->queue);
+      a->stolen_completion = cqd->queue.Pop();
       if (a->stolen_completion != nullptr) {
         return true;
       }
@@ -965,7 +951,7 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
 
   grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
   cq_is_finished_arg is_finished_arg = {
-      gpr_atm_no_barrier_load(&cqd->things_queued_ever),
+      cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
      cq,
      deadline_millis,
      nullptr,
@@ -985,7 +971,7 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
       break;
     }
 
-    grpc_cq_completion* c = cq_event_queue_pop(&cqd->queue);
+    grpc_cq_completion* c = cqd->queue.Pop();
 
     if (c != nullptr) {
       ret.type = GRPC_OP_COMPLETE;
@@ -999,16 +985,16 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
          so that the thread comes back quickly from poll to make a second
          attempt at popping. Not doing this can potentially deadlock this
          thread forever (if the deadline is infinity) */
-      if (cq_event_queue_num_items(&cqd->queue) > 0) {
+      if (cqd->queue.num_items() > 0) {
         iteration_deadline = 0;
       }
     }
 
-    if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
+    if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) == 0) {
       /* Before returning, check if the queue has any items left over (since
          gpr_mpscq_pop() can sometimes return NULL even if the queue is not
          empty. If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
-      if (cq_event_queue_num_items(&cqd->queue) > 0) {
+      if (cqd->queue.num_items() > 0) {
        /* Go to the beginning of the loop. No point doing a poll because
           (cq->shutdown == true) is only possible when there is no pending
           work (i.e cq->pending_events == 0) and any outstanding completion
@@ -1049,8 +1035,8 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
     is_finished_arg.first_loop = false;
   }
 
-  if (cq_event_queue_num_items(&cqd->queue) > 0 &&
-      gpr_atm_acq_load(&cqd->pending_events) > 0) {
+  if (cqd->queue.num_items() > 0 &&
+      cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) > 0) {
     gpr_mu_lock(cq->mu);
     cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
     gpr_mu_unlock(cq->mu);
@@ -1074,7 +1060,7 @@ static void cq_finish_shutdown_next(grpc_completion_queue* cq) {
   cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
 
   GPR_ASSERT(cqd->shutdown_called);
-  GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
+  GPR_ASSERT(cqd->pending_events.Load(grpc_core::MemoryOrder::RELAXED) == 0);
 
   cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
 }
@@ -1096,10 +1082,10 @@ static void cq_shutdown_next(grpc_completion_queue* cq) {
     return;
   }
   cqd->shutdown_called = true;
-  /* Doing a full_fetch_add (i.e acq/release) here to match with
-   * cq_begin_op_for_next and and cq_end_op_for_next functions which read/write
+  /* Doing acq/release FetchSub here to match with
+   * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
    * on this counter without necessarily holding a lock on cq */
-  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
     cq_finish_shutdown_next(cq);
   }
   gpr_mu_unlock(cq->mu);
@@ -1148,12 +1134,12 @@ class ExecCtxPluck : public grpc_core::ExecCtx {
 
     GPR_ASSERT(a->stolen_completion == nullptr);
     gpr_atm current_last_seen_things_queued_ever =
-        gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+        cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
     if (current_last_seen_things_queued_ever !=
         a->last_seen_things_queued_ever) {
       gpr_mu_lock(cq->mu);
       a->last_seen_things_queued_ever =
-          gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+          cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
       grpc_cq_completion* c;
       grpc_cq_completion* prev = &cqd->completed_head;
       while ((c = (grpc_cq_completion*)(prev->next &
@@ -1209,7 +1195,7 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
   gpr_mu_lock(cq->mu);
   grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
   cq_is_finished_arg is_finished_arg = {
-      gpr_atm_no_barrier_load(&cqd->things_queued_ever),
+      cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
      cq,
      deadline_millis,
      nullptr,
@@ -1246,7 +1232,7 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
       }
       prev = c;
     }
-    if (gpr_atm_no_barrier_load(&cqd->shutdown)) {
+    if (cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED)) {
       gpr_mu_unlock(cq->mu);
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_SHUTDOWN;
@@ -1309,8 +1295,8 @@ static void cq_finish_shutdown_pluck(grpc_completion_queue* cq) {
   cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
 
   GPR_ASSERT(cqd->shutdown_called);
-  GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
-  gpr_atm_no_barrier_store(&cqd->shutdown, 1);
+  GPR_ASSERT(!cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED));
+  cqd->shutdown.Store(1, grpc_core::MemoryOrder::RELAXED);
 
   cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
 }
@@ -1334,7 +1320,7 @@ static void cq_shutdown_pluck(grpc_completion_queue* cq) {
     return;
   }
   cqd->shutdown_called = true;
-  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
     cq_finish_shutdown_pluck(cq);
   }
   gpr_mu_unlock(cq->mu);
@@ -1368,7 +1354,7 @@ static void cq_shutdown_callback(grpc_completion_queue* cq) {
     return;
   }
   cqd->shutdown_called = true;
-  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
     gpr_mu_unlock(cq->mu);
     cq_finish_shutdown_callback(cq);
   } else {