grpc 1.39.0.pre1 → 1.40.0.pre1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (168)
  1. checksums.yaml +4 -4
  2. data/Makefile +34 -18
  3. data/include/grpc/event_engine/event_engine.h +10 -14
  4. data/include/grpc/event_engine/slice_allocator.h +8 -33
  5. data/include/grpc/impl/codegen/grpc_types.h +18 -8
  6. data/include/grpc/impl/codegen/port_platform.h +24 -0
  7. data/src/core/ext/filters/client_channel/client_channel.cc +413 -247
  8. data/src/core/ext/filters/client_channel/client_channel.h +42 -18
  9. data/src/core/ext/filters/client_channel/config_selector.h +19 -6
  10. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +7 -8
  11. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +12 -21
  12. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +3 -5
  13. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +17 -38
  14. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +8 -15
  15. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +3 -6
  16. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +8 -12
  17. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +14 -22
  18. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +2 -9
  19. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +5 -8
  20. data/src/core/ext/filters/client_channel/lb_policy.cc +1 -15
  21. data/src/core/ext/filters/client_channel/lb_policy.h +70 -46
  22. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +101 -73
  23. data/src/core/ext/filters/client_channel/retry_filter.cc +392 -243
  24. data/src/core/ext/filters/client_channel/retry_service_config.cc +36 -26
  25. data/src/core/ext/filters/client_channel/retry_service_config.h +1 -1
  26. data/src/core/ext/filters/client_channel/service_config_call_data.h +45 -5
  27. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +0 -6
  28. data/src/core/ext/filters/http/client/http_client_filter.cc +5 -2
  29. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +5 -1
  30. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +1 -1
  31. data/src/core/{lib/event_engine/slice_allocator.cc → ext/transport/chttp2/transport/chttp2_slice_allocator.cc} +15 -38
  32. data/src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.h +74 -0
  33. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +2 -6
  34. data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -1
  35. data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -4
  36. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +8 -8
  37. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +5 -5
  38. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +639 -752
  39. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +190 -69
  40. data/src/core/ext/transport/chttp2/transport/internal.h +1 -1
  41. data/src/core/ext/transport/chttp2/transport/parsing.cc +70 -54
  42. data/src/core/ext/transport/chttp2/transport/varint.cc +6 -4
  43. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +56 -35
  44. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +180 -76
  45. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +35 -27
  46. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +97 -48
  47. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +45 -9
  48. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +67 -7
  49. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +66 -9
  50. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +227 -0
  51. data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c +46 -0
  52. data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.h +121 -0
  53. data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +1 -0
  54. data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c +35 -0
  55. data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.h +90 -0
  56. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +32 -24
  57. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +120 -73
  58. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +4 -2
  59. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +15 -0
  60. data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c +48 -0
  61. data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.h +171 -0
  62. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +8 -6
  63. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +27 -19
  64. data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +1 -0
  65. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +24 -7
  66. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +57 -0
  67. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +29 -17
  68. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +72 -0
  69. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +3 -2
  70. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +4 -0
  71. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +6 -5
  72. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +15 -11
  73. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +85 -43
  74. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +274 -91
  75. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +11 -8
  76. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +30 -13
  77. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +33 -5
  78. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +115 -0
  79. data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c +60 -0
  80. data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.h +181 -0
  81. data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +1 -0
  82. data/src/core/ext/upb-generated/validate/validate.upb.c +82 -66
  83. data/src/core/ext/upb-generated/validate/validate.upb.h +220 -124
  84. data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c +15 -7
  85. data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +53 -52
  86. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +318 -277
  87. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +5 -0
  88. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +437 -410
  89. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +198 -170
  90. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.h +10 -0
  91. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c +9 -8
  92. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +219 -163
  93. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +15 -0
  94. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c +59 -0
  95. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.h +40 -0
  96. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +29 -25
  97. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c +52 -0
  98. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.h +35 -0
  99. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +135 -125
  100. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +5 -0
  101. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c +131 -123
  102. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c +90 -0
  103. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.h +35 -0
  104. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +32 -24
  105. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +69 -55
  106. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +5 -0
  107. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +684 -664
  108. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +5 -0
  109. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c +13 -10
  110. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c +13 -10
  111. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +441 -375
  112. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +10 -0
  113. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +122 -114
  114. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +1 -1
  115. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +112 -79
  116. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.h +5 -0
  117. data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c +64 -0
  118. data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.h +50 -0
  119. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +35 -32
  120. data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c +4 -4
  121. data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c +182 -160
  122. data/src/core/ext/xds/certificate_provider_store.h +1 -1
  123. data/src/core/ext/xds/xds_api.cc +320 -121
  124. data/src/core/ext/xds/xds_api.h +31 -2
  125. data/src/core/ext/xds/xds_bootstrap.cc +4 -1
  126. data/src/core/ext/xds/xds_client.cc +66 -43
  127. data/src/core/ext/xds/xds_client.h +0 -4
  128. data/src/core/ext/xds/xds_http_filters.cc +3 -2
  129. data/src/core/ext/xds/xds_http_filters.h +3 -0
  130. data/src/core/lib/channel/call_tracer.h +85 -0
  131. data/src/core/lib/channel/channel_stack.h +1 -1
  132. data/src/core/lib/channel/context.h +3 -0
  133. data/src/core/lib/channel/status_util.h +4 -0
  134. data/src/core/lib/compression/stream_compression.h +1 -1
  135. data/src/core/lib/compression/stream_compression_gzip.h +1 -1
  136. data/src/core/lib/compression/stream_compression_identity.h +1 -1
  137. data/src/core/lib/debug/stats.h +1 -1
  138. data/src/core/lib/gpr/murmur_hash.cc +4 -2
  139. data/src/core/lib/gprpp/manual_constructor.h +1 -1
  140. data/src/core/lib/gprpp/orphanable.h +3 -3
  141. data/src/core/lib/gprpp/sync.h +2 -30
  142. data/src/core/lib/iomgr/buffer_list.cc +1 -1
  143. data/src/core/lib/iomgr/ev_apple.h +1 -1
  144. data/src/core/lib/iomgr/event_engine/endpoint.cc +6 -8
  145. data/src/core/lib/iomgr/event_engine/tcp.cc +30 -10
  146. data/src/core/lib/iomgr/python_util.h +1 -1
  147. data/src/core/lib/iomgr/resource_quota.cc +2 -0
  148. data/src/core/lib/iomgr/tcp_client_windows.cc +2 -0
  149. data/src/core/lib/iomgr/tcp_server_posix.cc +1 -0
  150. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  151. data/src/core/lib/json/json_reader.cc +1 -2
  152. data/src/core/lib/matchers/matchers.cc +8 -20
  153. data/src/core/lib/matchers/matchers.h +2 -1
  154. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +49 -0
  155. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +7 -0
  156. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +6 -18
  157. data/src/core/lib/security/transport/security_handshaker.cc +12 -4
  158. data/src/core/lib/security/transport/server_auth_filter.cc +0 -7
  159. data/src/core/lib/slice/slice_internal.h +1 -0
  160. data/src/core/lib/surface/call.cc +5 -6
  161. data/src/core/lib/surface/server.cc +3 -1
  162. data/src/core/lib/surface/server.h +3 -3
  163. data/src/core/lib/surface/version.cc +2 -4
  164. data/src/ruby/ext/grpc/extconf.rb +1 -1
  165. data/src/ruby/lib/grpc/version.rb +1 -1
  166. data/third_party/xxhash/xxhash.h +77 -195
  167. metadata +57 -40
  168. data/src/core/lib/gpr/arena.h +0 -47
@@ -172,9 +172,9 @@ void TlsChannelSecurityConnector::add_handshakers(
172
172
  const grpc_channel_args* args, grpc_pollset_set* /*interested_parties*/,
173
173
  HandshakeManager* handshake_mgr) {
174
174
  MutexLock lock(&mu_);
175
+ tsi_handshaker* tsi_hs = nullptr;
175
176
  if (client_handshaker_factory_ != nullptr) {
176
177
  // Instantiate TSI handshaker.
177
- tsi_handshaker* tsi_hs = nullptr;
178
178
  tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
179
179
  client_handshaker_factory_,
180
180
  overridden_target_name_.empty() ? target_name_.c_str()
@@ -183,16 +183,10 @@ void TlsChannelSecurityConnector::add_handshakers(
183
183
  if (result != TSI_OK) {
184
184
  gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
185
185
  tsi_result_to_string(result));
186
- return;
187
186
  }
188
- // Create handshakers.
189
- handshake_mgr->Add(SecurityHandshakerCreate(tsi_hs, this, args));
190
- return;
191
187
  }
192
- // TODO(ZhenLian): Implement the logic(delegation to
193
- // BlockOnInitialCredentialHandshaker) when certificates are not ready.
194
- gpr_log(GPR_ERROR, "%s not supported yet.",
195
- "Client BlockOnInitialCredentialHandshaker");
188
+ // If tsi_hs is null, this will add a failing handshaker.
189
+ handshake_mgr->Add(SecurityHandshakerCreate(tsi_hs, this, args));
196
190
  }
197
191
 
198
192
  void TlsChannelSecurityConnector::check_peer(
@@ -549,24 +543,18 @@ void TlsServerSecurityConnector::add_handshakers(
549
543
  const grpc_channel_args* args, grpc_pollset_set* /*interested_parties*/,
550
544
  HandshakeManager* handshake_mgr) {
551
545
  MutexLock lock(&mu_);
546
+ tsi_handshaker* tsi_hs = nullptr;
552
547
  if (server_handshaker_factory_ != nullptr) {
553
548
  // Instantiate TSI handshaker.
554
- tsi_handshaker* tsi_hs = nullptr;
555
549
  tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
556
550
  server_handshaker_factory_, &tsi_hs);
557
551
  if (result != TSI_OK) {
558
552
  gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
559
553
  tsi_result_to_string(result));
560
- return;
561
554
  }
562
- // Create handshakers.
563
- handshake_mgr->Add(SecurityHandshakerCreate(tsi_hs, this, args));
564
- return;
565
555
  }
566
- // TODO(ZhenLian): Implement the logic(delegation to
567
- // BlockOnInitialCredentialHandshaker) when certificates are not ready.
568
- gpr_log(GPR_ERROR, "%s not supported yet.",
569
- "Server BlockOnInitialCredentialHandshaker");
556
+ // If tsi_hs is null, this will add a failing handshaker.
557
+ handshake_mgr->Add(SecurityHandshakerCreate(tsi_hs, this, args));
570
558
  }
571
559
 
572
560
  void TlsServerSecurityConnector::check_peer(
@@ -521,10 +521,18 @@ class FailHandshaker : public Handshaker {
521
521
  void Shutdown(grpc_error_handle why) override { GRPC_ERROR_UNREF(why); }
522
522
  void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
523
523
  grpc_closure* on_handshake_done,
524
- HandshakerArgs* /*args*/) override {
525
- ExecCtx::Run(DEBUG_LOCATION, on_handshake_done,
526
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
527
- "Failed to create security handshaker"));
524
+ HandshakerArgs* args) override {
525
+ grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
526
+ "Failed to create security handshaker");
527
+ grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
528
+ grpc_endpoint_destroy(args->endpoint);
529
+ args->endpoint = nullptr;
530
+ grpc_channel_args_destroy(args->args);
531
+ args->args = nullptr;
532
+ grpc_slice_buffer_destroy_internal(args->read_buffer);
533
+ gpr_free(args->read_buffer);
534
+ args->read_buffer = nullptr;
535
+ ExecCtx::Run(DEBUG_LOCATION, on_handshake_done, error);
528
536
  }
529
537
 
530
538
  private:
@@ -306,13 +306,6 @@ static grpc_error_handle server_auth_init_channel_elem(
306
306
  GPR_ASSERT(!args->is_last);
307
307
  grpc_auth_context* auth_context =
308
308
  grpc_find_auth_context_in_args(args->channel_args);
309
- if (auth_context == nullptr) {
310
- grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
311
- "No authorization context found. This might be a TRANSIENT failure due "
312
- "to certificates not having been loaded yet.");
313
- gpr_log(GPR_DEBUG, "%s", grpc_error_std_string(error).c_str());
314
- return error;
315
- }
316
309
  GPR_ASSERT(auth_context != nullptr);
317
310
  grpc_server_credentials* creds =
318
311
  grpc_find_server_credentials_in_args(args->channel_args);
@@ -231,6 +231,7 @@ inline int grpc_slice_refcount::Eq(const grpc_slice& a, const grpc_slice& b) {
231
231
  GPR_DEBUG_ASSERT(
232
232
  (GRPC_STATIC_METADATA_INDEX(a) == GRPC_STATIC_METADATA_INDEX(b)) ==
233
233
  (a.refcount == b.refcount));
234
+ ABSL_FALLTHROUGH_INTENDED;
234
235
  case Type::INTERNED:
235
236
  return a.refcount == b.refcount;
236
237
  case Type::NOP:
@@ -151,6 +151,11 @@ struct grpc_call {
151
151
  }
152
152
 
153
153
  ~grpc_call() {
154
+ for (int i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
155
+ if (context[i].destroy) {
156
+ context[i].destroy(context[i].value);
157
+ }
158
+ }
154
159
  gpr_free(static_cast<void*>(const_cast<char*>(final_info.error_string)));
155
160
  }
156
161
 
@@ -554,11 +559,6 @@ static void destroy_call(void* call, grpc_error_handle /*error*/) {
554
559
  for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
555
560
  GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
556
561
  }
557
- for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
558
- if (c->context[i].destroy) {
559
- c->context[i].destroy(c->context[i].value);
560
- }
561
- }
562
562
  if (c->cq) {
563
563
  GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
564
564
  }
@@ -1625,7 +1625,6 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
1625
1625
  grpc_metadata& compression_md = call->compression_md;
1626
1626
  compression_md.key = grpc_empty_slice();
1627
1627
  compression_md.value = grpc_empty_slice();
1628
- compression_md.flags = 0;
1629
1628
  size_t additional_metadata_count = 0;
1630
1629
  grpc_compression_level effective_compression_level =
1631
1630
  GRPC_COMPRESS_LEVEL_NONE;
@@ -809,7 +809,9 @@ void Server::ShutdownAndNotify(grpc_completion_queue* cq, void* tag) {
809
809
  {
810
810
  // Wait for startup to be finished. Locks mu_global.
811
811
  MutexLock lock(&mu_global_);
812
- WaitUntil(&starting_cv_, &mu_global_, [this] { return !starting_; });
812
+ while (starting_) {
813
+ starting_cv_.Wait(&mu_global_);
814
+ }
813
815
  // Stay locked, and gather up some stuff to do.
814
816
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
815
817
  if (shutdown_published_) {
@@ -416,9 +416,9 @@ class Server : public InternallyRefCounted<Server> {
416
416
  Mutex mu_global_; // mutex for server and channel state
417
417
  Mutex mu_call_; // mutex for call-specific state
418
418
 
419
- // startup synchronization: flag is protected by mu_global_, signals whether
420
- // we are doing the listener start routine or not.
421
- bool starting_ = false;
419
+ // startup synchronization: flag, signals whether we are doing the listener
420
+ // start routine or not.
421
+ bool starting_ ABSL_GUARDED_BY(mu_global_) = false;
422
422
  CondVar starting_cv_;
423
423
 
424
424
  std::vector<std::unique_ptr<RegisteredMethod>> registered_methods_;
@@ -23,8 +23,6 @@
23
23
 
24
24
  #include <grpc/grpc.h>
25
25
 
26
- const char* grpc_version_string(void) { return "17.0.0"; }
26
+ const char* grpc_version_string(void) { return "18.0.0"; }
27
27
 
28
- const char* grpc_g_stands_for(void) {
29
- return "guadalupe_river_park_conservancy";
30
- }
28
+ const char* grpc_g_stands_for(void) { return "guileless"; }
@@ -57,7 +57,7 @@ end
57
57
 
58
58
  ENV['CPPFLAGS'] = '-DGPR_BACKWARDS_COMPATIBILITY_MODE'
59
59
  ENV['CPPFLAGS'] += ' -DGRPC_XDS_USER_AGENT_NAME_SUFFIX="\"RUBY\"" '
60
- ENV['CPPFLAGS'] += ' -DGRPC_XDS_USER_AGENT_VERSION_SUFFIX="\"1.39.0.pre1\"" '
60
+ ENV['CPPFLAGS'] += ' -DGRPC_XDS_USER_AGENT_VERSION_SUFFIX="\"1.40.0.pre1\"" '
61
61
 
62
62
  output_dir = File.expand_path(RbConfig::CONFIG['topdir'])
63
63
  grpc_lib_dir = File.join(output_dir, 'libs', grpc_config)
@@ -14,5 +14,5 @@
14
14
 
15
15
  # GRPC contains the General RPC module.
16
16
  module GRPC
17
- VERSION = '1.39.0.pre1'
17
+ VERSION = '1.40.0.pre1'
18
18
  end
@@ -266,7 +266,7 @@ extern "C" {
266
266
  ***************************************/
267
267
  #define XXH_VERSION_MAJOR 0
268
268
  #define XXH_VERSION_MINOR 8
269
- #define XXH_VERSION_RELEASE 0
269
+ #define XXH_VERSION_RELEASE 1
270
270
  #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
271
271
 
272
272
  /*!
@@ -275,7 +275,7 @@ extern "C" {
275
275
  * This is only useful when xxHash is compiled as a shared library, as it is
276
276
  * independent of the version defined in the header.
277
277
  *
278
- * @return `XXH_VERSION_NUMBER` as of when the function was compiled.
278
+ * @return `XXH_VERSION_NUMBER` as of when the libray was compiled.
279
279
  */
280
280
  XXH_PUBLIC_API unsigned XXH_versionNumber (void);
281
281
 
@@ -1394,6 +1394,27 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
1394
1394
  /* note: use after variable declarations */
1395
1395
  #define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
1396
1396
 
1397
+ /*!
1398
+ * @internal
1399
+ * @def XXH_COMPILER_GUARD(var)
1400
+ * @brief Used to prevent unwanted optimizations for @p var.
1401
+ *
1402
+ * It uses an empty GCC inline assembly statement with a register constraint
1403
+ * which forces @p var into a general purpose register (eg eax, ebx, ecx
1404
+ * on x86) and marks it as modified.
1405
+ *
1406
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
1407
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
1408
+ * and _usually_ isn't wanted elsewhere.
1409
+ *
1410
+ * We also use it to prevent unwanted constant folding for AArch64 in
1411
+ * XXH3_initCustomSecret_scalar().
1412
+ */
1413
+ #ifdef __GNUC__
1414
+ # define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1415
+ #else
1416
+ # define XXH_COMPILER_GUARD(var) ((void)0)
1417
+ #endif
1397
1418
 
1398
1419
  /* *************************************
1399
1420
  * Basic Types
@@ -1703,11 +1724,12 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1703
1724
  * @ingroup impl
1704
1725
  * @{
1705
1726
  */
1706
- static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U; /*!< 0b10011110001101110111100110110001 */
1707
- static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U; /*!< 0b10000101111010111100101001110111 */
1708
- static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU; /*!< 0b11000010101100101010111000111101 */
1709
- static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU; /*!< 0b00100111110101001110101100101111 */
1710
- static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /*!< 0b00010110010101100110011110110001 */
1727
+ /* #define instead of static const, to be used as initializers */
1728
+ #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
1729
+ #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
1730
+ #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
1731
+ #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
1732
+ #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
1711
1733
 
1712
1734
  #ifdef XXH_OLD_NAMES
1713
1735
  # define PRIME32_1 XXH_PRIME32_1
@@ -1733,13 +1755,12 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1733
1755
  acc += input * XXH_PRIME32_2;
1734
1756
  acc = XXH_rotl32(acc, 13);
1735
1757
  acc *= XXH_PRIME32_1;
1736
- #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1758
+ #if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1737
1759
  /*
1738
1760
  * UGLY HACK:
1739
- * This inline assembly hack forces acc into a normal register. This is the
1740
- * only thing that prevents GCC and Clang from autovectorizing the XXH32
1741
- * loop (pragmas and attributes don't work for some reason) without globally
1742
- * disabling SSE4.1.
1761
+ * A compiler fence is the only thing that prevents GCC and Clang from
1762
+ * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
1763
+ * reason) without globally disabling SSE4.1.
1743
1764
  *
1744
1765
  * The reason we want to avoid vectorization is because despite working on
1745
1766
  * 4 integers at a time, there are multiple factors slowing XXH32 down on
@@ -1764,22 +1785,11 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1764
1785
  * can load data, while v3 can multiply. SSE forces them to operate
1765
1786
  * together.
1766
1787
  *
1767
- * How this hack works:
1768
- * __asm__("" // Declare an assembly block but don't declare any instructions
1769
- * : // However, as an Input/Output Operand,
1770
- * "+r" // constrain a read/write operand (+) as a general purpose register (r).
1771
- * (acc) // and set acc as the operand
1772
- * );
1773
- *
1774
- * Because of the 'r', the compiler has promised that seed will be in a
1775
- * general purpose register and the '+' says that it will be 'read/write',
1776
- * so it has to assume it has changed. It is like volatile without all the
1777
- * loads and stores.
1778
- *
1779
- * Since the argument has to be in a normal register (not an SSE register),
1780
- * each time XXH32_round is called, it is impossible to vectorize.
1788
+ * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
1789
+ * and it is pointless writing a NEON implementation that is basically the
1790
+ * same speed as scalar for XXH32.
1781
1791
  */
1782
- __asm__("" : "+r" (acc));
1792
+ XXH_COMPILER_GUARD(acc);
1783
1793
  #endif
1784
1794
  return acc;
1785
1795
  }
@@ -1910,7 +1920,7 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1910
1920
  XXH_FORCE_INLINE xxh_u32
1911
1921
  XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
1912
1922
  {
1913
- const xxh_u8* bEnd = input + len;
1923
+ const xxh_u8* bEnd = input ? input + len : NULL;
1914
1924
  xxh_u32 h32;
1915
1925
 
1916
1926
  #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
@@ -2134,35 +2144,6 @@ typedef XXH64_hash_t xxh_u64;
2134
2144
  # define U64 xxh_u64
2135
2145
  #endif
2136
2146
 
2137
- /*!
2138
- * XXH_REROLL_XXH64:
2139
- * Whether to reroll the XXH64_finalize() loop.
2140
- *
2141
- * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a
2142
- * performance gain on 64-bit hosts, as only one jump is required.
2143
- *
2144
- * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit
2145
- * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial
2146
- * to unroll. The code becomes ridiculously large (the largest function in the
2147
- * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is
2148
- * also slightly faster because it fits into cache better and is more likely
2149
- * to be inlined by the compiler.
2150
- *
2151
- * If XXH_REROLL is defined, this is ignored and the loop is always rerolled.
2152
- */
2153
- #ifndef XXH_REROLL_XXH64
2154
- # if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
2155
- || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
2156
- || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
2157
- || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
2158
- || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
2159
- || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
2160
- # define XXH_REROLL_XXH64 1
2161
- # else
2162
- # define XXH_REROLL_XXH64 0
2163
- # endif
2164
- #endif /* !defined(XXH_REROLL_XXH64) */
2165
-
2166
2147
  #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2167
2148
  /*
2168
2149
  * Manual byteshift. Best for old compilers which don't inline memcpy.
@@ -2285,11 +2266,12 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align)
2285
2266
  * @ingroup impl
2286
2267
  * @{
2287
2268
  */
2288
- static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL; /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2289
- static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2290
- static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL; /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2291
- static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2292
- static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL; /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2269
+ /* #define rather that static const, to be used as initializers */
2270
+ #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2271
+ #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2272
+ #define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2273
+ #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2274
+ #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2293
2275
 
2294
2276
  #ifdef XXH_OLD_NAMES
2295
2277
  # define PRIME64_1 XXH_PRIME64_1
@@ -2331,126 +2313,26 @@ static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2331
2313
  static xxh_u64
2332
2314
  XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2333
2315
  {
2334
- #define XXH_PROCESS1_64 do { \
2335
- h64 ^= (*ptr++) * XXH_PRIME64_5; \
2336
- h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; \
2337
- } while (0)
2338
-
2339
- #define XXH_PROCESS4_64 do { \
2340
- h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; \
2341
- ptr += 4; \
2342
- h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; \
2343
- } while (0)
2344
-
2345
- #define XXH_PROCESS8_64 do { \
2346
- xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
2347
- ptr += 8; \
2348
- h64 ^= k1; \
2349
- h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; \
2350
- } while (0)
2351
-
2352
- /* Rerolled version for 32-bit targets is faster and much smaller. */
2353
- if (XXH_REROLL || XXH_REROLL_XXH64) {
2354
- len &= 31;
2355
- while (len >= 8) {
2356
- XXH_PROCESS8_64;
2357
- len -= 8;
2358
- }
2359
- if (len >= 4) {
2360
- XXH_PROCESS4_64;
2361
- len -= 4;
2362
- }
2363
- while (len > 0) {
2364
- XXH_PROCESS1_64;
2365
- --len;
2366
- }
2367
- return XXH64_avalanche(h64);
2368
- } else {
2369
- switch(len & 31) {
2370
- case 24: XXH_PROCESS8_64;
2371
- /* fallthrough */
2372
- case 16: XXH_PROCESS8_64;
2373
- /* fallthrough */
2374
- case 8: XXH_PROCESS8_64;
2375
- return XXH64_avalanche(h64);
2376
-
2377
- case 28: XXH_PROCESS8_64;
2378
- /* fallthrough */
2379
- case 20: XXH_PROCESS8_64;
2380
- /* fallthrough */
2381
- case 12: XXH_PROCESS8_64;
2382
- /* fallthrough */
2383
- case 4: XXH_PROCESS4_64;
2384
- return XXH64_avalanche(h64);
2385
-
2386
- case 25: XXH_PROCESS8_64;
2387
- /* fallthrough */
2388
- case 17: XXH_PROCESS8_64;
2389
- /* fallthrough */
2390
- case 9: XXH_PROCESS8_64;
2391
- XXH_PROCESS1_64;
2392
- return XXH64_avalanche(h64);
2393
-
2394
- case 29: XXH_PROCESS8_64;
2395
- /* fallthrough */
2396
- case 21: XXH_PROCESS8_64;
2397
- /* fallthrough */
2398
- case 13: XXH_PROCESS8_64;
2399
- /* fallthrough */
2400
- case 5: XXH_PROCESS4_64;
2401
- XXH_PROCESS1_64;
2402
- return XXH64_avalanche(h64);
2403
-
2404
- case 26: XXH_PROCESS8_64;
2405
- /* fallthrough */
2406
- case 18: XXH_PROCESS8_64;
2407
- /* fallthrough */
2408
- case 10: XXH_PROCESS8_64;
2409
- XXH_PROCESS1_64;
2410
- XXH_PROCESS1_64;
2411
- return XXH64_avalanche(h64);
2412
-
2413
- case 30: XXH_PROCESS8_64;
2414
- /* fallthrough */
2415
- case 22: XXH_PROCESS8_64;
2416
- /* fallthrough */
2417
- case 14: XXH_PROCESS8_64;
2418
- /* fallthrough */
2419
- case 6: XXH_PROCESS4_64;
2420
- XXH_PROCESS1_64;
2421
- XXH_PROCESS1_64;
2422
- return XXH64_avalanche(h64);
2423
-
2424
- case 27: XXH_PROCESS8_64;
2425
- /* fallthrough */
2426
- case 19: XXH_PROCESS8_64;
2427
- /* fallthrough */
2428
- case 11: XXH_PROCESS8_64;
2429
- XXH_PROCESS1_64;
2430
- XXH_PROCESS1_64;
2431
- XXH_PROCESS1_64;
2432
- return XXH64_avalanche(h64);
2433
-
2434
- case 31: XXH_PROCESS8_64;
2435
- /* fallthrough */
2436
- case 23: XXH_PROCESS8_64;
2437
- /* fallthrough */
2438
- case 15: XXH_PROCESS8_64;
2439
- /* fallthrough */
2440
- case 7: XXH_PROCESS4_64;
2441
- /* fallthrough */
2442
- case 3: XXH_PROCESS1_64;
2443
- /* fallthrough */
2444
- case 2: XXH_PROCESS1_64;
2445
- /* fallthrough */
2446
- case 1: XXH_PROCESS1_64;
2447
- /* fallthrough */
2448
- case 0: return XXH64_avalanche(h64);
2449
- }
2316
+ len &= 31;
2317
+ while (len >= 8) {
2318
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2319
+ ptr += 8;
2320
+ h64 ^= k1;
2321
+ h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2322
+ len -= 8;
2323
+ }
2324
+ if (len >= 4) {
2325
+ h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2326
+ ptr += 4;
2327
+ h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2328
+ len -= 4;
2329
+ }
2330
+ while (len > 0) {
2331
+ h64 ^= (*ptr++) * XXH_PRIME64_5;
2332
+ h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2333
+ --len;
2450
2334
  }
2451
- /* impossible to reach */
2452
- XXH_ASSERT(0);
2453
- return 0; /* unreachable, but some compilers complain without it */
2335
+ return XXH64_avalanche(h64);
2454
2336
  }
2455
2337
 
2456
2338
  #ifdef XXH_OLD_NAMES
@@ -2466,7 +2348,7 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2466
2348
  XXH_FORCE_INLINE xxh_u64
2467
2349
  XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2468
2350
  {
2469
- const xxh_u8* bEnd = input + len;
2351
+ const xxh_u8* bEnd = input ? input + len : NULL;
2470
2352
  xxh_u64 h64;
2471
2353
 
2472
2354
  #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
@@ -2664,7 +2546,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
2664
2546
  return XXH_readBE64(src);
2665
2547
  }
2666
2548
 
2667
-
2549
+ #ifndef XXH_NO_XXH3
2668
2550
 
2669
2551
  /* *********************************************************************
2670
2552
  * XXH3
@@ -2679,7 +2561,9 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
2679
2561
 
2680
2562
  /* === Compiler specifics === */
2681
2563
 
2682
- #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
2564
+ #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2565
+ # define XXH_RESTRICT /* disable */
2566
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
2683
2567
  # define XXH_RESTRICT restrict
2684
2568
  #else
2685
2569
  /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
@@ -3441,7 +3325,7 @@ XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_h
3441
3325
  {
3442
3326
  XXH_ASSERT(input != NULL);
3443
3327
  XXH_ASSERT(secret != NULL);
3444
- XXH_ASSERT(4 <= len && len < 8);
3328
+ XXH_ASSERT(4 <= len && len <= 8);
3445
3329
  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3446
3330
  { xxh_u32 const input1 = XXH_readLE32(input);
3447
3331
  xxh_u32 const input2 = XXH_readLE32(input + len - 4);
@@ -3457,7 +3341,7 @@ XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_
3457
3341
  {
3458
3342
  XXH_ASSERT(input != NULL);
3459
3343
  XXH_ASSERT(secret != NULL);
3460
- XXH_ASSERT(8 <= len && len <= 16);
3344
+ XXH_ASSERT(9 <= len && len <= 16);
3461
3345
  { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3462
3346
  xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3463
3347
  xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
@@ -3527,7 +3411,7 @@ XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3527
3411
  * GCC generates much better scalar code than Clang for the rest of XXH3,
3528
3412
  * which is why finding a more optimal codepath is an interest.
3529
3413
  */
3530
- __asm__ ("" : "+r" (seed64));
3414
+ XXH_COMPILER_GUARD(seed64);
3531
3415
  #endif
3532
3416
  { xxh_u64 const input_lo = XXH_readLE64(input);
3533
3417
  xxh_u64 const input_hi = XXH_readLE64(input+8);
@@ -3871,12 +3755,8 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTR
3871
3755
  * On GCC & Clang, marking 'dest' as modified will cause the compiler:
3872
3756
  * - do not extract the secret from sse registers in the internal loop
3873
3757
  * - use less common registers, and avoid pushing these reg into stack
3874
- * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with
3875
- * customSecret, and on aarch64, this prevented LDP from merging two
3876
- * loads together for free. Putting the loads together before the stores
3877
- * properly generates LDP.
3878
3758
  */
3879
- __asm__("" : "+r" (dest));
3759
+ XXH_COMPILER_GUARD(dest);
3880
3760
  # endif
3881
3761
 
3882
3762
  /* GCC -O2 need unroll loop manually */
@@ -3985,7 +3865,7 @@ XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTR
3985
3865
  * - do not extract the secret from sse registers in the internal loop
3986
3866
  * - use less common registers, and avoid pushing these reg into stack
3987
3867
  */
3988
- __asm__("" : "+r" (dest));
3868
+ XXH_COMPILER_GUARD(dest);
3989
3869
  # endif
3990
3870
 
3991
3871
  for (i=0; i < nbRounds; ++i) {
@@ -4231,7 +4111,7 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4231
4111
  * without hack: 2654.4 MB/s
4232
4112
  * with hack: 3202.9 MB/s
4233
4113
  */
4234
- __asm__("" : "+r" (kSecretPtr));
4114
+ XXH_COMPILER_GUARD(kSecretPtr);
4235
4115
  #endif
4236
4116
  /*
4237
4117
  * Note: in debug mode, this overrides the asm optimization
@@ -4396,7 +4276,7 @@ XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secre
4396
4276
  * without hack: 2063.7 MB/s
4397
4277
  * with hack: 2560.7 MB/s
4398
4278
  */
4399
- __asm__("" : "+r" (result64));
4279
+ XXH_COMPILER_GUARD(result64);
4400
4280
  #endif
4401
4281
  }
4402
4282
 
@@ -5432,6 +5312,8 @@ XXH128_hashFromCanonical(const XXH128_canonical_t* src)
5432
5312
 
5433
5313
  #endif /* XXH_NO_LONG_LONG */
5434
5314
 
5315
+ #endif /* XXH_NO_XXH3 */
5316
+
5435
5317
  /*!
5436
5318
  * @}
5437
5319
  */